diff --git a/.github/dependabot.yml b/.github/dependabot.yml index 40f23173..9463fb9e 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -6,6 +6,6 @@ version: 2 updates: - package-ecosystem: "cargo" - directory: "/" + directory: "/rust" schedule: interval: "weekly" diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 982b2960..add1e2be 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -25,6 +25,8 @@ jobs: os: ubuntu-latest - name: macOS os: macos-latest + - name: Windows + os: windows-latest steps: - uses: actions/checkout@v5 @@ -34,16 +36,20 @@ jobs: rustflags: "" - name: Cargo tests + working-directory: rust run: cargo test - name: Cargo tests (RP and JEM) + if: runner.os != 'Windows' + working-directory: rust run: | cargo test --features rp cargo test --features jem python_tests: name: Python tests (${{ matrix.name }}) - needs: rust_tests + needs: + - rust_tests runs-on: ${{ matrix.os }} timeout-minutes: 45 strategy: @@ -56,6 +62,9 @@ jobs: - name: macOS os: macos-latest skip_rpi_test: 1 + - name: Windows + os: windows-latest + skip_rpi_test: 1 env: DO_DOCKER: 0 SKIP_RPI_TEST: ${{ matrix.skip_rpi_test }} @@ -72,7 +81,7 @@ jobs: with: python-version: "3.12" cache: "pip" - cache-dependency-path: open-codegen/setup.py + cache-dependency-path: python/pyproject.toml - uses: egor-tensin/setup-clang@v1 if: runner.os == 'Linux' @@ -102,6 +111,69 @@ jobs: if: runner.os == 'macOS' run: bash ./ci/script.sh python-tests + - name: Install Python package + if: runner.os == 'Windows' + run: | + Set-Location python + python -m pip install --upgrade pip + python -m pip install . + + - name: Run Python test.py + if: runner.os == 'Windows' + run: | + Set-Location python + $env:PYTHONPATH = "." 
+ python -W ignore test/test.py -v + + ros2_tests: + name: ROS2 tests + needs: python_tests + runs-on: ubuntu-latest + timeout-minutes: 45 + container: + image: ubuntu:noble + options: --user 0 + env: + DO_DOCKER: 0 + steps: + - uses: actions/checkout@v5 + + - name: Install container bootstrap dependencies + run: | + apt-get update + DEBIAN_FRONTEND=noninteractive apt-get install -y \ + build-essential \ + cmake \ + curl \ + ca-certificates \ + git \ + gnupg2 \ + locales \ + lsb-release + + - uses: actions-rust-lang/setup-rust-toolchain@v1 + with: + toolchain: stable + rustflags: "" + + - uses: actions/setup-python@v6 + with: + python-version: "3.12" + + - name: Setup ROS 2 + # `ros-tooling/setup-ros@v0.7` still runs as a Node.js 20 action. + # Force it onto Node 24 now so CI keeps working as GitHub deprecates + # Node 20, and upgrade `setup-ros` to a Node 24-compatible release + # when one becomes available. + env: + FORCE_JAVASCRIPT_ACTIONS_TO_NODE24: true + uses: ros-tooling/setup-ros@v0.7 + with: + required-ros-distributions: jazzy + + - name: Run ROS2 Python tests + run: bash ./ci/script.sh ros2-tests + ocp_tests: name: OCP tests (${{ matrix.name }}) needs: python_tests @@ -115,6 +187,8 @@ jobs: os: ubuntu-latest - name: macOS os: macos-latest + - name: Windows + os: windows-latest env: DO_DOCKER: 0 steps: @@ -129,7 +203,22 @@ jobs: with: python-version: "3.12" cache: "pip" - cache-dependency-path: open-codegen/setup.py + cache-dependency-path: python/pyproject.toml - name: Run OCP Python tests + if: runner.os != 'Windows' run: bash ./ci/script.sh ocp-tests + + - name: Install Python package + if: runner.os == 'Windows' + run: | + Set-Location python + python -m pip install --upgrade pip + python -m pip install . + + - name: Run OCP Python tests + if: runner.os == 'Windows' + run: | + Set-Location python + $env:PYTHONPATH = "." 
+ python -W ignore test/test_ocp.py -v diff --git a/.github/workflows/clippy.yml b/.github/workflows/clippy.yml index aa1efe3d..594af46e 100644 --- a/.github/workflows/clippy.yml +++ b/.github/workflows/clippy.yml @@ -8,14 +8,11 @@ jobs: clippy_check: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2 - - uses: actions-rs/toolchain@v1 + - uses: actions/checkout@v5 + - uses: actions-rust-lang/setup-rust-toolchain@v1 with: - profile: minimal toolchain: stable - override: true components: clippy - - uses: actions-rs/clippy-check@v1 - with: - token: ${{ secrets.GITHUB_TOKEN }} - args: --examples + - name: Cargo clippy + working-directory: rust + run: cargo clippy --all-targets --all-features --examples diff --git a/.github/workflows/rustfmt.yml b/.github/workflows/rustfmt.yml index 102f0e3d..aa7e8c7c 100644 --- a/.github/workflows/rustfmt.yml +++ b/.github/workflows/rustfmt.yml @@ -10,14 +10,11 @@ jobs: name: Rustfmt runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2 - - uses: actions-rs/toolchain@v1 + - uses: actions/checkout@v5 + - uses: actions-rust-lang/setup-rust-toolchain@v1 with: - profile: minimal toolchain: stable - override: true components: rustfmt - - uses: actions-rs/cargo@v1 - with: - command: fmt - args: --all -- --check + - name: Cargo fmt + working-directory: rust + run: cargo fmt --all -- --check diff --git a/.gitignore b/.gitignore index 19ddedec..67b5085e 100644 --- a/.gitignore +++ b/.gitignore @@ -1,77 +1,48 @@ -# Generated by Cargo -# will have compiled files and executables -.eggs -.idea -TODOS/ +# Rust +.cargo /target target/ -.cargo -*.odp# -*.egg-info -*.pyc -open-codegen/opengen/icasadi/extern/Makefile -virt -data -my_optimizers -.python_test_build -dist/ - -# Python tests create this folder: -open-codegen/opengen/.python_test_build/ - -# Haphazard files: -__main.rs -design/open-card.odp -design/open-interfaces.jpeg -design/open-interfaces.odp -design/open-interfaces.png -main.rs -open-codegen/run/ -open-codegen/venv* - - 
-# Remove Cargo.lock from gitignore if creating an executable, leave it for libraries -# More information here https://doc.rust-lang.org/cargo/guide/cargo-toml-vs-cargo-lock.html -/Cargo.lock +/rust/Cargo.lock open-clib/Cargo.lock - -# These are backup files generated by rustfmt *.rs.bk -# Docusaurus website (autogenerated files) -/website/i18n/ -/website/node_modules/ -/website/yarn.lock -/website/build/ -/website/.docusaurus/ -!/website/pages/ -!/website/static/ -!/website/*.js -!/website/blog/ -!/website/core/Footer.js -!/website/README.md -!/website/publish.sh +# Python +virt -# icasadi external files -/icasadi/extern/ +# Docs / website +/docs/website/i18n/ +/docs/website/node_modules/ +/docs/website/yarn.lock +/docs/website/build/ +/docs/website/.docusaurus/ -# Visual studio code files +# Editors / local state +.idea /.vscode/ +.DS_Store -# Main file -/src/main.rs - -# Autogenerated files (builds) +# Project-local scratch files +TODOS/ +data +*.odp# +__main.rs +main.rs +/rust/src/main.rs /autogenerated_* /build/ !/build/README.md -# Other files +# Design assets that should stay local +design/open-card.odp +design/open-interfaces.jpeg +design/open-interfaces.odp +design/open-interfaces.png /design/panoc-only.png + +# Misc temporary files *.autosave *~ .#* -*private* -.DS_Store -/img/ + +# Private work area private/ diff --git a/CHANGELOG.md b/CHANGELOG.md index f3933a30..a9626dee 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,373 +1,61 @@ -# Change Log +# Changelog -All notable changes to this project will be documented in this file. +This file is the project-wide changelog for **Optimization Engine (OpEn)**. -The format is based on [Keep a Changelog](http://keepachangelog.com/) -and this project adheres to [Semantic Versioning](http://semver.org/). +It serves as the entrypoint for releases across the repository and points to the +component-specific changelogs, where the detailed changes for each module are +recorded. 
-Note: This is the main Changelog file for the Rust solver. The Changelog file for the Python interface (`opengen`) can be found in [/open-codegen/CHANGELOG.md](open-codegen/CHANGELOG.md) +## Component Changelogs +- [Rust library changelog](rust/CHANGELOG.md) +- [Python interface (`opengen`) changelog](python/CHANGELOG.md) +- [MATLAB interface changelog](matlab/CHANGELOG.md) +- [Docker image changelog](docker/CHANGELOG.md) - -## [v0.11.1] - 2026-03-23 +## How To Read This Changelog +- Use this file for a high-level overview of changes across the whole project. +- Use the component changelogs above for detailed release notes, migration + notes, and module-specific fixes. +- Not every repository change appears here. Day-to-day implementation details + usually live only in the relevant component changelog files. -### Fixed +## Versioning Notes -- Return best PANOC half-step on early exit (issue #325) +OpEn is a multi-component project, and not every part evolves on the same +version number or schedule. +- The Rust solver has its own release history in [`rust/CHANGELOG.md`](rust/CHANGELOG.md). +- The Python package `opengen` has its own release history in + [`python/CHANGELOG.md`](python/CHANGELOG.md). +- The MATLAB interface and Docker image also track their changes separately. - -## [v0.11.0] - 2026-03-14 +When a release affects multiple parts of the repository, this root changelog can +be used to summarize the release and point readers to the detailed component +entries. 
-### Added +## Current Development Snapshot -- Implementation of `BallP` in Rust: projection on lp-ball +At the time of writing, the main ongoing release tracks include: -### Changed +- Rust library: `0.12.0` +- Python interface (`opengen`): `0.11.0` +- MATLAB interface: `0.1.0` +- Docker image: `0.7.0` -- Algorithmic improvements in `EpigraphSquaredNorm` (numerically stable Newton refinement) and more detailed docs -- Assertion in `Ball1`, `Ball2`, and `BallInf` to check that that dimensions of `x` and `xc` are compatible (with unit test) -- Added validation in `Rectangle` and `Hyperplane` for invalid constructor inputs and strengthened dimension checks in hyperplane projection -- Added validation in `Sphere2` for empty inputs and incompatible center dimensions -- Added validation in `FiniteSet` for incompatible projection dimensions and corrected convexity detection for singleton sets -- Added unit tests for invalid `Rectangle`, `Simplex`, `Hyperplane`, `Sphere2`, and `FiniteSet` edge cases +Please consult the linked component changelogs for the exact release state of +each module. -### Fixed +## Repository Layout -- Typos and doctest annotations in docs of `CartesianProduct` (in Rust), `Rectangle`, and `Hyperplane`, with more detailed documentation +The main components live in: +- [rust/](rust/) +- [python/](python/) +- [matlab/](matlab/) +- [docker/](docker/) +- [docs/](docs/) - -## [v0.10.0] - 2026-03-10 - -### Added - -- Custom implementation of Cholesky factorisation (and solver); this is used in `AffineSpace` now. 
-- New function in `matrix_operations` to compute AA' given a matrix A - -### Changed - -- Update version of `ndarray`, in `Cargo.toml` -- Removed `modcholesky` because it was causing a bug (see issue #378) - - -## [v0.9.1] - 2024-08-14 - - -### Fixed - -- Order of dependencies in `Cargo.toml` fixes crate-not-found issue on Windows - - - - -## [v0.9.0] - 2024-03-20 - -### Added - -- Rust implementation of epigraph of squared Euclidean norm (constraint) -- Implementation of `AffineSpace` - -### Fixed - -- Clippy fixes - - -## [v0.8.1] - 2023-10-27 - -### Fixed - -- Fix bug in implementation of `ball2.rs` (radius was being ignored for balls centered not at the origin) - - - - -## [v0.8.1] - 2023-10-27 - -### Added - -- New constraint: sphere of Euclidean norm - - -## [v0.7.7] - 2023-01-17 - -### Fixed - -- Change `time::Instant` to `instant::Instant` to support WASM - - - - -## [v0.7.6] - 2022-10-11 - -### Added - -- Update functions in `AlmOptimizerStatus` - - - - - -## [v0.7.5] - 2022-06-22 - -### Fixed - -- Fixed estimation of initial Lipschitz constant, `L`, when it is close to or equal to zero (e.g., Huber loss function) -- Fixed issue in `AlmFactory` related to (F2) penalty constraints - - - -## [v0.7.4] - 2021-11-15 - -### Added - -- Optional feature `wasm` in `Cargo.toml` (WebAssembly support); see https://alphaville.github.io/optimization-engine/docs/openrust-features for details -- Using `instant::Instant` instead of `std::Instant` (Wasm-compatible) -- Fixed Rust documentation of `Ball1` - - -## [v0.7.3] - 2021-11-1 - -### Added - -* Implementation of Simplex and Ball1 constraints in Rust -* Fix issue with simultaneous use of features `jem` and `rp` - - - -## [v0.7.2] - 2021-10-27 - -### Changed - -* Removed unnecessary `#[no_mangle]` annotations -* Took care of additional clippy warnings -* Bump versions: `cbindgen`: `0.8 --> 0.20` and `libc`: `0.2.0 -> 0.2.*` - -### Added - -* Support for [`rpmalloc`](https://github.com/EmbarkStudios/rpmalloc-rs) and 
[`jemalloc`](https://github.com/gnzlbg/jemallocator) using the features `jem` and `rp` - - - - - -## [v0.7.1] - 2020-09-04 - -### Added - -* Introduced `Halfspace` (implemented and tested) -* Introduced `Hyperplane` (implemented and tested) -* New types: `FunctionCallResult`, `MappingType` and `JacobianMappingType` -* Various clippy-related code improvements - - - -## [v0.7.0] - 2020-05-04 - - -### Added - -* ALM: compute cost value at solution - - - - -## [v0.6.2] - 2019-10-29 - -### Fixed - -* Bug in codegen for Cartesian products (PR #147) -* Removed the use of `Default` in Rust (does not work for large slices) -* Python: fixed typo in method `with_lfbgs_memory` - -### Added - -* New support for C-to-Rust interface via bindgen -* Generation of example C code for C-to-Rust interface -* CMakeLists for auto-generated example in C -* Additional Python examples on web page -* Chat button in web page (for gitter) -* Added option `local_path` in `with_open_version` - -### Changed - -* Homotopy module in Rust is annotated as deprecated -* TCP server response is cast into Python objects (PR #144) -* Auto-generated code links to most recent crate, unless overriden -* Changed `jacobian` to `gradient` in Python - -## [v0.6.1-alpha.2] - 2019-09-7 - -### Fixed - -* TCP server: Malformed error JSON is now fixed -* Algorithm now returns `u_bar`, which is feasible (not `u`) - -### Added - -* Introduced C interface to CasADi-generated C functions -* Rust and Python implementations of joint ALM/PM algorithms -* Rust docs for augmented Lagrangian method (ALM) -* Release of crate version `0.6.1-alpha.1` and `0.6.1-alpha.2` -* Introduced `#![allow(dead_code)]` in ALM implementation -* New AKKT-compliant termination criterion -* Tolerance relaxation in penalty method -* Finite sets supported in Rust -* Rust/Python: setting CBFGS parameters -* Second-order cones supported in Rust -* Rust docs: support for equations with KaTeX - -### Changed - -* Updated README - - -### Removed - -* 
Support for Python <3.6 (deprecated) -* Module `continuation` is going to become obsolete - - -## [v0.5.0] - 2019-06-22 - -### Fixed - -* Fixed `with_max_duration` in `PANOC` not following the builder pattern -* Fixed misplaced `.unwrap()` in the `HomotopyOptimizer` -* Fixed so the Python builder uses the current directory as default - -### Added - -* Generation of C/C++ bindings added in the Python interface and included in the test suite -* Support in Rust for Cartesian product of constraints - -### Removed - -* Deprecated: `enable_tcp_interface` and `enable_c_bindings_generation` - - - -## [v0.4.0] - 2019-06-03 - -### Fixed - -* Windows interoperability of `matlab_open_root()` [closes #24] -* Issues with file separator on Windows [#26 and #27] -* Handling corner cases such as wrong input parameters -* Rust: checking for `NaN` and `Inf` values in solution - -### Added - -* New Python interface for code generation (works with Python 2.7, 3.4 and 3.6) -* Homotopy method implemented in Rust -* TCP interface in Rust is generated automatically on request -* Support for OSX and linux distros on [travis] [closes #25] -* Continuous integration on [Appveyor] -* Experimental C bindings library -* Documentation for new Rust code and Python code -* Unit tests in Python using `unittest` - -### Changed - -* Rust API: Using `Option<>` and `Result<>` to handle errors -* Updated L-BFGS dependency; now using version `0.2` (no NonZeroUsize) - - -## [v0.3.1] - 2019-05-21 - -### Fixed - -* An error in the Matlab codegen which made it inoperable - -### Added - -* Support for compiling for different targets - - - -## [v0.3.0] - 2019-05-16 - -This is a breaking API change. 
- -### Fixed - -* A lot of internal fixes and clean up -* `PANOCEngine` and `FBSEngine` is no longer explicitly needed -* Simplified import system -* Cost functions now need to return a `Result<(), Error>` to indicate if the evaluation was successful - -### Added - -* Started an `examples` folder - - - - -[v0.11.1]: https://github.com/alphaville/optimization-engine/compare/v0.11.0...v0.11.1 -[v0.11.0]: https://github.com/alphaville/optimization-engine/compare/v0.10.0...v0.11.0 -[v0.10.0]: https://github.com/alphaville/optimization-engine/compare/v0.9.1...v0.10.0 -[v0.9.1]: https://github.com/alphaville/optimization-engine/compare/v0.9.0...v0.9.1 -[v0.9.0]: https://github.com/alphaville/optimization-engine/compare/v0.8.1...v0.9.0 -[v0.8.1]: https://github.com/alphaville/optimization-engine/compare/v0.8.0...v0.8.1 -[v0.8.0]: https://github.com/alphaville/optimization-engine/compare/v0.7.7...v0.8.0 -[v0.7.7]: https://github.com/alphaville/optimization-engine/compare/v0.7.6...v0.7.7 -[v0.7.6]: https://github.com/alphaville/optimization-engine/compare/v0.7.5...v0.7.6 -[v0.7.5]: https://github.com/alphaville/optimization-engine/compare/v0.7.4...v0.7.5 -[v0.7.4]: https://github.com/alphaville/optimization-engine/compare/v0.7.3...v0.7.4 -[v0.7.3]: https://github.com/alphaville/optimization-engine/compare/v0.7.2...v0.7.3 -[v0.7.2]: https://github.com/alphaville/optimization-engine/compare/v0.7.1...v0.7.2 -[v0.7.1]: https://github.com/alphaville/optimization-engine/compare/v0.7.0...v0.7.1 -[v0.7.0]: https://github.com/alphaville/optimization-engine/compare/v0.6.2...v0.7.0 -[v0.6.2]: https://github.com/alphaville/optimization-engine/compare/v0.6.1-alpha.2...v0.6.2 -[v0.6.1-alpha.2]: https://github.com/alphaville/optimization-engine/compare/v0.5.0...v0.6.1-alpha.2 -[v0.5.0]: https://github.com/alphaville/optimization-engine/compare/v0.4.0...v0.5.0 -[v0.4.0]: https://github.com/alphaville/optimization-engine/compare/v0.3.1...v0.4.0 -[v0.3.1]: 
https://github.com/alphaville/optimization-engine/compare/v0.3.0...v0.3.1 -[v0.3.0]: https://github.com/alphaville/optimization-engine/compare/v0.2.2...v0.3.0 - - -[closes #24]: https://github.com/alphaville/optimization-engine/issues/24 -[closes #25]: https://github.com/alphaville/optimization-engine/issues/25 - - -[travis]: https://travis-ci.org/alphaville/optimization-engine/builds/537155440 -[Appveyor]: https://ci.appveyor.com/project/alphaville/optimization-engine +This changelog is intentionally lightweight, so it can remain a stable landing +page even as individual components evolve independently. diff --git a/README.md b/README.md index d4f5bb43..1cb95b5d 100644 --- a/README.md +++ b/README.md @@ -66,7 +66,7 @@ OpEn allows application developers and researchers to focus on the challenges of ### Embedded applications OpEn can run on embedded devices; here we see it running on an intel Atom for the autonomous navigation of a lab-scale micro aerial vehicle - the controller runs at **20Hz** using only **15%** CPU! -Fast NMPC of MAV +Fast NMPC of MAV ## Optimal Control @@ -196,7 +196,7 @@ let status = optimizer.solve(&mut u)?; ``` See the dedicated [Rust documentation](https://alphaville.github.io/optimization-engine/docs/openrust-basic) for a full introduction and more complete examples. -See more Rust examples [here](examples). +See more Rust examples [here](rust/examples). ## Check out next... diff --git a/appveyor.yml b/appveyor.yml deleted file mode 100644 index e91d5115..00000000 --- a/appveyor.yml +++ /dev/null @@ -1,74 +0,0 @@ -#This Appveyor configuration file is based on the configuration -#file of the following project: -# -#https://github.com/starkat99/appveyor-rust/ -# - -## Operating System (VM environment) ## - -# Rust needs at least Visual Studio 2013 Appveyor OS for MSVC targets. 
-os: Visual Studio 2015 - - -environment: - matrix: - -### MSVC Toolchains ### - - # Stable 64-bit MSVC - - channel: stable - target: x86_64-pc-windows-msvc - # Stable 32-bit MSVC - # - channel: stable - # target: i686-pc-windows-msvc - -### GNU Toolchains ### - - # Stable 64-bit GNU - # - channel: stable - # target: x86_64-pc-windows-gnu - # Stable 32-bit GNU - # - channel: stable - # target: i686-pc-windows-gnu - -### Allowed failures ### - -# See Appveyor documentation for specific details. In short, place any channel or targets you wish -# to allow build failures on (usually nightly at least is a wise choice). This will prevent a build -# or test failure in the matching channels/targets from failing the entire build. -#matrix: -# allow_failures: -# - channel: nightly - -# If you only care about stable channel build failures, uncomment the following line: - #- channel: beta - -## Install Script ## - -# This is the most important part of the Appveyor configuration. This installs the version of Rust -# specified by the 'channel' and 'target' environment variables from the build matrix. This uses -# rustup to install Rust. -# -# For simple configurations, instead of using the build matrix, you can simply set the -# default-toolchain and default-host manually here. -install: - - appveyor DownloadFile https://win.rustup.rs/ -FileName rustup-init.exe - - rustup-init -yv --default-toolchain %channel% --default-host %target% - - set PATH=%PATH%;%USERPROFILE%\.cargo\bin - - rustc -vV - - cargo -vV - -## Build Script ## - -# 'cargo test' takes care of building for us, so disable Appveyor's build stage. This prevents -# the "directory does not contain a project or solution file" error. -build: false - -# Uses 'cargo test' to run tests and build. Alternatively, the project may call compiled programs -#directly or perform other testing commands. Rust will automatically be placed in the PATH -# environment variable. 
-test_script: - - cargo add roots - - cargo add ndarray --features approx - - cargo build - - cargo test --verbose %cargoflags% diff --git a/build/README.md b/build/README.md deleted file mode 100644 index dd1346dd..00000000 --- a/build/README.md +++ /dev/null @@ -1,3 +0,0 @@ -# Build folder - -This folder is used (by MATLAB and Python) as the default destination to store auto-generated optimizers. diff --git a/ci/script.sh b/ci/script.sh index 18f1f2b7..cb599288 100755 --- a/ci/script.sh +++ b/ci/script.sh @@ -1,7 +1,19 @@ #!/bin/bash set -euxo pipefail +# To use locally, from the root directory and from a bash shell... +# +# 1. To run the core Python tests: +# ci/script.sh python-tests +# +# 2. To run the Python OCP tests: +# ci/script.sh ocp-tests +# +# 3. To run the Python ROS2 tests: +# ci/script.sh ros2-tests + SKIP_RPI_TEST="${SKIP_RPI_TEST:-0}" +DO_DOCKER="${DO_DOCKER:-0}" TASK="${1:-all-python-tests}" function run_clippy_test() { @@ -25,7 +37,7 @@ function run_clippy_test() { } setup_python_test_env() { - cd open-codegen + cd python export PYTHONPATH=. python -m venv venv @@ -60,6 +72,35 @@ run_python_core_tests() { generated_clippy_tests } +run_python_ros2_tests() { + export PYTHONPATH=. + set +u + if [ -n "${ROS_DISTRO:-}" ] && [ -f "/opt/ros/${ROS_DISTRO}/setup.bash" ]; then + # setup-ros installs the ROS underlay but does not source it for our shell + source "/opt/ros/${ROS_DISTRO}/setup.bash" + elif [ -f "/opt/ros/jazzy/setup.bash" ]; then + source "/opt/ros/jazzy/setup.bash" + else + set -u + echo "ROS2 environment setup script not found" + exit 1 + fi + set -u + + if ! python -c "import em, lark, catkin_pkg" >/dev/null 2>&1; then + # ROS2 build helpers run under the active Python interpreter. The test venv + # already has NumPy from `pip install .`, but we also need the ROS-side + # Python packages used during interface and package metadata generation. 
+ # Empy 4 has broken older ROS message generators in the past, so keep it + # on the 3.x API here. + python -m pip install "empy<4" lark catkin_pkg + fi + + command -v ros2 >/dev/null + command -v colcon >/dev/null + python -W ignore test/test_ros2.py -v +} + run_python_ocp_tests() { export PYTHONPATH=. python -W ignore test/test_ocp.py -v @@ -77,31 +118,37 @@ test_docker() { } main() { - if [ $DO_DOCKER -eq 0 ]; then - case "$TASK" in - python-tests) - echo "Running Python tests and generated Clippy tests" - setup_python_test_env - run_python_core_tests - ;; - ocp-tests) - echo "Running OCP Python tests" - setup_python_test_env - run_python_ocp_tests - ;; - all-python-tests) - echo "Running Python tests, generated Clippy tests, and OCP tests" - all_python_tests - ;; - *) - echo "Unknown task: $TASK" - exit 1 - ;; - esac - else + if [ "$DO_DOCKER" -ne 0 ]; then echo "Building Docker image" test_docker + return fi + + case "$TASK" in + python-tests) + echo "Running Python tests and generated Clippy tests" + setup_python_test_env + run_python_core_tests + ;; + ros2-tests) + echo "Running ROS2 Python tests" + setup_python_test_env + run_python_ros2_tests + ;; + ocp-tests) + echo "Running OCP Python tests" + setup_python_test_env + run_python_ocp_tests + ;; + all-python-tests) + echo "Running Python tests, generated Clippy tests, and OCP tests" + all_python_tests + ;; + *) + echo "Unknown task: $TASK" + exit 1 + ;; + esac } main diff --git a/ci/sphinx-documentation.sh b/ci/sphinx-documentation.sh index 452f057e..12a3794b 100644 --- a/ci/sphinx-documentation.sh +++ b/ci/sphinx-documentation.sh @@ -27,7 +27,7 @@ pip install sphinx pip install sphinx-rtd-theme # Install opengen -pushd open-codegen +pushd python pip install . 
popd # back to $GITHUB_WORKSPACE @@ -48,11 +48,11 @@ git checkout $current_branch # Build the docs rm -rf sphinx mkdir -p sphinx -pushd sphinx-dox -sphinx-apidoc -o ./source/ ../open-codegen/opengen +pushd docs/sphinx +sphinx-apidoc -o ./source/ ../../python/opengen echo Last updated: $(date -u) >> source/index.rst; sed '$d' source/index.rst; # update date at the end of file make html || : -cp -r build/html/ ../sphinx +cp -r build/html/ ../../sphinx git checkout source/index.rst # no need to commit this popd # back to $GITHUB_WORKSPACE diff --git a/docs/algorithm.md b/docs/content/algorithm.md similarity index 100% rename from docs/algorithm.md rename to docs/content/algorithm.md diff --git a/docs/cite_open.md b/docs/content/cite_open.md similarity index 100% rename from docs/cite_open.md rename to docs/content/cite_open.md diff --git a/docs/content/contributing.mdx b/docs/content/contributing.mdx new file mode 100644 index 00000000..0355dd19 --- /dev/null +++ b/docs/content/contributing.mdx @@ -0,0 +1,430 @@ +--- +id: contributing +sidebar_label: Contributing +title: Contributing to OpEn +description: How do I contribute to OpEn +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +## How can I contribute to OpEn? +Thank you for considering contributing to Optimization Engine (OpEn)! + +OpEn is an open source project and welcomes contributions from the community. + +You can contribute in several ways: + +- Submit an [**issue**](https://github.com/alphaville/optimization-engine/issues): + Often bugs will go unnoticed by the core development team and certain + use cases and user needs will have evaded our attention. + Consider submitting an issue if: + - You would like to report a [bug]; please, use the provided template for reporting + bugs. It is essential to give information about your system (OS, OpEn version) + and outline a sequence of steps to reproduce the error. 
When possible, please + provide a [minimum working example] + - You would like to request a [new feature]; please use the provided template + - You would like to propose modifications in OpEn's documentation, such as + for some concepts to be better elucidated or a request for an additional example +- Share with us a **success story** on [**Discord**](https://discord.gg/mfYpn4V) +- Create a **pull request** (see below) + +or, show us your love: + +- Give us a [**star on GitHub**](https://github.com/alphaville/optimization-engine) +- Spread the word on [**Twitter**] + +![Star](https://media.giphy.com/media/ZxblqUVrPVmcqATkC4/giphy.gif) + +## I just have a question! +The easiest and quickest way to ask a question is to reach us on [**Discord**](https://discord.gg/mfYpn4V) or [**Gitter**](https://gitter.im/alphaville/optimization-engine). + +You may also consult the [**frequently asked questions**](/optimization-engine/docs/faq). + + +## Submitting issues +You may submit an issue regarding anything related to **OpEn**, such as: + +- a bug +- insufficient/vague documentation +- request for a feature +- request for an example + +You should, however, make sure that the same - or a very similar - issue is not already open. In that case, you may write a comment in an existing issue. + + +## Contributing code or docs + +In order to contribute code or documentation, you need to [fork] our GitHub repository, make your modifications and submit a pull request. 
You should follow these rules: + +- create one or more [issues on github] that will be associated with your changes +- take it from `master`: fork OpEn and create a branch on `master` + +```console +git checkout -b fix/xyz master +``` + +- read the [style guide](#coding-style-guide) below (and write unit/integration tests) +- create a pull request in which you need to explain the key changes + +## Coding style guide + +Things to keep in mind: + +- **Code**: intuitive structure and variable names, short atomic functions, +- **Comments**: help others better understand your code +- **Docs**: document all functions (even private ones) +- **Tests**: write comprehensive, exhaustive tests + +### Rust + +*General guidelines:* Read the Rust [API guidelines] and this [API checklist] + +*Naming convention:* We follow the [standard naming convention](https://rust-lang-nursery.github.io/api-guidelines/naming.html) of Rust. + +*Documentation:* We follow [these guidelines](https://rust-lang-nursery.github.io/api-guidelines/documentation.html). Everything should be documented. + +### Python + +We follow [this style guide](https://www.python.org/dev/peps/pep-0008) and its [naming convention](https://www.python.org/dev/peps/pep-0008/#naming-conventions) + + +### Website +This documentation is generated with Docusaurus - read a detailed guide [here](https://github.com/alphaville/optimization-engine/blob/master/docs/website/README.md). + +- All docs are in `docs/content/` +- Blog entries are in `docs/website/blog/` + +To start the website locally (at [http://localhost:3000/optimization-engine](http://localhost:3000/optimization-engine)) change directory to `docs/website` and run `yarn start`. To update the website, execute `./publish.sh` from there (you need to be a collaborator on github). 
+ +## Using Git +When using Git, keep in mind the following guidelines: + +- Create simple, atomic, commits +- Write comprehensive commit messages +- Work on a forked repository +- When you're done, submit a pull request to +[`alphaville/optimization-engine`](https://github.com/alphaville/optimization-engine/); +it will be promptly delegated to a reviewer and we will contact you +as soon as possible. + +Branch `master` is protected and all pull requests need to be reviewed by a person +other than their proposer before they can be merged into `master`. + +## Versioning +This project consists of independent modules: +(i) the core Rust library, +(ii) the MATLAB interface, +(iii) the Python interface. +Each module has a different version number (`X.Y.Z`). + +We use the **SemVer** standard - we quote from [semver.org](https://semver.org/): + +Given a version number `MAJOR.MINOR.PATCH`, increment the: + +- `MAJOR` version when you make incompatible API changes, +- `MINOR` version when you add functionality in a backwards-compatible manner, and +- `PATCH` version when you make backwards-compatible bug fixes. + +Additional labels for pre-release and build metadata are available as extensions to the `MAJOR.MINOR.PATCH` format. + +We also keep a [log of changes](https://github.com/alphaville/optimization-engine/blob/master/CHANGELOG.md) where we summarize the main changes since last version. + +## Releasing + +Each time the major or minor number of the Rust library is updated, a new crate should be published on [crates.io](https://crates.io/crates/optimization_engine). + +In order to release a new version make sure that +you have done the following: + +--- + + + + +Checklist: + + + +Then, create a tag and push it... + + ```bash + git tag -a v0.10.0 -m "v0.10.0" + git push --tags + ``` + +Lastly, update the [docker image](https://github.com/alphaville/optimization-engine/tree/master/docker). +This will have to be a new PR. 
+ + + + + + +Checklist: + + + +Then, create a tag and push it... + + ```bash + git tag -a opengen-0.10.0 -m "opengen-0.10.0" + git push --tags + ``` + + + +Lastly, update the [docker image](https://github.com/alphaville/optimization-engine/tree/master/docker). +This will have to be a new PR. + + + + + + + Update the [Dockerfile](https://github.com/alphaville/optimization-engine/blob/master/docker/Dockerfile). + You may need to bump the versions of open and opengen: + + ```Dockerfile + ARG OPENGEN_VERSION=0.10.0 + ARG OPTIMIZATION_ENGINE_CRATE_VERSION=0.11.0 + ``` + + Update the [CHANGELOG](https://github.com/alphaville/optimization-engine/blob/master/docker/CHANGELOG.md). + Update the [README](https://github.com/alphaville/optimization-engine/blob/master/docker/README.md) file. + Build, test, and push with + + ```bash + docker push alphaville/open:0.7.0 + ``` + + Update the [website docs](./docker) and the promo on the [main page](..) + + + + +--- + +To update the website, run +```bash +GIT_USER=alphaville \ + CURRENT_BRANCH=master \ + USE_SSH=true \ + yarn deploy +``` +from within `docs/website/`. Then, update the opengen API docs too; +just push a commit with message starting with `[docit]`. +You can also issue a commit without git-add. 
Run + +```bash +git commit -m '[docit] update api docs' --allow-empty +``` + + + +[CHANGELOG]: https://github.com/alphaville/optimization-engine/blob/master/CHANGELOG.md +[VERSION]: https://github.com/alphaville/optimization-engine/blob/master/python/VERSION +[Cargo.toml]: https://github.com/alphaville/optimization-engine/blob/master/rust/Cargo.toml +[setup.py]: https://github.com/alphaville/optimization-engine/blob/master/python/setup.py +[release v0.4.0]: https://github.com/alphaville/optimization-engine/releases/tag/v0.4.0 +[bug]: https://github.com/alphaville/optimization-engine/issues/new?template=bug_report.md +[issues on github]: https://github.com/alphaville/optimization-engine/issues +[**Twitter**]: https://twitter.com/intent/tweet?original_referer=https%3A%2F%2Falphaville.github.io%2Foptimization-engine&ref_src=twsrc%5Etfw&text=Fast%20and%20accurate%20embedded%20nonconvex%20optimization%20with%20%23OptimizationEngine&tw_p=tweetbutton&url=https%3A%2F%2Falphaville.github.io%2Foptimization-engine&via=isToxic +[minimum working example]: https://en.wikipedia.org/wiki/Minimal_working_example +[new feature]: https://github.com/alphaville/optimization-engine/issues/new?template=feature_request.md +[fork]: https://github.com/alphaville/optimization-engine +[API guidelines]: https://rust-lang-nursery.github.io/api-guidelines/about.html +[API checklist]: https://rust-lang-nursery.github.io/api-guidelines/checklist.html + +## Running tests locally + +If you are working on the Python interface (`opengen`) or the website/docs, +it is best to use a dedicated Python virtual environment. 
+ +### Set up a virtual environment + +From within `python/`, create and activate a virtual environment: + +```bash +cd python +python3 -m venv venv +source venv/bin/activate +python -m pip install --upgrade pip +pip install -e '.[dev]' +``` + +If you plan to run the benchmark suite as well, install the extra dependency: + +```bash +pip install "pytest-benchmark[histogram]" +``` + +### Run the Rust tests + +From the repository root, run: + +```bash +cargo test +``` + +This will run all unit tests, including the examples in the docstrings. +To run only the library unit tests, do: + +```bash +cargo test --lib +``` + +If you want a faster compile-only check, you can also run: + +```bash +cargo check +``` + +### Run the Python and code-generation tests + +From within `python/`, run the following tests after you activate `venv`. +The package's optional development dependencies include `pytest`, so the +recommended install command is: + +```bash +pip install -e '.[dev]' +``` + +You can keep using the existing `unittest` commands: + +```bash +# Activate venv first +python -W ignore test/test_constraints.py -v +python -W ignore test/test.py -v +python -W ignore test/test_ocp.py -v +``` + +or execute the same files with `pytest`: + +```bash +pytest test/test_constraints.py +pytest test/test.py +pytest test/test_ocp.py +``` + +The ROS2 tests should normally be run from an environment where ROS2 is already +installed and configured, for example a dedicated `micromamba` environment. +They should not be assumed to run from the plain `venv` above unless that +environment also contains a working ROS2 installation together with `ros2` and +`colcon`. + +For example: + +```bash +cd python +micromamba activate ros_env +pip install . +python -W ignore test/test_ros2.py -v +``` + +If ROS2 is not installed locally, you can still run the rest of the Python +test suite. 
+ +### Run linting and extra checks + +From the repository root, it is also useful to run: + +```bash +cargo clippy --all-targets +``` + +Before opening a pull request, please run the tests that are relevant to the +part of the codebase you changed and make sure they pass locally. + +### Run the benchmarks + +The Python benchmark suite uses `pytest-benchmark`. If you have not already +installed the development dependencies, do: + +```bash +cd python +source venv/bin/activate +pip install -e '.[dev]' +pip install pytest-benchmark[histogram] +``` + +Before running the benchmarks, generate the benchmarkable optimizers: + +```bash +python test/prepare_benchmarks.py +``` + +Then run the benchmark suite with `pytest`: + +```bash +pytest test/benchmark_open.py --benchmark-only +``` + +To generate a histogram report, pass an output prefix: + +```bash +pytest test/benchmark_open.py --benchmark-histogram=out +``` + +This will produce a file such as `out.svg` in the current directory. + +### Produce coverage reports + +For Python coverage reports, activate your virtual environment in `python/` +and install `coverage` if needed: + +```bash +cd python +source venv/bin/activate +pip install coverage +``` + +Then run the main Python test suite under coverage and print a summary: + +```bash +coverage erase +coverage run --source=opengen test/test_constraints.py +coverage run -a --source=opengen test/test.py +coverage run -a --source=opengen test/test_ocp.py +coverage report -m +``` + +To generate an HTML report, run: + +```bash +coverage html +``` + +This writes the report to `python/htmlcov/index.html`. + +For Rust coverage reports, install `cargo-llvm-cov` once: + +```bash +cargo install cargo-llvm-cov +``` + +Then, from `rust/`, run: + +```bash +cargo llvm-cov --html +``` + +The HTML report will be written to `rust/target/llvm-cov/html/index.html`. 
diff --git a/docs/docker.md b/docs/content/docker.md similarity index 100% rename from docs/docker.md rename to docs/content/docker.md diff --git a/docs/example-nav.md b/docs/content/example-nav.md similarity index 100% rename from docs/example-nav.md rename to docs/content/example-nav.md diff --git a/docs/example-nmpc.md b/docs/content/example-nmpc.md similarity index 100% rename from docs/example-nmpc.md rename to docs/content/example-nmpc.md diff --git a/docs/example_bnp_py.md b/docs/content/example_bnp_py.md similarity index 100% rename from docs/example_bnp_py.md rename to docs/content/example_bnp_py.md diff --git a/docs/example_estimation_py.md b/docs/content/example_estimation_py.md similarity index 100% rename from docs/example_estimation_py.md rename to docs/content/example_estimation_py.md diff --git a/docs/example_invpend_py.md b/docs/content/example_invpend_py.md similarity index 100% rename from docs/example_invpend_py.md rename to docs/content/example_invpend_py.md diff --git a/docs/example_navigation_py.mdx b/docs/content/example_navigation_py.mdx similarity index 97% rename from docs/example_navigation_py.mdx rename to docs/content/example_navigation_py.mdx index 3e61824e..14682c00 100644 --- a/docs/example_navigation_py.mdx +++ b/docs/content/example_navigation_py.mdx @@ -154,9 +154,9 @@ sys.path.insert(1, './my_optimizers/navigation') import navigation solver = navigation.solver() -result = solver.run(p=[-1.0, 2.0, 0.0], - initial_guess=[1.0] * (nu*N)) -u_star = result.solution +response = solver.run(p=[-1.0, 2.0, 0.0], + initial_guess=[1.0] * (nu*N)) +u_star = response.get().solution # Plot solution @@ -289,5 +289,5 @@ problem = og.builder.Problem(u, p, cost).with_constraints(bounds) Then, when we use the optimiser we to provide the vector `p`. 
For example, if `z0 = (-1, 2, 0)` and `xref = 1`, `yref = 0.6`, `thetaref = 0.05` we use ```python -result = solver.run(p=[-1.0, 2.0, 0.0, 1.0, 0.6, 0.05]) +result = solver.run(p=[-1.0, 2.0, 0.0, 1.0, 0.6, 0.05]).get() ``` diff --git a/docs/example_navigation_ros_codegen.md b/docs/content/example_navigation_ros_codegen.md similarity index 99% rename from docs/example_navigation_ros_codegen.md rename to docs/content/example_navigation_ros_codegen.md index 2f090e43..4ff92d29 100644 --- a/docs/example_navigation_ros_codegen.md +++ b/docs/content/example_navigation_ros_codegen.md @@ -326,7 +326,7 @@ private: */ void updateInputData() { - init_penalty = (params.initial_penalty > 1.0) + init_penalty = (params.initial_penalty > std::numeric_limits::epsilon()) ? params.initial_penalty : ROS_NODE_MPC_CONTROLLER_DEFAULT_INITIAL_PENALTY; diff --git a/docs/example_rosenbrock_py.mdx b/docs/content/example_rosenbrock_py.mdx similarity index 100% rename from docs/example_rosenbrock_py.mdx rename to docs/content/example_rosenbrock_py.mdx diff --git a/docs/example_tanks_py.md b/docs/content/example_tanks_py.md similarity index 100% rename from docs/example_tanks_py.md rename to docs/content/example_tanks_py.md diff --git a/docs/faq.md b/docs/content/faq.md similarity index 100% rename from docs/faq.md rename to docs/content/faq.md diff --git a/docs/installation.md b/docs/content/installation.md similarity index 92% rename from docs/installation.md rename to docs/content/installation.md index 22d4e95f..836a00fa 100644 --- a/docs/installation.md +++ b/docs/content/installation.md @@ -66,8 +66,8 @@ need to create such an environment, then activate it, and lastly, install `opengen` as above using `pip`. 
That is, you need to run: ```console -virtualenv -p python3.6 venv36 -source venv36/bin/activate +virtualenv -p python3.13 venv +source venv/bin/activate pip install opengen ``` @@ -122,11 +122,11 @@ If you want to contribute to OpEn, you should rather *fork* OpEn on [github](htt ### Install opengen -Go intro `optimization-engine/open-codegen` and create a virtual environment: +Go into `optimization-engine/python` and create a virtual environment: ```sh -cd optimization-engine/open-codegen -virtualenv -p python3.6 venvopen +cd optimization-engine/python +virtualenv -p python3.13 venvopen source venvopen/bin/activate python setup.py install ``` @@ -139,7 +139,7 @@ Use the above virtual environment (`venvopen`) in PyCharm: - go to Run > Edit Configurations > Add new configuration - Script path: specify `main.py` -- Working dir: `optimization-engine/open-codegen/opengen` +- Working dir: `optimization-engine/python/opengen` - Python interpreter: `venvopen` ### Install OpEn in Rust @@ -153,6 +153,6 @@ cargo build ``` If you need to use `opengen` - the Python interface of OpEn - with a local -version of the Rust library, use `with_open_version(local_path=...)` in +version of the Rust library, use `with_open_version(local_path="/path/to/optimization-engine/rust")` in your code. Read the [advanced options](python-advanced#build-options) for details. diff --git a/docs/content/matlab-api.mdx b/docs/content/matlab-api.mdx new file mode 100644 index 00000000..c8f8d0ff --- /dev/null +++ b/docs/content/matlab-api.mdx @@ -0,0 +1,238 @@ +--- +id: matlab-api +title: MATLAB API +description: TCP-based MATLAB API for parametric and OCP-generated OpEn optimizers +--- + +# MATLAB API + +This page documents the current MATLAB API of OpEn. + +The current MATLAB interface lives in `matlab/api` and communicates with +optimizers that were generated in Python and expose a TCP server. 
+ +If you are looking for the older MATLAB code-generation workflow, see the +[legacy MATLAB interface](./matlab-interface) and the +[MATLAB examples](./matlab-examples). + +## Overview + +The current MATLAB API supports: + +- TCP-based calls to standard parametric optimizers +- TCP-based calls to OCP-generated optimizers created with `opengen.ocp` +- Loading `optimizer_manifest.json` for OCP optimizers +- Automatic endpoint discovery from the sibling `optimizer.yml` +- Warm-start inputs: + - `InitialGuess` + - `InitialLagrangeMultipliers` + - `InitialPenalty` +- Health-check and shutdown operations through `ping()` and `kill()` + +The main MATLAB entry points are: + +- `OpEnTcpOptimizer` +- `createOpEnTcpOptimizer` + +:::important +The MATLAB API is a TCP client only. It does **not** start the optimizer +server; the generated optimizer must already be running. +::: + +## Getting Started + +Add the MATLAB API directory to your path: + +```matlab +addpath(fullfile(pwd, 'matlab', 'api')); +``` + +Create a client using either: + +- `OpEnTcpOptimizer(port)` +- `OpEnTcpOptimizer(port, ip)` +- `OpEnTcpOptimizer(ip, port)` + +For example: + +```matlab +client = OpEnTcpOptimizer(3301); +pong = client.ping(); +disp(pong.Pong); +``` + +## Parametric Optimizers + +For a standard parametric optimizer, call `solve` with the flat parameter +vector: + +```matlab +client = OpEnTcpOptimizer(3301); + +response = client.solve([2.0, 10.0]); + +if response.ok + disp(response.solution); + disp(response.cost); + disp(response.exit_status); +else + error('OpEn:SolverError', '%s', response.message); +end +``` + +You can also provide warm-start data: + +```matlab +response1 = client.solve([2.0, 10.0]); + +response2 = client.solve( ... + [2.0, 10.0], ... + 'InitialGuess', response1.solution, ... + 'InitialLagrangeMultipliers', response1.lagrange_multipliers, ... 
+ 'InitialPenalty', response1.penalty); +``` + +To stop the server gracefully: + +```matlab +client.kill(); +``` + +## OCP Optimizers + +For OCP-generated optimizers, the MATLAB API uses the optimizer manifest to +pack named parameter blocks into the flat parameter vector expected by the +underlying TCP solver. + +This matches the Python `GeneratedOptimizer.solve(...)` workflow conceptually, +but in MATLAB you pass the values as **name-value pairs**. + +### Loading an OCP Manifest + +If `optimizer_manifest.json` and `optimizer.yml` are in the same generated +optimizer directory, MATLAB can read the TCP endpoint automatically: + +```matlab +manifestPath = fullfile( ... + pwd, ... + 'python', ... + '.python_test_build_ocp', ... + 'ocp_single_tcp', ... + 'optimizer_manifest.json'); + +client = OpEnTcpOptimizer('ManifestPath', manifestPath); +disp(client.parameterNames()); +``` + +You can also override the TCP endpoint explicitly: + +```matlab +client = OpEnTcpOptimizer(3391, 'ManifestPath', manifestPath); +``` + +### Single-Shooting OCP Example + +The following example uses a generated OCP optimizer whose manifest defines +the parameter blocks `x0` and `xref`: + +```matlab +response = client.solve( ... + 'x0', [1.0, -1.0], ... + 'xref', [0.0, 0.0]); + +if response.ok + disp(response.solution); + disp(response.inputs); + disp(response.exit_status); +else + error('OpEn:SolverError', '%s', response.message); +end +``` + +If some manifest parameters have defaults, only the required ones need to be +provided: + +```matlab +manifestPath = fullfile( ... + pwd, ... + 'python', ... + '.python_test_build_ocp', ... + 'ocp_manifest_bindings', ... + 'optimizer_manifest.json'); + +client = OpEnTcpOptimizer('ManifestPath', manifestPath); +response = client.solve('x0', [1.0, 0.0]); +``` + +### Multiple-Shooting OCP Example + +For multiple-shooting OCPs, the MATLAB response also contains the reconstructed +state trajectory: + +```matlab +manifestPath = fullfile( ... + pwd, ... 
+ 'python', ... + '.python_test_build_ocp', ... + 'ocp_multiple_tcp', ... + 'optimizer_manifest.json'); + +client = OpEnTcpOptimizer('ManifestPath', manifestPath); + +response = client.solve( ... + 'x0', [1.0, -1.0], ... + 'xref', [0.0, 0.0]); + +disp(response.inputs); +disp(response.states); +``` + +### OCP Warm-Start Example + +Warm-start options can be combined with the named OCP parameters: + +```matlab +response1 = client.solve( ... + 'x0', [1.0, -1.0], ... + 'xref', [0.0, 0.0]); + +response2 = client.solve( ... + 'x0', [1.0, -1.0], ... + 'xref', [0.0, 0.0], ... + 'InitialGuess', response1.solution, ... + 'InitialLagrangeMultipliers', response1.lagrange_multipliers, ... + 'InitialPenalty', response1.penalty); +``` + +## Response Format + +All successful solver calls return the low-level solver fields produced by the +TCP server, together with a MATLAB-friendly `ok` flag. + +Typical fields include: + +- `ok` +- `solution` +- `cost` +- `exit_status` +- `solve_time_ms` +- `penalty` +- `num_outer_iterations` +- `num_inner_iterations` +- `last_problem_norm_fpr` +- `f1_infeasibility` +- `f2_norm` +- `lagrange_multipliers` + +For OCP optimizers, the response also includes: + +- `packed_parameter` +- `inputs` +- `states` for multiple-shooting formulations + +## Related Pages + +- [Python TCP/IP interface](./python-tcp-ip) +- [Python OCP guide](./python-ocp-1) +- [Legacy MATLAB interface](./matlab-interface) +- [MATLAB examples](./matlab-examples) diff --git a/docs/matlab-examples.md b/docs/content/matlab-examples.md similarity index 100% rename from docs/matlab-examples.md rename to docs/content/matlab-examples.md diff --git a/docs/matlab-interface.md b/docs/content/matlab-interface.md similarity index 100% rename from docs/matlab-interface.md rename to docs/content/matlab-interface.md diff --git a/docs/open-intro.md b/docs/content/open-intro.md similarity index 100% rename from docs/open-intro.md rename to docs/content/open-intro.md diff --git a/docs/openrust-alm.md 
b/docs/content/openrust-alm.md similarity index 98% rename from docs/openrust-alm.md rename to docs/content/openrust-alm.md index 44d0ad75..f9f6bf8f 100644 --- a/docs/openrust-alm.md +++ b/docs/content/openrust-alm.md @@ -115,7 +115,7 @@ fn main() { } ``` -A complete example is available at [`pm.rs`](https://github.com/alphaville/optimization-engine/blob/master/examples/pm.rs). +A complete example is available at [`pm.rs`](https://github.com/alphaville/optimization-engine/blob/master/rust/examples/pm.rs). ## Augmented Lagrangian @@ -323,7 +323,7 @@ the norm-distance of $F_1(u)$ from $C$. We see that this is indeed below $\delta ## Additional Examples -See [`alm_pm.rs`](https://github.com/alphaville/optimization-engine/blob/master/examples/alm_pm.rs). +See [`alm_pm.rs`](https://github.com/alphaville/optimization-engine/blob/master/rust/examples/alm_pm.rs). [`AlmOptimizer`]: https://docs.rs/optimization_engine/*/optimization_engine/alm/struct.AlmOptimizer.html [`AlmFactory`]: https://docs.rs/optimization_engine/*/optimization_engine/alm/struct.AlmFactory.html diff --git a/docs/content/openrust-arithmetic.mdx b/docs/content/openrust-arithmetic.mdx new file mode 100644 index 00000000..9d00c77b --- /dev/null +++ b/docs/content/openrust-arithmetic.mdx @@ -0,0 +1,175 @@ +--- +id: openrust-arithmetic +title: Single and double precision +description: OpEn with f32 and f64 number types +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +:::note Info +The functionality presented here was introduced in OpEn version [`0.12.0`](https://crates.io/crates/optimization_engine/0.12.0-alpha.1). +The new API is fully backward-compatible with previous versions of OpEn +with `f64` being the default scalar type. +::: + +## Overview + +OpEn's Rust API now supports both `f64` and `f32`. Note that with `f32` +you may encounter issues with convergence, especially if you are solving +particularly ill-conditioned problems. 
On the other hand, `f32` is sometimes +the preferred type for embedded applications and can lead to lower +solve times. + +When using `f32`: (i) make sure the problem is properly scaled, +and (ii) you may want to opt for less demanding tolerances. + +## PANOC example + +Below you can see two examples of using the solver with single and double +precision arithmetic. + + + + + +```rust +use optimization_engine::{constraints, panoc::PANOCCache, Problem, SolverError}; +use optimization_engine::panoc::PANOCOptimizer; + +let tolerance = 1e-4_f32; +let lbfgs_memory = 10; +let radius = 1.0_f32; + +let bounds = constraints::Ball2::new(None, radius); + +let df = |u: &[f32], grad: &mut [f32]| -> Result<(), SolverError> { + grad[0] = u[0] + u[1] + 1.0_f32; + grad[1] = u[0] + 2.0_f32 * u[1] - 1.0_f32; + Ok(()) +}; + +let f = |u: &[f32], cost: &mut f32| -> Result<(), SolverError> { + *cost = 0.5_f32 * (u[0] * u[0] + u[1] * u[1]); + Ok(()) +}; + +let problem = Problem::new(&bounds, df, f); +let mut cache = PANOCCache::::new(2, tolerance, lbfgs_memory); +let mut optimizer = PANOCOptimizer::new(problem, &mut cache); + +let mut u = [0.0_f32, 0.0_f32]; +let status = optimizer.solve(&mut u).unwrap(); +assert!(status.has_converged()); +``` + + + + +```rust +use optimization_engine::{constraints, panoc::PANOCCache, Problem, SolverError}; +use optimization_engine::panoc::PANOCOptimizer; + +let tolerance = 1e-6; +let lbfgs_memory = 10; +let radius = 1.0; + +let bounds = constraints::Ball2::new(None, radius); + +let df = |u: &[f64], grad: &mut [f64]| -> Result<(), SolverError> { + grad[0] = u[0] + u[1] + 1.0; + grad[1] = u[0] + 2.0 * u[1] - 1.0; + Ok(()) +}; + +let f = |u: &[f64], cost: &mut f64| -> Result<(), SolverError> { + *cost = 0.5 * (u[0] * u[0] + u[1] * u[1]); + Ok(()) +}; + +let problem = Problem::new(&bounds, df, f); +let mut cache = PANOCCache::new(2, tolerance, lbfgs_memory); +let mut optimizer = PANOCOptimizer::new(problem, &mut cache); + +let mut u = [0.0, 0.0]; +let status 
= optimizer.solve(&mut u).unwrap(); +assert!(status.has_converged()); +``` + + + + +To use single precision, make sure that the following are all using `f32`: + +- the initial guess `u` +- the closures for the cost and gradient +- the constraints +- the cache +- any tolerances and numerical constants +- You are explicitly using `PANOCCache::` as in the above example + +## Example with FBS + +The same pattern applies to other solvers. + +```rust +use optimization_engine::{constraints, Problem, SolverError}; +use optimization_engine::fbs::{FBSCache, FBSOptimizer}; +use std::num::NonZeroUsize; + +let bounds = constraints::Ball2::new(None, 0.2_f32); + +let df = |u: &[f32], grad: &mut [f32]| -> Result<(), SolverError> { + grad[0] = u[0] + u[1] + 1.0_f32; + grad[1] = u[0] + 2.0_f32 * u[1] - 1.0_f32; + Ok(()) +}; + +let f = |u: &[f32], cost: &mut f32| -> Result<(), SolverError> { + *cost = u[0] * u[0] + 2.0_f32 * u[1] * u[1] + u[0] - u[1] + 3.0_f32; + Ok(()) +}; + +let problem = Problem::new(&bounds, df, f); +let mut cache = FBSCache::::new(NonZeroUsize::new(2).unwrap(), 0.1_f32, 1e-6_f32); +let mut optimizer = FBSOptimizer::new(problem, &mut cache); + +let mut u = [0.0_f32, 0.0_f32]; +let status = optimizer.solve(&mut u).unwrap(); +assert!(status.has_converged()); +``` + +## Example with ALM + +ALM also supports both precisions. As with PANOC and FBS, the scalar type should be chosen once and then used consistently throughout the ALM problem, cache, mappings, and tolerances. + +For example, if you use: + +- `AlmCache::` +- `PANOCCache::` +- `Ball2::` +- closures of type `|u: &[f32], ...|` + +then the whole ALM solve runs in single precision. + +If instead you use plain `f64` literals and `&[f64]` closures, the solver runs in double precision. This is the default behaviour. + +## Type inference tips + +Rust usually infers the scalar type correctly, but explicit annotations are often helpful for `f32`. 
+ +Good ways to make `f32` intent clear are: + +- suffix literals, for example `1.0_f32` and `1e-4_f32` +- annotate vectors and arrays, for example `let mut u = [0.0_f32; 2];` +- annotate caches explicitly, for example `PANOCCache::::new(...)` +- annotate closure arguments, for example `|u: &[f32], grad: &mut [f32]|` + +:::warning Important rule: do not mix `f32` and `f64` +For example, the following combinations are problematic: + +- `u: &[f32]` with a cost function writing to `&mut f64` +- `Ball2::new(None, 1.0_f64)` together with `PANOCCache::` + +Choose one scalar type per optimization problem and use it everywhere. +::: diff --git a/docs/openrust-basic.md b/docs/content/openrust-basic.md similarity index 93% rename from docs/openrust-basic.md rename to docs/content/openrust-basic.md index edaaaf13..7a1ec0bb 100644 --- a/docs/openrust-basic.md +++ b/docs/content/openrust-basic.md @@ -25,6 +25,13 @@ The definition of an optimization problem consists in specifying the following t - the set of constraints, $U$, as an implementation of a trait ### Cost functions + +:::note Info +Throughout this document we will be using `f64`, which is the default +scalar type. However, OpEn now supports `f32` as well. +::: + + The **cost function** `f` is a Rust function of type `|u: &[f64], cost: &mut f64| -> Result<(), SolverError>`. The first argument, `u`, is the argument of the function. The second argument, is a mutable reference to the result (cost). The function returns a *status code* of the type `Result<(), SolverError>` and the status code `Ok(())` means that the computation was successful. Other status codes can be used to encode errors/exceptions as defined in the [`SolverError`] enum. As an example, consider the cost function $f:\mathbb{R}^2\to\mathbb{R}$ that maps a two-dimensional @@ -33,8 +40,8 @@ vector $u$ to $f(u) = 5 u_1 - u_2^2$. 
This will be: ```rust let f = |u: &[f64], c: &mut f64| -> Result<(), SolverError> { - *c = 5.0 * u[0] - u[1].powi(2); - Ok(()) + *c = 5.0 * u[0] - u[1].powi(2); + Ok(()) }; ``` @@ -50,9 +57,9 @@ This function can be implemented as follows: ```rust let df = |u: &[f64], grad: &mut [f64]| -> Result<(), SolverError> { - grad[0] = 5.0; - grad[1] = -2.0*u[1]; - Ok(()) + grad[0] = 5.0; + grad[1] = -2.0*u[1]; + Ok(()) }; ``` @@ -81,6 +88,22 @@ Constraints implement the namesake trait, [`Constraint`]. Implementations of [`C These are the most common constraints in practice. +:::note Cartesian products in Rust +The Rust API for [`CartesianProduct`] uses cumulative lengths, equivalently +exclusive end indices. + +For example, if `x0 = x[0..3]` and `x1 = x[3..5]`, then you would write: + +```rust +let cart_prod = constraints::CartesianProduct::new() + .add_constraint(3, c0) + .add_constraint(5, c1); +``` + +This differs from the Python API, which uses inclusive last indices such as +`[2, 4]` for the same two segments. +::: + The construction of a constraint is very easy. Here is an example of a Euclidean ball centered at the origin with given radius: ```rust @@ -208,11 +231,12 @@ fn main() { .with_max_iter(max_iters); // Invoke the solver - let status = panoc.solve(&mut u); + let status = panoc.solve(&mut u).unwrap(); + assert!(status.has_converged()); } ``` -This example can be found in [`examples/panoc_ex1.rs`](https://github.com/alphaville/optimization-engine/blob/master/examples/panoc_ex1.rs). +This example can be found in [`examples/panoc_ex1.rs`](https://github.com/alphaville/optimization-engine/blob/master/rust/examples/panoc_ex1.rs). 
## Solving parametric problems @@ -290,9 +314,9 @@ fn main() { } }; - // define the bounds at every iteration - let bounds = constraints::Ball2::new(None, radius); - + // define the bounds at every iteration + let bounds = constraints::Ball2::new(None, radius); + // the problem definition is updated at every iteration let problem = Problem::new(&bounds, df, f); @@ -353,8 +377,8 @@ the imposition of a maximum allowed duration, the exit status will be ## Examples -- [`panoc_ex1.rs`](https://github.com/alphaville/optimization-engine/blob/master/examples/panoc_ex1.rs) -- [`panoc_ex2.rs`](https://github.com/alphaville/optimization-engine/blob/master/examples/panoc_ex2.rs) +- [`panoc_ex1.rs`](https://github.com/alphaville/optimization-engine/blob/master/rust/examples/panoc_ex1.rs) +- [`panoc_ex2.rs`](https://github.com/alphaville/optimization-engine/blob/master/rust/examples/panoc_ex2.rs) diff --git a/docs/openrust-features.mdx b/docs/content/openrust-features.mdx similarity index 86% rename from docs/openrust-features.mdx rename to docs/content/openrust-features.mdx index e047113d..11552814 100644 --- a/docs/openrust-features.mdx +++ b/docs/content/openrust-features.mdx @@ -42,6 +42,12 @@ You cannot use both `rp` and `jem`. ### WebAssembly Support +:::warning +In version 0.12.0 of OpEn, wasm was replaced by [`web-time`](https://crates.io/crates/web-time), +so this feature is not supported any more. OpEn can still be used in WebAssembly without +the need to specify any features. +::: + If you intend to use OpEn in WebAssembly you need to use the feature `wasm`. 
```.toml diff --git a/docs/python-advanced.mdx b/docs/content/python-advanced.mdx similarity index 99% rename from docs/python-advanced.mdx rename to docs/content/python-advanced.mdx index a9b5a8fa..0eb6dc5b 100644 --- a/docs/python-advanced.mdx +++ b/docs/content/python-advanced.mdx @@ -200,7 +200,7 @@ All build options are shown below | `with_target_system` | Target system (to be used when you need to cross-compile) | | `with_build_c_bindings` | Enalbe generation of C/C++ bindings | | `with_rebuild` | Whether to do a clean build | -| `with_open_version` | Use a certain version of OpEn (see [all versions]), e.g., `with_open_version("0.6.0")`, or a local version of OpEn (this is useful when you want to download the latest version of OpEn from github). You can do so using `with_open_version(local_path="/path/to/open/")`. | +| `with_open_version` | Use a certain version of OpEn (see [all versions]), e.g., `with_open_version("0.6.0")`, or a local version of OpEn (this is useful when you want to download the latest version of OpEn from github). You can do so using `with_open_version(local_path="/path/to/open/rust")`. | |`with_allocator` | Available in `opengen >= 0.6.6`. Compile with a different memory allocator. The available allocators are the entries of `RustAllocator`. OpEn currently supports [Jemalloc](https://github.com/gnzlbg/jemallocator) and [Rpmalloc](https://github.com/EmbarkStudios/rpmalloc-rs).| [all versions]: https://crates.io/crates/optimization_engine/versions diff --git a/docs/python-bindings.md b/docs/content/python-bindings.md similarity index 74% rename from docs/python-bindings.md rename to docs/content/python-bindings.md index eeb76a13..c4157bfc 100644 --- a/docs/python-bindings.md +++ b/docs/content/python-bindings.md @@ -70,6 +70,8 @@ so you will have to add it before you can import the optimizer. 
This can be done very easily: ```python +import sys + sys.path.insert(1, './my_optimizers/rosenbrock') import rosenbrock ``` @@ -78,13 +80,17 @@ Then you will be able to use it as follows: ```python solver = rosenbrock.solver() -result = solver.run(p=[20., 1.]) +response = solver.run(p=[20., 1.]) +if not response.is_ok(): + raise RuntimeError(response.get().message) + +result = response.get() u_star = result.solution ``` In the first line, `solver = rosenbrock.solver()`, we obtain an instance of `Solver`, which can be used to solve parametric optimization problems. -In the second line, `result = solver.run(p=[20., 1.])`, we call the solver +In the second line, `response = solver.run(p=[20., 1.])`, we call the solver with parameter $p=(20, 1)$. Method `run` accepts another three optional arguments, namely: @@ -92,8 +98,32 @@ arguments, namely: - `initial_lagrange_multipliers`, and - `initial_penalty` -The solver returns an object of type `OptimizerSolution` with the following -properties: +The solver returns an object of type `SolverResponse`, similar to the TCP +interface. First call `response.is_ok()` to determine whether the call +succeeded, then call `response.get()` to obtain either a `SolverStatus` +object or a `SolverError`. This mirrors the Python TCP interface, but without +the socket transport layer. + +```python +response = solver.run(p=[20., 1.]) +if response.is_ok(): + result = response.get() + u_star = result.solution +else: + error = response.get() + print(error.code, error.message) +``` + +The returned objects also implement `__repr__`, which makes them convenient to +inspect in a Python REPL or notebook: + +```python +response = solver.run(p=[20., 1.]) +print(response) +print(response.get()) +``` + +The `SolverStatus` object exposes the following properties: | Property | Explanation | @@ -112,6 +142,25 @@ properties: These are the same properties as those of `opengen.tcp.SolverStatus`. 
+For backward compatibility, the generated module also exposes +`OptimizerSolution` as an alias of `SolverStatus`. + +If the call fails, `response.get()` returns a `SolverError` with: + +| Property | Explanation | +|-----------|-------------| +| `code` | Error code, aligned with the TCP interface | +| `message` | Detailed error message | + +The most common error codes are: + +| Code | Meaning | +|------|---------| +| `1600` | Initial guess has incompatible dimensions | +| `1700` | Wrong dimension of initial Lagrange multipliers | +| `2000` | Problem solution failed; the message includes the solver-side reason | +| `3003` | Wrong number of parameters | + ## Importing optimizer with variable name @@ -122,6 +171,9 @@ The limitation of this syntax is that it makes it difficult to change the name o A better syntax would be: ```python +import os +import sys + optimizers_dir = "my_optimizers" optimizer_name = "rosenbrock" sys.path.insert(1, os.path.join(optimizers_dir, optimizer_name)) diff --git a/docs/python-c.mdx b/docs/content/python-c.mdx similarity index 71% rename from docs/python-c.mdx rename to docs/content/python-c.mdx index bbe387e8..3de4d262 100644 --- a/docs/python-c.mdx +++ b/docs/content/python-c.mdx @@ -77,7 +77,7 @@ The generated C/C++ bindings are in the auto-generated solver library. In particular * The header files are at `the_optimizer/the_optimizer_bindings.{h,hpp}` -* The static and dynamical library files are located in `the_optimizer/target/{debug,release}` (depending on whether it was a [*debug*] or [*release*] build) +* The static and dynamic library files are located in `the_optimizer/target/{debug,release}` (depending on whether it was a [*debug*] or [*release*] build) Note that `the_optimizer` is the name given to the optimizer in the Python codegen above. 
@@ -112,13 +112,16 @@ typedef struct exampleCache exampleCache; typedef struct { exampleExitStatus exit_status; + int error_code; + char error_message[1024]; unsigned long num_outer_iterations; unsigned long num_inner_iterations; - double last_problem_norm_fpr; + double last_problem_norm_fpr; unsigned long long solve_time_ns; double penalty; double delta_y_norm_over_c; double f2_norm; + double cost; const double *lagrange; } exampleSolverStatus; @@ -137,10 +140,61 @@ This is designed to follow a new-use-free pattern. Function `{optimizer-name}_new` will allocate memory and setup a new solver instance and can be used to create as many solvers as necessary. Each solver instance can be used with `{optimizer-name}_solve` to solve the corresponding problem as many times as needed. -Parameter `u` is the starting guess and also the return of the decision variables and `params` is the array of static parameters. The size of `u` and `params` are `{optimizer-name}_NUM_DECISION_VARIABLES` and `{optimizer-name}_NUM_PARAMETERS` respectively. +Parameter `u` is the starting guess and also the return of the decision variables and `params` is the array of static parameters. The size of `u` and `params` are `{optimizer-name}_NUM_DECISION_VARIABLES` and `{optimizer-name}_NUM_PARAMETERS` respectively. Arguments `y0` and `c0` are optional: pass `0` (or `NULL`) to use the default initial Lagrange multipliers and penalty parameter. + +The returned `exampleSolverStatus` always contains a coarse solver outcome in +`exit_status`. On success it also contains `error_code = 0` and an empty +`error_message`. If the solver fails internally, the bindings return a +structured error report with a nonzero `error_code` and a descriptive +`error_message`. Finally, when done with the solver, use `{optimizer-name}_free` to release the memory allocated by `{optimizer-name}_new`. +## Handling errors + +The C bindings always return a value of type `exampleSolverStatus`. 
This means +that solver calls do not report failure by returning `NULL` or by using a +separate exception-like mechanism. Instead, callers should inspect both +`exit_status` and `error_code`. + +- `error_code = 0` means the solver call completed without an internal error +- `error_code != 0` means the solver failed and `error_message` contains a + descriptive explanation +- `exit_status` gives the coarse outcome of the solve attempt, such as + converged, reached the iteration limit, or failed because of a numerical + issue + +The recommended pattern is: + +1. Call `{optimizer-name}_solve(...)` +2. Check whether `status.error_code != 0` +3. If so, report `status.error_message` and treat the call as failed +4. Otherwise, inspect `status.exit_status` to determine whether the solver + converged or returned the best available non-converged iterate + +For example: + +```c +exampleSolverStatus status = example_solve(cache, u, p, 0, &initial_penalty); + +if (status.error_code != 0) { + fprintf(stderr, "Solver failed: [%d] %s\n", + status.error_code, status.error_message); + example_free(cache); + return EXIT_FAILURE; +} + +if (status.exit_status != exampleConverged) { + fprintf(stderr, "Warning: solver did not converge fully\n"); +} +``` + +The generated C example follows exactly this pattern. + +At the ABI level, callers are still responsible for passing valid pointers and +correctly sized arrays for `u`, `params`, and optional arguments such as `y0`. +Those are contract violations, not recoverable solver errors. 
+ ## Using the bindings in an app @@ -155,26 +209,41 @@ The auto-generated example has the following form: /* File: the_optimizer/example_optimizer.c */ #include +#include #include "example_bindings.h" -int main() { - double p[EXAMPLE_NUM_PARAMETERS] = {1.0, 10.0}; // parameter +int main(void) { + double p[EXAMPLE_NUM_PARAMETERS] = {0}; // parameter double u[EXAMPLE_NUM_DECISION_VARIABLES] = {0}; // initial guess + double initial_penalty = 15.0; exampleCache *cache = example_new(); - exampleSolverStatus status = example_solve(cache, u, p); - example_free(cache); - - for (int i = 0; i < EXAMPLE_NUM_DECISION_VARIABLES; ++i) { - printf("u[%d] = %g\n", i, u[i]); + if (cache == NULL) { + fprintf(stderr, "Could not allocate solver cache\n"); + return EXIT_FAILURE; } + exampleSolverStatus status = example_solve(cache, u, p, 0, &initial_penalty); + printf("exit status = %d\n", status.exit_status); + printf("error code = %d\n", status.error_code); + printf("error message = %s\n", status.error_message); printf("iterations = %lu\n", status.num_inner_iterations); printf("outer iterations = %lu\n", status.num_outer_iterations); printf("solve time = %f ms\n", (double)status.solve_time_ns / 1e6); - return 0; + if (status.error_code != 0) { + example_free(cache); + return EXIT_FAILURE; + } + + for (int i = 0; i < EXAMPLE_NUM_DECISION_VARIABLES; ++i) { + printf("u[%d] = %g\n", i, u[i]); + } + + example_free(cache); + + return EXIT_SUCCESS; } ``` @@ -185,19 +254,18 @@ int main() { To compile your C program you need to link to the auto-generated C bindings (see [next section](#compile-your-own-code)). However, OpEn generates automatically a `CMakeLists.txt` file -to facilitate the compilation/linking procedure. To build the -auto-generated example run +to facilitate the compilation/linking procedure. A typical build is: ```bash -cmake . -make +cmake -S . 
-B build +cmake --build build ``` -once you build your optimizer you can run the executable (`optimizer`) -with +Once you build your optimizer you can run the executable (`optimizer`) +with: ```bash -make run +cmake --build build --target run ``` #### Compile your own code @@ -260,14 +328,18 @@ LD_LIBRARY_PATH=./target/release ./optimizer The output looks like this: ```text +exit status = 0 +error code = 0 +error message = +iterations = 69 +outer iterations = 5 +solve time = 0.140401 ms u[0] = 0.654738 u[1] = 0.982045 u[2] = 0.98416 u[3] = 0.984188 u[4] = 0.969986 -exit status = 0 -iterations = 69 -outer iterations = 5 -solve time = 0.140401 ms -``` ``` + +If `error_code` is nonzero, the solver failed to produce a valid result and +`error_message` contains the propagated reason from the generated Rust solver. diff --git a/docs/python-examples.md b/docs/content/python-examples.md similarity index 100% rename from docs/python-examples.md rename to docs/content/python-examples.md diff --git a/docs/python-interface.md b/docs/content/python-interface.md similarity index 98% rename from docs/python-interface.md rename to docs/content/python-interface.md index fb798a6b..11be40eb 100644 --- a/docs/python-interface.md +++ b/docs/content/python-interface.md @@ -99,6 +99,14 @@ following types of constraints: A Cartesian product is a set $C = C_0 \times C_1 \times \ldots \times C_{s}$. In $\mathbb{R}^n$, a vector $x$ can be segmented as $$x=(x_{(0)}, x_{(1)}, \ldots, x_{(s)}),$$ into $s$ segments, $x_{(i)}\in\mathbb{R}^{m_i}$. The constraint $x \in C$ means $$x_{(i)} \in C_i,$$ for all $i=0,\ldots, s$. For example, consider the vector $x = ({\color{blue}{x_0}}, {\color{blue}{x_1}}, {\color{red}{x_2}}, {\color{red}{x_3}}, {\color{red}{x_4}})$; define the segments $$x_{(0)} = ({\color{blue}{x_0}}, {\color{blue}{x_1}}),\ x_{(1)} = ({\color{red}{x_2}}, {\color{red}{x_3}}, {\color{red}{x_4}})$$ These can be identified by the indices `1` and `4` (last indices of segments). 
+:::note +In Python, `CartesianProduct` uses inclusive last indices for each segment. +For example, `segment_ids = [1, 4]` means the segments `x[0:2]` and `x[2:5]`. + +This is different from the Rust API, where Cartesian products are specified +using cumulative lengths / exclusive end indices. +::: + Let us give an example: we will define the Cartesian product of a ball with a rectangle. Suppose that $U$ is a Euclidean ball with radius $r=1.5$ centered at diff --git a/docs/python-ocp-1.mdx b/docs/content/python-ocp-1.mdx similarity index 100% rename from docs/python-ocp-1.mdx rename to docs/content/python-ocp-1.mdx diff --git a/docs/python-ocp-2.md b/docs/content/python-ocp-2.md similarity index 100% rename from docs/python-ocp-2.md rename to docs/content/python-ocp-2.md diff --git a/docs/python-ocp-3.md b/docs/content/python-ocp-3.md similarity index 100% rename from docs/python-ocp-3.md rename to docs/content/python-ocp-3.md diff --git a/docs/python-ocp-4.md b/docs/content/python-ocp-4.md similarity index 100% rename from docs/python-ocp-4.md rename to docs/content/python-ocp-4.md diff --git a/docs/python-ros.md b/docs/content/python-ros.md similarity index 89% rename from docs/python-ros.md rename to docs/content/python-ros.md index f06fa078..c2e12d45 100644 --- a/docs/python-ros.md +++ b/docs/content/python-ros.md @@ -2,9 +2,13 @@ id: python-ros title: Generation of ROS packages sidebar_label: ROS packages -description: Code generation for ROS packages using OpEn in Python +description: Code generation for ROS packages using opengen --- +:::note Info +Opengen now supports [ROS2](./python-ros2). +::: + ## What is ROS The [Robot Operating System](https://www.ros.org/) (ROS) is a collection of tools and libraries, as well as a framework that facilitates the data exchange among them. ROS is popular in the robotics community and is used to design and operate modern robotic systems. 
@@ -21,11 +25,11 @@ OpEn (with opengen version `0.5.0` or newer) can generate ready-to-use ROS packa The input parameters message follows the following specification: -``` +```msg float64[] parameter # parameter p (mandatory) float64[] initial_guess # u0 (optional/recommended) float64[] initial_y # y0 (optional) -float64 initial_penalty # initial penalty (optional) +float64 initial_penalty # positive initial penalty (optional) ``` An example of such a message is @@ -37,12 +41,35 @@ initial_penalty: 1000, initial_y: [] ``` +`initial_penalty` is applied whenever it is strictly greater than a small +positive epsilon. If it is omitted, zero, or too close to zero, the generated +default initial penalty is used instead. + #### Result -A result message (`OptimizationResult`) contains the solution of the parametric optimization problem and details about the solution procedure such as the number of inner/outer iterations and the solution time. The result of an auto-generated OpEn node is a message with the following specification: +A result message (`OptimizationResult`) contains the solution of the parametric optimization problem and details about the solution procedure such as the number of inner/outer iterations and the solution time. +An example of such a message is given below: +```yaml +solution: [0.5317, 0.7975, 0.6761, 0.7760, 0.5214] +inner_iterations: 159 +outer_iterations: 5 +status: 0 +norm_fpr: 2.142283848e-06 +penalty: 111250.0 +lagrange_multipliers: [] +infeasibility_f1: 0.0 +infeasibility_f2: 2.44131958366e-05 +solve_time_ms: 2.665959 ``` + +
+<details>
+<summary>Specification of OptimizationResult</summary>
+
+The message `OptimizationResult` is described by the following message file
+
+```msg
# Constants match the enumeration of status codes
uint8 STATUS_CONVERGED=0
uint8 STATUS_NOT_CONVERGED_ITERATIONS=1
@@ -51,8 +78,8 @@ uint8 STATUS_NOT_CONVERGED_COST=3
uint8 STATUS_NOT_CONVERGED_FINITE_COMPUTATION=4

float64[] solution # optimizer (solution)
-uint8 inner_iterations # number of inner iterations
-uint16 outer_iterations # number of outer iterations
+uint64 inner_iterations # number of inner iterations
+uint64 outer_iterations # number of outer iterations
uint8 status # status code
float64 cost # cost value at solution
float64 norm_fpr # norm of FPR of last inner problem
@@ -63,20 +90,7 @@ float64 infeasibility_f2 # infeasibility wrt F2
float64 solve_time_ms # solution time in ms
```

-An example of such a message is given below:
-
-```yaml
-solution: [0.5317, 0.7975, 0.6761, 0.7760, 0.5214]
-inner_iterations: 159
-outer_iterations: 5
-status: 0
-norm_fpr: 2.142283848e-06
-penalty: 111250.0
-lagrange_multipliers: []
-infeasibility_f1: 0.0
-infeasibility_f2: 2.44131958366e-05
-solve_time_ms: 2.665959
-```
+</details>
### Configuration Parameters @@ -172,4 +186,4 @@ OpEn generates a README file that you will find in `my_optimizers/rosenbrock/par - Compile with `catkin_make` - Run using the auto-generated launch file -Check out the auto-generated README.md file for details. \ No newline at end of file +Check out the auto-generated README.md file for details. diff --git a/docs/content/python-ros2.mdx b/docs/content/python-ros2.mdx new file mode 100644 index 00000000..eff65771 --- /dev/null +++ b/docs/content/python-ros2.mdx @@ -0,0 +1,246 @@ +--- +id: python-ros2 +title: Generation of ROS2 packages +sidebar_label: ROS2 packages +description: Code generation for ROS2 packages using opengen +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +:::note Info +The functionality presented here was introduced in `opengen` version [`0.11.0a1`](https://pypi.org/project/opengen/#history). +::: + +## What is ROS2 + +[ROS2](https://docs.ros.org/en/jazzy/index.html) is the successor of the Robot Operating System (ROS). It provides tools, libraries, and communication mechanisms that make it easier to build distributed robotic applications. + +In ROS2, functionality is organised in **nodes** which exchange data by publishing and subscribing to **topics** using typed **messages**. This makes ROS2 a natural fit for connecting optimizers, controllers, estimators, and sensors in robotics systems. + +## ROS2 + OpEn + +OpEn can generate ready-to-use ROS2 packages directly from a parametric optimizer. The generated package exposes the optimizer as a ROS2 node, includes the required message definitions, and provides the files needed to build, configure, and launch it inside a ROS2 workspace. + +The input message matches the [ROS1 package documentation](./python-ros#messages). The ROS2 output message additionally includes `error_code` and `error_message` fields so that invalid requests and solver failures can be reported with more detail. 
+ +For scalar warm-starting, `initial_penalty` is used whenever it is strictly +greater than a small positive epsilon. If it is omitted, zero, or too close to +zero, the generated default initial penalty is used instead. + +## Configuration Parameters + +The configuration parameters are the same as in the [ROS1 package documentation](./python-ros#configuration-parameters): you can configure the node rate, the input topic name, and the output topic name. + +In ROS2, these settings are stored using the ROS2 parameter-file format in `config/open_params.yaml`: + +```yaml +/**: + ros__parameters: + result_topic: "result" + params_topic: "parameters" + rate: 10.0 +``` + +## Code generation + +To generate a ROS2 package from Python, create a `RosConfiguration` object and attach it to the build configuration using `.with_ros2(...)`. + +### Example + +```py +import opengen as og +import casadi.casadi as cs + +u = cs.SX.sym("u", 5) +p = cs.SX.sym("p", 2) +phi = og.functions.rosenbrock(u, p) + +problem = og.builder.Problem(u, p, phi) \ + .with_constraints(og.constraints.Ball2(None, 1.5)) + +meta = og.config.OptimizerMeta() \ + .with_optimizer_name("rosenbrock_ros2") + +ros2_config = og.config.RosConfiguration() \ + .with_package_name("parametric_optimizer_ros2") \ + .with_node_name("open_node_ros2") \ + .with_rate(10) + +build_config = og.config.BuildConfiguration() \ + .with_build_directory("my_optimizers") \ + .with_ros2(ros2_config) + +builder = og.builder.OpEnOptimizerBuilder(problem, meta, build_config) +builder.build() +``` + +Note the use of `with_ros2` and note that `RosConfiguration` is the same config +class as in [ROS1](./python-ros). +This generates the optimizer in `my_optimizers/rosenbrock_ros2`, and the ROS2 +package is created inside that directory as `parametric_optimizer_ros2`. +You can inspect the auto-generated ROS2 package [here](https://github.com/alphaville/open_ros/tree/master/ros2). 
+ +## Use the auto-generated ROS2 package + +OpEn generates a `README.md` file inside the generated ROS2 package with detailed instructions. In brief, the workflow is: + +1. Build the package with `colcon build` +2. Source the generated workspace setup script +3. Run the node with `ros2 run` +4. Publish optimization requests on the input topic and read results from the output topic + +For example, from inside the generated package directory: + + + + +```bash +colcon build --packages-select parametric_optimizer_ros2 +source install/setup.bash +ros2 run parametric_optimizer_ros2 open_node_ros2 +``` + + + + +```bash +colcon build --packages-select parametric_optimizer_ros2 +source install/setup.zsh +ros2 run parametric_optimizer_ros2 open_node_ros2 +``` + + + + +In a second terminal: + + + + +```bash +source install/setup.bash +ros2 topic pub --once /parameters parametric_optimizer_ros2/msg/OptimizationParameters \ + "{parameter: [1.0, 2.0], initial_guess: [0.0, 0.0, 0.0, 0.0, 0.0], initial_y: [], initial_penalty: 15.0}" +ros2 topic echo /result --once +``` + + + + +```bash +source install/setup.zsh +ros2 topic pub --once /parameters parametric_optimizer_ros2/msg/OptimizationParameters \ + "{parameter: [1.0, 2.0], initial_guess: [0.0, 0.0, 0.0, 0.0, 0.0], initial_y: [], initial_penalty: 15.0}" +ros2 topic echo /result --once +``` + + + + +If ROS2 cannot write to its default log directory, set an explicit writable log path before running the node: + +```bash +mkdir -p .ros_log +export ROS_LOG_DIR="$PWD/.ros_log" +``` + +:::note Troubleshooting +On some systems, the generated node may start but not appear in the ROS2 graph. 
If `ros2 topic pub` keeps printing `Waiting for at least 1 matching subscription(s)...`, set +`RMW_IMPLEMENTATION=rmw_fastrtps_cpp` in both terminals before sourcing the generated workspace and running any `ros2` commands: + +```bash +export RMW_IMPLEMENTATION=rmw_fastrtps_cpp +``` + +This should only be needed if ROS2 discovery is not working correctly with your default middleware. +::: + +To verify that the node is visible, you can run: + +```bash +ros2 node list --no-daemon --spin-time 5 +ros2 topic list --no-daemon --spin-time 5 +``` + +The first command should list the running node, for example `/open_node_ros2`. The second should list the available topics, including `/parameters` and `/result`. + +To read a single optimizer response, you can use: + +```bash +ros2 topic echo /result --once +``` + +This subscribes to the result topic, prints one `OptimizationResult` message, and then exits. +The above command will return a message that looks as follows + +```yaml +solution: +- 0.5352476095477849 +- 0.8028586510585609 +- 0.6747818561706652 +- 0.7747513439588263 +- 0.5131839675113338 +inner_iterations: 41 +outer_iterations: 6 +status: 0 +error_code: 0 +error_message: '' +cost: 1.1656771801253916 +norm_fpr: 2.1973496274068953e-05 +penalty: 150000.0 +lagrange_multipliers: [] +infeasibility_f1: 0.0 +infeasibility_f2: 3.3074097972366455e-05 +solve_time_ms: 0.2175 +``` + +
+<details>
+ <summary>See the specification of `OptimizationResult`</summary>
+ ```msg
+ # Constants match the enumeration of status codes
+ uint8 STATUS_CONVERGED=0
+ uint8 STATUS_NOT_CONVERGED_ITERATIONS=1
+ uint8 STATUS_NOT_CONVERGED_OUT_OF_TIME=2
+ uint8 STATUS_NOT_CONVERGED_COST=3
+ uint8 STATUS_NOT_CONVERGED_FINITE_COMPUTATION=4
+ uint8 STATUS_INVALID_REQUEST=5
+
+ float64[] solution # solution
+ uint64 inner_iterations # number of inner iterations
+ uint64 outer_iterations # number of outer iterations
+ uint8 status # coarse status code
+ uint16 error_code # detailed error code (0 on success)
+ string error_message # detailed error message (empty on success)
+ float64 cost # cost value at solution
+ float64 norm_fpr # norm of FPR of last inner problem
+ float64 penalty # penalty value
+ float64[] lagrange_multipliers # vector of Lagrange multipliers
+ float64 infeasibility_f1 # infeasibility wrt F1
+ float64 infeasibility_f2 # infeasibility wrt F2
+ float64 solve_time_ms # solution time in ms
+ ```
+</details>
+ +If the request is invalid, the node publishes a result with `status: 5` (`STATUS_INVALID_REQUEST`) and fills `error_code` and `error_message`. For example, if the parameter vector has the wrong length, `error_code` is `3003` and `error_message` explains the mismatch. + +Instead of starting the node with `ros2 run`, you can also use the generated launch file: + +```bash +ros2 launch parametric_optimizer_ros2 open_optimizer.launch.py +``` + +The launch file starts the auto-generated node and loads its parameters from `config/open_params.yaml`, where you can adjust settings such as the input topic, output topic, and node rate. + + +## Inside the ROS2 package + +The auto-generated ROS2 package contains everything needed to build and run the optimizer as a ROS2 node. + +- `msg/` contains the auto-generated message definitions, including `OptimizationParameters.msg` and `OptimizationResult.msg` +- `src/` contains the C++ node implementation that wraps the optimizer +- `include/` contains the corresponding C++ headers +- `config/open_params.yaml` stores runtime parameters such as the input topic, output topic, and node rate +- `launch/open_optimizer.launch.py` provides a ready-to-use ROS2 launch file +- `CMakeLists.txt` and `package.xml` define the ROS2 package and its build dependencies +- `README.md` contains package-specific build and usage instructions diff --git a/docs/python-tcp-ip.md b/docs/content/python-tcp-ip.md similarity index 73% rename from docs/python-tcp-ip.md rename to docs/content/python-tcp-ip.md index a8ee9d48..fdb5d3c3 100644 --- a/docs/python-tcp-ip.md +++ b/docs/content/python-tcp-ip.md @@ -131,24 +131,56 @@ $ echo '{ "Kill" : 1 }' | nc localhost 4598 ### Error reporting -In case a request cannot be processed, e.g., because the provided JSON is malformed, the provided vectors have incompatible dimensions, the TCP server will return to the client an error report. 
This is a JSON with three attributes: (i) a key-value pair `"type": "Error"`, to allow the client to tell that an error has occurred, (ii) a `code`, which can be used to uniquely identify the type of error and (iii) a `message`, which offers some human-readable details. +If a request cannot be processed, for example because the JSON payload is +malformed, the request body is not valid UTF-8, or one of the provided vectors +has the wrong dimension, the TCP server returns an error report instead of a +solution. -For example, if the client provides an incompatible number of parameters, that is, if vector `parameter` is of the wrong length, then the server will return the following error: +The error report is a JSON document with three fields: + +- `"type": "Error"` so the client can distinguish errors from successful solver + responses +- `code`, a machine-readable integer code +- `message`, a human-readable string with more context + +In particular, the `message` field is now intended to be descriptive. For +dimension-related errors it includes the provided and expected lengths, and for +solver failures it includes the underlying solver-side reason whenever +available. 
+ +For example, if the client provides an incompatible number of parameters, that +is, if vector `parameter` has the wrong length, the server returns an error like +the following: + +```json +{ + "type": "Error", + "code": 3003, + "message": "wrong number of parameters: provided 1, expected 2" +} +``` + +Likewise, if the solver itself fails, the server returns code `2000` together +with the propagated solver reason, for example: ```json { - "type": "Error", - "code": 3003, - "message": "wrong number of parameters" + "type": "Error", + "code": 2000, + "message": "problem solution failed: non-finite computation: gradient evaluation returned a non-finite value during an FBS step" } ``` -The following errors may be returned to the client +The following error codes may be returned to the client: | Code | Explanation | |-----------|---------------------------------------------| -| 1000 | Invalid request: Malformed or invalid JSON | +| 1000 | Invalid request: malformed JSON or invalid UTF-8 payload | | 1600 | Initial guess has incompatible dimensions | -| 1700 | Wrong dimension of Langrange multipliers | -| 2000 | Problem solution failed (solver error) | +| 1700 | Wrong dimension of Lagrange multipliers | +| 2000 | Problem solution failed; the message contains the solver-side reason | | 3003 | Vector `parameter` has wrong length | + +When using the Python TCP client, these responses are surfaced as +`opengen.tcp.solver_error.SolverError`, whose `code` and `message` properties +mirror the JSON payload returned by the TCP server. 
diff --git a/docs/udp-sockets.md b/docs/content/udp-sockets.md similarity index 100% rename from docs/udp-sockets.md rename to docs/content/udp-sockets.md diff --git a/docs/contributing.md b/docs/contributing.md deleted file mode 100644 index dcae4bae..00000000 --- a/docs/contributing.md +++ /dev/null @@ -1,164 +0,0 @@ ---- -id: contributing -sidebar_label: Contributing -title: Contributing to OpEn -description: How do I contribute to OpEn ---- - -## How can I contribute to OpEn? -Thank you for considering contributing to Optimization Engine (OpEn)! - -OpEn is an open source project and welcomes contributions from the community. - -You can contribute in several ways: - -- Submit an [**issue**](https://github.com/alphaville/optimization-engine/issues): - Often bugs will go unnoticed by the core development team and certain - use cases and user needs will have evaded our attention. - Consider submitting an issue if: - - You would like to report a [bug]; please, use the provided template for reporting - bugs. It is essential to give information about your system (OS, OpEn version) - and outline a sequence of steps to reproduce the error. When possible, please - provide a [minimum working example] - - You would like to request a [new feature]; please use the provided template - - You would like to propose modifications in OpEn's documentation, such as - for some concepts to be better elucidated or a request for an additional example -- Share with us a **success story** on [**Discord**](https://discord.gg/mfYpn4V) -- Create a **pull request** (see below) - -or, show us your love: - -- Give us a [**star on gitub**](https://github.com/alphaville/optimization-engine) -- Spread the word on [**Twitter**] - -![Star](https://media.giphy.com/media/ZxblqUVrPVmcqATkC4/giphy.gif) - -## I just have a question! -The easiest and quickest way to ask a question is to reach us on [**Discord**](https://discord.gg/mfYpn4V) or [**Gitter**](https://gitter.im/alphaville/optimization-engine). 
- -You may also consult the [**frequently asked questions**](/optimization-engine/docs/faq). - - -## Submitting issues -You may submit an issue regarding anything related to **OpEn**, such as: - -- a bug -- insufficient/vague documentation -- request for a feature -- request for an example - -You should, however, make sure that the same - or a very similar - issue is not already open. In that case, you may write a comment in an existing issue. - - -## Contributing code or docs - -In order to contribute code or documentation, you need to [fork] our github repository, make you modifications and submit a pull request. You should follow these rules: - -- create one or more [issues on github] that will be associated with your changes -- take it from `master`: fork OpEn and create a branch on `master` - -```console -git checkout -b fix/xyz master -``` - -- read the [style guide](#coding-style-guide) below (and write unit/integration tests) -- create a pull request in which you need to explain the key changes - -## Coding style guide - -Things to keep in mind: - -- **Code**: intuitive structure and variable names, short atomic functions, -- **Comments**: help others better understand your code -- **Docs**: document all functions (even private ones) -- **Tests**: write comprehnsive, exhaustive tests - -### Rust - -*General guidelines:* Read the Rust [API guidelines] and this [API checklist] - -*Naming convention:* We follow the [standard naming convention](https://rust-lang-nursery.github.io/api-guidelines/naming.html) of Rust. - -*Documentation:* We follow [these guidelines](https://rust-lang-nursery.github.io/api-guidelines/documentation.html). Everything should be documented. 
- -### Python - -We follow [this style guide](https://www.python.org/dev/peps/pep-0008) and its [naming convention](https://www.python.org/dev/peps/pep-0008/#naming-conventions) - - -### Website -This documentation is generated with Docusaurus - read a detailed guide [here](https://github.com/alphaville/optimization-engine/blob/master/website/README.md). - -- All docs are in `docs/` -- Blog entries are in `website/blog/` - -To start the website locally (at [http://localhost:3000/optimization-engine](http://localhost:3000/optimization-engine)) change directory to `website` and run `yarn start`. To update the website, execute `./publish.sh` (you need to be a collaborator on github). - -## Using Git -When using Git, keep in mind the following guidelines: - -- Create simple, atomic, commits -- Write comprehensive commit messages -- Work on a forked repository -- When you're done, submit a pull request to -[`alphaville/optimization-engine`](https://github.com/alphaville/optimization-engine/); -it will be promptly delegated to a reviewer and we will contact you -as soon as possible. - -Branch `master` is protected and all pull requests need to be reviewed by a person -other than their proposer before they can be merged into `master`. - -## Versioning -This project consists of independent modules: -(i) the core Rust library, -(ii) the MATLAB interface, -(iii) the Python interface. -Each module has a different version number (`X.Y.Z`). - -We use the **SemVer** standard - we quote from [semver.org](https://semver.org/): - -Given a version number `MAJOR.MINOR.PATCH`, increment the: - -- `MAJOR` version when you make incompatible API changes, -- `MINOR` version when you add functionality in a backwards-compatible manner, and -- `PATCH` version when you make backwards-compatible bug fixes. - -Additional labels for pre-release and build metadata are available as extensions to the `MAJOR.MINOR.PATCH` format. 
- -We also keep a [log of changes](https://github.com/alphaville/optimization-engine/blob/master/CHANGELOG.md) where we summarize the main changes since last version. - -## Releasing - -Each time the major or minor number of the Rust library is updated, a new crate should be published on [crates.io](https://crates.io/crates/optimization_engine). - -In order to release a new version make sure that -you have done the following: - -- Updated [CHANGELOG] -- Updated the version in (SemVer): - - [CHANGELOG] - - [Cargo.toml] - - [setup.py] -- Resolved all associated issues on github (and you have created tests for these) -- Updated the documentation (Rust/Python API docs + website) -- Merged into master (your pull request has been approved) -- All tests pass on Travis CI and Appveyor -- Set `publish=true` in `Cargo.toml` (set it back to `false` for safety) -- Publish `opengen` on PyPI (if necessary) - - before doing so, make sure that the cargo.toml template - points to the correct version of OpEn -- Changed "Unreleased" into the right version in [CHANGELOG] and created - a release on github (example [release v0.4.0]) - -[CHANGELOG]: https://github.com/alphaville/optimization-engine/blob/master/CHANGELOG.md -[Cargo.toml]: https://github.com/alphaville/optimization-engine/blob/master/Cargo.toml -[setup.py]: https://github.com/alphaville/optimization-engine/blob/master/open-codegen/setup.py -[release v0.4.0]: https://github.com/alphaville/optimization-engine/releases/tag/v0.4.0 -[bug]: https://github.com/alphaville/optimization-engine/issues/new?template=bug_report.md -[issues on github]: https://github.com/alphaville/optimization-engine/issues -[**Twitter**]: https://twitter.com/intent/tweet?original_referer=https%3A%2F%2Falphaville.github.io%2Foptimization-engine&ref_src=twsrc%5Etfw&text=Fast%20and%20accurate%20embedded%20nonconvex%20optimization%20with%20%23OptimizationEngine&tw_p=tweetbutton&url=https%3A%2F%2Falphaville.github.io%2Foptimization-engine&via=isToxic 
-[minimum working example]: https://en.wikipedia.org/wiki/Minimal_working_example -[new feature]: https://github.com/alphaville/optimization-engine/issues/new?template=feature_request.md -[fork]: https://github.com/alphaville/optimization-engine -[API guidelines]: https://rust-lang-nursery.github.io/api-guidelines/about.html -[API checklist]: https://rust-lang-nursery.github.io/api-guidelines/checklist.html diff --git a/sphinx-dox/Makefile b/docs/sphinx/Makefile similarity index 100% rename from sphinx-dox/Makefile rename to docs/sphinx/Makefile diff --git a/sphinx-dox/make.bat b/docs/sphinx/make.bat similarity index 100% rename from sphinx-dox/make.bat rename to docs/sphinx/make.bat diff --git a/sphinx-dox/source/conf.py b/docs/sphinx/source/conf.py similarity index 94% rename from sphinx-dox/source/conf.py rename to docs/sphinx/source/conf.py index 44ff04d7..f580a304 100644 --- a/sphinx-dox/source/conf.py +++ b/docs/sphinx/source/conf.py @@ -5,7 +5,7 @@ import os import sys -sys.path.insert(0, os.path.abspath("../../open-codegen/opengen")) +sys.path.insert(0, os.path.abspath("../../../python/opengen")) def skip(app, what, name, obj, would_skip, options): diff --git a/sphinx-dox/source/index.rst b/docs/sphinx/source/index.rst similarity index 100% rename from sphinx-dox/source/index.rst rename to docs/sphinx/source/index.rst diff --git a/docs/website/README.md b/docs/website/README.md new file mode 100644 index 00000000..4357f6ca --- /dev/null +++ b/docs/website/README.md @@ -0,0 +1,59 @@ +This website now uses Docusaurus v3. 
+ +# Development + +Install dependencies: + +```sh +yarn +``` + +Start the local dev server: + +```sh +yarn start +``` + +Build the production site: + +```sh +yarn build +``` + +Preview the production build locally: + +```sh +yarn serve +``` + +Deploy to GitHub Pages: + +```sh +yarn deploy +``` + +# Project Layout + +``` +optimization-engine/ + docs/ + content/ # documentation markdown files + sphinx/ # Sphinx API docs + website/ + blog/ # blog posts + src/ + css/ + pages/ + static/ + img/ + js/ + docusaurus.config.js + sidebars.js + package.json +``` + +# Notes + +- The docs content lives under `/docs/content`. +- Legacy inline MathJax and widget scripts are stripped at build time, and equivalent site-wide support is loaded from `docs/website/static/js`. +- Sidebar ordering now lives in `docs/website/sidebars.js`. diff --git a/website/blog/2019-02-28-new-version.md b/docs/website/blog/2019-02-28-new-version.md similarity index 100% rename from website/blog/2019-02-28-new-version.md rename to docs/website/blog/2019-02-28-new-version.md diff --git a/website/blog/2019-03-01-blog.md b/docs/website/blog/2019-03-01-blog.md similarity index 100% rename from website/blog/2019-03-01-blog.md rename to docs/website/blog/2019-03-01-blog.md diff --git a/website/blog/2019-03-02-superscs.md b/docs/website/blog/2019-03-02-superscs.md similarity index 100% rename from website/blog/2019-03-02-superscs.md rename to docs/website/blog/2019-03-02-superscs.md diff --git a/website/blog/2019-03-03-risk-averse.md b/docs/website/blog/2019-03-03-risk-averse.md similarity index 100% rename from website/blog/2019-03-03-risk-averse.md rename to docs/website/blog/2019-03-03-risk-averse.md diff --git a/website/blog/2019-03-05-matlab-raspberry.md b/docs/website/blog/2019-03-05-matlab-raspberry.md similarity index 100% rename from website/blog/2019-03-05-matlab-raspberry.md rename to docs/website/blog/2019-03-05-matlab-raspberry.md diff --git a/website/blog/2019-03-05-udp-sockets.md 
b/docs/website/blog/2019-03-05-udp-sockets.md similarity index 100% rename from website/blog/2019-03-05-udp-sockets.md rename to docs/website/blog/2019-03-05-udp-sockets.md diff --git a/website/blog/2019-03-06-talk-to-us.md b/docs/website/blog/2019-03-06-talk-to-us.md similarity index 100% rename from website/blog/2019-03-06-talk-to-us.md rename to docs/website/blog/2019-03-06-talk-to-us.md diff --git a/website/blog/2019-03-15-pure-rust-optimization.md b/docs/website/blog/2019-03-15-pure-rust-optimization.md similarity index 100% rename from website/blog/2019-03-15-pure-rust-optimization.md rename to docs/website/blog/2019-03-15-pure-rust-optimization.md diff --git a/website/blog/2019-03-19-rust-robotics.md b/docs/website/blog/2019-03-19-rust-robotics.md similarity index 100% rename from website/blog/2019-03-19-rust-robotics.md rename to docs/website/blog/2019-03-19-rust-robotics.md diff --git a/website/blog/2019-03-21-fast-udp-connection.md b/docs/website/blog/2019-03-21-fast-udp-connection.md similarity index 100% rename from website/blog/2019-03-21-fast-udp-connection.md rename to docs/website/blog/2019-03-21-fast-udp-connection.md diff --git a/website/blog/2022-07-30-blog.md b/docs/website/blog/2022-07-30-blog.md similarity index 100% rename from website/blog/2022-07-30-blog.md rename to docs/website/blog/2022-07-30-blog.md diff --git a/website/blog/2026-03-21-python-ocp-module.md b/docs/website/blog/2026-03-21-python-ocp-module.md similarity index 100% rename from website/blog/2026-03-21-python-ocp-module.md rename to docs/website/blog/2026-03-21-python-ocp-module.md diff --git a/docs/website/blog/2026-03-28-open-0-12-0.md b/docs/website/blog/2026-03-28-open-0-12-0.md new file mode 100644 index 00000000..c8d8d0e5 --- /dev/null +++ b/docs/website/blog/2026-03-28-open-0-12-0.md @@ -0,0 +1,41 @@ +--- +title: Announcing OpEn 0.12.0 for Rust +author: Pantelis Sopasakis +authorURL: https://github.com/alphaville +authorImageURL: 
https://avatars.githubusercontent.com/u/125415?v=4 +--- + +OpEn 0.12.0 brings important updates to the core Rust library. +The key changes are: + +- Support for both `f32` and `f64` ([docs](/optimization-engine/docs/openrust-arithmetic)) +- Better error handling and reporting +- Using [web-time](https://crates.io/crates/web-time) which works in browsers + + + +## Highlights + +### Support for both `f32` and `f64` + +The Rust solver now supports generic floating-point types, so you can work with either `f32` or `f64` depending on your target platform and performance needs. + +This is particularly useful for embedded and resource-constrained systems, where `f32` can be a better fit, while `f64` remains available for applications that need higher precision. + +### Replaced `instant` and wasm with `web-time` + +The timing layer has been simplified by switching to `web-time`. + +This replaces the previous `instant` setup and the extra wasm-specific feature wiring around it, making timing support cleaner and more portable across native and web-oriented targets. + +### Better error handling + +Error reporting in the Rust library has been significantly improved. + +Projection failures, invalid numerical states, linear algebra failures, and other internal solver issues now surface as richer Rust-side errors with clearer, human-readable explanations. Constraint projections are also fallible now, which means these failures can propagate through FBS, PANOC, and ALM instead of being silently flattened into harder-to-debug behavior. 
+ +## See also + +- [Rust documentation](/optimization-engine/docs/openrust-basic) +- [Rust features](/optimization-engine/docs/openrust-features) +- [Project repository](https://github.com/alphaville/optimization-engine) diff --git a/website/docusaurus.config.js b/docs/website/docusaurus.config.js similarity index 99% rename from website/docusaurus.config.js rename to docs/website/docusaurus.config.js index 99628988..59e6f016 100644 --- a/website/docusaurus.config.js +++ b/docs/website/docusaurus.config.js @@ -54,7 +54,7 @@ module.exports = { 'classic', { docs: { - path: '../docs', + path: '../content', routeBasePath: 'docs', sidebarPath: require.resolve('./sidebars.js'), }, diff --git a/website/package.json b/docs/website/package.json similarity index 100% rename from website/package.json rename to docs/website/package.json diff --git a/website/publish.sh b/docs/website/publish.sh similarity index 100% rename from website/publish.sh rename to docs/website/publish.sh diff --git a/website/sidebars.js b/docs/website/sidebars.js similarity index 87% rename from website/sidebars.js rename to docs/website/sidebars.js index 94729cf6..0608b097 100644 --- a/website/sidebars.js +++ b/docs/website/sidebars.js @@ -14,6 +14,7 @@ module.exports = { 'python-c', 'python-bindings', 'python-tcp-ip', + 'python-ros2', 'python-ros', 'python-examples', ], @@ -26,12 +27,12 @@ module.exports = { { type: 'category', label: 'Rust', - items: ['openrust-basic', 'openrust-alm', 'openrust-features'], + items: ['openrust-basic', 'openrust-alm', 'openrust-features', 'openrust-arithmetic'], }, { type: 'category', label: 'MATLAB', - items: ['matlab-interface', 'matlab-examples'], + items: ['matlab-api', 'matlab-interface', 'matlab-examples'], }, { type: 'category', diff --git a/website/src/css/custom.css b/docs/website/src/css/custom.css similarity index 79% rename from website/src/css/custom.css rename to docs/website/src/css/custom.css index 09bce1f7..568b4152 100644 --- 
a/website/src/css/custom.css +++ b/docs/website/src/css/custom.css @@ -32,7 +32,6 @@ body { } .navbar { - backdrop-filter: blur(16px); box-shadow: 0 10px 30px rgba(86, 44, 28, 0.12); } @@ -197,7 +196,7 @@ body { .homeCodeBlock { background: var(--open-page-surface); border: 1px solid var(--open-page-border); - border-radius: 24px; + border-radius: 15px; box-shadow: var(--open-page-shadow); } @@ -368,6 +367,111 @@ body { width: 100%; } +.homeRos2Promo { + display: grid; + grid-template-columns: minmax(0, 1.05fr) minmax(320px, 0.95fr); + gap: 1.5rem; + align-items: center; + width: min(1100px, calc(100% - 2rem)); + margin: 0 auto; + padding: 2rem; + background: + linear-gradient(145deg, rgba(164, 62, 53, 0.88), rgba(141, 33, 183, 0.92)), + #843129; + border: 1px solid rgba(255, 224, 204, 0.2); + border-radius: 28px; + box-shadow: var(--open-page-shadow); +} + +.homeRos2Promo__content, +.homeRos2Promo__code { + min-width: 0; +} + +.homeRos2Promo__content h2 { + margin: 0 0 0.85rem; + color: #fff8f3; + font-size: clamp(2rem, 4vw, 3rem); + line-height: 1.08; +} + +.homeRos2Promo__content p { + color: rgba(255, 248, 243, 0.88); +} + +.homeRos2Promo__robot { + display: block; + width: 200px; + height: 200px; + margin: 0 auto 1rem; +} + +.homeRos2Promo__attribution { + margin: -0.4rem 0 0.9rem; + text-align: center; + font-size: 0.68rem; + line-height: 1.25; +} + +.homeRos2Promo__attribution a { + color: rgba(255, 248, 243, 0.82); + text-decoration: none; +} + +.homeRos2Promo__attribution a:hover { + color: #fff8f3; + text-decoration: underline; +} + +.homeRos2Promo__codeBlock { + margin-top: 0; + background: rgba(255, 248, 243, 0.96); + border-color: rgba(255, 224, 204, 0.3); +} + +.homeRos2Promo__codeBlock .theme-code-block { + margin-bottom: 0; +} + +.homeDockerPromo { + display: grid; + grid-template-columns: minmax(0, 1.02fr) minmax(320px, 0.98fr); + gap: 1.5rem; + align-items: center; + width: min(1100px, calc(100% - 2rem)); + margin: 0 auto; +} + 
+.homeDockerPromo__content, +.homeDockerPromo__visual { + min-width: 0; +} + +.homeDockerPromo__content h2 { + margin: 0 0 0.85rem; + color: #2f1a14; + font-size: clamp(2rem, 4vw, 3rem); + line-height: 1.08; +} + +.homeDockerPromo__content p { + color: var(--open-page-muted); +} + +.homeDockerPromo__image { + display: block; + width: min(100%, 280px); + margin: 0 auto 1rem; +} + +.homeDockerPromo__codeBlock { + margin-top: 0; +} + +.homeDockerPromo__codeBlock .theme-code-block { + margin-bottom: 0; +} + .homeSplit__copy, .homeSplit__media { min-width: 0; @@ -617,11 +721,47 @@ body { max-width: none; } - .homeOcpPromo { + .homeOcpPromo, + .homeRos2Promo, + .homeDockerPromo { grid-template-columns: 1fr; } } +@media (min-width: 997px) { + .navbar { + backdrop-filter: blur(16px); + } +} + +@media (max-width: 996px) { + .navbar-sidebar, + .navbar-sidebar__items, + .navbar-sidebar__item.menu { + background: #f8eee7; + } + + .navbar-sidebar__brand, + .navbar-sidebar__back, + .navbar-sidebar__close, + .navbar-sidebar .menu__link, + .navbar-sidebar .menu__caret, + .navbar-sidebar .menu__link--sublist::after { + color: #221714; + } + + .navbar-sidebar .menu__link { + font-weight: 500; + } + + .navbar-sidebar .menu__link:hover, + .navbar-sidebar .menu__link--active, + .navbar-sidebar .menu__list-item-collapsible:hover { + background: rgba(122, 31, 31, 0.08); + color: #221714; + } +} + @media (max-width: 640px) { .homeHero { padding-top: 2rem; @@ -640,4 +780,8 @@ body { width: 64px; height: 64px; } + + .homeRos2Promo { + padding: 1.5rem; + } } diff --git a/website/src/pages/help.js b/docs/website/src/pages/help.js similarity index 100% rename from website/src/pages/help.js rename to docs/website/src/pages/help.js diff --git a/website/src/pages/index.js b/docs/website/src/pages/index.js similarity index 77% rename from website/src/pages/index.js rename to docs/website/src/pages/index.js index 8a0fa34d..fb458b1e 100644 --- a/website/src/pages/index.js +++ 
b/docs/website/src/pages/index.js @@ -35,6 +35,17 @@ builder = og.builder.OpEnOptimizerBuilder( ) builder.build()`; +const ros2PromoCode = String.raw`ros2_config = og.config.RosConfiguration() \ + .with_package_name("my_ros_pkg") \ + .with_node_name("open_node_ros2") \ + .with_rate(10.0) + +build_config = og.config.BuildConfiguration() \ + .with_build_directory("my_optimizers") \ + .with_ros2(ros2_config)`; + +const dockerPromoCode = String.raw`docker pull alphaville/open:0.7.0` + const heroStats = [ {label: 'Core language', value: 'Rust'}, {label: 'Primary uses', value: 'MPC, MHE, Robotics'}, @@ -111,7 +122,9 @@ export default function Home() { const assetUrl = (path) => `${baseUrl}${path.replace(/^\//, '')}`; const promoGif = assetUrl('img/open-promo.gif'); const boxLogo = assetUrl('img/box.png'); + const dockerGif = assetUrl('img/docker.gif'); const ocpStatesImage = assetUrl('img/ocp-states.png'); + const ros2RobotImage = assetUrl('img/ros2-robot.png'); const [zoomedImage, setZoomedImage] = useState(null); useEffect(() => { @@ -352,6 +365,94 @@ export default function Home() { + +
+
+
+

New in opegen 0.11

+

ROS2 packages

+

+ OpEn can now generate ROS2 packages directly from a parametric + optimizer. The generated package includes ROS2 messages, + configuration files, a launch file, and a node that exposes the + solver through topics. +

+

+ This makes it easy to connect optimization-based controllers, + estimators, and planning modules into a modern robotics stack + without writing the ROS2 wrapper code by hand. +

+
+ + Learn more + + + Legacy ROS1 + +
+
+
+ Cartoon robot icon +

+ + Bot icons created by pbig - Flaticon + +

+
+ {ros2PromoCode} +
+
+
+
+ +
+
+
+

Docker image

+

Run OpEn in a ready-made container

+

+ OpEn ships with a Docker image that gets you straight into a + working environment with Jupyter, Python, and the tooling needed + to explore examples without local setup friction. +

+

+ It is a convenient way to try the Python interface, browse the + notebooks, and experiment with the OCP workflows in a clean, + reproducible environment. +

+
+ + Learn more + + + Docker Hub + +
+
+
+ OpEn running inside the Docker image with Jupyter +
+ {dockerPromoCode} +
+
+
+
{zoomedImage ? (
{ + if (lang === 'php') { + require('prismjs/components/prism-markup-templating.js'); + } + require(`prismjs/components/prism-${lang}`); + }); + + registerMsgLanguage(PrismObject); + + delete globalThis.Prism; + if (typeof PrismBefore !== 'undefined') { + globalThis.Prism = PrismObject; + } +} diff --git a/website/static/css/codeblock.css b/docs/website/static/css/codeblock.css similarity index 100% rename from website/static/css/codeblock.css rename to docs/website/static/css/codeblock.css diff --git a/website/static/css/custom.css b/docs/website/static/css/custom.css similarity index 100% rename from website/static/css/custom.css rename to docs/website/static/css/custom.css diff --git a/website/static/img/115ba54c2ad0.gif b/docs/website/static/img/115ba54c2ad0.gif similarity index 100% rename from website/static/img/115ba54c2ad0.gif rename to docs/website/static/img/115ba54c2ad0.gif diff --git a/website/static/img/6f6ea4f8d194.gif b/docs/website/static/img/6f6ea4f8d194.gif similarity index 100% rename from website/static/img/6f6ea4f8d194.gif rename to docs/website/static/img/6f6ea4f8d194.gif diff --git a/website/static/img/about-open.png b/docs/website/static/img/about-open.png similarity index 100% rename from website/static/img/about-open.png rename to docs/website/static/img/about-open.png diff --git a/website/static/img/bnp.png b/docs/website/static/img/bnp.png similarity index 100% rename from website/static/img/bnp.png rename to docs/website/static/img/bnp.png diff --git a/website/static/img/bnp_1.png b/docs/website/static/img/bnp_1.png similarity index 100% rename from website/static/img/bnp_1.png rename to docs/website/static/img/bnp_1.png diff --git a/website/static/img/bnp_2.png b/docs/website/static/img/bnp_2.png similarity index 100% rename from website/static/img/bnp_2.png rename to docs/website/static/img/bnp_2.png diff --git a/website/static/img/bnp_3.png b/docs/website/static/img/bnp_3.png similarity index 100% rename from 
website/static/img/bnp_3.png rename to docs/website/static/img/bnp_3.png diff --git a/website/static/img/bnp_4.png b/docs/website/static/img/bnp_4.png similarity index 100% rename from website/static/img/bnp_4.png rename to docs/website/static/img/bnp_4.png diff --git a/website/static/img/bnp_5.png b/docs/website/static/img/bnp_5.png similarity index 100% rename from website/static/img/bnp_5.png rename to docs/website/static/img/bnp_5.png diff --git a/website/static/img/bnp_6.png b/docs/website/static/img/bnp_6.png similarity index 100% rename from website/static/img/bnp_6.png rename to docs/website/static/img/bnp_6.png diff --git a/website/static/img/box.png b/docs/website/static/img/box.png similarity index 100% rename from website/static/img/box.png rename to docs/website/static/img/box.png diff --git a/website/static/img/bullseye.svg b/docs/website/static/img/bullseye.svg similarity index 100% rename from website/static/img/bullseye.svg rename to docs/website/static/img/bullseye.svg diff --git a/website/static/img/cart_schematic.jpg b/docs/website/static/img/cart_schematic.jpg similarity index 100% rename from website/static/img/cart_schematic.jpg rename to docs/website/static/img/cart_schematic.jpg diff --git a/website/static/img/chat.jpeg b/docs/website/static/img/chat.jpeg similarity index 100% rename from website/static/img/chat.jpeg rename to docs/website/static/img/chat.jpeg diff --git a/website/static/img/docker.gif b/docs/website/static/img/docker.gif similarity index 100% rename from website/static/img/docker.gif rename to docs/website/static/img/docker.gif diff --git a/website/static/img/e8f236af8d38.gif b/docs/website/static/img/e8f236af8d38.gif similarity index 100% rename from website/static/img/e8f236af8d38.gif rename to docs/website/static/img/e8f236af8d38.gif diff --git a/website/static/img/edge_intelligence.png b/docs/website/static/img/edge_intelligence.png similarity index 100% rename from website/static/img/edge_intelligence.png rename to 
docs/website/static/img/edge_intelligence.png diff --git a/website/static/img/examples_almpm.jpg b/docs/website/static/img/examples_almpm.jpg similarity index 100% rename from website/static/img/examples_almpm.jpg rename to docs/website/static/img/examples_almpm.jpg diff --git a/website/static/img/examples_bnp_nmpc.jpg b/docs/website/static/img/examples_bnp_nmpc.jpg similarity index 100% rename from website/static/img/examples_bnp_nmpc.jpg rename to docs/website/static/img/examples_bnp_nmpc.jpg diff --git a/website/static/img/examples_estimation_chaotic.jpg b/docs/website/static/img/examples_estimation_chaotic.jpg similarity index 100% rename from website/static/img/examples_estimation_chaotic.jpg rename to docs/website/static/img/examples_estimation_chaotic.jpg diff --git a/website/static/img/examples_invpend.jpg b/docs/website/static/img/examples_invpend.jpg similarity index 100% rename from website/static/img/examples_invpend.jpg rename to docs/website/static/img/examples_invpend.jpg diff --git a/website/static/img/examples_navigation_gvehicle.jpg b/docs/website/static/img/examples_navigation_gvehicle.jpg similarity index 100% rename from website/static/img/examples_navigation_gvehicle.jpg rename to docs/website/static/img/examples_navigation_gvehicle.jpg diff --git a/website/static/img/examples_rosenbrock.jpg b/docs/website/static/img/examples_rosenbrock.jpg similarity index 100% rename from website/static/img/examples_rosenbrock.jpg rename to docs/website/static/img/examples_rosenbrock.jpg diff --git a/website/static/img/examples_tanks.jpg b/docs/website/static/img/examples_tanks.jpg similarity index 100% rename from website/static/img/examples_tanks.jpg rename to docs/website/static/img/examples_tanks.jpg diff --git a/website/static/img/external-link-square-alt-solid.svg b/docs/website/static/img/external-link-square-alt-solid.svg similarity index 100% rename from website/static/img/external-link-square-alt-solid.svg rename to 
docs/website/static/img/external-link-square-alt-solid.svg diff --git a/website/static/img/f1-10-main-car_orig.png b/docs/website/static/img/f1-10-main-car_orig.png similarity index 100% rename from website/static/img/f1-10-main-car_orig.png rename to docs/website/static/img/f1-10-main-car_orig.png diff --git a/website/static/img/favicon.png b/docs/website/static/img/favicon.png similarity index 100% rename from website/static/img/favicon.png rename to docs/website/static/img/favicon.png diff --git a/website/static/img/fbe.png b/docs/website/static/img/fbe.png similarity index 100% rename from website/static/img/fbe.png rename to docs/website/static/img/fbe.png diff --git a/website/static/img/happy.png b/docs/website/static/img/happy.png similarity index 100% rename from website/static/img/happy.png rename to docs/website/static/img/happy.png diff --git a/website/static/img/hsk.png b/docs/website/static/img/hsk.png similarity index 100% rename from website/static/img/hsk.png rename to docs/website/static/img/hsk.png diff --git a/website/static/img/husky.jpg b/docs/website/static/img/husky.jpg similarity index 100% rename from website/static/img/husky.jpg rename to docs/website/static/img/husky.jpg diff --git a/website/static/img/husky_video.mp4 b/docs/website/static/img/husky_video.mp4 similarity index 100% rename from website/static/img/husky_video.mp4 rename to docs/website/static/img/husky_video.mp4 diff --git a/website/static/img/i_tanks.png b/docs/website/static/img/i_tanks.png similarity index 100% rename from website/static/img/i_tanks.png rename to docs/website/static/img/i_tanks.png diff --git a/website/static/img/invpend_1.png b/docs/website/static/img/invpend_1.png similarity index 100% rename from website/static/img/invpend_1.png rename to docs/website/static/img/invpend_1.png diff --git a/website/static/img/invpend_2.png b/docs/website/static/img/invpend_2.png similarity index 100% rename from website/static/img/invpend_2.png rename to 
docs/website/static/img/invpend_2.png diff --git a/website/static/img/lv-oc-sol-2.jpg b/docs/website/static/img/lv-oc-sol-2.jpg similarity index 100% rename from website/static/img/lv-oc-sol-2.jpg rename to docs/website/static/img/lv-oc-sol-2.jpg diff --git a/website/static/img/lv-oc-sol.jpg b/docs/website/static/img/lv-oc-sol.jpg similarity index 100% rename from website/static/img/lv-oc-sol.jpg rename to docs/website/static/img/lv-oc-sol.jpg diff --git a/website/static/img/matlab_logo.png b/docs/website/static/img/matlab_logo.png similarity index 100% rename from website/static/img/matlab_logo.png rename to docs/website/static/img/matlab_logo.png diff --git a/website/static/img/mav.png b/docs/website/static/img/mav.png similarity index 100% rename from website/static/img/mav.png rename to docs/website/static/img/mav.png diff --git a/website/static/img/mhe.png b/docs/website/static/img/mhe.png similarity index 100% rename from website/static/img/mhe.png rename to docs/website/static/img/mhe.png diff --git a/website/static/img/microchip.svg b/docs/website/static/img/microchip.svg similarity index 100% rename from website/static/img/microchip.svg rename to docs/website/static/img/microchip.svg diff --git a/website/static/img/mpc56.png b/docs/website/static/img/mpc56.png similarity index 100% rename from website/static/img/mpc56.png rename to docs/website/static/img/mpc56.png diff --git a/website/static/img/nav-oc-sol-refs.jpg b/docs/website/static/img/nav-oc-sol-refs.jpg similarity index 100% rename from website/static/img/nav-oc-sol-refs.jpg rename to docs/website/static/img/nav-oc-sol-refs.jpg diff --git a/website/static/img/nav-oc-sol-theta.jpg b/docs/website/static/img/nav-oc-sol-theta.jpg similarity index 100% rename from website/static/img/nav-oc-sol-theta.jpg rename to docs/website/static/img/nav-oc-sol-theta.jpg diff --git a/website/static/img/nav-oc-sol-xy-obst.jpg b/docs/website/static/img/nav-oc-sol-xy-obst.jpg similarity index 100% rename from 
website/static/img/nav-oc-sol-xy-obst.jpg rename to docs/website/static/img/nav-oc-sol-xy-obst.jpg diff --git a/website/static/img/nav-oc-sol-xy.jpg b/docs/website/static/img/nav-oc-sol-xy.jpg similarity index 100% rename from website/static/img/nav-oc-sol-xy.jpg rename to docs/website/static/img/nav-oc-sol-xy.jpg diff --git a/website/static/img/nav-oc-sol-xyt.jpg b/docs/website/static/img/nav-oc-sol-xyt.jpg similarity index 100% rename from website/static/img/nav-oc-sol-xyt.jpg rename to docs/website/static/img/nav-oc-sol-xyt.jpg diff --git a/website/static/img/obstructed_navigation_trajectories_python.png b/docs/website/static/img/obstructed_navigation_trajectories_python.png similarity index 100% rename from website/static/img/obstructed_navigation_trajectories_python.png rename to docs/website/static/img/obstructed_navigation_trajectories_python.png diff --git a/website/static/img/ocp-inputs.png b/docs/website/static/img/ocp-inputs.png similarity index 100% rename from website/static/img/ocp-inputs.png rename to docs/website/static/img/ocp-inputs.png diff --git a/website/static/img/ocp-states.png b/docs/website/static/img/ocp-states.png similarity index 100% rename from website/static/img/ocp-states.png rename to docs/website/static/img/ocp-states.png diff --git a/website/static/img/open-functionality.jpg b/docs/website/static/img/open-functionality.jpg similarity index 100% rename from website/static/img/open-functionality.jpg rename to docs/website/static/img/open-functionality.jpg diff --git a/website/static/img/open-promo.gif b/docs/website/static/img/open-promo.gif similarity index 100% rename from website/static/img/open-promo.gif rename to docs/website/static/img/open-promo.gif diff --git a/website/static/img/open.png b/docs/website/static/img/open.png similarity index 100% rename from website/static/img/open.png rename to docs/website/static/img/open.png diff --git a/website/static/img/openbenchmark.png b/docs/website/static/img/openbenchmark.png 
similarity index 100% rename from website/static/img/openbenchmark.png rename to docs/website/static/img/openbenchmark.png diff --git a/website/static/img/python-iface-workflow.jpg b/docs/website/static/img/python-iface-workflow.jpg similarity index 100% rename from website/static/img/python-iface-workflow.jpg rename to docs/website/static/img/python-iface-workflow.jpg diff --git a/website/static/img/python-interfaces.jpg b/docs/website/static/img/python-interfaces.jpg similarity index 100% rename from website/static/img/python-interfaces.jpg rename to docs/website/static/img/python-interfaces.jpg diff --git a/website/static/img/python_estimation.png b/docs/website/static/img/python_estimation.png similarity index 100% rename from website/static/img/python_estimation.png rename to docs/website/static/img/python_estimation.png diff --git a/website/static/img/python_estimation_data.png b/docs/website/static/img/python_estimation_data.png similarity index 100% rename from website/static/img/python_estimation_data.png rename to docs/website/static/img/python_estimation_data.png diff --git a/website/static/img/rocket.svg b/docs/website/static/img/rocket.svg similarity index 100% rename from website/static/img/rocket.svg rename to docs/website/static/img/rocket.svg diff --git a/docs/website/static/img/ros2-robot.png b/docs/website/static/img/ros2-robot.png new file mode 100644 index 00000000..f5ec4f94 Binary files /dev/null and b/docs/website/static/img/ros2-robot.png differ diff --git a/website/static/img/rpi.jpeg b/docs/website/static/img/rpi.jpeg similarity index 100% rename from website/static/img/rpi.jpeg rename to docs/website/static/img/rpi.jpeg diff --git a/website/static/img/rust1.jpeg b/docs/website/static/img/rust1.jpeg similarity index 100% rename from website/static/img/rust1.jpeg rename to docs/website/static/img/rust1.jpeg diff --git a/website/static/img/rustybot.jpeg b/docs/website/static/img/rustybot.jpeg similarity index 100% rename from 
website/static/img/rustybot.jpeg rename to docs/website/static/img/rustybot.jpeg diff --git a/website/static/img/saturn.png b/docs/website/static/img/saturn.png similarity index 100% rename from website/static/img/saturn.png rename to docs/website/static/img/saturn.png diff --git a/website/static/img/tanks_1.png b/docs/website/static/img/tanks_1.png similarity index 100% rename from website/static/img/tanks_1.png rename to docs/website/static/img/tanks_1.png diff --git a/website/static/img/tanks_2.png b/docs/website/static/img/tanks_2.png similarity index 100% rename from website/static/img/tanks_2.png rename to docs/website/static/img/tanks_2.png diff --git a/website/static/img/track.gif b/docs/website/static/img/track.gif similarity index 100% rename from website/static/img/track.gif rename to docs/website/static/img/track.gif diff --git a/website/static/img/udp_socket.png b/docs/website/static/img/udp_socket.png similarity index 100% rename from website/static/img/udp_socket.png rename to docs/website/static/img/udp_socket.png diff --git a/website/static/img/unobstructed_navigation_python.png b/docs/website/static/img/unobstructed_navigation_python.png similarity index 100% rename from website/static/img/unobstructed_navigation_python.png rename to docs/website/static/img/unobstructed_navigation_python.png diff --git a/website/static/img/unobstructed_navigation_trajectories_python.png b/docs/website/static/img/unobstructed_navigation_trajectories_python.png similarity index 100% rename from website/static/img/unobstructed_navigation_trajectories_python.png rename to docs/website/static/img/unobstructed_navigation_trajectories_python.png diff --git a/website/static/js/legacy-docs.js b/docs/website/static/js/legacy-docs.js similarity index 100% rename from website/static/js/legacy-docs.js rename to docs/website/static/js/legacy-docs.js diff --git a/website/static/js/mathjax-config.js b/docs/website/static/js/mathjax-config.js similarity index 100% rename from 
website/static/js/mathjax-config.js rename to docs/website/static/js/mathjax-config.js diff --git a/matlab/CHANGELOG.md b/matlab/CHANGELOG.md new file mode 100644 index 00000000..93ff6a10 --- /dev/null +++ b/matlab/CHANGELOG.md @@ -0,0 +1,25 @@ +# Change Log + +All notable changes to the MATLAB interface will be documented in this file. + +The format is based on [Keep a Changelog](http://keepachangelog.com/) +and this project adheres to [Semantic Versioning](http://semver.org/). + +Note: This is the Changelog file for the MATLAB interface of OpEn. + + +## 0.1.0 - 31 March 2026 + +### Added + +- New MATLAB TCP client in `matlab/api/OpEnTcpOptimizer.m` for TCP-enabled optimizers generated in Python. +- Convenience constructor helper `matlab/api/createOpEnTcpOptimizer.m`. +- Support for parametric optimizers over TCP using calls of the form `response = client.solve(p)`. +- Support for OCP optimizers over TCP by loading `optimizer_manifest.json` and allowing named-parameter calls such as `response = client.solve('x0', x0, 'xref', xref)`. +- Automatic packing of named OCP parameter blocks according to the manifest order, including support for manifest defaults. +- MATLAB-side helpers for `ping`, `kill`, warm-start options, and normalized solver responses. + +### Changed + +- Added a dedicated MATLAB API area under `matlab/api` for the current interface, separate from the legacy MATLAB code. +- Code generation in MATLAB is now moved to `matlab/legacy` diff --git a/matlab/README.md b/matlab/README.md index 3d49644a..29b20d66 100644 --- a/matlab/README.md +++ b/matlab/README.md @@ -1,5 +1,205 @@ -# MATLAB OpEn Interface +# OpEn MATLAB API -This is the matlab interface of **Optimization Engine**. +This directory contains the MATLAB interface of **Optimization Engine (OpEn)**. -Read the [detailed documentation ](https://alphaville.github.io/optimization-engine/docs/matlab-interface). \ No newline at end of file +The current MATLAB API lives in [`matlab/api`](./api). 
It communicates with +optimizers generated in Python that expose a TCP server interface. + +The legacy MATLAB code is preserved in [`matlab/legacy`](./legacy). + +## Capabilities + +The current MATLAB toolbox supports: + +- Connecting to TCP-enabled optimizers generated in Python +- Calling standard parametric optimizers using a flat parameter vector +- Calling OCP-generated optimizers using named parameter blocks from + `optimizer_manifest.json` +- Loading OCP manifests and, when available, automatically reading the TCP + endpoint from the sibling `optimizer.yml` +- Sending `ping` and `kill` requests to the optimizer server +- Providing optional warm-start data through: + - `InitialGuess` + - `InitialLagrangeMultipliers` + - `InitialPenalty` +- Returning normalized solver responses with an `ok` flag and solver + diagnostics +- Returning stage-wise `inputs` for OCP optimizers and `states` for + multiple-shooting OCP optimizers + +The main entry points are: + +- [`matlab/api/OpEnTcpOptimizer.m`](./api/OpEnTcpOptimizer.m) +- [`matlab/api/createOpEnTcpOptimizer.m`](./api/createOpEnTcpOptimizer.m) + +## Getting Started + +Add the MATLAB API folder to your path: + +```matlab +addpath(fullfile(pwd, 'matlab', 'api')); +``` + +Make sure the target optimizer TCP server is already running. + +## Simple Optimizers + +### Connect to a parametric optimizer + +Use a TCP port directly. The IP defaults to `127.0.0.1`. 
+ +```matlab +client = OpEnTcpOptimizer(3301); +pong = client.ping(); +disp(pong.Pong); +``` + +You can also specify the endpoint explicitly: + +```matlab +client = OpEnTcpOptimizer('127.0.0.1', 3301); +``` + +### Solve a parametric optimizer + +For a standard parametric optimizer, pass the flat parameter vector: + +```matlab +response = client.solve([2.0, 10.0]); + +if response.ok + disp(response.solution); + disp(response.cost); +else + error('OpEn:SolverError', '%s', response.message); +end +``` + +### Solve with warm-start information + +```matlab +response1 = client.solve([2.0, 10.0]); + +response2 = client.solve( ... + [2.0, 10.0], ... + 'InitialGuess', response1.solution, ... + 'InitialLagrangeMultipliers', response1.lagrange_multipliers, ... + 'InitialPenalty', response1.penalty); +``` + +### Stop the server + +```matlab +client.kill(); +``` + +## OCP Optimizers + +For OCP-generated optimizers, MATLAB uses **name-value pairs** to provide the +parameter blocks listed in `optimizer_manifest.json`. + +### Load an OCP optimizer from its manifest + +If `optimizer_manifest.json` and `optimizer.yml` are in the same generated +optimizer directory, the client can infer the TCP endpoint automatically: + +```matlab +manifestPath = fullfile( ... + pwd, ... + 'python', ... + '.python_test_build_ocp', ... + 'ocp_single_tcp', ... + 'optimizer_manifest.json'); + +client = OpEnTcpOptimizer('ManifestPath', manifestPath); +disp(client.parameterNames()); +``` + +You can also override the endpoint explicitly: + +```matlab +client = OpEnTcpOptimizer(3391, 'ManifestPath', manifestPath); +``` + +### Solve a single-shooting OCP optimizer + +The following example matches the OCP manifest in +`python/.python_test_build_ocp/ocp_single_tcp`: + +```matlab +response = client.solve( ... + 'x0', [1.0, -1.0], ... 
+ 'xref', [0.0, 0.0]); + +if response.ok + disp(response.solution); + disp(response.inputs); + disp(response.exit_status); +else + error('OpEn:SolverError', '%s', response.message); +end +``` + +If the manifest defines default values for some parameters, you only need to +provide the required ones: + +```matlab +manifestPath = fullfile( ... + pwd, ... + 'python', ... + '.python_test_build_ocp', ... + 'ocp_manifest_bindings', ... + 'optimizer_manifest.json'); + +client = OpEnTcpOptimizer('ManifestPath', manifestPath); +response = client.solve('x0', [1.0, 0.0]); +``` + +### Solve a multiple-shooting OCP optimizer + +For multiple-shooting OCPs, the MATLAB client also returns the state +trajectory reconstructed from the manifest slices: + +```matlab +manifestPath = fullfile( ... + pwd, ... + 'python', ... + '.python_test_build_ocp', ... + 'ocp_multiple_tcp', ... + 'optimizer_manifest.json'); + +client = OpEnTcpOptimizer('ManifestPath', manifestPath); + +response = client.solve( ... + 'x0', [1.0, -1.0], ... + 'xref', [0.0, 0.0]); + +disp(response.inputs); +disp(response.states); +``` + +### OCP warm-start example + +Warm-start options can be combined with named OCP parameters: + +```matlab +response1 = client.solve( ... + 'x0', [1.0, -1.0], ... + 'xref', [0.0, 0.0]); + +response2 = client.solve( ... + 'x0', [1.0, -1.0], ... + 'xref', [0.0, 0.0], ... + 'InitialGuess', response1.solution, ... + 'InitialLagrangeMultipliers', response1.lagrange_multipliers, ... + 'InitialPenalty', response1.penalty); +``` + +## Notes + +- The MATLAB API does not start the optimizer server; it connects to a server + that is already running. +- For plain parametric optimizers, use `client.solve(p)`. +- For OCP optimizers, use `client.solve('name1', value1, 'name2', value2, ...)`. +- The helper function `createOpEnTcpOptimizer(...)` is a thin wrapper around + `OpEnTcpOptimizer(...)`. 
diff --git a/matlab/api/OpEnTcpOptimizer.m b/matlab/api/OpEnTcpOptimizer.m new file mode 100644 index 00000000..a0eceb5d --- /dev/null +++ b/matlab/api/OpEnTcpOptimizer.m @@ -0,0 +1,898 @@ +classdef OpEnTcpOptimizer < handle + %OPENTCPOPTIMIZER TCP client for Python-generated OpEn optimizers. + % CLIENT = OPENTCPOPTIMIZER(PORT) creates a client that connects to a + % TCP-enabled optimizer running on 127.0.0.1:PORT. + % + % CLIENT = OPENTCPOPTIMIZER(PORT, IP) connects to the optimizer at + % the specified IP address and TCP port. + % + % CLIENT = OPENTCPOPTIMIZER(IP, PORT) is also accepted for callers + % who prefer to provide the endpoint in IP/port order. + % + % CLIENT = OPENTCPOPTIMIZER(..., 'ManifestPath', MANIFESTPATH) loads + % an OCP optimizer manifest created by Python's ``ocp`` module. Once + % a manifest is loaded, the client also supports named-parameter + % calls such as: + % + % response = client.solve(x0=[1; 0], xref=[0; 0]); + % + % Name-value pairs: + % 'ManifestPath' Path to optimizer_manifest.json + % 'Timeout' Socket timeout in seconds (default 10) + % 'MaxResponseBytes' Maximum response size in bytes + % (default 1048576) + % + % If only a manifest path is provided, the constructor attempts to + % read ``optimizer.yml`` next to the manifest and use its TCP/IP + % endpoint automatically. + % + % This interface is intended for optimizers generated in Python with + % the TCP interface enabled. The optimizer server must already be + % running; this class only communicates with it. + % + % Examples: + % client = OpEnTcpOptimizer(3301); + % response = client.solve([2.0, 10.0]); + % + % client = OpEnTcpOptimizer('ManifestPath', 'optimizer_manifest.json'); + % response = client.solve(x0=[1.0, -1.0], xref=[0.0, 0.0]); + + properties (SetAccess = private) + %IP IPv4 address or host name of the optimizer server. + ip + + %PORT TCP port of the optimizer server. + port + + %TIMEOUT Connect and read timeout in seconds. 
+ timeout + + %MAXRESPONSEBYTES Safety limit for incoming payload size. + maxResponseBytes + + %MANIFESTPATH Absolute path to an optional OCP manifest. + manifestPath + + %MANIFEST Decoded OCP manifest data. + manifest + end + + methods + function obj = OpEnTcpOptimizer(varargin) + %OPENTCPOPTIMIZER Construct a TCP client for a generated optimizer. + % + % Supported call patterns: + % OpEnTcpOptimizer(port) + % OpEnTcpOptimizer(port, ip) + % OpEnTcpOptimizer(ip, port) + % OpEnTcpOptimizer(..., 'ManifestPath', path) + % OpEnTcpOptimizer('ManifestPath', path) + + obj.manifestPath = ''; + obj.manifest = []; + + [endpointArgs, options] = OpEnTcpOptimizer.parseConstructorInputs(varargin); + + if ~isempty(options.ManifestPath) + obj.loadManifest(options.ManifestPath); + end + + [port, ip] = obj.resolveEndpoint(endpointArgs); + + obj.port = double(port); + obj.ip = OpEnTcpOptimizer.textToChar(ip); + obj.timeout = double(options.Timeout); + obj.maxResponseBytes = double(options.MaxResponseBytes); + end + + function obj = loadManifest(obj, manifestPath) + %LOADMANIFEST Load an OCP optimizer manifest. + % OBJ = LOADMANIFEST(OBJ, MANIFESTPATH) loads an + % ``optimizer_manifest.json`` file created by the Python OCP + % module. After loading the manifest, the client accepts + % named-parameter calls such as ``solve(x0=..., xref=...)``. + + if ~OpEnTcpOptimizer.isTextScalar(manifestPath) + error('OpEnTcpOptimizer:InvalidManifestPath', ... + 'ManifestPath must be a character vector or string scalar.'); + end + manifestPath = OpEnTcpOptimizer.textToChar(manifestPath); + manifestPath = OpEnTcpOptimizer.validateManifestPath(manifestPath); + manifestText = fileread(manifestPath); + manifestData = jsondecode(manifestText); + + if ~isstruct(manifestData) + error('OpEnTcpOptimizer:InvalidManifest', ... + 'The manifest must decode to a MATLAB struct.'); + end + + if ~isfield(manifestData, 'parameters') + error('OpEnTcpOptimizer:InvalidManifest', ... 
+ 'The manifest does not contain a "parameters" field.'); + end + + OpEnTcpOptimizer.validateManifestParameters(manifestData.parameters); + + obj.manifestPath = manifestPath; + obj.manifest = manifestData; + end + + function tf = hasManifest(obj) + %HASMANIFEST True if an OCP manifest has been loaded. + tf = ~isempty(obj.manifest); + end + + function names = parameterNames(obj) + %PARAMETERNAMES Return the ordered OCP parameter names. + if ~obj.hasManifest() + names = {}; + return; + end + + definitions = OpEnTcpOptimizer.manifestParametersAsCell(obj.manifest.parameters); + names = cell(size(definitions)); + for i = 1:numel(definitions) + names{i} = definitions{i}.name; + end + end + + function response = ping(obj) + %PING Check whether the optimizer server is reachable. + % RESPONSE = PING(OBJ) sends {"Ping":1} and returns the + % decoded JSON response, typically a struct with field "Pong". + response = obj.sendRequest('{"Ping":1}', true); + end + + function kill(obj) + %KILL Ask the optimizer server to stop gracefully. + % KILL(OBJ) sends {"Kill":1}. The server closes the + % connection without returning a JSON payload. + obj.sendRequest('{"Kill":1}', false); + end + + function response = solve(obj, varargin) + %SOLVE Run a parametric or OCP optimizer over TCP. + % RESPONSE = SOLVE(OBJ, P) sends the flat parameter vector P + % to a standard parametric optimizer. + % + % RESPONSE = SOLVE(OBJ, x0=..., xref=..., ...) packs the named + % parameter blocks declared in the loaded OCP manifest and + % sends the resulting flat parameter vector to the solver. 
+ % + % In both modes, the optional solver warm-start arguments are: + % InitialGuess + % InitialLagrangeMultipliers + % InitialPenalty + % + % On success, RESPONSE contains the solver fields returned by + % the server, plus: + % ok = true + % raw = original decoded JSON response + % f1_infeasibility = delta_y_norm_over_c + % + % For OCP solves, RESPONSE also contains: + % packed_parameter = flat parameter vector sent to server + % inputs = stage-wise control inputs, when available + % states = state trajectory for multiple shooting OCPs + + [parameterVector, solverOptions, solveMode] = obj.prepareSolveInputs(varargin); + rawResponse = obj.runSolveRequest(parameterVector, solverOptions); + response = OpEnTcpOptimizer.normalizeSolverResponse(rawResponse); + + if strcmp(solveMode, 'ocp') + response.packed_parameter = parameterVector; + if response.ok + response = obj.enrichOcpResponse(response, parameterVector); + end + end + end + + function response = call(obj, varargin) + %CALL Alias for SOLVE to match the Python TCP interface. + response = obj.solve(varargin{:}); + end + + function response = consume(obj, varargin) + %CONSUME Alias for SOLVE to ease migration from older MATLAB code. + response = obj.solve(varargin{:}); + end + end + + methods (Access = private) + function [parameterVector, solverOptions, solveMode] = prepareSolveInputs(obj, inputArgs) + %PREPARESOLVEINPUTS Parse parametric or OCP solve inputs. + + if isempty(inputArgs) + error('OpEnTcpOptimizer:MissingSolveArguments', ... + 'Provide either a flat parameter vector or named OCP parameters.'); + end + + if OpEnTcpOptimizer.isVectorNumeric(inputArgs{1}) + solveMode = 'parametric'; + parameterVector = OpEnTcpOptimizer.toRowVector(inputArgs{1}, 'parameter'); + solverOptions = OpEnTcpOptimizer.parseSolverOptions(inputArgs(2:end)); + return; + end + + if ~obj.hasManifest() + error('OpEnTcpOptimizer:ManifestRequired', ... + ['Named parameter solves require an OCP manifest. Load one with ' ... 
+ 'loadManifest(...) or the constructor option ''ManifestPath''.']); + end + + [parameterVector, solverOptions] = obj.packOcpParameters(inputArgs); + solveMode = 'ocp'; + end + + function [parameterVector, solverOptions] = packOcpParameters(obj, inputArgs) + %PACKOCPPARAMETERS Pack named OCP parameters using the manifest. + + pairs = OpEnTcpOptimizer.normalizeNameValuePairs(inputArgs, 'OpEnTcpOptimizer.solve'); + solverOptions = OpEnTcpOptimizer.emptySolverOptions(); + providedValues = containers.Map('KeyType', 'char', 'ValueType', 'any'); + + for i = 1:size(pairs, 1) + name = pairs{i, 1}; + value = pairs{i, 2}; + lowerName = lower(name); + + switch lowerName + case 'initialguess' + solverOptions.InitialGuess = value; + case 'initiallagrangemultipliers' + solverOptions.InitialLagrangeMultipliers = value; + case 'initialpenalty' + solverOptions.InitialPenalty = value; + otherwise + if isKey(providedValues, name) + error('OpEnTcpOptimizer:DuplicateParameter', ... + 'Parameter "%s" was provided more than once.', name); + end + providedValues(name) = value; + end + end + + solverOptions = OpEnTcpOptimizer.validateSolverOptions(solverOptions); + + definitions = OpEnTcpOptimizer.manifestParametersAsCell(obj.manifest.parameters); + parameterVector = []; + missing = {}; + + for i = 1:numel(definitions) + definition = definitions{i}; + if isKey(providedValues, definition.name) + value = providedValues(definition.name); + remove(providedValues, definition.name); + else + value = definition.default; + end + + if isempty(value) + missing{end + 1} = definition.name; %#ok + continue; + end + + parameterVector = [parameterVector, ... %#ok + OpEnTcpOptimizer.normalizeParameterBlock( ... + value, definition.size, definition.name)]; + end + + if ~isempty(missing) + error('OpEnTcpOptimizer:MissingOcpParameters', ... 
+ 'Missing values for parameters: %s.', strjoin(missing, ', ')); + end + + remainingNames = sort(keys(providedValues)); + if ~isempty(remainingNames) + error('OpEnTcpOptimizer:UnknownOcpParameters', ... + 'Unknown OCP parameter(s): %s.', strjoin(remainingNames, ', ')); + end + end + + function response = enrichOcpResponse(obj, response, packedParameters) + %ENRICHOCPRESPONSE Add OCP-oriented views to a successful solve. + + response.inputs = obj.extractOcpInputs(response.solution); + + if strcmp(obj.manifest.shooting, 'multiple') + response.states = obj.extractMultipleShootingStates( ... + response.solution, packedParameters); + end + end + + function inputs = extractOcpInputs(obj, flatSolution) + %EXTRACTOCPINPUTS Extract stage-wise input blocks from the solution. + + flatSolution = OpEnTcpOptimizer.toRowVector(flatSolution, 'solution'); + + if strcmp(obj.manifest.shooting, 'single') + nu = double(obj.manifest.nu); + horizon = double(obj.manifest.horizon); + inputs = cell(1, horizon); + + for stageIdx = 1:horizon + startIdx = (stageIdx - 1) * nu + 1; + stopIdx = stageIdx * nu; + inputs{stageIdx} = flatSolution(startIdx:stopIdx); + end + return; + end + + sliceMatrix = obj.manifest.input_slices; + inputs = cell(1, size(sliceMatrix, 1)); + for i = 1:size(sliceMatrix, 1) + startIdx = sliceMatrix(i, 1) + 1; + stopIdx = sliceMatrix(i, 2); + inputs{i} = flatSolution(startIdx:stopIdx); + end + end + + function states = extractMultipleShootingStates(obj, flatSolution, packedParameters) + %EXTRACTMULTIPLESHOOTINGSTATES Extract the state trajectory. + % + % For multiple shooting OCPs the manifest contains the state + % slices directly, so no extra CasADi dependency is needed in + % MATLAB to reconstruct the state trajectory. 
+ + flatSolution = OpEnTcpOptimizer.toRowVector(flatSolution, 'solution'); + packedParameters = OpEnTcpOptimizer.toRowVector(packedParameters, 'packedParameters'); + + stateSlices = obj.manifest.state_slices; + states = cell(1, size(stateSlices, 1) + 1); + states{1} = obj.extractParameterByName(packedParameters, 'x0'); + + for i = 1:size(stateSlices, 1) + startIdx = stateSlices(i, 1) + 1; + stopIdx = stateSlices(i, 2); + states{i + 1} = flatSolution(startIdx:stopIdx); + end + end + + function value = extractParameterByName(obj, packedParameters, parameterName) + %EXTRACTPARAMETERBYNAME Extract one named parameter block. + + definitions = OpEnTcpOptimizer.manifestParametersAsCell(obj.manifest.parameters); + offset = 0; + + for i = 1:numel(definitions) + definition = definitions{i}; + nextOffset = offset + definition.size; + if strcmp(definition.name, parameterName) + value = packedParameters(offset + 1:nextOffset); + return; + end + offset = nextOffset; + end + + error('OpEnTcpOptimizer:MissingManifestParameter', ... + 'The manifest does not define a parameter named "%s".', parameterName); + end + + function rawResponse = runSolveRequest(obj, parameterVector, solverOptions) + %RUNSOLVEREQUEST Serialize and send a solver execution request. + + request = struct(); + request.Run = struct(); + request.Run.parameter = OpEnTcpOptimizer.toRowVector(parameterVector, 'parameter'); + + if ~isempty(solverOptions.InitialGuess) + request.Run.initial_guess = OpEnTcpOptimizer.toRowVector( ... + solverOptions.InitialGuess, 'InitialGuess'); + end + + if ~isempty(solverOptions.InitialLagrangeMultipliers) + request.Run.initial_lagrange_multipliers = OpEnTcpOptimizer.toRowVector( ... 
+ solverOptions.InitialLagrangeMultipliers, 'InitialLagrangeMultipliers'); + end + + if ~isempty(solverOptions.InitialPenalty) + request.Run.initial_penalty = double(solverOptions.InitialPenalty); + end + + rawResponse = obj.sendRequest(jsonencode(request), true); + end + + function [port, ip] = resolveEndpoint(obj, endpointArgs) + %RESOLVEENDPOINT Resolve the TCP endpoint from inputs or manifest. + + if isempty(endpointArgs) + if ~obj.hasManifest() + error('OpEnTcpOptimizer:MissingEndpoint', ... + 'Provide a TCP endpoint or a manifest with a matching optimizer.yml file.'); + end + + tcpDefaults = OpEnTcpOptimizer.readTcpDefaultsFromManifest(obj.manifestPath); + if isempty(tcpDefaults) + error('OpEnTcpOptimizer:MissingEndpoint', ... + ['No TCP endpoint was provided and no TCP settings could be read from ' ... + 'optimizer.yml next to the manifest.']); + end + + port = tcpDefaults.port; + ip = tcpDefaults.ip; + return; + end + + if numel(endpointArgs) == 1 + if ~OpEnTcpOptimizer.isValidPort(endpointArgs{1}) + error('OpEnTcpOptimizer:InvalidEndpoint', ... + 'A single endpoint argument must be a TCP port.'); + end + port = endpointArgs{1}; + ip = '127.0.0.1'; + return; + end + + if numel(endpointArgs) == 2 + [port, ip] = OpEnTcpOptimizer.normalizeEndpointArguments( ... + endpointArgs{1}, endpointArgs{2}); + return; + end + + error('OpEnTcpOptimizer:InvalidEndpoint', ... + 'Specify the endpoint as (port), (port, ip), or (ip, port).'); + end + + function response = sendRequest(obj, requestText, expectReply) + %SENDREQUEST Send a JSON request and optionally decode a JSON reply. + % + % The generated Rust server reads until the client closes its + % write side. We therefore use Java sockets so we can call + % shutdownOutput() after transmitting the JSON payload. 
+ + socket = []; + cleanup = []; + + try + socket = java.net.Socket(); + timeoutMs = max(1, round(1000 * obj.timeout)); + socket.connect(java.net.InetSocketAddress(obj.ip, obj.port), timeoutMs); + socket.setSoTimeout(timeoutMs); + cleanup = onCleanup(@() OpEnTcpOptimizer.closeSocketQuietly(socket)); + + outputStream = socket.getOutputStream(); + requestBytes = int8(unicode2native(char(requestText), 'UTF-8')); + outputStream.write(requestBytes); + outputStream.flush(); + socket.shutdownOutput(); + + if ~expectReply + return; + end + + inputStream = socket.getInputStream(); + responseBytes = obj.readFully(inputStream); + if isempty(responseBytes) + error('OpEnTcpOptimizer:EmptyResponse', ... + 'The optimizer server closed the connection without sending a response.'); + end + + responseText = native2unicode(responseBytes, 'UTF-8'); + response = jsondecode(responseText); + catch err + % Ensure the socket is closed before rethrowing transport errors. + clear cleanup; + OpEnTcpOptimizer.closeSocketQuietly(socket); + rethrow(err); + end + + clear cleanup; + end + + function bytes = readFully(obj, inputStream) + %READFULLY Read the complete server reply until EOF. + % + % The server sends a single JSON document per connection and + % closes the connection afterwards, so EOF marks the end of + % the response payload. + + byteStream = java.io.ByteArrayOutputStream(); + + while true + nextByte = inputStream.read(); + if nextByte == -1 + break; + end + + byteStream.write(nextByte); + if byteStream.size() > obj.maxResponseBytes + error('OpEnTcpOptimizer:ResponseTooLarge', ... + 'The optimizer response exceeded %d bytes.', obj.maxResponseBytes); + end + end + + rawBytes = uint8(mod(double(byteStream.toByteArray()), 256)); + bytes = reshape(rawBytes, 1, []); + end + end + + methods (Static, Access = private) + function [endpointArgs, options] = parseConstructorInputs(inputArgs) + %PARSECONSTRUCTORINPUTS Split constructor endpoint and options. 
+ + endpointArgs = {}; + options = struct( ... + 'ManifestPath', '', ... + 'Timeout', 10, ... + 'MaxResponseBytes', 1048576); + + idx = 1; + while idx <= numel(inputArgs) + token = inputArgs{idx}; + if OpEnTcpOptimizer.isRecognizedConstructorOption(token) + if idx == numel(inputArgs) + error('OpEnTcpOptimizer:InvalidConstructorInput', ... + 'Missing value for option "%s".', OpEnTcpOptimizer.textToChar(token)); + end + + name = lower(OpEnTcpOptimizer.textToChar(token)); + value = inputArgs{idx + 1}; + switch name + case 'manifestpath' + if ~OpEnTcpOptimizer.isTextScalar(value) + error('OpEnTcpOptimizer:InvalidManifestPath', ... + 'ManifestPath must be a character vector or string scalar.'); + end + options.ManifestPath = OpEnTcpOptimizer.textToChar(value); + case 'timeout' + options.Timeout = value; + case 'maxresponsebytes' + options.MaxResponseBytes = value; + end + idx = idx + 2; + else + endpointArgs{end + 1} = token; %#ok + idx = idx + 1; + end + end + + if numel(endpointArgs) == 1 && OpEnTcpOptimizer.isManifestPathToken(endpointArgs{1}) + options.ManifestPath = OpEnTcpOptimizer.textToChar(endpointArgs{1}); + endpointArgs = {}; + end + + if ~(OpEnTcpOptimizer.isValidTimeout(options.Timeout)) + error('OpEnTcpOptimizer:InvalidTimeout', ... + 'Timeout must be a positive scalar.'); + end + + if ~(OpEnTcpOptimizer.isValidMaxResponseBytes(options.MaxResponseBytes)) + error('OpEnTcpOptimizer:InvalidMaxResponseBytes', ... + 'MaxResponseBytes must be a positive integer.'); + end + end + + function tf = isRecognizedConstructorOption(token) + %ISRECOGNIZEDCONSTRUCTOROPTION True for constructor option names. + tf = OpEnTcpOptimizer.isTextScalar(token) && any(strcmpi( ... + OpEnTcpOptimizer.textToChar(token), {'ManifestPath', 'Timeout', 'MaxResponseBytes'})); + end + + function tf = isManifestPathToken(token) + %ISMANIFESTPATHTOKEN Heuristic for a positional manifest path. 
+ if ~OpEnTcpOptimizer.isTextScalar(token) + tf = false; + return; + end + + token = OpEnTcpOptimizer.textToChar(token); + [~, ~, ext] = fileparts(token); + tf = strcmpi(ext, '.json') && isfile(token); + end + + function manifestPath = validateManifestPath(manifestPath) + %VALIDATEMANIFESTPATH Validate and absolutize a manifest path. + if ~isfile(manifestPath) + error('OpEnTcpOptimizer:ManifestNotFound', ... + 'Manifest file not found: %s', manifestPath); + end + + [folder, name, ext] = fileparts(manifestPath); + if ~strcmpi(ext, '.json') + error('OpEnTcpOptimizer:InvalidManifestPath', ... + 'The manifest path must point to a JSON file.'); + end + + manifestPath = fullfile(folder, [name, ext]); + end + + function validateManifestParameters(parameters) + %VALIDATEMANIFESTPARAMETERS Validate manifest parameter entries. + definitions = OpEnTcpOptimizer.manifestParametersAsCell(parameters); + for i = 1:numel(definitions) + definition = definitions{i}; + if ~isfield(definition, 'name') || ~isfield(definition, 'size') + error('OpEnTcpOptimizer:InvalidManifest', ... + 'Each manifest parameter needs "name" and "size" fields.'); + end + if ~ischar(definition.name) && ~isstring(definition.name) + error('OpEnTcpOptimizer:InvalidManifest', ... + 'Manifest parameter names must be text values.'); + end + if ~OpEnTcpOptimizer.isValidPositiveInteger(definition.size) + error('OpEnTcpOptimizer:InvalidManifest', ... + 'Manifest parameter sizes must be positive integers.'); + end + end + end + + function defaults = readTcpDefaultsFromManifest(manifestPath) + %READTCPDEFAULTSFROMMANIFEST Read TCP defaults from optimizer.yml. + defaults = []; + optimizerDir = fileparts(manifestPath); + yamlPath = fullfile(optimizerDir, 'optimizer.yml'); + + if ~isfile(yamlPath) + return; + end + + defaults = OpEnTcpOptimizer.parseOptimizerYaml(yamlPath); + end + + function defaults = parseOptimizerYaml(yamlPath) + %PARSEOPTIMIZERYAML Read the tcp.ip and tcp.port fields. 
+ defaults = []; + yamlText = fileread(yamlPath); + lines = regexp(yamlText, '\r\n|\n|\r', 'split'); + + inTcpBlock = false; + ip = ''; + port = []; + + for i = 1:numel(lines) + line = lines{i}; + trimmed = strtrim(line); + + if isempty(trimmed) + continue; + end + + if strcmp(trimmed, 'tcp:') + inTcpBlock = true; + continue; + end + + if inTcpBlock && ~isempty(line) && ~isspace(line(1)) + break; + end + + if inTcpBlock + if startsWith(trimmed, 'ip:') + ip = strtrim(extractAfter(trimmed, 3)); + elseif startsWith(trimmed, 'port:') + portText = strtrim(extractAfter(trimmed, 5)); + port = str2double(portText); + end + end + end + + if ~isempty(ip) && OpEnTcpOptimizer.isValidPort(port) + defaults = struct('ip', ip, 'port', port); + end + end + + function response = normalizeSolverResponse(rawResponse) + %NORMALIZESOLVERRESPONSE Add MATLAB-friendly fields to server data. + + response = rawResponse; + response.raw = rawResponse; + + if isfield(rawResponse, 'type') && strcmp(rawResponse.type, 'Error') + response.ok = false; + return; + end + + response.ok = true; + if isfield(rawResponse, 'delta_y_norm_over_c') + response.f1_infeasibility = rawResponse.delta_y_norm_over_c; + end + end + + function solverOptions = parseSolverOptions(inputArgs) + %PARSESOLVEROPTIONS Parse warm-start related name-value pairs. + pairs = OpEnTcpOptimizer.normalizeNameValuePairs(inputArgs, 'OpEnTcpOptimizer.solve'); + solverOptions = OpEnTcpOptimizer.emptySolverOptions(); + + for i = 1:size(pairs, 1) + name = lower(pairs{i, 1}); + value = pairs{i, 2}; + + switch name + case 'initialguess' + solverOptions.InitialGuess = value; + case 'initiallagrangemultipliers' + solverOptions.InitialLagrangeMultipliers = value; + case 'initialpenalty' + solverOptions.InitialPenalty = value; + otherwise + error('OpEnTcpOptimizer:UnknownSolveOption', ... 
+ 'Unknown solve option "%s".', pairs{i, 1}); + end + end + + solverOptions = OpEnTcpOptimizer.validateSolverOptions(solverOptions); + end + + function solverOptions = validateSolverOptions(solverOptions) + %VALIDATESOLVEROPTIONS Validate optional warm-start inputs. + if ~OpEnTcpOptimizer.isOptionalVectorNumeric(solverOptions.InitialGuess) + error('OpEnTcpOptimizer:InvalidInitialGuess', ... + 'InitialGuess must be a numeric vector or [].'); + end + + if ~OpEnTcpOptimizer.isOptionalVectorNumeric(solverOptions.InitialLagrangeMultipliers) + error('OpEnTcpOptimizer:InvalidInitialLagrangeMultipliers', ... + 'InitialLagrangeMultipliers must be a numeric vector or [].'); + end + + if ~OpEnTcpOptimizer.isOptionalScalarNumeric(solverOptions.InitialPenalty) + error('OpEnTcpOptimizer:InvalidInitialPenalty', ... + 'InitialPenalty must be a numeric scalar or [].'); + end + end + + function solverOptions = emptySolverOptions() + %EMPTYSOLVEROPTIONS Return the default solve option bundle. + solverOptions = struct( ... + 'InitialGuess', [], ... + 'InitialLagrangeMultipliers', [], ... + 'InitialPenalty', []); + end + + function pairs = normalizeNameValuePairs(inputArgs, functionName) + %NORMALIZENAMEVALUEPAIRS Validate and normalize name-value pairs. + if isempty(inputArgs) + pairs = cell(0, 2); + return; + end + + if mod(numel(inputArgs), 2) ~= 0 + error('OpEnTcpOptimizer:InvalidNameValueInput', ... + '%s expects name-value arguments in pairs.', functionName); + end + + pairs = cell(numel(inputArgs) / 2, 2); + pairIdx = 1; + for i = 1:2:numel(inputArgs) + name = inputArgs{i}; + if ~OpEnTcpOptimizer.isTextScalar(name) + error('OpEnTcpOptimizer:InvalidNameValueInput', ... 
+ 'Expected a text parameter name at argument position %d.', i); + end + + pairs{pairIdx, 1} = OpEnTcpOptimizer.textToChar(name); + pairs{pairIdx, 2} = inputArgs{i + 1}; + pairIdx = pairIdx + 1; + end + end + + function blocks = manifestParametersAsCell(parameters) + %MANIFESTPARAMETERSASCELL Normalize decoded parameter definitions. + if isempty(parameters) + blocks = {}; + return; + end + + if isstruct(parameters) + blocks = cell(1, numel(parameters)); + for i = 1:numel(parameters) + blocks{i} = parameters(i); + end + return; + end + + error('OpEnTcpOptimizer:InvalidManifest', ... + 'Manifest parameters must decode to a struct array.'); + end + + function vector = normalizeParameterBlock(value, expectedSize, parameterName) + %NORMALIZEPARAMETERBLOCK Normalize one OCP parameter block. + if expectedSize == 1 && isnumeric(value) && isscalar(value) && isfinite(value) + vector = double(value); + return; + end + + if ~OpEnTcpOptimizer.isVectorNumeric(value) + error('OpEnTcpOptimizer:InvalidOcpParameter', ... + 'Parameter "%s" must be a numeric vector.', parameterName); + end + + vector = reshape(double(value), 1, []); + if numel(vector) ~= double(expectedSize) + error('OpEnTcpOptimizer:InvalidOcpParameterDimension', ... + 'Parameter "%s" must have length %d.', parameterName, double(expectedSize)); + end + end + + function vector = toRowVector(value, argumentName) + %TOROWVECTOR Validate a numeric vector and serialize it as a row. + if ~OpEnTcpOptimizer.isVectorNumeric(value) + error('OpEnTcpOptimizer:InvalidVector', ... + '%s must be a numeric vector.', argumentName); + end + + vector = reshape(double(value), 1, []); + end + + function closeSocketQuietly(socket) + %CLOSESOCKETQUIETLY Best-effort socket close for cleanup paths. + if isempty(socket) + return; + end + + try + socket.close(); + catch + % Ignore close errors during cleanup. 
+ end + end + + function [port, ip] = normalizeEndpointArguments(arg1, arg2) + %NORMALIZEENDPOINTARGUMENTS Support both (port, ip) and (ip, port). + if OpEnTcpOptimizer.isValidPort(arg1) && OpEnTcpOptimizer.isTextScalar(arg2) + port = arg1; + ip = arg2; + return; + end + + if OpEnTcpOptimizer.isTextScalar(arg1) && OpEnTcpOptimizer.isValidPort(arg2) + port = arg2; + ip = arg1; + return; + end + + error('OpEnTcpOptimizer:InvalidEndpoint', ... + ['Specify the endpoint as (port), (port, ip), or (ip, port), ' ... + 'where port is an integer in [1, 65535].']); + end + + function tf = isValidPort(value) + %ISVALIDPORT Validate a TCP port number. + tf = OpEnTcpOptimizer.isValidPositiveInteger(value) ... + && double(value) >= 1 && double(value) <= 65535; + end + + function tf = isValidTimeout(value) + %ISVALIDTIMEOUT Validate a positive timeout. + tf = isnumeric(value) && isscalar(value) && isfinite(value) && value > 0; + end + + function tf = isValidMaxResponseBytes(value) + %ISVALIDMAXRESPONSEBYTES Validate the maximum response size. + tf = OpEnTcpOptimizer.isValidPositiveInteger(value); + end + + function tf = isValidPositiveInteger(value) + %ISVALIDPOSITIVEINTEGER Validate a positive integer scalar. + tf = isnumeric(value) && isscalar(value) && isfinite(value) ... + && value == fix(value) && value > 0; + end + + function tf = isTextScalar(value) + %ISTEXTSCALAR True for character vectors and string scalars. + tf = ischar(value) || (isstring(value) && isscalar(value)); + end + + function value = textToChar(value) + %TEXTTOCHAR Convert a MATLAB text scalar to a character vector. + if isstring(value) + value = char(value); + end + end + + function tf = isVectorNumeric(value) + %ISVECTORNUMERIC True for finite numeric vectors. + tf = isnumeric(value) && isvector(value) && all(isfinite(value)); + end + + function tf = isOptionalVectorNumeric(value) + %ISOPTIONALVECTORNUMERIC True for [] or a numeric vector. 
+ tf = isempty(value) || OpEnTcpOptimizer.isVectorNumeric(value); + end + + function tf = isOptionalScalarNumeric(value) + %ISOPTIONALSCALARNUMERIC True for [] or a finite numeric scalar. + tf = isempty(value) || (isnumeric(value) && isscalar(value) && isfinite(value)); + end + end +end diff --git a/matlab/api/createOpEnTcpOptimizer.m b/matlab/api/createOpEnTcpOptimizer.m new file mode 100644 index 00000000..fd1fb57a --- /dev/null +++ b/matlab/api/createOpEnTcpOptimizer.m @@ -0,0 +1,20 @@ +function client = createOpEnTcpOptimizer(varargin) +%CREATEOPENTCPOPTIMIZER Create a MATLAB TCP client for an OpEn optimizer. +% CLIENT = CREATEOPENTCPOPTIMIZER(PORT) connects to a TCP-enabled +% generated optimizer on 127.0.0.1:PORT. +% +% CLIENT = CREATEOPENTCPOPTIMIZER(PORT, IP) connects to the specified +% IP address and port. +% +% CLIENT = CREATEOPENTCPOPTIMIZER(IP, PORT) is also accepted. +% +% CLIENT = CREATEOPENTCPOPTIMIZER('ManifestPath', MANIFESTPATH) creates +% a manifest-aware OCP TCP client and tries to read the endpoint from the +% sibling ``optimizer.yml`` file. +% +% CLIENT = CREATEOPENTCPOPTIMIZER(..., Name, Value) forwards all +% remaining name-value pairs to the OPENTCPOPTIMIZER constructor. See +% "help OpEnTcpOptimizer" for the supported options and methods. 
+ + client = OpEnTcpOptimizer(varargin{:}); +end diff --git a/matlab/@OpEnConstraints/OpEnConstraints.m b/matlab/legacy/@OpEnConstraints/OpEnConstraints.m similarity index 100% rename from matlab/@OpEnConstraints/OpEnConstraints.m rename to matlab/legacy/@OpEnConstraints/OpEnConstraints.m diff --git a/matlab/@OpEnOptimizer/OpEnOptimizer.m b/matlab/legacy/@OpEnOptimizer/OpEnOptimizer.m similarity index 100% rename from matlab/@OpEnOptimizer/OpEnOptimizer.m rename to matlab/legacy/@OpEnOptimizer/OpEnOptimizer.m diff --git a/matlab/@OpEnOptimizerBuilder/OpEnOptimizerBuilder.m b/matlab/legacy/@OpEnOptimizerBuilder/OpEnOptimizerBuilder.m similarity index 100% rename from matlab/@OpEnOptimizerBuilder/OpEnOptimizerBuilder.m rename to matlab/legacy/@OpEnOptimizerBuilder/OpEnOptimizerBuilder.m diff --git a/matlab/@OpEnOptimizerBuilder/build.m b/matlab/legacy/@OpEnOptimizerBuilder/build.m similarity index 100% rename from matlab/@OpEnOptimizerBuilder/build.m rename to matlab/legacy/@OpEnOptimizerBuilder/build.m diff --git a/matlab/legacy/README.md b/matlab/legacy/README.md new file mode 100644 index 00000000..3d49644a --- /dev/null +++ b/matlab/legacy/README.md @@ -0,0 +1,5 @@ +# MATLAB OpEn Interface + +This is the matlab interface of **Optimization Engine**. + +Read the [detailed documentation ](https://alphaville.github.io/optimization-engine/docs/matlab-interface). 
\ No newline at end of file diff --git a/matlab/examples/example_open_1.m b/matlab/legacy/examples/example_open_1.m similarity index 100% rename from matlab/examples/example_open_1.m rename to matlab/legacy/examples/example_open_1.m diff --git a/matlab/examples/example_open_lv.m b/matlab/legacy/examples/example_open_lv.m similarity index 100% rename from matlab/examples/example_open_lv.m rename to matlab/legacy/examples/example_open_lv.m diff --git a/matlab/examples/example_open_nav.m b/matlab/legacy/examples/example_open_nav.m similarity index 100% rename from matlab/examples/example_open_nav.m rename to matlab/legacy/examples/example_open_nav.m diff --git a/matlab/helpers/casadi_generate_c_code.m b/matlab/legacy/helpers/casadi_generate_c_code.m similarity index 100% rename from matlab/helpers/casadi_generate_c_code.m rename to matlab/legacy/helpers/casadi_generate_c_code.m diff --git a/matlab/helpers/rosenbrock.m b/matlab/legacy/helpers/rosenbrock.m similarity index 100% rename from matlab/helpers/rosenbrock.m rename to matlab/legacy/helpers/rosenbrock.m diff --git a/matlab/matlab_open_root.m b/matlab/legacy/matlab_open_root.m similarity index 100% rename from matlab/matlab_open_root.m rename to matlab/legacy/matlab_open_root.m diff --git a/matlab/private/codegen_get_cache.txt b/matlab/legacy/private/codegen_get_cache.txt similarity index 100% rename from matlab/private/codegen_get_cache.txt rename to matlab/legacy/private/codegen_get_cache.txt diff --git a/matlab/private/codegen_head.txt b/matlab/legacy/private/codegen_head.txt similarity index 100% rename from matlab/private/codegen_head.txt rename to matlab/legacy/private/codegen_head.txt diff --git a/matlab/private/codegen_main_2.txt b/matlab/legacy/private/codegen_main_2.txt similarity index 100% rename from matlab/private/codegen_main_2.txt rename to matlab/legacy/private/codegen_main_2.txt diff --git a/matlab/private/codegen_main_3.txt b/matlab/legacy/private/codegen_main_3.txt similarity index 100% rename 
from matlab/private/codegen_main_3.txt rename to matlab/legacy/private/codegen_main_3.txt diff --git a/matlab/private/codegen_main_fn_def.txt b/matlab/legacy/private/codegen_main_fn_def.txt similarity index 100% rename from matlab/private/codegen_main_fn_def.txt rename to matlab/legacy/private/codegen_main_fn_def.txt diff --git a/matlab/setup_open.m b/matlab/legacy/setup_open.m similarity index 100% rename from matlab/setup_open.m rename to matlab/legacy/setup_open.m diff --git a/open-codegen/.gitignore b/open-codegen/.gitignore deleted file mode 100644 index 104b7eb7..00000000 --- a/open-codegen/.gitignore +++ /dev/null @@ -1,8 +0,0 @@ -__pycache__ -venv -.idea -*.pyc -build -dist -x4356 -TOKEN \ No newline at end of file diff --git a/open-codegen/VERSION b/open-codegen/VERSION deleted file mode 100644 index 71172b43..00000000 --- a/open-codegen/VERSION +++ /dev/null @@ -1 +0,0 @@ -0.10.1 \ No newline at end of file diff --git a/open-codegen/opengen/__init__.py b/open-codegen/opengen/__init__.py deleted file mode 100644 index d7cedb79..00000000 --- a/open-codegen/opengen/__init__.py +++ /dev/null @@ -1,7 +0,0 @@ -import opengen.definitions -import opengen.builder -import opengen.config -import opengen.functions -import opengen.constraints -import opengen.tcp -import opengen.ocp diff --git a/open-codegen/opengen/builder/__init__.py b/open-codegen/opengen/builder/__init__.py deleted file mode 100644 index ceb1edb6..00000000 --- a/open-codegen/opengen/builder/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from .optimizer_builder import * -from .problem import * -from .set_y_calculator import * diff --git a/open-codegen/opengen/builder/ros_builder.py b/open-codegen/opengen/builder/ros_builder.py deleted file mode 100644 index 0108a8a5..00000000 --- a/open-codegen/opengen/builder/ros_builder.py +++ /dev/null @@ -1,222 +0,0 @@ -import opengen.definitions as og_dfn - -import os -import logging -import jinja2 -import shutil -import datetime - -_ROS_PREFIX = 'ros_node_' - - 
-def make_dir_if_not_exists(directory): - if not os.path.exists(directory): - os.makedirs(directory) - - -def get_template(name): - file_loader = jinja2.FileSystemLoader(og_dfn.templates_dir()) - env = jinja2.Environment(loader=file_loader, autoescape=True) - return env.get_template(name) - - -def get_ros_template(name): - file_loader = jinja2.FileSystemLoader(og_dfn.templates_subdir('ros')) - env = jinja2.Environment(loader=file_loader, autoescape=True) - return env.get_template(name) - - -class RosBuilder: - """ - Code generation for ROS-related files - - For internal use - """ - - def __init__(self, meta, build_config, solver_config): - self.__meta = meta - self.__build_config = build_config - self.__solver_config = solver_config - self.__logger = logging.getLogger('opengen.builder.RosBuilder') - stream_handler = logging.StreamHandler() - stream_handler.setLevel(1) - c_format = logging.Formatter('[%(levelname)s] <> %(message)s') - stream_handler.setFormatter(c_format) - self.__logger.setLevel(1) - self.__logger.addHandler(stream_handler) - - def __target_dir(self): - return os.path.abspath( - os.path.join( - self.__build_config.build_dir, - self.__meta.optimizer_name)) - - def __ros_target_dir(self): - ros_config = self.__build_config.ros_config - ros_target_dir_name = ros_config.package_name - return os.path.abspath( - os.path.join( - self.__build_config.build_dir, - self.__meta.optimizer_name, ros_target_dir_name)) - - def __generate_ros_dir_structure(self): - self.__logger.info("Generating directory structure") - target_ros_dir = self.__ros_target_dir() - make_dir_if_not_exists(target_ros_dir) - make_dir_if_not_exists(os.path.abspath( - os.path.join(target_ros_dir, 'include'))) - make_dir_if_not_exists(os.path.abspath( - os.path.join(target_ros_dir, 'extern_lib'))) - make_dir_if_not_exists(os.path.abspath( - os.path.join(target_ros_dir, 'src'))) - make_dir_if_not_exists(os.path.abspath( - os.path.join(target_ros_dir, 'msg'))) - 
make_dir_if_not_exists(os.path.abspath( - os.path.join(target_ros_dir, 'config'))) - make_dir_if_not_exists(os.path.abspath( - os.path.join(target_ros_dir, 'launch'))) - - def __generate_ros_package_xml(self): - self.__logger.info("Generating package.xml") - target_ros_dir = self.__ros_target_dir() - template = get_ros_template('package.xml') - output_template = template.render( - meta=self.__meta, ros=self.__build_config.ros_config) - target_rospkg_path = os.path.join(target_ros_dir, "package.xml") - with open(target_rospkg_path, "w") as fh: - fh.write(output_template) - - def __generate_ros_cmakelists(self): - self.__logger.info("Generating CMakeLists") - target_ros_dir = self.__ros_target_dir() - template = get_ros_template('CMakeLists.txt') - output_template = template.render(meta=self.__meta, - ros=self.__build_config.ros_config) - target_rospkg_path = os.path.join(target_ros_dir, "CMakeLists.txt") - with open(target_rospkg_path, "w") as fh: - fh.write(output_template) - - def __copy__ros_files(self): - self.__logger.info("Copying external dependencies") - # 1. --- copy header file - target_ros_dir = self.__ros_target_dir() - header_file_name = self.__meta.optimizer_name + '_bindings.hpp' - target_include_filename = os.path.abspath( - os.path.join( - target_ros_dir, 'include', header_file_name)) - original_include_file = os.path.abspath( - os.path.join(self.__target_dir(), header_file_name)) - shutil.copyfile(original_include_file, target_include_filename) - - # 2. --- copy library file - lib_file_name = 'lib' + self.__meta.optimizer_name + '.a' - target_lib_file_name = \ - os.path.abspath( - os.path.join( - target_ros_dir, 'extern_lib', lib_file_name)) - original_lib_file = os.path.abspath( - os.path.join( - self.__target_dir(), - 'target', - self.__build_config.build_mode, - lib_file_name)) - shutil.copyfile(original_lib_file, target_lib_file_name) - - # 3. 
--- copy msg file OptimizationParameters.msg - original_params_msg = os.path.abspath( - os.path.join( - og_dfn.templates_dir(), 'ros', 'OptimizationParameters.msg')) - target_params_msg = \ - os.path.abspath( - os.path.join( - target_ros_dir, 'msg', 'OptimizationParameters.msg')) - shutil.copyfile(original_params_msg, target_params_msg) - - # 4. --- copy msg file OptimizationResult.msg - original_result_msg = os.path.abspath( - os.path.join( - og_dfn.templates_dir(), 'ros', 'OptimizationResult.msg')) - target_result_msg = \ - os.path.abspath( - os.path.join( - target_ros_dir, 'msg', 'OptimizationResult.msg')) - shutil.copyfile(original_result_msg, target_result_msg) - - def __generate_ros_params_file(self): - self.__logger.info("Generating open_params.yaml") - target_ros_dir = self.__ros_target_dir() - template = get_ros_template('open_params.yaml') - output_template = template.render(meta=self.__meta, - ros=self.__build_config.ros_config) - target_yaml_fname \ - = os.path.join(target_ros_dir, "config", "open_params.yaml") - with open(target_yaml_fname, "w") as fh: - fh.write(output_template) - - def __generate_ros_node_header(self): - self.__logger.info("Generating open_optimizer.hpp") - target_ros_dir = self.__ros_target_dir() - template = get_ros_template('open_optimizer.hpp') - output_template = template.render(meta=self.__meta, - ros=self.__build_config.ros_config, - solver_config=self.__solver_config) - target_rosnode_header_path \ - = os.path.join(target_ros_dir, "include", "open_optimizer.hpp") - with open(target_rosnode_header_path, "w") as fh: - fh.write(output_template) - - def __generate_ros_node_cpp(self): - self.__logger.info("Generating open_optimizer.cpp") - target_ros_dir = self.__ros_target_dir() - template = get_ros_template('open_optimizer.cpp') - output_template = template.render(meta=self.__meta, - ros=self.__build_config.ros_config, - timestamp_created=datetime.datetime.now()) - target_rosnode_cpp_path \ - = os.path.join(target_ros_dir, 
"src", "open_optimizer.cpp") - with open(target_rosnode_cpp_path, "w") as fh: - fh.write(output_template) - - def __generate_ros_launch_file(self): - self.__logger.info("Generating open_optimizer.launch") - target_ros_dir = self.__ros_target_dir() - template = get_ros_template('open_optimizer.launch') - output_template = template.render(meta=self.__meta, - ros=self.__build_config.ros_config) - target_rosnode_launch_path \ - = os.path.join(target_ros_dir, "launch", "open_optimizer.launch") - with open(target_rosnode_launch_path, "w") as fh: - fh.write(output_template) - - def __generate_ros_readme_file(self): - self.__logger.info("Generating README.md") - target_ros_dir = self.__ros_target_dir() - template = get_ros_template('README.md') - output_template = template.render( - ros=self.__build_config.ros_config) - target_readme_path \ - = os.path.join(target_ros_dir, "README.md") - with open(target_readme_path, "w") as fh: - fh.write(output_template) - - def __symbolic_link_info_message(self): - target_ros_dir = self.__ros_target_dir() - self.__logger.info("ROS package was built successfully. 
Now run:") - self.__logger.info("ln -s %s ~/catkin_ws/src/", target_ros_dir) - self.__logger.info("cd ~/catkin_ws/; catkin_make") - - def build(self): - """ - Build ROS-related files - """ - self.__generate_ros_dir_structure() # generate necessary folders - self.__generate_ros_package_xml() # generate package.xml - self.__generate_ros_cmakelists() # generate CMakeLists.txt - self.__copy__ros_files() # Copy certain files - # # - C++ bindings, library, msg - self.__generate_ros_params_file() # generate params file - self.__generate_ros_node_header() # generate node .hpp file - self.__generate_ros_node_cpp() # generate main node .cpp file - self.__generate_ros_launch_file() # generate launch file - self.__generate_ros_readme_file() # final touch: create README.md - self.__symbolic_link_info_message() # Info: create symbolic link diff --git a/open-codegen/opengen/config/__init__.py b/open-codegen/opengen/config/__init__.py deleted file mode 100644 index 3b81ad6b..00000000 --- a/open-codegen/opengen/config/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -from .meta import * -from .solver_config import * -from .build_config import * -from .tcp_server_config import * -from .ros_config import * diff --git a/open-codegen/opengen/constraints/__init__.py b/open-codegen/opengen/constraints/__init__.py deleted file mode 100644 index e361c496..00000000 --- a/open-codegen/opengen/constraints/__init__.py +++ /dev/null @@ -1,14 +0,0 @@ -from .ball1 import * -from .ball2 import * -from .sphere2 import * -from .rectangle import * -from .constraint import * -from .ball_inf import * -from .soc import * -from .no_constraints import * -from .cartesian import * -from .zero import * -from .finite_set import * -from .halfspace import * -from .simplex import * -from .affine_space import * diff --git a/open-codegen/opengen/functions/__init__.py b/open-codegen/opengen/functions/__init__.py deleted file mode 100644 index 2f8b3339..00000000 --- a/open-codegen/opengen/functions/__init__.py +++ 
/dev/null @@ -1,9 +0,0 @@ -from .rosenbrock import * -from .fmin import * -from .fmax import * -from .is_symbolic import * -from .is_numeric import * -from .sign import * -from .norm2 import * -from .fabs import * -from .norm2_squared import * diff --git a/open-codegen/opengen/tcp/__init__.py b/open-codegen/opengen/tcp/__init__.py deleted file mode 100644 index 49783d98..00000000 --- a/open-codegen/opengen/tcp/__init__.py +++ /dev/null @@ -1,4 +0,0 @@ -from .optimizer_tcp_manager import * -from .solver_status import * -from .solver_error import * -from .solver_response import * diff --git a/open-codegen/opengen/templates/c/example_cmakelists.txt b/open-codegen/opengen/templates/c/example_cmakelists.txt deleted file mode 100644 index 13a79032..00000000 --- a/open-codegen/opengen/templates/c/example_cmakelists.txt +++ /dev/null @@ -1,19 +0,0 @@ -cmake_minimum_required(VERSION 3.5) - -# Project name -project({{meta.optimizer_name}}) - -# Add the executable -add_executable(optimizer example_optimizer.c) - -# Add libraries to the executable -target_link_libraries(optimizer ${CMAKE_SOURCE_DIR}/target/{{ build_config.build_mode }}/lib{{meta.optimizer_name}}.a) -target_link_libraries(optimizer m) -target_link_libraries(optimizer dl) -target_link_libraries(optimizer pthread) - -add_custom_target(run - COMMAND optimizer - DEPENDS optimizer - WORKING_DIRECTORY ${CMAKE_PROJECT_DIR} -) \ No newline at end of file diff --git a/open-codegen/opengen/templates/python/python_bindings.rs b/open-codegen/opengen/templates/python/python_bindings.rs deleted file mode 100644 index 40d5a18a..00000000 --- a/open-codegen/opengen/templates/python/python_bindings.rs +++ /dev/null @@ -1,143 +0,0 @@ -/// -/// Auto-generated python bindings for optimizer: {{ meta.optimizer_name }} -/// -use optimization_engine::alm::*; - -use pyo3::prelude::*; -use pyo3::wrap_pyfunction; - -use {{ meta.optimizer_name }}::*; - -#[pymodule] -fn {{ meta.optimizer_name }}(_py: Python, m: &PyModule) -> PyResult<()> { 
- m.add_function(wrap_pyfunction!(solver, m)?)?; - m.add_class::()?; - m.add_class::()?; - Ok(()) -} - -#[pyfunction] -fn solver() -> PyResult { - let cache = initialize_solver(); - Ok(Solver { cache }) -} - -/// Solution and solution status of optimizer -#[pyclass] -struct OptimizerSolution { - #[pyo3(get)] - exit_status: String, - #[pyo3(get)] - num_outer_iterations: usize, - #[pyo3(get)] - num_inner_iterations: usize, - #[pyo3(get)] - last_problem_norm_fpr: f64, - #[pyo3(get)] - f1_infeasibility: f64, - #[pyo3(get)] - f2_norm: f64, - #[pyo3(get)] - solve_time_ms: f64, - #[pyo3(get)] - penalty: f64, - #[pyo3(get)] - solution: Vec, - #[pyo3(get)] - lagrange_multipliers: Vec, - #[pyo3(get)] - cost: f64, -} - -#[pyclass] -struct Solver { - cache: AlmCache, -} - -#[pymethods] -impl Solver { - /// Run solver - /// - #[text_signature = "($self, p, initial_guess, initial_y, initial_penalty)"] - fn run( - &mut self, - p: Vec, - initial_guess: Option>, - initial_lagrange_multipliers: Option>, - initial_penalty: Option, - ) -> PyResult> { - let mut u = [0.0; {{meta.optimizer_name|upper}}_NUM_DECISION_VARIABLES]; - - // ---------------------------------------------------- - // Set initial value - // ---------------------------------------------------- - if let Some(u0) = initial_guess { - if u0.len() != {{meta.optimizer_name|upper}}_NUM_DECISION_VARIABLES { - println!( - "1600 -> Initial guess has incompatible dimensions: {} != {}", - u0.len(), - {{meta.optimizer_name|upper}}_NUM_DECISION_VARIABLES - ); - return Ok(None); - } - u.copy_from_slice(&u0); - } - - // ---------------------------------------------------- - // Check lagrange multipliers - // ---------------------------------------------------- - if let Some(y0) = &initial_lagrange_multipliers { - if y0.len() != {{meta.optimizer_name|upper}}_N1 { - println!( - "1700 -> wrong dimension of Langrange multipliers: {} != {}", - y0.len(), - {{meta.optimizer_name|upper}}_N1 - ); - return Ok(None); - } - } - - // 
---------------------------------------------------- - // Check parameter - // ---------------------------------------------------- - if p.len() != {{meta.optimizer_name|upper}}_NUM_PARAMETERS { - println!( - "3003 -> wrong number of parameters: {} != {}", - p.len(), - {{meta.optimizer_name|upper}}_NUM_PARAMETERS - ); - return Ok(None); - } - - // ---------------------------------------------------- - // Run solver - // ---------------------------------------------------- - let solver_status = solve( - &p, - &mut self.cache, - &mut u, - &initial_lagrange_multipliers, - &initial_penalty, - ); - - match solver_status { - Ok(status) => Ok(Some(OptimizerSolution { - exit_status: format!("{:?}", status.exit_status()), - num_outer_iterations: status.num_outer_iterations(), - num_inner_iterations: status.num_inner_iterations(), - last_problem_norm_fpr: status.last_problem_norm_fpr(), - f1_infeasibility: status.delta_y_norm_over_c(), - f2_norm: status.f2_norm(), - penalty: status.penalty(), - lagrange_multipliers: status.lagrange_multipliers().clone().unwrap_or_default(), - solve_time_ms: (status.solve_time().as_nanos() as f64) / 1e6, - solution: u.to_vec(), - cost: status.cost(), - })), - Err(_) => { - println!("2000 -> Problem solution failed (solver error)"); - Ok(None) - } - } - } -} diff --git a/open-codegen/test/__init__.py b/open-codegen/test/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/open-codegen/test/test.py b/open-codegen/test/test.py deleted file mode 100644 index d56f3075..00000000 --- a/open-codegen/test/test.py +++ /dev/null @@ -1,571 +0,0 @@ -import os -import unittest -import casadi.casadi as cs -import opengen as og -import subprocess -import logging -import numpy as np - - -class RustBuildTestCase(unittest.TestCase): - - TEST_DIR = ".python_test_build" - - @staticmethod - def get_open_local_absolute_path(): - cwd = os.getcwd() - return cwd.split('open-codegen')[0] - - # Which version of OpEn Rust library to test against - 
OPEN_RUSTLIB_VERSION = "*" - - @classmethod - def solverConfig(cls): - solver_config = og.config.SolverConfiguration() \ - .with_lbfgs_memory(15) \ - .with_tolerance(1e-4) \ - .with_initial_tolerance(1e-4) \ - .with_delta_tolerance(1e-4) \ - .with_initial_penalty(15.0) \ - .with_penalty_weight_update_factor(10.0) \ - .with_max_inner_iterations(155) \ - .with_max_duration_micros(1e8) \ - .with_max_outer_iterations(50) \ - .with_sufficient_decrease_coefficient(0.05) \ - .with_cbfgs_parameters(1.5, 1e-10, 1e-12) \ - .with_preconditioning(False) - return solver_config - - @classmethod - def setUpPythonBindings(cls): - u = cs.MX.sym("u", 5) # decision variable (nu = 5) - p = cs.MX.sym("p", 2) # parameter (np = 2) - phi = og.functions.rosenbrock(u, p) # cost function - bounds = og.constraints.Ball2(None, 1.5) # ball centered at origin - meta = og.config.OptimizerMeta() \ - .with_optimizer_name("python_bindings") - problem = og.builder.Problem(u, p, phi) \ - .with_constraints(bounds) - build_config = og.config.BuildConfiguration() \ - .with_open_version(local_path=RustBuildTestCase.get_open_local_absolute_path()) \ - .with_build_directory(RustBuildTestCase.TEST_DIR) \ - .with_build_mode(og.config.BuildConfiguration.DEBUG_MODE)\ - .with_build_python_bindings() - og.builder.OpEnOptimizerBuilder(problem, - metadata=meta, - build_configuration=build_config, - solver_configuration=cls.solverConfig()) \ - .build() - - @classmethod - def setUpOnlyF1(cls): - u = cs.MX.sym("u", 5) # decision variable (nu = 5) - p = cs.MX.sym("p", 2) # parameter (np = 2) - f1 = cs.vertcat(1.5 * u[0] - u[1], u[2] - u[3]) - set_c = og.constraints.Rectangle( - xmin=[-0.01, -0.01], xmax=[0.02, 0.03]) - phi = og.functions.rosenbrock(u, p) # cost function - bounds = og.constraints.Ball2(None, 1.5) # ball centered at origin - tcp_config = og.config.TcpServerConfiguration(bind_port=3301) - meta = og.config.OptimizerMeta() \ - .with_optimizer_name("only_f1") - problem = og.builder.Problem(u, p, phi) \ - 
.with_aug_lagrangian_constraints(f1, set_c) \ - .with_constraints(bounds) - build_config = og.config.BuildConfiguration() \ - .with_open_version(local_path=RustBuildTestCase.get_open_local_absolute_path()) \ - .with_build_directory(RustBuildTestCase.TEST_DIR) \ - .with_build_mode(og.config.BuildConfiguration.DEBUG_MODE) \ - .with_tcp_interface_config(tcp_interface_config=tcp_config) \ - .with_build_c_bindings() \ - .with_allocator(og.config.RustAllocator.JemAlloc) - og.builder.OpEnOptimizerBuilder(problem, - metadata=meta, - build_configuration=build_config, - solver_configuration=cls.solverConfig()) \ - .build() - - @classmethod - def setUpOnlyF2(cls, is_preconditioned=False): - u = cs.MX.sym("u", 5) # decision variable (nu = 5) - p = cs.MX.sym("p", 2) # parameter (np = 2) - f2 = cs.vertcat(0.2 + 1.5 * u[0] - u[1], u[2] - u[3] - 0.1) - phi = og.functions.rosenbrock(u, p) - bounds = og.constraints.Ball2(None, 1.5) - tcp_config = og.config.TcpServerConfiguration( - bind_port=3302 if not is_preconditioned else 3309) - meta = og.config.OptimizerMeta() \ - .with_optimizer_name("only_f2" + ("_precond" if is_preconditioned else "")) - problem = og.builder.Problem(u, p, phi) \ - .with_penalty_constraints(f2) \ - .with_constraints(bounds) - build_config = og.config.BuildConfiguration() \ - .with_open_version(local_path=RustBuildTestCase.get_open_local_absolute_path()) \ - .with_build_directory(RustBuildTestCase.TEST_DIR) \ - .with_build_mode(og.config.BuildConfiguration.DEBUG_MODE) \ - .with_tcp_interface_config(tcp_interface_config=tcp_config) \ - .with_build_c_bindings() - slv_cfg = og.config.SolverConfiguration() \ - .with_tolerance(1e-6) \ - .with_initial_tolerance(1e-4) \ - .with_delta_tolerance(1e-5) \ - .with_penalty_weight_update_factor(10.0) \ - .with_max_inner_iterations(1000) \ - .with_max_outer_iterations(50) \ - .with_preconditioning(is_preconditioned) - og.builder.OpEnOptimizerBuilder(problem, - metadata=meta, - build_configuration=build_config, - 
solver_configuration=slv_cfg) \ - .build() - - @classmethod - def setUpPlain(cls): - u = cs.MX.sym("u", 5) # decision variable (nu = 5) - p = cs.MX.sym("p", 2) # parameter (np = 2) - phi = og.functions.rosenbrock(u, p) - bounds = og.constraints.Ball2(None, 1.5) - tcp_config = og.config.TcpServerConfiguration(bind_port=4598) - meta = og.config.OptimizerMeta() \ - .with_optimizer_name("plain") - problem = og.builder.Problem(u, p, phi) \ - .with_constraints(bounds) - build_config = og.config.BuildConfiguration() \ - .with_open_version(local_path=RustBuildTestCase.get_open_local_absolute_path()) \ - .with_build_directory(RustBuildTestCase.TEST_DIR) \ - .with_build_mode(og.config.BuildConfiguration.DEBUG_MODE) \ - .with_tcp_interface_config(tcp_interface_config=tcp_config) \ - .with_build_c_bindings() - og.builder.OpEnOptimizerBuilder(problem, - metadata=meta, - build_configuration=build_config, - solver_configuration=cls.solverConfig()) \ - .build() - - @classmethod - def setUpRosPackageGeneration(cls): - u = cs.MX.sym("u", 5) # decision variable (nu = 5) - p = cs.MX.sym("p", 2) # parameter (np = 2) - phi = og.functions.rosenbrock(u, p) - c = cs.vertcat(1.5 * u[0] - u[1], - cs.fmax(0.0, u[2] - u[3] + 0.1)) - bounds = og.constraints.Ball2(None, 1.5) - meta = og.config.OptimizerMeta() \ - .with_optimizer_name("rosenbrock_ros") - problem = og.builder.Problem(u, p, phi) \ - .with_constraints(bounds) \ - .with_penalty_constraints(c) - ros_config = og.config.RosConfiguration() \ - .with_package_name("parametric_optimizer") \ - .with_node_name("open_node") \ - .with_rate(35) \ - .with_description("really cool ROS node") - build_config = og.config.BuildConfiguration() \ - .with_open_version(local_path=RustBuildTestCase.get_open_local_absolute_path()) \ - .with_build_directory(RustBuildTestCase.TEST_DIR) \ - .with_build_mode(og.config.BuildConfiguration.DEBUG_MODE) \ - .with_build_c_bindings() \ - .with_ros(ros_config) - og.builder.OpEnOptimizerBuilder(problem, - metadata=meta, 
- build_configuration=build_config, - solver_configuration=cls.solverConfig()) \ - .build() - - @classmethod - def setUpOnlyParametricF2(cls): - u = cs.MX.sym("u", 5) # decision variable (nu = 5) - p = cs.MX.sym("p", 3) # parameter (np = 3) - f2 = u[0] - p[2] - phi = og.functions.rosenbrock( - u, cs.vertcat(p[0], p[1])) # cost function - bounds = og.constraints.Ball2(None, 1.5) # ball centered at origin - tcp_config = og.config.TcpServerConfiguration(bind_port=4599) - meta = og.config.OptimizerMeta() \ - .with_optimizer_name("parametric_f2") - problem = og.builder.Problem(u, p, phi) \ - .with_penalty_constraints(f2) \ - .with_constraints(bounds) - build_config = og.config.BuildConfiguration() \ - .with_open_version(local_path=RustBuildTestCase.get_open_local_absolute_path()) \ - .with_build_mode(og.config.BuildConfiguration.DEBUG_MODE) \ - .with_build_directory(RustBuildTestCase.TEST_DIR) \ - .with_tcp_interface_config(tcp_interface_config=tcp_config) \ - .with_build_c_bindings() - solver_config = og.config.SolverConfiguration() \ - .with_tolerance(1e-6) \ - .with_initial_tolerance(1e-4) \ - .with_delta_tolerance(1e-5) \ - .with_penalty_weight_update_factor(5) - og.builder.OpEnOptimizerBuilder( - problem, meta, build_config, solver_config).build() - - @classmethod - def setUpHalfspace(cls): - u = cs.SX.sym("u", 5) # decision variable (nu = 5) - p = cs.SX.sym("p", 2) # parameter (np = 2) - phi = cs.dot(u, u) # cost function - - bounds = og.constraints.Halfspace([1., 2., 1., 5., 2.], -10.39) - - problem = og.builder.Problem(u, p, phi) \ - .with_constraints(bounds) - - meta = og.config.OptimizerMeta() \ - .with_optimizer_name("halfspace_optimizer") - - tcp_config = og.config.TcpServerConfiguration(bind_port=3305) - build_config = og.config.BuildConfiguration() \ - .with_build_directory(RustBuildTestCase.TEST_DIR) \ - .with_build_mode(og.config.BuildConfiguration.DEBUG_MODE) \ - .with_open_version(local_path=RustBuildTestCase.get_open_local_absolute_path()) \ - 
.with_tcp_interface_config(tcp_interface_config=tcp_config) - - builder = og.builder.OpEnOptimizerBuilder(problem, - meta, - build_config, - cls.solverConfig()) - builder.build() - - @classmethod - def setUpClass(cls): - cls.setUpPythonBindings() - cls.setUpRosPackageGeneration() - cls.setUpOnlyF1() - cls.setUpOnlyF2() - cls.setUpOnlyF2(is_preconditioned=True) - cls.setUpPlain() - cls.setUpOnlyParametricF2() - cls.setUpHalfspace() - - def test_python_bindings(self): - import sys - import os - - # include the target directory into the path... - sys.path.insert(1, os.path.join( - RustBuildTestCase.TEST_DIR, "python_bindings")) - import python_bindings # import python_bindings.so - - solver = python_bindings.solver() - # returns object of type OptimizerSolution - result = solver.run([1., 2.]) - self.assertIsNotNone(result.solution) - - def test_rectangle_empty(self): - xmin = [-1, 2] - xmax = [-2, 4] - with self.assertRaises(Exception) as __context: - og.constraints.Rectangle(xmin, xmax) - - def test_rectangle_incompatible_dimensions(self): - xmin = [-1, -1, 1] - xmax = [1, 1] - with self.assertRaises(Exception) as __context: - og.constraints.Rectangle(xmin, xmax) - - def test_rectangle_both_none(self): - with self.assertRaises(Exception) as __context: - og.constraints.Rectangle(None, None) - - def test_ball_negative_radius(self): - with self.assertRaises(Exception) as __context: - og.constraints.Ball2(None, -1) - - def test_solver_config_wrong_max_duration(self): - with self.assertRaises(Exception) as __context: - og.config.SolverConfiguration().with_max_duration_micros(0) - - def test_solver_config_wrong_update_factor(self): - with self.assertRaises(Exception) as __context: - og.config.SolverConfiguration().with_penalty_weight_update_factor(0.5) - - def test_solver_config_wrong_outer_iterations(self): - with self.assertRaises(Exception) as __context: - og.config.SolverConfiguration().with_max_outer_iterations(0) - - def 
test_solver_config_wrong_inner_iterations(self): - with self.assertRaises(Exception) as __context: - og.config.SolverConfiguration().with_max_inner_iterations(0) - - def test_solver_config_wrong_constraints_tolerance(self): - with self.assertRaises(Exception) as __context: - og.config.SolverConfiguration().with_delta_tolerance(0) - - def test_solver_config_wrong_inner_tolerance(self): - with self.assertRaises(Exception) as __context: - og.config.SolverConfiguration().with_tolerance(0) - - def test_solver_config_wrong_lbfgs_memory(self): - with self.assertRaises(Exception) as __context: - og.config.SolverConfiguration().with_lbfgs_memory(1) - - def test_solver_config_wrong_max_inner_iterations(self): - with self.assertRaises(Exception) as __context: - og.config.SolverConfiguration().with_max_inner_iterations() - - def test_start_multiple_servers(self): - all_managers = [] - for i in range(10): - all_managers += [og.tcp.OptimizerTcpManager( - optimizer_path=RustBuildTestCase.TEST_DIR + '/only_f1', - ip='0.0.0.0', - port=15311+i)] - - # Start all servers - for m in all_managers: - m.start() - - # Ping all - for m in all_managers: - m.ping() - - # Kill all - for m in all_managers: - m.kill() - - def test_rust_build_only_f1(self): - # Start the server using a custom bind IP and port - mng = og.tcp.OptimizerTcpManager(RustBuildTestCase.TEST_DIR + '/only_f1', - ip='0.0.0.0', - port=13757) - mng.start() - pong = mng.ping() # check if the server is alive - self.assertEqual(1, pong["Pong"]) - - # Regular call - response = mng.call(p=[2.0, 10.0]) - self.assertEqual("Converged", response["exit_status"]) - - # Call with initial params, initial y and initial penalty param - response = mng.call(p=[2.0, 10.0], - initial_guess=response["solution"], - initial_y=response["lagrange_multipliers"], - initial_penalty=response["penalty"]) - self.assertTrue(response.is_ok()) - status = response.get() - self.assertEqual(2, status.num_outer_iterations) - - response = mng.call(p=[2.0, 10.0, 
50.0]) - status = response.get() - self.assertFalse(response.is_ok()) - self.assertEqual(True, isinstance(status, og.tcp.SolverError)) - self.assertEqual(3003, status.code) - - response = mng.call(p=[2.0, 10.0], initial_guess=[0.1, 0.2]) - self.assertFalse(response.is_ok()) - status = response.get() - self.assertEqual(True, isinstance(status, og.tcp.SolverError)) - self.assertEqual(1600, status.code) - - response = mng.call(p=[2.0, 10.0], initial_y=[0.1]) - status = response.get() - self.assertFalse(response.is_ok()) - self.assertEqual(True, isinstance(status, og.tcp.SolverError)) - self.assertEqual(1700, status.code) - - mng.kill() - - def test_rust_build_only_f2_preconditioned(self): - mng1 = og.tcp.OptimizerTcpManager( - RustBuildTestCase.TEST_DIR + '/only_f2') - mng2 = og.tcp.OptimizerTcpManager( - RustBuildTestCase.TEST_DIR + '/only_f2_precond') - mng1.start() - mng2.start() - - try: - response1 = mng1.call(p=[0.5, 8.5], initial_guess=[ - 1, 2, 3, 4, 0]).get() - response2 = mng2.call(p=[0.5, 8.5], initial_guess=[ - 1, 2, 3, 4, 0]).get() - - self.assertEqual("Converged", response1.exit_status) - self.assertEqual("Converged", response2.exit_status) - - # Further testing - slv_cfg = RustBuildTestCase.solverConfig() - # check that the solution is (near-) feasible - self.assertTrue(response1.f2_norm < slv_cfg.constraints_tolerance) - self.assertTrue(response2.f2_norm < slv_cfg.constraints_tolerance) - # check the nrom of the FPR - self.assertTrue(response1.last_problem_norm_fpr < - slv_cfg.tolerance) - self.assertTrue(response2.last_problem_norm_fpr < - slv_cfg.tolerance) - # compare the costs - self.assertAlmostEqual(response1.cost, response2.cost, 4) - - x1, x2 = response1.solution, response2.solution - for i in range(len(x1)): - self.assertAlmostEqual(x1[i], x2[i], delta=5e-4) - - response = mng1.call(p=[2.0, 10.0, 50.0]) - self.assertFalse(response.is_ok()) - status = response.get() - self.assertEqual(True, isinstance(status, og.tcp.SolverError)) - 
self.assertEqual(3003, status.code) - - response = mng1.call(p=[2.0, 10.0], initial_guess=[0.1, 0.2]) - self.assertFalse(response.is_ok()) - status = response.get() - self.assertEqual(True, isinstance(status, og.tcp.SolverError)) - self.assertEqual(1600, status.code) - - response = mng1.call(p=[2.0, 10.0], initial_y=[0.1]) - self.assertFalse(response.is_ok()) - status = response.get() - self.assertEqual(True, isinstance(status, og.tcp.SolverError)) - self.assertEqual(1700, status.code) - finally: - mng1.kill() - mng2.kill() - - def test_rust_build_plain(self): - mng = og.tcp.OptimizerTcpManager(RustBuildTestCase.TEST_DIR + '/plain') - mng.start() - pong = mng.ping() # check if the server is alive - self.assertEqual(1, pong["Pong"]) - - # Regular call - response = mng.call(p=[2.0, 10.0]) - self.assertTrue(response.is_ok()) - status = response.get() - self.assertEqual("Converged", status.exit_status) - - mng.kill() - - def test_rust_build_parametric_f2(self): - # introduced to tackle issue #123 - mng = og.tcp.OptimizerTcpManager( - RustBuildTestCase.TEST_DIR + '/parametric_f2') - mng.start() - pong = mng.ping() # check if the server is alive - self.assertEqual(1, pong["Pong"]) - - # Regular call - response = mng.call(p=[1.0, 1.0, 0.5]) - self.assertTrue(response.is_ok()) - status = response.get() - self.assertEqual("Converged", status.exit_status) - self.assertTrue(status.f2_norm < 1e-4) - mng.kill() - - def test_rust_build_parametric_halfspace(self): - mng = og.tcp.OptimizerTcpManager( - RustBuildTestCase.TEST_DIR + '/halfspace_optimizer') - mng.start() - pong = mng.ping() # check if the server is alive - self.assertEqual(1, pong["Pong"]) - - # Regular call - response = mng.call(p=[1.0, 1.0]) - self.assertTrue(response.is_ok()) - status = response.get() - self.assertEqual("Converged", status.exit_status) - u = status.solution - c = [1., 2., 1., 5., 2.] 
- b = -10.39 - eps = 1e-14 - self.assertTrue(sum([u[i] * c[i] for i in range(5)]) - b <= eps) - self.assertTrue(-sum([u[i] * c[i] for i in range(5)]) + b <= eps) - - mng.kill() - - @staticmethod - def c_bindings_helper(optimizer_name): - p = subprocess.Popen(["/usr/bin/gcc", - RustBuildTestCase.TEST_DIR + "/" + optimizer_name + "/example_optimizer.c", - "-I" + RustBuildTestCase.TEST_DIR + "/" + optimizer_name, - "-pthread", - RustBuildTestCase.TEST_DIR + "/" + optimizer_name + - "/target/debug/lib" + optimizer_name + ".a", - "-lm", - "-ldl", - "-std=c99", - "-o", - RustBuildTestCase.TEST_DIR + "/" + optimizer_name + "/optimizer"], - stdout=subprocess.PIPE, - stderr=subprocess.PIPE) - - # Make sure it compiles - p.communicate() - rc1 = p.returncode - - # Run the optimizer - p = subprocess.Popen([RustBuildTestCase.TEST_DIR + "/" + optimizer_name + "/optimizer"], - stdout=subprocess.DEVNULL) - p.communicate() - rc2 = p.returncode - - return rc1, rc2 - - def test_c_bindings(self): - rc1, rc2 = RustBuildTestCase.c_bindings_helper( - optimizer_name="only_f1") - self.assertEqual(0, rc1) - self.assertEqual(0, rc2) - - rc1, rc2 = RustBuildTestCase.c_bindings_helper( - optimizer_name="only_f2") - self.assertEqual(0, rc1) - self.assertEqual(0, rc2) - - rc1, rc2 = RustBuildTestCase.c_bindings_helper(optimizer_name="plain") - self.assertEqual(0, rc1) - self.assertEqual(0, rc2) - - def test_tcp_manager_remote_cannot_start(self): - remote_tcp_manager = og.tcp.OptimizerTcpManager( - ip='10.8.0.1', port=3345) - with self.assertRaises(Exception) as __context: - remote_tcp_manager.start() - - def test_tcp_manager_remote_ip_no_port(self): - with self.assertRaises(Exception) as __context: - _remote_tcp_manager = og.tcp.OptimizerTcpManager(ip='10.8.0.1') - - def test_tcp_manager_remote_port_no_ip(self): - with self.assertRaises(Exception) as __context: - _remote_tcp_manager = og.tcp.OptimizerTcpManager(port=8888) - - def test_set_y(self): - c = og.constraints.Ball2(radius=1) - y_calc = 
og.builder.SetYCalculator(c) - y = y_calc.obtain() - - def test_squared_norm(self): - u = np.array([3, 4]) - y = og.functions.norm2_squared(u) - self.assertAlmostEqual(25., y, places=12) - - u = [3, 4] - y = og.functions.norm2_squared(u) - self.assertAlmostEqual(25., y, places=12) - - u = cs.SX.sym("u", 2) - f = og.functions.norm2_squared(u) - fun = cs.Function('fun', [u], [f]) - y = fun([3, 4]) - self.assertAlmostEqual(25., y, places=12) - - def test_optimizer_meta_valid_version(self): - meta = og.config.OptimizerMeta().with_version("1.2.3-alpha.1+build.5") - self.assertEqual("1.2.3-alpha.1+build.5", meta.version) - - def test_optimizer_meta_invalid_version1(self): - with self.assertRaises(ValueError) as context: - og.config.OptimizerMeta().with_version("^1.2") - - self.assertIn("Cargo package version", str(context.exception)) - - def test_optimizer_meta_invalid_version2(self): - with self.assertRaises(ValueError) as context: - og.config.OptimizerMeta().with_version("0.1") - - self.assertIn("Cargo package version", str(context.exception)) - - -if __name__ == '__main__': - logging.getLogger('retry').setLevel(logging.ERROR) - unittest.main() diff --git a/python/.gitignore b/python/.gitignore new file mode 100644 index 00000000..7202b60e --- /dev/null +++ b/python/.gitignore @@ -0,0 +1,18 @@ +.eggs/ +__pycache__/ +*.pyc +*.egg-info + +.pytest_cache/ +.python_test_build/ +.python_test_build_ocp/ + +build/ +dist/ +htmlcov/ +my_optimizers/ +opengen.egg-info/ +opengen/icasadi/extern/Makefile +run/ +venv*/ +virt/ diff --git a/open-codegen/CHANGELOG.md b/python/CHANGELOG.md similarity index 73% rename from open-codegen/CHANGELOG.md rename to python/CHANGELOG.md index 75b7a86d..cbda879e 100644 --- a/open-codegen/CHANGELOG.md +++ b/python/CHANGELOG.md @@ -8,6 +8,39 @@ and this project adheres to [Semantic Versioning](http://semver.org/). 
Note: This is the Changelog file of `opengen` - the Python interface of OpEn +## [0.11.0] - 31 March 2026 + +### Added + +- ROS2 package generation support via `BuildConfiguration.with_ros2(...)`, including auto-generated ROS2 templates, launcher, messages, and package wrapper code +- Dedicated ROS2 tests covering package generation, build configuration behavior, rendered custom package settings, and end-to-end execution of a generated ROS2 node +- More informative TCP solver error payloads, including clearer dimension/parameter validation failures and propagated solver-side failure messages +- Additional unit tests for `BuildConfiguration`, `OcpSolution`, `AffineSpace`, `RosConfiguration`, and `SetYCalculator`, increasing Python coverage to 93% + +### Changed + +- Extended `RosConfiguration` so it can be used for both ROS and ROS2 package generation +- Breaking change: the direct interface (Python bindings) now has an API which mirrors that of the TCP interface: the method `solve` returns either a solution or an error object. Website documentation is updated. New unit tests are implemented. Note that `solver.run()` does not return the solution object directly, but rather works in the same way as the TCP interface: it returns a response object (instance of `SolverResponse`), on which the method `.get()` returns either a `SolverStatus` or `SolverError`. +- Added helpful `__repr__` methods to generated Python binding response/status/error objects, TCP solver response/error objects, and `GeneratedOptimizer` for easier inspection and debugging +- Updated generated TCP server and C interface templates to work with the richer Rust solver error model and expose better failure information to clients. Updated auto-generated `CMakeLists.txt` file. Tighter unit tests. 
+- ROS2 generated packages now publish detailed `error_code` and `error_message` fields, plus `STATUS_INVALID_REQUEST`, so invalid requests and solver failures are reported explicitly instead of being silently ignored +- Extended GitHub Actions CI to run Python, OCP, and generated-code tests on Windows, and fixed multiple Windows-specific code generation, path, encoding, TCP, and C/CMake compatibility issues. +- ROS/ROS2 messages: using `uint64` instead of `uint8` to avoid overflow +- Tighter checks of provided arguments in build configuration +- Added `pytest` to the optional `dev` dependencies and documented local test, benchmark, and coverage workflows for Python and Rust contributors +- Restructured folder structure of entire repo: opengen moved to folder `python` + +### Fixed + +- Generated ROS and ROS2 wrappers now accept any meaningful positive `initial_penalty` instead of requiring values greater than `1.0` +- Generated ROS and ROS2 result messages now use wide iteration counters so `inner_iterations` and `outer_iterations` cannot overflow +- ROS2 generated packages now use `uint16` for `error_code`, matching the current positive error-code range while keeping the wire format compact +- MATLAB-related Python docs now reference the new `python/` package layout instead of the removed `open-codegen/` path +- ROS2 integration tests now clean stale nested `colcon` build artifacts and preserve the active shell environment more reliably in micromamba/conda setups +- Fixed issues in `__init__.py`: lazy imports and discoverability +- Now checking whether the constraints have the correct dimension before attempting to build an optimizer + + ## [0.10.1] - 2026-03-25 @@ -288,6 +321,7 @@ Note: This is the Changelog file of `opengen` - the Python interface of OpEn * Fixed `lbfgs` typo +[0.11.0]: https://github.com/alphaville/optimization-engine/compare/opengen-0.10.1...opengen-0.11.0 [0.10.1]: 
https://github.com/alphaville/optimization-engine/compare/opengen-0.10.0...opengen-0.10.1 [0.10.0]: https://github.com/alphaville/optimization-engine/compare/opengen-0.9.6...opengen-0.10.0 [0.9.6]: https://github.com/alphaville/optimization-engine/compare/opengen-0.9.5...opengen-0.9.6 diff --git a/open-codegen/MANIFEST.in b/python/MANIFEST.in similarity index 100% rename from open-codegen/MANIFEST.in rename to python/MANIFEST.in diff --git a/open-codegen/README.md b/python/README.md similarity index 100% rename from open-codegen/README.md rename to python/README.md diff --git a/python/VERSION b/python/VERSION new file mode 100644 index 00000000..142464bf --- /dev/null +++ b/python/VERSION @@ -0,0 +1 @@ +0.11.0 \ No newline at end of file diff --git a/open-codegen/main.py b/python/main.py similarity index 75% rename from open-codegen/main.py rename to python/main.py index 5f64dc91..80b35633 100644 --- a/open-codegen/main.py +++ b/python/main.py @@ -9,8 +9,7 @@ def get_open_local_absolute_path(): - cwd = os.getcwd() - return cwd.split('open-codegen')[0] + return os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "rust")) nu, np = 5, 2 @@ -44,23 +43,21 @@ def get_open_local_absolute_path(): meta, build_config, solver_cfg) -builder.build() +# builder.build() sys.path.insert(1, os.path.join(optimizers_dir, optimizer_name)) rosenbrock = __import__(optimizer_name) solver = rosenbrock.solver() -result = solver.run(p=[0.5, 8.5], initial_guess=[1, 2, 3, 4, 0]) -print(" ") -print(f"solution = {result.solution}") -print(f"time = {result.solve_time_ms} ms") -print(f"penalty = {result.penalty}") -print(f"infeasibility f1 = {result.f1_infeasibility}") -print(f"infeasibility f2 = {result.f2_norm}") -print(f"status = {result.exit_status}") -print(f"inner = {result.num_inner_iterations}") -print(f"outer = {result.num_outer_iterations}") -print(f"cost = {result.cost}") +response = solver.run(p=[0.5, 8.5], initial_guess=[1, 2, 3, 4, 0]) # SolverResponse + +if 
response.is_ok(): + result = response.get() # SolverStatus + print(type(result)) +else: + error = response.get() # SolverError + print(type(error)) + # Preconditioned Non-preconditioned # ------------------------------------- @@ -69,4 +66,4 @@ def get_open_local_absolute_path(): # Solutions: # # [-0.06168156776090604, 0.10745271293967644, 0.11363970229300129, 0.013666212246169969, 0.00018549750799884656] -# [-0.06168061635750967, 0.10744096043712821, 0.11361445307465148, 0.013640838407880301, 0.00019045750968868237] \ No newline at end of file +# [-0.06168061635750967, 0.10744096043712821, 0.11361445307465148, 0.013640838407880301, 0.00019045750968868237] diff --git a/python/opengen/__init__.py b/python/opengen/__init__.py new file mode 100644 index 00000000..daf6cf89 --- /dev/null +++ b/python/opengen/__init__.py @@ -0,0 +1,44 @@ +"""Top-level package for OpEn with lazy submodule imports. + +This module defers importing heavy subpackages to attribute access +to avoid circular import problems during package initialization. + +Lazy submodule imports defer the loading of Python modules and their +attributes until they are first accessed, reducing startup time and +memory usage. This is achieved using PEP 562 (__getattr__ and __dir__) +to intercept attribute access and load the underlying code only when +necessary. +""" + +from importlib import import_module + +__all__ = [ + "definitions", + "builder", + "config", + "functions", + "constraints", + "tcp", + "ocp", +] + + +def __getattr__(name): + """Lazily import submodules on attribute access. + + Example: accessing ``opengen.builder`` will import + ``opengen.builder`` and cache it on the package module. + + This defers importing heavy subpackages until they're actually used + (lazy imports), reducing startup cost and helping avoid import-time + circular dependencies. 
+ """ + if name in __all__: + module = import_module(f"{__name__}.{name}") + globals()[name] = module + return module + raise AttributeError(f"module {__name__!r} has no attribute {name!r}") + + +def __dir__(): + return sorted(list(__all__) + list(globals().keys())) diff --git a/python/opengen/builder/__init__.py b/python/opengen/builder/__init__.py new file mode 100644 index 00000000..7f127ba1 --- /dev/null +++ b/python/opengen/builder/__init__.py @@ -0,0 +1,12 @@ +from .optimizer_builder import OpEnOptimizerBuilder +from .problem import Problem +from .set_y_calculator import SetYCalculator +from .ros_builder import RosBuilder, ROS2Builder + +__all__ = [ + "OpEnOptimizerBuilder", + "Problem", + "SetYCalculator", + "RosBuilder", + "ROS2Builder", +] diff --git a/open-codegen/opengen/builder/optimizer_builder.py b/python/opengen/builder/optimizer_builder.py similarity index 97% rename from open-codegen/opengen/builder/optimizer_builder.py rename to python/opengen/builder/optimizer_builder.py index 316f5422..d4e2c2b8 100644 --- a/open-codegen/opengen/builder/optimizer_builder.py +++ b/python/opengen/builder/optimizer_builder.py @@ -1,7 +1,10 @@ +from __future__ import annotations + import subprocess import shutil import yaml +import opengen as og import opengen.config as og_cfg import opengen.definitions as og_dfn import opengen.constraints as og_cstr @@ -12,7 +15,7 @@ import sys from importlib.metadata import version -from .ros_builder import RosBuilder +from .ros_builder import ROS2Builder, RosBuilder _AUTOGEN_COST_FNAME = 'auto_casadi_cost.c' _AUTOGEN_GRAD_FNAME = 'auto_casadi_grad.c' @@ -43,10 +46,10 @@ class OpEnOptimizerBuilder: """ def __init__(self, - problem, - metadata=og_cfg.OptimizerMeta(), - build_configuration=og_cfg.BuildConfiguration(), - solver_configuration=og_cfg.SolverConfiguration()): + problem: og.builder.Problem, + metadata: og_cfg.OptimizerMeta =og_cfg.OptimizerMeta(), + build_configuration: og_cfg.BuildConfiguration 
=og_cfg.BuildConfiguration(), + solver_configuration: og_cfg.SolverConfiguration=og_cfg.SolverConfiguration()): """Constructor of OpEnOptimizerBuilder :param problem: instance of :class:`~opengen.builder.problem.Problem` @@ -645,6 +648,15 @@ def __initialize(self): def __check_user_provided_parameters(self): self.__logger.info("Checking user parameters") + + # Check constraints dimensions + dim_constraints = self.__problem.constraints.dimension() + dim_decision_variables = self.__problem.dim_decision_variables() + if dim_constraints is not None and dim_decision_variables != dim_constraints: + raise ValueError(f"Inconsistent dimensions - decision variables: {dim_decision_variables}", + f"set of constraints: {dim_constraints}") + + # Preconditioning... if self.__solver_config.preconditioning: # Preconditioning is not allowed when we have general ALM-type constraints of the form # F1(u, p) in C, unless C is {0} or an orthant (special case of rectangle). @@ -920,4 +932,11 @@ def build(self): self.__solver_config) ros_builder.build() + if self.__build_config.ros2_config is not None: + ros2_builder = ROS2Builder( + self.__meta, + self.__build_config, + self.__solver_config) + ros2_builder.build() + return self.__info() diff --git a/open-codegen/opengen/builder/problem.py b/python/opengen/builder/problem.py similarity index 100% rename from open-codegen/opengen/builder/problem.py rename to python/opengen/builder/problem.py diff --git a/python/opengen/builder/ros_builder.py b/python/opengen/builder/ros_builder.py new file mode 100644 index 00000000..4b539d8b --- /dev/null +++ b/python/opengen/builder/ros_builder.py @@ -0,0 +1,331 @@ +"""Builders for auto-generated ROS1 and ROS2 package wrappers.""" + +import opengen.definitions as og_dfn + +import datetime +import logging +import os +import shutil +import sys + +import jinja2 + + +def make_dir_if_not_exists(directory): + """Create ``directory`` if it does not already exist. 
+ + :param directory: Path to the directory to create. + :type directory: str + """ + if not os.path.exists(directory): + os.makedirs(directory) + + +def get_ros_template(template_subdir, name): + """Load a Jinja template from a ROS-specific template subdirectory. + + :param template_subdir: Template subdirectory name, e.g. ``"ros"`` or + ``"ros2"``. + :type template_subdir: str + :param name: Template file name. + :type name: str + + :return: Loaded Jinja template. + :rtype: jinja2.Template + """ + file_loader = jinja2.FileSystemLoader(og_dfn.templates_subdir(template_subdir)) + env = jinja2.Environment(loader=file_loader, autoescape=True) + return env.get_template(name) + + +class _BaseRosBuilder: + """ + Shared code generation logic for ROS-related packages. + + This base class contains the common file-generation pipeline used by both + :class:`RosBuilder` and :class:`ROS2Builder`. Subclasses specialize the + process by providing the package configuration object, template + subdirectory, launch file name, and final user-facing instructions. + + :ivar _meta: Optimizer metadata used to render the package templates. + :ivar _build_config: Global build configuration for the generated solver. + :ivar _solver_config: Solver configuration used when rendering node code. + :ivar _logger: Logger dedicated to the concrete builder implementation. + """ + + #: Template subdirectory under ``opengen/templates`` used by the builder. + _template_subdir = None + #: Fully-qualified logger name for the concrete builder. + _logger_name = None + #: Short logger tag shown in log messages. + _logger_tag = None + #: Launch file generated by the concrete builder. + _launch_file_name = None + + def __init__(self, meta, build_config, solver_config): + """Initialise a shared ROS package builder. + + :param meta: Optimizer metadata. + :param build_config: Build configuration object. + :param solver_config: Solver configuration object. 
+ """ + self._meta = meta + self._build_config = build_config + self._solver_config = solver_config + self._logger = logging.getLogger(self._logger_name) + stream_handler = logging.StreamHandler() + stream_handler.setLevel(1) + c_format = logging.Formatter( + f'[%(levelname)s] <<{self._logger_tag}>> %(message)s') + stream_handler.setFormatter(c_format) + self._logger.setLevel(1) + self._logger.handlers.clear() + self._logger.addHandler(stream_handler) + self._logger.propagate = False + + @property + def _ros_config(self): + """Return the ROS/ROS2 package configuration for the subclass. + + :return: ROS configuration object used by the concrete builder. + :raises NotImplementedError: If a subclass does not provide this hook. + """ + raise NotImplementedError + + def _template(self, name): + """Return a template from the builder's template subdirectory. + + :param name: Template file name. + :type name: str + + :return: Loaded Jinja template. + :rtype: jinja2.Template + """ + return get_ros_template(self._template_subdir, name) + + def _target_dir(self): + """Return the root directory of the generated optimizer project. + + :return: Absolute path to the generated optimizer directory. + :rtype: str + """ + return os.path.abspath( + os.path.join( + self._build_config.build_dir, + self._meta.optimizer_name)) + + def _ros_target_dir(self): + """Return the root directory of the generated ROS package. + + :return: Absolute path to the generated ROS/ROS2 package directory. 
+ :rtype: str + """ + return os.path.abspath( + os.path.join( + self._build_config.build_dir, + self._meta.optimizer_name, + self._ros_config.package_name)) + + def _generate_ros_dir_structure(self): + """Create the directory structure for the generated ROS package.""" + self._logger.info("Generating directory structure") + target_ros_dir = self._ros_target_dir() + make_dir_if_not_exists(target_ros_dir) + for directory_name in ('include', 'extern_lib', 'src', 'msg', 'config', 'launch'): + make_dir_if_not_exists(os.path.abspath( + os.path.join(target_ros_dir, directory_name))) + + def _generate_ros_package_xml(self): + """Render and write ``package.xml`` for the generated package.""" + self._logger.info("Generating package.xml") + target_ros_dir = self._ros_target_dir() + template = self._template('package.xml') + output_template = template.render(meta=self._meta, ros=self._ros_config) + target_rospkg_path = os.path.join(target_ros_dir, "package.xml") + with open(target_rospkg_path, "w", encoding="utf-8") as fh: + fh.write(output_template) + + def _generate_ros_cmakelists(self): + """Render and write the package ``CMakeLists.txt`` file.""" + self._logger.info("Generating CMakeLists") + target_ros_dir = self._ros_target_dir() + template = self._template('CMakeLists.txt') + output_template = template.render(meta=self._meta, ros=self._ros_config) + target_rospkg_path = os.path.join(target_ros_dir, "CMakeLists.txt") + with open(target_rospkg_path, "w", encoding="utf-8") as fh: + fh.write(output_template) + + def _copy_ros_files(self): + """Copy generated bindings, static library, and message files.""" + self._logger.info("Copying external dependencies") + target_ros_dir = self._ros_target_dir() + + header_file_name = self._meta.optimizer_name + '_bindings.hpp' + target_include_filename = os.path.abspath( + os.path.join(target_ros_dir, 'include', header_file_name)) + original_include_file = os.path.abspath( + os.path.join(self._target_dir(), header_file_name)) + 
shutil.copyfile(original_include_file, target_include_filename) + + if sys.platform == "win32": + lib_file_name = self._meta.optimizer_name + '.lib' + else: + lib_file_name = 'lib' + self._meta.optimizer_name + '.a' + target_lib_file_name = os.path.abspath( + os.path.join(target_ros_dir, 'extern_lib', lib_file_name)) + original_lib_file = os.path.abspath( + os.path.join( + self._target_dir(), + 'target', + self._build_config.build_mode, + lib_file_name)) + shutil.copyfile(original_lib_file, target_lib_file_name) + + for message_name in ('OptimizationParameters.msg', 'OptimizationResult.msg'): + original_message = os.path.abspath( + os.path.join( + og_dfn.templates_dir(), + self._template_subdir, + message_name)) + target_message = os.path.abspath( + os.path.join(target_ros_dir, 'msg', message_name)) + shutil.copyfile(original_message, target_message) + + def _generate_ros_params_file(self): + """Render and write the runtime parameter YAML file.""" + self._logger.info("Generating open_params.yaml") + target_ros_dir = self._ros_target_dir() + template = self._template('open_params.yaml') + output_template = template.render(meta=self._meta, ros=self._ros_config) + target_yaml_fname = os.path.join(target_ros_dir, "config", "open_params.yaml") + with open(target_yaml_fname, "w", encoding="utf-8") as fh: + fh.write(output_template) + + def _generate_ros_node_header(self): + """Render and write the generated node header file.""" + self._logger.info("Generating open_optimizer.hpp") + target_ros_dir = self._ros_target_dir() + template = self._template('open_optimizer.hpp') + output_template = template.render( + meta=self._meta, + ros=self._ros_config, + solver_config=self._solver_config) + target_rosnode_header_path = os.path.join( + target_ros_dir, "include", "open_optimizer.hpp") + with open(target_rosnode_header_path, "w", encoding="utf-8") as fh: + fh.write(output_template) + + def _generate_ros_node_cpp(self): + """Render and write the generated node implementation 
file.""" + self._logger.info("Generating open_optimizer.cpp") + target_ros_dir = self._ros_target_dir() + template = self._template('open_optimizer.cpp') + output_template = template.render( + meta=self._meta, + ros=self._ros_config, + timestamp_created=datetime.datetime.now()) + target_rosnode_cpp_path = os.path.join(target_ros_dir, "src", "open_optimizer.cpp") + with open(target_rosnode_cpp_path, "w", encoding="utf-8") as fh: + fh.write(output_template) + + def _generate_ros_launch_file(self): + """Render and write the package launch file.""" + self._logger.info("Generating %s", self._launch_file_name) + target_ros_dir = self._ros_target_dir() + template = self._template(self._launch_file_name) + output_template = template.render(meta=self._meta, ros=self._ros_config) + target_rosnode_launch_path = os.path.join( + target_ros_dir, "launch", self._launch_file_name) + with open(target_rosnode_launch_path, "w", encoding="utf-8") as fh: + fh.write(output_template) + + def _generate_ros_readme_file(self): + """Render and write the generated package README.""" + self._logger.info("Generating README.md") + target_ros_dir = self._ros_target_dir() + template = self._template('README.md') + output_template = template.render(ros=self._ros_config) + target_readme_path = os.path.join(target_ros_dir, "README.md") + with open(target_readme_path, "w", encoding="utf-8") as fh: + fh.write(output_template) + + def _symbolic_link_info_message(self): + """Emit final user-facing setup instructions for the generated package. + + :raises NotImplementedError: If a subclass does not provide this hook. + """ + raise NotImplementedError + + def build(self): + """ + Generate all ROS/ROS2 wrapper files for the current optimizer. + + This method creates the package directory structure, copies the + generated solver artefacts, renders all templates, and logs final setup + instructions for the user. 
+ """ + self._generate_ros_dir_structure() + self._generate_ros_package_xml() + self._generate_ros_cmakelists() + self._copy_ros_files() + self._generate_ros_params_file() + self._generate_ros_node_header() + self._generate_ros_node_cpp() + self._generate_ros_launch_file() + self._generate_ros_readme_file() + self._symbolic_link_info_message() + + +class RosBuilder(_BaseRosBuilder): + """ + Builder for ROS1 package generation. + + This specialization uses the ``templates/ros`` template set and the + ROS1-specific configuration stored in + :attr:`opengen.config.build_config.BuildConfiguration.ros_config`. + """ + + _template_subdir = 'ros' + _logger_name = 'opengen.builder.RosBuilder' + _logger_tag = 'ROS' + _launch_file_name = 'open_optimizer.launch' + + @property + def _ros_config(self): + """Return the ROS1 package configuration.""" + return self._build_config.ros_config + + def _symbolic_link_info_message(self): + """Log the final ROS1 workspace integration instructions.""" + target_ros_dir = self._ros_target_dir() + self._logger.info("ROS package was built successfully. Now run:") + self._logger.info("ln -s %s ~/catkin_ws/src/", target_ros_dir) + self._logger.info("cd ~/catkin_ws/; catkin_make") + + +class ROS2Builder(_BaseRosBuilder): + """ + Builder for ROS2 package generation. + + This specialization uses the ``templates/ros2`` template set and the + ROS2-specific configuration stored in + :attr:`opengen.config.build_config.BuildConfiguration.ros2_config`. + """ + + _template_subdir = 'ros2' + _logger_name = 'opengen.builder.ROS2Builder' + _logger_tag = 'ROS2' + _launch_file_name = 'open_optimizer.launch.py' + + @property + def _ros_config(self): + """Return the ROS2 package configuration.""" + return self._build_config.ros2_config + + def _symbolic_link_info_message(self): + """Log the final ROS2 workspace integration instructions.""" + target_ros_dir = self._ros_target_dir() + self._logger.info("ROS2 package was built successfully. 
Now run:") + self._logger.info("ln -s %s ~/ros2_ws/src/", target_ros_dir) + self._logger.info("cd ~/ros2_ws/; colcon build --packages-select %s", + self._ros_config.package_name) diff --git a/open-codegen/opengen/builder/set_y_calculator.py b/python/opengen/builder/set_y_calculator.py similarity index 100% rename from open-codegen/opengen/builder/set_y_calculator.py rename to python/opengen/builder/set_y_calculator.py diff --git a/python/opengen/config/__init__.py b/python/opengen/config/__init__.py new file mode 100644 index 00000000..74b924eb --- /dev/null +++ b/python/opengen/config/__init__.py @@ -0,0 +1,15 @@ +from .meta import OptimizerMeta, SEMVER_PATTERN +from .solver_config import SolverConfiguration +from .build_config import BuildConfiguration, RustAllocator +from .tcp_server_config import TcpServerConfiguration +from .ros_config import RosConfiguration + +__all__ = [ + "OptimizerMeta", + "SEMVER_PATTERN", + "SolverConfiguration", + "BuildConfiguration", + "RustAllocator", + "TcpServerConfiguration", + "RosConfiguration", +] diff --git a/open-codegen/opengen/config/build_config.py b/python/opengen/config/build_config.py similarity index 82% rename from open-codegen/opengen/config/build_config.py rename to python/opengen/config/build_config.py index 939150d1..78819f85 100644 --- a/open-codegen/opengen/config/build_config.py +++ b/python/opengen/config/build_config.py @@ -1,5 +1,6 @@ from opengen.config.tcp_server_config import TcpServerConfiguration from opengen.config.ros_config import RosConfiguration +from opengen.config.meta import SEMVER_PATTERN import random import string from enum import Enum @@ -57,6 +58,7 @@ def __init__(self, build_dir="."): self.__build_c_bindings = False self.__build_python_bindings = False self.__ros_config = None + self.__ros2_config = None self.__tcp_interface_config = None self.__local_path = None self.__allocator = RustAllocator.DefaultAllocator @@ -104,7 +106,11 @@ def open_version(self): @property def local_path(self): 
"""Local path of OpEn (if any)""" - return self.__local_path + if self.__local_path is None: + return None + # Cargo.toml accepts forward slashes on Windows, while raw backslashes + # inside TOML strings are treated as escape sequences. + return self.__local_path.replace("\\", "/") @property def build_c_bindings(self): @@ -135,6 +141,14 @@ def ros_config(self) -> RosConfiguration: """ return self.__ros_config + @property + def ros2_config(self) -> RosConfiguration: + """ROS2 package configuration + + :return: instance of RosConfiguration + """ + return self.__ros2_config + @property def allocator(self) -> RustAllocator: """ @@ -183,6 +197,15 @@ def with_build_mode(self, build_mode): :return: current instance of BuildConfiguration """ + if build_mode not in ( + BuildConfiguration.DEBUG_MODE, + BuildConfiguration.RELEASE_MODE, + ): + raise ValueError( + "build mode must be either " + f"'{BuildConfiguration.DEBUG_MODE}' or " + f"'{BuildConfiguration.RELEASE_MODE}'" + ) self.__build_mode = build_mode return self @@ -212,8 +235,15 @@ def with_open_version(self, open_version="*", local_path=None): :return: current instance of BuildConfiguration """ + if open_version != "*" and ( + not isinstance(open_version, str) or not SEMVER_PATTERN.match(open_version) + ): + raise ValueError( + "invalid OpEn version {!r}; expected '*' or a Semantic Version " + "such as '0.1.0' or '1.2.3-alpha.1'".format(open_version) + ) self.__open_version = open_version - self.__local_path = local_path + self.__local_path = None if local_path is None else str(local_path) return self def with_build_c_bindings(self, build_c_bindings=True): @@ -257,6 +287,21 @@ def with_ros(self, ros_config: RosConfiguration): """ self.__build_c_bindings = True # no C++ bindings, no ROS package mate self.__ros_config = ros_config + self.__ros2_config = None + return self + + def with_ros2(self, ros_config: RosConfiguration): + """ + Activates the generation of a ROS2 package. 
The caller must provide an + instance of RosConfiguration + + :param ros_config: Configuration of ROS2 package + + :return: current instance of BuildConfiguration + """ + self.__build_c_bindings = True # no C++ bindings, no ROS package + self.__ros2_config = ros_config + self.__ros_config = None return self def with_tcp_interface_config(self, tcp_interface_config=TcpServerConfiguration()): @@ -283,6 +328,10 @@ def with_allocator(self, allocator: RustAllocator): :return: current instance of BuildConfiguration """ + if not isinstance(allocator, RustAllocator): + raise ValueError( + "allocator must be an instance of RustAllocator" + ) self.__allocator = allocator return self @@ -300,4 +349,6 @@ def to_dict(self): build_dict["tcp_interface_config"] = self.__tcp_interface_config.to_dict() if self.__ros_config is not None: build_dict["ros_config"] = self.__ros_config.to_dict() + if self.__ros2_config is not None: + build_dict["ros2_config"] = self.__ros2_config.to_dict() return build_dict diff --git a/open-codegen/opengen/config/meta.py b/python/opengen/config/meta.py similarity index 100% rename from open-codegen/opengen/config/meta.py rename to python/opengen/config/meta.py diff --git a/open-codegen/opengen/config/ros_config.py b/python/opengen/config/ros_config.py similarity index 95% rename from open-codegen/opengen/config/ros_config.py rename to python/opengen/config/ros_config.py index 206051c1..4e0a6c91 100644 --- a/open-codegen/opengen/config/ros_config.py +++ b/python/opengen/config/ros_config.py @@ -3,7 +3,7 @@ class RosConfiguration: """ - Configuration of auto-generated ROS package + Configuration of an auto-generated ROS or ROS2 package """ def __init__(self): @@ -61,7 +61,7 @@ def description(self): @property def rate(self): - """ROS node rate in Hz + """ROS/ROS2 node rate in Hz :return: rate, defaults to `10.0` """ @@ -87,7 +87,7 @@ def params_topic_queue_size(self): def with_package_name(self, pkg_name): """ Set the package name, which is the same as the 
name - of the folder that will store the auto-generated ROS node. + of the folder that will store the auto-generated ROS/ROS2 node. The node name can contain lowercase and uppercase characters and underscores, but not spaces or other symbols @@ -124,6 +124,7 @@ def with_node_name(self, node_name): def with_rate(self, rate): """ Set the rate of the ROS node + or ROS2 node :param rate: rate in Hz :type rate: float @@ -135,7 +136,7 @@ def with_rate(self, rate): def with_description(self, description): """ - Set the description of the ROS package + Set the description of the ROS or ROS2 package :param description: description, defaults to "parametric optimization with OpEn" :type description: string @@ -149,7 +150,7 @@ def with_queue_sizes(self, result_topic_queue_size=100, parameter_topic_queue_size=100): """ - Set queue sizes for ROS node + Set queue sizes for ROS or ROS2 node :param result_topic_queue_size: queue size of results, defaults to 100 :type result_topic_queue_size: int, optional diff --git a/open-codegen/opengen/config/solver_config.py b/python/opengen/config/solver_config.py similarity index 100% rename from open-codegen/opengen/config/solver_config.py rename to python/opengen/config/solver_config.py diff --git a/open-codegen/opengen/config/tcp_server_config.py b/python/opengen/config/tcp_server_config.py similarity index 100% rename from open-codegen/opengen/config/tcp_server_config.py rename to python/opengen/config/tcp_server_config.py diff --git a/python/opengen/constraints/__init__.py b/python/opengen/constraints/__init__.py new file mode 100644 index 00000000..ce529003 --- /dev/null +++ b/python/opengen/constraints/__init__.py @@ -0,0 +1,31 @@ +from .ball1 import Ball1 +from .ball2 import Ball2 +from .sphere2 import Sphere2 +from .rectangle import Rectangle +from .constraint import Constraint +from .ball_inf import BallInf +from .soc import SecondOrderCone +from .no_constraints import NoConstraints +from .cartesian import CartesianProduct +from 
.zero import Zero +from .finite_set import FiniteSet +from .halfspace import Halfspace +from .simplex import Simplex +from .affine_space import AffineSpace + +__all__ = [ + "Ball1", + "Ball2", + "Sphere2", + "Rectangle", + "Constraint", + "BallInf", + "SecondOrderCone", + "NoConstraints", + "CartesianProduct", + "Zero", + "FiniteSet", + "Halfspace", + "Simplex", + "AffineSpace", +] diff --git a/open-codegen/opengen/constraints/affine_space.py b/python/opengen/constraints/affine_space.py similarity index 95% rename from open-codegen/opengen/constraints/affine_space.py rename to python/opengen/constraints/affine_space.py index 1eb054c6..ae38bfea 100644 --- a/open-codegen/opengen/constraints/affine_space.py +++ b/python/opengen/constraints/affine_space.py @@ -54,3 +54,6 @@ def is_compact(self): """Affine spaces are not compact sets """ return False + + def dimension(self): + return super().dimension() diff --git a/open-codegen/opengen/constraints/ball1.py b/python/opengen/constraints/ball1.py similarity index 94% rename from open-codegen/opengen/constraints/ball1.py rename to python/opengen/constraints/ball1.py index 7ca05849..deee203d 100644 --- a/open-codegen/opengen/constraints/ball1.py +++ b/python/opengen/constraints/ball1.py @@ -73,3 +73,8 @@ def is_convex(self): def is_compact(self): return True + + def dimension(self): + if self.center is None: + return None + return len(self.center) \ No newline at end of file diff --git a/open-codegen/opengen/constraints/ball2.py b/python/opengen/constraints/ball2.py similarity index 93% rename from open-codegen/opengen/constraints/ball2.py rename to python/opengen/constraints/ball2.py index f0bb43be..70fdffd0 100644 --- a/open-codegen/opengen/constraints/ball2.py +++ b/python/opengen/constraints/ball2.py @@ -7,7 +7,7 @@ class Ball2(Constraint): """A Euclidean ball constraint - A constraint of the form :math:`\|u-u_0\| \leq r`, where :math:`u_0` is the center + A constraint of the form :math:`\Vert u-u_0 \Vert \leq r`, where 
:math:`u_0` is the center of the ball and `r` is its radius """ @@ -91,3 +91,8 @@ def is_convex(self): def is_compact(self): return True + + def dimension(self): + if self.center is None: + return None + return len(self.center) \ No newline at end of file diff --git a/open-codegen/opengen/constraints/ball_inf.py b/python/opengen/constraints/ball_inf.py similarity index 96% rename from open-codegen/opengen/constraints/ball_inf.py rename to python/opengen/constraints/ball_inf.py index 738eaf4f..4f4ea4ec 100644 --- a/open-codegen/opengen/constraints/ball_inf.py +++ b/python/opengen/constraints/ball_inf.py @@ -88,3 +88,8 @@ def is_convex(self): def is_compact(self): return True + + def dimension(self): + if self.center is None: + return None + return len(self.center) \ No newline at end of file diff --git a/open-codegen/opengen/constraints/cartesian.py b/python/opengen/constraints/cartesian.py similarity index 87% rename from open-codegen/opengen/constraints/cartesian.py rename to python/opengen/constraints/cartesian.py index 2992668f..448ebb16 100644 --- a/open-codegen/opengen/constraints/cartesian.py +++ b/python/opengen/constraints/cartesian.py @@ -19,6 +19,15 @@ def __init__(self, segments: List[int], constraints: List[constraint.Constraint] We can associate with :math:`x_{[1]}` the indices [0, 1] and with :math:`x_{[2]}` the indices [2, 3, 4]. The *segment ids* are the indices 1 and 4. + Important: + In Python, `segments` uses inclusive last indices. For example, + `segments=[1, 4]` means that the first segment is `x[0:2]` and the + second segment is `x[2:5]`. + + This convention is different from the Rust API, where Cartesian + products are specified using cumulative lengths / exclusive end + indices. 
+ Example: In this example we shall define the set :math:`X = \mathcal{B}_{1.5} \\times R \\times {\\rm I\!R}^{5}`, where :math:`\mathcal{B}_{1.5}` is a Euclidean ball of dimension 2 with radius 1.5, :math:`R` is a @@ -31,7 +40,7 @@ def __init__(self, segments: List[int], constraints: List[constraint.Constraint] >>> segment_ids = [1, 4, 9] >>> my_set = og.constraints.CartesianProduct(segment_ids, [ball, rect, free]) - :param segments: ids of segments + :param segments: inclusive last indices of segments :param constraints: list of sets """ @@ -44,7 +53,7 @@ def __init__(self, segments: List[int], constraints: List[constraint.Constraint] if segments[0] < 0: raise ValueError( - "the first element of segment must be a positive integer") + "the first element of segment must be a non-negative integer") if len(segments) != len(constraints): raise ValueError( @@ -115,3 +124,6 @@ def is_compact(self): if not set_i.is_compact(): return False return True + + def dimension(self): + return self.segments[-1] + 1 \ No newline at end of file diff --git a/open-codegen/opengen/constraints/constraint.py b/python/opengen/constraints/constraint.py similarity index 73% rename from open-codegen/opengen/constraints/constraint.py rename to python/opengen/constraints/constraint.py index 6076f223..8fee3aa8 100644 --- a/open-codegen/opengen/constraints/constraint.py +++ b/python/opengen/constraints/constraint.py @@ -32,3 +32,12 @@ def is_compact(self): Whether the set is compact """ return False + + def dimension(self): + """ + Constraint dimension + + Derived classes can override this method to return the dimension of the + constraint, where possible, or return `None` if the constraint does not + have a fixed dimension. 
+ """ \ No newline at end of file diff --git a/open-codegen/opengen/constraints/finite_set.py b/python/opengen/constraints/finite_set.py similarity index 96% rename from open-codegen/opengen/constraints/finite_set.py rename to python/opengen/constraints/finite_set.py index b590caab..37908a64 100644 --- a/open-codegen/opengen/constraints/finite_set.py +++ b/python/opengen/constraints/finite_set.py @@ -7,7 +7,7 @@ class FiniteSet(Constraint): """Finite set - A set of the form :math:`A = \{a_1, a_2, \ldots, a_K\}` + A set of the form :math:`A = \\{a_1, a_2, \\ldots, a_K\\}` """ def __init__(self, points=None): diff --git a/open-codegen/opengen/constraints/halfspace.py b/python/opengen/constraints/halfspace.py similarity index 100% rename from open-codegen/opengen/constraints/halfspace.py rename to python/opengen/constraints/halfspace.py diff --git a/open-codegen/opengen/constraints/no_constraints.py b/python/opengen/constraints/no_constraints.py similarity index 88% rename from open-codegen/opengen/constraints/no_constraints.py rename to python/opengen/constraints/no_constraints.py index a81722d4..7dd72a18 100644 --- a/open-codegen/opengen/constraints/no_constraints.py +++ b/python/opengen/constraints/no_constraints.py @@ -21,3 +21,6 @@ def is_convex(self): def is_compact(self): return False + + def dimension(self): + return None \ No newline at end of file diff --git a/open-codegen/opengen/constraints/rectangle.py b/python/opengen/constraints/rectangle.py similarity index 99% rename from open-codegen/opengen/constraints/rectangle.py rename to python/opengen/constraints/rectangle.py index c6340d09..f77e92b8 100644 --- a/open-codegen/opengen/constraints/rectangle.py +++ b/python/opengen/constraints/rectangle.py @@ -31,7 +31,7 @@ def __check_xmin_xmax(cls, xmin, xmax): if xmin_element > xmax_element: raise Exception("xmin must be <= xmax") - def __init__(self, xmin, xmax): + def __init__(self, xmin=None, xmax=None): """Construct a new instance of Rectangle :param xmin: 
minimum bounds (can be ``None``) diff --git a/open-codegen/opengen/constraints/simplex.py b/python/opengen/constraints/simplex.py similarity index 98% rename from open-codegen/opengen/constraints/simplex.py rename to python/opengen/constraints/simplex.py index 1a07fa93..113767e3 100644 --- a/open-codegen/opengen/constraints/simplex.py +++ b/python/opengen/constraints/simplex.py @@ -103,3 +103,6 @@ def is_convex(self): def is_compact(self): """Whether the set is compact (`True`)""" return True + + def dimension(self): + return None \ No newline at end of file diff --git a/open-codegen/opengen/constraints/soc.py b/python/opengen/constraints/soc.py similarity index 98% rename from open-codegen/opengen/constraints/soc.py rename to python/opengen/constraints/soc.py index eb1279e5..466058df 100644 --- a/open-codegen/opengen/constraints/soc.py +++ b/python/opengen/constraints/soc.py @@ -85,3 +85,6 @@ def is_convex(self): def is_compact(self): return False + + def dimension(self): + return None \ No newline at end of file diff --git a/open-codegen/opengen/constraints/sphere2.py b/python/opengen/constraints/sphere2.py similarity index 95% rename from open-codegen/opengen/constraints/sphere2.py rename to python/opengen/constraints/sphere2.py index f8fca5f2..5283da53 100644 --- a/open-codegen/opengen/constraints/sphere2.py +++ b/python/opengen/constraints/sphere2.py @@ -75,3 +75,8 @@ def is_convex(self): def is_compact(self): return True + + def dimension(self): + if self.center is None: + return None + return len(self.center) \ No newline at end of file diff --git a/open-codegen/opengen/constraints/zero.py b/python/opengen/constraints/zero.py similarity index 76% rename from open-codegen/opengen/constraints/zero.py rename to python/opengen/constraints/zero.py index 8205445c..96cb357e 100644 --- a/open-codegen/opengen/constraints/zero.py +++ b/python/opengen/constraints/zero.py @@ -7,15 +7,16 @@ class Zero(Constraint): """A set that contains only the origin - The singleton 
:math:`\{0\}` + The singleton :math:`\\{0\\}` """ def __init__(self): """ - Constructor for set :math:`Z = \{0\}` + Constructor for set :math:`Z = \\{0\\}` """ + pass def distance_squared(self, u): return fn.norm2_squared(u) @@ -28,3 +29,6 @@ def is_convex(self): def is_compact(self): return True + + def dimension(self): + return None \ No newline at end of file diff --git a/open-codegen/opengen/definitions.py b/python/opengen/definitions.py similarity index 100% rename from open-codegen/opengen/definitions.py rename to python/opengen/definitions.py diff --git a/python/opengen/functions/__init__.py b/python/opengen/functions/__init__.py new file mode 100644 index 00000000..11508d8b --- /dev/null +++ b/python/opengen/functions/__init__.py @@ -0,0 +1,21 @@ +from .rosenbrock import rosenbrock +from .fmin import fmin +from .fmax import fmax +from .is_symbolic import is_symbolic +from .is_numeric import is_numeric +from .sign import sign +from .norm2 import norm2 +from .fabs import fabs +from .norm2_squared import norm2_squared + +__all__ = [ + "rosenbrock", + "fmin", + "fmax", + "is_symbolic", + "is_numeric", + "sign", + "norm2", + "fabs", + "norm2_squared", +] diff --git a/open-codegen/opengen/functions/fabs.py b/python/opengen/functions/fabs.py similarity index 100% rename from open-codegen/opengen/functions/fabs.py rename to python/opengen/functions/fabs.py diff --git a/open-codegen/opengen/functions/fmax.py b/python/opengen/functions/fmax.py similarity index 100% rename from open-codegen/opengen/functions/fmax.py rename to python/opengen/functions/fmax.py diff --git a/open-codegen/opengen/functions/fmin.py b/python/opengen/functions/fmin.py similarity index 100% rename from open-codegen/opengen/functions/fmin.py rename to python/opengen/functions/fmin.py diff --git a/open-codegen/opengen/functions/is_numeric.py b/python/opengen/functions/is_numeric.py similarity index 100% rename from open-codegen/opengen/functions/is_numeric.py rename to 
python/opengen/functions/is_numeric.py diff --git a/open-codegen/opengen/functions/is_symbolic.py b/python/opengen/functions/is_symbolic.py similarity index 100% rename from open-codegen/opengen/functions/is_symbolic.py rename to python/opengen/functions/is_symbolic.py diff --git a/open-codegen/opengen/functions/norm2.py b/python/opengen/functions/norm2.py similarity index 100% rename from open-codegen/opengen/functions/norm2.py rename to python/opengen/functions/norm2.py diff --git a/open-codegen/opengen/functions/norm2_squared.py b/python/opengen/functions/norm2_squared.py similarity index 100% rename from open-codegen/opengen/functions/norm2_squared.py rename to python/opengen/functions/norm2_squared.py diff --git a/open-codegen/opengen/functions/rosenbrock.py b/python/opengen/functions/rosenbrock.py similarity index 100% rename from open-codegen/opengen/functions/rosenbrock.py rename to python/opengen/functions/rosenbrock.py diff --git a/open-codegen/opengen/functions/sign.py b/python/opengen/functions/sign.py similarity index 100% rename from open-codegen/opengen/functions/sign.py rename to python/opengen/functions/sign.py diff --git a/open-codegen/opengen/icasadi/.gitignore b/python/opengen/icasadi/.gitignore similarity index 100% rename from open-codegen/opengen/icasadi/.gitignore rename to python/opengen/icasadi/.gitignore diff --git a/open-codegen/opengen/icasadi/Cargo.toml b/python/opengen/icasadi/Cargo.toml similarity index 100% rename from open-codegen/opengen/icasadi/Cargo.toml rename to python/opengen/icasadi/Cargo.toml diff --git a/open-codegen/opengen/icasadi/README.md b/python/opengen/icasadi/README.md similarity index 100% rename from open-codegen/opengen/icasadi/README.md rename to python/opengen/icasadi/README.md diff --git a/open-codegen/opengen/icasadi/build.rs b/python/opengen/icasadi/build.rs similarity index 100% rename from open-codegen/opengen/icasadi/build.rs rename to python/opengen/icasadi/build.rs diff --git 
a/open-codegen/opengen/icasadi/extern/README.txt b/python/opengen/icasadi/extern/README.txt similarity index 100% rename from open-codegen/opengen/icasadi/extern/README.txt rename to python/opengen/icasadi/extern/README.txt diff --git a/open-codegen/opengen/icasadi/src/PLACEHOLDER b/python/opengen/icasadi/src/PLACEHOLDER similarity index 100% rename from open-codegen/opengen/icasadi/src/PLACEHOLDER rename to python/opengen/icasadi/src/PLACEHOLDER diff --git a/open-codegen/opengen/ocp/__init__.py b/python/opengen/ocp/__init__.py similarity index 100% rename from open-codegen/opengen/ocp/__init__.py rename to python/opengen/ocp/__init__.py diff --git a/open-codegen/opengen/ocp/builder.py b/python/opengen/ocp/builder.py similarity index 95% rename from open-codegen/opengen/ocp/builder.py rename to python/opengen/ocp/builder.py index 8a44286c..44a3a693 100644 --- a/open-codegen/opengen/ocp/builder.py +++ b/python/opengen/ocp/builder.py @@ -113,6 +113,19 @@ def backend_kind(self): """Backend kind used by this optimizer wrapper.""" return self.__backend_kind + def __repr__(self): + """Return a concise summary of the generated optimizer wrapper.""" + return ( + "GeneratedOptimizer(" + f"optimizer_name={self.__optimizer_name!r}, " + f"backend_kind={self.__backend_kind!r}, " + f"shooting={self.__shooting.value!r}, " + f"nx={self.__nx}, " + f"nu={self.__nu}, " + f"horizon={self.__horizon}, " + f"target_dir={self.__target_dir!r})" + ) + def start(self): """Start the backend if it is a local TCP server. @@ -229,6 +242,10 @@ def __casadi_version(): return casadi_version return GeneratedOptimizer.__safe_package_version("casadi") + @staticmethod + def __format_backend_error(error): + return getattr(error, "message", str(error)) + def save(self, json_path=None): """Save a manifest that can later recreate this optimizer. 
@@ -327,8 +344,10 @@ def solve( :param initial_penalty: optional initial penalty parameter :param parameter_values: named parameter values :return: :class:`OcpSolution` + :raises ValueError: if required named parameters are missing or have + incompatible dimensions :raises RuntimeError: if the backend is unavailable or the low-level - solve call fails + solve call fails; backend-specific error messages are propagated """ packed_parameters = self.__pack_parameters(parameter_values) @@ -341,6 +360,10 @@ def solve( ) if raw is None: raise RuntimeError("solver failed") + if hasattr(raw, "is_ok") and hasattr(raw, "get"): + if not raw.is_ok(): + raise RuntimeError(self.__format_backend_error(raw.get())) + raw = raw.get() elif self.__backend_kind == "tcp": self.start() response = self.__backend.call( @@ -350,7 +373,7 @@ def solve( initial_penalty=initial_penalty, ) if not response.is_ok(): - raise RuntimeError(str(response.get())) + raise RuntimeError(self.__format_backend_error(response.get())) raw = response.get() else: raise RuntimeError("optimizer backend is not available") diff --git a/open-codegen/opengen/ocp/constraint_utils.py b/python/opengen/ocp/constraint_utils.py similarity index 100% rename from open-codegen/opengen/ocp/constraint_utils.py rename to python/opengen/ocp/constraint_utils.py diff --git a/open-codegen/opengen/ocp/dynamics.py b/python/opengen/ocp/dynamics.py similarity index 100% rename from open-codegen/opengen/ocp/dynamics.py rename to python/opengen/ocp/dynamics.py diff --git a/open-codegen/opengen/ocp/parameter.py b/python/opengen/ocp/parameter.py similarity index 100% rename from open-codegen/opengen/ocp/parameter.py rename to python/opengen/ocp/parameter.py diff --git a/open-codegen/opengen/ocp/problem.py b/python/opengen/ocp/problem.py similarity index 100% rename from open-codegen/opengen/ocp/problem.py rename to python/opengen/ocp/problem.py diff --git a/open-codegen/opengen/ocp/solution.py b/python/opengen/ocp/solution.py similarity index 
100% rename from open-codegen/opengen/ocp/solution.py rename to python/opengen/ocp/solution.py diff --git a/python/opengen/tcp/__init__.py b/python/opengen/tcp/__init__.py new file mode 100644 index 00000000..17d36335 --- /dev/null +++ b/python/opengen/tcp/__init__.py @@ -0,0 +1,11 @@ +from .optimizer_tcp_manager import OptimizerTcpManager +from .solver_status import SolverStatus +from .solver_error import SolverError +from .solver_response import SolverResponse + +__all__ = [ + "OptimizerTcpManager", + "SolverStatus", + "SolverError", + "SolverResponse", +] diff --git a/open-codegen/opengen/tcp/optimizer_tcp_manager.py b/python/opengen/tcp/optimizer_tcp_manager.py similarity index 96% rename from open-codegen/opengen/tcp/optimizer_tcp_manager.py rename to python/opengen/tcp/optimizer_tcp_manager.py index 77c2297c..9a16a1f0 100644 --- a/open-codegen/opengen/tcp/optimizer_tcp_manager.py +++ b/python/opengen/tcp/optimizer_tcp_manager.py @@ -91,10 +91,16 @@ def __load_tcp_details(self): with open(yaml_file, 'r') as stream: self.__optimizer_details = yaml.safe_load(stream) + @staticmethod + def __client_ip_for_connection(ip): + # `0.0.0.0` is a valid bind address for the server, but it is not a + # routable destination for a client connection on Windows. 
+ return '127.0.0.1' if ip == '0.0.0.0' else ip + @retry(tries=10, delay=1) def __obtain_socket_connection(self): tcp_data = self.__optimizer_details - ip = tcp_data['tcp']['ip'] + ip = self.__client_ip_for_connection(tcp_data['tcp']['ip']) port = tcp_data['tcp']['port'] s = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0) try: @@ -132,7 +138,7 @@ def ping(self): def __check_if_server_is_running(self): tcp_data = self.__optimizer_details - ip = tcp_data['tcp']['ip'] + ip = self.__client_ip_for_connection(tcp_data['tcp']['ip']) port = tcp_data['tcp']['port'] with socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0) as s: result = 0 == s.connect_ex((ip, port)) diff --git a/open-codegen/opengen/tcp/solver_error.py b/python/opengen/tcp/solver_error.py similarity index 70% rename from open-codegen/opengen/tcp/solver_error.py rename to python/opengen/tcp/solver_error.py index 61ab7fc8..c78e0144 100644 --- a/open-codegen/opengen/tcp/solver_error.py +++ b/python/opengen/tcp/solver_error.py @@ -1,5 +1,5 @@ class SolverError: - """Class for storing solver status in the event of an error.""" + """Structured solver error returned by TCP or direct Python bindings.""" def __init__(self, error): """Constructs instance of :class:`~opengen.tcp.solver_error.SolverError` @@ -19,10 +19,10 @@ def code(self): Possible error codes are: - - **1000**: Invalid request: Malformed or invalid JSON + - **1000**: Invalid request: malformed JSON or invalid UTF-8 payload - **1600**: Initial guess has incomplete dimensions - **1700**: Wrong dimension of Lagrange multipliers - - **2000**: Problem solution failed (solver error) + - **2000**: Problem solution failed (message may include the solver reason) - **3003**: Parameter vector has wrong length :return: Error code @@ -38,3 +38,7 @@ def message(self): :rtype: str """ return self.__dict__["__message"] + + def __repr__(self): + """Return a concise one-line representation of the error.""" + return f"SolverError(code={self.code}, 
message={self.message!r})" diff --git a/open-codegen/opengen/tcp/solver_response.py b/python/opengen/tcp/solver_response.py similarity index 59% rename from open-codegen/opengen/tcp/solver_response.py rename to python/opengen/tcp/solver_response.py index 27f0594d..49dcbad9 100644 --- a/open-codegen/opengen/tcp/solver_response.py +++ b/python/opengen/tcp/solver_response.py @@ -3,7 +3,13 @@ class SolverResponse: - """Stores a solver response of type SolverStatus or SolverError.""" + """Stores a solver response of type SolverStatus or SolverError. + + This wrapper is used by both the TCP interface and the direct Python + bindings generated by OpEn. Call :meth:`is_ok` first, then + :meth:`get` to obtain either a :class:`SolverStatus` or a + :class:`SolverError`. + """ def __init__(self, d): """Constructs instance of :class:`~opengen.tcp.solver_response.SolverResponse` @@ -38,4 +44,22 @@ def get(self): return self.__response def __getitem__(self, key): + """Proxy attribute access to the wrapped status or error object.""" return getattr(self.__response, key) + + def __repr__(self): + """Return a concise one-line summary suitable for debugging.""" + if self.is_ok(): + status = self.get() + return ( + "SolverResponse(ok=True, " + f"exit_status={status.exit_status!r}, " + f"num_outer_iterations={status.num_outer_iterations}, " + f"num_inner_iterations={status.num_inner_iterations})" + ) + error = self.get() + return ( + "SolverResponse(ok=False, " + f"code={error.code}, " + f"message={error.message!r})" + ) diff --git a/open-codegen/opengen/tcp/solver_status.py b/python/opengen/tcp/solver_status.py similarity index 100% rename from open-codegen/opengen/tcp/solver_status.py rename to python/opengen/tcp/solver_status.py diff --git a/python/opengen/templates/c/example_cmakelists.txt b/python/opengen/templates/c/example_cmakelists.txt new file mode 100644 index 00000000..7412dae6 --- /dev/null +++ b/python/opengen/templates/c/example_cmakelists.txt @@ -0,0 +1,57 @@ 
+cmake_minimum_required(VERSION 3.10) + +# Project name +project({{meta.optimizer_name}}) + +# Build the generated example as C99. +set(CMAKE_C_STANDARD 99) +set(CMAKE_C_STANDARD_REQUIRED ON) + +# Resolve the generated static library name for the current platform. +if(WIN32) + set(OPEN_STATIC_LIB ${CMAKE_CURRENT_SOURCE_DIR}/target/{{ build_config.build_mode }}/{{meta.optimizer_name}}.lib) +else() + set(OPEN_STATIC_LIB ${CMAKE_CURRENT_SOURCE_DIR}/target/{{ build_config.build_mode }}/lib{{meta.optimizer_name}}.a) +endif() + +find_package(Threads REQUIRED) + +# Add the executable +add_executable(optimizer example_optimizer.c) + +# Add libraries to the executable +target_link_libraries( + optimizer + PRIVATE + ${OPEN_STATIC_LIB} + Threads::Threads +) + +if(UNIX) + target_link_libraries(optimizer PRIVATE m) +endif() + +if(CMAKE_DL_LIBS) + target_link_libraries(optimizer PRIVATE ${CMAKE_DL_LIBS}) +endif() + +if(WIN32) + # Rust static libraries built with the MSVC toolchain depend on a small set + # of Windows system libraries that must be linked by the final C executable. + target_link_libraries( + optimizer + PRIVATE + advapi32 + bcrypt + kernel32 + ntdll + userenv + ws2_32 + ) +endif() + +add_custom_target(run + COMMAND $ + DEPENDS optimizer + WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} +) diff --git a/open-codegen/opengen/templates/c/example_optimizer_c_bindings.c b/python/opengen/templates/c/example_optimizer_c_bindings.c similarity index 62% rename from open-codegen/opengen/templates/c/example_optimizer_c_bindings.c rename to python/opengen/templates/c/example_optimizer_c_bindings.c index 64f3b69a..31cf8e66 100644 --- a/open-codegen/opengen/templates/c/example_optimizer_c_bindings.c +++ b/python/opengen/templates/c/example_optimizer_c_bindings.c @@ -20,17 +20,35 @@ */ #include +#include #include "{{meta.optimizer_name}}_bindings.h" /* * Feel free to customize the following code... 
*/ +static const char *exit_status_to_string({{meta.optimizer_name}}ExitStatus exit_status) { + switch (exit_status) { + case {{meta.optimizer_name}}Converged: + return "Converged"; + case {{meta.optimizer_name}}NotConvergedIterations: + return "NotConvergedIterations"; + case {{meta.optimizer_name}}NotConvergedOutOfTime: + return "NotConvergedOutOfTime"; + case {{meta.optimizer_name}}NotConvergedCost: + return "NotConvergedCost"; + case {{meta.optimizer_name}}NotConvergedNotFiniteComputation: + return "NotConvergedNotFiniteComputation"; + default: + return "Unknown"; + } +} + int main(void) { int i; /* parameters */ - double p[{{meta.optimizer_name|upper}}_NUM_PARAMETERS] = {2.0, 10.0}; + double p[{{meta.optimizer_name|upper}}_NUM_PARAMETERS] = {0}; /* initial guess */ double u[{{meta.optimizer_name|upper}}_NUM_DECISION_VARIABLES] = {0}; @@ -45,10 +63,39 @@ int main(void) { /* obtain cache */ {{meta.optimizer_name}}Cache *cache = {{meta.optimizer_name}}_new(); + if (cache == NULL) { + fprintf(stderr, "Could not allocate solver cache\n"); + return EXIT_FAILURE; + } /* solve */ {{meta.optimizer_name}}SolverStatus status = {{meta.optimizer_name}}_solve(cache, u, p, {% if problem.dim_constraints_aug_lagrangian() > 0 %}y{% else %}0{% endif %}, &init_penalty); + printf("\n\n-------------------------------------------------\n"); + printf(" Solver Statistics\n"); + printf("-------------------------------------------------\n"); + printf("exit status : %d (%s)\n", status.exit_status, exit_status_to_string(status.exit_status)); + printf("error code : %d\n", status.error_code); + printf("error message : %s\n", status.error_message); + printf("iterations : %lu\n", status.num_inner_iterations); + printf("outer iterations : %lu\n", status.num_outer_iterations); + printf("solve time : %f ms\n", (double)status.solve_time_ns / 1000000.0); + printf("penalty : %f\n", status.penalty); + printf("||Dy||/c : %f\n", status.delta_y_norm_over_c); + printf("||F2(u)|| : %f\n", 
status.f2_norm); + printf("Cost : %f\n", status.cost); + printf("||FRP|| : %f\n\n", status.last_problem_norm_fpr); + + if (status.error_code != 0) { + fprintf(stderr, "Solver returned an error; solution vector is not printed.\n"); + {{meta.optimizer_name}}_free(cache); + return EXIT_FAILURE; + } + + if (status.exit_status != {{meta.optimizer_name}}Converged) { + fprintf(stderr, "Warning: solver did not converge, printing best available iterate.\n"); + } + /* print results */ printf("\n\n-------------------------------------------------\n"); printf(" Solution\n"); @@ -63,24 +110,10 @@ int main(void) { printf("y[%d] = %g\n", i, status.lagrange[i]); } - printf("\n\n-------------------------------------------------\n"); - printf(" Solver Statistics\n"); - printf("-------------------------------------------------\n"); - printf("exit status : %d\n", status.exit_status); - printf("iterations : %lu\n", status.num_inner_iterations); - printf("outer iterations : %lu\n", status.num_outer_iterations); - printf("solve time : %f ms\n", (double)status.solve_time_ns / 1000000.0); - printf("penalty : %f\n", status.penalty); - printf("||Dy||/c : %f\n", status.delta_y_norm_over_c); - printf("||F2(u)|| : %f\n", status.f2_norm); - printf("Cost : %f\n", status.cost); - printf("||FRP|| : %f\n\n", status.last_problem_norm_fpr); - /* free memory */ {{meta.optimizer_name}}_free(cache); - return 0; + return EXIT_SUCCESS; } - diff --git a/open-codegen/opengen/templates/c/optimizer_cinterface.rs.jinja b/python/opengen/templates/c/optimizer_cinterface.rs.jinja similarity index 81% rename from open-codegen/opengen/templates/c/optimizer_cinterface.rs.jinja rename to python/opengen/templates/c/optimizer_cinterface.rs.jinja index 039dfe0f..db2dec64 100644 --- a/open-codegen/opengen/templates/c/optimizer_cinterface.rs.jinja +++ b/python/opengen/templates/c/optimizer_cinterface.rs.jinja @@ -1,5 +1,6 @@ {% if activate_clib_generation -%} -// ---Export functionality from Rust to 
C/C++------------------------------------------------------------ +{% set error_message_capacity = 1024 -%} +// ---Export functionality from Rust to C/C++ -------------- /// Solver cache (structure `{{meta.optimizer_name}}Cache`) /// @@ -8,12 +9,31 @@ pub struct {{meta.optimizer_name}}Cache { cache: AlmCache, } +const {{meta.optimizer_name|upper}}_NO_ERROR_CODE: c_int = 0; +const {{meta.optimizer_name|upper}}_SOLVER_ERROR_CODE: c_int = 2000; +const {{meta.optimizer_name|upper}}_ERROR_MESSAGE_CAPACITY: usize = {{ error_message_capacity }}; + impl {{meta.optimizer_name}}Cache { pub fn new(cache: AlmCache) -> Self { {{meta.optimizer_name}}Cache { cache } } } +fn empty_error_message() -> [c_char; {{ error_message_capacity }}] { + [0 as c_char; {{ error_message_capacity }}] +} + +fn error_message_to_c_array( + message: &str, +) -> [c_char; {{ error_message_capacity }}] { + let mut buffer = empty_error_message(); + let max_len = {{meta.optimizer_name|upper}}_ERROR_MESSAGE_CAPACITY - 1; + for (idx, byte) in message.as_bytes().iter().copied().take(max_len).enumerate() { + buffer[idx] = byte as c_char; + } + buffer +} + /// {{meta.optimizer_name}} version of ExitStatus /// Structure: `{{meta.optimizer_name}}ExitStatus` #[allow(non_camel_case_types)] @@ -41,6 +61,10 @@ pub enum {{meta.optimizer_name}}ExitStatus { pub struct {{meta.optimizer_name}}SolverStatus { /// Exit status exit_status: {{meta.optimizer_name}}ExitStatus, + /// Detailed error code (0 on success) + error_code: c_int, + /// Detailed error message (empty string on success) + error_message: [c_char; {{ error_message_capacity }}], /// Number of outer iterations num_outer_iterations: c_ulong, /// Total number of inner iterations @@ -150,6 +174,8 @@ pub unsafe extern "C" fn {{meta.optimizer_name|lower}}_solve( core::ExitStatus::NotConvergedIterations => {{meta.optimizer_name}}ExitStatus::{{meta.optimizer_name}}NotConvergedIterations, core::ExitStatus::NotConvergedOutOfTime => 
{{meta.optimizer_name}}ExitStatus::{{meta.optimizer_name}}NotConvergedOutOfTime, }, + error_code: {{meta.optimizer_name|upper}}_NO_ERROR_CODE, + error_message: empty_error_message(), num_outer_iterations: status.num_outer_iterations() as c_ulong, num_inner_iterations: status.num_inner_iterations() as c_ulong, last_problem_norm_fpr: status.last_problem_norm_fpr(), @@ -177,11 +203,18 @@ pub unsafe extern "C" fn {{meta.optimizer_name|lower}}_solve( } } }, - Err(e) => {{meta.optimizer_name}}SolverStatus { + Err(e) => { + let error_message = format!("problem solution failed: {}", e); + {{meta.optimizer_name}}SolverStatus { exit_status: match e { - SolverError::Cost => {{meta.optimizer_name}}ExitStatus::{{meta.optimizer_name}}NotConvergedCost, - SolverError::NotFiniteComputation => {{meta.optimizer_name}}ExitStatus::{{meta.optimizer_name}}NotConvergedNotFiniteComputation, + SolverError::Cost(_) + | SolverError::ProjectionFailed(_) + | SolverError::LinearAlgebraFailure(_) + | SolverError::InvalidProblemState(_) => {{meta.optimizer_name}}ExitStatus::{{meta.optimizer_name}}NotConvergedCost, + SolverError::NotFiniteComputation(_) => {{meta.optimizer_name}}ExitStatus::{{meta.optimizer_name}}NotConvergedNotFiniteComputation, }, + error_code: {{meta.optimizer_name|upper}}_SOLVER_ERROR_CODE, + error_message: error_message_to_c_array(&error_message), num_outer_iterations: u64::MAX as c_ulong, num_inner_iterations: u64::MAX as c_ulong, last_problem_norm_fpr: f64::INFINITY, @@ -193,6 +226,7 @@ pub unsafe extern "C" fn {{meta.optimizer_name|lower}}_solve( lagrange: {%- if problem.dim_constraints_aug_lagrangian() > 0 -%} [0.0; {{meta.optimizer_name|upper}}_N1] {%- else -%}std::ptr::null::(){%- endif %} + } }, } } @@ -209,4 +243,4 @@ pub unsafe extern "C" fn {{meta.optimizer_name|lower}}_free(instance: *mut {{met assert!(!instance.is_null()); drop(Box::from_raw(instance)); } -{% endif %} \ No newline at end of file +{% endif %} diff --git 
a/open-codegen/opengen/templates/cargo_config.toml b/python/opengen/templates/cargo_config.toml similarity index 100% rename from open-codegen/opengen/templates/cargo_config.toml rename to python/opengen/templates/cargo_config.toml diff --git a/open-codegen/opengen/templates/icasadi/casadi_memory.h b/python/opengen/templates/icasadi/casadi_memory.h similarity index 100% rename from open-codegen/opengen/templates/icasadi/casadi_memory.h rename to python/opengen/templates/icasadi/casadi_memory.h diff --git a/open-codegen/opengen/templates/icasadi/icasadi_cargo.toml b/python/opengen/templates/icasadi/icasadi_cargo.toml similarity index 100% rename from open-codegen/opengen/templates/icasadi/icasadi_cargo.toml rename to python/opengen/templates/icasadi/icasadi_cargo.toml diff --git a/open-codegen/opengen/templates/icasadi/icasadi_lib.rs b/python/opengen/templates/icasadi/icasadi_lib.rs similarity index 100% rename from open-codegen/opengen/templates/icasadi/icasadi_lib.rs rename to python/opengen/templates/icasadi/icasadi_lib.rs diff --git a/open-codegen/opengen/templates/icasadi/interface.c b/python/opengen/templates/icasadi/interface.c similarity index 100% rename from open-codegen/opengen/templates/icasadi/interface.c rename to python/opengen/templates/icasadi/interface.c diff --git a/open-codegen/opengen/templates/optimizer.rs.jinja b/python/opengen/templates/optimizer.rs.jinja similarity index 99% rename from open-codegen/opengen/templates/optimizer.rs.jinja rename to python/opengen/templates/optimizer.rs.jinja index 3e8aad58..c0433268 100644 --- a/open-codegen/opengen/templates/optimizer.rs.jinja +++ b/python/opengen/templates/optimizer.rs.jinja @@ -5,7 +5,7 @@ // {% if activate_clib_generation -%} -use libc::{c_double, c_ulong, c_ulonglong}; +use libc::{c_char, c_double, c_int, c_ulong, c_ulonglong}; {% endif %} use optimization_engine::{constraints::*, panoc::*, alm::*, *}; diff --git a/open-codegen/opengen/templates/optimizer_build.rs.jinja 
b/python/opengen/templates/optimizer_build.rs.jinja similarity index 100% rename from open-codegen/opengen/templates/optimizer_build.rs.jinja rename to python/opengen/templates/optimizer_build.rs.jinja diff --git a/open-codegen/opengen/templates/optimizer_cargo.toml.jinja b/python/opengen/templates/optimizer_cargo.toml.jinja similarity index 100% rename from open-codegen/opengen/templates/optimizer_cargo.toml.jinja rename to python/opengen/templates/optimizer_cargo.toml.jinja diff --git a/python/opengen/templates/python/python_bindings.rs b/python/opengen/templates/python/python_bindings.rs new file mode 100644 index 00000000..9f843c7e --- /dev/null +++ b/python/opengen/templates/python/python_bindings.rs @@ -0,0 +1,301 @@ +/// +/// Auto-generated python bindings for optimizer: {{ meta.optimizer_name }} +/// +use optimization_engine::alm::*; + +use pyo3::class::basic::PyObjectProtocol; +use pyo3::prelude::*; +use pyo3::wrap_pyfunction; + +use {{ meta.optimizer_name }}::*; + +#[pymodule] +fn {{ meta.optimizer_name }}(_py: Python, m: &PyModule) -> PyResult<()> { + m.add_function(wrap_pyfunction!(solver, m)?)?; + m.add_class::()?; + m.add_class::()?; + m.add_class::()?; + m.add_class::()?; + m.add("OptimizerSolution", m.getattr("SolverStatus")?)?; + Ok(()) +} + +#[pyfunction] +fn solver() -> PyResult { + let cache = initialize_solver(); + Ok(Solver { cache }) +} + +#[derive(Clone)] +struct SolverStatusData { + exit_status: String, + num_outer_iterations: usize, + num_inner_iterations: usize, + last_problem_norm_fpr: f64, + f1_infeasibility: f64, + f2_norm: f64, + solve_time_ms: f64, + penalty: f64, + solution: Vec, + lagrange_multipliers: Vec, + cost: f64, +} + +impl SolverStatusData { + fn from_status(status: AlmOptimizerStatus, solution: &[f64]) -> Self { + SolverStatusData { + exit_status: format!("{:?}", status.exit_status()), + num_outer_iterations: status.num_outer_iterations(), + num_inner_iterations: status.num_inner_iterations(), + last_problem_norm_fpr: 
status.last_problem_norm_fpr(), + f1_infeasibility: status.delta_y_norm_over_c(), + f2_norm: status.f2_norm(), + penalty: status.penalty(), + lagrange_multipliers: status.lagrange_multipliers().clone().unwrap_or_default(), + solve_time_ms: (status.solve_time().as_nanos() as f64) / 1e6, + solution: solution.to_vec(), + cost: status.cost(), + } + } +} + +#[derive(Clone)] +struct SolverErrorData { + code: i32, + message: String, +} + +enum SolverResponsePayload { + Ok(SolverStatusData), + Err(SolverErrorData), +} + +/// Solution and solution status of optimizer +#[pyclass] +struct SolverStatus { + #[pyo3(get)] + exit_status: String, + #[pyo3(get)] + num_outer_iterations: usize, + #[pyo3(get)] + num_inner_iterations: usize, + #[pyo3(get)] + last_problem_norm_fpr: f64, + #[pyo3(get)] + f1_infeasibility: f64, + #[pyo3(get)] + f2_norm: f64, + #[pyo3(get)] + solve_time_ms: f64, + #[pyo3(get)] + penalty: f64, + #[pyo3(get)] + solution: Vec, + #[pyo3(get)] + lagrange_multipliers: Vec, + #[pyo3(get)] + cost: f64, +} + +impl From for SolverStatus { + fn from(status: SolverStatusData) -> Self { + SolverStatus { + exit_status: status.exit_status, + num_outer_iterations: status.num_outer_iterations, + num_inner_iterations: status.num_inner_iterations, + last_problem_norm_fpr: status.last_problem_norm_fpr, + f1_infeasibility: status.f1_infeasibility, + f2_norm: status.f2_norm, + solve_time_ms: status.solve_time_ms, + penalty: status.penalty, + solution: status.solution, + lagrange_multipliers: status.lagrange_multipliers, + cost: status.cost, + } + } +} + +#[pyproto] +impl PyObjectProtocol for SolverStatus { + fn __repr__(&self) -> PyResult { + Ok(format!( + "SolverStatus(exit_status={:?}, num_outer_iterations={}, num_inner_iterations={}, last_problem_norm_fpr={}, f1_infeasibility={}, f2_norm={}, solve_time_ms={}, penalty={}, cost={})", + self.exit_status, + self.num_outer_iterations, + self.num_inner_iterations, + self.last_problem_norm_fpr, + self.f1_infeasibility, + 
self.f2_norm, + self.solve_time_ms, + self.penalty, + self.cost + )) + } +} + +#[pyclass] +struct SolverError { + #[pyo3(get)] + code: i32, + #[pyo3(get)] + message: String, +} + +impl From for SolverError { + fn from(error: SolverErrorData) -> Self { + SolverError { + code: error.code, + message: error.message, + } + } +} + +#[pyproto] +impl PyObjectProtocol for SolverError { + fn __repr__(&self) -> PyResult { + Ok(format!( + "SolverError(code={}, message={:?})", + self.code, + self.message + )) + } +} + +#[pyclass] +struct SolverResponse { + payload: SolverResponsePayload, +} + +#[pyproto] +impl PyObjectProtocol for SolverResponse { + fn __repr__(&self) -> PyResult { + match &self.payload { + SolverResponsePayload::Ok(status) => Ok(format!( + "SolverResponse(ok=True, exit_status={:?}, num_outer_iterations={}, num_inner_iterations={})", + status.exit_status, + status.num_outer_iterations, + status.num_inner_iterations + )), + SolverResponsePayload::Err(error) => Ok(format!( + "SolverResponse(ok=False, code={}, message={:?})", + error.code, + error.message + )), + } + } +} + +#[pymethods] +impl SolverResponse { + fn is_ok(&self) -> bool { + matches!(self.payload, SolverResponsePayload::Ok(_)) + } + + fn get(&self, py: Python<'_>) -> PyResult { + match &self.payload { + SolverResponsePayload::Ok(status) => { + Ok(Py::new(py, SolverStatus::from(status.clone()))?.into_py(py)) + } + SolverResponsePayload::Err(error) => { + Ok(Py::new(py, SolverError::from(error.clone()))?.into_py(py)) + } + } + } +} + +#[pyclass] +struct Solver { + cache: AlmCache, +} + +#[pymethods] +impl Solver { + /// Run solver + /// + #[text_signature = "($self, p, initial_guess, initial_y, initial_penalty)"] + fn run( + &mut self, + p: Vec, + initial_guess: Option>, + initial_lagrange_multipliers: Option>, + initial_penalty: Option, + ) -> PyResult { + let mut u = [0.0; {{meta.optimizer_name|upper}}_NUM_DECISION_VARIABLES]; + + // ---------------------------------------------------- + // Set 
initial value + // ---------------------------------------------------- + if let Some(u0) = initial_guess { + if u0.len() != {{meta.optimizer_name|upper}}_NUM_DECISION_VARIABLES { + return Ok(SolverResponse { + payload: SolverResponsePayload::Err(SolverErrorData { + code: 1600, + message: format!( + "initial guess has incompatible dimensions: provided {}, expected {}", + u0.len(), + {{meta.optimizer_name|upper}}_NUM_DECISION_VARIABLES + ), + }), + }); + } + u.copy_from_slice(&u0); + } + + // ---------------------------------------------------- + // Check lagrange multipliers + // ---------------------------------------------------- + if let Some(y0) = &initial_lagrange_multipliers { + if y0.len() != {{meta.optimizer_name|upper}}_N1 { + return Ok(SolverResponse { + payload: SolverResponsePayload::Err(SolverErrorData { + code: 1700, + message: format!( + "wrong dimension of Langrange multipliers: provided {}, expected {}", + y0.len(), + {{meta.optimizer_name|upper}}_N1 + ), + }), + }); + } + } + + // ---------------------------------------------------- + // Check parameter + // ---------------------------------------------------- + if p.len() != {{meta.optimizer_name|upper}}_NUM_PARAMETERS { + return Ok(SolverResponse { + payload: SolverResponsePayload::Err(SolverErrorData { + code: 3003, + message: format!( + "wrong number of parameters: provided {}, expected {}", + p.len(), + {{meta.optimizer_name|upper}}_NUM_PARAMETERS + ), + }), + }); + } + + // ---------------------------------------------------- + // Run solver + // ---------------------------------------------------- + let solver_status = solve( + &p, + &mut self.cache, + &mut u, + &initial_lagrange_multipliers, + &initial_penalty, + ); + + match solver_status { + Ok(status) => Ok(SolverResponse { + payload: SolverResponsePayload::Ok(SolverStatusData::from_status(status, &u)), + }), + Err(err) => Ok(SolverResponse { + payload: SolverResponsePayload::Err(SolverErrorData { + code: 2000, + message: 
format!("problem solution failed: {}", err), + }), + }), + } + } +} diff --git a/open-codegen/opengen/templates/python/python_bindings_cargo.toml b/python/opengen/templates/python/python_bindings_cargo.toml similarity index 100% rename from open-codegen/opengen/templates/python/python_bindings_cargo.toml rename to python/opengen/templates/python/python_bindings_cargo.toml diff --git a/open-codegen/opengen/templates/ros/CMakeLists.txt b/python/opengen/templates/ros/CMakeLists.txt similarity index 70% rename from open-codegen/opengen/templates/ros/CMakeLists.txt rename to python/opengen/templates/ros/CMakeLists.txt index e66a5aee..c9a7f1f7 100644 --- a/open-codegen/opengen/templates/ros/CMakeLists.txt +++ b/python/opengen/templates/ros/CMakeLists.txt @@ -29,13 +29,24 @@ include_directories( set(NODE_NAME {{ros.node_name}}) add_executable(${NODE_NAME} src/open_optimizer.cpp) +if(WIN32) + set(OPEN_STATIC_LIB ${PROJECT_SOURCE_DIR}/extern_lib/{{meta.optimizer_name}}.lib) +else() + set(OPEN_STATIC_LIB ${PROJECT_SOURCE_DIR}/extern_lib/lib{{meta.optimizer_name}}.a) +endif() target_link_libraries( ${NODE_NAME} - ${PROJECT_SOURCE_DIR}/extern_lib/lib{{meta.optimizer_name}}.a) + ${OPEN_STATIC_LIB}) +if(WIN32) + target_link_libraries( + ${NODE_NAME} + ${catkin_LIBRARIES}) +else() target_link_libraries( ${NODE_NAME} m dl ${catkin_LIBRARIES}) +endif() add_dependencies( ${NODE_NAME} ${${PROJECT_NAME}_EXPORTED_TARGETS} diff --git a/open-codegen/opengen/templates/ros/OptimizationParameters.msg b/python/opengen/templates/ros/OptimizationParameters.msg similarity index 70% rename from open-codegen/opengen/templates/ros/OptimizationParameters.msg rename to python/opengen/templates/ros/OptimizationParameters.msg index 870d2981..1aa37ee4 100644 --- a/open-codegen/opengen/templates/ros/OptimizationParameters.msg +++ b/python/opengen/templates/ros/OptimizationParameters.msg @@ -1,4 +1,4 @@ float64[] parameter # parameter p (mandatory) float64[] initial_guess # u0 (optional/recommended) 
float64[] initial_y # y0 (optional) -float64 initial_penalty # initial penalty (optional) +float64 initial_penalty # positive initial penalty (optional) diff --git a/open-codegen/opengen/templates/ros/OptimizationResult.msg b/python/opengen/templates/ros/OptimizationResult.msg similarity index 85% rename from open-codegen/opengen/templates/ros/OptimizationResult.msg rename to python/opengen/templates/ros/OptimizationResult.msg index abf646cb..9ec53e41 100644 --- a/open-codegen/opengen/templates/ros/OptimizationResult.msg +++ b/python/opengen/templates/ros/OptimizationResult.msg @@ -6,8 +6,8 @@ uint8 STATUS_NOT_CONVERGED_COST=3 uint8 STATUS_NOT_CONVERGED_FINITE_COMPUTATION=4 float64[] solution # optimizer (solution) -uint8 inner_iterations # number of inner iterations -uint16 outer_iterations # number of outer iterations +uint64 inner_iterations # number of inner iterations +uint64 outer_iterations # number of outer iterations uint8 status # status code float64 cost # cost at solution float64 norm_fpr # norm of FPR of last inner problem @@ -16,4 +16,3 @@ float64[] lagrange_multipliers # vector of Lagrange multipliers float64 infeasibility_f1 # infeasibility wrt F1 float64 infeasibility_f2 # infeasibility wrt F2 float64 solve_time_ms # solution time in ms - diff --git a/open-codegen/opengen/templates/ros/README.md b/python/opengen/templates/ros/README.md similarity index 100% rename from open-codegen/opengen/templates/ros/README.md rename to python/opengen/templates/ros/README.md diff --git a/open-codegen/opengen/templates/ros/open_optimizer.cpp b/python/opengen/templates/ros/open_optimizer.cpp similarity index 97% rename from open-codegen/opengen/templates/ros/open_optimizer.cpp rename to python/opengen/templates/ros/open_optimizer.cpp index ad9b4f1b..2dfaa3bb 100644 --- a/open-codegen/opengen/templates/ros/open_optimizer.cpp +++ b/python/opengen/templates/ros/open_optimizer.cpp @@ -1,9 +1,12 @@ /** * This is an auto-generated file by Optimization Engine (OpEn) - * 
OpEn is a free open-source software - see doc.optimization-engine.xyz + * OpEn is a free open-source software - + * see https://alphaville.github.io/optimization-engine * dually licensed under the MIT and Apache v2 licences. * */ +#include + #include "ros/ros.h" #include "{{ros.package_name}}/OptimizationResult.h" #include "{{ros.package_name}}/OptimizationParameters.h" @@ -69,7 +72,7 @@ class OptimizationEngineManager { */ void updateInputData() { - init_penalty = (params.initial_penalty > 1.0) + init_penalty = (params.initial_penalty > std::numeric_limits::epsilon()) ? params.initial_penalty : ROS_NODE_{{meta.optimizer_name|upper}}_DEFAULT_INITIAL_PENALTY; diff --git a/open-codegen/opengen/templates/ros/open_optimizer.hpp b/python/opengen/templates/ros/open_optimizer.hpp similarity index 100% rename from open-codegen/opengen/templates/ros/open_optimizer.hpp rename to python/opengen/templates/ros/open_optimizer.hpp diff --git a/open-codegen/opengen/templates/ros/open_optimizer.launch b/python/opengen/templates/ros/open_optimizer.launch similarity index 100% rename from open-codegen/opengen/templates/ros/open_optimizer.launch rename to python/opengen/templates/ros/open_optimizer.launch diff --git a/open-codegen/opengen/templates/ros/open_params.yaml b/python/opengen/templates/ros/open_params.yaml similarity index 100% rename from open-codegen/opengen/templates/ros/open_params.yaml rename to python/opengen/templates/ros/open_params.yaml diff --git a/open-codegen/opengen/templates/ros/package.xml b/python/opengen/templates/ros/package.xml similarity index 100% rename from open-codegen/opengen/templates/ros/package.xml rename to python/opengen/templates/ros/package.xml diff --git a/python/opengen/templates/ros2/CMakeLists.txt b/python/opengen/templates/ros2/CMakeLists.txt new file mode 100644 index 00000000..167e310a --- /dev/null +++ b/python/opengen/templates/ros2/CMakeLists.txt @@ -0,0 +1,75 @@ +cmake_minimum_required(VERSION 3.8) +project({{ros.package_name}}) + 
+if(CMAKE_COMPILER_IS_GNUCXX OR CMAKE_CXX_COMPILER_ID MATCHES "Clang") + add_compile_options(-Wall -Wextra -Wpedantic) +endif() + +find_package(ament_cmake REQUIRED) +find_package(rclcpp REQUIRED) + +# tells CMake's FindPython3 to prefer a venv if one is active +# (instead of the system-wide python) +set(Python3_FIND_VIRTUALENV FIRST) +if(NOT Python3_EXECUTABLE AND DEFINED ENV{VIRTUAL_ENV}) + set(_open_python3_executable "$ENV{VIRTUAL_ENV}/bin/python") + if(EXISTS "${_open_python3_executable}") + set(Python3_EXECUTABLE "${_open_python3_executable}") + endif() +endif() +find_package(Python3 REQUIRED COMPONENTS Interpreter Development NumPy) +set(Python_EXECUTABLE ${Python3_EXECUTABLE}) +set(Python_INCLUDE_DIRS ${Python3_INCLUDE_DIRS}) +set(Python_LIBRARIES ${Python3_LIBRARIES}) +set(Python_NumPy_INCLUDE_DIRS ${Python3_NumPy_INCLUDE_DIRS}) +find_package(rosidl_default_generators REQUIRED) + +set(msg_files + "msg/OptimizationResult.msg" + "msg/OptimizationParameters.msg" +) + +rosidl_generate_interfaces(${PROJECT_NAME} + ${msg_files} +) + +ament_export_dependencies(rosidl_default_runtime) + +include_directories( + ${PROJECT_SOURCE_DIR}/include +) + +set(NODE_NAME {{ros.node_name}}) +add_executable(${NODE_NAME} src/open_optimizer.cpp) +ament_target_dependencies(${NODE_NAME} rclcpp) +if(WIN32) + set(OPEN_STATIC_LIB ${PROJECT_SOURCE_DIR}/extern_lib/{{meta.optimizer_name}}.lib) +else() + set(OPEN_STATIC_LIB ${PROJECT_SOURCE_DIR}/extern_lib/lib{{meta.optimizer_name}}.a) +endif() +target_link_libraries( + ${NODE_NAME} + ${OPEN_STATIC_LIB} +) +if(NOT WIN32) + target_link_libraries( + ${NODE_NAME} + m + dl + ) +endif() +rosidl_get_typesupport_target(cpp_typesupport_target ${PROJECT_NAME} "rosidl_typesupport_cpp") +target_link_libraries(${NODE_NAME} "${cpp_typesupport_target}") + +install(TARGETS + ${NODE_NAME} + DESTINATION lib/${PROJECT_NAME} +) + +install(DIRECTORY + config + launch + DESTINATION share/${PROJECT_NAME} +) + +ament_package() diff --git 
a/python/opengen/templates/ros2/OptimizationParameters.msg b/python/opengen/templates/ros2/OptimizationParameters.msg new file mode 100644 index 00000000..1aa37ee4 --- /dev/null +++ b/python/opengen/templates/ros2/OptimizationParameters.msg @@ -0,0 +1,4 @@ +float64[] parameter # parameter p (mandatory) +float64[] initial_guess # u0 (optional/recommended) +float64[] initial_y # y0 (optional) +float64 initial_penalty # positive initial penalty (optional) diff --git a/python/opengen/templates/ros2/OptimizationResult.msg b/python/opengen/templates/ros2/OptimizationResult.msg new file mode 100644 index 00000000..bc1f49f8 --- /dev/null +++ b/python/opengen/templates/ros2/OptimizationResult.msg @@ -0,0 +1,21 @@ +# Constants match the enumeration of status codes +uint8 STATUS_CONVERGED=0 +uint8 STATUS_NOT_CONVERGED_ITERATIONS=1 +uint8 STATUS_NOT_CONVERGED_OUT_OF_TIME=2 +uint8 STATUS_NOT_CONVERGED_COST=3 +uint8 STATUS_NOT_CONVERGED_FINITE_COMPUTATION=4 +uint8 STATUS_INVALID_REQUEST=5 + +float64[] solution # optimizer (solution) +uint64 inner_iterations # number of inner iterations +uint64 outer_iterations # number of outer iterations +uint8 status # coarse status code +uint16 error_code # detailed error code (0 on success) +string error_message # detailed error message (empty on success) +float64 cost # cost at solution +float64 norm_fpr # norm of FPR of last inner problem +float64 penalty # penalty value +float64[] lagrange_multipliers # vector of Lagrange multipliers +float64 infeasibility_f1 # infeasibility wrt F1 +float64 infeasibility_f2 # infeasibility wrt F2 +float64 solve_time_ms # solution time in ms diff --git a/python/opengen/templates/ros2/README.md b/python/opengen/templates/ros2/README.md new file mode 100644 index 00000000..2bf80caf --- /dev/null +++ b/python/opengen/templates/ros2/README.md @@ -0,0 +1,174 @@ +# ROS2 Package: {{ros.package_name}} + + +## Installation and Setup + +Move or link the auto-generated ROS2 package (folder `{{ros.package_name}}`) to 
your workspace source tree (typically `~/ros2_ws/src/`). + +From within the folder `{{ros.package_name}}`, compile with: + +```bash +colcon build --packages-select {{ros.package_name}} +source install/setup.bash +# or source install/setup.zsh if you are using zsh +``` + +If you want to activate logging (recommended), do + +```bash +mkdir -p .ros_log +export ROS_LOG_DIR="$PWD/.ros_log" +``` + + +## Launch and Use + +Start the optimizer in one terminal. The process stays in the foreground while +the node is running. + +```bash +# Terminal 1 +source install/setup.bash +# or: source install/setup.zsh +ros2 run {{ros.package_name}} {{ros.node_name}} +``` + +If ROS2 cannot write to its default log directory, set an explicit writable log +path: + +```bash +mkdir -p .ros_log +export ROS_LOG_DIR="$PWD/.ros_log" +``` + +If the node starts but does not appear in the ROS2 graph, try forcing Fast DDS +in both terminals before sourcing the generated workspace and running any +`ros2` commands: + +```bash +export RMW_IMPLEMENTATION=rmw_fastrtps_cpp +``` + +In a second terminal, source the same environment and verify discovery: + +```bash +# Terminal 2 +source install/setup.bash +# or: source install/setup.zsh +ros2 node list --no-daemon --spin-time 5 +ros2 topic list --no-daemon --spin-time 5 +``` + +You should see the node `/{{ros.node_name}}`, the input topic +`/{{ros.subscriber_subtopic}}`, and the output topic +`/{{ros.publisher_subtopic}}`. + +Then publish a request to the configured parameters topic +(default: `/{{ros.subscriber_subtopic}}`): + +```bash +ros2 topic pub --once /{{ros.subscriber_subtopic}} {{ros.package_name}}/msg/OptimizationParameters "{parameter: [YOUR_PARAMETER_VECTOR], initial_guess: [INITIAL_GUESS_OPTIONAL], initial_y: [], initial_penalty: 15.0}" +``` + +If `initial_guess` is omitted or left empty, the node reuses the previous +solution as a warm start. Likewise, an empty `initial_y` means "reuse the +previous Lagrange multipliers". 
`initial_penalty` is applied whenever it is +strictly greater than a small positive epsilon; otherwise the generated default +penalty is used. + +The result will be announced on the configured result topic +(default: `/{{ros.publisher_subtopic}}`): + +```bash +ros2 topic echo /{{ros.publisher_subtopic}} --once +``` + +Each request produces exactly one response message. The node does not keep +republishing stale results on later timer ticks. + +To get the optimal solution you can do: + +```bash +ros2 topic echo /{{ros.publisher_subtopic}} --field solution +``` + +You can also start the node using the generated launch file: + +```bash +ros2 launch {{ros.package_name}} open_optimizer.launch.py +``` + +The launch file loads its runtime parameters from +[`config/open_params.yaml`](config/open_params.yaml). + + +## Messages + +This package involves two messages: `OptimizationParameters` +and `OptimizationResult`, which are used to define the input +and output values to the node. `OptimizationParameters` specifies +the parameter vector, the initial guess (optional), the initial +guess for the vector of Lagrange multipliers and the initial value +of the penalty value. `OptimizationResult` is a message containing +all information related to the solution of the optimization +problem, including the optimal solution, the solver status, +solution time, Lagrange multiplier vector and more. The ROS2 +result message also includes `error_code` and `error_message` +fields so invalid requests and solver failures can be diagnosed +without inspecting logs. + +A successful response contains `status: 0`, `error_code: 0`, and an empty +`error_message`. If a request is invalid, the node publishes +`status: 5` (`STATUS_INVALID_REQUEST`) and populates `error_code` and +`error_message` with a more detailed explanation. 
+ +For example, if the parameter vector has the wrong length, the node will +return a response like: + +```yaml +status: 5 +error_code: 3003 +error_message: 'wrong number of parameters: provided 1, expected ' +``` + +Similarly, invalid warm-start data is reported with: + +- `error_code: 1600` for an incompatible `initial_guess` +- `error_code: 1700` for incompatible `initial_y` +- `error_code: 2000` for solver-side failures propagated from the generated bindings + +The message structures are defined in the following msg files: + +- [`OptimizationParameters.msg`](msg/OptimizationParameters.msg) +- [`OptimizationResult.msg`](msg/OptimizationResult.msg) + + +## Configure + +You can configure the rate and topic names by editing +[`config/open_params.yaml`](config/open_params.yaml). + + +## Directory structure and contents + +The following auto-generated files are included in your ROS2 package: + +```txt +├── CMakeLists.txt +├── config +│   └── open_params.yaml +├── extern_lib +│   └── librosenbrock.a +├── include +│   ├── open_optimizer.hpp +│   └── rosenbrock_bindings.hpp +├── launch +│   └── open_optimizer.launch.py +├── msg +│   ├── OptimizationParameters.msg +│   └── OptimizationResult.msg +├── package.xml +├── README.md +└── src + └── open_optimizer.cpp +``` diff --git a/python/opengen/templates/ros2/open_optimizer.cpp b/python/opengen/templates/ros2/open_optimizer.cpp new file mode 100644 index 00000000..58e0efd8 --- /dev/null +++ b/python/opengen/templates/ros2/open_optimizer.cpp @@ -0,0 +1,320 @@ +/** + * This is an auto-generated file by Optimization Engine (OpEn) + * OpEn is a free open-source software - see doc.optimization-engine.xyz + * dually licensed under the MIT and Apache v2 licences. 
+ * + */ +#include +#include +#include +#include +#include +#include +#include +#include + +#include "rclcpp/rclcpp.hpp" +#include "{{ros.package_name}}/msg/optimization_parameters.hpp" +#include "{{ros.package_name}}/msg/optimization_result.hpp" +#include "{{meta.optimizer_name}}_bindings.hpp" +#include "open_optimizer.hpp" + +namespace {{ros.package_name}} { +/** + * ROS2 node that wraps the generated OpEn solver. + * + * The node subscribes to `OptimizationParameters`, validates and copies the + * incoming request into the native solver buffers, invokes the generated C + * bindings, and publishes one `OptimizationResult` message for each request. + */ +class OptimizationEngineNode : public rclcpp::Node { +private: + using OptimizationParametersMsg = {{ros.package_name}}::msg::OptimizationParameters; + using OptimizationResultMsg = {{ros.package_name}}::msg::OptimizationResult; + + OptimizationParametersMsg params_; + OptimizationResultMsg results_; + bool has_received_request_ = false; + double p_[{{meta.optimizer_name|upper}}_NUM_PARAMETERS] = { 0 }; + double u_[{{meta.optimizer_name|upper}}_NUM_DECISION_VARIABLES] = { 0 }; + double* y_ = nullptr; + {{meta.optimizer_name}}Cache* cache_ = nullptr; + double init_penalty_ = ROS2_NODE_{{meta.optimizer_name|upper}}_DEFAULT_INITIAL_PENALTY; + + rclcpp::Publisher::SharedPtr publisher_; + rclcpp::Subscription::SharedPtr subscriber_; + rclcpp::TimerBase::SharedPtr timer_; + + static constexpr uint16_t kInvalidInitialGuessErrorCode = 1600; + static constexpr uint16_t kInvalidInitialYErrorCode = 1700; + static constexpr uint16_t kInvalidParameterErrorCode = 3003; + static constexpr double kInitialPenaltyEpsilon = std::numeric_limits::epsilon(); + + /** + * Convert the configured solver loop rate in Hz to a ROS2 timer period. + * + * A non-positive rate falls back to a conservative default of 100 ms. 
+ */ + static std::chrono::milliseconds rateToPeriod(double rate) + { + if (rate <= 0.0) { + return std::chrono::milliseconds(100); + } + int period_ms = static_cast(1000.0 / rate); + if (period_ms < 1) { + period_ms = 1; + } + return std::chrono::milliseconds(period_ms); + } + + /** + * Build a human-readable dimension-mismatch message for invalid requests. + */ + std::string makeDimensionErrorMessage( + const char* label, + size_t provided, + size_t expected) const + { + std::ostringstream oss; + oss << label << ": provided " << provided << ", expected " << expected; + return oss.str(); + } + + /** + * Populate `results_` with a structured error response. + * + * This is used both for request-validation failures in the ROS2 wrapper + * and for solver-side failures propagated through the generated bindings. + */ + void setErrorResult( + uint8_t status, + uint16_t error_code, + const std::string& error_message) + { + results_.solution.clear(); + results_.lagrange_multipliers.clear(); + results_.inner_iterations = 0; + results_.outer_iterations = 0; + results_.status = status; + results_.error_code = error_code; + results_.error_message = error_message; + results_.cost = 0.0; + results_.norm_fpr = 0.0; + results_.penalty = 0.0; + results_.infeasibility_f1 = 0.0; + results_.infeasibility_f2 = 0.0; + results_.solve_time_ms = 0.0; + } + + /** + * Validate the most recent request and copy it into the solver buffers. + * + * On success, this method updates `p_`, `u_`, `y_`, and `init_penalty_` + * and returns `true`. On failure, it prepares an error result in `results_` + * and returns `false` without invoking the solver. + */ + bool validateAndUpdateInputData() + { + // A missing or too-small penalty falls back to the generated default. + init_penalty_ = (params_.initial_penalty > kInitialPenaltyEpsilon) + ? 
params_.initial_penalty + : ROS2_NODE_{{meta.optimizer_name|upper}}_DEFAULT_INITIAL_PENALTY; + + if (params_.parameter.size() != {{meta.optimizer_name|upper}}_NUM_PARAMETERS) { + setErrorResult( + OptimizationResultMsg::STATUS_INVALID_REQUEST, + kInvalidParameterErrorCode, + makeDimensionErrorMessage( + "wrong number of parameters", + params_.parameter.size(), + {{meta.optimizer_name|upper}}_NUM_PARAMETERS)); + return false; + } + for (size_t i = 0; i < {{meta.optimizer_name|upper}}_NUM_PARAMETERS; ++i) { + p_[i] = params_.parameter[i]; + } + + // If no initial guess is provided, keep the previous `u_` as a warm start. + if (!params_.initial_guess.empty() + && params_.initial_guess.size() != {{meta.optimizer_name|upper}}_NUM_DECISION_VARIABLES) { + setErrorResult( + OptimizationResultMsg::STATUS_INVALID_REQUEST, + kInvalidInitialGuessErrorCode, + makeDimensionErrorMessage( + "initial guess has incompatible dimensions", + params_.initial_guess.size(), + {{meta.optimizer_name|upper}}_NUM_DECISION_VARIABLES)); + return false; + } + if (params_.initial_guess.size() == {{meta.optimizer_name|upper}}_NUM_DECISION_VARIABLES) { + for (size_t i = 0; i < {{meta.optimizer_name|upper}}_NUM_DECISION_VARIABLES; ++i) { + u_[i] = params_.initial_guess[i]; + } + } + + // Likewise, an empty `initial_y` means "reuse the previous multipliers". + if (!params_.initial_y.empty() && params_.initial_y.size() != {{meta.optimizer_name|upper}}_N1) { + setErrorResult( + OptimizationResultMsg::STATUS_INVALID_REQUEST, + kInvalidInitialYErrorCode, + makeDimensionErrorMessage( + "wrong dimension of Lagrange multipliers", + params_.initial_y.size(), + {{meta.optimizer_name|upper}}_N1)); + return false; + } + if (params_.initial_y.size() == {{meta.optimizer_name|upper}}_N1) { + for (size_t i = 0; i < {{meta.optimizer_name|upper}}_N1; ++i) { + y_[i] = params_.initial_y[i]; + } + } + + return true; + } + + /** + * Invoke the generated C solver interface on the current buffers. 
+ */ + {{meta.optimizer_name}}SolverStatus solve() + { + return {{meta.optimizer_name}}_solve(cache_, u_, p_, y_, &init_penalty_); + } + + /** + * Lazily allocate the solver workspace and multiplier buffer. + */ + void initializeSolverIfNeeded() + { + if (y_ == nullptr) { + y_ = new double[{{meta.optimizer_name|upper}}_N1](); + } + if (cache_ == nullptr) { + cache_ = {{meta.optimizer_name}}_new(); + } + } + + /** + * Convert the solver status structure into the ROS2 result message. + */ + void updateResults({{meta.optimizer_name}}SolverStatus& status) + { + results_.solution.clear(); + for (size_t i = 0; i < {{meta.optimizer_name|upper}}_NUM_DECISION_VARIABLES; ++i) { + results_.solution.push_back(u_[i]); + } + + results_.lagrange_multipliers.clear(); + for (size_t i = 0; i < {{meta.optimizer_name|upper}}_N1; ++i) { + results_.lagrange_multipliers.push_back(status.lagrange[i]); + } + + results_.inner_iterations = status.num_inner_iterations; + results_.outer_iterations = status.num_outer_iterations; + results_.norm_fpr = status.last_problem_norm_fpr; + results_.cost = status.cost; + results_.penalty = status.penalty; + results_.status = static_cast(status.exit_status); + results_.error_code = static_cast(status.error_code); + // The bindings expose a null-terminated C buffer; convert it once here. + results_.error_message = std::string(status.error_message); + results_.solve_time_ms = static_cast(status.solve_time_ns) / 1000000.0; + results_.infeasibility_f2 = status.f2_norm; + results_.infeasibility_f1 = status.delta_y_norm_over_c; + } + + /** + * Store the latest optimization request received on the parameters topic. + */ + void receiveRequestCallback(const OptimizationParametersMsg::ConstSharedPtr msg) + { + params_ = *msg; + has_received_request_ = true; + } + + /** + * Process at most one pending request and publish exactly one response. 
+ * + * Repeated timer ticks do not republish stale results: once a request has + * been handled, `has_received_request_` is cleared until the next message + * arrives on the input topic. + */ + void solveAndPublish() + { + if (!has_received_request_) { + return; + } + initializeSolverIfNeeded(); + if (!validateAndUpdateInputData()) { + publisher_->publish(results_); + // Mark the request as consumed so the timer does not republish stale errors. + has_received_request_ = false; + return; + } + {{meta.optimizer_name}}SolverStatus status = solve(); + updateResults(status); + publisher_->publish(results_); + // Each request should produce exactly one response message. + has_received_request_ = false; + } + +public: + /** + * Construct the ROS2 node, declare runtime parameters, and create the + * publisher, subscriber, and wall timer used by the generated wrapper. + */ + OptimizationEngineNode() + : Node(ROS2_NODE_{{meta.optimizer_name|upper}}_NODE_NAME) + { + this->declare_parameter( + "result_topic", + std::string(ROS2_NODE_{{meta.optimizer_name|upper}}_RESULT_TOPIC)); + this->declare_parameter( + "params_topic", + std::string(ROS2_NODE_{{meta.optimizer_name|upper}}_PARAMS_TOPIC)); + this->declare_parameter( + "rate", + double(ROS2_NODE_{{meta.optimizer_name|upper}}_RATE)); + + std::string result_topic = this->get_parameter("result_topic").as_string(); + std::string params_topic = this->get_parameter("params_topic").as_string(); + double rate = this->get_parameter("rate").as_double(); + + publisher_ = this->create_publisher( + result_topic, + ROS2_NODE_{{meta.optimizer_name|upper}}_RESULT_TOPIC_QUEUE_SIZE); + subscriber_ = this->create_subscription( + params_topic, + ROS2_NODE_{{meta.optimizer_name|upper}}_PARAMS_TOPIC_QUEUE_SIZE, + std::bind(&OptimizationEngineNode::receiveRequestCallback, this, std::placeholders::_1)); + timer_ = this->create_wall_timer( + rateToPeriod(rate), + std::bind(&OptimizationEngineNode::solveAndPublish, this)); + } + + /** + * Release any 
lazily allocated solver resources. + */ + ~OptimizationEngineNode() override + { + if (y_ != nullptr) { + delete[] y_; + } + if (cache_ != nullptr) { + {{meta.optimizer_name}}_free(cache_); + } + } +}; +} /* end of namespace {{ros.package_name}} */ + +/** + * Start the generated ROS2 optimizer node and hand control to the ROS2 + * executor until shutdown is requested. + */ +int main(int argc, char** argv) +{ + rclcpp::init(argc, argv); + auto node = std::make_shared<{{ros.package_name}}::OptimizationEngineNode>(); + rclcpp::spin(node); + rclcpp::shutdown(); + return 0; +} diff --git a/python/opengen/templates/ros2/open_optimizer.hpp b/python/opengen/templates/ros2/open_optimizer.hpp new file mode 100644 index 00000000..a8482fd2 --- /dev/null +++ b/python/opengen/templates/ros2/open_optimizer.hpp @@ -0,0 +1,40 @@ +#ifndef ROS2_NODE_{{meta.optimizer_name|upper}}_H +#define ROS2_NODE_{{meta.optimizer_name|upper}}_H + +/** + * Default node name + */ +#define ROS2_NODE_{{meta.optimizer_name|upper}}_NODE_NAME "{{ros.node_name}}" + +/** + * Default result (publisher) topic name + */ +#define ROS2_NODE_{{meta.optimizer_name|upper}}_RESULT_TOPIC "{{ros.publisher_subtopic}}" + +/** + * Default parameters (subscriber) topic name + */ +#define ROS2_NODE_{{meta.optimizer_name|upper}}_PARAMS_TOPIC "{{ros.subscriber_subtopic}}" + +/** + * Default execution rate (in Hz) + */ +#define ROS2_NODE_{{meta.optimizer_name|upper}}_RATE {{ros.rate}} + +/** + * Default result topic queue size + */ +#define ROS2_NODE_{{meta.optimizer_name|upper}}_RESULT_TOPIC_QUEUE_SIZE {{ros.result_topic_queue_size}} + +/** + * Default parameters topic queue size + */ +#define ROS2_NODE_{{meta.optimizer_name|upper}}_PARAMS_TOPIC_QUEUE_SIZE {{ros.params_topic_queue_size}} + +/** + * Default initial penalty + */ +#define ROS2_NODE_{{meta.optimizer_name|upper}}_DEFAULT_INITIAL_PENALTY {{solver_config.initial_penalty}} + + +#endif /* Header Sentinel: ROS2_NODE_{{meta.optimizer_name|upper}}_H */ diff --git 
a/python/opengen/templates/ros2/open_optimizer.launch.py b/python/opengen/templates/ros2/open_optimizer.launch.py new file mode 100644 index 00000000..45d7aa60 --- /dev/null +++ b/python/opengen/templates/ros2/open_optimizer.launch.py @@ -0,0 +1,20 @@ +from launch import LaunchDescription +from launch.substitutions import PathJoinSubstitution +from launch_ros.actions import Node +from launch_ros.substitutions import FindPackageShare + + +def generate_launch_description(): + return LaunchDescription([ + Node( + package="{{ros.package_name}}", + executable="{{ros.node_name}}", + name="{{ros.node_name}}", + output="screen", + parameters=[PathJoinSubstitution([ + FindPackageShare("{{ros.package_name}}"), + "config", + "open_params.yaml", + ])], + ) + ]) diff --git a/python/opengen/templates/ros2/open_params.yaml b/python/opengen/templates/ros2/open_params.yaml new file mode 100644 index 00000000..adde7b45 --- /dev/null +++ b/python/opengen/templates/ros2/open_params.yaml @@ -0,0 +1,5 @@ +/**: + ros__parameters: + result_topic: "{{ros.publisher_subtopic}}" + params_topic: "{{ros.subscriber_subtopic}}" + rate: {{ "%.1f"|format(ros.rate) if ros.rate == (ros.rate|int) else ros.rate }} diff --git a/python/opengen/templates/ros2/package.xml b/python/opengen/templates/ros2/package.xml new file mode 100644 index 00000000..c183538d --- /dev/null +++ b/python/opengen/templates/ros2/package.xml @@ -0,0 +1,24 @@ + + + {{ros.package_name}} + {{meta.version}} + {{ros.description}} + chung + {{meta.licence}} + + ament_cmake + + rosidl_default_generators + + launch + launch_ros + rclcpp + + rosidl_default_runtime + + rosidl_interface_packages + + + ament_cmake + + diff --git a/open-codegen/opengen/templates/tcp/tcp_server.rs b/python/opengen/templates/tcp/tcp_server.rs similarity index 63% rename from open-codegen/opengen/templates/tcp/tcp_server.rs rename to python/opengen/templates/tcp/tcp_server.rs index 042e176f..c676a7f5 100644 --- 
a/open-codegen/opengen/templates/tcp/tcp_server.rs +++ b/python/opengen/templates/tcp/tcp_server.rs @@ -9,7 +9,7 @@ extern crate clap; use std::{ io::{prelude::Read, Write}, - net::TcpListener, + net::{TcpListener, TcpStream}, }; use clap::{Arg, App}; @@ -80,27 +80,54 @@ struct OptimizerSolution<'a> { cost: f64, } -fn pong(stream: &mut std::net::TcpStream, code: i32) { - let error_message = format!( +fn write_bytes_to_stream(stream: &mut TcpStream, payload: &[u8], context: &str) -> bool { + if let Err(err) = stream.write_all(payload) { + warn!("{}: {}", context, err); + return false; + } + true +} + +fn write_json_to_stream( + stream: &mut TcpStream, + payload: &T, + context: &str, +) -> bool { + let payload_json = match serde_json::to_vec_pretty(payload) { + Ok(payload_json) => payload_json, + Err(err) => { + error!("{}: {}", context, err); + return false; + } + }; + write_bytes_to_stream(stream, &payload_json, context) +} + +fn pong(stream: &mut TcpStream, code: i32) { + let pong_message = format!( {% raw %}"{{\n\t\"Pong\" : {}\n}}\n"{% endraw %}, code ); - stream - .write_all(error_message.as_bytes()) - .expect("cannot write to stream"); + write_bytes_to_stream(stream, pong_message.as_bytes(), "could not write pong to stream"); } /// Writes an error to the communication stream -fn write_error_message(stream: &mut std::net::TcpStream, code: i32, error_msg: &str) { - let error_message = format!( - {% raw %}"{{\n\t\"type\" : \"Error\", \n\t\"code\" : {}, \n\t\"message\" : \"{}\"\n}}\n"{% endraw %}, +#[derive(Serialize)] +struct ErrorResponse<'a> { + #[serde(rename = "type")] + response_type: &'a str, + code: i32, + message: &'a str, +} + +fn write_error_message(stream: &mut TcpStream, code: i32, error_msg: &str) { + let error_response = ErrorResponse { + response_type: "Error", code, - error_msg - ); - warn!("Invalid request {:?}", code); - stream - .write_all(error_message.as_bytes()) - .expect("cannot write to stream"); + message: error_msg, + }; + warn!("TCP 
error {}: {}", code, error_msg); + write_json_to_stream(stream, &error_response, "could not write error response to stream"); } /// Serializes the solution and solution status and returns it @@ -108,7 +135,7 @@ fn write_error_message(stream: &mut std::net::TcpStream, code: i32, error_msg: & fn return_solution_to_client( status: AlmOptimizerStatus, solution: &[f64], - stream: &mut std::net::TcpStream, + stream: &mut TcpStream, ) { let empty_vec : [f64; 0] = Default::default(); let solution: OptimizerSolution = OptimizerSolution { @@ -125,10 +152,7 @@ fn return_solution_to_client( cost: status.cost(), }; - let solution_json = serde_json::to_string_pretty(&solution).unwrap(); - stream - .write_all(solution_json.as_bytes()) - .expect("cannot write to stream"); + write_json_to_stream(stream, &solution, "could not write optimizer solution to stream"); } /// Handles an execution request @@ -137,7 +161,7 @@ fn execution_handler( execution_parameter: &ExecutionParameter, u: &mut [f64], p: &mut [f64], - stream: &mut std::net::TcpStream, + stream: &mut TcpStream, ) { // ---------------------------------------------------- // Set initial value @@ -150,7 +174,12 @@ fn execution_handler( Some(u0) => { if u0.len() != {{meta.optimizer_name|upper}}_NUM_DECISION_VARIABLES { warn!("initial guess has incompatible dimensions"); - write_error_message(stream, 1600, "Initial guess has incompatible dimensions"); + let error_message = format!( + "initial guess has incompatible dimensions: provided {}, expected {}", + u0.len(), + {{meta.optimizer_name|upper}}_NUM_DECISION_VARIABLES + ); + write_error_message(stream, 1600, &error_message); return; } u.copy_from_slice(u0); @@ -162,7 +191,12 @@ fn execution_handler( // ---------------------------------------------------- if let Some(y0) = &execution_parameter.initial_lagrange_multipliers { if y0.len() != {{meta.optimizer_name|upper}}_N1 { - write_error_message(stream, 1700, "wrong dimension of Langrange multipliers"); + let error_message = 
format!(
+                "wrong dimension of Lagrange multipliers: provided {}, expected {}",
+                y0.len(),
+                {{meta.optimizer_name|upper}}_N1
+            );
+            write_error_message(stream, 1700, &error_message);
             return;
         }
     }
@@ -172,7 +206,12 @@ fn execution_handler(
     // ----------------------------------------------------
     let parameter = &execution_parameter.parameter;
     if parameter.len() != {{meta.optimizer_name|upper}}_NUM_PARAMETERS {
-        write_error_message(stream, 3003, "wrong number of parameters");
+        let error_message = format!(
+            "wrong number of parameters: provided {}, expected {}",
+            parameter.len(),
+            {{meta.optimizer_name|upper}}_NUM_PARAMETERS
+        );
+        write_error_message(stream, 3003, &error_message);
         return;
     }
     p.copy_from_slice(parameter);
@@ -185,8 +224,9 @@
         Ok(ok_status) => {
             return_solution_to_client(ok_status, u, stream);
         }
-        Err(_) => {
-            write_error_message(stream, 2000, "Problem solution failed (solver error)");
+        Err(err) => {
+            let error_message = format!("problem solution failed: {}", err);
+            write_error_message(stream, 2000, &error_message);
         }
     }
 }
@@ -196,22 +236,52 @@ fn run_server(tcp_config: &TcpServerConfiguration) {
     let mut p = [0.0; {{meta.optimizer_name|upper}}_NUM_PARAMETERS];
     let mut cache = initialize_solver();
     info!("Done");
-    let listener = TcpListener::bind(format!("{}:{}", tcp_config.ip, tcp_config.port)).unwrap();
+    let listener = match TcpListener::bind(format!("{}:{}", tcp_config.ip, tcp_config.port)) {
+        Ok(listener) => listener,
+        Err(err) => {
+            error!(
+                "failed to bind TCP server at {}:{}: {}",
+                tcp_config.ip,
+                tcp_config.port,
+                err
+            );
+            return;
+        }
+    };
     let mut u = [0.0; {{meta.optimizer_name|upper}}_NUM_DECISION_VARIABLES];
     info!("listening started, ready to accept connections at {}:{}", tcp_config.ip, tcp_config.port);
-    for stream in listener.incoming() {
-        let mut stream = stream.unwrap();
+    'incoming: for stream in listener.incoming() {
+        let mut stream = match stream {
+            Ok(stream) => stream,
+            Err(err) => {
+                
warn!("failed to accept incoming TCP connection: {}", err); + continue; + } + }; //The following is more robust compared to `read_to_string` let mut bytes_buffer = vec![0u8; READ_BUFFER_SIZE]; let mut read_data_length = 1; let mut buffer = String::new(); while read_data_length != 0 { - read_data_length = stream - .read(&mut bytes_buffer) - .expect("could not read stream"); - let new_string = String::from_utf8(bytes_buffer[0..read_data_length].to_vec()) - .expect("sent data is not UFT-8"); + read_data_length = match stream.read(&mut bytes_buffer) { + Ok(read_data_length) => read_data_length, + Err(err) => { + warn!("could not read stream: {}", err); + continue 'incoming; + } + }; + let new_string = match String::from_utf8(bytes_buffer[0..read_data_length].to_vec()) { + Ok(new_string) => new_string, + Err(err) => { + let error_message = format!( + "invalid request: request body is not valid UTF-8 ({})", + err.utf8_error() + ); + write_error_message(&mut stream, 1000, &error_message); + continue 'incoming; + } + }; buffer.push_str(&new_string); } @@ -236,8 +306,9 @@ fn run_server(tcp_config: &TcpServerConfiguration) { pong(&mut stream, ping_code); } }, - Err(_) => { - write_error_message(&mut stream, 1000, "Invalid request"); + Err(err) => { + let error_message = format!("invalid request: {}", err); + write_error_message(&mut stream, 1000, &error_message); } } } diff --git a/open-codegen/opengen/templates/tcp/tcp_server_cargo.toml b/python/opengen/templates/tcp/tcp_server_cargo.toml similarity index 100% rename from open-codegen/opengen/templates/tcp/tcp_server_cargo.toml rename to python/opengen/templates/tcp/tcp_server_cargo.toml diff --git a/open-codegen/publish-pypi.sh b/python/publish-pypi.sh old mode 100644 new mode 100755 similarity index 56% rename from open-codegen/publish-pypi.sh rename to python/publish-pypi.sh index 1feca85b..ddaf594d --- a/open-codegen/publish-pypi.sh +++ b/python/publish-pypi.sh @@ -4,9 +4,30 @@ set -eu # This script facilitates 
releasing a new version of opengen to PyPI. # It expects a local virtual environment at ./venv with publishing tools. -echo "[OpEnGen] Checking out master" -git checkout master -git pull origin master +version=$(cat VERSION) +current_branch=$(git rev-parse --abbrev-ref HEAD) + +is_alpha_version=false +case "$version" in + *a[0-9]*) + is_alpha_version=true + ;; +esac + +if [ "$current_branch" != "master" ] && [ "$is_alpha_version" = false ]; then + echo "[OpEnGen] Warning: version $version is not an alpha release and the current branch is '$current_branch' (not 'master')." + printf "Proceed anyway? [y/N] " + read -r response + case "$response" in + [yY][eE][sS]|[yY]) + echo "[OpEnGen] Proceeding from branch '$current_branch'" + ;; + *) + echo "[OpEnGen] Publish cancelled" + exit 0 + ;; + esac +fi echo "[OpEnGen] Cleaning previous build artifacts" rm -rf ./build ./dist ./opengen.egg-info @@ -23,7 +44,7 @@ python -m build echo "[OpEnGen] Checking distributions with twine" python -m twine check dist/* -echo "[OpEnGen] Uploading to PyPI..." +echo "[OpEnGen] You are about to publish version $version from branch '$current_branch'." printf "Are you sure? 
[y/N] " read -r response case "$response" in @@ -37,6 +58,5 @@ case "$response" in esac echo "[OpEnGen] Don't forget to create a tag; run:" -version=$(cat VERSION) echo "\$ git tag -a opengen-$version -m 'opengen-$version'" echo "\$ git push --tags" diff --git a/open-codegen/pyproject.toml b/python/pyproject.toml similarity index 97% rename from open-codegen/pyproject.toml rename to python/pyproject.toml index df6ded5a..715a0a72 100644 --- a/open-codegen/pyproject.toml +++ b/python/pyproject.toml @@ -49,11 +49,12 @@ dependencies = [ Homepage = "https://github.com/alphaville/optimization-engine" Documentation = "https://alphaville.github.io/optimization-engine/" Repository = "https://github.com/alphaville/optimization-engine" -Changelog = "https://github.com/alphaville/optimization-engine/blob/master/open-codegen/CHANGELOG.md" +Changelog = "https://github.com/alphaville/optimization-engine/blob/master/python/CHANGELOG.md" [project.optional-dependencies] dev = [ "build>=1", + "pytest>=8", "twine>=5", ] diff --git a/open-codegen/setup.py b/python/setup.py similarity index 100% rename from open-codegen/setup.py rename to python/setup.py diff --git a/open-codegen/test/README.md b/python/test/README.md similarity index 85% rename from open-codegen/test/README.md rename to python/test/README.md index 75cd8379..8b070ac9 100644 --- a/open-codegen/test/README.md +++ b/python/test/README.md @@ -6,7 +6,7 @@ Firstly, you need to create a virtual environment, activate it, and install open ``` pip install . ``` -from within `open-codegen`. +from within `python`. ## Benchmarking @@ -22,9 +22,9 @@ Then, firstly, create some necessary optimizers to be benchmarked python prepare_benchmarks.py ``` -Run this from within `open-codegen/opengen`. +Run this from within `python/`. This will create a number of solvers to be benchmarked; -these will be stored in `open-codegen/opengen/.python_test_build/benchmarkable`. +these will be stored in `python/.python_test_build/benchmarkable`. 
Then benchmark them with ``` @@ -47,5 +47,6 @@ The generated benchmark looks like this: Run ``` python -W ignore test/test_constraints.py -v +python -W ignore test/test_ros2.py -v python -W ignore test/test.py -v -``` \ No newline at end of file +``` diff --git a/python/test/__init__.py b/python/test/__init__.py new file mode 100644 index 00000000..fbd4941c --- /dev/null +++ b/python/test/__init__.py @@ -0,0 +1,3 @@ +"""Test package for the project.""" + +__all__ = [] diff --git a/open-codegen/test/benchmark_open.py b/python/test/benchmark_open.py similarity index 90% rename from open-codegen/test/benchmark_open.py rename to python/test/benchmark_open.py index 3b7f0858..7939ec4a 100644 --- a/open-codegen/test/benchmark_open.py +++ b/python/test/benchmark_open.py @@ -31,22 +31,23 @@ def get_open_local_absolute_path(): - cwd = os.getcwd() - return cwd.split('open-codegen')[0] + return os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..", "rust")) def t_benchmark1(solver): a = np.random.uniform(0.5, 2) b = np.random.uniform(0.5, 15) c = np.random.uniform(0.9, 3) - _sol = solver.run([a, b, c]) + response = solver.run([a, b, c]) + _sol = response.get() def t_benchmark2(solver): x0 = np.random.uniform(-3.5, -2) y0 = np.random.uniform(-2.5, 2.5) # th0 = np.random.uniform(-0.3, 0.3) - _sol = solver.run([x0, y0, 0]) + response = solver.run([x0, y0, 0]) + _sol = response.get() def test_benchmark1(benchmark): diff --git a/open-codegen/test/prepare_benchmarks.py b/python/test/prepare_benchmarks.py similarity index 97% rename from open-codegen/test/prepare_benchmarks.py rename to python/test/prepare_benchmarks.py index 1b9fc60b..5d7b8825 100644 --- a/open-codegen/test/prepare_benchmarks.py +++ b/python/test/prepare_benchmarks.py @@ -7,8 +7,7 @@ def get_open_local_absolute_path(): - cwd = os.getcwd() - return cwd.split('open-codegen')[0] + return os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..", "rust")) def solver_configuration(do_precond=False): 
@@ -103,4 +102,4 @@ def benchmark2(optimizer_name, with_obstacles=False, do_precond=False): benchmark2("benchmark2", do_precond=False) benchmark2("benchmark2p", do_precond=True) benchmark2("benchmark2o", with_obstacles=True, do_precond=False) -benchmark2("benchmark2op", with_obstacles=True, do_precond=True) \ No newline at end of file +benchmark2("benchmark2op", with_obstacles=True, do_precond=True) diff --git a/python/test/test.py b/python/test/test.py new file mode 100644 index 00000000..4122e55b --- /dev/null +++ b/python/test/test.py @@ -0,0 +1,1374 @@ +import os +import unittest +import json +import socket +import shutil +import sys +import importlib +import casadi.casadi as cs +import opengen as og +import subprocess +import logging +import numpy as np +from pathlib import PureWindowsPath +from types import SimpleNamespace + + + + + +class BuildConfigurationTestCase(unittest.TestCase): + + def test_local_path_is_toml_safe_on_windows(self): + # some windows-type path... + windows_style_path = PureWindowsPath("C:/temp/optimization-engine") + build_config = og.config.BuildConfiguration() \ + .with_open_version(local_path=windows_style_path) + + self.assertEqual( + "C:/temp/optimization-engine", + build_config.local_path + ) + + def test_with_build_mode_rejects_invalid_values(self): + """`with_build_mode` should reject unsupported build modes.""" + build_config = og.config.BuildConfiguration() + + with self.assertRaisesRegex( + ValueError, + "build mode must be either 'debug' or 'release'", + ): + build_config.with_build_mode("profile") + + def test_with_allocator_accepts_valid_enum_value(self): + build_config = og.config.BuildConfiguration() + build_config.with_allocator(og.config.RustAllocator.JemAlloc) + self.assertEqual(og.config.RustAllocator.JemAlloc, build_config.allocator) + + def test_with_allocator_rejects_invalid_value(self): + build_config = og.config.BuildConfiguration() + with self.assertRaisesRegex( + ValueError, + "allocator must be an instance of 
RustAllocator", + ): + build_config.with_allocator("jemalloc") + + def test_with_open_version_accepts_wildcard(self): + build_config = og.config.BuildConfiguration().with_open_version("*") + self.assertEqual("*", build_config.open_version) + + def test_with_open_version_accepts_semver(self): + build_config = og.config.BuildConfiguration().with_open_version("1.2.3-alpha.1+build.5") + self.assertEqual("1.2.3-alpha.1+build.5", build_config.open_version) + + def test_with_open_version_rejects_invalid_version(self): + build_config = og.config.BuildConfiguration() + with self.assertRaisesRegex( + ValueError, + "invalid OpEn version", + ): + build_config.with_open_version("^1.2") + + +class SmokeTests(unittest.TestCase): + + def test_incompatible_constraints_dimensions(self): + u = cs.SX.sym("u", 5) + p = cs.SX.sym("p", 2) + phi = og.functions.rosenbrock(u, p) + bounds = og.constraints.Rectangle(xmin=[1, 2], xmax=[3, 4]) + problem = og.builder.Problem(u, p, phi) \ + .with_constraints(bounds) + meta = og.config.OptimizerMeta().with_optimizer_name("abcd") + build_config = og.config.BuildConfiguration() \ + .with_open_version(local_path=os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..", "rust"))) \ + .with_build_directory(".whatever") + solver_cfg = og.config.SolverConfiguration() + builder = og.builder.OpEnOptimizerBuilder(problem, meta, build_config, solver_cfg) + with self.assertRaises(ValueError): + builder.build() + +class OcpSolutionTestCase(unittest.TestCase): + + def test_ocp_solution_defaults_missing_raw_fields_to_none(self): + solution = og.ocp.OcpSolution(SimpleNamespace(), inputs=[], states=[]) + + self.assertEqual([], solution.solution) + self.assertIsNone(solution.cost) + self.assertIsNone(solution.exit_status) + self.assertIsNone(solution.solve_time_ms) + self.assertIsNone(solution.penalty) + self.assertIsNone(solution.num_outer_iterations) + self.assertIsNone(solution.num_inner_iterations) + self.assertIsNone(solution.last_problem_norm_fpr) + 
self.assertIsNone(solution.f1_infeasibility) + self.assertIsNone(solution.f2_norm) + self.assertIsNone(solution.lagrange_multipliers) + + def test_ocp_solution_repr_formats_nested_values(self): + raw = SimpleNamespace( + solution=[1.0, -0.0], + cost=-0.0, + exit_status="Converged", + solve_time_ms=1.2345, + penalty=0.0, + num_outer_iterations=2, + num_inner_iterations=5, + last_problem_norm_fpr=0.125, + f1_infeasibility=(False, 0.0), + f2_norm=[3.5], + lagrange_multipliers=(1.0, 2.0), + ) + + solution = og.ocp.OcpSolution( + raw, + inputs=[(1.0, 2.0)], + states=[[0.0, -0.0]], + ) + + expected = "\n".join([ + "OCP Solution:", + " Exit status.......... Converged", + " Cost................. 0.0", + " Solve time [ms]...... 1.234", + " Penalty.............. 0.0", + " Outer iterations..... 2", + " Inner iterations..... 5", + " FPR.................. 0.125", + " ALM infeasibility.... (False, 0.0)", + " PM infeasibility..... [3.5]", + " Decision variables... [1, 0.0]", + " Inputs............... [(1, 2)]", + " States............... [[0.0, 0.0]]", + " Lagrange multipliers. 
(1, 2)", + ]) + + self.assertEqual(expected, repr(solution)) + self.assertEqual(expected, str(solution)) + + +class AffineSpaceTestCase(unittest.TestCase): + + def test_affine_space_exposes_its_data_and_shape_flags(self): + matrix = np.array([[1.0, 2.0], [3.0, 4.0]]) + vector = np.array([5.0, 6.0]) + affine_space = og.constraints.AffineSpace(matrix, vector) + + np.testing.assert_array_equal(np.array([1.0, 2.0, 3.0, 4.0]), affine_space.matrix_a) + np.testing.assert_array_equal(vector, affine_space.vector_b) + self.assertTrue(affine_space.is_convex()) + self.assertFalse(affine_space.is_compact()) + + def test_affine_space_distance_and_projection_are_not_implemented(self): + affine_space = og.constraints.AffineSpace(np.eye(2), np.array([1.0, 0.0])) + + with self.assertRaises(NotImplementedError): + affine_space.distance_squared([0.0, 0.0]) + + with self.assertRaises(NotImplementedError): + affine_space.project([0.0, 0.0]) + + +class RosConfigurationTestCase(unittest.TestCase): + + def test_ros_configuration_defaults_to_expected_values(self): + ros_config = og.config.RosConfiguration() + + self.assertEqual({ + "package_name": "open_ros", + "node_name": "ros_node_optimizer", + "description": "parametric optimization with OpEn", + "rate": 10.0, + "result_topic_queue_size": 100, + "params_topic_queue_size": 100, + "publisher_subtopic": "result", + "subscriber_subtopic": "parameters", + }, ros_config.to_dict()) + + def test_ros_configuration_supports_custom_values(self): + ros_config = og.config.RosConfiguration() \ + .with_package_name("demo_pkg") \ + .with_node_name("demo_node") \ + .with_rate(25.0) \ + .with_description("custom description") \ + .with_queue_sizes(result_topic_queue_size=5, parameter_topic_queue_size=7) \ + .with_publisher_subtopic("solutions") \ + .with_subscriber_subtopic("requests") + + self.assertEqual("demo_pkg", ros_config.package_name) + self.assertEqual("demo_node", ros_config.node_name) + self.assertEqual("solutions", 
ros_config.publisher_subtopic) + self.assertEqual("requests", ros_config.subscriber_subtopic) + self.assertEqual({ + "package_name": "demo_pkg", + "node_name": "demo_node", + "description": "custom description", + "rate": 25.0, + "result_topic_queue_size": 5, + "params_topic_queue_size": 7, + "publisher_subtopic": "solutions", + "subscriber_subtopic": "requests", + }, ros_config.to_dict()) + + def test_ros_configuration_rejects_invalid_names(self): + ros_config = og.config.RosConfiguration() + + with self.assertRaisesRegex(ValueError, "invalid package name"): + ros_config.with_package_name("invalid package") + + with self.assertRaisesRegex(ValueError, "invalid node name"): + ros_config.with_node_name("invalid node") + + +class SetYCalculatorTestCase(unittest.TestCase): + + def test_set_y_for_rectangle_uses_large_bounds_only_where_needed(self): + rectangle = og.constraints.Rectangle( + xmin=[float("-inf"), -1.0], + xmax=[2.0, float("inf")], + ) + + y_set = og.builder.SetYCalculator(rectangle).obtain() + + self.assertIsInstance(y_set, og.constraints.Rectangle) + self.assertEqual([0.0, -og.builder.SetYCalculator.LARGE_NUM], y_set.xmin) + self.assertEqual([og.builder.SetYCalculator.LARGE_NUM, 0.0], y_set.xmax) + + def test_set_y_for_compact_set_returns_large_infinity_ball(self): + y_set = og.builder.SetYCalculator(og.constraints.Ball2(radius=1.0)).obtain() + + self.assertIsInstance(y_set, og.constraints.BallInf) + self.assertIsNone(y_set.center) + self.assertEqual(og.builder.SetYCalculator.LARGE_NUM, y_set.radius) + + def test_set_y_for_halfspace_raises_specific_error(self): + y_calc = og.builder.SetYCalculator(og.constraints.Halfspace([0.0, 0.0], 1.0)) + + with self.assertRaisesRegex(NotImplementedError, "you cannot use a Halfspace"): + y_calc.obtain() + + +class RustBuildTestCase(unittest.TestCase): + + TEST_DIR = ".python_test_build" + + @staticmethod + def get_open_local_absolute_path(): + return os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..", 
"rust")) + + # Which version of OpEn Rust library to test against + OPEN_RUSTLIB_VERSION = "*" + + @classmethod + def solverConfig(cls): + solver_config = og.config.SolverConfiguration() \ + .with_lbfgs_memory(15) \ + .with_tolerance(1e-4) \ + .with_initial_tolerance(1e-4) \ + .with_delta_tolerance(1e-4) \ + .with_initial_penalty(15.0) \ + .with_penalty_weight_update_factor(10.0) \ + .with_max_inner_iterations(155) \ + .with_max_duration_micros(1e8) \ + .with_max_outer_iterations(50) \ + .with_sufficient_decrease_coefficient(0.05) \ + .with_cbfgs_parameters(1.5, 1e-10, 1e-12) \ + .with_preconditioning(False) + return solver_config + + @classmethod + def setUpPythonBindings(cls): + u = cs.MX.sym("u", 5) # decision variable (nu = 5) + p = cs.MX.sym("p", 2) # parameter (np = 2) + phi = og.functions.rosenbrock(u, p) # cost function + bounds = og.constraints.Ball2(None, 1.5) # ball centered at origin + meta = og.config.OptimizerMeta() \ + .with_optimizer_name("python_bindings") + problem = og.builder.Problem(u, p, phi) \ + .with_constraints(bounds) + build_config = og.config.BuildConfiguration() \ + .with_open_version(local_path=RustBuildTestCase.get_open_local_absolute_path()) \ + .with_build_directory(RustBuildTestCase.TEST_DIR) \ + .with_build_mode(og.config.BuildConfiguration.DEBUG_MODE)\ + .with_build_python_bindings() + og.builder.OpEnOptimizerBuilder(problem, + metadata=meta, + build_configuration=build_config, + solver_configuration=cls.solverConfig()) \ + .build() + + target_lib = os.path.join( + RustBuildTestCase.TEST_DIR, "python_bindings", "src", "lib.rs") + with open(target_lib, "r", encoding="utf-8") as fh: + solver_lib = fh.read() + + anchor = ( + ' assert_eq!(u.len(), PYTHON_BINDINGS_NUM_DECISION_VARIABLES, ' + '"Wrong number of decision variables (u)");\n' + ) + injected_guard = ( + anchor + + '\n' + ' if p[0] < 0.0 {\n' + ' return Err(SolverError::Cost("forced solver error for Python bindings test"));\n' + ' }\n' + ) + if anchor not in solver_lib: + 
raise RuntimeError("Could not inject deterministic solver error into python_bindings") + + with open(target_lib, "w", encoding="utf-8") as fh: + fh.write(solver_lib.replace(anchor, injected_guard, 1)) + + python_bindings_dir = os.path.join( + RustBuildTestCase.TEST_DIR, "python_bindings", "python_bindings_python_bindings") + process = subprocess.Popen( + ["cargo", "build"], + cwd=python_bindings_dir, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + ) + _stdout, stderr = process.communicate() + if process.returncode != 0: + raise RuntimeError( + "Could not rebuild Python bindings:\n{}".format(stderr.decode()) + ) + + extension_dict = {'linux': ('.so', '.so'), + 'darwin': ('.dylib', '.so'), + 'win32': ('.dll', '.pyd')} + (original_lib_extension, + target_lib_extension) = extension_dict[sys.platform] + optimizer_prefix = "lib" if sys.platform != "win32" else "" + generated_bindings = os.path.join( + python_bindings_dir, + "target", + "debug", + "{}python_bindings{}".format(optimizer_prefix, original_lib_extension), + ) + target_bindings = os.path.join( + RustBuildTestCase.TEST_DIR, + "python_bindings", + "python_bindings{}".format(target_lib_extension)) + shutil.copyfile(generated_bindings, target_bindings) + + @classmethod + def setUpOnlyF1(cls): + u = cs.MX.sym("u", 5) # decision variable (nu = 5) + p = cs.MX.sym("p", 2) # parameter (np = 2) + f1 = cs.vertcat(1.5 * u[0] - u[1], u[2] - u[3]) + set_c = og.constraints.Rectangle( + xmin=[-0.01, -0.01], xmax=[0.02, 0.03]) + phi = og.functions.rosenbrock(u, p) # cost function + bounds = og.constraints.Ball2(None, 1.5) # ball centered at origin + tcp_config = og.config.TcpServerConfiguration(bind_port=3301) + meta = og.config.OptimizerMeta() \ + .with_optimizer_name("only_f1") + problem = og.builder.Problem(u, p, phi) \ + .with_aug_lagrangian_constraints(f1, set_c) \ + .with_constraints(bounds) + build_config = og.config.BuildConfiguration() \ + 
.with_open_version(local_path=RustBuildTestCase.get_open_local_absolute_path()) \ + .with_build_directory(RustBuildTestCase.TEST_DIR) \ + .with_build_mode(og.config.BuildConfiguration.DEBUG_MODE) \ + .with_tcp_interface_config(tcp_interface_config=tcp_config) \ + .with_build_c_bindings() \ + .with_allocator(og.config.RustAllocator.JemAlloc) + og.builder.OpEnOptimizerBuilder(problem, + metadata=meta, + build_configuration=build_config, + solver_configuration=cls.solverConfig()) \ + .build() + + @classmethod + def setUpOnlyF2(cls, is_preconditioned=False): + u = cs.MX.sym("u", 5) # decision variable (nu = 5) + p = cs.MX.sym("p", 2) # parameter (np = 2) + f2 = cs.vertcat(0.2 + 1.5 * u[0] - u[1], u[2] - u[3] - 0.1) + phi = og.functions.rosenbrock(u, p) + bounds = og.constraints.Ball2(None, 1.5) + tcp_config = og.config.TcpServerConfiguration( + bind_port=3302 if not is_preconditioned else 3309) + meta = og.config.OptimizerMeta() \ + .with_optimizer_name("only_f2" + ("_precond" if is_preconditioned else "")) + problem = og.builder.Problem(u, p, phi) \ + .with_penalty_constraints(f2) \ + .with_constraints(bounds) + build_config = og.config.BuildConfiguration() \ + .with_open_version(local_path=RustBuildTestCase.get_open_local_absolute_path()) \ + .with_build_directory(RustBuildTestCase.TEST_DIR) \ + .with_build_mode(og.config.BuildConfiguration.DEBUG_MODE) \ + .with_tcp_interface_config(tcp_interface_config=tcp_config) \ + .with_build_c_bindings() + slv_cfg = og.config.SolverConfiguration() \ + .with_tolerance(1e-6) \ + .with_initial_tolerance(1e-4) \ + .with_delta_tolerance(1e-5) \ + .with_penalty_weight_update_factor(10.0) \ + .with_max_inner_iterations(1000) \ + .with_max_outer_iterations(50) \ + .with_preconditioning(is_preconditioned) + og.builder.OpEnOptimizerBuilder(problem, + metadata=meta, + build_configuration=build_config, + solver_configuration=slv_cfg) \ + .build() + + @classmethod + def setUpPlain(cls): + u = cs.MX.sym("u", 5) # decision variable (nu = 5) + 
p = cs.MX.sym("p", 2) # parameter (np = 2) + phi = og.functions.rosenbrock(u, p) + bounds = og.constraints.Ball2(None, 1.5) + tcp_config = og.config.TcpServerConfiguration(bind_port=4598) + meta = og.config.OptimizerMeta() \ + .with_optimizer_name("plain") + problem = og.builder.Problem(u, p, phi) \ + .with_constraints(bounds) + build_config = og.config.BuildConfiguration() \ + .with_open_version(local_path=RustBuildTestCase.get_open_local_absolute_path()) \ + .with_build_directory(RustBuildTestCase.TEST_DIR) \ + .with_build_mode(og.config.BuildConfiguration.DEBUG_MODE) \ + .with_tcp_interface_config(tcp_interface_config=tcp_config) \ + .with_build_c_bindings() + og.builder.OpEnOptimizerBuilder(problem, + metadata=meta, + build_configuration=build_config, + solver_configuration=cls.solverConfig()) \ + .build() + + @classmethod + def setUpRosPackageGeneration(cls): + u = cs.MX.sym("u", 5) # decision variable (nu = 5) + p = cs.MX.sym("p", 2) # parameter (np = 2) + phi = og.functions.rosenbrock(u, p) + c = cs.vertcat(1.5 * u[0] - u[1], + cs.fmax(0.0, u[2] - u[3] + 0.1)) + bounds = og.constraints.Ball2(None, 1.5) + meta = og.config.OptimizerMeta() \ + .with_optimizer_name("rosenbrock_ros") + problem = og.builder.Problem(u, p, phi) \ + .with_constraints(bounds) \ + .with_penalty_constraints(c) + ros_config = og.config.RosConfiguration() \ + .with_package_name("parametric_optimizer") \ + .with_node_name("open_node") \ + .with_rate(35) \ + .with_description("really cool ROS node") + build_config = og.config.BuildConfiguration() \ + .with_open_version(local_path=RustBuildTestCase.get_open_local_absolute_path()) \ + .with_build_directory(RustBuildTestCase.TEST_DIR) \ + .with_build_mode(og.config.BuildConfiguration.DEBUG_MODE) \ + .with_build_c_bindings() \ + .with_ros(ros_config) + og.builder.OpEnOptimizerBuilder(problem, + metadata=meta, + build_configuration=build_config, + solver_configuration=cls.solverConfig()) \ + .build() + + @classmethod + def 
setUpOnlyParametricF2(cls): + u = cs.MX.sym("u", 5) # decision variable (nu = 5) + p = cs.MX.sym("p", 3) # parameter (np = 3) + f2 = u[0] - p[2] + phi = og.functions.rosenbrock( + u, cs.vertcat(p[0], p[1])) # cost function + bounds = og.constraints.Ball2(None, 1.5) # ball centered at origin + tcp_config = og.config.TcpServerConfiguration(bind_port=4599) + meta = og.config.OptimizerMeta() \ + .with_optimizer_name("parametric_f2") + problem = og.builder.Problem(u, p, phi) \ + .with_penalty_constraints(f2) \ + .with_constraints(bounds) + build_config = og.config.BuildConfiguration() \ + .with_open_version(local_path=RustBuildTestCase.get_open_local_absolute_path()) \ + .with_build_mode(og.config.BuildConfiguration.DEBUG_MODE) \ + .with_build_directory(RustBuildTestCase.TEST_DIR) \ + .with_tcp_interface_config(tcp_interface_config=tcp_config) \ + .with_build_c_bindings() + solver_config = og.config.SolverConfiguration() \ + .with_tolerance(1e-6) \ + .with_initial_tolerance(1e-4) \ + .with_delta_tolerance(1e-5) \ + .with_penalty_weight_update_factor(5) + og.builder.OpEnOptimizerBuilder( + problem, meta, build_config, solver_config).build() + + @classmethod + def setUpHalfspace(cls): + u = cs.SX.sym("u", 5) # decision variable (nu = 5) + p = cs.SX.sym("p", 2) # parameter (np = 2) + phi = cs.dot(u, u) # cost function + + bounds = og.constraints.Halfspace([1., 2., 1., 5., 2.], -10.39) + + problem = og.builder.Problem(u, p, phi) \ + .with_constraints(bounds) + + meta = og.config.OptimizerMeta() \ + .with_optimizer_name("halfspace_optimizer") + + tcp_config = og.config.TcpServerConfiguration(bind_port=3305) + build_config = og.config.BuildConfiguration() \ + .with_build_directory(RustBuildTestCase.TEST_DIR) \ + .with_build_mode(og.config.BuildConfiguration.DEBUG_MODE) \ + .with_open_version(local_path=RustBuildTestCase.get_open_local_absolute_path()) \ + .with_tcp_interface_config(tcp_interface_config=tcp_config) + + builder = og.builder.OpEnOptimizerBuilder(problem, + meta, 
+ build_config, + cls.solverConfig()) + builder.build() + + @classmethod + def setUpSolverError(cls): + u = cs.MX.sym("u", 1) + p = cs.MX.sym("p", 1) + phi = cs.dot(u, u) + bounds = og.constraints.Rectangle(xmin=[-1.0], xmax=[1.0]) + tcp_config = og.config.TcpServerConfiguration(bind_port=3310) + meta = og.config.OptimizerMeta() \ + .with_optimizer_name("solver_error") + problem = og.builder.Problem(u, p, phi) \ + .with_constraints(bounds) + build_config = og.config.BuildConfiguration() \ + .with_open_version(local_path=RustBuildTestCase.get_open_local_absolute_path()) \ + .with_build_directory(RustBuildTestCase.TEST_DIR) \ + .with_build_mode(og.config.BuildConfiguration.DEBUG_MODE) \ + .with_tcp_interface_config(tcp_interface_config=tcp_config) \ + .with_build_c_bindings() + og.builder.OpEnOptimizerBuilder(problem, + metadata=meta, + build_configuration=build_config, + solver_configuration=cls.solverConfig()) \ + .build() + + target_lib = os.path.join( + RustBuildTestCase.TEST_DIR, "solver_error", "src", "lib.rs") + with open(target_lib, "r", encoding="utf-8") as fh: + solver_lib = fh.read() + + # Look for this excerpt inside lib.rs (in the auto-generated solver)... 
+ anchor = ( + ' assert_eq!(u.len(), SOLVER_ERROR_NUM_DECISION_VARIABLES, ' + '"Wrong number of decision variables (u)");\n' + ) + # Replace the anchor with this so that if p[0] < 0, the function `solve` + # will reutrn an error of type SolverError::Cost + injected_guard = ( + anchor + + '\n' + ' if p[0] < 0.0 {\n' + ' return Err(SolverError::Cost("forced solver error for TCP test"));\n' + ' }\n' + ) + if anchor not in solver_lib: + raise RuntimeError("Could not inject deterministic solver error") + + with open(target_lib, "w", encoding="utf-8") as fh: + fh.write(solver_lib.replace(anchor, injected_guard, 1)) + + @classmethod + def setUpClass(cls): + cls.setUpPythonBindings() + cls.setUpRosPackageGeneration() + cls.setUpOnlyF1() + cls.setUpOnlyF2() + cls.setUpOnlyF2(is_preconditioned=True) + cls.setUpPlain() + cls.setUpOnlyParametricF2() + cls.setUpHalfspace() + cls.setUpSolverError() + + @staticmethod + def raw_tcp_request(ip, port, payload, buffer_size=4096): + with socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0) as conn_socket: + conn_socket.connect((ip, port)) + if isinstance(payload, str): + payload = payload.encode() + conn_socket.sendall(payload) + conn_socket.shutdown(socket.SHUT_WR) + + data = b'' + while True: + data_chunk = conn_socket.recv(buffer_size) + if not data_chunk: + break + data += data_chunk + + return json.loads(data.decode()) + + @staticmethod + def send_partial_tcp_payload_and_close(ip, port, payload): + with socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0) as conn_socket: + conn_socket.connect((ip, port)) + if isinstance(payload, str): + payload = payload.encode() + conn_socket.sendall(payload) + + @staticmethod + def import_generated_module(module_dir, module_name): + module_path = os.path.join(RustBuildTestCase.TEST_DIR, module_dir) + if module_path not in sys.path: + sys.path.insert(1, module_path) + importlib.invalidate_caches() + return importlib.import_module(module_name) + + def start_tcp_manager(self, manager): + 
manager.start() + self.addCleanup(manager.kill) # at the end, kill the TCP mngr + return manager + + def test_python_bindings(self): + python_bindings = RustBuildTestCase.import_generated_module( + "python_bindings", "python_bindings") + + solver = python_bindings.solver() + result = solver.run([1., 2.]) + self.assertTrue(result.is_ok()) + status = result.get() + self.assertEqual("Converged", status.exit_status) + self.assertIsNotNone(status.solution) + + def test_generated_ros_templates_allow_small_positive_initial_penalty(self): + ros_dir = os.path.join( + RustBuildTestCase.TEST_DIR, + "rosenbrock_ros", + "parametric_optimizer" + ) + with open(os.path.join(ros_dir, "src", "open_optimizer.cpp"), "r", encoding="utf-8") as fh: + optimizer_cpp = fh.read() + + self.assertIn("std::numeric_limits::epsilon()", optimizer_cpp) + self.assertIn( + "params.initial_penalty > std::numeric_limits::epsilon()", + optimizer_cpp + ) + + def test_generated_ros_result_message_uses_wide_iteration_counters(self): + ros_dir = os.path.join( + RustBuildTestCase.TEST_DIR, + "rosenbrock_ros", + "parametric_optimizer" + ) + with open(os.path.join(ros_dir, "msg", "OptimizationResult.msg"), "r", encoding="utf-8") as fh: + result_msg = fh.read() + + self.assertIn("uint64 inner_iterations", result_msg) + self.assertIn("uint64 outer_iterations", result_msg) + + def test_python_bindings_error_details(self): + python_bindings = RustBuildTestCase.import_generated_module( + "python_bindings", "python_bindings") + + solver = python_bindings.solver() + result = solver.run([1., 2., 3.]) + self.assertFalse(result.is_ok()) + error = result.get() + self.assertEqual(3003, error.code) + self.assertEqual( + "wrong number of parameters: provided 3, expected 2", + error.message + ) + + def test_python_bindings_initial_guess_error_details(self): + python_bindings = RustBuildTestCase.import_generated_module( + "python_bindings", "python_bindings") + + solver = python_bindings.solver() + result = solver.run([1., 
2.], initial_guess=[0.0]) + self.assertFalse(result.is_ok()) + error = result.get() + self.assertEqual(1600, error.code) + self.assertEqual( + "initial guess has incompatible dimensions: provided 1, expected 5", + error.message + ) + + def test_python_bindings_initial_lagrange_multipliers_error_details(self): + python_bindings = RustBuildTestCase.import_generated_module( + "python_bindings", "python_bindings") + + solver = python_bindings.solver() + result = solver.run([1., 2.], initial_lagrange_multipliers=[0.1]) + self.assertFalse(result.is_ok()) + error = result.get() + self.assertEqual(1700, error.code) + self.assertEqual( + "wrong dimension of Langrange multipliers: provided 1, expected 0", + error.message + ) + + def test_python_bindings_solver_error_details(self): + python_bindings = RustBuildTestCase.import_generated_module( + "python_bindings", "python_bindings") + + solver = python_bindings.solver() + result = solver.run([-1.0, 2.0]) + self.assertFalse(result.is_ok()) + error = result.get() + self.assertEqual(2000, error.code) + self.assertEqual( + "problem solution failed: cost or gradient evaluation failed: forced solver error for Python bindings test", + error.message + ) + + def test_python_bindings_repr(self): + python_bindings = RustBuildTestCase.import_generated_module( + "python_bindings", "python_bindings") + + solver = python_bindings.solver() + + ok_response = solver.run([1., 2.]) + self.assertIn("SolverResponse(ok=True", repr(ok_response)) + ok_status = ok_response.get() + self.assertIn("SolverStatus(", repr(ok_status)) + self.assertIn('exit_status="Converged"', repr(ok_status)) + + error_response = solver.run([1., 2., 3.]) + self.assertIn("SolverResponse(ok=False", repr(error_response)) + error = error_response.get() + self.assertIn("SolverError(", repr(error)) + self.assertIn("code=3003", repr(error)) + + def test_tcp_response_repr(self): + ok_response = og.tcp.SolverResponse({ + "exit_status": "Converged", + "num_outer_iterations": 2, + 
"num_inner_iterations": 7, + "last_problem_norm_fpr": 1e-6, + "delta_y_norm_over_c": 0.0, + "f2_norm": 0.0, + "solve_time_ms": 1.2, + "penalty": 10.0, + "solution": [0.1, 0.2], + "lagrange_multipliers": [], + "cost": 0.5, + }) + self.assertIn("SolverResponse(ok=True", repr(ok_response)) + self.assertIn("exit_status='Converged'", repr(ok_response)) + + error_response = og.tcp.SolverResponse({ + "type": "Error", + "code": 3003, + "message": "wrong number of parameters", + }) + self.assertIn("SolverResponse(ok=False", repr(error_response)) + self.assertIn("code=3003", repr(error_response)) + self.assertIn("SolverError(", repr(error_response.get())) + + def test_rectangle_empty(self): + xmin = [-1, 2] + xmax = [-2, 4] + with self.assertRaises(Exception) as __context: + og.constraints.Rectangle(xmin, xmax) + + def test_rectangle_incompatible_dimensions(self): + xmin = [-1, -1, 1] + xmax = [1, 1] + with self.assertRaises(Exception) as __context: + og.constraints.Rectangle(xmin, xmax) + + def test_rectangle_both_none(self): + with self.assertRaises(Exception) as __context: + og.constraints.Rectangle(None, None) + + def test_ball_negative_radius(self): + with self.assertRaises(Exception) as __context: + og.constraints.Ball2(None, -1) + + def test_solver_config_wrong_max_duration(self): + with self.assertRaises(Exception) as __context: + og.config.SolverConfiguration().with_max_duration_micros(0) + + def test_solver_config_wrong_update_factor(self): + with self.assertRaises(Exception) as __context: + og.config.SolverConfiguration().with_penalty_weight_update_factor(0.5) + + def test_solver_config_wrong_outer_iterations(self): + with self.assertRaises(Exception) as __context: + og.config.SolverConfiguration().with_max_outer_iterations(0) + + def test_solver_config_wrong_inner_iterations(self): + with self.assertRaises(Exception) as __context: + og.config.SolverConfiguration().with_max_inner_iterations(0) + + def test_solver_config_wrong_constraints_tolerance(self): + with 
self.assertRaises(Exception) as __context: + og.config.SolverConfiguration().with_delta_tolerance(0) + + def test_solver_config_wrong_inner_tolerance(self): + with self.assertRaises(Exception) as __context: + og.config.SolverConfiguration().with_tolerance(0) + + def test_solver_config_wrong_lbfgs_memory(self): + with self.assertRaises(Exception) as __context: + og.config.SolverConfiguration().with_lbfgs_memory(1) + + def test_solver_config_wrong_max_inner_iterations(self): + with self.assertRaises(Exception) as __context: + og.config.SolverConfiguration().with_max_inner_iterations() + + def test_start_multiple_servers(self): + all_managers = [] + for i in range(10): + all_managers += [og.tcp.OptimizerTcpManager( + optimizer_path=RustBuildTestCase.TEST_DIR + '/only_f1', + ip='0.0.0.0', + port=15311+i)] + + # Start all servers + for m in all_managers: + self.start_tcp_manager(m) + + # Ping all + for m in all_managers: + m.ping() + + def test_rust_build_only_f1(self): + # Start the server using a custom bind IP and port + mng = self.start_tcp_manager( + og.tcp.OptimizerTcpManager(RustBuildTestCase.TEST_DIR + '/only_f1', + ip='0.0.0.0', + port=13757)) + pong = mng.ping() # check if the server is alive + self.assertEqual(1, pong["Pong"]) + + # Regular call + response = mng.call(p=[2.0, 10.0]) + self.assertEqual("Converged", response["exit_status"]) + + # Call with initial params, initial y and initial penalty param + response = mng.call(p=[2.0, 10.0], + initial_guess=response["solution"], + initial_y=response["lagrange_multipliers"], + initial_penalty=response["penalty"]) + self.assertTrue(response.is_ok()) + status = response.get() + self.assertEqual(2, status.num_outer_iterations) + + response = mng.call(p=[2.0, 10.0, 50.0]) + status = response.get() + self.assertFalse(response.is_ok()) + self.assertEqual(True, isinstance(status, og.tcp.SolverError)) + self.assertEqual(3003, status.code) + self.assertEqual( + "wrong number of parameters: provided 3, expected 2", + 
status.message) + + response = mng.call(p=[2.0, 10.0], initial_guess=[0.1, 0.2]) + self.assertFalse(response.is_ok()) + status = response.get() + self.assertEqual(True, isinstance(status, og.tcp.SolverError)) + self.assertEqual(1600, status.code) + self.assertEqual( + "initial guess has incompatible dimensions: provided 2, expected 5", + status.message) + + response = mng.call(p=[2.0, 10.0], initial_y=[0.1]) + status = response.get() + self.assertFalse(response.is_ok()) + self.assertEqual(True, isinstance(status, og.tcp.SolverError)) + self.assertEqual(1700, status.code) + self.assertEqual( + "wrong dimension of Langrange multipliers: provided 1, expected 2", + status.message) + + def test_rust_build_only_f2_preconditioned(self): + mng1 = self.start_tcp_manager(og.tcp.OptimizerTcpManager( + RustBuildTestCase.TEST_DIR + '/only_f2')) + mng2 = self.start_tcp_manager(og.tcp.OptimizerTcpManager( + RustBuildTestCase.TEST_DIR + '/only_f2_precond')) + + response1 = mng1.call(p=[0.5, 8.5], initial_guess=[ + 1, 2, 3, 4, 0]).get() + response2 = mng2.call(p=[0.5, 8.5], initial_guess=[ + 1, 2, 3, 4, 0]).get() + + self.assertEqual("Converged", response1.exit_status) + self.assertEqual("Converged", response2.exit_status) + + # Further testing + slv_cfg = RustBuildTestCase.solverConfig() + # check that the solution is (near-) feasible + self.assertTrue(response1.f2_norm < slv_cfg.constraints_tolerance) + self.assertTrue(response2.f2_norm < slv_cfg.constraints_tolerance) + # check the nrom of the FPR + self.assertTrue(response1.last_problem_norm_fpr < + slv_cfg.tolerance) + self.assertTrue(response2.last_problem_norm_fpr < + slv_cfg.tolerance) + # compare the costs + self.assertAlmostEqual(response1.cost, response2.cost, 4) + + x1, x2 = response1.solution, response2.solution + for i in range(len(x1)): + self.assertAlmostEqual(x1[i], x2[i], delta=5e-4) + + response = mng1.call(p=[2.0, 10.0, 50.0]) + self.assertFalse(response.is_ok()) + status = response.get() + 
self.assertEqual(True, isinstance(status, og.tcp.SolverError)) + self.assertEqual(3003, status.code) + self.assertEqual( + "wrong number of parameters: provided 3, expected 2", + status.message) + + response = mng1.call(p=[2.0, 10.0], initial_guess=[0.1, 0.2]) + self.assertFalse(response.is_ok()) + status = response.get() + self.assertEqual(True, isinstance(status, og.tcp.SolverError)) + self.assertEqual(1600, status.code) + self.assertEqual( + "initial guess has incompatible dimensions: provided 2, expected 5", + status.message) + + response = mng1.call(p=[2.0, 10.0], initial_y=[0.1]) + self.assertFalse(response.is_ok()) + status = response.get() + self.assertEqual(True, isinstance(status, og.tcp.SolverError)) + self.assertEqual(1700, status.code) + self.assertEqual( + "wrong dimension of Langrange multipliers: provided 1, expected 0", + status.message) + + def test_rust_build_plain(self): + mng = self.start_tcp_manager( + og.tcp.OptimizerTcpManager(RustBuildTestCase.TEST_DIR + '/plain')) + pong = mng.ping() # check if the server is alive + self.assertEqual(1, pong["Pong"]) + + # Regular call + response = mng.call(p=[2.0, 10.0]) + self.assertTrue(response.is_ok()) + status = response.get() + self.assertEqual("Converged", status.exit_status) + + def test_rust_build_plain_invalid_request_details(self): + self.start_tcp_manager(og.tcp.OptimizerTcpManager( + RustBuildTestCase.TEST_DIR + '/plain', + ip='127.0.0.1', + port=13758)) + + malformed_response = og.tcp.SolverResponse( + RustBuildTestCase.raw_tcp_request('127.0.0.1', 13758, '{"Run":')) + self.assertFalse(malformed_response.is_ok()) + malformed_status = malformed_response.get() + self.assertEqual(1000, malformed_status.code) + self.assertTrue( + malformed_status.message.startswith("invalid request:")) + self.assertIn("line 1 column", malformed_status.message) + + utf8_response = og.tcp.SolverResponse( + RustBuildTestCase.raw_tcp_request('127.0.0.1', 13758, b'\xff\xfe')) + self.assertFalse(utf8_response.is_ok()) 
+ utf8_status = utf8_response.get() + self.assertEqual(1000, utf8_status.code) + self.assertTrue( + utf8_status.message.startswith( + "invalid request: request body is not valid UTF-8")) + + def test_rust_build_plain_survives_invalid_request(self): + mng = self.start_tcp_manager(og.tcp.OptimizerTcpManager( + RustBuildTestCase.TEST_DIR + '/plain', + ip='127.0.0.1', + port=13759)) + + malformed_response = og.tcp.SolverResponse( + RustBuildTestCase.raw_tcp_request('127.0.0.1', 13759, '{"Run":')) + self.assertFalse(malformed_response.is_ok()) + malformed_status = malformed_response.get() + self.assertEqual(1000, malformed_status.code) + + pong = mng.ping() + self.assertEqual(1, pong["Pong"]) + + response = mng.call(p=[2.0, 10.0]) + self.assertTrue(response.is_ok()) + self.assertEqual("Converged", response.get().exit_status) + + def test_rust_build_plain_survives_disconnect_mid_request(self): + mng = self.start_tcp_manager(og.tcp.OptimizerTcpManager( + RustBuildTestCase.TEST_DIR + '/plain', + ip='127.0.0.1', + port=13760)) + + RustBuildTestCase.send_partial_tcp_payload_and_close( + '127.0.0.1', + 13760, + '{"Run":{"parameter":[2.0' + ) + + pong = mng.ping() + self.assertEqual(1, pong["Pong"]) + + response = mng.call(p=[2.0, 10.0]) + self.assertTrue(response.is_ok()) + self.assertEqual("Converged", response.get().exit_status) + + def test_rust_build_solver_error_details(self): + mng = self.start_tcp_manager(og.tcp.OptimizerTcpManager( + RustBuildTestCase.TEST_DIR + '/solver_error')) + + response = mng.call(p=[-1.0]) + self.assertFalse(response.is_ok()) + status = response.get() + self.assertEqual(2000, status.code) + self.assertEqual( + "problem solution failed: cost or gradient evaluation failed: forced solver error for TCP test", + status.message) + + def test_rust_build_parametric_f2(self): + # introduced to tackle issue #123 + mng = self.start_tcp_manager(og.tcp.OptimizerTcpManager( + RustBuildTestCase.TEST_DIR + '/parametric_f2')) + pong = mng.ping() # check if the 
server is alive + self.assertEqual(1, pong["Pong"]) + + # Regular call + response = mng.call(p=[1.0, 1.0, 0.5]) + self.assertTrue(response.is_ok()) + status = response.get() + self.assertEqual("Converged", status.exit_status) + self.assertTrue(status.f2_norm < 1e-4) + + def test_rust_build_parametric_halfspace(self): + mng = self.start_tcp_manager(og.tcp.OptimizerTcpManager( + RustBuildTestCase.TEST_DIR + '/halfspace_optimizer')) + pong = mng.ping() # check if the server is alive + self.assertEqual(1, pong["Pong"]) + + # Regular call + response = mng.call(p=[1.0, 1.0]) + self.assertTrue(response.is_ok()) + status = response.get() + self.assertEqual("Converged", status.exit_status) + u = status.solution + c = [1., 2., 1., 5., 2.] + b = -10.39 + eps = 1e-14 + self.assertTrue(sum([u[i] * c[i] for i in range(5)]) - b <= eps) + self.assertTrue(-sum([u[i] * c[i] for i in range(5)]) + b <= eps) + + @staticmethod + def rebuild_generated_staticlib(optimizer_name): + optimizer_dir = os.path.join(RustBuildTestCase.TEST_DIR, optimizer_name) + process = subprocess.Popen( + ["cargo", "build"], + cwd=optimizer_dir, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + ) + _stdout, stderr = process.communicate() + return process.returncode, stderr.decode() + + @staticmethod + def c_bindings_helper(optimizer_name): + if sys.platform == "win32": + result = RustBuildTestCase.c_bindings_cmake_helper( + optimizer_name=optimizer_name, + build_dir_name="cmake-build-run") + compile_stdout = result["configure_stdout"] + result["build_stdout"] + compile_stderr = result["configure_stderr"] + result["build_stderr"] + compile_returncode = ( + result["configure_returncode"] + if result["configure_returncode"] != 0 + else result["build_returncode"] + ) + + run_stdout = "" + run_stderr = "" + run_returncode = None + if compile_returncode == 0: + executable_candidates = [ + os.path.join(result["build_dir"], "Debug", "optimizer.exe"), + os.path.join(result["build_dir"], "optimizer.exe"), + 
os.path.join(result["build_dir"], "Debug", "optimizer"), + os.path.join(result["build_dir"], "optimizer"), + ] + executable_path = next( + (candidate for candidate in executable_candidates if os.path.exists(candidate)), + None, + ) + if executable_path is None: + raise RuntimeError("Could not locate built optimizer executable") + + run_process = subprocess.Popen( + [executable_path], + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + ) + run_stdout_bytes, run_stderr_bytes = run_process.communicate() + run_stdout = run_stdout_bytes.decode() + run_stderr = run_stderr_bytes.decode() + run_returncode = run_process.returncode + + return { + "compile_returncode": compile_returncode, + "compile_stdout": compile_stdout, + "compile_stderr": compile_stderr, + "run_returncode": run_returncode, + "run_stdout": run_stdout, + "run_stderr": run_stderr, + } + + compile_process = subprocess.Popen( + ["/usr/bin/gcc", + RustBuildTestCase.TEST_DIR + "/" + optimizer_name + "/example_optimizer.c", + "-I" + RustBuildTestCase.TEST_DIR + "/" + optimizer_name, + "-pthread", + RustBuildTestCase.TEST_DIR + "/" + optimizer_name + + "/target/debug/lib" + optimizer_name + ".a", + "-lm", + "-ldl", + "-std=c99", + "-o", + RustBuildTestCase.TEST_DIR + "/" + optimizer_name + "/optimizer"], + stdout=subprocess.PIPE, + stderr=subprocess.PIPE) + + compile_stdout, compile_stderr = compile_process.communicate() + + run_stdout = b"" + run_stderr = b"" + run_returncode = None + if compile_process.returncode == 0: + run_process = subprocess.Popen( + [RustBuildTestCase.TEST_DIR + "/" + optimizer_name + "/optimizer"], + stdout=subprocess.PIPE, + stderr=subprocess.PIPE) + run_stdout, run_stderr = run_process.communicate() + run_returncode = run_process.returncode + + return { + "compile_returncode": compile_process.returncode, + "compile_stdout": compile_stdout.decode(), + "compile_stderr": compile_stderr.decode(), + "run_returncode": run_returncode, + "run_stdout": run_stdout.decode(), + "run_stderr": 
run_stderr.decode(), + } + + @staticmethod + def patch_c_bindings_example_parameter_initializer(optimizer_name, replacement_line): + example_file = os.path.join( + RustBuildTestCase.TEST_DIR, optimizer_name, "example_optimizer.c") + with open(example_file, "r", encoding="utf-8") as fh: + example_source = fh.read() + + original_line = None + for line in example_source.splitlines(): + if "double p[" in line and "= {" in line: + original_line = line + break + + if original_line is None: + raise RuntimeError("Could not locate parameter initializer in example_optimizer.c") + + with open(example_file, "w", encoding="utf-8") as fh: + fh.write(example_source.replace(original_line, replacement_line, 1)) + + return original_line + + @staticmethod + def c_bindings_cmake_helper(optimizer_name, build_dir_name="cmake-build-test"): + cmake_executable = shutil.which("cmake") + if cmake_executable is None: + raise unittest.SkipTest("cmake is not available in PATH") + + optimizer_dir = os.path.join(RustBuildTestCase.TEST_DIR, optimizer_name) + build_dir = os.path.join(optimizer_dir, build_dir_name) + if os.path.isdir(build_dir): + shutil.rmtree(build_dir) + os.makedirs(build_dir) + + configure_process = subprocess.Popen( + [cmake_executable, ".."], + cwd=build_dir, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + ) + configure_stdout, configure_stderr = configure_process.communicate() + + build_stdout = b"" + build_stderr = b"" + build_returncode = None + if configure_process.returncode == 0: + build_command = [cmake_executable, "--build", "."] + if sys.platform == "win32": + build_command.extend(["--config", "Debug"]) + build_process = subprocess.Popen( + build_command, + cwd=build_dir, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + ) + build_stdout, build_stderr = build_process.communicate() + build_returncode = build_process.returncode + + return { + "configure_returncode": configure_process.returncode, + "configure_stdout": configure_stdout.decode(), + 
"configure_stderr": configure_stderr.decode(), + "build_returncode": build_returncode, + "build_stdout": build_stdout.decode(), + "build_stderr": build_stderr.decode(), + "build_dir": build_dir, + } + + def test_c_bindings(self): + result = RustBuildTestCase.c_bindings_helper(optimizer_name="only_f1") + self.assertEqual( + 0, + result["compile_returncode"], + msg=result["compile_stdout"] + result["compile_stderr"]) + self.assertEqual(0, result["run_returncode"], msg=result["run_stdout"] + result["run_stderr"]) + self.assertIn("Converged", result["run_stdout"]) + self.assertIn("exit status : 0", result["run_stdout"]) + self.assertIn("error code : 0", result["run_stdout"]) + + result = RustBuildTestCase.c_bindings_helper(optimizer_name="only_f2") + self.assertEqual( + 0, + result["compile_returncode"], + msg=result["compile_stdout"] + result["compile_stderr"]) + self.assertIn("Converged", result["run_stdout"]) + self.assertEqual(0, result["run_returncode"], msg=result["run_stdout"] + result["run_stderr"]) + self.assertIn("exit status : 0", result["run_stdout"]) + self.assertIn("error code : 0", result["run_stdout"]) + + result = RustBuildTestCase.c_bindings_helper(optimizer_name="plain") + self.assertIn("Converged", result["run_stdout"]) + self.assertEqual( + 0, + result["compile_returncode"], + msg=result["compile_stdout"] + result["compile_stderr"]) + self.assertEqual(0, result["run_returncode"], msg=result["run_stdout"] + result["run_stderr"]) + self.assertIn("exit status : 0", result["run_stdout"]) + self.assertIn("error code : 0", result["run_stdout"]) + + def test_c_bindings_error_path(self): + rebuild_rc, rebuild_stderr = RustBuildTestCase.rebuild_generated_staticlib( + optimizer_name="solver_error") + self.assertEqual(0, rebuild_rc, msg=rebuild_stderr) + + original_line = RustBuildTestCase.patch_c_bindings_example_parameter_initializer( + optimizer_name="solver_error", + replacement_line=" double p[SOLVER_ERROR_NUM_PARAMETERS] = {-1.0};") + try: + result = 
RustBuildTestCase.c_bindings_helper(optimizer_name="solver_error") + finally: + RustBuildTestCase.patch_c_bindings_example_parameter_initializer( + optimizer_name="solver_error", + replacement_line=original_line) + self.assertEqual( + 0, + result["compile_returncode"], + msg=result["compile_stdout"] + result["compile_stderr"]) + self.assertNotEqual(0, result["run_returncode"]) + self.assertIn("error code : 2000", result["run_stdout"]) + self.assertIn("forced solver error for TCP test", result["run_stdout"]) + self.assertIn( + "Solver returned an error; solution vector is not printed.", + result["run_stderr"]) + + def test_c_bindings_cmake_example_builds(self): + result = RustBuildTestCase.c_bindings_cmake_helper(optimizer_name="plain") + self.assertEqual( + 0, + result["configure_returncode"], + msg=result["configure_stdout"] + result["configure_stderr"]) + self.assertEqual( + 0, + result["build_returncode"], + msg=result["build_stdout"] + result["build_stderr"]) + + def test_tcp_generated_server_builds(self): + tcp_iface_dir = os.path.join( + RustBuildTestCase.TEST_DIR, "plain", "tcp_iface_plain") + process = subprocess.Popen( + ["cargo", "build", "--quiet"], + cwd=tcp_iface_dir, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + ) + _stdout, stderr = process.communicate() + + self.assertEqual( + 0, + process.returncode, + msg=stderr.decode() + ) + + def test_tcp_manager_start_fails_cleanly_when_port_is_in_use(self): + mng1 = self.start_tcp_manager(og.tcp.OptimizerTcpManager( + RustBuildTestCase.TEST_DIR + '/plain', + ip='127.0.0.1', + port=13761)) + + mng2 = og.tcp.OptimizerTcpManager( + RustBuildTestCase.TEST_DIR + '/only_f1', + ip='127.0.0.1', + port=13761) + with self.assertRaises(Exception) as context: + mng2.start() + + self.assertIn("Port 13761 not available", str(context.exception)) + + pong = mng1.ping() + self.assertEqual(1, pong["Pong"]) + + def test_tcp_manager_remote_cannot_start(self): + remote_tcp_manager = og.tcp.OptimizerTcpManager( + 
ip='10.8.0.1', port=3345) + with self.assertRaises(Exception) as __context: + remote_tcp_manager.start() + + def test_tcp_manager_remote_ip_no_port(self): + with self.assertRaises(Exception) as __context: + _remote_tcp_manager = og.tcp.OptimizerTcpManager(ip='10.8.0.1') + + def test_tcp_manager_remote_port_no_ip(self): + with self.assertRaises(Exception) as __context: + _remote_tcp_manager = og.tcp.OptimizerTcpManager(port=8888) + + def test_set_y(self): + c = og.constraints.Ball2(radius=1) + y_calc = og.builder.SetYCalculator(c) + y = y_calc.obtain() + + def test_squared_norm(self): + u = np.array([3, 4]) + y = og.functions.norm2_squared(u) + self.assertAlmostEqual(25., y, places=12) + + u = [3, 4] + y = og.functions.norm2_squared(u) + self.assertAlmostEqual(25., y, places=12) + + u = cs.SX.sym("u", 2) + f = og.functions.norm2_squared(u) + fun = cs.Function('fun', [u], [f]) + y = fun([3, 4]) + self.assertAlmostEqual(25., y, places=12) + + def test_optimizer_meta_valid_version(self): + meta = og.config.OptimizerMeta().with_version("1.2.3-alpha.1+build.5") + self.assertEqual("1.2.3-alpha.1+build.5", meta.version) + + def test_optimizer_meta_invalid_version1(self): + with self.assertRaises(ValueError) as context: + og.config.OptimizerMeta().with_version("^1.2") + + self.assertIn("Cargo package version", str(context.exception)) + + def test_optimizer_meta_invalid_version2(self): + with self.assertRaises(ValueError) as context: + og.config.OptimizerMeta().with_version("0.1") + + self.assertIn("Cargo package version", str(context.exception)) + +if __name__ == '__main__': + logging.getLogger('retry').setLevel(logging.ERROR) + unittest.main() diff --git a/open-codegen/test/test_2_solvers.c b/python/test/test_2_solvers.c similarity index 100% rename from open-codegen/test/test_2_solvers.c rename to python/test/test_2_solvers.c diff --git a/open-codegen/test/test_constraints.py b/python/test/test_constraints.py similarity index 88% rename from 
open-codegen/test/test_constraints.py rename to python/test/test_constraints.py index 8cff3f6c..2f58acc1 100644 --- a/open-codegen/test/test_constraints.py +++ b/python/test/test_constraints.py @@ -11,7 +11,7 @@ class ConstraintsTestCase(unittest.TestCase): # Infinity Ball # ----------------------------------------------------------------------- - def test_ball_inf_origin(self): + def test_ball_inf_origin(self): ball = og.constraints.BallInf(None, 1) x = np.array([3, 2]) x_sym = cs.SX.sym("x", 2) @@ -58,6 +58,13 @@ def test_ball_inf_origin_compact(self): ball = og.constraints.BallInf() self.assertTrue(ball.is_compact()) + def test_dimension_ballInf(self): + ball = og.constraints.BallInf() + self.assertIsNone(ball.dimension()) + + ball = og.constraints.BallInf(center=[1., 2., -3.]) + self.assertEqual(3, ball.dimension()) + # ----------------------------------------------------------------------- # Euclidean Ball # ----------------------------------------------------------------------- @@ -122,6 +129,13 @@ def test_ball_euclidean_origin_compact(self): ball = og.constraints.Ball2() self.assertTrue(ball.is_compact()) + def test_dimension_ball2(self): + ball = og.constraints.Ball2() + self.assertIsNone(ball.dimension()) + + ball = og.constraints.Ball2(center=[1., 2., -3.]) + self.assertEqual(3, ball.dimension()) + # ----------------------------------------------------------------------- # Rectangle # ----------------------------------------------------------------------- @@ -194,6 +208,14 @@ def test_rectangle_is_orthant(self): self.assertFalse(rect.is_orthant()) rect = og.constraints.Rectangle([-1.0, float('-inf')], [10.0, 3.0]) self.assertFalse(rect.is_orthant()) + + def test_rectangle_dimension(self): + rec_only_xmin = og.constraints.Rectangle(xmin=[1]) + rec_only_xmax = og.constraints.Rectangle(xmax=[5, 6]) + rec_xmin_and_xmax = og.constraints.Rectangle(xmin=[0, -1, 2], xmax=[1, 0, 2]) + self.assertEqual(1, rec_only_xmin.dimension()) + self.assertEqual(2, 
rec_only_xmax.dimension()) + self.assertEqual(3, rec_xmin_and_xmax.dimension()) # ----------------------------------------------------------------------- # Second-Order Cone (SOC) @@ -264,6 +286,10 @@ def test_second_order_cone_convex(self): def test_second_order_cone_convex(self): soc = og.constraints.SecondOrderCone() self.assertFalse(soc.is_compact()) + + def test_soc_dimension(self): + soc = og.constraints.SecondOrderCone() + self.assertIsNone(soc.dimension()) # ----------------------------------------------------------------------- # No Constraints @@ -282,6 +308,10 @@ def test_no_constraints_convex(self): def test_no_constraints_compact(self): whole_rn = og.constraints.NoConstraints() self.assertFalse(whole_rn.is_compact()) + + def test_no_constraints_dimension(self): + whole_rn = og.constraints.NoConstraints() + self.assertIsNone(whole_rn.dimension()) # ----------------------------------------------------------------------- # Cartesian product of constraints @@ -345,6 +375,20 @@ def test_cartesian_segments_empty_args(self): with self.assertRaises(ValueError) as __context: og.constraints.CartesianProduct([], sets) + def test_cartesian_segment_ids_are_inclusive_last_indices(self): + cartesian = og.constraints.CartesianProduct( + [0, 2, 5], + [ + og.constraints.NoConstraints(), + og.constraints.NoConstraints(), + og.constraints.NoConstraints(), + ], + ) + + self.assertEqual(1, cartesian.segment_dimension(0)) + self.assertEqual(2, cartesian.segment_dimension(1)) + self.assertEqual(3, cartesian.segment_dimension(2)) + def test_cartesian_convex(self): ball_inf = og.constraints.BallInf(None, 1) ball_eucl = og.constraints.Ball2(None, 1) @@ -363,6 +407,18 @@ def test_cartesian_convex(self): [5, 10, 11], [ball_inf, ball_eucl, free]) self.assertFalse(cartesian.is_compact()) + def test_cartesian_dimension(self): + inf = float('inf') + ball_inf = og.constraints.BallInf(None, 1) + ball_eucl = og.constraints.Ball2(None, 1) + rect = og.constraints.Rectangle(xmin=[0.0, 1.0, 
-inf, 2.0], + xmax=[1.0, inf, 10.0, 10.0]) + cartesian = og.constraints.CartesianProduct( + [1, 4, 8], [ball_inf, ball_eucl, rect]) + self.assertEqual(9, cartesian.dimension()) + + + # ----------------------------------------------------------------------- # Finite Set # ----------------------------------------------------------------------- @@ -462,6 +518,10 @@ def test_simplex_projection_random_optimality(self): self.assertLessEqual( np.dot(x-x_star, z-x_star), 1e-10, "Simplex optimality conditions failed") + def test_simplex_dimension(self): + simplex = og.constraints.Simplex(alpha=1.0) + self.assertIsNone(simplex.dimension()) + # ----------------------------------------------------------------------- # Ball1 # ----------------------------------------------------------------------- @@ -517,6 +577,13 @@ def test_ball1_project_random_points_center(self): self.assertLessEqual( np.dot(e-x_star, x-x_star), 1e-10, "Ball1 optimality conditions failed (2)") + def test_dimension_ball1(self): + ball = og.constraints.Ball1() + self.assertIsNone(ball.dimension()) + + ball = og.constraints.Ball1(center=[1., 2., -3.]) + self.assertEqual(3, ball.dimension()) + # ----------------------------------------------------------------------- # Sphere2 # ----------------------------------------------------------------------- @@ -541,8 +608,23 @@ def test_sphere2_no_center(self): sphere = og.constraints.Sphere2(radius=0.5) u = [0, 0, 0, 0] dist = sphere.distance_squared(u) - self.assertAlmostEqual(0.25, dist, places=12) + self.assertAlmostEqual(0.25, dist, places=12) + + def test_dimension_sphere2(self): + sphere = og.constraints.Sphere2() + self.assertIsNone(sphere.dimension()) + + sphere = og.constraints.Sphere2(center=[1., 2., -3.]) + self.assertEqual(3, sphere.dimension()) + + + # ----------------------------------------------------------------------- + # Zero + # ----------------------------------------------------------------------- + def test_zero_dimension(self): + z = 
og.constraints.Zero() + self.assertIsNone(z.dimension()) if __name__ == '__main__': unittest.main() diff --git a/open-codegen/test/test_ocp.py b/python/test/test_ocp.py similarity index 90% rename from open-codegen/test/test_ocp.py rename to python/test/test_ocp.py index 5407fbd8..a6459dbc 100644 --- a/open-codegen/test/test_ocp.py +++ b/python/test/test_ocp.py @@ -1,5 +1,7 @@ import json import os +import sys +import tempfile import unittest import casadi.casadi as cs @@ -21,10 +23,31 @@ def __init__(self, solution): self.lagrange_multipliers = [] +class DummySolverError: + def __init__(self, code, message): + self.code = code + self.message = message + + def __str__(self): + return self.message + + +class DummySolverResponse: + def __init__(self, payload): + self._payload = payload + + def is_ok(self): + return isinstance(self._payload, DummySolverStatus) + + def get(self): + return self._payload + + class DummyDirectSolver: def __init__(self, solution): self.solution = solution self.last_call = None + self.error = None def run(self, p, initial_guess=None, initial_lagrange_multipliers=None, initial_penalty=None): self.last_call = { @@ -33,15 +56,20 @@ def run(self, p, initial_guess=None, initial_lagrange_multipliers=None, initial_ "initial_lagrange_multipliers": initial_lagrange_multipliers, "initial_penalty": initial_penalty, } - return DummySolverStatus(self.solution) + if self.error is not None: + return DummySolverResponse(self.error) + return DummySolverResponse(DummySolverStatus(self.solution)) class OcpTestCase(unittest.TestCase): - TEST_DIR = ".python_test_build_ocp" + if sys.platform == "win32": + TEST_DIR = os.path.join(tempfile.gettempdir(), "og_ocp") + else: + TEST_DIR = ".python_test_build_ocp" @staticmethod def get_open_local_absolute_path(): - return os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..")) + return os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..", "rust")) @classmethod def setUpOCP1(cls): @@ -517,6 
+545,72 @@ def test_generated_optimizer_defaults(self): self.assertEqual(result.f1_infeasibility, 2e-5) self.assertEqual(result.f2_norm, 3e-5) + def test_generated_optimizer_direct_error_response(self): + ocp = self.make_ocp() + backend = DummyDirectSolver(solution=[0.1, 0.2, 0.3]) + backend.error = DummySolverError(3003, "wrong number of parameters") + optimizer = og.ocp.GeneratedOptimizer( + ocp=ocp, + optimizer_name="dummy_error", + target_dir=".", + backend=backend, + backend_kind="direct", + ) + + with self.assertRaises(RuntimeError) as context: + optimizer.solve(x0=[0.0, 0.0]) + + self.assertIn("wrong number of parameters", str(context.exception)) + + def test_generated_optimizer_missing_required_parameter(self): + loaded_optimizer = og.ocp.GeneratedOptimizer.load(self.ocp1_manifest_path) + + with self.assertRaises(ValueError) as context: + loaded_optimizer.solve(xref=[0.0, 0.0]) + + self.assertIn("missing values for parameters: x0", str(context.exception)) + + def test_generated_optimizer_parameter_dimension_mismatch_at_solve(self): + loaded_optimizer = og.ocp.GeneratedOptimizer.load(self.ocp1_manifest_path) + + with self.assertRaises(ValueError) as context: + loaded_optimizer.solve(x0=[1.0]) + + self.assertIn("parameter 'x0' has incompatible dimension", str(context.exception)) + + def test_generated_optimizer_direct_invalid_initial_guess(self): + with self.assertRaises(RuntimeError) as context: + self.ocp1_optimizer.solve( + x0=[1.0, 0.0], + initial_guess=[0.0], + ) + + self.assertIn("initial guess has incompatible dimensions", str(context.exception)) + + def test_generated_optimizer_tcp_invalid_initial_guess(self): + optimizer = og.ocp.GeneratedOptimizer.load(self.ocp2_single_manifest_path) + + try: + with self.assertRaises(RuntimeError) as context: + optimizer.solve( + x0=[1.0, -1.0], + xref=[0.0, 0.0], + initial_guess=[0.0], + ) + + self.assertIn("initial guess has incompatible dimensions", str(context.exception)) + finally: + optimizer.kill() + + def 
test_generated_optimizer_repr(self): + optimizer = og.ocp.GeneratedOptimizer.load(self.ocp1_manifest_path) + optimizer_repr = repr(optimizer) + + self.assertIn("GeneratedOptimizer(", optimizer_repr) + self.assertIn("optimizer_name='ocp_manifest_bindings'", optimizer_repr) + self.assertIn("backend_kind='direct'", optimizer_repr) + self.assertIn("shooting='single'", optimizer_repr) + def test_optimizer_manifest_roundtrip(self): manifest_path = self.ocp1_manifest_path rollout_path = self.ocp1_rollout_path diff --git a/open-codegen/test/test_raspberry_pi.py b/python/test/test_raspberry_pi.py similarity index 95% rename from open-codegen/test/test_raspberry_pi.py rename to python/test/test_raspberry_pi.py index f040f95f..5cd1e29f 100644 --- a/open-codegen/test/test_raspberry_pi.py +++ b/python/test/test_raspberry_pi.py @@ -10,8 +10,7 @@ class RaspberryPiTest(unittest.TestCase): @staticmethod def get_open_local_absolute_path(): - cwd = os.getcwd() - return cwd.split('open-codegen')[0] + return os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..", "rust")) # ----------------------------------------------------------------------- # Cross-compile to Raspberry Pi diff --git a/python/test/test_ros2.py b/python/test/test_ros2.py new file mode 100644 index 00000000..61cc004e --- /dev/null +++ b/python/test/test_ros2.py @@ -0,0 +1,669 @@ +import logging +import os +import re +import shlex +import signal +import shutil +import subprocess +import sys +import time +import unittest + +import casadi.casadi as cs +import opengen as og + + +class BuildConfigurationRos2TestCase(unittest.TestCase): + """Unit tests for ROS2-specific build configuration behavior.""" + + def test_with_ros2_sets_ros2_config_and_enables_c_bindings(self): + """`with_ros2` should store the ROS2 config and enable C bindings.""" + ros2_config = og.config.RosConfiguration().with_package_name("unit_test_ros2_pkg") + build_config = og.config.BuildConfiguration().with_ros2(ros2_config) + + 
self.assertIs(build_config.ros2_config, ros2_config) + self.assertIsNone(build_config.ros_config) + self.assertTrue(build_config.build_c_bindings) + + build_dict = build_config.to_dict() + self.assertIn("ros2_config", build_dict) + self.assertNotIn("ros_config", build_dict) + self.assertEqual("unit_test_ros2_pkg", build_dict["ros2_config"]["package_name"]) + + def test_ros_and_ros2_configs_clear_each_other(self): + """Selecting ROS1 or ROS2 should clear the other package configuration.""" + ros1_config = og.config.RosConfiguration().with_package_name("unit_test_ros_pkg") + ros2_config = og.config.RosConfiguration().with_package_name("unit_test_ros2_pkg") + build_config = og.config.BuildConfiguration() + + build_config.with_ros2(ros2_config) + self.assertIs(build_config.ros2_config, ros2_config) + self.assertIsNone(build_config.ros_config) + + build_config.with_ros(ros1_config) + self.assertIs(build_config.ros_config, ros1_config) + self.assertIsNone(build_config.ros2_config) + + build_config.with_ros2(ros2_config) + self.assertIs(build_config.ros2_config, ros2_config) + self.assertIsNone(build_config.ros_config) + +class Ros2TemplateCustomizationTestCase(unittest.TestCase): + """Generation tests for custom ROS2 configuration values.""" + + TEST_DIR = ".python_test_build" + OPTIMIZER_NAME = "rosenbrock_ros2_custom" + PACKAGE_NAME = "custom_parametric_optimizer_ros2" + NODE_NAME = "custom_open_node_ros2" + DESCRIPTION = "custom ROS2 package for generation tests" + RESULT_TOPIC = "custom_result_topic" + PARAMS_TOPIC = "custom_params_topic" + RATE = 17.5 + RESULT_QUEUE_SIZE = 11 + PARAMS_QUEUE_SIZE = 13 + + @staticmethod + def get_open_local_absolute_path(): + """Return the absolute path to the local OpEn repository root.""" + return os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..", "rust")) + + @classmethod + def solverConfig(cls): + """Return a solver configuration shared by the ROS2 generation tests.""" + return Ros2BuildTestCase.solverConfig() + + 
@classmethod + def setUpCustomRos2PackageGeneration(cls): + """Generate a ROS2 package with non-default configuration values.""" + u = cs.MX.sym("u", 5) + p = cs.MX.sym("p", 2) + phi = og.functions.rosenbrock(u, p) + c = cs.vertcat(1.5 * u[0] - u[1], + cs.fmax(0.0, u[2] - u[3] + 0.1)) + bounds = og.constraints.Ball2(None, 1.5) + meta = og.config.OptimizerMeta() \ + .with_optimizer_name(cls.OPTIMIZER_NAME) + problem = og.builder.Problem(u, p, phi) \ + .with_constraints(bounds) \ + .with_penalty_constraints(c) + ros_config = og.config.RosConfiguration() \ + .with_package_name(cls.PACKAGE_NAME) \ + .with_node_name(cls.NODE_NAME) \ + .with_description(cls.DESCRIPTION) \ + .with_rate(cls.RATE) \ + .with_queue_sizes(cls.RESULT_QUEUE_SIZE, cls.PARAMS_QUEUE_SIZE) \ + .with_publisher_subtopic(cls.RESULT_TOPIC) \ + .with_subscriber_subtopic(cls.PARAMS_TOPIC) + build_config = og.config.BuildConfiguration() \ + .with_open_version(local_path=cls.get_open_local_absolute_path()) \ + .with_build_directory(cls.TEST_DIR) \ + .with_build_mode(og.config.BuildConfiguration.DEBUG_MODE) \ + .with_build_c_bindings() \ + .with_ros2(ros_config) + og.builder.OpEnOptimizerBuilder(problem, + metadata=meta, + build_configuration=build_config, + solver_configuration=cls.solverConfig()) \ + .build() + + @classmethod + def setUpClass(cls): + """Generate the custom ROS2 package once before running tests.""" + cls.setUpCustomRos2PackageGeneration() + + @classmethod + def ros2_package_dir(cls): + """Return the filesystem path to the generated custom ROS2 package.""" + return os.path.join( + cls.TEST_DIR, + cls.OPTIMIZER_NAME, + cls.PACKAGE_NAME) + + def test_custom_ros2_configuration_is_rendered_into_generated_files(self): + """Custom ROS2 config values should appear in the generated package files.""" + ros2_dir = self.ros2_package_dir() + + # The package metadata should reflect the user-provided ROS2 package name + # and description, not the defaults from the templates. 
+ with open(os.path.join(ros2_dir, "package.xml"), encoding="utf-8") as f: + package_xml = f.read() + self.assertIn(f"{self.PACKAGE_NAME}", package_xml) + self.assertIn(f"{self.DESCRIPTION}", package_xml) + + # `open_optimizer.hpp` is where the generated node constants are wired in. + # These assertions make sure the custom topic names, node name, rate, and + # queue sizes are propagated into the generated C++ code. + with open(os.path.join(ros2_dir, "include", "open_optimizer.hpp"), encoding="utf-8") as f: + optimizer_header = f.read() + self.assertIn(f'#define ROS2_NODE_{self.OPTIMIZER_NAME.upper()}_NODE_NAME "{self.NODE_NAME}"', + optimizer_header) + self.assertIn(f'#define ROS2_NODE_{self.OPTIMIZER_NAME.upper()}_RESULT_TOPIC "{self.RESULT_TOPIC}"', + optimizer_header) + self.assertIn(f'#define ROS2_NODE_{self.OPTIMIZER_NAME.upper()}_PARAMS_TOPIC "{self.PARAMS_TOPIC}"', + optimizer_header) + self.assertIn(f"#define ROS2_NODE_{self.OPTIMIZER_NAME.upper()}_RATE {self.RATE}", + optimizer_header) + self.assertIn( + f"#define ROS2_NODE_{self.OPTIMIZER_NAME.upper()}_RESULT_TOPIC_QUEUE_SIZE {self.RESULT_QUEUE_SIZE}", + optimizer_header) + self.assertIn( + f"#define ROS2_NODE_{self.OPTIMIZER_NAME.upper()}_PARAMS_TOPIC_QUEUE_SIZE {self.PARAMS_QUEUE_SIZE}", + optimizer_header) + + # The runtime YAML configuration should carry the custom topic names and + # timer rate so the launched node uses the intended ROS2 parameters. + with open(os.path.join(ros2_dir, "config", "open_params.yaml"), encoding="utf-8") as f: + params_yaml = f.read() + self.assertIn(f'result_topic: "{self.RESULT_TOPIC}"', params_yaml) + self.assertIn(f'params_topic: "{self.PARAMS_TOPIC}"', params_yaml) + self.assertIn(f"rate: {self.RATE}", params_yaml) + + # The generated launch file should point to the correct package and + # executable so `ros2 launch` can start the generated node. 
+ with open(os.path.join(ros2_dir, "launch", "open_optimizer.launch.py"), encoding="utf-8") as f: + launch_file = f.read() + self.assertIn(f'package="{self.PACKAGE_NAME}"', launch_file) + self.assertIn(f'executable="{self.NODE_NAME}"', launch_file) + self.assertIn(f'name="{self.NODE_NAME}"', launch_file) + self.assertIn(f'FindPackageShare("{self.PACKAGE_NAME}")', launch_file) + + with open(os.path.join(ros2_dir, "msg", "OptimizationResult.msg"), encoding="utf-8") as f: + result_msg = f.read() + self.assertIn("uint8 STATUS_INVALID_REQUEST=5", result_msg) + self.assertIn("uint64 inner_iterations", result_msg) + self.assertIn("uint64 outer_iterations", result_msg) + self.assertIn("uint16 error_code", result_msg) + self.assertIn("string error_message", result_msg) + + with open(os.path.join(ros2_dir, "include", f"{self.OPTIMIZER_NAME}_bindings.hpp"), + encoding="utf-8") as f: + bindings_header = f.read() + self.assertIn("error_code", bindings_header) + self.assertIn("error_message", bindings_header) + + with open(os.path.join(ros2_dir, "src", "open_optimizer.cpp"), encoding="utf-8") as f: + optimizer_cpp = f.read() + self.assertIn("kInitialPenaltyEpsilon", optimizer_cpp) + self.assertIn("params_.initial_penalty > kInitialPenaltyEpsilon", optimizer_cpp) + + +class Ros2BuildTestCase(unittest.TestCase): + """Integration tests for auto-generated ROS2 packages.""" + + TEST_DIR = ".python_test_build" + OPTIMIZER_NAME = "rosenbrock_ros2" + PACKAGE_NAME = "parametric_optimizer_ros2" + NODE_NAME = "open_node_ros2" + + @staticmethod + def get_open_local_absolute_path(): + """Return the absolute path to the local OpEn repository root.""" + return os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..", "rust")) + + @classmethod + def solverConfig(cls): + """Return a solver configuration shared by the ROS2 tests.""" + return og.config.SolverConfiguration() \ + .with_lbfgs_memory(15) \ + .with_tolerance(1e-4) \ + .with_initial_tolerance(1e-4) \ + 
.with_delta_tolerance(1e-4) \ + .with_initial_penalty(15.0) \ + .with_penalty_weight_update_factor(10.0) \ + .with_max_inner_iterations(155) \ + .with_max_duration_micros(1e8) \ + .with_max_outer_iterations(50) \ + .with_sufficient_decrease_coefficient(0.05) \ + .with_cbfgs_parameters(1.5, 1e-10, 1e-12) \ + .with_preconditioning(False) + + @classmethod + def setUpRos2PackageGeneration(cls): + """Generate the ROS2 package used by the ROS2 integration tests.""" + u = cs.MX.sym("u", 5) + p = cs.MX.sym("p", 2) + phi = og.functions.rosenbrock(u, p) + c = cs.vertcat(1.5 * u[0] - u[1], + cs.fmax(0.0, u[2] - u[3] + 0.1)) + bounds = og.constraints.Ball2(None, 1.5) + meta = og.config.OptimizerMeta() \ + .with_optimizer_name(cls.OPTIMIZER_NAME) + problem = og.builder.Problem(u, p, phi) \ + .with_constraints(bounds) \ + .with_penalty_constraints(c) + ros_config = og.config.RosConfiguration() \ + .with_package_name(cls.PACKAGE_NAME) \ + .with_node_name(cls.NODE_NAME) \ + .with_rate(35) \ + .with_description("really cool ROS2 node") + build_config = og.config.BuildConfiguration() \ + .with_open_version(local_path=cls.get_open_local_absolute_path()) \ + .with_build_directory(cls.TEST_DIR) \ + .with_build_mode(og.config.BuildConfiguration.DEBUG_MODE) \ + .with_build_c_bindings() \ + .with_ros2(ros_config) + og.builder.OpEnOptimizerBuilder(problem, + metadata=meta, + build_configuration=build_config, + solver_configuration=cls.solverConfig()) \ + .build() + + @classmethod + def _inject_deterministic_solver_error(cls): + """Patch the generated solver so negative `p[0]` triggers a known error.""" + solver_root = os.path.join(cls.TEST_DIR, cls.OPTIMIZER_NAME) + target_lib = os.path.join(solver_root, "src", "lib.rs") + with open(target_lib, "r", encoding="utf-8") as fh: + solver_lib = fh.read() + + if "forced solver error for ROS2 test" in solver_lib: + return + + anchor = ( + ' assert_eq!(u.len(), ROSENBROCK_ROS2_NUM_DECISION_VARIABLES, ' + '"Wrong number of decision variables 
(u)");\n' + ) + injected_guard = ( + anchor + + '\n' + ' if p[0] < 0.0 {\n' + ' return Err(SolverError::Cost("forced solver error for ROS2 test"));\n' + ' }\n' + ) + if anchor not in solver_lib: + raise RuntimeError("Could not inject deterministic solver error into ROS2 solver") + + with open(target_lib, "w", encoding="utf-8") as fh: + fh.write(solver_lib.replace(anchor, injected_guard, 1)) + + @classmethod + def _rebuild_generated_solver_library(cls): + """Rebuild the generated Rust solver and refresh the ROS2 static library.""" + solver_root = os.path.join(cls.TEST_DIR, cls.OPTIMIZER_NAME) + process = subprocess.Popen( + ["cargo", "build"], + cwd=solver_root, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + ) + _stdout, stderr = process.communicate() + if process.returncode != 0: + raise RuntimeError( + "Could not rebuild generated ROS2 solver:\n{}".format(stderr.decode()) + ) + + generated_static_lib = os.path.join( + solver_root, "target", "debug", f"lib{cls.OPTIMIZER_NAME}.a") + ros2_static_lib = os.path.join( + cls.ros2_package_dir(), "extern_lib", f"lib{cls.OPTIMIZER_NAME}.a") + shutil.copyfile(generated_static_lib, ros2_static_lib) + + @classmethod + def setUpClass(cls): + """Generate the ROS2 package once before all tests run.""" + if shutil.which("ros2") is None or shutil.which("colcon") is None: + raise unittest.SkipTest("ROS2 CLI tools are not available in PATH") + cls.setUpRos2PackageGeneration() + cls._inject_deterministic_solver_error() + cls._rebuild_generated_solver_library() + + @classmethod + def ros2_package_dir(cls): + """Return the filesystem path to the generated ROS2 package.""" + return os.path.join( + cls.TEST_DIR, + cls.OPTIMIZER_NAME, + cls.PACKAGE_NAME) + + @classmethod + def ros2_test_env(cls): + """Return the subprocess environment used by ROS2 integration tests.""" + env = os.environ.copy() + ros2_dir = cls.ros2_package_dir() + os.makedirs(os.path.join(ros2_dir, ".ros_log"), exist_ok=True) + # Keep ROS2 logs inside the generated 
package directory so the tests do + # not depend on a global writable log location. + env["ROS_LOG_DIR"] = os.path.join(ros2_dir, ".ros_log") + # Fast DDS is the most reliable middleware choice in our CI/local test + # setup when checking node discovery from separate processes. + env.setdefault("RMW_IMPLEMENTATION", "rmw_fastrtps_cpp") + env.pop("ROS_LOCALHOST_ONLY", None) + ros_env_prefix = env.get("CONDA_PREFIX") or sys.prefix + ros_env_lib = os.path.join(ros_env_prefix, "lib") + if os.path.isdir(ros_env_lib): + for var_name in ("DYLD_LIBRARY_PATH", "DYLD_FALLBACK_LIBRARY_PATH", "LD_LIBRARY_PATH"): + current_value = env.get(var_name, "") + env[var_name] = ( + f"{ros_env_lib}{os.pathsep}{current_value}" + if current_value else ros_env_lib + ) + return env + + @classmethod + def ros2_shell(cls): + """Return the preferred shell executable and setup script for ROS2 commands.""" + shell_path = "/bin/bash" + setup_script = "install/setup.bash" + preferred_shell = os.path.basename(os.environ.get("SHELL", "")) + zsh_setup = os.path.join(cls.ros2_package_dir(), "install", "setup.zsh") + if preferred_shell == "zsh" and os.path.isfile(zsh_setup): + shell_path = "/bin/zsh" + setup_script = "install/setup.zsh" + return shell_path, setup_script + + @classmethod + def _run_shell(cls, command, cwd, env=None, timeout=180, check=True): + """Run a command in the preferred shell and return the completed process.""" + shell_path, _ = cls.ros2_shell() + shell_command = command + if env is not None: + exported_vars = [] + for var_name in ("PATH", "CONDA_PREFIX", "RMW_IMPLEMENTATION", "ROS_LOG_DIR", + "DYLD_LIBRARY_PATH", "DYLD_FALLBACK_LIBRARY_PATH", "LD_LIBRARY_PATH"): + if var_name in env: + exported_vars.append( + f"export {var_name}={shlex.quote(env[var_name])};" + ) + shell_command = f"{' '.join(exported_vars)} {command}" + result = subprocess.run( + [shell_path, "-c", shell_command], + cwd=cwd, + env=env, + text=True, + capture_output=True, + timeout=timeout, + check=False) + if 
check and result.returncode != 0: + raise AssertionError( + "Command failed with exit code " + f"{result.returncode}: {command}\n" + f"stdout:\n{result.stdout}\n" + f"stderr:\n{result.stderr}" + ) + return result + + @staticmethod + def _terminate_process(process, timeout=10): + """Terminate a spawned shell process and its children, then collect output.""" + if process.poll() is None: + try: + os.killpg(process.pid, signal.SIGTERM) + except ProcessLookupError: + pass + try: + process.wait(timeout=timeout) + except subprocess.TimeoutExpired: + try: + os.killpg(process.pid, signal.SIGKILL) + except ProcessLookupError: + pass + process.wait(timeout=timeout) + try: + stdout, _ = process.communicate(timeout=1) + except subprocess.TimeoutExpired: + stdout = "" + return stdout or "" + + def _build_generated_package(self, ros2_dir, env): + """Build the generated ROS2 package with the active Python executable.""" + for stale_dir in ("build", "install", "log"): + stale_path = os.path.join(ros2_dir, stale_dir) + if os.path.exists(stale_path): + shutil.rmtree(stale_path) + + python_executable = shlex.quote(sys.executable) + self._run_shell( + f"source {self.ros2_shell()[1]} >/dev/null 2>&1 || true; " + f"colcon build --packages-select {self.PACKAGE_NAME} " + f"--cmake-args -DPython3_EXECUTABLE={python_executable}", + cwd=ros2_dir, + env=env, + timeout=600) + + def _spawn_ros_process(self, command, ros2_dir, env): + """Start a long-running ROS2 command in a fresh process group.""" + shell_path, setup_script = self.ros2_shell() + exported_vars = [] + for var_name in ("PATH", "CONDA_PREFIX", "RMW_IMPLEMENTATION", "ROS_LOG_DIR", + "DYLD_LIBRARY_PATH", "DYLD_FALLBACK_LIBRARY_PATH", "LD_LIBRARY_PATH"): + if var_name in env: + exported_vars.append( + f"export {var_name}={shlex.quote(env[var_name])};" + ) + return subprocess.Popen( + [ + shell_path, + "-c", + f"{' '.join(exported_vars)} source {setup_script} && {command}" + ], + cwd=ros2_dir, + env=env, + text=True, + 
stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + start_new_session=True) + + def _wait_for_node_and_topics(self, ros2_dir, env, process=None): + """Wait until the generated ROS2 node and its topics become discoverable.""" + _, setup_script = self.ros2_shell() + node_result = None + topic_result = None + for _ in range(6): + if process is not None and process.poll() is not None: + process_output = self._terminate_process(process) + raise unittest.SkipTest( + "Generated ROS2 node could not start in this environment.\n" + f"Process output:\n{process_output}") + # `ros2 node list` confirms that the process joined the ROS graph, + # while `ros2 topic list` confirms that the expected interfaces are + # actually being advertised. + node_result = self._run_shell( + f"source {setup_script} && " + "ros2 node list --no-daemon --spin-time 5", + cwd=ros2_dir, + env=env, + timeout=30, + check=False) + topic_result = self._run_shell( + f"source {setup_script} && " + "ros2 topic list --no-daemon --spin-time 5", + cwd=ros2_dir, + env=env, + timeout=30, + check=False) + node_seen = f"/{self.NODE_NAME}" in node_result.stdout + topics_seen = "/parameters" in topic_result.stdout and "/result" in topic_result.stdout + if node_seen and topics_seen: + return + time.sleep(1) + + if process is not None and process.poll() is not None: + process_output = self._terminate_process(process) + raise unittest.SkipTest( + "Generated ROS2 node exited before it became discoverable.\n" + f"Process output:\n{process_output}") + + self.fail( + "Generated ROS2 node did not become discoverable.\n" + f"ros2 node list output:\n{node_result.stdout if node_result else ''}\n" + f"ros2 topic list output:\n{topic_result.stdout if topic_result else ''}") + + def _assert_result_message(self, echo_stdout): + """Assert that the echoed result message indicates a successful solve.""" + # We do not compare the full numeric solution here; instead, we check + # that the generated node returned a structurally valid 
result and that + # the solver reported convergence. + self.assertIn("solution", echo_stdout) + self.assertRegex( + echo_stdout, + r"solution:\s*\n(?:- .+\n)+", + msg=f"Expected a non-empty solution vector in result output:\n{echo_stdout}") + # `status: 0` matches `STATUS_CONVERGED` in the generated result message. + self.assertIn("status: 0", echo_stdout) + self.assertIn("error_code: 0", echo_stdout) + self.assertIn("error_message:", echo_stdout) + self.assertRegex( + echo_stdout, + r"inner_iterations:\s*[1-9]\d*", + msg=f"Expected a positive inner iteration count in result output:\n{echo_stdout}") + self.assertRegex( + echo_stdout, + r"outer_iterations:\s*[1-9]\d*", + msg=f"Expected a positive outer iteration count in result output:\n{echo_stdout}") + self.assertRegex( + echo_stdout, + r"cost:\s*-?\d+(?:\.\d+)?(?:e[+-]?\d+)?", + msg=f"Expected a numeric cost in result output:\n{echo_stdout}") + self.assertIn("solve_time_ms", echo_stdout) + + def _assert_invalid_request_message(self, echo_stdout, error_code, error_message_fragment): + """Assert that the echoed result message reports an invalid request.""" + self.assertIn("status: 5", echo_stdout) + self.assertIn(f"error_code: {error_code}", echo_stdout) + self.assertIn(error_message_fragment, echo_stdout) + + def _assert_solver_error_message(self, echo_stdout, error_message_fragment): + """Assert that the echoed result message reports a solver-side failure.""" + self.assertIn("status: 3", echo_stdout) + self.assertIn("error_code: 2000", echo_stdout) + self.assertIn(error_message_fragment, echo_stdout) + + def _publish_request_and_collect_result(self, ros2_dir, env, request_payload): + """Publish one request and return one echoed result message.""" + _, setup_script = self.ros2_shell() + echo_process = self._spawn_ros_process("ros2 topic echo /result --once", ros2_dir, env) + + try: + time.sleep(1) + self._run_shell( + f"source {setup_script} && " + "ros2 topic pub --once /parameters " + 
f"{self.PACKAGE_NAME}/msg/OptimizationParameters " + f"'{request_payload}'", + cwd=ros2_dir, + env=env, + timeout=60) + echo_stdout, _ = echo_process.communicate(timeout=60) + finally: + if echo_process.poll() is None: + self._terminate_process(echo_process) + + return echo_stdout + + def _exercise_running_optimizer(self, ros2_dir, env): + """Publish one request and verify that one valid result message is returned.""" + echo_stdout = self._publish_request_and_collect_result( + ros2_dir, + env, + "{parameter: [1.0, 2.0], initial_guess: [0.0, 0.0, 0.0, 0.0, 0.0], initial_y: [], initial_penalty: 15.0}") + self._assert_result_message(echo_stdout) + + def _exercise_invalid_request(self, ros2_dir, env): + """Publish an invalid request and verify that the node reports it clearly.""" + echo_stdout = self._publish_request_and_collect_result( + ros2_dir, + env, + "{parameter: [1.0], initial_guess: [0.0, 0.0, 0.0, 0.0, 0.0], initial_y: [], initial_penalty: 15.0}") + self._assert_invalid_request_message( + echo_stdout, + 3003, + "wrong number of parameters") + + def _exercise_invalid_initial_guess(self, ros2_dir, env): + """Verify that invalid warm-start dimensions are reported clearly.""" + echo_stdout = self._publish_request_and_collect_result( + ros2_dir, + env, + "{parameter: [1.0, 2.0], initial_guess: [0.0], initial_y: [], initial_penalty: 15.0}") + self._assert_invalid_request_message( + echo_stdout, + 1600, + "initial guess has incompatible dimensions") + + def _exercise_invalid_initial_y(self, ros2_dir, env): + """Verify that invalid multiplier dimensions are reported clearly.""" + echo_stdout = self._publish_request_and_collect_result( + ros2_dir, + env, + "{parameter: [1.0, 2.0], initial_guess: [0.0, 0.0, 0.0, 0.0, 0.0], initial_y: [0.0], initial_penalty: 15.0}") + self._assert_invalid_request_message( + echo_stdout, + 1700, + "wrong dimension of Lagrange multipliers") + + def _exercise_solver_error(self, ros2_dir, env): + """Verify that solver-side failures 
propagate to the ROS2 result message.""" + echo_stdout = self._publish_request_and_collect_result( + ros2_dir, + env, + "{parameter: [-1.0, 2.0], initial_guess: [0.0, 0.0, 0.0, 0.0, 0.0], initial_y: [], initial_penalty: 15.0}") + self._assert_solver_error_message( + echo_stdout, + "forced solver error for ROS2 test") + + def test_ros2_package_generation(self): + """Verify the ROS2 package files are generated.""" + ros2_dir = self.ros2_package_dir() + # This is a lightweight smoke test for the generator itself before we + # attempt the slower build/run integration tests below. + self.assertTrue(os.path.isfile(os.path.join(ros2_dir, "package.xml"))) + self.assertTrue(os.path.isfile(os.path.join(ros2_dir, "CMakeLists.txt"))) + self.assertTrue(os.path.isfile( + os.path.join(ros2_dir, "launch", "open_optimizer.launch.py"))) + + def test_generated_ros2_package_works(self): + """Build, run, and call the generated ROS2 package.""" + ros2_dir = self.ros2_package_dir() + env = self.ros2_test_env() + + # First validate the plain `ros2 run` path, which exercises the + # generated executable directly without going through the launch file. 
+ self._build_generated_package(ros2_dir, env) + + node_process = self._spawn_ros_process( + f"ros2 run {self.PACKAGE_NAME} {self.NODE_NAME}", + ros2_dir, + env) + + try: + self._wait_for_node_and_topics(ros2_dir, env, node_process) + self._exercise_running_optimizer(ros2_dir, env) + self._exercise_invalid_request(ros2_dir, env) + self._exercise_invalid_initial_guess(ros2_dir, env) + self._exercise_invalid_initial_y(ros2_dir, env) + self._exercise_solver_error(ros2_dir, env) + self._exercise_running_optimizer(ros2_dir, env) + finally: + if node_process.poll() is None: + self._terminate_process(node_process) + + def test_generated_ros2_launch_file_works(self): + """Build the package, launch the node, and verify the launch file works.""" + ros2_dir = self.ros2_package_dir() + env = self.ros2_test_env() + + # Then validate the generated launch description, which should bring up + # the exact same node and parameters via `ros2 launch`. + self._build_generated_package(ros2_dir, env) + + launch_process = self._spawn_ros_process( + f"ros2 launch {self.PACKAGE_NAME} open_optimizer.launch.py", + ros2_dir, + env) + + try: + self._wait_for_node_and_topics(ros2_dir, env, launch_process) + self._exercise_running_optimizer(ros2_dir, env) + self._exercise_invalid_request(ros2_dir, env) + self._exercise_invalid_initial_guess(ros2_dir, env) + self._exercise_invalid_initial_y(ros2_dir, env) + self._exercise_solver_error(ros2_dir, env) + self._exercise_running_optimizer(ros2_dir, env) + finally: + if launch_process.poll() is None: + self._terminate_process(launch_process) + + +if __name__ == '__main__': + logging.getLogger('retry').setLevel(logging.ERROR) + unittest.main() diff --git a/rust/CHANGELOG.md b/rust/CHANGELOG.md new file mode 100644 index 00000000..ff145f6d --- /dev/null +++ b/rust/CHANGELOG.md @@ -0,0 +1,394 @@ +# Change Log + +All notable changes to this project will be documented in this file. 
+ +The format is based on [Keep a Changelog](http://keepachangelog.com/) +and this project adheres to [Semantic Versioning](http://semver.org/). + +Note: This is the main Changelog file for the Rust solver. The Changelog file for the Python interface (`opengen`) can be found in [../python/CHANGELOG.md](python/CHANGELOG.md) + + + +## [v0.12.0] - 31 March 2026 + +### Added + +- Richer Rust-side solver errors with human-readable reasons for projection failures, non-finite computations, linear algebra failures, and invalid internal solver states +- Fallible constraint projections via `Constraint::project(...) -> FunctionCallResult`, with error propagation through FBS, PANOC, and ALM +- Checked affine-space construction through `AffineSpace::try_new(...)` and `AffineSpaceError` + +### Changed + +- Rust solver supports generic float types +- Expanded Rust constraint and solver test coverage with constructor validation, boundary/idempotence checks, additional `BallP` / epigraph projection cases, and broader `f32`/`f64` coverage +- Swapped the cross-platform timer dependency to `web-time`, removed the old `instant`-specific wasm feature wiring, and updated optimizer timing call sites to use `web_time::Instant` +- Improved Rust-side error handling across constraints and core solvers so projection failures and invalid numerical states are reported explicitly instead of being silently flattened +- Refined `BallP`, `EpigraphSquaredNorm`, and related constraint implementations and docs for stronger numerical robustness and clearer behavior +- Restructured folder structure of entire repo: OpEn moved to folder `rust` + + +## [v0.11.1] - 2026-03-23 + + +### Fixed + +- Return best PANOC half-step on early exit (issue #325) + + + +## [v0.11.0] - 2026-03-14 + +### Added + +- Implementation of `BallP` in Rust: projection on lp-ball + +### Changed + +- Algorithmic improvements in `EpigraphSquaredNorm` (numerically stable Newton refinement) and more detailed docs +- Assertion in 
`Ball1`, `Ball2`, and `BallInf` to check that that dimensions of `x` and `xc` are compatible (with unit test) +- Added validation in `Rectangle` and `Hyperplane` for invalid constructor inputs and strengthened dimension checks in hyperplane projection +- Added validation in `Sphere2` for empty inputs and incompatible center dimensions +- Added validation in `FiniteSet` for incompatible projection dimensions and corrected convexity detection for singleton sets +- Added unit tests for invalid `Rectangle`, `Simplex`, `Hyperplane`, `Sphere2`, and `FiniteSet` edge cases + +### Fixed + +- Typos and doctest annotations in docs of `CartesianProduct` (in Rust), `Rectangle`, and `Hyperplane`, with more detailed documentation + + + +## [v0.10.0] - 2026-03-10 + +### Added + +- Custom implementation of Cholesky factorisation (and solver); this is used in `AffineSpace` now. +- New function in `matrix_operations` to compute AA' given a matrix A + +### Changed + +- Update version of `ndarray`, in `Cargo.toml` +- Removed `modcholesky` because it was causing a bug (see issue #378) + + +## [v0.9.1] - 2024-08-14 + + +### Fixed + +- Order of dependencies in `Cargo.toml` fixes crate-not-found issue on Windows + + + + +## [v0.9.0] - 2024-03-20 + +### Added + +- Rust implementation of epigraph of squared Euclidean norm (constraint) +- Implementation of `AffineSpace` + +### Fixed + +- Clippy fixes + + +## [v0.8.1] - 2023-10-27 + +### Fixed + +- Fix bug in implementation of `ball2.rs` (radius was being ignored for balls centered not at the origin) + + + + +## [v0.8.1] - 2023-10-27 + +### Added + +- New constraint: sphere of Euclidean norm + + +## [v0.7.7] - 2023-01-17 + +### Fixed + +- Change `time::Instant` to `instant::Instant` to support WASM + + + + +## [v0.7.6] - 2022-10-11 + +### Added + +- Update functions in `AlmOptimizerStatus` + + + + + +## [v0.7.5] - 2022-06-22 + +### Fixed + +- Fixed estimation of initial Lipschitz constant, `L`, when it is close to or equal to zero (e.g., Huber 
loss function) +- Fixed issue in `AlmFactory` related to (F2) penalty constraints + + + +## [v0.7.4] - 2021-11-15 + +### Added + +- Optional feature `wasm` in `Cargo.toml` (WebAssembly support); see https://alphaville.github.io/optimization-engine/docs/openrust-features for details +- Using `instant::Instant` instead of `std::Instant` (Wasm-compatible) +- Fixed Rust documentation of `Ball1` + + +## [v0.7.3] - 2021-11-1 + +### Added + +* Implementation of Simplex and Ball1 constraints in Rust +* Fix issue with simultaneous use of features `jem` and `rp` + + + +## [v0.7.2] - 2021-10-27 + +### Changed + +* Removed unnecessary `#[no_mangle]` annotations +* Took care of additional clippy warnings +* Bump versions: `cbindgen`: `0.8 --> 0.20` and `libc`: `0.2.0 -> 0.2.*` + +### Added + +* Support for [`rpmalloc`](https://github.com/EmbarkStudios/rpmalloc-rs) and [`jemalloc`](https://github.com/gnzlbg/jemallocator) using the features `jem` and `rp` + + + + + +## [v0.7.1] - 2020-09-04 + +### Added + +* Introduced `Halfspace` (implemented and tested) +* Introduced `Hyperplane` (implemented and tested) +* New types: `FunctionCallResult`, `MappingType` and `JacobianMappingType` +* Various clippy-related code improvements + + + +## [v0.7.0] - 2020-05-04 + + +### Added + +* ALM: compute cost value at solution + + + + +## [v0.6.2] - 2019-10-29 + +### Fixed + +* Bug in codegen for Cartesian products (PR #147) +* Removed the use of `Default` in Rust (does not work for large slices) +* Python: fixed typo in method `with_lfbgs_memory` + +### Added + +* New support for C-to-Rust interface via bindgen +* Generation of example C code for C-to-Rust interface +* CMakeLists for auto-generated example in C +* Additional Python examples on web page +* Chat button in web page (for gitter) +* Added option `local_path` in `with_open_version` + +### Changed + +* Homotopy module in Rust is annotated as deprecated +* TCP server response is cast into Python objects (PR #144) +* Auto-generated code 
links to most recent crate, unless overriden +* Changed `jacobian` to `gradient` in Python + +## [v0.6.1-alpha.2] - 2019-09-7 + +### Fixed + +* TCP server: Malformed error JSON is now fixed +* Algorithm now returns `u_bar`, which is feasible (not `u`) + +### Added + +* Introduced C interface to CasADi-generated C functions +* Rust and Python implementations of joint ALM/PM algorithms +* Rust docs for augmented Lagrangian method (ALM) +* Release of crate version `0.6.1-alpha.1` and `0.6.1-alpha.2` +* Introduced `#![allow(dead_code)]` in ALM implementation +* New AKKT-compliant termination criterion +* Tolerance relaxation in penalty method +* Finite sets supported in Rust +* Rust/Python: setting CBFGS parameters +* Second-order cones supported in Rust +* Rust docs: support for equations with KaTeX + +### Changed + +* Updated README + + +### Removed + +* Support for Python <3.6 (deprecated) +* Module `continuation` is going to become obsolete + + +## [v0.5.0] - 2019-06-22 + +### Fixed + +* Fixed `with_max_duration` in `PANOC` not following the builder pattern +* Fixed misplaced `.unwrap()` in the `HomotopyOptimizer` +* Fixed so the Python builder uses the current directory as default + +### Added + +* Generation of C/C++ bindings added in the Python interface and included in the test suite +* Support in Rust for Cartesian product of constraints + +### Removed + +* Deprecated: `enable_tcp_interface` and `enable_c_bindings_generation` + + + +## [v0.4.0] - 2019-06-03 + +### Fixed + +* Windows interoperability of `matlab_open_root()` [closes #24] +* Issues with file separator on Windows [#26 and #27] +* Handling corner cases such as wrong input parameters +* Rust: checking for `NaN` and `Inf` values in solution + +### Added + +* New Python interface for code generation (works with Python 2.7, 3.4 and 3.6) +* Homotopy method implemented in Rust +* TCP interface in Rust is generated automatically on request +* Support for OSX and linux distros on [travis] [closes #25] +* 
Continuous integration on [Appveyor] +* Experimental C bindings library +* Documentation for new Rust code and Python code +* Unit tests in Python using `unittest` + +### Changed + +* Rust API: Using `Option<>` and `Result<>` to handle errors +* Updated L-BFGS dependency; now using version `0.2` (no NonZeroUsize) + + +## [v0.3.1] - 2019-05-21 + +### Fixed + +* An error in the Matlab codegen which made it inoperable + +### Added + +* Support for compiling for different targets + + + +## [v0.3.0] - 2019-05-16 + +This is a breaking API change. + +### Fixed + +* A lot of internal fixes and clean up +* `PANOCEngine` and `FBSEngine` is no longer explicitly needed +* Simplified import system +* Cost functions now need to return a `Result<(), Error>` to indicate if the evaluation was successful + +### Added + +* Started an `examples` folder + + + + +[v0.12.0]: https://github.com/alphaville/optimization-engine/compare/v0.11.1...v0.12.0 +[v0.11.1]: https://github.com/alphaville/optimization-engine/compare/v0.11.0...v0.11.1 +[v0.11.0]: https://github.com/alphaville/optimization-engine/compare/v0.10.0...v0.11.0 +[v0.10.0]: https://github.com/alphaville/optimization-engine/compare/v0.9.1...v0.10.0 +[v0.9.1]: https://github.com/alphaville/optimization-engine/compare/v0.9.0...v0.9.1 +[v0.9.0]: https://github.com/alphaville/optimization-engine/compare/v0.8.1...v0.9.0 +[v0.8.1]: https://github.com/alphaville/optimization-engine/compare/v0.8.0...v0.8.1 +[v0.8.0]: https://github.com/alphaville/optimization-engine/compare/v0.7.7...v0.8.0 +[v0.7.7]: https://github.com/alphaville/optimization-engine/compare/v0.7.6...v0.7.7 +[v0.7.6]: https://github.com/alphaville/optimization-engine/compare/v0.7.5...v0.7.6 +[v0.7.5]: https://github.com/alphaville/optimization-engine/compare/v0.7.4...v0.7.5 +[v0.7.4]: https://github.com/alphaville/optimization-engine/compare/v0.7.3...v0.7.4 +[v0.7.3]: https://github.com/alphaville/optimization-engine/compare/v0.7.2...v0.7.3 +[v0.7.2]: 
https://github.com/alphaville/optimization-engine/compare/v0.7.1...v0.7.2 +[v0.7.1]: https://github.com/alphaville/optimization-engine/compare/v0.7.0...v0.7.1 +[v0.7.0]: https://github.com/alphaville/optimization-engine/compare/v0.6.2...v0.7.0 +[v0.6.2]: https://github.com/alphaville/optimization-engine/compare/v0.6.1-alpha.2...v0.6.2 +[v0.6.1-alpha.2]: https://github.com/alphaville/optimization-engine/compare/v0.5.0...v0.6.1-alpha.2 +[v0.5.0]: https://github.com/alphaville/optimization-engine/compare/v0.4.0...v0.5.0 +[v0.4.0]: https://github.com/alphaville/optimization-engine/compare/v0.3.1...v0.4.0 +[v0.3.1]: https://github.com/alphaville/optimization-engine/compare/v0.3.0...v0.3.1 +[v0.3.0]: https://github.com/alphaville/optimization-engine/compare/v0.2.2...v0.3.0 + + +[closes #24]: https://github.com/alphaville/optimization-engine/issues/24 +[closes #25]: https://github.com/alphaville/optimization-engine/issues/25 + + +[travis]: https://travis-ci.org/alphaville/optimization-engine/builds/537155440 +[Appveyor]: https://ci.appveyor.com/project/alphaville/optimization-engine diff --git a/Cargo.toml b/rust/Cargo.toml similarity index 89% rename from Cargo.toml rename to rust/Cargo.toml index e4850148..d088ab20 100644 --- a/Cargo.toml +++ b/rust/Cargo.toml @@ -30,7 +30,7 @@ documentation = "https://docs.rs/optimization_engine" license = "MIT OR Apache-2.0" # README file -readme = "README.md" +readme = "../README.md" # Name of this crate name = "optimization_engine" @@ -42,7 +42,7 @@ homepage = "https://alphaville.github.io/optimization-engine/" repository = "https://github.com/alphaville/optimization-engine" # Version of this crate (SemVer) -version = "0.11.1" +version = "0.12.0" edition = "2018" @@ -56,15 +56,7 @@ edition = "2018" # - Update the webpage (e.g., make a blog post) publish = true -exclude = [ - "design/*", - "docs/*", - "icasadi/*", - "matlab/*", - "open-clib/*", - "open-codegen/*", - "website/*", -] +exclude = [] # 
-------------------------------------------------------------------------- # D.O.C.S @@ -82,11 +74,9 @@ num = "0.4" # Our own stuff - L-BFGS: limited-memory BFGS directions lbfgs = "0.3" -# Instant is a generic timer that works on Wasm (with wasm-bindgen) -instant = { version = "0.1" } +# Cross-platform time primitives with WebAssembly support +web-time = "1" -# Wasm-bindgen is only activated if OpEn is compiled with `--features wasm` -wasm-bindgen = { version = "0.2", optional = true } # sc-allocator provides an implementation of a bump allocator rpmalloc = { version = "0.2", features = [ @@ -116,9 +106,6 @@ jem = ["jemallocator"] # RPMalloc rp = ["rpmalloc"] -# WebAssembly -wasm = ["wasm-bindgen", "instant/wasm-bindgen", "instant/inaccurate"] - # -------------------------------------------------------------------------- # T.E.S.T. D.E.P.E.N.D.E.N.C.I.E.S # -------------------------------------------------------------------------- diff --git a/Makefile.toml b/rust/Makefile.toml similarity index 94% rename from Makefile.toml rename to rust/Makefile.toml index ee2093c2..48f9bac5 100644 --- a/Makefile.toml +++ b/rust/Makefile.toml @@ -2,6 +2,7 @@ # cargo make docs # cargo make doc-katex [tasks.doc-katex] +cwd = "rust" env = { "RUSTDOCFLAGS" = "--html-in-header katex-header.html" } command = "cargo" args = ["doc", "--no-deps"] @@ -11,4 +12,3 @@ dependencies = [ "docs", "doc-katex", ] - diff --git a/examples/README.md b/rust/examples/README.md similarity index 100% rename from examples/README.md rename to rust/examples/README.md diff --git a/examples/alm_pm.rs b/rust/examples/alm_pm.rs similarity index 100% rename from examples/alm_pm.rs rename to rust/examples/alm_pm.rs diff --git a/examples/panoc_ex1.rs b/rust/examples/panoc_ex1.rs similarity index 88% rename from examples/panoc_ex1.rs rename to rust/examples/panoc_ex1.rs index 01f3e497..c50d3301 100644 --- a/examples/panoc_ex1.rs +++ b/rust/examples/panoc_ex1.rs @@ -29,7 +29,9 @@ fn main() { // define the cost 
function and its gradient let df = |u: &[f64], grad: &mut [f64]| -> Result<(), SolverError> { if a < 0.0 || b < 0.0 { - Err(SolverError::Cost) + Err(SolverError::Cost( + "Rosenbrock parameters must be nonnegative", + )) } else { rosenbrock_grad(a, b, u, grad); Ok(()) @@ -38,7 +40,9 @@ fn main() { let f = |u: &[f64], c: &mut f64| -> Result<(), SolverError> { if a < 0.0 || b < 0.0 { - Err(SolverError::Cost) + Err(SolverError::Cost( + "Rosenbrock parameters must be nonnegative", + )) } else { *c = rosenbrock_cost(a, b, u); Ok(()) diff --git a/examples/panoc_ex2.rs b/rust/examples/panoc_ex2.rs similarity index 100% rename from examples/panoc_ex2.rs rename to rust/examples/panoc_ex2.rs diff --git a/examples/pm.rs b/rust/examples/pm.rs similarity index 100% rename from examples/pm.rs rename to rust/examples/pm.rs diff --git a/katex-header.html b/rust/katex-header.html similarity index 100% rename from katex-header.html rename to rust/katex-header.html diff --git a/rust/src/alm/alm_cache.rs b/rust/src/alm/alm_cache.rs new file mode 100644 index 00000000..e65911d2 --- /dev/null +++ b/rust/src/alm/alm_cache.rs @@ -0,0 +1,170 @@ +use crate::{numeric::cast, panoc::PANOCCache}; +use lbfgs::LbfgsPrecision; +use num::Float; +use std::iter::Sum; + +fn default_initial_penalty() -> T { + cast::(10.0) +} + +/// Cache and mutable state for `AlmOptimizer` +/// +/// `AlmCache` stores the data that the outer ALM/PM loop updates from one +/// iteration to the next, together with the [`PANOCCache`] used to solve the +/// inner problems. +/// +/// The problem definition itself is stored separately in `AlmProblem`. +/// +/// The scalar type `T` is generic and is typically `f64` or `f32`. The default +/// is `f64`. 
+/// +#[derive(Debug)] +pub struct AlmCache +where + T: Float + LbfgsPrecision + Sum, +{ + /// PANOC cache for inner problems + pub(crate) panoc_cache: PANOCCache, + /// Lagrange multipliers (next) + pub(crate) y_plus: Option>, + /// Vector $\xi^\nu = (c^\nu, y^\nu)$ + pub(crate) xi: Option>, + /// Infeasibility related to ALM-type constraints + pub(crate) delta_y_norm: T, + /// Delta y at iteration `nu+1` + pub(crate) delta_y_norm_plus: T, + /// Value $\Vert F_2(u^\nu) \Vert$ + pub(crate) f2_norm: T, + /// Value $\Vert F_2(u^{\nu+1}) \Vert$ + pub(crate) f2_norm_plus: T, + /// Auxiliary variable `w` + pub(crate) w_alm_aux: Option>, + /// Infeasibility related to PM-type constraints, `w_pm = F2(u)` + pub(crate) w_pm: Option>, + /// (Outer) iteration count + pub(crate) iteration: usize, + /// Counter for inner iterations + pub(crate) inner_iteration_count: usize, + /// Value of the norm of the fixed-point residual for the last + /// solved inner problem + pub(crate) last_inner_problem_norm_fpr: T, + /// Available time left for ALM/PM computations (the value `None` + /// corresponds to an unspecified available time, i.e., there are + /// no bounds on the maximum time). The maximum time is specified, + /// if at all, in `AlmOptimizer` + pub(crate) available_time: Option, +} + +impl AlmCache +where + T: Float + LbfgsPrecision + Sum, +{ + /// Constructs a new `AlmCache` + /// + /// # Arguments + /// + /// - `panoc_cache`: cache used by the inner PANOC solver + /// - `n1`: dimension of the ALM mapping `F1` + /// - `n2`: dimension of the PM mapping `F2` + /// + /// The scalar type `T` is inferred from `panoc_cache`. 
Depending on the + /// values of `n1` and `n2`, this constructor allocates the auxiliary + /// vectors needed by the ALM and PM updates: + /// + /// - `y_plus` and `w_alm_aux` are allocated only when `n1 > 0` + /// - `w_pm` is allocated only when `n2 > 0` + /// - `xi` is allocated when `n1 + n2 > 0` and is initialized as + /// `xi = (c^0, y^0)`, where `c^0` is the default initial penalty and + /// `y^0` is the zero vector in `R^{n1}` + /// + /// # Examples + /// + /// Using the default scalar type (`f64`): + /// + /// ``` + /// use optimization_engine::{alm::AlmCache, panoc::PANOCCache}; + /// + /// let panoc_cache = PANOCCache::new(4, 1e-6, 8); + /// let _alm_cache = AlmCache::new(panoc_cache, 2, 1); + /// ``` + /// + /// Using `f32` explicitly: + /// + /// ``` + /// use optimization_engine::{alm::AlmCache, panoc::PANOCCache}; + /// + /// let panoc_cache = PANOCCache::new(4, 1e-5_f32, 8); + /// let _alm_cache = AlmCache::::new(panoc_cache, 2, 1); + /// ``` + /// + pub fn new(panoc_cache: PANOCCache, n1: usize, n2: usize) -> Self { + AlmCache { + panoc_cache, + y_plus: if n1 > 0 { + Some(vec![T::zero(); n1]) + } else { + None + }, + // Allocate memory for xi = (c, y) if either n1 or n2 is nonzero, + // otherwise, xi is None + xi: if n1 + n2 > 0 { + let mut xi_init = vec![default_initial_penalty(); 1]; + xi_init.append(&mut vec![T::zero(); n1]); + Some(xi_init) + } else { + None + }, + // w_alm_aux should be allocated only if n1 > 0 + w_alm_aux: if n1 > 0 { + Some(vec![T::zero(); n1]) + } else { + None + }, + // w_pm is needed only if n2 > 0 + w_pm: if n2 > 0 { + Some(vec![T::zero(); n2]) + } else { + None + }, + iteration: 0, + delta_y_norm: T::zero(), + delta_y_norm_plus: T::infinity(), + f2_norm: T::zero(), + f2_norm_plus: T::infinity(), + inner_iteration_count: 0, + last_inner_problem_norm_fpr: -T::one(), + available_time: None, + } + } + + /// Resets the cache to its initial iteration state + /// + /// This method: + /// + /// - resets the stored [`PANOCCache`] 
+ /// - clears the outer iteration counters + /// - resets the stored infeasibility and fixed-point-residual related norms + /// + /// The allocated work vectors remain allocated so the cache can be reused + /// without additional memory allocations. + /// + /// # Examples + /// + /// ``` + /// use optimization_engine::{alm::AlmCache, panoc::PANOCCache}; + /// + /// let panoc_cache = PANOCCache::new(3, 1e-6, 5); + /// let mut alm_cache = AlmCache::new(panoc_cache, 1, 1); + /// + /// alm_cache.reset(); + /// ``` + pub fn reset(&mut self) { + self.panoc_cache.reset(); + self.iteration = 0; + self.f2_norm = T::zero(); + self.f2_norm_plus = T::zero(); + self.delta_y_norm = T::zero(); + self.delta_y_norm_plus = T::zero(); + self.inner_iteration_count = 0; + } +} diff --git a/src/alm/alm_factory.rs b/rust/src/alm/alm_factory.rs similarity index 69% rename from src/alm/alm_factory.rs rename to rust/src/alm/alm_factory.rs index 21776453..81987c8d 100644 --- a/src/alm/alm_factory.rs +++ b/rust/src/alm/alm_factory.rs @@ -5,10 +5,16 @@ /* prepares psi and d_psi, which can be used to define an AlmOptimizer */ /* ---------------------------------------------------------------------------- */ -use crate::{constraints::Constraint, matrix_operations, FunctionCallResult}; +use crate::{constraints::Constraint, matrix_operations, numeric::cast, FunctionCallResult}; +use num::Float; +use std::marker::PhantomData; +use std::{iter::Sum, ops::AddAssign}; -/// Prepares function $\psi$ and its gradient given the problem data: $f$, $\nabla{}f$, -/// and optionally $F_1$, $JF_1$, $C$ and $F_2$ +/// Prepares the ALM/PM merit function $\psi$ and its gradient from the problem data. +/// +/// This is a low-level helper used by the ALM implementation to assemble the +/// augmented cost seen by the inner solver from the user-provided cost, +/// gradient, mappings, Jacobian-vector products, and set data. 
/// /// # Types /// @@ -16,7 +22,7 @@ use crate::{constraints::Constraint, matrix_operations, FunctionCallResult}; /// by a function with signature: /// ///```rust,ignore -///fn f(u: &[f64], cost: &mut f64) -> FunctionCallResult +///fn f(u: &[T], cost: &mut T) -> FunctionCallResult ///``` /// /// where `cost` is updated with the value $f(u)$, @@ -25,7 +31,7 @@ use crate::{constraints::Constraint, matrix_operations, FunctionCallResult}; /// which is computed by a function with signature /// /// ```rust,ignore -/// fn df(u: &[f64], grad: &mut [f64]) -> FunctionCallResult +/// fn df(u: &[T], grad: &mut [T]) -> FunctionCallResult /// ``` /// /// where on exit `grad` stores the @@ -35,7 +41,7 @@ use crate::{constraints::Constraint, matrix_operations, FunctionCallResult}; /// with signature /// /// ```rust,ignore -/// fn mapping(u: &[f64], fu: &mut [f64]) -> FunctionCallResult +/// fn mapping(u: &[T], fu: &mut [T]) -> FunctionCallResult /// ``` /// /// - `JacobianMappingF1Trans` and `JacobianMappingF2Trans`: functions that compute @@ -45,6 +51,9 @@ use crate::{constraints::Constraint, matrix_operations, FunctionCallResult}; /// - `SetC`: A set $C\subseteq \mathbb{R}^{n_1}$, which is used in the definition /// of the constraints $F_1(u) \in C$ /// +/// - `T`: scalar floating-point type used throughout the ALM data, typically +/// `f64` or `f32` +/// /// The above are used to compute $\psi:\mathbb{R}^{n_u}\to\mathbb{R}$ for given /// $u\in\mathbb{R}^{n_u}$ and $\xi=(c, y)\in\mathbb{R}^{n_1+1}$, where $c\in\mathbb{R}$ /// and $y\in\mathbb{R}^{n_1}$ are the penalty parameter and vector of Lagrange @@ -64,6 +73,8 @@ use crate::{constraints::Constraint, matrix_operations, FunctionCallResult}; /// /// where $t(u) = F_1(u) + \bar{c}^{-1}y$. /// +/// The default scalar type is `f64`. 
+/// pub struct AlmFactory< MappingF1, JacobianMappingF1Trans, @@ -72,14 +83,16 @@ pub struct AlmFactory< Cost, CostGradient, SetC, + T = f64, > where - Cost: Fn(&[f64], &mut f64) -> FunctionCallResult, // f(u, result) - CostGradient: Fn(&[f64], &mut [f64]) -> FunctionCallResult, // df(u, result) - MappingF1: Fn(&[f64], &mut [f64]) -> FunctionCallResult, // f1(u, result) - JacobianMappingF1Trans: Fn(&[f64], &[f64], &mut [f64]) -> FunctionCallResult, // jf1(u, d, result) - MappingF2: Fn(&[f64], &mut [f64]) -> FunctionCallResult, // f2(u, result) - JacobianMappingF2Trans: Fn(&[f64], &[f64], &mut [f64]) -> FunctionCallResult, // jf2(u, d, result) - SetC: Constraint, + T: Float + Sum + AddAssign, + Cost: Fn(&[T], &mut T) -> FunctionCallResult, // f(u, result) + CostGradient: Fn(&[T], &mut [T]) -> FunctionCallResult, // df(u, result) + MappingF1: Fn(&[T], &mut [T]) -> FunctionCallResult, // f1(u, result) + JacobianMappingF1Trans: Fn(&[T], &[T], &mut [T]) -> FunctionCallResult, // jf1(u, d, result) + MappingF2: Fn(&[T], &mut [T]) -> FunctionCallResult, // f2(u, result) + JacobianMappingF2Trans: Fn(&[T], &[T], &mut [T]) -> FunctionCallResult, // jf2(u, d, result) + SetC: Constraint, { f: Cost, df: CostGradient, @@ -89,6 +102,7 @@ pub struct AlmFactory< jacobian_mapping_f2_trans: Option, set_c: Option, n2: usize, + marker: PhantomData, } impl< @@ -99,6 +113,7 @@ impl< Cost, CostGradient, SetC, + T, > AlmFactory< MappingF1, @@ -108,17 +123,19 @@ impl< Cost, CostGradient, SetC, + T, > where - Cost: Fn(&[f64], &mut f64) -> FunctionCallResult, // f(u, result) - CostGradient: Fn(&[f64], &mut [f64]) -> FunctionCallResult, // df(u, result) - MappingF1: Fn(&[f64], &mut [f64]) -> FunctionCallResult, // f1(u, result) - JacobianMappingF1Trans: Fn(&[f64], &[f64], &mut [f64]) -> FunctionCallResult, // jf1(u, d, result) - MappingF2: Fn(&[f64], &mut [f64]) -> FunctionCallResult, // f2(u, result) - JacobianMappingF2Trans: Fn(&[f64], &[f64], &mut [f64]) -> FunctionCallResult, // jf2(u, d, 
result) - SetC: Constraint, + T: Float + Sum + AddAssign, + Cost: Fn(&[T], &mut T) -> FunctionCallResult, // f(u, result) + CostGradient: Fn(&[T], &mut [T]) -> FunctionCallResult, // df(u, result) + MappingF1: Fn(&[T], &mut [T]) -> FunctionCallResult, // f1(u, result) + JacobianMappingF1Trans: Fn(&[T], &[T], &mut [T]) -> FunctionCallResult, // jf1(u, d, result) + MappingF2: Fn(&[T], &mut [T]) -> FunctionCallResult, // f2(u, result) + JacobianMappingF2Trans: Fn(&[T], &[T], &mut [T]) -> FunctionCallResult, // jf2(u, d, result) + SetC: Constraint, { - /// Construct a new instance of `MockFactory` + /// Construct a new instance of [`AlmFactory`]. /// /// # Arguments /// - `f` cost function $f$ @@ -130,8 +147,23 @@ where /// - `set_c` (optional) set $C$ or `NO_SET` /// - `n2` image dimension of $F_2$ (can be 0) /// + /// The scalar type `T` is inferred from the supplied functions and set. + /// + /// # Panics + /// + /// This constructor panics if: + /// + /// - `mapping_f2` is provided but `n2 == 0`, + /// - `n2 > 0` but `mapping_f2` is not provided, + /// - `mapping_f2` and `jacobian_mapping_f2_trans` are not provided together, + /// - `mapping_f1` and `jacobian_mapping_f1_trans` are not provided together, + /// - `mapping_f1` and `set_c` are not provided together. + /// /// # Example /// + /// This example uses `f64` for simplicity, but the same API also works with + /// `f32`. 
+ /// /// ```rust /// use optimization_engine::{constraints::Ball2, alm::*, FunctionCallResult}; /// @@ -155,6 +187,7 @@ where /// ); /// ``` /// + #[allow(clippy::too_many_arguments)] pub fn new( f: Cost, df: CostGradient, @@ -190,10 +223,11 @@ where jacobian_mapping_f2_trans, set_c, n2, + marker: PhantomData, } } - /// Computes function $\psi$ given by + /// Computes the function $\psi$ given by /// /// $$\psi(u) = f(u) + \tfrac{c}{2}\left[\mathrm{dist}_C^2\left(F_1(u) + \bar{c}^{-1}y\right) /// + \Vert F_2(u) \Vert^2\right],$$ @@ -210,20 +244,38 @@ where /// - `xi` is the vector $\xi = (c, y) \in \mathbb{R}^{n_1 + 1}$ /// - `cost`: stores the value of $\psi(u; \xi)$ on exit /// + /// If `F1` is present, `xi` must contain the penalty parameter `c` in + /// `xi[0]` followed by the Lagrange multiplier vector `y`. + /// + /// If only `F2` is present, `xi` must still contain at least the penalty + /// parameter `c` as its first entry. + /// + /// If neither `F1` nor `F2` is present, `xi` may be empty. + /// /// # Returns /// /// This method returns `Ok(())` if the computation is successful or an appropriate /// `SolverError` otherwise. /// - pub fn psi(&self, u: &[f64], xi: &[f64], cost: &mut f64) -> FunctionCallResult { + /// # Panics + /// + /// This method may panic if the supplied slices are inconsistent with the + /// dimensions expected by the provided mappings or set projection. 
+ /// + pub fn psi(&self, u: &[T], xi: &[T], cost: &mut T) -> FunctionCallResult { (self.f)(u, cost)?; let ny = if !xi.is_empty() { xi.len() - 1 } else { 0 }; - let mut f1_u_plus_y_over_c = vec![0.0; ny]; - let mut s = vec![0.0; ny]; + let mut f1_u_plus_y_over_c = vec![T::zero(); ny]; + let mut s = vec![T::zero(); ny]; if let (Some(set_c), Some(mapping_f1)) = (&self.set_c, &self.mapping_f1) { let penalty_parameter = xi[0]; mapping_f1(u, &mut f1_u_plus_y_over_c)?; // f1_u = F1(u) let y_lagrange_mult = &xi[1..]; + let penalty_scale = if penalty_parameter > T::one() { + penalty_parameter + } else { + T::one() + }; // Note: In the first term below, we divide by 'max(c, 1)', instead of // just 'c'. The reason is that this allows to set c=0 and // retrieve the value of the original cost function @@ -231,23 +283,25 @@ where f1_u_plus_y_over_c .iter_mut() .zip(y_lagrange_mult.iter()) - .for_each(|(ti, yi)| *ti += yi / f64::max(penalty_parameter, 1.0)); + .for_each(|(ti, yi)| *ti += *yi / penalty_scale); s.copy_from_slice(&f1_u_plus_y_over_c); - set_c.project(&mut s); - *cost += 0.5 - * penalty_parameter - * matrix_operations::norm2_squared_diff(&f1_u_plus_y_over_c, &s); + set_c.project(&mut s)?; + let dist_sq: T = matrix_operations::norm2_squared_diff(&f1_u_plus_y_over_c, &s); + let scaling: T = cast::(0.5) * penalty_parameter; + *cost += scaling * dist_sq; } if let Some(f2) = &self.mapping_f2 { let c = xi[0]; - let mut z = vec![0.0; self.n2]; + let mut z = vec![T::zero(); self.n2]; f2(u, &mut z)?; - *cost += 0.5 * c * matrix_operations::norm2_squared(&z); + let norm_sq: T = matrix_operations::norm2_squared(&z); + let scaling: T = cast::(0.5) * c; + *cost += scaling * norm_sq; } Ok(()) } - /// Computes the gradient of $\psi$ + /// Computes the gradient of $\psi$. 
/// /// The gradient of `psi` is given by /// @@ -262,12 +316,65 @@ where /// - `xi` is the vector $\xi = (c, y) \in \mathbb{R}^{n_1 + 1}$ /// - `grad`: stores the value of $\nabla \psi(u; \xi)$ on exit /// + /// As with [`AlmFactory::psi`], `xi` must contain the penalty parameter + /// `c` as its first entry whenever `F1` or `F2` is active. + /// /// # Returns /// /// This method returns `Ok(())` if the computation is successful or an appropriate /// `SolverError` otherwise. /// - pub fn d_psi(&self, u: &[f64], xi: &[f64], grad: &mut [f64]) -> FunctionCallResult { + /// # Panics + /// + /// This method may panic if the supplied slices are inconsistent with the + /// dimensions expected by the provided mappings, Jacobian-vector products, + /// or set projection. + /// + /// # Example + /// + /// This example uses `f64` for simplicity, but the same API also works with + /// `f32`. + /// + /// ```rust + /// use optimization_engine::{constraints::Ball2, alm::*, FunctionCallResult}; + /// + /// let set_c = Ball2::new(None, 1.0); + /// + /// let f = |_u: &[f64], cost: &mut f64| -> FunctionCallResult { + /// *cost = 0.0; + /// Ok(()) + /// }; + /// let df = |_u: &[f64], grad: &mut [f64]| -> FunctionCallResult { + /// grad.fill(0.0); + /// Ok(()) + /// }; + /// let f1 = |u: &[f64], f1u: &mut [f64]| -> FunctionCallResult { + /// f1u[0] = u[0]; + /// Ok(()) + /// }; + /// let jf1_tr = |_u: &[f64], d: &[f64], res: &mut [f64]| -> FunctionCallResult { + /// res[0] = d[0]; + /// Ok(()) + /// }; + /// + /// let factory = AlmFactory::new( + /// f, + /// df, + /// Some(f1), + /// Some(jf1_tr), + /// NO_MAPPING, + /// NO_JACOBIAN_MAPPING, + /// Some(set_c), + /// 0, + /// ); + /// + /// let u = [0.5_f64]; + /// let xi = [2.0_f64, 0.1_f64]; + /// let mut grad = [0.0_f64]; + /// factory.d_psi(&u, &xi, &mut grad).unwrap(); + /// ``` + /// + pub fn d_psi(&self, u: &[T], xi: &[T], grad: &mut [T]) -> FunctionCallResult { let nu = u.len(); // The following statement is needed to account 
for the case where n1=n2=0 @@ -285,38 +392,38 @@ where &self.jacobian_mapping_f1_trans, ) { let c_penalty_parameter = xi[0]; - let mut f1_u_plus_y_over_c = vec![0.0; ny]; - let mut s_aux_var = vec![0.0; ny]; // auxiliary variable `s` + let mut f1_u_plus_y_over_c = vec![T::zero(); ny]; + let mut s_aux_var = vec![T::zero(); ny]; // auxiliary variable `s` let y_lagrange_mult = &xi[1..]; - let mut jac_prod = vec![0.0; nu]; + let mut jac_prod = vec![T::zero(); nu]; mapping_f1(u, &mut f1_u_plus_y_over_c)?; // f1_u_plus_y_over_c = F1(u) // f1_u_plus_y_over_c = F1(u) + y/c f1_u_plus_y_over_c .iter_mut() .zip(y_lagrange_mult.iter()) - .for_each(|(ti, yi)| *ti += yi / c_penalty_parameter); + .for_each(|(ti, yi)| *ti += *yi / c_penalty_parameter); s_aux_var.copy_from_slice(&f1_u_plus_y_over_c); // s = t - set_c.project(&mut s_aux_var); // s = Proj_C(F1(u) + y/c) + set_c.project(&mut s_aux_var)?; // s = Proj_C(F1(u) + y/c) // t = F1(u) + y/c - Proj_C(F1(u) + y/c) f1_u_plus_y_over_c .iter_mut() .zip(s_aux_var.iter()) - .for_each(|(ti, si)| *ti -= si); + .for_each(|(ti, si)| *ti = *ti - *si); jf1t(u, &f1_u_plus_y_over_c, &mut jac_prod)?; // grad += c*t grad.iter_mut() .zip(jac_prod.iter()) - .for_each(|(gradi, jac_prodi)| *gradi += c_penalty_parameter * jac_prodi); + .for_each(|(gradi, jac_prodi)| *gradi += c_penalty_parameter * *jac_prodi); } // Compute second part: JF2(u)'*F2(u) if let (Some(f2), Some(jf2)) = (&self.mapping_f2, &self.jacobian_mapping_f2_trans) { let c = xi[0]; - let mut f2u_aux = vec![0.0; self.n2]; - let mut jf2u_times_f2u_aux = vec![0.0; nu]; + let mut f2u_aux = vec![T::zero(); self.n2]; + let mut jf2u_times_f2u_aux = vec![T::zero(); nu]; f2(u, &mut f2u_aux)?; // f2u_aux = F2(u) jf2(u, &f2u_aux, &mut jf2u_times_f2u_aux)?; // jf2u_times_f2u_aux = JF2(u)'*f2u_aux // = JF2(u)'*F2(u) @@ -324,7 +431,7 @@ where // grad += c * jf2u_times_f2u_aux grad.iter_mut() .zip(jf2u_times_f2u_aux.iter()) - .for_each(|(gradi, jf2u_times_f2u_aux_i)| *gradi += c * 
jf2u_times_f2u_aux_i); + .for_each(|(gradi, jf2u_times_f2u_aux_i)| *gradi += c * *jf2u_times_f2u_aux_i); } Ok(()) } @@ -412,7 +519,9 @@ mod tests { let f2 = mapping_f2; let jac_f2_tr = |_u: &[f64], _d: &[f64], _res: &mut [f64]| -> Result<(), crate::SolverError> { - Err(SolverError::NotFiniteComputation) + Err(SolverError::NotFiniteComputation( + "mock Jacobian-transpose product returned a non-finite result", + )) }; let factory = AlmFactory::new( mocks::f0, diff --git a/src/alm/alm_optimizer.rs b/rust/src/alm/alm_optimizer.rs similarity index 86% rename from src/alm/alm_optimizer.rs rename to rust/src/alm/alm_optimizer.rs index c7d9ec08..e55ce7b3 100644 --- a/src/alm/alm_optimizer.rs +++ b/rust/src/alm/alm_optimizer.rs @@ -1,19 +1,17 @@ use crate::{ alm::*, constraints, - core::{panoc::PANOCOptimizer, ExitStatus, Optimizer, Problem, SolverStatus}, - matrix_operations, FunctionCallResult, SolverError, + core::{panoc::PANOCOptimizer, ExitStatus, Problem, SolverStatus}, + matrix_operations, + numeric::cast, + FunctionCallResult, SolverError, }; +use lbfgs::LbfgsPrecision; +use num::Float; +use std::{iter::Sum, ops::AddAssign}; const DEFAULT_MAX_OUTER_ITERATIONS: usize = 50; const DEFAULT_MAX_INNER_ITERATIONS: usize = 5000; -const DEFAULT_EPSILON_TOLERANCE: f64 = 1e-6; -const DEFAULT_DELTA_TOLERANCE: f64 = 1e-4; -const DEFAULT_PENALTY_UPDATE_FACTOR: f64 = 5.0; -const DEFAULT_EPSILON_UPDATE_FACTOR: f64 = 0.1; -const DEFAULT_INFEAS_SUFFICIENT_DECREASE_FACTOR: f64 = 0.1; -const DEFAULT_INITIAL_TOLERANCE: f64 = 0.1; -const SMALL_EPSILON: f64 = f64::EPSILON; /// Internal/private structure used by method AlmOptimizer.step /// to return some minimal information about the inner problem @@ -115,6 +113,9 @@ impl InnerProblemStatus { /// of $C$ and $\delta_{U}$, $\delta_{C^{\ast}}$ are the indicator functions of $U$ and $C^{\ast}$ /// respectively. /// +/// The scalar type `T` is generic and is typically `f64` or `f32`. The default +/// is `f64`. 
+/// pub struct AlmOptimizer< 'life, MappingAlm, @@ -124,17 +125,19 @@ pub struct AlmOptimizer< ConstraintsType, AlmSetC, LagrangeSetY, + T = f64, > where - MappingAlm: Fn(&[f64], &mut [f64]) -> FunctionCallResult, - MappingPm: Fn(&[f64], &mut [f64]) -> FunctionCallResult, - ParametricGradientType: Fn(&[f64], &[f64], &mut [f64]) -> FunctionCallResult, - ParametricCostType: Fn(&[f64], &[f64], &mut f64) -> FunctionCallResult, - ConstraintsType: constraints::Constraint, - AlmSetC: constraints::Constraint, - LagrangeSetY: constraints::Constraint, + T: Float + LbfgsPrecision + Sum + AddAssign, + MappingAlm: Fn(&[T], &mut [T]) -> FunctionCallResult, + MappingPm: Fn(&[T], &mut [T]) -> FunctionCallResult, + ParametricGradientType: Fn(&[T], &[T], &mut [T]) -> FunctionCallResult, + ParametricCostType: Fn(&[T], &[T], &mut T) -> FunctionCallResult, + ConstraintsType: constraints::Constraint, + AlmSetC: constraints::Constraint, + LagrangeSetY: constraints::Constraint, { /// ALM cache (borrowed) - alm_cache: &'life mut AlmCache, + alm_cache: &'life mut AlmCache, /// ALM problem definition (oracle) alm_problem: AlmProblem< MappingAlm, @@ -144,6 +147,7 @@ pub struct AlmOptimizer< ConstraintsType, AlmSetC, LagrangeSetY, + T, >, /// Maximum number of outer iterations max_outer_iterations: usize, @@ -152,19 +156,19 @@ pub struct AlmOptimizer< /// Maximum duration max_duration: Option, /// epsilon for inner AKKT condition - epsilon_tolerance: f64, + epsilon_tolerance: T, /// delta for outer AKKT condition - delta_tolerance: f64, + delta_tolerance: T, /// At every outer iteration, c is multiplied by this scalar - penalty_update_factor: f64, + penalty_update_factor: T, /// The epsilon-tolerance is multiplied by this factor until /// it reaches its target value - epsilon_update_factor: f64, + epsilon_update_factor: T, /// If current_infeasibility <= sufficient_decrease_coeff * previous_infeasibility, /// then the penalty parameter is kept constant - sufficient_decrease_coeff: f64, + 
sufficient_decrease_coeff: T, // Initial tolerance (for the inner problem) - epsilon_inner_initial: f64, + epsilon_inner_initial: T, } impl< @@ -176,6 +180,7 @@ impl< ConstraintsType, AlmSetC, LagrangeSetY, + T, > AlmOptimizer< 'life, @@ -186,15 +191,17 @@ impl< ConstraintsType, AlmSetC, LagrangeSetY, + T, > where - MappingAlm: Fn(&[f64], &mut [f64]) -> FunctionCallResult, - MappingPm: Fn(&[f64], &mut [f64]) -> FunctionCallResult, - ParametricGradientType: Fn(&[f64], &[f64], &mut [f64]) -> FunctionCallResult, - ParametricCostType: Fn(&[f64], &[f64], &mut f64) -> FunctionCallResult, - ConstraintsType: constraints::Constraint, - AlmSetC: constraints::Constraint, - LagrangeSetY: constraints::Constraint, + T: Float + LbfgsPrecision + Sum + AddAssign, + MappingAlm: Fn(&[T], &mut [T]) -> FunctionCallResult, + MappingPm: Fn(&[T], &mut [T]) -> FunctionCallResult, + ParametricGradientType: Fn(&[T], &[T], &mut [T]) -> FunctionCallResult, + ParametricCostType: Fn(&[T], &[T], &mut T) -> FunctionCallResult, + ConstraintsType: constraints::Constraint, + AlmSetC: constraints::Constraint, + LagrangeSetY: constraints::Constraint, { /* ---------------------------------------------------------------------------- */ /* CONSTRUCTOR */ @@ -210,9 +217,15 @@ where /// $\nabla_u \psi(u, \xi)$, $F_1(u)$ (if any), $F_2(u)$ (if any), and sets /// $C$, $U$ and $Y$) /// + /// The scalar type `T` is inferred from `alm_cache`, `alm_problem`, and the + /// supplied closures and sets. + /// /// /// # Example /// + /// This example uses `f64` for simplicity, but the same API also works with + /// `f32`. 
+ /// /// ```rust /// use optimization_engine::{alm::*, FunctionCallResult, core::{panoc::*, constraints}}; /// @@ -249,8 +262,9 @@ where /// .with_max_outer_iterations(10); ///``` /// + #[must_use] pub fn new( - alm_cache: &'life mut AlmCache, + alm_cache: &'life mut AlmCache, alm_problem: AlmProblem< MappingAlm, MappingPm, @@ -259,26 +273,25 @@ where ConstraintsType, AlmSetC, LagrangeSetY, + T, >, ) -> Self { // set the initial value of the inner tolerance; this step is // not necessary, however, because we set the initial tolerance // in #solve (see below) - alm_cache - .panoc_cache - .set_akkt_tolerance(DEFAULT_INITIAL_TOLERANCE); + alm_cache.panoc_cache.set_akkt_tolerance(cast::(0.1)); AlmOptimizer { alm_cache, alm_problem, max_outer_iterations: DEFAULT_MAX_OUTER_ITERATIONS, max_inner_iterations: DEFAULT_MAX_INNER_ITERATIONS, max_duration: None, - epsilon_tolerance: DEFAULT_EPSILON_TOLERANCE, - delta_tolerance: DEFAULT_DELTA_TOLERANCE, - penalty_update_factor: DEFAULT_PENALTY_UPDATE_FACTOR, - epsilon_update_factor: DEFAULT_EPSILON_UPDATE_FACTOR, - sufficient_decrease_coeff: DEFAULT_INFEAS_SUFFICIENT_DECREASE_FACTOR, - epsilon_inner_initial: DEFAULT_INITIAL_TOLERANCE, + epsilon_tolerance: cast::(1e-6), + delta_tolerance: cast::(1e-4), + penalty_update_factor: cast::(5.0), + epsilon_update_factor: cast::(0.1), + sufficient_decrease_coeff: cast::(0.1), + epsilon_inner_initial: cast::(0.1), } } @@ -301,6 +314,7 @@ where /// The method panics if the specified number of outer iterations is zero /// /// + #[must_use] pub fn with_max_outer_iterations(mut self, max_outer_iterations: usize) -> Self { assert!( max_outer_iterations > 0, @@ -326,6 +340,7 @@ where /// The method panics if the specified number of inner iterations is zero /// /// + #[must_use] pub fn with_max_inner_iterations(mut self, max_inner_iterations: usize) -> Self { assert!( max_inner_iterations > 0, @@ -348,6 +363,7 @@ where /// /// Returns the current mutable and updated instance of the provided 
object /// + #[must_use] pub fn with_max_duration(mut self, max_duration: std::time::Duration) -> Self { self.max_duration = Some(max_duration); self @@ -367,8 +383,12 @@ where /// /// The method panics if the specified tolerance is not positive /// - pub fn with_delta_tolerance(mut self, delta_tolerance: f64) -> Self { - assert!(delta_tolerance > 0.0, "delta_tolerance must be positive"); + #[must_use] + pub fn with_delta_tolerance(mut self, delta_tolerance: T) -> Self { + assert!( + delta_tolerance > T::zero(), + "delta_tolerance must be positive" + ); self.delta_tolerance = delta_tolerance; self } @@ -387,9 +407,10 @@ where /// /// The method panics if the specified tolerance is not positive /// - pub fn with_epsilon_tolerance(mut self, epsilon_tolerance: f64) -> Self { + #[must_use] + pub fn with_epsilon_tolerance(mut self, epsilon_tolerance: T) -> Self { assert!( - epsilon_tolerance > 0.0, + epsilon_tolerance > T::zero(), "epsilon_tolerance must be positive" ); self.epsilon_tolerance = epsilon_tolerance; @@ -412,13 +433,14 @@ where /// /// # Panics /// - /// The method panics if the update factor is not larger than `1.0 + f64::EPSILON` + /// The method panics if the update factor is not larger than `1.0 + T::epsilon()` /// /// - pub fn with_penalty_update_factor(mut self, penalty_update_factor: f64) -> Self { + #[must_use] + pub fn with_penalty_update_factor(mut self, penalty_update_factor: T) -> Self { assert!( - penalty_update_factor > 1.0 + SMALL_EPSILON, - "`penalty_update_factor` must be larger than 1.0 + f64::EPSILON" + penalty_update_factor > T::one() + T::epsilon(), + "`penalty_update_factor` must be larger than 1.0 + T::epsilon()" ); self.penalty_update_factor = penalty_update_factor; self @@ -442,16 +464,14 @@ where /// # Panics /// /// The method panics if the specified tolerance update factor is not in the - /// interval from `f64::EPSILON` to `1.0 - f64::EPSILON`. + /// interval from `T::epsilon()` to `1.0 - T::epsilon()`. 
/// - pub fn with_inner_tolerance_update_factor( - mut self, - inner_tolerance_update_factor: f64, - ) -> Self { + #[must_use] + pub fn with_inner_tolerance_update_factor(mut self, inner_tolerance_update_factor: T) -> Self { assert!( - inner_tolerance_update_factor > SMALL_EPSILON - && inner_tolerance_update_factor < 1.0 - SMALL_EPSILON, - "the tolerance update factor needs to be in (f64::EPSILON, 1)" + inner_tolerance_update_factor > T::epsilon() + && inner_tolerance_update_factor < T::one() - T::epsilon(), + "the tolerance update factor needs to be in (T::epsilon(), 1)" ); self.epsilon_update_factor = inner_tolerance_update_factor; self @@ -480,7 +500,8 @@ where /// `with_inner_tolerance` to do so before invoking `with_initial_inner_tolerance`. /// /// - pub fn with_initial_inner_tolerance(mut self, initial_inner_tolerance: f64) -> Self { + #[must_use] + pub fn with_initial_inner_tolerance(mut self, initial_inner_tolerance: T) -> Self { assert!( initial_inner_tolerance >= self.epsilon_tolerance, "the initial tolerance should be no less than the target tolerance" @@ -510,16 +531,17 @@ where /// # Panics /// /// The method panics if the specified sufficient decrease coefficient is not - /// in the range `(f64::EPSILON, 1.0 - f64::EPSILON)` + /// in the range `(T::epsilon(), 1.0 - T::epsilon())` /// + #[must_use] pub fn with_sufficient_decrease_coefficient( mut self, - sufficient_decrease_coefficient: f64, + sufficient_decrease_coefficient: T, ) -> Self { assert!( - sufficient_decrease_coefficient < 1.0 - SMALL_EPSILON - && sufficient_decrease_coefficient > SMALL_EPSILON, - "sufficient_decrease_coefficient must be in (f64::EPSILON, 1.0 - f64::EPSILON)" + sufficient_decrease_coefficient < T::one() - T::epsilon() + && sufficient_decrease_coefficient > T::epsilon(), + "sufficient_decrease_coefficient must be in (T::epsilon(), 1.0 - T::epsilon())" ); self.sufficient_decrease_coeff = sufficient_decrease_coefficient; self @@ -529,7 +551,7 @@ where /// /// # Arguments /// 
- /// - `y_init`: initial vector of Lagrange multipliers (type: `&[f64]`) of + /// - `y_init`: initial vector of Lagrange multipliers (type: `&[T]`) of /// length equal to `n1` /// /// # Returns @@ -540,7 +562,8 @@ where /// /// The method will panic if the length of `y_init` is not equal to `n1` /// - pub fn with_initial_lagrange_multipliers(mut self, y_init: &[f64]) -> Self { + #[must_use] + pub fn with_initial_lagrange_multipliers(mut self, y_init: &[T]) -> Self { let cache = &mut self.alm_cache; assert!( y_init.len() == self.alm_problem.n1, @@ -568,12 +591,13 @@ where /// # Panics /// /// The method panics if the specified initial penalty parameter is not - /// larger than `f64::EPSILON` + /// larger than `T::epsilon()` /// - pub fn with_initial_penalty(self, c0: f64) -> Self { + #[must_use] + pub fn with_initial_penalty(self, c0: T) -> Self { assert!( - c0 > SMALL_EPSILON, - "the initial penalty must be larger than f64::EPSILON" + c0 > T::epsilon(), + "the initial penalty must be larger than T::epsilon()" ); if let Some(xi_in_cache) = &mut self.alm_cache.xi { xi_in_cache[0] = c0; @@ -596,7 +620,7 @@ where } /// Computes PM infeasibility, that is, ||F2(u)|| - fn compute_pm_infeasibility(&mut self, u: &[f64]) -> FunctionCallResult { + fn compute_pm_infeasibility(&mut self, u: &[T]) -> FunctionCallResult { let problem = &self.alm_problem; // ALM problem let cache = &mut self.alm_cache; // ALM cache @@ -613,7 +637,7 @@ where /// /// `y_plus <-- y + c*[F1(u_plus) - Proj_C(F1(u_plus) + y/c)]` /// - fn update_lagrange_multipliers(&mut self, u: &[f64]) -> FunctionCallResult { + fn update_lagrange_multipliers(&mut self, u: &[T]) -> FunctionCallResult { let problem = &self.alm_problem; // ALM problem let cache = &mut self.alm_cache; // ALM cache @@ -647,10 +671,10 @@ where .iter_mut() .zip(y.iter()) .zip(w_alm_aux.iter()) - .for_each(|((y_plus_i, y_i), w_alm_aux_i)| *y_plus_i = w_alm_aux_i + y_i / c); + .for_each(|((y_plus_i, y_i), w_alm_aux_i)| *y_plus_i = *w_alm_aux_i 
+ *y_i / c); // Step #3: y_plus := Proj_C(y_plus) - alm_set_c.project(y_plus); + alm_set_c.project(y_plus)?; // Step #4 y_plus @@ -659,7 +683,7 @@ where .zip(w_alm_aux.iter()) .for_each(|((y_plus_i, y_i), w_alm_aux_i)| { // y_plus := y + c * (w_alm_aux - y_plus) - *y_plus_i = y_i + c * (w_alm_aux_i - *y_plus_i) + *y_plus_i = *y_i + c * (*w_alm_aux_i - *y_plus_i) }); } @@ -667,18 +691,19 @@ where } /// Project y on set Y - fn project_on_set_y(&mut self) { + fn project_on_set_y(&mut self) -> FunctionCallResult { let problem = &self.alm_problem; if let Some(y_set) = &problem.alm_set_y { // NOTE: as_mut() converts from &mut Option to Option<&mut T> - // * cache.y is Option> - // * cache.y.as_mut is Option<&mut Vec> - // * which can be treated as Option<&mut [f64]> - // * y_vec is &mut [f64] + // * cache.y is Option> + // * cache.y.as_mut is Option<&mut Vec> + // * which can be treated as Option<&mut [T]> + // * y_vec is &mut [T] if let Some(xi_vec) = self.alm_cache.xi.as_mut() { - y_set.project(&mut xi_vec[1..]); + y_set.project(&mut xi_vec[1..])?; } } + Ok(()) } /// Solve inner problem @@ -691,12 +716,12 @@ where /// /// # Returns /// - /// Returns an instance of `Result`, where `SolverStatus` + /// Returns an instance of `Result, SolverError>`, where `SolverStatus` /// is the solver status of the inner problem and `SolverError` is a potential /// error in solving the inner problem. /// /// - fn solve_inner_problem(&mut self, u: &mut [f64]) -> Result { + fn solve_inner_problem(&mut self, u: &mut [T]) -> Result, SolverError> { let alm_problem = &self.alm_problem; // Problem let alm_cache = &mut self.alm_cache; // ALM cache @@ -704,7 +729,7 @@ where // empty vector, otherwise. 
We do that becaues the user has the option // to not use any ALM/PM constraints; in that case, `alm_cache.xi` is // `None` - let xi_empty = Vec::new(); + let xi_empty = Vec::::new(); let xi = if let Some(xi_cached) = &alm_cache.xi { xi_cached } else { @@ -713,11 +738,11 @@ where // Construct psi and psi_grad (as functions of `u` alone); it is // psi(u) = psi(u; xi) and psi_grad(u) = phi_grad(u; xi) // psi: R^nu --> R - let psi = |u: &[f64], psi_val: &mut f64| -> FunctionCallResult { + let psi = |u: &[T], psi_val: &mut T| -> FunctionCallResult { (alm_problem.parametric_cost)(u, xi, psi_val) }; // psi_grad: R^nu --> R^nu - let psi_grad = |u: &[f64], psi_grad: &mut [f64]| -> FunctionCallResult { + let psi_grad = |u: &[T], psi_grad: &mut [T]| -> FunctionCallResult { (alm_problem.parametric_gradient)(u, xi, psi_grad) }; // define the inner problem @@ -740,7 +765,7 @@ where inner_solver.solve(u) } - fn is_exit_criterion_satisfied(&self) -> bool { + fn is_exit_criterion_satisfied(&self) -> Result { let cache = &self.alm_cache; let problem = &self.alm_problem; // Criterion 1: ||Delta y|| <= c * delta @@ -750,7 +775,7 @@ where || if let Some(xi) = &cache.xi { let c = xi[0]; cache.iteration > 0 - && cache.delta_y_norm_plus <= c * self.delta_tolerance + SMALL_EPSILON + && cache.delta_y_norm_plus <= c * self.delta_tolerance + T::epsilon() } else { true }; @@ -758,14 +783,20 @@ where // If n2 = 0, there are no PM-type constraints, so this // criterion is automatically satisfied let criterion_2 = - problem.n2 == 0 || cache.f2_norm_plus <= self.delta_tolerance + SMALL_EPSILON; + problem.n2 == 0 || cache.f2_norm_plus <= self.delta_tolerance + T::epsilon(); // Criterion 3: epsilon_nu <= epsilon // This function will panic is there is no akkt_tolerance // This should never happen because we set the AKKT tolerance // in the constructor and can never become `None` again let criterion_3 = - cache.panoc_cache.akkt_tolerance.unwrap() <= self.epsilon_tolerance + SMALL_EPSILON; - 
criterion_1 && criterion_2 && criterion_3 + cache + .panoc_cache + .akkt_tolerance + .ok_or(SolverError::InvalidProblemState( + "missing inner AKKT tolerance while checking the exit criterion", + ))? + <= self.epsilon_tolerance + T::epsilon(); + Ok(criterion_1 && criterion_2 && criterion_3) } /// Whether the penalty parameter should not be updated @@ -781,9 +812,9 @@ where let is_alm = problem.n1 > 0; let is_pm = problem.n2 > 0; let criterion_alm = cache.delta_y_norm_plus - <= self.sufficient_decrease_coeff * cache.delta_y_norm + SMALL_EPSILON; + <= self.sufficient_decrease_coeff * cache.delta_y_norm + T::epsilon(); let criterion_pm = - cache.f2_norm_plus <= self.sufficient_decrease_coeff * cache.f2_norm + SMALL_EPSILON; + cache.f2_norm_plus <= self.sufficient_decrease_coeff * cache.f2_norm + T::epsilon(); if is_alm && !is_pm { return criterion_alm; } else if !is_alm && is_pm { @@ -798,17 +829,29 @@ where fn update_penalty_parameter(&mut self) { let cache = &mut self.alm_cache; if let Some(xi) = &mut cache.xi { - xi[0] *= self.penalty_update_factor; + xi[0] = xi[0] * self.penalty_update_factor; } } - fn update_inner_akkt_tolerance(&mut self) { + fn update_inner_akkt_tolerance(&mut self) -> FunctionCallResult { let cache = &mut self.alm_cache; // epsilon_{nu+1} := max(epsilon, beta*epsilon_nu) - cache.panoc_cache.set_akkt_tolerance(f64::max( - cache.panoc_cache.akkt_tolerance.unwrap() * self.epsilon_update_factor, - self.epsilon_tolerance, - )); + let akkt_tolerance = + cache + .panoc_cache + .akkt_tolerance + .ok_or(SolverError::InvalidProblemState( + "missing inner AKKT tolerance while updating it", + ))?; + let next_tolerance = akkt_tolerance * self.epsilon_update_factor; + cache + .panoc_cache + .set_akkt_tolerance(if next_tolerance > self.epsilon_tolerance { + next_tolerance + } else { + self.epsilon_tolerance + }); + Ok(()) } fn final_cache_update(&mut self) { @@ -837,18 +880,18 @@ where /// - Shrinks the inner tolerance and /// - Updates the ALM cache /// - 
fn step(&mut self, u: &mut [f64]) -> Result { + fn step(&mut self, u: &mut [T]) -> Result { // store the exit status of the inner problem in this problem // (we'll need to return it within `InnerProblemStatus`) let mut inner_exit_status: ExitStatus = ExitStatus::Converged; // Project y on Y - self.project_on_set_y(); + self.project_on_set_y()?; // If the inner problem fails miserably, the failure should be propagated // upstream (using `?`). If the inner problem has not converged, that is fine, // we should keep solving. - self.solve_inner_problem(u).map(|status: SolverStatus| { + self.solve_inner_problem(u).map(|status: SolverStatus| { let inner_iters = status.iterations(); self.alm_cache.last_inner_problem_norm_fpr = status.norm_fpr(); self.alm_cache.inner_iteration_count += inner_iters; @@ -867,7 +910,7 @@ where self.compute_alm_infeasibility()?; // ALM: ||y_plus - y|| // Check exit criterion - if self.is_exit_criterion_satisfied() { + if self.is_exit_criterion_satisfied()? { // Do not continue the outer iteration // An (epsilon, delta)-AKKT point has been found return Ok(InnerProblemStatus::new(false, inner_exit_status)); @@ -876,7 +919,7 @@ where } // Update inner problem tolerance - self.update_inner_akkt_tolerance(); + self.update_inner_akkt_tolerance()?; // conclusive step: updated iteration count, resets PANOC cache, // sets f2_norm = f2_norm_plus etc @@ -885,18 +928,18 @@ where Ok(InnerProblemStatus::new(true, inner_exit_status)) // `true` means do continue the outer iterations } - fn compute_cost_at_solution(&mut self, u: &mut [f64]) -> Result { + fn compute_cost_at_solution(&mut self, u: &mut [T]) -> Result { /* WORK IN PROGRESS */ let alm_problem = &self.alm_problem; // Problem let alm_cache = &mut self.alm_cache; // ALM Cache let mut empty_vec = std::vec::Vec::new(); // Empty vector - let xi: &mut std::vec::Vec = alm_cache.xi.as_mut().unwrap_or(&mut empty_vec); - let mut __c: f64 = 0.0; + let xi: &mut std::vec::Vec = 
alm_cache.xi.as_mut().unwrap_or(&mut empty_vec); + let mut __c = T::zero(); if !xi.is_empty() { __c = xi[0]; - xi[0] = 0.0; + xi[0] = T::zero(); } - let mut cost_value: f64 = 0.0; + let mut cost_value = T::zero(); (alm_problem.parametric_cost)(u, xi, &mut cost_value)?; if !xi.is_empty() { xi[0] = __c; @@ -910,11 +953,14 @@ where /// Solve the specified ALM problem /// + /// The scalar type of `u` is the same generic floating-point type `T` used by + /// the optimizer, typically `f64` or `f32`. + /// /// - pub fn solve(&mut self, u: &mut [f64]) -> Result { + pub fn solve(&mut self, u: &mut [T]) -> Result, SolverError> { let mut num_outer_iterations = 0; // let tic = std::time::Instant::now(); - let tic = instant::Instant::now(); + let tic = web_time::Instant::now(); let mut exit_status = ExitStatus::Converged; self.alm_cache.reset(); // first, reset the cache self.alm_cache.available_time = self.max_duration; @@ -965,7 +1011,7 @@ where let c = if let Some(xi) = &self.alm_cache.xi { xi[0] } else { - 0.0 + T::zero() }; let cost = self.compute_cost_at_solution(u)?; @@ -979,12 +1025,11 @@ where .with_penalty(c) .with_cost(cost); if self.alm_problem.n1 > 0 { - let status = status.with_lagrange_multipliers( - self.alm_cache - .y_plus - .as_ref() - .expect("Although n1 > 0, there is no vector y (Lagrange multipliers)"), - ); + let status = status.with_lagrange_multipliers(self.alm_cache.y_plus.as_ref().ok_or( + SolverError::InvalidProblemState( + "missing Lagrange multipliers at the ALM solution", + ), + )?); Ok(status) } else { Ok(status) @@ -1006,21 +1051,24 @@ mod tests { FunctionCallResult, }; - fn make_dummy_alm_problem( - n1: usize, - n2: usize, - ) -> AlmProblem< - impl Fn(&[f64], &mut [f64]) -> FunctionCallResult, - impl Fn(&[f64], &mut [f64]) -> FunctionCallResult, - impl Fn(&[f64], &[f64], &mut [f64]) -> FunctionCallResult, - impl Fn(&[f64], &[f64], &mut f64) -> FunctionCallResult, - impl Constraint, - impl Constraint, - impl Constraint, - > { + type 
DummyParametricGradient = fn(&[f64], &[f64], &mut [f64]) -> FunctionCallResult; + type DummyParametricCost = fn(&[f64], &[f64], &mut f64) -> FunctionCallResult; + type DummyMapping = MappingType; + type DummyConstraint = Ball2<'static>; + type DummyAlmProblem = AlmProblem< + DummyMapping, + DummyMapping, + DummyParametricGradient, + DummyParametricCost, + DummyConstraint, + DummyConstraint, + DummyConstraint, + >; + + fn make_dummy_alm_problem(n1: usize, n2: usize) -> DummyAlmProblem { // Main problem data - let psi = void_parameteric_cost; - let d_psi = void_parameteric_gradient; + let psi: DummyParametricCost = void_parameteric_cost; + let d_psi: DummyParametricGradient = void_parameteric_gradient; let bounds = Ball2::new(None, 10.0); // ALM-type data let f1: Option = if n1 == 0 { @@ -1064,7 +1112,7 @@ mod tests { // Test: with_initial_penalty let alm_optimizer = alm_optimizer.with_initial_penalty(7.0); - assert!(!alm_optimizer.alm_cache.xi.is_none()); + assert!(alm_optimizer.alm_cache.xi.is_some()); if let Some(xi) = &alm_optimizer.alm_cache.xi { unit_test_utils::assert_nearly_equal( 7.0, @@ -1129,7 +1177,7 @@ mod tests { .with_initial_penalty(25.0) .with_initial_lagrange_multipliers(&[2., 3., 4., 10.]); - alm_optimizer.project_on_set_y(); + alm_optimizer.project_on_set_y().unwrap(); if let Some(xi_after_proj) = &alm_optimizer.alm_cache.xi { println!("xi = {:#?}", xi_after_proj); let y_projected_correct = [ @@ -1282,7 +1330,7 @@ mod tests { .with_initial_inner_tolerance(1e-1) .with_inner_tolerance_update_factor(0.2); - alm_optimizer.update_inner_akkt_tolerance(); + alm_optimizer.update_inner_akkt_tolerance().unwrap(); unit_test_utils::assert_nearly_equal( 0.1, @@ -1305,7 +1353,7 @@ mod tests { ); for _i in 1..=5 { - alm_optimizer.update_inner_akkt_tolerance(); + alm_optimizer.update_inner_akkt_tolerance().unwrap(); } unit_test_utils::assert_nearly_equal( 2e-5, @@ -1411,20 +1459,20 @@ mod tests { // should not exit yet... 
assert!( - !alm_optimizer.is_exit_criterion_satisfied(), + !alm_optimizer.is_exit_criterion_satisfied().unwrap(), "exists right away" ); let alm_optimizer = alm_optimizer .with_initial_inner_tolerance(1e-3) .with_epsilon_tolerance(1e-3); - assert!(!alm_optimizer.is_exit_criterion_satisfied()); + assert!(!alm_optimizer.is_exit_criterion_satisfied().unwrap()); alm_optimizer.alm_cache.delta_y_norm_plus = 1e-3; - assert!(!alm_optimizer.is_exit_criterion_satisfied()); + assert!(!alm_optimizer.is_exit_criterion_satisfied().unwrap()); alm_optimizer.alm_cache.f2_norm_plus = 1e-3; - assert!(alm_optimizer.is_exit_criterion_satisfied()); + assert!(alm_optimizer.is_exit_criterion_satisfied().unwrap()); } #[test] diff --git a/src/alm/alm_optimizer_status.rs b/rust/src/alm/alm_optimizer_status.rs similarity index 81% rename from src/alm/alm_optimizer_status.rs rename to rust/src/alm/alm_optimizer_status.rs index 5c10e477..6805c030 100644 --- a/src/alm/alm_optimizer_status.rs +++ b/rust/src/alm/alm_optimizer_status.rs @@ -1,4 +1,5 @@ use crate::core::ExitStatus; +use num::Float; /// Solution statistics for `AlmOptimizer` /// @@ -6,8 +7,14 @@ use crate::core::ExitStatus; /// The idea is that only Optimization Engine can create optimizer /// `AlmOptimizerStatus` instances. /// +/// The scalar type `T` is generic and is typically `f64` or `f32`. The default +/// is `f64`. 
+/// #[derive(Debug)] -pub struct AlmOptimizerStatus { +pub struct AlmOptimizerStatus +where + T: Float, +{ /// Exit status exit_status: ExitStatus, /// Number of outer iterations @@ -18,23 +25,23 @@ pub struct AlmOptimizerStatus { /// inner solvers num_inner_iterations: usize, /// Norm of the fixed-point residual of the the problem - last_problem_norm_fpr: f64, + last_problem_norm_fpr: T, /// Lagrange multipliers vector - lagrange_multipliers: Option>, + lagrange_multipliers: Option>, /// Total solve time solve_time: std::time::Duration, /// Last value of penalty parameter - penalty: f64, + penalty: T, /// A measure of infeasibility of constraints F1(u; p) in C - delta_y_norm: f64, + delta_y_norm: T, /// Norm of F2 at the solution, which is a measure of infeasibility /// of constraints F2(u; p) = 0 - f2_norm: f64, + f2_norm: T, /// Value of cost function at optimal solution (optimal cost) - cost: f64, + cost: T, } -impl AlmOptimizerStatus { +impl AlmOptimizerStatus { /// Constructor for instances of `AlmOptimizerStatus` /// /// This method is only accessibly within this crate. 
@@ -60,13 +67,13 @@ impl AlmOptimizerStatus { exit_status, num_outer_iterations: 0, num_inner_iterations: 0, - last_problem_norm_fpr: -1.0, + last_problem_norm_fpr: -T::one(), lagrange_multipliers: None, solve_time: std::time::Duration::from_nanos(0), - penalty: 0.0, - delta_y_norm: 0.0, - f2_norm: 0.0, - cost: 0.0, + penalty: T::zero(), + delta_y_norm: T::zero(), + f2_norm: T::zero(), + cost: T::zero(), } } @@ -129,7 +136,7 @@ impl AlmOptimizerStatus { /// Does not panic; it is the responsibility of the caller to provide a vector of /// Lagrange multipliers of correct length /// - pub(crate) fn with_lagrange_multipliers(mut self, lagrange_multipliers: &[f64]) -> Self { + pub(crate) fn with_lagrange_multipliers(mut self, lagrange_multipliers: &[T]) -> Self { self.lagrange_multipliers = Some(vec![]); if let Some(y) = &mut self.lagrange_multipliers { y.extend_from_slice(lagrange_multipliers); @@ -144,9 +151,9 @@ impl AlmOptimizerStatus { /// /// The method panics if the provided penalty parameter is negative /// - pub(crate) fn with_penalty(mut self, penalty: f64) -> Self { + pub(crate) fn with_penalty(mut self, penalty: T) -> Self { assert!( - penalty >= 0.0, + penalty >= T::zero(), "the penalty parameter should not be negative" ); self.penalty = penalty; @@ -161,28 +168,31 @@ impl AlmOptimizerStatus { /// The method panics if the provided norm of the fixed-point residual is /// negative /// - pub(crate) fn with_last_problem_norm_fpr(mut self, last_problem_norm_fpr: f64) -> Self { + pub(crate) fn with_last_problem_norm_fpr(mut self, last_problem_norm_fpr: T) -> Self { assert!( - last_problem_norm_fpr >= 0.0, + last_problem_norm_fpr >= T::zero(), "last_problem_norm_fpr should not be negative" ); self.last_problem_norm_fpr = last_problem_norm_fpr; self } - pub(crate) fn with_delta_y_norm(mut self, delta_y_norm: f64) -> Self { - assert!(delta_y_norm >= 0.0, "delta_y_norm must be nonnegative"); + pub(crate) fn with_delta_y_norm(mut self, delta_y_norm: T) -> Self { + 
assert!( + delta_y_norm >= T::zero(), + "delta_y_norm must be nonnegative" + ); self.delta_y_norm = delta_y_norm; self } - pub(crate) fn with_f2_norm(mut self, f2_norm: f64) -> Self { - assert!(f2_norm >= 0.0, "f2_norm must be nonnegative"); + pub(crate) fn with_f2_norm(mut self, f2_norm: T) -> Self { + assert!(f2_norm >= T::zero(), "f2_norm must be nonnegative"); self.f2_norm = f2_norm; self } - pub(crate) fn with_cost(mut self, cost: f64) -> Self { + pub(crate) fn with_cost(mut self, cost: T) -> Self { self.cost = cost; self } @@ -192,17 +202,17 @@ impl AlmOptimizerStatus { // ------------------------------------------------- /// Update cost (to be used when the cost needs to be scaled as a result of preconditioning) - pub fn update_cost(&mut self, new_cost: f64) { + pub fn update_cost(&mut self, new_cost: T) { self.cost = new_cost; } /// Update ALM infeasibility - pub fn update_f1_infeasibility(&mut self, new_alm_infeasibility: f64) { + pub fn update_f1_infeasibility(&mut self, new_alm_infeasibility: T) { self.delta_y_norm = new_alm_infeasibility; } /// Update PM infeasibility - pub fn update_f2_norm(&mut self, new_pm_infeasibility: f64) { + pub fn update_f2_norm(&mut self, new_pm_infeasibility: T) { self.f2_norm = new_pm_infeasibility; } @@ -241,7 +251,7 @@ impl AlmOptimizerStatus { /// Vector of Lagrange multipliers at the solution /// - /// The method returns a reference to an `Option>` which contains + /// The method returns a reference to an `Option>` which contains /// the vector of Lagrange multipliers at the solution, or is `None` if /// the problem has no ALM-type constraints. 
/// @@ -249,7 +259,7 @@ impl AlmOptimizerStatus { /// /// Does not panic /// - pub fn lagrange_multipliers(&self) -> &Option> { + pub fn lagrange_multipliers(&self) -> &Option> { &self.lagrange_multipliers } @@ -259,7 +269,7 @@ impl AlmOptimizerStatus { /// /// Does not panic /// - pub fn last_problem_norm_fpr(&self) -> f64 { + pub fn last_problem_norm_fpr(&self) -> T { self.last_problem_norm_fpr } @@ -278,23 +288,23 @@ impl AlmOptimizerStatus { /// # Panics /// /// Does not panic - pub fn penalty(&self) -> f64 { + pub fn penalty(&self) -> T { self.penalty } /// Norm of Delta y divided by max{c, 1} - measure of infeasibility - pub fn delta_y_norm_over_c(&self) -> f64 { + pub fn delta_y_norm_over_c(&self) -> T { let c = self.penalty(); - self.delta_y_norm / if c < 1.0 { 1.0 } else { c } + self.delta_y_norm / if c < T::one() { T::one() } else { c } } /// Norm of F2(u) - measure of infeasibility of F2(u) = 0 - pub fn f2_norm(&self) -> f64 { + pub fn f2_norm(&self) -> T { self.f2_norm } /// Value of the cost function at the solution - pub fn cost(&self) -> f64 { + pub fn cost(&self) -> T { self.cost } } diff --git a/src/alm/alm_problem.rs b/rust/src/alm/alm_problem.rs similarity index 77% rename from src/alm/alm_problem.rs rename to rust/src/alm/alm_problem.rs index 9f28bd2e..98d24955 100644 --- a/src/alm/alm_problem.rs +++ b/rust/src/alm/alm_problem.rs @@ -1,4 +1,6 @@ use crate::{constraints::Constraint, FunctionCallResult}; +use num::Float; +use std::marker::PhantomData; /// Definition of optimization problem to be solved with `AlmOptimizer`. The optimization /// problem has the general form @@ -24,6 +26,9 @@ use crate::{constraints::Constraint, FunctionCallResult}; /// are mappings with smooth partial derivatives, and /// - $C\subseteq\mathbb{R}^{n_1}$ is a convex closed set on which we can easily compute projections. /// +/// The scalar type `T` is generic and is typically `f64` or `f32`. The default +/// is `f64`. 
+/// pub struct AlmProblem< MappingAlm, MappingPm, @@ -32,16 +37,18 @@ pub struct AlmProblem< ConstraintsType, AlmSetC, LagrangeSetY, + T = f64, > where + T: Float, // This is function F1: R^xn --> R^n1 (ALM) - MappingAlm: Fn(&[f64], &mut [f64]) -> FunctionCallResult, + MappingAlm: Fn(&[T], &mut [T]) -> FunctionCallResult, // This is function F2: R^xn --> R^n2 (PM) - MappingPm: Fn(&[f64], &mut [f64]) -> FunctionCallResult, - ParametricGradientType: Fn(&[f64], &[f64], &mut [f64]) -> FunctionCallResult, - ParametricCostType: Fn(&[f64], &[f64], &mut f64) -> FunctionCallResult, - ConstraintsType: Constraint, - AlmSetC: Constraint, - LagrangeSetY: Constraint, + MappingPm: Fn(&[T], &mut [T]) -> FunctionCallResult, + ParametricGradientType: Fn(&[T], &[T], &mut [T]) -> FunctionCallResult, + ParametricCostType: Fn(&[T], &[T], &mut T) -> FunctionCallResult, + ConstraintsType: Constraint, + AlmSetC: Constraint, + LagrangeSetY: Constraint, { // // NOTE: the reason why we need to define different set types (ConstraintsType, @@ -67,6 +74,13 @@ pub struct AlmProblem< pub(crate) n1: usize, /// number of PM-type parameters (range dim of F2) pub(crate) n2: usize, + /// This phantom data object is used because all other attributes + /// are not tied to the type T directly. T appears in some + /// trait bounds (e.g., MappingAlm, ParametricCostType, etc), but this + /// is not enough for the struct layout/type system. + /// Without this, Rust gives a bunch of errors. Movoer, this is a zero-size + /// object. 
+ marker: PhantomData, } impl< @@ -77,6 +91,7 @@ impl< ConstraintsType, AlmSetC, LagrangeSetY, + T, > AlmProblem< MappingAlm, @@ -86,15 +101,17 @@ impl< ConstraintsType, AlmSetC, LagrangeSetY, + T, > where - MappingAlm: Fn(&[f64], &mut [f64]) -> FunctionCallResult, - MappingPm: Fn(&[f64], &mut [f64]) -> FunctionCallResult, - ParametricGradientType: Fn(&[f64], &[f64], &mut [f64]) -> FunctionCallResult, - ParametricCostType: Fn(&[f64], &[f64], &mut f64) -> FunctionCallResult, - ConstraintsType: Constraint, - AlmSetC: Constraint, - LagrangeSetY: Constraint, + T: Float, + MappingAlm: Fn(&[T], &mut [T]) -> FunctionCallResult, + MappingPm: Fn(&[T], &mut [T]) -> FunctionCallResult, + ParametricGradientType: Fn(&[T], &[T], &mut [T]) -> FunctionCallResult, + ParametricCostType: Fn(&[T], &[T], &mut T) -> FunctionCallResult, + ConstraintsType: Constraint, + AlmSetC: Constraint, + LagrangeSetY: Constraint, { ///Constructs new instance of `AlmProblem` /// @@ -116,8 +133,13 @@ where /// /// Instance of `AlmProblem` /// + /// The scalar type `T` is inferred from the closures and constraint types. + /// /// # Example /// + /// This example uses `f64` for simplicity, but the same API also works with + /// `f32`. + /// /// /// ```rust /// use optimization_engine::{FunctionCallResult, alm::*, constraints::Ball2}; @@ -132,6 +154,8 @@ where /// ); /// ``` /// + #[allow(clippy::too_many_arguments)] + #[must_use] pub fn new( constraints: ConstraintsType, alm_set_c: Option, @@ -163,6 +187,7 @@ where mapping_f2, n1, n2, + marker: PhantomData, } } } diff --git a/src/alm/mod.rs b/rust/src/alm/mod.rs similarity index 65% rename from src/alm/mod.rs rename to rust/src/alm/mod.rs index 2221c2cd..c58358aa 100644 --- a/src/alm/mod.rs +++ b/rust/src/alm/mod.rs @@ -16,6 +16,10 @@ //! the iterative procedure, such as the solution time, number of iterations, //! measures of accuracy and more, in the form of an [`AlmOptimizerStatus`] //! +//! 
All public ALM types are generic over a scalar type `T`, which is typically +//! `f64` or `f32`. The default is `f64`, so existing code can often omit `T`. +//! The examples in this module use `f64` for brevity. +//! //! When using `AlmOptimizer`, the user is expected to provide a modified cost //! function, `psi` (see [`AlmOptimizer`] for details). This should not be a problem //! for users that use Optimization Engine via its Python or MATLAB interfaces. @@ -42,18 +46,22 @@ pub use alm_problem::AlmProblem; /// Type of mappings $F_1(u)$ and $F_2(u)$ /// +/// The scalar type `T` is a floating-point type, typically `f64` or `f32`. +/// /// Mappings $F_1$ and $F_2$ are computed by functions with signature /// /// ```ignore -/// fn mapping_f(&[f64], &mut [f64]) -> Result<(), crate::SolverError> +/// fn mapping_f(&[T], &mut [T]) -> Result<(), crate::SolverError> /// ``` -pub type MappingType = fn(&[f64], &mut [f64]) -> Result<(), crate::SolverError>; +pub type MappingType = fn(&[T], &mut [T]) -> Result<(), crate::SolverError>; /// Type of the Jacobian of mappings $F_1$ and $F_2$ /// +/// The scalar type `T` is a floating-point type, typically `f64` or `f32`. +/// /// These are mappings $(u, d) \mapsto JF_1(u)^\top d$, for given vectors $u\in\mathbb{R}$ /// and $d\in\mathbb{R}^{n_1}$ (similarly for $F_2$) -pub type JacobianMappingType = fn(&[f64], &[f64], &mut [f64]) -> Result<(), crate::SolverError>; +pub type JacobianMappingType = fn(&[T], &[T], &mut [T]) -> Result<(), crate::SolverError>; /// No mapping $F_1(u)$ or $F_2(u)$ is specified pub const NO_MAPPING: Option = None::; @@ -65,6 +73,31 @@ pub const NO_JACOBIAN_MAPPING: Option = None:: = None::; +/// Helper for the generic case where no mapping is provided. +/// +/// This is useful when the scalar type is not the default `f64`, for example +/// when using `f32`. +pub fn no_mapping() -> Option> { + None::> +} + +/// Helper for the generic case where no Jacobian mapping is provided. 
+/// +/// This is useful when the scalar type is not the default `f64`, for example +/// when using `f32`. +pub fn no_jacobian_mapping() -> Option> { + None::> +} + +/// Helper for the generic case where no set is provided. +/// +/// This is useful when the scalar type is not the default `f64`, for example +/// when using `f32`. +pub fn no_set() -> Option { + let _ = std::marker::PhantomData::; + None:: +} + /* ---------------------------------------------------------------------------- */ /* TESTS */ /* ---------------------------------------------------------------------------- */ diff --git a/src/alm/tests.rs b/rust/src/alm/tests.rs similarity index 72% rename from src/alm/tests.rs rename to rust/src/alm/tests.rs index 0fd1c2e7..08e5ba8f 100644 --- a/src/alm/tests.rs +++ b/rust/src/alm/tests.rs @@ -153,7 +153,9 @@ fn t_create_alm_optimizer() { .with_initial_lagrange_multipliers(&vec![5.0; n1]); let mut u = vec![0.0; nx]; - println!("result = {:?}", alm_optimizer.solve(&mut u)); + let result = alm_optimizer.solve(&mut u); + println!("result = {:?}", result); + assert!(result.is_ok()); } #[test] @@ -234,6 +236,84 @@ fn t_alm_only_penalty_method() { assert!(r.f2_norm() < 1e-6); } +#[test] +fn t_alm_only_penalty_method_f32() { + let tolerance = 1e-4_f32; + let nx = 3; + let n1 = 0; + let n2 = 1; + let lbfgs_mem = 5; + let panoc_cache = PANOCCache::::new(nx, tolerance, lbfgs_mem); + let mut alm_cache = AlmCache::::new(panoc_cache, n1, n2); + + let bounds = NoConstraints::new(); + + let f = |u: &[f32], cost: &mut f32| -> Result<(), SolverError> { + *cost = 0.5_f32 * matrix_operations::norm2_squared(u) + matrix_operations::sum(u); + Ok(()) + }; + + let df = |u: &[f32], grad: &mut [f32]| -> Result<(), SolverError> { + grad.iter_mut() + .zip(u.iter()) + .for_each(|(grad_i, u_i)| *grad_i = *u_i + 1.0_f32); + Ok(()) + }; + + let f2 = |u: &[f32], res: &mut [f32]| -> Result<(), SolverError> { + res[0] = matrix_operations::norm2_squared(u) - 1.0_f32; + Ok(()) + }; + let jf2t 
= |u: &[f32], d: &[f32], res: &mut [f32]| -> Result<(), crate::SolverError> { + res.iter_mut() + .zip(u.iter()) + .for_each(|(res_i, u_i)| *res_i = *u_i * d[0]); + Ok(()) + }; + + let factory = AlmFactory::new( + f, + df, + no_mapping::(), + no_jacobian_mapping::(), + Some(f2), + Some(jf2t), + no_set::(), + n2, + ); + + let alm_problem = AlmProblem::new( + bounds, + no_set::(), + no_set::(), + |u: &[f32], xi: &[f32], cost: &mut f32| -> Result<(), SolverError> { + factory.psi(u, xi, cost) + }, + |u: &[f32], xi: &[f32], grad: &mut [f32]| -> Result<(), SolverError> { + factory.d_psi(u, xi, grad) + }, + no_mapping::(), + Some(f2), + n1, + n2, + ); + + let mut alm_optimizer = AlmOptimizer::new(&mut alm_cache, alm_problem) + .with_delta_tolerance(1e-5_f32) + .with_epsilon_tolerance(1e-4_f32) + .with_max_outer_iterations(20) + .with_max_inner_iterations(1000) + .with_initial_penalty(5000.0_f32) + .with_penalty_update_factor(2.2_f32); + + let mut u = vec![0.1_f32; nx]; + let solver_result = alm_optimizer.solve(&mut u); + assert!(solver_result.is_ok()); + let r = solver_result.unwrap(); + assert_eq!(ExitStatus::Converged, r.exit_status()); + assert!(r.f2_norm() < 1e-4_f32); +} + #[test] fn t_alm_numeric_test_1() { let tolerance = 1e-8; @@ -312,6 +392,80 @@ fn t_alm_numeric_test_1() { ); } +#[test] +fn t_alm_numeric_test_1_f32() { + let tolerance = 1e-4_f32; + let nx = 3; + let n1 = 2; + let n2 = 0; + let lbfgs_mem = 3; + let panoc_cache = PANOCCache::::new(nx, tolerance, lbfgs_mem); + let mut alm_cache = AlmCache::::new(panoc_cache, n1, n2); + + let set_c = Ball2::new(None, 1.0_f32); + let bounds = Ball2::new(None, 10.0_f32); + let set_y = Ball2::new(None, 10_000.0_f32); + + let factory = AlmFactory::new( + mocks::f0::, + mocks::d_f0::, + Some(mocks::mapping_f1_affine::), + Some(mocks::mapping_f1_affine_jacobian_product::), + no_mapping::(), + no_jacobian_mapping::(), + Some(set_c), + n2, + ); + + let set_c_b = Ball2::new(None, 1.0_f32); + let alm_problem = AlmProblem::new( 
+ bounds, + Some(set_c_b), + Some(set_y), + |u: &[f32], xi: &[f32], cost: &mut f32| -> FunctionCallResult { factory.psi(u, xi, cost) }, + |u: &[f32], xi: &[f32], grad: &mut [f32]| -> FunctionCallResult { + factory.d_psi(u, xi, grad) + }, + Some(mocks::mapping_f1_affine::), + no_mapping::(), + n1, + n2, + ); + + let mut alm_optimizer = AlmOptimizer::new(&mut alm_cache, alm_problem) + .with_delta_tolerance(1e-3_f32) + .with_max_outer_iterations(30) + .with_epsilon_tolerance(1e-4_f32) + .with_initial_inner_tolerance(1e-2_f32) + .with_inner_tolerance_update_factor(0.5_f32) + .with_initial_penalty(1.0_f32) + .with_penalty_update_factor(1.2_f32) + .with_sufficient_decrease_coefficient(0.1_f32) + .with_initial_lagrange_multipliers(&vec![5.0_f32; n1]); + + let mut u = vec![0.0_f32; nx]; + let solver_result = alm_optimizer.solve(&mut u); + assert!(solver_result.is_ok()); + let r = solver_result.unwrap(); + assert_eq!(ExitStatus::Converged, r.exit_status()); + assert!(r.num_outer_iterations() > 0 && r.num_outer_iterations() <= 30); + assert!(r.last_problem_norm_fpr() < tolerance); + assert!(r.delta_y_norm_over_c() < 1e-3_f32); + + let mut f1u = vec![0.0_f32; n1]; + assert!(mocks::mapping_f1_affine(&u, &mut f1u).is_ok()); + let mut projection = f1u.clone(); + let set_c_check = Ball2::new(None, 1.0_f32); + set_c_check.project(&mut projection).unwrap(); + assert!((f1u[0] - projection[0]).abs() < 2e-3_f32); + assert!((f1u[1] - projection[1]).abs() < 2e-3_f32); + + let cost_actual = r.cost(); + let mut cost_expected = 0.0_f32; + assert!(mocks::f0(&u, &mut cost_expected).is_ok()); + assert!((cost_actual - cost_expected).abs() < 2e-3_f32); +} + fn mapping_f2(u: &[f64], res: &mut [f64]) -> FunctionCallResult { res[0] = u[0]; res[1] = u[1]; @@ -327,6 +481,25 @@ fn jac_mapping_f2_tr(_u: &[f64], d: &[f64], res: &mut [f64]) -> FunctionCallResu Ok(()) } +fn mapping_f2_generic(u: &[T], res: &mut [T]) -> FunctionCallResult { + res[0] = u[0]; + res[1] = u[1]; + res[2] = u[2] - u[0]; + 
res[3] = u[2] - u[0] - u[1]; + Ok(()) +} + +fn jac_mapping_f2_tr_generic( + _u: &[T], + d: &[T], + res: &mut [T], +) -> FunctionCallResult { + res[0] = d[0] - d[2] - d[3]; + res[1] = d[1] - d[3]; + res[2] = d[2] + d[3]; + Ok(()) +} + #[test] fn t_alm_numeric_test_2() { let tolerance = 1e-8; @@ -391,6 +564,71 @@ fn t_alm_numeric_test_2() { println!("y = {:#?}", r.lagrange_multipliers()); } +#[test] +fn t_alm_numeric_test_2_f32() { + let tolerance = 1e-4_f32; + let nx = 3; + let n1 = 2; + let n2 = 4; + let lbfgs_mem = 3; + let panoc_cache = PANOCCache::::new(nx, tolerance, lbfgs_mem); + let mut alm_cache = AlmCache::::new(panoc_cache, n1, n2); + + let set_c = Ball2::new(None, 1.0_f32); + let bounds = Ball2::new(None, 10.0_f32); + let set_y = Ball2::new(None, 10_000.0_f32); + + let factory = AlmFactory::new( + mocks::f0::, + mocks::d_f0::, + Some(mocks::mapping_f1_affine::), + Some(mocks::mapping_f1_affine_jacobian_product::), + Some(mapping_f2_generic::), + Some(jac_mapping_f2_tr_generic::), + Some(set_c), + n2, + ); + + let set_c_b = Ball2::new(None, 1.0_f32); + let alm_problem = AlmProblem::new( + bounds, + Some(set_c_b), + Some(set_y), + |u: &[f32], xi: &[f32], cost: &mut f32| -> FunctionCallResult { factory.psi(u, xi, cost) }, + |u: &[f32], xi: &[f32], grad: &mut [f32]| -> FunctionCallResult { + factory.d_psi(u, xi, grad) + }, + Some(mocks::mapping_f1_affine::), + Some(mapping_f2_generic::), + n1, + n2, + ); + + let mut alm_optimizer = AlmOptimizer::new(&mut alm_cache, alm_problem) + .with_delta_tolerance(1e-3_f32) + .with_epsilon_tolerance(1e-4_f32) + .with_initial_inner_tolerance(1e-3_f32); + + let mut u = vec![0.0_f32; nx]; + let solver_result = alm_optimizer.solve(&mut u); + assert!(solver_result.is_ok()); + let r = solver_result.unwrap(); + assert_eq!(ExitStatus::Converged, r.exit_status()); + assert!(r.num_outer_iterations() > 0 && r.num_outer_iterations() <= 10); + assert!(r.last_problem_norm_fpr() < tolerance); + + let mut f1u = vec![0.0_f32; n1]; + 
assert!(mocks::mapping_f1_affine(&u, &mut f1u).is_ok()); + let mut f1_proj = f1u.clone(); + Ball2::new(None, 1.0_f32).project(&mut f1_proj).unwrap(); + assert!((f1u[0] - f1_proj[0]).abs() < 2e-3_f32); + assert!((f1u[1] - f1_proj[1]).abs() < 2e-3_f32); + + let mut f2u = vec![0.0_f32; n2]; + assert!(mapping_f2_generic(&u, &mut f2u).is_ok()); + assert!(crate::matrix_operations::norm2(&f2u) < 1e-3_f32); +} + // Trait alias (type aliases are not stable yet, so the alternative is to use // the following trait definition, i.e., to "extend" Fn and implement it) // See https://bit.ly/2zJvd6g diff --git a/src/cholesky_factorizer.rs b/rust/src/cholesky_factorizer.rs similarity index 93% rename from src/cholesky_factorizer.rs rename to rust/src/cholesky_factorizer.rs index 12cad81c..0732e3fe 100644 --- a/src/cholesky_factorizer.rs +++ b/rust/src/cholesky_factorizer.rs @@ -243,7 +243,7 @@ mod tests { let expected_l = [2.0, 0.0, 0.0, 6.0, 1.0, 0.0, -8.0, 5.0, 3.0]; unit_test_utils::nearly_equal_array( &expected_l, - &factorizer.cholesky_factor(), + factorizer.cholesky_factor(), 1e-10, 1e-12, ); @@ -260,6 +260,21 @@ mod tests { unit_test_utils::nearly_equal_array(&expected_sol, &x, 1e-10, 1e-12); } + #[test] + fn t_cholesky_f32() { + let a = vec![4.0_f32, 12.0, -16.0, 12.0, 37.0, -43.0, -16.0, -43.0, 98.0]; + let mut factorizer = CholeskyFactorizer::new(3); + factorizer.factorize(&a).unwrap(); + + let expected_l = [2.0_f32, 0.0, 0.0, 6.0, 1.0, 0.0, -8.0, 5.0, 3.0]; + unit_test_utils::nearly_equal_array(&expected_l, factorizer.cholesky_factor(), 1e-5, 1e-6); + + let rhs = vec![-5.0_f32, 2.0, -3.0]; + let x = factorizer.solve(&rhs).unwrap(); + let expected_sol = [-280.25_f32, 77.0, -12.0]; + unit_test_utils::nearly_equal_array(&expected_sol, &x, 1e-4, 1e-5); + } + #[test] fn t_cholesky_not_square_matrix() { let a = vec![1.0_f64, 2., 7., 5., 9.]; diff --git a/rust/src/constraints/affine_space.rs b/rust/src/constraints/affine_space.rs new file mode 100644 index 00000000..2bb2fec8 --- 
/dev/null +++ b/rust/src/constraints/affine_space.rs @@ -0,0 +1,182 @@ +use super::Constraint; +use crate::{ + matrix_operations, CholeskyError, CholeskyFactorizer, FunctionCallResult, SolverError, +}; + +use ndarray::{ArrayView1, ArrayView2, LinalgScalar}; +use num::Float; + +#[derive(Clone)] +/// An affine space here is defined as the set of solutions of a linear equation, $Ax = b$, +/// that is, $E=\\{x\in\mathbb{R}^n: Ax = b\\}$, which is an affine space. It is assumed that +/// the matrix $AA^\intercal$ is full-rank. +pub struct AffineSpace { + a_mat: Vec, + b_vec: Vec, + factorizer: CholeskyFactorizer, + n_rows: usize, + n_cols: usize, +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +/// Errors that can arise when constructing an [`AffineSpace`]. +pub enum AffineSpaceError { + /// The vector `b` is empty. + EmptyB, + /// The dimensions of `A` and `b` are incompatible. + IncompatibleDimensions, + /// The matrix `AA^T` is not positive definite, which typically means + /// that `A` does not have full row rank. + NotFullRowRank, +} + +impl AffineSpace +where + T: Float + LinalgScalar + 'static, +{ + /// Construct a new affine space given the matrix $A\in\mathbb{R}^{m\times n}$ and + /// the vector $b\in\mathbb{R}^m$ + /// + /// ## Arguments + /// + /// - `a`: matrix $A$, row-wise data + /// - `b`: vector $b$ + /// + /// ## Returns + /// New Affine Space structure + /// + /// ## Panics + /// + /// Panics if: + /// + /// - `b` is empty, + /// - `A` and `b` have incompatible dimensions, + /// - `A` does not have full row rank. + /// + /// Use [`AffineSpace::try_new`] if you want to handle these conditions + /// without panicking. + /// + pub fn new(a: Vec, b: Vec) -> Self { + Self::try_new(a, b).expect("invalid affine space data") + } + + /// Construct a new affine space given the matrix $A\in\mathbb{R}^{m\times n}$ + /// and the vector $b\in\mathbb{R}^m$. 
+ /// + /// ## Arguments + /// + /// - `a`: matrix $A$, row-wise data + /// - `b`: vector $b$ + /// + /// ## Returns + /// + /// Returns a new [`AffineSpace`] on success, or an [`AffineSpaceError`] if + /// the provided data are invalid. + /// + /// ## Example + /// + /// ```rust + /// use optimization_engine::constraints::{AffineSpace, Constraint}; + /// + /// let a = vec![1.0, 1.0, 1.0, -1.0]; + /// let b = vec![1.0, 0.0]; + /// let affine_space = AffineSpace::try_new(a, b).unwrap(); + /// + /// let mut x = [2.0, 2.0]; + /// affine_space.project(&mut x).unwrap(); + /// ``` + pub fn try_new(a: Vec, b: Vec) -> Result { + let n_rows = b.len(); + let n_elements_a = a.len(); + if n_rows == 0 { + return Err(AffineSpaceError::EmptyB); + } + if !n_elements_a.is_multiple_of(n_rows) { + return Err(AffineSpaceError::IncompatibleDimensions); + } + let n_cols = n_elements_a / n_rows; + let aat = matrix_operations::mul_a_at(&a, n_rows, n_cols) + .map_err(|_| AffineSpaceError::IncompatibleDimensions)?; + let mut factorizer = CholeskyFactorizer::new(n_rows); + factorizer.factorize(&aat).map_err(|err| match err { + CholeskyError::NotPositiveDefinite => AffineSpaceError::NotFullRowRank, + CholeskyError::DimensionMismatch | CholeskyError::NotFactorized => { + AffineSpaceError::IncompatibleDimensions + } + })?; + Ok(AffineSpace { + a_mat: a, + b_vec: b, + factorizer, + n_rows, + n_cols, + }) + } +} + +impl Constraint for AffineSpace +where + T: Float + LinalgScalar + 'static, +{ + /// Projection onto the set $E = \\{x: Ax = b\\}$, which is computed by + /// $$P_E(x) = x - A^\intercal z(x),$$ + /// where $z$ is the solution of the linear system + /// $$(AA^\intercal)z = Ax - b,$$ + /// which has a unique solution provided $A$ has full row rank. 
The linear system + /// is solved by computing the Cholesky factorization of $AA^\intercal$, which is + /// done using `CholeskyFactorizer` + /// + /// ## Arguments + /// + /// - `x`: The given vector $x$ is updated with the projection on the set + /// + /// ## Example + /// + /// Consider the set $X = \\{x \in \mathbb{R}^4 :Ax = b\\}$, with $A\in\mathbb{R}^{3\times 4}$ + /// being the matrix + /// $$A = \begin{bmatrix}0.5 & 0.1& 0.2& -0.3\\\\ -0.6& 0.3& 0 & 0.5 \\\\ 1.0& 0.1& -1& -0.4\end{bmatrix},$$ + /// and $b$ being the vector + /// $$b = \begin{bmatrix}1 \\\\ 2 \\\\ -0.5\end{bmatrix}.$$ + /// + /// ```rust + /// use optimization_engine::constraints::*; + /// + /// let a = vec![0.5, 0.1, 0.2, -0.3, -0.6, 0.3, 0., 0.5, 1.0, 0.1, -1.0, -0.4,]; + /// let b = vec![1., 2., -0.5]; + /// let affine_set = AffineSpace::new(a, b); + /// let mut x = [1., -2., -0.3, 0.5]; + /// affine_set.project(&mut x).unwrap(); + /// ``` + /// + /// The result is stored in `x` and it can be verified that $Ax = b$. + fn project(&self, x: &mut [T]) -> FunctionCallResult { + let n = self.n_cols; + assert!(x.len() == n, "x has wrong dimension"); + + // Step 1: Compute e = Ax - b + let a = ArrayView2::from_shape((self.n_rows, self.n_cols), &self.a_mat).map_err(|_| { + SolverError::InvalidProblemState("failed to construct the affine-space matrix view") + })?; + let x_view = ArrayView1::from(&x[..]); + let b = ArrayView1::from(&self.b_vec[..]); + let e = a.dot(&x_view) - b; + let e_slice: &[T] = e.as_slice().ok_or(SolverError::InvalidProblemState( + "affine-space residual vector is not stored contiguously", + ))?; + + // Step 2: Solve AA' z = e and compute z + let z = self.factorizer.solve(e_slice)?; + + // Step 3: Compute x = x - A'z + let at_z = a.t().dot(&ArrayView1::from(&z[..])); + for (xi, corr) in x.iter_mut().zip(at_z.iter()) { + *xi = *xi - *corr; + } + Ok(()) + } + + /// Affine sets are convex. 
+ fn is_convex(&self) -> bool { + true + } +} diff --git a/rust/src/constraints/ball1.rs b/rust/src/constraints/ball1.rs new file mode 100644 index 00000000..0039b268 --- /dev/null +++ b/rust/src/constraints/ball1.rs @@ -0,0 +1,86 @@ +use super::Constraint; +use super::Simplex; +use crate::FunctionCallResult; +use num::Float; +use std::iter::Sum; + +#[derive(Copy, Clone)] +/// A norm-1 ball, that is, a set given by $B_1^r = \\{x \in \mathbb{R}^n {}:{} \Vert{}x{}\Vert_1 \leq r\\}$ +/// or a ball-1 centered at a point $x_c$, that is, $B_1^{x_c, r} = \\{x \in \mathbb{R}^n {}:{} \Vert{}x-x_c{}\Vert_1 \leq r\\}$ +pub struct Ball1<'a, T = f64> { + center: Option<&'a [T]>, + radius: T, + simplex: Simplex, +} + +impl<'a, T: Float> Ball1<'a, T> { + /// Construct a new ball-1 with given center and radius. + /// If no `center` is given, then it is assumed to be in the origin + /// + /// # Example + /// + /// ``` + /// use optimization_engine::constraints::{Ball1, Constraint}; + /// + /// let ball = Ball1::new(None, 1.0); + /// let mut x = [2.0, -0.5]; + /// ball.project(&mut x).unwrap(); + /// ``` + pub fn new(center: Option<&'a [T]>, radius: T) -> Self { + assert!(radius > T::zero()); + let simplex = Simplex::new(radius); + Ball1 { + center, + radius, + simplex, + } + } + + fn project_on_ball1_centered_at_origin(&self, x: &mut [T]) -> FunctionCallResult + where + T: Sum, + { + if crate::matrix_operations::norm1(x) > self.radius { + // u = |x| (copied) + let mut u = vec![T::zero(); x.len()]; + u.iter_mut() + .zip(x.iter()) + .for_each(|(ui, &xi)| *ui = xi.abs()); + // u = P_simplex(u) + self.simplex.project(&mut u)?; + x.iter_mut() + .zip(u.iter()) + .for_each(|(xi, &ui)| *xi = xi.signum() * ui); + } + Ok(()) + } +} + +impl<'a, T> Constraint for Ball1<'a, T> +where + T: Float + Sum, +{ + fn project(&self, x: &mut [T]) -> FunctionCallResult { + if let Some(center) = &self.center { + assert_eq!( + x.len(), + center.len(), + "x and xc have incompatible dimensions" + ); + 
x.iter_mut() + .zip(center.iter()) + .for_each(|(xi, &ci)| *xi = *xi - ci); + self.project_on_ball1_centered_at_origin(x)?; + x.iter_mut() + .zip(center.iter()) + .for_each(|(xi, &ci)| *xi = *xi + ci); + } else { + self.project_on_ball1_centered_at_origin(x)?; + } + Ok(()) + } + + fn is_convex(&self) -> bool { + true + } +} diff --git a/src/constraints/ball2.rs b/rust/src/constraints/ball2.rs similarity index 60% rename from src/constraints/ball2.rs rename to rust/src/constraints/ball2.rs index c4475cde..1c0672c2 100644 --- a/src/constraints/ball2.rs +++ b/rust/src/constraints/ball2.rs @@ -1,35 +1,51 @@ use super::Constraint; +use crate::FunctionCallResult; +use num::Float; +use std::iter::Sum; #[derive(Copy, Clone)] /// A Euclidean ball, that is, a set given by $B_2^r = \\{x \in \mathbb{R}^n {}:{} \Vert{}x{}\Vert \leq r\\}$ /// or a Euclidean ball centered at a point $x_c$, that is, $B_2^{x_c, r} = \\{x \in \mathbb{R}^n {}:{} \Vert{}x-x_c{}\Vert \leq r\\}$ -pub struct Ball2<'a> { - center: Option<&'a [f64]>, - radius: f64, +pub struct Ball2<'a, T = f64> { + center: Option<&'a [T]>, + radius: T, } -impl<'a> Ball2<'a> { +impl<'a, T: Float> Ball2<'a, T> { /// Construct a new Euclidean ball with given center and radius /// If no `center` is given, then it is assumed to be in the origin - pub fn new(center: Option<&'a [f64]>, radius: f64) -> Self { - assert!(radius > 0.0); + /// + /// # Example + /// + /// ``` + /// use optimization_engine::constraints::{Ball2, Constraint}; + /// + /// let ball = Ball2::new(None, 1.0); + /// let mut x = [2.0, 0.0]; + /// ball.project(&mut x).unwrap(); + /// ``` + pub fn new(center: Option<&'a [T]>, radius: T) -> Self { + assert!(radius > T::zero()); Ball2 { center, radius } } } -impl<'a> Constraint for Ball2<'a> { - fn project(&self, x: &mut [f64]) { +impl<'a, T> Constraint for Ball2<'a, T> +where + T: Float + Sum, +{ + fn project(&self, x: &mut [T]) -> FunctionCallResult { if let Some(center) = &self.center { assert_eq!( x.len(), 
center.len(), "x and xc have incompatible dimensions" ); - let mut norm_difference = 0.0; + let mut norm_difference = T::zero(); x.iter().zip(center.iter()).for_each(|(a, b)| { let diff_ = *a - *b; - norm_difference += diff_ * diff_ + norm_difference = norm_difference + diff_ * diff_ }); norm_difference = norm_difference.sqrt(); @@ -43,9 +59,10 @@ impl<'a> Constraint for Ball2<'a> { let norm_x = crate::matrix_operations::norm2(x); if norm_x > self.radius { let norm_over_radius = norm_x / self.radius; - x.iter_mut().for_each(|x_| *x_ /= norm_over_radius); + x.iter_mut().for_each(|x_| *x_ = *x_ / norm_over_radius); } } + Ok(()) } fn is_convex(&self) -> bool { diff --git a/src/constraints/ballinf.rs b/rust/src/constraints/ballinf.rs similarity index 72% rename from src/constraints/ballinf.rs rename to rust/src/constraints/ballinf.rs index 8b87c688..6c4eb1f3 100644 --- a/src/constraints/ballinf.rs +++ b/rust/src/constraints/ballinf.rs @@ -1,26 +1,37 @@ use super::Constraint; +use crate::FunctionCallResult; +use num::Float; #[derive(Copy, Clone)] /// An infinity ball defined as $B_\infty^r = \\{x\in\mathbb{R}^n {}:{} \Vert{}x{}\Vert_{\infty} \leq r\\}$, /// where $\Vert{}\cdot{}\Vert_{\infty}$ is the infinity norm. The infinity ball centered at a point /// $x_c$ is defined as $B_\infty^{x_c,r} = \\{x\in\mathbb{R}^n {}:{} \Vert{}x-x_c{}\Vert_{\infty} \leq r\\}$. 
/// -pub struct BallInf<'a> { - center: Option<&'a [f64]>, - radius: f64, +pub struct BallInf<'a, T = f64> { + center: Option<&'a [T]>, + radius: T, } -impl<'a> BallInf<'a> { +impl<'a, T: Float> BallInf<'a, T> { /// Construct a new infinity-norm ball with given center and radius /// If no `center` is given, then it is assumed to be in the origin - /// - pub fn new(center: Option<&'a [f64]>, radius: f64) -> Self { - assert!(radius > 0.0); + /// + /// # Example + /// + /// ``` + /// use optimization_engine::constraints::{BallInf, Constraint}; + /// + /// let ball = BallInf::new(None, 1.0); + /// let mut x = [2.0, -0.2, -3.0]; + /// ball.project(&mut x).unwrap(); + /// ``` + pub fn new(center: Option<&'a [T]>, radius: T) -> Self { + assert!(radius > T::zero()); BallInf { center, radius } } } -impl<'a> Constraint for BallInf<'a> { +impl<'a, T: Float> Constraint for BallInf<'a, T> { /// Computes the projection of a given vector `x` on the current infinity ball. /// /// @@ -42,7 +53,7 @@ impl<'a> Constraint for BallInf<'a> { /// /// for all $i=1,\ldots, n$. 
/// - fn project(&self, x: &mut [f64]) { + fn project(&self, x: &mut [T]) -> FunctionCallResult { if let Some(center) = &self.center { assert_eq!( x.len(), @@ -52,12 +63,13 @@ impl<'a> Constraint for BallInf<'a> { x.iter_mut() .zip(center.iter()) .filter(|(&mut xi, &ci)| (xi - ci).abs() > self.radius) - .for_each(|(xi, ci)| *xi = ci + (*xi - ci).signum() * self.radius); + .for_each(|(xi, ci)| *xi = *ci + (*xi - *ci).signum() * self.radius); } else { x.iter_mut() .filter(|xi| xi.abs() > self.radius) .for_each(|xi| *xi = xi.signum() * self.radius); } + Ok(()) } fn is_convex(&self) -> bool { diff --git a/src/constraints/ballp.rs b/rust/src/constraints/ballp.rs similarity index 75% rename from src/constraints/ballp.rs rename to rust/src/constraints/ballp.rs index 3a0d893b..5bd375c2 100644 --- a/src/constraints/ballp.rs +++ b/rust/src/constraints/ballp.rs @@ -1,4 +1,8 @@ +use crate::numeric::cast; + use super::Constraint; +use crate::{FunctionCallResult, SolverError}; +use num::Float; #[derive(Copy, Clone)] /// An $\\ell_p$ ball, that is, @@ -64,7 +68,7 @@ use super::Constraint; /// /// let ball = BallP::new(None, 1.0, 1.5, 1e-10, 100); /// let mut x = vec![3.0, -1.0, 2.0]; -/// ball.project(&mut x); +/// ball.project(&mut x).unwrap(); /// ``` /// /// Project onto a translated \(\ell_p\)-ball: @@ -75,7 +79,7 @@ use super::Constraint; /// let center = vec![1.0, 1.0, 1.0]; /// let ball = BallP::new(Some(¢er), 2.0, 3.0, 1e-10, 100); /// let mut x = vec![4.0, -1.0, 2.0]; -/// ball.project(&mut x); +/// ball.project(&mut x).unwrap(); /// ``` /// /// # Notes @@ -89,33 +93,33 @@ use super::Constraint; /// in [`Ball2`](crate::constraints::Ball2) is more efficient /// - The quality and speed of the computation depend on the chosen numerical /// tolerance and iteration limit. -pub struct BallP<'a> { +pub struct BallP<'a, T = f64> { /// Optional center of the ball. /// /// If `None`, the ball is centered at the origin. /// If `Some(center)`, the ball is centered at `center`. 
- center: Option<&'a [f64]>, + center: Option<&'a [T]>, /// Radius of the ball. /// /// Must be strictly positive. - radius: f64, + radius: T, /// Exponent of the norm. /// /// Must satisfy `p > 1.0` and be finite. - p: f64, + p: T, /// Numerical tolerance used by the outer bisection on the Lagrange /// multiplier and by the inner Newton solver. - tolerance: f64, + tolerance: T, /// Maximum number of iterations used by the outer bisection and /// the inner Newton solver. max_iter: usize, } -impl<'a> BallP<'a> { +impl<'a, T: Float> BallP<'a, T> { /// Construct a new l_p ball with given center, radius, and exponent. /// /// - `center`: if `None`, the ball is centered at the origin @@ -123,16 +127,10 @@ impl<'a> BallP<'a> { /// - `p`: norm exponent, must satisfy `p > 1.0` and be finite /// - `tolerance`: tolerance for the numerical solvers /// - `max_iter`: maximum number of iterations for the numerical solvers - pub fn new( - center: Option<&'a [f64]>, - radius: f64, - p: f64, - tolerance: f64, - max_iter: usize, - ) -> Self { - assert!(radius > 0.0); - assert!(p > 1.0 && p.is_finite()); - assert!(tolerance > 0.0); + pub fn new(center: Option<&'a [T]>, radius: T, p: T, tolerance: T, max_iter: usize) -> Self { + assert!(radius > T::zero()); + assert!(p > T::one() && p.is_finite()); + assert!(tolerance > T::zero()); assert!(max_iter > 0); BallP { @@ -150,14 +148,14 @@ impl<'a> BallP<'a> { /// The $p$-norm of a vector $x\in \mathbb{R}^n$ is given by /// $$\Vert x \Vert_p = \left(\sum_{i=1}^{n} |x_i|^p\right)^{1/p},$$ /// for $p > 1$. 
- fn lp_norm(&self, x: &[f64]) -> f64 { + fn lp_norm(&self, x: &[T]) -> T { x.iter() .map(|xi| xi.abs().powf(self.p)) - .sum::() - .powf(1.0 / self.p) + .fold(T::zero(), |sum, xi| sum + xi) + .powf(T::one() / self.p) } - fn project_lp_ball(&self, x: &mut [f64]) { + fn project_lp_ball(&self, x: &mut [T]) -> FunctionCallResult { let p = self.p; let r = self.radius; let tol = self.tolerance; @@ -165,35 +163,37 @@ impl<'a> BallP<'a> { let current_norm = self.lp_norm(x); if current_norm <= r { - return; + return Ok(()); } - let abs_x: Vec = x.iter().map(|xi| xi.abs()).collect(); + let abs_x: Vec = x.iter().map(|xi| xi.abs()).collect(); let target = r.powf(p); - let radius_error = |lambda: f64| -> f64 { + let radius_error = |lambda: T| -> T { abs_x .iter() .map(|&a| { let u = Self::solve_coordinate_newton(a, lambda, p, tol, max_iter); u.powf(p) }) - .sum::() + .fold(T::zero(), |sum, ui| sum + ui) - target }; - let mut lambda_lo = 0.0_f64; - let mut lambda_hi = 1.0_f64; + let mut lambda_lo = T::zero(); + let mut lambda_hi = T::one(); - while radius_error(lambda_hi) > 0.0 { - lambda_hi *= 2.0; - if lambda_hi > 1e20 { - panic!("Failed to bracket the Lagrange multiplier"); + while radius_error(lambda_hi) > T::zero() { + lambda_hi = lambda_hi * cast::(2.0); + if lambda_hi > cast::(1e20) { + return Err(SolverError::ProjectionFailed( + "failed to bracket the Lagrange multiplier", + )); } } for _ in 0..max_iter { - let lambda_mid = 0.5 * (lambda_lo + lambda_hi); + let lambda_mid = cast::(0.5) * (lambda_lo + lambda_hi); let err = radius_error(lambda_mid); if err.abs() <= tol { @@ -202,19 +202,20 @@ impl<'a> BallP<'a> { break; } - if err > 0.0 { + if err > T::zero() { lambda_lo = lambda_mid; } else { lambda_hi = lambda_mid; } } - let lambda_star = 0.5 * (lambda_lo + lambda_hi); + let lambda_star = cast::(0.5) * (lambda_lo + lambda_hi); x.iter_mut().zip(abs_x.iter()).for_each(|(xi, &a)| { let u = Self::solve_coordinate_newton(a, lambda_star, p, tol, max_iter); *xi = xi.signum() * 
u; }); + Ok(()) } /// Solve for u >= 0 the equation u + lambda * p * u^(p-1) = a @@ -222,56 +223,56 @@ impl<'a> BallP<'a> { /// /// The solution always belongs to [0, a], so Newton is combined with /// bracketing and a bisection fallback. - fn solve_coordinate_newton(a: f64, lambda: f64, p: f64, tol: f64, max_iter: usize) -> f64 { - if a == 0.0 { - return 0.0; + fn solve_coordinate_newton(a: T, lambda: T, p: T, tol: T, max_iter: usize) -> T { + if a == T::zero() { + return T::zero(); } - if lambda == 0.0 { + if lambda == T::zero() { return a; } - let mut lo = 0.0_f64; + let mut lo = T::zero(); let mut hi = a; // Heuristic initial guess: // exact when p = 2, and usually in the right scale for general p. - let mut u = (a / (1.0 + lambda * p)).clamp(lo, hi); + let mut u = (a / (T::one() + lambda * p)).clamp(lo, hi); for _ in 0..max_iter { - let upm1 = u.powf(p - 1.0); + let upm1 = u.powf(p - T::one()); let f = u + lambda * p * upm1 - a; if f.abs() <= tol { return u; } - if f > 0.0 { + if f > T::zero() { hi = u; } else { lo = u; } - let df = 1.0 + lambda * p * (p - 1.0) * u.powf(p - 2.0); + let df = T::one() + lambda * p * (p - T::one()) * u.powf(p - cast::(2.0)); let mut candidate = u - f / df; if !candidate.is_finite() || candidate <= lo || candidate >= hi { - candidate = 0.5 * (lo + hi); + candidate = cast::(0.5) * (lo + hi); } - if (candidate - u).abs() <= tol * (1.0 + u.abs()) { + if (candidate - u).abs() <= tol * (T::one() + u.abs()) { return candidate; } u = candidate; } - 0.5 * (lo + hi) + cast::(0.5) * (lo + hi) } } -impl<'a> Constraint for BallP<'a> { - fn project(&self, x: &mut [f64]) { +impl<'a, T: Float> Constraint for BallP<'a, T> { + fn project(&self, x: &mut [T]) -> FunctionCallResult { if let Some(center) = &self.center { assert_eq!( x.len(), @@ -279,20 +280,21 @@ impl<'a> Constraint for BallP<'a> { "x and xc have incompatible dimensions" ); - let mut shifted = vec![0.0; x.len()]; + let mut shifted = vec![T::zero(); x.len()]; shifted .iter_mut() 
.zip(x.iter().zip(center.iter())) .for_each(|(s, (xi, ci))| *s = *xi - *ci); - self.project_lp_ball(&mut shifted); + self.project_lp_ball(&mut shifted)?; x.iter_mut() .zip(shifted.iter().zip(center.iter())) .for_each(|(xi, (si, ci))| *xi = *ci + *si); } else { - self.project_lp_ball(x); + self.project_lp_ball(x)?; } + Ok(()) } fn is_convex(&self) -> bool { diff --git a/src/constraints/cartesian_product.rs b/rust/src/constraints/cartesian_product.rs similarity index 75% rename from src/constraints/cartesian_product.rs rename to rust/src/constraints/cartesian_product.rs index edf15b5f..bf878ac8 100644 --- a/src/constraints/cartesian_product.rs +++ b/rust/src/constraints/cartesian_product.rs @@ -1,4 +1,5 @@ use super::Constraint; +use crate::FunctionCallResult; /// Cartesian product of constraints. /// @@ -21,13 +22,24 @@ use super::Constraint; /// The constraint $x \in C$ is interpreted as $x_i \in C_i$ /// for all $i=0,\ldots, n-1$. /// +/// # Indexing convention +/// +/// In Rust, Cartesian products are defined using cumulative lengths +/// (equivalently, exclusive end indices). +/// +/// For example, `.add_constraint(2, c0).add_constraint(5, c1)` means that +/// `c0` acts on `x[0..2]` and `c1` acts on `x[2..5]`. +/// +/// This differs from the Python API, which uses inclusive last indices such +/// as `[1, 4]` for the same two segments. +/// #[derive(Default)] -pub struct CartesianProduct<'a> { +pub struct CartesianProduct<'a, T = f64> { idx: Vec, - constraints: Vec>, + constraints: Vec + 'a>>, } -impl<'a> CartesianProduct<'a> { +impl<'a, T> CartesianProduct<'a, T> { /// Construct a new Cartesian product of constraints. /// /// # Note @@ -36,6 +48,21 @@ impl<'a> CartesianProduct<'a> { /// when possible (provided you have an estimate of the number of sets /// your Cartesian product will consist of). 
/// + /// # Example + /// + /// ``` + /// use optimization_engine::constraints::{Ball2, CartesianProduct, Constraint, Rectangle}; + /// + /// let xmin = [-1.0, -1.0]; + /// let xmax = [1.0, 1.0]; + /// let cartesian = CartesianProduct::new() + /// .add_constraint(2, Rectangle::new(Some(&xmin), Some(&xmax))) + /// .add_constraint(4, Ball2::new(None, 1.0)); + /// + /// let mut x = [3.0, -2.0, 2.0, 0.0]; + /// cartesian.project(&mut x).unwrap(); + /// ``` + /// pub fn new() -> Self { CartesianProduct { idx: Vec::new(), @@ -75,7 +102,8 @@ impl<'a> CartesianProduct<'a> { /// /// # Arguments /// - /// - `ni`: total length of the vector `(x_0, \ldots, x_i)` (see example below) + /// - `ni`: total length of the vector `(x_0, \ldots, x_i)` (that is, the + /// exclusive end index of the `i`-th segment; see example below) /// - `constraint`: constraint to add; it must implement the trait `Constraint` /// /// @@ -123,7 +151,8 @@ impl<'a> CartesianProduct<'a> { /// ``` /// The method will panic if any of the associated projections panics. /// - pub fn add_constraint(mut self, ni: usize, constraint: impl Constraint + 'a) -> Self { + #[must_use] + pub fn add_constraint(mut self, ni: usize, constraint: impl Constraint + 'a) -> Self { assert!( self.dimension() < ni, "provided index is smaller than or equal to previous index, or zero" @@ -134,7 +163,7 @@ impl<'a> CartesianProduct<'a> { } } -impl<'a> Constraint for CartesianProduct<'a> { +impl<'a, T> Constraint for CartesianProduct<'a, T> { /// Project onto the Cartesian product of constraints. 
/// /// The given vector `x` is updated with the projection on the set @@ -143,16 +172,14 @@ impl<'a> Constraint for CartesianProduct<'a> { /// /// The method will panic if the dimension of `x` is not equal to the /// dimension of the Cartesian product (see `dimension()`) - fn project(&self, x: &mut [f64]) { + fn project(&self, x: &mut [T]) -> FunctionCallResult { assert!(x.len() == self.dimension(), "x has wrong size"); let mut j = 0; - self.idx - .iter() - .zip(self.constraints.iter()) - .for_each(|(&i, c)| { - c.project(&mut x[j..i]); - j = i; - }); + for (&i, c) in self.idx.iter().zip(self.constraints.iter()) { + c.project(&mut x[j..i])?; + j = i; + } + Ok(()) } fn is_convex(&self) -> bool { diff --git a/src/constraints/epigraph_squared_norm.rs b/rust/src/constraints/epigraph_squared_norm.rs similarity index 62% rename from src/constraints/epigraph_squared_norm.rs rename to rust/src/constraints/epigraph_squared_norm.rs index f91617c8..9242cb34 100644 --- a/src/constraints/epigraph_squared_norm.rs +++ b/rust/src/constraints/epigraph_squared_norm.rs @@ -1,6 +1,10 @@ -use crate::matrix_operations; +use crate::{matrix_operations, numeric::cast}; +use crate::{FunctionCallResult, SolverError}; use super::Constraint; +use num::Float; +use roots::FloatType; +use std::iter::Sum; #[derive(Copy, Clone, Default)] /// The epigraph of the squared Euclidean norm, that is, @@ -16,13 +20,26 @@ impl EpigraphSquaredNorm { /// Create a new instance of the epigraph of the squared norm. /// /// Note that you do not need to specify the dimension. 
+ /// + /// # Example + /// + /// ``` + /// use optimization_engine::constraints::{Constraint, EpigraphSquaredNorm}; + /// + /// let epi = EpigraphSquaredNorm::new(); + /// let mut x = [1.0, 2.0, 1.0]; + /// epi.project(&mut x).unwrap(); + /// ``` #[must_use] pub fn new() -> Self { EpigraphSquaredNorm {} } } -impl Constraint for EpigraphSquaredNorm { +impl Constraint for EpigraphSquaredNorm +where + T: Float + FloatType + Sum, +{ /// Project on the epigraph of the squared Euclidean norm. /// /// Let the input be represented as $(z,t)$, where `z` is the vector formed @@ -46,7 +63,10 @@ impl Constraint for EpigraphSquaredNorm { /// /// Panics if: /// - /// - `x.len() < 2`, + /// - `x.len() < 2`. + /// + /// Returns an error if: + /// /// - no admissible real root is found, /// - the Newton derivative becomes too small, /// - the final scaling factor is numerically singular. @@ -61,9 +81,9 @@ impl Constraint for EpigraphSquaredNorm { /// // Here, z = [1., 2., 3.] and t = 4. /// let mut x = [1., 2., 3., 4.]; /// - /// epi.project(&mut x); + /// epi.project(&mut x).unwrap(); /// ``` - fn project(&self, x: &mut [f64]) { + fn project(&self, x: &mut [T]) -> FunctionCallResult { assert!( x.len() >= 2, "EpigraphSquaredNorm::project requires x.len() >= 2" @@ -76,74 +96,78 @@ impl Constraint for EpigraphSquaredNorm { // Already feasible if norm_z_sq <= t { - return; + return Ok(()); } // Cubic: // 4 r^3 + 4 theta r^2 + theta^2 r - ||z||^2 = 0 - let theta = 1.0 - 2.0 * t; - let a3 = 4.0; - let a2 = 4.0 * theta; + let theta = cast::(1.0) - cast::(2.0) * t; + let a3 = cast::(4.0); + let a2 = cast::(4.0) * theta; let a1 = theta * theta; let a0 = -norm_z_sq; let cubic_poly_roots = roots::find_roots_cubic(a3, a2, a1, a0); - let root_tol = 1e-6; - let mut right_root: Option = None; + let root_tol = cast::(10.0) * num::Float::sqrt(T::epsilon()); + let mut right_root: Option = None; // Pick the first admissible real root for &ri in cubic_poly_roots.as_ref().iter() { - let denom = 1.0 + 
2.0 * (ri - t); + let denom = cast::(1.0) + cast::(2.0) * (ri - t); // We need a valid scaling and consistency with ||z_proj||^2 = ri - if denom > 0.0 { + if denom > cast::(0.0) { let candidate_norm_sq = norm_z_sq / (denom * denom); - if (candidate_norm_sq - ri).abs() <= root_tol { + if num::Float::abs(candidate_norm_sq - ri) <= root_tol { right_root = Some(ri); break; } } } - let mut zsol = - right_root.expect("EpigraphSquaredNorm::project: no admissible real root found"); + let mut zsol = right_root.ok_or(SolverError::ProjectionFailed( + "no admissible real root found for the cubic projection equation", + ))?; // Newton refinement let newton_max_iters: usize = 5; - let newton_eps = 1e-14; + let newton_eps = cast::(10.0) * T::epsilon(); for _ in 0..newton_max_iters { let zsol_sq = zsol * zsol; let zsol_cb = zsol_sq * zsol; let p_z = a3 * zsol_cb + a2 * zsol_sq + a1 * zsol + a0; - if p_z.abs() <= newton_eps { + if num::Float::abs(p_z) <= newton_eps { break; } - let dp_z = 3.0 * a3 * zsol_sq + 2.0 * a2 * zsol + a1; - assert!( - dp_z.abs() > 1e-15, - "EpigraphSquaredNorm::project: Newton derivative too small" - ); + let dp_z = cast::(3.0) * a3 * zsol_sq + cast::(2.0) * a2 * zsol + a1; + if num::Float::abs(dp_z) <= cast::(1e-15) { + return Err(SolverError::ProjectionFailed( + "Newton refinement derivative is too small", + )); + } - zsol -= p_z / dp_z; + zsol = zsol - p_z / dp_z; } let right_root = zsol; - let scaling = 1.0 + 2.0 * (right_root - t); + let scaling = cast::(1.0) + cast::(2.0) * (right_root - t); - assert!( - scaling.abs() > 1e-15, - "EpigraphSquaredNorm::project: scaling factor too small" - ); + if num::Float::abs(scaling) <= cast::(1e-15) { + return Err(SolverError::ProjectionFailed( + "projection scaling factor is numerically singular", + )); + } // Projection for xi in x.iter_mut().take(nx) { - *xi /= scaling; + *xi = *xi / scaling; } x[nx] = right_root; + Ok(()) } /// This is a convex set, so this function returns `true`. 
diff --git a/src/constraints/finite.rs b/rust/src/constraints/finite.rs similarity index 76% rename from src/constraints/finite.rs rename to rust/src/constraints/finite.rs index 3f9393cd..30ab9c48 100644 --- a/src/constraints/finite.rs +++ b/rust/src/constraints/finite.rs @@ -1,17 +1,30 @@ use super::Constraint; +use crate::FunctionCallResult; +use num::Float; +use std::iter::Sum; + +fn norm2_squared_diff(a: &[T], b: &[T]) -> T { + assert_eq!(a.len(), b.len()); + a.iter() + .zip(b.iter()) + .fold(T::zero(), |sum, (&x, &y)| sum + (x - y) * (x - y)) +} /// /// A finite set, $X = \\{x_1, x_2, \ldots, x_n\\}\subseteq\mathbb{R}^n$, given vectors /// $x_i\in\mathbb{R}^n$ /// #[derive(Clone, Copy)] -pub struct FiniteSet<'a> { +pub struct FiniteSet<'a, T = f64> { /// The data is stored in a Vec-of-Vec datatype, that is, a vector /// of vectors - data: &'a [&'a [f64]], + data: &'a [&'a [T]], } -impl<'a> FiniteSet<'a> { +impl<'a, T> FiniteSet<'a, T> +where + T: Float + Sum, +{ /// Construct a finite set, $X = \\{x_1, x_2, \ldots, x_n\\}$, given vectors /// $x_i\in\mathbb{R}^n$ /// @@ -41,7 +54,7 @@ impl<'a> FiniteSet<'a> { /// This method will panic if the given vector of data is empty, /// or if the given vectors have unequal dimensions. /// - pub fn new(data: &'a [&'a [f64]]) -> Self { + pub fn new(data: &'a [&'a [T]]) -> Self { // Do a sanity check... 
assert!(!data.is_empty(), "empty data not allowed"); let n = data[0].len(); @@ -52,7 +65,10 @@ impl<'a> FiniteSet<'a> { } } -impl<'a> Constraint for FiniteSet<'a> { +impl<'a, T> Constraint for FiniteSet<'a, T> +where + T: Float + Sum, +{ /// /// Projection on the current finite set /// @@ -77,7 +93,7 @@ impl<'a> Constraint for FiniteSet<'a> { /// ]; /// let finite_set = FiniteSet::new(data); /// let mut x = [0.7, 0.6]; - /// finite_set.project(&mut x); // compute projection + /// finite_set.project(&mut x).unwrap(); // compute projection /// ``` /// /// # Panics @@ -85,18 +101,19 @@ impl<'a> Constraint for FiniteSet<'a> { /// This method panics if the dimension of `x` is not equal to the /// dimension of the points in the finite set. /// - fn project(&self, x: &mut [f64]) { + fn project(&self, x: &mut [T]) -> FunctionCallResult { assert_eq!(x.len(), self.data[0].len(), "x has incompatible dimension"); let mut idx: usize = 0; - let mut best_distance: f64 = num::Float::infinity(); + let mut best_distance = T::infinity(); for (i, v) in self.data.iter().enumerate() { - let dist = crate::matrix_operations::norm2_squared_diff(v, x); + let dist = norm2_squared_diff(v, x); if dist < best_distance { idx = i; best_distance = dist; } } x.copy_from_slice(self.data[idx]); + Ok(()) } fn is_convex(&self) -> bool { diff --git a/src/constraints/halfspace.rs b/rust/src/constraints/halfspace.rs similarity index 82% rename from src/constraints/halfspace.rs rename to rust/src/constraints/halfspace.rs index 5442ca54..cd039128 100644 --- a/src/constraints/halfspace.rs +++ b/rust/src/constraints/halfspace.rs @@ -1,18 +1,24 @@ use super::Constraint; use crate::matrix_operations; +use crate::FunctionCallResult; +use num::Float; +use std::iter::Sum; #[derive(Clone)] /// A halfspace is a set given by $H = \\{x \in \mathbb{R}^n {}:{} \langle c, x\rangle \leq b\\}$. 
-pub struct Halfspace<'a> { +pub struct Halfspace<'a, T = f64> { /// normal vector - normal_vector: &'a [f64], + normal_vector: &'a [T], /// offset - offset: f64, + offset: T, /// squared Euclidean norm of the normal vector (computed once upon construction) - normal_vector_squared_norm: f64, + normal_vector_squared_norm: T, } -impl<'a> Halfspace<'a> { +impl<'a, T> Halfspace<'a, T> +where + T: Float + Sum, +{ /// A halfspace is a set given by $H = \\{x \in \mathbb{R}^n {}:{} \langle c, x\rangle \leq b\\}$, /// where $c$ is the normal vector of the halfspace and $b$ is an offset. /// @@ -42,10 +48,10 @@ impl<'a> Halfspace<'a> { /// let offset = 1.0; /// let halfspace = Halfspace::new(&normal_vector, offset); /// let mut x = [-1., 3.]; - /// halfspace.project(&mut x); + /// halfspace.project(&mut x).unwrap(); /// ``` /// - pub fn new(normal_vector: &'a [f64], offset: f64) -> Self { + pub fn new(normal_vector: &'a [T], offset: T) -> Self { let normal_vector_squared_norm = matrix_operations::norm2_squared(normal_vector); Halfspace { normal_vector, @@ -55,7 +61,10 @@ impl<'a> Halfspace<'a> { } } -impl<'a> Constraint for Halfspace<'a> { +impl<'a, T> Constraint for Halfspace<'a, T> +where + T: Float + Sum, +{ /// Projects on halfspace using the following formula: /// /// $$\begin{aligned} @@ -79,14 +88,15 @@ impl<'a> Constraint for Halfspace<'a> { /// This method panics if the length of `x` is not equal to the dimension /// of the halfspace. 
/// - fn project(&self, x: &mut [f64]) { + fn project(&self, x: &mut [T]) -> FunctionCallResult { let inner_product = matrix_operations::inner_product(x, self.normal_vector); if inner_product > self.offset { let factor = (inner_product - self.offset) / self.normal_vector_squared_norm; x.iter_mut() .zip(self.normal_vector.iter()) - .for_each(|(x, normal_vector_i)| *x -= factor * normal_vector_i); + .for_each(|(x, normal_vector_i)| *x = *x - factor * *normal_vector_i); } + Ok(()) } /// Halfspaces are convex sets diff --git a/src/constraints/hyperplane.rs b/rust/src/constraints/hyperplane.rs similarity index 80% rename from src/constraints/hyperplane.rs rename to rust/src/constraints/hyperplane.rs index 886fd494..958223c6 100644 --- a/src/constraints/hyperplane.rs +++ b/rust/src/constraints/hyperplane.rs @@ -1,18 +1,24 @@ use super::Constraint; use crate::matrix_operations; +use crate::FunctionCallResult; +use num::Float; +use std::iter::Sum; #[derive(Clone)] /// A hyperplane is a set given by $H = \\{x \in \mathbb{R}^n {}:{} \langle c, x\rangle = b\\}$. -pub struct Hyperplane<'a> { +pub struct Hyperplane<'a, T = f64> { /// normal vector - normal_vector: &'a [f64], + normal_vector: &'a [T], /// offset - offset: f64, + offset: T, /// squared Euclidean norm of the normal vector (computed once upon construction) - normal_vector_squared_norm: f64, + normal_vector_squared_norm: T, } -impl<'a> Hyperplane<'a> { +impl<'a, T> Hyperplane<'a, T> +where + T: Float + Sum, +{ /// A hyperplane is a set given by $H = \\{x \in \mathbb{R}^n {}:{} \langle c, x\rangle = b\\}$, /// where $c$ is the normal vector of the hyperplane and $b$ is an offset. 
/// @@ -41,13 +47,13 @@ impl<'a> Hyperplane<'a> { /// let offset = 1.0; /// let hyperplane = Hyperplane::new(&normal_vector, offset); /// let mut x = [-1., 3.]; - /// hyperplane.project(&mut x); + /// hyperplane.project(&mut x).unwrap(); /// ``` /// - pub fn new(normal_vector: &'a [f64], offset: f64) -> Self { + pub fn new(normal_vector: &'a [T], offset: T) -> Self { let normal_vector_squared_norm = matrix_operations::norm2_squared(normal_vector); assert!( - normal_vector_squared_norm > 0.0, + normal_vector_squared_norm > T::zero(), "normal_vector must have positive norm" ); Hyperplane { @@ -58,7 +64,10 @@ impl<'a> Hyperplane<'a> { } } -impl<'a> Constraint for Hyperplane<'a> { +impl<'a, T> Constraint<T> for Hyperplane<'a, T> +where + T: Float + Sum, +{ /// Projects on the hyperplane using the formula: /// /// $$\begin{aligned} @@ -79,13 +88,14 @@ impl<'a> Constraint for Hyperplane<'a> { /// This method panics if the length of `x` is not equal to the dimension /// of the hyperplane. /// - fn project(&self, x: &mut [f64]) { + fn project(&self, x: &mut [T]) -> FunctionCallResult { assert_eq!(x.len(), self.normal_vector.len(), "x has wrong dimension"); let inner_product = matrix_operations::inner_product(x, self.normal_vector); let factor = (inner_product - self.offset) / self.normal_vector_squared_norm; x.iter_mut() .zip(self.normal_vector.iter()) - .for_each(|(x, nrm_vct)| *x -= factor * nrm_vct); + .for_each(|(x, nrm_vct)| *x = *x - factor * *nrm_vct); + Ok(()) } /// Hyperplanes are convex sets diff --git a/src/constraints/mod.rs b/rust/src/constraints/mod.rs similarity index 77% rename from src/constraints/mod.rs rename to rust/src/constraints/mod.rs index d4ce2462..478d54e0 100644 --- a/src/constraints/mod.rs +++ b/rust/src/constraints/mod.rs @@ -8,6 +8,8 @@ //! //!
[`Constraint`]: trait.Constraint.html +use crate::FunctionCallResult; + mod affine_space; mod ball1; mod ball2; @@ -25,7 +27,7 @@ mod soc; mod sphere2; mod zero; -pub use affine_space::AffineSpace; +pub use affine_space::{AffineSpace, AffineSpaceError}; pub use ball1::Ball1; pub use ball2::Ball2; pub use ballinf::BallInf; @@ -46,7 +48,7 @@ pub use zero::Zero; /// /// This trait defines an abstract function that allows to compute projections /// on sets; this is implemented by a series of structures (see below for details) -pub trait Constraint { +pub trait Constraint<T = f64> { /// Projection onto the set, that is, /// /// $$ @@ -57,7 +59,17 @@ pub trait Constraint { /// /// - `x`: The given vector $x$ is updated with the projection on the set /// - fn project(&self, x: &mut [f64]); + /// ## Errors + /// + /// Implementations should return `Err(...)` when the projection cannot be + /// computed reliably because of a numerical or internal failure. + /// + /// ## Panics + /// + /// Implementations may still panic on clear API misuse, such as calling the + /// projection with a slice of incompatible dimension.
+ /// + fn project(&self, x: &mut [T]) -> FunctionCallResult; /// Returns true if and only if the set is convex fn is_convex(&self) -> bool; diff --git a/rust/src/constraints/no_constraints.rs b/rust/src/constraints/no_constraints.rs new file mode 100644 index 00000000..c04447b3 --- /dev/null +++ b/rust/src/constraints/no_constraints.rs @@ -0,0 +1,35 @@ +use super::Constraint; +use crate::FunctionCallResult; + +/// The whole space, no constraints +#[derive(Default, Clone, Copy)] +pub struct NoConstraints {} + +impl NoConstraints { + /// Constructs new instance of `NoConstraints` + /// + /// # Example + /// + /// ``` + /// use optimization_engine::constraints::{Constraint, NoConstraints}; + /// + /// let no_constraints = NoConstraints::new(); + /// let mut x = [1.0, -2.0, 3.0]; + /// no_constraints.project(&mut x).unwrap(); + /// ``` + /// + #[must_use] + pub fn new() -> NoConstraints { + NoConstraints {} + } +} + +impl<T> Constraint<T> for NoConstraints { + fn project(&self, _x: &mut [T]) -> FunctionCallResult { + Ok(()) + } + + fn is_convex(&self) -> bool { + true + } +} diff --git a/src/constraints/rectangle.rs b/rust/src/constraints/rectangle.rs similarity index 70% rename from src/constraints/rectangle.rs rename to rust/src/constraints/rectangle.rs index 76b52ceb..e81caabf 100644 --- a/src/constraints/rectangle.rs +++ b/rust/src/constraints/rectangle.rs @@ -1,4 +1,6 @@ use super::Constraint; +use crate::FunctionCallResult; +use num::Float; #[derive(Clone, Copy)] /// /// A set of the form $\\{x \in \mathbb{R}^n {}:{} x_{\min} {}\leq{} x {}\leq{} x_{\max}\\}$, /// where $\leq$ is meant in the element-wise sense and either of $x_{\min}$ and $x_{\max}$ can /// be equal to infinity.
-pub struct Rectangle<'a> { - xmin: Option<&'a [f64]>, - xmax: Option<&'a [f64]>, +pub struct Rectangle<'a, T = f64> { + xmin: Option<&'a [T]>, + xmax: Option<&'a [T]>, } -impl<'a> Rectangle<'a> { +impl<'a, T: Float> Rectangle<'a, T> { /// Construct a new rectangle with given $x_{\min}$ and $x_{\max}$ /// /// # Arguments /// @@ -34,12 +36,27 @@ /// - Both `xmin` and `xmax` have been provided, but they have incompatible /// dimensions /// - pub fn new(xmin: Option<&'a [f64]>, xmax: Option<&'a [f64]>) -> Self { + /// # Example + /// + /// ``` + /// use optimization_engine::constraints::{Constraint, Rectangle}; + /// + /// let xmin = [-1.0, 0.0]; + /// let xmax = [1.0, 2.0]; + /// let rectangle = Rectangle::new(Some(&xmin), Some(&xmax)); + /// let mut x = [3.0, -4.0]; + /// rectangle.project(&mut x).unwrap(); + /// ``` + /// + pub fn new(xmin: Option<&'a [T]>, xmax: Option<&'a [T]>) -> Self { assert!(xmin.is_some() || xmax.is_some()); // xmin or xmax must be Some - assert!( - xmin.is_none() || xmax.is_none() || xmin.unwrap().len() == xmax.unwrap().len(), - "incompatible dimensions of xmin and xmax" - ); + if let (Some(xmin), Some(xmax)) = (xmin, xmax) { + assert_eq!( + xmin.len(), + xmax.len(), + "incompatible dimensions of xmin and xmax" + ); + } if let (Some(xmin), Some(xmax)) = (xmin, xmax) { assert!( xmin.iter() @@ -53,8 +70,8 @@ } } -impl<'a> Constraint for Rectangle<'a> { - fn project(&self, x: &mut [f64]) { +impl<'a, T: Float> Constraint<T> for Rectangle<'a, T> { + fn project(&self, x: &mut [T]) -> FunctionCallResult { if let Some(xmin) = &self.xmin { assert_eq!( x.len(), @@ -80,6 +97,7 @@ }; }); } + Ok(()) } fn is_convex(&self) -> bool { diff --git a/src/constraints/simplex.rs b/rust/src/constraints/simplex.rs similarity index 68% rename from src/constraints/simplex.rs rename to rust/src/constraints/simplex.rs index 061b390e..da1bf1bf 100644 --- a/src/constraints/simplex.rs +++ 
b/rust/src/constraints/simplex.rs @@ -1,50 +1,64 @@ +use crate::numeric::cast; + use super::Constraint; +use crate::FunctionCallResult; +use num::Float; #[derive(Copy, Clone)] /// A simplex with level $\alpha$ is a set of the form /// $\Delta_\alpha^n = \\{x \in \mathbb{R}^n {}:{} x \geq 0, \sum_i x_i = \alpha\\}$, /// where $\alpha$ is a positive constant. -pub struct Simplex { +pub struct Simplex<T = f64> { /// Simplex level - alpha: f64, + alpha: T, } -impl Simplex { +impl<T: Float> Simplex<T> { /// Construct a new simplex with given (positive) $\alpha$. The user does not need /// to specify the dimension of the simplex. - pub fn new(alpha: f64) -> Self { - assert!(alpha > 0.0, "alpha is nonpositive"); + /// + /// # Example + /// + /// ``` + /// use optimization_engine::constraints::{Constraint, Simplex}; + /// + /// let simplex = Simplex::new(1.0); + /// let mut x = [0.5, -0.5, 2.0]; + /// simplex.project(&mut x).unwrap(); + /// ``` + pub fn new(alpha: T) -> Self { + assert!(alpha > T::zero(), "alpha is nonpositive"); Simplex { alpha } } } -impl Constraint for Simplex { +impl<T: Float> Constraint<T> for Simplex<T> { /// Project onto $\Delta_\alpha^n$ using Condat's fast projection algorithm. /// /// See: Laurent Condat. Fast Projection onto the Simplex and the $\ell_1$ Ball. /// Mathematical Programming, Series A, Springer, 2016, 158 (1), pp.575-585. /// ⟨10.1007/s10107-015-0946-6⟩.
- fn project(&self, x: &mut [f64]) { + fn project(&self, x: &mut [T]) -> FunctionCallResult { assert!(!x.is_empty(), "x must be nonempty"); let a = &self.alpha; // ---- step 1 - let mut v = Vec::<f64>::with_capacity(x.len()); // vector containing x[0] + let mut v = Vec::<T>::with_capacity(x.len()); v.push(x[0]); let mut v_size_old: i64 = -1; // 64 bit signed int - let mut v_tilde: Vec<f64> = Vec::new(); // empty vector of f64 - let mut rho: f64 = x[0] - a; // 64 bit float + let mut v_tilde: Vec<T> = Vec::new(); + let mut rho: T = x[0] - *a; // ---- step 2 x.iter().skip(1).for_each(|x_n| { if *x_n > rho { - rho += (*x_n - rho) / ((v.len() + 1) as f64); - if rho > *x_n - a { + rho = rho + (*x_n - rho) / cast(v.len() + 1); + if rho > *x_n - *a { v.push(*x_n); } else { v_tilde.extend(&v); v = vec![*x_n]; - rho = *x_n - a; + rho = *x_n - *a; } } }); @@ -54,7 +68,7 @@ impl Constraint for Simplex { v_tilde.iter().for_each(|v_t_n| { if *v_t_n > rho { v.push(*v_t_n); - rho += (*v_t_n - rho) / (v.len() as f64); + rho = rho + (*v_t_n - rho) / cast(v.len()); } }); } @@ -68,7 +82,7 @@ impl Constraint for Simplex { if *v_n <= rho { hit_list.push(n); current_len_v -= 1; - rho += (rho - *v_n) / (current_len_v as f64); + rho = rho + (rho - *v_n) / cast(current_len_v); } }); hit_list.iter().rev().for_each(|target| { @@ -80,8 +94,9 @@ } // ---- step 6 - let zero: f64 = 0.0; + let zero = T::zero(); x.iter_mut().for_each(|x_n| *x_n = zero.max(*x_n - rho)); + Ok(()) } fn is_convex(&self) -> bool { diff --git a/src/constraints/soc.rs b/rust/src/constraints/soc.rs similarity index 71% rename from src/constraints/soc.rs rename to rust/src/constraints/soc.rs index 8ff0759b..7b0de112 100644 --- a/src/constraints/soc.rs +++ b/rust/src/constraints/soc.rs @@ -1,5 +1,8 @@ use super::Constraint; use crate::matrix_operations; +use crate::FunctionCallResult; +use num::Float; +use std::iter::Sum; #[derive(Clone, Copy)] /// @@ -17,11 +20,11 @@ use 
crate::matrix_operations; /// 1996 doctoral dissertation: Projection Algorithms and Monotone Operators /// (p. 40, Theorem 3.3.6). /// -pub struct SecondOrderCone { - alpha: f64, +pub struct SecondOrderCone<T = f64> { + alpha: T, } -impl SecondOrderCone { +impl<T: Float> SecondOrderCone<T> { /// Construct a new instance of `SecondOrderCone` with parameter `alpha`. /// /// A second-order cone with parameter `alpha` is the set @@ -38,13 +41,26 @@ impl SecondOrderCone { /// # Panics /// /// The method panics if the given parameter `alpha` is nonpositive. - pub fn new(alpha: f64) -> SecondOrderCone { - assert!(alpha > 0.0); // alpha must be positive + /// + /// # Example + /// + /// ``` + /// use optimization_engine::constraints::{Constraint, SecondOrderCone}; + /// + /// let cone = SecondOrderCone::new(1.0); + /// let mut x = [2.0, 0.0, 0.5]; + /// cone.project(&mut x).unwrap(); + /// ``` + pub fn new(alpha: T) -> SecondOrderCone<T> { + assert!(alpha > T::zero()); // alpha must be positive SecondOrderCone { alpha } } } -impl Constraint for SecondOrderCone { +impl<T> Constraint<T> for SecondOrderCone<T> +where + T: Float + Sum, +{ /// Project onto the second-order cone. /// /// # Arguments @@ -56,7 +72,7 @@ impl Constraint for SecondOrderCone { /// /// This method panics if the length of `x` is less than 2.
/// - fn project(&self, x: &mut [f64]) { + fn project(&self, x: &mut [T]) -> FunctionCallResult { // x = (z, r) let n = x.len(); assert!(n >= 2, "x must be of dimension at least 2"); @@ -64,14 +80,15 @@ let r = x[n - 1]; let norm_z = matrix_operations::norm2(z); if self.alpha * norm_z <= -r { - x.iter_mut().for_each(|v| *v = 0.0); + x.iter_mut().for_each(|v| *v = T::zero()); } else if norm_z > self.alpha * r { - let beta = (self.alpha * norm_z + r) / (self.alpha.powi(2) + 1.0); + let beta = (self.alpha * norm_z + r) / (self.alpha.powi(2) + T::one()); x[..n - 1] .iter_mut() - .for_each(|v| *v *= self.alpha * beta / norm_z); + .for_each(|v| *v = *v * self.alpha * beta / norm_z); x[n - 1] = beta; } + Ok(()) } fn is_convex(&self) -> bool { diff --git a/src/constraints/sphere2.rs b/rust/src/constraints/sphere2.rs similarity index 61% rename from src/constraints/sphere2.rs rename to rust/src/constraints/sphere2.rs index 86433855..9a702872 100644 --- a/src/constraints/sphere2.rs +++ b/rust/src/constraints/sphere2.rs @@ -1,23 +1,48 @@ +use crate::numeric::cast; + use super::Constraint; +use crate::FunctionCallResult; +use num::Float; +use std::iter::Sum; + +fn norm2_squared_diff<T: Float>(a: &[T], b: &[T]) -> T { + assert_eq!(a.len(), b.len()); + a.iter() + .zip(b.iter()) + .fold(T::zero(), |sum, (&x, &y)| sum + (x - y) * (x - y)) +} #[derive(Copy, Clone)] /// A Euclidean sphere, that is, a set given by $S_2^r = \\{x \in \mathbb{R}^n {}:{} \Vert{}x{}\Vert = r\\}$ /// or a Euclidean sphere centered at a point $x_c$, that is, $S_2^{x_c, r} = \\{x \in \mathbb{R}^n {}:{} \Vert{}x-x_c{}\Vert = r\\}$ -pub struct Sphere2<'a> { - center: Option<&'a [f64]>, - radius: f64, +pub struct Sphere2<'a, T = f64> { + center: Option<&'a [T]>, + radius: T, } -impl<'a> Sphere2<'a> { +impl<'a, T: Float> Sphere2<'a, T> { /// Construct a new Euclidean sphere with given center and radius /// If no `center` is given, then it is assumed to be in the origin - pub fn 
new(center: Option<&'a [f64]>, radius: f64) -> Self { - assert!(radius > 0.0); + /// + /// # Example + /// + /// ``` + /// use optimization_engine::constraints::{Constraint, Sphere2}; + /// + /// let sphere = Sphere2::new(None, 1.0); + /// let mut x = [3.0, 4.0]; + /// sphere.project(&mut x).unwrap(); + /// ``` + pub fn new(center: Option<&'a [T]>, radius: T) -> Self { + assert!(radius > T::zero()); Sphere2 { center, radius } } } -impl<'a> Constraint for Sphere2<'a> { +impl<'a, T> Constraint<T> for Sphere2<'a, T> +where + T: Float + Sum, +{ /// Projection onto the sphere, $S_{r, c}$ with radius $r$ and center $c$. /// If $x\neq c$, the projection is uniquely defined by /// @@ -38,8 +63,8 @@ impl<'a> Constraint for Sphere2<'a> { /// Panics if `x` is empty or, when a center is provided, if `x` and /// `center` have incompatible dimensions. /// - fn project(&self, x: &mut [f64]) { - let epsilon = 1e-12; + fn project(&self, x: &mut [T]) -> FunctionCallResult { + let epsilon = cast::<T, _>(1e-12); assert!(!x.is_empty(), "x must be nonempty"); if let Some(center) = &self.center { assert_eq!( @@ -47,11 +72,11 @@ center.len(), "x and center have incompatible dimensions" ); - let norm_difference = crate::matrix_operations::norm2_squared_diff(x, center).sqrt(); + let norm_difference = norm2_squared_diff(x, center).sqrt(); if norm_difference <= epsilon { x.copy_from_slice(center); - x[0] += self.radius; - return; + x[0] = x[0] + self.radius; + return Ok(()); } x.iter_mut().zip(center.iter()).for_each(|(x, c)| { *x = *c + self.radius * (*x - *c) / norm_difference; @@ -59,12 +84,13 @@ } else { let norm_x = crate::matrix_operations::norm2(x); if norm_x <= epsilon { - x[0] += self.radius; - return; + x[0] = x[0] + self.radius; + return Ok(()); } let norm_over_radius = self.radius / norm_x; - x.iter_mut().for_each(|x_| *x_ *= norm_over_radius); + x.iter_mut().for_each(|x_| *x_ = *x_ * norm_over_radius); } + Ok(()) 
} /// Returns false (the sphere is not a convex set) diff --git a/src/constraints/tests.rs b/rust/src/constraints/tests.rs similarity index 53% rename from src/constraints/tests.rs rename to rust/src/constraints/tests.rs index 58f88f25..6306ef2c 100644 --- a/src/constraints/tests.rs +++ b/rust/src/constraints/tests.rs @@ -1,6 +1,7 @@ -use crate::matrix_operations; +use crate::{matrix_operations, numeric::cast}; use super::*; +use num::{Float, ToPrimitive}; use rand; use rand::RngExt; use rand_distr::{Distribution, Gamma}; @@ -10,7 +11,7 @@ fn t_zero_set() { let zero = Zero::new(); let mut x = [1.0, 2.0, 3.0]; let x_projection = [0.0; 3]; - zero.project(&mut x); + zero.project(&mut x).unwrap(); unit_test_utils::assert_nearly_equal_array( &x_projection, &x, @@ -20,6 +21,14 @@ fn t_zero_set() { ); } +#[test] +fn t_zero_set_f32() { + let zero = Zero::new(); + let mut x = [1.0_f32, -2.0, 3.5]; + zero.project(&mut x).unwrap(); + assert_eq!([0.0_f32, 0.0, 0.0], x); +} + #[test] fn t_hyperplane() { let normal_vector = [1.0, 2.0, 3.0]; @@ -31,7 +40,7 @@ fn t_hyperplane() { 0.285_714_285_714_286, 0.928_571_428_571_429, ]; - hyperplane.project(&mut x); + hyperplane.project(&mut x).unwrap(); unit_test_utils::assert_nearly_equal_array( &x, &x_proj_expected, @@ -54,7 +63,7 @@ fn t_hyperplane_wrong_dimension() { let normal_vector = [1.0, 2.0, 3.0]; let hyperplane = Hyperplane::new(&normal_vector, 1.0); let mut x = [1.0, 2.0]; - hyperplane.project(&mut x); + hyperplane.project(&mut x).unwrap(); } #[test] @@ -64,7 +73,7 @@ fn t_halfspace_project_inside() { let halfspace = Halfspace::new(&normal_vector, offset); let mut x = [-1., 3.]; let x_expected = [-1., 3.]; - halfspace.project(&mut x); + halfspace.project(&mut x).unwrap(); unit_test_utils::assert_nearly_equal_array( &x, &x_expected, @@ -81,7 +90,7 @@ fn t_halfspace_project_outside() { let halfspace = Halfspace::new(&normal_vector, offset); let mut x = [-1., 3.]; let x_expected = [-1.8, 1.4]; - halfspace.project(&mut x); + 
halfspace.project(&mut x).unwrap(); unit_test_utils::assert_nearly_equal_array( &x, &x_expected, @@ -112,7 +121,7 @@ fn t_finite_set() { let data: &[&[f64]] = &[&[0.0, 0.0], &[1.0, 1.0], &[0.0, 1.0], &[1.0, 0.0]]; let finite_set = FiniteSet::new(data); let mut x = [0.7, 0.6]; - finite_set.project(&mut x); + finite_set.project(&mut x).unwrap(); unit_test_utils::assert_nearly_equal_array( &[1.0, 1.0], &x, @@ -121,7 +130,7 @@ fn t_finite_set() { "projection is wrong (should be [1,1])", ); x = [-0.1, 0.2]; - finite_set.project(&mut x); + finite_set.project(&mut x).unwrap(); unit_test_utils::assert_nearly_equal_array( &[0.0, 0.0], &x, @@ -130,7 +139,7 @@ fn t_finite_set() { "projection is wrong (should be [0,0])", ); x = [0.48, 0.501]; - finite_set.project(&mut x); + finite_set.project(&mut x).unwrap(); unit_test_utils::assert_nearly_equal_array( &[0.0, 1.0], &x, @@ -139,7 +148,7 @@ fn t_finite_set() { "projection is wrong (should be [0,1])", ); x = [0.7, 0.2]; - finite_set.project(&mut x); + finite_set.project(&mut x).unwrap(); unit_test_utils::assert_nearly_equal_array( &[1.0, 0.0], &x, @@ -149,23 +158,32 @@ fn t_finite_set() { ); } +#[test] +fn t_finite_set_f32() { + let data: &[&[f32]] = &[&[0.0_f32, 0.0], &[1.0, 1.0], &[0.0, 1.0], &[1.0, 0.0]]; + let finite_set = FiniteSet::new(data); + let mut x = [0.7_f32, 0.2]; + finite_set.project(&mut x).unwrap(); + assert_eq!([1.0_f32, 0.0], x); +} + #[test] #[should_panic] fn t_finite_set_project_wrong_dimension() { let data: &[&[f64]] = &[&[0.0, 0.0], &[1.0, 1.0]]; let finite_set = FiniteSet::new(data); let mut x = [0.5, 0.5, 0.5]; - finite_set.project(&mut x); + finite_set.project(&mut x).unwrap(); } #[test] fn t_rectangle_bounded() { - let xmin = vec![2.0; 5]; - let xmax = vec![4.5; 5]; + let xmin = [2.0; 5]; + let xmax = [4.5; 5]; let rectangle = Rectangle::new(Some(&xmin[..]), Some(&xmax[..])); let mut x = [1.0, 2.0, 3.0, 4.0, 5.0]; - rectangle.project(&mut x); + rectangle.project(&mut x).unwrap(); 
unit_test_utils::assert_nearly_equal_array( &[2.0, 2.0, 3.0, 4.0, 4.5], @@ -176,6 +194,18 @@ fn t_rectangle_bounded() { ); } +#[test] +fn t_rectangle_bounded_f32() { + let xmin = [2.0_f32; 3]; + let xmax = [4.5_f32; 3]; + let rectangle = Rectangle::new(Some(&xmin[..]), Some(&xmax[..])); + let mut x = [1.0_f32, 3.0, 5.0]; + + rectangle.project(&mut x).unwrap(); + + assert_eq!([2.0_f32, 3.0, 4.5], x); +} + #[test] fn t_rectangle_infinite_bounds() { let xmin = [-1.0, 2.0, f64::NEG_INFINITY]; @@ -183,7 +213,7 @@ fn t_rectangle_infinite_bounds() { let rectangle = Rectangle::new(Some(&xmin[..]), Some(&xmax[..])); let mut x = [-2.0, 3.0, 1.0]; - rectangle.project(&mut x); + rectangle.project(&mut x).unwrap(); unit_test_utils::assert_nearly_equal_array( &[-1.0, 3.0, 1.0], @@ -197,8 +227,8 @@ fn t_rectangle_infinite_bounds() { #[test] #[should_panic] fn t_rectangle_incompatible_dims() { - let xmin = vec![1.0; 5]; - let xmax = vec![2.0; 4]; + let xmin = [1.0; 5]; + let xmax = [2.0; 4]; let _rectangle = Rectangle::new(Some(&xmin[..]), Some(&xmax[..])); } @@ -217,7 +247,7 @@ fn t_rectangle_bounded_negative_entries() { let rectangle = Rectangle::new(Some(&xmin[..]), Some(&xmax[..])); let mut x = [-6.0, -3.0, 0.0, 3.0, -5.0, 1.0, 2.0, 3.0, -1.0, 0.0, 0.0]; - rectangle.project(&mut x); + rectangle.project(&mut x).unwrap(); unit_test_utils::assert_nearly_equal_array( &[-5.0, -3.0, -1.0, 2.0, -1.0, 0.0, 2.0, 3.0, 3.0, 4.0, 5.0], @@ -230,11 +260,11 @@ fn t_rectangle_bounded_negative_entries() { #[test] fn t_rectangle_only_xmin() { - let xmin = vec![2.0; 5]; + let xmin = [2.0; 5]; let rectangle = Rectangle::new(Some(&xmin[..]), None); let mut x = [1.0, 2.0, 3.0, 4.0, 5.0]; - rectangle.project(&mut x); + rectangle.project(&mut x).unwrap(); unit_test_utils::assert_nearly_equal_array( &[2.0, 2.0, 3.0, 4.0, 5.0], @@ -247,11 +277,11 @@ fn t_rectangle_only_xmin() { #[test] fn t_rectangle_only_xmax() { - let xmax = vec![-3.0; 5]; + let xmax = [-3.0; 5]; let rectangle = Rectangle::new(None, 
Some(&xmax[..])); let mut x = [-10.0, -20.0, 0.0, 5.0, 3.0]; - rectangle.project(&mut x); + rectangle.project(&mut x).unwrap(); unit_test_utils::assert_nearly_equal_array( &[-10.0, -20.0, -3.0, -3.0, -3.0], @@ -268,7 +298,7 @@ fn t_ball2_at_origin() { let mut x = [1.0, 1.0]; let ball = Ball2::new(None, radius); - ball.project(&mut x); + ball.project(&mut x).unwrap(); unit_test_utils::assert_nearly_equal_array( &[ @@ -282,12 +312,25 @@ ); } +#[test] +fn t_ball2_at_origin_f32() { + let radius = 1.0_f32; + let mut x = [1.0_f32, 1.0]; + let ball = Ball2::new(None, radius); + + ball.project(&mut x).unwrap(); + + let expected = std::f32::consts::FRAC_1_SQRT_2; + assert!((x[0] - expected).abs() < 1e-6); + assert!((x[1] - expected).abs() < 1e-6); +} + #[test] fn t_ball2_at_origin_different_radius_outside() { let radius = 0.8; let mut x = [1.0, 1.0]; let ball = Ball2::new(None, radius); - ball.project(&mut x); + ball.project(&mut x).unwrap(); let norm_proj_x = crate::matrix_operations::norm2(&x); unit_test_utils::assert_nearly_equal(radius, norm_proj_x, 1e-10, 1e-12, "wrong norm"); } @@ -297,7 +340,7 @@ fn t_ball2_at_origin_different_radius_inside() { let radius = 0.8; let mut x = [-0.2, 0.15]; let ball = Ball2::new(None, radius); - ball.project(&mut x); + ball.project(&mut x).unwrap(); unit_test_utils::assert_nearly_equal_array(&x, &[-0.2, 0.15], 1e-10, 1e-12, "wrong"); } @@ -307,7 +350,7 @@ fn t_ball2_at_center_different_radius_outside() { let mut x = [1.0, 1.0]; let center = [-0.8, -1.1]; let ball = Ball2::new(Some(&center), radius); - ball.project(&mut x); + ball.project(&mut x).unwrap(); let norm_x_minus_c = crate::matrix_operations::norm2_squared_diff(&x, &center).sqrt(); unit_test_utils::assert_nearly_equal(radius, norm_x_minus_c, 1e-10, 1e-12, "wrong norm"); } @@ -318,7 +361,7 @@ fn t_ball2_at_center_different_radius_inside() { let radius = 0.9; let mut x = [-0.9, -0.85]; let center = [-0.8, -1.1]; let ball = Ball2::new(Some(&center), radius); - ball.project(&mut x); + 
ball.project(&mut x).unwrap(); unit_test_utils::assert_nearly_equal_array(&[-0.9, -0.85], &x, 1e-10, 1e-12, "wrong result"); } @@ -329,7 +372,7 @@ let mut x = [2.0, 2.0]; let ball = Ball2::new(Some(&center[..]), radius); - ball.project(&mut x); + ball.project(&mut x).unwrap(); let expected_proj_element = std::f64::consts::FRAC_1_SQRT_2 + 1.; unit_test_utils::assert_nearly_equal_array( @@ -341,16 +384,36 @@ ); } +#[test] +fn t_ball2_boundary_no_change() { + let radius = 2.0; + let mut x = [0.0, 2.0]; + let x_expected = x; + let ball = Ball2::new(None, radius); + ball.project(&mut x).unwrap(); + unit_test_utils::assert_nearly_equal_array(&x_expected, &x, 1e-12, 1e-12, "wrong result"); +} + #[test] fn t_no_constraints() { let mut x = [1.0, 2.0, 3.0]; let whole_space = NoConstraints::new(); - whole_space.project(&mut x); + whole_space.project(&mut x).unwrap(); unit_test_utils::assert_nearly_equal_array(&[1., 2., 3.], &x, 1e-10, 1e-15, "x is wrong"); } +#[test] +fn t_no_constraints_f32() { + let mut x = [1.0_f32, 2.0, 3.0]; + let whole_space = NoConstraints::new(); + + whole_space.project(&mut x).unwrap(); + + assert_eq!([1.0_f32, 2.0, 3.0], x); +} + #[test] #[should_panic] fn t_cartesian_product_constraints_incoherent_indices() { @@ -370,7 +433,7 @@ fn t_cartesian_product_constraints_wrong_vector_dim() { .add_constraint(3, ball1) .add_constraint(10, ball2); let mut x = [0.0; 30]; - cart_prod.project(&mut x); + cart_prod.project(&mut x).unwrap(); } #[test] @@ -385,7 +448,7 @@ .add_constraint(idx1, ball1) .add_constraint(idx2, ball2); let mut x = [3.0, 4.0, 5.0, 2.0, 1.0]; - cart_prod.project(&mut x); + cart_prod.project(&mut x).unwrap(); let r1 = crate::matrix_operations::norm2(&x[0..idx1]); let r2 = crate::matrix_operations::norm2(&x[idx1..idx2]); unit_test_utils::assert_nearly_equal(r1, radius1, 1e-8, 1e-12, "r1 is wrong"); @@ -416,7 +479,7 @@ fn 
t_cartesian_product_ball_and_rectangle() { /* Projection */ let mut x = [-10.0, 0.5, 10.0, 0.01, -0.01, 0.1, 10.0, -1.0, 1.0]; - cart_prod.project(&mut x); + cart_prod.project(&mut x).unwrap(); unit_test_utils::assert_nearly_equal_array( &x[0..3], @@ -438,12 +501,33 @@ fn t_cartesian_product_ball_and_rectangle() { ); } +#[test] +fn t_cartesian_product_ball_and_rectangle_f32() { + let xmin1 = vec![-1.0_f32; 2]; + let xmax1 = vec![1.0_f32; 2]; + let rectangle1 = Rectangle::new(Some(&xmin1), Some(&xmax1)); + + let radius = 1.0_f32; + let ball = Ball2::new(None, radius); + + let cart_prod = CartesianProduct::new() + .add_constraint(2, rectangle1) + .add_constraint(5, ball); + + let mut x = [-4.0_f32, 0.25_f32, 2.0_f32, -1.0_f32, 2.0_f32]; + cart_prod.project(&mut x).unwrap(); + + assert_eq!([-1.0_f32, 0.25_f32], x[..2]); + let ball_norm = crate::matrix_operations::norm2(&x[2..5]); + assert!((ball_norm - radius).abs() < 1e-5_f32); +} + #[test] fn t_second_order_cone_case_i() { let soc = SecondOrderCone::new(1.0); let mut x = vec![1.0, 1.0, 1.42]; let x_copy = x.clone(); - soc.project(&mut x); + soc.project(&mut x).unwrap(); unit_test_utils::assert_nearly_equal_array(&x, &x_copy, 1e-10, 1e-12, "x has been modified"); } @@ -452,7 +536,7 @@ fn t_second_order_cone_case_ii() { let alpha = 0.5; let soc = SecondOrderCone::new(alpha); let mut x = vec![1.0, 1.0, -0.71]; - soc.project(&mut x); + soc.project(&mut x).unwrap(); let expected = vec![0.0; 3]; unit_test_utils::assert_nearly_equal_array( &x, @@ -468,14 +552,25 @@ fn t_second_order_cone_case_iii() { let alpha = 1.5; let soc = SecondOrderCone::new(alpha); let mut x = vec![1.0, 1.0, 0.1]; - soc.project(&mut x); + soc.project(&mut x).unwrap(); // make sure the new `x` is in the cone - let norm_z = crate::matrix_operations::norm2(&x[..=1]); + let norm_z: f64 = crate::matrix_operations::norm2(&x[..=1]); assert!(norm_z <= alpha * x[2]); // in fact the projection should be on the boundary of the cone assert!((norm_z - alpha * 
x[2]).abs() <= 1e-7); } +#[test] +fn t_second_order_cone_case_iii_f32() { + let alpha = 1.5_f32; + let soc = SecondOrderCone::new(alpha); + let mut x = vec![1.0_f32, 1.0_f32, 0.1_f32]; + soc.project(&mut x).unwrap(); + let norm_z = crate::matrix_operations::norm2(&x[..=1]); + assert!(norm_z <= alpha * x[2] + 1e-5_f32); + assert!((norm_z - alpha * x[2]).abs() <= 1e-4_f32); +} + #[test] #[should_panic] fn t_second_order_cone_illegal_alpha_i() { @@ -496,7 +591,7 @@ fn t_second_order_cone_short_vector() { let alpha = 1.0; let soc = SecondOrderCone::new(alpha); let mut _x = vec![1.0]; - soc.project(&mut _x); + soc.project(&mut _x).unwrap(); } #[test] @@ -516,7 +611,7 @@ fn t_cartesian_product_dimension() { // let's do a projection to make sure this works // Note: we've used the same set (finite_set), twice let mut x = [-0.5, 1.1, 0.45, 0.55, 10.0, 10.0, -500.0, 1.0, 1.0, 1.0]; - cartesian.project(&mut x); + cartesian.project(&mut x).unwrap(); println!("X = {:#?}", x); let sqrt_3_over_3 = 3.0_f64.sqrt() / 3.; unit_test_utils::assert_nearly_equal_array( @@ -539,6 +634,21 @@ fn t_cartesian_product_dimension() { ); } +#[test] +fn t_cartesian_product_indices_are_cumulative_lengths() { + let cartesian = CartesianProduct::new() + .add_constraint(1, NoConstraints::new()) + .add_constraint(3, NoConstraints::new()) + .add_constraint(6, NoConstraints::new()); + + assert_eq!(6, cartesian.dimension()); + + let mut x = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0]; + let original = x; + cartesian.project(&mut x).unwrap(); + assert_eq!(original, x); +} + #[test] fn t_cartesian_ball_no_constraint() { let xc = [1., 0., 0.]; @@ -552,7 +662,7 @@ fn t_cartesian_ball_no_constraint() { .add_constraint(9, no_constraints); assert_eq!(9, cartesian.dimension()); let mut x = [100., -200., 0.5, 1.5, 3.5, 1000., 5., -500., 2_000_000.]; - cartesian.project(&mut x); + cartesian.project(&mut x).unwrap(); let x_proj_ball = [0.869811089019176, 0.390566732942472, 0.911322376865767]; 
unit_test_utils::assert_nearly_equal_array( &x[0..=1], @@ -576,7 +686,7 @@ fn t_ball_inf_origin() { let ball_inf = BallInf::new(None, 1.0); let mut x = [0.0, -0.5, 0.5, 1.5, 3.5, 0.8, 1.1, -5.0, -10.0]; let x_correct = [0.0, -0.5, 0.5, 1.0, 1.0, 0.8, 1.0, -1.0, -1.0]; - ball_inf.project(&mut x); + ball_inf.project(&mut x).unwrap(); unit_test_utils::assert_nearly_equal_array( &x_correct, &x, @@ -592,34 +702,43 @@ fn t_ball_inf_center() { let xc = [5.0, -6.0]; let ball_inf = BallInf::new(Some(&xc), 1.5); let mut x = [11.0, -0.5]; - ball_inf.project(&mut x); + ball_inf.project(&mut x).unwrap(); unit_test_utils::assert_nearly_equal_array(&[6.5, -4.5], &x, 1e-10, 1e-12, "upper right"); let mut x = [3.0, -7.0]; - ball_inf.project(&mut x); + ball_inf.project(&mut x).unwrap(); unit_test_utils::assert_nearly_equal_array(&[3.5, -7.0], &x, 1e-10, 1e-12, "left"); let mut x = [800.0, -5.0]; - ball_inf.project(&mut x); + ball_inf.project(&mut x).unwrap(); unit_test_utils::assert_nearly_equal_array(&[6.5, -5.0], &x, 1e-10, 1e-12, "right"); let mut x = [9.0, -10.0]; - ball_inf.project(&mut x); + ball_inf.project(&mut x).unwrap(); unit_test_utils::assert_nearly_equal_array(&[6.5, -7.5], &x, 1e-10, 1e-12, "down right"); let mut x = [3.0, 0.0]; - ball_inf.project(&mut x); + ball_inf.project(&mut x).unwrap(); unit_test_utils::assert_nearly_equal_array(&[3.5, -4.5], &x, 1e-10, 1e-12, "top left"); let mut x = [6.0, -5.0]; - ball_inf.project(&mut x); + ball_inf.project(&mut x).unwrap(); unit_test_utils::assert_nearly_equal_array(&[6.0, -5.0], &x, 1e-10, 1e-12, "inside"); let mut x = [5.0, -6.0]; - ball_inf.project(&mut x); + ball_inf.project(&mut x).unwrap(); unit_test_utils::assert_nearly_equal_array(&[5.0, -6.0], &x, 1e-10, 1e-12, "centre"); } +#[test] +fn t_ball_inf_boundary_no_change() { + let ball_inf = BallInf::new(None, 1.0); + let mut x = [-1.0, 0.2, 1.0]; + let x_expected = x; + ball_inf.project(&mut x).unwrap(); + unit_test_utils::assert_nearly_equal_array(&x_expected, &x, 
1e-12, 1e-12, "wrong result"); } #[test] fn t_is_convex_ball_inf() { let ball_inf = BallInf::new(None, 1.5); @@ -650,7 +769,7 @@ fn t_is_convex_soc() { #[test] fn t_is_convex_zero() { let zero = Zero::new(); - assert!(zero.is_convex()); + assert!(<Zero as Constraint<f64>>::is_convex(&zero)); } #[test] @@ -690,7 +809,7 @@ fn t_simplex_projection() { let mut x = [1.0, 2.0, 3.0]; let alpha = 3.0; let my_simplex = Simplex::new(alpha); - my_simplex.project(&mut x); + my_simplex.project(&mut x).unwrap(); unit_test_utils::assert_nearly_equal( crate::matrix_operations::sum(&x), alpha, @@ -700,6 +819,29 @@ ); } +#[test] +fn t_simplex_projection_f32() { + let mut x = [1.0_f32, 2.0, 3.0]; + let alpha = 3.0_f32; + let simplex = Simplex::new(alpha); + simplex.project(&mut x).unwrap(); + + let sum = x[0] + x[1] + x[2]; + assert!((sum - alpha).abs() < 1e-5); + assert!(x.iter().all(|&xi| xi >= -1e-6)); +} + +#[test] +fn t_halfspace_boundary_no_change() { + let normal_vector = [1.0, 2.0]; + let offset = 5.0; + let halfspace = Halfspace::new(&normal_vector, offset); + let mut x = [1.0, 2.0]; + let x_expected = x; + halfspace.project(&mut x).unwrap(); + unit_test_utils::assert_nearly_equal_array(&x_expected, &x, 1e-12, 1e-12, "wrong result"); +} + #[test] fn t_simplex_projection_random_spam() { let n = 10; @@ -712,8 +854,7 @@ let alpha_scale = 20.; let alpha = alpha_scale * rand::random::<f64>(); let simplex = Simplex::new(alpha); - simplex.project(&mut x); - println!("x = {:?}", x); + simplex.project(&mut x).unwrap(); assert!(x.iter().all(|&xi| xi >= -1e-12)); unit_test_utils::assert_nearly_equal( crate::matrix_operations::sum(&x), @@ -737,7 +878,7 @@ fn t_simplex_projection_random_optimality() { let alpha = alpha_scale * rand::random::<f64>(); let simplex = Simplex::new(alpha); let y = z.clone(); - simplex.project(&mut z); + simplex.project(&mut z).unwrap(); for j in 0..n { let w = alpha * (y[j] - z[j]) - 
crate::matrix_operations::inner_product(&z, &y) + crate::matrix_operations::norm2_squared(&z); @@ -769,7 +910,7 @@ fn t_simplex_alpha_negative() { fn t_simplex_empty_vector() { let simplex = Simplex::new(1.0); let mut x = []; - simplex.project(&mut x); + simplex.project(&mut x).unwrap(); } #[test] @@ -786,7 +927,7 @@ fn t_ball1_random_optimality_conditions() { x.copy_from_slice(&x_star); let radius = 5. * rand::random::(); let ball1 = Ball1::new(None, radius); - ball1.project(&mut x_star); + ball1.project(&mut x_star).unwrap(); // make sure |x|_1 <= radius assert!( crate::matrix_operations::norm1(&x_star) <= radius * (1. + 1e-9), @@ -820,6 +961,17 @@ fn t_ball1_random_optimality_conditions() { } } +#[test] +fn t_ball1_projection_f32() { + let ball1 = Ball1::new(None, 1.0_f32); + let mut x = [2.0_f32, -1.0_f32, 0.0_f32]; + ball1.project(&mut x).unwrap(); + assert!((x[0] - 1.0_f32).abs() < 1e-6_f32); + assert!(x[1].abs() < 1e-6_f32); + assert!(x[2].abs() < 1e-6_f32); + assert!(crate::matrix_operations::norm1(&x) <= 1.0_f32 + 1e-6_f32); +} + #[test] fn t_ball1_random_optimality_conditions_centered() { for n in (10..=60).step_by(10) { @@ -835,13 +987,13 @@ fn t_ball1_random_optimality_conditions_centered() { .for_each(|xi| *xi = scale_xc * (2. * rand::random::() - 1.)); let radius = 5. * rand::random::(); let ball1 = Ball1::new(Some(&xc), radius); - ball1.project(&mut x); + ball1.project(&mut x).unwrap(); // x = x - xc x.iter_mut() .zip(xc.iter()) .for_each(|(xi, &xci)| *xi -= xci); assert!( - crate::matrix_operations::norm1(&x) <= radius * (1. + 1e-9), + crate::matrix_operations::norm1(&x) <= radius * (1. 
+ 1e-9) + 1e-12, "norm(x - xc, 1) > radius" ); } @@ -855,7 +1007,7 @@ fn t_ball1_wrong_dimensions() { let mut x = vec![3.0, 4.0, 5.0]; let radius = 1.0; let ball1 = Ball1::new(Some(&xc), radius); - ball1.project(&mut x); + ball1.project(&mut x).unwrap(); } #[test] @@ -864,8 +1016,8 @@ fn t_sphere2_no_center() { let mut x_out = [1.0, 1.0]; let mut x_in = [-0.3, -0.2]; let unit_sphere = Sphere2::new(None, radius); - unit_sphere.project(&mut x_out); - unit_sphere.project(&mut x_in); + unit_sphere.project(&mut x_out).unwrap(); + unit_sphere.project(&mut x_in).unwrap(); let norm_out = crate::matrix_operations::norm2(&x_out); let norm_in = crate::matrix_operations::norm2(&x_in); unit_test_utils::assert_nearly_equal(radius, norm_out, 1e-10, 1e-12, "norm_out is not 1.0"); @@ -877,7 +1029,7 @@ fn t_sphere2_no_center_projection_of_zero() { let radius = 0.9; let mut x = [0.0, 0.0]; let unit_sphere = Sphere2::new(None, radius); - unit_sphere.project(&mut x); + unit_sphere.project(&mut x).unwrap(); let norm_result = crate::matrix_operations::norm2(&x); unit_test_utils::assert_nearly_equal(radius, norm_result, 1e-10, 1e-12, "norm_out is not 1.0"); } @@ -889,7 +1041,7 @@ fn t_sphere2_center() { let mut x = [1.0, 1.0]; let unit_sphere = Sphere2::new(Some(¢er), radius); - unit_sphere.project(&mut x); + unit_sphere.project(&mut x).unwrap(); let mut x_minus_c = [0.0; 2]; x.iter() .zip(center.iter()) @@ -909,7 +1061,7 @@ fn t_sphere2_center_projection_of_center() { let mut x = [-3.0, 5.0]; let unit_sphere = Sphere2::new(Some(¢er), radius); - unit_sphere.project(&mut x); + unit_sphere.project(&mut x).unwrap(); let mut x_minus_c = [0.0; 2]; x.iter() .zip(center.iter()) @@ -928,7 +1080,7 @@ fn t_sphere2_empty_vector() { let radius = 1.0; let unit_sphere = Sphere2::new(None, radius); let mut x = []; - unit_sphere.project(&mut x); + unit_sphere.project(&mut x).unwrap(); } #[test] @@ -938,7 +1090,7 @@ fn t_sphere2_center_wrong_dimension() { let center = [1.0, 2.0, 3.0]; let unit_sphere = 
Sphere2::new(Some(¢er), radius); let mut x = [1.0, 2.0]; - unit_sphere.project(&mut x); + unit_sphere.project(&mut x).unwrap(); } #[test] @@ -951,8 +1103,8 @@ fn t_ball1_alpha_negative() { fn t_epigraph_squared_norm_inside() { let epi = EpigraphSquaredNorm::new(); let mut x = [1., 2., 10.]; - let x_correct = x.clone(); - epi.project(&mut x); + epi.project(&mut x).unwrap(); + let x_correct = x; unit_test_utils::assert_nearly_equal_array( &x_correct, &x, @@ -962,13 +1114,22 @@ fn t_epigraph_squared_norm_inside() { ); } +#[test] +fn t_epigraph_squared_norm_boundary_no_change() { + let epi = EpigraphSquaredNorm::new(); + let mut x = [1.0, 2.0, 5.0]; + let x_expected = x; + epi.project(&mut x).unwrap(); + unit_test_utils::assert_nearly_equal_array(&x_expected, &x, 1e-12, 1e-12, "wrong result"); +} + #[test] fn t_epigraph_squared_norm() { let epi = EpigraphSquaredNorm::new(); for i in 0..100 { let t = 0.01 * i as f64; let mut x = [1., 2., 3., t]; - epi.project(&mut x); + epi.project(&mut x).unwrap(); let err = (matrix_operations::norm2_squared(&x[..3]) - x[3]).abs(); assert!(err < 1e-10, "wrong projection on epigraph of squared norm"); } @@ -980,11 +1141,11 @@ fn t_epigraph_squared_norm_correctness() { let mut x = [1., 2., 3., 4.]; let x_correct = [ 0.560142228903570, - 1.120284457807140, + 1.120_284_457_807_14, 1.680426686710711, 4.392630432414829, ]; - epi.project(&mut x); + epi.project(&mut x).unwrap(); unit_test_utils::assert_nearly_equal_array( &x_correct, &x, @@ -994,6 +1155,15 @@ fn t_epigraph_squared_norm_correctness() { ); } +#[test] +fn t_epigraph_squared_norm_f32() { + let epi = EpigraphSquaredNorm::new(); + let mut x = [1.0_f32, 0.0, 0.0]; + epi.project(&mut x).unwrap(); + let err = (matrix_operations::norm2_squared(&x[..2]) - x[2]).abs(); + assert!(err < 1e-4); +} + #[test] fn t_affine_space() { let a = vec![ @@ -1002,11 +1172,11 @@ fn t_affine_space() { let b = vec![1., 2., -0.5]; let affine_set = AffineSpace::new(a, b); let mut x = [1., -2., -0.3, 0.5]; - 
affine_set.project(&mut x); + affine_set.project(&mut x).unwrap(); let x_correct = [ 1.888564346697095, 5.629857182200888, - 1.796204902230790, + 1.796_204_902_230_79, 2.888362906715977, ]; unit_test_utils::assert_nearly_equal_array( @@ -1018,6 +1188,56 @@ fn t_affine_space() { ); } +#[test] +fn t_affine_space_f32() { + let a = vec![ + 0.5_f32, 0.1, 0.2, -0.3, -0.6, 0.3, 0.0, 0.5, 1.0, 0.1, -1.0, -0.4, + ]; + let b = vec![1.0_f32, 2.0, -0.5]; + let affine_set = AffineSpace::new(a.clone(), b.clone()); + let mut x = [1.0_f32, -2.0, -0.3, 0.5]; + affine_set.project(&mut x).unwrap(); + + let x_correct = [ + 1.888_564_3_f32, + 5.629_857_f32, + 1.796_204_9_f32, + 2.888_363_f32, + ]; + assert!((x[0] - x_correct[0]).abs() < 1e-4_f32); + assert!((x[1] - x_correct[1]).abs() < 1e-4_f32); + assert!((x[2] - x_correct[2]).abs() < 1e-4_f32); + assert!((x[3] - x_correct[3]).abs() < 1e-4_f32); + + for (row, bi) in a.chunks_exact(4).zip(b.iter()) { + let ax_i = row + .iter() + .zip(x.iter()) + .fold(0.0_f32, |sum, (aij, xj)| sum + (*aij) * (*xj)); + assert!((ax_i - *bi).abs() < 1e-4_f32); + } +} + +#[test] +fn t_affine_space_projection_feasibility() { + let a = vec![ + 0.5, 0.1, 0.2, -0.3, -0.6, 0.3, 0., 0.5, 1.0, 0.1, -1.0, -0.4, + ]; + let b = vec![1., 2., -0.5]; + let affine_set = AffineSpace::new(a.clone(), b.clone()); + let mut x = [1., -2., -0.3, 0.5]; + affine_set.project(&mut x).unwrap(); + let residual = [ + a[0] * x[0] + a[1] * x[1] + a[2] * x[2] + a[3] * x[3] - b[0], + a[4] * x[0] + a[5] * x[1] + a[6] * x[2] + a[7] * x[3] - b[1], + a[8] * x[0] + a[9] * x[1] + a[10] * x[2] + a[11] * x[3] - b[2], + ]; + assert!( + crate::matrix_operations::norm_inf(&residual) <= 1e-10, + "projection does not satisfy Ax = b" + ); +} + #[test] fn t_affine_space_larger() { let a = vec![ @@ -1026,7 +1246,7 @@ fn t_affine_space_larger() { let b = vec![1., -2., 3., 4.]; let affine_set = AffineSpace::new(a, b); let mut x = [10., 11., -9., 4., 5.]; - affine_set.project(&mut x); + 
affine_set.project(&mut x).unwrap(); let x_correct = [ 9.238095238095237, -0.714285714285714, @@ -1049,11 +1269,48 @@ fn t_affine_space_single_row() { let b = vec![1.]; let affine_set = AffineSpace::new(a, b); let mut x = [5., 6., 10., 25.]; - affine_set.project(&mut x); + affine_set.project(&mut x).unwrap(); let s = x.iter().sum(); unit_test_utils::assert_nearly_equal(1., s, 1e-12, 1e-14, "wrong sum"); } +#[test] +fn t_affine_space_try_new() { + let a = vec![ + 0.5, 0.1, 0.2, -0.3, -0.6, 0.3, 0., 0.5, 1.0, 0.1, -1.0, -0.4, + ]; + let b = vec![1., 2., -0.5]; + let affine_set = AffineSpace::try_new(a, b); + assert!(affine_set.is_ok(), "try_new should succeed on valid data"); +} + +#[test] +fn t_affine_space_try_new_empty_b() { + let a = vec![1.0, 2.0]; + let b = vec![]; + let affine_set = AffineSpace::::try_new(a, b); + assert!(matches!(affine_set, Err(AffineSpaceError::EmptyB))); +} + +#[test] +fn t_affine_space_try_new_wrong_dimensions() { + let a = vec![0.5, 0.1, 0.2, -0.3, -0.6, 0.3, 0., 0.5, 1.0, 0.1, -1.0]; + let b = vec![1., 2., -0.5]; + let affine_set = AffineSpace::try_new(a, b); + assert!(matches!( + affine_set, + Err(AffineSpaceError::IncompatibleDimensions) + )); +} + +#[test] +fn t_affine_space_try_new_rank_deficient() { + let a = vec![1.0, 2.0, 2.0, 4.0]; + let b = vec![1.0, 2.0]; + let affine_set = AffineSpace::try_new(a, b); + assert!(matches!(affine_set, Err(AffineSpaceError::NotFullRowRank))); +} + #[test] #[should_panic] fn t_affine_space_wrong_dimensions() { @@ -1188,29 +1445,443 @@ fn is_norm_p_projection( true } +fn is_norm_p_projection_with_tol( + x: &[f64], + x_candidate_proj: &[f64], + p: f64, + radius: f64, + sample_points: usize, + feasibility_tol: f64, + inner_prod_tol: f64, +) -> bool { + let n = x.len(); + assert_eq!(n, x_candidate_proj.len()); + + let norm_proj = x_candidate_proj + .iter() + .map(|xi| xi.abs().powf(p)) + .sum::() + .powf(1.0 / p); + if norm_proj > radius + feasibility_tol { + return false; + } + + let e: Vec = x + 
.iter() + .zip(x_candidate_proj.iter()) + .map(|(xi, yi)| xi - yi) + .collect(); + let samples = sample_lp_sphere(sample_points, n, p, radius); + for xi in samples.iter() { + let w: Vec = x_candidate_proj + .iter() + .zip(xi.iter()) + .map(|(xproj_i, xi_i)| xproj_i - xi_i) + .collect(); + let inner = matrix_operations::inner_product(&w, &e); + if inner < -inner_prod_tol { + return false; + } + } + true +} + +fn as_f64_vec(x: &[T]) -> Vec { + x.iter() + .map(|xi| { + xi.to_f64() + .expect("test float values must be convertible to f64") + }) + .collect() +} + +fn lp_norm_generic(x: &[T], p: T) -> T { + x.iter() + .map(|xi| xi.abs().powf(p)) + .fold(T::zero(), |sum, xi| sum + xi) + .powf(T::one() / p) +} + +fn random_vec(rng: &mut impl rand::Rng, len: usize, lower: f64, upper: f64) -> Vec { + (0..len) + .map(|_| cast::(rng.random_range(lower..upper))) + .collect() +} + +fn run_ballp_random_properties() +where + T: Float + ToPrimitive, +{ + let mut rng = rand::rng(); + let solver_tol = if T::epsilon() > cast::(1e-10) { + cast::(1e-5) + } else { + cast::(1e-12) + }; + let feasibility_tol = if T::epsilon() > cast::(1e-10) { + cast::(5e-3) + } else { + cast::(1e-8) + }; + let idempotence_tol = if T::epsilon() > cast::(1e-10) { + cast::(2e-4) + } else { + cast::(1e-10) + }; + let inner_prod_tol = if T::epsilon() > cast::(1e-10) { + 5e-3 + } else { + 1e-8 + }; + + for &(dim, p_f64, radius_f64, with_center) in &[ + (3_usize, 1.7_f64, 1.1_f64, false), + (4_usize, 2.5_f64, 0.9_f64, true), + (5_usize, 3.4_f64, 1.4_f64, true), + ] { + for _ in 0..40 { + let center = with_center.then(|| random_vec::(&mut rng, dim, -1.5, 1.5)); + let mut x = random_vec::(&mut rng, dim, -4.0, 4.0); + let x_before = x.clone(); + let p = cast::(p_f64); + let radius = cast::(radius_f64); + let ball = BallP::new(center.as_deref(), radius, p, solver_tol, 300); + ball.project(&mut x).unwrap(); + + let shifted_projection: Vec = if let Some(center) = center.as_ref() { + x.iter() + .zip(center.iter()) + 
.map(|(xi, ci)| *xi - *ci) + .collect() + } else { + x.clone() + }; + let proj_norm = lp_norm_generic(&shifted_projection, p); + assert!( + proj_norm <= radius + feasibility_tol, + "projected point is not feasible for BallP" + ); + + let mut reproj = x.clone(); + ball.project(&mut reproj).unwrap(); + let max_reproj_diff = reproj + .iter() + .zip(x.iter()) + .fold(T::zero(), |acc, (a, b)| acc.max((*a - *b).abs())); + assert!( + max_reproj_diff <= idempotence_tol, + "BallP projection is not idempotent within tolerance" + ); + + let shifted_x_before: Vec = if let Some(center) = center.as_ref() { + x_before + .iter() + .zip(center.iter()) + .map(|(xi, ci)| { + (*xi - *ci) + .to_f64() + .expect("test float values must be convertible to f64") + }) + .collect() + } else { + as_f64_vec(&x_before) + }; + let shifted_projection_f64 = as_f64_vec(&shifted_projection); + assert!( + is_norm_p_projection_with_tol( + &shifted_x_before, + &shifted_projection_f64, + p_f64, + radius_f64, + 500, + feasibility_tol + .to_f64() + .expect("test float values must be convertible to f64"), + inner_prod_tol, + ), + "BallP projection failed sampled optimality check" + ); + } + } +} + +fn run_epigraph_squared_norm_random_properties() +where + T: Float + roots::FloatType + std::iter::Sum + ToPrimitive, +{ + let mut rng = rand::rng(); + let feasibility_tol = if T::epsilon() > cast::(1e-10) { + cast::(2e-4) + } else { + cast::(1e-10) + }; + let idempotence_tol = if T::epsilon() > cast::(1e-10) { + cast::(2e-4) + } else { + cast::(1e-10) + }; + let vi_tol = if T::epsilon() > cast::(1e-10) { + 2e-3 + } else { + 1e-8 + }; + let epi = EpigraphSquaredNorm::new(); + + for dim in 2..=5 { + for _ in 0..50 { + let mut x = random_vec::(&mut rng, dim, -3.0, 3.0); + x.push(cast::(rng.random_range(-2.0..4.0))); + let x_before = as_f64_vec(&x); + + epi.project(&mut x).unwrap(); + + let z = &x[..dim]; + let t = x[dim]; + let norm_z_sq = matrix_operations::norm2_squared(z); + assert!( + norm_z_sq <= t + 
feasibility_tol, + "Epigraph projection is not feasible" + ); + + let mut reproj = x.clone(); + epi.project(&mut reproj).unwrap(); + let max_reproj_diff = reproj + .iter() + .zip(x.iter()) + .fold(T::neg_infinity(), |acc, (a, b)| { + acc.max(num::Float::abs(*a - *b)) + }); + assert!( + max_reproj_diff <= idempotence_tol, + "Epigraph projection is not idempotent within tolerance" + ); + + let proj_f64 = as_f64_vec(&x); + let residual: Vec = x_before + .iter() + .zip(proj_f64.iter()) + .map(|(xb, xp)| xb - xp) + .collect(); + + for _ in 0..150 { + let z_feasible: Vec = (0..dim).map(|_| rng.random_range(-3.0..3.0)).collect(); + let norm_z_sq_feasible = z_feasible.iter().map(|zi| zi * zi).sum::(); + let t_feasible = norm_z_sq_feasible + rng.random_range(0.0..3.0); + let mut y = z_feasible; + y.push(t_feasible); + let diff: Vec = proj_f64 + .iter() + .zip(y.iter()) + .map(|(xp, yi)| xp - yi) + .collect(); + let inner = matrix_operations::inner_product(&diff, &residual); + assert!( + inner >= -vi_tol, + "Epigraph projection failed sampled variational inequality" + ); + } + } + } +} + +fn assert_projection_idempotent(constraint: &C, x0: &[f64], message: &'static str) { + let mut once = x0.to_vec(); + let mut twice = x0.to_vec(); + constraint.project(&mut once).unwrap(); + constraint.project(&mut twice).unwrap(); + constraint.project(&mut twice).unwrap(); + unit_test_utils::assert_nearly_equal_array(&once, &twice, 1e-10, 1e-12, message); +} + #[test] fn t_ballp_at_origin_projection() { let radius = 0.8; let mut x = [1.0, -1.0, 6.0]; - let x0 = x.clone(); + let x0 = x; let p = 3.; let tol = 1e-16; let max_iters: usize = 200; let ball = BallP::new(None, radius, p, tol, max_iters); - ball.project(&mut x); + ball.project(&mut x).unwrap(); assert!(is_norm_p_projection(&x0, &x, p, radius, 10_000)); } +#[test] +fn t_ballp_at_origin_projection_preserves_signs() { + let radius = 0.9; + let mut x = [1.0, -3.0, 2.5, -0.7]; + let x0 = x; + let ball = BallP::new(None, radius, 3.0, 
1e-14, 200); + ball.project(&mut x).unwrap(); + for (proj, original) in x.iter().zip(x0.iter()) { + assert!(proj.abs() <= original.abs() + 1e-12); + if *original != 0.0 { + assert_eq!(proj.signum(), original.signum()); + } + } +} + +#[test] +fn t_ballp_zero_coordinates_branch() { + let radius = 0.7; + let p = 3.5; + let mut x = [0.0, -2.0, 0.0, 1.5]; + let x0 = x; + let ball = BallP::new(None, radius, p, 1e-14, 300); + ball.project(&mut x).unwrap(); + assert_eq!(x[0], 0.0); + assert_eq!(x[2], 0.0); + assert!(is_norm_p_projection(&x0, &x, p, radius, 10_000)); +} + +#[test] +fn t_ballp_outside_projection_lands_on_boundary_for_multiple_p() { + let test_cases = [ + (1.1, [2.0, -1.0, 0.5]), + (1.5, [1.0, -2.0, 3.0]), + (2.5, [3.0, -4.0, 1.0]), + (10.0, [1.2, -0.7, 2.1]), + ]; + let radius = 0.8; + + for (p, x_init) in test_cases { + let mut x = x_init; + let ball = BallP::new(None, radius, p, 1e-14, 400); + ball.project(&mut x).unwrap(); + let norm_p = x + .iter() + .map(|xi| xi.abs().powf(p)) + .sum::() + .powf(1.0 / p); + unit_test_utils::assert_nearly_equal( + radius, + norm_p, + 1e-9, + 1e-11, + "projection should lie on the boundary", + ); + } +} + +#[test] +fn t_ballp_boundary_no_change() { + let radius = 1.0; + let p = 4.0; + let mut x = [1.0, 0.0]; + let x_expected = x; + let ball = BallP::new(None, radius, p, 1e-14, 200); + ball.project(&mut x).unwrap(); + unit_test_utils::assert_nearly_equal_array(&x_expected, &x, 1e-12, 1e-12, "wrong result"); +} + +#[test] +fn t_ballp_translated_projection_multiple_p_values() { + let center = [1.0, -2.0, 0.5]; + let radius = 0.9; + let cases = [ + (1.1, [3.0, -4.0, 2.0]), + (1.5, [2.5, -0.5, 1.8]), + (2.5, [4.0, -3.5, -1.0]), + (10.0, [1.8, 0.5, 3.0]), + ]; + + for (p, x_init) in cases { + let mut x = x_init; + let ball = BallP::new(Some(¢er), radius, p, 1e-14, 400); + ball.project(&mut x).unwrap(); + let norm_p = x + .iter() + .zip(center.iter()) + .map(|(xi, ci)| (xi - ci).abs().powf(p)) + .sum::() + .powf(1.0 / p); + 
unit_test_utils::assert_nearly_equal( + radius, + norm_p, + 1e-9, + 1e-11, + "translated lp projection should lie on the boundary", + ); + } +} + +#[test] +fn t_halfspace_projection_is_idempotent() { + let normal_vector = [1.0, 2.0]; + let halfspace = Halfspace::new(&normal_vector, 1.0); + assert_projection_idempotent( + &halfspace, + &[-1.0, 3.0], + "halfspace projection not idempotent", + ); +} + +#[test] +fn t_rectangle_projection_is_idempotent() { + let xmin = [-1.0, 0.0, -2.0]; + let xmax = [1.0, 2.0, 0.5]; + let rectangle = Rectangle::new(Some(&xmin), Some(&xmax)); + assert_projection_idempotent( + &rectangle, + &[-10.0, 1.5, 3.0], + "rectangle projection not idempotent", + ); +} + +#[test] +fn t_ball2_projection_is_idempotent() { + let center = [0.5, -1.0]; + let ball = Ball2::new(Some(¢er), 0.8); + assert_projection_idempotent(&ball, &[3.0, 2.0], "ball2 projection not idempotent"); +} + +#[test] +fn t_ball_inf_projection_is_idempotent() { + let center = [2.0, -3.0]; + let ball_inf = BallInf::new(Some(¢er), 1.2); + assert_projection_idempotent(&ball_inf, &[10.0, 1.0], "ballinf projection not idempotent"); +} + +#[test] +fn t_affine_space_projection_is_idempotent() { + let a = vec![1.0, 1.0, 0.0, 1.0, -1.0, 2.0]; + let b = vec![1.0, 0.5]; + let affine_set = AffineSpace::new(a, b); + assert_projection_idempotent( + &affine_set, + &[3.0, -2.0, 4.0], + "affine-space projection not idempotent", + ); +} + +#[test] +fn t_sphere2_projection_is_idempotent() { + let center = [1.0, 1.0, -1.0]; + let sphere = Sphere2::new(Some(¢er), 2.0); + assert_projection_idempotent( + &sphere, + &[4.0, -2.0, 3.0], + "sphere projection not idempotent", + ); +} + +#[test] +fn t_ballp_projection_is_idempotent() { + let center = [0.0, 1.0, -1.0]; + let ball = BallP::new(Some(¢er), 0.75, 3.0, 1e-14, 300); + assert_projection_idempotent(&ball, &[2.0, -3.0, 1.5], "ballp projection not idempotent"); +} + #[test] fn t_ballp_at_origin_x_already_inside() { let radius = 1.5; let mut x = [0.5, 
-0.2, 0.1]; - let x0 = x.clone(); + let x0 = x; let p = 3.; let tol = 1e-16; let max_iters: usize = 1200; let ball = BallP::new(None, radius, p, tol, max_iters); - ball.project(&mut x); + ball.project(&mut x).unwrap(); unit_test_utils::assert_nearly_equal_array( &x0, &x, @@ -1229,13 +1900,13 @@ fn t_ballp_at_xc_projection() { let tol = 1e-16; let max_iters: usize = 200; let ball = BallP::new(Some(&x_center), radius, p, tol, max_iters); - ball.project(&mut x); + ball.project(&mut x).unwrap(); - let nrm = (x - .iter() - .zip(x_center.iter()) - .fold(0.0, |s, (x, y)| (*x - *y).abs().powf(p) + s)) - .powf(1. / p); + let nrm: f64 = (x.iter().zip(x_center.iter()).fold(0.0_f64, |s, (x, y)| { + let diff: f64 = *x - *y; + diff.abs().powf(p) + s + })) + .powf(1.0_f64 / p); unit_test_utils::assert_nearly_equal(radius, nrm, 1e-10, 1e-12, "wrong distance to lp-ball"); let proj_expected = [0.5178727276722618, 2.2277981662325224]; @@ -1247,3 +1918,232 @@ fn t_ballp_at_xc_projection() { "wrong projection on lp-ball centered at xc != 0", ); } + +#[test] +fn t_ballp_at_xc_projection_f32() { + let radius = 0.8_f32; + let mut x = [0.0_f32, 0.1]; + let x_center = [1.0_f32, 3.0]; + let p = 4.0_f32; + let tol = 1e-6_f32; + let max_iters: usize = 200; + let ball = BallP::new(Some(&x_center), radius, p, tol, max_iters); + ball.project(&mut x).unwrap(); + + let nrm = x + .iter() + .zip(x_center.iter()) + .fold(0.0_f32, |s, (xi, yi)| s + (*xi - *yi).abs().powf(p)) + .powf(1.0_f32 / p); + assert!((radius - nrm).abs() < 1e-4_f32); + + let proj_expected = [0.517_872_75_f32, 2.227_798_2_f32]; + assert!((x[0] - proj_expected[0]).abs() < 1e-4_f32); + assert!((x[1] - proj_expected[1]).abs() < 1e-4_f32); +} + +#[test] +fn t_ballp_random_properties_f64() { + run_ballp_random_properties::(); +} + +#[test] +fn t_ballp_random_properties_f32() { + run_ballp_random_properties::(); +} + +#[test] +fn t_epigraph_squared_norm_random_properties_f64() { + run_epigraph_squared_norm_random_properties::(); +} + 
+#[test] +fn t_epigraph_squared_norm_random_properties_f32() { + run_epigraph_squared_norm_random_properties::(); +} + +#[test] +#[should_panic] +fn t_rectangle_no_bounds() { + let _rectangle = Rectangle::::new(None, None); +} + +#[test] +#[should_panic] +fn t_rectangle_only_xmin_wrong_dimension() { + let xmin = [1.0, 2.0, 3.0]; + let rectangle = Rectangle::new(Some(&xmin), None); + let mut x = [0.0, 1.0]; + rectangle.project(&mut x).unwrap(); +} + +#[test] +#[should_panic] +fn t_rectangle_only_xmax_wrong_dimension() { + let xmax = [1.0, 2.0, 3.0]; + let rectangle = Rectangle::new(None, Some(&xmax)); + let mut x = [0.0, 1.0]; + rectangle.project(&mut x).unwrap(); +} + +#[test] +#[should_panic] +fn t_halfspace_wrong_dimension() { + let normal_vector = [1.0, 2.0, 3.0]; + let halfspace = Halfspace::new(&normal_vector, 1.0); + let mut x = [1.0, 2.0]; + halfspace.project(&mut x).unwrap(); +} + +#[test] +#[should_panic] +fn t_ball2_wrong_dimensions() { + let center = [1.0, 2.0]; + let ball = Ball2::new(Some(¢er), 1.0); + let mut x = [1.0, 2.0, 3.0]; + ball.project(&mut x).unwrap(); +} + +#[test] +#[should_panic] +fn t_ball2_nonpositive_radius() { + let _ball = Ball2::new(None, 0.0); +} + +#[test] +#[should_panic] +fn t_ball_inf_wrong_dimensions() { + let center = [1.0, 2.0]; + let ball_inf = BallInf::new(Some(¢er), 1.0); + let mut x = [1.0, 2.0, 3.0]; + ball_inf.project(&mut x).unwrap(); +} + +#[test] +#[should_panic] +fn t_ball_inf_nonpositive_radius() { + let _ball_inf = BallInf::new(None, 0.0); +} + +#[test] +#[should_panic] +fn t_epigraph_squared_norm_short_vector() { + let epi = EpigraphSquaredNorm::new(); + let mut x = [1.0]; + epi.project(&mut x).unwrap(); +} + +#[test] +#[should_panic] +fn t_affine_space_empty_b() { + let _affine_set = AffineSpace::new(vec![1.0, 2.0], vec![]); +} + +#[test] +#[should_panic] +fn t_affine_space_project_wrong_dimension() { + let a = vec![1.0, 0.0, 0.0, 1.0]; + let b = vec![0.0, 0.0]; + let affine_set = AffineSpace::new(a, b); + let 
mut x = [1.0]; + affine_set.project(&mut x).unwrap(); +} + +#[test] +#[should_panic] +fn t_affine_space_rank_deficient_matrix() { + let a = vec![1.0, 2.0, 1.0, 2.0]; + let b = vec![1.0, 1.0]; + let _affine_set = AffineSpace::new(a, b); +} + +#[test] +fn t_is_convex_sphere2() { + let sphere = Sphere2::new(None, 1.0); + assert!(!sphere.is_convex()); +} + +#[test] +fn t_is_convex_no_constraints() { + let whole_space = NoConstraints::new(); + assert!(>::is_convex(&whole_space)); +} + +#[test] +fn t_is_convex_rectangle() { + let xmin = [-1.0, -2.0]; + let xmax = [1.0, 2.0]; + let rectangle = Rectangle::new(Some(&xmin), Some(&xmax)); + assert!(rectangle.is_convex()); +} + +#[test] +fn t_is_convex_simplex() { + let simplex = Simplex::new(1.0); + assert!(simplex.is_convex()); +} + +#[test] +fn t_is_convex_ball1() { + let ball1 = Ball1::new(None, 1.0); + assert!(ball1.is_convex()); +} + +#[test] +fn t_is_convex_ballp() { + let ballp = BallP::new(None, 1.0, 3.0, 1e-12, 100); + assert!(ballp.is_convex()); +} + +#[test] +fn t_is_convex_epigraph_squared_norm() { + let epi = EpigraphSquaredNorm::new(); + assert!(>::is_convex(&epi)); +} + +#[test] +fn t_is_convex_affine_space() { + let a = vec![1.0, 0.0, 0.0, 1.0]; + let b = vec![1.0, -1.0]; + let affine_set = AffineSpace::new(a, b); + assert!(affine_set.is_convex()); +} + +#[test] +#[should_panic] +fn t_ballp_nonpositive_radius() { + let _ballp = BallP::new(None, 0.0, 2.0, 1e-12, 100); +} + +#[test] +#[should_panic] +fn t_ballp_exponent_too_small() { + let _ballp = BallP::new(None, 1.0, 1.0, 1e-12, 100); +} + +#[test] +#[should_panic] +fn t_ballp_nonfinite_exponent() { + let _ballp = BallP::new(None, 1.0, f64::INFINITY, 1e-12, 100); +} + +#[test] +#[should_panic] +fn t_ballp_nonpositive_tolerance() { + let _ballp = BallP::new(None, 1.0, 2.0, 0.0, 100); +} + +#[test] +#[should_panic] +fn t_ballp_zero_max_iters() { + let _ballp = BallP::new(None, 1.0, 2.0, 1e-12, 0); +} + +#[test] +#[should_panic] +fn t_ballp_wrong_dimensions() { 
+ let center = [1.0, 2.0]; + let ballp = BallP::new(Some(¢er), 1.0, 3.0, 1e-12, 100); + let mut x = [1.0, 2.0, 3.0]; + ballp.project(&mut x).unwrap(); +} diff --git a/rust/src/constraints/zero.rs b/rust/src/constraints/zero.rs new file mode 100644 index 00000000..c9a7e503 --- /dev/null +++ b/rust/src/constraints/zero.rs @@ -0,0 +1,38 @@ +use super::Constraint; +use crate::FunctionCallResult; +use num::Float; + +#[derive(Clone, Copy, Default)] +/// Set Zero, $\\{0\\}$ +pub struct Zero {} + +impl Zero { + /// Constructs new instance of `Zero` + /// + /// # Example + /// + /// ``` + /// use optimization_engine::constraints::{Constraint, Zero}; + /// + /// let zero = Zero::new(); + /// let mut x = [1.0, -2.0, 3.0]; + /// zero.project(&mut x).unwrap(); + /// ``` + #[must_use] + pub fn new() -> Self { + Zero {} + } +} + +impl Constraint for Zero { + /// Computes the projection on $\\{0\\}$, that is, $\Pi_{\\{0\\}}(x) = 0$ + /// for all $x$ + fn project(&self, x: &mut [T]) -> FunctionCallResult { + x.iter_mut().for_each(|xi| *xi = T::zero()); + Ok(()) + } + + fn is_convex(&self) -> bool { + true + } +} diff --git a/src/core/fbs/fbs_cache.rs b/rust/src/core/fbs/fbs_cache.rs similarity index 69% rename from src/core/fbs/fbs_cache.rs rename to rust/src/core/fbs/fbs_cache.rs index 83dd61bb..546c2dc6 100644 --- a/src/core/fbs/fbs_cache.rs +++ b/rust/src/core/fbs/fbs_cache.rs @@ -1,19 +1,23 @@ //! FBS Cache //! 
+use num::Float; use std::num::NonZeroUsize; /// Cache for the forward-backward splitting (FBS), or projected gradient, algorithm /// /// This struct allocates memory needed for the FBS algorithm -pub struct FBSCache { - pub(crate) work_gradient_u: Vec, - pub(crate) work_u_previous: Vec, - pub(crate) gamma: f64, - pub(crate) tolerance: f64, - pub(crate) norm_fpr: f64, +pub struct FBSCache +where + T: Float, +{ + pub(crate) work_gradient_u: Vec, + pub(crate) work_u_previous: Vec, + pub(crate) gamma: T, + pub(crate) tolerance: T, + pub(crate) norm_fpr: T, } -impl FBSCache { +impl FBSCache { /// Construct a new instance of `FBSCache` /// /// ## Arguments @@ -37,13 +41,13 @@ impl FBSCache { /// This method will panic if there is no available memory for the required allocation /// (capacity overflow) /// - pub fn new(n: NonZeroUsize, gamma: f64, tolerance: f64) -> FBSCache { + pub fn new(n: NonZeroUsize, gamma: T, tolerance: T) -> FBSCache { FBSCache { - work_gradient_u: vec![0.0; n.get()], - work_u_previous: vec![0.0; n.get()], + work_gradient_u: vec![T::zero(); n.get()], + work_u_previous: vec![T::zero(); n.get()], gamma, tolerance, - norm_fpr: f64::INFINITY, + norm_fpr: T::infinity(), } } } diff --git a/rust/src/core/fbs/fbs_engine.rs b/rust/src/core/fbs/fbs_engine.rs new file mode 100644 index 00000000..a7a38f32 --- /dev/null +++ b/rust/src/core/fbs/fbs_engine.rs @@ -0,0 +1,110 @@ +//! FBS Engine +//! 
+use crate::{ + constraints, + core::{fbs::FBSCache, AlgorithmEngine, Problem}, + matrix_operations, FunctionCallResult, SolverError, +}; +use num::Float; + +/// The FBE Engine defines the steps of the FBE algorithm and the termination criterion +/// +pub struct FBSEngine<'a, GradientType, ConstraintType, CostType, T = f64> +where + T: Float, + GradientType: Fn(&[T], &mut [T]) -> FunctionCallResult, + CostType: Fn(&[T], &mut T) -> FunctionCallResult, + ConstraintType: constraints::Constraint, +{ + pub(crate) problem: Problem<'a, GradientType, ConstraintType, CostType, T>, + pub(crate) cache: &'a mut FBSCache, +} + +impl<'a, GradientType, ConstraintType, CostType, T> + FBSEngine<'a, GradientType, ConstraintType, CostType, T> +where + T: Float, + GradientType: Fn(&[T], &mut [T]) -> FunctionCallResult, + CostType: Fn(&[T], &mut T) -> FunctionCallResult, + ConstraintType: constraints::Constraint, +{ + /// Constructor for instances of `FBSEngine` + /// + /// ## Arguments + /// + /// - `problem` problem definition (cost function, gradient of the cost, constraints) + /// - mutable reference to a `cache` a cache (which is created once); the cache is reuseable + /// + /// ## Returns + /// + /// An new instance of `FBSEngine` + pub fn new( + problem: Problem<'a, GradientType, ConstraintType, CostType, T>, + cache: &'a mut FBSCache, + ) -> FBSEngine<'a, GradientType, ConstraintType, CostType, T> { + FBSEngine { problem, cache } + } + + fn gradient_step(&mut self, u_current: &mut [T]) -> FunctionCallResult { + (self.problem.gradf)(u_current, &mut self.cache.work_gradient_u)?; + if !crate::matrix_operations::is_finite(&self.cache.work_gradient_u) { + return Err(SolverError::NotFiniteComputation( + "gradient evaluation returned a non-finite value during an FBS step", + )); + } + + // take a gradient step: u_currect -= gamma * gradient + u_current + .iter_mut() + .zip(self.cache.work_gradient_u.iter()) + .for_each(|(u, w)| *u = *u - self.cache.gamma * *w); + Ok(()) + } + + fn 
projection_step(&mut self, u_current: &mut [T]) -> FunctionCallResult { + self.problem.constraints.project(u_current) + } +} + +impl<'a, GradientType, ConstraintType, CostType, T> AlgorithmEngine + for FBSEngine<'a, GradientType, ConstraintType, CostType, T> +where + T: Float, + GradientType: Fn(&[T], &mut [T]) -> FunctionCallResult + 'a, + CostType: Fn(&[T], &mut T) -> FunctionCallResult + 'a, + ConstraintType: constraints::Constraint + 'a, +{ + /// Take a forward-backward step and check whether the algorithm should terminate + /// + /// ## Arguments + /// + /// - `u_current` the current mutable + /// + /// ## Returns + /// + /// - A boolean flag which is`true` if and only if the algorithm should not + /// terminate + /// + /// ## Panics + /// + /// The method may panick if the computation of the gradient of the cost function + /// or the cost function panics. + fn step(&mut self, u_current: &mut [T]) -> Result { + self.cache.work_u_previous.copy_from_slice(u_current); // cache the previous step + self.gradient_step(u_current)?; // compute the gradient + self.projection_step(u_current)?; // project + if !crate::matrix_operations::is_finite(u_current) { + return Err(SolverError::NotFiniteComputation( + "projected iterate contains a non-finite value during an FBS step", + )); + } + self.cache.norm_fpr = + matrix_operations::norm_inf_diff(u_current, &self.cache.work_u_previous); + + Ok(self.cache.norm_fpr > self.cache.tolerance) + } + + fn init(&mut self, _u_current: &mut [T]) -> FunctionCallResult { + Ok(()) + } +} diff --git a/src/core/fbs/fbs_optimizer.rs b/rust/src/core/fbs/fbs_optimizer.rs similarity index 66% rename from src/core/fbs/fbs_optimizer.rs rename to rust/src/core/fbs/fbs_optimizer.rs index d714ab67..076a750a 100644 --- a/src/core/fbs/fbs_optimizer.rs +++ b/rust/src/core/fbs/fbs_optimizer.rs @@ -8,6 +8,7 @@ use crate::{ }, matrix_operations, FunctionCallResult, SolverError, }; +use num::Float; use std::time; const MAX_ITER: usize = 100_usize; @@ 
-22,23 +23,25 @@ const MAX_ITER: usize = 100_usize; /// a different optimization problem. /// /// -pub struct FBSOptimizer<'a, GradientType, ConstraintType, CostType> +pub struct FBSOptimizer<'a, GradientType, ConstraintType, CostType, T = f64> where - GradientType: Fn(&[f64], &mut [f64]) -> FunctionCallResult, - CostType: Fn(&[f64], &mut f64) -> FunctionCallResult, - ConstraintType: constraints::Constraint, + T: Float, + GradientType: Fn(&[T], &mut [T]) -> FunctionCallResult, + CostType: Fn(&[T], &mut T) -> FunctionCallResult, + ConstraintType: constraints::Constraint, { - fbs_engine: FBSEngine<'a, GradientType, ConstraintType, CostType>, + fbs_engine: FBSEngine<'a, GradientType, ConstraintType, CostType, T>, max_iter: usize, max_duration: Option, } -impl<'a, GradientType, ConstraintType, CostType> - FBSOptimizer<'a, GradientType, ConstraintType, CostType> +impl<'a, GradientType, ConstraintType, CostType, T> + FBSOptimizer<'a, GradientType, ConstraintType, CostType, T> where - GradientType: Fn(&[f64], &mut [f64]) -> FunctionCallResult, - CostType: Fn(&[f64], &mut f64) -> FunctionCallResult, - ConstraintType: constraints::Constraint, + T: Float, + GradientType: Fn(&[T], &mut [T]) -> FunctionCallResult + 'a, + CostType: Fn(&[T], &mut T) -> FunctionCallResult + 'a, + ConstraintType: constraints::Constraint + 'a, { /// Constructs a new instance of `FBSOptimizer` /// @@ -46,9 +49,10 @@ where /// /// - `problem`: problem definition /// - `cache`: instance of `FBSCache` + #[must_use] pub fn new( - problem: Problem<'a, GradientType, ConstraintType, CostType>, - cache: &'a mut FBSCache, + problem: Problem<'a, GradientType, ConstraintType, CostType, T>, + cache: &'a mut FBSCache, ) -> Self { FBSOptimizer { fbs_engine: FBSEngine::new(problem, cache), @@ -62,53 +66,48 @@ where /// ## Panics /// /// The method panics if the specified tolerance is not positive + #[must_use] pub fn with_tolerance( self, - tolerance: f64, - ) -> FBSOptimizer<'a, GradientType, ConstraintType, 
CostType> { - assert!(tolerance > 0.0); + tolerance: T, + ) -> FBSOptimizer<'a, GradientType, ConstraintType, CostType, T> { + assert!(tolerance > T::zero()); self.fbs_engine.cache.tolerance = tolerance; self } /// Sets the maximum number of iterations + #[must_use] pub fn with_max_iter( mut self, max_iter: usize, - ) -> FBSOptimizer<'a, GradientType, ConstraintType, CostType> { + ) -> FBSOptimizer<'a, GradientType, ConstraintType, CostType, T> { self.max_iter = max_iter; self } /// Sets the maximum number of iterations + #[must_use] pub fn with_max_duration( mut self, max_duration: time::Duration, - ) -> FBSOptimizer<'a, GradientType, ConstraintType, CostType> { + ) -> FBSOptimizer<'a, GradientType, ConstraintType, CostType, T> { self.max_duration = Some(max_duration); self } -} -impl<'life, GradientType, ConstraintType, CostType> Optimizer - for FBSOptimizer<'life, GradientType, ConstraintType, CostType> -where - GradientType: Fn(&[f64], &mut [f64]) -> FunctionCallResult + 'life, - CostType: Fn(&[f64], &mut f64) -> FunctionCallResult + 'life, - ConstraintType: constraints::Constraint + 'life, -{ - fn solve(&mut self, u: &mut [f64]) -> Result { - let now = instant::Instant::now(); + /// Solves the optimization problem for decision variables of scalar type `T`. + pub fn solve(&mut self, u: &mut [T]) -> Result, SolverError> { + let now = web_time::Instant::now(); - // Initialize - propagate error upstream, if any self.fbs_engine.init(u)?; let mut num_iter: usize = 0; let mut step_flag = self.fbs_engine.step(u)?; if let Some(dur) = self.max_duration { - while step_flag && num_iter < self.max_iter && dur <= now.elapsed() { + while step_flag && num_iter < self.max_iter && now.elapsed() <= dur { num_iter += 1; step_flag = self.fbs_engine.step(u)? 
} @@ -119,15 +118,15 @@ where } } - // cost at the solution [propagate error upstream] - let mut cost_value: f64 = 0.0; + let mut cost_value = T::zero(); (self.fbs_engine.problem.cost)(u, &mut cost_value)?; if !matrix_operations::is_finite(u) || !cost_value.is_finite() { - return Err(SolverError::NotFiniteComputation); + return Err(SolverError::NotFiniteComputation( + "final FBS iterate or cost is non-finite", + )); } - // export solution status Ok(SolverStatus::new( if num_iter < self.max_iter { ExitStatus::Converged @@ -141,3 +140,16 @@ where )) } } + +impl<'life, GradientType, ConstraintType, CostType, T> Optimizer + for FBSOptimizer<'life, GradientType, ConstraintType, CostType, T> +where + T: Float, + GradientType: Fn(&[T], &mut [T]) -> FunctionCallResult + 'life, + CostType: Fn(&[T], &mut T) -> FunctionCallResult + 'life, + ConstraintType: constraints::Constraint + 'life, +{ + fn solve(&mut self, u: &mut [T]) -> Result, SolverError> { + FBSOptimizer::solve(self, u) + } +} diff --git a/src/core/fbs/mod.rs b/rust/src/core/fbs/mod.rs similarity index 100% rename from src/core/fbs/mod.rs rename to rust/src/core/fbs/mod.rs diff --git a/src/core/fbs/tests.rs b/rust/src/core/fbs/tests.rs similarity index 56% rename from src/core/fbs/tests.rs rename to rust/src/core/fbs/tests.rs index b94425b1..765caff7 100644 --- a/src/core/fbs/tests.rs +++ b/rust/src/core/fbs/tests.rs @@ -9,6 +9,13 @@ const N_DIM: usize = 2; #[cfg(test)] use crate::mocks; +fn solve_with_optimizer_trait( + optimizer: &mut impl crate::core::Optimizer, + u: &mut [f32], +) -> Result, crate::SolverError> { + optimizer.solve(u) +} + #[test] fn t_solve_fbs_hard() { let bounds = constraints::NoConstraints::new(); @@ -48,7 +55,7 @@ fn t_solve_fbs_hard_failure_nan() { let mut u = [-12., -160., 55.]; let mut optimizer = FBSOptimizer::new(problem, &mut fbs_cache).with_max_iter(10000); let status = optimizer.solve(&mut u); - assert_eq!(Err(SolverError::NotFiniteComputation), status); + assert!(matches!(status, 
Err(SolverError::NotFiniteComputation(_)))); } #[test] @@ -114,7 +121,7 @@ fn t_solve_fbs() { assert!(status.has_converged()); assert!(status.norm_fpr() < tolerance); - unit_test_utils::assert_nearly_equal_array(&mocks::SOLUTION_A, &u, 1e-4, 1e-5, "u"); + unit_test_utils::assert_nearly_equal_array(&mocks::solution_a(), &u, 1e-4, 1e-5, "u"); } #[test] @@ -147,3 +154,91 @@ fn t_solve_fbs_many_times() { assert!(status.norm_fpr() < tolerance); } } + +#[test] +fn t_fbs_step_no_constraints_f32() { + let no_constraints = constraints::NoConstraints::new(); + let problem = Problem::new( + &no_constraints, + |u: &[f32], grad: &mut [f32]| -> FunctionCallResult { + grad[0] = u[0] + u[1] + 1.0; + grad[1] = u[0] + 2.0 * u[1] - 1.0; + Ok(()) + }, + |u: &[f32], cost: &mut f32| -> FunctionCallResult { + *cost = u[0] * u[0] + 2.0 * u[1] * u[1] + u[0] - u[1] + 3.0; + Ok(()) + }, + ); + let gamma = 0.1_f32; + let tolerance = 1e-6_f32; + + let mut fbs_cache = FBSCache::::new(NonZeroUsize::new(N_DIM).unwrap(), gamma, tolerance); + let mut fbs_engine = FBSEngine::new(problem, &mut fbs_cache); + let mut u = [1.0_f32, 3.0_f32]; + + assert!(fbs_engine.step(&mut u).unwrap()); + assert!((u[0] - 0.5_f32).abs() < 1e-6); + assert!((u[1] - 2.4_f32).abs() < 1e-6); +} + +#[test] +fn t_solve_fbs_f32() { + let radius = 0.2_f32; + let box_constraints = constraints::Ball2::new(None, radius); + let problem = Problem::new( + &box_constraints, + |u: &[f32], grad: &mut [f32]| -> FunctionCallResult { + grad[0] = u[0] + u[1] + 1.0; + grad[1] = u[0] + 2.0 * u[1] - 1.0; + Ok(()) + }, + |u: &[f32], cost: &mut f32| -> FunctionCallResult { + *cost = u[0] * u[0] + 2.0 * u[1] * u[1] + u[0] - u[1] + 3.0; + Ok(()) + }, + ); + let gamma = 0.1_f32; + let tolerance = 1e-6_f32; + + let mut fbs_cache = FBSCache::::new(NonZeroUsize::new(N_DIM).unwrap(), gamma, tolerance); + let mut u = [0.0_f32; N_DIM]; + let mut optimizer = FBSOptimizer::new(problem, &mut fbs_cache); + + let status = optimizer.solve(&mut u).unwrap(); + + 
assert!(status.has_converged()); + let expected = crate::mocks::solution_a::(); + assert!(status.norm_fpr() < tolerance); + assert!((u[0] - expected[0]).abs() < 1e-4); + assert!((u[1] - expected[1]).abs() < 1e-4); +} + +#[test] +fn t_solve_fbs_f32_via_optimizer_trait() { + let radius = 0.2_f32; + let box_constraints = constraints::Ball2::new(None, radius); + let problem = Problem::new( + &box_constraints, + |u: &[f32], grad: &mut [f32]| -> FunctionCallResult { + grad[0] = u[0] + u[1] + 1.0; + grad[1] = u[0] + 2.0 * u[1] - 1.0; + Ok(()) + }, + |u: &[f32], cost: &mut f32| -> FunctionCallResult { + *cost = u[0] * u[0] + 2.0 * u[1] * u[1] + u[0] - u[1] + 3.0; + Ok(()) + }, + ); + let gamma = 0.1_f32; + let tolerance = 1e-6_f32; + + let mut fbs_cache = FBSCache::::new(NonZeroUsize::new(N_DIM).unwrap(), gamma, tolerance); + let mut u = [0.0_f32; N_DIM]; + let mut optimizer = FBSOptimizer::new(problem, &mut fbs_cache); + + let status = solve_with_optimizer_trait(&mut optimizer, &mut u).unwrap(); + + assert!(status.has_converged()); + assert!(status.norm_fpr() < tolerance); +} diff --git a/src/core/mod.rs b/rust/src/core/mod.rs similarity index 54% rename from src/core/mod.rs rename to rust/src/core/mod.rs index 46099e1a..a329bdaa 100644 --- a/src/core/mod.rs +++ b/rust/src/core/mod.rs @@ -3,6 +3,8 @@ //! //! +use num::Float; + pub mod fbs; pub mod panoc; pub mod problem; @@ -29,12 +31,20 @@ pub enum ExitStatus { } /// A general optimizer -pub trait Optimizer { - /// solves a given problem and updates the initial estimate `u` with the solution +pub trait Optimizer +where + T: Float, +{ + /// Solves a given problem and updates the initial estimate `u` with the solution. + /// + /// Returns the solver status on success. 
/// - /// Returns the solver status + /// If the algorithm cannot proceed because a user callback fails, a + /// projection fails, non-finite values are encountered, or an internal + /// numerical/kernel inconsistency is detected, this method returns + /// `Err(SolverError)`. /// - fn solve(&mut self, u: &mut [f64]) -> Result; + fn solve(&mut self, u: &mut [T]) -> Result, SolverError>; } /// Engine supporting an algorithm @@ -46,10 +56,17 @@ pub trait Optimizer { /// It defines what the algorithm does at every step (see `step`) and whether /// the specified termination criterion is satisfied /// -pub trait AlgorithmEngine { - /// Take a step of the algorithm and return `Ok(true)` only if the iterations should continue - fn step(&mut self, u: &mut [f64]) -> Result; +pub trait AlgorithmEngine { + /// Take a step of the algorithm and return `Ok(true)` only if the iterations should continue. + /// + /// Returns `Err(SolverError)` if a callback or projection fails, if a + /// non-finite value is produced, or if the engine detects an invalid + /// numerical state. + fn step(&mut self, u: &mut [T]) -> Result; - /// Initializes the algorithm - fn init(&mut self, u: &mut [f64]) -> FunctionCallResult; + /// Initializes the algorithm. + /// + /// Returns `Err(SolverError)` if initialization requires evaluating a + /// callback/projection and that operation fails. 
+ fn init(&mut self, u: &mut [T]) -> FunctionCallResult; } diff --git a/src/core/panoc/mod.rs b/rust/src/core/panoc/mod.rs similarity index 100% rename from src/core/panoc/mod.rs rename to rust/src/core/panoc/mod.rs diff --git a/src/core/panoc/panoc_cache.rs b/rust/src/core/panoc/panoc_cache.rs similarity index 60% rename from src/core/panoc/panoc_cache.rs rename to rust/src/core/panoc/panoc_cache.rs index 7711a349..4d4f7ffa 100644 --- a/src/core/panoc/panoc_cache.rs +++ b/rust/src/core/panoc/panoc_cache.rs @@ -1,6 +1,19 @@ -const DEFAULT_SY_EPSILON: f64 = 1e-10; -const DEFAULT_CBFGS_EPSILON: f64 = 1e-8; -const DEFAULT_CBFGS_ALPHA: f64 = 1.0; +use crate::numeric::cast; +use lbfgs::LbfgsPrecision; +use num::Float; +use std::iter::Sum; + +fn default_sy_epsilon() -> T { + cast::(1e-10) +} + +fn default_cbfgs_epsilon() -> T { + cast::(1e-8) +} + +fn default_cbfgs_alpha() -> T { + T::one() +} /// Cache for PANOC /// @@ -12,38 +25,44 @@ const DEFAULT_CBFGS_ALPHA: f64 = 1.0; /// Subsequently, a `PANOCEngine` is used to construct an instance of `PANOCAlgorithm` /// #[derive(Debug)] -pub struct PANOCCache { - pub(crate) lbfgs: lbfgs::Lbfgs, - pub(crate) gradient_u: Vec, +pub struct PANOCCache +where + T: Float + LbfgsPrecision + Sum, +{ + pub(crate) lbfgs: lbfgs::Lbfgs, + pub(crate) gradient_u: Vec, /// Stores the gradient of the cost at the previous iteration. 
This is /// an optional field because it is used (and needs to be allocated) /// only if we need to check the AKKT-specific termination conditions - pub(crate) gradient_u_previous: Option>, - pub(crate) u_half_step: Vec, + pub(crate) gradient_u_previous: Option>, + pub(crate) u_half_step: Vec, /// Keeps track of best point so far - pub(crate) best_u_half_step: Vec, - pub(crate) gradient_step: Vec, - pub(crate) direction_lbfgs: Vec, - pub(crate) u_plus: Vec, - pub(crate) rhs_ls: f64, - pub(crate) lhs_ls: f64, - pub(crate) gamma_fpr: Vec, - pub(crate) gamma: f64, - pub(crate) tolerance: f64, - pub(crate) norm_gamma_fpr: f64, + pub(crate) best_u_half_step: Vec, + pub(crate) gradient_step: Vec, + pub(crate) direction_lbfgs: Vec, + pub(crate) u_plus: Vec, + pub(crate) rhs_ls: T, + pub(crate) lhs_ls: T, + pub(crate) gamma_fpr: Vec, + pub(crate) gamma: T, + pub(crate) tolerance: T, + pub(crate) norm_gamma_fpr: T, /// Keeps track of best FPR so far - pub(crate) best_norm_gamma_fpr: f64, - pub(crate) gradient_u_norm_sq: f64, - pub(crate) gradient_step_u_half_step_diff_norm_sq: f64, - pub(crate) tau: f64, - pub(crate) lipschitz_constant: f64, - pub(crate) sigma: f64, - pub(crate) cost_value: f64, + pub(crate) best_norm_gamma_fpr: T, + pub(crate) gradient_u_norm_sq: T, + pub(crate) gradient_step_u_half_step_diff_norm_sq: T, + pub(crate) tau: T, + pub(crate) lipschitz_constant: T, + pub(crate) sigma: T, + pub(crate) cost_value: T, pub(crate) iteration: usize, - pub(crate) akkt_tolerance: Option, + pub(crate) akkt_tolerance: Option, } -impl PANOCCache { +impl PANOCCache +where + T: Float + LbfgsPrecision + Sum, +{ /// Construct a new instance of `PANOCCache` /// /// ## Arguments @@ -63,36 +82,36 @@ impl PANOCCache { /// /// This constructor allocated memory using `vec!`. 
/// - /// It allocates a total of `8*problem_size + 2*lbfgs_memory_size*problem_size + 2*lbfgs_memory_size + 11` floats (`f64`) + /// It allocates a total of `8*problem_size + 2*lbfgs_memory_size*problem_size + 2*lbfgs_memory_size + 11` floats of type `T` /// - pub fn new(problem_size: usize, tolerance: f64, lbfgs_memory_size: usize) -> PANOCCache { - assert!(tolerance > 0., "tolerance must be positive"); + pub fn new(problem_size: usize, tolerance: T, lbfgs_memory_size: usize) -> PANOCCache { + assert!(tolerance > T::zero(), "tolerance must be positive"); PANOCCache { - gradient_u: vec![0.0; problem_size], + gradient_u: vec![T::zero(); problem_size], gradient_u_previous: None, - u_half_step: vec![0.0; problem_size], - best_u_half_step: vec![0.0; problem_size], - gamma_fpr: vec![0.0; problem_size], - direction_lbfgs: vec![0.0; problem_size], - gradient_step: vec![0.0; problem_size], - u_plus: vec![0.0; problem_size], - gamma: 0.0, + u_half_step: vec![T::zero(); problem_size], + best_u_half_step: vec![T::zero(); problem_size], + gamma_fpr: vec![T::zero(); problem_size], + direction_lbfgs: vec![T::zero(); problem_size], + gradient_step: vec![T::zero(); problem_size], + u_plus: vec![T::zero(); problem_size], + gamma: T::zero(), tolerance, - norm_gamma_fpr: f64::INFINITY, - best_norm_gamma_fpr: f64::INFINITY, - gradient_u_norm_sq: 0.0, - gradient_step_u_half_step_diff_norm_sq: 0.0, + norm_gamma_fpr: T::infinity(), + best_norm_gamma_fpr: T::infinity(), + gradient_u_norm_sq: T::zero(), + gradient_step_u_half_step_diff_norm_sq: T::zero(), lbfgs: lbfgs::Lbfgs::new(problem_size, lbfgs_memory_size) - .with_cbfgs_alpha(DEFAULT_CBFGS_ALPHA) - .with_cbfgs_epsilon(DEFAULT_CBFGS_EPSILON) - .with_sy_epsilon(DEFAULT_SY_EPSILON), - lhs_ls: 0.0, - rhs_ls: 0.0, - tau: 1.0, - lipschitz_constant: 0.0, - sigma: 0.0, - cost_value: 0.0, + .with_cbfgs_alpha(default_cbfgs_alpha()) + .with_cbfgs_epsilon(default_cbfgs_epsilon()) + .with_sy_epsilon(default_sy_epsilon()), + lhs_ls: T::zero(), + 
rhs_ls: T::zero(), + tau: T::one(), + lipschitz_constant: T::zero(), + sigma: T::zero(), + cost_value: T::zero(), iteration: 0, akkt_tolerance: None, } @@ -109,10 +128,13 @@ impl PANOCCache { /// /// The method panics if `akkt_tolerance` is nonpositive /// - pub fn set_akkt_tolerance(&mut self, akkt_tolerance: f64) { - assert!(akkt_tolerance > 0.0, "akkt_tolerance must be positive"); + pub fn set_akkt_tolerance(&mut self, akkt_tolerance: T) { + assert!( + akkt_tolerance > T::zero(), + "akkt_tolerance must be positive" + ); self.akkt_tolerance = Some(akkt_tolerance); - self.gradient_u_previous = Some(vec![0.0; self.gradient_step.len()]); + self.gradient_u_previous = Some(vec![T::zero(); self.gradient_step.len()]); } /// Copies the value of the current cost gradient to `gradient_u_previous`, @@ -127,8 +149,8 @@ impl PANOCCache { } /// Computes the AKKT residual which is defined as `||gamma*(fpr + df - df_previous)||` - fn akkt_residual(&self) -> f64 { - let mut r = 0.0; + fn akkt_residual(&self) -> T { + let mut r = T::zero(); if let Some(df_previous) = &self.gradient_u_previous { // Notation: gamma_fpr_i is the i-th element of gamma_fpr = gamma * fpr, // df_i is the i-th element of the gradient of the cost function at the @@ -139,9 +161,8 @@ impl PANOCCache { .iter() .zip(self.gradient_u.iter()) .zip(df_previous.iter()) - .fold(0.0, |mut sum, ((&gamma_fpr_i, &df_i), &dfp_i)| { - sum += (gamma_fpr_i + self.gamma * (df_i - dfp_i)).powi(2); - sum + .fold(T::zero(), |sum, ((&gamma_fpr_i, &df_i), &dfp_i)| { + sum + (gamma_fpr_i + self.gamma * (df_i - dfp_i)).powi(2) }) .sqrt(); } @@ -185,19 +206,19 @@ impl PANOCCache { /// and `gamma` to 0.0 pub fn reset(&mut self) { self.lbfgs.reset(); - self.best_u_half_step.fill(0.0); - self.best_norm_gamma_fpr = f64::INFINITY; - self.norm_gamma_fpr = f64::INFINITY; - self.gradient_u_norm_sq = 0.0; - self.gradient_step_u_half_step_diff_norm_sq = 0.0; - self.lhs_ls = 0.0; - self.rhs_ls = 0.0; - self.tau = 1.0; - self.lipschitz_constant 
= 0.0; - self.sigma = 0.0; - self.cost_value = 0.0; + self.best_u_half_step.fill(T::zero()); + self.best_norm_gamma_fpr = T::infinity(); + self.norm_gamma_fpr = T::infinity(); + self.gradient_u_norm_sq = T::zero(); + self.gradient_step_u_half_step_diff_norm_sq = T::zero(); + self.lhs_ls = T::zero(); + self.rhs_ls = T::zero(); + self.tau = T::one(); + self.lipschitz_constant = T::zero(); + self.sigma = T::zero(); + self.cost_value = T::zero(); self.iteration = 0; - self.gamma = 0.0; + self.gamma = T::zero(); } /// Store the current half step if it improves the best fixed-point residual so far. @@ -225,7 +246,8 @@ impl PANOCCache { /// The method panics if alpha or epsilon are nonpositive and if sy_epsilon /// is negative. /// - pub fn with_cbfgs_parameters(mut self, alpha: f64, epsilon: f64, sy_epsilon: f64) -> Self { + #[must_use] + pub fn with_cbfgs_parameters(mut self, alpha: T, epsilon: T, sy_epsilon: T) -> Self { self.lbfgs = self .lbfgs .with_cbfgs_alpha(alpha) @@ -264,4 +286,22 @@ mod tests { assert_eq!(2.0, cache.best_norm_gamma_fpr); assert_eq!(&[-1.0, -2.0], &cache.best_u_half_step[..]); } + + #[test] + fn t_cache_best_half_step_f32() { + let mut cache = PANOCCache::::new(2, 1e-6_f32, 3); + + cache.u_half_step.copy_from_slice(&[1.0_f32, 2.0]); + cache.norm_gamma_fpr = 3.0_f32; + cache.cache_best_half_step(); + + assert_eq!(3.0_f32, cache.best_norm_gamma_fpr); + assert_eq!(&[1.0_f32, 2.0], &cache.best_u_half_step[..]); + + cache.reset(); + assert!(cache.best_norm_gamma_fpr.is_infinite()); + assert!(cache.norm_gamma_fpr.is_infinite()); + assert_eq!(0.0_f32, cache.gamma); + assert_eq!(1.0_f32, cache.tau); + } } diff --git a/src/core/panoc/panoc_engine.rs b/rust/src/core/panoc/panoc_engine.rs similarity index 70% rename from src/core/panoc/panoc_engine.rs rename to rust/src/core/panoc/panoc_engine.rs index 63ad862d..c558ceb4 100644 --- a/src/core/panoc/panoc_engine.rs +++ b/rust/src/core/panoc/panoc_engine.rs @@ -1,52 +1,72 @@ use crate::{ constraints, 
core::{panoc::PANOCCache, AlgorithmEngine, Problem}, - matrix_operations, FunctionCallResult, SolverError, + matrix_operations, + numeric::cast, + FunctionCallResult, SolverError, }; +use lbfgs::LbfgsPrecision; +use num::Float; +use std::iter::Sum; -/// Mimum estimated Lipschitz constant (initial estimate) -const MIN_L_ESTIMATE: f64 = 1e-10; +fn min_l_estimate() -> T { + cast::(1e-10) +} -/// gamma = GAMMA_L_COEFF/L -const GAMMA_L_COEFF: f64 = 0.95; +fn gamma_l_coeff() -> T { + cast::(0.95) +} //const SIGMA_COEFF: f64 = 0.49; -/// Delta in the estimation of the initial Lipschitz constant -const DELTA_LIPSCHITZ: f64 = 1e-12; +fn delta_lipschitz() -> T { + cast::(1e-12) +} -/// Epsilon in the estimation of the initial Lipschitz constant -const EPSILON_LIPSCHITZ: f64 = 1e-6; +fn epsilon_lipschitz() -> T { + cast::(1e-6) +} -/// Safety parameter used to check a strict inequality in the update of the Lipschitz constant -const LIPSCHITZ_UPDATE_EPSILON: f64 = 1e-6; +fn lipschitz_update_epsilon() -> T { + cast::(1e-6) +} /// Maximum iterations of updating the Lipschitz constant const MAX_LIPSCHITZ_UPDATE_ITERATIONS: usize = 10; -/// Maximum possible Lipschitz constant -const MAX_LIPSCHITZ_CONSTANT: f64 = 1e9; +fn max_lipschitz_constant() -> T { + cast::(1e9) +} + +fn norm2_squared_diff(a: &[T], b: &[T]) -> T { + assert_eq!(a.len(), b.len()); + a.iter() + .zip(b.iter()) + .fold(T::zero(), |sum, (&x, &y)| sum + (x - y) * (x - y)) +} /// Maximum number of linesearch iterations const MAX_LINESEARCH_ITERATIONS: u32 = 10; /// Engine for PANOC algorithm -pub struct PANOCEngine<'a, GradientType, ConstraintType, CostType> +pub struct PANOCEngine<'a, GradientType, ConstraintType, CostType, T = f64> where - GradientType: Fn(&[f64], &mut [f64]) -> FunctionCallResult, - CostType: Fn(&[f64], &mut f64) -> FunctionCallResult, - ConstraintType: constraints::Constraint, + T: Float + LbfgsPrecision + Sum, + GradientType: Fn(&[T], &mut [T]) -> FunctionCallResult, + CostType: Fn(&[T], &mut T) 
-> FunctionCallResult, + ConstraintType: constraints::Constraint, { - problem: Problem<'a, GradientType, ConstraintType, CostType>, - pub(crate) cache: &'a mut PANOCCache, + problem: Problem<'a, GradientType, ConstraintType, CostType, T>, + pub(crate) cache: &'a mut PANOCCache, } -impl<'a, GradientType, ConstraintType, CostType> - PANOCEngine<'a, GradientType, ConstraintType, CostType> +impl<'a, GradientType, ConstraintType, CostType, T> + PANOCEngine<'a, GradientType, ConstraintType, CostType, T> where - GradientType: Fn(&[f64], &mut [f64]) -> FunctionCallResult, - CostType: Fn(&[f64], &mut f64) -> FunctionCallResult, - ConstraintType: constraints::Constraint, + T: Float + LbfgsPrecision + Sum, + GradientType: Fn(&[T], &mut [T]) -> FunctionCallResult, + CostType: Fn(&[T], &mut T) -> FunctionCallResult, + ConstraintType: constraints::Constraint, { /// Construct a new Engine for PANOC /// @@ -60,28 +80,28 @@ where /// /// pub fn new( - problem: Problem<'a, GradientType, ConstraintType, CostType>, - cache: &'a mut PANOCCache, - ) -> PANOCEngine<'a, GradientType, ConstraintType, CostType> { + problem: Problem<'a, GradientType, ConstraintType, CostType, T>, + cache: &'a mut PANOCCache, + ) -> PANOCEngine<'a, GradientType, ConstraintType, CostType, T> { PANOCEngine { problem, cache } } /// Estimate the local Lipschitz constant at `u` - fn estimate_loc_lip(&mut self, u: &mut [f64]) -> FunctionCallResult { + fn estimate_loc_lip(&mut self, u: &mut [T]) -> FunctionCallResult { let mut lipest = crate::lipschitz_estimator::LipschitzEstimator::new( u, &self.problem.gradf, &mut self.cache.gradient_u, ) - .with_delta(DELTA_LIPSCHITZ) - .with_epsilon(EPSILON_LIPSCHITZ); + .with_delta(delta_lipschitz()) + .with_epsilon(epsilon_lipschitz()); self.cache.lipschitz_constant = lipest.estimate_local_lipschitz()?; Ok(()) } /// Computes the FPR and its norm - fn compute_fpr(&mut self, u_current: &[f64]) { + fn compute_fpr(&mut self, u_current: &[T]) { // compute the FPR: // fpr ← u - 
u_half_step let cache = &mut self.cache; @@ -90,19 +110,19 @@ where .iter_mut() .zip(u_current.iter()) .zip(cache.u_half_step.iter()) - .for_each(|((fpr, u), uhalf)| *fpr = u - uhalf); + .for_each(|((fpr, u), uhalf)| *fpr = *u - *uhalf); // compute the norm of FPR cache.norm_gamma_fpr = matrix_operations::norm2(&cache.gamma_fpr); } /// Score the current feasible half step and cache it if it is the best so far. - pub(crate) fn cache_best_half_step(&mut self, u_current: &[f64]) { + pub(crate) fn cache_best_half_step(&mut self, u_current: &[T]) { self.compute_fpr(u_current); self.cache.cache_best_half_step(); } /// Computes a gradient step; does not compute the gradient - fn gradient_step(&mut self, u_current: &[f64]) { + fn gradient_step(&mut self, u_current: &[T]) { // take a gradient step: // gradient_step ← u_current - gamma * gradient let cache = &mut self.cache; @@ -135,17 +155,18 @@ where } /// Computes a projection on `gradient_step` - fn half_step(&mut self) { + fn half_step(&mut self) -> FunctionCallResult { let cache = &mut self.cache; // u_half_step ← projection(gradient_step) cache.u_half_step.copy_from_slice(&cache.gradient_step); - self.problem.constraints.project(&mut cache.u_half_step); + self.problem.constraints.project(&mut cache.u_half_step)?; cache.gradient_step_u_half_step_diff_norm_sq = - matrix_operations::norm2_squared_diff(&cache.gradient_step, &cache.u_half_step); + norm2_squared_diff(&cache.gradient_step, &cache.u_half_step); + Ok(()) } /// Computes an LBFGS direction; updates `cache.direction_lbfgs` - fn lbfgs_direction(&mut self, u_current: &[f64]) { + fn lbfgs_direction(&mut self, u_current: &[T]) { let cache = &mut self.cache; // update the LBFGS buffer cache.lbfgs.update_hessian(&cache.gamma_fpr, u_current); @@ -160,7 +181,7 @@ where /// Returns the RHS of the Lipschitz update /// Computes rhs = cost + LIP_EPS * |f| - gamma * + (L/2/gamma) ||gamma * fpr||^2 - fn lipschitz_check_rhs(&mut self) -> f64 { + fn lipschitz_check_rhs(&mut 
self) -> T { let cache = &mut self.cache; let gamma = cache.gamma; let cost_value = cache.cost_value; @@ -169,53 +190,64 @@ where matrix_operations::inner_product(&cache.gradient_u, &cache.gamma_fpr); // rhs ← cost + LIP_EPS * |f| - + (L/2/gamma) ||gamma_fpr||^2 - cost_value + LIPSCHITZ_UPDATE_EPSILON * cost_value.abs() - inner_prod_grad_fpr - + (GAMMA_L_COEFF / (2.0 * gamma)) * cache.norm_gamma_fpr * cache.norm_gamma_fpr + cost_value + lipschitz_update_epsilon::() * cost_value.abs() - inner_prod_grad_fpr + + (gamma_l_coeff::() / (cast::(2.0) * gamma)) + * cache.norm_gamma_fpr + * cache.norm_gamma_fpr } /// Updates the estimate of the Lipscthiz constant - fn update_lipschitz_constant(&mut self, u_current: &[f64]) -> FunctionCallResult { - let mut cost_u_half_step = 0.0; + fn update_lipschitz_constant(&mut self, u_current: &[T]) -> FunctionCallResult { + let mut cost_u_half_step = T::zero(); // Compute the cost at the half step (self.problem.cost)(&self.cache.u_half_step, &mut cost_u_half_step)?; - debug_assert!(matrix_operations::is_finite(&[self.cache.cost_value])); + if !matrix_operations::is_finite(&[self.cache.cost_value, cost_u_half_step]) { + return Err(SolverError::NotFiniteComputation( + "cost evaluation returned a non-finite value during Lipschitz estimation", + )); + } let mut it_lipschitz_search = 0; while cost_u_half_step > self.lipschitz_check_rhs() && it_lipschitz_search < MAX_LIPSCHITZ_UPDATE_ITERATIONS - && self.cache.lipschitz_constant < MAX_LIPSCHITZ_CONSTANT + && self.cache.lipschitz_constant < max_lipschitz_constant() { self.cache.lbfgs.reset(); // invalidate the L-BFGS buffer // update L, sigma and gamma... - self.cache.lipschitz_constant *= 2.; - self.cache.gamma /= 2.; + self.cache.lipschitz_constant = self.cache.lipschitz_constant * cast::(2.0); + self.cache.gamma = self.cache.gamma / cast::(2.0); // recompute the half step... 
self.gradient_step(u_current); // updates self.cache.gradient_step - self.half_step(); // updates self.cache.u_half_step + self.half_step()?; // updates self.cache.u_half_step // recompute the cost at the half step // update `cost_u_half_step` (self.problem.cost)(&self.cache.u_half_step, &mut cost_u_half_step)?; + if !cost_u_half_step.is_finite() { + return Err(SolverError::NotFiniteComputation( + "half-step cost became non-finite during Lipschitz backtracking", + )); + } // recompute the FPR and the square of its norm self.compute_fpr(u_current); it_lipschitz_search += 1; } - self.cache.sigma = (1.0 - GAMMA_L_COEFF) / (4.0 * self.cache.gamma); + self.cache.sigma = (T::one() - gamma_l_coeff::()) / (cast::(4.0) * self.cache.gamma); Ok(()) } /// Computes u_plus ← u - gamma * (1-tau) * fpr - tau * dir, - fn compute_u_plus(&mut self, u: &[f64]) { + fn compute_u_plus(&mut self, u: &[T]) { let cache = &mut self.cache; let _gamma = cache.gamma; let tau = cache.tau; - let temp_ = 1.0 - tau; + let temp_ = T::one() - tau; cache .u_plus .iter_mut() @@ -230,21 +262,23 @@ where /// Computes the RHS of the linesearch condition fn compute_rhs_ls(&mut self) { let cache = &mut self.cache; + let half = cast::(0.5); // dist squared ← norm(gradient step - u half step)^2 // rhs_ls ← f - (gamma/2) * norm(gradf)^2 // + 0.5 * dist squared / gamma // - sigma * norm_gamma_fpr^2 - let fbe = cache.cost_value - 0.5 * cache.gamma * cache.gradient_u_norm_sq - + 0.5 * cache.gradient_step_u_half_step_diff_norm_sq / cache.gamma; + let fbe = cache.cost_value - half * cache.gamma * cache.gradient_u_norm_sq + + half * cache.gradient_step_u_half_step_diff_norm_sq / cache.gamma; let sigma_fpr_sq = cache.sigma * cache.norm_gamma_fpr * cache.norm_gamma_fpr; cache.rhs_ls = fbe - sigma_fpr_sq; } /// Computes the left hand side of the line search condition and compares it with the RHS; /// returns `true` if and only if lhs > rhs (when the line search should continue) - fn line_search_condition(&mut self, u: 
&[f64]) -> Result { + fn line_search_condition(&mut self, u: &[T]) -> Result { let gamma = self.cache.gamma; + let half = cast::(0.5); // u_plus ← u - (1-tau)*gamma_fpr + tau*direction self.compute_u_plus(u); @@ -254,42 +288,56 @@ where // point `u_plus` (self.problem.cost)(&self.cache.u_plus, &mut self.cache.cost_value)?; (self.problem.gradf)(&self.cache.u_plus, &mut self.cache.gradient_u)?; + if !self.cache.cost_value.is_finite() + || !matrix_operations::is_finite(&self.cache.gradient_u) + { + return Err(SolverError::NotFiniteComputation( + "line-search candidate produced a non-finite cost or gradient", + )); + } self.cache_gradient_norm(); self.gradient_step_uplus(); // gradient_step ← u_plus - gamma * gradient_u - self.half_step(); // u_half_step ← project(gradient_step) + self.half_step()?; // u_half_step ← project(gradient_step) // Update the LHS of the line search condition - self.cache.lhs_ls = self.cache.cost_value - 0.5 * gamma * self.cache.gradient_u_norm_sq - + 0.5 * self.cache.gradient_step_u_half_step_diff_norm_sq / self.cache.gamma; + self.cache.lhs_ls = self.cache.cost_value - half * gamma * self.cache.gradient_u_norm_sq + + half * self.cache.gradient_step_u_half_step_diff_norm_sq / self.cache.gamma; Ok(self.cache.lhs_ls > self.cache.rhs_ls) } /// Update without performing a line search; this is executed at the first iteration - fn update_no_linesearch(&mut self, u_current: &mut [f64]) -> FunctionCallResult { + fn update_no_linesearch(&mut self, u_current: &mut [T]) -> FunctionCallResult { u_current.copy_from_slice(&self.cache.u_half_step); // set u_current ← u_half_step (self.problem.cost)(u_current, &mut self.cache.cost_value)?; // cost value (self.problem.gradf)(u_current, &mut self.cache.gradient_u)?; // compute gradient + if !self.cache.cost_value.is_finite() + || !matrix_operations::is_finite(&self.cache.gradient_u) + { + return Err(SolverError::NotFiniteComputation( + "first PANOC iterate produced a non-finite cost or gradient", + )); + } 
self.cache_gradient_norm(); self.gradient_step(u_current); // updated self.cache.gradient_step - self.half_step(); // updates self.cache.u_half_step + self.half_step()?; // updates self.cache.u_half_step Ok(()) } /// Performs a line search to select tau - fn linesearch(&mut self, u_current: &mut [f64]) -> FunctionCallResult { + fn linesearch(&mut self, u_current: &mut [T]) -> FunctionCallResult { // perform line search self.compute_rhs_ls(); // compute the right hand side of the line search - self.cache.tau = 1.0; // initialise tau ← 1.0 + self.cache.tau = T::one(); // initialise tau ← 1.0 let mut num_ls_iters = 0; while self.line_search_condition(u_current)? && num_ls_iters < MAX_LINESEARCH_ITERATIONS { - self.cache.tau /= 2.0; + self.cache.tau = self.cache.tau / cast::(2.0); num_ls_iters += 1; } if num_ls_iters == MAX_LINESEARCH_ITERATIONS { - self.cache.tau = 0.; + self.cache.tau = T::zero(); u_current.copy_from_slice(&self.cache.u_half_step); } // Sets `u_current` to `u_plus` (u_current ← u_plus) @@ -299,20 +347,26 @@ where } /// Compute the cost value at the best cached feasible half step. 
- pub(crate) fn cost_value_at_best_half_step(&mut self) -> Result { - let mut cost = 0.0; + pub(crate) fn cost_value_at_best_half_step(&mut self) -> Result { + let mut cost = T::zero(); (self.problem.cost)(&self.cache.best_u_half_step, &mut cost)?; + if !cost.is_finite() { + return Err(SolverError::NotFiniteComputation( + "best cached half-step cost is non-finite", + )); + } Ok(cost) } } /// Implementation of the `step` and `init` methods of [trait.AlgorithmEngine.html] -impl<'a, GradientType, ConstraintType, CostType> AlgorithmEngine - for PANOCEngine<'a, GradientType, ConstraintType, CostType> +impl<'a, GradientType, ConstraintType, CostType, T> AlgorithmEngine + for PANOCEngine<'a, GradientType, ConstraintType, CostType, T> where - GradientType: Fn(&[f64], &mut [f64]) -> FunctionCallResult, - CostType: Fn(&[f64], &mut f64) -> FunctionCallResult, - ConstraintType: constraints::Constraint, + T: Float + LbfgsPrecision + Sum, + GradientType: Fn(&[T], &mut [T]) -> FunctionCallResult, + CostType: Fn(&[T], &mut T) -> FunctionCallResult, + ConstraintType: constraints::Constraint, { /// PANOC step /// @@ -324,7 +378,7 @@ where /// iterate of PANOC /// /// - fn step(&mut self, u_current: &mut [f64]) -> Result { + fn step(&mut self, u_current: &mut [T]) -> Result { // caches the previous gradient vector (copies df to df_previous) self.cache.cache_previous_gradient(); @@ -358,15 +412,23 @@ where /// gradient of the cost at the initial point, initial estimates for `gamma` and `sigma`, /// a gradient step and a half step (projected gradient step) /// - fn init(&mut self, u_current: &mut [f64]) -> FunctionCallResult { + fn init(&mut self, u_current: &mut [T]) -> FunctionCallResult { self.cache.reset(); (self.problem.cost)(u_current, &mut self.cache.cost_value)?; // cost value self.estimate_loc_lip(u_current)?; // computes the gradient as well! 
(self.cache.gradient_u) + if !self.cache.cost_value.is_finite() + || !matrix_operations::is_finite(&self.cache.gradient_u) + { + return Err(SolverError::NotFiniteComputation( + "initial PANOC cost or gradient is non-finite", + )); + } self.cache_gradient_norm(); - self.cache.gamma = GAMMA_L_COEFF / f64::max(self.cache.lipschitz_constant, MIN_L_ESTIMATE); - self.cache.sigma = (1.0 - GAMMA_L_COEFF) / (4.0 * self.cache.gamma); + self.cache.gamma = + gamma_l_coeff::() / self.cache.lipschitz_constant.max(min_l_estimate()); + self.cache.sigma = (T::one() - gamma_l_coeff::()) / (cast::(4.0) * self.cache.gamma); self.gradient_step(u_current); // updated self.cache.gradient_step - self.half_step(); // updates self.cache.u_half_step + self.half_step()?; // updates self.cache.u_half_step Ok(()) } @@ -382,7 +444,7 @@ mod tests { use crate::constraints; use crate::core::panoc::panoc_engine::PANOCEngine; use crate::core::panoc::*; - use crate::core::Problem; + use crate::core::{AlgorithmEngine, Problem}; use crate::mocks; use crate::FunctionCallResult; @@ -478,7 +540,7 @@ mod tests { .gradient_step .copy_from_slice(&[40., 50.]); - panoc_engine.half_step(); // u_half_step ← projection(gradient_step) + panoc_engine.half_step().unwrap(); // u_half_step ← projection(gradient_step) unit_test_utils::assert_nearly_equal_array( &[0.312_347_523_777_212, 0.390_434_404_721_515], @@ -607,4 +669,43 @@ mod tests { "update_lipschitz_constant should only evaluate the half-step cost" ); } + + #[test] + fn t_panoc_init_f32() { + let bounds = constraints::NoConstraints::new(); + let problem = Problem::new( + &bounds, + |u: &[f32], grad: &mut [f32]| -> FunctionCallResult { + grad.copy_from_slice(u); + Ok(()) + }, + |u: &[f32], c: &mut f32| -> FunctionCallResult { + *c = 0.5_f32 * (u[0] * u[0] + u[1] * u[1]); + Ok(()) + }, + ); + let mut panoc_cache = PANOCCache::::new(2, 1e-6_f32, 3); + let mut panoc_engine = PANOCEngine::new(problem, &mut panoc_cache); + let mut u = [1_000.0_f32, 2_000.0_f32]; + + 
panoc_engine.init(&mut u).unwrap(); + + assert!(panoc_engine.cache.lipschitz_constant.is_finite()); + assert!(panoc_engine.cache.lipschitz_constant > 0.0_f32); + let expected_gamma = 0.95_f32 / panoc_engine.cache.lipschitz_constant; + assert!((panoc_engine.cache.gamma - expected_gamma).abs() < 1e-6); + unit_test_utils::assert_nearly_equal_array( + &[1_000.0_f32, 2_000.0_f32], + &panoc_engine.cache.gradient_u, + 1e-5, + 1e-6, + "gradient at u", + ); + let expected_half_step = [ + (1.0_f32 - panoc_engine.cache.gamma) * 1_000.0_f32, + (1.0_f32 - panoc_engine.cache.gamma) * 2_000.0_f32, + ]; + assert!((panoc_engine.cache.u_half_step[0] - expected_half_step[0]).abs() < 5e-3); + assert!((panoc_engine.cache.u_half_step[1] - expected_half_step[1]).abs() < 5e-3); + } } diff --git a/src/core/panoc/panoc_optimizer.rs b/rust/src/core/panoc/panoc_optimizer.rs similarity index 78% rename from src/core/panoc/panoc_optimizer.rs rename to rust/src/core/panoc/panoc_optimizer.rs index 98f87667..b8525ef2 100644 --- a/src/core/panoc/panoc_optimizer.rs +++ b/rust/src/core/panoc/panoc_optimizer.rs @@ -8,6 +8,9 @@ use crate::{ }, matrix_operations, FunctionCallResult, SolverError, }; +use lbfgs::LbfgsPrecision; +use num::Float; +use std::iter::Sum; use std::time; const MAX_ITER: usize = 100_usize; @@ -15,23 +18,25 @@ const MAX_ITER: usize = 100_usize; /// Optimizer using the PANOC algorithm /// /// -pub struct PANOCOptimizer<'a, GradientType, ConstraintType, CostType> +pub struct PANOCOptimizer<'a, GradientType, ConstraintType, CostType, T = f64> where - GradientType: Fn(&[f64], &mut [f64]) -> FunctionCallResult, - CostType: Fn(&[f64], &mut f64) -> FunctionCallResult, - ConstraintType: constraints::Constraint, + T: Float + LbfgsPrecision + Sum, + GradientType: Fn(&[T], &mut [T]) -> FunctionCallResult, + CostType: Fn(&[T], &mut T) -> FunctionCallResult, + ConstraintType: constraints::Constraint, { - panoc_engine: PANOCEngine<'a, GradientType, ConstraintType, CostType>, + panoc_engine: 
PANOCEngine<'a, GradientType, ConstraintType, CostType, T>, max_iter: usize, max_duration: Option, } -impl<'a, GradientType, ConstraintType, CostType> - PANOCOptimizer<'a, GradientType, ConstraintType, CostType> +impl<'a, GradientType, ConstraintType, CostType, T> + PANOCOptimizer<'a, GradientType, ConstraintType, CostType, T> where - GradientType: Fn(&[f64], &mut [f64]) -> FunctionCallResult, - CostType: Fn(&[f64], &mut f64) -> FunctionCallResult, - ConstraintType: constraints::Constraint, + T: Float + LbfgsPrecision + Sum, + GradientType: Fn(&[T], &mut [T]) -> FunctionCallResult, + CostType: Fn(&[T], &mut T) -> FunctionCallResult, + ConstraintType: constraints::Constraint, { /// Constructor of `PANOCOptimizer` /// @@ -43,9 +48,10 @@ where /// ## Panic /// /// Does not panic + #[must_use] pub fn new( - problem: Problem<'a, GradientType, ConstraintType, CostType>, - cache: &'a mut PANOCCache, + problem: Problem<'a, GradientType, ConstraintType, CostType, T>, + cache: &'a mut PANOCCache, ) -> Self { PANOCOptimizer { panoc_engine: PANOCEngine::new(problem, cache), @@ -62,8 +68,9 @@ where /// ## Panics /// /// The method panics if the specified tolerance is not positive - pub fn with_tolerance(self, tolerance: f64) -> Self { - assert!(tolerance > 0.0, "tolerance must be larger than 0"); + #[must_use] + pub fn with_tolerance(self, tolerance: T) -> Self { + assert!(tolerance > T::zero(), "tolerance must be larger than 0"); self.panoc_engine.cache.tolerance = tolerance; self @@ -90,8 +97,12 @@ where /// The method panics if the provided value of the AKKT-specific tolerance is /// not positive. 
/// - pub fn with_akkt_tolerance(self, akkt_tolerance: f64) -> Self { - assert!(akkt_tolerance > 0.0, "akkt_tolerance must be positive"); + #[must_use] + pub fn with_akkt_tolerance(self, akkt_tolerance: T) -> Self { + assert!( + akkt_tolerance > T::zero(), + "akkt_tolerance must be positive" + ); self.panoc_engine.cache.set_akkt_tolerance(akkt_tolerance); self } @@ -101,6 +112,7 @@ where /// ## Panics /// /// Panics if the provided number of iterations is equal to zero + #[must_use] pub fn with_max_iter(mut self, max_iter: usize) -> Self { assert!(max_iter > 0, "max_iter must be larger than 0"); @@ -109,21 +121,15 @@ where } /// Sets the maximum solution time, useful in real-time applications + #[must_use] pub fn with_max_duration(mut self, max_duation: time::Duration) -> Self { self.max_duration = Some(max_duation); self } -} -impl<'life, GradientType, ConstraintType, CostType> Optimizer - for PANOCOptimizer<'life, GradientType, ConstraintType, CostType> -where - GradientType: Fn(&[f64], &mut [f64]) -> FunctionCallResult + 'life, - CostType: Fn(&[f64], &mut f64) -> FunctionCallResult, - ConstraintType: constraints::Constraint + 'life, -{ - fn solve(&mut self, u: &mut [f64]) -> Result { - let now = instant::Instant::now(); + /// Solves the optimization problem for decision variables of scalar type `T`. 
+ pub fn solve(&mut self, u: &mut [T]) -> Result, SolverError> { + let now = web_time::Instant::now(); /* * Initialise [call panoc_engine.init()] @@ -159,7 +165,9 @@ where // check for possible NaN/inf if !matrix_operations::is_finite(u) { - return Err(SolverError::NotFiniteComputation); + return Err(SolverError::NotFiniteComputation( + "final PANOC iterate contains a non-finite value", + )); } // exit status @@ -188,6 +196,19 @@ where } } +impl<'life, GradientType, ConstraintType, CostType, T> Optimizer + for PANOCOptimizer<'life, GradientType, ConstraintType, CostType, T> +where + T: Float + LbfgsPrecision + Sum, + GradientType: Fn(&[T], &mut [T]) -> FunctionCallResult + 'life, + CostType: Fn(&[T], &mut T) -> FunctionCallResult, + ConstraintType: constraints::Constraint + 'life, +{ + fn solve(&mut self, u: &mut [T]) -> Result, SolverError> { + PANOCOptimizer::solve(self, u) + } +} + /* --------------------------------------------------------------------------------------------- */ /* TESTS */ /* --------------------------------------------------------------------------------------------- */ @@ -225,7 +246,7 @@ mod tests { let mut panoc_cache = PANOCCache::new(n_dimension, tolerance, lbfgs_memory); let problem = Problem::new(&bounds, cost_gradient, cost_function); let mut panoc = PANOCOptimizer::new(problem, &mut panoc_cache).with_max_iter(max_iters); - let now = instant::Instant::now(); + let now = web_time::Instant::now(); let status = panoc.solve(&mut u_solution).unwrap(); println!("{} iterations", status.iterations()); @@ -239,7 +260,7 @@ mod tests { /* CHECK FEASIBILITY */ let mut u_project = [0.0; 2]; u_project.copy_from_slice(&u_solution); - bounds.project(&mut u_project); + bounds.project(&mut u_project).unwrap(); unit_test_utils::assert_nearly_equal_array( &u_solution, &u_project, @@ -249,6 +270,44 @@ mod tests { ); } + #[test] + fn t_panoc_optimizer_rosenbrock_f32() { + let tolerance = 1e-4_f32; + let a_param = 1.0_f32; + let b_param = 200.0_f32; + let 
n_dimension = 2; + let lbfgs_memory = 8; + let max_iters = 120; + let mut u_solution = [-1.5_f32, 0.9_f32]; + + let cost_gradient = |u: &[f32], grad: &mut [f32]| -> FunctionCallResult { + mocks::rosenbrock_grad(a_param, b_param, u, grad); + Ok(()) + }; + let cost_function = |u: &[f32], c: &mut f32| -> FunctionCallResult { + *c = mocks::rosenbrock_cost(a_param, b_param, u); + Ok(()) + }; + + let radius = 2.0_f32; + let bounds = constraints::Ball2::new(None, radius); + let mut panoc_cache = PANOCCache::::new(n_dimension, tolerance, lbfgs_memory); + let problem = Problem::new(&bounds, cost_gradient, cost_function); + let mut panoc = PANOCOptimizer::new(problem, &mut panoc_cache).with_max_iter(max_iters); + let status = panoc.solve(&mut u_solution).unwrap(); + + assert_eq!(max_iters, panoc.max_iter); + assert!(status.has_converged()); + assert!(status.iterations() < max_iters); + assert!(status.norm_fpr() < tolerance); + + let mut u_project = [0.0_f32; 2]; + u_project.copy_from_slice(&u_solution); + bounds.project(&mut u_project).unwrap(); + assert!((u_solution[0] - u_project[0]).abs() < 1e-5_f32); + assert!((u_solution[1] - u_project[1]).abs() < 1e-5_f32); + } + #[test] fn t_panoc_in_loop() { /* USER PARAMETERS */ diff --git a/src/core/panoc/tests.rs b/rust/src/core/panoc/tests.rs similarity index 96% rename from src/core/panoc/tests.rs rename to rust/src/core/panoc/tests.rs index afd89ef3..e94200e3 100644 --- a/src/core/panoc/tests.rs +++ b/rust/src/core/panoc/tests.rs @@ -76,7 +76,7 @@ fn t_test_panoc_basic() { } println!("final |fpr| = {}", panoc_engine.cache.norm_gamma_fpr); assert!(panoc_engine.cache.norm_gamma_fpr <= tolerance); - unit_test_utils::assert_nearly_equal_array(&u, &mocks::SOLUTION_A, 1e-6, 1e-8, ""); + unit_test_utils::assert_nearly_equal_array(&u, &mocks::solution_a(), 1e-6, 1e-8, ""); } #[test] @@ -111,7 +111,7 @@ fn t_test_panoc_hard() { println!("\nsol = {:?}", u); assert!(panoc_engine.cache.norm_gamma_fpr <= tolerance_fpr); - 
unit_test_utils::assert_nearly_equal_array(&u, &mocks::SOLUTION_HARD, 1e-6, 1e-8, ""); + unit_test_utils::assert_nearly_equal_array(&u, &mocks::solution_hard(), 1e-6, 1e-8, ""); } #[test] @@ -167,7 +167,7 @@ fn t_zero_gamma_l() { let mut panoc_engine = PANOCOptimizer::new(problem, &mut panoc_cache).with_max_iter(100); // Invoke the solver. - let _status = panoc_engine.solve(u); + panoc_engine.solve(u).unwrap(); println!("norm_gamma_fpr = {}", panoc_cache.norm_gamma_fpr); println!("u = {:?}", u); println!("iters = {}", panoc_cache.iteration); @@ -210,7 +210,7 @@ fn t_zero_gamma_huber() { let mut panoc_engine = PANOCOptimizer::new(problem, &mut panoc_cache).with_max_iter(100); // Invoke the solver. - let _status = panoc_engine.solve(u); + panoc_engine.solve(u).unwrap(); println!("norm_gamma_fpr = {}", panoc_cache.norm_gamma_fpr); println!("u = {:?}", u); println!("iters = {}", panoc_cache.iteration); diff --git a/src/core/problem.rs b/rust/src/core/problem.rs similarity index 67% rename from src/core/problem.rs rename to rust/src/core/problem.rs index c6e61dae..c2ca372f 100644 --- a/src/core/problem.rs +++ b/rust/src/core/problem.rs @@ -7,6 +7,7 @@ //! C (and then invoked from Rust via an interface such as icasadi). //! 
use crate::{constraints, FunctionCallResult}; +use std::marker::PhantomData; /// Definition of an optimisation problem /// @@ -15,11 +16,11 @@ use crate::{constraints, FunctionCallResult}; /// - the cost function /// - the set of constraints, which is described by implementations of /// [Constraint](../../panoc_rs/constraints/trait.Constraint.html) -pub struct Problem<'a, GradientType, ConstraintType, CostType> +pub struct Problem<'a, GradientType, ConstraintType, CostType, T = f64> where - GradientType: Fn(&[f64], &mut [f64]) -> FunctionCallResult, - CostType: Fn(&[f64], &mut f64) -> FunctionCallResult, - ConstraintType: constraints::Constraint, + GradientType: Fn(&[T], &mut [T]) -> FunctionCallResult, + CostType: Fn(&[T], &mut T) -> FunctionCallResult, + ConstraintType: constraints::Constraint, { /// constraints pub(crate) constraints: &'a ConstraintType, @@ -27,13 +28,15 @@ where pub(crate) gradf: GradientType, /// cost function pub(crate) cost: CostType, + marker: PhantomData, } -impl<'a, GradientType, ConstraintType, CostType> Problem<'a, GradientType, ConstraintType, CostType> +impl<'a, GradientType, ConstraintType, CostType, T> + Problem<'a, GradientType, ConstraintType, CostType, T> where - GradientType: Fn(&[f64], &mut [f64]) -> FunctionCallResult, - CostType: Fn(&[f64], &mut f64) -> FunctionCallResult, - ConstraintType: constraints::Constraint, + GradientType: Fn(&[T], &mut [T]) -> FunctionCallResult, + CostType: Fn(&[T], &mut T) -> FunctionCallResult, + ConstraintType: constraints::Constraint, { /// Construct a new instance of an optimisation problem /// @@ -50,11 +53,12 @@ where constraints: &'a ConstraintType, cost_gradient: GradientType, cost: CostType, - ) -> Problem<'a, GradientType, ConstraintType, CostType> { + ) -> Problem<'a, GradientType, ConstraintType, CostType, T> { Problem { constraints, gradf: cost_gradient, cost, + marker: PhantomData, } } } diff --git a/src/core/solver_status.rs b/rust/src/core/solver_status.rs similarity index 87% 
rename from src/core/solver_status.rs rename to rust/src/core/solver_status.rs index e4f7a138..8c75201e 100644 --- a/src/core/solver_status.rs +++ b/rust/src/core/solver_status.rs @@ -2,6 +2,7 @@ //! //! use crate::core::ExitStatus; +use num::Float; use std::time; /// Solver status @@ -10,7 +11,10 @@ use std::time; /// `SolverStatus` are returned by optimizers. /// #[derive(Debug, PartialEq, Copy, Clone)] -pub struct SolverStatus { +pub struct SolverStatus +where + T: Float, +{ /// exit status of the algorithm exit_status: ExitStatus, /// number of iterations for convergence @@ -18,12 +22,12 @@ pub struct SolverStatus { /// time it took to solve solve_time: time::Duration, /// norm of the fixed-point residual (FPR) - fpr_norm: f64, + fpr_norm: T, /// cost value at the candidate solution - cost_value: f64, + cost_value: T, } -impl SolverStatus { +impl SolverStatus { /// Constructs a new instance of SolverStatus /// /// ## Arguments @@ -39,9 +43,9 @@ impl SolverStatus { exit_status: ExitStatus, num_iter: usize, solve_time: time::Duration, - fpr_norm: f64, - cost_value: f64, - ) -> SolverStatus { + fpr_norm: T, + cost_value: T, + ) -> SolverStatus { SolverStatus { exit_status, num_iter, @@ -67,12 +71,12 @@ impl SolverStatus { } /// norm of the fixed point residual - pub fn norm_fpr(&self) -> f64 { + pub fn norm_fpr(&self) -> T { self.fpr_norm } /// value of the cost at the solution - pub fn cost_value(&self) -> f64 { + pub fn cost_value(&self) -> T { self.cost_value } diff --git a/src/lib.rs b/rust/src/lib.rs similarity index 51% rename from src/lib.rs rename to rust/src/lib.rs index e0f61bb0..d122b87a 100644 --- a/src/lib.rs +++ b/rust/src/lib.rs @@ -40,16 +40,76 @@ extern crate num; -/// Exceptions/Errors that may arise while solving a problem +use std::fmt; + +/// Exceptions/errors that may arise while solving a problem. 
+/// +/// In the Rust API, numerical failures are generally reported through +/// `Result<_, SolverError>`: +/// +/// - user-provided callbacks may return [`SolverError::Cost`], +/// - set projections may return [`SolverError::ProjectionFailed`], +/// - non-finite intermediate values are reported via +/// [`SolverError::NotFiniteComputation`], and +/// - internal numerical/kernel issues are reported through +/// [`SolverError::LinearAlgebraFailure`] or +/// [`SolverError::InvalidProblemState`]. +/// +/// By contrast, blatant API misuse such as inconsistent slice dimensions may +/// still panic in some low-level routines. #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum SolverError { /// If the gradient or cost function cannot be evaluated - Cost, + Cost(&'static str), /// Computation failed and NaN/Infinite value was obtained - NotFiniteComputation, + NotFiniteComputation(&'static str), + /// A projection could not be computed numerically + ProjectionFailed(&'static str), + /// A linear algebra operation failed + LinearAlgebraFailure(&'static str), + /// The solver reached an unexpected internal state + InvalidProblemState(&'static str), +} + +impl fmt::Display for SolverError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + SolverError::Cost(reason) => { + write!(f, "cost or gradient evaluation failed: {}", reason) + } + SolverError::NotFiniteComputation(reason) => { + write!(f, "non-finite computation: {}", reason) + } + SolverError::ProjectionFailed(reason) => write!(f, "projection failed: {}", reason), + SolverError::LinearAlgebraFailure(reason) => { + write!(f, "linear algebra failure: {}", reason) + } + SolverError::InvalidProblemState(reason) => { + write!(f, "invalid internal problem state: {}", reason) + } + } + } +} + +impl std::error::Error for SolverError {} + +impl From for SolverError { + fn from(_: crate::matrix_operations::MatrixError) -> Self { + SolverError::LinearAlgebraFailure("matrix operation failed") + } +} 
+ +impl From for SolverError { + fn from(_: crate::cholesky_factorizer::CholeskyError) -> Self { + SolverError::LinearAlgebraFailure("Cholesky factorization or solve failed") + } } -/// Result of a function call (status) +/// Standard result type used by user callbacks and internal projection/codegen routines. +/// +/// A successful call returns `Ok(())`. Failures should be reported with a +/// descriptive [`SolverError`] so optimizers can propagate the reason to the +/// caller. pub type FunctionCallResult = Result<(), SolverError>; pub mod alm; @@ -58,6 +118,7 @@ pub mod constraints; pub mod core; pub mod lipschitz_estimator; pub mod matrix_operations; +mod numeric; pub use crate::cholesky_factorizer::{CholeskyError, CholeskyFactorizer}; pub use crate::core::fbs; diff --git a/src/lipschitz_estimator.rs b/rust/src/lipschitz_estimator.rs similarity index 74% rename from src/lipschitz_estimator.rs rename to rust/src/lipschitz_estimator.rs index d8657062..f06fc942 100644 --- a/src/lipschitz_estimator.rs +++ b/rust/src/lipschitz_estimator.rs @@ -5,6 +5,10 @@ //! //! Functions are provided as closures. //! +//! The estimator is generic over a scalar type `T` satisfying [`num::Float`]. +//! In practice this means it can be used with floating-point slices such as +//! `&[f64]` or `&[f32]`. The examples below use `f64` for simplicity. +//! //! # Method //! //! This method computes a numerical approximation of the norm of the directional @@ -37,40 +41,60 @@ //! ``` //! 
-use crate::{matrix_operations, SolverError}; +use crate::{numeric::cast, SolverError}; +use num::Float; + +fn default_delta() -> T { + cast::(1e-6) +} + +fn default_epsilon() -> T { + cast::(1e-6) +} -const DEFAULT_DELTA: f64 = 1e-6; -const DEFAULT_EPSILON: f64 = 1e-6; +fn norm2(a: &[T]) -> T { + a.iter().fold(T::zero(), |sum, &x| sum + x * x).sqrt() +} -/// Structure for the computation of estimates of the Lipschitz constant of mappings -pub struct LipschitzEstimator<'a, F> +/// Structure for the computation of estimates of the Lipschitz constant of mappings. +/// +/// The scalar type `T` is generic and must implement [`num::Float`]. This allows +/// the estimator to operate on either `f64`, `f32`, or another compatible float type. +pub struct LipschitzEstimator<'a, T, F> where - F: Fn(&[f64], &mut [f64]) -> Result<(), SolverError>, + T: Float, + F: Fn(&[T], &mut [T]) -> Result<(), SolverError>, { /// `u_decision_var` is the point where the Lipschitz constant is estimated - u_decision_var: &'a mut [f64], + u_decision_var: &'a mut [T], /// internally allocated workspace memory - workspace: Vec, + workspace: Vec, /// `function_value_at_u` a vector which is updated with the /// value of the given function, `F`, at `u`; the provided value /// of `function_value_at_u_p` is not used - function_value_at_u: &'a mut [f64], + function_value_at_u: &'a mut [T], /// /// Function whose Lipschitz constant is to be approximated /// /// For example, in optimization, this is the gradient (Jacobian matrix) /// of the cost function (this is a closure) function: &'a F, - epsilon_lip: f64, - delta_lip: f64, + epsilon_lip: T, + delta_lip: T, } -impl<'a, F> LipschitzEstimator<'a, F> +impl<'a, T, F> LipschitzEstimator<'a, T, F> where - F: Fn(&[f64], &mut [f64]) -> Result<(), SolverError>, + T: Float, + F: Fn(&[T], &mut [T]) -> Result<(), SolverError>, { /// Creates a new instance of this structure /// + /// The type parameter `T` is inferred from `u_`, `f_`, and `function_value_`. 
+ /// For example, if those use `f64`, then this constructs a + /// `LipschitzEstimator<'_, f64, _>`; if they use `f32`, it constructs a + /// `LipschitzEstimator<'_, f32, _>`. + /// /// # Arguments /// /// - `u_` On entry: point where the Lipschitz constant is estimated, @@ -88,18 +112,18 @@ where /// /// pub fn new( - u_: &'a mut [f64], + u_: &'a mut [T], f_: &'a F, - function_value_: &'a mut [f64], - ) -> LipschitzEstimator<'a, F> { + function_value_: &'a mut [T], + ) -> LipschitzEstimator<'a, T, F> { let n: usize = u_.len(); LipschitzEstimator { u_decision_var: u_, - workspace: vec![0.0_f64; n], + workspace: vec![T::zero(); n], function_value_at_u: function_value_, function: f_, - epsilon_lip: DEFAULT_EPSILON, - delta_lip: DEFAULT_DELTA, + epsilon_lip: default_epsilon(), + delta_lip: default_delta(), } } @@ -108,13 +132,15 @@ where /// /// # Arguments /// - /// - `delta`: parameter delta (the default value is `1e-6`) + /// - `delta`: parameter delta of type `T` (the default value is `1e-6` + /// converted to `T`) /// /// # Panics /// The method will panic if `delta` is non positive /// - pub fn with_delta(mut self, delta: f64) -> Self { - assert!(delta > 0.0); + #[must_use] + pub fn with_delta(mut self, delta: T) -> Self { + assert!(delta > T::zero()); self.delta_lip = delta; self } @@ -124,13 +150,15 @@ where /// /// # Arguments /// - /// - `epsilon`: parameter epsilon (the default value is `1e-6`) + /// - `epsilon`: parameter epsilon of type `T` (the default value is `1e-6` + /// converted to `T`) /// /// # Panics /// The method will panic if `epsilon` is non positive /// - pub fn with_epsilon(mut self, epsilon: f64) -> Self { - assert!(epsilon > 0.0); + #[must_use] + pub fn with_epsilon(mut self, epsilon: T) -> Self { + assert!(epsilon > T::zero()); self.epsilon_lip = epsilon; self } @@ -139,11 +167,11 @@ where /// /// During the computation of the local lipschitz constant at `u`, /// the value of the given function at `u` is computed and stored - /// 
internally. This method returns a pointer to that vector. + /// internally. This method returns a pointer to that vector as a slice of `T`. /// /// If `estimate_local_lipschitz` has not been computed, the result /// will point to a zero vector. - pub fn get_function_value(&self) -> &[f64] { + pub fn get_function_value(&self) -> &[T] { self.function_value_at_u } @@ -151,6 +179,7 @@ where /// Evaluates a local Lipschitz constant of a given function /// /// Functions are closures of type `F` as shown here. + /// The returned estimate has the same scalar type `T` as the input data. /// /// # Returns /// @@ -180,7 +209,7 @@ where /// No rust-side panics, unless the C function which is called via this interface /// fails. /// - pub fn estimate_local_lipschitz(&mut self) -> Result { + pub fn estimate_local_lipschitz(&mut self) -> Result { // function_value = gradient(u, p) (self.function)(self.u_decision_var, self.function_value_at_u)?; let epsilon_lip = self.epsilon_lip; @@ -197,14 +226,14 @@ where delta_lip } }); - let norm_h = matrix_operations::norm2(&self.workspace); + let norm_h = norm2(&self.workspace); // u += workspace // u = u + h self.u_decision_var .iter_mut() .zip(self.workspace.iter()) - .for_each(|(out, a)| *out += *a); + .for_each(|(out, a)| *out = *out + *a); // workspace = F(u + h) (self.function)(self.u_decision_var, &mut self.workspace)?; @@ -213,9 +242,9 @@ where self.workspace .iter_mut() .zip(self.function_value_at_u.iter()) - .for_each(|(out, a)| *out -= *a); + .for_each(|(out, a)| *out = *out - *a); - let norm_workspace = matrix_operations::norm2(&self.workspace); + let norm_workspace = norm2(&self.workspace); Ok(norm_workspace / norm_h) } } @@ -333,4 +362,25 @@ mod tests { "computed/actual gradient", ); } + + #[test] + fn t_test_lip_estimator_f32() { + let mut u = [1.0_f32, 2.0, 3.0]; + let mut function_value = [0.0_f32; 3]; + + let f = |u: &[f32], g: &mut [f32]| -> Result<(), SolverError> { + g[0] = 3.0 * u[0]; + g[1] = 2.0 * u[1]; + g[2] = 4.5; 
+ Ok(()) + }; + + let mut lip_estimator = LipschitzEstimator::new(&mut u, &f, &mut function_value) + .with_delta(1e-4_f32) + .with_epsilon(1e-4_f32); + let lip = lip_estimator.estimate_local_lipschitz().unwrap(); + + let expected = 5.0_f32 / 14.0_f32.sqrt(); + assert!((lip - expected).abs() < 1e-4); + } } diff --git a/src/matrix_operations.rs b/rust/src/matrix_operations.rs similarity index 100% rename from src/matrix_operations.rs rename to rust/src/matrix_operations.rs diff --git a/src/mocks.rs b/rust/src/mocks.rs similarity index 56% rename from src/mocks.rs rename to rust/src/mocks.rs index a051da21..41b2105c 100644 --- a/src/mocks.rs +++ b/rust/src/mocks.rs @@ -1,81 +1,102 @@ -use crate::{matrix_operations, SolverError}; +use crate::{matrix_operations, numeric::cast, SolverError}; +use num::Float; +use std::iter::Sum; +use std::ops::Mul; -pub const SOLUTION_A: [f64; 2] = [-0.148_959_718_255_77, 0.133_457_867_273_39]; -pub const SOLUTION_HARD: [f64; 3] = [ - -0.041_123_164_672_281, - -0.028_440_417_469_206, - 0.000_167_276_757_790, -]; +pub fn solution_a() -> [T; 2] { + [ + cast::(-0.148_959_718_255_77), + cast::(0.133_457_867_273_39), + ] +} + +pub fn solution_hard() -> [T; 3] { + [ + cast::(-0.041_123_164_672_281), + cast::(-0.028_440_417_469_206), + cast::(0.000_167_276_757_790), + ] +} -pub fn lipschitz_mock(u: &[f64], g: &mut [f64]) -> Result<(), SolverError> { - g[0] = 3.0 * u[0]; - g[1] = 2.0 * u[1]; - g[2] = 4.5; +pub fn lipschitz_mock(u: &[T], g: &mut [T]) -> Result<(), SolverError> { + g[0] = cast::(3.0) * u[0]; + g[1] = cast::(2.0) * u[1]; + g[2] = cast::(4.5); Ok(()) } -pub fn void_parameteric_cost(_u: &[f64], _p: &[f64], _cost: &mut f64) -> Result<(), SolverError> { +pub fn void_parameteric_cost( + _u: &[T], + _p: &[T], + _cost: &mut T, +) -> Result<(), SolverError> { Ok(()) } -pub fn void_parameteric_gradient( - _u: &[f64], - _p: &[f64], - _grad: &mut [f64], +pub fn void_parameteric_gradient( + _u: &[T], + _p: &[T], + _grad: &mut [T], ) -> 
Result<(), SolverError> { Ok(()) } -pub fn void_mapping(_u: &[f64], _result: &mut [f64]) -> Result<(), SolverError> { +pub fn void_mapping(_u: &[T], _result: &mut [T]) -> Result<(), SolverError> { Ok(()) } -pub fn void_cost(_u: &[f64], _cost: &mut f64) -> Result<(), SolverError> { +pub fn void_cost(_u: &[T], _cost: &mut T) -> Result<(), SolverError> { Ok(()) } -pub fn void_gradient(_u: &[f64], _grad: &mut [f64]) -> Result<(), SolverError> { +pub fn void_gradient(_u: &[T], _grad: &mut [T]) -> Result<(), SolverError> { Ok(()) } -pub fn my_cost(u: &[f64], cost: &mut f64) -> Result<(), SolverError> { - *cost = 0.5 * (u[0].powi(2) + 2. * u[1].powi(2) + 2.0 * u[0] * u[1]) + u[0] - u[1] + 3.0; +pub fn my_cost(u: &[T], cost: &mut T) -> Result<(), SolverError> { + *cost = cast::(0.5) + * (u[0].powi(2) + cast::(2.0) * u[1].powi(2) + cast::(2.0) * u[0] * u[1]) + + u[0] + - u[1] + + cast::(3.0); Ok(()) } -pub fn my_gradient(u: &[f64], grad: &mut [f64]) -> Result<(), SolverError> { - grad[0] = u[0] + u[1] + 1.0; - grad[1] = u[0] + 2. * u[1] - 1.0; +pub fn my_gradient(u: &[T], grad: &mut [T]) -> Result<(), SolverError> { + grad[0] = u[0] + u[1] + T::one(); + grad[1] = u[0] + cast::(2.0) * u[1] - T::one(); Ok(()) } -pub fn rosenbrock_cost(a: f64, b: f64, u: &[f64]) -> f64 { +pub fn rosenbrock_cost(a: T, b: T, u: &[T]) -> T { (a - u[0]).powi(2) + b * (u[1] - u[0].powi(2)).powi(2) } -pub fn rosenbrock_grad(a: f64, b: f64, u: &[f64], grad: &mut [f64]) { - grad[0] = 2.0 * u[0] - 2.0 * a - 4.0 * b * u[0] * (-u[0].powi(2) + u[1]); - grad[1] = b * (-2.0 * u[0].powi(2) + 2.0 * u[1]); +pub fn rosenbrock_grad(a: T, b: T, u: &[T], grad: &mut [T]) { + grad[0] = cast::(2.0) * u[0] + - cast::(2.0) * a + - cast::(4.0) * b * u[0] * (-u[0].powi(2) + u[1]); + grad[1] = b * (-cast::(2.0) * u[0].powi(2) + cast::(2.0) * u[1]); } -pub fn hard_quadratic_cost(u: &[f64], cost: &mut f64) -> Result<(), SolverError> { - *cost = (4. * u[0].powi(2)) / 2. - + 5.5 * u[1].powi(2) - + 500.5 * u[2].powi(2) - + 5. 
* u[0] * u[1] - + 25. * u[0] * u[2] - + 5. * u[1] * u[2] +pub fn hard_quadratic_cost(u: &[T], cost: &mut T) -> Result<(), SolverError> { + *cost = (cast::(4.0) * u[0].powi(2)) / cast::(2.0) + + cast::(5.5) * u[1].powi(2) + + cast::(500.5) * u[2].powi(2) + + cast::(5.0) * u[0] * u[1] + + cast::(25.0) * u[0] * u[2] + + cast::(5.0) * u[1] * u[2] + u[0] + u[1] + u[2]; Ok(()) } -pub fn hard_quadratic_gradient(u: &[f64], grad: &mut [f64]) -> Result<(), SolverError> { +pub fn hard_quadratic_gradient(u: &[T], grad: &mut [T]) -> Result<(), SolverError> { // norm(Hessian) = 1000.653 (Lipschitz gradient) - grad[0] = 4. * u[0] + 5. * u[1] + 25. * u[2] + 1.; - grad[1] = 5. * u[0] + 11. * u[1] + 5. * u[2] + 1.; - grad[2] = 25. * u[0] + 5. * u[1] + 1001. * u[2] + 1.; + grad[0] = cast::(4.0) * u[0] + cast::(5.0) * u[1] + cast::(25.0) * u[2] + T::one(); + grad[1] = cast::(5.0) * u[0] + cast::(11.0) * u[1] + cast::(5.0) * u[2] + T::one(); + grad[2] = cast::(25.0) * u[0] + cast::(5.0) * u[1] + cast::(1001.0) * u[2] + T::one(); Ok(()) } @@ -85,28 +106,27 @@ pub fn hard_quadratic_gradient(u: &[f64], grad: &mut [f64]) -> Result<(), Solver /// /// where `m` is the length of `xi`. 
It is assumed that the length of /// `u` is larger than the length of `xi` -pub fn psi_cost_dummy(u: &[f64], xi: &[f64], cost: &mut f64) -> Result<(), SolverError> { +pub fn psi_cost_dummy(u: &[T], xi: &[T], cost: &mut T) -> Result<(), SolverError> +where + T: Float + Sum + Mul, +{ let u_len = u.len(); let xi_len = xi.len(); assert!(u_len > xi_len); - let sum_u = u.iter().fold(0.0, |mut sum, ui| { - sum += ui; - sum - }); + let sum_u = u.iter().fold(T::zero(), |sum, ui| sum + *ui); // psi_cost = 0.5*SUM(ui^2) + xi[0] * sum_u - *cost = - 0.5 * u.iter().fold(0.0, |mut sum_of_squares, ui| { - sum_of_squares += ui.powi(2); - sum_of_squares - }) + xi[0] * sum_u; + *cost = cast::(0.5) + * u.iter() + .fold(T::zero(), |sum_of_squares, ui| sum_of_squares + ui.powi(2)) + + xi[0] * sum_u; // psi_cost += xi[1..m]'*u[0..m-1] let m = std::cmp::min(u_len, xi_len - 1); - *cost += matrix_operations::inner_product(&u[..m], &xi[1..=m]); + *cost = *cost + matrix_operations::inner_product(&u[..m], &xi[1..=m]); Ok(()) } /// Gradient of `psi_cost` -pub fn psi_gradient_dummy(u: &[f64], xi: &[f64], grad: &mut [f64]) -> Result<(), SolverError> { +pub fn psi_gradient_dummy(u: &[T], xi: &[T], grad: &mut [T]) -> Result<(), SolverError> { let u_len = u.len(); let xi_len = xi.len(); assert!( @@ -115,11 +135,11 @@ pub fn psi_gradient_dummy(u: &[f64], xi: &[f64], grad: &mut [f64]) -> Result<(), ); assert!(u_len == grad.len(), "u and grad must have equal lengths"); grad.copy_from_slice(u); - grad.iter_mut().for_each(|grad_i| *grad_i += xi[0]); + grad.iter_mut().for_each(|grad_i| *grad_i = *grad_i + xi[0]); xi[1..] 
.iter() .zip(grad.iter_mut()) - .for_each(|(xi_i, grad_i)| *grad_i += xi_i); + .for_each(|(xi_i, grad_i)| *grad_i = *grad_i + *xi_i); Ok(()) } @@ -132,11 +152,11 @@ pub fn psi_gradient_dummy(u: &[f64], xi: &[f64], grad: &mut [f64]) -> Result<(), /// /// It is `F1: R^3 --> R^2` /// -pub fn mapping_f1_affine(u: &[f64], f1u: &mut [f64]) -> Result<(), SolverError> { +pub fn mapping_f1_affine(u: &[T], f1u: &mut [T]) -> Result<(), SolverError> { assert!(u.len() == 3, "the length of u must be equal to 3"); assert!(f1u.len() == 2, "the length of F1(u) must be equal to 2"); - f1u[0] = 2.0 * u[0] + u[2] - 1.0; - f1u[1] = u[0] + 3.0 * u[1]; + f1u[0] = cast::(2.0) * u[0] + u[2] - T::one(); + f1u[1] = u[0] + cast::(3.0) * u[1]; Ok(()) } @@ -150,16 +170,16 @@ pub fn mapping_f1_affine(u: &[f64], f1u: &mut [f64]) -> Result<(), SolverError> /// 3*d2 /// d1 ] /// ``` -/// -pub fn mapping_f1_affine_jacobian_product( - _u: &[f64], - d: &[f64], - res: &mut [f64], +/// +pub fn mapping_f1_affine_jacobian_product( + _u: &[T], + d: &[T], + res: &mut [T], ) -> Result<(), SolverError> { assert!(d.len() == 2, "the length of d must be equal to 3"); assert!(res.len() == 3, "the length of res must be equal to 3"); - res[0] = 2.0 * d[0] + d[1]; - res[1] = 3.0 * d[1]; + res[0] = cast::(2.0) * d[0] + d[1]; + res[1] = cast::(3.0) * d[1]; res[2] = d[0]; Ok(()) } @@ -169,15 +189,18 @@ pub fn mapping_f1_affine_jacobian_product( /// ``` /// f0(u) = 0.5*u'*u + 1'*u /// ``` -pub fn f0(u: &[f64], cost: &mut f64) -> Result<(), SolverError> { - *cost = 0.5 * matrix_operations::norm2_squared(u) + matrix_operations::sum(u); +pub fn f0(u: &[T], cost: &mut T) -> Result<(), SolverError> +where + T: Float + Sum + Mul, +{ + *cost = cast::(0.5) * matrix_operations::norm2_squared(u) + matrix_operations::sum(u); Ok(()) } -pub fn d_f0(u: &[f64], grad: &mut [f64]) -> Result<(), SolverError> { +pub fn d_f0(u: &[T], grad: &mut [T]) -> Result<(), SolverError> { grad.iter_mut() .zip(u.iter()) - .for_each(|(grad_i, u_i)| 
*grad_i = u_i + 1.0); + .for_each(|(grad_i, u_i)| *grad_i = *u_i + T::one()); Ok(()) } diff --git a/rust/src/numeric.rs b/rust/src/numeric.rs new file mode 100644 index 00000000..a7978b91 --- /dev/null +++ b/rust/src/numeric.rs @@ -0,0 +1,10 @@ +use num::{Float, ToPrimitive}; + +/// Convert a numeric literal or integer index into the target float type. +#[inline] +pub(crate) fn cast(value: impl ToPrimitive) -> T +where + T: Float, +{ + T::from(value).expect("numeric constant must be representable") +} diff --git a/src/tests.rs b/rust/src/tests.rs similarity index 87% rename from src/tests.rs rename to rust/src/tests.rs index b8c5e1ca..c11c575d 100644 --- a/src/tests.rs +++ b/rust/src/tests.rs @@ -21,6 +21,6 @@ fn t_access() { assert!(status.has_converged()); assert!(status.norm_fpr() < tolerance); - assert!((-0.14896 - u[0]).abs() < 1e-4); - assert!((0.13346 - u[1]).abs() < 1e-4); + assert!((-0.14896_f64 - u[0]).abs() < 1e-4); + assert!((0.13346_f64 - u[1]).abs() < 1e-4); } diff --git a/src/alm/alm_cache.rs b/src/alm/alm_cache.rs deleted file mode 100644 index 2f5211bf..00000000 --- a/src/alm/alm_cache.rs +++ /dev/null @@ -1,101 +0,0 @@ -use crate::panoc::PANOCCache; - -const DEFAULT_INITIAL_PENALTY: f64 = 10.0; - -/// Cache for `AlmOptimizer` (to be allocated once) -/// -/// This is a cache structure that contains all the data that make -/// up the "state" of the ALM/PM algorithm, i.e., all those data that -/// the algorithm *updates*. 
-/// -/// On the other hand, the problem data are provided in an instance -/// of `AlmProblem` -/// -#[derive(Debug)] -pub struct AlmCache { - /// PANOC cache for inner problems - pub(crate) panoc_cache: PANOCCache, - /// Lagrange multipliers (next) - pub(crate) y_plus: Option>, - /// Vector $\xi^\nu = (c^\nu, y^\nu)$ - pub(crate) xi: Option>, - /// Infeasibility related to ALM-type constraints - pub(crate) delta_y_norm: f64, - /// Delta y at iteration `nu+1` - pub(crate) delta_y_norm_plus: f64, - /// Value $\Vert F_2(u^\nu) \Vert$ - pub(crate) f2_norm: f64, - /// Value $\Vert F_2(u^{\nu+1}) \Vert$ - pub(crate) f2_norm_plus: f64, - /// Auxiliary variable `w` - pub(crate) w_alm_aux: Option>, - /// Infeasibility related to PM-type constraints, `w_pm = F2(u)` - pub(crate) w_pm: Option>, - /// (Outer) iteration count - pub(crate) iteration: usize, - /// Counter for inner iterations - pub(crate) inner_iteration_count: usize, - /// Value of the norm of the fixed-point residual for the last - /// solved inner problem - pub(crate) last_inner_problem_norm_fpr: f64, - /// Available time left for ALM/PM computations (the value `None` - /// corresponds to an unspecified available time, i.e., there are - /// no bounds on the maximum time). 
The maximum time is specified, - /// if at all, in `AlmOptimizer` - pub(crate) available_time: Option, -} - -impl AlmCache { - /// Construct a new instance of `AlmCache` - /// - /// # Arguments - /// - /// - `panoc_cache`: an instance of `PANOCCache` that will be used by - /// the inner problem - /// - `n1`, `n2`: range dimensions of mappings `F1` and `F2` respectively - /// - /// # Panics - /// - /// Does not panic - /// - pub fn new(panoc_cache: PANOCCache, n1: usize, n2: usize) -> Self { - AlmCache { - panoc_cache, - y_plus: if n1 > 0 { Some(vec![0.0; n1]) } else { None }, - // Allocate memory for xi = (c, y) if either n1 or n2 is nonzero, - // otherwise, xi is None - xi: if n1 + n2 > 0 { - let mut xi_init = vec![DEFAULT_INITIAL_PENALTY; 1]; - xi_init.append(&mut vec![0.0; n1]); - Some(xi_init) - } else { - None - }, - // w_alm_aux should be allocated only if n1 > 0 - w_alm_aux: if n1 > 0 { Some(vec![0.0; n1]) } else { None }, - // w_pm is needed only if n2 > 0 - w_pm: if n2 > 0 { Some(vec![0.0; n2]) } else { None }, - iteration: 0, - delta_y_norm: 0.0, - delta_y_norm_plus: f64::INFINITY, - f2_norm: 0.0, - f2_norm_plus: f64::INFINITY, - inner_iteration_count: 0, - last_inner_problem_norm_fpr: -1.0, - available_time: None, - } - } - - /// Resets the cache to its virgin state, and resets the stored instance - /// of `PANOCCache` - /// - pub fn reset(&mut self) { - self.panoc_cache.reset(); - self.iteration = 0; - self.f2_norm = 0.0; - self.f2_norm_plus = 0.0; - self.delta_y_norm = 0.0; - self.delta_y_norm_plus = 0.0; - self.inner_iteration_count = 0; - } -} diff --git a/src/constraints/affine_space.rs b/src/constraints/affine_space.rs deleted file mode 100644 index 2f25ceba..00000000 --- a/src/constraints/affine_space.rs +++ /dev/null @@ -1,111 +0,0 @@ -use super::Constraint; -use crate::matrix_operations; -use crate::CholeskyFactorizer; - -use ndarray::{ArrayView1, ArrayView2}; - -#[derive(Clone)] -/// An affine space here is defined as the set of solutions of a 
linear equation, $Ax = b$, -/// that is, $E=\\{x\in\mathbb{R}^n: Ax = b\\}$, which is an affine space. It is assumed that -/// the matrix $AA^\intercal$ is full-rank. -pub struct AffineSpace { - a_mat: Vec, - b_vec: Vec, - factorizer: CholeskyFactorizer, - n_rows: usize, - n_cols: usize, -} - -impl AffineSpace { - /// Construct a new affine space given the matrix $A\in\mathbb{R}^{m\times n}$ and - /// the vector $b\in\mathbb{R}^m$ - /// - /// ## Arguments - /// - /// - `a`: matrix $A$, row-wise data - /// - `b`: vector $b$ - /// - /// ## Returns - /// New Affine Space structure - /// - pub fn new(a: Vec, b: Vec) -> Self { - let n_rows = b.len(); - let n_elements_a = a.len(); - assert!(n_rows > 0, "b must not be empty"); - assert!( - n_elements_a.is_multiple_of(n_rows), - "A and b have incompatible dimensions" - ); - let n_cols = n_elements_a / n_rows; - let aat = matrix_operations::mul_a_at(&a, n_rows, n_cols).unwrap(); - let mut factorizer = CholeskyFactorizer::new(n_rows); - factorizer.factorize(&aat).unwrap(); - AffineSpace { - a_mat: a, - b_vec: b, - factorizer, - n_rows, - n_cols, - } - } -} - -impl Constraint for AffineSpace { - /// Projection onto the set $E = \\{x: Ax = b\\}$, which is computed by - /// $$P_E(x) = x - A^\intercal z(x),$$ - /// where $z$ is the solution of the linear system - /// $$(AA^\intercal)z = Ax - b,$$ - /// which has a unique solution provided $A$ has full row rank. 
The linear system - /// is solved by computing the Cholesky factorization of $AA^\intercal$, which is - /// done using `CholeskyFactorizer` - /// - /// ## Arguments - /// - /// - `x`: The given vector $x$ is updated with the projection on the set - /// - /// ## Example - /// - /// Consider the set $X = \\{x \in \mathbb{R}^4 :Ax = b\\}$, with $A\in\mathbb{R}^{3\times 4}$ - /// being the matrix - /// $$A = \begin{bmatrix}0.5 & 0.1& 0.2& -0.3\\\\ -0.6& 0.3& 0 & 0.5 \\\\ 1.0& 0.1& -1& -0.4\end{bmatrix},$$ - /// and $b$ being the vector - /// $$b = \begin{bmatrix}1 \\\\ 2 \\\\ -0.5\end{bmatrix}.$$ - /// - /// ```rust - /// use optimization_engine::constraints::*; - /// - /// let a = vec![0.5, 0.1, 0.2, -0.3, -0.6, 0.3, 0., 0.5, 1.0, 0.1, -1.0, -0.4,]; - /// let b = vec![1., 2., -0.5]; - /// let affine_set = AffineSpace::new(a, b); - /// let mut x = [1., -2., -0.3, 0.5]; - /// affine_set.project(&mut x); - /// ``` - /// - /// The result is stored in `x` and it can be verified that $Ax = b$. - fn project(&self, x: &mut [f64]) { - let n = self.n_cols; - assert!(x.len() == n, "x has wrong dimension"); - - // Step 1: Compute e = Ax - b - let a = ArrayView2::from_shape((self.n_rows, self.n_cols), &self.a_mat) - .expect("invalid A shape"); - let x_view = ArrayView1::from(&x[..]); - let b = ArrayView1::from(&self.b_vec[..]); - let e = a.dot(&x_view) - b; - let e_slice: &[f64] = e.as_slice().unwrap(); - - // Step 2: Solve AA' z = e and compute z - let z = self.factorizer.solve(e_slice).unwrap(); - - // Step 3: Compute x = x - A'z - let at_z = a.t().dot(&ArrayView1::from(&z[..])); - for (xi, corr) in x.iter_mut().zip(at_z.iter()) { - *xi -= *corr; - } - } - - /// Affine sets are convex. 
- fn is_convex(&self) -> bool { - true - } -} diff --git a/src/constraints/ball1.rs b/src/constraints/ball1.rs deleted file mode 100644 index eb1ee77f..00000000 --- a/src/constraints/ball1.rs +++ /dev/null @@ -1,65 +0,0 @@ -use super::Constraint; -use super::Simplex; - -#[derive(Copy, Clone)] -/// A norm-1 ball, that is, a set given by $B_1^r = \\{x \in \mathbb{R}^n {}:{} \Vert{}x{}\Vert_1 \leq r\\}$ -/// or a ball-1 centered at a point $x_c$, that is, $B_1^{x_c, r} = \\{x \in \mathbb{R}^n {}:{} \Vert{}x-x_c{}\Vert_1 \leq r\\}$ -pub struct Ball1<'a> { - center: Option<&'a [f64]>, - radius: f64, - simplex: Simplex, -} - -impl<'a> Ball1<'a> { - /// Construct a new ball-1 with given center and radius. - /// If no `center` is given, then it is assumed to be in the origin - pub fn new(center: Option<&'a [f64]>, radius: f64) -> Self { - assert!(radius > 0.0); - let simplex = Simplex::new(radius); - Ball1 { - center, - radius, - simplex, - } - } - - fn project_on_ball1_centered_at_origin(&self, x: &mut [f64]) { - if crate::matrix_operations::norm1(x) > self.radius { - // u = |x| (copied) - let mut u = vec![0.0; x.len()]; - u.iter_mut() - .zip(x.iter()) - .for_each(|(ui, &xi)| *ui = f64::abs(xi)); - // u = P_simplex(u) - self.simplex.project(&mut u); - x.iter_mut() - .zip(u.iter()) - .for_each(|(xi, &ui)| *xi = f64::signum(*xi) * ui); - } - } -} - -impl<'a> Constraint for Ball1<'a> { - fn project(&self, x: &mut [f64]) { - if let Some(center) = &self.center { - assert_eq!( - x.len(), - center.len(), - "x and xc have incompatible dimensions" - ); - x.iter_mut() - .zip(center.iter()) - .for_each(|(xi, &ci)| *xi -= ci); - self.project_on_ball1_centered_at_origin(x); - x.iter_mut() - .zip(center.iter()) - .for_each(|(xi, &ci)| *xi += ci); - } else { - self.project_on_ball1_centered_at_origin(x); - } - } - - fn is_convex(&self) -> bool { - true - } -} diff --git a/src/constraints/no_constraints.rs b/src/constraints/no_constraints.rs deleted file mode 100644 index 
88df8845..00000000 --- a/src/constraints/no_constraints.rs +++ /dev/null @@ -1,21 +0,0 @@ -use super::Constraint; - -/// The whole space, no constraints -#[derive(Default, Clone, Copy)] -pub struct NoConstraints {} - -impl NoConstraints { - /// Constructs new instance of `NoConstraints` - /// - pub fn new() -> NoConstraints { - NoConstraints {} - } -} - -impl Constraint for NoConstraints { - fn project(&self, _x: &mut [f64]) {} - - fn is_convex(&self) -> bool { - true - } -} diff --git a/src/constraints/zero.rs b/src/constraints/zero.rs deleted file mode 100644 index 81064d33..00000000 --- a/src/constraints/zero.rs +++ /dev/null @@ -1,24 +0,0 @@ -use super::Constraint; - -#[derive(Clone, Copy, Default)] -/// Set Zero, $\\{0\\}$ -pub struct Zero {} - -impl Zero { - /// Constructs new instance of `Zero` - pub fn new() -> Self { - Zero {} - } -} - -impl Constraint for Zero { - /// Computes the projection on $\\{0\\}$, that is, $\Pi_{\\{0\\}}(x) = 0$ - /// for all $x$ - fn project(&self, x: &mut [f64]) { - x.iter_mut().for_each(|xi| *xi = 0.0); - } - - fn is_convex(&self) -> bool { - true - } -} diff --git a/src/core/fbs/fbs_engine.rs b/src/core/fbs/fbs_engine.rs deleted file mode 100644 index 717e70c6..00000000 --- a/src/core/fbs/fbs_engine.rs +++ /dev/null @@ -1,99 +0,0 @@ -//! FBS Engine -//! 
-use crate::{ - constraints, - core::{fbs::FBSCache, AlgorithmEngine, Problem}, - matrix_operations, FunctionCallResult, SolverError, -}; - -/// The FBE Engine defines the steps of the FBE algorithm and the termination criterion -/// -pub struct FBSEngine<'a, GradientType, ConstraintType, CostType> -where - GradientType: Fn(&[f64], &mut [f64]) -> FunctionCallResult, - CostType: Fn(&[f64], &mut f64) -> FunctionCallResult, - ConstraintType: constraints::Constraint, -{ - pub(crate) problem: Problem<'a, GradientType, ConstraintType, CostType>, - pub(crate) cache: &'a mut FBSCache, -} - -impl<'a, GradientType, ConstraintType, CostType> - FBSEngine<'a, GradientType, ConstraintType, CostType> -where - GradientType: Fn(&[f64], &mut [f64]) -> FunctionCallResult, - CostType: Fn(&[f64], &mut f64) -> FunctionCallResult, - ConstraintType: constraints::Constraint, -{ - /// Constructor for instances of `FBSEngine` - /// - /// ## Arguments - /// - /// - `problem` problem definition (cost function, gradient of the cost, constraints) - /// - mutable reference to a `cache` a cache (which is created once); the cache is reuseable - /// - /// ## Returns - /// - /// An new instance of `FBSEngine` - pub fn new( - problem: Problem<'a, GradientType, ConstraintType, CostType>, - cache: &'a mut FBSCache, - ) -> FBSEngine<'a, GradientType, ConstraintType, CostType> { - FBSEngine { problem, cache } - } - - fn gradient_step(&mut self, u_current: &mut [f64]) { - assert_eq!( - Ok(()), - (self.problem.gradf)(u_current, &mut self.cache.work_gradient_u), - "The computation of the gradient of the cost failed miserably" - ); - - // take a gradient step: u_currect -= gamma * gradient - u_current - .iter_mut() - .zip(self.cache.work_gradient_u.iter()) - .for_each(|(u, w)| *u -= self.cache.gamma * *w); - } - - fn projection_step(&mut self, u_current: &mut [f64]) { - self.problem.constraints.project(u_current); - } -} - -impl<'a, GradientType, ConstraintType, CostType> AlgorithmEngine - for FBSEngine<'a, 
GradientType, ConstraintType, CostType> -where - GradientType: Fn(&[f64], &mut [f64]) -> FunctionCallResult + 'a, - CostType: Fn(&[f64], &mut f64) -> FunctionCallResult + 'a, - ConstraintType: constraints::Constraint + 'a, -{ - /// Take a forward-backward step and check whether the algorithm should terminate - /// - /// ## Arguments - /// - /// - `u_current` the current mutable - /// - /// ## Returns - /// - /// - A boolean flag which is`true` if and only if the algorithm should not - /// terminate - /// - /// ## Panics - /// - /// The method may panick if the computation of the gradient of the cost function - /// or the cost function panics. - fn step(&mut self, u_current: &mut [f64]) -> Result { - self.cache.work_u_previous.copy_from_slice(u_current); // cache the previous step - self.gradient_step(u_current); // compute the gradient - self.projection_step(u_current); // project - self.cache.norm_fpr = - matrix_operations::norm_inf_diff(u_current, &self.cache.work_u_previous); - - Ok(self.cache.norm_fpr > self.cache.tolerance) - } - - fn init(&mut self, _u_current: &mut [f64]) -> FunctionCallResult { - Ok(()) - } -} diff --git a/website/README.md b/website/README.md deleted file mode 100644 index 574aecfa..00000000 --- a/website/README.md +++ /dev/null @@ -1,57 +0,0 @@ -This website now uses Docusaurus v3. - -# Development - -Install dependencies: - -```sh -yarn -``` - -Start the local dev server: - -```sh -yarn start -``` - -Build the production site: - -```sh -yarn build -``` - -Preview the production build locally: - -```sh -yarn serve -``` - -Deploy to GitHub Pages: - -```sh -yarn deploy -``` - -# Project Layout - -``` -optimization-engine/ - docs/ # documentation markdown files - website/ - blog/ # blog posts - src/ - css/ - pages/ - static/ - img/ - js/ - docusaurus.config.js - sidebars.js - package.json -``` - -# Notes - -- The docs remain in the repository root under `/docs`. 
-- Legacy inline MathJax and widget scripts are stripped at build time, and equivalent site-wide support is loaded from `website/static/js`. -- Sidebar ordering now lives in `website/sidebars.js`.