Skip to content
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
10 changes: 10 additions & 0 deletions Project.toml
Original file line number Diff line number Diff line change
Expand Up @@ -3,19 +3,24 @@ uuid = "581a75fa-a23a-52d0-a590-d6201de2218a"
version = "0.6.4"

[deps]
ADNLPModels = "54578032-b7ea-4c30-94aa-7cbd1cce6c9a"
BenchmarkProfiles = "ecbce9bc-3e5e-569d-9e29-55181f61f8d0"
BenchmarkTools = "6e4b80f9-dd63-53aa-95a3-0cdb28fa8baf"
ColorSchemes = "35d6a980-a343-548e-a6ea-1d62b119f2f4"
DataFrames = "a93c6f00-e57d-5684-b7b6-d8193f3e46c0"
Distributed = "8ba89e20-285c-5b6f-9357-94700520ee1b"
GitHub = "bc5e4493-9b4d-5f90-b8aa-2b2bcaad7a26"
JLD2 = "033835bb-8acc-5ee8-8aae-3f567f8a3819"
JSON = "682c06a0-de6a-54ab-a142-c8b1cf79cde6"
JSOSolvers = "10dff2fc-5484-5881-a0e0-c90441020f8a"
LaTeXStrings = "b964fa9f-0449-5b57-a5c2-d3ea65f4040f"
LaTeXTabulars = "266f59ce-6e72-579c-98bb-27b39b5c037e"
LibGit2 = "76f85450-5226-5b5a-8eaa-529ad045b433"
LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
Logging = "56ddb016-857b-54e1-b83d-db4d58db5568"
NLPModels = "a4795742-8479-5a88-8948-cc11e1c8c1a6"
NLPModelsIpopt = "f4238b75-b362-5c4c-b852-0801c9a21d71"
OptimizationProblems = "5049e819-d29b-5fba-b941-0eee7e64c1c6"
PGFPlotsX = "8314cec4-20b6-5062-9cdb-752b83310925"
PkgBenchmark = "32113eaa-f34f-5b0d-bd6c-c81e245fc73d"
Plots = "91a5bcdd-55d7-5caf-9e0b-520d859cae80"
Expand All @@ -27,16 +32,21 @@ SparseArrays = "2f01184e-e22b-5df5-ae63-d93ebab69eaf"
UnicodePlots = "b8865327-cd53-5732-bb35-84acbb429228"

[compat]
ADNLPModels = "0.8.12"
BenchmarkProfiles = "0.4.6"
BenchmarkTools = "^0.4.2, 0.5, 0.6, 0.7, 1"
ColorSchemes = "^3.9"
DataFrames = "^1.4, 1.5, 1.6"
Distributed = "1"
GitHub = "^5.0.2"
JLD2 = "0.1.12, 0.2, 0.3, 0.4, 0.5"
JSON = "0.20.0, 0.21.0"
JSOSolvers = "0.14.6"
LaTeXStrings = "1.3"
LaTeXTabulars = "^0.1.0"
NLPModels = "0.14, 0.15, 0.16, 0.17, 0.18, 0.19, 0.20, 0.21"
NLPModelsIpopt = "0.11.0"
OptimizationProblems = "0.9.1"
PGFPlotsX = "1.5"
PkgBenchmark = "^0.2.0"
Plots = "1.0, 1.1"
Expand Down
54 changes: 54 additions & 0 deletions examples/bmark_opt_problems_distributed.jl
Original file line number Diff line number Diff line change
@@ -0,0 +1,54 @@
using Distributed

# 1. Set up worker processes (ideally one per solver; here we use most available cores).
# Workers are only added when this script starts in a fresh single-process session.
if nprocs() == 1
addprocs(Sys.CPU_THREADS - 2)
end

# 2. Load the model/solver packages on ALL processes so workers can deserialize
# and execute the solver closures that `pmap` ships to them.
@everywhere begin
using ADNLPModels
using JSOSolvers
using NLPModelsIpopt
using OptimizationProblems
using OptimizationProblems.ADNLPProblems
using SolverBenchmark
end

# These are only needed on the main process, for saving results and plotting.
using JLD2, Plots

# Define the problem set as a lazy generator over unconstrained (ncon == 0),
# bound-free problems with 5 <= nvar <= 100.
# NOTE(review): with "parallel by solver", this generator is captured in a closure
# and serialized to each worker, which then instantiates each problem with '()'
# locally — confirm the generator (closing over `problem_names`) serializes cleanly.
# NOTE(review): `eval(Meta.parse(...))` is eval-on-data; tolerable here only because
# the strings come from `OptimizationProblems.meta`, not from untrusted input.
probs = OptimizationProblems.meta
problem_names = probs[(probs.ncon .== 0) .& .!probs.has_bounds .& (5 .<= probs.nvar .<= 100), :name]
problems = (eval(Meta.parse(problem))() for problem ∈ problem_names)

# Define the solvers on every process so workers know what `:trunk` and `:ipopt` mean.
@everywhere begin
solvers = Dict(
:trunk => nlp -> trunk(nlp, atol = 1.0e-4, rtol = 1.0e-5, max_time = 10.0, verbose = 0),
:ipopt => nlp -> ipopt(nlp, tol = 1.0e-5, max_cpu_time = 10.0, print_level = 0, sb = "no"),
)
end

# Problems to skip by name (consumed by the `skipif` predicate below).
to_skip = ["thurber"]

# 3. Run the benchmark with the new parallel-by-solver mode.
stats = bmark_solvers(
solvers,
problems,
skipif = prob -> prob.meta.name ∈ to_skip,
parallel = true # <--- NEW FLAG
)

# Save the per-solver DataFrames for later analysis.
@save "stats_opt_problems.jld2" stats

# Plot a performance profile over the number of objective evaluations
# (note: this profiles `neval_obj`, not elapsed time).
performance_profile(stats, df -> df.neval_obj)
2 changes: 2 additions & 0 deletions src/SolverBenchmark.jl
Original file line number Diff line number Diff line change
Expand Up @@ -12,6 +12,8 @@ using PrettyTables
using NLPModels
using SolverCore

using Distributed

# reexport PrettyTable table formats for convenience
export unicode,
ascii_dots,
Expand Down
45 changes: 39 additions & 6 deletions src/bmark_solvers.jl
Original file line number Diff line number Diff line change
@@ -1,25 +1,58 @@
export bmark_solvers

"""
bmark_solvers(solvers :: Dict{Symbol,Any}, args...; kwargs...)
bmark_solvers(solvers :: Dict{Symbol,Any}, args...; parallel::Bool=false, kwargs...)

Run a set of solvers on a set of problems.

#### Arguments
* `solvers`: a dictionary of solvers to which each problem should be passed
* `parallel`: if true, runs each solver on a separate worker process (default: false)
* other positional arguments accepted by `solve_problems`, except for a solver name

#### Keyword arguments
Any keyword argument accepted by `solve_problems`

#### Return value
A Dict{Symbol, AbstractExecutionStats} of statistics.
A Dict{Symbol, DataFrame} of statistics.
"""
function bmark_solvers(solvers::Dict{Symbol, <:Any}, args...; kwargs...)
function bmark_solvers(solvers::Dict{Symbol, <:Any}, args...; parallel::Bool = false, kwargs...)

# --- 1. SERIAL PATH (Default) ---
if !parallel
stats = Dict{Symbol, DataFrame}()
for (name, solver) in solvers
@info "Running solver $name (Serial)"
stats[name] = solve_problems(solver, name, args...; kwargs...)
end
return stats
end

# --- 2. PARALLEL PATH (By Solver) ---
if nworkers() == 1
@warn "parallel=true but only 1 worker process found. Did you forget `addprocs()`?"
end

# Helper function runs on the worker
# It takes a pair (:solver_name, solver_function)
run_solver_worker = function (solver_pair)
sname, sfunc = solver_pair
@info "Worker $(myid()) running solver $sname"
result_df = solve_problems(sfunc, sname, args...; kwargs...)
return (sname, result_df)
end

# Collect solvers into a list so pmap can distribute them
solver_list = collect(solvers)

# Distribute work
results = pmap(run_solver_worker, solver_list)

# Aggregate results back into the Dict
stats = Dict{Symbol, DataFrame}()
for (name, solver) in solvers
@info "running solver $name"
stats[name] = solve_problems(solver, name, args...; kwargs...)
for (name, df) in results
stats[name] = df
end

return stats
end
1 change: 1 addition & 0 deletions test/runtests.jl
Original file line number Diff line number Diff line change
Expand Up @@ -24,3 +24,4 @@ include("test-tables.jl")
include("profiles.jl")
include("pkgbmark.jl")
include("test_bmark.jl")
include("test_distributed.jl")
46 changes: 46 additions & 0 deletions test/test_distributed.jl
Original file line number Diff line number Diff line change
@@ -0,0 +1,46 @@
using Distributed
# NOTE(review): this test adds worker processes and is meant to run in an
# environment where Distributed workers are available (e.g. a server/CI job);
# it cannot be fully exercised in a restricted single-process session.

# Ensure at least two worker processes exist for the parallel path.
if nprocs() == 1
addprocs(2)
end

# Load required packages on the master process and on every worker.
@everywhere using ADNLPModels, JSOSolvers, NLPModelsIpopt, OptimizationProblems, SolverBenchmark
@everywhere using OptimizationProblems.ADNLPProblems

# Verifies that parallel=true produces the same per-solver results as the serial path.
@testset "Parallel vs Serial: Real OptimizationProblems" begin

# 1. Select a small fixed problem set: the first 5 unconstrained (ncon == 0),
# bound-free problems with 5 <= nvar <= 100.
probs = OptimizationProblems.meta
problem_names = probs[(probs.ncon .== 0) .& .!probs.has_bounds .& (5 .<= probs.nvar .<= 100), :name][1:5]

# Build a FRESH generator per run: generators are stateful and would be exhausted
# after the serial run if the same one were reused for the parallel run.
get_problems = () -> (eval(Meta.parse(problem))() for problem ∈ problem_names)

# Solvers must be defined @everywhere so worker processes can execute them.
@everywhere begin
solvers = Dict(
:trunk => nlp -> trunk(nlp, atol = 1.0e-4, rtol = 1.0e-5, max_time = 10.0, verbose = 0),
:ipopt => nlp -> ipopt(nlp, tol = 1.0e-5, max_cpu_time = 10.0, print_level = 0, sb = "no"),
)
end

# Skip one problem by name to exercise the `skipif` path in both modes.
to_skip = ["thurber"]
skip_fn = prob -> prob.meta.name ∈ to_skip

# 2. Serial reference run.
stats_serial = bmark_solvers(solvers, get_problems(), skipif = skip_fn, parallel=false)

# 3. Parallel run under test.
stats_parallel = bmark_solvers(solvers, get_problems(), skipif = skip_fn, parallel=true)

# 4. Compare per-solver DataFrames; rows are sorted by problem name first,
# presumably because parallel execution does not guarantee row order — confirm.
for k in keys(solvers)
df_s = sort(stats_serial[k], :name)
df_p = sort(stats_parallel[k], :name)

@test size(df_s) == size(df_p)
@test df_s.name == df_p.name
@test df_s.status == df_p.status
# Objectives compared with a tolerance (floating point); nans=true makes
# NaN == NaN, so problems that failed in both runs still compare equal.
@test isapprox(df_s.objective, df_p.objective, atol=1e-5, nans=true)
end
end
Loading