diff --git a/src/NonLinearProgram/NonLinearProgram.jl b/src/NonLinearProgram/NonLinearProgram.jl
index f7a04b1e..c31eb2e7 100644
--- a/src/NonLinearProgram/NonLinearProgram.jl
+++ b/src/NonLinearProgram/NonLinearProgram.jl
@@ -574,13 +574,13 @@ function DiffOpt.reverse_differentiate!(model::Model; tol = 1e-6)
         end
     end
     for (i, var_idx) in enumerate(cache.primal_vars[cache.has_low])
-        idx = form.constraint_lower_bounds[var_idx.value].value
+        idx = form.constraint_lower_bounds[var_idx.value]
         if haskey(model.input_cache.dy, idx)
             Δdual[num_constraints+i] = model.input_cache.dy[idx]
         end
     end
     for (i, var_idx) in enumerate(cache.primal_vars[cache.has_up])
-        idx = form.constraint_upper_bounds[var_idx.value].value
+        idx = form.constraint_upper_bounds[var_idx.value]
         if haskey(model.input_cache.dy, idx)
             Δdual[num_constraints+num_low+i] = model.input_cache.dy[idx]
         end
diff --git a/test/nlp_program.jl b/test/nlp_program.jl
index c9dbbc73..c08b925e 100644
--- a/test/nlp_program.jl
+++ b/test/nlp_program.jl
@@ -977,6 +977,21 @@ function test_changing_factorization()
     )
 end
 
+function test_reverse_bounds()
+    model = DiffOpt.nonlinear_diff_model(Ipopt.Optimizer)
+    set_silent(model)
+    @variable(model, x[1:3] >= 0) # x[3] ≥ 0 is active
+    @variable(model, p in MOI.Parameter(4.5))
+    @constraint(model, 6x[1] + 3x[2] + 2x[3] == p)
+    @constraint(model, x[1] + x[2] - x[3] == 1)
+    @objective(model, Min, sum(x .^ 2))
+    optimize!(model)
+    MOI.set(model, DiffOpt.ReverseConstraintDual(), LowerBoundRef(x[3]), 1.0)
+    DiffOpt.reverse_differentiate!(model)
+    dp = MOI.get(model, DiffOpt.ReverseConstraintSet(), ParameterRef(p)).value
+    @test isapprox(dp, -2.88888; atol = 1e-4)
+end
+
 end # module
 
 TestNLPProgram.runtests()