diff --git a/solvers/benchmarking/BUILD.bazel b/solvers/benchmarking/BUILD.bazel
index 63ce5cdde8d7..f5570bacc169 100644
--- a/solvers/benchmarking/BUILD.bazel
+++ b/solvers/benchmarking/BUILD.bazel
@@ -7,6 +7,8 @@ load(
 )
 load("//tools/lint:lint.bzl", "add_lint_tests")
 
+package(default_visibility = ["//visibility:public"])
+
 drake_cc_googlebench_binary(
     name = "benchmark_mathematical_program",
     srcs = ["benchmark_mathematical_program.cc"],
@@ -20,11 +22,28 @@ drake_cc_googlebench_binary(
     ],
 )
 
-package(default_visibility = ["//visibility:public"])
-
 drake_py_experiment_binary(
     name = "mathematical_program_experiment",
     googlebench_binary = ":benchmark_mathematical_program",
 )
 
+drake_cc_googlebench_binary(
+    name = "benchmark_ipopt_solver",
+    srcs = ["benchmark_ipopt_solver.cc"],
+    add_test_rule = True,
+    test_timeout = "moderate",
+    deps = [
+        "//common:add_text_logging_gflags",
+        "//solvers:ipopt_solver",
+        "//solvers:mathematical_program",
+        "//tools/performance:fixture_common",
+        "//tools/performance:gflags_main",
+    ],
+)
+
+drake_py_experiment_binary(
+    name = "ipopt_solver_experiment",
+    googlebench_binary = ":benchmark_ipopt_solver",
+)
+
 add_lint_tests()
diff --git a/solvers/benchmarking/benchmark_ipopt_solver.cc b/solvers/benchmarking/benchmark_ipopt_solver.cc
new file mode 100644
index 000000000000..47d8b1c61870
--- /dev/null
+++ b/solvers/benchmarking/benchmark_ipopt_solver.cc
@@ -0,0 +1,48 @@
+#include "drake/solvers/ipopt_solver.h"
+#include "drake/solvers/mathematical_program.h"
+#include "drake/tools/performance/fixture_common.h"
+
+namespace drake {
+namespace solvers {
+namespace {
+
+static void BenchmarkIpoptSolver(benchmark::State& state) {  // NOLINT
+  // Number of decision variables.
+  const int nx = 1000;
+  // Create the mathematical program and the solver.
+  MathematicalProgram prog;
+  IpoptSolver solver;
+  // Create decision variables.
+  auto x = prog.NewContinuousVariables(nx);
+  // Add bounding box constraints.
+  auto lbx = -1e0 * Eigen::VectorXd::Ones(nx);
+  auto ubx = +1e0 * Eigen::VectorXd::Ones(nx);
+  prog.AddBoundingBoxConstraint(lbx, ubx, x);
+  // Add random linear constraints.
+  auto A = Eigen::MatrixXd::Random(nx, nx);
+  auto lb = Eigen::VectorXd::Zero(nx);
+  auto ub = 1e20 * Eigen::VectorXd::Ones(nx);
+  prog.AddLinearConstraint(A, lb, ub, x);
+  // Add linear equality constraints.
+  auto Aeq = Eigen::MatrixXd::Identity(nx, nx);
+  auto beq = Eigen::VectorXd::Zero(nx);
+  prog.AddLinearEqualityConstraint(Aeq, beq, x);
+  // Add a nonlinear constraint: closed unit disk.
+  prog.AddConstraint(x.transpose() * x <= 1e0);
+  // Add a quadratic cost.
+  prog.AddQuadraticCost(Eigen::MatrixXd::Identity(nx, nx),
+                        Eigen::VectorXd::Zero(nx), x);
+
+  // Run the solver and measure the performance.
+  MathematicalProgramResult result;
+  for (auto _ : state) {
+    result = solver.Solve(prog);
+  }
+  // Verify the success of the solver.
+  DRAKE_DEMAND(result.is_success());
+}
+
+BENCHMARK(BenchmarkIpoptSolver);
+}  // namespace
+}  // namespace solvers
+}  // namespace drake
diff --git a/solvers/ipopt_solver.cc b/solvers/ipopt_solver.cc
index cf08a05323d3..38f22af5138a 100644
--- a/solvers/ipopt_solver.cc
+++ b/solvers/ipopt_solver.cc
@@ -185,10 +185,13 @@ Eigen::VectorXd MakeEigenVector(Index n, const Number* x) {
 /// GetGradientMatrix.
 ///
 /// @return number of gradient entries populated.
+template <typename C>
 size_t EvaluateConstraint(const MathematicalProgram& prog,
-                          const Eigen::VectorXd& xvec, const Constraint& c,
-                          const VectorXDecisionVariable& variables,
+                          const Eigen::VectorXd& xvec,
+                          const Binding<C>& binding,
                           Number* result, Number* grad) {
+  Constraint* c = binding.evaluator().get();
+
   // For constraints which don't use all of the variables in the X
   // input, extract a subset into the AutoDiffVecXd this_x to evaluate
   // the constraint (we actually do this for all constraints.  One
@@ -196,21 +199,21 @@ size_t EvaluateConstraint(const MathematicalProgram& prog,
   // the correct geometry (e.g. the constraint uses all decision
   // variables in the same order they appear in xvec), but this is not
   // currently done).
-  int num_v_variables = variables.rows();
+  int num_v_variables = binding.variables().rows();
   Eigen::VectorXd this_x(num_v_variables);
   for (int i = 0; i < num_v_variables; ++i) {
-    this_x(i) = xvec(prog.FindDecisionVariableIndex(variables(i)));
+    this_x(i) = xvec(prog.FindDecisionVariableIndex(binding.variables()(i)));
   }
 
   if (!grad) {
     // We don't want the gradient info, so just call the VectorXd version of
     // Eval.
-    Eigen::VectorXd ty(c.num_constraints());
+    Eigen::VectorXd ty(c->num_constraints());
 
-    c.Eval(this_x, &ty);
+    c->Eval(this_x, &ty);
 
     // Store the results.
-    for (int i = 0; i < c.num_constraints(); i++) {
+    for (int i = 0; i < c->num_constraints(); i++) {
       result[i] = ty(i);
     }
     return 0;
@@ -218,33 +221,56 @@ size_t EvaluateConstraint(const MathematicalProgram& prog,
 
   // Run the version which calculates gradients.
 
-  AutoDiffVecXd ty(c.num_constraints());
-  c.Eval(math::InitializeAutoDiff(this_x), &ty);
+  // Leverage the fact that the gradient is equal to the A matrix for linear
+  // constraints.
+  if constexpr (std::is_same_v<C, LinearConstraint> ||
+                std::is_same_v<C, LinearEqualityConstraint>) {
+    auto A = static_cast<C*>(c)->GetDenseA();
+    // Verify that A has the proper size.
+    DRAKE_ASSERT(A.rows() == c->num_constraints());
+    DRAKE_ASSERT(A.cols() == binding.variables().rows());
+    // Evaluate the constraint.
+    Eigen::VectorXd ty(c->num_constraints());
+    c->Eval(this_x, &ty);
+    // Set the result and the gradient.
+    size_t grad_idx = 0;
+    for (int i = 0; i < ty.rows(); i++) {
+      result[i] = ty(i);
+      for (int j = 0; j < binding.variables().rows(); j++) {
+        grad[grad_idx++] = A.coeff(i, j);
+      }
+    }
+    return grad_idx;
+  } else {
+    // Otherwise, use auto-diff.
+    AutoDiffVecXd ty(c->num_constraints());
+    c->Eval(math::InitializeAutoDiff(this_x), &ty);
 
-  // Store the results.  Since IPOPT directly knows the bounds of the
-  // constraint, we don't need to apply any bounding information here.
-  for (int i = 0; i < c.num_constraints(); i++) {
-    result[i] = ty(i).value();
-  }
+    // Store the results.  Since IPOPT directly knows the bounds of the
+    // constraint, we don't need to apply any bounding information here.
+    for (int i = 0; i < c->num_constraints(); i++) {
+      result[i] = ty(i).value();
+    }
 
-  // Extract the appropriate derivatives from our result into the
-  // gradient array.
-  size_t grad_idx = 0;
+    // Extract the appropriate derivatives from our result into the
+    // gradient array.
+    size_t grad_idx = 0;
 
-  DRAKE_ASSERT(ty.rows() == c.num_constraints());
-  for (int i = 0; i < ty.rows(); i++) {
-    if (ty(i).derivatives().size() > 0) {
-      for (int j = 0; j < variables.rows(); j++) {
-        grad[grad_idx++] = ty(i).derivatives()(j);
-      }
-    } else {
-      for (int j = 0; j < variables.rows(); j++) {
-        grad[grad_idx++] = 0;
+    DRAKE_ASSERT(ty.rows() == c->num_constraints());
+    for (int i = 0; i < ty.rows(); i++) {
+      if (ty(i).derivatives().size() > 0) {
+        for (int j = 0; j < binding.variables().rows(); j++) {
+          grad[grad_idx++] = ty(i).derivatives()(j);
+        }
+      } else {
+        for (int j = 0; j < binding.variables().rows(); j++) {
+          grad[grad_idx++] = 0;
+        }
       }
     }
-  }
 
-  return grad_idx;
+    return grad_idx;
+  }
 }
 
 // IPOPT uses separate callbacks to get the result and the gradients.  When
@@ -662,28 +688,23 @@ class IpoptSolver_NLP : public Ipopt::TNLP {
     Number* grad = eval_gradient ? constraint_cache_->grad.data() : nullptr;
 
     for (const auto& c : problem_->generic_constraints()) {
-      grad += EvaluateConstraint(*problem_, xvec, (*c.evaluator()),
-                                 c.variables(), result, grad);
+      grad += EvaluateConstraint(*problem_, xvec, c, result, grad);
       result += c.evaluator()->num_constraints();
     }
     for (const auto& c : problem_->lorentz_cone_constraints()) {
-      grad += EvaluateConstraint(*problem_, xvec, (*c.evaluator()),
-                                 c.variables(), result, grad);
+      grad += EvaluateConstraint(*problem_, xvec, c, result, grad);
      result += c.evaluator()->num_constraints();
     }
     for (const auto& c : problem_->rotated_lorentz_cone_constraints()) {
-      grad += EvaluateConstraint(*problem_, xvec, (*c.evaluator()),
-                                 c.variables(), result, grad);
+      grad += EvaluateConstraint(*problem_, xvec, c, result, grad);
       result += c.evaluator()->num_constraints();
     }
    for (const auto& c : problem_->linear_constraints()) {
-      grad += EvaluateConstraint(*problem_, xvec, (*c.evaluator()),
-                                 c.variables(), result, grad);
+      grad += EvaluateConstraint(*problem_, xvec, c, result, grad);
       result += c.evaluator()->num_constraints();
     }
     for (const auto& c : problem_->linear_equality_constraints()) {
-      grad += EvaluateConstraint(*problem_, xvec, (*c.evaluator()),
-                                 c.variables(), result, grad);
+      grad += EvaluateConstraint(*problem_, xvec, c, result, grad);
       result += c.evaluator()->num_constraints();
     }
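Note: the fast path added to EvaluateConstraint relies on the fact that a linear constraint lb <= A*x <= ub has the constant Jacobian A, so the gradient buffer IPOPT expects can be copied straight out of GetDenseA() instead of being recomputed with autodiff on every callback. A minimal standalone sketch of that idea using plain Eigen only (illustrative names, not Drake or patch APIs):

#include <cassert>
#include <vector>

#include <Eigen/Dense>

int main() {
  const int num_constraints = 3;
  const int num_vars = 4;
  // Stand-ins for the constraint's A matrix and the current iterate x.
  const Eigen::MatrixXd A = Eigen::MatrixXd::Random(num_constraints, num_vars);
  const Eigen::VectorXd x = Eigen::VectorXd::Random(num_vars);

  // Constraint value, analogous to the `result` array filled by the fast path.
  const Eigen::VectorXd y = A * x;
  assert(y.size() == num_constraints);

  // Row-major gradient buffer filled directly from A, mirroring the loop over
  // A.coeff(i, j) in the patch; no autodiff pass is needed for a linear map.
  std::vector<double> grad;
  grad.reserve(num_constraints * num_vars);
  for (int i = 0; i < num_constraints; ++i) {
    for (int j = 0; j < num_vars; ++j) {
      grad.push_back(A(i, j));
    }
  }
  assert(static_cast<int>(grad.size()) == num_constraints * num_vars);
  return 0;
}

The BUILD.bazel additions follow Drake's usual performance-tooling workflow: the benchmark also runs as a test via add_test_rule, and the drake_py_experiment_binary target is intended to be invoked with bazel run (for example, bazel run //solvers/benchmarking:ipopt_solver_experiment).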