Avoid auto-diff for linear IPOPT constraints (RobotLocomotion#18603)
aykut-tri authored and xuchenhan-tri committed Feb 6, 2023
1 parent 24dc995 commit db690b3
Showing 3 changed files with 128 additions and 40 deletions.
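The change in one line of math: for a linear constraint $lb \le Ax \le ub$, the gradient of the constraint function is the constant matrix $A$ itself (notation here is assumed, not taken from the commit message):

$$\frac{\partial}{\partial x}(Ax) = A$$

So for `LinearConstraint` and `LinearEqualityConstraint`, IPOPT's gradient entries can be copied straight out of the stored $A$ matrix rather than recomputed through autodiff on every callback; the generic autodiff path remains for all other constraint types.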
23 changes: 21 additions & 2 deletions solvers/benchmarking/BUILD.bazel
@@ -7,6 +7,8 @@ load(
)
load("//tools/lint:lint.bzl", "add_lint_tests")

package(default_visibility = ["//visibility:public"])

drake_cc_googlebench_binary(
name = "benchmark_mathematical_program",
srcs = ["benchmark_mathematical_program.cc"],
@@ -20,11 +22,28 @@ drake_cc_googlebench_binary(
],
)

package(default_visibility = ["//visibility:public"])

drake_py_experiment_binary(
name = "mathematical_program_experiment",
googlebench_binary = ":benchmark_mathematical_program",
)

drake_cc_googlebench_binary(
name = "benchmark_ipopt_solver",
srcs = ["benchmark_ipopt_solver.cc"],
add_test_rule = True,
test_timeout = "moderate",
deps = [
"//common:add_text_logging_gflags",
"//solvers:ipopt_solver",
"//solvers:mathematical_program",
"//tools/performance:fixture_common",
"//tools/performance:gflags_main",
],
)

drake_py_experiment_binary(
name = "ipopt_solver_experiment",
googlebench_binary = ":benchmark_ipopt_solver",
)

add_lint_tests()
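Assuming Drake's standard benchmarking workflow (mirroring the existing `mathematical_program_experiment` target above), the new benchmark should be runnable with `bazel run //solvers/benchmarking:ipopt_solver_experiment`, and `add_test_rule = True` additionally generates a timed smoke test for CI.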
48 changes: 48 additions & 0 deletions solvers/benchmarking/benchmark_ipopt_solver.cc
@@ -0,0 +1,48 @@
#include "drake/solvers/ipopt_solver.h"
#include "drake/solvers/mathematical_program.h"
#include "drake/tools/performance/fixture_common.h"

namespace drake {
namespace solvers {
namespace {

static void BenchmarkIpoptSolver(benchmark::State& state) { // NOLINT
// Number of decision variables.
const int nx = 1000;
// Create the mathematical program and the solver.
MathematicalProgram prog;
IpoptSolver solver;
// Create decision variables.
auto x = prog.NewContinuousVariables<nx>();
// Add bounding box constraints.
auto lbx = -1e0 * Eigen::VectorXd::Ones(nx);
auto ubx = +1e0 * Eigen::VectorXd::Ones(nx);
prog.AddBoundingBoxConstraint(lbx, ubx, x);
// Add random linear constraints.
auto A = Eigen::MatrixXd::Random(nx, nx);
auto lb = Eigen::VectorXd::Zero(nx);
auto ub = 1e20 * Eigen::VectorXd::Ones(nx);
prog.AddLinearConstraint(A, lb, ub, x);
// Add linear equality constraints.
auto Aeq = Eigen::MatrixXd::Identity(nx, nx);
auto beq = Eigen::VectorXd::Zero(nx);
prog.AddLinearEqualityConstraint(Aeq, beq, x);
// Add a nonlinear constraint: closed unit disk.
prog.AddConstraint(x.transpose() * x <= 1e0);
// Add a quadratic cost.
prog.AddQuadraticCost(Eigen::MatrixXd::Identity(nx, nx),
Eigen::VectorXd::Zero(nx), x);

// Run the solver and measure the performance.
MathematicalProgramResult result;
for (auto _ : state) {
result = solver.Solve(prog);
}
// Verify the success of the solver.
DRAKE_DEMAND(result.is_success());
}

BENCHMARK(BenchmarkIpoptSolver);
} // namespace
} // namespace solvers
} // namespace drake
97 changes: 59 additions & 38 deletions solvers/ipopt_solver.cc
@@ -185,66 +185,92 @@ Eigen::VectorXd MakeEigenVector(Index n, const Number* x) {
/// GetGradientMatrix.
///
/// @return number of gradient entries populated.
template <typename ConstraintType>
size_t EvaluateConstraint(const MathematicalProgram& prog,
const Eigen::VectorXd& xvec, const Constraint& c,
const VectorXDecisionVariable& variables,
const Eigen::VectorXd& xvec,
const Binding<ConstraintType>& binding,
Number* result, Number* grad) {
Constraint* c = binding.evaluator().get();

// For constraints which don't use all of the variables in the X
// input, extract a subset into the AutoDiffVecXd this_x to evaluate
// the constraint (we actually do this for all constraints. One
// potential optimization might be to detect if the initial "tx" has
// the correct geometry (e.g. the constraint uses all decision
// variables in the same order they appear in xvec), but this is not
// currently done).
int num_v_variables = variables.rows();
int num_v_variables = binding.variables().rows();
Eigen::VectorXd this_x(num_v_variables);
for (int i = 0; i < num_v_variables; ++i) {
this_x(i) = xvec(prog.FindDecisionVariableIndex(variables(i)));
this_x(i) = xvec(prog.FindDecisionVariableIndex(binding.variables()(i)));
}

if (!grad) {
// We don't want the gradient info, so just call the VectorXd version of
// Eval.
Eigen::VectorXd ty(c.num_constraints());
Eigen::VectorXd ty(c->num_constraints());

c.Eval(this_x, &ty);
c->Eval(this_x, &ty);

// Store the results.
for (int i = 0; i < c.num_constraints(); i++) {
for (int i = 0; i < c->num_constraints(); i++) {
result[i] = ty(i);
}
return 0;
}

// Run the version which calculates gradients.

AutoDiffVecXd ty(c.num_constraints());
c.Eval(math::InitializeAutoDiff(this_x), &ty);
// Leverage the fact that the gradient is equal to the A matrix for linear
// constraints.
if constexpr (std::is_same_v<ConstraintType, LinearEqualityConstraint> ||
std::is_same_v<ConstraintType, LinearConstraint>) {
auto A = static_cast<ConstraintType*>(c)->GetDenseA();
// Verify that A has the proper size.
DRAKE_ASSERT(A.rows() == c->num_constraints());
DRAKE_ASSERT(A.cols() == binding.variables().rows());
// Evaluate the constraint.
Eigen::VectorXd ty(c->num_constraints());
c->Eval(this_x, &ty);
// Set the result and the gradient.
size_t grad_idx = 0;
for (int i = 0; i < ty.rows(); i++) {
result[i] = ty(i);
for (int j = 0; j < binding.variables().rows(); j++) {
grad[grad_idx++] = A.coeff(i, j);
}
}
return grad_idx;
} else {
// Otherwise, use auto-diff.
AutoDiffVecXd ty(c->num_constraints());
c->Eval(math::InitializeAutoDiff(this_x), &ty);

// Store the results. Since IPOPT directly knows the bounds of the
// constraint, we don't need to apply any bounding information here.
for (int i = 0; i < c.num_constraints(); i++) {
result[i] = ty(i).value();
}
// Store the results. Since IPOPT directly knows the bounds of the
// constraint, we don't need to apply any bounding information here.
for (int i = 0; i < c->num_constraints(); i++) {
result[i] = ty(i).value();
}

// Extract the appropriate derivatives from our result into the
// gradient array.
size_t grad_idx = 0;
// Extract the appropriate derivatives from our result into the
// gradient array.
size_t grad_idx = 0;

DRAKE_ASSERT(ty.rows() == c.num_constraints());
for (int i = 0; i < ty.rows(); i++) {
if (ty(i).derivatives().size() > 0) {
for (int j = 0; j < variables.rows(); j++) {
grad[grad_idx++] = ty(i).derivatives()(j);
}
} else {
for (int j = 0; j < variables.rows(); j++) {
grad[grad_idx++] = 0;
DRAKE_ASSERT(ty.rows() == c->num_constraints());
for (int i = 0; i < ty.rows(); i++) {
if (ty(i).derivatives().size() > 0) {
for (int j = 0; j < binding.variables().rows(); j++) {
grad[grad_idx++] = ty(i).derivatives()(j);
}
} else {
for (int j = 0; j < binding.variables().rows(); j++) {
grad[grad_idx++] = 0;
}
}
}
}

return grad_idx;
return grad_idx;
}
}

// IPOPT uses separate callbacks to get the result and the gradients. When
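The rewrite above hinges on C++17 `if constexpr`: the branch on `ConstraintType` is resolved at compile time, so each template instantiation contains only its relevant gradient path. A minimal, self-contained sketch of the same dispatch pattern (the types and names below are illustrative, not Drake's):

```cpp
#include <cmath>
#include <iostream>
#include <type_traits>

// Hypothetical stand-ins for the constraint types in the diff above.
struct LinearConstraintLike {
  double a = 2.0;  // c(x) = a * x, so dc/dx == a by construction.
  double Eval(double x) const { return a * x; }
};
struct GenericConstraintLike {
  double Eval(double x) const { return std::sin(x); }
};

template <typename C>
double GradientOf(const C& c, double x) {
  if constexpr (std::is_same_v<C, LinearConstraintLike>) {
    // Linear case: the gradient is the stored coefficient; no differentiation.
    return c.a;
  } else {
    // Generic case: differentiate numerically (a stand-in for the autodiff
    // path in the real code).
    const double h = 1e-6;
    return (c.Eval(x + h) - c.Eval(x - h)) / (2 * h);
  }
}

int main() {
  std::cout << GradientOf(LinearConstraintLike{}, 1.0) << "\n";   // prints 2
  std::cout << GradientOf(GenericConstraintLike{}, 0.0) << "\n";  // ~cos(0) = 1
}
```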
@@ -662,28 +688,23 @@ class IpoptSolver_NLP : public Ipopt::TNLP {
Number* grad = eval_gradient ? constraint_cache_->grad.data() : nullptr;

for (const auto& c : problem_->generic_constraints()) {
grad += EvaluateConstraint(*problem_, xvec, (*c.evaluator()),
c.variables(), result, grad);
grad += EvaluateConstraint(*problem_, xvec, c, result, grad);
result += c.evaluator()->num_constraints();
}
for (const auto& c : problem_->lorentz_cone_constraints()) {
grad += EvaluateConstraint(*problem_, xvec, (*c.evaluator()),
c.variables(), result, grad);
grad += EvaluateConstraint(*problem_, xvec, c, result, grad);
result += c.evaluator()->num_constraints();
}
for (const auto& c : problem_->rotated_lorentz_cone_constraints()) {
grad += EvaluateConstraint(*problem_, xvec, (*c.evaluator()),
c.variables(), result, grad);
grad += EvaluateConstraint(*problem_, xvec, c, result, grad);
result += c.evaluator()->num_constraints();
}
for (const auto& c : problem_->linear_constraints()) {
grad += EvaluateConstraint(*problem_, xvec, (*c.evaluator()),
c.variables(), result, grad);
grad += EvaluateConstraint(*problem_, xvec, c, result, grad);
result += c.evaluator()->num_constraints();
}
for (const auto& c : problem_->linear_equality_constraints()) {
grad += EvaluateConstraint(*problem_, xvec, (*c.evaluator()),
c.variables(), result, grad);
grad += EvaluateConstraint(*problem_, xvec, c, result, grad);
result += c.evaluator()->num_constraints();
}
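Note the calling convention these loops rely on: `EvaluateConstraint` returns the number of gradient entries it wrote, so the caller advances the flat `grad` pointer by that count and the `result` pointer by the constraint count. A simplified illustration of that pointer-advancing contract (not Drake's code):

```cpp
#include <cstddef>
#include <iostream>
#include <vector>

// Writes n entries starting at out and returns how many it wrote, mirroring
// (in simplified form) EvaluateConstraint's return-value contract.
std::size_t WriteBlock(double* out, std::size_t n, double value) {
  for (std::size_t i = 0; i < n; ++i) out[i] = value;
  return n;
}

int main() {
  std::vector<double> storage(5, 0.0);
  double* grad = storage.data();
  grad += WriteBlock(grad, 2, 1.0);  // first constraint block: 2 entries
  grad += WriteBlock(grad, 3, 2.0);  // second constraint block: 3 entries
  for (double g : storage) std::cout << g << " ";  // prints: 1 1 2 2 2
  std::cout << "\n";
}
```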
