From 5e986eb834732c39bd154cb099e75f8a527e9fd6 Mon Sep 17 00:00:00 2001
From: EnricoMingo
Date: Tue, 13 Feb 2024 15:23:25 +0100
Subject: [PATCH] Small optimization in MinEffort. Tested and working

---
 .../OpenSoT/tasks/velocity/MinimumEffort.h |  2 ++
 src/tasks/velocity/MinimumEffort.cpp       | 26 +++++++++----------
 2 files changed, 15 insertions(+), 13 deletions(-)

diff --git a/include/OpenSoT/tasks/velocity/MinimumEffort.h b/include/OpenSoT/tasks/velocity/MinimumEffort.h
index 821b7fb6..129c6e1e 100644
--- a/include/OpenSoT/tasks/velocity/MinimumEffort.h
+++ b/include/OpenSoT/tasks/velocity/MinimumEffort.h
@@ -92,6 +92,8 @@
     ComputeGTauGradient _gTauGradientWorker;
 
     double _step;
 
+    Eigen::VectorXd _gradient;
+    Eigen::VectorXd _deltas;
 
 public:
diff --git a/src/tasks/velocity/MinimumEffort.cpp b/src/tasks/velocity/MinimumEffort.cpp
index b8a818e8..4f3ad275 100644
--- a/src/tasks/velocity/MinimumEffort.cpp
+++ b/src/tasks/velocity/MinimumEffort.cpp
@@ -36,6 +36,9 @@ MinimumEffort::MinimumEffort(const XBot::ModelInterface& robot_model, const doub
     _A.resize(_x_size, _x_size);
     _A.setIdentity(_x_size, _x_size);
 
+    _gradient.resize(_model.getNv());
+    _deltas.resize(_model.getNv());
+
     /* first update. Setting desired pose equal to the actual pose */
     this->_update(Eigen::VectorXd(0));
 }
@@ -50,28 +53,25 @@
 void MinimumEffort::_update(const Eigen::VectorXd &x)
 {
     /************************* COMPUTING TASK *****************************/
-
-    Eigen::VectorXd gradient(_model.getNv());
-    gradient.setZero();
-    Eigen::VectorXd deltas(_model.getNv());
-    deltas.setZero();
+    _gradient.setZero();
+    _deltas.setZero();
 
-    for(unsigned int i = 0; i < gradient.size(); ++i)
+    for(unsigned int i = 0; i < _gradient.size(); ++i)
     {
         if(this->getActiveJointsMask()[i])
         {
-            deltas[i] = _step;
-            double fun_a = _gTauGradientWorker.compute(_model.sum(_q, deltas));
-            double fun_b = _gTauGradientWorker.compute(_model.sum(_q, -deltas));
+            _deltas[i] = _step;
+            double fun_a = _gTauGradientWorker.compute(_model.sum(_q, _deltas));
+            double fun_b = _gTauGradientWorker.compute(_model.sum(_q, -_deltas));
 
-            gradient[i] = (fun_a - fun_b)/(2.0*_step);
-            deltas[i] = 0.0;
+            _gradient[i] = (fun_a - fun_b)/(2.0*_step);
+            _deltas[i] = 0.0;
         }
         else
-            gradient[i] = 0.0;
+            _gradient[i] = 0.0;
     }
 
-    _b = -1.0 * _lambda * gradient;
+    _b = -1.0 * _lambda * _gradient;
 
     /**********************************************************************/
 }
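
Note: the change hoists the two `Eigen::VectorXd` temporaries (`gradient`, `deltas`)
out of `_update()` into class members allocated once in the constructor, so the
central finite-difference loop runs without per-call heap allocations -- a common
pattern for code called from a real-time control loop. Below is a minimal,
self-contained sketch of that pattern, independent of OpenSoT. The names
`FiniteDiffGradient` and `cost` are hypothetical, and the sketch perturbs in plain
Euclidean coordinates, whereas the patched code steps on the configuration
manifold via `_model.sum(_q, _deltas)` and skips joints masked out by
`getActiveJointsMask()`.

    // Sketch: preallocate work buffers once, reuse them in a central
    // finite-difference gradient loop (assumed/simplified, not OpenSoT API).
    #include <Eigen/Dense>
    #include <iostream>

    class FiniteDiffGradient
    {
    public:
        FiniteDiffGradient(int n, double step)
            : _step(step)
        {
            // Allocate once, as the patch does with _gradient and _deltas;
            // update() is then allocation-free.
            _gradient.resize(n);
            _deltas.resize(n);
        }

        // f is the scalar cost; x is the point where the gradient is taken.
        template <typename F>
        const Eigen::VectorXd& update(const F& f, const Eigen::VectorXd& x)
        {
            _gradient.setZero();
            _deltas.setZero();

            for (int i = 0; i < _gradient.size(); ++i)
            {
                _deltas[i] = _step;
                const double fun_a = f(x + _deltas);   // forward sample
                const double fun_b = f(x - _deltas);   // backward sample

                // Central difference: O(step^2)-accurate estimate of df/dx_i.
                _gradient[i] = (fun_a - fun_b) / (2.0 * _step);
                _deltas[i] = 0.0;                      // reset for next index
            }
            return _gradient;
        }

    private:
        double _step;
        Eigen::VectorXd _gradient;  // reused output buffer
        Eigen::VectorXd _deltas;    // reused perturbation buffer
    };

    int main()
    {
        FiniteDiffGradient fd(3, 1e-3);
        auto cost = [](const Eigen::VectorXd& q) { return q.squaredNorm(); };

        Eigen::VectorXd q(3);
        q << 1.0, -2.0, 0.5;

        // Gradient of |q|^2 is 2q, so this should print roughly (2, -4, 1).
        std::cout << fd.update(cost, q).transpose() << std::endl;
        return 0;
    }

The central difference (fun_a - fun_b)/(2*step) used here and in the patch is
accurate to second order in the step size, at the cost of two evaluations of the
cost function (here the gravity-torque effort) per active joint.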