#define DIM_TO_SOLVER(p, l) BlockSolver< BlockSolverTraits<p, l> >

#define ALLOC_CHOLMOD(s, p, l) \
  if (1) { \
    std::cerr << "# Using CHOLMOD online poseDim " << p << " landMarkDim " << l << " blockordering 1" << std::endl; \
    LinearSolverCholmodOnline< DIM_TO_SOLVER(p, l)::PoseMatrixType >* linearSolver = new LinearSolverCholmodOnline< DIM_TO_SOLVER(p, l)::PoseMatrixType >(); \
    s = new DIM_TO_SOLVER(p, l)(linearSolver); \
  } else ((void)0)

using namespace Eigen;
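// For reference (not part of the original file): expanding ALLOC_CHOLMOD(s, 3, 2) yields,
// besides the diagnostic print,
//
//   LinearSolverCholmodOnline< BlockSolver< BlockSolverTraits<3, 2> >::PoseMatrixType >* linearSolver =
//       new LinearSolverCholmodOnline< BlockSolver< BlockSolverTraits<3, 2> >::PoseMatrixType >();
//   s = new BlockSolver< BlockSolverTraits<3, 2> >(linearSolver);
//
// i.e. an online CHOLMOD linear solver wrapped into a block solver for 3-DoF poses and
// 2-DoF landmarks; createSolver() further below instantiates exactly the (3,2) and (6,3) cases.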
struct VertexBackup
{
  int hessianIndex;
  OptimizableGraph::Vertex* vertex;
  double* hessianData;
  bool operator<(const VertexBackup& other) const
  {
    return hessianIndex < other.hessianIndex;
  }
};
SparseOptimizerIncremental::SparseOptimizerIncremental()
{
  // ... (member initialization elided in this fragment)
  cholmod_start(&_cholmodCommon);

  // setup the ordering strategy: a single method with the natural ordering, no postordering and
  // a simplicial factorization, so the factor keeps the original ordering of the variables
  _cholmodCommon.nmethods = 1;
  _cholmodCommon.method[0].ordering = CHOLMOD_NATURAL;
  _cholmodCommon.postorder = 0;
  _cholmodCommon.supernodal = CHOLMOD_SIMPLICIAL;

  // triplet workspace for the permuted update: 1000x1000, 1024 pre-allocated entries,
  // stype 0 (entries of both triangles allowed), real values
  _permutedUpdate = cholmod_allocate_triplet(1000, 1000, 1024, 0, CHOLMOD_REAL, &_cholmodCommon);
  // ...
}
SparseOptimizerIncremental::~SparseOptimizerIncremental()
{
  delete _permutedUpdateAsSparse;
  _updateMat.clear(true);
  delete _cholmodSparse;
  if (_cholmodFactor) {
    cholmod_free_factor(&_cholmodFactor, &_cholmodCommon);
    _cholmodFactor = 0;
  }
  cholmod_free_triplet(&_permutedUpdate, &_cholmodCommon);
  cholmod_finish(&_cholmodCommon);
}
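// A minimal sketch (not part of this file) of the CHOLMOD workspace lifecycle implemented by the
// constructor/destructor above: every handle allocated against a cholmod_common has to be released
// against the same common object before cholmod_finish(). The function name is made up for
// illustration.
#include <cholmod.h>

static void cholmodLifecycleSketch()
{
  cholmod_common common;
  cholmod_start(&common);                        // initialize the workspace
  cholmod_triplet* update =
      cholmod_allocate_triplet(1000, 1000, 1024, // rows, cols, pre-allocated non-zeros
                               0,                // stype 0: entries of both triangles allowed
                               CHOLMOD_REAL, &common);
  // ... fill and use the triplet ...
  cholmod_free_triplet(&update, &common);        // release in reverse order of allocation
  cholmod_finish(&common);                       // free all workspace held by the common object
}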
int SparseOptimizerIncremental::optimize(int iterations, bool online)
{
  // ...
  // a batch step rebuilds the full linear system, otherwise only the incremental update is applied
  if (! online || batchStep) {
    // ...
    ok = _underlyingSolver->buildStructure();
    // ...
    if (slamDimension == 3) {
      for (size_t i = 0; i < indexMapping().size(); ++i) {
        // ... (handling of the 3-DoF vertices, elided)
      }
    }
    else if (slamDimension == 6) {
      for (size_t i = 0; i < indexMapping().size(); ++i) {
        // ... (handling of the 6-DoF vertices, elided)
      }
    }

    SparseOptimizer::computeActiveErrors();
    // ...
    _underlyingSolver->buildSystem();

    // set up the ordering constraints (cmember) for the blocks of the Hessian
    int numBlocksRequired = _ivMap.size();
    if (_cmember.size() < numBlocksRequired) {
      _cmember.resize(2 * numBlocksRequired);
    }
    memset(_cmember.data(), 0, numBlocksRequired * sizeof(int));
    if (_ivMap.size() > 100) {
      // presumably the most recent vertices and their neighbors are forced to the end of the ordering
      for (size_t i = _ivMap.size() - 20; i < _ivMap.size(); ++i) {
        // ...
        for (HyperGraph::EdgeSet::const_iterator it = eset.begin(); it != eset.end(); ++it) {
          // ...
        }
      }
    }
    // ...

    ok = _underlyingSolver->solve();

    // keep the Cholesky factor of the full system along with the permutation of its columns
    _L = _solverInterface->L();
    if (_perm.size() < (int)_L->n)
      _perm.resize(2 * _L->n);
    int* p = (int*)_L->Perm;
    for (size_t i = 0; i < _L->n; ++i)
      _perm[p[i]] = i; // loop body elided in the fragment; storing the inverse of L->Perm is the presumed intent
    // ...
  }
  else {
    // online step: refresh the right-hand side of the touched vertices and back-substitute on the factor
    for (HyperGraph::VertexSet::iterator it = _touchedVertices.begin(); it != _touchedVertices.end(); ++it) {
      OptimizableGraph::Vertex* v = static_cast<OptimizableGraph::Vertex*>(*it); // elided in the fragment
      int iBase = v->colInHessian();                                             // elided in the fragment
      v->copyB(_underlyingSolver->b() + iBase);
    }
    _solverInterface->solve(_underlyingSolver->x(), _underlyingSolver->b());
  }

  update(_underlyingSolver->x());
  // ...
  computeActiveErrors();
  // ...
  cerr /* ... */
       << "\t edges= " << _activeEdges.size()
       << "\t chi2= " << FIXED(activeChi2())
       << endl;
  // ...
  gnuplotVisualization();
  // ...
}
bool SparseOptimizerIncremental::updateInitialization(HyperGraph::VertexSet& vset, HyperGraph::EdgeSet& eset)
{
  if (batchStep) {
    return SparseOptimizerOnline::updateInitialization(vset, eset);
  }

  for (HyperGraph::VertexSet::iterator it = vset.begin(); it != vset.end(); ++it) {
    // ... (prepare the newly inserted vertices, elided)
  }

  // collect the vertices touched by the new edges
  _touchedVertices.clear();
  for (HyperGraph::EdgeSet::iterator it = eset.begin(); it != eset.end(); ++it) {
    // ... (extract the two vertices v1 / v2 of the edge, elided)
    _touchedVertices.insert(v1);
    // ...
    _touchedVertices.insert(v2);
  }

  // bookkeeping for the newly added vertices and edges
  std::vector<HyperGraph::Vertex*> newVertices;
  newVertices.reserve(vset.size());
  _activeVertices.reserve(_activeVertices.size() + vset.size());
  _activeEdges.reserve(_activeEdges.size() + eset.size());
  for (HyperGraph::EdgeSet::iterator it = eset.begin(); it != eset.end(); ++it)
    _activeEdges.push_back(static_cast<OptimizableGraph::Edge*>(*it));

  // extend the index mapping by the new vertices
  size_t next = _ivMap.size();
  for (HyperGraph::VertexSet::iterator it = vset.begin(); it != vset.end(); ++it) {
    // ... (assign the next hessian index to each non-fixed vertex v, elided)
    newVertices.push_back(v);
    _activeVertices.push_back(v);
  }
  // backup the hessian index and the hessian memory of the touched vertices
#ifdef _MSC_VER
  VertexBackup* backupIdx = new VertexBackup[_touchedVertices.size()];
#else
  VertexBackup backupIdx[_touchedVertices.size()];
#endif
  memset(backupIdx, 0, sizeof(VertexBackup) * _touchedVertices.size());
  int idx = 0;
  for (HyperGraph::VertexSet::iterator it = _touchedVertices.begin(); it != _touchedVertices.end(); ++it) {
    OptimizableGraph::Vertex* v = static_cast<OptimizableGraph::Vertex*>(*it);
    backupIdx[idx].hessianIndex = v->hessianIndex();
    backupIdx[idx].vertex = v;
    backupIdx[idx].hessianData = v->hessianData();
    ++idx;
  }
  // sort by the original hessian index and assign temporary indices for the update matrix
  sort(backupIdx, backupIdx + _touchedVertices.size());
  for (int i = 0; i < idx; ++i) {
    backupIdx[i].vertex->setHessianIndex(i);
  }

  // build the structure of the (small) update matrix
  _updateMat.clear(true);
  _updateMat.rowBlockIndices().clear();
  _updateMat.colBlockIndices().clear();
  _updateMat.blockCols().clear();

  // add the diagonal blocks of the touched vertices and map their hessian memory into _updateMat
  MatrixXd* lastBlock = 0;
  int sizePoses = 0;
  for (int i = 0; i < idx; ++i) {
    OptimizableGraph::Vertex* v = backupIdx[i].vertex;
    sizePoses += v->dimension();
    _updateMat.rowBlockIndices().push_back(sizePoses);
    _updateMat.colBlockIndices().push_back(sizePoses);
    // ...
    int ind = v->hessianIndex();
    MatrixXd* m = _updateMat.block(ind, ind, true);
    v->mapHessianMemory(m->data());
    lastBlock = m;
  }
  // keep the last diagonal block strictly positive definite
  lastBlock->diagonal().array() += 1e-6;
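  // Note (added for clarity, not in the original): the 1e-6 above presumably guards against the
  // most recently added block being (numerically) singular, in which case cholmod_factorize() on
  // the update matrix would report CHOLMOD_NOT_POSDEF (see computeCholeskyUpdate() below). For a
  // symmetric block H, `H.diagonal().array() += 1e-6;` shifts every eigenvalue by 1e-6 and thus
  // keeps the factorization well defined.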
  // add the off-diagonal blocks introduced by the new edges
  for (HyperGraph::EdgeSet::const_iterator it = eset.begin(); it != eset.end(); ++it) {
    // ... (obtain the hessian indices ind1 / ind2 of the two vertices of the edge, elided)
    bool transposedBlock = ind1 > ind2;
    if (transposedBlock) // only the upper triangular blocks are stored
      swap(ind1, ind2);
    MatrixXd* m = _updateMat.block(ind1, ind2, true);
    // ... (map the edge's hessian memory onto the block, elided)
  }

  // linearize the new edges and accumulate their contribution into _updateMat
  for (HyperGraph::EdgeSet::iterator it = eset.begin(); it != eset.end(); ++it) {
    // ...
  }
  for (HyperGraph::EdgeSet::iterator it = eset.begin(); it != eset.end(); ++it) {
    // ...
  }

  // restore the original hessian index and hessian memory of the touched vertices
  for (int i = 0; i < idx; ++i) {
    backupIdx[i].vertex->setHessianIndex(backupIdx[i].hessianIndex);
    if (backupIdx[i].hessianData)
      backupIdx[i].vertex->mapHessianMemory(backupIdx[i].hessianData);
  }

  // update the structure of the underlying solver with the new vertices and edges
  bool solverStatus = _algorithm->updateStructure(newVertices, eset);

  // factorize the update matrix
  bool updateStatus = computeCholeskyUpdate();
  if (! updateStatus) {
    cerr << "Error while computing update" << endl;
  }
  cholmod_sparse* updateAsSparseFactor = cholmod_factor_to_sparse(_cholmodFactor, &_cholmodCommon);

  // convert the factor of the update into triplets, permuted into the column order of the big factor L
  if (updateAsSparseFactor->nzmax > _permutedUpdate->nzmax) {
    cholmod_reallocate_triplet(updateAsSparseFactor->nzmax, _permutedUpdate, &_cholmodCommon);
  }
  _permutedUpdate->nnz = 0;
  _permutedUpdate->nrow = _permutedUpdate->ncol = _L->n;

  int* Ap = (int*)updateAsSparseFactor->p;
  int* Ai = (int*)updateAsSparseFactor->i;
  double* Ax = (double*)updateAsSparseFactor->x;
  int* Bj = (int*)_permutedUpdate->j;
  int* Bi = (int*)_permutedUpdate->i;
  double* Bx = (double*)_permutedUpdate->x;
  for (size_t c = 0; c < updateAsSparseFactor->ncol; ++c) {
    const int& rbeg = Ap[c];
    const int& rend = Ap[c+1];
    int cc = c / slamDimension;
    int coff = c % slamDimension;
    const int& cbase = backupIdx[cc].vertex->colInHessian();
    const int& ccol = _perm(cbase + coff);
    for (int j = rbeg; j < rend; j++) {
      const int& r = Ai[j];
      const double& val = Ax[j];

      int rr = r / slamDimension;
      int roff = r % slamDimension;
      const int& rbase = backupIdx[rr].vertex->colInHessian();

      int row = _perm(rbase + roff);
      int col = ccol;
      if (col > row) // keep the entry in the triangle expected by the factor (elided in the fragment)
        swap(col, row);
      Bi[_permutedUpdate->nnz] = row;
      Bj[_permutedUpdate->nnz] = col;
      Bx[_permutedUpdate->nnz] = val;
      ++_permutedUpdate->nnz;
    }
  }
  cholmod_free_sparse(&updateAsSparseFactor, &_cholmodCommon);

#if 0 // alternative: let CHOLMOD compress the triplets (disabled in favor of the hand-rolled conversion)
  cholmod_sparse* updatePermuted = cholmod_triplet_to_sparse(_permutedUpdate, _permutedUpdate->nnz, &_cholmodCommon);
  _solverInterface->choleskyUpdate(updatePermuted);
  cholmod_free_sparse(&updatePermuted, &_cholmodCommon);
#else
  convertTripletUpdateToSparse();
  _solverInterface->choleskyUpdate(_permutedUpdateAsSparse);
#endif
#ifdef _MSC_VER
  delete[] backupIdx;
#endif
  return solverStatus;
}
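// Sketch of the rank update hidden behind choleskyUpdate() (an assumption, not code from this
// file): the online linear solver presumably folds the columns of the permuted update C into the
// existing factor L via CHOLMOD's Modify module. The helper name is made up for illustration.
static bool applyFactorUpdateSketch(cholmod_sparse* C, cholmod_factor* L, cholmod_common* common)
{
  // update = 1 turns L*L' into L*L' + C*C'; passing 0 instead would downdate (subtract C*C')
  return cholmod_updown(1, C, L, common) != 0;
}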
bool SparseOptimizerIncremental::computeCholeskyUpdate()
{
  if (_cholmodFactor) {
    cholmod_free_factor(&_cholmodFactor, &_cholmodCommon);
    _cholmodFactor = 0;
  }

  const SparseBlockMatrix<MatrixXd>& A = _updateMat;
  size_t m = A.rows();
  size_t n = A.cols();

  if (_cholmodSparse->columnsAllocated < n) {
    // pre-allocate more space if re-allocating
    _cholmodSparse->columnsAllocated = _cholmodSparse->columnsAllocated == 0 ? n : 2 * n;
    delete[] (int*)_cholmodSparse->p;
    _cholmodSparse->p = new int[_cholmodSparse->columnsAllocated + 1];
  }
  size_t nzmax = A.nonZeros();
  if (_cholmodSparse->nzmax < nzmax) {
    // pre-allocate more space if re-allocating
    _cholmodSparse->nzmax = _cholmodSparse->nzmax == 0 ? nzmax : 2 * nzmax;
    delete[] (double*)_cholmodSparse->x;
    delete[] (int*)_cholmodSparse->i;
    _cholmodSparse->i = new int[_cholmodSparse->nzmax];
    _cholmodSparse->x = new double[_cholmodSparse->nzmax];
  }
  _cholmodSparse->ncol = n;
  _cholmodSparse->nrow = m;

  // copy the upper triangle of the block matrix into the CCS arrays
  A.fillCCS((int*)_cholmodSparse->p, (int*)_cholmodSparse->i, (double*)_cholmodSparse->x, true);

  _cholmodFactor = cholmod_analyze(_cholmodSparse, &_cholmodCommon);
  cholmod_factorize(_cholmodSparse, _cholmodFactor, &_cholmodCommon);

  // sanity check: with CHOLMOD_NATURAL ordering the permutation has to be the identity
  int* p = (int*)_cholmodFactor->Perm;
  for (int i = 0; i < (int)n; ++i)
    if (i != p[i])
      cerr << "wrong permutation " << i << " -> " << p[i] << endl;

  if (_cholmodCommon.status == CHOLMOD_NOT_POSDEF) {
    // ... (report the failure, elided)
    return false;
  }

  // convert to a real, simplicial, packed, monotonic LL' factor so that L can be read column-wise;
  // the arguments are (to_xtype, to_ll, to_super, to_packed, to_monotonic, L, Common)
  int change_status = cholmod_change_factor(CHOLMOD_REAL, 1, 0, 1, 1, _cholmodFactor, &_cholmodCommon);
  if (! change_status) {
    return false;
  }

  return true;
}
// allocate the solver that matches the requested name
static OptimizationAlgorithm* createSolver(const std::string& solverName)
{
  Solver* s = 0;

  if (solverName == "fix3_2_cholmod") {
    ALLOC_CHOLMOD(s, 3, 2);
  }
  else if (solverName == "fix6_3_cholmod") {
    ALLOC_CHOLMOD(s, 6, 3);
  }

  OptimizationAlgorithmGaussNewton* gaussNewton = new OptimizationAlgorithmGaussNewton(s);
  return gaussNewton;
}

bool SparseOptimizerIncremental::initSolver(int dimension, int batchEveryN)
{
  slamDimension = dimension;
  if (dimension == 3) {
    // ... (create the fix3_2_cholmod solver and fetch its internal block solver bs and
    //      online linear solver s, elided)
    assert(bs && "Unable to get internal block solver");
    // ...
    _solverInterface = s;
    _underlyingSolver = bs;
  }
  else {
    // ... (same for the 6-DoF case with the fix6_3_cholmod solver, elided)
    assert(bs && "Unable to get internal block solver");
    // ...
    _solverInterface = s;
    _underlyingSolver = bs;
  }
  if (! _solverInterface) {
    cerr << "Error allocating solver. Allocating CHOLMOD solver failed!" << endl;
    return false;
  }
  _solverInterface->cmember = &_cmember;
  _solverInterface->batchEveryN = batchEveryN;
  return true;
}
void SparseOptimizerIncremental::convertTripletUpdateToSparse()
{
  // re-allocate the temporary workspace (one counter per column)
  if (_tripletWorkspace.size() < (int)_permutedUpdate->ncol) {
    _tripletWorkspace.resize(_permutedUpdate->ncol * 2);
  }

  // re-allocate the row indices and values of the CCS matrix
  if (_permutedUpdateAsSparse->nzmax < _permutedUpdate->nzmax) {
    _permutedUpdateAsSparse->nzmax = _permutedUpdate->nzmax;
    delete[] (int*)_permutedUpdateAsSparse->i;
    delete[] (double*)_permutedUpdateAsSparse->x;
    _permutedUpdateAsSparse->x = new double[_permutedUpdateAsSparse->nzmax];
    _permutedUpdateAsSparse->i = new int[_permutedUpdateAsSparse->nzmax];
  }

  // re-allocate the column pointers of the CCS matrix
  if (_permutedUpdateAsSparse->columnsAllocated < _permutedUpdate->ncol) {
    _permutedUpdateAsSparse->columnsAllocated = 2 * _permutedUpdate->ncol;
    delete[] (int*)_permutedUpdateAsSparse->p;
    _permutedUpdateAsSparse->p = new int[_permutedUpdateAsSparse->columnsAllocated + 1];
  }

  _permutedUpdateAsSparse->ncol = _permutedUpdate->ncol;
  _permutedUpdateAsSparse->nrow = _permutedUpdate->nrow;

  int* w = _tripletWorkspace.data();
  memset(w, 0, sizeof(int) * _permutedUpdate->ncol);

  int* Ti = (int*)_permutedUpdate->i;
  int* Tj = (int*)_permutedUpdate->j;
  double* Tx = (double*)_permutedUpdate->x;

  int* Cp = (int*)_permutedUpdateAsSparse->p;
  int* Ci = (int*)_permutedUpdateAsSparse->i;
  double* Cx = (double*)_permutedUpdateAsSparse->x;

  // count the entries in each column (the loop bodies below were elided in the fragment;
  // they follow the standard triplet -> CCS compression)
  for (size_t k = 0; k < _permutedUpdate->nnz; ++k)
    ++w[Tj[k]];

  // compute the column pointers as the cumulative sum of the counts
  int n = _permutedUpdate->ncol;
  int nz = 0;
  for (int i = 0; i < n; i++) {
    Cp[i] = nz;
    nz += w[i];
    w[i] = Cp[i];
  }
  Cp[n] = nz;
  assert((size_t)nz == _permutedUpdate->nnz);

  // scatter the triplets into their columns
  for (size_t k = 0; k < _permutedUpdate->nnz; ++k) {
    int p = w[Tj[k]]++;
    Ci[p] = Ti[k];
    Cx[p] = Tx[k];
  }
}
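// A small self-contained demonstration (not part of this file) of the triplet -> CCS compression
// performed above, with the intermediate arrays written out for a 3x2 example.
#include <cassert>
#include <cstdio>

static void tripletToCcsDemo()
{
  // triplets (row, col, value) of a 3x2 matrix, in arbitrary order
  const int    Ti[] = {0, 2, 1};
  const int    Tj[] = {0, 1, 0};
  const double Tx[] = {1.0, 2.0, 3.0};
  const int nnz = 3, ncol = 2;

  int w[ncol] = {0, 0};              // column counts
  for (int k = 0; k < nnz; ++k)
    ++w[Tj[k]];                      // w = {2, 1}

  int Cp[ncol + 1];                  // column pointers: cumulative sum of the counts
  int nz = 0;
  for (int i = 0; i < ncol; ++i) {
    Cp[i] = nz;
    nz += w[i];
    w[i] = Cp[i];                    // re-use w as the next insert position per column
  }
  Cp[ncol] = nz;                     // Cp = {0, 2, 3}
  assert(nz == nnz);

  int Ci[3]; double Cx[3];
  for (int k = 0; k < nnz; ++k) {    // scatter each triplet into its column
    int p = w[Tj[k]]++;
    Ci[p] = Ti[k];
    Cx[p] = Tx[k];
  }
  // result: Ci = {0, 1, 2}, Cx = {1.0, 3.0, 2.0}
  for (int i = 0; i < ncol; ++i)
    for (int p = Cp[i]; p < Cp[i + 1]; ++p)
      std::printf("A(%d,%d) = %g\n", Ci[p], i, Cx[p]);
}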