Eigen 3.2.0
SparseLU.h
1 // This file is part of Eigen, a lightweight C++ template library
2 // for linear algebra.
3 //
4 // Copyright (C) 2012 Désiré Nuentsa-Wakam <desire.nuentsa_wakam@inria.fr>
5 // Copyright (C) 2012 Gael Guennebaud <gael.guennebaud@inria.fr>
6 //
7 // This Source Code Form is subject to the terms of the Mozilla
8 // Public License v. 2.0. If a copy of the MPL was not distributed
9 // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
10 
11 
12 #ifndef EIGEN_SPARSE_LU_H
13 #define EIGEN_SPARSE_LU_H
14 
15 namespace Eigen {
16 
17 template <typename _MatrixType, typename _OrderingType = COLAMDOrdering<typename _MatrixType::Index> > class SparseLU;
18 template <typename MappedSparseMatrixType> struct SparseLUMatrixLReturnType;
19 template <typename MatrixLType, typename MatrixUType> struct SparseLUMatrixUReturnType;
20 
72 template <typename _MatrixType, typename _OrderingType>
73 class SparseLU : public internal::SparseLUImpl<typename _MatrixType::Scalar, typename _MatrixType::Index>
74 {
75  public:
76  typedef _MatrixType MatrixType;
77  typedef _OrderingType OrderingType;
78  typedef typename MatrixType::Scalar Scalar;
79  typedef typename MatrixType::RealScalar RealScalar;
80  typedef typename MatrixType::Index Index;
81  typedef SparseMatrix<Scalar,ColMajor,Index> NCMatrix;
82  typedef internal::MappedSuperNodalMatrix<Scalar, Index> SCMatrix;
83  typedef Matrix<Scalar,Dynamic,1> ScalarVector;
84  typedef Matrix<Index,Dynamic,1> IndexVector;
85  typedef PermutationMatrix<Dynamic, Dynamic, Index> PermutationType;
86  typedef internal::SparseLUImpl<Scalar, Index> Base;
87 
88  public:
89  SparseLU():m_isInitialized(true),m_lastError(""),m_Ustore(0,0,0,0,0,0),m_symmetricmode(false),m_diagpivotthresh(1.0),m_detPermR(1)
90  {
91  initperfvalues();
92  }
93  SparseLU(const MatrixType& matrix):m_isInitialized(true),m_lastError(""),m_Ustore(0,0,0,0,0,0),m_symmetricmode(false),m_diagpivotthresh(1.0),m_detPermR(1)
94  {
95  initperfvalues();
96  compute(matrix);
97  }
98 
99  ~SparseLU()
100  {
101  // Free all explicit dynamic pointers
102  }
103 
104  void analyzePattern (const MatrixType& matrix);
105  void factorize (const MatrixType& matrix);
106  void simplicialfactorize(const MatrixType& matrix);
107 
112  void compute (const MatrixType& matrix)
113  {
114  // Analyze
115  analyzePattern(matrix);
116  //Factorize
117  factorize(matrix);
118  }
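  // Editorial sketch (not part of the original source): compute() simply chains the two
  // phases shown above. "A" is an assumed user-provided SparseMatrix<double>.
  //
  //   SparseLU<SparseMatrix<double>, COLAMDOrdering<int> > lu;
  //   lu.compute(A);           // one shot: analyzePattern(A) followed by factorize(A)
  //   // ...or explicitly, which keeps the symbolic step separate:
  //   lu.analyzePattern(A);    // fill-reducing ordering and column elimination tree
  //   lu.factorize(A);         // numerical factorization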
119 
120  inline Index rows() const { return m_mat.rows(); }
121  inline Index cols() const { return m_mat.cols(); }
123  void isSymmetric(bool sym)
124  {
125  m_symmetricmode = sym;
126  }
127 
134  SparseLUMatrixLReturnType<SCMatrix> matrixL() const
135  {
136  return SparseLUMatrixLReturnType<SCMatrix>(m_Lstore);
137  }
144  SparseLUMatrixUReturnType<SCMatrix,MappedSparseMatrix<Scalar,ColMajor,Index> > matrixU() const
145  {
146  return SparseLUMatrixUReturnType<SCMatrix, MappedSparseMatrix<Scalar,ColMajor,Index> >(m_Lstore, m_Ustore);
147  }
148 
153  inline const PermutationType& rowsPermutation() const
154  {
155  return m_perm_r;
156  }
161  inline const PermutationType& colsPermutation() const
162  {
163  return m_perm_c;
164  }
166  void setPivotThreshold(const RealScalar& thresh)
167  {
168  m_diagpivotthresh = thresh;
169  }
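  // Editorial note (sketch): following SuperLU-style threshold pivoting, a diagonal entry
  // is accepted as pivot when its magnitude is at least thresh times the largest magnitude
  // in its column; the default thresh of 1.0 set in the constructors corresponds to classic
  // partial pivoting, while smaller values favour the diagonal to limit fill-in. Assuming
  // an existing solver "lu" and matrix "A":
  //
  //   lu.analyzePattern(A);
  //   lu.setPivotThreshold(0.1);   // must be called before factorize() to take effect
  //   lu.factorize(A);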
170 
177  template<typename Rhs>
178  inline const internal::solve_retval<SparseLU, Rhs> solve(const MatrixBase<Rhs>& B) const
179  {
180  eigen_assert(m_factorizationIsOk && "SparseLU is not initialized.");
181  eigen_assert(rows()==B.rows()
182  && "SparseLU::solve(): invalid number of rows of the right hand side matrix B");
183  return internal::solve_retval<SparseLU, Rhs>(*this, B.derived());
184  }
185 
190  template<typename Rhs>
191  inline const internal::sparse_solve_retval<SparseLU, Rhs> solve(const SparseMatrixBase<Rhs>& B) const
192  {
193  eigen_assert(m_factorizationIsOk && "SparseLU is not initialized.");
194  eigen_assert(rows()==B.rows()
195  && "SparseLU::solve(): invalid number of rows of the right hand side matrix B");
196  return internal::sparse_solve_retval<SparseLU, Rhs>(*this, B.derived());
197  }
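  // Editorial sketch of a complete solve using the two overloads above; "A", the dense
  // vector "b" and the sparse matrix "B" are assumed user-provided (Eigen namespace):
  //
  //   SparseLU<SparseMatrix<double> > lu(A);            // the constructor calls compute(A)
  //   if (lu.info() != Success)
  //     std::cerr << lu.lastErrorMessage() << std::endl;
  //   VectorXd x = lu.solve(b);                         // dense right-hand side
  //   SparseMatrix<double> X = lu.solve(B);             // sparse right-hand side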
198 
207  ComputationInfo info() const
208  {
209  eigen_assert(m_isInitialized && "Decomposition is not initialized.");
210  return m_info;
211  }
212 
216  std::string lastErrorMessage() const
217  {
218  return m_lastError;
219  }
220 
221  template<typename Rhs, typename Dest>
222  bool _solve(const MatrixBase<Rhs> &B, MatrixBase<Dest> &_X) const
223  {
224  Dest& X(_X.derived());
225  eigen_assert(m_factorizationIsOk && "The matrix should be factorized first");
226  EIGEN_STATIC_ASSERT((Dest::Flags&RowMajorBit)==0,
227  THIS_METHOD_IS_ONLY_FOR_COLUMN_MAJOR_MATRICES);
228 
229  // Permute the right hand side to form X = Pr*B
230  // on return, X is overwritten by the computed solution
231  X.resize(B.rows(),B.cols());
232  for(Index j = 0; j < B.cols(); ++j)
233  X.col(j) = rowsPermutation() * B.col(j);
234 
235  // Forward substitution with L, then backward substitution with U
236  this->matrixL().solveInPlace(X);
237  this->matrixU().solveInPlace(X);
238 
239  // Permute back the solution
240  for (Index j = 0; j < B.cols(); ++j)
241  X.col(j) = colsPermutation().inverse() * X.col(j);
242 
243  return true;
244  }
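  // Editorial sketch: the same recipe as _solve() above, written from user code with the
  // public accessors, i.e. x = Pc^-1 * U^-1 * L^-1 * Pr * b ("lu" and "b" assumed):
  //
  //   VectorXd x = lu.rowsPermutation() * b;    // x = Pr * b
  //   lu.matrixL().solveInPlace(x);             // forward substitution with L
  //   lu.matrixU().solveInPlace(x);             // backward substitution with U
  //   x = lu.colsPermutation().inverse() * x;   // undo the column permutation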
245 
256  Scalar absDeterminant()
257  {
258  eigen_assert(m_factorizationIsOk && "The matrix should be factorized first.");
259  // Initialize the determinant
260  Scalar det = Scalar(1.);
261  // Note that the diagonal blocks of U are stored in supernodes,
262  // which are available in the L part :)
263  for (Index j = 0; j < this->cols(); ++j)
264  {
265  for (typename SCMatrix::InnerIterator it(m_Lstore, j); it; ++it)
266  {
267  if(it.row() < j) continue;
268  if(it.row() == j)
269  {
270  det *= (std::abs)(it.value());
271  break;
272  }
273  }
274  }
275  return det;
276  }
277 
286  Scalar logAbsDeterminant() const
287  {
288  eigen_assert(m_factorizationIsOk && "The matrix should be factorized first.");
289  Scalar det = Scalar(0.);
290  for (Index j = 0; j < this->cols(); ++j)
291  {
292  for (typename SCMatrix::InnerIterator it(m_Lstore, j); it; ++it)
293  {
294  if(it.row() < j) continue;
295  if(it.row() == j)
296  {
297  det += (std::log)((std::abs)(it.value()));
298  break;
299  }
300  }
301  }
302  return det;
303  }
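  // Editorial note (sketch): logAbsDeterminant() avoids the overflow/underflow that the
  // direct product in absDeterminant() may hit on large matrices; for a well-scaled,
  // already factorized solver "lu" the two are consistent:
  //
  //   double logAbsDet = lu.logAbsDeterminant();
  //   double absDet    = std::exp(logAbsDet);   // equals lu.absDeterminant() up to rounding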
304 
309  Scalar signDeterminant()
310  {
311  eigen_assert(m_factorizationIsOk && "The matrix should be factorized first.");
312  return Scalar(m_detPermR);
313  }
314 
315  protected:
316  // Functions
317  void initperfvalues()
318  {
319  m_perfv.panel_size = 1;
320  m_perfv.relax = 1;
321  m_perfv.maxsuper = 128;
322  m_perfv.rowblk = 16;
323  m_perfv.colblk = 8;
324  m_perfv.fillfactor = 20;
325  }
326 
327  // Variables
328  mutable ComputationInfo m_info;
329  bool m_isInitialized;
330  bool m_factorizationIsOk;
331  bool m_analysisIsOk;
332  std::string m_lastError;
333  NCMatrix m_mat; // The input (permuted) matrix
334  SCMatrix m_Lstore; // The lower triangular matrix (supernodal)
335  MappedSparseMatrix<Scalar,ColMajor,Index> m_Ustore; // The upper triangular matrix
336  PermutationType m_perm_c; // Column permutation
337  PermutationType m_perm_r; // Row permutation
338  IndexVector m_etree; // Column elimination tree
339 
340  typename Base::GlobalLU_t m_glu;
341 
342  // SparseLU options
343  bool m_symmetricmode;
344  // values for performance
345  internal::perfvalues<Index> m_perfv;
346  RealScalar m_diagpivotthresh; // Specifies the threshold used for a diagonal entry to be an acceptable pivot
347  Index m_nnzL, m_nnzU; // Nonzeros in L and U factors
348  Index m_detPermR; // Sign (determinant) of the row permutation matrix
349  private:
350  // Disable copy constructor
351  SparseLU (const SparseLU& );
352 
353 }; // End class SparseLU
354 
355 
356 
357 // Functions needed by the analysis phase
368 template <typename MatrixType, typename OrderingType>
369 void SparseLU<MatrixType, OrderingType>::analyzePattern(const MatrixType& mat)
370 {
371 
372  // TODO: As in SuperLU, it is possible to compute row and column scaling vectors to equilibrate the matrix mat.
373 
374  OrderingType ord;
375  ord(mat,m_perm_c);
376 
377  // Apply the permutation to the columns of the input matrix
378  // First, copy the whole input matrix.
379  m_mat = mat;
380  if (m_perm_c.size()) {
381  m_mat.uncompress(); //NOTE: The effect of this command is only to create the InnerNonzeros pointers. FIXME : This vector is filled but not subsequently used.
382  //Then, permute only the column pointers
383  const Index * outerIndexPtr;
384  if (mat.isCompressed()) outerIndexPtr = mat.outerIndexPtr();
385  else
386  {
387  Index *outerIndexPtr_t = new Index[mat.cols()+1];
388  for(Index i = 0; i <= mat.cols(); i++) outerIndexPtr_t[i] = m_mat.outerIndexPtr()[i];
389  outerIndexPtr = outerIndexPtr_t;
390  }
391  for (Index i = 0; i < mat.cols(); i++)
392  {
393  m_mat.outerIndexPtr()[m_perm_c.indices()(i)] = outerIndexPtr[i];
394  m_mat.innerNonZeroPtr()[m_perm_c.indices()(i)] = outerIndexPtr[i+1] - outerIndexPtr[i];
395  }
396  if(!mat.isCompressed()) delete[] outerIndexPtr;
397  }
398  // Compute the column elimination tree of the permuted matrix
399  IndexVector firstRowElt;
400  internal::coletree(m_mat, m_etree,firstRowElt);
401 
402  // In symmetric mode, do not do postorder here
403  if (!m_symmetricmode) {
404  IndexVector post, iwork;
405  // Post order etree
406  internal::treePostorder(m_mat.cols(), m_etree, post);
407 
408 
409  // Renumber etree in postorder
410  Index m = m_mat.cols();
411  iwork.resize(m+1);
412  for (Index i = 0; i < m; ++i) iwork(post(i)) = post(m_etree(i));
413  m_etree = iwork;
414 
415  // Postmultiply A*Pc by post, i.e. reorder the matrix according to the postorder of the etree
416  PermutationType post_perm(m);
417  for (Index i = 0; i < m; i++)
418  post_perm.indices()(i) = post(i);
419 
420  // Combine the two permutations: postorder the permutation for future use
421  if(m_perm_c.size()) {
422  m_perm_c = post_perm * m_perm_c;
423  }
424 
425  } // end postordering
426 
427  m_analysisIsOk = true;
428 }
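// Editorial sketch: analyzePattern() uses only the sparsity pattern (ordering, elimination
// tree, postordering), so its result can be reused to factorize several matrices sharing
// the same nonzero structure ("A0" and "A1" are assumed to have identical patterns):
//
//   SparseLU<SparseMatrix<double> > lu;
//   lu.analyzePattern(A0);   // symbolic analysis, done once
//   lu.factorize(A0);        // numerical factorization of A0
//   lu.factorize(A1);        // reuses the analysis for A1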
429 
430 // Functions needed by the numerical factorization phase
431 
432 
451 template <typename MatrixType, typename OrderingType>
452 void SparseLU<MatrixType, OrderingType>::factorize(const MatrixType& matrix)
453 {
454  using internal::emptyIdxLU;
455  eigen_assert(m_analysisIsOk && "analyzePattern() should be called first");
456  eigen_assert((matrix.rows() == matrix.cols()) && "Only for square matrices");
457 
458  typedef typename IndexVector::Scalar Index;
459 
460 
461  // Apply the column permutation computed in analyzePattern()
462  // m_mat = matrix * m_perm_c.inverse();
463  m_mat = matrix;
464  if (m_perm_c.size())
465  {
466  m_mat.uncompress(); //NOTE: The effect of this command is only to create the InnerNonzeros pointers.
467  //Then, permute only the column pointers
468  const Index * outerIndexPtr;
469  if (matrix.isCompressed()) outerIndexPtr = matrix.outerIndexPtr();
470  else
471  {
472  Index* outerIndexPtr_t = new Index[matrix.cols()+1];
473  for(Index i = 0; i <= matrix.cols(); i++) outerIndexPtr_t[i] = m_mat.outerIndexPtr()[i];
474  outerIndexPtr = outerIndexPtr_t;
475  }
476  for (Index i = 0; i < matrix.cols(); i++)
477  {
478  m_mat.outerIndexPtr()[m_perm_c.indices()(i)] = outerIndexPtr[i];
479  m_mat.innerNonZeroPtr()[m_perm_c.indices()(i)] = outerIndexPtr[i+1] - outerIndexPtr[i];
480  }
481  if(!matrix.isCompressed()) delete[] outerIndexPtr;
482  }
483  else
484  { //FIXME This should not be needed if the empty permutation is handled transparently
485  m_perm_c.resize(matrix.cols());
486  for(Index i = 0; i < matrix.cols(); ++i) m_perm_c.indices()(i) = i;
487  }
488 
489  Index m = m_mat.rows();
490  Index n = m_mat.cols();
491  Index nnz = m_mat.nonZeros();
492  Index maxpanel = m_perfv.panel_size * m;
493  // Allocate working storage common to the factor routines
494  Index lwork = 0;
495  Index info = Base::memInit(m, n, nnz, lwork, m_perfv.fillfactor, m_perfv.panel_size, m_glu);
496  if (info)
497  {
498  m_lastError = "UNABLE TO ALLOCATE WORKING MEMORY\n\n" ;
499  m_factorizationIsOk = false;
500  return ;
501  }
502 
503  // Set up pointers for integer working arrays
504  IndexVector segrep(m); segrep.setZero();
505  IndexVector parent(m); parent.setZero();
506  IndexVector xplore(m); xplore.setZero();
507  IndexVector repfnz(maxpanel);
508  IndexVector panel_lsub(maxpanel);
509  IndexVector xprune(n); xprune.setZero();
510  IndexVector marker(m*internal::LUNoMarker); marker.setZero();
511 
512  repfnz.setConstant(-1);
513  panel_lsub.setConstant(-1);
514 
515  // Set up pointers for scalar working arrays
516  ScalarVector dense;
517  dense.setZero(maxpanel);
518  ScalarVector tempv;
519  tempv.setZero(internal::LUnumTempV(m, m_perfv.panel_size, m_perfv.maxsuper, /*m_perfv.rowblk*/m) );
520 
521  // Compute the inverse of perm_c
522  PermutationType iperm_c(m_perm_c.inverse());
523 
524  // Identify initial relaxed snodes
525  IndexVector relax_end(n);
526  if ( m_symmetricmode == true )
527  Base::heap_relax_snode(n, m_etree, m_perfv.relax, marker, relax_end);
528  else
529  Base::relax_snode(n, m_etree, m_perfv.relax, marker, relax_end);
530 
531 
532  m_perm_r.resize(m);
533  m_perm_r.indices().setConstant(-1);
534  marker.setConstant(-1);
535  m_detPermR = 1; // Record the determinant of the row permutation
536 
537  m_glu.supno(0) = emptyIdxLU; m_glu.xsup.setConstant(0);
538  m_glu.xsup(0) = m_glu.xlsub(0) = m_glu.xusub(0) = m_glu.xlusup(0) = Index(0);
539 
540  // Work on one 'panel' at a time. A panel is one of the following:
541  // (a) a relaxed supernode at the bottom of the etree, or
542  // (b) panel_size contiguous columns, where panel_size is defined by the user
543  Index jcol;
544  IndexVector panel_histo(n);
545  Index pivrow; // Pivotal row number in the original row matrix
546  Index nseg1; // Number of segments in U-column above panel row jcol
547  Index nseg; // Number of segments in each U-column
548  Index irep;
549  Index i, k, jj;
550  for (jcol = 0; jcol < n; )
551  {
552  // Adjust panel size so that a panel won't overlap with the next relaxed snode.
553  Index panel_size = m_perfv.panel_size; // upper bound on panel width
554  for (k = jcol + 1; k < (std::min)(jcol+panel_size, n); k++)
555  {
556  if (relax_end(k) != emptyIdxLU)
557  {
558  panel_size = k - jcol;
559  break;
560  }
561  }
562  if (k == n)
563  panel_size = n - jcol;
564 
565  // Symbolic outer factorization on a panel of columns
566  Base::panel_dfs(m, panel_size, jcol, m_mat, m_perm_r.indices(), nseg1, dense, panel_lsub, segrep, repfnz, xprune, marker, parent, xplore, m_glu);
567 
568  // Numeric sup-panel updates in topological order
569  Base::panel_bmod(m, panel_size, jcol, nseg1, dense, tempv, segrep, repfnz, m_glu);
570 
571  // Sparse LU within the panel, and below the panel diagonal
572  for ( jj = jcol; jj< jcol + panel_size; jj++)
573  {
574  k = (jj - jcol) * m; // Column index for w-wide arrays
575 
576  nseg = nseg1; // begin after all the panel segments
577  //Depth-first-search for the current column
578  VectorBlock<IndexVector> panel_lsubk(panel_lsub, k, m);
579  VectorBlock<IndexVector> repfnz_k(repfnz, k, m);
580  info = Base::column_dfs(m, jj, m_perm_r.indices(), m_perfv.maxsuper, nseg, panel_lsubk, segrep, repfnz_k, xprune, marker, parent, xplore, m_glu);
581  if ( info )
582  {
583  m_lastError = "UNABLE TO EXPAND MEMORY IN COLUMN_DFS() ";
584  m_info = NumericalIssue;
585  m_factorizationIsOk = false;
586  return;
587  }
588  // Numeric updates to this column
589  VectorBlock<ScalarVector> dense_k(dense, k, m);
590  VectorBlock<IndexVector> segrep_k(segrep, nseg1, m-nseg1);
591  info = Base::column_bmod(jj, (nseg - nseg1), dense_k, tempv, segrep_k, repfnz_k, jcol, m_glu);
592  if ( info )
593  {
594  m_lastError = "UNABLE TO EXPAND MEMORY IN COLUMN_BMOD() ";
595  m_info = NumericalIssue;
596  m_factorizationIsOk = false;
597  return;
598  }
599 
600  // Copy the U-segments to ucol(*)
601  info = Base::copy_to_ucol(jj, nseg, segrep, repfnz_k ,m_perm_r.indices(), dense_k, m_glu);
602  if ( info )
603  {
604  m_lastError = "UNABLE TO EXPAND MEMORY IN COPY_TO_UCOL() ";
605  m_info = NumericalIssue;
606  m_factorizationIsOk = false;
607  return;
608  }
609 
610  // Form the L-segment
611  info = Base::pivotL(jj, m_diagpivotthresh, m_perm_r.indices(), iperm_c.indices(), pivrow, m_glu);
612  if ( info )
613  {
614  m_lastError = "THE MATRIX IS STRUCTURALLY SINGULAR ... ZERO COLUMN AT ";
615  std::ostringstream returnInfo;
616  returnInfo << info;
617  m_lastError += returnInfo.str();
618  m_info = NumericalIssue;
619  m_factorizationIsOk = false;
620  return;
621  }
622 
623  // Update the determinant of the row permutation matrix
624  if (pivrow != jj) m_detPermR *= -1;
625 
626  // Prune columns (0:jj-1) using column jj
627  Base::pruneL(jj, m_perm_r.indices(), pivrow, nseg, segrep, repfnz_k, xprune, m_glu);
628 
629  // Reset repfnz for this column
630  for (i = 0; i < nseg; i++)
631  {
632  irep = segrep(i);
633  repfnz_k(irep) = emptyIdxLU;
634  }
635  } // end SparseLU within the panel
636  jcol += panel_size; // Move to the next panel
637  } // end for -- end elimination
638 
639  // Count the number of nonzeros in factors
640  Base::countnz(n, m_nnzL, m_nnzU, m_glu);
641  // Apply permutation to the L subscripts
642  Base::fixupL(n, m_perm_r.indices(), m_glu);
643 
644  // Create supernode matrix L
645  m_Lstore.setInfos(m, n, m_glu.lusup, m_glu.xlusup, m_glu.lsub, m_glu.xlsub, m_glu.supno, m_glu.xsup);
646  // Create the column major upper sparse matrix U;
647  new (&m_Ustore) MappedSparseMatrix<Scalar, ColMajor, Index> ( m, n, m_nnzU, m_glu.xusub.data(), m_glu.usub.data(), m_glu.ucol.data() );
648 
649  m_info = Success;
650  m_factorizationIsOk = true;
651 }
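// Editorial sketch of error handling around factorize(): memory and structural-singularity
// problems are reported through info() and lastErrorMessage(), not by throwing ("A" assumed):
//
//   SparseLU<SparseMatrix<double> > lu;
//   lu.analyzePattern(A);
//   lu.factorize(A);
//   if (lu.info() != Success)
//     std::cerr << "factorization failed: " << lu.lastErrorMessage() << std::endl;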
652 
653 template<typename MappedSupernodalType>
654 struct SparseLUMatrixLReturnType : internal::no_assignment_operator
655 {
656  typedef typename MappedSupernodalType::Index Index;
657  typedef typename MappedSupernodalType::Scalar Scalar;
658  SparseLUMatrixLReturnType(const MappedSupernodalType& mapL) : m_mapL(mapL)
659  { }
660  Index rows() { return m_mapL.rows(); }
661  Index cols() { return m_mapL.cols(); }
662  template<typename Dest>
663  void solveInPlace( MatrixBase<Dest> &X) const
664  {
665  m_mapL.solveInPlace(X);
666  }
667  const MappedSupernodalType& m_mapL;
668 };
669 
670 template<typename MatrixLType, typename MatrixUType>
671 struct SparseLUMatrixUReturnType : internal::no_assignment_operator
672 {
673  typedef typename MatrixLType::Index Index;
674  typedef typename MatrixLType::Scalar Scalar;
675  SparseLUMatrixUReturnType(const MatrixLType& mapL, const MatrixUType& mapU)
676  : m_mapL(mapL),m_mapU(mapU)
677  { }
678  Index rows() { return m_mapL.rows(); }
679  Index cols() { return m_mapL.cols(); }
680 
681  template<typename Dest> void solveInPlace(MatrixBase<Dest> &X) const
682  {
683  Index nrhs = X.cols();
684  Index n = X.rows();
685  // Backward solve with U
686  for (Index k = m_mapL.nsuper(); k >= 0; k--)
687  {
688  Index fsupc = m_mapL.supToCol()[k];
689  Index lda = m_mapL.colIndexPtr()[fsupc+1] - m_mapL.colIndexPtr()[fsupc]; // leading dimension
690  Index nsupc = m_mapL.supToCol()[k+1] - fsupc;
691  Index luptr = m_mapL.colIndexPtr()[fsupc];
692 
693  if (nsupc == 1)
694  {
695  for (Index j = 0; j < nrhs; j++)
696  {
697  X(fsupc, j) /= m_mapL.valuePtr()[luptr];
698  }
699  }
700  else
701  {
702  Map<const Matrix<Scalar,Dynamic,Dynamic>, 0, OuterStride<> > A( &(m_mapL.valuePtr()[luptr]), nsupc, nsupc, OuterStride<>(lda) );
703  Map< Matrix<Scalar,Dynamic,Dynamic>, 0, OuterStride<> > U (&(X(fsupc,0)), nsupc, nrhs, OuterStride<>(n) );
704  U = A.template triangularView<Upper>().solve(U);
705  }
706 
707  for (Index j = 0; j < nrhs; ++j)
708  {
709  for (Index jcol = fsupc; jcol < fsupc + nsupc; jcol++)
710  {
711  typename MatrixUType::InnerIterator it(m_mapU, jcol);
712  for ( ; it; ++it)
713  {
714  Index irow = it.index();
715  X(irow, j) -= X(jcol, j) * it.value();
716  }
717  }
718  }
719  } // End For U-solve
720  }
721  const MatrixLType& m_mapL;
722  const MatrixUType& m_mapU;
723 };
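// Editorial sketch (hypothetical, dense analogue): the U-solve above is a blocked backward
// substitution, solving the dense triangular diagonal block of each supernode and then
// propagating the result through the remaining U columns. Its dense counterpart:
//
//   #include <Eigen/Dense>
//   #include <algorithm>
//   using namespace Eigen;
//
//   void blockedUpperSolve(const MatrixXd& U, VectorXd& x, int bs)
//   {
//     const int n = U.rows();
//     for (int end = n; end > 0; end -= bs)
//     {
//       const int begin = std::max(0, end - bs);  // diagonal block spans rows/cols [begin, end)
//       const int m = end - begin;
//       // solve the dense triangular diagonal block in place
//       U.block(begin, begin, m, m).triangularView<Upper>().solveInPlace(x.segment(begin, m));
//       // subtract its contribution from the equations above the block
//       x.head(begin) -= U.block(0, begin, begin, m) * x.segment(begin, m);
//     }
//   }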
724 
725 namespace internal {
726 
727 template<typename _MatrixType, typename Derived, typename Rhs>
728 struct solve_retval<SparseLU<_MatrixType,Derived>, Rhs>
729  : solve_retval_base<SparseLU<_MatrixType,Derived>, Rhs>
730 {
731  typedef SparseLU<_MatrixType,Derived> Dec;
732  EIGEN_MAKE_SOLVE_HELPERS(Dec,Rhs)
733 
734  template<typename Dest> void evalTo(Dest& dst) const
735  {
736  dec()._solve(rhs(),dst);
737  }
738 };
739 
740 template<typename _MatrixType, typename Derived, typename Rhs>
741 struct sparse_solve_retval<SparseLU<_MatrixType,Derived>, Rhs>
742  : sparse_solve_retval_base<SparseLU<_MatrixType,Derived>, Rhs>
743 {
744  typedef SparseLU<_MatrixType,Derived> Dec;
745  EIGEN_MAKE_SPARSE_SOLVE_HELPERS(Dec,Rhs)
746 
747  template<typename Dest> void evalTo(Dest& dst) const
748  {
749  this->defaultEvalTo(dst);
750  }
751 };
752 } // end namespace internal
753 
754 } // End namespace Eigen
755 
756 #endif