| // This file is part of Eigen, a lightweight C++ template library |
| // for linear algebra. |
| // |
| // Copyright (C) 2012 Désiré Nuentsa-Wakam <desire.nuentsa_wakam@inria.fr> |
| // Copyright (C) 2014 Gael Guennebaud <gael.guennebaud@inria.fr> |
| // |
| // This Source Code Form is subject to the terms of the Mozilla |
| // Public License v. 2.0. If a copy of the MPL was not distributed |
| // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. |
| |
| #ifndef EIGEN_INCOMPLETE_LUT_H |
| #define EIGEN_INCOMPLETE_LUT_H |
| |
| // IWYU pragma: private |
| #include "./InternalHeaderCheck.h" |
| |
| namespace Eigen { |
| |
| namespace internal { |
| |
| /** \internal |
| * Compute a quick-sort split of a vector |
| * On output, the vector row is permuted such that its elements satisfy |
| * abs(row(i)) >= abs(row(ncut)) if i<ncut |
| * abs(row(i)) <= abs(row(ncut)) if i>ncut |
| * \param row The vector of values |
| * \param ind The array of index for the elements in @p row |
| * \param ncut The number of largest elements to keep |
| **/ |
| template <typename VectorV, typename VectorI> |
| Index QuickSplit(VectorV& row, VectorI& ind, Index ncut) { |
| typedef typename VectorV::RealScalar RealScalar; |
| using std::abs; |
| using std::swap; |
| Index mid; |
| Index n = row.size(); /* length of the vector */ |
| Index first, last; |
| |
| ncut--; /* to fit the zero-based indices */ |
| first = 0; |
| last = n - 1; |
| if (ncut < first || ncut > last) return 0; |
| |
| do { |
| mid = first; |
| RealScalar abskey = abs(row(mid)); |
| for (Index j = first + 1; j <= last; j++) { |
| if (abs(row(j)) > abskey) { |
| ++mid; |
| swap(row(mid), row(j)); |
| swap(ind(mid), ind(j)); |
| } |
| } |
| /* Interchange for the pivot element */ |
| swap(row(mid), row(first)); |
| swap(ind(mid), ind(first)); |
| |
| if (mid > ncut) |
| last = mid - 1; |
| else if (mid < ncut) |
| first = mid + 1; |
| } while (mid != ncut); |
| |
| return 0; /* mid is equal to ncut */ |
| } |
| |
| } // end namespace internal |
| |
| /** \ingroup IterativeLinearSolvers_Module |
| * \class IncompleteLUT |
| * \brief Incomplete LU factorization with dual-threshold strategy |
| * |
| * \implsparsesolverconcept |
| * |
| * During the numerical factorization, two dropping rules are used : |
| * 1) any element whose magnitude is less than some tolerance is dropped. |
| * This tolerance is obtained by multiplying the input tolerance @p droptol |
| * by the average magnitude of all the original elements in the current row. |
| * 2) After the elimination of the row, only the @p fill largest elements in |
| * the L part and the @p fill largest elements in the U part are kept |
| * (in addition to the diagonal element ). Note that @p fill is computed from |
 * the input parameter @p fillfactor which is used as a ratio to control the fill-in
| * relatively to the initial number of nonzero elements. |
| * |
| * The two extreme cases are when @p droptol=0 (to keep all the @p fill*2 largest elements) |
 * and when @p fill=n/2 with @p droptol different from zero.
| * |
| * References : Yousef Saad, ILUT: A dual threshold incomplete LU factorization, |
| * Numerical Linear Algebra with Applications, 1(4), pp 387-402, 1994. |
| * |
| * NOTE : The following implementation is derived from the ILUT implementation |
| * in the SPARSKIT package, Copyright (C) 2005, the Regents of the University of Minnesota |
| * released under the terms of the GNU LGPL: |
| * http://www-users.cs.umn.edu/~saad/software/SPARSKIT/README |
| * However, Yousef Saad gave us permission to relicense his ILUT code to MPL2. |
| * See the Eigen mailing list archive, thread: ILUT, date: July 8, 2012: |
| * http://listengine.tuxfamily.org/lists.tuxfamily.org/eigen/2012/07/msg00064.html |
| * alternatively, on GMANE: |
| * http://comments.gmane.org/gmane.comp.lib.eigen/3302 |
| */ |
template <typename Scalar_, typename StorageIndex_ = int>
class IncompleteLUT : public SparseSolverBase<IncompleteLUT<Scalar_, StorageIndex_> > {
 protected:
  typedef SparseSolverBase<IncompleteLUT> Base;
  using Base::m_isInitialized;

 public:
  typedef Scalar_ Scalar;
  typedef StorageIndex_ StorageIndex;
  typedef typename NumTraits<Scalar>::Real RealScalar;
  typedef Matrix<Scalar, Dynamic, 1> Vector;
  typedef Matrix<StorageIndex, Dynamic, 1> VectorI;
  // Both factors are packed into one row-major sparse matrix: the strictly
  // lower part holds L (its unit diagonal is implicit), the upper part
  // including the diagonal holds U.
  typedef SparseMatrix<Scalar, RowMajor, StorageIndex> FactorType;

  enum { ColsAtCompileTime = Dynamic, MaxColsAtCompileTime = Dynamic };

 public:
  /** Default constructor: sets the default parameters (droptol equal to
   * NumTraits<Scalar>::dummy_precision(), fillfactor 10); compute() must
   * be called before the preconditioner is usable. */
  IncompleteLUT()
      : m_droptol(NumTraits<Scalar>::dummy_precision()),
        m_fillfactor(10),
        m_analysisIsOk(false),
        m_factorizationIsOk(false) {}

  /** Constructor that immediately computes the incomplete factorization of
   * \a mat with dropping tolerance \a droptol and fill factor \a fillfactor
   * (which must be nonzero). */
  template <typename MatrixType>
  explicit IncompleteLUT(const MatrixType& mat, const RealScalar& droptol = NumTraits<Scalar>::dummy_precision(),
                         int fillfactor = 10)
      : m_droptol(droptol), m_fillfactor(fillfactor), m_analysisIsOk(false), m_factorizationIsOk(false) {
    eigen_assert(fillfactor != 0);
    compute(mat);
  }

  /** \brief Extraction Method for L-Factor */
  const FactorType matrixL() const;

  /** \brief Extraction Method for U-Factor */
  const FactorType matrixU() const;

  constexpr Index rows() const noexcept { return m_lu.rows(); }

  constexpr Index cols() const noexcept { return m_lu.cols(); }

  /** \brief Reports whether previous computation was successful.
   *
   * \returns \c Success if computation was successful,
   *          \c NumericalIssue if a zero pivot was encountered during
   *          factorization (the resulting preconditioner is unlikely to be
   *          usable; the input matrix typically has zero diagonal entries
   *          that cannot be moved by a static row permutation, e.g. it is
   *          structurally singular).
   */
  ComputationInfo info() const {
    eigen_assert(m_isInitialized && "IncompleteLUT is not initialized.");
    return m_info;
  }

  /** \brief Symbolic phase: computes the static row-matching permutation and
   * the fill-reducing ordering from the sparsity pattern of \a amat. */
  template <typename MatrixType>
  void analyzePattern(const MatrixType& amat);

  /** \brief Numerical phase: computes the incomplete factors of \a amat;
   * analyzePattern() must have been called first. */
  template <typename MatrixType>
  void factorize(const MatrixType& amat);

  /**
   * Compute an incomplete LU factorization with dual threshold on the matrix mat
   * No partial pivoting is done in this version. A static row permutation
   * (maximum bipartite matching) is computed in \c analyzePattern so that
   * the permuted matrix has a structurally nonzero diagonal whenever one
   * exists; without it, the lack of pivoting makes ILUT silently produce
   * a useless preconditioner on matrices with zero diagonal entries.
   *
   **/
  template <typename MatrixType>
  IncompleteLUT& compute(const MatrixType& amat) {
    analyzePattern(amat);
    factorize(amat);
    return *this;
  }

  void setDroptol(const RealScalar& droptol);
  void setFillfactor(int fillfactor);

  /** \internal Apply the preconditioner: reorder \a b, run the two triangular
   * solves against the packed factors, and map the result back. */
  template <typename Rhs, typename Dest>
  void _solve_impl(const Rhs& b, Dest& x) const {
    x = m_PinvPr * b;  // single cached permutation, equals m_Pinv * m_Pr
    x = m_lu.template triangularView<UnitLower>().solve(x);
    x = m_lu.template triangularView<Upper>().solve(x);
    x = m_P * x;  // map the solution back to the caller's ordering
  }

 protected:
  /** keeps off-diagonal entries; drops diagonal entries */
  struct keep_diag {
    inline bool operator()(const Index& row, const Index& col, const Scalar&) const { return row != col; }
  };

  /** \internal Compute the static row permutation m_Pr from a pattern-only
   * maximum bipartite matching; returns the number of matched columns. */
  template <typename MatrixType>
  Index computeRowMatching(const MatrixType& amat);

 protected:
  FactorType m_lu;       // Packed L (strict lower, unit diagonal implicit) and U (upper) factors
  RealScalar m_droptol;  // Relative dropping tolerance
  int m_fillfactor;      // Controls the per-row fill-in budget (see class doc)
  bool m_analysisIsOk;
  bool m_factorizationIsOk;
  ComputationInfo m_info;
  PermutationMatrix<Dynamic, Dynamic, StorageIndex> m_P;       // Fill-reducing permutation
  PermutationMatrix<Dynamic, Dynamic, StorageIndex> m_Pinv;    // Inverse permutation
  PermutationMatrix<Dynamic, Dynamic, StorageIndex> m_Pr;      // Static row permutation (matching-based)
  PermutationMatrix<Dynamic, Dynamic, StorageIndex> m_PinvPr;  // Cached composition m_Pinv * m_Pr for solve
};
| |
| /** |
| * Set control parameter droptol |
| * \param droptol Drop any element whose magnitude is less than this tolerance |
| **/ |
| template <typename Scalar, typename StorageIndex> |
| void IncompleteLUT<Scalar, StorageIndex>::setDroptol(const RealScalar& droptol) { |
| this->m_droptol = droptol; |
| } |
| |
| /** |
| * Set control parameter fillfactor |
| * \param fillfactor This is used to compute the number @p fill_in of largest elements to keep on each row. |
| **/ |
| template <typename Scalar, typename StorageIndex> |
| void IncompleteLUT<Scalar, StorageIndex>::setFillfactor(int fillfactor) { |
| this->m_fillfactor = fillfactor; |
| } |
| |
/**
 * get L-Factor
 * \return L-Factor is a matrix containing the lower triangular part of the sparse matrix. All elements of the matrix
 * above the main diagonal are zero. The diagonal of L is implicitly unit
 * (the stored diagonal of the packed factor belongs to U).
 **/
template <typename Scalar, typename StorageIndex>
const typename IncompleteLUT<Scalar, StorageIndex>::FactorType IncompleteLUT<Scalar, StorageIndex>::matrixL() const {
  eigen_assert(m_factorizationIsOk && "factorize() should be called first");
  // UnitLower: strict lower part of m_lu plus an implicit unit diagonal.
  return m_lu.template triangularView<UnitLower>();
}
| |
/**
 * get U-Factor
 * \return U-Factor is a matrix containing the upper triangular part of the sparse matrix. All elements of the matrix
 * below the main diagonal are zero.
 **/
template <typename Scalar, typename StorageIndex>
const typename IncompleteLUT<Scalar, StorageIndex>::FactorType IncompleteLUT<Scalar, StorageIndex>::matrixU() const {
  eigen_assert(m_factorizationIsOk && "Factorization must be computed first.");
  // Upper: includes the main diagonal (which factorize() may have shifted
  // to avoid zero pivots).
  return m_lu.template triangularView<Upper>();
}
| |
| // Compute a row permutation m_Pr such that (m_Pr * amat) has a structurally |
| // nonzero diagonal wherever one exists. Returns the number of matched columns. |
| // Uses a maximum bipartite cardinality matching on the sparsity pattern, with |
| // a greedy initialization that prefers the natural diagonal so that matrices |
| // already having a nonzero diagonal yield the identity permutation. |
template <typename Scalar, typename StorageIndex>
template <typename MatrixType_>
Index IncompleteLUT<Scalar, StorageIndex>::computeRowMatching(const MatrixType_& amat) {
  using internal::convert_index;
  const Index n = amat.rows();
  // We only need amat's column-major sparsity pattern; never read scalar
  // values. The pattern view aliases amat's index storage when amat is
  // already a column-major SparseMatrix, and otherwise materializes a CSC
  // pattern into the scratch buffers.
  Matrix<StorageIndex, Dynamic, 1> outer_buf;
  Matrix<StorageIndex, Dynamic, 1> inner_buf;
  internal::SparsityPatternRef<StorageIndex> pat = internal::make_col_major_pattern_ref(amat, outer_buf, inner_buf);
  const StorageIndex* outer = pat.outer;
  const StorageIndex* inner = pat.inner;

  const StorageIndex kUnmatched = StorageIndex(-1);
  // match_row[j] = original row matched to column j; match_col[i] = column matched to row i.
  std::vector<StorageIndex> match_row(n, kUnmatched);
  std::vector<StorageIndex> match_col(n, kUnmatched);

  // The matching uses the stored sparsity pattern only and is independent of
  // numerical values. This preserves the analyzePattern/factorize contract:
  // the same analysis is reusable for any matrix sharing this stored pattern.
  // Phase 1: greedy diagonal preference. Matching j<->j first guarantees the
  // identity permutation on matrices whose diagonal is already structurally
  // nonzero.
  for (Index j = 0; j < n; ++j) {
    const Index col_end = outer[j] + pat.nonZeros(j);
    for (Index k = outer[j]; k < col_end; ++k) {
      if (Index(inner[k]) == j) {
        match_row[j] = convert_index<StorageIndex>(j);
        match_col[j] = convert_index<StorageIndex>(j);
        break;
      }
    }
  }
  // Phase 2: greedy off-diagonal pickup of any free row.
  for (Index j = 0; j < n; ++j) {
    if (match_row[j] != kUnmatched) continue;
    const Index col_end = outer[j] + pat.nonZeros(j);
    for (Index k = outer[j]; k < col_end; ++k) {
      Index i = inner[k];
      if (match_col[i] == kUnmatched) {
        match_row[j] = convert_index<StorageIndex>(i);
        match_col[i] = convert_index<StorageIndex>(j);
        break;
      }
    }
  }
  // Phase 3: augmenting paths for any column still unmatched.
  // 'visited' stamps each row with the start column ('epoch') whose search
  // last touched it, so the array never needs clearing between searches:
  // start indices are distinct, and the initial value kUnmatched (-1) can
  // never equal a valid start index.
  std::vector<StorageIndex> visited(n, kUnmatched);
  // Iterative DFS: the stack frames are (column, edge index, chosen row).
  // chosen_row[k] is the row that frame k will commit to if a path is found.
  std::vector<Index> stack_col;
  std::vector<Index> stack_pos;
  std::vector<Index> stack_chosen_row;
  stack_col.reserve(n);
  stack_pos.reserve(n);
  stack_chosen_row.reserve(n);

  for (Index start = 0; start < n; ++start) {
    if (match_row[start] != kUnmatched) continue;
    StorageIndex epoch = convert_index<StorageIndex>(start);
    stack_col.clear();
    stack_pos.clear();
    stack_chosen_row.clear();
    stack_col.push_back(start);
    stack_pos.push_back(outer[start]);
    stack_chosen_row.push_back(-1);  // placeholder until a row is chosen

    while (!stack_col.empty()) {
      Index j = stack_col.back();
      Index pos = stack_pos.back();
      Index col_end = outer[j] + pat.nonZeros(j);
      bool advanced = false;  // set when a child frame was pushed

      while (pos < col_end) {
        Index i = inner[pos];
        ++pos;
        if (visited[i] == epoch) continue;
        visited[i] = epoch;

        if (match_col[i] == kUnmatched) {
          // Found an augmenting path: commit it. Every frame on the stack
          // rematches its column to the row it chose along the path.
          stack_chosen_row.back() = i;
          stack_pos.back() = pos;
          for (size_t k = 0; k < stack_col.size(); ++k) {
            Index col = stack_col[k];
            Index row = stack_chosen_row[k];
            match_row[col] = convert_index<StorageIndex>(row);
            match_col[row] = convert_index<StorageIndex>(col);
          }
          stack_col.clear();  // empties the stack, ending the outer while
          break;
        } else {
          // Descend into the column currently matched to row i.
          stack_chosen_row.back() = i;
          stack_pos.back() = pos;
          Index next_col = match_col[i];
          stack_col.push_back(next_col);
          stack_pos.push_back(outer[next_col]);
          stack_chosen_row.push_back(-1);
          advanced = true;
          break;
        }
      }

      if (!advanced && !stack_col.empty()) {
        // Dead end: backtrack. The parent frame resumes scanning from its
        // saved edge position (stack_pos).
        stack_col.pop_back();
        stack_pos.pop_back();
        stack_chosen_row.pop_back();
      }
    }
  }

  // Build the row permutation. Matched columns get their matching row;
  // any leftover columns are filled in identity-fashion with the leftover rows.
  m_Pr.resize(n);
  std::vector<bool> col_used(n, false), row_used(n, false);
  Index matched = 0;
  for (Index j = 0; j < n; ++j) {
    if (match_row[j] != kUnmatched) {
      m_Pr.indices()(match_row[j]) = convert_index<StorageIndex>(j);
      col_used[j] = true;
      row_used[match_row[j]] = true;
      ++matched;
    }
  }
  Index next_col = 0;
  for (Index i = 0; i < n; ++i) {
    if (row_used[i]) continue;
    while (next_col < n && col_used[next_col]) ++next_col;
    m_Pr.indices()(i) = convert_index<StorageIndex>(next_col);
    ++next_col;
  }
  return matched;
}
| |
/** \internal Structural analysis phase.
 * Computes the static row permutation m_Pr (maximum matching) and the
 * fill-reducing ordering m_P (AMD), and caches m_Pinv and m_PinvPr for the
 * solve phase. Only the sparsity pattern of \a amat is read, so this
 * analysis may be reused for any matrix sharing the same stored pattern. */
template <typename Scalar, typename StorageIndex>
template <typename MatrixType_>
void IncompleteLUT<Scalar, StorageIndex>::analyzePattern(const MatrixType_& amat) {
  eigen_assert((amat.rows() == amat.cols()) && "The factorization should be done on a square matrix");
  // 1. Compute a static row permutation that makes the diagonal structurally
  // nonzero. This is a workaround for the lack of partial pivoting in ILUT.
  // For matrices that already have a nonzero diagonal, this returns the
  // identity permutation and is essentially free.
  computeRowMatching(amat);

  // 2. Compute the Fill-reducing permutation on the row-permuted matrix.
  // Since ILUT does not perform any numerical pivoting, it is highly
  // preferable to keep the diagonal through symmetric permutations. AMD
  // computes a fill-reducing ordering for a symmetric matrix and only reads
  // the sparsity pattern; build a value-free, row-permuted representation
  // (1-byte placeholder Scalar, indices remapped through m_Pr) and feed that
  // to AMDOrdering, avoiding the previous mat1/mat2/AtA value copies.
  SparseMatrix<signed char, ColMajor, StorageIndex> permuted_pattern;
  {
    // Scratch buffers live only as long as the pattern view needs them.
    Matrix<StorageIndex, Dynamic, 1> outer_buf;
    Matrix<StorageIndex, Dynamic, 1> inner_buf;
    internal::SparsityPatternRef<StorageIndex> pat = internal::make_col_major_pattern_ref(amat, outer_buf, inner_buf);
    internal::materialize_col_major_pattern(pat, m_Pr.indices().data(), permuted_pattern);
  }
  AMDOrdering<StorageIndex> ordering;
  ordering(permuted_pattern, m_P);
  m_Pinv = m_P.inverse();  // cache the inverse permutation
  // Cache the composition m_Pinv * m_Pr so _solve_impl applies a single
  // permutation to the RHS instead of two.
  m_PinvPr = m_Pinv * m_Pr;
  m_analysisIsOk = true;
  m_factorizationIsOk = false;  // a fresh factorize() is required after re-analysis
  m_isInitialized = true;
}
| |
/** \internal Numerical phase of the dual-threshold ILUT factorization.
 * analyzePattern() must have been called first. Each row of the permuted
 * matrix is eliminated against the previously factored rows, applying the
 * two dropping rules documented on the class, and the result is stored in
 * m_lu (unit-lower L and upper U packed together, row-major). On exit,
 * m_info is Success, or NumericalIssue if a zero row was met or any zero
 * pivot had to be shifted. */
template <typename Scalar, typename StorageIndex>
template <typename MatrixType_>
void IncompleteLUT<Scalar, StorageIndex>::factorize(const MatrixType_& amat) {
  using internal::convert_index;
  using std::abs;
  using std::sqrt;
  using std::swap;

  eigen_assert((amat.rows() == amat.cols()) && "The factorization should be done on a square matrix");
  Index n = amat.cols();  // Size of the matrix
  m_lu.resize(n, n);
  // Declare Working vectors and variables
  Vector u(n);    // real values of the row -- maximum size is n --
  VectorI ju(n);  // column position of the values in u -- maximum size is n
  VectorI jr(n);  // Indicate the position of the nonzero elements in the vector u -- A zero location is indicated by -1

  // Apply the static row permutation (from analyzePattern), then the
  // fill-reducing symmetric permutation.
  eigen_assert(m_analysisIsOk && "You must first call analyzePattern()");
  SparseMatrix<Scalar, RowMajor, StorageIndex> row_permuted_mat = m_Pr * amat;
  SparseMatrix<Scalar, RowMajor, StorageIndex> mat;
  mat = row_permuted_mat.twistedBy(m_Pinv);
  Index zero_pivots = 0;  // count of diagonal entries that had to be shifted

  // Initialization
  jr.fill(-1);
  ju.fill(0);
  u.fill(0);

  // number of largest elements to keep in each row:
  Index fill_in = (amat.nonZeros() * m_fillfactor) / n + 1;
  if (fill_in > n) fill_in = n;

  // number of largest nonzero elements to keep in the L and the U part of the current row:
  Index nnzL = fill_in / 2;
  Index nnzU = nnzL;
  m_lu.reserve(n * (nnzL + nnzU + 1));

  // global loop over the rows of the sparse matrix
  for (Index ii = 0; ii < n; ii++) {
    // 1 - copy the lower and the upper part of the row i of mat in the working vector u

    Index sizeu = 1;  // number of nonzero elements in the upper part of the current row
    Index sizel = 0;  // number of nonzero elements in the lower part of the current row
    ju(ii) = convert_index<StorageIndex>(ii);
    u(ii) = 0;
    jr(ii) = convert_index<StorageIndex>(ii);
    RealScalar rownorm = 0;

    typename FactorType::InnerIterator j_it(mat, ii);  // Iterate through the current row ii
    for (; j_it; ++j_it) {
      Index k = j_it.index();
      if (k < ii) {
        // copy the lower part
        ju(sizel) = convert_index<StorageIndex>(k);
        u(sizel) = j_it.value();
        jr(k) = convert_index<StorageIndex>(sizel);
        ++sizel;
      } else if (k == ii) {
        u(ii) = j_it.value();
      } else {
        // copy the upper part; upper entries are stored at offset ii
        Index jpos = ii + sizeu;
        ju(jpos) = convert_index<StorageIndex>(k);
        u(jpos) = j_it.value();
        jr(k) = convert_index<StorageIndex>(jpos);
        ++sizeu;
      }
      rownorm += numext::abs2(j_it.value());
    }

    // 2 - detect possible zero row
    if (rownorm == 0) {
      // A zero row cannot be eliminated: the matrix is singular.
      m_info = NumericalIssue;
      return;
    }
    // Take the 2-norm of the current row as a relative tolerance
    rownorm = sqrt(rownorm);

    // 3 - eliminate the previous nonzero rows
    Index jj = 0;
    Index len = 0;  // number of L entries (multipliers) produced so far
    while (jj < sizel) {
      // In order to eliminate in the correct order,
      // we must select first the smallest column index among ju(jj:sizel)
      Index k;
      Index minrow = ju.segment(jj, sizel - jj).minCoeff(&k);  // k is relative to the segment
      k += jj;
      if (minrow != ju(jj)) {
        // swap the two locations
        Index j = ju(jj);
        swap(ju(jj), ju(k));
        jr(minrow) = convert_index<StorageIndex>(jj);
        jr(j) = convert_index<StorageIndex>(k);
        swap(u(jj), u(k));
      }
      // Reset this location
      jr(minrow) = -1;

      // Start elimination: locate the diagonal entry of row minrow in m_lu
      typename FactorType::InnerIterator ki_it(m_lu, minrow);
      while (ki_it && ki_it.index() < minrow) ++ki_it;
      eigen_internal_assert(ki_it && ki_it.col() == minrow);
      Scalar fact = u(jj) / ki_it.value();

      // drop too small elements
      if (abs(fact) <= m_droptol) {
        jj++;
        continue;
      }

      // linear combination of the current row ii and the row minrow
      ++ki_it;
      for (; ki_it; ++ki_it) {
        Scalar prod = fact * ki_it.value();
        Index j = ki_it.index();
        Index jpos = jr(j);
        if (jpos == -1)  // fill-in element
        {
          Index newpos;
          if (j >= ii)  // dealing with the upper part
          {
            newpos = ii + sizeu;
            sizeu++;
            eigen_internal_assert(sizeu <= n);
          } else  // dealing with the lower part
          {
            newpos = sizel;
            sizel++;
            eigen_internal_assert(sizel <= ii);
          }
          ju(newpos) = convert_index<StorageIndex>(j);
          u(newpos) = -prod;
          jr(j) = convert_index<StorageIndex>(newpos);
        } else
          u(jpos) -= prod;
      }
      // store the pivot element (the multiplier, i.e. an entry of L)
      u(len) = fact;
      ju(len) = convert_index<StorageIndex>(minrow);
      ++len;

      jj++;
    }  // end of the elimination on the row ii

    // reset the upper part of the pointer jr to zero
    for (Index k = 0; k < sizeu; k++) jr(ju(ii + k)) = -1;

    // 4 - partially sort and insert the elements in the m_lu matrix

    // sort the L-part of the row
    sizel = len;
    len = (std::min)(sizel, nnzL);
    typename Vector::SegmentReturnType ul(u.segment(0, sizel));
    typename VectorI::SegmentReturnType jul(ju.segment(0, sizel));
    internal::QuickSplit(ul, jul, len);

    // store the largest m_fill elements of the L part
    m_lu.startVec(ii);
    for (Index k = 0; k < len; k++) m_lu.insertBackByOuterInnerUnordered(ii, ju(k)) = u(k);

    // store the diagonal element
    // apply a shifting rule to avoid zero pivots (we are doing an incomplete factorization)
    if (u(ii) == Scalar(0)) {
      u(ii) = sqrt(m_droptol) * rownorm;
      ++zero_pivots;
    }
    m_lu.insertBackByOuterInnerUnordered(ii, ii) = u(ii);

    // sort the U-part of the row
    // apply the dropping rule first
    len = 0;
    for (Index k = 1; k < sizeu; k++) {
      if (abs(u(ii + k)) > m_droptol * rownorm) {
        ++len;
        u(ii + len) = u(ii + k);
        ju(ii + len) = ju(ii + k);
      }
    }
    sizeu = len + 1;  // +1 to take into account the diagonal element
    len = (std::min)(sizeu, nnzU);
    typename Vector::SegmentReturnType uu(u.segment(ii + 1, sizeu - 1));
    typename VectorI::SegmentReturnType juu(ju.segment(ii + 1, sizeu - 1));
    internal::QuickSplit(uu, juu, len);

    // store the largest elements of the U part
    for (Index k = ii + 1; k < ii + len; k++) m_lu.insertBackByOuterInnerUnordered(ii, ju(k)) = u(k);
  }
  m_lu.finalize();
  m_lu.makeCompressed();

  m_factorizationIsOk = true;
  // If we had to shift any zero pivot, the factorization is not faithful to
  // the input matrix and the resulting preconditioner may be useless.
  // Report this to the caller via NumericalIssue rather than silently
  // returning Success.
  m_info = (zero_pivots == 0) ? Success : NumericalIssue;
}
| |
| } // end namespace Eigen |
| |
| #endif // EIGEN_INCOMPLETE_LUT_H |