Fix numerous typos in comments and documentation
diff --git a/Eigen/Geometry b/Eigen/Geometry index 3334874..019c98b 100644 --- a/Eigen/Geometry +++ b/Eigen/Geometry
@@ -23,7 +23,7 @@ * - translation, scaling, 2D and 3D rotations * - \link Quaternion quaternions \endlink * - cross products (\ref MatrixBase::cross, \ref MatrixBase::cross3) - * - orthognal vector generation (\ref MatrixBase::unitOrthogonal) + * - orthogonal vector generation (\ref MatrixBase::unitOrthogonal) * - some linear components: \link ParametrizedLine parametrized-lines \endlink and \link Hyperplane hyperplanes * \endlink * - \link AlignedBox axis aligned bounding boxes \endlink
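For readers of the hunk above, a minimal usage sketch of the two features whose documentation it touches (illustrative, not part of the patch):

```cpp
#include <Eigen/Geometry>
#include <iostream>

int main() {
  Eigen::Vector3d v(1.0, 2.0, 3.0);
  Eigen::Vector3d w = v.unitOrthogonal();  // unit vector orthogonal to v
  std::cout << v.dot(w) << "\n";           // ~0 by construction
  std::cout << v.cross(w).norm() << "\n";  // == v.norm(), since |w| == 1 and w is perpendicular to v
}
```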
diff --git a/Eigen/OrderingMethods b/Eigen/OrderingMethods index 921b8a0..0167419 100644 --- a/Eigen/OrderingMethods +++ b/Eigen/OrderingMethods
@@ -54,7 +54,7 @@ * \note Some of these methods (like AMD or METIS), need the sparsity pattern * of the input matrix to be symmetric. When the matrix is structurally unsymmetric, * Eigen computes internally the pattern of \f$A^T*A\f$ before calling the method. - * If your matrix is already symmetric (at leat in structure), you can avoid that + * If your matrix is already symmetric (at least in structure), you can avoid that * by calling the method with a SelfAdjointView type. * * \code
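The note above in action, as a sketch (the function name is hypothetical; `AMDOrdering` and `selfadjointView` are the public API the note refers to):

```cpp
#include <Eigen/SparseCore>
#include <Eigen/OrderingMethods>

// A is structurally symmetric; passing a SelfAdjointView (lower triangle here)
// lets the ordering skip the internal A^T*A pattern computation.
void orderSymmetric(const Eigen::SparseMatrix<double>& A) {
  Eigen::AMDOrdering<int> ordering;
  Eigen::PermutationMatrix<Eigen::Dynamic, Eigen::Dynamic, int> perm;
  ordering(A.selfadjointView<Eigen::Lower>(), perm);
}
```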
diff --git a/Eigen/src/CholmodSupport/CholmodSupport.h b/Eigen/src/CholmodSupport/CholmodSupport.h index e5b46c4..7e3c881 100644 --- a/Eigen/src/CholmodSupport/CholmodSupport.h +++ b/Eigen/src/CholmodSupport/CholmodSupport.h
@@ -425,7 +425,7 @@ RealScalar logDet = 0; Scalar* x = static_cast<Scalar*>(m_cholmodFactor->x); if (m_cholmodFactor->is_super) { - // Supernodal factorization stored as a packed list of dense column-major blocs, + // Supernodal factorization stored as a packed list of dense column-major blocks, // as described by the following structure: // super[k] == index of the first column of the j-th super node
diff --git a/Eigen/src/Core/CoreEvaluators.h b/Eigen/src/Core/CoreEvaluators.h index 5da9c57..3d78fd8 100644 --- a/Eigen/src/Core/CoreEvaluators.h +++ b/Eigen/src/Core/CoreEvaluators.h
@@ -402,7 +402,7 @@ #if 0 && EIGEN_COMP_MSVC > 0 // Disable this ugly workaround. This is now handled in traits<Ref>::match, // but this piece of code might still become handly if some other weird compilation -// erros pop up again. +// errors pop up again. // MSVC exhibits a weird compilation error when // compiling: @@ -645,7 +645,7 @@ // There is no source packet type with equal or fewer elements than DstPacketType. // This is problematic as the evaluation loop may attempt to access data outside the bounds of the array. // For example, consider the cast utilizing pcast<Packet4f,Packet2d> with an array of size 4: {0.0f,1.0f,2.0f,3.0f}. - // The first iteration of the evaulation loop will load 16 bytes: {0.0f,1.0f,2.0f,3.0f} and cast to {0.0,1.0}, which + // The first iteration of the evaluation loop will load 16 bytes: {0.0f,1.0f,2.0f,3.0f} and cast to {0.0,1.0}, which // is acceptable. The second iteration will load 16 bytes: {2.0f,3.0f,?,?}, which is outside the bounds of the array. // Instead, perform runtime check to determine if the load would access data outside the bounds of the array. @@ -701,7 +701,7 @@ srcPacket<SrcLoadMode>(row, col, 6), srcPacket<SrcLoadMode>(row, col, 7)); } - // Analagous routines for linear access. + // Analogous routines for linear access. template <int LoadMode, typename DstPacketType, AltSrcScalarOp<DstPacketType> = true> EIGEN_STRONG_INLINE DstPacketType packet(Index index) const { constexpr int DstPacketSize = unpacket_traits<DstPacketType>::size; @@ -838,7 +838,7 @@ Data m_d; }; -// specialization for expresions like (a < b).select(c, d) to enable full vectorization +// specialization for expressions like (a < b).select(c, d) to enable full vectorization template <typename Arg1, typename Arg2, typename Scalar, typename CmpLhsType, typename CmpRhsType, ComparisonName cmp> struct evaluator<CwiseTernaryOp<scalar_boolean_select_op<Scalar, Scalar, bool>, Arg1, Arg2, CwiseBinaryOp<scalar_cmp_op<Scalar, Scalar, cmp, false>, CmpLhsType, CmpRhsType>>>
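A scalar model of the out-of-bounds scenario the fixed comment describes (hypothetical sketch, not Eigen's evaluator):

```cpp
// Emulating pcast<Packet4f, Packet2d> over a 4-float array: each packet cast
// would load 4 floats but produce only 2 doubles, so the loop advances by 2.
#include <cstddef>

void cast_all(const float* src, double* dst, std::size_t n) {  // n assumed even
  for (std::size_t i = 0; i < n; i += 2) {
    // a real Packet4f load here would read src[i..i+3]; for i == 2, n == 4
    // that touches src[4..5], past the end of the array -- hence the runtime
    // bounds check before issuing the wide load
    dst[i] = static_cast<double>(src[i]);
    dst[i + 1] = static_cast<double>(src[i + 1]);
  }
}
```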
diff --git a/Eigen/src/Core/DeviceWrapper.h b/Eigen/src/Core/DeviceWrapper.h index 9fdbe60..3ae8256 100644 --- a/Eigen/src/Core/DeviceWrapper.h +++ b/Eigen/src/Core/DeviceWrapper.h
@@ -82,7 +82,7 @@ } }; -// this allows us to use the default evaulation scheme if it is not specialized for the device +// this allows us to use the default evaluation scheme if it is not specialized for the device template <typename Kernel, typename Device, int Traversal = Kernel::AssignmentTraits::Traversal, int Unrolling = Kernel::AssignmentTraits::Unrolling> struct dense_assignment_loop_with_device { @@ -152,4 +152,4 @@ return DeviceWrapper<const Derived, Device>(derived(), device); } } // namespace Eigen -#endif \ No newline at end of file +#endif
diff --git a/Eigen/src/Core/GenericPacketMath.h b/Eigen/src/Core/GenericPacketMath.h index e1347b9..1d79b4a 100644 --- a/Eigen/src/Core/GenericPacketMath.h +++ b/Eigen/src/Core/GenericPacketMath.h
@@ -207,7 +207,7 @@ }; }; -// provides a succint template to define vectorized casting traits with respect to the largest accessible packet types +// provides a succinct template to define vectorized casting traits with respect to the largest accessible packet types template <typename Src, typename Tgt> struct vectorized_type_casting_traits { enum : int {
diff --git a/Eigen/src/Core/MathFunctionsImpl.h b/Eigen/src/Core/MathFunctionsImpl.h index 689c6d8..10ddabd 100644 --- a/Eigen/src/Core/MathFunctionsImpl.h +++ b/Eigen/src/Core/MathFunctionsImpl.h
@@ -23,7 +23,7 @@ Preconditions: 1. The starting guess provided in approx_a_recip must have at least half the leading mantissa bits in the correct result, such that a single - Newton-Raphson step is sufficient to get within 1-2 ulps of the currect + Newton-Raphson step is sufficient to get within 1-2 ulps of the correct result. 2. If a is zero, approx_a_recip must be infinite with the same sign as a. 3. If a is infinite, approx_a_recip must be zero with the same sign as a. @@ -61,7 +61,7 @@ Preconditions: 1. The starting guess provided in approx_a_recip must have at least half the leading mantissa bits in the correct result, such that a single - Newton-Raphson step is sufficient to get within 1-2 ulps of the currect + Newton-Raphson step is sufficient to get within 1-2 ulps of the correct result. 2. If a is zero, approx_a_recip must be infinite with the same sign as a. 3. If a is infinite, approx_a_recip must be zero with the same sign as a. @@ -112,7 +112,7 @@ 1. The starting guess for the reciprocal sqrt provided in approx_rsqrt must have at least half the leading mantissa bits in the correct result, such that a single Newton-Raphson step is sufficient to get within 1-2 ulps of - the currect result. + the correct result. 2. If a is zero, approx_rsqrt must be infinite. 3. If a is infinite, approx_rsqrt must be zero.
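The precondition above in scalar form; a sketch of one Newton-Raphson refinement step (Eigen's actual implementations operate on packets):

```cpp
// Given x0 ~ 1/a with at least half the leading mantissa bits correct,
// one step roughly doubles the number of correct bits.
float reciprocal_newton_step(float a, float x0) {
  float e = 1.0f - a * x0;  // residual; an FMA (std::fmaf(-a, x0, 1.0f)) is more accurate
  return x0 + x0 * e;       // x1 = x0 * (2 - a * x0)
}
```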
diff --git a/Eigen/src/Core/PermutationMatrix.h b/Eigen/src/Core/PermutationMatrix.h index 6945964..4748b11 100644 --- a/Eigen/src/Core/PermutationMatrix.h +++ b/Eigen/src/Core/PermutationMatrix.h
@@ -170,7 +170,7 @@ * \note \blank \note_try_to_help_rvo */ inline InverseReturnType inverse() const { return InverseReturnType(derived()); } - /** \returns the tranpose permutation matrix. + /** \returns the transpose permutation matrix. * * \note \blank \note_try_to_help_rvo */
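For reference, the transpose/inverse coincidence this doc line relies on, as a small usage example (public API):

```cpp
#include <Eigen/Dense>
#include <iostream>

int main() {
  Eigen::PermutationMatrix<Eigen::Dynamic> P(3);
  P.indices() << 1, 2, 0;
  Eigen::Matrix3d M = Eigen::Matrix3d::Random();
  Eigen::Matrix3d PM = P * M;  // permute rows of M
  // for a permutation matrix, the transpose is the inverse:
  std::cout << (P.transpose() * PM - M).norm() << "\n";  // 0
}
```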
diff --git a/Eigen/src/Core/StableNorm.h b/Eigen/src/Core/StableNorm.h index 6513120..de84d81 100644 --- a/Eigen/src/Core/StableNorm.h +++ b/Eigen/src/Core/StableNorm.h
@@ -218,7 +218,7 @@ return internal::blueNorm_impl(*this); } -/** \returns the \em l2 norm of \c *this avoiding undeflow and overflow. - * This version use a concatenation of hypot() calls, and it is very slow. +/** \returns the \em l2 norm of \c *this avoiding underflow and overflow. + * This version uses a concatenation of hypot() calls, and it is very slow. * * \sa norm(), stableNorm()
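A quick demonstration of why these stable-norm variants exist (public API; values chosen so that `squaredNorm()` overflows):

```cpp
#include <Eigen/Dense>
#include <iostream>

int main() {
  Eigen::VectorXd v = Eigen::VectorXd::Constant(4, 1e200);
  std::cout << v.norm() << "\n";        // inf: the intermediate squaredNorm() overflows
  std::cout << v.stableNorm() << "\n";  // 2e200: rescales to avoid under/overflow
  std::cout << v.hypotNorm() << "\n";   // 2e200: chain of hypot() calls, safe but slow
}
```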
diff --git a/Eigen/src/Core/Transpositions.h b/Eigen/src/Core/Transpositions.h index ad136d3..6fbbbd8 100644 --- a/Eigen/src/Core/Transpositions.h +++ b/Eigen/src/Core/Transpositions.h
@@ -91,7 +91,7 @@ /** \returns the inverse transformation */ inline Transpose<TranspositionsBase> inverse() const { return Transpose<TranspositionsBase>(derived()); } - /** \returns the tranpose transformation */ + /** \returns the transpose transformation */ inline Transpose<TranspositionsBase> transpose() const { return Transpose<TranspositionsBase>(derived()); } protected:
diff --git a/Eigen/src/Core/Visitor.h b/Eigen/src/Core/Visitor.h index 0302646..198ec95 100644 --- a/Eigen/src/Core/Visitor.h +++ b/Eigen/src/Core/Visitor.h
@@ -38,7 +38,7 @@ // unrolled inner-outer traversal template <typename Visitor, typename Derived, int UnrollCount, bool Vectorize, bool ShortCircuitEvaluation> struct visitor_impl<Visitor, Derived, UnrollCount, Vectorize, false, ShortCircuitEvaluation> { - // don't use short circuit evaulation for unrolled version + // don't use short circuit evaluation for unrolled version using Scalar = typename Derived::Scalar; using Packet = typename packet_traits<Scalar>::type; static constexpr bool RowMajor = Derived::IsRowMajor; @@ -93,7 +93,7 @@ // unrolled linear traversal template <typename Visitor, typename Derived, int UnrollCount, bool Vectorize, bool ShortCircuitEvaluation> struct visitor_impl<Visitor, Derived, UnrollCount, Vectorize, true, ShortCircuitEvaluation> { - // don't use short circuit evaulation for unrolled version + // don't use short circuit evaluation for unrolled version using Scalar = typename Derived::Scalar; using Packet = typename packet_traits<Scalar>::type; static constexpr int PacketSize = packet_traits<Scalar>::size;
diff --git a/Eigen/src/Core/arch/AVX/PacketMath.h b/Eigen/src/Core/arch/AVX/PacketMath.h index b05429c..ea58f0e 100644 --- a/Eigen/src/Core/arch/AVX/PacketMath.h +++ b/Eigen/src/Core/arch/AVX/PacketMath.h
@@ -1180,7 +1180,7 @@ } #endif -// Add specializations for min/max with prescribed NaN progation. +// Add specializations for min/max with prescribed NaN propagation. template <> EIGEN_STRONG_INLINE Packet8f pmin<PropagateNumbers, Packet8f>(const Packet8f& a, const Packet8f& b) { return pminmax_propagate_numbers(a, b, pmin<Packet8f>);
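The two propagation policies being specialized in this hunk, modeled in scalar form (a sketch of the contract, not the SIMD code):

```cpp
#include <algorithm>
#include <cmath>
#include <limits>

// PropagateNumbers: a NaN argument loses to a number.
float min_propagate_numbers(float a, float b) {
  if (std::isnan(a)) return b;
  if (std::isnan(b)) return a;
  return std::min(a, b);
}

// PropagateNaN: any NaN argument wins.
float min_propagate_nan(float a, float b) {
  if (std::isnan(a) || std::isnan(b)) return std::numeric_limits<float>::quiet_NaN();
  return std::min(a, b);
}
```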
diff --git a/Eigen/src/Core/arch/AVX512/PacketMath.h b/Eigen/src/Core/arch/AVX512/PacketMath.h index 9a0edca..0659ddd 100644 --- a/Eigen/src/Core/arch/AVX512/PacketMath.h +++ b/Eigen/src/Core/arch/AVX512/PacketMath.h
@@ -572,7 +572,7 @@ return _mm512_max_epi64(b, a); } -// Add specializations for min/max with prescribed NaN progation. +// Add specializations for min/max with prescribed NaN propagation. template <> EIGEN_STRONG_INLINE Packet16f pmin<PropagateNumbers, Packet16f>(const Packet16f& a, const Packet16f& b) { return pminmax_propagate_numbers(a, b, pmin<Packet16f>);
diff --git a/Eigen/src/Core/arch/AVX512/TrsmKernel.h b/Eigen/src/Core/arch/AVX512/TrsmKernel.h index 903bca5..c763b5f 100644 --- a/Eigen/src/Core/arch/AVX512/TrsmKernel.h +++ b/Eigen/src/Core/arch/AVX512/TrsmKernel.h
@@ -206,7 +206,7 @@ /** * GEMM like operation for trsm panel updates. * Computes: C -= A*B - * K must be multipe of 4. + * K must be a multiple of 4. * * Unrolls used are {1,2,4,8}x{U1,U2,U3}; * For good performance we want K to be large with M/N relatively small, but also large enough
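The naive scalar semantics of the panel update described above (illustrative reference only; the helper name and signature are hypothetical):

```cpp
// C (M x N) -= A (M x K) * B (K x N), column-major storage assumed;
// K is a multiple of 4 in the real, blocked and vectorized kernel.
template <typename Scalar, typename Index>
void trsm_panel_update(Scalar* C, const Scalar* A, const Scalar* B,
                       Index M, Index N, Index K, Index ldc, Index lda, Index ldb) {
  for (Index j = 0; j < N; ++j)
    for (Index k = 0; k < K; ++k)
      for (Index i = 0; i < M; ++i)
        C[i + j * ldc] -= A[i + k * lda] * B[k + j * ldb];
}
```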
diff --git a/Eigen/src/Core/arch/AVX512/TrsmUnrolls.inc b/Eigen/src/Core/arch/AVX512/TrsmUnrolls.inc index 4c6116c..1983c08 100644 --- a/Eigen/src/Core/arch/AVX512/TrsmUnrolls.inc +++ b/Eigen/src/Core/arch/AVX512/TrsmUnrolls.inc
@@ -28,7 +28,7 @@ * func(startI,startJ) startJ = (startC)%(endJ) * func(...) * - * The 1-D loop can be unrolled recursively by using enable_if and defining an auxillary function + * The 1-D loop can be unrolled recursively by using enable_if and defining an auxiliary function * with a template parameter used as a counter. * * template <endI, endJ, counter> @@ -124,7 +124,7 @@ } /*** - * Unrolls for tranposed C stores + * Unrolls for transposed C stores */ template <typename Scalar> class trans { @@ -134,7 +134,7 @@ static constexpr int64_t PacketSize = packet_traits<Scalar>::size; /*********************************** - * Auxillary Functions for: + * Auxiliary Functions for: * - storeC *********************************** */ @@ -285,7 +285,7 @@ static constexpr int64_t PacketSize = packet_traits<Scalar>::size; /*********************************** - * Auxillary Functions for: + * Auxiliary Functions for: * - loadB * - storeB * - loadBBlock @@ -588,7 +588,7 @@ static constexpr int64_t PacketSize = packet_traits<Scalar>::size; /*********************************** - * Auxillary Functions for: + * Auxiliary Functions for: * - loadRHS * - storeRHS * - divRHSByDiag @@ -867,7 +867,7 @@ static constexpr int64_t PacketSize = packet_traits<Scalar>::size; /*********************************** - * Auxillary Functions for: + * Auxiliary Functions for: * - setzero * - updateC * - storeC @@ -1101,7 +1101,7 @@ } } - // We have updated all accumlators, time to load next set of B's + // We have updated all accumulators, time to load next set of B's EIGEN_IF_CONSTEXPR((startN == endN - 1) && (startM == endM - 1)) { gemm::template loadB<endM, endN, startK, endK, numLoad, numBCast, rem>(B_t, LDB, zmm, rem_); }
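A minimal self-contained sketch of the enable_if/counter unrolling pattern the comment describes (names are hypothetical, not the actual Eigen helpers):

```cpp
#include <type_traits>
#include <utility>

// A 2-D compile-time loop flattened to one linear counter, unrolled by recursion.
template <int endI, int endJ, int counter, typename Func>
std::enable_if_t<(counter > 0)> unroll(Func&& f) {
  constexpr int start = endI * endJ - counter;  // linear 1-D index of this step
  f(start / endJ, start % endJ);                // recover (startI, startJ)
  unroll<endI, endJ, counter - 1>(std::forward<Func>(f));
}
template <int endI, int endJ, int counter, typename Func>
std::enable_if_t<(counter == 0)> unroll(Func&&) {}  // recursion base case

// usage: unroll<2, 3, 2 * 3>([](int i, int j) { /* func(i, j) */ });
```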
diff --git a/Eigen/src/Core/arch/AltiVec/MatrixProduct.h b/Eigen/src/Core/arch/AltiVec/MatrixProduct.h index 94306da..be79eba 100644 --- a/Eigen/src/Core/arch/AltiVec/MatrixProduct.h +++ b/Eigen/src/Core/arch/AltiVec/MatrixProduct.h
@@ -1590,7 +1590,7 @@ pger_common<Packet, false, N>(&cImag, bImag, aReal.packet); } -// Load a PacketBlock, the N parameters make tunning gemm easier so we can add more accumulators as needed. +// Load a PacketBlock, the N parameters make tuning gemm easier so we can add more accumulators as needed. // // full = operate (load) on the entire PacketBlock or only half template <typename DataMapper, typename Packet, const Index accCols, int StorageOrder, bool Complex, int N, bool full>
diff --git a/Eigen/src/Core/arch/Default/GenericPacketMathFunctions.h b/Eigen/src/Core/arch/Default/GenericPacketMathFunctions.h index 1c46ba4..d9e6d03 100644 --- a/Eigen/src/Core/arch/Default/GenericPacketMathFunctions.h +++ b/Eigen/src/Core/arch/Default/GenericPacketMathFunctions.h
@@ -1655,7 +1655,7 @@ } // This function implements the division of double word {x_hi, x_lo} -// by float y. This is Algorithm 15 from "Tight and rigourous error bounds +// by float y. This is Algorithm 15 from "Tight and rigorous error bounds // for basic building blocks of double-word arithmetic", Joldes, Muller, & Popescu, // 2017. https://hal.archives-ouvertes.fr/hal-01351529 template <typename Packet> @@ -2376,7 +2376,7 @@ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet handle_negative_exponent(const Packet& x, const ScalarExponent& exponent) { using Scalar = typename unpacket_traits<Packet>::type; - // singed integer base, signed integer exponent case + // signed integer base, signed integer exponent case // This routine handles negative exponents. // The return value is either 0, 1, or -1.
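A scalar sketch of the double-word-by-float division pattern the comment cites (after Joldes, Muller & Popescu 2017; names and layout are assumptions, not the packet implementation):

```cpp
#include <cmath>

struct DoubleWord { float hi, lo; };  // value == hi + lo, |lo| <= ulp(hi)/2

DoubleWord dw_div_float(DoubleWord x, float y) {
  float t_hi = x.hi / y;                   // approximate quotient
  float p_hi = t_hi * y;
  float p_lo = std::fmaf(t_hi, y, -p_hi);  // p_hi + p_lo == t_hi * y exactly
  float d = (x.hi - p_hi) + x.lo - p_lo;   // residual of the first division
  float t_lo = d / y;                      // correction term
  float s = t_hi + t_lo;                   // Fast2Sum renormalization
  return {s, t_lo - (s - t_hi)};
}
```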
diff --git a/Eigen/src/Core/arch/Default/GenericPacketMathFunctionsFwd.h b/Eigen/src/Core/arch/Default/GenericPacketMathFunctionsFwd.h index 1bf1128..4d113ca 100644 --- a/Eigen/src/Core/arch/Default/GenericPacketMathFunctionsFwd.h +++ b/Eigen/src/Core/arch/Default/GenericPacketMathFunctionsFwd.h
@@ -21,7 +21,7 @@ // This is needed to workaround a circular dependency. /*************************************************************************** - * Some generic implementations to be used by implementors + * Some generic implementations to be used by implementers ***************************************************************************/ /** Default implementation of pfrexp.
diff --git a/Eigen/src/Core/arch/NEON/PacketMath.h b/Eigen/src/Core/arch/NEON/PacketMath.h index 50cf56f..794d063 100644 --- a/Eigen/src/Core/arch/NEON/PacketMath.h +++ b/Eigen/src/Core/arch/NEON/PacketMath.h
@@ -103,7 +103,7 @@ return res; } -// fuctionally equivalent to _mm_shuffle_ps in SSE when interleave +// functionally equivalent to _mm_shuffle_ps in SSE when interleave // == false (i.e. shuffle<false>(m, n, mask) equals _mm_shuffle_ps(m, n, mask)), // interleave m and n when interleave == true. Currently used in LU/arch/InverseSize4.h // to enable a shared implementation for fast inversion of matrices of size 4. @@ -5029,7 +5029,7 @@ //---------- double ---------- -// Clang 3.5 in the iOS toolchain has an ICE triggered by NEON intrisics for double. +// Clang 3.5 in the iOS toolchain has an ICE triggered by NEON intrinsics for double. // Confirmed at least with __apple_build_version__ = 6000054. #if EIGEN_COMP_CLANGAPPLE // Let's hope that by the time __apple_build_version__ hits the 601* range, the bug will be fixed. @@ -5075,7 +5075,7 @@ EIGEN_ALWAYS_INLINE Packet2d make_packet2d(double a, double b) { return Packet2d{a, b}; } #endif -// fuctionally equivalent to _mm_shuffle_pd in SSE (i.e. shuffle(m, n, mask) equals _mm_shuffle_pd(m,n,mask)) +// functionally equivalent to _mm_shuffle_pd in SSE (i.e. shuffle(m, n, mask) equals _mm_shuffle_pd(m,n,mask)) // Currently used in LU/arch/InverseSize4.h to enable a shared implementation // for fast inversion of matrices of size 4. EIGEN_STRONG_INLINE Packet2d shuffle(const Packet2d& m, const Packet2d& n, int mask) {
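The mask convention the comment refers to, in scalar form (matches `_mm_shuffle_ps`; a model of the contract, not the NEON implementation):

```cpp
void shuffle_model(const float m[4], const float n[4], int mask, float out[4]) {
  out[0] = m[ mask       & 3];  // low half selected from m
  out[1] = m[(mask >> 2) & 3];
  out[2] = n[(mask >> 4) & 3];  // high half selected from n
  out[3] = n[(mask >> 6) & 3];
}
```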
diff --git a/Eigen/src/Core/arch/SSE/PacketMath.h b/Eigen/src/Core/arch/SSE/PacketMath.h index e19e948..e5dce3b 100644 --- a/Eigen/src/Core/arch/SSE/PacketMath.h +++ b/Eigen/src/Core/arch/SSE/PacketMath.h
@@ -1127,7 +1127,7 @@ return pselect<Packet>(not_nan_mask_a, m, a); } -// Add specializations for min/max with prescribed NaN progation. +// Add specializations for min/max with prescribed NaN propagation. template <> EIGEN_STRONG_INLINE Packet4f pmin<PropagateNumbers, Packet4f>(const Packet4f& a, const Packet4f& b) { return pminmax_propagate_numbers(a, b, pmin<Packet4f>);
diff --git a/Eigen/src/Core/products/GeneralBlockPanelKernel.h b/Eigen/src/Core/products/GeneralBlockPanelKernel.h index c4fa771..1b7861a 100644 --- a/Eigen/src/Core/products/GeneralBlockPanelKernel.h +++ b/Eigen/src/Core/products/GeneralBlockPanelKernel.h
@@ -1257,7 +1257,7 @@ traits.initAcc(C3); // To improve instruction pipelining, let's double the accumulation registers: // even k will accumulate in C*, while odd k will accumulate in D*. - // This trick is crutial to get good performance with FMA, otherwise it is + // This trick is crucial to get good performance with FMA, otherwise it is // actually faster to perform separated MUL+ADD because of a naturally // better instruction-level parallelism. AccPacket D0, D1, D2, D3; @@ -3130,9 +3130,8 @@ return l2; } -/** \returns the currently set level 3 cpu cache size (in bytes) used to estimate the ideal blocking size paramete\ -rs. -* \sa setCpuCacheSize */ +/** \returns the currently set level 3 cpu cache size (in bytes) used to estimate the ideal blocking size parameters. + * \sa setCpuCacheSize */ inline std::ptrdiff_t l3CacheSize() { std::ptrdiff_t l1, l2, l3; internal::manage_caching_sizes(GetAction, &l1, &l2, &l3);
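The register-doubling trick above, reduced to a scalar dot product (sketch only): with one accumulator every fma waits on the previous one; with two, the chains interleave.

```cpp
#include <cmath>

float dot_two_acc(const float* a, const float* b, int K) {  // K assumed even
  float c = 0.f, d = 0.f;  // two independent accumulation chains
  for (int k = 0; k < K; k += 2) {
    c = std::fmaf(a[k], b[k], c);          // even k accumulates into c
    d = std::fmaf(a[k + 1], b[k + 1], d);  // odd k accumulates into d
  }
  return c + d;  // combine once at the end
}
```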
diff --git a/Eigen/src/Core/products/GeneralMatrixMatrixTriangular_BLAS.h b/Eigen/src/Core/products/GeneralMatrixMatrixTriangular_BLAS.h index e138535..6817cc0 100644 --- a/Eigen/src/Core/products/GeneralMatrixMatrixTriangular_BLAS.h +++ b/Eigen/src/Core/products/GeneralMatrixMatrixTriangular_BLAS.h
@@ -137,7 +137,7 @@ EIGEN_BLAS_RANKUPDATE_R(float, float, ssyrk_) #endif -// TODO hanlde complex cases +// TODO handle complex cases // EIGEN_BLAS_RANKUPDATE_C(dcomplex, double, double, zherk_) // EIGEN_BLAS_RANKUPDATE_C(scomplex, float, float, cherk_)
diff --git a/Eigen/src/Core/products/GeneralMatrixVector.h b/Eigen/src/Core/products/GeneralMatrixVector.h index afd8155..ba72a8a 100644 --- a/Eigen/src/Core/products/GeneralMatrixVector.h +++ b/Eigen/src/Core/products/GeneralMatrixVector.h
@@ -64,7 +64,7 @@ /* Optimized col-major matrix * vector product: * This algorithm processes the matrix per vertical panels, - * which are then processed horizontally per chunck of 8*PacketSize x 1 vertical segments. + * which are then processed horizontally per chunk of 8*PacketSize x 1 vertical segments. * * Mixing type logic: C += alpha * A * B * | A | B |alpha| comments @@ -112,7 +112,7 @@ eigen_internal_assert(resIncr == 1); // The following copy tells the compiler that lhs's attributes are not modified outside this function - // This helps GCC to generate propoer code. + // This helps GCC to generate proper code. LhsMapper lhs(alhs); conj_helper<LhsScalar, RhsScalar, ConjugateLhs, ConjugateRhs> cj; @@ -302,7 +302,7 @@ Version>::run(Index rows, Index cols, const LhsMapper& alhs, const RhsMapper& rhs, ResScalar* res, Index resIncr, ResScalar alpha) { // The following copy tells the compiler that lhs's attributes are not modified outside this function - // This helps GCC to generate propoer code. + // This helps GCC to generate proper code. LhsMapper lhs(alhs); eigen_internal_assert(rhs.stride() == 1);
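For orientation, the computation being optimized here in its naive scalar form (hypothetical helper; the real kernel walks several columns at once and splits each into 8*PacketSize-row vertical segments):

```cpp
// res += alpha * A * rhs, with column-major A.
template <typename Scalar, typename Index>
void gemv_colmajor(const Scalar* A, Index lda, const Scalar* rhs,
                   Scalar* res, Index rows, Index cols, Scalar alpha) {
  for (Index j = 0; j < cols; ++j) {
    const Scalar c = alpha * rhs[j];
    for (Index i = 0; i < rows; ++i)  // one vertical panel column
      res[i] += A[i + j * lda] * c;
  }
}
```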
diff --git a/Eigen/src/Core/util/Constants.h b/Eigen/src/Core/util/Constants.h index 9f4a2d8..4f0b273 100644 --- a/Eigen/src/Core/util/Constants.h +++ b/Eigen/src/Core/util/Constants.h
@@ -152,7 +152,7 @@ * Means that the underlying array of coefficients can be directly accessed as a plain strided array. The memory layout * of the array of coefficients must be exactly the natural one suggested by rows(), cols(), * outerStride(), innerStride(), and the RowMajorBit. This rules out expressions such as Diagonal, whose coefficients, - * though referencable, do not have such a regular memory layout. + * though referenceable, do not have such a regular memory layout. * * See the comment on LvalueBit for an explanation of how LvalueBit and DirectAccessBit are mutually orthogonal. */
diff --git a/Eigen/src/Core/util/IntegralConstant.h b/Eigen/src/Core/util/IntegralConstant.h index 2eb5fd9..53fabd5 100644 --- a/Eigen/src/Core/util/IntegralConstant.h +++ b/Eigen/src/Core/util/IntegralConstant.h
@@ -263,8 +263,8 @@ * } * \endcode * In this example, the function Eigen::seqN knows that the second argument is expected to be a size. - * If the passed compile-time value N equals Eigen::Dynamic, then the proxy object returned by fix will be dissmissed, - * and converted to an Eigen::Index of value \c n. Otherwise, the runtime-value \c n will be dissmissed, and the + * If the passed compile-time value N equals Eigen::Dynamic, then the proxy object returned by fix will be dismissed, + * and converted to an Eigen::Index of value \c n. Otherwise, the runtime-value \c n will be dismissed, and the * returned ArithmeticSequence will be of the exact same type as <tt> seqN(0,fix<N>) </tt>. * * \sa fix, seqN, class ArithmeticSequence
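The dismissal behavior described above, as a usage sketch (this mirrors the `seqN(0, fix<N>(n))` idiom from the surrounding documentation):

```cpp
#include <Eigen/Dense>

template <int N>
auto firstN(const Eigen::VectorXd& v, Eigen::Index n) {
  // N != Dynamic: n is dismissed, the result has compile-time length N;
  // N == Dynamic: the fix<N> proxy decays to the runtime value n.
  return v(Eigen::seqN(0, Eigen::fix<N>(n)));  // expression view into v
}
```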
diff --git a/Eigen/src/Core/util/Macros.h b/Eigen/src/Core/util/Macros.h index 4d10eec..91c821b 100644 --- a/Eigen/src/Core/util/Macros.h +++ b/Eigen/src/Core/util/Macros.h
@@ -212,7 +212,7 @@ /// \internal EIGEN_COMP_FCC set to FCC version if the compiler is Fujitsu Compiler (traditional mode) /// \note The Fujitsu C/C++ compiler uses the traditional mode based -/// on EDG g++ 6.1 by default or if envoked with the -Nnoclang flag +/// on EDG g++ 6.1 by default or if invoked with the -Nnoclang flag #if defined(__FUJITSU) #define EIGEN_COMP_FCC (__FCC_major__ * 100 + __FCC_minor__ * 10 + __FCC_patchlevel__) #else @@ -221,7 +221,7 @@ /// \internal EIGEN_COMP_CLANGFCC set to FCC version if the compiler is Fujitsu Compiler (Clang mode) /// \note The Fujitsu C/C++ compiler uses the non-traditional mode -/// based on Clang 7.1.0 if envoked with the -Nclang flag +/// based on Clang 7.1.0 if invoked with the -Nclang flag #if defined(__CLANG_FUJITSU) #define EIGEN_COMP_CLANGFCC (__FCC_major__ * 100 + __FCC_minor__ * 10 + __FCC_patchlevel__) #else
diff --git a/Eigen/src/Eigenvalues/GeneralizedEigenSolver.h b/Eigen/src/Eigenvalues/GeneralizedEigenSolver.h index 08f1e34..ca15e6d 100644 --- a/Eigen/src/Eigenvalues/GeneralizedEigenSolver.h +++ b/Eigen/src/Eigenvalues/GeneralizedEigenSolver.h
@@ -361,7 +361,7 @@ // Compute eigenvector in position (i+1) and then position (i) is just the conjugate cv.setZero(); cv.coeffRef(i + 1) = Scalar(1.0); - // here, the "static_cast" workaound expression template issues. + // here, the "static_cast" works around expression template issues. cv.coeffRef(i) = -(static_cast<Scalar>(beta * mS.coeffRef(i, i + 1)) - alpha * mT.coeffRef(i, i + 1)) / (static_cast<Scalar>(beta * mS.coeffRef(i, i)) - alpha * mT.coeffRef(i, i)); for (Index j = i - 1; j >= 0; j--) {
diff --git a/Eigen/src/Geometry/Rotation2D.h b/Eigen/src/Geometry/Rotation2D.h index eb94b52..5918025 100644 --- a/Eigen/src/Geometry/Rotation2D.h +++ b/Eigen/src/Geometry/Rotation2D.h
@@ -60,7 +60,7 @@ /** Construct a 2D counter clock wise rotation from the angle \a a in radian. */ EIGEN_DEVICE_FUNC explicit inline Rotation2D(const Scalar& a) : m_angle(a) {} - /** Default constructor wihtout initialization. The represented rotation is undefined. */ + /** Default constructor without initialization. The represented rotation is undefined. */ EIGEN_DEVICE_FUNC Rotation2D() {} /** Construct a 2D rotation from a 2x2 rotation matrix \a mat.
diff --git a/Eigen/src/Householder/BlockHouseholder.h b/Eigen/src/Householder/BlockHouseholder.h index 1d6cc1c..8b92304 100644 --- a/Eigen/src/Householder/BlockHouseholder.h +++ b/Eigen/src/Householder/BlockHouseholder.h
@@ -35,7 +35,7 @@ // // Warning, note that hCoeffs may alias with vectors. // // It is then necessary to copy it before modifying vectors(i,i). // typename CoeffsType::Scalar h = hCoeffs(i); -// // This hack permits to pass trough nested Block<> and Transpose<> expressions. +// // This hack makes it possible to pass through nested Block<> and Transpose<> expressions. // Scalar *Vii_ptr = const_cast<Scalar*>(vectors.data() + vectors.outerStride()*i + vectors.innerStride()*i); // Scalar Vii = *Vii_ptr; // *Vii_ptr = Scalar(1);
diff --git a/Eigen/src/IterativeLinearSolvers/SolveWithGuess.h b/Eigen/src/IterativeLinearSolvers/SolveWithGuess.h index 020241b..2b146b3 100644 --- a/Eigen/src/IterativeLinearSolvers/SolveWithGuess.h +++ b/Eigen/src/IterativeLinearSolvers/SolveWithGuess.h
@@ -23,7 +23,7 @@ * * \brief Pseudo expression representing a solving operation * - * \tparam Decomposition the type of the matrix or decomposion object + * \tparam Decomposition the type of the matrix or decomposition object * \tparam Rhstype the type of the right-hand side * * This class represents an expression of A.solve(B)
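A usage sketch of the expression documented here (public API; `resolve` is a hypothetical wrapper):

```cpp
#include <Eigen/IterativeLinearSolvers>
#include <Eigen/SparseCore>

// Seed an iterative solver with a starting guess, e.g. the solution
// of a previous, nearby system.
Eigen::VectorXd resolve(const Eigen::SparseMatrix<double>& A,
                        const Eigen::VectorXd& b, const Eigen::VectorXd& x0) {
  Eigen::ConjugateGradient<Eigen::SparseMatrix<double> > cg(A);
  return cg.solveWithGuess(b, x0);  // returns a SolveWithGuess expression
}
```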
diff --git a/Eigen/src/LU/arch/InverseSize4.h b/Eigen/src/LU/arch/InverseSize4.h index f0ddb2f..29c9b03 100644 --- a/Eigen/src/LU/arch/InverseSize4.h +++ b/Eigen/src/LU/arch/InverseSize4.h
@@ -24,7 +24,7 @@ // // Copyright (c) 2001 Intel Corporation. // -// Permition is granted to use, copy, distribute and prepare derivative works +// Permission is granted to use, copy, distribute and prepare derivative works // of this library for any purpose and without fee, provided, that the above // copyright notice and this statement appear in all copies. // Intel makes no representations about the suitability of this software for
diff --git a/Eigen/src/OrderingMethods/Eigen_Colamd.h b/Eigen/src/OrderingMethods/Eigen_Colamd.h index 7bce3d5..f6c5be0 100644 --- a/Eigen/src/OrderingMethods/Eigen_Colamd.h +++ b/Eigen/src/OrderingMethods/Eigen_Colamd.h
@@ -1374,7 +1374,7 @@ /* order this column */ Col[c].shared2.order = order++; - /* collaps tree */ + /* collapse tree */ Col[c].shared1.parent = parent; /* get immediate parent of this column */
diff --git a/Eigen/src/SVD/BDCSVD.h b/Eigen/src/SVD/BDCSVD.h index 52f3564..6bafc9d 100644 --- a/Eigen/src/SVD/BDCSVD.h +++ b/Eigen/src/SVD/BDCSVD.h
@@ -164,10 +164,10 @@ * Like the default constructor but with preallocation of the internal data * according to the specified problem size and the \a computationOptions. * - * One \b cannot request unitiaries using both the \a Options template parameter + * One \b cannot request unitaries using both the \a Options template parameter * and the constructor. If possible, prefer using the \a Options template parameter. * - * \param computationOptions specifification for computing Thin/Full unitaries U/V + * \param computationOptions specification for computing Thin/Full unitaries U/V * \sa BDCSVD() * * \deprecated Will be removed in the next major Eigen version. Options should @@ -179,7 +179,7 @@ } /** \brief Constructor performing the decomposition of given matrix, using the custom options specified - * with the \a Options template paramter. + * with the \a Options template parameter. * * \param matrix the matrix to decompose */ @@ -190,11 +190,11 @@ /** \brief Constructor performing the decomposition of given matrix using specified options * for computing unitaries. * - * One \b cannot request unitiaries using both the \a Options template parameter + * One \b cannot request unitaries using both the \a Options template parameter * and the constructor. If possible, prefer using the \a Options template parameter. * * \param matrix the matrix to decompose - * \param computationOptions specifification for computing Thin/Full unitaries U/V + * \param computationOptions specification for computing Thin/Full unitaries U/V * * \deprecated Will be removed in the next major Eigen version. Options should * be specified in the \a Options template parameter.
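The two ways of requesting unitaries that this documentation contrasts, side by side (usage sketch):

```cpp
#include <Eigen/SVD>

void decompose(const Eigen::MatrixXd& m) {
  // preferred: the Options template parameter
  Eigen::BDCSVD<Eigen::MatrixXd, Eigen::ComputeThinU | Eigen::ComputeThinV> svd1(m);
  // deprecated: the runtime computationOptions argument
  Eigen::BDCSVD<Eigen::MatrixXd> svd2(m, Eigen::ComputeThinU | Eigen::ComputeThinV);
}
```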
diff --git a/Eigen/src/SVD/JacobiSVD.h b/Eigen/src/SVD/JacobiSVD.h index 086d750..615aad1 100644 --- a/Eigen/src/SVD/JacobiSVD.h +++ b/Eigen/src/SVD/JacobiSVD.h
@@ -559,7 +559,7 @@ } /** \brief Constructor performing the decomposition of given matrix, using the custom options specified - * with the \a Options template paramter. + * with the \a Options template parameter. * * \param matrix the matrix to decompose */
diff --git a/Eigen/src/SparseCore/SparseCwiseBinaryOp.h b/Eigen/src/SparseCore/SparseCwiseBinaryOp.h index 6858263..7fcf2c2 100644 --- a/Eigen/src/SparseCore/SparseCwiseBinaryOp.h +++ b/Eigen/src/SparseCore/SparseCwiseBinaryOp.h
@@ -834,7 +834,7 @@ const XprType& m_expr; }; -// when DupFunc is wrapped with scalar_dup_op, use disjunction evaulator +// when DupFunc is wrapped with scalar_dup_op, use disjunction evaluator template <typename T1, typename T2, typename DupFunc, typename Lhs, typename Rhs> struct binary_evaluator<CwiseBinaryOp<scalar_disjunction_op<DupFunc, T1, T2>, Lhs, Rhs>, IteratorBased, IteratorBased> : sparse_disjunction_evaluator<CwiseBinaryOp<scalar_disjunction_op<DupFunc, T1, T2>, Lhs, Rhs> > {
diff --git a/Eigen/src/SparseCore/SparseMatrix.h b/Eigen/src/SparseCore/SparseMatrix.h index fd92ab0..24ebb7c 100644 --- a/Eigen/src/SparseCore/SparseMatrix.h +++ b/Eigen/src/SparseCore/SparseMatrix.h
@@ -250,7 +250,7 @@ } } if ((dst < end) && (m_data.index(dst) == inner)) { - // this coefficient exists, return a refernece to it + // this coefficient exists, return a reference to it if (inserted != nullptr) { *inserted = false; } @@ -1226,8 +1226,8 @@ // matrix is finalized } -// thin wrapper around a generic binary functor to use the sparse disjunction evaulator instead of the default -// "arithmetic" evaulator +// thin wrapper around a generic binary functor to use the sparse disjunction evaluator instead of the default +// "arithmetic" evaluator template <typename DupFunctor, typename LhsScalar, typename RhsScalar = LhsScalar> struct scalar_disjunction_op { using result_type = typename result_of<DupFunctor(LhsScalar, RhsScalar)>::type; @@ -1633,7 +1633,7 @@ // first, check if there is adequate allocated memory if (m_data.allocatedSize() <= m_data.size()) { // if there is no capacity for a single insertion, double the capacity - // increase capacity by a mininum of 32 + // increase capacity by a minimum of 32 Index minReserve = 32; Index reserveSize = numext::maxi(minReserve, m_data.allocatedSize()); m_data.reserve(reserveSize);
diff --git a/Eigen/src/SparseCore/SparseSelfAdjointView.h b/Eigen/src/SparseCore/SparseSelfAdjointView.h index 3402bae..05b3de5 100644 --- a/Eigen/src/SparseCore/SparseSelfAdjointView.h +++ b/Eigen/src/SparseCore/SparseSelfAdjointView.h
@@ -143,7 +143,7 @@ return *this = src.twistedBy(pnull); } - // Since we override the copy-assignment operator, we need to explicitly re-declare the copy-constructor + // Since we override the copy-assignment operator, we need to explicitly redeclare the copy-constructor EIGEN_DEFAULT_COPY_CONSTRUCTOR(SparseSelfAdjointView) template <typename SrcMatrixType, unsigned int SrcMode>
diff --git a/Eigen/src/SparseCore/SparseVector.h b/Eigen/src/SparseCore/SparseVector.h index fac162e..6b1816b 100644 --- a/Eigen/src/SparseCore/SparseVector.h +++ b/Eigen/src/SparseCore/SparseVector.h
@@ -109,7 +109,7 @@ } /** \returns a reference to the coefficient value at given index \a i - * This operation involes a log(rho*size) binary search. If the coefficient does not + * This operation involves a log(rho*size) binary search. If the coefficient does not * exist yet, then a sorted insertion into a sequential buffer is performed. * * This insertion might be very costly if the number of nonzeros above \a i is large.
diff --git a/Eigen/src/SparseLU/SparseLU.h b/Eigen/src/SparseLU/SparseLU.h index 29be01a..cc69a42 100644 --- a/Eigen/src/SparseLU/SparseLU.h +++ b/Eigen/src/SparseLU/SparseLU.h
@@ -257,7 +257,7 @@ /** \brief Give the number of rows. */ inline Index rows() const { return m_mat.rows(); } - /** \brief Give the numver of columns. + /** \brief Give the number of columns. */ inline Index cols() const { return m_mat.cols(); } /** \brief Let you set that the pattern of the input matrix is symmetric @@ -600,7 +600,7 @@ * This exit was 0 if successful factorization. * > 0 if info = i, and i is been completed, but the factor U is exactly singular, * and division by zero will occur if it is used to solve a system of equation. - * > A->ncol: number of bytes allocated when memory allocation failure occured, plus A->ncol. + * > A->ncol: number of bytes allocated when memory allocation failure occurred, plus A->ncol. * If lwork = -1, it is the estimated amount of space needed, plus A->ncol. * * It seems that A was the name of the matrix in the past.
diff --git a/Eigen/src/SparseLU/SparseLU_Structs.h b/Eigen/src/SparseLU/SparseLU_Structs.h index 2afab01..85ba884 100644 --- a/Eigen/src/SparseLU/SparseLU_Structs.h +++ b/Eigen/src/SparseLU/SparseLU_Structs.h
@@ -50,7 +50,7 @@ * values. * * The last column structures (for pruning) will be removed - * after the numercial LU factorization phase. + * after the numerical LU factorization phase. * * (xlusup,lusup): lusup[*] contains the numerical values of the * rectangular supernodes; xlusup[j] points to the starting
diff --git a/Eigen/src/SparseLU/SparseLU_pivotL.h b/Eigen/src/SparseLU/SparseLU_pivotL.h index ada511e..10a090b 100644 --- a/Eigen/src/SparseLU/SparseLU_pivotL.h +++ b/Eigen/src/SparseLU/SparseLU_pivotL.h
@@ -37,7 +37,7 @@ namespace internal { /** - * \brief Performs the numerical pivotin on the current column of L, and the CDIV operation. + * \brief Performs the numerical pivoting on the current column of L, and the CDIV operation. * * Pivot policy : * (1) Compute thresh = u * max_(i>=j) abs(A_ij);
diff --git a/Eigen/src/SparseLU/SparseLU_pruneL.h b/Eigen/src/SparseLU/SparseLU_pruneL.h index 4f51d59..620f285 100644 --- a/Eigen/src/SparseLU/SparseLU_pruneL.h +++ b/Eigen/src/SparseLU/SparseLU_pruneL.h
@@ -101,7 +101,7 @@ kmin++; else { // kmin below pivrow (not yet pivoted), and kmax - // above pivrow: interchange the two suscripts + // above pivrow: interchange the two subscripts std::swap(glu.lsub(kmin), glu.lsub(kmax)); // If the supernode has only one column, then we
diff --git a/Eigen/src/plugins/ArrayCwiseUnaryOps.inc b/Eigen/src/plugins/ArrayCwiseUnaryOps.inc index 3f9d4e8..cc708fa 100644 --- a/Eigen/src/plugins/ArrayCwiseUnaryOps.inc +++ b/Eigen/src/plugins/ArrayCwiseUnaryOps.inc
@@ -518,7 +518,7 @@ const ScalarExponent& exponent) const { return UnaryPowReturnType<ScalarExponent>(derived(), internal::scalar_unary_pow_op<Scalar, ScalarExponent>(exponent)); #else -/** \returns an expression of the coefficients of \c *this rasied to the constant power \a exponent +/** \returns an expression of the coefficients of \c *this raised to the constant power \a exponent * * \tparam T is the scalar type of \a exponent. It must be compatible with the scalar type of the given expression. *