The Index types change: replace hard-coded int indices with a per-expression Index typedef.
As discussed on the mailing list (the full rationale is too long to repeat here): every dense expression now obtains its Index type from its storage kind, via ei_index<StorageKind>::type, so the index type is defined in a single place and can, for instance, be widened to a 64-bit type for very large matrices without touching this code again. All sizes, strides, loop counters and coefficient-access signatures are converted from int to Index, and the helper ei_int_if_dynamic becomes the type-parameterized ei_variable_if_dynamic<Index, N>.
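For readers of this patch, a minimal self-contained sketch of the mechanism may help. The names ei_index and the Dense storage-kind tag come from the diff below; MatrixSketch and the choice of std::ptrdiff_t as the underlying type are illustrative assumptions, not part of the patch:

    // Illustrative sketch only -- simplified names, not the actual Eigen code.
    #include <cstddef>
    #include <iostream>

    struct Dense {};  // storage-kind tag, as used by the patch

    // Maps a storage kind to the index type used by all expressions of that
    // kind; specialized once per storage kind.
    template<typename StorageKind> struct ei_index;
    template<> struct ei_index<Dense> { typedef std::ptrdiff_t type; };

    // A stand-in for a dense expression: it never names a concrete integer
    // type, everything goes through Index.
    struct MatrixSketch
    {
      typedef ei_index<Dense>::type Index;

      MatrixSketch(Index rows, Index cols) : m_rows(rows), m_cols(cols) {}
      Index rows() const { return m_rows; }
      Index cols() const { return m_cols; }
      Index size() const { return rows() * cols(); }

    private:
      Index m_rows, m_cols;
    };

    int main()
    {
      MatrixSketch m(3, 4);
      // Loop counters use Index too, exactly as in the converted loops below.
      for (MatrixSketch::Index i = 0; i < m.size(); ++i) {}
      std::cout << m.rows() << "x" << m.cols() << "\n";
      return 0;
    }

With this indirection, switching to another index type is a one-line change in the ei_index specialization rather than a library-wide sweep like the one below.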
diff --git a/Eigen/src/Array/Array.h b/Eigen/src/Array/Array.h
index e9fabcc..30d5529 100644
--- a/Eigen/src/Array/Array.h
+++ b/Eigen/src/Array/Array.h
@@ -101,7 +101,7 @@
* is called a null matrix. This constructor is the unique way to create null matrices: resizing
* a matrix to 0 is not supported.
*
- * \sa resize(int,int)
+ * \sa resize(Index,Index)
*/
EIGEN_STRONG_INLINE explicit Array() : Base()
{
@@ -126,7 +126,7 @@
* it is redundant to pass the dimension here, so it makes more sense to use the default
* constructor Matrix() instead.
*/
- EIGEN_STRONG_INLINE explicit Array(int dim)
+ EIGEN_STRONG_INLINE explicit Array(Index dim)
: Base(dim, RowsAtCompileTime == 1 ? 1 : dim, ColsAtCompileTime == 1 ? 1 : dim)
{
Base::_check_template_params();
@@ -149,7 +149,7 @@
* This is useful for dynamic-size matrices. For fixed-size matrices,
* it is redundant to pass these parameters, so one should use the default constructor
* Matrix() instead. */
- Array(int rows, int cols);
+ Array(Index rows, Index cols);
/** constructs an initialized 2D vector with given coefficients */
Array(const Scalar& x, const Scalar& y);
#endif
@@ -217,8 +217,8 @@
void swap(ArrayBase<OtherDerived> EIGEN_REF_TO_TEMPORARY other)
{ this->_swap(other.derived()); }
- inline int innerStride() const { return 1; }
- inline int outerStride() const { return this->innerSize(); }
+ inline Index innerStride() const { return 1; }
+ inline Index outerStride() const { return this->innerSize(); }
#ifdef EIGEN_ARRAY_PLUGIN
#include EIGEN_ARRAY_PLUGIN
diff --git a/Eigen/src/Array/ArrayBase.h b/Eigen/src/Array/ArrayBase.h
index b835a57..ccbc772 100644
--- a/Eigen/src/Array/ArrayBase.h
+++ b/Eigen/src/Array/ArrayBase.h
@@ -60,8 +60,11 @@
using ei_special_scalar_op_base<Derived,typename ei_traits<Derived>::Scalar,
typename NumTraits<typename ei_traits<Derived>::Scalar>::Real>::operator*;
+ typedef typename ei_traits<Derived>::StorageKind StorageKind;
+ typedef typename ei_index<StorageKind>::type Index;
typedef typename ei_traits<Derived>::Scalar Scalar;
typedef typename ei_packet_traits<Scalar>::type PacketScalar;
+ typedef typename NumTraits<Scalar>::Real RealScalar;
typedef DenseBase<Derived> Base;
using Base::RowsAtCompileTime;
@@ -88,7 +91,6 @@
using Base::operator*=;
using Base::operator/=;
- typedef typename Base::RealScalar RealScalar;
typedef typename Base::CoeffReturnType CoeffReturnType;
#endif // not EIGEN_PARSED_BY_DOXYGEN
@@ -161,8 +163,8 @@
ArrayBase() : Base() {}
private:
- explicit ArrayBase(int);
- ArrayBase(int,int);
+ explicit ArrayBase(Index);
+ ArrayBase(Index,Index);
template<typename OtherDerived> explicit ArrayBase(const ArrayBase<OtherDerived>&);
};
diff --git a/Eigen/src/Array/ArrayWrapper.h b/Eigen/src/Array/ArrayWrapper.h
index 83cd8ba..98d388d 100644
--- a/Eigen/src/Array/ArrayWrapper.h
+++ b/Eigen/src/Array/ArrayWrapper.h
@@ -53,51 +53,51 @@
inline ArrayWrapper(const ExpressionType& matrix) : m_expression(matrix) {}
- inline int rows() const { return m_expression.rows(); }
- inline int cols() const { return m_expression.cols(); }
- inline int outerStride() const { return m_expression.outerStride(); }
- inline int innerStride() const { return m_expression.innerStride(); }
+ inline Index rows() const { return m_expression.rows(); }
+ inline Index cols() const { return m_expression.cols(); }
+ inline Index outerStride() const { return m_expression.outerStride(); }
+ inline Index innerStride() const { return m_expression.innerStride(); }
- inline const CoeffReturnType coeff(int row, int col) const
+ inline const CoeffReturnType coeff(Index row, Index col) const
{
return m_expression.coeff(row, col);
}
- inline Scalar& coeffRef(int row, int col)
+ inline Scalar& coeffRef(Index row, Index col)
{
return m_expression.const_cast_derived().coeffRef(row, col);
}
- inline const CoeffReturnType coeff(int index) const
+ inline const CoeffReturnType coeff(Index index) const
{
return m_expression.coeff(index);
}
- inline Scalar& coeffRef(int index)
+ inline Scalar& coeffRef(Index index)
{
return m_expression.const_cast_derived().coeffRef(index);
}
template<int LoadMode>
- inline const PacketScalar packet(int row, int col) const
+ inline const PacketScalar packet(Index row, Index col) const
{
return m_expression.template packet<LoadMode>(row, col);
}
template<int LoadMode>
- inline void writePacket(int row, int col, const PacketScalar& x)
+ inline void writePacket(Index row, Index col, const PacketScalar& x)
{
m_expression.const_cast_derived().template writePacket<LoadMode>(row, col, x);
}
template<int LoadMode>
- inline const PacketScalar packet(int index) const
+ inline const PacketScalar packet(Index index) const
{
return m_expression.template packet<LoadMode>(index);
}
template<int LoadMode>
- inline void writePacket(int index, const PacketScalar& x)
+ inline void writePacket(Index index, const PacketScalar& x)
{
m_expression.const_cast_derived().template writePacket<LoadMode>(index, x);
}
@@ -138,51 +138,51 @@
inline MatrixWrapper(const ExpressionType& matrix) : m_expression(matrix) {}
- inline int rows() const { return m_expression.rows(); }
- inline int cols() const { return m_expression.cols(); }
- inline int outerStride() const { return m_expression.outerStride(); }
- inline int innerStride() const { return m_expression.innerStride(); }
+ inline Index rows() const { return m_expression.rows(); }
+ inline Index cols() const { return m_expression.cols(); }
+ inline Index outerStride() const { return m_expression.outerStride(); }
+ inline Index innerStride() const { return m_expression.innerStride(); }
- inline const CoeffReturnType coeff(int row, int col) const
+ inline const CoeffReturnType coeff(Index row, Index col) const
{
return m_expression.coeff(row, col);
}
- inline Scalar& coeffRef(int row, int col)
+ inline Scalar& coeffRef(Index row, Index col)
{
return m_expression.const_cast_derived().coeffRef(row, col);
}
- inline const CoeffReturnType coeff(int index) const
+ inline const CoeffReturnType coeff(Index index) const
{
return m_expression.coeff(index);
}
- inline Scalar& coeffRef(int index)
+ inline Scalar& coeffRef(Index index)
{
return m_expression.const_cast_derived().coeffRef(index);
}
template<int LoadMode>
- inline const PacketScalar packet(int row, int col) const
+ inline const PacketScalar packet(Index row, Index col) const
{
return m_expression.template packet<LoadMode>(row, col);
}
template<int LoadMode>
- inline void writePacket(int row, int col, const PacketScalar& x)
+ inline void writePacket(Index row, Index col, const PacketScalar& x)
{
m_expression.const_cast_derived().template writePacket<LoadMode>(row, col, x);
}
template<int LoadMode>
- inline const PacketScalar packet(int index) const
+ inline const PacketScalar packet(Index index) const
{
return m_expression.template packet<LoadMode>(index);
}
template<int LoadMode>
- inline void writePacket(int index, const PacketScalar& x)
+ inline void writePacket(Index index, const PacketScalar& x)
{
m_expression.const_cast_derived().template writePacket<LoadMode>(index, x);
}
diff --git a/Eigen/src/Array/BooleanRedux.h b/Eigen/src/Array/BooleanRedux.h
index 9c6985a..67c29f5 100644
--- a/Eigen/src/Array/BooleanRedux.h
+++ b/Eigen/src/Array/BooleanRedux.h
@@ -97,8 +97,8 @@
>::run(derived());
else
{
- for(int j = 0; j < cols(); ++j)
- for(int i = 0; i < rows(); ++i)
+ for(Index j = 0; j < cols(); ++j)
+ for(Index i = 0; i < rows(); ++i)
if (!coeff(i, j)) return false;
return true;
}
@@ -121,8 +121,8 @@
>::run(derived());
else
{
- for(int j = 0; j < cols(); ++j)
- for(int i = 0; i < rows(); ++i)
+ for(Index j = 0; j < cols(); ++j)
+ for(Index i = 0; i < rows(); ++i)
if (coeff(i, j)) return true;
return false;
}
@@ -135,9 +135,9 @@
* \sa all(), any()
*/
template<typename Derived>
-inline int DenseBase<Derived>::count() const
+inline typename DenseBase<Derived>::Index DenseBase<Derived>::count() const
{
- return derived().template cast<bool>().template cast<int>().sum();
+ return derived().template cast<bool>().template cast<Index>().sum();
}
#endif // EIGEN_ALLANDANY_H
diff --git a/Eigen/src/Array/Random.h b/Eigen/src/Array/Random.h
index 9a81c7b..c4c482b 100644
--- a/Eigen/src/Array/Random.h
+++ b/Eigen/src/Array/Random.h
@@ -27,7 +27,8 @@
template<typename Scalar> struct ei_scalar_random_op {
EIGEN_EMPTY_STRUCT_CTOR(ei_scalar_random_op)
- inline const Scalar operator() (int, int = 0) const { return ei_random<Scalar>(); }
+ template<typename Index>
+ inline const Scalar operator() (Index, Index = 0) const { return ei_random<Scalar>(); }
};
template<typename Scalar>
struct ei_functor_traits<ei_scalar_random_op<Scalar> >
@@ -51,11 +52,11 @@
* a temporary matrix whenever it is nested in a larger expression. This prevents unexpected
* behavior with expressions involving random matrices.
*
- * \sa MatrixBase::setRandom(), MatrixBase::Random(int), MatrixBase::Random()
+ * \sa MatrixBase::setRandom(), MatrixBase::Random(Index), MatrixBase::Random()
*/
template<typename Derived>
inline const CwiseNullaryOp<ei_scalar_random_op<typename ei_traits<Derived>::Scalar>, Derived>
-DenseBase<Derived>::Random(int rows, int cols)
+DenseBase<Derived>::Random(Index rows, Index cols)
{
return NullaryExpr(rows, cols, ei_scalar_random_op<Scalar>());
}
@@ -80,11 +81,11 @@
* a temporary vector whenever it is nested in a larger expression. This prevents unexpected
* behavior with expressions involving random matrices.
*
- * \sa MatrixBase::setRandom(), MatrixBase::Random(int,int), MatrixBase::Random()
+ * \sa MatrixBase::setRandom(), MatrixBase::Random(Index,Index), MatrixBase::Random()
*/
template<typename Derived>
inline const CwiseNullaryOp<ei_scalar_random_op<typename ei_traits<Derived>::Scalar>, Derived>
-DenseBase<Derived>::Random(int size)
+DenseBase<Derived>::Random(Index size)
{
return NullaryExpr(size, ei_scalar_random_op<Scalar>());
}
@@ -103,7 +104,7 @@
* a temporary matrix whenever it is nested in a larger expression. This prevents unexpected
* behavior with expressions involving random matrices.
*
- * \sa MatrixBase::setRandom(), MatrixBase::Random(int,int), MatrixBase::Random(int)
+ * \sa MatrixBase::setRandom(), MatrixBase::Random(Index,Index), MatrixBase::Random(Index)
*/
template<typename Derived>
inline const CwiseNullaryOp<ei_scalar_random_op<typename ei_traits<Derived>::Scalar>, Derived>
@@ -119,7 +120,7 @@
* Example: \include MatrixBase_setRandom.cpp
* Output: \verbinclude MatrixBase_setRandom.out
*
- * \sa class CwiseNullaryOp, setRandom(int), setRandom(int,int)
+ * \sa class CwiseNullaryOp, setRandom(Index), setRandom(Index,Index)
*/
template<typename Derived>
inline Derived& DenseBase<Derived>::setRandom()
@@ -134,11 +135,11 @@
* Example: \include Matrix_setRandom_int.cpp
* Output: \verbinclude Matrix_setRandom_int.out
*
- * \sa MatrixBase::setRandom(), setRandom(int,int), class CwiseNullaryOp, MatrixBase::Random()
+ * \sa MatrixBase::setRandom(), setRandom(Index,Index), class CwiseNullaryOp, MatrixBase::Random()
*/
template<typename Derived>
EIGEN_STRONG_INLINE Derived&
-DenseStorageBase<Derived>::setRandom(int size)
+DenseStorageBase<Derived>::setRandom(Index size)
{
resize(size);
return setRandom();
@@ -152,11 +153,11 @@
* Example: \include Matrix_setRandom_int_int.cpp
* Output: \verbinclude Matrix_setRandom_int_int.out
*
- * \sa MatrixBase::setRandom(), setRandom(int), class CwiseNullaryOp, MatrixBase::Random()
+ * \sa MatrixBase::setRandom(), setRandom(Index), class CwiseNullaryOp, MatrixBase::Random()
*/
template<typename Derived>
EIGEN_STRONG_INLINE Derived&
-DenseStorageBase<Derived>::setRandom(int rows, int cols)
+DenseStorageBase<Derived>::setRandom(Index rows, Index cols)
{
resize(rows, cols);
return setRandom();
diff --git a/Eigen/src/Array/Replicate.h b/Eigen/src/Array/Replicate.h
index 63e4683..c60d990 100644
--- a/Eigen/src/Array/Replicate.h
+++ b/Eigen/src/Array/Replicate.h
@@ -90,28 +90,28 @@
THE_MATRIX_OR_EXPRESSION_THAT_YOU_PASSED_DOES_NOT_HAVE_THE_EXPECTED_TYPE)
}
- inline int rows() const { return m_matrix.rows() * m_rowFactor.value(); }
- inline int cols() const { return m_matrix.cols() * m_colFactor.value(); }
+ inline Index rows() const { return m_matrix.rows() * m_rowFactor.value(); }
+ inline Index cols() const { return m_matrix.cols() * m_colFactor.value(); }
- inline Scalar coeff(int row, int col) const
+ inline Scalar coeff(Index row, Index col) const
{
// try to avoid using modulo; this is a pure optimization strategy
- const int actual_row = ei_traits<MatrixType>::RowsAtCompileTime==1 ? 0
+ const Index actual_row = ei_traits<MatrixType>::RowsAtCompileTime==1 ? 0
: RowFactor==1 ? row
: row%m_matrix.rows();
- const int actual_col = ei_traits<MatrixType>::ColsAtCompileTime==1 ? 0
+ const Index actual_col = ei_traits<MatrixType>::ColsAtCompileTime==1 ? 0
: ColFactor==1 ? col
: col%m_matrix.cols();
return m_matrix.coeff(actual_row, actual_col);
}
template<int LoadMode>
- inline PacketScalar packet(int row, int col) const
+ inline PacketScalar packet(Index row, Index col) const
{
- const int actual_row = ei_traits<MatrixType>::RowsAtCompileTime==1 ? 0
+ const Index actual_row = ei_traits<MatrixType>::RowsAtCompileTime==1 ? 0
: RowFactor==1 ? row
: row%m_matrix.rows();
- const int actual_col = ei_traits<MatrixType>::ColsAtCompileTime==1 ? 0
+ const Index actual_col = ei_traits<MatrixType>::ColsAtCompileTime==1 ? 0
: ColFactor==1 ? col
: col%m_matrix.cols();
@@ -121,8 +121,8 @@
protected:
const typename MatrixType::Nested m_matrix;
- const ei_int_if_dynamic<RowFactor> m_rowFactor;
- const ei_int_if_dynamic<ColFactor> m_colFactor;
+ const ei_variable_if_dynamic<Index, RowFactor> m_rowFactor;
+ const ei_variable_if_dynamic<Index, ColFactor> m_colFactor;
};
/** \nonstableyet
@@ -131,7 +131,7 @@
* Example: \include MatrixBase_replicate.cpp
* Output: \verbinclude MatrixBase_replicate.out
*
- * \sa VectorwiseOp::replicate(), DenseBase::replicate(int,int), class Replicate
+ * \sa VectorwiseOp::replicate(), DenseBase::replicate(Index,Index), class Replicate
*/
template<typename Derived>
template<int RowFactor, int ColFactor>
@@ -151,7 +151,7 @@
*/
template<typename Derived>
inline const Replicate<Derived,Dynamic,Dynamic>
-DenseBase<Derived>::replicate(int rowFactor,int colFactor) const
+DenseBase<Derived>::replicate(Index rowFactor,Index colFactor) const
{
return Replicate<Derived,Dynamic,Dynamic>(derived(),rowFactor,colFactor);
}
@@ -166,7 +166,7 @@
*/
template<typename ExpressionType, int Direction>
const typename VectorwiseOp<ExpressionType,Direction>::ReplicateReturnType
-VectorwiseOp<ExpressionType,Direction>::replicate(int factor) const
+VectorwiseOp<ExpressionType,Direction>::replicate(Index factor) const
{
return typename VectorwiseOp<ExpressionType,Direction>::ReplicateReturnType
(_expression(),Direction==Vertical?factor:1,Direction==Horizontal?factor:1);
diff --git a/Eigen/src/Array/Reverse.h b/Eigen/src/Array/Reverse.h
index 0f56d5d..cca4251 100644
--- a/Eigen/src/Array/Reverse.h
+++ b/Eigen/src/Array/Reverse.h
@@ -103,33 +103,33 @@
EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Reverse)
- inline int rows() const { return m_matrix.rows(); }
- inline int cols() const { return m_matrix.cols(); }
+ inline Index rows() const { return m_matrix.rows(); }
+ inline Index cols() const { return m_matrix.cols(); }
- inline Scalar& coeffRef(int row, int col)
+ inline Scalar& coeffRef(Index row, Index col)
{
return m_matrix.const_cast_derived().coeffRef(ReverseRow ? m_matrix.rows() - row - 1 : row,
ReverseCol ? m_matrix.cols() - col - 1 : col);
}
- inline const Scalar coeff(int row, int col) const
+ inline const Scalar coeff(Index row, Index col) const
{
return m_matrix.coeff(ReverseRow ? m_matrix.rows() - row - 1 : row,
ReverseCol ? m_matrix.cols() - col - 1 : col);
}
- inline const Scalar coeff(int index) const
+ inline const Scalar coeff(Index index) const
{
return m_matrix.coeff(m_matrix.size() - index - 1);
}
- inline Scalar& coeffRef(int index)
+ inline Scalar& coeffRef(Index index)
{
return m_matrix.const_cast_derived().coeffRef(m_matrix.size() - index - 1);
}
template<int LoadMode>
- inline const PacketScalar packet(int row, int col) const
+ inline const PacketScalar packet(Index row, Index col) const
{
return reverse_packet::run(m_matrix.template packet<LoadMode>(
ReverseRow ? m_matrix.rows() - row - OffsetRow : row,
@@ -137,7 +137,7 @@
}
template<int LoadMode>
- inline void writePacket(int row, int col, const PacketScalar& x)
+ inline void writePacket(Index row, Index col, const PacketScalar& x)
{
m_matrix.const_cast_derived().template writePacket<LoadMode>(
ReverseRow ? m_matrix.rows() - row - OffsetRow : row,
@@ -146,13 +146,13 @@
}
template<int LoadMode>
- inline const PacketScalar packet(int index) const
+ inline const PacketScalar packet(Index index) const
{
return ei_preverse(m_matrix.template packet<LoadMode>( m_matrix.size() - index - PacketSize ));
}
template<int LoadMode>
- inline void writePacket(int index, const PacketScalar& x)
+ inline void writePacket(Index index, const PacketScalar& x)
{
m_matrix.const_cast_derived().template writePacket<LoadMode>(m_matrix.size() - index - PacketSize, ei_preverse(x));
}
diff --git a/Eigen/src/Array/Select.h b/Eigen/src/Array/Select.h
index 100a264..8834156 100644
--- a/Eigen/src/Array/Select.h
+++ b/Eigen/src/Array/Select.h
@@ -81,10 +81,10 @@
ei_assert(m_condition.cols() == m_then.cols() && m_condition.cols() == m_else.cols());
}
- int rows() const { return m_condition.rows(); }
- int cols() const { return m_condition.cols(); }
+ Index rows() const { return m_condition.rows(); }
+ Index cols() const { return m_condition.cols(); }
- const Scalar coeff(int i, int j) const
+ const Scalar coeff(Index i, Index j) const
{
if (m_condition.coeff(i,j))
return m_then.coeff(i,j);
@@ -92,7 +92,7 @@
return m_else.coeff(i,j);
}
- const Scalar coeff(int i) const
+ const Scalar coeff(Index i) const
{
if (m_condition.coeff(i))
return m_then.coeff(i);
diff --git a/Eigen/src/Array/VectorwiseOp.h b/Eigen/src/Array/VectorwiseOp.h
index c1f17f6..e338a91 100644
--- a/Eigen/src/Array/VectorwiseOp.h
+++ b/Eigen/src/Array/VectorwiseOp.h
@@ -89,10 +89,10 @@
PartialReduxExpr(const MatrixType& mat, const MemberOp& func = MemberOp())
: m_matrix(mat), m_functor(func) {}
- int rows() const { return (Direction==Vertical ? 1 : m_matrix.rows()); }
- int cols() const { return (Direction==Horizontal ? 1 : m_matrix.cols()); }
+ Index rows() const { return (Direction==Vertical ? 1 : m_matrix.rows()); }
+ Index cols() const { return (Direction==Horizontal ? 1 : m_matrix.cols()); }
- const Scalar coeff(int i, int j) const
+ const Scalar coeff(Index i, Index j) const
{
if (Direction==Vertical)
return m_functor(m_matrix.col(j));
@@ -100,7 +100,7 @@
return m_functor(m_matrix.row(i));
}
- const Scalar coeff(int index) const
+ const Scalar coeff(Index index) const
{
if (Direction==Vertical)
return m_functor(m_matrix.col(index));
@@ -177,7 +177,8 @@
{
public:
- typedef typename ei_traits<ExpressionType>::Scalar Scalar;
+ typedef typename ExpressionType::Scalar Scalar;
+ typedef typename ExpressionType::Index Index;
typedef typename ei_meta_if<ei_must_nest_by_value<ExpressionType>::ret,
ExpressionType, const ExpressionType&>::ret ExpressionTypeNested;
@@ -209,14 +210,14 @@
typedef typename ei_meta_if<Direction==Vertical,
typename ExpressionType::ColXpr,
typename ExpressionType::RowXpr>::ret SubVector;
- SubVector subVector(int i)
+ SubVector subVector(Index i)
{
return SubVector(m_matrix.derived(),i);
}
/** \internal
* \returns the number of subvectors in the direction \c Direction */
- int subVectors() const
+ Index subVectors() const
{ return Direction==Vertical?m_matrix.cols():m_matrix.rows(); }
template<typename OtherDerived> struct ExtendedType {
@@ -362,7 +363,7 @@
* Output: \verbinclude PartialRedux_count.out
*
* \sa DenseBase::count() */
- const PartialReduxExpr<ExpressionType, ei_member_count<int>, Direction> count() const
+ const PartialReduxExpr<ExpressionType, ei_member_count<Index>, Direction> count() const
{ return _expression(); }
/** \returns a row (or column) vector expression of the product
@@ -387,7 +388,7 @@
{ return Reverse<ExpressionType, Direction>( _expression() ); }
typedef Replicate<ExpressionType,Direction==Vertical?Dynamic:1,Direction==Horizontal?Dynamic:1> ReplicateReturnType;
- const ReplicateReturnType replicate(int factor) const;
+ const ReplicateReturnType replicate(Index factor) const;
/** \nonstableyet
* \return an expression of the replication of each column (or row) of \c *this
@@ -395,11 +396,11 @@
* Example: \include DirectionWise_replicate.cpp
* Output: \verbinclude DirectionWise_replicate.out
*
- * \sa VectorwiseOp::replicate(int), DenseBase::replicate(), class Replicate
+ * \sa VectorwiseOp::replicate(Index), DenseBase::replicate(), class Replicate
*/
// NOTE: implemented here because of Sun Studio compilation errors
template<int Factor> const Replicate<ExpressionType,(IsVertical?Factor:1),(IsHorizontal?Factor:1)>
- replicate(int factor = Factor) const
+ replicate(Index factor = Factor) const
{
return Replicate<ExpressionType,Direction==Vertical?Factor:1,Direction==Horizontal?Factor:1>
(_expression(),Direction==Vertical?factor:1,Direction==Horizontal?factor:1);
@@ -413,7 +414,7 @@
{
EIGEN_STATIC_ASSERT_VECTOR_ONLY(OtherDerived)
//ei_assert((m_matrix.isNull()) == (other.isNull())); FIXME
- for(int j=0; j<subVectors(); ++j)
+ for(Index j=0; j<subVectors(); ++j)
subVector(j) = other;
return const_cast<ExpressionType&>(m_matrix);
}
@@ -423,7 +424,7 @@
ExpressionType& operator+=(const DenseBase<OtherDerived>& other)
{
EIGEN_STATIC_ASSERT_VECTOR_ONLY(OtherDerived)
- for(int j=0; j<subVectors(); ++j)
+ for(Index j=0; j<subVectors(); ++j)
subVector(j) += other.derived();
return const_cast<ExpressionType&>(m_matrix);
}
@@ -433,7 +434,7 @@
ExpressionType& operator-=(const DenseBase<OtherDerived>& other)
{
EIGEN_STATIC_ASSERT_VECTOR_ONLY(OtherDerived)
- for(int j=0; j<subVectors(); ++j)
+ for(Index j=0; j<subVectors(); ++j)
subVector(j) -= other.derived();
return const_cast<ExpressionType&>(m_matrix);
}
diff --git a/Eigen/src/Cholesky/LDLT.h b/Eigen/src/Cholesky/LDLT.h
index 206ccef..a433f8d 100644
--- a/Eigen/src/Cholesky/LDLT.h
+++ b/Eigen/src/Cholesky/LDLT.h
@@ -65,7 +65,8 @@
};
typedef typename MatrixType::Scalar Scalar;
typedef typename NumTraits<typename MatrixType::Scalar>::Real RealScalar;
- typedef typename ei_plain_col_type<MatrixType, int>::type IntColVectorType;
+ typedef typename MatrixType::Index Index;
+ typedef typename ei_plain_col_type<MatrixType, Index>::type IntColVectorType;
typedef Matrix<Scalar, RowsAtCompileTime, 1, Options, MaxRowsAtCompileTime, 1> TmpMatrixType;
/** \brief Default Constructor.
@@ -81,7 +82,7 @@
* according to the specified problem \a size.
* \sa LDLT()
*/
- LDLT(int size) : m_matrix(size, size),
+ LDLT(Index size) : m_matrix(size, size),
m_p(size),
m_transpositions(size),
m_temporary(size),
@@ -168,8 +169,8 @@
MatrixType reconstructedMatrix() const;
- inline int rows() const { return m_matrix.rows(); }
- inline int cols() const { return m_matrix.cols(); }
+ inline Index rows() const { return m_matrix.rows(); }
+ inline Index cols() const { return m_matrix.cols(); }
protected:
/** \internal
@@ -182,7 +183,7 @@
IntColVectorType m_p;
IntColVectorType m_transpositions; // FIXME do we really need to store permanently the transpositions?
TmpMatrixType m_temporary;
- int m_sign;
+ Index m_sign;
bool m_isInitialized;
};
@@ -192,7 +193,7 @@
LDLT<MatrixType>& LDLT<MatrixType>::compute(const MatrixType& a)
{
ei_assert(a.rows()==a.cols());
- const int size = a.rows();
+ const Index size = a.rows();
m_matrix = a;
@@ -215,10 +216,10 @@
// have optimal alignment.
m_temporary.resize(size);
- for (int j = 0; j < size; ++j)
+ for (Index j = 0; j < size; ++j)
{
// Find largest diagonal element
- int index_of_biggest_in_corner;
+ Index index_of_biggest_in_corner;
biggest_in_corner = m_matrix.diagonal().tail(size-j).cwiseAbs()
.maxCoeff(&index_of_biggest_in_corner);
index_of_biggest_in_corner += j;
@@ -236,7 +237,7 @@
// Finish early if the matrix is not full rank.
if(biggest_in_corner < cutoff)
{
- for(int i = j; i < size; i++) m_transpositions.coeffRef(i) = i;
+ for(Index i = j; i < size; i++) m_transpositions.coeffRef(i) = i;
break;
}
@@ -256,7 +257,7 @@
RealScalar Djj = ei_real(m_matrix.coeff(j,j) - m_matrix.row(j).head(j).dot(m_matrix.col(j).head(j)));
m_matrix.coeffRef(j,j) = Djj;
- int endSize = size - j - 1;
+ Index endSize = size - j - 1;
if (endSize > 0) {
m_temporary.tail(endSize).noalias() = m_matrix.block(j+1,0, endSize, j)
* m_matrix.col(j).head(j).conjugate();
@@ -272,8 +273,8 @@
}
// Reverse applied swaps to get P matrix.
- for(int k = 0; k < size; ++k) m_p.coeffRef(k) = k;
- for(int k = size-1; k >= 0; --k) {
+ for(Index k = 0; k < size; ++k) m_p.coeffRef(k) = k;
+ for(Index k = size-1; k >= 0; --k) {
std::swap(m_p.coeffRef(k), m_p.coeffRef(m_transpositions.coeff(k)));
}
@@ -310,11 +311,11 @@
bool LDLT<MatrixType>::solveInPlace(MatrixBase<Derived> &bAndX) const
{
ei_assert(m_isInitialized && "LDLT is not initialized.");
- const int size = m_matrix.rows();
+ const Index size = m_matrix.rows();
ei_assert(size == bAndX.rows());
// z = P b
- for(int i = 0; i < size; ++i) bAndX.row(m_transpositions.coeff(i)).swap(bAndX.row(i));
+ for(Index i = 0; i < size; ++i) bAndX.row(m_transpositions.coeff(i)).swap(bAndX.row(i));
// y = L^-1 z
//matrixL().solveInPlace(bAndX);
@@ -327,7 +328,7 @@
m_matrix.adjoint().template triangularView<UnitUpper>().solveInPlace(bAndX);
// x = P^T u
- for (int i = size-1; i >= 0; --i) bAndX.row(m_transpositions.coeff(i)).swap(bAndX.row(i));
+ for (Index i = size-1; i >= 0; --i) bAndX.row(m_transpositions.coeff(i)).swap(bAndX.row(i));
return true;
}
@@ -339,12 +340,12 @@
MatrixType LDLT<MatrixType>::reconstructedMatrix() const
{
ei_assert(m_isInitialized && "LDLT is not initialized.");
- const int size = m_matrix.rows();
+ const Index size = m_matrix.rows();
MatrixType res(size,size);
res.setIdentity();
// PI
- for(int i = 0; i < size; ++i) res.row(m_transpositions.coeff(i)).swap(res.row(i));
+ for(Index i = 0; i < size; ++i) res.row(m_transpositions.coeff(i)).swap(res.row(i));
// L^* P
res = matrixL().adjoint() * res;
// D(L^*P)
@@ -352,7 +353,7 @@
// L(DL^*P)
res = matrixL() * res;
// P^T (LDL^*P)
- for (int i = size-1; i >= 0; --i) res.row(m_transpositions.coeff(i)).swap(res.row(i));
+ for (Index i = size-1; i >= 0; --i) res.row(m_transpositions.coeff(i)).swap(res.row(i));
return res;
}
diff --git a/Eigen/src/Cholesky/LLT.h b/Eigen/src/Cholesky/LLT.h
index 22d0c91..29fa465 100644
--- a/Eigen/src/Cholesky/LLT.h
+++ b/Eigen/src/Cholesky/LLT.h
@@ -65,6 +65,7 @@
};
typedef typename MatrixType::Scalar Scalar;
typedef typename NumTraits<typename MatrixType::Scalar>::Real RealScalar;
+ typedef typename MatrixType::Index Index;
enum {
PacketSize = ei_packet_traits<Scalar>::size,
@@ -88,7 +89,7 @@
* according to the specified problem \a size.
* \sa LLT()
*/
- LLT(int size) : m_matrix(size, size),
+ LLT(Index size) : m_matrix(size, size),
m_isInitialized(false) {}
LLT(const MatrixType& matrix)
@@ -149,8 +150,8 @@
MatrixType reconstructedMatrix() const;
- inline int rows() const { return m_matrix.rows(); }
- inline int cols() const { return m_matrix.cols(); }
+ inline Index rows() const { return m_matrix.rows(); }
+ inline Index cols() const { return m_matrix.cols(); }
protected:
/** \internal
@@ -171,11 +172,12 @@
{
typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::RealScalar RealScalar;
+ typedef typename MatrixType::Index Index;
ei_assert(mat.rows()==mat.cols());
- const int size = mat.rows();
- for(int k = 0; k < size; ++k)
+ const Index size = mat.rows();
+ for(Index k = 0; k < size; ++k)
{
- int rs = size-k-1; // remaining size
+ Index rs = size-k-1; // remaining size
Block<MatrixType,Dynamic,1> A21(mat,k+1,k,rs,1);
Block<MatrixType,1,Dynamic> A10(mat,k,0,1,k);
@@ -195,19 +197,20 @@
template<typename MatrixType>
static bool blocked(MatrixType& m)
{
+ typedef typename MatrixType::Index Index;
ei_assert(m.rows()==m.cols());
- int size = m.rows();
+ Index size = m.rows();
if(size<32)
return unblocked(m);
- int blockSize = size/8;
+ Index blockSize = size/8;
blockSize = (blockSize/16)*16;
- blockSize = std::min(std::max(blockSize,8), 128);
+ blockSize = std::min(std::max(blockSize,Index(8)), Index(128));
- for (int k=0; k<size; k+=blockSize)
+ for (Index k=0; k<size; k+=blockSize)
{
- int bs = std::min(blockSize, size-k);
- int rs = size - k - bs;
+ Index bs = std::min(blockSize, size-k);
+ Index rs = size - k - bs;
Block<MatrixType,Dynamic,Dynamic> A11(m,k, k, bs,bs);
Block<MatrixType,Dynamic,Dynamic> A21(m,k+bs,k, rs,bs);
@@ -266,7 +269,7 @@
LLT<MatrixType,_UpLo>& LLT<MatrixType,_UpLo>::compute(const MatrixType& a)
{
assert(a.rows()==a.cols());
- const int size = a.rows();
+ const Index size = a.rows();
m_matrix.resize(size, size);
m_matrix = a;
diff --git a/Eigen/src/Core/Assign.h b/Eigen/src/Core/Assign.h
index eb7bca1..494df7b 100644
--- a/Eigen/src/Core/Assign.h
+++ b/Eigen/src/Core/Assign.h
@@ -254,12 +254,13 @@
template<typename Derived1, typename Derived2>
struct ei_assign_impl<Derived1, Derived2, DefaultTraversal, NoUnrolling>
{
+ typedef typename Derived1::Index Index;
inline static void run(Derived1 &dst, const Derived2 &src)
{
- const int innerSize = dst.innerSize();
- const int outerSize = dst.outerSize();
- for(int outer = 0; outer < outerSize; ++outer)
- for(int inner = 0; inner < innerSize; ++inner)
+ const Index innerSize = dst.innerSize();
+ const Index outerSize = dst.outerSize();
+ for(Index outer = 0; outer < outerSize; ++outer)
+ for(Index inner = 0; inner < innerSize; ++inner)
dst.copyCoeffByOuterInner(outer, inner, src);
}
};
@@ -277,10 +278,11 @@
template<typename Derived1, typename Derived2>
struct ei_assign_impl<Derived1, Derived2, DefaultTraversal, InnerUnrolling>
{
+ typedef typename Derived1::Index Index;
EIGEN_STRONG_INLINE static void run(Derived1 &dst, const Derived2 &src)
{
- const int outerSize = dst.outerSize();
- for(int outer = 0; outer < outerSize; ++outer)
+ const Index outerSize = dst.outerSize();
+ for(Index outer = 0; outer < outerSize; ++outer)
ei_assign_DefaultTraversal_InnerUnrolling<Derived1, Derived2, 0, Derived1::InnerSizeAtCompileTime>
::run(dst, src, outer);
}
@@ -293,10 +295,11 @@
template<typename Derived1, typename Derived2>
struct ei_assign_impl<Derived1, Derived2, LinearTraversal, NoUnrolling>
{
+ typedef typename Derived1::Index Index;
inline static void run(Derived1 &dst, const Derived2 &src)
{
- const int size = dst.size();
- for(int i = 0; i < size; ++i)
+ const Index size = dst.size();
+ for(Index i = 0; i < size; ++i)
dst.copyCoeff(i, src);
}
};
@@ -318,13 +321,14 @@
template<typename Derived1, typename Derived2>
struct ei_assign_impl<Derived1, Derived2, InnerVectorizedTraversal, NoUnrolling>
{
+ typedef typename Derived1::Index Index;
inline static void run(Derived1 &dst, const Derived2 &src)
{
- const int innerSize = dst.innerSize();
- const int outerSize = dst.outerSize();
- const int packetSize = ei_packet_traits<typename Derived1::Scalar>::size;
- for(int outer = 0; outer < outerSize; ++outer)
- for(int inner = 0; inner < innerSize; inner+=packetSize)
+ const Index innerSize = dst.innerSize();
+ const Index outerSize = dst.outerSize();
+ const Index packetSize = ei_packet_traits<typename Derived1::Scalar>::size;
+ for(Index outer = 0; outer < outerSize; ++outer)
+ for(Index inner = 0; inner < innerSize; inner+=packetSize)
dst.template copyPacketByOuterInner<Derived2, Aligned, Aligned>(outer, inner, src);
}
};
@@ -342,10 +346,11 @@
template<typename Derived1, typename Derived2>
struct ei_assign_impl<Derived1, Derived2, InnerVectorizedTraversal, InnerUnrolling>
{
+ typedef typename Derived1::Index Index;
EIGEN_STRONG_INLINE static void run(Derived1 &dst, const Derived2 &src)
{
- const int outerSize = dst.outerSize();
- for(int outer = 0; outer < outerSize; ++outer)
+ const Index outerSize = dst.outerSize();
+ for(Index outer = 0; outer < outerSize; ++outer)
ei_assign_innervec_InnerUnrolling<Derived1, Derived2, 0, Derived1::InnerSizeAtCompileTime>
::run(dst, src, outer);
}
@@ -359,7 +364,7 @@
struct ei_unaligned_assign_impl
{
template <typename Derived, typename OtherDerived>
- static EIGEN_STRONG_INLINE void run(const Derived&, OtherDerived&, int, int) {}
+ static EIGEN_STRONG_INLINE void run(const Derived&, OtherDerived&, typename Derived::Index, typename Derived::Index) {}
};
template <>
@@ -369,13 +374,13 @@
// packet access path.
#ifdef _MSC_VER
template <typename Derived, typename OtherDerived>
- static EIGEN_DONT_INLINE void run(const Derived& src, OtherDerived& dst, int start, int end)
+ static EIGEN_DONT_INLINE void run(const Derived& src, OtherDerived& dst, typename Derived::Index start, typename Derived::Index end)
#else
template <typename Derived, typename OtherDerived>
- static EIGEN_STRONG_INLINE void run(const Derived& src, OtherDerived& dst, int start, int end)
+ static EIGEN_STRONG_INLINE void run(const Derived& src, OtherDerived& dst, typename Derived::Index start, typename Derived::Index end)
#endif
{
- for (int index = start; index < end; ++index)
+ for (typename Derived::Index index = start; index < end; ++index)
dst.copyCoeff(index, src);
}
};
@@ -383,17 +388,18 @@
template<typename Derived1, typename Derived2>
struct ei_assign_impl<Derived1, Derived2, LinearVectorizedTraversal, NoUnrolling>
{
+ typedef typename Derived1::Index Index;
EIGEN_STRONG_INLINE static void run(Derived1 &dst, const Derived2 &src)
{
- const int size = dst.size();
- const int packetSize = ei_packet_traits<typename Derived1::Scalar>::size;
- const int alignedStart = ei_assign_traits<Derived1,Derived2>::DstIsAligned ? 0
- : ei_first_aligned(&dst.coeffRef(0), size);
- const int alignedEnd = alignedStart + ((size-alignedStart)/packetSize)*packetSize;
+ const Index size = dst.size();
+ const Index packetSize = ei_packet_traits<typename Derived1::Scalar>::size;
+ const Index alignedStart = ei_assign_traits<Derived1,Derived2>::DstIsAligned ? 0
+ : ei_first_aligned(&dst.coeffRef(0), size);
+ const Index alignedEnd = alignedStart + ((size-alignedStart)/packetSize)*packetSize;
ei_unaligned_assign_impl<ei_assign_traits<Derived1,Derived2>::DstIsAligned!=0>::run(src,dst,0,alignedStart);
- for(int index = alignedStart; index < alignedEnd; index += packetSize)
+ for(Index index = alignedStart; index < alignedEnd; index += packetSize)
{
dst.template copyPacket<Derived2, Aligned, ei_assign_traits<Derived1,Derived2>::JointAlignment>(index, src);
}
@@ -405,11 +411,12 @@
template<typename Derived1, typename Derived2>
struct ei_assign_impl<Derived1, Derived2, LinearVectorizedTraversal, CompleteUnrolling>
{
+ typedef typename Derived1::Index Index;
EIGEN_STRONG_INLINE static void run(Derived1 &dst, const Derived2 &src)
{
- const int size = Derived1::SizeAtCompileTime;
- const int packetSize = ei_packet_traits<typename Derived1::Scalar>::size;
- const int alignedSize = (size/packetSize)*packetSize;
+ const Index size = Derived1::SizeAtCompileTime;
+ const Index packetSize = ei_packet_traits<typename Derived1::Scalar>::size;
+ const Index alignedSize = (size/packetSize)*packetSize;
ei_assign_innervec_CompleteUnrolling<Derived1, Derived2, 0, alignedSize>::run(dst, src);
ei_assign_DefaultTraversal_CompleteUnrolling<Derived1, Derived2, alignedSize, size>::run(dst, src);
@@ -423,32 +430,33 @@
template<typename Derived1, typename Derived2>
struct ei_assign_impl<Derived1, Derived2, SliceVectorizedTraversal, NoUnrolling>
{
+ typedef typename Derived1::Index Index;
inline static void run(Derived1 &dst, const Derived2 &src)
{
- const int packetSize = ei_packet_traits<typename Derived1::Scalar>::size;
- const int packetAlignedMask = packetSize - 1;
- const int innerSize = dst.innerSize();
- const int outerSize = dst.outerSize();
- const int alignedStep = (packetSize - dst.outerStride() % packetSize) & packetAlignedMask;
- int alignedStart = ei_assign_traits<Derived1,Derived2>::DstIsAligned ? 0
- : ei_first_aligned(&dst.coeffRef(0,0), innerSize);
+ const Index packetSize = ei_packet_traits<typename Derived1::Scalar>::size;
+ const Index packetAlignedMask = packetSize - 1;
+ const Index innerSize = dst.innerSize();
+ const Index outerSize = dst.outerSize();
+ const Index alignedStep = (packetSize - dst.outerStride() % packetSize) & packetAlignedMask;
+ Index alignedStart = ei_assign_traits<Derived1,Derived2>::DstIsAligned ? 0
+ : ei_first_aligned(&dst.coeffRef(0,0), innerSize);
- for(int outer = 0; outer < outerSize; ++outer)
+ for(Index outer = 0; outer < outerSize; ++outer)
{
- const int alignedEnd = alignedStart + ((innerSize-alignedStart) & ~packetAlignedMask);
+ const Index alignedEnd = alignedStart + ((innerSize-alignedStart) & ~packetAlignedMask);
// do the non-vectorizable part of the assignment
- for(int inner = 0; inner<alignedStart ; ++inner)
+ for(Index inner = 0; inner<alignedStart ; ++inner)
dst.copyCoeffByOuterInner(outer, inner, src);
// do the vectorizable part of the assignment
- for(int inner = alignedStart; inner<alignedEnd; inner+=packetSize)
+ for(Index inner = alignedStart; inner<alignedEnd; inner+=packetSize)
dst.template copyPacketByOuterInner<Derived2, Aligned, Unaligned>(outer, inner, src);
// do the non-vectorizable part of the assignment
- for(int inner = alignedEnd; inner<innerSize ; ++inner)
+ for(Index inner = alignedEnd; inner<innerSize ; ++inner)
dst.copyCoeffByOuterInner(outer, inner, src);
- alignedStart = std::min<int>((alignedStart+alignedStep)%packetSize, innerSize);
+ alignedStart = std::min<Index>((alignedStart+alignedStep)%packetSize, innerSize);
}
}
};
diff --git a/Eigen/src/Core/BandMatrix.h b/Eigen/src/Core/BandMatrix.h
index 432df0b..fbe7e39 100644
--- a/Eigen/src/Core/BandMatrix.h
+++ b/Eigen/src/Core/BandMatrix.h
@@ -46,6 +46,7 @@
struct ei_traits<BandMatrix<_Scalar,Rows,Cols,Supers,Subs,Options> >
{
typedef _Scalar Scalar;
+ typedef Dense StorageKind;
enum {
CoeffReadCost = NumTraits<Scalar>::ReadCost,
RowsAtCompileTime = Rows,
@@ -71,6 +72,7 @@
};
typedef typename ei_traits<BandMatrix>::Scalar Scalar;
typedef Matrix<Scalar,RowsAtCompileTime,ColsAtCompileTime> DenseMatrixType;
+ typedef typename DenseMatrixType::Index Index;
protected:
enum {
@@ -83,7 +85,7 @@
public:
- inline BandMatrix(int rows=Rows, int cols=Cols, int supers=Supers, int subs=Subs)
+ inline BandMatrix(Index rows=Rows, Index cols=Cols, Index supers=Supers, Index subs=Subs)
: m_data(1+supers+subs,cols),
m_rows(rows), m_supers(supers), m_subs(subs)
{
@@ -91,32 +93,32 @@
}
/** \returns the number of rows */
- inline int rows() const { return m_rows.value(); }
+ inline Index rows() const { return m_rows.value(); }
/** \returns the number of columns */
- inline int cols() const { return m_data.cols(); }
+ inline Index cols() const { return m_data.cols(); }
/** \returns the number of super diagonals */
- inline int supers() const { return m_supers.value(); }
+ inline Index supers() const { return m_supers.value(); }
/** \returns the number of sub diagonals */
- inline int subs() const { return m_subs.value(); }
+ inline Index subs() const { return m_subs.value(); }
/** \returns a vector expression of the \a i -th column,
* only the meaningful part is returned.
* \warning the internal storage must be column major. */
- inline Block<DataType,Dynamic,1> col(int i)
+ inline Block<DataType,Dynamic,1> col(Index i)
{
EIGEN_STATIC_ASSERT((Options&RowMajor)==0,THIS_METHOD_IS_ONLY_FOR_COLUMN_MAJOR_MATRICES);
- int start = 0;
- int len = m_data.rows();
+ Index start = 0;
+ Index len = m_data.rows();
if (i<=supers())
{
start = supers()-i;
- len = std::min(rows(),std::max(0,m_data.rows() - (supers()-i)));
+ len = std::min(rows(),std::max<Index>(0,m_data.rows() - (supers()-i)));
}
else if (i>=rows()-subs())
- len = std::max(0,m_data.rows() - (i + 1 - rows() + subs()));
+ len = std::max<Index>(0,m_data.rows() - (i + 1 - rows() + subs()));
return Block<DataType,Dynamic,1>(m_data, start, i, len, 1);
}
@@ -146,30 +148,30 @@
BuildType>::ret Type;
};
- /** \returns a vector expression of the \a Index -th sub or super diagonal */
- template<int Index> inline typename DiagonalIntReturnType<Index>::Type diagonal()
+ /** \returns a vector expression of the \a N -th sub or super diagonal */
+ template<int N> inline typename DiagonalIntReturnType<N>::Type diagonal()
{
- return typename DiagonalIntReturnType<Index>::BuildType(m_data, supers()-Index, std::max(0,Index), 1, diagonalLength(Index));
+ return typename DiagonalIntReturnType<N>::BuildType(m_data, supers()-N, std::max(0,N), 1, diagonalLength(N));
}
- /** \returns a vector expression of the \a Index -th sub or super diagonal */
- template<int Index> inline const typename DiagonalIntReturnType<Index>::Type diagonal() const
+ /** \returns a vector expression of the \a N -th sub or super diagonal */
+ template<int N> inline const typename DiagonalIntReturnType<N>::Type diagonal() const
{
- return typename DiagonalIntReturnType<Index>::BuildType(m_data, supers()-Index, std::max(0,Index), 1, diagonalLength(Index));
+ return typename DiagonalIntReturnType<N>::BuildType(m_data, supers()-N, std::max(0,N), 1, diagonalLength(N));
}
/** \returns a vector expression of the \a i -th sub or super diagonal */
- inline Block<DataType,1,Dynamic> diagonal(int i)
+ inline Block<DataType,1,Dynamic> diagonal(Index i)
{
ei_assert((i<0 && -i<=subs()) || (i>=0 && i<=supers()));
- return Block<DataType,1,Dynamic>(m_data, supers()-i, std::max(0,i), 1, diagonalLength(i));
+ return Block<DataType,1,Dynamic>(m_data, supers()-i, std::max<Index>(0,i), 1, diagonalLength(i));
}
/** \returns a vector expression of the \a i -th sub or super diagonal */
- inline const Block<DataType,1,Dynamic> diagonal(int i) const
+ inline const Block<DataType,1,Dynamic> diagonal(Index i) const
{
ei_assert((i<0 && -i<=subs()) || (i>=0 && i<=supers()));
- return Block<DataType,1,Dynamic>(m_data, supers()-i, std::max(0,i), 1, diagonalLength(i));
+ return Block<DataType,1,Dynamic>(m_data, supers()-i, std::max<Index>(0,i), 1, diagonalLength(i));
}
template<typename Dest> inline void evalTo(Dest& dst) const
@@ -177,9 +179,9 @@
dst.resize(rows(),cols());
dst.setZero();
dst.diagonal() = diagonal();
- for (int i=1; i<=supers();++i)
+ for (Index i=1; i<=supers();++i)
dst.diagonal(i) = diagonal(i);
- for (int i=1; i<=subs();++i)
+ for (Index i=1; i<=subs();++i)
dst.diagonal(-i) = diagonal(-i);
}
@@ -192,13 +194,13 @@
protected:
- inline int diagonalLength(int i) const
+ inline Index diagonalLength(Index i) const
{ return i<0 ? std::min(cols(),rows()+i) : std::min(rows(),cols()-i); }
DataType m_data;
- ei_int_if_dynamic<Rows> m_rows;
- ei_int_if_dynamic<Supers> m_supers;
- ei_int_if_dynamic<Subs> m_subs;
+ ei_variable_if_dynamic<Index, Rows> m_rows;
+ ei_variable_if_dynamic<Index, Supers> m_supers;
+ ei_variable_if_dynamic<Index, Subs> m_subs;
};
/** \nonstableyet
@@ -216,8 +218,9 @@
class TridiagonalMatrix : public BandMatrix<Scalar,Size,Size,Options&SelfAdjoint?0:1,1,Options|RowMajor>
{
typedef BandMatrix<Scalar,Size,Size,1,Options&SelfAdjoint?0:1,Options|RowMajor> Base;
+ typedef typename Base::Index Index;
public:
- TridiagonalMatrix(int size = Size) : Base(size,size,1,1) {}
+ TridiagonalMatrix(Index size = Size) : Base(size,size,1,1) {}
inline typename Base::template DiagonalIntReturnType<1>::Type super()
{ return Base::template diagonal<1>(); }
diff --git a/Eigen/src/Core/Block.h b/Eigen/src/Core/Block.h
index 79c9dd4..bb1b8a6 100644
--- a/Eigen/src/Core/Block.h
+++ b/Eigen/src/Core/Block.h
@@ -36,7 +36,7 @@
* \param _DirectAccessStatus \internal used for partial specialization
*
* This class represents an expression of either a fixed-size or dynamic-size block. It is the return
- * type of DenseBase::block(int,int,int,int) and DenseBase::block<int,int>(int,int) and
+ * type of DenseBase::block(Index,Index,Index,Index) and DenseBase::block<int,int>(Index,Index) and
* most of the time this is the only way it is used.
*
* However, if you want to directly manipulate block expressions,
@@ -55,7 +55,7 @@
* \include class_FixedBlock.cpp
* Output: \verbinclude class_FixedBlock.out
*
- * \sa DenseBase::block(int,int,int,int), DenseBase::block(int,int), class VectorBlock
+ * \sa DenseBase::block(Index,Index,Index,Index), DenseBase::block(Index,Index), class VectorBlock
*/
template<typename XprType, int BlockRows, int BlockCols, bool HasDirectAccess>
struct ei_traits<Block<XprType, BlockRows, BlockCols, HasDirectAccess> > : ei_traits<XprType>
@@ -110,7 +110,7 @@
/** Column or Row constructor
*/
- inline Block(const XprType& xpr, int i)
+ inline Block(const XprType& xpr, Index i)
: m_xpr(xpr),
// It is a row if and only if BlockRows==1 and BlockCols==XprType::ColsAtCompileTime,
// and it is a column if and only if BlockRows==XprType::RowsAtCompileTime and BlockCols==1,
@@ -128,7 +128,7 @@
/** Fixed-size constructor
*/
- inline Block(const XprType& xpr, int startRow, int startCol)
+ inline Block(const XprType& xpr, Index startRow, Index startCol)
: m_xpr(xpr), m_startRow(startRow), m_startCol(startCol),
m_blockRows(BlockRows), m_blockCols(BlockCols)
{
@@ -140,8 +140,8 @@
/** Dynamic-size constructor
*/
inline Block(const XprType& xpr,
- int startRow, int startCol,
- int blockRows, int blockCols)
+ Index startRow, Index startCol,
+ Index blockRows, Index blockCols)
: m_xpr(xpr), m_startRow(startRow), m_startCol(startCol),
m_blockRows(blockRows), m_blockCols(blockCols)
{
@@ -153,28 +153,28 @@
EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Block)
- inline int rows() const { return m_blockRows.value(); }
- inline int cols() const { return m_blockCols.value(); }
+ inline Index rows() const { return m_blockRows.value(); }
+ inline Index cols() const { return m_blockCols.value(); }
- inline Scalar& coeffRef(int row, int col)
+ inline Scalar& coeffRef(Index row, Index col)
{
return m_xpr.const_cast_derived()
.coeffRef(row + m_startRow.value(), col + m_startCol.value());
}
- inline const CoeffReturnType coeff(int row, int col) const
+ inline const CoeffReturnType coeff(Index row, Index col) const
{
return m_xpr.coeff(row + m_startRow.value(), col + m_startCol.value());
}
- inline Scalar& coeffRef(int index)
+ inline Scalar& coeffRef(Index index)
{
return m_xpr.const_cast_derived()
.coeffRef(m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index),
m_startCol.value() + (RowsAtCompileTime == 1 ? index : 0));
}
- inline const CoeffReturnType coeff(int index) const
+ inline const CoeffReturnType coeff(Index index) const
{
return m_xpr
.coeff(m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index),
@@ -182,21 +182,21 @@
}
template<int LoadMode>
- inline PacketScalar packet(int row, int col) const
+ inline PacketScalar packet(Index row, Index col) const
{
return m_xpr.template packet<Unaligned>
(row + m_startRow.value(), col + m_startCol.value());
}
template<int LoadMode>
- inline void writePacket(int row, int col, const PacketScalar& x)
+ inline void writePacket(Index row, Index col, const PacketScalar& x)
{
m_xpr.const_cast_derived().template writePacket<Unaligned>
(row + m_startRow.value(), col + m_startCol.value(), x);
}
template<int LoadMode>
- inline PacketScalar packet(int index) const
+ inline PacketScalar packet(Index index) const
{
return m_xpr.template packet<Unaligned>
(m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index),
@@ -204,7 +204,7 @@
}
template<int LoadMode>
- inline void writePacket(int index, const PacketScalar& x)
+ inline void writePacket(Index index, const PacketScalar& x)
{
m_xpr.const_cast_derived().template writePacket<Unaligned>
(m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index),
@@ -214,17 +214,17 @@
#ifdef EIGEN_PARSED_BY_DOXYGEN
/** \sa MapBase::data() */
inline const Scalar* data() const;
- inline int innerStride() const;
- inline int outerStride() const;
+ inline Index innerStride() const;
+ inline Index outerStride() const;
#endif
protected:
const typename XprType::Nested m_xpr;
- const ei_int_if_dynamic<XprType::RowsAtCompileTime == 1 ? 0 : Dynamic> m_startRow;
- const ei_int_if_dynamic<XprType::ColsAtCompileTime == 1 ? 0 : Dynamic> m_startCol;
- const ei_int_if_dynamic<RowsAtCompileTime> m_blockRows;
- const ei_int_if_dynamic<ColsAtCompileTime> m_blockCols;
+ const ei_variable_if_dynamic<Index, XprType::RowsAtCompileTime == 1 ? 0 : Dynamic> m_startRow;
+ const ei_variable_if_dynamic<Index, XprType::ColsAtCompileTime == 1 ? 0 : Dynamic> m_startCol;
+ const ei_variable_if_dynamic<Index, RowsAtCompileTime> m_blockRows;
+ const ei_variable_if_dynamic<Index, ColsAtCompileTime> m_blockCols;
};
/** \internal */
@@ -241,7 +241,7 @@
/** Column or Row constructor
*/
- inline Block(const XprType& xpr, int i)
+ inline Block(const XprType& xpr, Index i)
: Base(&xpr.const_cast_derived().coeffRef(
(BlockRows==1) && (BlockCols==XprType::ColsAtCompileTime) ? i : 0,
(BlockRows==XprType::RowsAtCompileTime) && (BlockCols==1) ? i : 0),
@@ -257,7 +257,7 @@
/** Fixed-size constructor
*/
- inline Block(const XprType& xpr, int startRow, int startCol)
+ inline Block(const XprType& xpr, Index startRow, Index startCol)
: Base(&xpr.const_cast_derived().coeffRef(startRow,startCol)), m_xpr(xpr)
{
ei_assert(startRow >= 0 && BlockRows >= 1 && startRow + BlockRows <= xpr.rows()
@@ -268,8 +268,8 @@
/** Dynamic-size constructor
*/
inline Block(const XprType& xpr,
- int startRow, int startCol,
- int blockRows, int blockCols)
+ Index startRow, Index startCol,
+ Index blockRows, Index blockCols)
: Base(&xpr.const_cast_derived().coeffRef(startRow,startCol), blockRows, blockCols),
m_xpr(xpr)
{
@@ -281,7 +281,7 @@
}
/** \sa MapBase::innerStride() */
- inline int innerStride() const
+ inline Index innerStride() const
{
return ei_traits<Block>::HasSameStorageOrderAsXprType
? m_xpr.innerStride()
@@ -289,7 +289,7 @@
}
/** \sa MapBase::outerStride() */
- inline int outerStride() const
+ inline Index outerStride() const
{
return m_outerStride;
}
@@ -302,7 +302,7 @@
#ifndef EIGEN_PARSED_BY_DOXYGEN
/** \internal used by allowAligned() */
- inline Block(const XprType& xpr, const Scalar* data, int blockRows, int blockCols)
+ inline Block(const XprType& xpr, const Scalar* data, Index blockRows, Index blockCols)
: Base(data, blockRows, blockCols), m_xpr(xpr)
{
init();
@@ -335,19 +335,19 @@
* when it is applied to a fixed-size matrix, it inherits a fixed maximal size,
* which means that evaluating it does not cause a dynamic memory allocation.
*
- * \sa class Block, block(int,int)
+ * \sa class Block, block(Index,Index)
*/
template<typename Derived>
inline Block<Derived> DenseBase<Derived>
- ::block(int startRow, int startCol, int blockRows, int blockCols)
+ ::block(Index startRow, Index startCol, Index blockRows, Index blockCols)
{
return Block<Derived>(derived(), startRow, startCol, blockRows, blockCols);
}
-/** This is the const version of block(int,int,int,int). */
+/** This is the const version of block(Index,Index,Index,Index). */
template<typename Derived>
inline const Block<Derived> DenseBase<Derived>
- ::block(int startRow, int startCol, int blockRows, int blockCols) const
+ ::block(Index startRow, Index startCol, Index blockRows, Index blockCols) const
{
return Block<Derived>(derived(), startRow, startCol, blockRows, blockCols);
}
@@ -363,19 +363,19 @@
* Example: \include MatrixBase_topRightCorner_int_int.cpp
* Output: \verbinclude MatrixBase_topRightCorner_int_int.out
*
- * \sa class Block, block(int,int,int,int)
+ * \sa class Block, block(Index,Index,Index,Index)
*/
template<typename Derived>
inline Block<Derived> DenseBase<Derived>
- ::topRightCorner(int cRows, int cCols)
+ ::topRightCorner(Index cRows, Index cCols)
{
return Block<Derived>(derived(), 0, cols() - cCols, cRows, cCols);
}
-/** This is the const version of topRightCorner(int, int).*/
+/** This is the const version of topRightCorner(Index, Index).*/
template<typename Derived>
inline const Block<Derived>
-DenseBase<Derived>::topRightCorner(int cRows, int cCols) const
+DenseBase<Derived>::topRightCorner(Index cRows, Index cCols) const
{
return Block<Derived>(derived(), 0, cols() - cCols, cRows, cCols);
}
@@ -387,7 +387,7 @@
* Example: \include MatrixBase_template_int_int_topRightCorner.cpp
* Output: \verbinclude MatrixBase_template_int_int_topRightCorner.out
*
- * \sa class Block, block(int,int,int,int)
+ * \sa class Block, block(Index,Index,Index,Index)
*/
template<typename Derived>
template<int CRows, int CCols>
@@ -417,19 +417,19 @@
* Example: \include MatrixBase_topLeftCorner_int_int.cpp
* Output: \verbinclude MatrixBase_topLeftCorner_int_int.out
*
- * \sa class Block, block(int,int,int,int)
+ * \sa class Block, block(Index,Index,Index,Index)
*/
template<typename Derived>
inline Block<Derived> DenseBase<Derived>
- ::topLeftCorner(int cRows, int cCols)
+ ::topLeftCorner(Index cRows, Index cCols)
{
return Block<Derived>(derived(), 0, 0, cRows, cCols);
}
-/** This is the const version of topLeftCorner(int, int).*/
+/** This is the const version of topLeftCorner(Index, Index).*/
template<typename Derived>
inline const Block<Derived>
-DenseBase<Derived>::topLeftCorner(int cRows, int cCols) const
+DenseBase<Derived>::topLeftCorner(Index cRows, Index cCols) const
{
return Block<Derived>(derived(), 0, 0, cRows, cCols);
}
@@ -441,7 +441,7 @@
* Example: \include MatrixBase_template_int_int_topLeftCorner.cpp
* Output: \verbinclude MatrixBase_template_int_int_topLeftCorner.out
*
- * \sa class Block, block(int,int,int,int)
+ * \sa class Block, block(Index,Index,Index,Index)
*/
template<typename Derived>
template<int CRows, int CCols>
@@ -473,19 +473,19 @@
* Example: \include MatrixBase_bottomRightCorner_int_int.cpp
* Output: \verbinclude MatrixBase_bottomRightCorner_int_int.out
*
- * \sa class Block, block(int,int,int,int)
+ * \sa class Block, block(Index,Index,Index,Index)
*/
template<typename Derived>
inline Block<Derived> DenseBase<Derived>
- ::bottomRightCorner(int cRows, int cCols)
+ ::bottomRightCorner(Index cRows, Index cCols)
{
return Block<Derived>(derived(), rows() - cRows, cols() - cCols, cRows, cCols);
}
-/** This is the const version of bottomRightCorner(int, int).*/
+/** This is the const version of bottomRightCorner(Index, Index).*/
template<typename Derived>
inline const Block<Derived>
-DenseBase<Derived>::bottomRightCorner(int cRows, int cCols) const
+DenseBase<Derived>::bottomRightCorner(Index cRows, Index cCols) const
{
return Block<Derived>(derived(), rows() - cRows, cols() - cCols, cRows, cCols);
}
@@ -497,7 +497,7 @@
* Example: \include MatrixBase_template_int_int_bottomRightCorner.cpp
* Output: \verbinclude MatrixBase_template_int_int_bottomRightCorner.out
*
- * \sa class Block, block(int,int,int,int)
+ * \sa class Block, block(Index,Index,Index,Index)
*/
template<typename Derived>
template<int CRows, int CCols>
@@ -527,19 +527,19 @@
* Example: \include MatrixBase_bottomLeftCorner_int_int.cpp
* Output: \verbinclude MatrixBase_bottomLeftCorner_int_int.out
*
- * \sa class Block, block(int,int,int,int)
+ * \sa class Block, block(Index,Index,Index,Index)
*/
template<typename Derived>
inline Block<Derived> DenseBase<Derived>
- ::bottomLeftCorner(int cRows, int cCols)
+ ::bottomLeftCorner(Index cRows, Index cCols)
{
return Block<Derived>(derived(), rows() - cRows, 0, cRows, cCols);
}
-/** This is the const version of bottomLeftCorner(int, int).*/
+/** This is the const version of bottomLeftCorner(Index, Index).*/
template<typename Derived>
inline const Block<Derived>
-DenseBase<Derived>::bottomLeftCorner(int cRows, int cCols) const
+DenseBase<Derived>::bottomLeftCorner(Index cRows, Index cCols) const
{
return Block<Derived>(derived(), rows() - cRows, 0, cRows, cCols);
}
@@ -551,7 +551,7 @@
* Example: \include MatrixBase_template_int_int_bottomLeftCorner.cpp
* Output: \verbinclude MatrixBase_template_int_int_bottomLeftCorner.out
*
- * \sa class Block, block(int,int,int,int)
+ * \sa class Block, block(Index,Index,Index,Index)
*/
template<typename Derived>
template<int CRows, int CCols>
@@ -579,19 +579,19 @@
* Example: \include MatrixBase_topRows_int.cpp
* Output: \verbinclude MatrixBase_topRows_int.out
*
- * \sa class Block, block(int,int,int,int)
+ * \sa class Block, block(Index,Index,Index,Index)
*/
template<typename Derived>
inline typename DenseBase<Derived>::RowsBlockXpr DenseBase<Derived>
- ::topRows(int n)
+ ::topRows(Index n)
{
return RowsBlockXpr(derived(), 0, 0, n, cols());
}
-/** This is the const version of topRows(int).*/
+/** This is the const version of topRows(Index).*/
template<typename Derived>
inline const typename DenseBase<Derived>::RowsBlockXpr
-DenseBase<Derived>::topRows(int n) const
+DenseBase<Derived>::topRows(Index n) const
{
return RowsBlockXpr(derived(), 0, 0, n, cols());
}
@@ -603,7 +603,7 @@
* Example: \include MatrixBase_template_int_topRows.cpp
* Output: \verbinclude MatrixBase_template_int_topRows.out
*
- * \sa class Block, block(int,int,int,int)
+ * \sa class Block, block(Index,Index,Index,Index)
*/
template<typename Derived>
template<int N>
@@ -633,19 +633,19 @@
* Example: \include MatrixBase_bottomRows_int.cpp
* Output: \verbinclude MatrixBase_bottomRows_int.out
*
- * \sa class Block, block(int,int,int,int)
+ * \sa class Block, block(Index,Index,Index,Index)
*/
template<typename Derived>
inline typename DenseBase<Derived>::RowsBlockXpr DenseBase<Derived>
- ::bottomRows(int n)
+ ::bottomRows(Index n)
{
return RowsBlockXpr(derived(), rows() - n, 0, n, cols());
}
-/** This is the const version of bottomRows(int).*/
+/** This is the const version of bottomRows(Index).*/
template<typename Derived>
inline const typename DenseBase<Derived>::RowsBlockXpr
-DenseBase<Derived>::bottomRows(int n) const
+DenseBase<Derived>::bottomRows(Index n) const
{
return RowsBlockXpr(derived(), rows() - n, 0, n, cols());
}
@@ -657,7 +657,7 @@
* Example: \include MatrixBase_template_int_bottomRows.cpp
* Output: \verbinclude MatrixBase_template_int_bottomRows.out
*
- * \sa class Block, block(int,int,int,int)
+ * \sa class Block, block(Index,Index,Index,Index)
*/
template<typename Derived>
template<int N>
@@ -687,19 +687,19 @@
* Example: \include MatrixBase_leftCols_int.cpp
* Output: \verbinclude MatrixBase_leftCols_int.out
*
- * \sa class Block, block(int,int,int,int)
+ * \sa class Block, block(Index,Index,Index,Index)
*/
template<typename Derived>
inline typename DenseBase<Derived>::ColsBlockXpr DenseBase<Derived>
- ::leftCols(int n)
+ ::leftCols(Index n)
{
return ColsBlockXpr(derived(), 0, 0, rows(), n);
}
-/** This is the const version of leftCols(int).*/
+/** This is the const version of leftCols(Index).*/
template<typename Derived>
inline const typename DenseBase<Derived>::ColsBlockXpr
-DenseBase<Derived>::leftCols(int n) const
+DenseBase<Derived>::leftCols(Index n) const
{
return ColsBlockXpr(derived(), 0, 0, rows(), n);
}
@@ -711,7 +711,7 @@
* Example: \include MatrixBase_template_int_leftCols.cpp
* Output: \verbinclude MatrixBase_template_int_leftCols.out
*
- * \sa class Block, block(int,int,int,int)
+ * \sa class Block, block(Index,Index,Index,Index)
*/
template<typename Derived>
template<int N>
@@ -741,19 +741,19 @@
* Example: \include MatrixBase_rightCols_int.cpp
* Output: \verbinclude MatrixBase_rightCols_int.out
*
- * \sa class Block, block(int,int,int,int)
+ * \sa class Block, block(Index,Index,Index,Index)
*/
template<typename Derived>
inline typename DenseBase<Derived>::ColsBlockXpr DenseBase<Derived>
- ::rightCols(int n)
+ ::rightCols(Index n)
{
return ColsBlockXpr(derived(), 0, cols() - n, rows(), n);
}
-/** This is the const version of rightCols(int).*/
+/** This is the const version of rightCols(Index).*/
template<typename Derived>
inline const typename DenseBase<Derived>::ColsBlockXpr
-DenseBase<Derived>::rightCols(int n) const
+DenseBase<Derived>::rightCols(Index n) const
{
return ColsBlockXpr(derived(), 0, cols() - n, rows(), n);
}
@@ -765,7 +765,7 @@
* Example: \include MatrixBase_template_int_rightCols.cpp
* Output: \verbinclude MatrixBase_template_int_rightCols.out
*
- * \sa class Block, block(int,int,int,int)
+ * \sa class Block, block(Index,Index,Index,Index)
*/
template<typename Derived>
template<int N>
@@ -802,21 +802,21 @@
* \note since block is a templated member, the keyword template has to be used
* if the matrix type is also a template parameter: \code m.template block<3,3>(1,1); \endcode
*
- * \sa class Block, block(int,int,int,int)
+ * \sa class Block, block(Index,Index,Index,Index)
*/
template<typename Derived>
template<int BlockRows, int BlockCols>
inline Block<Derived, BlockRows, BlockCols>
-DenseBase<Derived>::block(int startRow, int startCol)
+DenseBase<Derived>::block(Index startRow, Index startCol)
{
return Block<Derived, BlockRows, BlockCols>(derived(), startRow, startCol);
}
-/** This is the const version of block<>(int, int). */
+/** This is the const version of block<>(Index, Index). */
template<typename Derived>
template<int BlockRows, int BlockCols>
inline const Block<Derived, BlockRows, BlockCols>
-DenseBase<Derived>::block(int startRow, int startCol) const
+DenseBase<Derived>::block(Index startRow, Index startCol) const
{
return Block<Derived, BlockRows, BlockCols>(derived(), startRow, startCol);
}
@@ -829,7 +829,7 @@
* \sa row(), class Block */
template<typename Derived>
inline typename DenseBase<Derived>::ColXpr
-DenseBase<Derived>::col(int i)
+DenseBase<Derived>::col(Index i)
{
return ColXpr(derived(), i);
}
@@ -837,7 +837,7 @@
/** This is the const version of col(). */
template<typename Derived>
inline const typename DenseBase<Derived>::ColXpr
-DenseBase<Derived>::col(int i) const
+DenseBase<Derived>::col(Index i) const
{
return ColXpr(derived(), i);
}
@@ -850,7 +850,7 @@
* \sa col(), class Block */
template<typename Derived>
inline typename DenseBase<Derived>::RowXpr
-DenseBase<Derived>::row(int i)
+DenseBase<Derived>::row(Index i)
{
return RowXpr(derived(), i);
}
@@ -858,7 +858,7 @@
/** This is the const version of row(). */
template<typename Derived>
inline const typename DenseBase<Derived>::RowXpr
-DenseBase<Derived>::row(int i) const
+DenseBase<Derived>::row(Index i) const
{
return RowXpr(derived(), i);
}
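With the block API retyped above, caller code should index with the expression's Index type rather than int. A minimal user-side sketch (MatrixXd::Index is the public typedef assumed here, typically a pointer-sized signed integer):

#include <Eigen/Dense>
#include <iostream>

int main()
{
  typedef Eigen::MatrixXd::Index Index;
  Eigen::MatrixXd m = Eigen::MatrixXd::Random(6, 6);

  // Index matches the new row()/col()/block() signatures, so there is no
  // int <-> Index narrowing when Index is a 64-bit type.
  for (Index i = 0; i < m.rows(); ++i)
    m.row(i) *= 2.0;

  std::cout << m.bottomRightCorner(2, 2) << "\n";
  return 0;
}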
diff --git a/Eigen/src/Core/CommaInitializer.h b/Eigen/src/Core/CommaInitializer.h
index adfca4f..311c903 100644
--- a/Eigen/src/Core/CommaInitializer.h
+++ b/Eigen/src/Core/CommaInitializer.h
@@ -39,7 +39,9 @@
template<typename XprType>
struct CommaInitializer
{
- typedef typename ei_traits<XprType>::Scalar Scalar;
+ typedef typename XprType::Scalar Scalar;
+ typedef typename XprType::Index Index;
+
inline CommaInitializer(XprType& xpr, const Scalar& s)
: m_xpr(xpr), m_row(0), m_col(1), m_currentBlockRows(1)
{
@@ -113,9 +115,9 @@
inline XprType& finished() { return m_xpr; }
XprType& m_xpr; // target expression
- int m_row; // current row id
- int m_col; // current col id
- int m_currentBlockRows; // current block height
+ Index m_row; // current row id
+ Index m_col; // current col id
+ Index m_currentBlockRows; // current block height
};
/** \anchor MatrixBaseCommaInitRef
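The comma-initializer change above only retypes the internal write cursor (m_row, m_col, m_currentBlockRows); user-facing behavior is unchanged. A minimal sketch of the code path it serves:

#include <Eigen/Dense>

int main()
{
  Eigen::Matrix3d m;
  // Each inserted coefficient advances the CommaInitializer's m_row/m_col
  // cursor, now typed Index to stay consistent with m.rows()/m.cols().
  m << 1, 2, 3,
       4, 5, 6,
       7, 8, 9;
  return 0;
}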
diff --git a/Eigen/src/Core/CwiseBinaryOp.h b/Eigen/src/Core/CwiseBinaryOp.h
index e0617e3..5307775 100644
--- a/Eigen/src/Core/CwiseBinaryOp.h
+++ b/Eigen/src/Core/CwiseBinaryOp.h
@@ -123,14 +123,14 @@
ei_assert(lhs.rows() == rhs.rows() && lhs.cols() == rhs.cols());
}
- EIGEN_STRONG_INLINE int rows() const {
+ EIGEN_STRONG_INLINE Index rows() const {
// return the fixed size type if available to enable compile time optimizations
if (ei_traits<typename ei_cleantype<LhsNested>::type>::RowsAtCompileTime==Dynamic)
return m_rhs.rows();
else
return m_lhs.rows();
}
- EIGEN_STRONG_INLINE int cols() const {
+ EIGEN_STRONG_INLINE Index cols() const {
// return the fixed size type if available to enable compile time optimizations
if (ei_traits<typename ei_cleantype<LhsNested>::type>::ColsAtCompileTime==Dynamic)
return m_rhs.cols();
@@ -161,27 +161,27 @@
typedef typename ei_dense_xpr_base<CwiseBinaryOp<BinaryOp, Lhs, Rhs> >::type Base;
EIGEN_DENSE_PUBLIC_INTERFACE( Derived )
- EIGEN_STRONG_INLINE const Scalar coeff(int row, int col) const
+ EIGEN_STRONG_INLINE const Scalar coeff(Index row, Index col) const
{
return derived().functor()(derived().lhs().coeff(row, col),
derived().rhs().coeff(row, col));
}
template<int LoadMode>
- EIGEN_STRONG_INLINE PacketScalar packet(int row, int col) const
+ EIGEN_STRONG_INLINE PacketScalar packet(Index row, Index col) const
{
return derived().functor().packetOp(derived().lhs().template packet<LoadMode>(row, col),
derived().rhs().template packet<LoadMode>(row, col));
}
- EIGEN_STRONG_INLINE const Scalar coeff(int index) const
+ EIGEN_STRONG_INLINE const Scalar coeff(Index index) const
{
return derived().functor()(derived().lhs().coeff(index),
derived().rhs().coeff(index));
}
template<int LoadMode>
- EIGEN_STRONG_INLINE PacketScalar packet(int index) const
+ EIGEN_STRONG_INLINE PacketScalar packet(Index index) const
{
return derived().functor().packetOp(derived().lhs().template packet<LoadMode>(index),
derived().rhs().template packet<LoadMode>(index));
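Note how rows()/cols() above prefer whichever operand has a compile-time size, so a mixed fixed/dynamic expression can still fold its dimensions to constants. A sketch of the case this optimizes:

#include <Eigen/Dense>

int main()
{
  Eigen::Matrix3d fixed = Eigen::Matrix3d::Ones();
  Eigen::MatrixXd dyn   = Eigen::MatrixXd::Ones(3, 3);

  // fixed + dyn is a CwiseBinaryOp; its rows()/cols() come from the
  // fixed-size operand, which the compiler can fold to the constant 3.
  Eigen::Matrix3d sum = fixed + dyn;
  (void)sum;
  return 0;
}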
diff --git a/Eigen/src/Core/CwiseNullaryOp.h b/Eigen/src/Core/CwiseNullaryOp.h
index bb44703..af16432 100644
--- a/Eigen/src/Core/CwiseNullaryOp.h
+++ b/Eigen/src/Core/CwiseNullaryOp.h
@@ -63,7 +63,7 @@
typedef typename ei_dense_xpr_base<CwiseNullaryOp>::type Base;
EIGEN_DENSE_PUBLIC_INTERFACE(CwiseNullaryOp)
- CwiseNullaryOp(int rows, int cols, const NullaryOp& func = NullaryOp())
+ CwiseNullaryOp(Index rows, Index cols, const NullaryOp& func = NullaryOp())
: m_rows(rows), m_cols(cols), m_functor(func)
{
ei_assert(rows >= 0
@@ -72,34 +72,34 @@
&& (ColsAtCompileTime == Dynamic || ColsAtCompileTime == cols));
}
- EIGEN_STRONG_INLINE int rows() const { return m_rows.value(); }
- EIGEN_STRONG_INLINE int cols() const { return m_cols.value(); }
+ EIGEN_STRONG_INLINE Index rows() const { return m_rows.value(); }
+ EIGEN_STRONG_INLINE Index cols() const { return m_cols.value(); }
- EIGEN_STRONG_INLINE const Scalar coeff(int rows, int cols) const
+ EIGEN_STRONG_INLINE const Scalar coeff(Index rows, Index cols) const
{
return m_functor(rows, cols);
}
template<int LoadMode>
- EIGEN_STRONG_INLINE PacketScalar packet(int row, int col) const
+ EIGEN_STRONG_INLINE PacketScalar packet(Index row, Index col) const
{
return m_functor.packetOp(row, col);
}
- EIGEN_STRONG_INLINE const Scalar coeff(int index) const
+ EIGEN_STRONG_INLINE const Scalar coeff(Index index) const
{
return m_functor(index);
}
template<int LoadMode>
- EIGEN_STRONG_INLINE PacketScalar packet(int index) const
+ EIGEN_STRONG_INLINE PacketScalar packet(Index index) const
{
return m_functor.packetOp(index);
}
protected:
- const ei_int_if_dynamic<RowsAtCompileTime> m_rows;
- const ei_int_if_dynamic<ColsAtCompileTime> m_cols;
+ const ei_variable_if_dynamic<Index, RowsAtCompileTime> m_rows;
+ const ei_variable_if_dynamic<Index, ColsAtCompileTime> m_cols;
const NullaryOp m_functor;
};
@@ -120,7 +120,7 @@
template<typename Derived>
template<typename CustomNullaryOp>
EIGEN_STRONG_INLINE const CwiseNullaryOp<CustomNullaryOp, Derived>
-DenseBase<Derived>::NullaryExpr(int rows, int cols, const CustomNullaryOp& func)
+DenseBase<Derived>::NullaryExpr(Index rows, Index cols, const CustomNullaryOp& func)
{
return CwiseNullaryOp<CustomNullaryOp, Derived>(rows, cols, func);
}
@@ -143,7 +143,7 @@
template<typename Derived>
template<typename CustomNullaryOp>
EIGEN_STRONG_INLINE const CwiseNullaryOp<CustomNullaryOp, Derived>
-DenseBase<Derived>::NullaryExpr(int size, const CustomNullaryOp& func)
+DenseBase<Derived>::NullaryExpr(Index size, const CustomNullaryOp& func)
{
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
if(RowsAtCompileTime == 1) return CwiseNullaryOp<CustomNullaryOp, Derived>(1, size, func);
@@ -182,7 +182,7 @@
*/
template<typename Derived>
EIGEN_STRONG_INLINE const typename DenseBase<Derived>::ConstantReturnType
-DenseBase<Derived>::Constant(int rows, int cols, const Scalar& value)
+DenseBase<Derived>::Constant(Index rows, Index cols, const Scalar& value)
{
return DenseBase<Derived>::NullaryExpr(rows, cols, ei_scalar_constant_op<Scalar>(value));
}
@@ -204,7 +204,7 @@
*/
template<typename Derived>
EIGEN_STRONG_INLINE const typename DenseBase<Derived>::ConstantReturnType
-DenseBase<Derived>::Constant(int size, const Scalar& value)
+DenseBase<Derived>::Constant(Index size, const Scalar& value)
{
return DenseBase<Derived>::NullaryExpr(size, ei_scalar_constant_op<Scalar>(value));
}
@@ -239,11 +239,11 @@
* Example: \include DenseBase_LinSpaced_seq.cpp
* Output: \verbinclude DenseBase_LinSpaced_seq.out
*
- * \sa setLinSpaced(const Scalar&,const Scalar&,int), LinSpaced(Scalar,Scalar,int), CwiseNullaryOp
+ * \sa setLinSpaced(const Scalar&,const Scalar&,Index), LinSpaced(Scalar,Scalar,Index), CwiseNullaryOp
*/
template<typename Derived>
EIGEN_STRONG_INLINE const typename DenseBase<Derived>::SequentialLinSpacedReturnType
-DenseBase<Derived>::LinSpaced(Sequential_t, const Scalar& low, const Scalar& high, int size)
+DenseBase<Derived>::LinSpaced(Sequential_t, const Scalar& low, const Scalar& high, Index size)
{
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
return DenseBase<Derived>::NullaryExpr(size, ei_linspaced_op<Scalar,false>(low,high,size));
@@ -259,11 +259,11 @@
* Example: \include DenseBase_LinSpaced.cpp
* Output: \verbinclude DenseBase_LinSpaced.out
*
- * \sa setLinSpaced(const Scalar&,const Scalar&,int), LinSpaced(Sequential_t,const Scalar&,const Scalar&,int), CwiseNullaryOp
+ * \sa setLinSpaced(const Scalar&,const Scalar&,Index), LinSpaced(Sequential_t,const Scalar&,const Scalar&,Index), CwiseNullaryOp
*/
template<typename Derived>
EIGEN_STRONG_INLINE const typename DenseBase<Derived>::RandomAccessLinSpacedReturnType
-DenseBase<Derived>::LinSpaced(const Scalar& low, const Scalar& high, int size)
+DenseBase<Derived>::LinSpaced(const Scalar& low, const Scalar& high, Index size)
{
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
return DenseBase<Derived>::NullaryExpr(size, ei_linspaced_op<Scalar,true>(low,high,size));
@@ -274,8 +274,8 @@
bool DenseBase<Derived>::isApproxToConstant
(const Scalar& value, RealScalar prec) const
{
- for(int j = 0; j < cols(); ++j)
- for(int i = 0; i < rows(); ++i)
+ for(Index j = 0; j < cols(); ++j)
+ for(Index i = 0; i < rows(); ++i)
if(!ei_isApprox(this->coeff(i, j), value, prec))
return false;
return true;
@@ -303,7 +303,7 @@
/** Sets all coefficients in this expression to \a value.
*
- * \sa fill(), setConstant(int,const Scalar&), setConstant(int,int,const Scalar&), setZero(), setOnes(), Constant(), class CwiseNullaryOp, setZero(), setOnes()
+ * \sa fill(), setConstant(Index,const Scalar&), setConstant(Index,Index,const Scalar&), setZero(), setOnes(), Constant(), class CwiseNullaryOp, setZero(), setOnes()
*/
template<typename Derived>
EIGEN_STRONG_INLINE Derived& DenseBase<Derived>::setConstant(const Scalar& value)
@@ -318,11 +318,11 @@
* Example: \include Matrix_setConstant_int.cpp
* Output: \verbinclude Matrix_setConstant_int.out
*
- * \sa MatrixBase::setConstant(const Scalar&), setConstant(int,int,const Scalar&), class CwiseNullaryOp, MatrixBase::Constant(const Scalar&)
+ * \sa MatrixBase::setConstant(const Scalar&), setConstant(Index,Index,const Scalar&), class CwiseNullaryOp, MatrixBase::Constant(const Scalar&)
*/
template<typename Derived>
EIGEN_STRONG_INLINE Derived&
-DenseStorageBase<Derived>::setConstant(int size, const Scalar& value)
+DenseStorageBase<Derived>::setConstant(Index size, const Scalar& value)
{
resize(size);
return setConstant(value);
@@ -336,11 +336,11 @@
* Example: \include Matrix_setConstant_int_int.cpp
* Output: \verbinclude Matrix_setConstant_int_int.out
*
- * \sa MatrixBase::setConstant(const Scalar&), setConstant(int,const Scalar&), class CwiseNullaryOp, MatrixBase::Constant(const Scalar&)
+ * \sa MatrixBase::setConstant(const Scalar&), setConstant(Index,const Scalar&), class CwiseNullaryOp, MatrixBase::Constant(const Scalar&)
*/
template<typename Derived>
EIGEN_STRONG_INLINE Derived&
-DenseStorageBase<Derived>::setConstant(int rows, int cols, const Scalar& value)
+DenseStorageBase<Derived>::setConstant(Index rows, Index cols, const Scalar& value)
{
resize(rows, cols);
return setConstant(value);
@@ -359,7 +359,7 @@
* \sa CwiseNullaryOp
*/
template<typename Derived>
-EIGEN_STRONG_INLINE Derived& DenseBase<Derived>::setLinSpaced(const Scalar& low, const Scalar& high, int size)
+EIGEN_STRONG_INLINE Derived& DenseBase<Derived>::setLinSpaced(const Scalar& low, const Scalar& high, Index size)
{
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
return derived() = Derived::NullaryExpr(size, ei_linspaced_op<Scalar,false>(low,high,size));
@@ -379,11 +379,11 @@
* Example: \include MatrixBase_zero_int_int.cpp
* Output: \verbinclude MatrixBase_zero_int_int.out
*
- * \sa Zero(), Zero(int)
+ * \sa Zero(), Zero(Index)
*/
template<typename Derived>
EIGEN_STRONG_INLINE const typename DenseBase<Derived>::ConstantReturnType
-DenseBase<Derived>::Zero(int rows, int cols)
+DenseBase<Derived>::Zero(Index rows, Index cols)
{
return Constant(rows, cols, Scalar(0));
}
@@ -402,11 +402,11 @@
* Example: \include MatrixBase_zero_int.cpp
* Output: \verbinclude MatrixBase_zero_int.out
*
- * \sa Zero(), Zero(int,int)
+ * \sa Zero(), Zero(Index,Index)
*/
template<typename Derived>
EIGEN_STRONG_INLINE const typename DenseBase<Derived>::ConstantReturnType
-DenseBase<Derived>::Zero(int size)
+DenseBase<Derived>::Zero(Index size)
{
return Constant(size, Scalar(0));
}
@@ -419,7 +419,7 @@
* Example: \include MatrixBase_zero.cpp
* Output: \verbinclude MatrixBase_zero.out
*
- * \sa Zero(int), Zero(int,int)
+ * \sa Zero(Index), Zero(Index,Index)
*/
template<typename Derived>
EIGEN_STRONG_INLINE const typename DenseBase<Derived>::ConstantReturnType
@@ -439,8 +439,8 @@
template<typename Derived>
bool DenseBase<Derived>::isZero(RealScalar prec) const
{
- for(int j = 0; j < cols(); ++j)
- for(int i = 0; i < rows(); ++i)
+ for(Index j = 0; j < cols(); ++j)
+ for(Index i = 0; i < rows(); ++i)
if(!ei_isMuchSmallerThan(this->coeff(i, j), static_cast<Scalar>(1), prec))
return false;
return true;
@@ -466,11 +466,11 @@
* Example: \include Matrix_setZero_int.cpp
* Output: \verbinclude Matrix_setZero_int.out
*
- * \sa DenseBase::setZero(), setZero(int,int), class CwiseNullaryOp, DenseBase::Zero()
+ * \sa DenseBase::setZero(), setZero(Index,Index), class CwiseNullaryOp, DenseBase::Zero()
*/
template<typename Derived>
EIGEN_STRONG_INLINE Derived&
-DenseStorageBase<Derived>::setZero(int size)
+DenseStorageBase<Derived>::setZero(Index size)
{
resize(size);
return setConstant(Scalar(0));
@@ -484,11 +484,11 @@
* Example: \include Matrix_setZero_int_int.cpp
* Output: \verbinclude Matrix_setZero_int_int.out
*
- * \sa DenseBase::setZero(), setZero(int), class CwiseNullaryOp, DenseBase::Zero()
+ * \sa DenseBase::setZero(), setZero(Index), class CwiseNullaryOp, DenseBase::Zero()
*/
template<typename Derived>
EIGEN_STRONG_INLINE Derived&
-DenseStorageBase<Derived>::setZero(int rows, int cols)
+DenseStorageBase<Derived>::setZero(Index rows, Index cols)
{
resize(rows, cols);
return setConstant(Scalar(0));
@@ -508,11 +508,11 @@
* Example: \include MatrixBase_ones_int_int.cpp
* Output: \verbinclude MatrixBase_ones_int_int.out
*
- * \sa Ones(), Ones(int), isOnes(), class Ones
+ * \sa Ones(), Ones(Index), isOnes(), class Ones
*/
template<typename Derived>
EIGEN_STRONG_INLINE const typename DenseBase<Derived>::ConstantReturnType
-DenseBase<Derived>::Ones(int rows, int cols)
+DenseBase<Derived>::Ones(Index rows, Index cols)
{
return Constant(rows, cols, Scalar(1));
}
@@ -531,11 +531,11 @@
* Example: \include MatrixBase_ones_int.cpp
* Output: \verbinclude MatrixBase_ones_int.out
*
- * \sa Ones(), Ones(int,int), isOnes(), class Ones
+ * \sa Ones(), Ones(Index,Index), isOnes(), class Ones
*/
template<typename Derived>
EIGEN_STRONG_INLINE const typename DenseBase<Derived>::ConstantReturnType
-DenseBase<Derived>::Ones(int size)
+DenseBase<Derived>::Ones(Index size)
{
return Constant(size, Scalar(1));
}
@@ -548,7 +548,7 @@
* Example: \include MatrixBase_ones.cpp
* Output: \verbinclude MatrixBase_ones.out
*
- * \sa Ones(int), Ones(int,int), isOnes(), class Ones
+ * \sa Ones(Index), Ones(Index,Index), isOnes(), class Ones
*/
template<typename Derived>
EIGEN_STRONG_INLINE const typename DenseBase<Derived>::ConstantReturnType
@@ -592,11 +592,11 @@
* Example: \include Matrix_setOnes_int.cpp
* Output: \verbinclude Matrix_setOnes_int.out
*
- * \sa MatrixBase::setOnes(), setOnes(int,int), class CwiseNullaryOp, MatrixBase::Ones()
+ * \sa MatrixBase::setOnes(), setOnes(Index,Index), class CwiseNullaryOp, MatrixBase::Ones()
*/
template<typename Derived>
EIGEN_STRONG_INLINE Derived&
-DenseStorageBase<Derived>::setOnes(int size)
+DenseStorageBase<Derived>::setOnes(Index size)
{
resize(size);
return setConstant(Scalar(1));
@@ -610,11 +610,11 @@
* Example: \include Matrix_setOnes_int_int.cpp
* Output: \verbinclude Matrix_setOnes_int_int.out
*
- * \sa MatrixBase::setOnes(), setOnes(int), class CwiseNullaryOp, MatrixBase::Ones()
+ * \sa MatrixBase::setOnes(), setOnes(Index), class CwiseNullaryOp, MatrixBase::Ones()
*/
template<typename Derived>
EIGEN_STRONG_INLINE Derived&
-DenseStorageBase<Derived>::setOnes(int rows, int cols)
+DenseStorageBase<Derived>::setOnes(Index rows, Index cols)
{
resize(rows, cols);
return setConstant(Scalar(1));
@@ -638,7 +638,7 @@
*/
template<typename Derived>
EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::IdentityReturnType
-MatrixBase<Derived>::Identity(int rows, int cols)
+MatrixBase<Derived>::Identity(Index rows, Index cols)
{
return DenseBase<Derived>::NullaryExpr(rows, cols, ei_scalar_identity_op<Scalar>());
}
@@ -651,7 +651,7 @@
* Example: \include MatrixBase_identity.cpp
* Output: \verbinclude MatrixBase_identity.out
*
- * \sa Identity(int,int), setIdentity(), isIdentity()
+ * \sa Identity(Index,Index), setIdentity(), isIdentity()
*/
template<typename Derived>
EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::IdentityReturnType
@@ -668,15 +668,15 @@
* Example: \include MatrixBase_isIdentity.cpp
* Output: \verbinclude MatrixBase_isIdentity.out
*
- * \sa class CwiseNullaryOp, Identity(), Identity(int,int), setIdentity()
+ * \sa class CwiseNullaryOp, Identity(), Identity(Index,Index), setIdentity()
*/
template<typename Derived>
bool MatrixBase<Derived>::isIdentity
(RealScalar prec) const
{
- for(int j = 0; j < cols(); ++j)
+ for(Index j = 0; j < cols(); ++j)
{
- for(int i = 0; i < rows(); ++i)
+ for(Index i = 0; i < rows(); ++i)
{
if(i == j)
{
@@ -705,11 +705,12 @@
template<typename Derived>
struct ei_setIdentity_impl<Derived, true>
{
+ typedef typename Derived::Index Index;
static EIGEN_STRONG_INLINE Derived& run(Derived& m)
{
m.setZero();
- const int size = std::min(m.rows(), m.cols());
- for(int i = 0; i < size; ++i) m.coeffRef(i,i) = typename Derived::Scalar(1);
+ const Index size = std::min(m.rows(), m.cols());
+ for(Index i = 0; i < size; ++i) m.coeffRef(i,i) = typename Derived::Scalar(1);
return m;
}
};
@@ -719,7 +720,7 @@
* Example: \include MatrixBase_setIdentity.cpp
* Output: \verbinclude MatrixBase_setIdentity.out
*
- * \sa class CwiseNullaryOp, Identity(), Identity(int,int), isIdentity()
+ * \sa class CwiseNullaryOp, Identity(), Identity(Index,Index), isIdentity()
*/
template<typename Derived>
EIGEN_STRONG_INLINE Derived& MatrixBase<Derived>::setIdentity()
@@ -738,7 +739,7 @@
* \sa MatrixBase::setIdentity(), class CwiseNullaryOp, MatrixBase::Identity()
*/
template<typename Derived>
-EIGEN_STRONG_INLINE Derived& MatrixBase<Derived>::setIdentity(int rows, int cols)
+EIGEN_STRONG_INLINE Derived& MatrixBase<Derived>::setIdentity(Index rows, Index cols)
{
derived().resize(rows, cols);
return setIdentity();
@@ -748,10 +749,10 @@
*
* \only_for_vectors
*
- * \sa MatrixBase::Unit(int), MatrixBase::UnitX(), MatrixBase::UnitY(), MatrixBase::UnitZ(), MatrixBase::UnitW()
+ * \sa MatrixBase::Unit(Index), MatrixBase::UnitX(), MatrixBase::UnitY(), MatrixBase::UnitZ(), MatrixBase::UnitW()
*/
template<typename Derived>
-EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::BasisReturnType MatrixBase<Derived>::Unit(int size, int i)
+EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::BasisReturnType MatrixBase<Derived>::Unit(Index size, Index i)
{
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
return BasisReturnType(SquareMatrixType::Identity(size,size), i);
@@ -763,10 +764,10 @@
*
* This variant is for fixed-size vector only.
*
- * \sa MatrixBase::Unit(int,int), MatrixBase::UnitX(), MatrixBase::UnitY(), MatrixBase::UnitZ(), MatrixBase::UnitW()
+ * \sa MatrixBase::Unit(Index,Index), MatrixBase::UnitX(), MatrixBase::UnitY(), MatrixBase::UnitZ(), MatrixBase::UnitW()
*/
template<typename Derived>
-EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::BasisReturnType MatrixBase<Derived>::Unit(int i)
+EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::BasisReturnType MatrixBase<Derived>::Unit(Index i)
{
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
return BasisReturnType(SquareMatrixType::Identity(),i);
@@ -776,7 +777,7 @@
*
* \only_for_vectors
*
- * \sa MatrixBase::Unit(int,int), MatrixBase::Unit(int), MatrixBase::UnitY(), MatrixBase::UnitZ(), MatrixBase::UnitW()
+ * \sa MatrixBase::Unit(Index,Index), MatrixBase::Unit(Index), MatrixBase::UnitY(), MatrixBase::UnitZ(), MatrixBase::UnitW()
*/
template<typename Derived>
EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::BasisReturnType MatrixBase<Derived>::UnitX()
@@ -786,7 +787,7 @@
*
* \only_for_vectors
*
- * \sa MatrixBase::Unit(int,int), MatrixBase::Unit(int), MatrixBase::UnitY(), MatrixBase::UnitZ(), MatrixBase::UnitW()
+ * \sa MatrixBase::Unit(Index,Index), MatrixBase::Unit(Index), MatrixBase::UnitY(), MatrixBase::UnitZ(), MatrixBase::UnitW()
*/
template<typename Derived>
EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::BasisReturnType MatrixBase<Derived>::UnitY()
@@ -796,7 +797,7 @@
*
* \only_for_vectors
*
- * \sa MatrixBase::Unit(int,int), MatrixBase::Unit(int), MatrixBase::UnitY(), MatrixBase::UnitZ(), MatrixBase::UnitW()
+ * \sa MatrixBase::Unit(Index,Index), MatrixBase::Unit(Index), MatrixBase::UnitY(), MatrixBase::UnitZ(), MatrixBase::UnitW()
*/
template<typename Derived>
EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::BasisReturnType MatrixBase<Derived>::UnitZ()
@@ -806,7 +807,7 @@
*
* \only_for_vectors
*
- * \sa MatrixBase::Unit(int,int), MatrixBase::Unit(int), MatrixBase::UnitY(), MatrixBase::UnitZ(), MatrixBase::UnitW()
+ * \sa MatrixBase::Unit(Index,Index), MatrixBase::Unit(Index), MatrixBase::UnitY(), MatrixBase::UnitZ(), MatrixBase::UnitW()
*/
template<typename Derived>
EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::BasisReturnType MatrixBase<Derived>::UnitW()
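All of the nullary factories retyped above (Constant, Zero, Ones, LinSpaced, Identity, Unit) now take Index extents. A sketch against the signatures as they stand in this patch; note that LinSpaced here still takes the size as its last argument:

#include <Eigen/Dense>
#include <iostream>

int main()
{
  typedef Eigen::VectorXd::Index Index;
  const Index n = 5;

  Eigen::VectorXd v  = Eigen::VectorXd::LinSpaced(0.0, 1.0, n); // low, high, size
  Eigen::MatrixXd id = Eigen::MatrixXd::Identity(n, n);
  Eigen::VectorXd e2 = Eigen::VectorXd::Unit(n, 2);             // size, i

  std::cout << v.transpose() << "\n" << (id * e2).transpose() << "\n";
  return 0;
}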
diff --git a/Eigen/src/Core/CwiseUnaryOp.h b/Eigen/src/Core/CwiseUnaryOp.h
index 8f95b69..da398d1 100644
--- a/Eigen/src/Core/CwiseUnaryOp.h
+++ b/Eigen/src/Core/CwiseUnaryOp.h
@@ -76,8 +76,8 @@
inline CwiseUnaryOp(const XprType& xpr, const UnaryOp& func = UnaryOp())
: m_xpr(xpr), m_functor(func) {}
- EIGEN_STRONG_INLINE int rows() const { return m_xpr.rows(); }
- EIGEN_STRONG_INLINE int cols() const { return m_xpr.cols(); }
+ EIGEN_STRONG_INLINE Index rows() const { return m_xpr.rows(); }
+ EIGEN_STRONG_INLINE Index cols() const { return m_xpr.cols(); }
/** \returns the functor representing the unary operation */
const UnaryOp& functor() const { return m_functor; }
@@ -100,32 +100,31 @@
template<typename UnaryOp, typename XprType>
class CwiseUnaryOpImpl<UnaryOp,XprType,Dense>
: public ei_dense_xpr_base<CwiseUnaryOp<UnaryOp, XprType> >::type
- {
- typedef CwiseUnaryOp<UnaryOp, XprType> Derived;
-
+{
public:
+ typedef CwiseUnaryOp<UnaryOp, XprType> Derived;
typedef typename ei_dense_xpr_base<CwiseUnaryOp<UnaryOp, XprType> >::type Base;
EIGEN_DENSE_PUBLIC_INTERFACE(Derived)
- EIGEN_STRONG_INLINE const Scalar coeff(int row, int col) const
+ EIGEN_STRONG_INLINE const Scalar coeff(Index row, Index col) const
{
return derived().functor()(derived().nestedExpression().coeff(row, col));
}
template<int LoadMode>
- EIGEN_STRONG_INLINE PacketScalar packet(int row, int col) const
+ EIGEN_STRONG_INLINE PacketScalar packet(Index row, Index col) const
{
return derived().functor().packetOp(derived().nestedExpression().template packet<LoadMode>(row, col));
}
- EIGEN_STRONG_INLINE const Scalar coeff(int index) const
+ EIGEN_STRONG_INLINE const Scalar coeff(Index index) const
{
return derived().functor()(derived().nestedExpression().coeff(index));
}
template<int LoadMode>
- EIGEN_STRONG_INLINE PacketScalar packet(int index) const
+ EIGEN_STRONG_INLINE PacketScalar packet(Index index) const
{
return derived().functor().packetOp(derived().nestedExpression().template packet<LoadMode>(index));
}
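Since CwiseUnaryOp only forwards each coeff()/packet() call through the functor, the Index retyping is invisible to functors themselves. A minimal sketch, assuming the usual unaryExpr() entry point:

#include <Eigen/Dense>
#include <iostream>

// Applied per coefficient via CwiseUnaryOp's coeff(Index)/coeff(Index,Index).
struct Square
{
  double operator()(double x) const { return x * x; }
};

int main()
{
  Eigen::VectorXd v = Eigen::VectorXd::LinSpaced(0.0, 3.0, 4); // low, high, size
  std::cout << v.unaryExpr(Square()).transpose() << "\n";
  return 0;
}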
diff --git a/Eigen/src/Core/CwiseUnaryView.h b/Eigen/src/Core/CwiseUnaryView.h
index 9cdd034..11a23c6 100644
--- a/Eigen/src/Core/CwiseUnaryView.h
+++ b/Eigen/src/Core/CwiseUnaryView.h
@@ -74,8 +74,8 @@
EIGEN_INHERIT_ASSIGNMENT_OPERATORS(CwiseUnaryView)
- EIGEN_STRONG_INLINE int rows() const { return m_matrix.rows(); }
- EIGEN_STRONG_INLINE int cols() const { return m_matrix.cols(); }
+ EIGEN_STRONG_INLINE Index rows() const { return m_matrix.rows(); }
+ EIGEN_STRONG_INLINE Index cols() const { return m_matrix.cols(); }
/** \returns the functor representing the unary operation */
const ViewOp& functor() const { return m_functor; }
@@ -98,40 +98,39 @@
class CwiseUnaryViewImpl<ViewOp,MatrixType,Dense>
: public ei_dense_xpr_base< CwiseUnaryView<ViewOp, MatrixType> >::type
{
- typedef CwiseUnaryView<ViewOp, MatrixType> Derived;
-
public:
+ typedef CwiseUnaryView<ViewOp, MatrixType> Derived;
typedef typename ei_dense_xpr_base< CwiseUnaryView<ViewOp, MatrixType> >::type Base;
- inline int innerStride() const
+ EIGEN_DENSE_PUBLIC_INTERFACE(Derived)
+
+ inline Index innerStride() const
{
return derived().nestedExpression().innerStride() * sizeof(typename ei_traits<MatrixType>::Scalar) / sizeof(Scalar);
}
- inline int outerStride() const
+ inline Index outerStride() const
{
return derived().nestedExpression().outerStride();
}
- EIGEN_DENSE_PUBLIC_INTERFACE(Derived)
-
- EIGEN_STRONG_INLINE CoeffReturnType coeff(int row, int col) const
+ EIGEN_STRONG_INLINE CoeffReturnType coeff(Index row, Index col) const
{
return derived().functor()(derived().nestedExpression().coeff(row, col));
}
- EIGEN_STRONG_INLINE CoeffReturnType coeff(int index) const
+ EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const
{
return derived().functor()(derived().nestedExpression().coeff(index));
}
- EIGEN_STRONG_INLINE Scalar& coeffRef(int row, int col)
+ EIGEN_STRONG_INLINE Scalar& coeffRef(Index row, Index col)
{
return derived().functor()(const_cast_derived().nestedExpression().coeffRef(row, col));
}
- EIGEN_STRONG_INLINE Scalar& coeffRef(int index)
+ EIGEN_STRONG_INLINE Scalar& coeffRef(Index index)
{
return derived().functor()(const_cast_derived().nestedExpression().coeffRef(index));
}
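The innerStride() above is what lets a view reinterpret the nested scalars: the nested stride is rescaled by the ratio of the two scalar sizes, a computation now carried out in Index. A sketch, assuming the writable real() view over a complex matrix goes through CwiseUnaryView:

#include <Eigen/Dense>
#include <iostream>

int main()
{
  Eigen::MatrixXcd m = Eigen::MatrixXcd::Zero(2, 2);

  // m.real() views the double real parts inside complex<double> storage;
  // its innerStride() is scaled by sizeof(complex<double>)/sizeof(double).
  m.real() = Eigen::MatrixXd::Ones(2, 2);

  std::cout << m << "\n";
  return 0;
}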
diff --git a/Eigen/src/Core/DenseBase.h b/Eigen/src/Core/DenseBase.h
index e2429be..c4b4057 100644
--- a/Eigen/src/Core/DenseBase.h
+++ b/Eigen/src/Core/DenseBase.h
@@ -50,8 +50,12 @@
class InnerIterator;
+ typedef typename ei_traits<Derived>::StorageKind StorageKind;
+ typedef typename ei_index<StorageKind>::type Index;
typedef typename ei_traits<Derived>::Scalar Scalar;
typedef typename ei_packet_traits<Scalar>::type PacketScalar;
+ typedef typename NumTraits<Scalar>::Real RealScalar;
+
typedef DenseCoeffsBase<Derived> Base;
using Base::derived;
using Base::const_cast_derived;
@@ -168,19 +172,9 @@
OuterStrideAtCompileTime = ei_outer_stride_at_compile_time<Derived>::ret
};
-#ifndef EIGEN_PARSED_BY_DOXYGEN
- /** This is the "real scalar" type; if the \a Scalar type is already real numbers
- * (e.g. int, float or double) then \a RealScalar is just the same as \a Scalar. If
- * \a Scalar is \a std::complex<T> then RealScalar is \a T.
- *
- * \sa class NumTraits
- */
- typedef typename NumTraits<Scalar>::Real RealScalar;
-#endif // not EIGEN_PARSED_BY_DOXYGEN
-
/** \returns the number of nonzero coefficients which is in practice the number
* of stored coefficients. */
- inline int nonZeros() const { return size(); }
+ inline Index nonZeros() const { return size(); }
/** \returns true if either the number of rows or the number of columns is equal to 1.
* In other words, this function returns
* \code rows()==1 || cols()==1 \endcode
@@ -191,7 +185,7 @@
* \note For a vector, this returns just 1. For a matrix (non-vector), this is the major dimension
* with respect to the storage order, i.e., the number of columns for a column-major matrix,
* and the number of rows for a row-major matrix. */
- int outerSize() const
+ Index outerSize() const
{
return IsVectorAtCompileTime ? 1
: int(IsRowMajor) ? this->rows() : this->cols();
@@ -202,7 +196,7 @@
* \note For a vector, this is just the size. For a matrix (non-vector), this is the minor dimension
* with respect to the storage order, i.e., the number of rows for a column-major matrix,
* and the number of columns for a row-major matrix. */
- int innerSize() const
+ Index innerSize() const
{
return IsVectorAtCompileTime ? this->size()
: int(IsRowMajor) ? this->cols() : this->rows();
@@ -212,7 +206,7 @@
* Matrix::resize() and Array::resize(). The present method only asserts that the new size equals the old size, and does
* nothing else.
*/
- void resize(int size)
+ void resize(Index size)
{
EIGEN_ONLY_USED_FOR_DEBUG(size);
ei_assert(size == this->size()
@@ -222,7 +216,7 @@
* Matrix::resize() and Array::resize(). The present method only asserts that the new size equals the old size, and does
* nothing else.
*/
- void resize(int rows, int cols)
+ void resize(Index rows, Index cols)
{
EIGEN_ONLY_USED_FOR_DEBUG(rows);
EIGEN_ONLY_USED_FOR_DEBUG(cols);
@@ -301,41 +295,41 @@
public:
#endif
- RowXpr row(int i);
- const RowXpr row(int i) const;
+ RowXpr row(Index i);
+ const RowXpr row(Index i) const;
- ColXpr col(int i);
- const ColXpr col(int i) const;
+ ColXpr col(Index i);
+ const ColXpr col(Index i) const;
- Block<Derived> block(int startRow, int startCol, int blockRows, int blockCols);
- const Block<Derived> block(int startRow, int startCol, int blockRows, int blockCols) const;
+ Block<Derived> block(Index startRow, Index startCol, Index blockRows, Index blockCols);
+ const Block<Derived> block(Index startRow, Index startCol, Index blockRows, Index blockCols) const;
- VectorBlock<Derived> segment(int start, int size);
- const VectorBlock<Derived> segment(int start, int size) const;
+ VectorBlock<Derived> segment(Index start, Index size);
+ const VectorBlock<Derived> segment(Index start, Index size) const;
- VectorBlock<Derived> head(int size);
- const VectorBlock<Derived> head(int size) const;
+ VectorBlock<Derived> head(Index size);
+ const VectorBlock<Derived> head(Index size) const;
- VectorBlock<Derived> tail(int size);
- const VectorBlock<Derived> tail(int size) const;
+ VectorBlock<Derived> tail(Index size);
+ const VectorBlock<Derived> tail(Index size) const;
- Block<Derived> topLeftCorner(int cRows, int cCols);
- const Block<Derived> topLeftCorner(int cRows, int cCols) const;
- Block<Derived> topRightCorner(int cRows, int cCols);
- const Block<Derived> topRightCorner(int cRows, int cCols) const;
- Block<Derived> bottomLeftCorner(int cRows, int cCols);
- const Block<Derived> bottomLeftCorner(int cRows, int cCols) const;
- Block<Derived> bottomRightCorner(int cRows, int cCols);
- const Block<Derived> bottomRightCorner(int cRows, int cCols) const;
+ Block<Derived> topLeftCorner(Index cRows, Index cCols);
+ const Block<Derived> topLeftCorner(Index cRows, Index cCols) const;
+ Block<Derived> topRightCorner(Index cRows, Index cCols);
+ const Block<Derived> topRightCorner(Index cRows, Index cCols) const;
+ Block<Derived> bottomLeftCorner(Index cRows, Index cCols);
+ const Block<Derived> bottomLeftCorner(Index cRows, Index cCols) const;
+ Block<Derived> bottomRightCorner(Index cRows, Index cCols);
+ const Block<Derived> bottomRightCorner(Index cRows, Index cCols) const;
- RowsBlockXpr topRows(int n);
- const RowsBlockXpr topRows(int n) const;
- RowsBlockXpr bottomRows(int n);
- const RowsBlockXpr bottomRows(int n) const;
- ColsBlockXpr leftCols(int n);
- const ColsBlockXpr leftCols(int n) const;
- ColsBlockXpr rightCols(int n);
- const ColsBlockXpr rightCols(int n) const;
+ RowsBlockXpr topRows(Index n);
+ const RowsBlockXpr topRows(Index n) const;
+ RowsBlockXpr bottomRows(Index n);
+ const RowsBlockXpr bottomRows(Index n) const;
+ ColsBlockXpr leftCols(Index n);
+ const ColsBlockXpr leftCols(Index n) const;
+ ColsBlockXpr rightCols(Index n);
+ const ColsBlockXpr rightCols(Index n) const;
template<int CRows, int CCols> Block<Derived, CRows, CCols> topLeftCorner();
template<int CRows, int CCols> const Block<Derived, CRows, CCols> topLeftCorner() const;
@@ -356,9 +350,9 @@
template<int NCols> const typename NColsBlockXpr<NCols>::Type rightCols() const;
template<int BlockRows, int BlockCols>
- Block<Derived, BlockRows, BlockCols> block(int startRow, int startCol);
+ Block<Derived, BlockRows, BlockCols> block(Index startRow, Index startCol);
template<int BlockRows, int BlockCols>
- const Block<Derived, BlockRows, BlockCols> block(int startRow, int startCol) const;
+ const Block<Derived, BlockRows, BlockCols> block(Index startRow, Index startCol) const;
template<int Size> VectorBlock<Derived,Size> head(void);
template<int Size> const VectorBlock<Derived,Size> head() const;
@@ -366,8 +360,8 @@
template<int Size> VectorBlock<Derived,Size> tail();
template<int Size> const VectorBlock<Derived,Size> tail() const;
- template<int Size> VectorBlock<Derived,Size> segment(int start);
- template<int Size> const VectorBlock<Derived,Size> segment(int start) const;
+ template<int Size> VectorBlock<Derived,Size> segment(Index start);
+ template<int Size> const VectorBlock<Derived,Size> segment(Index start) const;
Diagonal<Derived,0> diagonal();
const Diagonal<Derived,0> diagonal() const;
@@ -375,8 +369,8 @@
template<int Index> Diagonal<Derived,Index> diagonal();
template<int Index> const Diagonal<Derived,Index> diagonal() const;
- Diagonal<Derived, Dynamic> diagonal(int index);
- const Diagonal<Derived, Dynamic> diagonal(int index) const;
+ Diagonal<Derived, Dynamic> diagonal(Index index);
+ const Diagonal<Derived, Dynamic> diagonal(Index index) const;
template<unsigned int Mode> TriangularView<Derived, Mode> part();
template<unsigned int Mode> const TriangularView<Derived, Mode> part() const;
@@ -388,37 +382,37 @@
template<unsigned int UpLo> const SelfAdjointView<Derived, UpLo> selfadjointView() const;
static const ConstantReturnType
- Constant(int rows, int cols, const Scalar& value);
+ Constant(Index rows, Index cols, const Scalar& value);
static const ConstantReturnType
- Constant(int size, const Scalar& value);
+ Constant(Index size, const Scalar& value);
static const ConstantReturnType
Constant(const Scalar& value);
static const SequentialLinSpacedReturnType
- LinSpaced(Sequential_t, const Scalar& low, const Scalar& high, int size);
+ LinSpaced(Sequential_t, const Scalar& low, const Scalar& high, Index size);
static const RandomAccessLinSpacedReturnType
- LinSpaced(const Scalar& low, const Scalar& high, int size);
+ LinSpaced(const Scalar& low, const Scalar& high, Index size);
template<typename CustomNullaryOp>
static const CwiseNullaryOp<CustomNullaryOp, Derived>
- NullaryExpr(int rows, int cols, const CustomNullaryOp& func);
+ NullaryExpr(Index rows, Index cols, const CustomNullaryOp& func);
template<typename CustomNullaryOp>
static const CwiseNullaryOp<CustomNullaryOp, Derived>
- NullaryExpr(int size, const CustomNullaryOp& func);
+ NullaryExpr(Index size, const CustomNullaryOp& func);
template<typename CustomNullaryOp>
static const CwiseNullaryOp<CustomNullaryOp, Derived>
NullaryExpr(const CustomNullaryOp& func);
- static const ConstantReturnType Zero(int rows, int cols);
- static const ConstantReturnType Zero(int size);
+ static const ConstantReturnType Zero(Index rows, Index cols);
+ static const ConstantReturnType Zero(Index size);
static const ConstantReturnType Zero();
- static const ConstantReturnType Ones(int rows, int cols);
- static const ConstantReturnType Ones(int size);
+ static const ConstantReturnType Ones(Index rows, Index cols);
+ static const ConstantReturnType Ones(Index size);
static const ConstantReturnType Ones();
void fill(const Scalar& value);
Derived& setConstant(const Scalar& value);
- Derived& setLinSpaced(const Scalar& low, const Scalar& high, int size);
+ Derived& setLinSpaced(const Scalar& low, const Scalar& high, Index size);
Derived& setZero();
Derived& setOnes();
Derived& setRandom();
@@ -471,11 +465,11 @@
typename ei_traits<Derived>::Scalar minCoeff() const;
typename ei_traits<Derived>::Scalar maxCoeff() const;
- typename ei_traits<Derived>::Scalar minCoeff(int* row, int* col) const;
- typename ei_traits<Derived>::Scalar maxCoeff(int* row, int* col) const;
+ typename ei_traits<Derived>::Scalar minCoeff(Index* row, Index* col) const;
+ typename ei_traits<Derived>::Scalar maxCoeff(Index* row, Index* col) const;
- typename ei_traits<Derived>::Scalar minCoeff(int* index) const;
- typename ei_traits<Derived>::Scalar maxCoeff(int* index) const;
+ typename ei_traits<Derived>::Scalar minCoeff(Index* index) const;
+ typename ei_traits<Derived>::Scalar maxCoeff(Index* index) const;
template<typename BinaryOp>
typename ei_result_of<BinaryOp(typename ei_traits<Derived>::Scalar)>::type
@@ -490,15 +484,15 @@
bool all(void) const;
bool any(void) const;
- int count() const;
+ Index count() const;
const VectorwiseOp<Derived,Horizontal> rowwise() const;
VectorwiseOp<Derived,Horizontal> rowwise();
const VectorwiseOp<Derived,Vertical> colwise() const;
VectorwiseOp<Derived,Vertical> colwise();
- static const CwiseNullaryOp<ei_scalar_random_op<Scalar>,Derived> Random(int rows, int cols);
- static const CwiseNullaryOp<ei_scalar_random_op<Scalar>,Derived> Random(int size);
+ static const CwiseNullaryOp<ei_scalar_random_op<Scalar>,Derived> Random(Index rows, Index cols);
+ static const CwiseNullaryOp<ei_scalar_random_op<Scalar>,Derived> Random(Index size);
static const CwiseNullaryOp<ei_scalar_random_op<Scalar>,Derived> Random();
template<typename ThenDerived,typename ElseDerived>
@@ -518,7 +512,7 @@
template<int RowFactor, int ColFactor>
const Replicate<Derived,RowFactor,ColFactor> replicate() const;
- const Replicate<Derived,Dynamic,Dynamic> replicate(int rowFacor,int colFactor) const;
+ const Replicate<Derived,Dynamic,Dynamic> replicate(Index rowFactor, Index colFactor) const;
Eigen::Reverse<Derived, BothDirections> reverse();
const Eigen::Reverse<Derived, BothDirections> reverse() const;
@@ -526,8 +520,8 @@
#ifdef EIGEN2_SUPPORT
- Block<Derived> corner(CornerType type, int cRows, int cCols);
- const Block<Derived> corner(CornerType type, int cRows, int cCols) const;
+ Block<Derived> corner(CornerType type, Index cRows, Index cCols);
+ const Block<Derived> corner(CornerType type, Index cRows, Index cCols) const;
template<int CRows, int CCols>
Block<Derived, CRows, CCols> corner(CornerType type);
template<int CRows, int CCols>
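The new Index member of DenseBase comes from an ei_index trait keyed on StorageKind, so different storage kinds can choose different index types. A hedged sketch of the shape such a trait could take; the concrete definition lives elsewhere in the patch, and std::ptrdiff_t is an assumption here:

#include <cstddef>

struct Dense;  // Eigen's dense StorageKind tag (declaration only, for the sketch)

// Sketch only: maps a StorageKind to its index type, as consumed by
// "typedef typename ei_index<StorageKind>::type Index" above.
template<typename StorageKind> struct ei_index;

template<> struct ei_index<Dense>
{
  typedef std::ptrdiff_t type;  // assumed: a signed, pointer-sized integer
};

// A dense expression's Index would then resolve to std::ptrdiff_t:
typedef ei_index<Dense>::type DenseIndex;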
diff --git a/Eigen/src/Core/DenseCoeffsBase.h b/Eigen/src/Core/DenseCoeffsBase.h
index ccf959b..7026bbe 100644
--- a/Eigen/src/Core/DenseCoeffsBase.h
+++ b/Eigen/src/Core/DenseCoeffsBase.h
@@ -29,7 +29,10 @@
class DenseCoeffsBase : public EigenBase<Derived>
{
public:
+ typedef typename ei_traits<Derived>::StorageKind StorageKind;
+ typedef typename ei_index<StorageKind>::type Index;
typedef typename ei_traits<Derived>::Scalar Scalar;
+ typedef typename ei_packet_traits<Scalar>::type PacketScalar;
typedef typename ei_meta_if<ei_has_direct_access<Derived>::ret, const Scalar&, Scalar>::ret CoeffReturnType;
typedef EigenBase<Derived> Base;
@@ -38,7 +41,7 @@
using Base::size;
using Base::derived;
- EIGEN_STRONG_INLINE int rowIndexByOuterInner(int outer, int inner) const
+ EIGEN_STRONG_INLINE Index rowIndexByOuterInner(Index outer, Index inner) const
{
return int(Derived::RowsAtCompileTime) == 1 ? 0
: int(Derived::ColsAtCompileTime) == 1 ? inner
@@ -46,7 +49,7 @@
: inner;
}
- EIGEN_STRONG_INLINE int colIndexByOuterInner(int outer, int inner) const
+ EIGEN_STRONG_INLINE Index colIndexByOuterInner(Index outer, Index inner) const
{
return int(Derived::ColsAtCompileTime) == 1 ? 0
: int(Derived::RowsAtCompileTime) == 1 ? inner
@@ -55,27 +58,27 @@
}
/** Short version: don't use this function, use
- * \link operator()(int,int) const \endlink instead.
+ * \link operator()(Index,Index) const \endlink instead.
*
* Long version: this function is similar to
- * \link operator()(int,int) const \endlink, but without the assertion.
+ * \link operator()(Index,Index) const \endlink, but without the assertion.
* Use this for limiting the performance cost of debugging code when doing
* repeated coefficient access. Only use this when it is guaranteed that the
* parameters \a row and \a col are in range.
*
* If EIGEN_INTERNAL_DEBUGGING is defined, an assertion will be made, making this
- * function equivalent to \link operator()(int,int) const \endlink.
+ * function equivalent to \link operator()(Index,Index) const \endlink.
*
- * \sa operator()(int,int) const, coeffRef(int,int), coeff(int) const
+ * \sa operator()(Index,Index) const, coeffRef(Index,Index), coeff(Index) const
*/
- EIGEN_STRONG_INLINE const CoeffReturnType coeff(int row, int col) const
+ EIGEN_STRONG_INLINE const CoeffReturnType coeff(Index row, Index col) const
{
ei_internal_assert(row >= 0 && row < rows()
&& col >= 0 && col < cols());
return derived().coeff(row, col);
}
- EIGEN_STRONG_INLINE const CoeffReturnType coeffByOuterInner(int outer, int inner) const
+ EIGEN_STRONG_INLINE const CoeffReturnType coeffByOuterInner(Index outer, Index inner) const
{
return coeff(rowIndexByOuterInner(outer, inner),
colIndexByOuterInner(outer, inner));
@@ -83,9 +86,9 @@
/** \returns the coefficient at the given row and column.
*
- * \sa operator()(int,int), operator[](int)
+ * \sa operator()(Index,Index), operator[](Index)
*/
- EIGEN_STRONG_INLINE const CoeffReturnType operator()(int row, int col) const
+ EIGEN_STRONG_INLINE const CoeffReturnType operator()(Index row, Index col) const
{
ei_assert(row >= 0 && row < rows()
&& col >= 0 && col < cols());
@@ -93,22 +96,22 @@
}
/** Short version: don't use this function, use
- * \link operator[](int) const \endlink instead.
+ * \link operator[](Index) const \endlink instead.
*
* Long version: this function is similar to
- * \link operator[](int) const \endlink, but without the assertion.
+ * \link operator[](Index) const \endlink, but without the assertion.
* Use this for limiting the performance cost of debugging code when doing
* repeated coefficient access. Only use this when it is guaranteed that the
* parameter \a index is in range.
*
* If EIGEN_INTERNAL_DEBUGGING is defined, an assertion will be made, making this
- * function equivalent to \link operator[](int) const \endlink.
+ * function equivalent to \link operator[](Index) const \endlink.
*
- * \sa operator[](int) const, coeffRef(int), coeff(int,int) const
+ * \sa operator[](Index) const, coeffRef(Index), coeff(Index,Index) const
*/
EIGEN_STRONG_INLINE const CoeffReturnType
- coeff(int index) const
+ coeff(Index index) const
{
ei_internal_assert(index >= 0 && index < size());
return derived().coeff(index);
@@ -119,12 +122,12 @@
*
* This method is allowed only for vector expressions, and for matrix expressions having the LinearAccessBit.
*
- * \sa operator[](int), operator()(int,int) const, x() const, y() const,
+ * \sa operator[](Index), operator()(Index,Index) const, x() const, y() const,
* z() const, w() const
*/
EIGEN_STRONG_INLINE const CoeffReturnType
- operator[](int index) const
+ operator[](Index index) const
{
EIGEN_STATIC_ASSERT(Derived::IsVectorAtCompileTime,
THE_BRACKET_OPERATOR_IS_ONLY_FOR_VECTORS__USE_THE_PARENTHESIS_OPERATOR_INSTEAD)
@@ -134,16 +137,16 @@
/** \returns the coefficient at given index.
*
- * This is synonymous to operator[](int) const.
+ * This is synonymous with operator[](Index) const.
*
* This method is allowed only for vector expressions, and for matrix expressions having the LinearAccessBit.
*
- * \sa operator[](int), operator()(int,int) const, x() const, y() const,
+ * \sa operator[](Index), operator()(Index,Index) const, x() const, y() const,
* z() const, w() const
*/
EIGEN_STRONG_INLINE const CoeffReturnType
- operator()(int index) const
+ operator()(Index index) const
{
ei_assert(index >= 0 && index < size());
return derived().coeff(index);
@@ -180,17 +183,17 @@
template<int LoadMode>
EIGEN_STRONG_INLINE typename ei_packet_traits<Scalar>::type
- packet(int row, int col) const
+ packet(Index row, Index col) const
{
ei_internal_assert(row >= 0 && row < rows()
- && col >= 0 && col < cols());
+ && col >= 0 && col < cols());
return derived().template packet<LoadMode>(row,col);
}
template<int LoadMode>
EIGEN_STRONG_INLINE typename ei_packet_traits<Scalar>::type
- packetByOuterInner(int outer, int inner) const
+ packetByOuterInner(Index outer, Index inner) const
{
return packet<LoadMode>(rowIndexByOuterInner(outer, inner),
colIndexByOuterInner(outer, inner));
@@ -207,7 +210,7 @@
template<int LoadMode>
EIGEN_STRONG_INLINE typename ei_packet_traits<Scalar>::type
- packet(int index) const
+ packet(Index index) const
{
ei_internal_assert(index >= 0 && index < size());
return derived().template packet<LoadMode>(index);
@@ -240,8 +243,14 @@
public:
typedef DenseCoeffsBase<Derived, false> Base;
+
+ typedef typename ei_traits<Derived>::StorageKind StorageKind;
+ typedef typename ei_index<StorageKind>::type Index;
typedef typename ei_traits<Derived>::Scalar Scalar;
- using typename Base::CoeffReturnType;
+ typedef typename ei_packet_traits<Scalar>::type PacketScalar;
+ typedef typename NumTraits<Scalar>::Real RealScalar;
+ typedef typename Base::CoeffReturnType CoeffReturnType;
+
using Base::coeff;
using Base::rows;
using Base::cols;
@@ -257,20 +266,20 @@
using Base::w;
/** Short version: don't use this function, use
- * \link operator()(int,int) \endlink instead.
+ * \link operator()(Index,Index) \endlink instead.
*
* Long version: this function is similar to
- * \link operator()(int,int) \endlink, but without the assertion.
+ * \link operator()(Index,Index) \endlink, but without the assertion.
* Use this for limiting the performance cost of debugging code when doing
* repeated coefficient access. Only use this when it is guaranteed that the
* parameters \a row and \a col are in range.
*
* If EIGEN_INTERNAL_DEBUGGING is defined, an assertion will be made, making this
- * function equivalent to \link operator()(int,int) \endlink.
+ * function equivalent to \link operator()(Index,Index) \endlink.
*
- * \sa operator()(int,int), coeff(int, int) const, coeffRef(int)
+ * \sa operator()(Index,Index), coeff(Index, Index) const, coeffRef(Index)
*/
- EIGEN_STRONG_INLINE Scalar& coeffRef(int row, int col)
+ EIGEN_STRONG_INLINE Scalar& coeffRef(Index row, Index col)
{
ei_internal_assert(row >= 0 && row < rows()
&& col >= 0 && col < cols());
@@ -278,7 +287,7 @@
}
EIGEN_STRONG_INLINE Scalar&
- coeffRefByOuterInner(int outer, int inner)
+ coeffRefByOuterInner(Index outer, Index inner)
{
return coeffRef(rowIndexByOuterInner(outer, inner),
colIndexByOuterInner(outer, inner));
@@ -286,11 +295,11 @@
/** \returns a reference to the coefficient at the given row and column.
*
- * \sa operator[](int)
+ * \sa operator[](Index)
*/
EIGEN_STRONG_INLINE Scalar&
- operator()(int row, int col)
+ operator()(Index row, Index col)
{
ei_assert(row >= 0 && row < rows()
&& col >= 0 && col < cols());
@@ -299,22 +308,22 @@
/** Short version: don't use this function, use
- * \link operator[](int) \endlink instead.
+ * \link operator[](Index) \endlink instead.
*
* Long version: this function is similar to
- * \link operator[](int) \endlink, but without the assertion.
+ * \link operator[](Index) \endlink, but without the assertion.
* Use this for limiting the performance cost of debugging code when doing
* repeated coefficient access. Only use this when it is guaranteed that the
* parameter \a index is in range.
*
* If EIGEN_INTERNAL_DEBUGGING is defined, an assertion will be made, making this
- * function equivalent to \link operator[](int) \endlink.
+ * function equivalent to \link operator[](Index) \endlink.
*
- * \sa operator[](int), coeff(int) const, coeffRef(int,int)
+ * \sa operator[](Index), coeff(Index) const, coeffRef(Index,Index)
*/
EIGEN_STRONG_INLINE Scalar&
- coeffRef(int index)
+ coeffRef(Index index)
{
ei_internal_assert(index >= 0 && index < size());
return derived().coeffRef(index);
@@ -324,11 +333,11 @@
*
* This method is allowed only for vector expressions, and for matrix expressions having the LinearAccessBit.
*
- * \sa operator[](int) const, operator()(int,int), x(), y(), z(), w()
+ * \sa operator[](Index) const, operator()(Index,Index), x(), y(), z(), w()
*/
EIGEN_STRONG_INLINE Scalar&
- operator[](int index)
+ operator[](Index index)
{
EIGEN_STATIC_ASSERT(Derived::IsVectorAtCompileTime,
THE_BRACKET_OPERATOR_IS_ONLY_FOR_VECTORS__USE_THE_PARENTHESIS_OPERATOR_INSTEAD)
@@ -338,15 +347,15 @@
/** \returns a reference to the coefficient at given index.
*
- * This is synonymous to operator[](int).
+ * This is synonymous with operator[](Index).
*
* This method is allowed only for vector expressions, and for matrix expressions having the LinearAccessBit.
*
- * \sa operator[](int) const, operator()(int,int), x(), y(), z(), w()
+ * \sa operator[](Index) const, operator()(Index,Index), x(), y(), z(), w()
*/
EIGEN_STRONG_INLINE Scalar&
- operator()(int index)
+ operator()(Index index)
{
ei_assert(index >= 0 && index < size());
return derived().coeffRef(index);
@@ -383,7 +392,7 @@
template<int StoreMode>
EIGEN_STRONG_INLINE void writePacket
- (int row, int col, const typename ei_packet_traits<Scalar>::type& x)
+ (Index row, Index col, const typename ei_packet_traits<Scalar>::type& x)
{
ei_internal_assert(row >= 0 && row < rows()
&& col >= 0 && col < cols());
@@ -393,7 +402,7 @@
template<int StoreMode>
EIGEN_STRONG_INLINE void writePacketByOuterInner
- (int outer, int inner, const typename ei_packet_traits<Scalar>::type& x)
+ (Index outer, Index inner, const typename ei_packet_traits<Scalar>::type& x)
{
writePacket<StoreMode>(rowIndexByOuterInner(outer, inner),
colIndexByOuterInner(outer, inner),
@@ -411,7 +420,7 @@
template<int StoreMode>
EIGEN_STRONG_INLINE void writePacket
- (int index, const typename ei_packet_traits<Scalar>::type& x)
+ (Index index, const typename ei_packet_traits<Scalar>::type& x)
{
ei_internal_assert(index >= 0 && index < size());
derived().template writePacket<StoreMode>(index,x);
@@ -428,7 +437,7 @@
*/
template<typename OtherDerived>
- EIGEN_STRONG_INLINE void copyCoeff(int row, int col, const DenseBase<OtherDerived>& other)
+ EIGEN_STRONG_INLINE void copyCoeff(Index row, Index col, const DenseBase<OtherDerived>& other)
{
ei_internal_assert(row >= 0 && row < rows()
&& col >= 0 && col < cols());
@@ -444,7 +453,7 @@
*/
template<typename OtherDerived>
- EIGEN_STRONG_INLINE void copyCoeff(int index, const DenseBase<OtherDerived>& other)
+ EIGEN_STRONG_INLINE void copyCoeff(Index index, const DenseBase<OtherDerived>& other)
{
ei_internal_assert(index >= 0 && index < size());
derived().coeffRef(index) = other.derived().coeff(index);
@@ -452,10 +461,10 @@
template<typename OtherDerived>
- EIGEN_STRONG_INLINE void copyCoeffByOuterInner(int outer, int inner, const DenseBase<OtherDerived>& other)
+ EIGEN_STRONG_INLINE void copyCoeffByOuterInner(Index outer, Index inner, const DenseBase<OtherDerived>& other)
{
- const int row = rowIndexByOuterInner(outer,inner);
- const int col = colIndexByOuterInner(outer,inner);
+ const Index row = rowIndexByOuterInner(outer,inner);
+ const Index col = colIndexByOuterInner(outer,inner);
// derived() is important here: copyCoeff() may be reimplemented in Derived!
derived().copyCoeff(row, col, other);
}
@@ -469,7 +478,7 @@
*/
template<typename OtherDerived, int StoreMode, int LoadMode>
- EIGEN_STRONG_INLINE void copyPacket(int row, int col, const DenseBase<OtherDerived>& other)
+ EIGEN_STRONG_INLINE void copyPacket(Index row, Index col, const DenseBase<OtherDerived>& other)
{
ei_internal_assert(row >= 0 && row < rows()
&& col >= 0 && col < cols());
@@ -486,7 +495,7 @@
*/
template<typename OtherDerived, int StoreMode, int LoadMode>
- EIGEN_STRONG_INLINE void copyPacket(int index, const DenseBase<OtherDerived>& other)
+ EIGEN_STRONG_INLINE void copyPacket(Index index, const DenseBase<OtherDerived>& other)
{
ei_internal_assert(index >= 0 && index < size());
derived().template writePacket<StoreMode>(index,
@@ -494,10 +503,10 @@
}
template<typename OtherDerived, int StoreMode, int LoadMode>
- EIGEN_STRONG_INLINE void copyPacketByOuterInner(int outer, int inner, const DenseBase<OtherDerived>& other)
+ EIGEN_STRONG_INLINE void copyPacketByOuterInner(Index outer, Index inner, const DenseBase<OtherDerived>& other)
{
- const int row = rowIndexByOuterInner(outer,inner);
- const int col = colIndexByOuterInner(outer,inner);
+ const Index row = rowIndexByOuterInner(outer,inner);
+ const Index col = colIndexByOuterInner(outer,inner);
// derived() is important here: copyCoeff() may be reimplemented in Derived!
derived().template copyPacket< OtherDerived, StoreMode, LoadMode>(row, col, other);
}
@@ -507,7 +516,7 @@
*
* \sa outerStride(), rowStride(), colStride()
*/
- inline int innerStride() const
+ inline Index innerStride() const
{
return derived().innerStride();
}
@@ -517,12 +526,12 @@
*
* \sa innerStride(), rowStride(), colStride()
*/
- inline int outerStride() const
+ inline Index outerStride() const
{
return derived().outerStride();
}
- inline int stride() const
+ inline Index stride() const
{
return Derived::IsVectorAtCompileTime ? innerStride() : outerStride();
}
@@ -531,7 +540,7 @@
*
* \sa innerStride(), outerStride(), colStride()
*/
- inline int rowStride() const
+ inline Index rowStride() const
{
return Derived::IsRowMajor ? outerStride() : innerStride();
}
@@ -540,7 +549,7 @@
*
* \sa innerStride(), outerStride(), rowStride()
*/
- inline int colStride() const
+ inline Index colStride() const
{
return Derived::IsRowMajor ? innerStride() : outerStride();
}
@@ -549,14 +558,14 @@
template<typename Derived, bool JustReturnZero>
struct ei_first_aligned_impl
{
- inline static int run(const Derived&)
+ inline static typename Derived::Index run(const Derived&)
{ return 0; }
};
template<typename Derived>
struct ei_first_aligned_impl<Derived, false>
{
- inline static int run(const Derived& m)
+ inline static typename Derived::Index run(const Derived& m)
{
return ei_first_aligned(&m.const_cast_derived().coeffRef(0,0), m.size());
}
@@ -568,7 +577,7 @@
* documentation.
*/
template<typename Derived>
-inline static int ei_first_aligned(const Derived& m)
+inline static typename Derived::Index ei_first_aligned(const Derived& m)
{
return ei_first_aligned_impl
<Derived, (Derived::Flags & AlignedBit) || !(Derived::Flags & DirectAccessBit)>
diff --git a/Eigen/src/Core/DenseStorageBase.h b/Eigen/src/Core/DenseStorageBase.h
index d2bbb07..15f3988 100644
--- a/Eigen/src/Core/DenseStorageBase.h
+++ b/Eigen/src/Core/DenseStorageBase.h
@@ -44,9 +44,13 @@
public:
enum { Options = ei_traits<Derived>::Options };
typedef typename ei_dense_xpr_base<Derived>::type Base;
- typedef typename Base::PlainObject PlainObject;
- typedef typename Base::Scalar Scalar;
- typedef typename Base::PacketScalar PacketScalar;
+
+ typedef typename ei_traits<Derived>::StorageKind StorageKind;
+ typedef typename ei_index<StorageKind>::type Index;
+ typedef typename ei_traits<Derived>::Scalar Scalar;
+ typedef typename ei_packet_traits<Scalar>::type PacketScalar;
+ typedef typename NumTraits<Scalar>::Real RealScalar;
+
using Base::RowsAtCompileTime;
using Base::ColsAtCompileTime;
using Base::SizeAtCompileTime;
@@ -72,10 +76,10 @@
Base& base() { return *static_cast<Base*>(this); }
const Base& base() const { return *static_cast<const Base*>(this); }
- EIGEN_STRONG_INLINE int rows() const { return m_storage.rows(); }
- EIGEN_STRONG_INLINE int cols() const { return m_storage.cols(); }
+ EIGEN_STRONG_INLINE Index rows() const { return m_storage.rows(); }
+ EIGEN_STRONG_INLINE Index cols() const { return m_storage.cols(); }
- EIGEN_STRONG_INLINE const Scalar& coeff(int row, int col) const
+ EIGEN_STRONG_INLINE const Scalar& coeff(Index row, Index col) const
{
if(Flags & RowMajorBit)
return m_storage.data()[col + row * m_storage.cols()];
@@ -83,12 +87,12 @@
return m_storage.data()[row + col * m_storage.rows()];
}
- EIGEN_STRONG_INLINE const Scalar& coeff(int index) const
+ EIGEN_STRONG_INLINE const Scalar& coeff(Index index) const
{
return m_storage.data()[index];
}
- EIGEN_STRONG_INLINE Scalar& coeffRef(int row, int col)
+ EIGEN_STRONG_INLINE Scalar& coeffRef(Index row, Index col)
{
if(Flags & RowMajorBit)
return m_storage.data()[col + row * m_storage.cols()];
@@ -96,13 +100,13 @@
return m_storage.data()[row + col * m_storage.rows()];
}
- EIGEN_STRONG_INLINE Scalar& coeffRef(int index)
+ EIGEN_STRONG_INLINE Scalar& coeffRef(Index index)
{
return m_storage.data()[index];
}
template<int LoadMode>
- EIGEN_STRONG_INLINE PacketScalar packet(int row, int col) const
+ EIGEN_STRONG_INLINE PacketScalar packet(Index row, Index col) const
{
return ei_ploadt<Scalar, LoadMode>
(m_storage.data() + (Flags & RowMajorBit
@@ -111,13 +115,13 @@
}
template<int LoadMode>
- EIGEN_STRONG_INLINE PacketScalar packet(int index) const
+ EIGEN_STRONG_INLINE PacketScalar packet(Index index) const
{
return ei_ploadt<Scalar, LoadMode>(m_storage.data() + index);
}
template<int StoreMode>
- EIGEN_STRONG_INLINE void writePacket(int row, int col, const PacketScalar& x)
+ EIGEN_STRONG_INLINE void writePacket(Index row, Index col, const PacketScalar& x)
{
ei_pstoret<Scalar, PacketScalar, StoreMode>
(m_storage.data() + (Flags & RowMajorBit
@@ -126,7 +130,7 @@
}
template<int StoreMode>
- EIGEN_STRONG_INLINE void writePacket(int index, const PacketScalar& x)
+ EIGEN_STRONG_INLINE void writePacket(Index index, const PacketScalar& x)
{
ei_pstoret<Scalar, PacketScalar, StoreMode>(m_storage.data() + index, x);
}
@@ -143,7 +147,7 @@
*
* This method is intended for dynamic-size matrices, although it is legal to call it on any
* matrix as long as fixed dimensions are left unchanged. If you only want to change the number
- * of rows and/or of columns, you can use resize(NoChange_t, int), resize(int, NoChange_t).
+ * of rows and/or of columns, you can use resize(NoChange_t, Index), resize(Index, NoChange_t).
*
* If the current number of coefficients of \c *this exactly matches the
* product \a rows * \a cols, then no memory allocation is performed and
@@ -153,12 +157,12 @@
* Example: \include Matrix_resize_int_int.cpp
* Output: \verbinclude Matrix_resize_int_int.out
*
- * \sa resize(int) for vectors, resize(NoChange_t, int), resize(int, NoChange_t)
+ * \sa resize(Index) for vectors, resize(NoChange_t, Index), resize(Index, NoChange_t)
*/
- inline void resize(int rows, int cols)
+ inline void resize(Index rows, Index cols)
{
#ifdef EIGEN_INITIALIZE_MATRICES_BY_ZERO
- int size = rows*cols;
+ Index size = rows*cols;
bool size_changed = size != this->size();
m_storage.resize(size, rows, cols);
if(size_changed) EIGEN_INITIALIZE_BY_ZERO_IF_THAT_OPTION_IS_ENABLED
@@ -176,9 +180,9 @@
* Example: \include Matrix_resize_int.cpp
* Output: \verbinclude Matrix_resize_int.out
*
- * \sa resize(int,int), resize(NoChange_t, int), resize(int, NoChange_t)
+ * \sa resize(Index,Index), resize(NoChange_t, Index), resize(Index, NoChange_t)
*/
- inline void resize(int size)
+ inline void resize(Index size)
{
EIGEN_STATIC_ASSERT_VECTOR_ONLY(DenseStorageBase)
ei_assert(SizeAtCompileTime == Dynamic || SizeAtCompileTime == size);
@@ -200,9 +204,9 @@
* Example: \include Matrix_resize_NoChange_int.cpp
* Output: \verbinclude Matrix_resize_NoChange_int.out
*
- * \sa resize(int,int)
+ * \sa resize(Index,Index)
*/
- inline void resize(NoChange_t, int cols)
+ inline void resize(NoChange_t, Index cols)
{
resize(rows(), cols);
}
@@ -213,9 +217,9 @@
* Example: \include Matrix_resize_int_NoChange.cpp
* Output: \verbinclude Matrix_resize_int_NoChange.out
*
- * \sa resize(int,int)
+ * \sa resize(Index,Index)
*/
- inline void resize(int rows, NoChange_t)
+ inline void resize(Index rows, NoChange_t)
{
resize(rows, cols());
}
@@ -231,7 +235,7 @@
EIGEN_STRONG_INLINE void resizeLike(const EigenBase<OtherDerived>& _other)
{
const OtherDerived& other = _other.derived();
- const int othersize = other.rows()*other.cols();
+ const Index othersize = other.rows()*other.cols();
if(RowsAtCompileTime == 1)
{
ei_assert(other.rows() == 1 || other.cols() == 1);
@@ -248,26 +252,26 @@
/** Resizes \c *this to a \a rows x \a cols matrix while leaving old values of \c *this untouched.
*
* This method is intended for dynamic-size matrices. If you only want to change the number
- * of rows and/or of columns, you can use conservativeResize(NoChange_t, int),
- * conservativeResize(int, NoChange_t).
+ * of rows and/or of columns, you can use conservativeResize(NoChange_t, Index),
+ * conservativeResize(Index, NoChange_t).
*
* The top-left part of the resized matrix will be the same as the overlapping top-left corner
* of \c *this. In case values need to be appended to the matrix, they will be uninitialized.
*/
- EIGEN_STRONG_INLINE void conservativeResize(int rows, int cols)
+ EIGEN_STRONG_INLINE void conservativeResize(Index rows, Index cols)
{
ei_conservative_resize_like_impl<Derived>::run(*this, rows, cols);
}
- EIGEN_STRONG_INLINE void conservativeResize(int rows, NoChange_t)
+ EIGEN_STRONG_INLINE void conservativeResize(Index rows, NoChange_t)
{
- // Note: see the comment in conservativeResize(int,int)
+ // Note: see the comment in conservativeResize(Index,Index)
conservativeResize(rows, cols());
}
- EIGEN_STRONG_INLINE void conservativeResize(NoChange_t, int cols)
+ EIGEN_STRONG_INLINE void conservativeResize(NoChange_t, Index cols)
{
- // Note: see the comment in conservativeResize(int,int)
+ // Note: see the comment in conservativeResize(Index,Index)
conservativeResize(rows(), cols);
}
@@ -279,7 +283,7 @@
*
* When values are appended, they will be uninitialized.
*/
- EIGEN_STRONG_INLINE void conservativeResize(int size)
+ EIGEN_STRONG_INLINE void conservativeResize(Index size)
{
ei_conservative_resize_like_impl<Derived>::run(*this, size);
}
@@ -329,7 +333,7 @@
}
#endif
- EIGEN_STRONG_INLINE DenseStorageBase(int size, int rows, int cols)
+ EIGEN_STRONG_INLINE DenseStorageBase(Index size, Index rows, Index cols)
: m_storage(size, rows, cols)
{
// _check_template_params();
@@ -370,44 +374,44 @@
{ return UnalignedMapType(data); }
inline static UnalignedMapType Map(Scalar* data)
{ return UnalignedMapType(data); }
- inline static const UnalignedMapType Map(const Scalar* data, int size)
+ inline static const UnalignedMapType Map(const Scalar* data, Index size)
{ return UnalignedMapType(data, size); }
- inline static UnalignedMapType Map(Scalar* data, int size)
+ inline static UnalignedMapType Map(Scalar* data, Index size)
{ return UnalignedMapType(data, size); }
- inline static const UnalignedMapType Map(const Scalar* data, int rows, int cols)
+ inline static const UnalignedMapType Map(const Scalar* data, Index rows, Index cols)
{ return UnalignedMapType(data, rows, cols); }
- inline static UnalignedMapType Map(Scalar* data, int rows, int cols)
+ inline static UnalignedMapType Map(Scalar* data, Index rows, Index cols)
{ return UnalignedMapType(data, rows, cols); }
inline static const AlignedMapType MapAligned(const Scalar* data)
{ return AlignedMapType(data); }
inline static AlignedMapType MapAligned(Scalar* data)
{ return AlignedMapType(data); }
- inline static const AlignedMapType MapAligned(const Scalar* data, int size)
+ inline static const AlignedMapType MapAligned(const Scalar* data, Index size)
{ return AlignedMapType(data, size); }
- inline static AlignedMapType MapAligned(Scalar* data, int size)
+ inline static AlignedMapType MapAligned(Scalar* data, Index size)
{ return AlignedMapType(data, size); }
- inline static const AlignedMapType MapAligned(const Scalar* data, int rows, int cols)
+ inline static const AlignedMapType MapAligned(const Scalar* data, Index rows, Index cols)
{ return AlignedMapType(data, rows, cols); }
- inline static AlignedMapType MapAligned(Scalar* data, int rows, int cols)
+ inline static AlignedMapType MapAligned(Scalar* data, Index rows, Index cols)
{ return AlignedMapType(data, rows, cols); }
//@}
using Base::setConstant;
- Derived& setConstant(int size, const Scalar& value);
- Derived& setConstant(int rows, int cols, const Scalar& value);
+ Derived& setConstant(Index size, const Scalar& value);
+ Derived& setConstant(Index rows, Index cols, const Scalar& value);
using Base::setZero;
- Derived& setZero(int size);
- Derived& setZero(int rows, int cols);
+ Derived& setZero(Index size);
+ Derived& setZero(Index rows, Index cols);
using Base::setOnes;
- Derived& setOnes(int size);
- Derived& setOnes(int rows, int cols);
+ Derived& setOnes(Index size);
+ Derived& setOnes(Index rows, Index cols);
using Base::setRandom;
- Derived& setRandom(int size);
- Derived& setRandom(int rows, int cols);
+ Derived& setRandom(Index size);
+ Derived& setRandom(Index rows, Index cols);
#ifdef EIGEN_DENSESTORAGEBASE_PLUGIN
#include EIGEN_DENSESTORAGEBASE_PLUGIN
@@ -474,7 +478,7 @@
}
template<typename T0, typename T1>
- EIGEN_STRONG_INLINE void _init2(int rows, int cols, typename ei_enable_if<Base::SizeAtCompileTime!=2,T0>::type* = 0)
+ EIGEN_STRONG_INLINE void _init2(Index rows, Index cols, typename ei_enable_if<Base::SizeAtCompileTime!=2,T0>::type* = 0)
{
ei_assert(rows > 0 && (RowsAtCompileTime == Dynamic || RowsAtCompileTime == rows)
&& cols > 0 && (ColsAtCompileTime == Dynamic || ColsAtCompileTime == cols));
@@ -526,7 +530,8 @@
template <typename Derived, typename OtherDerived, bool IsVector>
struct ei_conservative_resize_like_impl
{
- static void run(DenseBase<Derived>& _this, int rows, int cols)
+ typedef typename Derived::Index Index;
+ static void run(DenseBase<Derived>& _this, Index rows, Index cols)
{
if (_this.rows() == rows && _this.cols() == cols) return;
EIGEN_STATIC_ASSERT_DYNAMIC_SIZE(Derived)
@@ -540,8 +545,8 @@
{
// The storage order does not allow us to use reallocation.
typename Derived::PlainObject tmp(rows,cols);
- const int common_rows = std::min(rows, _this.rows());
- const int common_cols = std::min(cols, _this.cols());
+ const Index common_rows = std::min(rows, _this.rows());
+ const Index common_cols = std::min(cols, _this.cols());
tmp.block(0,0,common_rows,common_cols) = _this.block(0,0,common_rows,common_cols);
_this.derived().swap(tmp);
}
@@ -551,10 +556,10 @@
{
if (_this.rows() == other.rows() && _this.cols() == other.cols()) return;
- // Note: Here is space for improvement. Basically, for conservativeResize(int,int),
+ // Note: Here is space for improvement. Basically, for conservativeResize(Index,Index),
// neither RowsAtCompileTime nor ColsAtCompileTime must be Dynamic. If only one of the
- // dimensions is dynamic, one could use either conservativeResize(int rows, NoChange_t) or
- // conservativeResize(NoChange_t, int cols). For these methods new static asserts like
+ // dimensions is dynamic, one could use either conservativeResize(Index rows, NoChange_t) or
+ // conservativeResize(NoChange_t, Index cols). For these methods new static asserts like
// EIGEN_STATIC_ASSERT_DYNAMIC_ROWS and EIGEN_STATIC_ASSERT_DYNAMIC_COLS would be good.
EIGEN_STATIC_ASSERT_DYNAMIC_SIZE(Derived)
EIGEN_STATIC_ASSERT_DYNAMIC_SIZE(OtherDerived)
@@ -562,8 +567,8 @@
if ( ( Derived::IsRowMajor && _this.cols() == other.cols()) || // row-major and we change only the number of rows
(!Derived::IsRowMajor && _this.rows() == other.rows()) ) // column-major and we change only the number of columns
{
- const int new_rows = other.rows() - _this.rows();
- const int new_cols = other.cols() - _this.cols();
+ const Index new_rows = other.rows() - _this.rows();
+ const Index new_cols = other.cols() - _this.cols();
_this.derived().m_storage.conservativeResize(other.size(),other.rows(),other.cols());
if (new_rows>0)
_this.bottomRightCorner(new_rows, other.cols()) = other.bottomRows(new_rows);
@@ -574,8 +579,8 @@
{
// The storage order does not allow us to use reallocation.
typename Derived::PlainObject tmp(other);
- const int common_rows = std::min(tmp.rows(), _this.rows());
- const int common_cols = std::min(tmp.cols(), _this.cols());
+ const Index common_rows = std::min(tmp.rows(), _this.rows());
+ const Index common_cols = std::min(tmp.cols(), _this.cols());
tmp.block(0,0,common_rows,common_cols) = _this.block(0,0,common_rows,common_cols);
_this.derived().swap(tmp);
}
@@ -585,10 +590,11 @@
template <typename Derived, typename OtherDerived>
struct ei_conservative_resize_like_impl<Derived,OtherDerived,true>
{
- static void run(DenseBase<Derived>& _this, int size)
+ typedef typename Derived::Index Index;
+ static void run(DenseBase<Derived>& _this, Index size)
{
- const int new_rows = Derived::RowsAtCompileTime==1 ? 1 : size;
- const int new_cols = Derived::RowsAtCompileTime==1 ? size : 1;
+ const Index new_rows = Derived::RowsAtCompileTime==1 ? 1 : size;
+ const Index new_cols = Derived::RowsAtCompileTime==1 ? size : 1;
_this.derived().m_storage.conservativeResize(size,new_rows,new_cols);
}
@@ -596,10 +602,10 @@
{
if (_this.rows() == other.rows() && _this.cols() == other.cols()) return;
- const int num_new_elements = other.size() - _this.size();
+ const Index num_new_elements = other.size() - _this.size();
- const int new_rows = Derived::RowsAtCompileTime==1 ? 1 : other.rows();
- const int new_cols = Derived::RowsAtCompileTime==1 ? other.cols() : 1;
+ const Index new_rows = Derived::RowsAtCompileTime==1 ? 1 : other.rows();
+ const Index new_cols = Derived::RowsAtCompileTime==1 ? other.cols() : 1;
_this.derived().m_storage.conservativeResize(other.size(),new_rows,new_cols);
if (num_new_elements > 0)
diff --git a/Eigen/src/Core/Diagonal.h b/Eigen/src/Core/Diagonal.h
index 9ae7d79..a4326a2 100644
--- a/Eigen/src/Core/Diagonal.h
+++ b/Eigen/src/Core/Diagonal.h
@@ -30,33 +30,34 @@
* \brief Expression of a diagonal/subdiagonal/superdiagonal in a matrix
*
* \param MatrixType the type of the object in which we are taking a sub/main/super diagonal
- * \param Index the index of the sub/super diagonal. The default is 0 and it means the main diagonal.
+ * \param DiagIndex the index of the sub/super diagonal. The default is 0 and it means the main diagonal.
* A positive value means a superdiagonal, a negative value means a subdiagonal.
* You can also use Dynamic so the index can be set at runtime.
*
* The matrix is not required to be square.
*
* This class represents an expression of the main diagonal, or any sub/super diagonal
- * of a square matrix. It is the return type of MatrixBase::diagonal() and MatrixBase::diagonal(int) and most of the
+ * of a matrix. It is the return type of MatrixBase::diagonal() and MatrixBase::diagonal(Index) and most of the
* time this is the only way it is used.
*
- * \sa MatrixBase::diagonal(), MatrixBase::diagonal(int)
+ * \sa MatrixBase::diagonal(), MatrixBase::diagonal(Index)
*/
-template<typename MatrixType, int Index>
-struct ei_traits<Diagonal<MatrixType,Index> >
+template<typename MatrixType, int DiagIndex>
+struct ei_traits<Diagonal<MatrixType,DiagIndex> >
: ei_traits<MatrixType>
{
typedef typename ei_nested<MatrixType>::type MatrixTypeNested;
typedef typename ei_unref<MatrixTypeNested>::type _MatrixTypeNested;
+ typedef typename MatrixType::StorageKind StorageKind;
enum {
- AbsIndex = Index<0 ? -Index : Index, // only used if Index != Dynamic
- RowsAtCompileTime = (int(Index) == Dynamic || int(MatrixType::SizeAtCompileTime) == Dynamic) ? Dynamic
+ AbsDiagIndex = DiagIndex<0 ? -DiagIndex : DiagIndex, // only used if DiagIndex != Dynamic
+ RowsAtCompileTime = (int(DiagIndex) == Dynamic || int(MatrixType::SizeAtCompileTime) == Dynamic) ? Dynamic
: (EIGEN_ENUM_MIN(MatrixType::RowsAtCompileTime,
- MatrixType::ColsAtCompileTime) - AbsIndex),
+ MatrixType::ColsAtCompileTime) - AbsDiagIndex),
ColsAtCompileTime = 1,
MaxRowsAtCompileTime = int(MatrixType::MaxSizeAtCompileTime) == Dynamic ? Dynamic
- : Index == Dynamic ? EIGEN_ENUM_MIN(MatrixType::MaxRowsAtCompileTime, MatrixType::MaxColsAtCompileTime)
- : (EIGEN_ENUM_MIN(MatrixType::MaxRowsAtCompileTime, MatrixType::MaxColsAtCompileTime) - AbsIndex),
+ : DiagIndex == Dynamic ? EIGEN_ENUM_MIN(MatrixType::MaxRowsAtCompileTime, MatrixType::MaxColsAtCompileTime)
+ : (EIGEN_ENUM_MIN(MatrixType::MaxRowsAtCompileTime, MatrixType::MaxColsAtCompileTime) - AbsDiagIndex),
MaxColsAtCompileTime = 1,
Flags = (unsigned int)_MatrixTypeNested::Flags & (HereditaryBits | LinearAccessBit | DirectAccessBit) & ~RowMajorBit,
CoeffReadCost = _MatrixTypeNested::CoeffReadCost,
@@ -66,61 +67,62 @@
};
};
-template<typename MatrixType, int Index> class Diagonal
- : public ei_dense_xpr_base< Diagonal<MatrixType,Index> >::type
+template<typename MatrixType, int DiagIndex> class Diagonal
+ : public ei_dense_xpr_base< Diagonal<MatrixType,DiagIndex> >::type
{
- // some compilers may fail to optimize std::max etc in case of compile-time constants...
- EIGEN_STRONG_INLINE int absIndex() const { return m_index.value()>0 ? m_index.value() : -m_index.value(); }
- EIGEN_STRONG_INLINE int rowOffset() const { return m_index.value()>0 ? 0 : -m_index.value(); }
- EIGEN_STRONG_INLINE int colOffset() const { return m_index.value()>0 ? m_index.value() : 0; }
-
public:
typedef typename ei_dense_xpr_base<Diagonal>::type Base;
EIGEN_DENSE_PUBLIC_INTERFACE(Diagonal)
- inline Diagonal(const MatrixType& matrix, int index = Index) : m_matrix(matrix), m_index(index) {}
+ inline Diagonal(const MatrixType& matrix, Index index = DiagIndex) : m_matrix(matrix), m_index(index) {}
EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Diagonal)
- inline int rows() const
+ inline Index rows() const
{ return m_index.value()<0 ? std::min(m_matrix.cols(),m_matrix.rows()+m_index.value()) : std::min(m_matrix.rows(),m_matrix.cols()-m_index.value()); }
- inline int cols() const { return 1; }
+ inline Index cols() const { return 1; }
- inline int innerStride() const
+ inline Index innerStride() const
{
return m_matrix.outerStride() + 1;
}
- inline int outerStride() const
+ inline Index outerStride() const
{
return 0;
}
- inline Scalar& coeffRef(int row, int)
+ inline Scalar& coeffRef(Index row, Index)
{
return m_matrix.const_cast_derived().coeffRef(row+rowOffset(), row+colOffset());
}
- inline CoeffReturnType coeff(int row, int) const
+ inline CoeffReturnType coeff(Index row, Index) const
{
return m_matrix.coeff(row+rowOffset(), row+colOffset());
}
- inline Scalar& coeffRef(int index)
+ inline Scalar& coeffRef(Index index)
{
return m_matrix.const_cast_derived().coeffRef(index+rowOffset(), index+colOffset());
}
- inline CoeffReturnType coeff(int index) const
+ inline CoeffReturnType coeff(Index index) const
{
return m_matrix.coeff(index+rowOffset(), index+colOffset());
}
protected:
const typename MatrixType::Nested m_matrix;
- const ei_int_if_dynamic<Index> m_index;
+ const ei_variable_if_dynamic<Index, DiagIndex> m_index;
+
+ private:
+ // some compilers may fail to optimize std::max etc in case of compile-time constants...
+ EIGEN_STRONG_INLINE Index absDiagIndex() const { return m_index.value()>0 ? m_index.value() : -m_index.value(); }
+ EIGEN_STRONG_INLINE Index rowOffset() const { return m_index.value()>0 ? 0 : -m_index.value(); }
+ EIGEN_STRONG_INLINE Index colOffset() const { return m_index.value()>0 ? m_index.value() : 0; }
};
/** \returns an expression of the main diagonal of the matrix \c *this
@@ -146,12 +148,12 @@
return Diagonal<Derived, 0>(derived());
}
-/** \returns an expression of the \a Index-th sub or super diagonal of the matrix \c *this
+/** \returns an expression of the \a DiagIndex-th sub or super diagonal of the matrix \c *this
*
* \c *this is not required to be square.
*
- * The template parameter \a Index represent a super diagonal if \a Index > 0
- * and a sub diagonal otherwise. \a Index == 0 is equivalent to the main diagonal.
+ * The template parameter \a DiagIndex represents a super diagonal if \a DiagIndex > 0
+ * and a sub diagonal otherwise. \a DiagIndex == 0 is equivalent to the main diagonal.
*
* Example: \include MatrixBase_diagonal_int.cpp
* Output: \verbinclude MatrixBase_diagonal_int.out
@@ -159,45 +161,45 @@
* \sa MatrixBase::diagonal(), class Diagonal */
template<typename Derived>
inline Diagonal<Derived, Dynamic>
-MatrixBase<Derived>::diagonal(int index)
+MatrixBase<Derived>::diagonal(Index index)
{
return Diagonal<Derived, Dynamic>(derived(), index);
}
-/** This is the const version of diagonal(int). */
+/** This is the const version of diagonal(Index). */
template<typename Derived>
inline const Diagonal<Derived, Dynamic>
-MatrixBase<Derived>::diagonal(int index) const
+MatrixBase<Derived>::diagonal(Index index) const
{
return Diagonal<Derived, Dynamic>(derived(), index);
}
-/** \returns an expression of the \a Index-th sub or super diagonal of the matrix \c *this
+/** \returns an expression of the \a DiagIndex-th sub or super diagonal of the matrix \c *this
*
* \c *this is not required to be square.
*
- * The template parameter \a Index represent a super diagonal if \a Index > 0
- * and a sub diagonal otherwise. \a Index == 0 is equivalent to the main diagonal.
+ * The template parameter \a DiagIndex represents a super diagonal if \a DiagIndex > 0
+ * and a sub diagonal otherwise. \a DiagIndex == 0 is equivalent to the main diagonal.
*
* Example: \include MatrixBase_diagonal_template_int.cpp
* Output: \verbinclude MatrixBase_diagonal_template_int.out
*
* \sa MatrixBase::diagonal(), class Diagonal */
template<typename Derived>
-template<int Index>
-inline Diagonal<Derived,Index>
+template<int DiagIndex>
+inline Diagonal<Derived,DiagIndex>
MatrixBase<Derived>::diagonal()
{
- return Diagonal<Derived,Index>(derived());
+ return Diagonal<Derived,DiagIndex>(derived());
}
/** This is the const version of diagonal<int>(). */
template<typename Derived>
-template<int Index>
-inline const Diagonal<Derived,Index>
+template<int DiagIndex>
+inline const Diagonal<Derived,DiagIndex>
MatrixBase<Derived>::diagonal() const
{
- return Diagonal<Derived,Index>(derived());
+ return Diagonal<Derived,DiagIndex>(derived());
}
#endif // EIGEN_DIAGONAL_H
diff --git a/Eigen/src/Core/DiagonalMatrix.h b/Eigen/src/Core/DiagonalMatrix.h
index 774b0d7..8d3b458 100644
--- a/Eigen/src/Core/DiagonalMatrix.h
+++ b/Eigen/src/Core/DiagonalMatrix.h
@@ -33,6 +33,8 @@
public:
typedef typename ei_traits<Derived>::DiagonalVectorType DiagonalVectorType;
typedef typename DiagonalVectorType::Scalar Scalar;
+ typedef typename ei_traits<Derived>::StorageKind StorageKind;
+ typedef typename ei_index<StorageKind>::type Index;
enum {
RowsAtCompileTime = DiagonalVectorType::SizeAtCompileTime,
@@ -61,8 +63,8 @@
inline const DiagonalVectorType& diagonal() const { return derived().diagonal(); }
inline DiagonalVectorType& diagonal() { return derived().diagonal(); }
- inline int rows() const { return diagonal().size(); }
- inline int cols() const { return diagonal().size(); }
+ inline Index rows() const { return diagonal().size(); }
+ inline Index cols() const { return diagonal().size(); }
template<typename MatrixDerived>
const DiagonalProduct<MatrixDerived, Derived, OnTheLeft>
@@ -100,6 +102,7 @@
: ei_traits<Matrix<_Scalar,SizeAtCompileTime,SizeAtCompileTime,0,MaxSizeAtCompileTime,MaxSizeAtCompileTime> >
{
typedef Matrix<_Scalar,SizeAtCompileTime,1,0,MaxSizeAtCompileTime,1> DiagonalVectorType;
+ typedef Dense StorageKind;
};
template<typename _Scalar, int SizeAtCompileTime, int MaxSizeAtCompileTime>
@@ -111,6 +114,8 @@
typedef typename ei_traits<DiagonalMatrix>::DiagonalVectorType DiagonalVectorType;
typedef const DiagonalMatrix& Nested;
typedef _Scalar Scalar;
+ typedef typename ei_traits<DiagonalMatrix>::StorageKind StorageKind;
+ typedef typename ei_index<StorageKind>::type Index;
#endif
protected:
@@ -128,7 +133,7 @@
inline DiagonalMatrix() {}
/** Constructs a diagonal matrix with given dimension */
- inline DiagonalMatrix(int dim) : m_diagonal(dim) {}
+ inline DiagonalMatrix(Index dim) : m_diagonal(dim) {}
/** 2D constructor. */
inline DiagonalMatrix(const Scalar& x, const Scalar& y) : m_diagonal(x,y) {}
@@ -170,15 +175,15 @@
#endif
/** Resizes to given size. */
- inline void resize(int size) { m_diagonal.resize(size); }
+ inline void resize(Index size) { m_diagonal.resize(size); }
/** Sets all coefficients to zero. */
inline void setZero() { m_diagonal.setZero(); }
/** Resizes and sets all coefficients to zero. */
- inline void setZero(int size) { m_diagonal.setZero(size); }
+ inline void setZero(Index size) { m_diagonal.setZero(size); }
/** Sets this matrix to be the identity matrix of the current size. */
inline void setIdentity() { m_diagonal.setOnes(); }
/** Sets this matrix to be the identity matrix of the given size. */
- inline void setIdentity(int size) { m_diagonal.setOnes(size); }
+ inline void setIdentity(Index size) { m_diagonal.setOnes(size); }
};
/** \class DiagonalWrapper
@@ -198,6 +203,7 @@
{
typedef _DiagonalVectorType DiagonalVectorType;
typedef typename DiagonalVectorType::Scalar Scalar;
+ typedef typename DiagonalVectorType::StorageKind StorageKind;
enum {
RowsAtCompileTime = DiagonalVectorType::SizeAtCompileTime,
ColsAtCompileTime = DiagonalVectorType::SizeAtCompileTime,
@@ -257,13 +263,13 @@
{
if(cols() != rows()) return false;
RealScalar maxAbsOnDiagonal = static_cast<RealScalar>(-1);
- for(int j = 0; j < cols(); ++j)
+ for(Index j = 0; j < cols(); ++j)
{
RealScalar absOnDiagonal = ei_abs(coeff(j,j));
if(absOnDiagonal > maxAbsOnDiagonal) maxAbsOnDiagonal = absOnDiagonal;
}
- for(int j = 0; j < cols(); ++j)
- for(int i = 0; i < j; ++i)
+ for(Index j = 0; j < cols(); ++j)
+ for(Index i = 0; i < j; ++i)
{
if(!ei_isMuchSmallerThan(coeff(i, j), maxAbsOnDiagonal, prec)) return false;
if(!ei_isMuchSmallerThan(coeff(j, i), maxAbsOnDiagonal, prec)) return false;
diff --git a/Eigen/src/Core/DiagonalProduct.h b/Eigen/src/Core/DiagonalProduct.h
index 868b441..f3af814 100644
--- a/Eigen/src/Core/DiagonalProduct.h
+++ b/Eigen/src/Core/DiagonalProduct.h
@@ -57,23 +57,23 @@
ei_assert(diagonal.diagonal().size() == (ProductOrder == OnTheLeft ? matrix.rows() : matrix.cols()));
}
- inline int rows() const { return m_matrix.rows(); }
- inline int cols() const { return m_matrix.cols(); }
+ inline Index rows() const { return m_matrix.rows(); }
+ inline Index cols() const { return m_matrix.cols(); }
- const Scalar coeff(int row, int col) const
+ const Scalar coeff(Index row, Index col) const
{
return m_diagonal.diagonal().coeff(ProductOrder == OnTheLeft ? row : col) * m_matrix.coeff(row, col);
}
template<int LoadMode>
- EIGEN_STRONG_INLINE PacketScalar packet(int row, int col) const
+ EIGEN_STRONG_INLINE PacketScalar packet(Index row, Index col) const
{
enum {
StorageOrder = Flags & RowMajorBit ? RowMajor : ColMajor,
InnerSize = (MatrixType::Flags & RowMajorBit) ? MatrixType::ColsAtCompileTime : MatrixType::RowsAtCompileTime,
DiagonalVectorPacketLoadMode = (LoadMode == Aligned && ((InnerSize%16) == 0)) ? Aligned : Unaligned
};
- const int indexInDiagonalVector = ProductOrder == OnTheLeft ? row : col;
+ const Index indexInDiagonalVector = ProductOrder == OnTheLeft ? row : col;
if((int(StorageOrder) == RowMajor && int(ProductOrder) == OnTheLeft)
||(int(StorageOrder) == ColMajor && int(ProductOrder) == OnTheRight))
diff --git a/Eigen/src/Core/Dot.h b/Eigen/src/Core/Dot.h
index 4bd8187..6e54dac 100644
--- a/Eigen/src/Core/Dot.h
+++ b/Eigen/src/Core/Dot.h
@@ -159,11 +159,11 @@
bool MatrixBase<Derived>::isUnitary(RealScalar prec) const
{
typename Derived::Nested nested(derived());
- for(int i = 0; i < cols(); ++i)
+ for(Index i = 0; i < cols(); ++i)
{
if(!ei_isApprox(nested.col(i).squaredNorm(), static_cast<RealScalar>(1), prec))
return false;
- for(int j = 0; j < i; ++j)
+ for(Index j = 0; j < i; ++j)
if(!ei_isMuchSmallerThan(nested.col(i).dot(nested.col(j)), static_cast<Scalar>(1), prec))
return false;
}
diff --git a/Eigen/src/Core/EigenBase.h b/Eigen/src/Core/EigenBase.h
index e583fdd..c9d3bd8 100644
--- a/Eigen/src/Core/EigenBase.h
+++ b/Eigen/src/Core/EigenBase.h
@@ -39,6 +39,9 @@
{
// typedef typename ei_plain_matrix_type<Derived>::type PlainObject;
+ typedef typename ei_traits<Derived>::StorageKind StorageKind;
+ typedef typename ei_index<StorageKind>::type Index;
+
/** \returns a reference to the derived object */
Derived& derived() { return *static_cast<Derived*>(this); }
/** \returns a const reference to the derived object */
@@ -48,12 +51,12 @@
{ return *static_cast<Derived*>(const_cast<EigenBase*>(this)); }
/** \returns the number of rows. \sa cols(), RowsAtCompileTime */
- inline int rows() const { return derived().rows(); }
+ inline Index rows() const { return derived().rows(); }
/** \returns the number of columns. \sa rows(), ColsAtCompileTime*/
- inline int cols() const { return derived().cols(); }
+ inline Index cols() const { return derived().cols(); }
/** \returns the number of coefficients, which is rows()*cols().
* \sa rows(), cols(), SizeAtCompileTime. */
- inline int size() const { return rows() * cols(); }
+ inline Index size() const { return rows() * cols(); }
/** \internal Don't use it, but do the equivalent: \code dst = *this; \endcode */
template<typename Dest> inline void evalTo(Dest& dst) const
diff --git a/Eigen/src/Core/Flagged.h b/Eigen/src/Core/Flagged.h
index 9413b74..7936f9d 100644
--- a/Eigen/src/Core/Flagged.h
+++ b/Eigen/src/Core/Flagged.h
@@ -58,51 +58,51 @@
inline Flagged(const ExpressionType& matrix) : m_matrix(matrix) {}
- inline int rows() const { return m_matrix.rows(); }
- inline int cols() const { return m_matrix.cols(); }
- inline int outerStride() const { return m_matrix.outerStride(); }
- inline int innerStride() const { return m_matrix.innerStride(); }
+ inline Index rows() const { return m_matrix.rows(); }
+ inline Index cols() const { return m_matrix.cols(); }
+ inline Index outerStride() const { return m_matrix.outerStride(); }
+ inline Index innerStride() const { return m_matrix.innerStride(); }
- inline const Scalar coeff(int row, int col) const
+ inline const Scalar coeff(Index row, Index col) const
{
return m_matrix.coeff(row, col);
}
- inline Scalar& coeffRef(int row, int col)
+ inline Scalar& coeffRef(Index row, Index col)
{
return m_matrix.const_cast_derived().coeffRef(row, col);
}
- inline const Scalar coeff(int index) const
+ inline const Scalar coeff(Index index) const
{
return m_matrix.coeff(index);
}
- inline Scalar& coeffRef(int index)
+ inline Scalar& coeffRef(Index index)
{
return m_matrix.const_cast_derived().coeffRef(index);
}
template<int LoadMode>
- inline const PacketScalar packet(int row, int col) const
+ inline const PacketScalar packet(Index row, Index col) const
{
return m_matrix.template packet<LoadMode>(row, col);
}
template<int LoadMode>
- inline void writePacket(int row, int col, const PacketScalar& x)
+ inline void writePacket(Index row, Index col, const PacketScalar& x)
{
m_matrix.const_cast_derived().template writePacket<LoadMode>(row, col, x);
}
template<int LoadMode>
- inline const PacketScalar packet(int index) const
+ inline const PacketScalar packet(Index index) const
{
return m_matrix.template packet<LoadMode>(index);
}
template<int LoadMode>
- inline void writePacket(int index, const PacketScalar& x)
+ inline void writePacket(Index index, const PacketScalar& x)
{
m_matrix.const_cast_derived().template writePacket<LoadMode>(index, x);
}
diff --git a/Eigen/src/Core/ForceAlignedAccess.h b/Eigen/src/Core/ForceAlignedAccess.h
index eedd577..7db138b 100644
--- a/Eigen/src/Core/ForceAlignedAccess.h
+++ b/Eigen/src/Core/ForceAlignedAccess.h
@@ -50,51 +50,51 @@
inline ForceAlignedAccess(const ExpressionType& matrix) : m_expression(matrix) {}
- inline int rows() const { return m_expression.rows(); }
- inline int cols() const { return m_expression.cols(); }
- inline int outerStride() const { return m_expression.outerStride(); }
- inline int innerStride() const { return m_expression.innerStride(); }
+ inline Index rows() const { return m_expression.rows(); }
+ inline Index cols() const { return m_expression.cols(); }
+ inline Index outerStride() const { return m_expression.outerStride(); }
+ inline Index innerStride() const { return m_expression.innerStride(); }
- inline const CoeffReturnType coeff(int row, int col) const
+ inline const CoeffReturnType coeff(Index row, Index col) const
{
return m_expression.coeff(row, col);
}
- inline Scalar& coeffRef(int row, int col)
+ inline Scalar& coeffRef(Index row, Index col)
{
return m_expression.const_cast_derived().coeffRef(row, col);
}
- inline const CoeffReturnType coeff(int index) const
+ inline const CoeffReturnType coeff(Index index) const
{
return m_expression.coeff(index);
}
- inline Scalar& coeffRef(int index)
+ inline Scalar& coeffRef(Index index)
{
return m_expression.const_cast_derived().coeffRef(index);
}
template<int LoadMode>
- inline const PacketScalar packet(int row, int col) const
+ inline const PacketScalar packet(Index row, Index col) const
{
return m_expression.template packet<Aligned>(row, col);
}
template<int LoadMode>
- inline void writePacket(int row, int col, const PacketScalar& x)
+ inline void writePacket(Index row, Index col, const PacketScalar& x)
{
m_expression.const_cast_derived().template writePacket<Aligned>(row, col, x);
}
template<int LoadMode>
- inline const PacketScalar packet(int index) const
+ inline const PacketScalar packet(Index index) const
{
return m_expression.template packet<Aligned>(index);
}
template<int LoadMode>
- inline void writePacket(int index, const PacketScalar& x)
+ inline void writePacket(Index index, const PacketScalar& x)
{
m_expression.const_cast_derived().template writePacket<Aligned>(index, x);
}
diff --git a/Eigen/src/Core/Functors.h b/Eigen/src/Core/Functors.h
index a42f36b..d559dee 100644
--- a/Eigen/src/Core/Functors.h
+++ b/Eigen/src/Core/Functors.h
@@ -464,8 +464,10 @@
typedef typename ei_packet_traits<Scalar>::type PacketScalar;
EIGEN_STRONG_INLINE ei_scalar_constant_op(const ei_scalar_constant_op& other) : m_other(other.m_other) { }
EIGEN_STRONG_INLINE ei_scalar_constant_op(const Scalar& other) : m_other(other) { }
- EIGEN_STRONG_INLINE const Scalar operator() (int, int = 0) const { return m_other; }
- EIGEN_STRONG_INLINE const PacketScalar packetOp(int, int = 0) const { return ei_pset1(m_other); }
+ template<typename Index>
+ EIGEN_STRONG_INLINE const Scalar operator() (Index, Index = 0) const { return m_other; }
+ template<typename Index>
+ EIGEN_STRONG_INLINE const PacketScalar packetOp(Index, Index = 0) const { return ei_pset1(m_other); }
const Scalar m_other;
};
template<typename Scalar>
@@ -474,7 +476,8 @@
template<typename Scalar> struct ei_scalar_identity_op {
EIGEN_EMPTY_STRUCT_CTOR(ei_scalar_identity_op)
- EIGEN_STRONG_INLINE const Scalar operator() (int row, int col) const { return row==col ? Scalar(1) : Scalar(0); }
+ template<typename Index>
+ EIGEN_STRONG_INLINE const Scalar operator() (Index row, Index col) const { return row==col ? Scalar(1) : Scalar(0); }
};
template<typename Scalar>
struct ei_functor_traits<ei_scalar_identity_op<Scalar> >
@@ -497,8 +500,10 @@
m_packetStep(ei_pset1(ei_packet_traits<Scalar>::size*step)),
m_base(ei_padd(ei_pset1(low),ei_pmul(ei_pset1(step),ei_plset<Scalar>(-ei_packet_traits<Scalar>::size)))) {}
- EIGEN_STRONG_INLINE const Scalar operator() (int i) const { return m_low+i*m_step; }
- EIGEN_STRONG_INLINE const PacketScalar packetOp(int) const { return m_base = ei_padd(m_base,m_packetStep); }
+ template<typename Index>
+ EIGEN_STRONG_INLINE const Scalar operator() (Index i) const { return m_low+i*m_step; }
+ template<typename Index>
+ EIGEN_STRONG_INLINE const PacketScalar packetOp(Index) const { return m_base = ei_padd(m_base,m_packetStep); }
const Scalar m_low;
const Scalar m_step;
@@ -518,8 +523,10 @@
m_low(low), m_step(step),
m_lowPacket(ei_pset1(m_low)), m_stepPacket(ei_pset1(m_step)), m_interPacket(ei_plset<Scalar>(0)) {}
- EIGEN_STRONG_INLINE const Scalar operator() (int i) const { return m_low+i*m_step; }
- EIGEN_STRONG_INLINE const PacketScalar packetOp(int i) const
+ template<typename Index>
+ EIGEN_STRONG_INLINE const Scalar operator() (Index i) const { return m_low+i*m_step; }
+ template<typename Index>
+ EIGEN_STRONG_INLINE const PacketScalar packetOp(Index i) const
{ return ei_padd(m_lowPacket, ei_pmul(m_stepPacket, ei_padd(ei_pset1<Scalar>(i),m_interPacket))); }
const Scalar m_low;
@@ -541,8 +548,10 @@
{
typedef typename ei_packet_traits<Scalar>::type PacketScalar;
ei_linspaced_op(Scalar low, Scalar high, int num_steps) : impl(low, (high-low)/(num_steps-1)) {}
- EIGEN_STRONG_INLINE const Scalar operator() (int i, int = 0) const { return impl(i); }
- EIGEN_STRONG_INLINE const PacketScalar packetOp(int i, int = 0) const { return impl.packetOp(i); }
+ template<typename Index>
+ EIGEN_STRONG_INLINE const Scalar operator() (Index i, Index = 0) const { return impl(i); }
+ template<typename Index>
+ EIGEN_STRONG_INLINE const PacketScalar packetOp(Index i, Index = 0) const { return impl.packetOp(i); }
// This proxy object handles the actual required temporaries, the different
// implementations (random vs. sequential access) as well as the
// correct piping to size 2/4 packet operations.
diff --git a/Eigen/src/Core/Fuzzy.h b/Eigen/src/Core/Fuzzy.h
index 432da42..2996409 100644
--- a/Eigen/src/Core/Fuzzy.h
+++ b/Eigen/src/Core/Fuzzy.h
@@ -201,13 +201,14 @@
struct ei_fuzzy_selector<Derived,OtherDerived,false>
{
typedef typename Derived::RealScalar RealScalar;
+ typedef typename Derived::Index Index;
static bool isApprox(const Derived& self, const OtherDerived& other, RealScalar prec)
{
EIGEN_STATIC_ASSERT_SAME_MATRIX_SIZE(Derived,OtherDerived)
ei_assert(self.rows() == other.rows() && self.cols() == other.cols());
typename Derived::Nested nested(self);
typename OtherDerived::Nested otherNested(other);
- for(int i = 0; i < self.cols(); ++i)
+ for(Index i = 0; i < self.cols(); ++i)
if((nested.col(i) - otherNested.col(i)).squaredNorm()
> std::min(nested.col(i).squaredNorm(), otherNested.col(i).squaredNorm()) * prec * prec)
return false;
@@ -216,7 +217,7 @@
static bool isMuchSmallerThan(const Derived& self, const RealScalar& other, RealScalar prec)
{
typename Derived::Nested nested(self);
- for(int i = 0; i < self.cols(); ++i)
+ for(Index i = 0; i < self.cols(); ++i)
if(nested.col(i).squaredNorm() > ei_abs2(other * prec))
return false;
return true;
@@ -227,7 +228,7 @@
ei_assert(self.rows() == other.rows() && self.cols() == other.cols());
typename Derived::Nested nested(self);
typename OtherDerived::Nested otherNested(other);
- for(int i = 0; i < self.cols(); ++i)
+ for(Index i = 0; i < self.cols(); ++i)
if(nested.col(i).squaredNorm() > otherNested.col(i).squaredNorm() * prec * prec)
return false;
return true;
diff --git a/Eigen/src/Core/IO.h b/Eigen/src/Core/IO.h
index 3da92d2..f9b2f08 100644
--- a/Eigen/src/Core/IO.h
+++ b/Eigen/src/Core/IO.h
@@ -157,8 +157,9 @@
{
const typename Derived::Nested m = _m;
typedef typename Derived::Scalar Scalar;
+ typedef typename Derived::Index Index;
- int width = 0;
+ Index width = 0;
std::streamsize explicit_precision;
if(fmt.precision == StreamPrecision)
@@ -185,26 +186,26 @@
if(align_cols)
{
// compute the largest width
- for(int j = 1; j < m.cols(); ++j)
- for(int i = 0; i < m.rows(); ++i)
+ for(Index j = 1; j < m.cols(); ++j)
+ for(Index i = 0; i < m.rows(); ++i)
{
std::stringstream sstr;
if(explicit_precision) sstr.precision(explicit_precision);
sstr << m.coeff(i,j);
- width = std::max<int>(width, int(sstr.str().length()));
+ width = std::max<Index>(width, Index(sstr.str().length()));
}
}
std::streamsize old_precision = 0;
if(explicit_precision) old_precision = s.precision(explicit_precision);
s << fmt.matPrefix;
- for(int i = 0; i < m.rows(); ++i)
+ for(Index i = 0; i < m.rows(); ++i)
{
if (i)
s << fmt.rowSpacer;
s << fmt.rowPrefix;
if(width) s.width(width);
s << m.coeff(i, 0);
- for(int j = 1; j < m.cols(); ++j)
+ for(Index j = 1; j < m.cols(); ++j)
{
s << fmt.coeffSeparator;
if (width) s.width(width);
diff --git a/Eigen/src/Core/Map.h b/Eigen/src/Core/Map.h
index a22779b..6ca24b7 100644
--- a/Eigen/src/Core/Map.h
+++ b/Eigen/src/Core/Map.h
@@ -109,12 +109,12 @@
EIGEN_DENSE_PUBLIC_INTERFACE(Map)
- inline int innerStride() const
+ inline Index innerStride() const
{
return StrideType::InnerStrideAtCompileTime != 0 ? m_stride.inner() : 1;
}
- inline int outerStride() const
+ inline Index outerStride() const
{
return StrideType::OuterStrideAtCompileTime != 0 ? m_stride.outer()
: IsVectorAtCompileTime ? this->size()
@@ -139,7 +139,7 @@
* \param size the size of the vector expression
* \param stride optional Stride object, passing the strides.
*/
- inline Map(const Scalar* data, int size, const StrideType& stride = StrideType())
+ inline Map(const Scalar* data, Index size, const StrideType& stride = StrideType())
: Base(data, size), m_stride(stride)
{
PlainObjectType::Base::_check_template_params();
@@ -152,7 +152,7 @@
* \param cols the number of columns of the matrix expression
* \param stride optional Stride object, passing the strides.
*/
- inline Map(const Scalar* data, int rows, int cols, const StrideType& stride = StrideType())
+ inline Map(const Scalar* data, Index rows, Index cols, const StrideType& stride = StrideType())
: Base(data, rows, cols), m_stride(stride)
{
PlainObjectType::Base::_check_template_params();
diff --git a/Eigen/src/Core/MapBase.h b/Eigen/src/Core/MapBase.h
index 08b81c1..8cdd452 100644
--- a/Eigen/src/Core/MapBase.h
+++ b/Eigen/src/Core/MapBase.h
@@ -44,8 +44,13 @@
SizeAtCompileTime = Base::SizeAtCompileTime
};
+
+ typedef typename ei_traits<Derived>::StorageKind StorageKind;
+ typedef typename ei_index<StorageKind>::type Index;
typedef typename ei_traits<Derived>::Scalar Scalar;
- typedef typename Base::PacketScalar PacketScalar;
+ typedef typename ei_packet_traits<Scalar>::type PacketScalar;
+ typedef typename NumTraits<Scalar>::Real RealScalar;
+
using Base::derived;
// using Base::RowsAtCompileTime;
// using Base::ColsAtCompileTime;
@@ -82,8 +87,8 @@
typedef typename Base::CoeffReturnType CoeffReturnType;
- inline int rows() const { return m_rows.value(); }
- inline int cols() const { return m_cols.value(); }
+ inline Index rows() const { return m_rows.value(); }
+ inline Index cols() const { return m_cols.value(); }
/** Returns a pointer to the first coefficient of the matrix or vector.
*
@@ -93,50 +98,50 @@
*/
inline const Scalar* data() const { return m_data; }
- inline const Scalar& coeff(int row, int col) const
+ inline const Scalar& coeff(Index row, Index col) const
{
return m_data[col * colStride() + row * rowStride()];
}
- inline Scalar& coeffRef(int row, int col)
+ inline Scalar& coeffRef(Index row, Index col)
{
return const_cast<Scalar*>(m_data)[col * colStride() + row * rowStride()];
}
- inline const Scalar& coeff(int index) const
+ inline const Scalar& coeff(Index index) const
{
ei_assert(Derived::IsVectorAtCompileTime || (ei_traits<Derived>::Flags & LinearAccessBit));
return m_data[index * innerStride()];
}
- inline Scalar& coeffRef(int index)
+ inline Scalar& coeffRef(Index index)
{
ei_assert(Derived::IsVectorAtCompileTime || (ei_traits<Derived>::Flags & LinearAccessBit));
return const_cast<Scalar*>(m_data)[index * innerStride()];
}
template<int LoadMode>
- inline PacketScalar packet(int row, int col) const
+ inline PacketScalar packet(Index row, Index col) const
{
return ei_ploadt<Scalar, LoadMode>
(m_data + (col * colStride() + row * rowStride()));
}
template<int LoadMode>
- inline PacketScalar packet(int index) const
+ inline PacketScalar packet(Index index) const
{
return ei_ploadt<Scalar, LoadMode>(m_data + index * innerStride());
}
template<int StoreMode>
- inline void writePacket(int row, int col, const PacketScalar& x)
+ inline void writePacket(Index row, Index col, const PacketScalar& x)
{
ei_pstoret<Scalar, PacketScalar, StoreMode>
(const_cast<Scalar*>(m_data) + (col * colStride() + row * rowStride()), x);
}
template<int StoreMode>
- inline void writePacket(int index, const PacketScalar& x)
+ inline void writePacket(Index index, const PacketScalar& x)
{
ei_pstoret<Scalar, PacketScalar, StoreMode>
(const_cast<Scalar*>(m_data) + index * innerStride(), x);
@@ -148,10 +153,10 @@
checkSanity();
}
- inline MapBase(const Scalar* data, int size)
+ inline MapBase(const Scalar* data, Index size)
: m_data(data),
- m_rows(RowsAtCompileTime == Dynamic ? size : RowsAtCompileTime),
- m_cols(ColsAtCompileTime == Dynamic ? size : ColsAtCompileTime)
+ m_rows(RowsAtCompileTime == Dynamic ? size : Index(RowsAtCompileTime)),
+ m_cols(ColsAtCompileTime == Dynamic ? size : Index(ColsAtCompileTime))
{
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
ei_assert(size >= 0);
@@ -159,7 +164,7 @@
checkSanity();
}
- inline MapBase(const Scalar* data, int rows, int cols)
+ inline MapBase(const Scalar* data, Index rows, Index cols)
: m_data(data), m_rows(rows), m_cols(cols)
{
ei_assert( (data == 0)
@@ -187,8 +192,8 @@
}
const Scalar* EIGEN_RESTRICT m_data;
- const ei_int_if_dynamic<RowsAtCompileTime> m_rows;
- const ei_int_if_dynamic<ColsAtCompileTime> m_cols;
+ const ei_variable_if_dynamic<Index, RowsAtCompileTime> m_rows;
+ const ei_variable_if_dynamic<Index, ColsAtCompileTime> m_cols;
};
#endif // EIGEN_MAPBASE_H
diff --git a/Eigen/src/Core/MathFunctions.h b/Eigen/src/Core/MathFunctions.h
index cc77799..53e5762 100644
--- a/Eigen/src/Core/MathFunctions.h
+++ b/Eigen/src/Core/MathFunctions.h
@@ -657,7 +657,7 @@
{
static inline Scalar run(Scalar x, Scalar y)
{
- int res = 1;
+ Scalar res = 1;
ei_assert(!NumTraits<Scalar>::IsSigned || y >= 0);
if(y & 1) res *= x;
y >>= 1;
diff --git a/Eigen/src/Core/Matrix.h b/Eigen/src/Core/Matrix.h
index c61a3f3..4407b0d 100644
--- a/Eigen/src/Core/Matrix.h
+++ b/Eigen/src/Core/Matrix.h
@@ -206,7 +206,7 @@
* is called a null matrix. This constructor is the unique way to create null matrices: resizing
* a matrix to 0 is not supported.
*
- * \sa resize(int,int)
+ * \sa resize(Index,Index)
*/
EIGEN_STRONG_INLINE explicit Matrix() : Base()
{
@@ -225,7 +225,7 @@
* it is redundant to pass the dimension here, so it makes more sense to use the default
* constructor Matrix() instead.
*/
- EIGEN_STRONG_INLINE explicit Matrix(int dim)
+ EIGEN_STRONG_INLINE explicit Matrix(Index dim)
: Base(dim, RowsAtCompileTime == 1 ? 1 : dim, ColsAtCompileTime == 1 ? 1 : dim)
{
Base::_check_template_params();
@@ -248,7 +248,7 @@
* This is useful for dynamic-size matrices. For fixed-size matrices,
* it is redundant to pass these parameters, so one should use the default constructor
* Matrix() instead. */
- Matrix(int rows, int cols);
+ Matrix(Index rows, Index cols);
/** \brief Constructs an initialized 2D vector with given coefficients */
Matrix(const Scalar& x, const Scalar& y);
#endif
@@ -321,8 +321,8 @@
void swap(MatrixBase<OtherDerived> EIGEN_REF_TO_TEMPORARY other)
{ this->_swap(other.derived()); }
- inline int innerStride() const { return 1; }
- inline int outerStride() const { return this->innerSize(); }
+ inline Index innerStride() const { return 1; }
+ inline Index outerStride() const { return this->innerSize(); }
/////////// Geometry module ///////////
diff --git a/Eigen/src/Core/MatrixBase.h b/Eigen/src/Core/MatrixBase.h
index 9e2afe7..633b010 100644
--- a/Eigen/src/Core/MatrixBase.h
+++ b/Eigen/src/Core/MatrixBase.h
@@ -56,14 +56,14 @@
{
public:
#ifndef EIGEN_PARSED_BY_DOXYGEN
- /** The base class for a given storage type. */
typedef MatrixBase StorageBaseType;
-
+ typedef typename ei_traits<Derived>::StorageKind StorageKind;
+ typedef typename ei_index<StorageKind>::type Index;
typedef typename ei_traits<Derived>::Scalar Scalar;
typedef typename ei_packet_traits<Scalar>::type PacketScalar;
-
+ typedef typename NumTraits<Scalar>::Real RealScalar;
+
typedef DenseBase<Derived> Base;
-
using Base::RowsAtCompileTime;
using Base::ColsAtCompileTime;
using Base::SizeAtCompileTime;
@@ -97,14 +97,6 @@
#ifndef EIGEN_PARSED_BY_DOXYGEN
- /** This is the "real scalar" type; if the \a Scalar type is already real numbers
- * (e.g. int, float or double) then \a RealScalar is just the same as \a Scalar. If
- * \a Scalar is \a std::complex<T> then RealScalar is \a T.
- *
- * \sa class NumTraits
- */
- typedef typename NumTraits<Scalar>::Real RealScalar;
-
/** type of the equivalent square matrix */
typedef Matrix<Scalar,EIGEN_ENUM_MAX(RowsAtCompileTime,ColsAtCompileTime),
EIGEN_ENUM_MAX(RowsAtCompileTime,ColsAtCompileTime)> SquareMatrixType;
@@ -112,7 +104,7 @@
/** \returns the size of the main diagonal, which is min(rows(),cols()).
* \sa rows(), cols(), SizeAtCompileTime. */
- inline int diagonalSize() const { return std::min(rows(),cols()); }
+ inline Index diagonalSize() const { return std::min(rows(),cols()); }
/** \brief The plain matrix type corresponding to this expression.
*
@@ -211,8 +203,8 @@
template<int Index> Diagonal<Derived,Index> diagonal();
template<int Index> const Diagonal<Derived,Index> diagonal() const;
- Diagonal<Derived, Dynamic> diagonal(int index);
- const Diagonal<Derived, Dynamic> diagonal(int index) const;
+ Diagonal<Derived, Dynamic> diagonal(Index index);
+ const Diagonal<Derived, Dynamic> diagonal(Index index) const;
template<unsigned int Mode> TriangularView<Derived, Mode> part();
template<unsigned int Mode> const TriangularView<Derived, Mode> part() const;
@@ -224,9 +216,9 @@
template<unsigned int UpLo> const SelfAdjointView<Derived, UpLo> selfadjointView() const;
static const IdentityReturnType Identity();
- static const IdentityReturnType Identity(int rows, int cols);
- static const BasisReturnType Unit(int size, int i);
- static const BasisReturnType Unit(int i);
+ static const IdentityReturnType Identity(Index rows, Index cols);
+ static const BasisReturnType Unit(Index size, Index i);
+ static const BasisReturnType Unit(Index i);
static const BasisReturnType UnitX();
static const BasisReturnType UnitY();
static const BasisReturnType UnitZ();
@@ -235,7 +227,7 @@
const DiagonalWrapper<Derived> asDiagonal() const;
Derived& setIdentity();
- Derived& setIdentity(int rows, int cols);
+ Derived& setIdentity(Index rows, Index cols);
bool isIdentity(RealScalar prec = NumTraits<Scalar>::dummy_precision()) const;
bool isDiagonal(RealScalar prec = NumTraits<Scalar>::dummy_precision()) const;
@@ -329,7 +321,7 @@
template<typename OtherDerived>
PlainObject cross3(const MatrixBase<OtherDerived>& other) const;
PlainObject unitOrthogonal(void) const;
- Matrix<Scalar,3,1> eulerAngles(int a0, int a1, int a2) const;
+ Matrix<Scalar,3,1> eulerAngles(Index a0, Index a1, Index a2) const;
const ScalarMultipleReturnType operator*(const UniformScaling<Scalar>& s) const;
enum {
SizeMinusOne = SizeAtCompileTime==Dynamic ? Dynamic : SizeAtCompileTime-1
@@ -362,9 +354,9 @@
///////// Jacobi module /////////
template<typename OtherScalar>
- void applyOnTheLeft(int p, int q, const PlanarRotation<OtherScalar>& j);
+ void applyOnTheLeft(Index p, Index q, const PlanarRotation<OtherScalar>& j);
template<typename OtherScalar>
- void applyOnTheRight(int p, int q, const PlanarRotation<OtherScalar>& j);
+ void applyOnTheRight(Index p, Index q, const PlanarRotation<OtherScalar>& j);
///////// MatrixFunctions module /////////
@@ -398,17 +390,17 @@
inline const Cwise<Derived> cwise() const;
inline Cwise<Derived> cwise();
- VectorBlock<Derived> start(int size);
- const VectorBlock<Derived> start(int size) const;
- VectorBlock<Derived> end(int size);
- const VectorBlock<Derived> end(int size) const;
+ VectorBlock<Derived> start(Index size);
+ const VectorBlock<Derived> start(Index size) const;
+ VectorBlock<Derived> end(Index size);
+ const VectorBlock<Derived> end(Index size) const;
template<int Size> VectorBlock<Derived,Size> start();
template<int Size> const VectorBlock<Derived,Size> start() const;
template<int Size> VectorBlock<Derived,Size> end();
template<int Size> const VectorBlock<Derived,Size> end() const;
- Minor<Derived> minor(int row, int col);
- const Minor<Derived> minor(int row, int col) const;
+ Minor<Derived> minor(Index row, Index col);
+ const Minor<Derived> minor(Index row, Index col) const;
#endif
protected:
diff --git a/Eigen/src/Core/MatrixStorage.h b/Eigen/src/Core/MatrixStorage.h
index f1b92ae..aff83a6 100644
--- a/Eigen/src/Core/MatrixStorage.h
+++ b/Eigen/src/Core/MatrixStorage.h
@@ -97,12 +97,12 @@
inline explicit ei_matrix_storage() {}
inline ei_matrix_storage(ei_constructor_without_unaligned_array_assert)
: m_data(ei_constructor_without_unaligned_array_assert()) {}
- inline ei_matrix_storage(int,int,int) {}
+ inline ei_matrix_storage(DenseIndex,DenseIndex,DenseIndex) {}
inline void swap(ei_matrix_storage& other) { std::swap(m_data,other.m_data); }
- inline static int rows(void) {return _Rows;}
- inline static int cols(void) {return _Cols;}
- inline void conservativeResize(int,int,int) {}
- inline void resize(int,int,int) {}
+ inline static DenseIndex rows(void) {return _Rows;}
+ inline static DenseIndex cols(void) {return _Cols;}
+ inline void conservativeResize(DenseIndex,DenseIndex,DenseIndex) {}
+ inline void resize(DenseIndex,DenseIndex,DenseIndex) {}
inline const T *data() const { return m_data.array; }
inline T *data() { return m_data.array; }
};
@@ -113,12 +113,12 @@
public:
inline explicit ei_matrix_storage() {}
inline ei_matrix_storage(ei_constructor_without_unaligned_array_assert) {}
- inline ei_matrix_storage(int,int,int) {}
+ inline ei_matrix_storage(DenseIndex,DenseIndex,DenseIndex) {}
inline void swap(ei_matrix_storage& ) {}
- inline static int rows(void) {return _Rows;}
- inline static int cols(void) {return _Cols;}
- inline void conservativeResize(int,int,int) {}
- inline void resize(int,int,int) {}
+ inline static DenseIndex rows(void) {return _Rows;}
+ inline static DenseIndex cols(void) {return _Cols;}
+ inline void conservativeResize(DenseIndex,DenseIndex,DenseIndex) {}
+ inline void resize(DenseIndex,DenseIndex,DenseIndex) {}
inline const T *data() const { return 0; }
inline T *data() { return 0; }
};
@@ -127,19 +127,19 @@
template<typename T, int Size, int _Options> class ei_matrix_storage<T, Size, Dynamic, Dynamic, _Options>
{
ei_matrix_array<T,Size,_Options> m_data;
- int m_rows;
- int m_cols;
+ DenseIndex m_rows;
+ DenseIndex m_cols;
public:
inline explicit ei_matrix_storage() : m_rows(0), m_cols(0) {}
inline ei_matrix_storage(ei_constructor_without_unaligned_array_assert)
: m_data(ei_constructor_without_unaligned_array_assert()), m_rows(0), m_cols(0) {}
- inline ei_matrix_storage(int, int rows, int cols) : m_rows(rows), m_cols(cols) {}
+ inline ei_matrix_storage(DenseIndex, DenseIndex rows, DenseIndex cols) : m_rows(rows), m_cols(cols) {}
inline void swap(ei_matrix_storage& other)
{ std::swap(m_data,other.m_data); std::swap(m_rows,other.m_rows); std::swap(m_cols,other.m_cols); }
- inline int rows(void) const {return m_rows;}
- inline int cols(void) const {return m_cols;}
- inline void conservativeResize(int, int rows, int cols) { m_rows = rows; m_cols = cols; }
- inline void resize(int, int rows, int cols) { m_rows = rows; m_cols = cols; }
+ inline DenseIndex rows(void) const {return m_rows;}
+ inline DenseIndex cols(void) const {return m_cols;}
+ inline void conservativeResize(DenseIndex, DenseIndex rows, DenseIndex cols) { m_rows = rows; m_cols = cols; }
+ inline void resize(DenseIndex, DenseIndex rows, DenseIndex cols) { m_rows = rows; m_cols = cols; }
inline const T *data() const { return m_data.array; }
inline T *data() { return m_data.array; }
};
@@ -148,17 +148,17 @@
template<typename T, int Size, int _Cols, int _Options> class ei_matrix_storage<T, Size, Dynamic, _Cols, _Options>
{
ei_matrix_array<T,Size,_Options> m_data;
- int m_rows;
+ DenseIndex m_rows;
public:
inline explicit ei_matrix_storage() : m_rows(0) {}
inline ei_matrix_storage(ei_constructor_without_unaligned_array_assert)
: m_data(ei_constructor_without_unaligned_array_assert()), m_rows(0) {}
- inline ei_matrix_storage(int, int rows, int) : m_rows(rows) {}
+ inline ei_matrix_storage(DenseIndex, DenseIndex rows, DenseIndex) : m_rows(rows) {}
inline void swap(ei_matrix_storage& other) { std::swap(m_data,other.m_data); std::swap(m_rows,other.m_rows); }
- inline int rows(void) const {return m_rows;}
- inline int cols(void) const {return _Cols;}
- inline void conservativeResize(int, int rows, int) { m_rows = rows; }
- inline void resize(int, int rows, int) { m_rows = rows; }
+ inline DenseIndex rows(void) const {return m_rows;}
+ inline DenseIndex cols(void) const {return _Cols;}
+ inline void conservativeResize(DenseIndex, DenseIndex rows, DenseIndex) { m_rows = rows; }
+ inline void resize(DenseIndex, DenseIndex rows, DenseIndex) { m_rows = rows; }
inline const T *data() const { return m_data.array; }
inline T *data() { return m_data.array; }
};
@@ -167,17 +167,17 @@
template<typename T, int Size, int _Rows, int _Options> class ei_matrix_storage<T, Size, _Rows, Dynamic, _Options>
{
ei_matrix_array<T,Size,_Options> m_data;
- int m_cols;
+ DenseIndex m_cols;
public:
inline explicit ei_matrix_storage() : m_cols(0) {}
inline ei_matrix_storage(ei_constructor_without_unaligned_array_assert)
: m_data(ei_constructor_without_unaligned_array_assert()), m_cols(0) {}
- inline ei_matrix_storage(int, int, int cols) : m_cols(cols) {}
+ inline ei_matrix_storage(DenseIndex, DenseIndex, DenseIndex cols) : m_cols(cols) {}
inline void swap(ei_matrix_storage& other) { std::swap(m_data,other.m_data); std::swap(m_cols,other.m_cols); }
- inline int rows(void) const {return _Rows;}
- inline int cols(void) const {return m_cols;}
- inline void conservativeResize(int, int, int cols) { m_cols = cols; }
- inline void resize(int, int, int cols) { m_cols = cols; }
+ inline DenseIndex rows(void) const {return _Rows;}
+ inline DenseIndex cols(void) const {return m_cols;}
+ inline void conservativeResize(DenseIndex, DenseIndex, DenseIndex cols) { m_cols = cols; }
+ inline void resize(DenseIndex, DenseIndex, DenseIndex cols) { m_cols = cols; }
inline const T *data() const { return m_data.array; }
inline T *data() { return m_data.array; }
};
@@ -186,27 +186,27 @@
template<typename T, int _Options> class ei_matrix_storage<T, Dynamic, Dynamic, Dynamic, _Options>
{
T *m_data;
- int m_rows;
- int m_cols;
+ DenseIndex m_rows;
+ DenseIndex m_cols;
public:
inline explicit ei_matrix_storage() : m_data(0), m_rows(0), m_cols(0) {}
inline ei_matrix_storage(ei_constructor_without_unaligned_array_assert)
: m_data(0), m_rows(0), m_cols(0) {}
- inline ei_matrix_storage(int size, int rows, int cols)
+ inline ei_matrix_storage(DenseIndex size, DenseIndex rows, DenseIndex cols)
: m_data(ei_conditional_aligned_new<T,(_Options&DontAlign)==0>(size)), m_rows(rows), m_cols(cols)
{ EIGEN_INT_DEBUG_MATRIX_CTOR }
inline ~ei_matrix_storage() { ei_conditional_aligned_delete<T,(_Options&DontAlign)==0>(m_data, m_rows*m_cols); }
inline void swap(ei_matrix_storage& other)
{ std::swap(m_data,other.m_data); std::swap(m_rows,other.m_rows); std::swap(m_cols,other.m_cols); }
- inline int rows(void) const {return m_rows;}
- inline int cols(void) const {return m_cols;}
- inline void conservativeResize(int size, int rows, int cols)
+ inline DenseIndex rows(void) const {return m_rows;}
+ inline DenseIndex cols(void) const {return m_cols;}
+ inline void conservativeResize(DenseIndex size, DenseIndex rows, DenseIndex cols)
{
m_data = ei_conditional_aligned_realloc_new<T,(_Options&DontAlign)==0>(m_data, size, m_rows*m_cols);
m_rows = rows;
m_cols = cols;
}
- void resize(int size, int rows, int cols)
+ void resize(DenseIndex size, DenseIndex rows, DenseIndex cols)
{
if(size != m_rows*m_cols)
{
@@ -228,22 +228,22 @@
template<typename T, int _Rows, int _Options> class ei_matrix_storage<T, Dynamic, _Rows, Dynamic, _Options>
{
T *m_data;
- int m_cols;
+ DenseIndex m_cols;
public:
inline explicit ei_matrix_storage() : m_data(0), m_cols(0) {}
inline ei_matrix_storage(ei_constructor_without_unaligned_array_assert) : m_data(0), m_cols(0) {}
- inline ei_matrix_storage(int size, int, int cols) : m_data(ei_conditional_aligned_new<T,(_Options&DontAlign)==0>(size)), m_cols(cols)
+ inline ei_matrix_storage(DenseIndex size, DenseIndex, DenseIndex cols) : m_data(ei_conditional_aligned_new<T,(_Options&DontAlign)==0>(size)), m_cols(cols)
{ EIGEN_INT_DEBUG_MATRIX_CTOR }
inline ~ei_matrix_storage() { ei_conditional_aligned_delete<T,(_Options&DontAlign)==0>(m_data, _Rows*m_cols); }
inline void swap(ei_matrix_storage& other) { std::swap(m_data,other.m_data); std::swap(m_cols,other.m_cols); }
- inline static int rows(void) {return _Rows;}
- inline int cols(void) const {return m_cols;}
- inline void conservativeResize(int size, int, int cols)
+ inline static DenseIndex rows(void) {return _Rows;}
+ inline DenseIndex cols(void) const {return m_cols;}
+ inline void conservativeResize(DenseIndex size, DenseIndex, DenseIndex cols)
{
m_data = ei_conditional_aligned_realloc_new<T,(_Options&DontAlign)==0>(m_data, size, _Rows*m_cols);
m_cols = cols;
}
- void resize(int size, int, int cols)
+ void resize(DenseIndex size, DenseIndex, DenseIndex cols)
{
if(size != _Rows*m_cols)
{
@@ -264,22 +264,22 @@
template<typename T, int _Cols, int _Options> class ei_matrix_storage<T, Dynamic, Dynamic, _Cols, _Options>
{
T *m_data;
- int m_rows;
+ DenseIndex m_rows;
public:
inline explicit ei_matrix_storage() : m_data(0), m_rows(0) {}
inline ei_matrix_storage(ei_constructor_without_unaligned_array_assert) : m_data(0), m_rows(0) {}
- inline ei_matrix_storage(int size, int rows, int) : m_data(ei_conditional_aligned_new<T,(_Options&DontAlign)==0>(size)), m_rows(rows)
+ inline ei_matrix_storage(DenseIndex size, DenseIndex rows, DenseIndex) : m_data(ei_conditional_aligned_new<T,(_Options&DontAlign)==0>(size)), m_rows(rows)
{ EIGEN_INT_DEBUG_MATRIX_CTOR }
inline ~ei_matrix_storage() { ei_conditional_aligned_delete<T,(_Options&DontAlign)==0>(m_data, _Cols*m_rows); }
inline void swap(ei_matrix_storage& other) { std::swap(m_data,other.m_data); std::swap(m_rows,other.m_rows); }
- inline int rows(void) const {return m_rows;}
- inline static int cols(void) {return _Cols;}
- inline void conservativeResize(int size, int rows, int)
+ inline DenseIndex rows(void) const {return m_rows;}
+ inline static DenseIndex cols(void) {return _Cols;}
+ inline void conservativeResize(DenseIndex size, DenseIndex rows, DenseIndex)
{
m_data = ei_conditional_aligned_realloc_new<T,(_Options&DontAlign)==0>(m_data, size, m_rows*_Cols);
m_rows = rows;
}
- void resize(int size, int rows, int)
+ void resize(DenseIndex size, DenseIndex rows, DenseIndex)
{
if(size != m_rows*_Cols)
{
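
Aside: the storage classes above hard-code DenseIndex rather than a per-expression Index because ei_matrix_storage is not parameterized on any expression type. A minimal sketch of the assumed wiring follows; the macro name, its default, and the Dense specialization are reconstructions for illustration, not lines from this patch.

    #include <cstddef>

    // Sketch only: one global typedef for dense storage, assuming a
    // configurable macro defaulting to a pointer-sized signed type.
    #ifndef EIGEN_DEFAULT_DENSE_INDEX_TYPE
    #define EIGEN_DEFAULT_DENSE_INDEX_TYPE std::ptrdiff_t
    #endif
    typedef EIGEN_DEFAULT_DENSE_INDEX_TYPE DenseIndex;

    struct Dense {};  // stand-in for Eigen's dense StorageKind tag

    // How expression bases are assumed to recover their Index,
    // i.e. "typename ei_index<StorageKind>::type":
    template<typename StorageKind> struct ei_index;
    template<> struct ei_index<Dense> { typedef DenseIndex type; };
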
diff --git a/Eigen/src/Core/NestByValue.h b/Eigen/src/Core/NestByValue.h
index a8ca28e..ececf27 100644
--- a/Eigen/src/Core/NestByValue.h
+++ b/Eigen/src/Core/NestByValue.h
@@ -51,51 +51,51 @@
inline NestByValue(const ExpressionType& matrix) : m_expression(matrix) {}
- inline int rows() const { return m_expression.rows(); }
- inline int cols() const { return m_expression.cols(); }
- inline int outerStride() const { return m_expression.outerStride(); }
- inline int innerStride() const { return m_expression.innerStride(); }
+ inline Index rows() const { return m_expression.rows(); }
+ inline Index cols() const { return m_expression.cols(); }
+ inline Index outerStride() const { return m_expression.outerStride(); }
+ inline Index innerStride() const { return m_expression.innerStride(); }
- inline const CoeffReturnType coeff(int row, int col) const
+ inline const CoeffReturnType coeff(Index row, Index col) const
{
return m_expression.coeff(row, col);
}
- inline Scalar& coeffRef(int row, int col)
+ inline Scalar& coeffRef(Index row, Index col)
{
return m_expression.const_cast_derived().coeffRef(row, col);
}
- inline const CoeffReturnType coeff(int index) const
+ inline const CoeffReturnType coeff(Index index) const
{
return m_expression.coeff(index);
}
- inline Scalar& coeffRef(int index)
+ inline Scalar& coeffRef(Index index)
{
return m_expression.const_cast_derived().coeffRef(index);
}
template<int LoadMode>
- inline const PacketScalar packet(int row, int col) const
+ inline const PacketScalar packet(Index row, Index col) const
{
return m_expression.template packet<LoadMode>(row, col);
}
template<int LoadMode>
- inline void writePacket(int row, int col, const PacketScalar& x)
+ inline void writePacket(Index row, Index col, const PacketScalar& x)
{
m_expression.const_cast_derived().template writePacket<LoadMode>(row, col, x);
}
template<int LoadMode>
- inline const PacketScalar packet(int index) const
+ inline const PacketScalar packet(Index index) const
{
return m_expression.template packet<LoadMode>(index);
}
template<int LoadMode>
- inline void writePacket(int index, const PacketScalar& x)
+ inline void writePacket(Index index, const PacketScalar& x)
{
m_expression.const_cast_derived().template writePacket<LoadMode>(index, x);
}
diff --git a/Eigen/src/Core/Product.h b/Eigen/src/Core/Product.h
index 156f043..93e9787 100644
--- a/Eigen/src/Core/Product.h
+++ b/Eigen/src/Core/Product.h
@@ -216,10 +216,11 @@
template<> struct ei_outer_product_selector<ColMajor> {
template<typename ProductType, typename Dest>
EIGEN_DONT_INLINE static void run(const ProductType& prod, Dest& dest, typename ProductType::Scalar alpha) {
+ typedef typename Dest::Index Index;
// FIXME make sure lhs is sequentially stored
// FIXME not very good if rhs is real and lhs complex while alpha is real too
- const int cols = dest.cols();
- for (int j=0; j<cols; ++j)
+ const Index cols = dest.cols();
+ for (Index j=0; j<cols; ++j)
dest.col(j) += (alpha * prod.rhs().coeff(j)) * prod.lhs();
}
};
@@ -227,10 +228,11 @@
template<> struct ei_outer_product_selector<RowMajor> {
template<typename ProductType, typename Dest>
EIGEN_DONT_INLINE static void run(const ProductType& prod, Dest& dest, typename ProductType::Scalar alpha) {
+ typedef typename Dest::Index Index;
// FIXME make sure rhs is sequentially stored
// FIXME not very good if lhs is real and rhs complex while alpha is real too
- const int rows = dest.rows();
- for (int i=0; i<rows; ++i)
+ const Index rows = dest.rows();
+ for (Index i=0; i<rows; ++i)
dest.row(i) += (alpha * prod.lhs().coeff(i)) * prod.rhs();
}
};
@@ -383,9 +385,10 @@
template<typename ProductType, typename Dest>
static void run(const ProductType& prod, Dest& dest, typename ProductType::Scalar alpha)
{
+ typedef typename Dest::Index Index;
// TODO make sure dest is sequentially stored in memory, otherwise use a temp
- const int size = prod.rhs().rows();
- for(int k=0; k<size; ++k)
+ const Index size = prod.rhs().rows();
+ for(Index k=0; k<size; ++k)
dest += (alpha*prod.rhs().coeff(k)) * prod.lhs().col(k);
}
};
@@ -395,9 +398,10 @@
template<typename ProductType, typename Dest>
static void run(const ProductType& prod, Dest& dest, typename ProductType::Scalar alpha)
{
+ typedef typename Dest::Index Index;
// TODO make sure rhs is sequentially stored in memory, otherwise use a temp
- const int rows = prod.rows();
- for(int i=0; i<rows; ++i)
+ const Index rows = prod.rows();
+ for(Index i=0; i<rows; ++i)
dest.coeffRef(i) += alpha * (prod.lhs().row(i).cwiseProduct(prod.rhs().transpose())).sum();
}
};
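
Note that each selector has to hoist its own local Index typedef: these run methods are function templates with no base class to inherit one from, so the destination's Index is pulled in explicitly. The same convention in a self-contained sketch (function and parameter names are illustrative, not Eigen's):

    #include <Eigen/Dense>

    // Rank-1 update dest += alpha * u * v^T, column by column, with all
    // loop counters drawn from the destination's Index type as above.
    template<typename Dest, typename U, typename V>
    void outer_product_colmajor_sketch(Dest& dest, const U& u, const V& v,
                                       typename Dest::Scalar alpha)
    {
      typedef typename Dest::Index Index;  // local typedef, as in the patch
      const Index cols = dest.cols();
      for(Index j = 0; j < cols; ++j)
        dest.col(j) += (alpha * v.coeff(j)) * u;
    }
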
diff --git a/Eigen/src/Core/ProductBase.h b/Eigen/src/Core/ProductBase.h
index 611ca27..36626f8 100644
--- a/Eigen/src/Core/ProductBase.h
+++ b/Eigen/src/Core/ProductBase.h
@@ -100,8 +100,8 @@
&& "if you wanted a coeff-wise or a dot product use the respective explicit functions");
}
- inline int rows() const { return m_lhs.rows(); }
- inline int cols() const { return m_rhs.cols(); }
+ inline Index rows() const { return m_lhs.rows(); }
+ inline Index cols() const { return m_rhs.cols(); }
template<typename Dest>
inline void evalTo(Dest& dst) const { dst.setZero(); scaleAndAddTo(dst,Scalar(1)); }
@@ -133,7 +133,7 @@
const Diagonal<FullyLazyCoeffBaseProductType,Index> diagonal() const
{ return FullyLazyCoeffBaseProductType(m_lhs, m_rhs); }
- const Diagonal<FullyLazyCoeffBaseProductType,Dynamic> diagonal(int index) const
+ const Diagonal<FullyLazyCoeffBaseProductType,Dynamic> diagonal(Index index) const
{ return FullyLazyCoeffBaseProductType(m_lhs, m_rhs).diagonal(index); }
protected:
@@ -146,10 +146,10 @@
private:
// discard coeff methods
- void coeff(int,int) const;
- void coeffRef(int,int);
- void coeff(int) const;
- void coeffRef(int);
+ void coeff(Index,Index) const;
+ void coeffRef(Index,Index);
+ void coeff(Index) const;
+ void coeffRef(Index);
};
// here we need to overload the nested rule for products
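
The private, never-defined coeff/coeffRef overloads are a poisoning trick: coefficient access on a ProductBase expression fails at compile or link time instead of silently evaluating the product one coefficient at a time. The pattern in isolation, with illustrative names:

    #include <cstddef>

    class SumOnlyExpression
    {
      public:
        double sum() const { return 0.0; }  // whole-expression API is fine
      private:
        // declared but never defined: any use is an error
        void coeff(std::ptrdiff_t, std::ptrdiff_t) const;
        void coeffRef(std::ptrdiff_t, std::ptrdiff_t);
    };
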
diff --git a/Eigen/src/Core/Redux.h b/Eigen/src/Core/Redux.h
index ef0f760..3fd5de7 100644
--- a/Eigen/src/Core/Redux.h
+++ b/Eigen/src/Core/Redux.h
@@ -176,15 +176,16 @@
struct ei_redux_impl<Func, Derived, DefaultTraversal, NoUnrolling>
{
typedef typename Derived::Scalar Scalar;
+ typedef typename Derived::Index Index;
static Scalar run(const Derived& mat, const Func& func)
{
ei_assert(mat.rows()>0 && mat.cols()>0 && "you are using a non initialized matrix");
Scalar res;
res = mat.coeffByOuterInner(0, 0);
- for(int i = 1; i < mat.innerSize(); ++i)
+ for(Index i = 1; i < mat.innerSize(); ++i)
res = func(res, mat.coeffByOuterInner(0, i));
- for(int i = 1; i < mat.outerSize(); ++i)
- for(int j = 0; j < mat.innerSize(); ++j)
+ for(Index i = 1; i < mat.outerSize(); ++i)
+ for(Index j = 0; j < mat.innerSize(); ++j)
res = func(res, mat.coeffByOuterInner(i, j));
return res;
}
@@ -200,37 +201,38 @@
{
typedef typename Derived::Scalar Scalar;
typedef typename ei_packet_traits<Scalar>::type PacketScalar;
+ typedef typename Derived::Index Index;
static Scalar run(const Derived& mat, const Func& func)
{
- const int size = mat.size();
- const int packetSize = ei_packet_traits<Scalar>::size;
- const int alignedStart = ei_first_aligned(mat);
+ const Index size = mat.size();
+ const Index packetSize = ei_packet_traits<Scalar>::size;
+ const Index alignedStart = ei_first_aligned(mat);
enum {
alignment = (Derived::Flags & DirectAccessBit) || (Derived::Flags & AlignedBit)
? Aligned : Unaligned
};
- const int alignedSize = ((size-alignedStart)/packetSize)*packetSize;
- const int alignedEnd = alignedStart + alignedSize;
+ const Index alignedSize = ((size-alignedStart)/packetSize)*packetSize;
+ const Index alignedEnd = alignedStart + alignedSize;
Scalar res;
if(alignedSize)
{
PacketScalar packet_res = mat.template packet<alignment>(alignedStart);
- for(int index = alignedStart + packetSize; index < alignedEnd; index += packetSize)
+ for(Index index = alignedStart + packetSize; index < alignedEnd; index += packetSize)
packet_res = func.packetOp(packet_res, mat.template packet<alignment>(index));
res = func.predux(packet_res);
- for(int index = 0; index < alignedStart; ++index)
+ for(Index index = 0; index < alignedStart; ++index)
res = func(res,mat.coeff(index));
- for(int index = alignedEnd; index < size; ++index)
+ for(Index index = alignedEnd; index < size; ++index)
res = func(res,mat.coeff(index));
}
else // too small to vectorize anything.
// since this is dynamic-size and hence inefficient anyway for such small sizes, don't try to optimize.
{
res = mat.coeff(0);
- for(int index = 1; index < size; ++index)
+ for(Index index = 1; index < size; ++index)
res = func(res,mat.coeff(index));
}
@@ -243,26 +245,27 @@
{
typedef typename Derived::Scalar Scalar;
typedef typename ei_packet_traits<Scalar>::type PacketScalar;
+ typedef typename Derived::Index Index;
static Scalar run(const Derived& mat, const Func& func)
{
- const int innerSize = mat.innerSize();
- const int outerSize = mat.outerSize();
+ const Index innerSize = mat.innerSize();
+ const Index outerSize = mat.outerSize();
enum {
packetSize = ei_packet_traits<Scalar>::size
};
- const int packetedInnerSize = ((innerSize)/packetSize)*packetSize;
+ const Index packetedInnerSize = ((innerSize)/packetSize)*packetSize;
Scalar res;
if(packetedInnerSize)
{
PacketScalar packet_res = mat.template packet<Unaligned>(0,0);
- for(int j=0; j<outerSize; ++j)
- for(int i=(j==0?packetSize:0); i<packetedInnerSize; i+=int(packetSize))
+ for(Index j=0; j<outerSize; ++j)
+ for(Index i=(j==0?packetSize:0); i<packetedInnerSize; i+=Index(packetSize))
packet_res = func.packetOp(packet_res, mat.template packetByOuterInner<Unaligned>(j,i));
res = func.predux(packet_res);
- for(int j=0; j<outerSize; ++j)
- for(int i=packetedInnerSize; i<innerSize; ++i)
+ for(Index j=0; j<outerSize; ++j)
+ for(Index i=packetedInnerSize; i<innerSize; ++i)
res = func(res, mat.coeffByOuterInner(j,i));
}
else // too small to vectorize anything.
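
To make the head/body/tail split concrete, a worked example of the bounds computed above (the numbers are illustrative):

    // Assume size = 13, packetSize = 4, and ei_first_aligned() = 3. Then
    //   alignedSize = ((13 - 3) / 4) * 4 = 8
    //   alignedEnd  = 3 + 8            = 11
    // so the packet loop covers indices [3, 11) in two packets, while the
    // scalar loops mop up the unaligned head [0, 3) and the tail [11, 13).
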
diff --git a/Eigen/src/Core/ReturnByValue.h b/Eigen/src/Core/ReturnByValue.h
index b2e581c..665d480 100644
--- a/Eigen/src/Core/ReturnByValue.h
+++ b/Eigen/src/Core/ReturnByValue.h
@@ -57,14 +57,15 @@
{
public:
typedef typename ei_traits<Derived>::ReturnType ReturnType;
+
typedef typename ei_dense_xpr_base<ReturnByValue>::type Base;
EIGEN_DENSE_PUBLIC_INTERFACE(ReturnByValue)
template<typename Dest>
inline void evalTo(Dest& dst) const
{ static_cast<const Derived* const>(this)->evalTo(dst); }
- inline int rows() const { return static_cast<const Derived* const>(this)->rows(); }
- inline int cols() const { return static_cast<const Derived* const>(this)->cols(); }
+ inline Index rows() const { return static_cast<const Derived* const>(this)->rows(); }
+ inline Index cols() const { return static_cast<const Derived* const>(this)->cols(); }
#ifndef EIGEN_PARSED_BY_DOXYGEN
#define Unusable YOU_ARE_TRYING_TO_ACCESS_A_SINGLE_COEFFICIENT_IN_A_SPECIAL_EXPRESSION_WHERE_THAT_IS_NOT_ALLOWED_BECAUSE_THAT_WOULD_BE_INEFFICIENT
@@ -72,10 +73,10 @@
Unusable(const Unusable&) {}
Unusable& operator=(const Unusable&) {return *this;}
};
- const Unusable& coeff(int) const { return *reinterpret_cast<const Unusable*>(this); }
- const Unusable& coeff(int,int) const { return *reinterpret_cast<const Unusable*>(this); }
- Unusable& coeffRef(int) { return *reinterpret_cast<Unusable*>(this); }
- Unusable& coeffRef(int,int) { return *reinterpret_cast<Unusable*>(this); }
+ const Unusable& coeff(Index) const { return *reinterpret_cast<const Unusable*>(this); }
+ const Unusable& coeff(Index,Index) const { return *reinterpret_cast<const Unusable*>(this); }
+ Unusable& coeffRef(Index) { return *reinterpret_cast<Unusable*>(this); }
+ Unusable& coeffRef(Index,Index) { return *reinterpret_cast<Unusable*>(this); }
#endif
};
diff --git a/Eigen/src/Core/SelfAdjointView.h b/Eigen/src/Core/SelfAdjointView.h
index 277108d..eed3f93 100644
--- a/Eigen/src/Core/SelfAdjointView.h
+++ b/Eigen/src/Core/SelfAdjointView.h
@@ -65,6 +65,8 @@
typedef TriangularBase<SelfAdjointView> Base;
typedef typename ei_traits<SelfAdjointView>::Scalar Scalar;
+ typedef typename MatrixType::Index Index;
+
enum {
Mode = ei_traits<SelfAdjointView>::Mode
};
@@ -73,15 +75,15 @@
inline SelfAdjointView(const MatrixType& matrix) : m_matrix(matrix)
{ ei_assert(ei_are_flags_consistent<Mode>::ret); }
- inline int rows() const { return m_matrix.rows(); }
- inline int cols() const { return m_matrix.cols(); }
- inline int outerStride() const { return m_matrix.outerStride(); }
- inline int innerStride() const { return m_matrix.innerStride(); }
+ inline Index rows() const { return m_matrix.rows(); }
+ inline Index cols() const { return m_matrix.cols(); }
+ inline Index outerStride() const { return m_matrix.outerStride(); }
+ inline Index innerStride() const { return m_matrix.innerStride(); }
/** \sa MatrixBase::coeff()
* \warning the coordinates must fit into the referenced triangular part
*/
- inline Scalar coeff(int row, int col) const
+ inline Scalar coeff(Index row, Index col) const
{
Base::check_coordinates_internal(row, col);
return m_matrix.coeff(row, col);
@@ -90,7 +92,7 @@
/** \sa MatrixBase::coeffRef()
* \warning the coordinates must fit into the referenced triangular part
*/
- inline Scalar& coeffRef(int row, int col)
+ inline Scalar& coeffRef(Index row, Index col)
{
Base::check_coordinates_internal(row, col);
return m_matrix.const_cast_derived().coeffRef(row, col);
@@ -230,11 +232,12 @@
template<typename Derived1, typename Derived2, bool ClearOpposite>
struct ei_triangular_assignment_selector<Derived1, Derived2, SelfAdjoint|Upper, Dynamic, ClearOpposite>
{
+ typedef typename Derived1::Index Index;
inline static void run(Derived1 &dst, const Derived2 &src)
{
- for(int j = 0; j < dst.cols(); ++j)
+ for(Index j = 0; j < dst.cols(); ++j)
{
- for(int i = 0; i < j; ++i)
+ for(Index i = 0; i < j; ++i)
{
dst.copyCoeff(i, j, src);
dst.coeffRef(j,i) = ei_conj(dst.coeff(i,j));
@@ -249,9 +252,10 @@
{
inline static void run(Derived1 &dst, const Derived2 &src)
{
- for(int i = 0; i < dst.rows(); ++i)
+ typedef typename Derived1::Index Index;
+ for(Index i = 0; i < dst.rows(); ++i)
{
- for(int j = 0; j < i; ++j)
+ for(Index j = 0; j < i; ++j)
{
dst.copyCoeff(i, j, src);
dst.coeffRef(j,i) = ei_conj(dst.coeff(i,j));
diff --git a/Eigen/src/Core/SelfCwiseBinaryOp.h b/Eigen/src/Core/SelfCwiseBinaryOp.h
index f8f8a9f..4a85e11 100644
--- a/Eigen/src/Core/SelfCwiseBinaryOp.h
+++ b/Eigen/src/Core/SelfCwiseBinaryOp.h
@@ -55,28 +55,28 @@
inline SelfCwiseBinaryOp(MatrixType& xpr, const BinaryOp& func = BinaryOp()) : m_matrix(xpr), m_functor(func) {}
- inline int rows() const { return m_matrix.rows(); }
- inline int cols() const { return m_matrix.cols(); }
- inline int outerStride() const { return m_matrix.outerStride(); }
- inline int innerStride() const { return m_matrix.innerStride(); }
+ inline Index rows() const { return m_matrix.rows(); }
+ inline Index cols() const { return m_matrix.cols(); }
+ inline Index outerStride() const { return m_matrix.outerStride(); }
+ inline Index innerStride() const { return m_matrix.innerStride(); }
inline const Scalar* data() const { return m_matrix.data(); }
// note that this function is needed by assign to correctly align loads/stores
// TODO make Assign use .data()
- inline Scalar& coeffRef(int row, int col)
+ inline Scalar& coeffRef(Index row, Index col)
{
return m_matrix.const_cast_derived().coeffRef(row, col);
}
// note that this function is needed by assign to correctly align loads/stores
// TODO make Assign use .data()
- inline Scalar& coeffRef(int index)
+ inline Scalar& coeffRef(Index index)
{
return m_matrix.const_cast_derived().coeffRef(index);
}
template<typename OtherDerived>
- void copyCoeff(int row, int col, const DenseBase<OtherDerived>& other)
+ void copyCoeff(Index row, Index col, const DenseBase<OtherDerived>& other)
{
OtherDerived& _other = other.const_cast_derived();
ei_internal_assert(row >= 0 && row < rows()
@@ -86,7 +86,7 @@
}
template<typename OtherDerived>
- void copyCoeff(int index, const DenseBase<OtherDerived>& other)
+ void copyCoeff(Index index, const DenseBase<OtherDerived>& other)
{
OtherDerived& _other = other.const_cast_derived();
ei_internal_assert(index >= 0 && index < m_matrix.size());
@@ -95,7 +95,7 @@
}
template<typename OtherDerived, int StoreMode, int LoadMode>
- void copyPacket(int row, int col, const DenseBase<OtherDerived>& other)
+ void copyPacket(Index row, Index col, const DenseBase<OtherDerived>& other)
{
OtherDerived& _other = other.const_cast_derived();
ei_internal_assert(row >= 0 && row < rows()
@@ -105,7 +105,7 @@
}
template<typename OtherDerived, int StoreMode, int LoadMode>
- void copyPacket(int index, const DenseBase<OtherDerived>& other)
+ void copyPacket(Index index, const DenseBase<OtherDerived>& other)
{
OtherDerived& _other = other.const_cast_derived();
ei_internal_assert(index >= 0 && index < m_matrix.size());
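
SelfCwiseBinaryOp is internal plumbing rather than user-facing API; in-place coefficient-wise updates are, as this sketch assumes, what routes through it. At the usage level:

    #include <Eigen/Dense>

    int main()
    {
      Eigen::ArrayXf a = Eigen::ArrayXf::Random(16);
      a *= 2.0f;  // in-place coefficient-wise product; the copyCoeff and
      a += 1.0f;  // copyPacket hooks above let the assign kernel vectorize it
      return 0;
    }
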
diff --git a/Eigen/src/Core/SolveTriangular.h b/Eigen/src/Core/SolveTriangular.h
index f74c6ee..083c9ce 100644
--- a/Eigen/src/Core/SolveTriangular.h
+++ b/Eigen/src/Core/SolveTriangular.h
@@ -56,29 +56,30 @@
typedef typename Rhs::Scalar Scalar;
typedef ei_blas_traits<Lhs> LhsProductTraits;
typedef typename LhsProductTraits::ExtractType ActualLhsType;
+ typedef typename Lhs::Index Index;
enum {
IsLower = ((Mode&Lower)==Lower)
};
static void run(const Lhs& lhs, Rhs& other)
{
- static const int PanelWidth = EIGEN_TUNE_TRIANGULAR_PANEL_WIDTH;
+ static const Index PanelWidth = EIGEN_TUNE_TRIANGULAR_PANEL_WIDTH;
ActualLhsType actualLhs = LhsProductTraits::extract(lhs);
- const int size = lhs.cols();
- for(int pi=IsLower ? 0 : size;
+ const Index size = lhs.cols();
+ for(Index pi=IsLower ? 0 : size;
IsLower ? pi<size : pi>0;
IsLower ? pi+=PanelWidth : pi-=PanelWidth)
{
- int actualPanelWidth = std::min(IsLower ? size - pi : pi, PanelWidth);
+ Index actualPanelWidth = std::min(IsLower ? size - pi : pi, PanelWidth);
- int r = IsLower ? pi : size - pi; // remaining size
+ Index r = IsLower ? pi : size - pi; // remaining size
if (r > 0)
{
// let's directly call the low level product function because:
// 1 - it is faster to compile
// 2 - it is slightly faster at runtime
- int startRow = IsLower ? pi : pi-actualPanelWidth;
- int startCol = IsLower ? 0 : pi;
+ Index startRow = IsLower ? pi : pi-actualPanelWidth;
+ Index startCol = IsLower ? 0 : pi;
VectorBlock<Rhs,Dynamic> target(other,startRow,actualPanelWidth);
ei_cache_friendly_product_rowmajor_times_vector<LhsProductTraits::NeedToConjugate,false>(
@@ -87,10 +88,10 @@
target, Scalar(-1));
}
- for(int k=0; k<actualPanelWidth; ++k)
+ for(Index k=0; k<actualPanelWidth; ++k)
{
- int i = IsLower ? pi+k : pi-k-1;
- int s = IsLower ? pi : i+1;
+ Index i = IsLower ? pi+k : pi-k-1;
+ Index s = IsLower ? pi : i+1;
if (k>0)
other.coeffRef(i) -= (lhs.row(i).segment(s,k).transpose().cwiseProduct(other.segment(s,k))).sum();
@@ -109,6 +110,7 @@
typedef typename ei_packet_traits<Scalar>::type Packet;
typedef ei_blas_traits<Lhs> LhsProductTraits;
typedef typename LhsProductTraits::ExtractType ActualLhsType;
+ typedef typename Lhs::Index Index;
enum {
PacketSize = ei_packet_traits<Scalar>::size,
IsLower = ((Mode&Lower)==Lower)
@@ -116,30 +118,30 @@
static void run(const Lhs& lhs, Rhs& other)
{
- static const int PanelWidth = EIGEN_TUNE_TRIANGULAR_PANEL_WIDTH;
+ static const Index PanelWidth = EIGEN_TUNE_TRIANGULAR_PANEL_WIDTH;
ActualLhsType actualLhs = LhsProductTraits::extract(lhs);
- const int size = lhs.cols();
- for(int pi=IsLower ? 0 : size;
+ const Index size = lhs.cols();
+ for(Index pi=IsLower ? 0 : size;
IsLower ? pi<size : pi>0;
IsLower ? pi+=PanelWidth : pi-=PanelWidth)
{
- int actualPanelWidth = std::min(IsLower ? size - pi : pi, PanelWidth);
- int startBlock = IsLower ? pi : pi-actualPanelWidth;
- int endBlock = IsLower ? pi + actualPanelWidth : 0;
+ Index actualPanelWidth = std::min(IsLower ? size - pi : pi, PanelWidth);
+ Index startBlock = IsLower ? pi : pi-actualPanelWidth;
+ Index endBlock = IsLower ? pi + actualPanelWidth : 0;
- for(int k=0; k<actualPanelWidth; ++k)
+ for(Index k=0; k<actualPanelWidth; ++k)
{
- int i = IsLower ? pi+k : pi-k-1;
+ Index i = IsLower ? pi+k : pi-k-1;
if(!(Mode & UnitDiag))
other.coeffRef(i) /= lhs.coeff(i,i);
- int r = actualPanelWidth - k - 1; // remaining size
- int s = IsLower ? i+1 : i-r;
+ Index r = actualPanelWidth - k - 1; // remaining size
+ Index s = IsLower ? i+1 : i-r;
if (r>0)
other.segment(s,r) -= other.coeffRef(i) * Block<Lhs,Dynamic,1>(lhs, s, i, r, 1);
}
- int r = IsLower ? size - endBlock : startBlock; // remaining size
+ Index r = IsLower ? size - endBlock : startBlock; // remaining size
if (r > 0)
{
// let's directly call the low level product function because:
@@ -168,7 +170,7 @@
}
};
-template <typename Scalar, int Side, int Mode, bool Conjugate, int TriStorageOrder, int OtherStorageOrder>
+template <typename Scalar, typename Index, int Side, int Mode, bool Conjugate, int TriStorageOrder, int OtherStorageOrder>
struct ei_triangular_solve_matrix;
// the rhs is a matrix
@@ -176,12 +178,13 @@
struct ei_triangular_solver_selector<Lhs,Rhs,Side,Mode,NoUnrolling,StorageOrder,Dynamic>
{
typedef typename Rhs::Scalar Scalar;
+ typedef typename Rhs::Index Index;
typedef ei_blas_traits<Lhs> LhsProductTraits;
typedef typename LhsProductTraits::DirectLinearAccessType ActualLhsType;
static void run(const Lhs& lhs, Rhs& rhs)
{
const ActualLhsType actualLhs = LhsProductTraits::extract(lhs);
- ei_triangular_solve_matrix<Scalar,Side,Mode,LhsProductTraits::NeedToConjugate,StorageOrder,
+ ei_triangular_solve_matrix<Scalar,Index,Side,Mode,LhsProductTraits::NeedToConjugate,StorageOrder,
(Rhs::Flags&RowMajorBit) ? RowMajor : ColMajor>
::run(lhs.rows(), Side==OnTheLeft? rhs.cols() : rhs.rows(), &actualLhs.coeff(0,0), actualLhs.outerStride(), &rhs.coeffRef(0,0), rhs.outerStride());
}
diff --git a/Eigen/src/Core/StableNorm.h b/Eigen/src/Core/StableNorm.h
index c2ce937..d2bed92 100644
--- a/Eigen/src/Core/StableNorm.h
+++ b/Eigen/src/Core/StableNorm.h
@@ -54,15 +54,15 @@
inline typename NumTraits<typename ei_traits<Derived>::Scalar>::Real
MatrixBase<Derived>::stableNorm() const
{
- const int blockSize = 4096;
+ const Index blockSize = 4096;
RealScalar scale = 0;
RealScalar invScale = 1;
RealScalar ssq = 0; // sum of square
enum {
Alignment = (int(Flags)&DirectAccessBit) || (int(Flags)&AlignedBit) ? 1 : 0
};
- int n = size();
- int bi = ei_first_aligned(derived());
+ Index n = size();
+ Index bi = ei_first_aligned(derived());
if (bi>0)
ei_stable_norm_kernel(this->head(bi), ssq, scale, invScale);
for (; bi<n; bi+=blockSize)
@@ -83,11 +83,11 @@
inline typename NumTraits<typename ei_traits<Derived>::Scalar>::Real
MatrixBase<Derived>::blueNorm() const
{
- static int nmax = -1;
+ static Index nmax = -1;
static RealScalar b1, b2, s1m, s2m, overfl, rbig, relerr;
if(nmax <= 0)
{
- int nbig, ibeta, it, iemin, iemax, iexp;
+ Index nbig, ibeta, it, iemin, iemax, iexp;
RealScalar abig, eps;
// This program calculates the machine-dependent constants
// b1, b2, s1m, s2m, relerr, overfl, nmax
@@ -97,7 +97,7 @@
// For portability, the PORT subprograms "ilmaeh" and "rlmach"
// are used. For any specific computer, each of the assignment
// statements can be replaced
- nbig = std::numeric_limits<int>::max(); // largest integer
+ nbig = std::numeric_limits<Index>::max(); // largest integer
ibeta = std::numeric_limits<RealScalar>::radix; // base for floating-point numbers
it = std::numeric_limits<RealScalar>::digits; // number of base-beta digits in mantissa
iemin = std::numeric_limits<RealScalar>::min_exponent; // minimum exponent
@@ -121,12 +121,12 @@
if (RealScalar(nbig)>abig) nmax = int(abig); // largest safe n
else nmax = nbig;
}
- int n = size();
+ Index n = size();
RealScalar ab2 = b2 / RealScalar(n);
RealScalar asml = RealScalar(0);
RealScalar amed = RealScalar(0);
RealScalar abig = RealScalar(0);
- for(int j=0; j<n; ++j)
+ for(Index j=0; j<n; ++j)
{
RealScalar ax = ei_abs(coeff(j));
if(ax > ab2) abig += ei_abs2(ax*s2m);
diff --git a/Eigen/src/Core/Stride.h b/Eigen/src/Core/Stride.h
index d960dd2..afae034 100644
--- a/Eigen/src/Core/Stride.h
+++ b/Eigen/src/Core/Stride.h
@@ -86,8 +86,8 @@
inline int inner() const { return m_inner.value(); }
protected:
- ei_int_if_dynamic<OuterStrideAtCompileTime> m_outer;
- ei_int_if_dynamic<InnerStrideAtCompileTime> m_inner;
+ ei_variable_if_dynamic<int, OuterStrideAtCompileTime> m_outer;
+ ei_variable_if_dynamic<int, InnerStrideAtCompileTime> m_inner;
};
/** \brief Convenience specialization of Stride to specify only an inner stride */
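
ei_int_if_dynamic hard-coded int; ei_variable_if_dynamic is the same helper templated on the stored type, so Stride keeps int while index-typed users can store an Index. A reconstructed sketch of its assumed shape (only the name comes from this patch; Dynamic is Eigen's usual sentinel constant):

    template<typename T, int Value> class ei_variable_if_dynamic
    {
      public:
        explicit ei_variable_if_dynamic(T) {}   // runtime value ignored
        static T value() { return T(Value); }   // compile-time constant
        void setValue(T) {}
    };
    template<typename T> class ei_variable_if_dynamic<T, Dynamic>
    {
        T m_value;                              // only this case has storage
      public:
        explicit ei_variable_if_dynamic(T value) : m_value(value) {}
        T value() const { return m_value; }
        void setValue(T value) { m_value = value; }
    };
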
diff --git a/Eigen/src/Core/Swap.h b/Eigen/src/Core/Swap.h
index 02ff8ad..8e5994a 100644
--- a/Eigen/src/Core/Swap.h
+++ b/Eigen/src/Core/Swap.h
@@ -45,23 +45,23 @@
inline SwapWrapper(ExpressionType& xpr) : m_expression(xpr) {}
- inline int rows() const { return m_expression.rows(); }
- inline int cols() const { return m_expression.cols(); }
- inline int outerStride() const { return m_expression.outerStride(); }
- inline int innerStride() const { return m_expression.innerStride(); }
+ inline Index rows() const { return m_expression.rows(); }
+ inline Index cols() const { return m_expression.cols(); }
+ inline Index outerStride() const { return m_expression.outerStride(); }
+ inline Index innerStride() const { return m_expression.innerStride(); }
- inline Scalar& coeffRef(int row, int col)
+ inline Scalar& coeffRef(Index row, Index col)
{
return m_expression.const_cast_derived().coeffRef(row, col);
}
- inline Scalar& coeffRef(int index)
+ inline Scalar& coeffRef(Index index)
{
return m_expression.const_cast_derived().coeffRef(index);
}
template<typename OtherDerived>
- void copyCoeff(int row, int col, const DenseBase<OtherDerived>& other)
+ void copyCoeff(Index row, Index col, const DenseBase<OtherDerived>& other)
{
OtherDerived& _other = other.const_cast_derived();
ei_internal_assert(row >= 0 && row < rows()
@@ -72,7 +72,7 @@
}
template<typename OtherDerived>
- void copyCoeff(int index, const DenseBase<OtherDerived>& other)
+ void copyCoeff(Index index, const DenseBase<OtherDerived>& other)
{
OtherDerived& _other = other.const_cast_derived();
ei_internal_assert(index >= 0 && index < m_expression.size());
@@ -82,7 +82,7 @@
}
template<typename OtherDerived, int StoreMode, int LoadMode>
- void copyPacket(int row, int col, const DenseBase<OtherDerived>& other)
+ void copyPacket(Index row, Index col, const DenseBase<OtherDerived>& other)
{
OtherDerived& _other = other.const_cast_derived();
ei_internal_assert(row >= 0 && row < rows()
@@ -95,7 +95,7 @@
}
template<typename OtherDerived, int StoreMode, int LoadMode>
- void copyPacket(int index, const DenseBase<OtherDerived>& other)
+ void copyPacket(Index index, const DenseBase<OtherDerived>& other)
{
OtherDerived& _other = other.const_cast_derived();
ei_internal_assert(index >= 0 && index < m_expression.size());
diff --git a/Eigen/src/Core/Transpose.h b/Eigen/src/Core/Transpose.h
index 6cb8642..38d942e 100644
--- a/Eigen/src/Core/Transpose.h
+++ b/Eigen/src/Core/Transpose.h
@@ -72,8 +72,8 @@
EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Transpose)
- inline int rows() const { return m_matrix.cols(); }
- inline int cols() const { return m_matrix.rows(); }
+ inline Index rows() const { return m_matrix.cols(); }
+ inline Index cols() const { return m_matrix.rows(); }
/** \returns the nested expression */
const typename ei_cleantype<typename MatrixType::Nested>::type&
@@ -107,51 +107,51 @@
typedef typename ei_TransposeImpl_base<MatrixType>::type Base;
EIGEN_DENSE_PUBLIC_INTERFACE(Transpose<MatrixType>)
- inline int innerStride() const { return derived().nestedExpression().innerStride(); }
- inline int outerStride() const { return derived().nestedExpression().outerStride(); }
+ inline Index innerStride() const { return derived().nestedExpression().innerStride(); }
+ inline Index outerStride() const { return derived().nestedExpression().outerStride(); }
inline Scalar* data() { return derived().nestedExpression().data(); }
inline const Scalar* data() const { return derived().nestedExpression().data(); }
- inline Scalar& coeffRef(int row, int col)
+ inline Scalar& coeffRef(Index row, Index col)
{
return const_cast_derived().nestedExpression().coeffRef(col, row);
}
- inline Scalar& coeffRef(int index)
+ inline Scalar& coeffRef(Index index)
{
return const_cast_derived().nestedExpression().coeffRef(index);
}
- inline const CoeffReturnType coeff(int row, int col) const
+ inline const CoeffReturnType coeff(Index row, Index col) const
{
return derived().nestedExpression().coeff(col, row);
}
- inline const CoeffReturnType coeff(int index) const
+ inline const CoeffReturnType coeff(Index index) const
{
return derived().nestedExpression().coeff(index);
}
template<int LoadMode>
- inline const PacketScalar packet(int row, int col) const
+ inline const PacketScalar packet(Index row, Index col) const
{
return derived().nestedExpression().template packet<LoadMode>(col, row);
}
template<int LoadMode>
- inline void writePacket(int row, int col, const PacketScalar& x)
+ inline void writePacket(Index row, Index col, const PacketScalar& x)
{
const_cast_derived().nestedExpression().template writePacket<LoadMode>(col, row, x);
}
template<int LoadMode>
- inline const PacketScalar packet(int index) const
+ inline const PacketScalar packet(Index index) const
{
return derived().nestedExpression().template packet<LoadMode>(index);
}
template<int LoadMode>
- inline void writePacket(int index, const PacketScalar& x)
+ inline void writePacket(Index index, const PacketScalar& x)
{
const_cast_derived().nestedExpression().template writePacket<LoadMode>(index, x);
}
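
A standalone check of the invariant all this forwarding implements, as a sketch (not part of the patch): transposition is pure index bookkeeping over the same storage.

    #include <Eigen/Dense>
    #include <cassert>

    int main()
    {
      Eigen::MatrixXd m = Eigen::MatrixXd::Random(2,3);
      // every accessor above just forwards with (col, row) swapped
      assert(m.transpose()(0,1) == m(1,0));
      assert(m.transpose().rows() == m.cols());
      return 0;
    }
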
diff --git a/Eigen/src/Core/TriangularMatrix.h b/Eigen/src/Core/TriangularMatrix.h
index 3eb52a5..47c11ce 100644
--- a/Eigen/src/Core/TriangularMatrix.h
+++ b/Eigen/src/Core/TriangularMatrix.h
@@ -45,31 +45,33 @@
MaxColsAtCompileTime = ei_traits<Derived>::MaxColsAtCompileTime
};
typedef typename ei_traits<Derived>::Scalar Scalar;
+ typedef typename ei_traits<Derived>::StorageKind StorageKind;
+ typedef typename ei_index<StorageKind>::type Index;
inline TriangularBase() { ei_assert(!((Mode&UnitDiag) && (Mode&ZeroDiag))); }
- inline int rows() const { return derived().rows(); }
- inline int cols() const { return derived().cols(); }
- inline int outerStride() const { return derived().outerStride(); }
- inline int innerStride() const { return derived().innerStride(); }
+ inline Index rows() const { return derived().rows(); }
+ inline Index cols() const { return derived().cols(); }
+ inline Index outerStride() const { return derived().outerStride(); }
+ inline Index innerStride() const { return derived().innerStride(); }
- inline Scalar coeff(int row, int col) const { return derived().coeff(row,col); }
- inline Scalar& coeffRef(int row, int col) { return derived().coeffRef(row,col); }
+ inline Scalar coeff(Index row, Index col) const { return derived().coeff(row,col); }
+ inline Scalar& coeffRef(Index row, Index col) { return derived().coeffRef(row,col); }
/** \see MatrixBase::copyCoeff(row,col)
*/
template<typename Other>
- EIGEN_STRONG_INLINE void copyCoeff(int row, int col, Other& other)
+ EIGEN_STRONG_INLINE void copyCoeff(Index row, Index col, Other& other)
{
derived().coeffRef(row, col) = other.coeff(row, col);
}
- inline Scalar operator()(int row, int col) const
+ inline Scalar operator()(Index row, Index col) const
{
check_coordinates(row, col);
return coeff(row,col);
}
- inline Scalar& operator()(int row, int col)
+ inline Scalar& operator()(Index row, Index col)
{
check_coordinates(row, col);
return coeffRef(row,col);
@@ -87,7 +89,7 @@
protected:
- void check_coordinates(int row, int col)
+ void check_coordinates(Index row, Index col)
{
EIGEN_ONLY_USED_FOR_DEBUG(row);
EIGEN_ONLY_USED_FOR_DEBUG(col);
@@ -99,12 +101,12 @@
}
#ifdef EIGEN_INTERNAL_DEBUGGING
- void check_coordinates_internal(int row, int col)
+ void check_coordinates_internal(Index row, Index col)
{
check_coordinates(row, col);
}
#else
- void check_coordinates_internal(int , int ) {}
+ void check_coordinates_internal(Index , Index ) {}
#endif
};
@@ -156,6 +158,9 @@
typedef typename ei_cleantype<MatrixTypeNested>::type _MatrixTypeNested;
using TriangularBase<TriangularView<_MatrixType, _Mode> >::evalToLazy;
+ typedef typename ei_traits<TriangularView>::StorageKind StorageKind;
+ typedef typename ei_index<StorageKind>::type Index;
+
enum {
Mode = _Mode,
TransposeMode = (Mode & Upper ? Lower : 0)
@@ -167,10 +172,10 @@
inline TriangularView(const MatrixType& matrix) : m_matrix(matrix)
{ ei_assert(ei_are_flags_consistent<Mode>::ret); }
- inline int rows() const { return m_matrix.rows(); }
- inline int cols() const { return m_matrix.cols(); }
- inline int outerStride() const { return m_matrix.outerStride(); }
- inline int innerStride() const { return m_matrix.innerStride(); }
+ inline Index rows() const { return m_matrix.rows(); }
+ inline Index cols() const { return m_matrix.cols(); }
+ inline Index outerStride() const { return m_matrix.outerStride(); }
+ inline Index innerStride() const { return m_matrix.innerStride(); }
/** \sa MatrixBase::operator+=() */
template<typename Other> TriangularView& operator+=(const Other& other) { return *this = m_matrix + other; }
@@ -194,7 +199,7 @@
/** \sa MatrixBase::coeff()
* \warning the coordinates must fit into the referenced triangular part
*/
- inline Scalar coeff(int row, int col) const
+ inline Scalar coeff(Index row, Index col) const
{
Base::check_coordinates_internal(row, col);
return m_matrix.coeff(row, col);
@@ -203,7 +208,7 @@
/** \sa MatrixBase::coeffRef()
* \warning the coordinates must fit into the referenced triangular part
*/
- inline Scalar& coeffRef(int row, int col)
+ inline Scalar& coeffRef(Index row, Index col)
{
Base::check_coordinates_internal(row, col);
return m_matrix.const_cast_derived().coeffRef(row, col);
@@ -371,15 +376,16 @@
template<typename Derived1, typename Derived2, bool ClearOpposite>
struct ei_triangular_assignment_selector<Derived1, Derived2, Upper, Dynamic, ClearOpposite>
{
+ typedef typename Derived1::Index Index;
inline static void run(Derived1 &dst, const Derived2 &src)
{
- for(int j = 0; j < dst.cols(); ++j)
+ for(Index j = 0; j < dst.cols(); ++j)
{
- int maxi = std::min(j, dst.rows()-1);
- for(int i = 0; i <= maxi; ++i)
+ Index maxi = std::min(j, dst.rows()-1);
+ for(Index i = 0; i <= maxi; ++i)
dst.copyCoeff(i, j, src);
if (ClearOpposite)
- for(int i = maxi+1; i < dst.rows(); ++i)
+ for(Index i = maxi+1; i < dst.rows(); ++i)
dst.coeffRef(i, j) = 0;
}
}
@@ -388,15 +394,16 @@
template<typename Derived1, typename Derived2, bool ClearOpposite>
struct ei_triangular_assignment_selector<Derived1, Derived2, Lower, Dynamic, ClearOpposite>
{
+ typedef typename Derived1::Index Index;
inline static void run(Derived1 &dst, const Derived2 &src)
{
- for(int j = 0; j < dst.cols(); ++j)
+ for(Index j = 0; j < dst.cols(); ++j)
{
- for(int i = j; i < dst.rows(); ++i)
+ for(Index i = j; i < dst.rows(); ++i)
dst.copyCoeff(i, j, src);
- int maxi = std::min(j, dst.rows());
+ Index maxi = std::min(j, dst.rows());
if (ClearOpposite)
- for(int i = 0; i < maxi; ++i)
+ for(Index i = 0; i < maxi; ++i)
dst.coeffRef(i, j) = 0;
}
}
@@ -405,15 +412,16 @@
template<typename Derived1, typename Derived2, bool ClearOpposite>
struct ei_triangular_assignment_selector<Derived1, Derived2, StrictlyUpper, Dynamic, ClearOpposite>
{
+ typedef typename Derived1::Index Index;
inline static void run(Derived1 &dst, const Derived2 &src)
{
- for(int j = 0; j < dst.cols(); ++j)
+ for(Index j = 0; j < dst.cols(); ++j)
{
- int maxi = std::min(j, dst.rows());
- for(int i = 0; i < maxi; ++i)
+ Index maxi = std::min(j, dst.rows());
+ for(Index i = 0; i < maxi; ++i)
dst.copyCoeff(i, j, src);
if (ClearOpposite)
- for(int i = maxi; i < dst.rows(); ++i)
+ for(Index i = maxi; i < dst.rows(); ++i)
dst.coeffRef(i, j) = 0;
}
}
@@ -422,15 +430,16 @@
template<typename Derived1, typename Derived2, bool ClearOpposite>
struct ei_triangular_assignment_selector<Derived1, Derived2, StrictlyLower, Dynamic, ClearOpposite>
{
+ typedef typename Derived1::Index Index;
inline static void run(Derived1 &dst, const Derived2 &src)
{
- for(int j = 0; j < dst.cols(); ++j)
+ for(Index j = 0; j < dst.cols(); ++j)
{
- for(int i = j+1; i < dst.rows(); ++i)
+ for(Index i = j+1; i < dst.rows(); ++i)
dst.copyCoeff(i, j, src);
- int maxi = std::min(j, dst.rows()-1);
+ Index maxi = std::min(j, dst.rows()-1);
if (ClearOpposite)
- for(int i = 0; i <= maxi; ++i)
+ for(Index i = 0; i <= maxi; ++i)
dst.coeffRef(i, j) = 0;
}
}
@@ -439,16 +448,17 @@
template<typename Derived1, typename Derived2, bool ClearOpposite>
struct ei_triangular_assignment_selector<Derived1, Derived2, UnitUpper, Dynamic, ClearOpposite>
{
+ typedef typename Derived1::Index Index;
inline static void run(Derived1 &dst, const Derived2 &src)
{
- for(int j = 0; j < dst.cols(); ++j)
+ for(Index j = 0; j < dst.cols(); ++j)
{
- int maxi = std::min(j, dst.rows());
- for(int i = 0; i < maxi; ++i)
+ Index maxi = std::min(j, dst.rows());
+ for(Index i = 0; i < maxi; ++i)
dst.copyCoeff(i, j, src);
if (ClearOpposite)
{
- for(int i = maxi+1; i < dst.rows(); ++i)
+ for(Index i = maxi+1; i < dst.rows(); ++i)
dst.coeffRef(i, j) = 0;
}
}
@@ -458,16 +468,17 @@
template<typename Derived1, typename Derived2, bool ClearOpposite>
struct ei_triangular_assignment_selector<Derived1, Derived2, UnitLower, Dynamic, ClearOpposite>
{
+ typedef typename Derived1::Index Index;
inline static void run(Derived1 &dst, const Derived2 &src)
{
- for(int j = 0; j < dst.cols(); ++j)
+ for(Index j = 0; j < dst.cols(); ++j)
{
- int maxi = std::min(j, dst.rows());
- for(int i = maxi+1; i < dst.rows(); ++i)
+ Index maxi = std::min(j, dst.rows());
+ for(Index i = maxi+1; i < dst.rows(); ++i)
dst.copyCoeff(i, j, src);
if (ClearOpposite)
{
- for(int i = 0; i < maxi; ++i)
+ for(Index i = 0; i < maxi; ++i)
dst.coeffRef(i, j) = 0;
}
}
@@ -638,18 +649,18 @@
bool MatrixBase<Derived>::isUpperTriangular(RealScalar prec) const
{
RealScalar maxAbsOnUpperPart = static_cast<RealScalar>(-1);
- for(int j = 0; j < cols(); ++j)
+ for(Index j = 0; j < cols(); ++j)
{
- int maxi = std::min(j, rows()-1);
- for(int i = 0; i <= maxi; ++i)
+ Index maxi = std::min(j, rows()-1);
+ for(Index i = 0; i <= maxi; ++i)
{
RealScalar absValue = ei_abs(coeff(i,j));
if(absValue > maxAbsOnUpperPart) maxAbsOnUpperPart = absValue;
}
}
RealScalar threshold = maxAbsOnUpperPart * prec;
- for(int j = 0; j < cols(); ++j)
- for(int i = j+1; i < rows(); ++i)
+ for(Index j = 0; j < cols(); ++j)
+ for(Index i = j+1; i < rows(); ++i)
if(ei_abs(coeff(i, j)) > threshold) return false;
return true;
}
@@ -663,17 +674,17 @@
bool MatrixBase<Derived>::isLowerTriangular(RealScalar prec) const
{
RealScalar maxAbsOnLowerPart = static_cast<RealScalar>(-1);
- for(int j = 0; j < cols(); ++j)
- for(int i = j; i < rows(); ++i)
+ for(Index j = 0; j < cols(); ++j)
+ for(Index i = j; i < rows(); ++i)
{
RealScalar absValue = ei_abs(coeff(i,j));
if(absValue > maxAbsOnLowerPart) maxAbsOnLowerPart = absValue;
}
RealScalar threshold = maxAbsOnLowerPart * prec;
- for(int j = 1; j < cols(); ++j)
+ for(Index j = 1; j < cols(); ++j)
{
- int maxi = std::min(j, rows()-1);
- for(int i = 0; i < maxi; ++i)
+ Index maxi = std::min(j, rows()-1);
+ for(Index i = 0; i < maxi; ++i)
if(ei_abs(coeff(i, j)) > threshold) return false;
}
return true;
diff --git a/Eigen/src/Core/VectorBlock.h b/Eigen/src/Core/VectorBlock.h
index adb69b6..c3212b8 100644
--- a/Eigen/src/Core/VectorBlock.h
+++ b/Eigen/src/Core/VectorBlock.h
@@ -34,7 +34,7 @@
* \param Size size of the sub-vector we are taking at compile time (optional)
*
* This class represents an expression of either a fixed-size or dynamic-size sub-vector.
- * It is the return type of DenseBase::segment(int,int) and DenseBase::segment<int>(int) and
+ * It is the return type of DenseBase::segment(Index,Index) and DenseBase::segment<int>(Index) and
* most of the time this is the only way it is used.
*
* However, if you want to directly manipulate sub-vector expressions,
@@ -53,7 +53,7 @@
* \include class_FixedVectorBlock.cpp
* Output: \verbinclude class_FixedVectorBlock.out
*
- * \sa class Block, DenseBase::segment(int,int,int,int), DenseBase::segment(int,int)
+ * \sa class Block, DenseBase::segment(Index,Index,Index,Index), DenseBase::segment(Index,Index)
*/
template<typename VectorType, int Size>
struct ei_traits<VectorBlock<VectorType, Size> >
@@ -81,7 +81,7 @@
/** Dynamic-size constructor
*/
- inline VectorBlock(const VectorType& vector, int start, int size)
+ inline VectorBlock(const VectorType& vector, Index start, Index size)
: Base(vector,
IsColVector ? start : 0, IsColVector ? 0 : start,
IsColVector ? size : 1, IsColVector ? 1 : size)
@@ -91,7 +91,7 @@
/** Fixed-size constructor
*/
- inline VectorBlock(const VectorType& vector, int start)
+ inline VectorBlock(const VectorType& vector, Index start)
: Base(vector, IsColVector ? start : 0, IsColVector ? 0 : start)
{
EIGEN_STATIC_ASSERT_VECTOR_ONLY(VectorBlock);
@@ -113,20 +113,20 @@
* when it is applied to a fixed-size vector, it inherits a fixed maximal size,
* which means that evaluating it does not cause a dynamic memory allocation.
*
- * \sa class Block, segment(int)
+ * \sa class Block, segment(Index)
*/
template<typename Derived>
inline VectorBlock<Derived> DenseBase<Derived>
- ::segment(int start, int size)
+ ::segment(Index start, Index size)
{
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
return VectorBlock<Derived>(derived(), start, size);
}
-/** This is the const version of segment(int,int).*/
+/** This is the const version of segment(Index,Index).*/
template<typename Derived>
inline const VectorBlock<Derived>
-DenseBase<Derived>::segment(int start, int size) const
+DenseBase<Derived>::segment(Index start, Index size) const
{
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
return VectorBlock<Derived>(derived(), start, size);
@@ -145,20 +145,20 @@
* when it is applied to a fixed-size vector, it inherits a fixed maximal size,
* which means that evaluating it does not cause a dynamic memory allocation.
*
- * \sa class Block, block(int,int)
+ * \sa class Block, block(Index,Index)
*/
template<typename Derived>
inline VectorBlock<Derived>
-DenseBase<Derived>::head(int size)
+DenseBase<Derived>::head(Index size)
{
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
return VectorBlock<Derived>(derived(), 0, size);
}
-/** This is the const version of head(int).*/
+/** This is the const version of head(Index).*/
template<typename Derived>
inline const VectorBlock<Derived>
-DenseBase<Derived>::head(int size) const
+DenseBase<Derived>::head(Index size) const
{
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
return VectorBlock<Derived>(derived(), 0, size);
@@ -177,20 +177,20 @@
* when it is applied to a fixed-size vector, it inherits a fixed maximal size,
* which means that evaluating it does not cause a dynamic memory allocation.
*
- * \sa class Block, block(int,int)
+ * \sa class Block, block(Index,Index)
*/
template<typename Derived>
inline VectorBlock<Derived>
-DenseBase<Derived>::tail(int size)
+DenseBase<Derived>::tail(Index size)
{
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
return VectorBlock<Derived>(derived(), this->size() - size, size);
}
-/** This is the const version of tail(int).*/
+/** This is the const version of tail(Index).*/
template<typename Derived>
inline const VectorBlock<Derived>
-DenseBase<Derived>::tail(int size) const
+DenseBase<Derived>::tail(Index size) const
{
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
return VectorBlock<Derived>(derived(), this->size() - size, size);
@@ -212,17 +212,17 @@
template<typename Derived>
template<int Size>
inline VectorBlock<Derived,Size>
-DenseBase<Derived>::segment(int start)
+DenseBase<Derived>::segment(Index start)
{
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
return VectorBlock<Derived,Size>(derived(), start);
}
-/** This is the const version of segment<int>(int).*/
+/** This is the const version of segment<int>(Index).*/
template<typename Derived>
template<int Size>
inline const VectorBlock<Derived,Size>
-DenseBase<Derived>::segment(int start) const
+DenseBase<Derived>::segment(Index start) const
{
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
return VectorBlock<Derived,Size>(derived(), start);
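
Call sites are unaffected by the switch, since integer literals convert to Index. A usage-level sketch:

    #include <Eigen/Dense>

    int main()
    {
      Eigen::VectorXd v = Eigen::VectorXd::Random(8);
      Eigen::VectorXd::Index start = 2;  // the per-expression index type
      v.segment(start, 3).setZero();     // dynamic-size sub-vector
      v.head(2) = v.tail(2);             // non-overlapping head/tail copy
      return 0;
    }
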
diff --git a/Eigen/src/Core/Visitor.h b/Eigen/src/Core/Visitor.h
index e6f02b7..2e96cfe 100644
--- a/Eigen/src/Core/Visitor.h
+++ b/Eigen/src/Core/Visitor.h
@@ -52,13 +52,14 @@
template<typename Visitor, typename Derived>
struct ei_visitor_impl<Visitor, Derived, Dynamic>
{
+ typedef typename Derived::Index Index;
inline static void run(const Derived& mat, Visitor& visitor)
{
visitor.init(mat.coeff(0,0), 0, 0);
- for(int i = 1; i < mat.rows(); ++i)
+ for(Index i = 1; i < mat.rows(); ++i)
visitor(mat.coeff(i, 0), i, 0);
- for(int j = 1; j < mat.cols(); ++j)
- for(int i = 0; i < mat.rows(); ++i)
+ for(Index j = 1; j < mat.cols(); ++j)
+ for(Index i = 0; i < mat.rows(); ++i)
visitor(mat.coeff(i, j), i, j);
}
};
@@ -70,16 +71,16 @@
* \code
* struct MyVisitor {
* // called for the first coefficient
- * void init(const Scalar& value, int i, int j);
+ * void init(const Scalar& value, Index i, Index j);
* // called for all other coefficients
- * void operator() (const Scalar& value, int i, int j);
+ * void operator() (const Scalar& value, Index i, Index j);
* };
* \endcode
*
* \note compared to one or two \em for \em loops, visitors offer automatic
* unrolling for small fixed size matrix.
*
- * \sa minCoeff(int*,int*), maxCoeff(int*,int*), DenseBase::redux()
+ * \sa minCoeff(Index*,Index*), maxCoeff(Index*,Index*), DenseBase::redux()
*/
template<typename Derived>
template<typename Visitor>
@@ -96,12 +97,14 @@
/** \internal
* \brief Base class to implement min and max visitors
*/
-template <typename Scalar>
+template <typename Derived>
struct ei_coeff_visitor
{
- int row, col;
+ typedef typename Derived::Index Index;
+ typedef typename Derived::Scalar Scalar;
+ Index row, col;
Scalar res;
- inline void init(const Scalar& value, int i, int j)
+ inline void init(const Scalar& value, Index i, Index j)
{
res = value;
row = i;
@@ -112,12 +115,14 @@
/** \internal
* \brief Visitor computing the min coefficient with its value and coordinates
*
- * \sa DenseBase::minCoeff(int*, int*)
+ * \sa DenseBase::minCoeff(Index*, Index*)
*/
-template <typename Scalar>
-struct ei_min_coeff_visitor : ei_coeff_visitor<Scalar>
+template <typename Derived>
+struct ei_min_coeff_visitor : ei_coeff_visitor<Derived>
{
- void operator() (const Scalar& value, int i, int j)
+ typedef typename Derived::Index Index;
+ typedef typename Derived::Scalar Scalar;
+ void operator() (const Scalar& value, Index i, Index j)
{
if(value < this->res)
{
@@ -138,12 +143,14 @@
/** \internal
* \brief Visitor computing the max coefficient with its value and coordinates
*
- * \sa DenseBase::maxCoeff(int*, int*)
+ * \sa DenseBase::maxCoeff(Index*, Index*)
*/
-template <typename Scalar>
-struct ei_max_coeff_visitor : ei_coeff_visitor<Scalar>
+template <typename Derived>
+struct ei_max_coeff_visitor : ei_coeff_visitor<Derived>
{
- void operator() (const Scalar& value, int i, int j)
+ typedef typename Derived::Index Index;
+ typedef typename Derived::Scalar Scalar;
+ void operator() (const Scalar& value, Index i, Index j)
{
if(value > this->res)
{
@@ -164,13 +171,13 @@
/** \returns the minimum of all coefficients of *this
* and puts in *row and *col its location.
*
- * \sa DenseBase::minCoeff(int*), DenseBase::maxCoeff(int*,int*), DenseBase::visitor(), DenseBase::minCoeff()
+ * \sa DenseBase::minCoeff(Index*), DenseBase::maxCoeff(Index*,Index*), DenseBase::visitor(), DenseBase::minCoeff()
*/
template<typename Derived>
typename ei_traits<Derived>::Scalar
-DenseBase<Derived>::minCoeff(int* row, int* col) const
+DenseBase<Derived>::minCoeff(Index* row, Index* col) const
{
- ei_min_coeff_visitor<Scalar> minVisitor;
+ ei_min_coeff_visitor<Derived> minVisitor;
this->visit(minVisitor);
*row = minVisitor.row;
if (col) *col = minVisitor.col;
@@ -180,14 +187,14 @@
/** \returns the minimum of all coefficients of *this
* and puts in *index its location.
*
- * \sa DenseBase::minCoeff(int*,int*), DenseBase::maxCoeff(int*,int*), DenseBase::visitor(), DenseBase::minCoeff()
+ * \sa DenseBase::minCoeff(Index*,Index*), DenseBase::maxCoeff(Index*,Index*), DenseBase::visitor(), DenseBase::minCoeff()
*/
template<typename Derived>
typename ei_traits<Derived>::Scalar
-DenseBase<Derived>::minCoeff(int* index) const
+DenseBase<Derived>::minCoeff(Index* index) const
{
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
- ei_min_coeff_visitor<Scalar> minVisitor;
+ ei_min_coeff_visitor<Derived> minVisitor;
this->visit(minVisitor);
*index = (RowsAtCompileTime==1) ? minVisitor.col : minVisitor.row;
return minVisitor.res;
@@ -196,13 +203,13 @@
/** \returns the maximum of all coefficients of *this
* and puts in *row and *col its location.
*
- * \sa DenseBase::minCoeff(int*,int*), DenseBase::visitor(), DenseBase::maxCoeff()
+ * \sa DenseBase::minCoeff(Index*,Index*), DenseBase::visitor(), DenseBase::maxCoeff()
*/
template<typename Derived>
typename ei_traits<Derived>::Scalar
-DenseBase<Derived>::maxCoeff(int* row, int* col) const
+DenseBase<Derived>::maxCoeff(Index* row, Index* col) const
{
- ei_max_coeff_visitor<Scalar> maxVisitor;
+ ei_max_coeff_visitor<Derived> maxVisitor;
this->visit(maxVisitor);
*row = maxVisitor.row;
if (col) *col = maxVisitor.col;
@@ -212,14 +219,14 @@
/** \returns the maximum of all coefficients of *this
* and puts in *index its location.
*
- * \sa DenseBase::maxCoeff(int*,int*), DenseBase::minCoeff(int*,int*), DenseBase::visitor(), DenseBase::maxCoeff()
+ * \sa DenseBase::maxCoeff(Index*,Index*), DenseBase::minCoeff(Index*,Index*), DenseBase::visitor(), DenseBase::maxCoeff()
*/
template<typename Derived>
typename ei_traits<Derived>::Scalar
-DenseBase<Derived>::maxCoeff(int* index) const
+DenseBase<Derived>::maxCoeff(Index* index) const
{
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
- ei_max_coeff_visitor<Scalar> maxVisitor;
+ ei_max_coeff_visitor<Derived> maxVisitor;
this->visit(maxVisitor);
*index = (RowsAtCompileTime==1) ? maxVisitor.col : maxVisitor.row;
return maxVisitor.res;
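
With these visitor changes, coefficient locations travel through the matrix's Index type instead of plain int. A minimal usage sketch (not part of the patch, assuming only the usual Eigen entry header):

  #include <Eigen/Core>

  int main()
  {
    Eigen::MatrixXf m(2,2);
    m << 1, 2,
         3, 4;
    Eigen::MatrixXf::Index row, col;    // previously these had to be int
    float mn = m.minCoeff(&row, &col);  // location comes back as Index
    float mx = m.maxCoeff(&row, &col);
    return (mn == 1 && mx == 4) ? 0 : 1;
  }
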
diff --git a/Eigen/src/Core/products/CoeffBasedProduct.h b/Eigen/src/Core/products/CoeffBasedProduct.h
index 2f7b32c..0c39cbd 100644
--- a/Eigen/src/Core/products/CoeffBasedProduct.h
+++ b/Eigen/src/Core/products/CoeffBasedProduct.h
@@ -39,10 +39,10 @@
* Note that here the inner-loops should always be unrolled.
*/
-template<int Traversal, int Index, typename Lhs, typename Rhs, typename RetScalar>
+template<int Traversal, int UnrollingIndex, typename Lhs, typename Rhs, typename RetScalar>
struct ei_product_coeff_impl;
-template<int StorageOrder, int Index, typename Lhs, typename Rhs, typename PacketScalar, int LoadMode>
+template<int StorageOrder, int UnrollingIndex, typename Lhs, typename Rhs, typename PacketScalar, int LoadMode>
struct ei_product_packet_impl;
template<typename LhsNested, typename RhsNested, int NestingFlags>
@@ -159,10 +159,10 @@
&& "if you wanted a coeff-wise or a dot product use the respective explicit functions");
}
- EIGEN_STRONG_INLINE int rows() const { return m_lhs.rows(); }
- EIGEN_STRONG_INLINE int cols() const { return m_rhs.cols(); }
+ EIGEN_STRONG_INLINE Index rows() const { return m_lhs.rows(); }
+ EIGEN_STRONG_INLINE Index cols() const { return m_rhs.cols(); }
- EIGEN_STRONG_INLINE const Scalar coeff(int row, int col) const
+ EIGEN_STRONG_INLINE const Scalar coeff(Index row, Index col) const
{
Scalar res;
ScalarCoeffImpl::run(row, col, m_lhs, m_rhs, res);
@@ -172,17 +172,17 @@
/* Allow index-based non-packet access. It is impossible though to allow index-based packed access,
* which is why we don't set the LinearAccessBit.
*/
- EIGEN_STRONG_INLINE const Scalar coeff(int index) const
+ EIGEN_STRONG_INLINE const Scalar coeff(Index index) const
{
Scalar res;
- const int row = RowsAtCompileTime == 1 ? 0 : index;
- const int col = RowsAtCompileTime == 1 ? index : 0;
+ const Index row = RowsAtCompileTime == 1 ? 0 : index;
+ const Index col = RowsAtCompileTime == 1 ? index : 0;
ScalarCoeffImpl::run(row, col, m_lhs, m_rhs, res);
return res;
}
template<int LoadMode>
- EIGEN_STRONG_INLINE const PacketScalar packet(int row, int col) const
+ EIGEN_STRONG_INLINE const PacketScalar packet(Index row, Index col) const
{
PacketScalar res;
ei_product_packet_impl<Flags&RowMajorBit ? RowMajor : ColMajor,
@@ -205,11 +205,11 @@
const Diagonal<LazyCoeffBasedProductType,0> diagonal() const
{ return reinterpret_cast<const LazyCoeffBasedProductType&>(*this); }
- template<int Index>
- const Diagonal<LazyCoeffBasedProductType,Index> diagonal() const
+ template<int DiagonalIndex>
+ const Diagonal<LazyCoeffBasedProductType,DiagonalIndex> diagonal() const
{ return reinterpret_cast<const LazyCoeffBasedProductType&>(*this); }
- const Diagonal<LazyCoeffBasedProductType,Dynamic> diagonal(int index) const
+ const Diagonal<LazyCoeffBasedProductType,Dynamic> diagonal(Index index) const
{ return reinterpret_cast<const LazyCoeffBasedProductType&>(*this).diagonal(index); }
protected:
@@ -235,20 +235,22 @@
*** Scalar path - no vectorization ***
**************************************/
-template<int Index, typename Lhs, typename Rhs, typename RetScalar>
-struct ei_product_coeff_impl<DefaultTraversal, Index, Lhs, Rhs, RetScalar>
+template<int UnrollingIndex, typename Lhs, typename Rhs, typename RetScalar>
+struct ei_product_coeff_impl<DefaultTraversal, UnrollingIndex, Lhs, Rhs, RetScalar>
{
- EIGEN_STRONG_INLINE static void run(int row, int col, const Lhs& lhs, const Rhs& rhs, RetScalar &res)
+ typedef typename Lhs::Index Index;
+ EIGEN_STRONG_INLINE static void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, RetScalar &res)
{
- ei_product_coeff_impl<DefaultTraversal, Index-1, Lhs, Rhs, RetScalar>::run(row, col, lhs, rhs, res);
- res += lhs.coeff(row, Index) * rhs.coeff(Index, col);
+ ei_product_coeff_impl<DefaultTraversal, UnrollingIndex-1, Lhs, Rhs, RetScalar>::run(row, col, lhs, rhs, res);
+ res += lhs.coeff(row, UnrollingIndex) * rhs.coeff(UnrollingIndex, col);
}
};
template<typename Lhs, typename Rhs, typename RetScalar>
struct ei_product_coeff_impl<DefaultTraversal, 0, Lhs, Rhs, RetScalar>
{
- EIGEN_STRONG_INLINE static void run(int row, int col, const Lhs& lhs, const Rhs& rhs, RetScalar &res)
+ typedef typename Lhs::Index Index;
+ EIGEN_STRONG_INLINE static void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, RetScalar &res)
{
res = lhs.coeff(row, 0) * rhs.coeff(0, col);
}
@@ -257,11 +259,12 @@
template<typename Lhs, typename Rhs, typename RetScalar>
struct ei_product_coeff_impl<DefaultTraversal, Dynamic, Lhs, Rhs, RetScalar>
{
- EIGEN_STRONG_INLINE static void run(int row, int col, const Lhs& lhs, const Rhs& rhs, RetScalar& res)
+ typedef typename Lhs::Index Index;
+ EIGEN_STRONG_INLINE static void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, RetScalar& res)
{
ei_assert(lhs.cols()>0 && "you are using an uninitialized matrix");
res = lhs.coeff(row, 0) * rhs.coeff(0, col);
- for(int i = 1; i < lhs.cols(); ++i)
+ for(Index i = 1; i < lhs.cols(); ++i)
res += lhs.coeff(row, i) * rhs.coeff(i, col);
}
};
@@ -270,43 +273,47 @@
template<typename Lhs, typename Rhs, typename RetScalar>
struct ei_product_coeff_impl<DefaultTraversal, -1, Lhs, Rhs, RetScalar>
{
- EIGEN_STRONG_INLINE static void run(int, int, const Lhs&, const Rhs&, RetScalar&) {}
+ typedef typename Lhs::Index Index;
+ EIGEN_STRONG_INLINE static void run(Index, Index, const Lhs&, const Rhs&, RetScalar&) {}
};
/*******************************************
*** Scalar path with inner vectorization ***
*******************************************/
-template<int Index, typename Lhs, typename Rhs, typename PacketScalar>
+template<int UnrollingIndex, typename Lhs, typename Rhs, typename PacketScalar>
struct ei_product_coeff_vectorized_unroller
{
+ typedef typename Lhs::Index Index;
enum { PacketSize = ei_packet_traits<typename Lhs::Scalar>::size };
- EIGEN_STRONG_INLINE static void run(int row, int col, const Lhs& lhs, const Rhs& rhs, typename Lhs::PacketScalar &pres)
+ EIGEN_STRONG_INLINE static void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, typename Lhs::PacketScalar &pres)
{
- ei_product_coeff_vectorized_unroller<Index-PacketSize, Lhs, Rhs, PacketScalar>::run(row, col, lhs, rhs, pres);
- pres = ei_padd(pres, ei_pmul( lhs.template packet<Aligned>(row, Index) , rhs.template packet<Aligned>(Index, col) ));
+ ei_product_coeff_vectorized_unroller<UnrollingIndex-PacketSize, Lhs, Rhs, PacketScalar>::run(row, col, lhs, rhs, pres);
+ pres = ei_padd(pres, ei_pmul( lhs.template packet<Aligned>(row, UnrollingIndex) , rhs.template packet<Aligned>(UnrollingIndex, col) ));
}
};
template<typename Lhs, typename Rhs, typename PacketScalar>
struct ei_product_coeff_vectorized_unroller<0, Lhs, Rhs, PacketScalar>
{
- EIGEN_STRONG_INLINE static void run(int row, int col, const Lhs& lhs, const Rhs& rhs, typename Lhs::PacketScalar &pres)
+ typedef typename Lhs::Index Index;
+ EIGEN_STRONG_INLINE static void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, typename Lhs::PacketScalar &pres)
{
pres = ei_pmul(lhs.template packet<Aligned>(row, 0) , rhs.template packet<Aligned>(0, col));
}
};
-template<int Index, typename Lhs, typename Rhs, typename RetScalar>
-struct ei_product_coeff_impl<InnerVectorizedTraversal, Index, Lhs, Rhs, RetScalar>
+template<int UnrollingIndex, typename Lhs, typename Rhs, typename RetScalar>
+struct ei_product_coeff_impl<InnerVectorizedTraversal, UnrollingIndex, Lhs, Rhs, RetScalar>
{
typedef typename Lhs::PacketScalar PacketScalar;
+ typedef typename Lhs::Index Index;
enum { PacketSize = ei_packet_traits<typename Lhs::Scalar>::size };
- EIGEN_STRONG_INLINE static void run(int row, int col, const Lhs& lhs, const Rhs& rhs, RetScalar &res)
+ EIGEN_STRONG_INLINE static void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, RetScalar &res)
{
PacketScalar pres;
- ei_product_coeff_vectorized_unroller<Index+1-PacketSize, Lhs, Rhs, PacketScalar>::run(row, col, lhs, rhs, pres);
- ei_product_coeff_impl<DefaultTraversal,Index,Lhs,Rhs,RetScalar>::run(row, col, lhs, rhs, res);
+ ei_product_coeff_vectorized_unroller<UnrollingIndex+1-PacketSize, Lhs, Rhs, PacketScalar>::run(row, col, lhs, rhs, pres);
+ ei_product_coeff_impl<DefaultTraversal,UnrollingIndex,Lhs,Rhs,RetScalar>::run(row, col, lhs, rhs, res);
res = ei_predux(pres);
}
};
@@ -314,7 +321,8 @@
template<typename Lhs, typename Rhs, int LhsRows = Lhs::RowsAtCompileTime, int RhsCols = Rhs::ColsAtCompileTime>
struct ei_product_coeff_vectorized_dyn_selector
{
- EIGEN_STRONG_INLINE static void run(int row, int col, const Lhs& lhs, const Rhs& rhs, typename Lhs::Scalar &res)
+ typedef typename Lhs::Index Index;
+ EIGEN_STRONG_INLINE static void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, typename Lhs::Scalar &res)
{
res = lhs.row(row).cwiseProduct(rhs.col(col)).sum();
}
@@ -325,7 +333,8 @@
template<typename Lhs, typename Rhs, int RhsCols>
struct ei_product_coeff_vectorized_dyn_selector<Lhs,Rhs,1,RhsCols>
{
- EIGEN_STRONG_INLINE static void run(int /*row*/, int col, const Lhs& lhs, const Rhs& rhs, typename Lhs::Scalar &res)
+ typedef typename Lhs::Index Index;
+ EIGEN_STRONG_INLINE static void run(Index /*row*/, Index col, const Lhs& lhs, const Rhs& rhs, typename Lhs::Scalar &res)
{
res = lhs.cwiseProduct(rhs.col(col)).sum();
}
@@ -334,7 +343,8 @@
template<typename Lhs, typename Rhs, int LhsRows>
struct ei_product_coeff_vectorized_dyn_selector<Lhs,Rhs,LhsRows,1>
{
- EIGEN_STRONG_INLINE static void run(int row, int /*col*/, const Lhs& lhs, const Rhs& rhs, typename Lhs::Scalar &res)
+ typedef typename Lhs::Index Index;
+ EIGEN_STRONG_INLINE static void run(Index row, Index /*col*/, const Lhs& lhs, const Rhs& rhs, typename Lhs::Scalar &res)
{
res = lhs.row(row).cwiseProduct(rhs).sum();
}
@@ -343,7 +353,8 @@
template<typename Lhs, typename Rhs>
struct ei_product_coeff_vectorized_dyn_selector<Lhs,Rhs,1,1>
{
- EIGEN_STRONG_INLINE static void run(int /*row*/, int /*col*/, const Lhs& lhs, const Rhs& rhs, typename Lhs::Scalar &res)
+ typedef typename Lhs::Index Index;
+ EIGEN_STRONG_INLINE static void run(Index /*row*/, Index /*col*/, const Lhs& lhs, const Rhs& rhs, typename Lhs::Scalar &res)
{
res = lhs.cwiseProduct(rhs).sum();
}
@@ -352,7 +363,8 @@
template<typename Lhs, typename Rhs, typename RetScalar>
struct ei_product_coeff_impl<InnerVectorizedTraversal, Dynamic, Lhs, Rhs, RetScalar>
{
- EIGEN_STRONG_INLINE static void run(int row, int col, const Lhs& lhs, const Rhs& rhs, typename Lhs::Scalar &res)
+ typedef typename Lhs::Index Index;
+ EIGEN_STRONG_INLINE static void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, typename Lhs::Scalar &res)
{
ei_product_coeff_vectorized_dyn_selector<Lhs,Rhs>::run(row, col, lhs, rhs, res);
}
@@ -362,30 +374,33 @@
*** Packet path ***
*******************/
-template<int Index, typename Lhs, typename Rhs, typename PacketScalar, int LoadMode>
-struct ei_product_packet_impl<RowMajor, Index, Lhs, Rhs, PacketScalar, LoadMode>
+template<int UnrollingIndex, typename Lhs, typename Rhs, typename PacketScalar, int LoadMode>
+struct ei_product_packet_impl<RowMajor, UnrollingIndex, Lhs, Rhs, PacketScalar, LoadMode>
{
- EIGEN_STRONG_INLINE static void run(int row, int col, const Lhs& lhs, const Rhs& rhs, PacketScalar &res)
+ typedef typename Lhs::Index Index;
+ EIGEN_STRONG_INLINE static void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, PacketScalar &res)
{
- ei_product_packet_impl<RowMajor, Index-1, Lhs, Rhs, PacketScalar, LoadMode>::run(row, col, lhs, rhs, res);
- res = ei_pmadd(ei_pset1(lhs.coeff(row, Index)), rhs.template packet<LoadMode>(Index, col), res);
+ ei_product_packet_impl<RowMajor, UnrollingIndex-1, Lhs, Rhs, PacketScalar, LoadMode>::run(row, col, lhs, rhs, res);
+ res = ei_pmadd(ei_pset1(lhs.coeff(row, UnrollingIndex)), rhs.template packet<LoadMode>(UnrollingIndex, col), res);
}
};
-template<int Index, typename Lhs, typename Rhs, typename PacketScalar, int LoadMode>
-struct ei_product_packet_impl<ColMajor, Index, Lhs, Rhs, PacketScalar, LoadMode>
+template<int UnrollingIndex, typename Lhs, typename Rhs, typename PacketScalar, int LoadMode>
+struct ei_product_packet_impl<ColMajor, UnrollingIndex, Lhs, Rhs, PacketScalar, LoadMode>
{
- EIGEN_STRONG_INLINE static void run(int row, int col, const Lhs& lhs, const Rhs& rhs, PacketScalar &res)
+ typedef typename Lhs::Index Index;
+ EIGEN_STRONG_INLINE static void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, PacketScalar &res)
{
- ei_product_packet_impl<ColMajor, Index-1, Lhs, Rhs, PacketScalar, LoadMode>::run(row, col, lhs, rhs, res);
- res = ei_pmadd(lhs.template packet<LoadMode>(row, Index), ei_pset1(rhs.coeff(Index, col)), res);
+ ei_product_packet_impl<ColMajor, UnrollingIndex-1, Lhs, Rhs, PacketScalar, LoadMode>::run(row, col, lhs, rhs, res);
+ res = ei_pmadd(lhs.template packet<LoadMode>(row, UnrollingIndex), ei_pset1(rhs.coeff(UnrollingIndex, col)), res);
}
};
template<typename Lhs, typename Rhs, typename PacketScalar, int LoadMode>
struct ei_product_packet_impl<RowMajor, 0, Lhs, Rhs, PacketScalar, LoadMode>
{
- EIGEN_STRONG_INLINE static void run(int row, int col, const Lhs& lhs, const Rhs& rhs, PacketScalar &res)
+ typedef typename Lhs::Index Index;
+ EIGEN_STRONG_INLINE static void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, PacketScalar &res)
{
res = ei_pmul(ei_pset1(lhs.coeff(row, 0)),rhs.template packet<LoadMode>(0, col));
}
@@ -394,7 +409,8 @@
template<typename Lhs, typename Rhs, typename PacketScalar, int LoadMode>
struct ei_product_packet_impl<ColMajor, 0, Lhs, Rhs, PacketScalar, LoadMode>
{
- EIGEN_STRONG_INLINE static void run(int row, int col, const Lhs& lhs, const Rhs& rhs, PacketScalar &res)
+ typedef typename Lhs::Index Index;
+ EIGEN_STRONG_INLINE static void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, PacketScalar &res)
{
res = ei_pmul(lhs.template packet<LoadMode>(row, 0), ei_pset1(rhs.coeff(0, col)));
}
@@ -403,11 +419,12 @@
template<typename Lhs, typename Rhs, typename PacketScalar, int LoadMode>
struct ei_product_packet_impl<RowMajor, Dynamic, Lhs, Rhs, PacketScalar, LoadMode>
{
- EIGEN_STRONG_INLINE static void run(int row, int col, const Lhs& lhs, const Rhs& rhs, PacketScalar& res)
+ typedef typename Lhs::Index Index;
+ EIGEN_STRONG_INLINE static void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, PacketScalar& res)
{
ei_assert(lhs.cols()>0 && "you are using an uninitialized matrix");
res = ei_pmul(ei_pset1(lhs.coeff(row, 0)),rhs.template packet<LoadMode>(0, col));
- for(int i = 1; i < lhs.cols(); ++i)
+ for(Index i = 1; i < lhs.cols(); ++i)
res = ei_pmadd(ei_pset1(lhs.coeff(row, i)), rhs.template packet<LoadMode>(i, col), res);
}
};
@@ -415,11 +432,12 @@
template<typename Lhs, typename Rhs, typename PacketScalar, int LoadMode>
struct ei_product_packet_impl<ColMajor, Dynamic, Lhs, Rhs, PacketScalar, LoadMode>
{
- EIGEN_STRONG_INLINE static void run(int row, int col, const Lhs& lhs, const Rhs& rhs, PacketScalar& res)
+ typedef typename Lhs::Index Index;
+ EIGEN_STRONG_INLINE static void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, PacketScalar& res)
{
ei_assert(lhs.cols()>0 && "you are using an uninitialized matrix");
res = ei_pmul(lhs.template packet<LoadMode>(row, 0), ei_pset1(rhs.coeff(0, col)));
- for(int i = 1; i < lhs.cols(); ++i)
+ for(Index i = 1; i < lhs.cols(); ++i)
res = ei_pmadd(lhs.template packet<LoadMode>(row, i), ei_pset1(rhs.coeff(i, col)), res);
}
};
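
Renaming the template parameter Index to UnrollingIndex is not cosmetic: each specialization now carries a typedef typename Lhs::Index Index, and a template parameter of the same name would make that typedef ill-formed. A schematic illustration (not part of the patch):

  template<int Index, typename Lhs>
  struct bad_unroller
  {
    // typedef typename Lhs::Index Index;  // error: redeclares the template parameter
  };

  template<int UnrollingIndex, typename Lhs>
  struct good_unroller
  {
    typedef typename Lhs::Index Index;     // fine: the unrolling counter keeps its own name
  };
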
diff --git a/Eigen/src/Core/products/GeneralBlockPanelKernel.h b/Eigen/src/Core/products/GeneralBlockPanelKernel.h
index bc697ce..d817155 100644
--- a/Eigen/src/Core/products/GeneralBlockPanelKernel.h
+++ b/Eigen/src/Core/products/GeneralBlockPanelKernel.h
@@ -35,40 +35,40 @@
#endif
// optimized GEneral packed Block * packed Panel product kernel
-template<typename Scalar, int mr, int nr, typename Conj>
+template<typename Scalar, typename Index, int mr, int nr, typename Conj>
struct ei_gebp_kernel
{
- void operator()(Scalar* res, int resStride, const Scalar* blockA, const Scalar* blockB, int rows, int depth, int cols,
- int strideA=-1, int strideB=-1, int offsetA=0, int offsetB=0, Scalar* unpackedB = 0)
+ void operator()(Scalar* res, Index resStride, const Scalar* blockA, const Scalar* blockB, Index rows, Index depth, Index cols,
+ Index strideA=-1, Index strideB=-1, Index offsetA=0, Index offsetB=0, Scalar* unpackedB = 0)
{
typedef typename ei_packet_traits<Scalar>::type PacketType;
enum { PacketSize = ei_packet_traits<Scalar>::size };
if(strideA==-1) strideA = depth;
if(strideB==-1) strideB = depth;
Conj cj;
- int packet_cols = (cols/nr) * nr;
- const int peeled_mc = (rows/mr)*mr;
- const int peeled_mc2 = peeled_mc + (rows-peeled_mc >= PacketSize ? PacketSize : 0);
- const int peeled_kc = (depth/4)*4;
+ Index packet_cols = (cols/nr) * nr;
+ const Index peeled_mc = (rows/mr)*mr;
+ const Index peeled_mc2 = peeled_mc + (rows-peeled_mc >= PacketSize ? PacketSize : 0);
+ const Index peeled_kc = (depth/4)*4;
if(unpackedB==0)
unpackedB = const_cast<Scalar*>(blockB - strideB * nr * PacketSize);
// loops on each micro vertical panel of rhs (depth x nr)
- for(int j2=0; j2<packet_cols; j2+=nr)
+ for(Index j2=0; j2<packet_cols; j2+=nr)
{
// unpack B
{
const Scalar* blB = &blockB[j2*strideB+offsetB*nr];
- int n = depth*nr;
- for(int k=0; k<n; k++)
+ Index n = depth*nr;
+ for(Index k=0; k<n; k++)
ei_pstore(&unpackedB[k*PacketSize], ei_pset1(blB[k]));
/*Scalar* dest = unpackedB;
- for(int k=0; k<n; k+=4*PacketSize)
+ for(Index k=0; k<n; k+=4*PacketSize)
{
#ifdef EIGEN_VECTORIZE_SSE
- const int S = 128;
- const int G = 16;
+ const Index S = 128;
+ const Index G = 16;
_mm_prefetch((const char*)(&blB[S/2+0]), _MM_HINT_T0);
_mm_prefetch((const char*)(&dest[S+0*G]), _MM_HINT_T0);
_mm_prefetch((const char*)(&dest[S+1*G]), _MM_HINT_T0);
@@ -114,7 +114,7 @@
// loops on each micro horizontal panel of lhs (mr x depth)
// => we select a mr x nr micro block of res which is entirely
// stored into mr/packet_size x nr registers.
- for(int i=0; i<peeled_mc; i+=mr)
+ for(Index i=0; i<peeled_mc; i+=mr)
{
const Scalar* blA = &blockA[i*strideA+offsetA*mr];
ei_prefetch(&blA[0]);
@@ -146,7 +146,7 @@
// TODO let's check whether the following peeled loop could not be
// optimized via optimal prefetching from one loop to the other
const Scalar* blB = unpackedB;
- for(int k=0; k<peeled_kc; k+=4)
+ for(Index k=0; k<peeled_kc; k+=4)
{
if(nr==2)
{
@@ -257,7 +257,7 @@
blA += 4*mr;
}
// process remaining peeled loop
- for(int k=peeled_kc; k<depth; k++)
+ for(Index k=peeled_kc; k<depth; k++)
{
if(nr==2)
{
@@ -328,7 +328,7 @@
}
if(rows-peeled_mc>=PacketSize)
{
- int i = peeled_mc;
+ Index i = peeled_mc;
const Scalar* blA = &blockA[i*strideA+offsetA*PacketSize];
ei_prefetch(&blA[0]);
@@ -341,7 +341,7 @@
// performs "inner" product
const Scalar* blB = unpackedB;
- for(int k=0; k<peeled_kc; k+=4)
+ for(Index k=0; k<peeled_kc; k+=4)
{
if(nr==2)
{
@@ -417,7 +417,7 @@
blA += 4*PacketSize;
}
// process remaining peeled loop
- for(int k=peeled_kc; k<depth; k++)
+ for(Index k=peeled_kc; k<depth; k++)
{
if(nr==2)
{
@@ -455,7 +455,7 @@
if(nr==4) ei_pstoreu(&res[(j2+2)*resStride + i], C2);
if(nr==4) ei_pstoreu(&res[(j2+3)*resStride + i], C3);
}
- for(int i=peeled_mc2; i<rows; i++)
+ for(Index i=peeled_mc2; i<rows; i++)
{
const Scalar* blA = &blockA[i*strideA+offsetA];
ei_prefetch(&blA[0]);
@@ -464,7 +464,7 @@
Scalar C0(0), C1(0), C2(0), C3(0);
// TODO directly use blockB ???
const Scalar* blB = unpackedB;//&blockB[j2*strideB+offsetB*nr];
- for(int k=0; k<depth; k++)
+ for(Index k=0; k<depth; k++)
{
if(nr==2)
{
@@ -504,16 +504,16 @@
// process remaining rhs/res columns one at a time
// => do the same but with nr==1
- for(int j2=packet_cols; j2<cols; j2++)
+ for(Index j2=packet_cols; j2<cols; j2++)
{
// unpack B
{
const Scalar* blB = &blockB[j2*strideB+offsetB];
- for(int k=0; k<depth; k++)
+ for(Index k=0; k<depth; k++)
ei_pstore(&unpackedB[k*PacketSize], ei_pset1(blB[k]));
}
- for(int i=0; i<peeled_mc; i+=mr)
+ for(Index i=0; i<peeled_mc; i+=mr)
{
const Scalar* blA = &blockA[i*strideA+offsetA*mr];
ei_prefetch(&blA[0]);
@@ -526,7 +526,7 @@
C4 = ei_ploadu(&res[(j2+0)*resStride + i + PacketSize]);
const Scalar* blB = unpackedB;
- for(int k=0; k<depth; k++)
+ for(Index k=0; k<depth; k++)
{
PacketType B0, A0, A1, T0, T1;
@@ -545,14 +545,14 @@
}
if(rows-peeled_mc>=PacketSize)
{
- int i = peeled_mc;
+ Index i = peeled_mc;
const Scalar* blA = &blockA[i*strideA+offsetA*PacketSize];
ei_prefetch(&blA[0]);
PacketType C0 = ei_ploadu(&res[(j2+0)*resStride + i]);
const Scalar* blB = unpackedB;
- for(int k=0; k<depth; k++)
+ for(Index k=0; k<depth; k++)
{
C0 = cj.pmadd(ei_pload(blA), ei_pload(blB), C0);
blB += PacketSize;
@@ -561,7 +561,7 @@
ei_pstoreu(&res[(j2+0)*resStride + i], C0);
}
- for(int i=peeled_mc2; i<rows; i++)
+ for(Index i=peeled_mc2; i<rows; i++)
{
const Scalar* blA = &blockA[i*strideA+offsetA];
ei_prefetch(&blA[0]);
@@ -570,7 +570,7 @@
Scalar C0(0);
// FIXME directly use blockB ??
const Scalar* blB = unpackedB;
- for(int k=0; k<depth; k++)
+ for(Index k=0; k<depth; k++)
C0 = cj.pmadd(blA[k], blB[k*PacketSize], C0);
res[(j2+0)*resStride + i] += C0;
}
@@ -594,39 +594,39 @@
//
// 32 33 34 35 ...
// 36 37 38 39 ...
-template<typename Scalar, int mr, int StorageOrder, bool Conjugate, bool PanelMode>
+template<typename Scalar, typename Index, int mr, int StorageOrder, bool Conjugate, bool PanelMode>
struct ei_gemm_pack_lhs
{
- void operator()(Scalar* blockA, const Scalar* EIGEN_RESTRICT _lhs, int lhsStride, int depth, int rows,
- int stride=0, int offset=0)
+ void operator()(Scalar* blockA, const Scalar* EIGEN_RESTRICT _lhs, Index lhsStride, Index depth, Index rows,
+ Index stride=0, Index offset=0)
{
enum { PacketSize = ei_packet_traits<Scalar>::size };
ei_assert(((!PanelMode) && stride==0 && offset==0) || (PanelMode && stride>=depth && offset<=stride));
ei_conj_if<NumTraits<Scalar>::IsComplex && Conjugate> cj;
- ei_const_blas_data_mapper<Scalar, StorageOrder> lhs(_lhs,lhsStride);
- int count = 0;
- int peeled_mc = (rows/mr)*mr;
- for(int i=0; i<peeled_mc; i+=mr)
+ ei_const_blas_data_mapper<Scalar, Index, StorageOrder> lhs(_lhs,lhsStride);
+ Index count = 0;
+ Index peeled_mc = (rows/mr)*mr;
+ for(Index i=0; i<peeled_mc; i+=mr)
{
if(PanelMode) count += mr * offset;
- for(int k=0; k<depth; k++)
- for(int w=0; w<mr; w++)
+ for(Index k=0; k<depth; k++)
+ for(Index w=0; w<mr; w++)
blockA[count++] = cj(lhs(i+w, k));
if(PanelMode) count += mr * (stride-offset-depth);
}
if(rows-peeled_mc>=PacketSize)
{
if(PanelMode) count += PacketSize*offset;
- for(int k=0; k<depth; k++)
- for(int w=0; w<PacketSize; w++)
+ for(Index k=0; k<depth; k++)
+ for(Index w=0; w<PacketSize; w++)
blockA[count++] = cj(lhs(peeled_mc+w, k));
if(PanelMode) count += PacketSize * (stride-offset-depth);
peeled_mc += PacketSize;
}
- for(int i=peeled_mc; i<rows; i++)
+ for(Index i=peeled_mc; i<rows; i++)
{
if(PanelMode) count += offset;
- for(int k=0; k<depth; k++)
+ for(Index k=0; k<depth; k++)
blockA[count++] = cj(lhs(i, k));
if(PanelMode) count += (stride-offset-depth);
}
@@ -640,19 +640,19 @@
// 4 5 6 7 16 17 18 19 25 28
// 8 9 10 11 20 21 22 23 26 29
// . . . . . . . . . .
-template<typename Scalar, int nr, bool PanelMode>
-struct ei_gemm_pack_rhs<Scalar, nr, ColMajor, PanelMode>
+template<typename Scalar, typename Index, int nr, bool PanelMode>
+struct ei_gemm_pack_rhs<Scalar, Index, nr, ColMajor, PanelMode>
{
typedef typename ei_packet_traits<Scalar>::type Packet;
enum { PacketSize = ei_packet_traits<Scalar>::size };
- void operator()(Scalar* blockB, const Scalar* rhs, int rhsStride, Scalar alpha, int depth, int cols,
- int stride=0, int offset=0)
+ void operator()(Scalar* blockB, const Scalar* rhs, Index rhsStride, Scalar alpha, Index depth, Index cols,
+ Index stride=0, Index offset=0)
{
ei_assert(((!PanelMode) && stride==0 && offset==0) || (PanelMode && stride>=depth && offset<=stride));
bool hasAlpha = alpha != Scalar(1);
- int packet_cols = (cols/nr) * nr;
- int count = 0;
- for(int j2=0; j2<packet_cols; j2+=nr)
+ Index packet_cols = (cols/nr) * nr;
+ Index count = 0;
+ for(Index j2=0; j2<packet_cols; j2+=nr)
{
// skip what we have before
if(PanelMode) count += nr * offset;
@@ -661,7 +661,7 @@
const Scalar* b2 = &rhs[(j2+2)*rhsStride];
const Scalar* b3 = &rhs[(j2+3)*rhsStride];
if (hasAlpha)
- for(int k=0; k<depth; k++)
+ for(Index k=0; k<depth; k++)
{
blockB[count+0] = alpha*b0[k];
blockB[count+1] = alpha*b1[k];
@@ -670,7 +670,7 @@
count += nr;
}
else
- for(int k=0; k<depth; k++)
+ for(Index k=0; k<depth; k++)
{
blockB[count+0] = b0[k];
blockB[count+1] = b1[k];
@@ -683,18 +683,18 @@
}
// copy the remaining columns one at a time (nr==1)
- for(int j2=packet_cols; j2<cols; ++j2)
+ for(Index j2=packet_cols; j2<cols; ++j2)
{
if(PanelMode) count += offset;
const Scalar* b0 = &rhs[(j2+0)*rhsStride];
if (hasAlpha)
- for(int k=0; k<depth; k++)
+ for(Index k=0; k<depth; k++)
{
blockB[count] = alpha*b0[k];
count += 1;
}
else
- for(int k=0; k<depth; k++)
+ for(Index k=0; k<depth; k++)
{
blockB[count] = b0[k];
count += 1;
@@ -705,24 +705,24 @@
};
// this version is optimized for row major matrices
-template<typename Scalar, int nr, bool PanelMode>
-struct ei_gemm_pack_rhs<Scalar, nr, RowMajor, PanelMode>
+template<typename Scalar, typename Index, int nr, bool PanelMode>
+struct ei_gemm_pack_rhs<Scalar, Index, nr, RowMajor, PanelMode>
{
enum { PacketSize = ei_packet_traits<Scalar>::size };
- void operator()(Scalar* blockB, const Scalar* rhs, int rhsStride, Scalar alpha, int depth, int cols,
- int stride=0, int offset=0)
+ void operator()(Scalar* blockB, const Scalar* rhs, Index rhsStride, Scalar alpha, Index depth, Index cols,
+ Index stride=0, Index offset=0)
{
ei_assert(((!PanelMode) && stride==0 && offset==0) || (PanelMode && stride>=depth && offset<=stride));
bool hasAlpha = alpha != Scalar(1);
- int packet_cols = (cols/nr) * nr;
- int count = 0;
- for(int j2=0; j2<packet_cols; j2+=nr)
+ Index packet_cols = (cols/nr) * nr;
+ Index count = 0;
+ for(Index j2=0; j2<packet_cols; j2+=nr)
{
// skip what we have before
if(PanelMode) count += nr * offset;
if (hasAlpha)
{
- for(int k=0; k<depth; k++)
+ for(Index k=0; k<depth; k++)
{
const Scalar* b0 = &rhs[k*rhsStride + j2];
blockB[count+0] = alpha*b0[0];
@@ -734,7 +734,7 @@
}
else
{
- for(int k=0; k<depth; k++)
+ for(Index k=0; k<depth; k++)
{
const Scalar* b0 = &rhs[k*rhsStride + j2];
blockB[count+0] = b0[0];
@@ -748,11 +748,11 @@
if(PanelMode) count += nr * (stride-offset-depth);
}
// copy the remaining columns one at a time (nr==1)
- for(int j2=packet_cols; j2<cols; ++j2)
+ for(Index j2=packet_cols; j2<cols; ++j2)
{
if(PanelMode) count += offset;
const Scalar* b0 = &rhs[j2];
- for(int k=0; k<depth; k++)
+ for(Index k=0; k<depth; k++)
{
blockB[count] = alpha*b0[k*rhsStride];
count += 1;
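
The peeling bounds used throughout the kernel (peeled_mc, peeled_mc2, peeled_kc) are plain integer arithmetic and now live entirely in Index. Worked out in isolation (a sketch, not part of the patch; Index is assumed to be a signed type such as std::ptrdiff_t):

  #include <cstddef>

  typedef std::ptrdiff_t Index;

  int main()
  {
    const Index rows = 11, mr = 4, PacketSize = 4;
    const Index peeled_mc  = (rows/mr)*mr;                  // 8: these rows go mr at a time
    const Index peeled_mc2 = peeled_mc
        + (rows-peeled_mc >= PacketSize ? PacketSize : 0);  // still 8: only 3 rows remain
    return (peeled_mc == 8 && peeled_mc2 == 8) ? 0 : 1;     // the last 3 rows go one by one
  }
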
diff --git a/Eigen/src/Core/products/GeneralMatrixMatrix.h b/Eigen/src/Core/products/GeneralMatrixMatrix.h
index 7bb26c1..991977c 100644
--- a/Eigen/src/Core/products/GeneralMatrixMatrix.h
+++ b/Eigen/src/Core/products/GeneralMatrixMatrix.h
@@ -29,21 +29,21 @@
/* Specialization for a row-major destination matrix => simple transposition of the product */
template<
- typename Scalar,
+ typename Scalar, typename Index,
int LhsStorageOrder, bool ConjugateLhs,
int RhsStorageOrder, bool ConjugateRhs>
-struct ei_general_matrix_matrix_product<Scalar,LhsStorageOrder,ConjugateLhs,RhsStorageOrder,ConjugateRhs,RowMajor>
+struct ei_general_matrix_matrix_product<Scalar,Index,LhsStorageOrder,ConjugateLhs,RhsStorageOrder,ConjugateRhs,RowMajor>
{
static EIGEN_STRONG_INLINE void run(
- int rows, int cols, int depth,
- const Scalar* lhs, int lhsStride,
- const Scalar* rhs, int rhsStride,
- Scalar* res, int resStride,
+ Index rows, Index cols, Index depth,
+ const Scalar* lhs, Index lhsStride,
+ const Scalar* rhs, Index rhsStride,
+ Scalar* res, Index resStride,
Scalar alpha,
- GemmParallelInfo<Scalar>* info = 0)
+ GemmParallelInfo<Scalar, Index>* info = 0)
{
// transpose the product such that the result is column major
- ei_general_matrix_matrix_product<Scalar,
+ ei_general_matrix_matrix_product<Scalar, Index,
RhsStorageOrder==RowMajor ? ColMajor : RowMajor,
ConjugateRhs,
LhsStorageOrder==RowMajor ? ColMajor : RowMajor,
@@ -56,20 +56,20 @@
/* Specialization for a col-major destination matrix
* => Blocking algorithm following Goto's paper */
template<
- typename Scalar,
+ typename Scalar, typename Index,
int LhsStorageOrder, bool ConjugateLhs,
int RhsStorageOrder, bool ConjugateRhs>
-struct ei_general_matrix_matrix_product<Scalar,LhsStorageOrder,ConjugateLhs,RhsStorageOrder,ConjugateRhs,ColMajor>
+struct ei_general_matrix_matrix_product<Scalar,Index,LhsStorageOrder,ConjugateLhs,RhsStorageOrder,ConjugateRhs,ColMajor>
{
-static void run(int rows, int cols, int depth,
- const Scalar* _lhs, int lhsStride,
- const Scalar* _rhs, int rhsStride,
- Scalar* res, int resStride,
+static void run(Index rows, Index cols, Index depth,
+ const Scalar* _lhs, Index lhsStride,
+ const Scalar* _rhs, Index rhsStride,
+ Scalar* res, Index resStride,
Scalar alpha,
- GemmParallelInfo<Scalar>* info = 0)
+ GemmParallelInfo<Scalar,Index>* info = 0)
{
- ei_const_blas_data_mapper<Scalar, LhsStorageOrder> lhs(_lhs,lhsStride);
- ei_const_blas_data_mapper<Scalar, RhsStorageOrder> rhs(_rhs,rhsStride);
+ ei_const_blas_data_mapper<Scalar, Index, LhsStorageOrder> lhs(_lhs,lhsStride);
+ ei_const_blas_data_mapper<Scalar, Index, RhsStorageOrder> rhs(_rhs,rhsStride);
if (ConjugateRhs)
alpha = ei_conj(alpha);
@@ -77,19 +77,19 @@
typedef typename ei_packet_traits<Scalar>::type PacketType;
typedef ei_product_blocking_traits<Scalar> Blocking;
- int kc = std::min<int>(Blocking::Max_kc,depth); // cache block size along the K direction
- int mc = std::min<int>(Blocking::Max_mc,rows); // cache block size along the M direction
+ Index kc = std::min<Index>(Blocking::Max_kc,depth); // cache block size along the K direction
+ Index mc = std::min<Index>(Blocking::Max_mc,rows); // cache block size along the M direction
- ei_gemm_pack_rhs<Scalar, Blocking::nr, RhsStorageOrder> pack_rhs;
- ei_gemm_pack_lhs<Scalar, Blocking::mr, LhsStorageOrder> pack_lhs;
- ei_gebp_kernel<Scalar, Blocking::mr, Blocking::nr, ei_conj_helper<ConjugateLhs,ConjugateRhs> > gebp;
+ ei_gemm_pack_rhs<Scalar, Index, Blocking::nr, RhsStorageOrder> pack_rhs;
+ ei_gemm_pack_lhs<Scalar, Index, Blocking::mr, LhsStorageOrder> pack_lhs;
+ ei_gebp_kernel<Scalar, Index, Blocking::mr, Blocking::nr, ei_conj_helper<ConjugateLhs,ConjugateRhs> > gebp;
#ifdef EIGEN_HAS_OPENMP
if(info)
{
// this is the parallel version!
- int tid = omp_get_thread_num();
- int threads = omp_get_num_threads();
+ Index tid = omp_get_thread_num();
+ Index threads = omp_get_num_threads();
Scalar* blockA = ei_aligned_stack_new(Scalar, kc*mc);
std::size_t sizeW = kc*Blocking::PacketSize*Blocking::nr*8;
@@ -98,9 +98,9 @@
// For each horizontal panel of the rhs, and corresponding panel of the lhs...
// (==GEMM_VAR1)
- for(int k=0; k<depth; k+=kc)
+ for(Index k=0; k<depth; k+=kc)
{
- const int actual_kc = std::min(k+kc,depth)-k; // => rows of B', and cols of the A'
+ const Index actual_kc = std::min(k+kc,depth)-k; // => rows of B', and cols of the A'
// In order to reduce the chance that a thread has to wait for the other,
// let's start by packing A'.
@@ -121,9 +121,9 @@
info[tid].sync = k;
// Computes C_i += A' * B' per B'_j
- for(int shift=0; shift<threads; ++shift)
+ for(Index shift=0; shift<threads; ++shift)
{
- int j = (tid+shift)%threads;
+ Index j = (tid+shift)%threads;
// At this point we have to make sure that B'_j has been updated by the thread j,
// we use testAndSetOrdered to mimic a volatile access.
@@ -135,9 +135,9 @@
}
// Then keep going as usual with the remaining A'
- for(int i=mc; i<rows; i+=mc)
+ for(Index i=mc; i<rows; i+=mc)
{
- const int actual_mc = std::min(i+mc,rows)-i;
+ const Index actual_mc = std::min(i+mc,rows)-i;
// pack A_i,k to A'
pack_lhs(blockA, &lhs(i,k), lhsStride, actual_kc, actual_mc);
@@ -148,7 +148,7 @@
// Release all the sub blocks B'_j of B' for the current thread,
// i.e., we simply decrement the number of users by 1
- for(int j=0; j<threads; ++j)
+ for(Index j=0; j<threads; ++j)
#pragma omp atomic
--(info[j].users);
}
@@ -168,9 +168,9 @@
// For each horizontal panel of the rhs, and corresponding panel of the lhs...
// (==GEMM_VAR1)
- for(int k2=0; k2<depth; k2+=kc)
+ for(Index k2=0; k2<depth; k2+=kc)
{
- const int actual_kc = std::min(k2+kc,depth)-k2;
+ const Index actual_kc = std::min(k2+kc,depth)-k2;
// OK, here we have selected one horizontal panel of rhs and one vertical panel of lhs.
// => Pack rhs's panel into a sequential chunk of memory (L2 caching)
@@ -181,9 +181,9 @@
// For each mc x kc block of the lhs's vertical panel...
// (==GEPP_VAR1)
- for(int i2=0; i2<rows; i2+=mc)
+ for(Index i2=0; i2<rows; i2+=mc)
{
- const int actual_mc = std::min(i2+mc,rows)-i2;
+ const Index actual_mc = std::min(i2+mc,rows)-i2;
// We pack the lhs's block into a sequential chunk of memory (L1 caching)
// Note that this block will be read a very high number of times, which is equal to the number of
@@ -215,7 +215,7 @@
: ei_traits<ProductBase<GeneralProduct<Lhs,Rhs,GemmProduct>, Lhs, Rhs> >
{};
-template<typename Scalar, typename Gemm, typename Lhs, typename Rhs, typename Dest>
+template<typename Scalar, typename Index, typename Gemm, typename Lhs, typename Rhs, typename Dest>
struct ei_gemm_functor
{
typedef typename Rhs::Scalar BlockBScalar;
@@ -224,7 +224,7 @@
: m_lhs(lhs), m_rhs(rhs), m_dest(dest), m_actualAlpha(actualAlpha)
{}
- void operator() (int row, int rows, int col=0, int cols=-1, GemmParallelInfo<BlockBScalar>* info=0) const
+ void operator() (Index row, Index rows, Index col=0, Index cols=-1, GemmParallelInfo<BlockBScalar,Index>* info=0) const
{
if(cols==-1)
cols = m_rhs.cols();
@@ -237,9 +237,9 @@
}
- int sharedBlockBSize() const
+ Index sharedBlockBSize() const
{
- return std::min<int>(ei_product_blocking_traits<Scalar>::Max_kc,m_rhs.rows()) * m_rhs.cols();
+ return std::min<Index>(ei_product_blocking_traits<Scalar>::Max_kc,m_rhs.rows()) * m_rhs.cols();
}
protected:
@@ -273,9 +273,9 @@
* RhsBlasTraits::extractScalarFactor(m_rhs);
typedef ei_gemm_functor<
- Scalar,
+ Scalar, Index,
ei_general_matrix_matrix_product<
- Scalar,
+ Scalar, Index,
(_ActualLhsType::Flags&RowMajorBit) ? RowMajor : ColMajor, bool(LhsBlasTraits::NeedToConjugate),
(_ActualRhsType::Flags&RowMajorBit) ? RowMajor : ColMajor, bool(RhsBlasTraits::NeedToConjugate),
(Dest::Flags&RowMajorBit) ? RowMajor : ColMajor>,
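
The RowMajor specialization above works because a row-major product C = A*B occupies exactly the same memory as the column-major product C^T = B^T * A^T, so only the column-major case needs a real implementation. A self-contained sketch of that identity (not part of the patch; the real code additionally flips the storage-order and conjugation flags):

  #include <cstddef>

  typedef std::ptrdiff_t Index;

  // reference column-major kernel: res(i,j) += lhs(i,k) * rhs(k,j)
  void gemm_colmajor(Index rows, Index cols, Index depth,
                     const double* lhs, Index lhsStride,
                     const double* rhs, Index rhsStride,
                     double* res, Index resStride)
  {
    for(Index j=0; j<cols; ++j)
      for(Index i=0; i<rows; ++i)
        for(Index k=0; k<depth; ++k)
          res[i + j*resStride] += lhs[i + k*lhsStride] * rhs[k + j*rhsStride];
  }

  // a row-major product is the transposed column-major product:
  // swap the operands and the row/col dimensions, then delegate
  void gemm_rowmajor(Index rows, Index cols, Index depth,
                     const double* lhs, Index lhsStride,
                     const double* rhs, Index rhsStride,
                     double* res, Index resStride)
  {
    gemm_colmajor(cols, rows, depth, rhs, rhsStride, lhs, lhsStride, res, resStride);
  }
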
diff --git a/Eigen/src/Core/products/GeneralMatrixVector.h b/Eigen/src/Core/products/GeneralMatrixVector.h
index 3296f32..e2c5c76 100644
--- a/Eigen/src/Core/products/GeneralMatrixVector.h
+++ b/Eigen/src/Core/products/GeneralMatrixVector.h
@@ -32,11 +32,11 @@
* same alignment pattern.
* TODO: since rhs gets evaluated only once, no need to evaluate it
*/
-template<bool ConjugateLhs, bool ConjugateRhs, typename Scalar, typename RhsType>
+template<bool ConjugateLhs, bool ConjugateRhs, typename Scalar, typename Index, typename RhsType>
static EIGEN_DONT_INLINE
void ei_cache_friendly_product_colmajor_times_vector(
- int size,
- const Scalar* lhs, int lhsStride,
+ Index size,
+ const Scalar* lhs, Index lhsStride,
const RhsType& rhs,
Scalar* res,
Scalar alpha)
@@ -59,30 +59,30 @@
typedef typename NumTraits<Scalar>::Real RealScalar;
typedef typename ei_packet_traits<Scalar>::type Packet;
- const int PacketSize = sizeof(Packet)/sizeof(Scalar);
+ const Index PacketSize = sizeof(Packet)/sizeof(Scalar);
enum { AllAligned = 0, EvenAligned, FirstAligned, NoneAligned };
- const int columnsAtOnce = 4;
- const int peels = 2;
- const int PacketAlignedMask = PacketSize-1;
- const int PeelAlignedMask = PacketSize*peels-1;
+ const Index columnsAtOnce = 4;
+ const Index peels = 2;
+ const Index PacketAlignedMask = PacketSize-1;
+ const Index PeelAlignedMask = PacketSize*peels-1;
// How many coeffs of the result do we have to skip to be aligned.
// Here we assume data are at least aligned on the base scalar type.
- int alignedStart = ei_first_aligned(res,size);
- int alignedSize = PacketSize>1 ? alignedStart + ((size-alignedStart) & ~PacketAlignedMask) : 0;
- const int peeledSize = peels>1 ? alignedStart + ((alignedSize-alignedStart) & ~PeelAlignedMask) : alignedStart;
+ Index alignedStart = ei_first_aligned(res,size);
+ Index alignedSize = PacketSize>1 ? alignedStart + ((size-alignedStart) & ~PacketAlignedMask) : 0;
+ const Index peeledSize = peels>1 ? alignedStart + ((alignedSize-alignedStart) & ~PeelAlignedMask) : alignedStart;
- const int alignmentStep = PacketSize>1 ? (PacketSize - lhsStride % PacketSize) & PacketAlignedMask : 0;
- int alignmentPattern = alignmentStep==0 ? AllAligned
+ const Index alignmentStep = PacketSize>1 ? (PacketSize - lhsStride % PacketSize) & PacketAlignedMask : 0;
+ Index alignmentPattern = alignmentStep==0 ? AllAligned
: alignmentStep==(PacketSize/2) ? EvenAligned
: FirstAligned;
// we cannot assume the first element is aligned because of sub-matrices
- const int lhsAlignmentOffset = ei_first_aligned(lhs,size);
+ const Index lhsAlignmentOffset = ei_first_aligned(lhs,size);
// find how many columns we have to skip to be aligned with the result (if possible)
- int skipColumns = 0;
+ Index skipColumns = 0;
// if the data cannot be aligned (TODO add some compile time tests when possible, e.g. for floats)
if( (size_t(lhs)%sizeof(RealScalar)) || (size_t(res)%sizeof(RealScalar)) )
{
@@ -114,11 +114,11 @@
|| (size_t(lhs+alignedStart+lhsStride*skipColumns)%sizeof(Packet))==0);
}
- int offset1 = (FirstAligned && alignmentStep==1?3:1);
- int offset3 = (FirstAligned && alignmentStep==1?1:3);
+ Index offset1 = (FirstAligned && alignmentStep==1?3:1);
+ Index offset3 = (FirstAligned && alignmentStep==1?1:3);
- int columnBound = ((rhs.size()-skipColumns)/columnsAtOnce)*columnsAtOnce + skipColumns;
- for (int i=skipColumns; i<columnBound; i+=columnsAtOnce)
+ Index columnBound = ((rhs.size()-skipColumns)/columnsAtOnce)*columnsAtOnce + skipColumns;
+ for (Index i=skipColumns; i<columnBound; i+=columnsAtOnce)
{
Packet ptmp0 = ei_pset1(alpha*rhs[i]), ptmp1 = ei_pset1(alpha*rhs[i+offset1]),
ptmp2 = ei_pset1(alpha*rhs[i+2]), ptmp3 = ei_pset1(alpha*rhs[i+offset3]);
@@ -131,7 +131,7 @@
{
/* explicit vectorization */
// process initial unaligned coeffs
- for (int j=0; j<alignedStart; ++j)
+ for (Index j=0; j<alignedStart; ++j)
{
res[j] = cj.pmadd(lhs0[j], ei_pfirst(ptmp0), res[j]);
res[j] = cj.pmadd(lhs1[j], ei_pfirst(ptmp1), res[j]);
@@ -144,11 +144,11 @@
switch(alignmentPattern)
{
case AllAligned:
- for (int j = alignedStart; j<alignedSize; j+=PacketSize)
+ for (Index j = alignedStart; j<alignedSize; j+=PacketSize)
_EIGEN_ACCUMULATE_PACKETS(d,d,d);
break;
case EvenAligned:
- for (int j = alignedStart; j<alignedSize; j+=PacketSize)
+ for (Index j = alignedStart; j<alignedSize; j+=PacketSize)
_EIGEN_ACCUMULATE_PACKETS(d,du,d);
break;
case FirstAligned:
@@ -160,7 +160,7 @@
A02 = ei_pload(&lhs2[alignedStart-2]);
A03 = ei_pload(&lhs3[alignedStart-3]);
- for (int j = alignedStart; j<peeledSize; j+=peels*PacketSize)
+ for (Index j = alignedStart; j<peeledSize; j+=peels*PacketSize)
{
A11 = ei_pload(&lhs1[j-1+PacketSize]); ei_palign<1>(A01,A11);
A12 = ei_pload(&lhs2[j-2+PacketSize]); ei_palign<2>(A02,A12);
@@ -184,11 +184,11 @@
ei_pstore(&res[j+PacketSize],A10);
}
}
- for (int j = peeledSize; j<alignedSize; j+=PacketSize)
+ for (Index j = peeledSize; j<alignedSize; j+=PacketSize)
_EIGEN_ACCUMULATE_PACKETS(d,du,du);
break;
default:
- for (int j = alignedStart; j<alignedSize; j+=PacketSize)
+ for (Index j = alignedStart; j<alignedSize; j+=PacketSize)
_EIGEN_ACCUMULATE_PACKETS(du,du,du);
break;
}
@@ -196,7 +196,7 @@
} // end explicit vectorization
/* process remaining coeffs (or all if there is no explicit vectorization) */
- for (int j=alignedSize; j<size; ++j)
+ for (Index j=alignedSize; j<size; ++j)
{
res[j] = cj.pmadd(lhs0[j], ei_pfirst(ptmp0), res[j]);
res[j] = cj.pmadd(lhs1[j], ei_pfirst(ptmp1), res[j]);
@@ -206,11 +206,11 @@
}
// process remaining first and last columns (at most columnsAtOnce-1)
- int end = rhs.size();
- int start = columnBound;
+ Index end = rhs.size();
+ Index start = columnBound;
do
{
- for (int i=start; i<end; ++i)
+ for (Index i=start; i<end; ++i)
{
Packet ptmp0 = ei_pset1(alpha*rhs[i]);
const Scalar* lhs0 = lhs + i*lhsStride;
@@ -219,20 +219,20 @@
{
/* explicit vectorization */
// process first unaligned result's coeffs
- for (int j=0; j<alignedStart; ++j)
+ for (Index j=0; j<alignedStart; ++j)
res[j] += cj.pmul(lhs0[j], ei_pfirst(ptmp0));
// process aligned result's coeffs
if ((size_t(lhs0+alignedStart)%sizeof(Packet))==0)
- for (int j = alignedStart;j<alignedSize;j+=PacketSize)
+ for (Index j = alignedStart;j<alignedSize;j+=PacketSize)
ei_pstore(&res[j], cj.pmadd(ei_pload(&lhs0[j]), ptmp0, ei_pload(&res[j])));
else
- for (int j = alignedStart;j<alignedSize;j+=PacketSize)
+ for (Index j = alignedStart;j<alignedSize;j+=PacketSize)
ei_pstore(&res[j], cj.pmadd(ei_ploadu(&lhs0[j]), ptmp0, ei_pload(&res[j])));
}
// process remaining scalars (or all if no explicit vectorization)
- for (int j=alignedSize; j<size; ++j)
+ for (Index j=alignedSize; j<size; ++j)
res[j] += cj.pmul(lhs0[j], ei_pfirst(ptmp0));
}
if (skipColumns)
@@ -248,10 +248,10 @@
}
// TODO add peeling to mask unaligned load/stores
-template<bool ConjugateLhs, bool ConjugateRhs, typename Scalar, typename ResType>
+template<bool ConjugateLhs, bool ConjugateRhs, typename Scalar, typename Index, typename ResType>
static EIGEN_DONT_INLINE void ei_cache_friendly_product_rowmajor_times_vector(
- const Scalar* lhs, int lhsStride,
- const Scalar* rhs, int rhsSize,
+ const Scalar* lhs, Index lhsStride,
+ const Scalar* rhs, Index rhsSize,
ResType& res,
Scalar alpha)
{
@@ -270,32 +270,32 @@
typedef typename NumTraits<Scalar>::Real RealScalar;
typedef typename ei_packet_traits<Scalar>::type Packet;
- const int PacketSize = sizeof(Packet)/sizeof(Scalar);
+ const Index PacketSize = sizeof(Packet)/sizeof(Scalar);
enum { AllAligned=0, EvenAligned=1, FirstAligned=2, NoneAligned=3 };
- const int rowsAtOnce = 4;
- const int peels = 2;
- const int PacketAlignedMask = PacketSize-1;
- const int PeelAlignedMask = PacketSize*peels-1;
- const int size = rhsSize;
+ const Index rowsAtOnce = 4;
+ const Index peels = 2;
+ const Index PacketAlignedMask = PacketSize-1;
+ const Index PeelAlignedMask = PacketSize*peels-1;
+ const Index size = rhsSize;
// How many coeffs of the result do we have to skip to be aligned.
// Here we assume data are at least aligned on the base scalar type
// if that's not the case then vectorization is discarded, see below.
- int alignedStart = ei_first_aligned(rhs, size);
- int alignedSize = PacketSize>1 ? alignedStart + ((size-alignedStart) & ~PacketAlignedMask) : 0;
- const int peeledSize = peels>1 ? alignedStart + ((alignedSize-alignedStart) & ~PeelAlignedMask) : alignedStart;
+ Index alignedStart = ei_first_aligned(rhs, size);
+ Index alignedSize = PacketSize>1 ? alignedStart + ((size-alignedStart) & ~PacketAlignedMask) : 0;
+ const Index peeledSize = peels>1 ? alignedStart + ((alignedSize-alignedStart) & ~PeelAlignedMask) : alignedStart;
- const int alignmentStep = PacketSize>1 ? (PacketSize - lhsStride % PacketSize) & PacketAlignedMask : 0;
- int alignmentPattern = alignmentStep==0 ? AllAligned
+ const Index alignmentStep = PacketSize>1 ? (PacketSize - lhsStride % PacketSize) & PacketAlignedMask : 0;
+ Index alignmentPattern = alignmentStep==0 ? AllAligned
: alignmentStep==(PacketSize/2) ? EvenAligned
: FirstAligned;
// we cannot assume the first element is aligned because of sub-matrices
- const int lhsAlignmentOffset = ei_first_aligned(lhs,size);
+ const Index lhsAlignmentOffset = ei_first_aligned(lhs,size);
// find how many rows we have to skip to be aligned with rhs (if possible)
- int skipRows = 0;
+ Index skipRows = 0;
// if the data cannot be aligned (TODO add some compile time tests when possible, e.g. for floats)
if( (size_t(lhs)%sizeof(RealScalar)) || (size_t(rhs)%sizeof(RealScalar)) )
{
@@ -317,7 +317,7 @@
}
else
{
- skipRows = std::min(skipRows,res.size());
+ skipRows = std::min(skipRows,Index(res.size()));
// note that the skipped rows are processed later.
}
ei_internal_assert( alignmentPattern==NoneAligned
@@ -327,11 +327,11 @@
|| (size_t(lhs+alignedStart+lhsStride*skipRows)%sizeof(Packet))==0);
}
- int offset1 = (FirstAligned && alignmentStep==1?3:1);
- int offset3 = (FirstAligned && alignmentStep==1?1:3);
+ Index offset1 = (FirstAligned && alignmentStep==1?3:1);
+ Index offset3 = (FirstAligned && alignmentStep==1?1:3);
- int rowBound = ((res.size()-skipRows)/rowsAtOnce)*rowsAtOnce + skipRows;
- for (int i=skipRows; i<rowBound; i+=rowsAtOnce)
+ Index rowBound = ((res.size()-skipRows)/rowsAtOnce)*rowsAtOnce + skipRows;
+ for (Index i=skipRows; i<rowBound; i+=rowsAtOnce)
{
Scalar tmp0 = Scalar(0), tmp1 = Scalar(0), tmp2 = Scalar(0), tmp3 = Scalar(0);
@@ -346,7 +346,7 @@
// process initial unaligned coeffs
// FIXME this loop gets vectorized by the compiler!
- for (int j=0; j<alignedStart; ++j)
+ for (Index j=0; j<alignedStart; ++j)
{
Scalar b = rhs[j];
tmp0 += cj.pmul(lhs0[j],b); tmp1 += cj.pmul(lhs1[j],b);
@@ -358,11 +358,11 @@
switch(alignmentPattern)
{
case AllAligned:
- for (int j = alignedStart; j<alignedSize; j+=PacketSize)
+ for (Index j = alignedStart; j<alignedSize; j+=PacketSize)
_EIGEN_ACCUMULATE_PACKETS(d,d,d);
break;
case EvenAligned:
- for (int j = alignedStart; j<alignedSize; j+=PacketSize)
+ for (Index j = alignedStart; j<alignedSize; j+=PacketSize)
_EIGEN_ACCUMULATE_PACKETS(d,du,d);
break;
case FirstAligned:
@@ -379,7 +379,7 @@
A02 = ei_pload(&lhs2[alignedStart-2]);
A03 = ei_pload(&lhs3[alignedStart-3]);
- for (int j = alignedStart; j<peeledSize; j+=peels*PacketSize)
+ for (Index j = alignedStart; j<peeledSize; j+=peels*PacketSize)
{
b = ei_pload(&rhs[j]);
A11 = ei_pload(&lhs1[j-1+PacketSize]); ei_palign<1>(A01,A11);
@@ -401,11 +401,11 @@
ptmp3 = cj.pmadd(A13, b, ptmp3);
}
}
- for (int j = peeledSize; j<alignedSize; j+=PacketSize)
+ for (Index j = peeledSize; j<alignedSize; j+=PacketSize)
_EIGEN_ACCUMULATE_PACKETS(d,du,du);
break;
default:
- for (int j = alignedStart; j<alignedSize; j+=PacketSize)
+ for (Index j = alignedStart; j<alignedSize; j+=PacketSize)
_EIGEN_ACCUMULATE_PACKETS(du,du,du);
break;
}
@@ -418,7 +418,7 @@
// process remaining coeffs (or all if no explicit vectorization)
// FIXME this loop gets vectorized by the compiler!
- for (int j=alignedSize; j<size; ++j)
+ for (Index j=alignedSize; j<size; ++j)
{
Scalar b = rhs[j];
tmp0 += cj.pmul(lhs0[j],b); tmp1 += cj.pmul(lhs1[j],b);
@@ -428,35 +428,35 @@
}
// process remaining first and last rows (at most rowsAtOnce-1)
- int end = res.size();
- int start = rowBound;
+ Index end = res.size();
+ Index start = rowBound;
do
{
- for (int i=start; i<end; ++i)
+ for (Index i=start; i<end; ++i)
{
Scalar tmp0 = Scalar(0);
Packet ptmp0 = ei_pset1(tmp0);
const Scalar* lhs0 = lhs + i*lhsStride;
// process first unaligned result's coeffs
// FIXME this loop gets vectorized by the compiler!
- for (int j=0; j<alignedStart; ++j)
+ for (Index j=0; j<alignedStart; ++j)
tmp0 += cj.pmul(lhs0[j], rhs[j]);
if (alignedSize>alignedStart)
{
// process aligned rhs coeffs
if ((size_t(lhs0+alignedStart)%sizeof(Packet))==0)
- for (int j = alignedStart;j<alignedSize;j+=PacketSize)
+ for (Index j = alignedStart;j<alignedSize;j+=PacketSize)
ptmp0 = cj.pmadd(ei_pload(&lhs0[j]), ei_pload(&rhs[j]), ptmp0);
else
- for (int j = alignedStart;j<alignedSize;j+=PacketSize)
+ for (Index j = alignedStart;j<alignedSize;j+=PacketSize)
ptmp0 = cj.pmadd(ei_ploadu(&lhs0[j]), ei_pload(&rhs[j]), ptmp0);
tmp0 += ei_predux(ptmp0);
}
// process remaining scalars
// FIXME this loop gets vectorized by the compiler!
- for (int j=alignedSize; j<size; ++j)
+ for (Index j=alignedSize; j<size; ++j)
tmp0 += cj.pmul(lhs0[j], rhs[j]);
res[i] += alpha*tmp0;
}
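
Both matrix-vector paths share the same alignment bookkeeping, now carried in Index. The core of it, isolated (a sketch, not part of the patch; in the real code ei_first_aligned supplies alignedStart):

  #include <cstddef>

  typedef std::ptrdiff_t Index;

  // rounds the vectorizable range [alignedStart, size) down to a whole
  // number of packets; PacketSize is assumed to be a power of two
  Index aligned_end(Index alignedStart, Index size, Index PacketSize)
  {
    const Index PacketAlignedMask = PacketSize - 1;
    return PacketSize > 1
         ? alignedStart + ((size - alignedStart) & ~PacketAlignedMask)
         : 0;
  }
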
diff --git a/Eigen/src/Core/products/Parallelizer.h b/Eigen/src/Core/products/Parallelizer.h
index 6e15e0f..5e4eb6f 100644
--- a/Eigen/src/Core/products/Parallelizer.h
+++ b/Eigen/src/Core/products/Parallelizer.h
@@ -25,20 +25,20 @@
#ifndef EIGEN_PARALLELIZER_H
#define EIGEN_PARALLELIZER_H
-template<typename BlockBScalar> struct GemmParallelInfo
+template<typename BlockBScalar, typename Index> struct GemmParallelInfo
{
GemmParallelInfo() : sync(-1), users(0), rhs_start(0), rhs_length(0), blockB(0) {}
int volatile sync;
int volatile users;
- int rhs_start;
- int rhs_length;
+ Index rhs_start;
+ Index rhs_length;
BlockBScalar* blockB;
};
-template<bool Condition,typename Functor>
-void ei_parallelize_gemm(const Functor& func, int rows, int cols)
+template<bool Condition, typename Functor, typename Index>
+void ei_parallelize_gemm(const Functor& func, Index rows, Index cols)
{
#ifndef EIGEN_HAS_OPENMP
func(0,rows, 0,cols);
@@ -57,16 +57,16 @@
// 2- compute the maximal number of threads from the size of the product:
// FIXME this has to be fine tuned
- int max_threads = std::max(1,rows / 32);
+ Index max_threads = std::max<Index>(1,rows / 32);
// 3 - compute the number of threads we are going to use
- int threads = std::min(omp_get_max_threads(), max_threads);
+ Index threads = std::min<Index>(omp_get_max_threads(), max_threads);
if(threads==1)
return func(0,rows, 0,cols);
- int blockCols = (cols / threads) & ~0x3;
- int blockRows = (rows / threads) & ~0x7;
+ Index blockCols = (cols / threads) & ~Index(0x3);
+ Index blockRows = (rows / threads) & ~Index(0x7);
typedef typename Functor::BlockBScalar BlockBScalar;
BlockBScalar* sharedBlockB = new BlockBScalar[func.sharedBlockBSize()];
@@ -74,13 +74,13 @@
- GemmParallelInfo<BlockBScalar>* info = new GemmParallelInfo<BlockBScalar>[threads];
+ GemmParallelInfo<BlockBScalar,Index>* info = new GemmParallelInfo<BlockBScalar,Index>[threads];
#pragma omp parallel for schedule(static,1) num_threads(threads)
- for(int i=0; i<threads; ++i)
+ for(Index i=0; i<threads; ++i)
{
- int r0 = i*blockRows;
- int actualBlockRows = (i+1==threads) ? rows-r0 : blockRows;
+ Index r0 = i*blockRows;
+ Index actualBlockRows = (i+1==threads) ? rows-r0 : blockRows;
- int c0 = i*blockCols;
- int actualBlockCols = (i+1==threads) ? cols-c0 : blockCols;
+ Index c0 = i*blockCols;
+ Index actualBlockCols = (i+1==threads) ? cols-c0 : blockCols;
info[i].rhs_start = c0;
info[i].rhs_length = actualBlockCols;
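
The per-thread partitioning above, extracted as a sketch (not part of the patch): block sizes are rounded down to multiples of 8 rows and 4 columns, and the last thread absorbs whatever remains.

  #include <cstddef>

  typedef std::ptrdiff_t Index;

  void partition(Index rows, Index cols, Index threads, Index i,
                 Index& r0, Index& actualBlockRows,
                 Index& c0, Index& actualBlockCols)
  {
    const Index blockCols = (cols / threads) & ~Index(0x3);  // multiple of 4
    const Index blockRows = (rows / threads) & ~Index(0x7);  // multiple of 8
    r0 = i*blockRows;
    actualBlockRows = (i+1==threads) ? rows-r0 : blockRows;
    c0 = i*blockCols;
    actualBlockCols = (i+1==threads) ? cols-c0 : blockCols;
  }
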
diff --git a/Eigen/src/Core/products/SelfadjointMatrixMatrix.h b/Eigen/src/Core/products/SelfadjointMatrixMatrix.h
index b23876d..31726e6 100644
--- a/Eigen/src/Core/products/SelfadjointMatrixMatrix.h
+++ b/Eigen/src/Core/products/SelfadjointMatrixMatrix.h
@@ -26,41 +26,41 @@
#define EIGEN_SELFADJOINT_MATRIX_MATRIX_H
// pack a selfadjoint block diagonal for use with the gebp_kernel
-template<typename Scalar, int mr, int StorageOrder>
+template<typename Scalar, typename Index, int mr, int StorageOrder>
struct ei_symm_pack_lhs
{
enum { PacketSize = ei_packet_traits<Scalar>::size };
template<int BlockRows> inline
- void pack(Scalar* blockA, const ei_const_blas_data_mapper<Scalar,StorageOrder>& lhs, int cols, int i, int& count)
+ void pack(Scalar* blockA, const ei_const_blas_data_mapper<Scalar,Index,StorageOrder>& lhs, Index cols, Index i, Index& count)
{
// normal copy
- for(int k=0; k<i; k++)
- for(int w=0; w<BlockRows; w++)
+ for(Index k=0; k<i; k++)
+ for(Index w=0; w<BlockRows; w++)
blockA[count++] = lhs(i+w,k); // normal
// symmetric copy
- int h = 0;
- for(int k=i; k<i+BlockRows; k++)
+ Index h = 0;
+ for(Index k=i; k<i+BlockRows; k++)
{
- for(int w=0; w<h; w++)
+ for(Index w=0; w<h; w++)
blockA[count++] = ei_conj(lhs(k, i+w)); // transposed
blockA[count++] = ei_real(lhs(k,k)); // real (diagonal)
- for(int w=h+1; w<BlockRows; w++)
+ for(Index w=h+1; w<BlockRows; w++)
blockA[count++] = lhs(i+w, k); // normal
++h;
}
// transposed copy
- for(int k=i+BlockRows; k<cols; k++)
- for(int w=0; w<BlockRows; w++)
+ for(Index k=i+BlockRows; k<cols; k++)
+ for(Index w=0; w<BlockRows; w++)
blockA[count++] = ei_conj(lhs(k, i+w)); // transposed
}
- void operator()(Scalar* blockA, const Scalar* _lhs, int lhsStride, int cols, int rows)
+ void operator()(Scalar* blockA, const Scalar* _lhs, Index lhsStride, Index cols, Index rows)
{
- ei_const_blas_data_mapper<Scalar,StorageOrder> lhs(_lhs,lhsStride);
- int count = 0;
- int peeled_mc = (rows/mr)*mr;
- for(int i=0; i<peeled_mc; i+=mr)
+ ei_const_blas_data_mapper<Scalar,Index,StorageOrder> lhs(_lhs,lhsStride);
+ Index count = 0;
+ Index peeled_mc = (rows/mr)*mr;
+ for(Index i=0; i<peeled_mc; i+=mr)
{
pack<mr>(blockA, lhs, cols, i, count);
}
@@ -72,34 +72,34 @@
}
// do the same with mr==1
- for(int i=peeled_mc; i<rows; i++)
+ for(Index i=peeled_mc; i<rows; i++)
{
- for(int k=0; k<i; k++)
+ for(Index k=0; k<i; k++)
blockA[count++] = lhs(i, k); // normal
blockA[count++] = ei_real(lhs(i, i)); // real (diagonal)
- for(int k=i+1; k<cols; k++)
+ for(Index k=i+1; k<cols; k++)
blockA[count++] = ei_conj(lhs(k, i)); // transposed
}
}
};
-template<typename Scalar, int nr, int StorageOrder>
+template<typename Scalar, typename Index, int nr, int StorageOrder>
struct ei_symm_pack_rhs
{
enum { PacketSize = ei_packet_traits<Scalar>::size };
- void operator()(Scalar* blockB, const Scalar* _rhs, int rhsStride, Scalar alpha, int rows, int cols, int k2)
+ void operator()(Scalar* blockB, const Scalar* _rhs, Index rhsStride, Scalar alpha, Index rows, Index cols, Index k2)
{
- int end_k = k2 + rows;
- int count = 0;
- ei_const_blas_data_mapper<Scalar,StorageOrder> rhs(_rhs,rhsStride);
- int packet_cols = (cols/nr)*nr;
+ Index end_k = k2 + rows;
+ Index count = 0;
+ ei_const_blas_data_mapper<Scalar,Index,StorageOrder> rhs(_rhs,rhsStride);
+ Index packet_cols = (cols/nr)*nr;
// first part: normal case
- for(int j2=0; j2<k2; j2+=nr)
+ for(Index j2=0; j2<k2; j2+=nr)
{
- for(int k=k2; k<end_k; k++)
+ for(Index k=k2; k<end_k; k++)
{
blockB[count+0] = alpha*rhs(k,j2+0);
blockB[count+1] = alpha*rhs(k,j2+1);
@@ -113,11 +113,11 @@
}
// second part: diagonal block
- for(int j2=k2; j2<std::min(k2+rows,packet_cols); j2+=nr)
+ for(Index j2=k2; j2<std::min(k2+rows,packet_cols); j2+=nr)
{
// again we can split vertically into three different parts (transpose, symmetric, normal)
// transpose
- for(int k=k2; k<j2; k++)
+ for(Index k=k2; k<j2; k++)
{
blockB[count+0] = alpha*ei_conj(rhs(j2+0,k));
blockB[count+1] = alpha*ei_conj(rhs(j2+1,k));
@@ -129,23 +129,23 @@
count += nr;
}
// symmetric
- int h = 0;
- for(int k=j2; k<j2+nr; k++)
+ Index h = 0;
+ for(Index k=j2; k<j2+nr; k++)
{
// normal
- for (int w=0 ; w<h; ++w)
+ for (Index w=0 ; w<h; ++w)
blockB[count+w] = alpha*rhs(k,j2+w);
blockB[count+h] = alpha*rhs(k,k);
// transpose
- for (int w=h+1 ; w<nr; ++w)
+ for (Index w=h+1 ; w<nr; ++w)
blockB[count+w] = alpha*ei_conj(rhs(j2+w,k));
count += nr;
++h;
}
// normal
- for(int k=j2+nr; k<end_k; k++)
+ for(Index k=j2+nr; k<end_k; k++)
{
blockB[count+0] = alpha*rhs(k,j2+0);
blockB[count+1] = alpha*rhs(k,j2+1);
@@ -159,9 +159,9 @@
}
// third part: transposed
- for(int j2=k2+rows; j2<packet_cols; j2+=nr)
+ for(Index j2=k2+rows; j2<packet_cols; j2+=nr)
{
- for(int k=k2; k<end_k; k++)
+ for(Index k=k2; k<end_k; k++)
{
blockB[count+0] = alpha*ei_conj(rhs(j2+0,k));
blockB[count+1] = alpha*ei_conj(rhs(j2+1,k));
@@ -175,11 +175,11 @@
}
// copy the remaining columns one at a time (=> the same with nr==1)
- for(int j2=packet_cols; j2<cols; ++j2)
+ for(Index j2=packet_cols; j2<cols; ++j2)
{
// transpose
- int half = std::min(end_k,j2);
- for(int k=k2; k<half; k++)
+ Index half = std::min(end_k,j2);
+ for(Index k=k2; k<half; k++)
{
blockB[count] = alpha*ei_conj(rhs(j2,k));
count += 1;
@@ -194,7 +194,7 @@
half--;
// normal
- for(int k=half+1; k<k2+rows; k++)
+ for(Index k=half+1; k<k2+rows; k++)
{
blockB[count] = alpha*rhs(k,j2);
count += 1;
@@ -206,26 +206,26 @@
/* Optimized selfadjoint matrix * matrix (_SYMM) product built on top of
* the general matrix matrix product.
*/
-template <typename Scalar,
+template <typename Scalar, typename Index,
int LhsStorageOrder, bool LhsSelfAdjoint, bool ConjugateLhs,
int RhsStorageOrder, bool RhsSelfAdjoint, bool ConjugateRhs,
int ResStorageOrder>
struct ei_product_selfadjoint_matrix;
-template <typename Scalar,
+template <typename Scalar, typename Index,
int LhsStorageOrder, bool LhsSelfAdjoint, bool ConjugateLhs,
int RhsStorageOrder, bool RhsSelfAdjoint, bool ConjugateRhs>
-struct ei_product_selfadjoint_matrix<Scalar,LhsStorageOrder,LhsSelfAdjoint,ConjugateLhs, RhsStorageOrder,RhsSelfAdjoint,ConjugateRhs,RowMajor>
+struct ei_product_selfadjoint_matrix<Scalar,Index,LhsStorageOrder,LhsSelfAdjoint,ConjugateLhs, RhsStorageOrder,RhsSelfAdjoint,ConjugateRhs,RowMajor>
{
static EIGEN_STRONG_INLINE void run(
- int rows, int cols,
- const Scalar* lhs, int lhsStride,
- const Scalar* rhs, int rhsStride,
- Scalar* res, int resStride,
+ Index rows, Index cols,
+ const Scalar* lhs, Index lhsStride,
+ const Scalar* rhs, Index rhsStride,
+ Scalar* res, Index resStride,
Scalar alpha)
{
- ei_product_selfadjoint_matrix<Scalar,
+ ei_product_selfadjoint_matrix<Scalar, Index,
EIGEN_LOGICAL_XOR(RhsSelfAdjoint,RhsStorageOrder==RowMajor) ? ColMajor : RowMajor,
RhsSelfAdjoint, NumTraits<Scalar>::IsComplex && EIGEN_LOGICAL_XOR(RhsSelfAdjoint,ConjugateRhs),
EIGEN_LOGICAL_XOR(LhsSelfAdjoint,LhsStorageOrder==RowMajor) ? ColMajor : RowMajor,
@@ -235,45 +235,45 @@
}
};
-template <typename Scalar,
+template <typename Scalar, typename Index,
int LhsStorageOrder, bool ConjugateLhs,
int RhsStorageOrder, bool ConjugateRhs>
-struct ei_product_selfadjoint_matrix<Scalar,LhsStorageOrder,true,ConjugateLhs, RhsStorageOrder,false,ConjugateRhs,ColMajor>
+struct ei_product_selfadjoint_matrix<Scalar,Index,LhsStorageOrder,true,ConjugateLhs, RhsStorageOrder,false,ConjugateRhs,ColMajor>
{
static EIGEN_DONT_INLINE void run(
- int rows, int cols,
- const Scalar* _lhs, int lhsStride,
- const Scalar* _rhs, int rhsStride,
- Scalar* res, int resStride,
+ Index rows, Index cols,
+ const Scalar* _lhs, Index lhsStride,
+ const Scalar* _rhs, Index rhsStride,
+ Scalar* res, Index resStride,
Scalar alpha)
{
- int size = rows;
+ Index size = rows;
- ei_const_blas_data_mapper<Scalar, LhsStorageOrder> lhs(_lhs,lhsStride);
- ei_const_blas_data_mapper<Scalar, RhsStorageOrder> rhs(_rhs,rhsStride);
+ ei_const_blas_data_mapper<Scalar, Index, LhsStorageOrder> lhs(_lhs,lhsStride);
+ ei_const_blas_data_mapper<Scalar, Index, RhsStorageOrder> rhs(_rhs,rhsStride);
if (ConjugateRhs)
alpha = ei_conj(alpha);
typedef ei_product_blocking_traits<Scalar> Blocking;
- int kc = std::min<int>(Blocking::Max_kc,size); // cache block size along the K direction
- int mc = std::min<int>(Blocking::Max_mc,rows); // cache block size along the M direction
+ Index kc = std::min<Index>(Blocking::Max_kc,size); // cache block size along the K direction
+ Index mc = std::min<Index>(Blocking::Max_mc,rows); // cache block size along the M direction
Scalar* blockA = ei_aligned_stack_new(Scalar, kc*mc);
std::size_t sizeB = kc*Blocking::PacketSize*Blocking::nr + kc*cols;
Scalar* allocatedBlockB = ei_aligned_stack_new(Scalar, sizeB);
Scalar* blockB = allocatedBlockB + kc*Blocking::PacketSize*Blocking::nr;
- ei_gebp_kernel<Scalar, Blocking::mr, Blocking::nr, ei_conj_helper<ConjugateLhs,ConjugateRhs> > gebp_kernel;
- ei_symm_pack_lhs<Scalar,Blocking::mr,LhsStorageOrder> pack_lhs;
- ei_gemm_pack_rhs<Scalar,Blocking::nr,RhsStorageOrder> pack_rhs;
- ei_gemm_pack_lhs<Scalar,Blocking::mr,LhsStorageOrder==RowMajor?ColMajor:RowMajor, true> pack_lhs_transposed;
+ ei_gebp_kernel<Scalar, Index, Blocking::mr, Blocking::nr, ei_conj_helper<ConjugateLhs,ConjugateRhs> > gebp_kernel;
+ ei_symm_pack_lhs<Scalar, Index, Blocking::mr,LhsStorageOrder> pack_lhs;
+ ei_gemm_pack_rhs<Scalar, Index, Blocking::nr,RhsStorageOrder> pack_rhs;
+ ei_gemm_pack_lhs<Scalar, Index, Blocking::mr,LhsStorageOrder==RowMajor?ColMajor:RowMajor, true> pack_lhs_transposed;
- for(int k2=0; k2<size; k2+=kc)
+ for(Index k2=0; k2<size; k2+=kc)
{
- const int actual_kc = std::min(k2+kc,size)-k2;
+ const Index actual_kc = std::min(k2+kc,size)-k2;
// we have selected one row panel of rhs and one column panel of lhs
// pack rhs's panel into a sequential chunk of memory
@@ -284,9 +284,9 @@
// 1 - the transposed panel above the diagonal block => transposed packed copy
// 2 - the diagonal block => special packed copy
// 3 - the panel below the diagonal block => generic packed copy
- for(int i2=0; i2<k2; i2+=mc)
+ for(Index i2=0; i2<k2; i2+=mc)
{
- const int actual_mc = std::min(i2+mc,k2)-i2;
+ const Index actual_mc = std::min(i2+mc,k2)-i2;
// transposed packed copy
pack_lhs_transposed(blockA, &lhs(k2, i2), lhsStride, actual_kc, actual_mc);
@@ -294,17 +294,17 @@
}
// the block diagonal
{
- const int actual_mc = std::min(k2+kc,size)-k2;
+ const Index actual_mc = std::min(k2+kc,size)-k2;
// symmetric packed copy
pack_lhs(blockA, &lhs(k2,k2), lhsStride, actual_kc, actual_mc);
gebp_kernel(res+k2, resStride, blockA, blockB, actual_mc, actual_kc, cols);
}
- for(int i2=k2+kc; i2<size; i2+=mc)
+ for(Index i2=k2+kc; i2<size; i2+=mc)
{
- const int actual_mc = std::min(i2+mc,size)-i2;
- ei_gemm_pack_lhs<Scalar,Blocking::mr,LhsStorageOrder,false>()
+ const Index actual_mc = std::min(i2+mc,size)-i2;
+ ei_gemm_pack_lhs<Scalar, Index, Blocking::mr,LhsStorageOrder,false>()
(blockA, &lhs(i2, k2), lhsStride, actual_kc, actual_mc);
gebp_kernel(res+i2, resStride, blockA, blockB, actual_mc, actual_kc, cols);
@@ -317,50 +317,50 @@
};
// matrix * selfadjoint product
-template <typename Scalar,
+template <typename Scalar, typename Index,
int LhsStorageOrder, bool ConjugateLhs,
int RhsStorageOrder, bool ConjugateRhs>
-struct ei_product_selfadjoint_matrix<Scalar,LhsStorageOrder,false,ConjugateLhs, RhsStorageOrder,true,ConjugateRhs,ColMajor>
+struct ei_product_selfadjoint_matrix<Scalar,Index,LhsStorageOrder,false,ConjugateLhs, RhsStorageOrder,true,ConjugateRhs,ColMajor>
{
static EIGEN_DONT_INLINE void run(
- int rows, int cols,
- const Scalar* _lhs, int lhsStride,
- const Scalar* _rhs, int rhsStride,
- Scalar* res, int resStride,
+ Index rows, Index cols,
+ const Scalar* _lhs, Index lhsStride,
+ const Scalar* _rhs, Index rhsStride,
+ Scalar* res, Index resStride,
Scalar alpha)
{
- int size = cols;
+ Index size = cols;
- ei_const_blas_data_mapper<Scalar, LhsStorageOrder> lhs(_lhs,lhsStride);
+ ei_const_blas_data_mapper<Scalar, Index, LhsStorageOrder> lhs(_lhs,lhsStride);
if (ConjugateRhs)
alpha = ei_conj(alpha);
typedef ei_product_blocking_traits<Scalar> Blocking;
- int kc = std::min<int>(Blocking::Max_kc,size); // cache block size along the K direction
- int mc = std::min<int>(Blocking::Max_mc,rows); // cache block size along the M direction
+ Index kc = std::min<Index>(Blocking::Max_kc,size); // cache block size along the K direction
+ Index mc = std::min<Index>(Blocking::Max_mc,rows); // cache block size along the M direction
Scalar* blockA = ei_aligned_stack_new(Scalar, kc*mc);
std::size_t sizeB = kc*Blocking::PacketSize*Blocking::nr + kc*cols;
Scalar* allocatedBlockB = ei_aligned_stack_new(Scalar, sizeB);
Scalar* blockB = allocatedBlockB + kc*Blocking::PacketSize*Blocking::nr;
- ei_gebp_kernel<Scalar, Blocking::mr, Blocking::nr, ei_conj_helper<ConjugateLhs,ConjugateRhs> > gebp_kernel;
- ei_gemm_pack_lhs<Scalar,Blocking::mr,LhsStorageOrder> pack_lhs;
- ei_symm_pack_rhs<Scalar,Blocking::nr,RhsStorageOrder> pack_rhs;
+ ei_gebp_kernel<Scalar, Index, Blocking::mr, Blocking::nr, ei_conj_helper<ConjugateLhs,ConjugateRhs> > gebp_kernel;
+ ei_gemm_pack_lhs<Scalar, Index, Blocking::mr,LhsStorageOrder> pack_lhs;
+ ei_symm_pack_rhs<Scalar, Index, Blocking::nr,RhsStorageOrder> pack_rhs;
- for(int k2=0; k2<size; k2+=kc)
+ for(Index k2=0; k2<size; k2+=kc)
{
- const int actual_kc = std::min(k2+kc,size)-k2;
+ const Index actual_kc = std::min(k2+kc,size)-k2;
pack_rhs(blockB, _rhs, rhsStride, alpha, actual_kc, cols, k2);
// => GEPP
- for(int i2=0; i2<rows; i2+=mc)
+ for(Index i2=0; i2<rows; i2+=mc)
{
- const int actual_mc = std::min(i2+mc,rows)-i2;
+ const Index actual_mc = std::min(i2+mc,rows)-i2;
pack_lhs(blockA, &lhs(i2, k2), lhsStride, actual_kc, actual_mc);
gebp_kernel(res+i2, resStride, blockA, blockB, actual_mc, actual_kc, cols);
@@ -406,7 +406,7 @@
Scalar actualAlpha = alpha * LhsBlasTraits::extractScalarFactor(m_lhs)
* RhsBlasTraits::extractScalarFactor(m_rhs);
- ei_product_selfadjoint_matrix<Scalar,
+ ei_product_selfadjoint_matrix<Scalar, Index,
EIGEN_LOGICAL_XOR(LhsIsUpper,
ei_traits<Lhs>::Flags &RowMajorBit) ? RowMajor : ColMajor, LhsIsSelfAdjoint,
NumTraits<Scalar>::IsComplex && EIGEN_LOGICAL_XOR(LhsIsUpper,bool(LhsBlasTraits::NeedToConjugate)),
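The same mechanical pattern repeats in every file below: loop counters, sizes and strides move from int to a template parameter Index, which dense expressions obtain from EIGEN_DEFAULT_DENSE_INDEX_TYPE (std::ptrdiff_t by default, see the Macros.h and XprHelper.h hunks further down). A minimal standalone sketch of the convention, with a hypothetical kernel name:

  #include <cstddef>

  typedef std::ptrdiff_t DenseIndex; // what ei_index<Dense>::type resolves to by default

  // hypothetical kernel written in the post-patch style: no hard-coded int
  template<typename Scalar, typename Index>
  void scale_cols(Scalar* data, Index stride, Index rows, Index cols, Scalar alpha)
  {
    for (Index j=0; j<cols; ++j)
      for (Index i=0; i<rows; ++i)
        data[i + j*stride] *= alpha;
  }

  int main()
  {
    double m[6] = {1,2,3,4,5,6};
    scale_cols<double,DenseIndex>(m, 3, 3, 2, 0.5); // scale a 3x2 col-major block
  }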
diff --git a/Eigen/src/Core/products/SelfadjointMatrixVector.h b/Eigen/src/Core/products/SelfadjointMatrixVector.h
index 0f829fd..9af310d 100644
--- a/Eigen/src/Core/products/SelfadjointMatrixVector.h
+++ b/Eigen/src/Core/products/SelfadjointMatrixVector.h
@@ -30,15 +30,15 @@
* the number of load/stores of the result by a factor 2 and to reduce
* the instruction dependency.
*/
-template<typename Scalar, int StorageOrder, int UpLo, bool ConjugateLhs, bool ConjugateRhs>
+template<typename Scalar, typename Index, int StorageOrder, int UpLo, bool ConjugateLhs, bool ConjugateRhs>
static EIGEN_DONT_INLINE void ei_product_selfadjoint_vector(
- int size,
- const Scalar* lhs, int lhsStride,
- const Scalar* _rhs, int rhsIncr,
+ Index size,
+ const Scalar* lhs, Index lhsStride,
+ const Scalar* _rhs, Index rhsIncr,
Scalar* res, Scalar alpha)
{
typedef typename ei_packet_traits<Scalar>::type Packet;
- const int PacketSize = sizeof(Packet)/sizeof(Scalar);
+ const Index PacketSize = sizeof(Packet)/sizeof(Scalar);
enum {
IsRowMajor = StorageOrder==RowMajor ? 1 : 0,
@@ -58,16 +58,16 @@
{
Scalar* r = ei_aligned_stack_new(Scalar, size);
const Scalar* it = _rhs;
- for (int i=0; i<size; ++i, it+=rhsIncr)
+ for (Index i=0; i<size; ++i, it+=rhsIncr)
r[i] = *it;
rhs = r;
}
- int bound = std::max(0,size-8) & 0xfffffffE;
+ Index bound = std::max(Index(0),size-8) & 0xfffffffe;
if (FirstTriangular)
bound = size - bound;
- for (int j=FirstTriangular ? bound : 0;
+ for (Index j=FirstTriangular ? bound : 0;
j<(FirstTriangular ? size : bound);j+=2)
{
register const Scalar* EIGEN_RESTRICT A0 = lhs + j*lhsStride;
@@ -136,14 +136,14 @@
res[j] += alpha * (t2 + ei_predux(ptmp2));
res[j+1] += alpha * (t3 + ei_predux(ptmp3));
}
- for (int j=FirstTriangular ? 0 : bound;j<(FirstTriangular ? bound : size);j++)
+ for (Index j=FirstTriangular ? 0 : bound;j<(FirstTriangular ? bound : size);j++)
{
register const Scalar* EIGEN_RESTRICT A0 = lhs + j*lhsStride;
Scalar t1 = cjAlpha * rhs[j];
Scalar t2 = 0;
res[j] += cj0.pmul(A0[j],t1);
- for (int i=FirstTriangular ? 0 : j+1; i<(FirstTriangular ? j : size); i++) {
+ for (Index i=FirstTriangular ? 0 : j+1; i<(FirstTriangular ? j : size); i++) {
res[i] += cj0.pmul(A0[i], t1);
t2 += cj1.pmul(A0[i], rhs[i]);
}
@@ -187,7 +187,7 @@
ei_assert(dst.innerStride()==1 && "not implemented yet");
- ei_product_selfadjoint_vector<Scalar, (ei_traits<_ActualLhsType>::Flags&RowMajorBit) ? RowMajor : ColMajor, int(LhsUpLo), bool(LhsBlasTraits::NeedToConjugate), bool(RhsBlasTraits::NeedToConjugate)>
+ ei_product_selfadjoint_vector<Scalar, Index, (ei_traits<_ActualLhsType>::Flags&RowMajorBit) ? RowMajor : ColMajor, int(LhsUpLo), bool(LhsBlasTraits::NeedToConjugate), bool(RhsBlasTraits::NeedToConjugate)>
(
lhs.rows(), // size
&lhs.coeff(0,0), lhs.outerStride(), // lhs info
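Stripped of the two-columns-at-a-time unrolling and the packet code, the operation this kernel performs is the following (reference sketch for the ColMajor, Lower case with unit rhs increment and real scalars; the complex path conjugates the mirrored term, as cj1 does above):

  template<typename Scalar, typename Index>
  void symv_lower_ref(Index size, const Scalar* lhs, Index lhsStride,
                      const Scalar* rhs, Scalar* res, Scalar alpha)
  {
    for (Index j=0; j<size; ++j)
    {
      const Scalar* col = lhs + j*lhsStride; // only the lower part of column j is read
      Scalar t = alpha * rhs[j];
      res[j] += col[j] * t;                  // diagonal term
      for (Index i=j+1; i<size; ++i)
      {
        res[i] += col[i] * t;                // A(i,j)
        res[j] += alpha * col[i] * rhs[i];   // mirrored A(j,i)
      }
    }
  }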
diff --git a/Eigen/src/Core/products/SelfadjointProduct.h b/Eigen/src/Core/products/SelfadjointProduct.h
index 01cd33d..bf835b5 100644
--- a/Eigen/src/Core/products/SelfadjointProduct.h
+++ b/Eigen/src/Core/products/SelfadjointProduct.h
@@ -26,52 +26,52 @@
#define EIGEN_SELFADJOINT_PRODUCT_H
/**********************************************************************
-* This file implement a self adjoint product: C += A A^T updating only
-* an half of the selfadjoint matrix C.
+* This file implements a self adjoint product: C += A A^T updating only
+* half of the selfadjoint matrix C.
* It corresponds to the level 3 SYRK Blas routine.
**********************************************************************/
// forward declarations (defined at the end of this file)
-template<typename Scalar, int mr, int nr, typename Conj, int UpLo>
+template<typename Scalar, typename Index, int mr, int nr, typename Conj, int UpLo>
struct ei_sybb_kernel;
/* Optimized selfadjoint product (_SYRK) */
-template <typename Scalar,
+template <typename Scalar, typename Index,
int RhsStorageOrder,
int ResStorageOrder, bool AAT, int UpLo>
struct ei_selfadjoint_product;
// as usual if the result is row major => we transpose the product
-template <typename Scalar, int MatStorageOrder, bool AAT, int UpLo>
-struct ei_selfadjoint_product<Scalar,MatStorageOrder, RowMajor, AAT, UpLo>
+template <typename Scalar, typename Index, int MatStorageOrder, bool AAT, int UpLo>
+struct ei_selfadjoint_product<Scalar, Index, MatStorageOrder, RowMajor, AAT, UpLo>
{
- static EIGEN_STRONG_INLINE void run(int size, int depth, const Scalar* mat, int matStride, Scalar* res, int resStride, Scalar alpha)
+ static EIGEN_STRONG_INLINE void run(Index size, Index depth, const Scalar* mat, Index matStride, Scalar* res, Index resStride, Scalar alpha)
{
- ei_selfadjoint_product<Scalar, MatStorageOrder, ColMajor, !AAT, UpLo==Lower?Upper:Lower>
+ ei_selfadjoint_product<Scalar, Index, MatStorageOrder, ColMajor, !AAT, UpLo==Lower?Upper:Lower>
::run(size, depth, mat, matStride, res, resStride, alpha);
}
};
-template <typename Scalar,
+template <typename Scalar, typename Index,
int MatStorageOrder, bool AAT, int UpLo>
-struct ei_selfadjoint_product<Scalar,MatStorageOrder, ColMajor, AAT, UpLo>
+struct ei_selfadjoint_product<Scalar, Index, MatStorageOrder, ColMajor, AAT, UpLo>
{
static EIGEN_DONT_INLINE void run(
- int size, int depth,
- const Scalar* _mat, int matStride,
- Scalar* res, int resStride,
+ Index size, Index depth,
+ const Scalar* _mat, Index matStride,
+ Scalar* res, Index resStride,
Scalar alpha)
{
- ei_const_blas_data_mapper<Scalar, MatStorageOrder> mat(_mat,matStride);
+ ei_const_blas_data_mapper<Scalar, Index, MatStorageOrder> mat(_mat,matStride);
if(AAT)
alpha = ei_conj(alpha);
typedef ei_product_blocking_traits<Scalar> Blocking;
- int kc = std::min<int>(Blocking::Max_kc,depth); // cache block size along the K direction
- int mc = std::min<int>(Blocking::Max_mc,size); // cache block size along the M direction
+ Index kc = std::min<Index>(Blocking::Max_kc,depth); // cache block size along the K direction
+ Index mc = std::min<Index>(Blocking::Max_mc,size); // cache block size along the M direction
Scalar* blockA = ei_aligned_stack_new(Scalar, kc*mc);
std::size_t sizeB = kc*Blocking::PacketSize*Blocking::nr + kc*size;
@@ -81,21 +81,21 @@
// note that the actual rhs is the transpose/adjoint of mat
typedef ei_conj_helper<NumTraits<Scalar>::IsComplex && !AAT, NumTraits<Scalar>::IsComplex && AAT> Conj;
- ei_gebp_kernel<Scalar, Blocking::mr, Blocking::nr, Conj> gebp_kernel;
- ei_gemm_pack_rhs<Scalar,Blocking::nr,MatStorageOrder==RowMajor ? ColMajor : RowMajor> pack_rhs;
- ei_gemm_pack_lhs<Scalar,Blocking::mr,MatStorageOrder, false> pack_lhs;
- ei_sybb_kernel<Scalar, Blocking::mr, Blocking::nr, Conj, UpLo> sybb;
+ ei_gebp_kernel<Scalar, Index, Blocking::mr, Blocking::nr, Conj> gebp_kernel;
+ ei_gemm_pack_rhs<Scalar, Index, Blocking::nr,MatStorageOrder==RowMajor ? ColMajor : RowMajor> pack_rhs;
+ ei_gemm_pack_lhs<Scalar, Index, Blocking::mr,MatStorageOrder, false> pack_lhs;
+ ei_sybb_kernel<Scalar, Index, Blocking::mr, Blocking::nr, Conj, UpLo> sybb;
- for(int k2=0; k2<depth; k2+=kc)
+ for(Index k2=0; k2<depth; k2+=kc)
{
- const int actual_kc = std::min(k2+kc,depth)-k2;
+ const Index actual_kc = std::min(k2+kc,depth)-k2;
// note that the actual rhs is the transpose/adjoint of mat
pack_rhs(blockB, &mat(0,k2), matStride, alpha, actual_kc, size);
- for(int i2=0; i2<size; i2+=mc)
+ for(Index i2=0; i2<size; i2+=mc)
{
- const int actual_mc = std::min(i2+mc,size)-i2;
+ const Index actual_mc = std::min(i2+mc,size)-i2;
pack_lhs(blockA, &mat(i2, k2), matStride, actual_kc, actual_mc);
@@ -111,8 +111,8 @@
if (UpLo==Upper)
{
- int j2 = i2+actual_mc;
- gebp_kernel(res+resStride*j2+i2, resStride, blockA, blockB+actual_kc*j2, actual_mc, actual_kc, std::max(0,size-j2),
+ Index j2 = i2+actual_mc;
+ gebp_kernel(res+resStride*j2+i2, resStride, blockA, blockB+actual_kc*j2, actual_mc, actual_kc, std::max(Index(0),size-j2),
-1, -1, 0, 0, allocatedBlockB);
}
}
@@ -138,7 +138,7 @@
enum { IsRowMajor = (ei_traits<MatrixType>::Flags&RowMajorBit) ? 1 : 0 };
- ei_selfadjoint_product<Scalar,
+ ei_selfadjoint_product<Scalar, Index,
_ActualUType::Flags&RowMajorBit ? RowMajor : ColMajor,
ei_traits<MatrixType>::Flags&RowMajorBit ? RowMajor : ColMajor,
!UBlasTraits::NeedToConjugate, UpLo>
@@ -158,23 +158,23 @@
// while the selfadjoint block overlapping the diagonal is evaluated into a
// small temporary buffer which is then accumulated into the result using a
// triangular traversal.
-template<typename Scalar, int mr, int nr, typename Conj, int UpLo>
+template<typename Scalar, typename Index, int mr, int nr, typename Conj, int UpLo>
struct ei_sybb_kernel
{
enum {
PacketSize = ei_packet_traits<Scalar>::size,
BlockSize = EIGEN_ENUM_MAX(mr,nr)
};
- void operator()(Scalar* res, int resStride, const Scalar* blockA, const Scalar* blockB, int size, int depth, Scalar* workspace)
+ void operator()(Scalar* res, Index resStride, const Scalar* blockA, const Scalar* blockB, Index size, Index depth, Scalar* workspace)
{
- ei_gebp_kernel<Scalar, mr, nr, Conj> gebp_kernel;
+ ei_gebp_kernel<Scalar, Index, mr, nr, Conj> gebp_kernel;
Matrix<Scalar,BlockSize,BlockSize,ColMajor> buffer;
// let's process the block per panel of actual_mc x BlockSize,
// again, each is split into three parts, etc.
- for (int j=0; j<size; j+=BlockSize)
+ for (Index j=0; j<size; j+=BlockSize)
{
- int actualBlockSize = std::min<int>(BlockSize,size - j);
+ Index actualBlockSize = std::min<Index>(BlockSize,size - j);
const Scalar* actual_b = blockB+j*depth;
if(UpLo==Upper)
@@ -182,16 +182,16 @@
// selfadjoint micro block
{
- int i = j;
+ Index i = j;
buffer.setZero();
// 1 - apply the kernel on the temporary buffer
gebp_kernel(buffer.data(), BlockSize, blockA+depth*i, actual_b, actualBlockSize, depth, actualBlockSize,
-1, -1, 0, 0, workspace);
// 2 - triangular accumulation
- for(int j1=0; j1<actualBlockSize; ++j1)
+ for(Index j1=0; j1<actualBlockSize; ++j1)
{
Scalar* r = res + (j+j1)*resStride + i;
- for(int i1=UpLo==Lower ? j1 : 0;
+ for(Index i1=UpLo==Lower ? j1 : 0;
UpLo==Lower ? i1<actualBlockSize : i1<=j1; ++i1)
r[i1] += buffer(i1,j1);
}
@@ -199,7 +199,7 @@
if(UpLo==Lower)
{
- int i = j+actualBlockSize;
+ Index i = j+actualBlockSize;
gebp_kernel(res+j*resStride+i, resStride, blockA+depth*i, actual_b, size-i, depth, actualBlockSize,
-1, -1, 0, 0, workspace);
}
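Behind the packing and the sybb kernel, the operation is a plain rank-k update of one triangle; an unblocked reference sketch for the ColMajor, Lower, real case (mat is size x depth):

  template<typename Scalar, typename Index>
  void syrk_lower_ref(Index size, Index depth,
                      const Scalar* mat, Index matStride,
                      Scalar* res, Index resStride, Scalar alpha)
  {
    for (Index j=0; j<size; ++j)
      for (Index i=j; i<size; ++i)           // lower triangle only
      {
        Scalar sum = 0;
        for (Index k=0; k<depth; ++k)
          sum += mat[i + k*matStride] * mat[j + k*matStride];
        res[i + j*resStride] += alpha * sum; // C += alpha * A * A^T
      }
  }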
diff --git a/Eigen/src/Core/products/SelfadjointRank2Update.h b/Eigen/src/Core/products/SelfadjointRank2Update.h
index 9b52d5f..e20eb5c 100644
--- a/Eigen/src/Core/products/SelfadjointRank2Update.h
+++ b/Eigen/src/Core/products/SelfadjointRank2Update.h
@@ -29,16 +29,16 @@
* It corresponds to the Level2 syr2 BLAS routine
*/
-template<typename Scalar, typename UType, typename VType, int UpLo>
+template<typename Scalar, typename Index, typename UType, typename VType, int UpLo>
struct ei_selfadjoint_rank2_update_selector;
-template<typename Scalar, typename UType, typename VType>
-struct ei_selfadjoint_rank2_update_selector<Scalar,UType,VType,Lower>
+template<typename Scalar, typename Index, typename UType, typename VType>
+struct ei_selfadjoint_rank2_update_selector<Scalar,Index,UType,VType,Lower>
{
- static void run(Scalar* mat, int stride, const UType& u, const VType& v, Scalar alpha)
+ static void run(Scalar* mat, Index stride, const UType& u, const VType& v, Scalar alpha)
{
- const int size = u.size();
- for (int i=0; i<size; ++i)
+ const Index size = u.size();
+ for (Index i=0; i<size; ++i)
{
Map<Matrix<Scalar,Dynamic,1> >(mat+stride*i+i, size-i) +=
(alpha * ei_conj(u.coeff(i))) * v.tail(size-i)
@@ -47,13 +47,13 @@
}
};
-template<typename Scalar, typename UType, typename VType>
-struct ei_selfadjoint_rank2_update_selector<Scalar,UType,VType,Upper>
+template<typename Scalar, typename Index, typename UType, typename VType>
+struct ei_selfadjoint_rank2_update_selector<Scalar,Index,UType,VType,Upper>
{
- static void run(Scalar* mat, int stride, const UType& u, const VType& v, Scalar alpha)
+ static void run(Scalar* mat, Index stride, const UType& u, const VType& v, Scalar alpha)
{
- const int size = u.size();
- for (int i=0; i<size; ++i)
+ const Index size = u.size();
+ for (Index i=0; i<size; ++i)
Map<Matrix<Scalar,Dynamic,1> >(mat+stride*i, i+1) +=
(alpha * ei_conj(u.coeff(i))) * v.head(i+1)
+ (alpha * ei_conj(v.coeff(i))) * u.head(i+1);
@@ -84,7 +84,7 @@
* VBlasTraits::extractScalarFactor(v.derived());
enum { IsRowMajor = (ei_traits<MatrixType>::Flags&RowMajorBit) ? 1 : 0 };
- ei_selfadjoint_rank2_update_selector<Scalar,
+ ei_selfadjoint_rank2_update_selector<Scalar, Index,
typename ei_cleantype<typename ei_conj_expr_if<IsRowMajor ^ UBlasTraits::NeedToConjugate,_ActualUType>::ret>::type,
typename ei_cleantype<typename ei_conj_expr_if<IsRowMajor ^ VBlasTraits::NeedToConjugate,_ActualVType>::ret>::type,
(IsRowMajor ? int(UpLo==Upper ? Lower : Upper) : UpLo)>
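Scalarized, the Lower selector performs the following per-column update (reference sketch for real scalars; the Map-based code above additionally conjugates u(i) and v(i) for hermitian updates):

  template<typename Scalar, typename Index>
  void syr2_lower_ref(Scalar* mat, Index stride, Index size,
                      const Scalar* u, const Scalar* v, Scalar alpha)
  {
    for (Index i=0; i<size; ++i)    // column i, from the diagonal down
      for (Index k=i; k<size; ++k)  // mat += alpha*(u*v^T + v*u^T), lower part
        mat[k + i*stride] += alpha*u[i]*v[k] + alpha*v[i]*u[k];
  }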
diff --git a/Eigen/src/Core/products/TriangularMatrixMatrix.h b/Eigen/src/Core/products/TriangularMatrixMatrix.h
index 53e7876..25d9ffe 100644
--- a/Eigen/src/Core/products/TriangularMatrixMatrix.h
+++ b/Eigen/src/Core/products/TriangularMatrixMatrix.h
@@ -52,29 +52,29 @@
/* Optimized triangular matrix * matrix (_TRMM) product built on top of
* the general matrix matrix product.
*/
-template <typename Scalar,
+template <typename Scalar, typename Index,
int Mode, bool LhsIsTriangular,
int LhsStorageOrder, bool ConjugateLhs,
int RhsStorageOrder, bool ConjugateRhs,
int ResStorageOrder>
struct ei_product_triangular_matrix_matrix;
-template <typename Scalar,
+template <typename Scalar, typename Index,
int Mode, bool LhsIsTriangular,
int LhsStorageOrder, bool ConjugateLhs,
int RhsStorageOrder, bool ConjugateRhs>
-struct ei_product_triangular_matrix_matrix<Scalar,Mode,LhsIsTriangular,
+struct ei_product_triangular_matrix_matrix<Scalar,Index,Mode,LhsIsTriangular,
LhsStorageOrder,ConjugateLhs,
RhsStorageOrder,ConjugateRhs,RowMajor>
{
static EIGEN_STRONG_INLINE void run(
- int size, int otherSize,
- const Scalar* lhs, int lhsStride,
- const Scalar* rhs, int rhsStride,
- Scalar* res, int resStride,
+ Index size, Index otherSize,
+ const Scalar* lhs, Index lhsStride,
+ const Scalar* rhs, Index rhsStride,
+ Scalar* res, Index resStride,
Scalar alpha)
{
- ei_product_triangular_matrix_matrix<Scalar,
+ ei_product_triangular_matrix_matrix<Scalar, Index,
(Mode&UnitDiag) | (Mode&Upper) ? Lower : Upper,
(!LhsIsTriangular),
RhsStorageOrder==RowMajor ? ColMajor : RowMajor,
@@ -87,25 +87,25 @@
};
// implements col-major += alpha * op(triangular) * op(general)
-template <typename Scalar, int Mode,
+template <typename Scalar, typename Index, int Mode,
int LhsStorageOrder, bool ConjugateLhs,
int RhsStorageOrder, bool ConjugateRhs>
-struct ei_product_triangular_matrix_matrix<Scalar,Mode,true,
+struct ei_product_triangular_matrix_matrix<Scalar,Index,Mode,true,
LhsStorageOrder,ConjugateLhs,
RhsStorageOrder,ConjugateRhs,ColMajor>
{
static EIGEN_DONT_INLINE void run(
- int size, int cols,
- const Scalar* _lhs, int lhsStride,
- const Scalar* _rhs, int rhsStride,
- Scalar* res, int resStride,
+ Index size, Index cols,
+ const Scalar* _lhs, Index lhsStride,
+ const Scalar* _rhs, Index rhsStride,
+ Scalar* res, Index resStride,
Scalar alpha)
{
- int rows = size;
+ Index rows = size;
- ei_const_blas_data_mapper<Scalar, LhsStorageOrder> lhs(_lhs,lhsStride);
- ei_const_blas_data_mapper<Scalar, RhsStorageOrder> rhs(_rhs,rhsStride);
+ ei_const_blas_data_mapper<Scalar, Index, LhsStorageOrder> lhs(_lhs,lhsStride);
+ ei_const_blas_data_mapper<Scalar, Index, RhsStorageOrder> rhs(_rhs,rhsStride);
if (ConjugateRhs)
alpha = ei_conj(alpha);
@@ -116,8 +116,8 @@
IsLower = (Mode&Lower) == Lower
};
- int kc = std::min<int>(Blocking::Max_kc/4,size); // cache block size along the K direction
- int mc = std::min<int>(Blocking::Max_mc,rows); // cache block size along the M direction
+ Index kc = std::min<Index>(Blocking::Max_kc/4,size); // cache block size along the K direction
+ Index mc = std::min<Index>(Blocking::Max_mc,rows); // cache block size along the M direction
Scalar* blockA = ei_aligned_stack_new(Scalar, kc*mc);
std::size_t sizeB = kc*Blocking::PacketSize*Blocking::nr + kc*cols;
@@ -129,16 +129,16 @@
triangularBuffer.setZero();
triangularBuffer.diagonal().setOnes();
- ei_gebp_kernel<Scalar, Blocking::mr, Blocking::nr, ei_conj_helper<ConjugateLhs,ConjugateRhs> > gebp_kernel;
- ei_gemm_pack_lhs<Scalar,Blocking::mr,LhsStorageOrder> pack_lhs;
- ei_gemm_pack_rhs<Scalar,Blocking::nr,RhsStorageOrder> pack_rhs;
+ ei_gebp_kernel<Scalar, Index, Blocking::mr, Blocking::nr, ei_conj_helper<ConjugateLhs,ConjugateRhs> > gebp_kernel;
+ ei_gemm_pack_lhs<Scalar, Index, Blocking::mr,LhsStorageOrder> pack_lhs;
+ ei_gemm_pack_rhs<Scalar, Index, Blocking::nr,RhsStorageOrder> pack_rhs;
- for(int k2=IsLower ? size : 0;
+ for(Index k2=IsLower ? size : 0;
IsLower ? k2>0 : k2<size;
IsLower ? k2-=kc : k2+=kc)
{
- const int actual_kc = std::min(IsLower ? k2 : size-k2, kc);
- int actual_k2 = IsLower ? k2-actual_kc : k2;
+ const Index actual_kc = std::min(IsLower ? k2 : size-k2, kc);
+ Index actual_k2 = IsLower ? k2-actual_kc : k2;
pack_rhs(blockB, &rhs(actual_k2,0), rhsStride, alpha, actual_kc, cols);
@@ -149,21 +149,21 @@
// the block diagonal
{
// for each small vertical panel of lhs
- for (int k1=0; k1<actual_kc; k1+=SmallPanelWidth)
+ for (Index k1=0; k1<actual_kc; k1+=SmallPanelWidth)
{
- int actualPanelWidth = std::min<int>(actual_kc-k1, SmallPanelWidth);
- int lengthTarget = IsLower ? actual_kc-k1-actualPanelWidth : k1;
- int startBlock = actual_k2+k1;
- int blockBOffset = k1;
+ Index actualPanelWidth = std::min<Index>(actual_kc-k1, SmallPanelWidth);
+ Index lengthTarget = IsLower ? actual_kc-k1-actualPanelWidth : k1;
+ Index startBlock = actual_k2+k1;
+ Index blockBOffset = k1;
// => GEBP with the micro triangular block
// The trick is to pack this micro block while filling the opposite triangular part with zeros.
// To this end we do an extra triangular copy to a small temporary buffer
- for (int k=0;k<actualPanelWidth;++k)
+ for (Index k=0;k<actualPanelWidth;++k)
{
if (!(Mode&UnitDiag))
triangularBuffer.coeffRef(k,k) = lhs(startBlock+k,startBlock+k);
- for (int i=IsLower ? k+1 : 0; IsLower ? i<actualPanelWidth : i<k; ++i)
+ for (Index i=IsLower ? k+1 : 0; IsLower ? i<actualPanelWidth : i<k; ++i)
triangularBuffer.coeffRef(i,k) = lhs(startBlock+i,startBlock+k);
}
pack_lhs(blockA, triangularBuffer.data(), triangularBuffer.outerStride(), actualPanelWidth, actualPanelWidth);
@@ -174,7 +174,7 @@
// GEBP with remaining micro panel
if (lengthTarget>0)
{
- int startTarget = IsLower ? actual_k2+k1+actualPanelWidth : actual_k2;
+ Index startTarget = IsLower ? actual_k2+k1+actualPanelWidth : actual_k2;
pack_lhs(blockA, &lhs(startTarget,startBlock), lhsStride, actualPanelWidth, lengthTarget);
@@ -185,12 +185,12 @@
}
// the part below the diagonal => GEPP
{
- int start = IsLower ? k2 : 0;
- int end = IsLower ? size : actual_k2;
- for(int i2=start; i2<end; i2+=mc)
+ Index start = IsLower ? k2 : 0;
+ Index end = IsLower ? size : actual_k2;
+ for(Index i2=start; i2<end; i2+=mc)
{
- const int actual_mc = std::min(i2+mc,end)-i2;
- ei_gemm_pack_lhs<Scalar,Blocking::mr,LhsStorageOrder,false>()
+ const Index actual_mc = std::min(i2+mc,end)-i2;
+ ei_gemm_pack_lhs<Scalar, Index, Blocking::mr,LhsStorageOrder,false>()
(blockA, &lhs(i2, actual_k2), lhsStride, actual_kc, actual_mc);
gebp_kernel(res+i2, resStride, blockA, blockB, actual_mc, actual_kc, cols);
@@ -205,25 +205,25 @@
};
// implements col-major += alpha * op(general) * op(triangular)
-template <typename Scalar, int Mode,
+template <typename Scalar, typename Index, int Mode,
int LhsStorageOrder, bool ConjugateLhs,
int RhsStorageOrder, bool ConjugateRhs>
-struct ei_product_triangular_matrix_matrix<Scalar,Mode,false,
+struct ei_product_triangular_matrix_matrix<Scalar,Index,Mode,false,
LhsStorageOrder,ConjugateLhs,
RhsStorageOrder,ConjugateRhs,ColMajor>
{
static EIGEN_DONT_INLINE void run(
- int size, int rows,
- const Scalar* _lhs, int lhsStride,
- const Scalar* _rhs, int rhsStride,
- Scalar* res, int resStride,
+ Index size, Index rows,
+ const Scalar* _lhs, Index lhsStride,
+ const Scalar* _rhs, Index rhsStride,
+ Scalar* res, Index resStride,
Scalar alpha)
{
- int cols = size;
+ Index cols = size;
- ei_const_blas_data_mapper<Scalar, LhsStorageOrder> lhs(_lhs,lhsStride);
- ei_const_blas_data_mapper<Scalar, RhsStorageOrder> rhs(_rhs,rhsStride);
+ ei_const_blas_data_mapper<Scalar, Index, LhsStorageOrder> lhs(_lhs,lhsStride);
+ ei_const_blas_data_mapper<Scalar, Index, RhsStorageOrder> rhs(_rhs,rhsStride);
if (ConjugateRhs)
alpha = ei_conj(alpha);
@@ -234,8 +234,8 @@
IsLower = (Mode&Lower) == Lower
};
- int kc = std::min<int>(Blocking::Max_kc/4,size); // cache block size along the K direction
- int mc = std::min<int>(Blocking::Max_mc,rows); // cache block size along the M direction
+ Index kc = std::min<Index>(Blocking::Max_kc/4,size); // cache block size along the K direction
+ Index mc = std::min<Index>(Blocking::Max_mc,rows); // cache block size along the M direction
Scalar* blockA = ei_aligned_stack_new(Scalar, kc*mc);
std::size_t sizeB = kc*Blocking::PacketSize*Blocking::nr + kc*cols;
@@ -246,30 +246,30 @@
triangularBuffer.setZero();
triangularBuffer.diagonal().setOnes();
- ei_gebp_kernel<Scalar, Blocking::mr, Blocking::nr, ei_conj_helper<ConjugateLhs,ConjugateRhs> > gebp_kernel;
- ei_gemm_pack_lhs<Scalar,Blocking::mr,LhsStorageOrder> pack_lhs;
- ei_gemm_pack_rhs<Scalar,Blocking::nr,RhsStorageOrder> pack_rhs;
- ei_gemm_pack_rhs<Scalar,Blocking::nr,RhsStorageOrder,true> pack_rhs_panel;
+ ei_gebp_kernel<Scalar, Index, Blocking::mr, Blocking::nr, ei_conj_helper<ConjugateLhs,ConjugateRhs> > gebp_kernel;
+ ei_gemm_pack_lhs<Scalar, Index, Blocking::mr,LhsStorageOrder> pack_lhs;
+ ei_gemm_pack_rhs<Scalar, Index, Blocking::nr,RhsStorageOrder> pack_rhs;
+ ei_gemm_pack_rhs<Scalar, Index, Blocking::nr,RhsStorageOrder,true> pack_rhs_panel;
- for(int k2=IsLower ? 0 : size;
+ for(Index k2=IsLower ? 0 : size;
IsLower ? k2<size : k2>0;
IsLower ? k2+=kc : k2-=kc)
{
- const int actual_kc = std::min(IsLower ? size-k2 : k2, kc);
- int actual_k2 = IsLower ? k2 : k2-actual_kc;
- int rs = IsLower ? actual_k2 : size - k2;
+ const Index actual_kc = std::min(IsLower ? size-k2 : k2, kc);
+ Index actual_k2 = IsLower ? k2 : k2-actual_kc;
+ Index rs = IsLower ? actual_k2 : size - k2;
Scalar* geb = blockB+actual_kc*actual_kc;
pack_rhs(geb, &rhs(actual_k2,IsLower ? 0 : k2), rhsStride, alpha, actual_kc, rs);
// pack the triangular part of the rhs padding the unrolled blocks with zeros
{
- for (int j2=0; j2<actual_kc; j2+=SmallPanelWidth)
+ for (Index j2=0; j2<actual_kc; j2+=SmallPanelWidth)
{
- int actualPanelWidth = std::min<int>(actual_kc-j2, SmallPanelWidth);
- int actual_j2 = actual_k2 + j2;
- int panelOffset = IsLower ? j2+actualPanelWidth : 0;
- int panelLength = IsLower ? actual_kc-j2-actualPanelWidth : j2;
+ Index actualPanelWidth = std::min<Index>(actual_kc-j2, SmallPanelWidth);
+ Index actual_j2 = actual_k2 + j2;
+ Index panelOffset = IsLower ? j2+actualPanelWidth : 0;
+ Index panelLength = IsLower ? actual_kc-j2-actualPanelWidth : j2;
// general part
pack_rhs_panel(blockB+j2*actual_kc,
&rhs(actual_k2+panelOffset, actual_j2), rhsStride, alpha,
@@ -277,11 +277,11 @@
actual_kc, panelOffset);
// append the triangular part via a temporary buffer
- for (int j=0;j<actualPanelWidth;++j)
+ for (Index j=0;j<actualPanelWidth;++j)
{
if (!(Mode&UnitDiag))
triangularBuffer.coeffRef(j,j) = rhs(actual_j2+j,actual_j2+j);
- for (int k=IsLower ? j+1 : 0; IsLower ? k<actualPanelWidth : k<j; ++k)
+ for (Index k=IsLower ? j+1 : 0; IsLower ? k<actualPanelWidth : k<j; ++k)
triangularBuffer.coeffRef(k,j) = rhs(actual_j2+k,actual_j2+j);
}
@@ -292,18 +292,18 @@
}
}
- for (int i2=0; i2<rows; i2+=mc)
+ for (Index i2=0; i2<rows; i2+=mc)
{
- const int actual_mc = std::min(mc,rows-i2);
+ const Index actual_mc = std::min(mc,rows-i2);
pack_lhs(blockA, &lhs(i2, actual_k2), lhsStride, actual_kc, actual_mc);
// triangular kernel
{
- for (int j2=0; j2<actual_kc; j2+=SmallPanelWidth)
+ for (Index j2=0; j2<actual_kc; j2+=SmallPanelWidth)
{
- int actualPanelWidth = std::min<int>(actual_kc-j2, SmallPanelWidth);
- int panelLength = IsLower ? actual_kc-j2 : j2+actualPanelWidth;
- int blockOffset = IsLower ? j2 : 0;
+ Index actualPanelWidth = std::min<Index>(actual_kc-j2, SmallPanelWidth);
+ Index panelLength = IsLower ? actual_kc-j2 : j2+actualPanelWidth;
+ Index blockOffset = IsLower ? j2 : 0;
gebp_kernel(res+i2+(actual_k2+j2)*resStride, resStride,
blockA, blockB+j2*actual_kc,
@@ -349,7 +349,7 @@
Scalar actualAlpha = alpha * LhsBlasTraits::extractScalarFactor(m_lhs)
* RhsBlasTraits::extractScalarFactor(m_rhs);
- ei_product_triangular_matrix_matrix<Scalar,
+ ei_product_triangular_matrix_matrix<Scalar, Index,
Mode, LhsIsTriangular,
(ei_traits<_ActualLhsType>::Flags&RowMajorBit) ? RowMajor : ColMajor, LhsBlasTraits::NeedToConjugate,
(ei_traits<_ActualRhsType>::Flags&RowMajorBit) ? RowMajor : ColMajor, RhsBlasTraits::NeedToConjugate,
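A note on the RowMajor specialization at the top of this file: a row-major result C occupies the same memory as a col-major C^T, and C = op(T) * op(G) implies C^T = op(G)^T * op(T)^T. The specialization therefore forwards to the ColMajor kernel with lhs and rhs swapped, both storage orders flipped, LhsIsTriangular negated, Upper exchanged with Lower, and a conjugation fix-up for complex scalars. The same reasoning drives the RowMajor forwardings in SelfadjointMatrixMatrix.h and SelfadjointProduct.h above.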
diff --git a/Eigen/src/Core/products/TriangularMatrixVector.h b/Eigen/src/Core/products/TriangularMatrixVector.h
index ee4c45c..1a2b183 100644
--- a/Eigen/src/Core/products/TriangularMatrixVector.h
+++ b/Eigen/src/Core/products/TriangularMatrixVector.h
@@ -33,34 +33,35 @@
struct ei_product_triangular_vector_selector<Lhs,Rhs,Result,Mode,ConjLhs,ConjRhs,ColMajor>
{
typedef typename Rhs::Scalar Scalar;
+ typedef typename Rhs::Index Index;
enum {
IsLower = ((Mode&Lower)==Lower),
HasUnitDiag = (Mode & UnitDiag)==UnitDiag
};
static EIGEN_DONT_INLINE void run(const Lhs& lhs, const Rhs& rhs, Result& res, typename ei_traits<Lhs>::Scalar alpha)
{
- static const int PanelWidth = EIGEN_TUNE_TRIANGULAR_PANEL_WIDTH;
+ static const Index PanelWidth = EIGEN_TUNE_TRIANGULAR_PANEL_WIDTH;
typename ei_conj_expr_if<ConjLhs,Lhs>::ret cjLhs(lhs);
typename ei_conj_expr_if<ConjRhs,Rhs>::ret cjRhs(rhs);
- int size = lhs.cols();
- for (int pi=0; pi<size; pi+=PanelWidth)
+ Index size = lhs.cols();
+ for (Index pi=0; pi<size; pi+=PanelWidth)
{
- int actualPanelWidth = std::min(PanelWidth, size-pi);
- for (int k=0; k<actualPanelWidth; ++k)
+ Index actualPanelWidth = std::min(PanelWidth, size-pi);
+ for (Index k=0; k<actualPanelWidth; ++k)
{
- int i = pi + k;
- int s = IsLower ? (HasUnitDiag ? i+1 : i ) : pi;
- int r = IsLower ? actualPanelWidth-k : k+1;
+ Index i = pi + k;
+ Index s = IsLower ? (HasUnitDiag ? i+1 : i ) : pi;
+ Index r = IsLower ? actualPanelWidth-k : k+1;
if ((!HasUnitDiag) || (--r)>0)
res.segment(s,r) += (alpha * cjRhs.coeff(i)) * cjLhs.col(i).segment(s,r);
if (HasUnitDiag)
res.coeffRef(i) += alpha * cjRhs.coeff(i);
}
- int r = IsLower ? size - pi - actualPanelWidth : pi;
+ Index r = IsLower ? size - pi - actualPanelWidth : pi;
if (r>0)
{
- int s = IsLower ? pi+actualPanelWidth : 0;
+ Index s = IsLower ? pi+actualPanelWidth : 0;
ei_cache_friendly_product_colmajor_times_vector<ConjLhs,ConjRhs>(
r,
&(lhs.const_cast_derived().coeffRef(s,pi)), lhs.outerStride(),
@@ -76,33 +77,34 @@
struct ei_product_triangular_vector_selector<Lhs,Rhs,Result,Mode,ConjLhs,ConjRhs,RowMajor>
{
typedef typename Rhs::Scalar Scalar;
+ typedef typename Rhs::Index Index;
enum {
IsLower = ((Mode&Lower)==Lower),
HasUnitDiag = (Mode & UnitDiag)==UnitDiag
};
static void run(const Lhs& lhs, const Rhs& rhs, Result& res, typename ei_traits<Lhs>::Scalar alpha)
{
- static const int PanelWidth = EIGEN_TUNE_TRIANGULAR_PANEL_WIDTH;
+ static const Index PanelWidth = EIGEN_TUNE_TRIANGULAR_PANEL_WIDTH;
typename ei_conj_expr_if<ConjLhs,Lhs>::ret cjLhs(lhs);
typename ei_conj_expr_if<ConjRhs,Rhs>::ret cjRhs(rhs);
- int size = lhs.cols();
- for (int pi=0; pi<size; pi+=PanelWidth)
+ Index size = lhs.cols();
+ for (Index pi=0; pi<size; pi+=PanelWidth)
{
- int actualPanelWidth = std::min(PanelWidth, size-pi);
- for (int k=0; k<actualPanelWidth; ++k)
+ Index actualPanelWidth = std::min(PanelWidth, size-pi);
+ for (Index k=0; k<actualPanelWidth; ++k)
{
- int i = pi + k;
- int s = IsLower ? pi : (HasUnitDiag ? i+1 : i);
- int r = IsLower ? k+1 : actualPanelWidth-k;
+ Index i = pi + k;
+ Index s = IsLower ? pi : (HasUnitDiag ? i+1 : i);
+ Index r = IsLower ? k+1 : actualPanelWidth-k;
if ((!HasUnitDiag) || (--r)>0)
res.coeffRef(i) += alpha * (cjLhs.row(i).segment(s,r).cwiseProduct(cjRhs.segment(s,r).transpose())).sum();
if (HasUnitDiag)
res.coeffRef(i) += alpha * cjRhs.coeff(i);
}
- int r = IsLower ? pi : size - pi - actualPanelWidth;
+ Index r = IsLower ? pi : size - pi - actualPanelWidth;
if (r>0)
{
- int s = IsLower ? 0 : pi + actualPanelWidth;
+ Index s = IsLower ? 0 : pi + actualPanelWidth;
Block<Result,Dynamic,1> target(res,pi,0,actualPanelWidth,1);
ei_cache_friendly_product_rowmajor_times_vector<ConjLhs,ConjRhs>(
&(lhs.const_cast_derived().coeffRef(pi,s)), lhs.outerStride(),
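For the ColMajor, Lower, non-unit-diagonal case, the panel decomposition above reduces to the following unpanelled reference (a sketch only; the real code forwards the rectangular leftovers to the cache-friendly matrix * vector kernels):

  template<typename Scalar, typename Index>
  void trmv_lower_ref(Index size, const Scalar* lhs, Index lhsStride,
                      const Scalar* rhs, Scalar* res, Scalar alpha)
  {
    for (Index i=0; i<size; ++i)    // column i contributes to res(i..size-1)
    {
      Scalar t = alpha * rhs[i];
      for (Index k=i; k<size; ++k)
        res[k] += lhs[k + i*lhsStride] * t;
    }
  }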
diff --git a/Eigen/src/Core/products/TriangularSolverMatrix.h b/Eigen/src/Core/products/TriangularSolverMatrix.h
index 1774081..1d80225 100644
--- a/Eigen/src/Core/products/TriangularSolverMatrix.h
+++ b/Eigen/src/Core/products/TriangularSolverMatrix.h
@@ -26,16 +26,16 @@
#define EIGEN_TRIANGULAR_SOLVER_MATRIX_H
// if the rhs is row major, let's transpose the product
-template <typename Scalar, int Side, int Mode, bool Conjugate, int TriStorageOrder>
-struct ei_triangular_solve_matrix<Scalar,Side,Mode,Conjugate,TriStorageOrder,RowMajor>
+template <typename Scalar, typename Index, int Side, int Mode, bool Conjugate, int TriStorageOrder>
+struct ei_triangular_solve_matrix<Scalar,Index,Side,Mode,Conjugate,TriStorageOrder,RowMajor>
{
static EIGEN_DONT_INLINE void run(
- int size, int cols,
- const Scalar* tri, int triStride,
- Scalar* _other, int otherStride)
+ Index size, Index cols,
+ const Scalar* tri, Index triStride,
+ Scalar* _other, Index otherStride)
{
ei_triangular_solve_matrix<
- Scalar, Side==OnTheLeft?OnTheRight:OnTheLeft,
+ Scalar, Index, Side==OnTheLeft?OnTheRight:OnTheLeft,
(Mode&UnitDiag) | ((Mode&Upper) ? Lower : Upper),
NumTraits<Scalar>::IsComplex && Conjugate,
TriStorageOrder==RowMajor ? ColMajor : RowMajor, ColMajor>
@@ -45,17 +45,17 @@
/* Optimized triangular solver with multiple right hand sides and the triangular matrix on the left
*/
-template <typename Scalar, int Mode, bool Conjugate, int TriStorageOrder>
-struct ei_triangular_solve_matrix<Scalar,OnTheLeft,Mode,Conjugate,TriStorageOrder,ColMajor>
+template <typename Scalar, typename Index, int Mode, bool Conjugate, int TriStorageOrder>
+struct ei_triangular_solve_matrix<Scalar,Index,OnTheLeft,Mode,Conjugate,TriStorageOrder,ColMajor>
{
static EIGEN_DONT_INLINE void run(
- int size, int otherSize,
- const Scalar* _tri, int triStride,
- Scalar* _other, int otherStride)
+ Index size, Index otherSize,
+ const Scalar* _tri, Index triStride,
+ Scalar* _other, Index otherStride)
{
- int cols = otherSize;
- ei_const_blas_data_mapper<Scalar, TriStorageOrder> tri(_tri,triStride);
- ei_blas_data_mapper<Scalar, ColMajor> other(_other,otherStride);
+ Index cols = otherSize;
+ ei_const_blas_data_mapper<Scalar, Index, TriStorageOrder> tri(_tri,triStride);
+ ei_blas_data_mapper<Scalar, Index, ColMajor> other(_other,otherStride);
typedef ei_product_blocking_traits<Scalar> Blocking;
enum {
@@ -63,8 +63,8 @@
IsLower = (Mode&Lower) == Lower
};
- int kc = std::min<int>(Blocking::Max_kc/4,size); // cache block size along the K direction
- int mc = std::min<int>(Blocking::Max_mc,size); // cache block size along the M direction
+ Index kc = std::min<Index>(Blocking::Max_kc/4,size); // cache block size along the K direction
+ Index mc = std::min<Index>(Blocking::Max_mc,size); // cache block size along the M direction
Scalar* blockA = ei_aligned_stack_new(Scalar, kc*mc);
std::size_t sizeB = kc*Blocking::PacketSize*Blocking::nr + kc*cols;
@@ -72,15 +72,15 @@
Scalar* blockB = allocatedBlockB + kc*Blocking::PacketSize*Blocking::nr;
ei_conj_if<Conjugate> conj;
- ei_gebp_kernel<Scalar, Blocking::mr, Blocking::nr, ei_conj_helper<Conjugate,false> > gebp_kernel;
- ei_gemm_pack_lhs<Scalar,Blocking::mr,TriStorageOrder> pack_lhs;
- ei_gemm_pack_rhs<Scalar, Blocking::nr, ColMajor, true> pack_rhs;
+ ei_gebp_kernel<Scalar, Index, Blocking::mr, Blocking::nr, ei_conj_helper<Conjugate,false> > gebp_kernel;
+ ei_gemm_pack_lhs<Scalar, Index, Blocking::mr,TriStorageOrder> pack_lhs;
+ ei_gemm_pack_rhs<Scalar, Index, Blocking::nr, ColMajor, true> pack_rhs;
- for(int k2=IsLower ? 0 : size;
+ for(Index k2=IsLower ? 0 : size;
IsLower ? k2<size : k2>0;
IsLower ? k2+=kc : k2-=kc)
{
- const int actual_kc = std::min(IsLower ? size-k2 : k2, kc);
+ const Index actual_kc = std::min(IsLower ? size-k2 : k2, kc);
// We have selected and packed a big horizontal panel R1 of rhs. Let B be the packed copy of this panel,
// and R2 the remaining part of rhs. The corresponding vertical panel of lhs is split into
@@ -97,45 +97,45 @@
// and the remaining small part A2 which is processed using gebp with appropriate block strides
{
// for each small vertical panel of lhs
- for (int k1=0; k1<actual_kc; k1+=SmallPanelWidth)
+ for (Index k1=0; k1<actual_kc; k1+=SmallPanelWidth)
{
- int actualPanelWidth = std::min<int>(actual_kc-k1, SmallPanelWidth);
+ Index actualPanelWidth = std::min<Index>(actual_kc-k1, SmallPanelWidth);
// tr solve
- for (int k=0; k<actualPanelWidth; ++k)
+ for (Index k=0; k<actualPanelWidth; ++k)
{
// TODO write a small kernel handling this (can be shared with trsv)
- int i = IsLower ? k2+k1+k : k2-k1-k-1;
- int s = IsLower ? k2+k1 : i+1;
- int rs = actualPanelWidth - k - 1; // remaining size
+ Index i = IsLower ? k2+k1+k : k2-k1-k-1;
+ Index s = IsLower ? k2+k1 : i+1;
+ Index rs = actualPanelWidth - k - 1; // remaining size
Scalar a = (Mode & UnitDiag) ? Scalar(1) : Scalar(1)/conj(tri(i,i));
- for (int j=0; j<cols; ++j)
+ for (Index j=0; j<cols; ++j)
{
if (TriStorageOrder==RowMajor)
{
Scalar b = 0;
const Scalar* l = &tri(i,s);
Scalar* r = &other(s,j);
- for (int i3=0; i3<k; ++i3)
+ for (Index i3=0; i3<k; ++i3)
b += conj(l[i3]) * r[i3];
other(i,j) = (other(i,j) - b)*a;
}
else
{
- int s = IsLower ? i+1 : i-rs;
+ Index s = IsLower ? i+1 : i-rs;
Scalar b = (other(i,j) *= a);
Scalar* r = &other(s,j);
const Scalar* l = &tri(s,i);
- for (int i3=0;i3<rs;++i3)
+ for (Index i3=0;i3<rs;++i3)
r[i3] -= b * conj(l[i3]);
}
}
}
- int lengthTarget = actual_kc-k1-actualPanelWidth;
- int startBlock = IsLower ? k2+k1 : k2-k1-actualPanelWidth;
- int blockBOffset = IsLower ? k1 : lengthTarget;
+ Index lengthTarget = actual_kc-k1-actualPanelWidth;
+ Index startBlock = IsLower ? k2+k1 : k2-k1-actualPanelWidth;
+ Index blockBOffset = IsLower ? k1 : lengthTarget;
// update the respective rows of B from other
pack_rhs(blockB, _other+startBlock, otherStride, -1, actualPanelWidth, cols, actual_kc, blockBOffset);
@@ -143,7 +143,7 @@
// GEBP
if (lengthTarget>0)
{
- int startTarget = IsLower ? k2+k1+actualPanelWidth : k2-actual_kc;
+ Index startTarget = IsLower ? k2+k1+actualPanelWidth : k2-actual_kc;
pack_lhs(blockA, &tri(startTarget,startBlock), triStride, actualPanelWidth, lengthTarget);
@@ -155,11 +155,11 @@
// R2 = A2 * B => GEPP
{
- int start = IsLower ? k2+kc : 0;
- int end = IsLower ? size : k2-kc;
- for(int i2=start; i2<end; i2+=mc)
+ Index start = IsLower ? k2+kc : 0;
+ Index end = IsLower ? size : k2-kc;
+ for(Index i2=start; i2<end; i2+=mc)
{
- const int actual_mc = std::min(mc,end-i2);
+ const Index actual_mc = std::min(mc,end-i2);
if (actual_mc>0)
{
pack_lhs(blockA, &tri(i2, IsLower ? k2 : k2-kc), triStride, actual_kc, actual_mc);
@@ -177,17 +177,17 @@
/* Optimized triangular solver with multiple left hand sides and the triangular matrix on the right
*/
-template <typename Scalar, int Mode, bool Conjugate, int TriStorageOrder>
-struct ei_triangular_solve_matrix<Scalar,OnTheRight,Mode,Conjugate,TriStorageOrder,ColMajor>
+template <typename Scalar, typename Index, int Mode, bool Conjugate, int TriStorageOrder>
+struct ei_triangular_solve_matrix<Scalar,Index,OnTheRight,Mode,Conjugate,TriStorageOrder,ColMajor>
{
static EIGEN_DONT_INLINE void run(
- int size, int otherSize,
- const Scalar* _tri, int triStride,
- Scalar* _other, int otherStride)
+ Index size, Index otherSize,
+ const Scalar* _tri, Index triStride,
+ Scalar* _other, Index otherStride)
{
- int rows = otherSize;
- ei_const_blas_data_mapper<Scalar, TriStorageOrder> rhs(_tri,triStride);
- ei_blas_data_mapper<Scalar, ColMajor> lhs(_other,otherStride);
+ Index rows = otherSize;
+ ei_const_blas_data_mapper<Scalar, Index, TriStorageOrder> rhs(_tri,triStride);
+ ei_blas_data_mapper<Scalar, Index, ColMajor> lhs(_other,otherStride);
typedef ei_product_blocking_traits<Scalar> Blocking;
enum {
@@ -196,8 +196,8 @@
IsLower = (Mode&Lower) == Lower
};
- int kc = std::min<int>(Blocking::Max_kc/4,size); // cache block size along the K direction
- int mc = std::min<int>(Blocking::Max_mc,size); // cache block size along the M direction
+ Index kc = std::min<Index>(Blocking::Max_kc/4,size); // cache block size along the K direction
+ Index mc = std::min<Index>(Blocking::Max_mc,size); // cache block size along the M direction
Scalar* blockA = ei_aligned_stack_new(Scalar, kc*mc);
std::size_t sizeB = kc*Blocking::PacketSize*Blocking::nr + kc*size;
@@ -205,20 +205,20 @@
Scalar* blockB = allocatedBlockB + kc*Blocking::PacketSize*Blocking::nr;
ei_conj_if<Conjugate> conj;
- ei_gebp_kernel<Scalar, Blocking::mr, Blocking::nr, ei_conj_helper<false,Conjugate> > gebp_kernel;
- ei_gemm_pack_rhs<Scalar,Blocking::nr,RhsStorageOrder> pack_rhs;
- ei_gemm_pack_rhs<Scalar,Blocking::nr,RhsStorageOrder,true> pack_rhs_panel;
- ei_gemm_pack_lhs<Scalar, Blocking::mr, ColMajor, false, true> pack_lhs_panel;
+ ei_gebp_kernel<Scalar, Index, Blocking::mr, Blocking::nr, ei_conj_helper<false,Conjugate> > gebp_kernel;
+ ei_gemm_pack_rhs<Scalar, Index, Blocking::nr,RhsStorageOrder> pack_rhs;
+ ei_gemm_pack_rhs<Scalar, Index, Blocking::nr,RhsStorageOrder,true> pack_rhs_panel;
+ ei_gemm_pack_lhs<Scalar, Index, Blocking::mr, ColMajor, false, true> pack_lhs_panel;
- for(int k2=IsLower ? size : 0;
+ for(Index k2=IsLower ? size : 0;
IsLower ? k2>0 : k2<size;
IsLower ? k2-=kc : k2+=kc)
{
- const int actual_kc = std::min(IsLower ? k2 : size-k2, kc);
- int actual_k2 = IsLower ? k2-actual_kc : k2 ;
+ const Index actual_kc = std::min(IsLower ? k2 : size-k2, kc);
+ Index actual_k2 = IsLower ? k2-actual_kc : k2 ;
- int startPanel = IsLower ? 0 : k2+actual_kc;
- int rs = IsLower ? actual_k2 : size - actual_k2 - actual_kc;
+ Index startPanel = IsLower ? 0 : k2+actual_kc;
+ Index rs = IsLower ? actual_k2 : size - actual_k2 - actual_kc;
Scalar* geb = blockB+actual_kc*actual_kc;
if (rs>0) pack_rhs(geb, &rhs(actual_k2,startPanel), triStride, -1, actual_kc, rs);
@@ -226,12 +226,12 @@
// triangular packing (we only pack the panels off the diagonal,
// neglecting the blocks overlapping the diagonal)
{
- for (int j2=0; j2<actual_kc; j2+=SmallPanelWidth)
+ for (Index j2=0; j2<actual_kc; j2+=SmallPanelWidth)
{
- int actualPanelWidth = std::min<int>(actual_kc-j2, SmallPanelWidth);
- int actual_j2 = actual_k2 + j2;
- int panelOffset = IsLower ? j2+actualPanelWidth : 0;
- int panelLength = IsLower ? actual_kc-j2-actualPanelWidth : j2;
+ Index actualPanelWidth = std::min<Index>(actual_kc-j2, SmallPanelWidth);
+ Index actual_j2 = actual_k2 + j2;
+ Index panelOffset = IsLower ? j2+actualPanelWidth : 0;
+ Index panelLength = IsLower ? actual_kc-j2-actualPanelWidth : j2;
if (panelLength>0)
pack_rhs_panel(blockB+j2*actual_kc,
@@ -241,24 +241,24 @@
}
}
- for(int i2=0; i2<rows; i2+=mc)
+ for(Index i2=0; i2<rows; i2+=mc)
{
- const int actual_mc = std::min(mc,rows-i2);
+ const Index actual_mc = std::min(mc,rows-i2);
// triangular solver kernel
{
// for each small block of the diagonal (=> vertical panels of rhs)
- for (int j2 = IsLower
- ? (actual_kc - ((actual_kc%SmallPanelWidth) ? (actual_kc%SmallPanelWidth)
- : SmallPanelWidth))
+ for (Index j2 = IsLower
+ ? (actual_kc - ((actual_kc%SmallPanelWidth) ? Index(actual_kc%SmallPanelWidth)
+ : Index(SmallPanelWidth)))
: 0;
IsLower ? j2>=0 : j2<actual_kc;
IsLower ? j2-=SmallPanelWidth : j2+=SmallPanelWidth)
{
- int actualPanelWidth = std::min<int>(actual_kc-j2, SmallPanelWidth);
- int absolute_j2 = actual_k2 + j2;
- int panelOffset = IsLower ? j2+actualPanelWidth : 0;
- int panelLength = IsLower ? actual_kc - j2 - actualPanelWidth : j2;
+ Index actualPanelWidth = std::min<Index>(actual_kc-j2, SmallPanelWidth);
+ Index absolute_j2 = actual_k2 + j2;
+ Index panelOffset = IsLower ? j2+actualPanelWidth : 0;
+ Index panelLength = IsLower ? actual_kc - j2 - actualPanelWidth : j2;
// GEBP
if(panelLength>0)
@@ -272,20 +272,20 @@
}
// unblocked triangular solve
- for (int k=0; k<actualPanelWidth; ++k)
+ for (Index k=0; k<actualPanelWidth; ++k)
{
- int j = IsLower ? absolute_j2+actualPanelWidth-k-1 : absolute_j2+k;
+ Index j = IsLower ? absolute_j2+actualPanelWidth-k-1 : absolute_j2+k;
Scalar* r = &lhs(i2,j);
- for (int k3=0; k3<k; ++k3)
+ for (Index k3=0; k3<k; ++k3)
{
Scalar b = conj(rhs(IsLower ? j+1+k3 : absolute_j2+k3,j));
Scalar* a = &lhs(i2,IsLower ? j+1+k3 : absolute_j2+k3);
- for (int i=0; i<actual_mc; ++i)
+ for (Index i=0; i<actual_mc; ++i)
r[i] -= a[i] * b;
}
Scalar b = (Mode & UnitDiag) ? Scalar(1) : Scalar(1)/conj(rhs(j,j));
- for (int i=0; i<actual_mc; ++i)
+ for (Index i=0; i<actual_mc; ++i)
r[i] *= b;
}
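Underneath the blocking, the "unblocked triangular solve" steps above are plain substitution; for the OnTheLeft, Lower, ColMajor case with a single right-hand side this amounts to (reference sketch):

  template<typename Scalar, typename Index>
  void trsv_lower_ref(Index size, const Scalar* tri, Index triStride, Scalar* x)
  {
    for (Index i=0; i<size; ++i)            // forward substitution, in place
    {
      Scalar b = x[i];
      for (Index k=0; k<i; ++k)
        b -= tri[i + k*triStride] * x[k];   // subtract the already-solved part
      x[i] = b / tri[i + i*triStride];      // division skipped when Mode & UnitDiag
    }
  }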
diff --git a/Eigen/src/Core/util/BlasUtil.h b/Eigen/src/Core/util/BlasUtil.h
index 0fe6d29..6cbd266 100644
--- a/Eigen/src/Core/util/BlasUtil.h
+++ b/Eigen/src/Core/util/BlasUtil.h
@@ -29,29 +29,29 @@
// implement and control fast level 2 and level 3 BLAS-like routines.
// forward declarations
-template<typename Scalar, int mr, int nr, typename Conj>
+template<typename Scalar, typename Index, int mr, int nr, typename Conj>
struct ei_gebp_kernel;
-template<typename Scalar, int nr, int StorageOrder, bool PanelMode=false>
+template<typename Scalar, typename Index, int nr, int StorageOrder, bool PanelMode=false>
struct ei_gemm_pack_rhs;
-template<typename Scalar, int mr, int StorageOrder, bool Conjugate = false, bool PanelMode = false>
+template<typename Scalar, typename Index, int mr, int StorageOrder, bool Conjugate = false, bool PanelMode = false>
struct ei_gemm_pack_lhs;
template<
- typename Scalar,
+ typename Scalar, typename Index,
int LhsStorageOrder, bool ConjugateLhs,
int RhsStorageOrder, bool ConjugateRhs,
int ResStorageOrder>
struct ei_general_matrix_matrix_product;
-template<bool ConjugateLhs, bool ConjugateRhs, typename Scalar, typename RhsType>
+template<bool ConjugateLhs, bool ConjugateRhs, typename Scalar, typename Index, typename RhsType>
static void ei_cache_friendly_product_colmajor_times_vector(
- int size, const Scalar* lhs, int lhsStride, const RhsType& rhs, Scalar* res, Scalar alpha);
+ Index size, const Scalar* lhs, Index lhsStride, const RhsType& rhs, Scalar* res, Scalar alpha);
-template<bool ConjugateLhs, bool ConjugateRhs, typename Scalar, typename ResType>
+template<bool ConjugateLhs, bool ConjugateRhs, typename Scalar, typename Index, typename ResType>
static void ei_cache_friendly_product_rowmajor_times_vector(
- const Scalar* lhs, int lhsStride, const Scalar* rhs, int rhsSize, ResType& res, Scalar alpha);
+ const Scalar* lhs, Index lhsStride, const Scalar* rhs, Index rhsSize, ResType& res, Scalar alpha);
// Provides scalar/packet-wise product and product with accumulation
// with optional conjugation of the arguments.
@@ -98,29 +98,29 @@
// Lightweight helper class to access matrix coefficients.
// Yes, this is somehow redundant with Map<>, but this version is much much lighter,
// and so I hope better compilation performance (time and code quality).
-template<typename Scalar, int StorageOrder>
+template<typename Scalar, typename Index, int StorageOrder>
class ei_blas_data_mapper
{
public:
- ei_blas_data_mapper(Scalar* data, int stride) : m_data(data), m_stride(stride) {}
- EIGEN_STRONG_INLINE Scalar& operator()(int i, int j)
+ ei_blas_data_mapper(Scalar* data, Index stride) : m_data(data), m_stride(stride) {}
+ EIGEN_STRONG_INLINE Scalar& operator()(Index i, Index j)
{ return m_data[StorageOrder==RowMajor ? j + i*m_stride : i + j*m_stride]; }
protected:
Scalar* EIGEN_RESTRICT m_data;
- int m_stride;
+ Index m_stride;
};
// lightweight helper class to access matrix coefficients (const version)
-template<typename Scalar, int StorageOrder>
+template<typename Scalar, typename Index, int StorageOrder>
class ei_const_blas_data_mapper
{
public:
- ei_const_blas_data_mapper(const Scalar* data, int stride) : m_data(data), m_stride(stride) {}
- EIGEN_STRONG_INLINE const Scalar& operator()(int i, int j) const
+ ei_const_blas_data_mapper(const Scalar* data, Index stride) : m_data(data), m_stride(stride) {}
+ EIGEN_STRONG_INLINE const Scalar& operator()(Index i, Index j) const
{ return m_data[StorageOrder==RowMajor ? j + i*m_stride : i + j*m_stride]; }
protected:
const Scalar* EIGEN_RESTRICT m_data;
- int m_stride;
+ Index m_stride;
};
// Defines various constants controlling level 3 blocking
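A usage sketch for the two mappers, with a stand-alone re-declaration so the snippet compiles by itself (the library of course uses the classes above):

  #include <cstddef>
  enum { kColMajor = 0, kRowMajor = 1 }; // stand-ins for Eigen's storage-order enums

  template<typename Scalar, typename Index, int StorageOrder>
  struct mapper // minimal stand-in for ei_blas_data_mapper
  {
    mapper(Scalar* data, Index stride) : m_data(data), m_stride(stride) {}
    Scalar& operator()(Index i, Index j)
    { return m_data[StorageOrder==kRowMajor ? j + i*m_stride : i + j*m_stride]; }
    Scalar* m_data;
    Index m_stride;
  };

  int main()
  {
    double d[6] = {1,2,3,4,5,6};
    mapper<double,std::ptrdiff_t,kColMajor> A(d, 3); // view d as a 3x2 col-major matrix
    return A(1,0)==2.0 ? 0 : 1;                      // A(i,j) = d[i + j*stride]
  }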
diff --git a/Eigen/src/Core/util/Macros.h b/Eigen/src/Core/util/Macros.h
index 7871824..312a144 100644
--- a/Eigen/src/Core/util/Macros.h
+++ b/Eigen/src/Core/util/Macros.h
@@ -94,6 +94,14 @@
#define EIGEN_DEFAULT_MATRIX_STORAGE_ORDER_OPTION ColMajor
#endif
+#ifndef EIGEN_DEFAULT_DENSE_INDEX_TYPE
+#define EIGEN_DEFAULT_DENSE_INDEX_TYPE std::ptrdiff_t
+#endif
+
+#ifndef EIGEN_DEFAULT_SPARSE_INDEX_TYPE
+#define EIGEN_DEFAULT_SPARSE_INDEX_TYPE int
+#endif
+
/** Allows to disable some optimizations which might affect the accuracy of the result.
* Such optimization are enabled by default, and set EIGEN_FAST_MATH to 0 to disable them.
* They currently include:
@@ -266,6 +274,8 @@
typedef typename Eigen::NumTraits<Scalar>::Real RealScalar; /*!< \brief The underlying numeric type for composed scalar types. \details In cases where Scalar is e.g. std::complex<T>, T were corresponding to RealScalar. */ \
typedef typename Base::CoeffReturnType CoeffReturnType; /*!< \brief The return type for coefficient access. \details Depending on whether the object allows direct coefficient access (e.g. for a MatrixXd), this type is either 'const Scalar&' or simply 'Scalar' for objects that do not allow direct coefficient access. */ \
typedef typename Eigen::ei_nested<Derived>::type Nested; \
+ typedef typename Eigen::ei_traits<Derived>::StorageKind StorageKind; \
+ typedef typename Eigen::ei_index<StorageKind>::type Index; \
enum { RowsAtCompileTime = Eigen::ei_traits<Derived>::RowsAtCompileTime, \
ColsAtCompileTime = Eigen::ei_traits<Derived>::ColsAtCompileTime, \
Flags = Eigen::ei_traits<Derived>::Flags, \
@@ -281,6 +291,8 @@
typedef typename Base::PacketScalar PacketScalar; \
typedef typename Base::CoeffReturnType CoeffReturnType; /*!< \brief The return type for coefficient access. \details Depending on whether the object allows direct coefficient access (e.g. for a MatrixXd), this type is either 'const Scalar&' or simply 'Scalar' for objects that do not allow direct coefficient access. */ \
typedef typename Eigen::ei_nested<Derived>::type Nested; \
+ typedef typename Eigen::ei_traits<Derived>::StorageKind StorageKind; \
+ typedef typename Eigen::ei_index<StorageKind>::type Index; \
enum { RowsAtCompileTime = Eigen::ei_traits<Derived>::RowsAtCompileTime, \
ColsAtCompileTime = Eigen::ei_traits<Derived>::ColsAtCompileTime, \
MaxRowsAtCompileTime = Eigen::ei_traits<Derived>::MaxRowsAtCompileTime, \
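Both new macros are guarded by #ifndef, so a project should be able to override the dense index type before including any Eigen header; a sketch of the assumed intended use (the patch itself only shows the defaults):

  // must come before the first Eigen include
  #define EIGEN_DEFAULT_DENSE_INDEX_TYPE int
  #include <Eigen/Dense>
  // with this override, ei_index<Dense>::type, and hence e.g. MatrixXd::Index,
  // is int instead of the default std::ptrdiff_t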
diff --git a/Eigen/src/Core/util/Memory.h b/Eigen/src/Core/util/Memory.h
index f5cdd91..6b202db 100644
--- a/Eigen/src/Core/util/Memory.h
+++ b/Eigen/src/Core/util/Memory.h
@@ -379,10 +379,10 @@
* other hand, we do not assume that the array address is a multiple of sizeof(Scalar), as that fails for
* example with Scalar=double on certain 32-bit platforms, see bug #79.
*
- * There is also the variant ei_first_aligned(const MatrixBase&, Integer) defined in Coeffs.h.
+ * There is also the variant ei_first_aligned(const MatrixBase&) defined in DenseCoeffsBase.h.
*/
-template<typename Scalar, typename Integer>
-inline static Integer ei_first_aligned(const Scalar* array, Integer size)
+template<typename Scalar, typename Index>
+inline static Index ei_first_aligned(const Scalar* array, Index size)
{
typedef typename ei_packet_traits<Scalar>::type Packet;
enum { PacketSize = ei_packet_traits<Scalar>::size,
@@ -403,7 +403,7 @@
}
else
{
- return std::min<Integer>( (PacketSize - (Integer((size_t(array)/sizeof(Scalar))) & PacketAlignedMask))
+ return std::min<Index>( (PacketSize - (Index((size_t(array)/sizeof(Scalar))) & PacketAlignedMask))
& PacketAlignedMask, size);
}
}
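
For reference, the alignment computation above boils down to the following standalone sketch (hypothetical helper name; float with 4-element packets assumed):

    #include <algorithm>
    #include <cstddef>

    // first index i such that &array[i] is packet-aligned, clamped to size
    static std::ptrdiff_t first_aligned_sketch(const float* array, std::ptrdiff_t size)
    {
      const std::ptrdiff_t PacketSize = 4;  // e.g. 4 floats per SSE packet
      const std::ptrdiff_t Mask = PacketSize - 1;
      const std::ptrdiff_t offset =
          std::ptrdiff_t(std::size_t(array) / sizeof(float)) & Mask;
      return std::min((PacketSize - offset) & Mask, size);
    }
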
diff --git a/Eigen/src/Core/util/XprHelper.h b/Eigen/src/Core/util/XprHelper.h
index 667418b..4dee814 100644
--- a/Eigen/src/Core/util/XprHelper.h
+++ b/Eigen/src/Core/util/XprHelper.h
@@ -42,27 +42,35 @@
ei_no_assignment_operator& operator=(const ei_no_assignment_operator&);
};
-/** \internal If the template parameter Value is Dynamic, this class is just a wrapper around an int variable that
+template<typename StorageKind> struct ei_index {};
+
+template<>
+struct ei_index<Dense>
+{ typedef EIGEN_DEFAULT_DENSE_INDEX_TYPE type; };
+
+typedef ei_index<Dense>::type DenseIndex;
+
+/** \internal If the template parameter Value is Dynamic, this class is just a wrapper around a T variable that
* can be accessed using value() and setValue().
* Otherwise, this class is an empty structure and value() just returns the template parameter Value.
*/
-template<int Value> class ei_int_if_dynamic
+template<typename T, int Value> class ei_variable_if_dynamic
{
public:
- EIGEN_EMPTY_STRUCT_CTOR(ei_int_if_dynamic)
- explicit ei_int_if_dynamic(int v) { EIGEN_ONLY_USED_FOR_DEBUG(v); ei_assert(v == Value); }
- static int value() { return Value; }
- void setValue(int) {}
+ EIGEN_EMPTY_STRUCT_CTOR(ei_variable_if_dynamic)
+ explicit ei_variable_if_dynamic(T v) { EIGEN_ONLY_USED_FOR_DEBUG(v); ei_assert(v == T(Value)); }
+ static T value() { return T(Value); }
+ void setValue(T) {}
};
-template<> class ei_int_if_dynamic<Dynamic>
+template<typename T> class ei_variable_if_dynamic<T, Dynamic>
{
- int m_value;
- ei_int_if_dynamic() { ei_assert(false); }
+ T m_value;
+ ei_variable_if_dynamic() { ei_assert(false); }
public:
- explicit ei_int_if_dynamic(int value) : m_value(value) {}
- int value() const { return m_value; }
- void setValue(int value) { m_value = value; }
+ explicit ei_variable_if_dynamic(T value) : m_value(value) {}
+ T value() const { return m_value; }
+ void setValue(T value) { m_value = value; }
};
template<typename T> struct ei_functor_traits
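
The point of generalizing ei_int_if_dynamic: the wrapper stays an empty struct for compile-time sizes and only stores a value in the Dynamic case. A sketch of the intended use:

    #include <Eigen/Core>
    #include <cstddef>

    void sketch()
    {
      using namespace Eigen;
      ei_variable_if_dynamic<std::ptrdiff_t, 3> rows_fixed(3);       // stateless
      ei_variable_if_dynamic<std::ptrdiff_t, Dynamic> rows_dyn(42);  // stores 42
      // rows_fixed.value() == 3 with sizeof(rows_fixed) == 1;
      // rows_dyn.value() == 42 with sizeof(rows_dyn) == sizeof(std::ptrdiff_t)
    }
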
diff --git a/Eigen/src/Eigen2Support/Block.h b/Eigen/src/Eigen2Support/Block.h
index eb17a27..c5e1a9c 100644
--- a/Eigen/src/Eigen2Support/Block.h
+++ b/Eigen/src/Eigen2Support/Block.h
@@ -40,11 +40,11 @@
* when it is applied to a fixed-size matrix, it inherits a fixed maximal size,
* which means that evaluating it does not cause a dynamic memory allocation.
*
- * \sa class Block, block(int,int,int,int)
+ * \sa class Block, block(Index,Index,Index,Index)
*/
template<typename Derived>
inline Block<Derived> DenseBase<Derived>
- ::corner(CornerType type, int cRows, int cCols)
+ ::corner(CornerType type, Index cRows, Index cCols)
{
switch(type)
{
@@ -61,10 +61,10 @@
}
}
-/** This is the const version of corner(CornerType, int, int).*/
+/** This is the const version of corner(CornerType, Index, Index).*/
template<typename Derived>
inline const Block<Derived>
-DenseBase<Derived>::corner(CornerType type, int cRows, int cCols) const
+DenseBase<Derived>::corner(CornerType type, Index cRows, Index cCols) const
{
switch(type)
{
@@ -91,7 +91,7 @@
* Example: \include MatrixBase_template_int_int_corner_enum.cpp
* Output: \verbinclude MatrixBase_template_int_int_corner_enum.out
*
- * \sa class Block, block(int,int,int,int)
+ * \sa class Block, block(Index,Index,Index,Index)
*/
template<typename Derived>
template<int CRows, int CCols>
diff --git a/Eigen/src/Eigen2Support/Minor.h b/Eigen/src/Eigen2Support/Minor.h
index e7e164a..3bf913b 100644
--- a/Eigen/src/Eigen2Support/Minor.h
+++ b/Eigen/src/Eigen2Support/Minor.h
@@ -44,6 +44,7 @@
{
typedef typename ei_nested<MatrixType>::type MatrixTypeNested;
typedef typename ei_unref<MatrixTypeNested>::type _MatrixTypeNested;
+ typedef typename MatrixType::StorageKind StorageKind;
enum {
RowsAtCompileTime = (MatrixType::RowsAtCompileTime != Dynamic) ?
int(MatrixType::RowsAtCompileTime) - 1 : Dynamic,
@@ -68,7 +69,7 @@
EIGEN_DENSE_PUBLIC_INTERFACE(Minor)
inline Minor(const MatrixType& matrix,
- int row, int col)
+ Index row, Index col)
: m_matrix(matrix), m_row(row), m_col(col)
{
ei_assert(row >= 0 && row < matrix.rows()
@@ -77,22 +78,22 @@
EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Minor)
- inline int rows() const { return m_matrix.rows() - 1; }
- inline int cols() const { return m_matrix.cols() - 1; }
+ inline Index rows() const { return m_matrix.rows() - 1; }
+ inline Index cols() const { return m_matrix.cols() - 1; }
- inline Scalar& coeffRef(int row, int col)
+ inline Scalar& coeffRef(Index row, Index col)
{
return m_matrix.const_cast_derived().coeffRef(row + (row >= m_row), col + (col >= m_col));
}
- inline const Scalar coeff(int row, int col) const
+ inline const Scalar coeff(Index row, Index col) const
{
return m_matrix.coeff(row + (row >= m_row), col + (col >= m_col));
}
protected:
const typename MatrixType::Nested m_matrix;
- const int m_row, m_col;
+ const Index m_row, m_col;
};
/** \nonstableyet
@@ -107,7 +108,7 @@
*/
template<typename Derived>
inline Minor<Derived>
-MatrixBase<Derived>::minor(int row, int col)
+MatrixBase<Derived>::minor(Index row, Index col)
{
return Minor<Derived>(derived(), row, col);
}
@@ -116,7 +117,7 @@
* This is the const version of minor(). */
template<typename Derived>
inline const Minor<Derived>
-MatrixBase<Derived>::minor(int row, int col) const
+MatrixBase<Derived>::minor(Index row, Index col) const
{
return Minor<Derived>(derived(), row, col);
}
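
Usage is unchanged apart from the wider index type; a sketch (assumes the Eigen2 compatibility layer is enabled):

    #define EIGEN2_SUPPORT
    #include <Eigen/Core>

    void sketch()
    {
      Eigen::Matrix3d A = Eigen::Matrix3d::Random();
      Eigen::Matrix2d M = A.minor(1, 2);  // A with row 1 and column 2 removed
    }
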
diff --git a/Eigen/src/Eigen2Support/VectorBlock.h b/Eigen/src/Eigen2Support/VectorBlock.h
index 4ddc425..c6ac434 100644
--- a/Eigen/src/Eigen2Support/VectorBlock.h
+++ b/Eigen/src/Eigen2Support/VectorBlock.h
@@ -26,37 +26,37 @@
#ifndef EIGEN_VECTORBLOCK2_H
#define EIGEN_VECTORBLOCK2_H
-/** \deprecated use DenseMase::head(int) */
+/** \deprecated use DenseBase::head(Index) */
template<typename Derived>
inline VectorBlock<Derived>
-MatrixBase<Derived>::start(int size)
+MatrixBase<Derived>::start(Index size)
{
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
return VectorBlock<Derived>(derived(), 0, size);
}
-/** \deprecated use DenseMase::head(int) */
+/** \deprecated use DenseBase::head(Index) */
template<typename Derived>
inline const VectorBlock<Derived>
-MatrixBase<Derived>::start(int size) const
+MatrixBase<Derived>::start(Index size) const
{
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
return VectorBlock<Derived>(derived(), 0, size);
}
-/** \deprecated use DenseMase::tail(int) */
+/** \deprecated use DenseBase::tail(Index) */
template<typename Derived>
inline VectorBlock<Derived>
-MatrixBase<Derived>::end(int size)
+MatrixBase<Derived>::end(Index size)
{
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
return VectorBlock<Derived>(derived(), this->size() - size, size);
}
-/** \deprecated use DenseMase::tail(int) */
+/** \deprecated use DenseBase::tail(Index) */
template<typename Derived>
inline const VectorBlock<Derived>
-MatrixBase<Derived>::end(int size) const
+MatrixBase<Derived>::end(Index size) const
{
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
return VectorBlock<Derived>(derived(), this->size() - size, size);
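
The deprecated Eigen2 names simply forward to the Eigen3 equivalents; a usage sketch:

    #include <Eigen/Core>

    void sketch()
    {
      Eigen::VectorXd v = Eigen::VectorXd::Ones(10);
      v.head(3).setZero();  // preferred over the deprecated v.start(3)
      v.tail(3).setZero();  // preferred over the deprecated v.end(3)
    }
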
diff --git a/Eigen/src/Eigenvalues/ComplexEigenSolver.h b/Eigen/src/Eigenvalues/ComplexEigenSolver.h
index 5f956e4..f56815c 100644
--- a/Eigen/src/Eigenvalues/ComplexEigenSolver.h
+++ b/Eigen/src/Eigenvalues/ComplexEigenSolver.h
@@ -68,6 +68,7 @@
/** \brief Scalar type for matrices of type \p _MatrixType. */
typedef typename MatrixType::Scalar Scalar;
typedef typename NumTraits<Scalar>::Real RealScalar;
+ typedef typename MatrixType::Index Index;
/** \brief Complex scalar type for \p _MatrixType.
*
@@ -110,7 +111,7 @@
* according to the specified problem \a size.
* \sa ComplexEigenSolver()
*/
- ComplexEigenSolver(int size)
+ ComplexEigenSolver(Index size)
: m_eivec(size, size),
m_eivalues(size),
m_schur(size),
@@ -216,7 +217,7 @@
{
// this code is inspired from Jampack
assert(matrix.cols() == matrix.rows());
- const int n = matrix.cols();
+ const Index n = matrix.cols();
const RealScalar matrixnorm = matrix.norm();
// Step 1: Do a complex Schur decomposition, A = U T U^*
@@ -227,11 +228,11 @@
// Step 2: Compute X such that T = X D X^(-1), where D is the diagonal of T.
// The matrix X is unit triangular.
m_matX = EigenvectorType::Zero(n, n);
- for(int k=n-1 ; k>=0 ; k--)
+ for(Index k=n-1 ; k>=0 ; k--)
{
m_matX.coeffRef(k,k) = ComplexScalar(1.0,0.0);
// Compute X(i,k) using the (i,k) entry of the equation X T = D X
- for(int i=k-1 ; i>=0 ; i--)
+ for(Index i=k-1 ; i>=0 ; i--)
{
m_matX.coeffRef(i,k) = -m_schur.matrixT().coeff(i,k);
if(k-i-1>0)
@@ -250,16 +251,16 @@
// Step 3: Compute V as V = U X; now A = U T U^* = U X D X^(-1) U^* = V D V^(-1)
m_eivec.noalias() = m_schur.matrixU() * m_matX;
// .. and normalize the eigenvectors
- for(int k=0 ; k<n ; k++)
+ for(Index k=0 ; k<n ; k++)
{
m_eivec.col(k).normalize();
}
m_isInitialized = true;
// Step 4: Sort the eigenvalues
- for (int i=0; i<n; i++)
+ for (Index i=0; i<n; i++)
{
- int k;
+ Index k;
m_eivalues.cwiseAbs().tail(n-i).minCoeff(&k);
if (k != 0)
{
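
A usage sketch of the solver whose internal loops now run on Index (behavior unchanged):

    #include <Eigen/Eigenvalues>

    void sketch()
    {
      Eigen::MatrixXcd A = Eigen::MatrixXcd::Random(4, 4);
      Eigen::ComplexEigenSolver<Eigen::MatrixXcd> es;
      es.compute(A);      // Schur decomposition, then back-substitution
      es.eigenvalues();   // sorted in step 4 above
      es.eigenvectors();  // columns normalized in step 3 above
    }
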
diff --git a/Eigen/src/Eigenvalues/ComplexSchur.h b/Eigen/src/Eigenvalues/ComplexSchur.h
index c69e3ea..673cb46 100644
--- a/Eigen/src/Eigenvalues/ComplexSchur.h
+++ b/Eigen/src/Eigenvalues/ComplexSchur.h
@@ -71,8 +71,8 @@
/** \brief Scalar type for matrices of type \p _MatrixType. */
typedef typename MatrixType::Scalar Scalar;
-
typedef typename NumTraits<Scalar>::Real RealScalar;
+ typedef typename MatrixType::Index Index;
/** \brief Complex scalar type for \p _MatrixType.
*
@@ -100,7 +100,7 @@
*
* \sa compute() for an example.
*/
- ComplexSchur(int size = RowsAtCompileTime==Dynamic ? 1 : RowsAtCompileTime)
+ ComplexSchur(Index size = RowsAtCompileTime==Dynamic ? 1 : RowsAtCompileTime)
: m_matT(size,size),
m_matU(size,size),
m_hess(size),
@@ -197,8 +197,8 @@
bool m_matUisUptodate;
private:
- bool subdiagonalEntryIsNeglegible(int i);
- ComplexScalar computeShift(int iu, int iter);
+ bool subdiagonalEntryIsNeglegible(Index i);
+ ComplexScalar computeShift(Index iu, Index iter);
void reduceToTriangularForm(bool skipU);
friend struct ei_complex_schur_reduce_to_hessenberg<MatrixType, NumTraits<Scalar>::IsComplex>;
};
@@ -244,7 +244,7 @@
* compared to m_matT(i,i) and m_matT(j,j), then set it to zero and
* return true, else return false. */
template<typename MatrixType>
-inline bool ComplexSchur<MatrixType>::subdiagonalEntryIsNeglegible(int i)
+inline bool ComplexSchur<MatrixType>::subdiagonalEntryIsNeglegible(Index i)
{
RealScalar d = ei_norm1(m_matT.coeff(i,i)) + ei_norm1(m_matT.coeff(i+1,i+1));
RealScalar sd = ei_norm1(m_matT.coeff(i+1,i));
@@ -259,7 +259,7 @@
/** Compute the shift in the current QR iteration. */
template<typename MatrixType>
-typename ComplexSchur<MatrixType>::ComplexScalar ComplexSchur<MatrixType>::computeShift(int iu, int iter)
+typename ComplexSchur<MatrixType>::ComplexScalar ComplexSchur<MatrixType>::computeShift(Index iu, Index iter)
{
if (iter == 10 || iter == 20)
{
@@ -356,9 +356,9 @@
// Rows 0,...,il-1 are decoupled from the rest because m_matT(il,il-1) is zero.
// Rows il,...,iu is the part we are working on (the active submatrix).
// Rows iu+1,...,end are already brought in triangular form.
- int iu = m_matT.cols() - 1;
- int il;
- int iter = 0; // number of iterations we are working on the (iu,iu) element
+ Index iu = m_matT.cols() - 1;
+ Index il;
+ Index iter = 0; // number of iterations we are working on the (iu,iu) element
while(true)
{
@@ -395,7 +395,7 @@
m_matT.topRows(std::min(il+2,iu)+1).applyOnTheRight(il, il+1, rot);
if(!skipU) m_matU.applyOnTheRight(il, il+1, rot);
- for(int i=il+1 ; i<iu ; i++)
+ for(Index i=il+1 ; i<iu ; i++)
{
rot.makeGivens(m_matT.coeffRef(i,i-1), m_matT.coeffRef(i+1,i-1), &m_matT.coeffRef(i,i-1));
m_matT.coeffRef(i+1,i-1) = ComplexScalar(0);
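
Same pattern here; a compute/accessor sketch:

    #include <Eigen/Eigenvalues>

    void sketch()
    {
      Eigen::MatrixXcd A = Eigen::MatrixXcd::Random(4, 4);
      Eigen::ComplexSchur<Eigen::MatrixXcd> schur;
      schur.compute(A);                      // A = U T U^*, T upper triangular
      Eigen::MatrixXcd T = schur.matrixT();
      Eigen::MatrixXcd U = schur.matrixU();
    }
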
diff --git a/Eigen/src/Eigenvalues/EigenSolver.h b/Eigen/src/Eigenvalues/EigenSolver.h
index b619dc2..5400fda 100644
--- a/Eigen/src/Eigenvalues/EigenSolver.h
+++ b/Eigen/src/Eigenvalues/EigenSolver.h
@@ -90,6 +90,7 @@
/** \brief Scalar type for matrices of type \p _MatrixType. */
typedef typename MatrixType::Scalar Scalar;
typedef typename NumTraits<Scalar>::Real RealScalar;
+ typedef typename MatrixType::Index Index;
/** \brief Complex scalar type for \p _MatrixType.
*
@@ -128,7 +129,7 @@
* according to the specified problem \a size.
* \sa EigenSolver()
*/
- EigenSolver(int size)
+ EigenSolver(Index size)
: m_eivec(size, size),
m_eivalues(size),
m_isInitialized(false),
@@ -285,9 +286,9 @@
MatrixType EigenSolver<MatrixType>::pseudoEigenvalueMatrix() const
{
ei_assert(m_isInitialized && "EigenSolver is not initialized.");
- int n = m_eivec.cols();
+ Index n = m_eivec.cols();
MatrixType matD = MatrixType::Zero(n,n);
- for (int i=0; i<n; ++i)
+ for (Index i=0; i<n; ++i)
{
if (ei_isMuchSmallerThan(ei_imag(m_eivalues.coeff(i)), ei_real(m_eivalues.coeff(i))))
matD.coeffRef(i,i) = ei_real(m_eivalues.coeff(i));
@@ -305,9 +306,9 @@
typename EigenSolver<MatrixType>::EigenvectorsType EigenSolver<MatrixType>::eigenvectors() const
{
ei_assert(m_isInitialized && "EigenSolver is not initialized.");
- int n = m_eivec.cols();
+ Index n = m_eivec.cols();
EigenvectorsType matV(n,n);
- for (int j=0; j<n; ++j)
+ for (Index j=0; j<n; ++j)
{
if (ei_isMuchSmallerThan(ei_imag(m_eivalues.coeff(j)), ei_real(m_eivalues.coeff(j))))
{
@@ -317,7 +318,7 @@
else
{
// we have a pair of complex eigen values
- for (int i=0; i<n; ++i)
+ for (Index i=0; i<n; ++i)
{
matV.coeffRef(i,j) = ComplexScalar(m_eivec.coeff(i,j), m_eivec.coeff(i,j+1));
matV.coeffRef(i,j+1) = ComplexScalar(m_eivec.coeff(i,j), -m_eivec.coeff(i,j+1));
@@ -342,7 +343,7 @@
// Compute eigenvalues from matT
m_eivalues.resize(matrix.cols());
- int i = 0;
+ Index i = 0;
while (i < matrix.cols())
{
if (i == matrix.cols() - 1 || m_matT.coeff(i+1, i) == Scalar(0))
@@ -390,14 +391,14 @@
template<typename MatrixType>
void EigenSolver<MatrixType>::computeEigenvectors()
{
- const int size = m_eivec.cols();
+ const Index size = m_eivec.cols();
const Scalar eps = NumTraits<Scalar>::epsilon();
// inefficient! this is already computed in RealSchur
Scalar norm = 0.0;
- for (int j = 0; j < size; ++j)
+ for (Index j = 0; j < size; ++j)
{
- norm += m_matT.row(j).segment(std::max(j-1,0), size-std::max(j-1,0)).cwiseAbs().sum();
+ norm += m_matT.row(j).segment(std::max(j-1,Index(0)), size-std::max(j-1,Index(0))).cwiseAbs().sum();
}
// Backsubstitute to find vectors of upper triangular form
@@ -406,7 +407,7 @@
return;
}
- for (int n = size-1; n >= 0; n--)
+ for (Index n = size-1; n >= 0; n--)
{
Scalar p = m_eivalues.coeff(n).real();
Scalar q = m_eivalues.coeff(n).imag();
@@ -415,10 +416,10 @@
if (q == 0)
{
Scalar lastr=0, lastw=0;
- int l = n;
+ Index l = n;
m_matT.coeffRef(n,n) = 1.0;
- for (int i = n-1; i >= 0; i--)
+ for (Index i = n-1; i >= 0; i--)
{
Scalar w = m_matT.coeff(i,i) - p;
Scalar r = m_matT.row(i).segment(l,n-l+1).dot(m_matT.col(n).segment(l, n-l+1));
@@ -461,7 +462,7 @@
else if (q < 0) // Complex vector
{
Scalar lastra=0, lastsa=0, lastw=0;
- int l = n-1;
+ Index l = n-1;
// Last vector component imaginary so matrix is triangular
if (ei_abs(m_matT.coeff(n,n-1)) > ei_abs(m_matT.coeff(n-1,n)))
@@ -477,7 +478,7 @@
}
m_matT.coeffRef(n,n-1) = 0.0;
m_matT.coeffRef(n,n) = 1.0;
- for (int i = n-2; i >= 0; i--)
+ for (Index i = n-2; i >= 0; i--)
{
Scalar ra = m_matT.row(i).segment(l, n-l+1).dot(m_matT.col(n-1).segment(l, n-l+1));
Scalar sa = m_matT.row(i).segment(l, n-l+1).dot(m_matT.col(n).segment(l, n-l+1));
@@ -535,7 +536,7 @@
}
// Back transformation to get eigenvectors of original matrix
- for (int j = size-1; j >= 0; j--)
+ for (Index j = size-1; j >= 0; j--)
{
m_tmp.noalias() = m_eivec.leftCols(j+1) * m_matT.col(j).segment(0, j+1);
m_eivec.col(j) = m_tmp;
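
A sketch of the real eigensolver API touched above:

    #include <Eigen/Eigenvalues>

    void sketch()
    {
      Eigen::MatrixXd A = Eigen::MatrixXd::Random(4, 4);
      Eigen::EigenSolver<Eigen::MatrixXd> es(A);
      // D holds 1x1 blocks for real eigenvalues and 2x2 blocks for complex
      // conjugate pairs, exactly as assembled in the loops above
      Eigen::MatrixXd D = es.pseudoEigenvalueMatrix();
      Eigen::MatrixXd V = es.pseudoEigenvectors();
    }
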
diff --git a/Eigen/src/Eigenvalues/HessenbergDecomposition.h b/Eigen/src/Eigenvalues/HessenbergDecomposition.h
index 7a80aed..1111ffb 100644
--- a/Eigen/src/Eigenvalues/HessenbergDecomposition.h
+++ b/Eigen/src/Eigenvalues/HessenbergDecomposition.h
@@ -81,6 +81,7 @@
/** \brief Scalar type for matrices of type #MatrixType. */
typedef typename MatrixType::Scalar Scalar;
+ typedef typename MatrixType::Index Index;
/** \brief Type for vector of Householder coefficients.
*
@@ -104,7 +105,7 @@
*
* \sa compute() for an example.
*/
- HessenbergDecomposition(int size = Size==Dynamic ? 2 : Size)
+ HessenbergDecomposition(Index size = Size==Dynamic ? 2 : Size)
: m_matrix(size,size),
m_temp(size)
{
@@ -276,12 +277,12 @@
void HessenbergDecomposition<MatrixType>::_compute(MatrixType& matA, CoeffVectorType& hCoeffs, VectorType& temp)
{
assert(matA.rows()==matA.cols());
- int n = matA.rows();
+ Index n = matA.rows();
temp.resize(n);
- for (int i = 0; i<n-1; ++i)
+ for (Index i = 0; i<n-1; ++i)
{
// let's consider the vector v = i-th column starting at position i+1
- int remainingSize = n-i-1;
+ Index remainingSize = n-i-1;
RealScalar beta;
Scalar h;
matA.col(i).tail(remainingSize).makeHouseholderInPlace(h, beta);
@@ -321,6 +322,7 @@
template<typename MatrixType> struct HessenbergDecompositionMatrixHReturnType
: public ReturnByValue<HessenbergDecompositionMatrixHReturnType<MatrixType> >
{
+ typedef typename MatrixType::Index Index;
public:
/** \brief Constructor.
*
@@ -337,13 +339,13 @@
inline void evalTo(ResultType& result) const
{
result = m_hess.packedMatrix();
- int n = result.rows();
+ Index n = result.rows();
if (n>2)
result.bottomLeftCorner(n-2, n-2).template triangularView<Lower>().setZero();
}
- int rows() const { return m_hess.packedMatrix().rows(); }
- int cols() const { return m_hess.packedMatrix().cols(); }
+ Index rows() const { return m_hess.packedMatrix().rows(); }
+ Index cols() const { return m_hess.packedMatrix().cols(); }
protected:
const HessenbergDecomposition<MatrixType>& m_hess;
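
The return-by-value type above lets matrixH() materialize lazily; a sketch:

    #include <Eigen/Eigenvalues>

    void sketch()
    {
      Eigen::MatrixXd A = Eigen::MatrixXd::Random(5, 5);
      Eigen::HessenbergDecomposition<Eigen::MatrixXd> hess(A);
      // evalTo() zeroes everything below the first subdiagonal on assignment
      Eigen::MatrixXd H = hess.matrixH();
    }
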
diff --git a/Eigen/src/Eigenvalues/RealSchur.h b/Eigen/src/Eigenvalues/RealSchur.h
index f9d49c6..92ff448 100644
--- a/Eigen/src/Eigenvalues/RealSchur.h
+++ b/Eigen/src/Eigenvalues/RealSchur.h
@@ -77,6 +77,8 @@
};
typedef typename MatrixType::Scalar Scalar;
typedef std::complex<typename NumTraits<Scalar>::Real> ComplexScalar;
+ typedef typename MatrixType::Index Index;
+
typedef Matrix<ComplexScalar, ColsAtCompileTime, 1, Options & ~RowMajor, MaxColsAtCompileTime, 1> EigenvalueType;
typedef Matrix<Scalar, ColsAtCompileTime, 1, Options & ~RowMajor, MaxColsAtCompileTime, 1> ColumnVectorType;
@@ -91,7 +93,7 @@
*
* \sa compute() for an example.
*/
- RealSchur(int size = RowsAtCompileTime==Dynamic ? 1 : RowsAtCompileTime)
+ RealSchur(Index size = RowsAtCompileTime==Dynamic ? 1 : RowsAtCompileTime)
: m_matT(size, size),
m_matU(size, size),
m_workspaceVector(size),
@@ -177,11 +179,11 @@
typedef Matrix<Scalar,3,1> Vector3s;
Scalar computeNormOfT();
- int findSmallSubdiagEntry(int iu, Scalar norm);
- void splitOffTwoRows(int iu, Scalar exshift);
- void computeShift(int iu, int iter, Scalar& exshift, Vector3s& shiftInfo);
- void initFrancisQRStep(int il, int iu, const Vector3s& shiftInfo, int& im, Vector3s& firstHouseholderVector);
- void performFrancisQRStep(int il, int im, int iu, const Vector3s& firstHouseholderVector, Scalar* workspace);
+ Index findSmallSubdiagEntry(Index iu, Scalar norm);
+ void splitOffTwoRows(Index iu, Scalar exshift);
+ void computeShift(Index iu, Index iter, Scalar& exshift, Vector3s& shiftInfo);
+ void initFrancisQRStep(Index il, Index iu, const Vector3s& shiftInfo, Index& im, Vector3s& firstHouseholderVector);
+ void performFrancisQRStep(Index il, Index im, Index iu, const Vector3s& firstHouseholderVector, Scalar* workspace);
};
@@ -204,14 +206,14 @@
// Rows 0,...,il-1 are decoupled from the rest because m_matT(il,il-1) is zero.
// Rows il,...,iu is the part we are working on (the active window).
// Rows iu+1,...,end are already brought in triangular form.
- int iu = m_matU.cols() - 1;
- int iter = 0; // iteration count
+ Index iu = m_matU.cols() - 1;
+ Index iter = 0; // iteration count
Scalar exshift = 0.0; // sum of exceptional shifts
Scalar norm = computeNormOfT();
while (iu >= 0)
{
- int il = findSmallSubdiagEntry(iu, norm);
+ Index il = findSmallSubdiagEntry(iu, norm);
// Check for convergence
if (il == iu) // One root found
@@ -233,7 +235,7 @@
Vector3s firstHouseholderVector, shiftInfo;
computeShift(iu, iter, exshift, shiftInfo);
iter = iter + 1; // (Could check iteration count here.)
- int im;
+ Index im;
initFrancisQRStep(il, iu, shiftInfo, im, firstHouseholderVector);
performFrancisQRStep(il, im, iu, firstHouseholderVector, workspace);
}
@@ -246,21 +248,21 @@
template<typename MatrixType>
inline typename MatrixType::Scalar RealSchur<MatrixType>::computeNormOfT()
{
- const int size = m_matU.cols();
+ const Index size = m_matU.cols();
// FIXME to be efficient the following would require a triangular reduction code
// Scalar norm = m_matT.upper().cwiseAbs().sum()
// + m_matT.bottomLeftCorner(size-1,size-1).diagonal().cwiseAbs().sum();
Scalar norm = 0.0;
- for (int j = 0; j < size; ++j)
- norm += m_matT.row(j).segment(std::max(j-1,0), size-std::max(j-1,0)).cwiseAbs().sum();
+ for (Index j = 0; j < size; ++j)
+ norm += m_matT.row(j).segment(std::max(j-1,Index(0)), size-std::max(j-1,Index(0))).cwiseAbs().sum();
return norm;
}
/** \internal Look for single small sub-diagonal element and returns its index */
template<typename MatrixType>
-inline int RealSchur<MatrixType>::findSmallSubdiagEntry(int iu, Scalar norm)
+inline typename MatrixType::Index RealSchur<MatrixType>::findSmallSubdiagEntry(Index iu, Scalar norm)
{
- int res = iu;
+ Index res = iu;
while (res > 0)
{
Scalar s = ei_abs(m_matT.coeff(res-1,res-1)) + ei_abs(m_matT.coeff(res,res));
@@ -275,9 +277,9 @@
/** \internal Update T given that rows iu-1 and iu decouple from the rest. */
template<typename MatrixType>
-inline void RealSchur<MatrixType>::splitOffTwoRows(int iu, Scalar exshift)
+inline void RealSchur<MatrixType>::splitOffTwoRows(Index iu, Scalar exshift)
{
- const int size = m_matU.cols();
+ const Index size = m_matU.cols();
// The eigenvalues of the 2x2 matrix [a b; c d] are
// trace +/- sqrt(discr/4) where discr = tr^2 - 4*det, tr = a + d, det = ad - bc
@@ -307,7 +309,7 @@
/** \internal Form shift in shiftInfo, and update exshift if an exceptional shift is performed. */
template<typename MatrixType>
-inline void RealSchur<MatrixType>::computeShift(int iu, int iter, Scalar& exshift, Vector3s& shiftInfo)
+inline void RealSchur<MatrixType>::computeShift(Index iu, Index iter, Scalar& exshift, Vector3s& shiftInfo)
{
shiftInfo.coeffRef(0) = m_matT.coeff(iu,iu);
shiftInfo.coeffRef(1) = m_matT.coeff(iu-1,iu-1);
@@ -317,7 +319,7 @@
if (iter == 10)
{
exshift += shiftInfo.coeff(0);
- for (int i = 0; i <= iu; ++i)
+ for (Index i = 0; i <= iu; ++i)
m_matT.coeffRef(i,i) -= shiftInfo.coeff(0);
Scalar s = ei_abs(m_matT.coeff(iu,iu-1)) + ei_abs(m_matT.coeff(iu-1,iu-2));
shiftInfo.coeffRef(0) = Scalar(0.75) * s;
@@ -338,7 +340,7 @@
s = s + (shiftInfo.coeff(1) - shiftInfo.coeff(0)) / Scalar(2.0);
s = shiftInfo.coeff(0) - shiftInfo.coeff(2) / s;
exshift += s;
- for (int i = 0; i <= iu; ++i)
+ for (Index i = 0; i <= iu; ++i)
m_matT.coeffRef(i,i) -= s;
shiftInfo.setConstant(Scalar(0.964));
}
@@ -347,7 +349,7 @@
/** \internal Compute index im at which Francis QR step starts and the first Householder vector. */
template<typename MatrixType>
-inline void RealSchur<MatrixType>::initFrancisQRStep(int il, int iu, const Vector3s& shiftInfo, int& im, Vector3s& firstHouseholderVector)
+inline void RealSchur<MatrixType>::initFrancisQRStep(Index il, Index iu, const Vector3s& shiftInfo, Index& im, Vector3s& firstHouseholderVector)
{
Vector3s& v = firstHouseholderVector; // alias to save typing
@@ -373,14 +375,14 @@
/** \internal Perform a Francis QR step involving rows il:iu and columns im:iu. */
template<typename MatrixType>
-inline void RealSchur<MatrixType>::performFrancisQRStep(int il, int im, int iu, const Vector3s& firstHouseholderVector, Scalar* workspace)
+inline void RealSchur<MatrixType>::performFrancisQRStep(Index il, Index im, Index iu, const Vector3s& firstHouseholderVector, Scalar* workspace)
{
assert(im >= il);
assert(im <= iu-2);
- const int size = m_matU.cols();
+ const Index size = m_matU.cols();
- for (int k = im; k <= iu-2; ++k)
+ for (Index k = im; k <= iu-2; ++k)
{
bool firstIteration = (k == im);
@@ -422,7 +424,7 @@
}
// clean up pollution due to round-off errors
- for (int i = im+2; i <= iu; ++i)
+ for (Index i = im+2; i <= iu; ++i)
{
m_matT.coeffRef(i,i-2) = Scalar(0);
if (i > im+2)
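
A sketch of the Schur API whose Francis QR helpers now take Index:

    #include <Eigen/Eigenvalues>

    void sketch()
    {
      Eigen::MatrixXd A = Eigen::MatrixXd::Random(5, 5);
      Eigen::RealSchur<Eigen::MatrixXd> schur;
      schur.compute(A);                     // Francis QR steps as above
      Eigen::MatrixXd U = schur.matrixU();  // orthogonal
      Eigen::MatrixXd T = schur.matrixT();  // quasi-triangular, A = U T U^T
    }
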
diff --git a/Eigen/src/Eigenvalues/SelfAdjointEigenSolver.h b/Eigen/src/Eigenvalues/SelfAdjointEigenSolver.h
index 7634364..2c53655 100644
--- a/Eigen/src/Eigenvalues/SelfAdjointEigenSolver.h
+++ b/Eigen/src/Eigenvalues/SelfAdjointEigenSolver.h
@@ -82,6 +82,7 @@
/** \brief Scalar type for matrices of type \p _MatrixType. */
typedef typename MatrixType::Scalar Scalar;
+ typedef typename MatrixType::Index Index;
/** \brief Real scalar type for \p _MatrixType.
*
@@ -105,7 +106,7 @@
* perform decompositions via compute(const MatrixType&, bool) or
* compute(const MatrixType&, const MatrixType&, bool). This constructor
* can only be used if \p _MatrixType is a fixed-size matrix; use
- * SelfAdjointEigenSolver(int) for dynamic-size matrices.
+ * SelfAdjointEigenSolver(Index) for dynamic-size matrices.
*
* Example: \include SelfAdjointEigenSolver_SelfAdjointEigenSolver.cpp
* Output: \verbinclude SelfAdjointEigenSolver_SelfAdjointEigenSolver.out
@@ -132,7 +133,7 @@
*
* \sa compute(const MatrixType&, bool) for an example
*/
- SelfAdjointEigenSolver(int size)
+ SelfAdjointEigenSolver(Index size)
: m_eivec(size, size),
m_eivalues(size),
m_tridiag(size),
@@ -379,8 +380,8 @@
* Implemented from Golub's "Matrix Computations", algorithm 8.3.2:
* "implicit symmetric QR step with Wilkinson shift"
*/
-template<typename RealScalar, typename Scalar>
-static void ei_tridiagonal_qr_step(RealScalar* diag, RealScalar* subdiag, int start, int end, Scalar* matrixQ, int n);
+template<typename RealScalar, typename Scalar, typename Index>
+static void ei_tridiagonal_qr_step(RealScalar* diag, RealScalar* subdiag, Index start, Index end, Scalar* matrixQ, Index n);
template<typename MatrixType>
SelfAdjointEigenSolver<MatrixType>& SelfAdjointEigenSolver<MatrixType>::compute(const MatrixType& matrix, bool computeEigenvectors)
@@ -389,7 +390,7 @@
m_eigenvectorsOk = computeEigenvectors;
#endif
assert(matrix.cols() == matrix.rows());
- int n = matrix.cols();
+ Index n = matrix.cols();
m_eivalues.resize(n,1);
m_eivec.resize(n,n);
@@ -407,11 +408,11 @@
if (computeEigenvectors)
m_eivec = m_tridiag.matrixQ();
- int end = n-1;
- int start = 0;
+ Index end = n-1;
+ Index start = 0;
while (end>0)
{
- for (int i = start; i<end; ++i)
+ for (Index i = start; i<end; ++i)
if (ei_isMuchSmallerThan(ei_abs(m_subdiag[i]),(ei_abs(diag[i])+ei_abs(diag[i+1]))))
m_subdiag[i] = 0;
@@ -430,9 +431,9 @@
// Sort eigenvalues and corresponding vectors.
// TODO make the sort optional ?
// TODO use a better sort algorithm !!
- for (int i = 0; i < n-1; ++i)
+ for (Index i = 0; i < n-1; ++i)
{
- int k;
+ Index k;
m_eivalues.segment(i,n-i).minCoeff(&k);
if (k > 0)
{
@@ -473,7 +474,7 @@
{
// transform back the eigen vectors: evecs = inv(U) * evecs
cholB.matrixU().solveInPlace(m_eivec);
- for (int i=0; i<m_eivec.cols(); ++i)
+ for (Index i=0; i<m_eivec.cols(); ++i)
m_eivec.col(i) = m_eivec.col(i).normalized();
}
return *this;
@@ -482,8 +483,8 @@
#endif // EIGEN_HIDE_HEAVY_CODE
#ifndef EIGEN_EXTERN_INSTANTIATIONS
-template<typename RealScalar, typename Scalar>
-static void ei_tridiagonal_qr_step(RealScalar* diag, RealScalar* subdiag, int start, int end, Scalar* matrixQ, int n)
+template<typename RealScalar, typename Scalar, typename Index>
+static void ei_tridiagonal_qr_step(RealScalar* diag, RealScalar* subdiag, Index start, Index end, Scalar* matrixQ, Index n)
{
RealScalar td = (diag[end-1] - diag[end])*RealScalar(0.5);
RealScalar e2 = ei_abs2(subdiag[end-1]);
@@ -491,7 +492,7 @@
RealScalar x = diag[start] - mu;
RealScalar z = subdiag[start];
- for (int k = start; k < end; ++k)
+ for (Index k = start; k < end; ++k)
{
PlanarRotation<RealScalar> rot;
rot.makeGivens(x, z);
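
Usage sketch (the compute(matrix, bool) overload is the one changed above):

    #include <Eigen/Eigenvalues>

    void sketch()
    {
      Eigen::MatrixXd X = Eigen::MatrixXd::Random(4, 4);
      Eigen::MatrixXd A = X + X.transpose();  // selfadjoint input
      Eigen::SelfAdjointEigenSolver<Eigen::MatrixXd> es;
      es.compute(A, true);                    // with eigenvectors
      es.eigenvalues();    // sorted ascending by the loop above
      es.eigenvectors();   // orthonormal columns
    }
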
diff --git a/Eigen/src/Eigenvalues/Tridiagonalization.h b/Eigen/src/Eigenvalues/Tridiagonalization.h
index 6ea852a..02917f2 100644
--- a/Eigen/src/Eigenvalues/Tridiagonalization.h
+++ b/Eigen/src/Eigenvalues/Tridiagonalization.h
@@ -67,6 +67,7 @@
typedef typename MatrixType::Scalar Scalar;
typedef typename NumTraits<Scalar>::Real RealScalar;
+ typedef typename MatrixType::Index Index;
enum {
Size = MatrixType::RowsAtCompileTime,
@@ -107,7 +108,7 @@
*
* \sa compute() for an example.
*/
- Tridiagonalization(int size = Size==Dynamic ? 2 : Size)
+ Tridiagonalization(Index size = Size==Dynamic ? 2 : Size)
: m_matrix(size,size), m_hCoeffs(size > 1 ? size-1 : 1)
{}
@@ -324,7 +325,7 @@
const typename Tridiagonalization<MatrixType>::SubDiagonalReturnType
Tridiagonalization<MatrixType>::subDiagonal() const
{
- int n = m_matrix.rows();
+ Index n = m_matrix.rows();
return Block<MatrixType,SizeMinusOne,SizeMinusOne>(m_matrix, 1, 0, n-1,n-1).diagonal();
}
@@ -334,7 +335,7 @@
{
// FIXME should this function (and other similar ones) rather take a matrix as argument
// and fill it ? (to avoid temporaries)
- int n = m_matrix.rows();
+ Index n = m_matrix.rows();
MatrixType matT = m_matrix;
matT.topRightCorner(n-1, n-1).diagonal() = subDiagonal().template cast<Scalar>().conjugate();
if (n>2)
@@ -363,10 +364,10 @@
void Tridiagonalization<MatrixType>::_compute(MatrixType& matA, CoeffVectorType& hCoeffs)
{
assert(matA.rows()==matA.cols());
- int n = matA.rows();
- for (int i = 0; i<n-1; ++i)
+ Index n = matA.rows();
+ for (Index i = 0; i<n-1; ++i)
{
- int remainingSize = n-i-1;
+ Index remainingSize = n-i-1;
RealScalar beta;
Scalar h;
matA.col(i).tail(remainingSize).makeHouseholderInPlace(h, beta);
@@ -391,7 +392,7 @@
template<typename MatrixType>
void Tridiagonalization<MatrixType>::decomposeInPlace(MatrixType& mat, DiagonalType& diag, SubDiagonalType& subdiag, bool extractQ)
{
- int n = mat.rows();
+ Index n = mat.rows();
ei_assert(mat.cols()==n && diag.size()==n && subdiag.size()==n-1);
if (n==3 && (!NumTraits<Scalar>::IsComplex) )
{
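
Usage sketch (assuming the matrix constructor, which this patch does not touch):

    #include <Eigen/Eigenvalues>

    void sketch()
    {
      Eigen::MatrixXd X = Eigen::MatrixXd::Random(5, 5);
      Eigen::MatrixXd A = X + X.transpose();
      Eigen::Tridiagonalization<Eigen::MatrixXd> tri(A);  // A = Q T Q^*
      Eigen::VectorXd sub = tri.subDiagonal();            // n-1 subdiagonal entries
    }
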
diff --git a/Eigen/src/Geometry/AlignedBox.h b/Eigen/src/Geometry/AlignedBox.h
index f3bee6f..9d9d96f 100644
--- a/Eigen/src/Geometry/AlignedBox.h
+++ b/Eigen/src/Geometry/AlignedBox.h
@@ -45,6 +45,7 @@
enum { AmbientDimAtCompileTime = _AmbientDim };
typedef _Scalar Scalar;
typedef NumTraits<Scalar> ScalarTraits;
+ typedef DenseIndex Index;
typedef typename ScalarTraits::Real RealScalar;
typedef typename ScalarTraits::NonInteger NonInteger;
typedef Matrix<Scalar,AmbientDimAtCompileTime,1> VectorType;
@@ -72,7 +73,7 @@
{ if (AmbientDimAtCompileTime!=Dynamic) setEmpty(); }
/** Constructs a null box with \a _dim the dimension of the ambient space. */
- inline explicit AlignedBox(int _dim) : m_min(_dim), m_max(_dim)
+ inline explicit AlignedBox(Index _dim) : m_min(_dim), m_max(_dim)
{ setEmpty(); }
/** Constructs a box with extremities \a _min and \a _max. */
@@ -91,7 +92,7 @@
~AlignedBox() {}
/** \returns the dimension in which the box holds */
- inline int dim() const { return AmbientDimAtCompileTime==Dynamic ? m_min.size()-1 : AmbientDimAtCompileTime; }
+ inline Index dim() const { return AmbientDimAtCompileTime==Dynamic ? m_min.size() : Index(AmbientDimAtCompileTime); }
/** \deprecated use isEmpty */
inline bool isNull() const { return isEmpty(); }
@@ -157,8 +158,8 @@
VectorType res;
- int mult = 1;
- for(int d=0; d<dim(); ++d)
+ Index mult = 1;
+ for(Index d=0; d<dim(); ++d)
{
if( mult & corner ) res[d] = m_max[d];
else res[d] = m_min[d];
@@ -172,7 +173,7 @@
inline VectorType sample() const
{
VectorType r;
- for(int d=0; d<dim(); ++d)
+ for(Index d=0; d<dim(); ++d)
{
if(!ScalarTraits::IsInteger)
{
@@ -311,7 +312,7 @@
const typename ei_nested<Derived,2*AmbientDim>::type p(a_p.derived());
Scalar dist2 = 0.;
Scalar aux;
- for (int k=0; k<dim(); ++k)
+ for (Index k=0; k<dim(); ++k)
{
if( m_min[k] > p[k] )
{
@@ -332,7 +333,7 @@
{
Scalar dist2 = 0.;
Scalar aux;
- for (int k=0; k<dim(); ++k)
+ for (Index k=0; k<dim(); ++k)
{
if( m_min[k] > b.m_max[k] )
{
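
A sketch of the dynamic-size box, whose Index is now DenseIndex:

    #include <Eigen/Geometry>

    void sketch()
    {
      typedef Eigen::AlignedBox<double, Eigen::Dynamic> BoxXd;
      BoxXd box(3);                          // 3-dimensional ambient space
      box.extend(Eigen::VectorXd::Zero(3));
      box.extend(Eigen::VectorXd::Ones(3));
      BoxXd::Index d = box.dim();            // == 3
      (void)d;
    }
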
diff --git a/Eigen/src/Geometry/EulerAngles.h b/Eigen/src/Geometry/EulerAngles.h
index 13d2376..d910cbc 100644
--- a/Eigen/src/Geometry/EulerAngles.h
+++ b/Eigen/src/Geometry/EulerAngles.h
@@ -43,7 +43,7 @@
*/
template<typename Derived>
inline Matrix<typename MatrixBase<Derived>::Scalar,3,1>
-MatrixBase<Derived>::eulerAngles(int a0, int a1, int a2) const
+MatrixBase<Derived>::eulerAngles(Index a0, Index a1, Index a2) const
{
/* Implemented from Graphics Gems IV */
EIGEN_STATIC_ASSERT_MATRIX_SPECIFIC_SIZE(Derived,3,3)
@@ -52,10 +52,10 @@
typedef Matrix<typename Derived::Scalar,2,1> Vector2;
const Scalar epsilon = NumTraits<Scalar>::dummy_precision();
- const int odd = ((a0+1)%3 == a1) ? 0 : 1;
- const int i = a0;
- const int j = (a0 + 1 + odd)%3;
- const int k = (a0 + 2 - odd)%3;
+ const Index odd = ((a0+1)%3 == a1) ? 0 : 1;
+ const Index i = a0;
+ const Index j = (a0 + 1 + odd)%3;
+ const Index k = (a0 + 2 - odd)%3;
if (a0==a2)
{
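
Callers are unaffected since integer literals convert to Index; a sketch:

    #include <Eigen/Geometry>

    void sketch()
    {
      Eigen::Matrix3d R =
          Eigen::AngleAxisd(0.5, Eigen::Vector3d::UnitZ()).toRotationMatrix();
      Eigen::Vector3d a = R.eulerAngles(2, 1, 0);  // Z-Y-X (yaw-pitch-roll)
      (void)a;
    }
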
diff --git a/Eigen/src/Geometry/Homogeneous.h b/Eigen/src/Geometry/Homogeneous.h
index caea1db..3077f09 100644
--- a/Eigen/src/Geometry/Homogeneous.h
+++ b/Eigen/src/Geometry/Homogeneous.h
@@ -77,10 +77,10 @@
: m_matrix(matrix)
{}
- inline int rows() const { return m_matrix.rows() + (int(Direction)==Vertical ? 1 : 0); }
- inline int cols() const { return m_matrix.cols() + (int(Direction)==Horizontal ? 1 : 0); }
+ inline Index rows() const { return m_matrix.rows() + (int(Direction)==Vertical ? 1 : 0); }
+ inline Index cols() const { return m_matrix.cols() + (int(Direction)==Horizontal ? 1 : 0); }
- inline Scalar coeff(int row, int col) const
+ inline Scalar coeff(Index row, Index col) const
{
if( (int(Direction)==Vertical && row==m_matrix.rows())
|| (int(Direction)==Horizontal && col==m_matrix.cols()))
@@ -223,12 +223,13 @@
: public ReturnByValue<ei_homogeneous_left_product_impl<Homogeneous<MatrixType,Vertical>,Lhs> >
{
typedef typename ei_cleantype<typename Lhs::Nested>::type LhsNested;
+ typedef typename MatrixType::Index Index;
ei_homogeneous_left_product_impl(const Lhs& lhs, const MatrixType& rhs)
: m_lhs(lhs), m_rhs(rhs)
{}
- inline int rows() const { return m_lhs.rows(); }
- inline int cols() const { return m_rhs.cols(); }
+ inline Index rows() const { return m_lhs.rows(); }
+ inline Index cols() const { return m_rhs.cols(); }
template<typename Dest> void evalTo(Dest& dst) const
{
@@ -261,12 +262,13 @@
: public ReturnByValue<ei_homogeneous_right_product_impl<Homogeneous<MatrixType,Horizontal>,Rhs> >
{
typedef typename ei_cleantype<typename Rhs::Nested>::type RhsNested;
+ typedef typename MatrixType::Index Index;
ei_homogeneous_right_product_impl(const MatrixType& lhs, const Rhs& rhs)
: m_lhs(lhs), m_rhs(rhs)
{}
- inline int rows() const { return m_lhs.rows(); }
- inline int cols() const { return m_rhs.cols(); }
+ inline Index rows() const { return m_lhs.rows(); }
+ inline Index cols() const { return m_rhs.cols(); }
template<typename Dest> void evalTo(Dest& dst) const
{
diff --git a/Eigen/src/Geometry/Hyperplane.h b/Eigen/src/Geometry/Hyperplane.h
index 1d0b299..8450c9d 100644
--- a/Eigen/src/Geometry/Hyperplane.h
+++ b/Eigen/src/Geometry/Hyperplane.h
@@ -51,10 +51,11 @@
enum { AmbientDimAtCompileTime = _AmbientDim };
typedef _Scalar Scalar;
typedef typename NumTraits<Scalar>::Real RealScalar;
+ typedef DenseIndex Index;
typedef Matrix<Scalar,AmbientDimAtCompileTime,1> VectorType;
- typedef Matrix<Scalar,int(AmbientDimAtCompileTime)==Dynamic
+ typedef Matrix<Scalar,Index(AmbientDimAtCompileTime)==Dynamic
? Dynamic
- : int(AmbientDimAtCompileTime)+1,1> Coefficients;
+ : Index(AmbientDimAtCompileTime)+1,1> Coefficients;
typedef Block<Coefficients,AmbientDimAtCompileTime,1> NormalReturnType;
/** Default constructor without initialization */
@@ -62,7 +63,7 @@
/** Constructs a dynamic-size hyperplane with \a _dim the dimension
* of the ambient space */
- inline explicit Hyperplane(int _dim) : m_coeffs(_dim+1) {}
+ inline explicit Hyperplane(Index _dim) : m_coeffs(_dim+1) {}
/** Construct a plane from its normal \a n and a point \a e onto the plane.
* \warning the vector normal is assumed to be normalized.
@@ -122,7 +123,7 @@
~Hyperplane() {}
/** \returns the dimension in which the plane holds */
- inline int dim() const { return AmbientDimAtCompileTime==Dynamic ? m_coeffs.size()-1 : AmbientDimAtCompileTime; }
+ inline Index dim() const { return AmbientDimAtCompileTime==Dynamic ? m_coeffs.size()-1 : Index(AmbientDimAtCompileTime); }
/** normalizes \c *this */
void normalize(void)
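
Usage sketch of the fixed-size case, which the Index typedef does not disturb:

    #include <Eigen/Geometry>

    void sketch()
    {
      typedef Eigen::Hyperplane<double, 2> Line2;
      Line2 line = Line2::Through(Eigen::Vector2d(0, 0), Eigen::Vector2d(1, 1));
      // distance from the point to the line, sign given by the normal
      double d = line.signedDistance(Eigen::Vector2d(1, 0));
      (void)d;
    }
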
diff --git a/Eigen/src/Geometry/OrthoMethods.h b/Eigen/src/Geometry/OrthoMethods.h
index 265507e..ed790cc 100644
--- a/Eigen/src/Geometry/OrthoMethods.h
+++ b/Eigen/src/Geometry/OrthoMethods.h
@@ -137,12 +137,13 @@
typedef typename ei_plain_matrix_type<Derived>::type VectorType;
typedef typename ei_traits<Derived>::Scalar Scalar;
typedef typename NumTraits<Scalar>::Real RealScalar;
+ typedef typename Derived::Index Index;
typedef Matrix<Scalar,2,1> Vector2;
inline static VectorType run(const Derived& src)
{
VectorType perp = VectorType::Zero(src.size());
- int maxi = 0;
- int sndi = 0;
+ Index maxi = 0;
+ Index sndi = 0;
src.cwiseAbs().maxCoeff(&maxi);
if (maxi==0)
sndi = 1;
diff --git a/Eigen/src/Geometry/ParametrizedLine.h b/Eigen/src/Geometry/ParametrizedLine.h
index 1846a44..45c2338 100644
--- a/Eigen/src/Geometry/ParametrizedLine.h
+++ b/Eigen/src/Geometry/ParametrizedLine.h
@@ -47,6 +47,7 @@
enum { AmbientDimAtCompileTime = _AmbientDim };
typedef _Scalar Scalar;
typedef typename NumTraits<Scalar>::Real RealScalar;
+ typedef DenseIndex Index;
typedef Matrix<Scalar,AmbientDimAtCompileTime,1> VectorType;
/** Default constructor without initialization */
@@ -54,7 +55,7 @@
/** Constructs a dynamic-size line with \a _dim the dimension
* of the ambient space */
- inline explicit ParametrizedLine(int _dim) : m_origin(_dim), m_direction(_dim) {}
+ inline explicit ParametrizedLine(Index _dim) : m_origin(_dim), m_direction(_dim) {}
/** Initializes a parametrized line of direction \a direction and origin \a origin.
* \warning the vector direction is assumed to be normalized.
@@ -71,7 +72,7 @@
~ParametrizedLine() {}
/** \returns the dimension in which the line holds */
- inline int dim() const { return m_direction.size(); }
+ inline Index dim() const { return m_direction.size(); }
const VectorType& origin() const { return m_origin; }
VectorType& origin() { return m_origin; }
diff --git a/Eigen/src/Geometry/Quaternion.h b/Eigen/src/Geometry/Quaternion.h
index 4e054d9..7d52ebf 100644
--- a/Eigen/src/Geometry/Quaternion.h
+++ b/Eigen/src/Geometry/Quaternion.h
@@ -617,6 +617,7 @@
struct ei_quaternionbase_assign_impl<Other,3,3>
{
typedef typename Other::Scalar Scalar;
+ typedef DenseIndex Index;
template<class Derived> inline static void run(QuaternionBase<Derived>& q, const Other& mat)
{
// This algorithm comes from "Quaternion Calculus and Fast Animation",
@@ -633,13 +634,13 @@
}
else
{
- int i = 0;
+ DenseIndex i = 0;
if (mat.coeff(1,1) > mat.coeff(0,0))
i = 1;
if (mat.coeff(2,2) > mat.coeff(i,i))
i = 2;
- int j = (i+1)%3;
- int k = (j+1)%3;
+ DenseIndex j = (i+1)%3;
+ DenseIndex k = (j+1)%3;
t = ei_sqrt(mat.coeff(i,i)-mat.coeff(j,j)-mat.coeff(k,k) + Scalar(1.0));
q.coeffs().coeffRef(i) = Scalar(0.5) * t;
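
The dispatch above is what a matrix-to-quaternion conversion exercises; a sketch:

    #include <Eigen/Geometry>

    void sketch()
    {
      Eigen::Matrix3d R =
          Eigen::AngleAxisd(1.0, Eigen::Vector3d::UnitY()).toRotationMatrix();
      Eigen::Quaterniond q(R);  // runs ei_quaternionbase_assign_impl<Other,3,3>
      (void)q;
    }
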
diff --git a/Eigen/src/Geometry/Transform.h b/Eigen/src/Geometry/Transform.h
index 6a7bb9a..207497f 100644
--- a/Eigen/src/Geometry/Transform.h
+++ b/Eigen/src/Geometry/Transform.h
@@ -174,6 +174,7 @@
};
/** the scalar type of the coefficients */
typedef _Scalar Scalar;
+ typedef DenseIndex Index;
/** type of the matrix used to represent the transformation */
typedef Matrix<Scalar,Rows,HDim> MatrixType;
/** type of the matrix used to represent the linear part of the transformation */
@@ -270,11 +271,11 @@
#endif
/** shortcut for m_matrix(row,col);
- * \sa MatrixBase::operaror(int,int) const */
- inline Scalar operator() (int row, int col) const { return m_matrix(row,col); }
+ * \sa MatrixBase::operator()(Index,Index) const */
+ inline Scalar operator() (Index row, Index col) const { return m_matrix(row,col); }
/** shortcut for m_matrix(row,col);
- * \sa MatrixBase::operaror(int,int) */
- inline Scalar& operator() (int row, int col) { return m_matrix(row,col); }
+ * \sa MatrixBase::operator()(Index,Index) */
+ inline Scalar& operator() (Index row, Index col) { return m_matrix(row,col); }
/** \returns a read-only expression of the transformation matrix */
inline const MatrixType& matrix() const { return m_matrix; }
@@ -1141,7 +1142,7 @@
static ResultType run(const TransformType& tr, const Other& other)
{
TransformType res;
- const int Rows = Mode==Projective ? HDim : Dim;
+ enum { Rows = Mode==Projective ? HDim : Dim };
res.matrix().template block<Rows,HDim>(0,0).noalias() = (tr.linearExt() * other);
res.translationExt() += tr.translationExt();
if(Mode!=Affine)
diff --git a/Eigen/src/Geometry/Umeyama.h b/Eigen/src/Geometry/Umeyama.h
index 262d27a..5b9fd77 100644
--- a/Eigen/src/Geometry/Umeyama.h
+++ b/Eigen/src/Geometry/Umeyama.h
@@ -109,6 +109,7 @@
typedef typename ei_umeyama_transform_matrix_type<Derived, OtherDerived>::type TransformationMatrixType;
typedef typename ei_traits<TransformationMatrixType>::Scalar Scalar;
typedef typename NumTraits<Scalar>::Real RealScalar;
+ typedef typename Derived::Index Index;
EIGEN_STATIC_ASSERT(!NumTraits<Scalar>::IsComplex, NUMERIC_TYPE_MUST_BE_REAL)
EIGEN_STATIC_ASSERT((ei_is_same_type<Scalar, typename ei_traits<OtherDerived>::Scalar>::ret),
@@ -120,8 +121,8 @@
typedef Matrix<Scalar, Dimension, Dimension> MatrixType;
typedef typename ei_plain_matrix_type_row_major<Derived>::type RowMajorMatrixType;
- const int m = src.rows(); // dimension
- const int n = src.cols(); // number of measurements
+ const Index m = src.rows(); // dimension
+ const Index n = src.cols(); // number of measurements
// required for demeaning ...
const RealScalar one_over_n = 1 / static_cast<RealScalar>(n);
@@ -151,7 +152,7 @@
// Eq. (40) and (43)
const VectorType& d = svd.singularValues();
- int rank = 0; for (int i=0; i<m; ++i) if (!ei_isMuchSmallerThan(d.coeff(i),d.coeff(0))) ++rank;
+ Index rank = 0; for (Index i=0; i<m; ++i) if (!ei_isMuchSmallerThan(d.coeff(i),d.coeff(0))) ++rank;
if (rank == m-1) {
if ( svd.matrixU().determinant() * svd.matrixV().determinant() > 0 ) {
Rt.block(0,0,m,m).noalias() = svd.matrixU()*svd.matrixV().transpose();
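
A usage sketch of the changed function (toy data; real callers pass measured point sets):

    #include <Eigen/Geometry>

    void sketch()
    {
      Eigen::MatrixXd src = Eigen::MatrixXd::Random(3, 20);  // 20 points in 3-D
      Eigen::MatrixXd dst = src;                             // toy correspondences
      // 4x4 similarity transform minimizing the least-squares error, Eq. (40)-(43)
      Eigen::MatrixXd T = Eigen::umeyama(src, dst, true);
      (void)T;
    }
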
diff --git a/Eigen/src/Householder/HouseholderSequence.h b/Eigen/src/Householder/HouseholderSequence.h
index ab7702b..90c5bf8 100644
--- a/Eigen/src/Householder/HouseholderSequence.h
+++ b/Eigen/src/Householder/HouseholderSequence.h
@@ -53,6 +53,7 @@
struct ei_traits<HouseholderSequence<VectorsType,CoeffsType,Side> >
{
typedef typename VectorsType::Scalar Scalar;
+ typedef typename VectorsType::StorageKind StorageKind;
enum {
RowsAtCompileTime = Side==OnTheLeft ? ei_traits<VectorsType>::RowsAtCompileTime
: ei_traits<VectorsType>::ColsAtCompileTime,
@@ -69,9 +70,10 @@
{
typedef Block<VectorsType, Dynamic, 1> EssentialVectorType;
typedef HouseholderSequence<VectorsType, CoeffsType, OnTheLeft> HouseholderSequenceType;
- static inline const EssentialVectorType essentialVector(const HouseholderSequenceType& h, int k)
+ typedef typename VectorsType::Index Index;
+ static inline const EssentialVectorType essentialVector(const HouseholderSequenceType& h, Index k)
{
- const int start = k+1+h.m_shift;
+ Index start = k+1+h.m_shift;
return Block<VectorsType,Dynamic,1>(h.m_vectors, start, k, h.rows()-start, 1);
}
};
@@ -81,9 +83,10 @@
{
typedef Transpose<Block<VectorsType, 1, Dynamic> > EssentialVectorType;
typedef HouseholderSequence<VectorsType, CoeffsType, OnTheRight> HouseholderSequenceType;
- static inline const EssentialVectorType essentialVector(const HouseholderSequenceType& h, int k)
+ typedef typename VectorsType::Index Index;
+ static inline const EssentialVectorType essentialVector(const HouseholderSequenceType& h, Index k)
{
- const int start = k+1+h.m_shift;
+ Index start = k+1+h.m_shift;
return Block<VectorsType,1,Dynamic>(h.m_vectors, k, start, 1, h.rows()-start).transpose();
}
};
@@ -106,6 +109,7 @@
MaxColsAtCompileTime = ei_traits<HouseholderSequence>::MaxColsAtCompileTime
};
typedef typename ei_traits<HouseholderSequence>::Scalar Scalar;
+ typedef typename VectorsType::Index Index;
typedef typename ei_hseq_side_dependent_impl<VectorsType,CoeffsType,Side>::EssentialVectorType
EssentialVectorType;
@@ -126,15 +130,15 @@
{
}
- HouseholderSequence(const VectorsType& v, const CoeffsType& h, bool trans, int actualVectors, int shift)
+ HouseholderSequence(const VectorsType& v, const CoeffsType& h, bool trans, Index actualVectors, Index shift)
: m_vectors(v), m_coeffs(h), m_trans(trans), m_actualVectors(actualVectors), m_shift(shift)
{
}
- int rows() const { return Side==OnTheLeft ? m_vectors.rows() : m_vectors.cols(); }
- int cols() const { return rows(); }
+ Index rows() const { return Side==OnTheLeft ? m_vectors.rows() : m_vectors.cols(); }
+ Index cols() const { return rows(); }
- const EssentialVectorType essentialVector(int k) const
+ const EssentialVectorType essentialVector(Index k) const
{
ei_assert(k >= 0 && k < m_actualVectors);
return ei_hseq_side_dependent_impl<VectorsType,CoeffsType,Side>::essentialVector(*this, k);
@@ -154,13 +158,13 @@
/** \internal */
template<typename DestType> void evalTo(DestType& dst) const
{
- int vecs = m_actualVectors;
+ Index vecs = m_actualVectors;
dst.setIdentity(rows(), rows());
Matrix<Scalar, DestType::RowsAtCompileTime, 1,
AutoAlign|ColMajor, DestType::MaxRowsAtCompileTime, 1> temp(rows());
- for(int k = vecs-1; k >= 0; --k)
+ for(Index k = vecs-1; k >= 0; --k)
{
- int cornerSize = rows() - k - m_shift;
+ Index cornerSize = rows() - k - m_shift;
if(m_trans)
dst.bottomRightCorner(cornerSize, cornerSize)
.applyHouseholderOnTheRight(essentialVector(k), m_coeffs.coeff(k), &temp.coeffRef(0));
@@ -174,9 +178,9 @@
template<typename Dest> inline void applyThisOnTheRight(Dest& dst) const
{
Matrix<Scalar,1,Dest::RowsAtCompileTime> temp(dst.rows());
- for(int k = 0; k < m_actualVectors; ++k)
+ for(Index k = 0; k < m_actualVectors; ++k)
{
- int actual_k = m_trans ? m_actualVectors-k-1 : k;
+ Index actual_k = m_trans ? m_actualVectors-k-1 : k;
dst.rightCols(rows()-m_shift-actual_k)
.applyHouseholderOnTheRight(essentialVector(actual_k), m_coeffs.coeff(actual_k), &temp.coeffRef(0));
}
@@ -186,9 +190,9 @@
template<typename Dest> inline void applyThisOnTheLeft(Dest& dst) const
{
Matrix<Scalar,1,Dest::ColsAtCompileTime> temp(dst.cols());
- for(int k = 0; k < m_actualVectors; ++k)
+ for(Index k = 0; k < m_actualVectors; ++k)
{
- int actual_k = m_trans ? k : m_actualVectors-k-1;
+ Index actual_k = m_trans ? k : m_actualVectors-k-1;
dst.bottomRows(rows()-m_shift-actual_k)
.applyHouseholderOnTheLeft(essentialVector(actual_k), m_coeffs.coeff(actual_k), &temp.coeffRef(0));
}
@@ -218,8 +222,8 @@
typename VectorsType::Nested m_vectors;
typename CoeffsType::Nested m_coeffs;
bool m_trans;
- int m_actualVectors;
- int m_shift;
+ Index m_actualVectors;
+ Index m_shift;
};
template<typename VectorsType, typename CoeffsType>
@@ -229,7 +233,9 @@
}
template<typename VectorsType, typename CoeffsType>
-HouseholderSequence<VectorsType,CoeffsType> householderSequence(const VectorsType& v, const CoeffsType& h, bool trans, int actualVectors, int shift)
+HouseholderSequence<VectorsType,CoeffsType> householderSequence
+ (const VectorsType& v, const CoeffsType& h,
+ bool trans, typename VectorsType::Index actualVectors, typename VectorsType::Index shift)
{
return HouseholderSequence<VectorsType,CoeffsType,OnTheLeft>(v, h, trans, actualVectors, shift);
}
@@ -241,7 +247,9 @@
}
template<typename VectorsType, typename CoeffsType>
-HouseholderSequence<VectorsType,CoeffsType> rightHouseholderSequence(const VectorsType& v, const CoeffsType& h, bool trans, int actualVectors, int shift)
+HouseholderSequence<VectorsType,CoeffsType> rightHouseholderSequence
+ (const VectorsType& v, const CoeffsType& h, bool trans,
+ typename VectorsType::Index actualVectors, typename VectorsType::Index shift)
{
return HouseholderSequence<VectorsType,CoeffsType,OnTheRight>(v, h, trans, actualVectors, shift);
}
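
A sketch of accumulating a sequence into a dense matrix via the evalTo() path above (random coefficients, so Q is not orthogonal; this only exercises the mechanics):

    #include <Eigen/Householder>

    void sketch()
    {
      Eigen::MatrixXd V = Eigen::MatrixXd::Random(5, 3);  // essential vectors
      Eigen::VectorXd h = Eigen::VectorXd::Random(3);     // Householder coefficients
      Eigen::MatrixXd Q(5, 5);
      Eigen::householderSequence(V, h).evalTo(Q);         // Q = H_0 H_1 H_2
    }
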
diff --git a/Eigen/src/Jacobi/Jacobi.h b/Eigen/src/Jacobi/Jacobi.h
index 024a130..f34e183 100644
--- a/Eigen/src/Jacobi/Jacobi.h
+++ b/Eigen/src/Jacobi/Jacobi.h
@@ -74,7 +74,7 @@
PlanarRotation adjoint() const { return PlanarRotation(ei_conj(m_c), -m_s); }
template<typename Derived>
- bool makeJacobi(const MatrixBase<Derived>&, int p, int q);
+ bool makeJacobi(const MatrixBase<Derived>&, typename Derived::Index p, typename Derived::Index q);
bool makeJacobi(RealScalar x, Scalar y, RealScalar z);
void makeGivens(const Scalar& p, const Scalar& q, Scalar* z=0);
@@ -89,7 +89,7 @@
/** Makes \c *this as a Jacobi rotation \a J such that applying \a J on both the right and left sides of the selfadjoint 2x2 matrix
* \f$ B = \left ( \begin{array}{cc} x & y \\ \overline y & z \end{array} \right )\f$ yields a diagonal matrix \f$ A = J^* B J \f$
*
- * \sa MatrixBase::makeJacobi(const MatrixBase<Derived>&, int, int), MatrixBase::applyOnTheLeft(), MatrixBase::applyOnTheRight()
+ * \sa MatrixBase::makeJacobi(const MatrixBase<Derived>&, Index, Index), MatrixBase::applyOnTheLeft(), MatrixBase::applyOnTheRight()
*/
template<typename Scalar>
bool PlanarRotation<Scalar>::makeJacobi(RealScalar x, Scalar y, RealScalar z)
@@ -133,7 +133,7 @@
*/
template<typename Scalar>
template<typename Derived>
-inline bool PlanarRotation<Scalar>::makeJacobi(const MatrixBase<Derived>& m, int p, int q)
+inline bool PlanarRotation<Scalar>::makeJacobi(const MatrixBase<Derived>& m, typename Derived::Index p, typename Derived::Index q)
{
return makeJacobi(ei_real(m.coeff(p,p)), m.coeff(p,q), ei_real(m.coeff(q,q)));
}
@@ -277,7 +277,7 @@
*/
template<typename Derived>
template<typename OtherScalar>
-inline void MatrixBase<Derived>::applyOnTheLeft(int p, int q, const PlanarRotation<OtherScalar>& j)
+inline void MatrixBase<Derived>::applyOnTheLeft(Index p, Index q, const PlanarRotation<OtherScalar>& j)
{
RowXpr x(this->row(p));
RowXpr y(this->row(q));
@@ -292,7 +292,7 @@
*/
template<typename Derived>
template<typename OtherScalar>
-inline void MatrixBase<Derived>::applyOnTheRight(int p, int q, const PlanarRotation<OtherScalar>& j)
+inline void MatrixBase<Derived>::applyOnTheRight(Index p, Index q, const PlanarRotation<OtherScalar>& j)
{
ColXpr x(this->col(p));
ColXpr y(this->col(q));
@@ -303,11 +303,12 @@
template<typename VectorX, typename VectorY, typename OtherScalar>
void /*EIGEN_DONT_INLINE*/ ei_apply_rotation_in_the_plane(VectorX& _x, VectorY& _y, const PlanarRotation<OtherScalar>& j)
{
+ typedef typename VectorX::Index Index;
typedef typename VectorX::Scalar Scalar;
ei_assert(_x.size() == _y.size());
- int size = _x.size();
- int incrx = size ==1 ? 1 : &_x.coeffRef(1) - &_x.coeffRef(0);
- int incry = size ==1 ? 1 : &_y.coeffRef(1) - &_y.coeffRef(0);
+ Index size = _x.size();
+ Index incrx = size ==1 ? 1 : &_x.coeffRef(1) - &_x.coeffRef(0);
+ Index incry = size ==1 ? 1 : &_y.coeffRef(1) - &_y.coeffRef(0);
Scalar* EIGEN_RESTRICT x = &_x.coeffRef(0);
Scalar* EIGEN_RESTRICT y = &_y.coeffRef(0);
@@ -318,14 +319,14 @@
typedef typename ei_packet_traits<Scalar>::type Packet;
enum { PacketSize = ei_packet_traits<Scalar>::size, Peeling = 2 };
- int alignedStart = ei_first_aligned(y, size);
- int alignedEnd = alignedStart + ((size-alignedStart)/PacketSize)*PacketSize;
+ Index alignedStart = ei_first_aligned(y, size);
+ Index alignedEnd = alignedStart + ((size-alignedStart)/PacketSize)*PacketSize;
const Packet pc = ei_pset1(Scalar(j.c()));
const Packet ps = ei_pset1(Scalar(j.s()));
ei_conj_helper<NumTraits<Scalar>::IsComplex,false> cj;
- for(int i=0; i<alignedStart; ++i)
+ for(Index i=0; i<alignedStart; ++i)
{
Scalar xi = x[i];
Scalar yi = y[i];
@@ -338,7 +339,7 @@
if(ei_first_aligned(x, size)==alignedStart)
{
- for(int i=alignedStart; i<alignedEnd; i+=PacketSize)
+ for(Index i=alignedStart; i<alignedEnd; i+=PacketSize)
{
Packet xi = ei_pload(px);
Packet yi = ei_pload(py);
@@ -350,8 +351,8 @@
}
else
{
- int peelingEnd = alignedStart + ((size-alignedStart)/(Peeling*PacketSize))*(Peeling*PacketSize);
- for(int i=alignedStart; i<peelingEnd; i+=Peeling*PacketSize)
+ Index peelingEnd = alignedStart + ((size-alignedStart)/(Peeling*PacketSize))*(Peeling*PacketSize);
+ for(Index i=alignedStart; i<peelingEnd; i+=Peeling*PacketSize)
{
Packet xi = ei_ploadu(px);
Packet xi1 = ei_ploadu(px+PacketSize);
@@ -373,7 +374,7 @@
}
}
- for(int i=alignedEnd; i<size; ++i)
+ for(Index i=alignedEnd; i<size; ++i)
{
Scalar xi = x[i];
Scalar yi = y[i];
@@ -383,7 +384,7 @@
}
else
{
- for(int i=0; i<size; ++i)
+ for(Index i=0; i<size; ++i)
{
Scalar xi = *x;
Scalar yi = *y;
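
The rotation API is a good smoke test for the new signatures; a sketch zeroing one entry with a Givens rotation:

    #include <Eigen/Jacobi>

    void sketch()
    {
      Eigen::MatrixXd A = Eigen::MatrixXd::Random(4, 4);
      Eigen::PlanarRotation<double> G;
      G.makeGivens(A(0, 0), A(1, 0));       // choose c,s from rows 0 and 1
      A.applyOnTheLeft(0, 1, G.adjoint());  // now A(1,0) == 0
    }
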
diff --git a/Eigen/src/LU/FullPivLU.h b/Eigen/src/LU/FullPivLU.h
index 1b0e67b..da4dce6 100644
--- a/Eigen/src/LU/FullPivLU.h
+++ b/Eigen/src/LU/FullPivLU.h
@@ -68,8 +68,10 @@
};
typedef typename MatrixType::Scalar Scalar;
typedef typename NumTraits<typename MatrixType::Scalar>::Real RealScalar;
- typedef typename ei_plain_row_type<MatrixType, int>::type IntRowVectorType;
- typedef typename ei_plain_col_type<MatrixType, int>::type IntColVectorType;
+ typedef typename ei_traits<MatrixType>::StorageKind StorageKind;
+ typedef typename ei_index<StorageKind>::type Index;
+ typedef typename ei_plain_row_type<MatrixType, Index>::type IntRowVectorType;
+ typedef typename ei_plain_col_type<MatrixType, Index>::type IntColVectorType;
typedef PermutationMatrix<ColsAtCompileTime, MaxColsAtCompileTime> PermutationQType;
typedef PermutationMatrix<RowsAtCompileTime, MaxRowsAtCompileTime> PermutationPType;
@@ -87,7 +89,7 @@
* according to the specified problem \a size.
* \sa FullPivLU()
*/
- FullPivLU(int rows, int cols);
+ FullPivLU(Index rows, Index cols);
/** Constructor.
*
@@ -124,7 +126,7 @@
*
* \sa rank()
*/
- inline int nonzeroPivots() const
+ inline Index nonzeroPivots() const
{
ei_assert(m_isInitialized && "LU is not initialized.");
return m_nonzero_pivots;
@@ -301,12 +303,12 @@
* For that, it uses the threshold value that you can control by calling
* setThreshold(const RealScalar&).
*/
- inline int rank() const
+ inline Index rank() const
{
ei_assert(m_isInitialized && "LU is not initialized.");
RealScalar premultiplied_threshold = ei_abs(m_maxpivot) * threshold();
- int result = 0;
- for(int i = 0; i < m_nonzero_pivots; ++i)
+ Index result = 0;
+ for(Index i = 0; i < m_nonzero_pivots; ++i)
result += (ei_abs(m_lu.coeff(i,i)) > premultiplied_threshold);
return result;
}
@@ -317,7 +319,7 @@
* For that, it uses the threshold value that you can control by calling
* setThreshold(const RealScalar&).
*/
- inline int dimensionOfKernel() const
+ inline Index dimensionOfKernel() const
{
ei_assert(m_isInitialized && "LU is not initialized.");
return cols() - rank();
@@ -378,8 +380,8 @@
MatrixType reconstructedMatrix() const;
- inline int rows() const { return m_lu.rows(); }
- inline int cols() const { return m_lu.cols(); }
+ inline Index rows() const { return m_lu.rows(); }
+ inline Index cols() const { return m_lu.cols(); }
protected:
MatrixType m_lu;
@@ -387,7 +389,7 @@
PermutationQType m_q;
IntColVectorType m_rowsTranspositions;
IntRowVectorType m_colsTranspositions;
- int m_det_pq, m_nonzero_pivots;
+ Index m_det_pq, m_nonzero_pivots;
RealScalar m_maxpivot, m_prescribedThreshold;
bool m_isInitialized, m_usePrescribedThreshold;
};
@@ -399,7 +401,7 @@
}
template<typename MatrixType>
-FullPivLU<MatrixType>::FullPivLU(int rows, int cols)
+FullPivLU<MatrixType>::FullPivLU(Index rows, Index cols)
: m_lu(rows, cols),
m_p(rows),
m_q(cols),
@@ -429,26 +431,26 @@
m_isInitialized = true;
m_lu = matrix;
- const int size = matrix.diagonalSize();
- const int rows = matrix.rows();
- const int cols = matrix.cols();
+ const Index size = matrix.diagonalSize();
+ const Index rows = matrix.rows();
+ const Index cols = matrix.cols();
// will store the transpositions, before we accumulate them at the end.
// can't accumulate on-the-fly because that will be done in reverse order for the rows.
m_rowsTranspositions.resize(matrix.rows());
m_colsTranspositions.resize(matrix.cols());
- int number_of_transpositions = 0; // number of NONTRIVIAL transpositions, i.e. m_rowsTranspositions[i]!=i
+ Index number_of_transpositions = 0; // number of NONTRIVIAL transpositions, i.e. m_rowsTranspositions[i]!=i
m_nonzero_pivots = size; // the generic case is that in which all pivots are nonzero (invertible case)
m_maxpivot = RealScalar(0);
RealScalar cutoff(0);
- for(int k = 0; k < size; ++k)
+ for(Index k = 0; k < size; ++k)
{
// First, we need to find the pivot.
// biggest coefficient in the remaining bottom-right corner (starting at row k, col k)
- int row_of_biggest_in_corner, col_of_biggest_in_corner;
+ Index row_of_biggest_in_corner, col_of_biggest_in_corner;
RealScalar biggest_in_corner;
biggest_in_corner = m_lu.bottomRightCorner(rows-k, cols-k)
.cwiseAbs()
@@ -468,7 +470,7 @@
// before exiting, make sure to initialize the still uninitialized transpositions
// in a sane state without destroying what we already have.
m_nonzero_pivots = k;
- for(int i = k; i < size; ++i)
+ for(Index i = k; i < size; ++i)
{
m_rowsTranspositions.coeffRef(i) = i;
m_colsTranspositions.coeffRef(i) = i;
@@ -505,11 +507,11 @@
// permutations P and Q
m_p.setIdentity(rows);
- for(int k = size-1; k >= 0; --k)
+ for(Index k = size-1; k >= 0; --k)
m_p.applyTranspositionOnTheRight(k, m_rowsTranspositions.coeff(k));
m_q.setIdentity(cols);
- for(int k = 0; k < size; ++k)
+ for(Index k = 0; k < size; ++k)
m_q.applyTranspositionOnTheRight(k, m_colsTranspositions.coeff(k));
m_det_pq = (number_of_transpositions%2) ? -1 : 1;
@@ -531,7 +533,7 @@
MatrixType FullPivLU<MatrixType>::reconstructedMatrix() const
{
ei_assert(m_isInitialized && "LU is not initialized.");
- const int smalldim = std::min(m_lu.rows(), m_lu.cols());
+ const Index smalldim = std::min(m_lu.rows(), m_lu.cols());
// LU
MatrixType res(m_lu.rows(),m_lu.cols());
// FIXME the .toDenseMatrix() should not be needed...
@@ -564,7 +566,7 @@
template<typename Dest> void evalTo(Dest& dst) const
{
- const int cols = dec().matrixLU().cols(), dimker = cols - rank();
+ const Index cols = dec().matrixLU().cols(), dimker = cols - rank();
if(dimker == 0)
{
// The Kernel is just {0}, so it doesn't have a basis properly speaking, but let's
@@ -590,10 +592,10 @@
* independent vectors in Ker U.
*/
- Matrix<int, Dynamic, 1, 0, MaxSmallDimAtCompileTime, 1> pivots(rank());
+ Matrix<Index, Dynamic, 1, 0, MaxSmallDimAtCompileTime, 1> pivots(rank());
RealScalar premultiplied_threshold = dec().maxPivot() * dec().threshold();
- int p = 0;
- for(int i = 0; i < dec().nonzeroPivots(); ++i)
+ Index p = 0;
+ for(Index i = 0; i < dec().nonzeroPivots(); ++i)
if(ei_abs(dec().matrixLU().coeff(i,i)) > premultiplied_threshold)
pivots.coeffRef(p++) = i;
ei_internal_assert(p == rank());
@@ -605,14 +607,14 @@
Matrix<typename MatrixType::Scalar, Dynamic, Dynamic, MatrixType::Options,
MaxSmallDimAtCompileTime, MatrixType::MaxColsAtCompileTime>
m(dec().matrixLU().block(0, 0, rank(), cols));
- for(int i = 0; i < rank(); ++i)
+ for(Index i = 0; i < rank(); ++i)
{
if(i) m.row(i).head(i).setZero();
m.row(i).tail(cols-i) = dec().matrixLU().row(pivots.coeff(i)).tail(cols-i);
}
m.block(0, 0, rank(), rank());
m.block(0, 0, rank(), rank()).template triangularView<StrictlyLower>().setZero();
- for(int i = 0; i < rank(); ++i)
+ for(Index i = 0; i < rank(); ++i)
m.col(i).swap(m.col(pivots.coeff(i)));
// ok, we have our trapezoid matrix, we can apply the triangular solver.
@@ -624,13 +626,13 @@
);
// now we must undo the column permutation that we had applied!
- for(int i = rank()-1; i >= 0; --i)
+ for(Index i = rank()-1; i >= 0; --i)
m.col(i).swap(m.col(pivots.coeff(i)));
// see the negative sign in the next line, that's what we were talking about above.
- for(int i = 0; i < rank(); ++i) dst.row(dec().permutationQ().indices().coeff(i)) = -m.row(i).tail(dimker);
- for(int i = rank(); i < cols; ++i) dst.row(dec().permutationQ().indices().coeff(i)).setZero();
- for(int k = 0; k < dimker; ++k) dst.coeffRef(dec().permutationQ().indices().coeff(rank()+k), k) = Scalar(1);
+ for(Index i = 0; i < rank(); ++i) dst.row(dec().permutationQ().indices().coeff(i)) = -m.row(i).tail(dimker);
+ for(Index i = rank(); i < cols; ++i) dst.row(dec().permutationQ().indices().coeff(i)).setZero();
+ for(Index k = 0; k < dimker; ++k) dst.coeffRef(dec().permutationQ().indices().coeff(rank()+k), k) = Scalar(1);
}
};
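
A companion sketch for the kernel evaluator above; kernel() as the public accessor is an assumption consistent with this retval:

    MatrixXd A(4, 4);
    A.setRandom();
    A.col(3) = A.col(0) + A.col(1);                // one-dimensional kernel
    FullPivLU<MatrixXd> lu(A);
    MatrixXd K = lu.kernel();                      // cols() - rank() columns, built as above
    // every column c satisfies (A * K.col(c)).isZero(1e-10) up to roundoff
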
@@ -658,15 +660,15 @@
return;
}
- Matrix<int, Dynamic, 1, 0, MaxSmallDimAtCompileTime, 1> pivots(rank());
+ Matrix<Index, Dynamic, 1, 0, MaxSmallDimAtCompileTime, 1> pivots(rank());
RealScalar premultiplied_threshold = dec().maxPivot() * dec().threshold();
- int p = 0;
- for(int i = 0; i < dec().nonzeroPivots(); ++i)
+ Index p = 0;
+ for(Index i = 0; i < dec().nonzeroPivots(); ++i)
if(ei_abs(dec().matrixLU().coeff(i,i)) > premultiplied_threshold)
pivots.coeffRef(p++) = i;
ei_internal_assert(p == rank());
- for(int i = 0; i < rank(); ++i)
+ for(Index i = 0; i < rank(); ++i)
dst.col(i) = originalMatrix().col(dec().permutationQ().indices().coeff(pivots.coeff(i)));
}
};
@@ -689,10 +691,10 @@
* Step 4: result = Q * c;
*/
- const int rows = dec().rows(), cols = dec().cols(),
+ const Index rows = dec().rows(), cols = dec().cols(),
nonzero_pivots = dec().nonzeroPivots();
ei_assert(rhs().rows() == rows);
- const int smalldim = std::min(rows, cols);
+ const Index smalldim = std::min(rows, cols);
if(nonzero_pivots == 0)
{
@@ -724,9 +726,9 @@
.solveInPlace(c.topRows(nonzero_pivots));
// Step 4
- for(int i = 0; i < nonzero_pivots; ++i)
+ for(Index i = 0; i < nonzero_pivots; ++i)
dst.row(dec().permutationQ().indices().coeff(i)) = c.row(i);
- for(int i = nonzero_pivots; i < dec().matrixLU().cols(); ++i)
+ for(Index i = nonzero_pivots; i < dec().matrixLU().cols(); ++i)
dst.row(dec().permutationQ().indices().coeff(i)).setZero();
}
};
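
Likewise for the solver above, where solve() returns this retval (same includes assumed):

    MatrixXd A = MatrixXd::Random(4, 4);
    VectorXd b = VectorXd::Random(4);
    FullPivLU<MatrixXd> lu(A);
    VectorXd x = lu.solve(b);                      // steps 1-4: P, then L, then U on the top rows, then Q
    bool consistent = (A * x).isApprox(b);         // false when b is outside the column space
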
diff --git a/Eigen/src/LU/Inverse.h b/Eigen/src/LU/Inverse.h
index 1e9d69a..ed1724d 100644
--- a/Eigen/src/LU/Inverse.h
+++ b/Eigen/src/LU/Inverse.h
@@ -281,7 +281,8 @@
template<typename MatrixType>
struct ei_inverse_impl : public ReturnByValue<ei_inverse_impl<MatrixType> >
{
- typedef typename MatrixType::Nested MatrixTypeNested;
+ typedef typename MatrixType::Index Index;
+ typedef typename ei_eval<MatrixType>::type MatrixTypeNested;
typedef typename ei_cleantype<MatrixTypeNested>::type MatrixTypeNestedCleaned;
const MatrixTypeNested m_matrix;
@@ -290,8 +291,8 @@
: m_matrix(matrix)
{}
- inline int rows() const { return m_matrix.rows(); }
- inline int cols() const { return m_matrix.cols(); }
+ inline Index rows() const { return m_matrix.rows(); }
+ inline Index cols() const { return m_matrix.cols(); }
template<typename Dest> inline void evalTo(Dest& dst) const
{
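
ei_inverse_impl backs the user-facing inverse(); a trivial sketch of the effect, assuming <Eigen/LU>:

    Matrix3d M = Matrix3d::Random();
    Matrix3d Minv = M.inverse();                   // evaluated by value through ei_inverse_impl
    // (M * Minv).isIdentity(1e-12) holds for well-conditioned M
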
diff --git a/Eigen/src/LU/PartialPivLU.h b/Eigen/src/LU/PartialPivLU.h
index 695b7d7..39c348e 100644
--- a/Eigen/src/LU/PartialPivLU.h
+++ b/Eigen/src/LU/PartialPivLU.h
@@ -71,7 +71,9 @@
};
typedef typename MatrixType::Scalar Scalar;
typedef typename NumTraits<typename MatrixType::Scalar>::Real RealScalar;
- typedef typename ei_plain_col_type<MatrixType, int>::type PermutationVectorType;
+ typedef typename ei_traits<MatrixType>::StorageKind StorageKind;
+ typedef typename ei_index<StorageKind>::type Index;
+ typedef typename ei_plain_col_type<MatrixType, Index>::type PermutationVectorType;
typedef PermutationMatrix<RowsAtCompileTime, MaxRowsAtCompileTime> PermutationType;
@@ -89,7 +91,7 @@
* according to the specified problem \a size.
* \sa PartialPivLU()
*/
- PartialPivLU(int size);
+ PartialPivLU(Index size);
/** Constructor.
*
@@ -178,14 +180,14 @@
MatrixType reconstructedMatrix() const;
- inline int rows() const { return m_lu.rows(); }
- inline int cols() const { return m_lu.cols(); }
+ inline Index rows() const { return m_lu.rows(); }
+ inline Index cols() const { return m_lu.cols(); }
protected:
MatrixType m_lu;
PermutationType m_p;
PermutationVectorType m_rowsTranspositions;
- int m_det_p;
+ Index m_det_p;
bool m_isInitialized;
};
@@ -200,7 +202,7 @@
}
template<typename MatrixType>
-PartialPivLU<MatrixType>::PartialPivLU(int size)
+PartialPivLU<MatrixType>::PartialPivLU(Index size)
: m_lu(size, size),
m_p(size),
m_rowsTranspositions(size),
@@ -233,6 +235,7 @@
typedef Block<MapLU, Dynamic, Dynamic> MatrixType;
typedef Block<MatrixType,Dynamic,Dynamic> BlockType;
typedef typename MatrixType::RealScalar RealScalar;
+ typedef typename MatrixType::Index Index;
/** \internal performs the LU decomposition in-place of the matrix \a lu
* using an unblocked algorithm.
@@ -246,14 +249,14 @@
* undefined coefficients (to avoid generating inf/nan values). Returns true
* otherwise.
*/
- static bool unblocked_lu(MatrixType& lu, int* row_transpositions, int& nb_transpositions)
+ static bool unblocked_lu(MatrixType& lu, Index* row_transpositions, Index& nb_transpositions)
{
- const int rows = lu.rows();
- const int size = std::min(lu.rows(),lu.cols());
+ const Index rows = lu.rows();
+ const Index size = std::min(lu.rows(),lu.cols());
nb_transpositions = 0;
- for(int k = 0; k < size; ++k)
+ for(Index k = 0; k < size; ++k)
{
- int row_of_biggest_in_col;
+ Index row_of_biggest_in_col;
RealScalar biggest_in_corner
= lu.col(k).tail(rows-k).cwiseAbs().maxCoeff(&row_of_biggest_in_col);
row_of_biggest_in_col += k;
@@ -265,7 +268,7 @@
// the blocked_lu code can't guarantee the same.
// before exiting, make sure to initialize the still uninitialized row_transpositions
// in a sane state without destroying what we already have.
- for(int i = k; i < size; i++)
+ for(Index i = k; i < size; i++)
row_transpositions[i] = i;
return false;
}
@@ -280,8 +283,8 @@
if(k<rows-1)
{
- int rrows = rows-k-1;
- int rsize = size-k-1;
+ Index rrows = rows-k-1;
+ Index rsize = size-k-1;
lu.col(k).tail(rrows) /= lu.coeff(k,k);
lu.bottomRightCorner(rrows,rsize).noalias() -= lu.col(k).tail(rrows) * lu.row(k).tail(rsize);
}
@@ -306,12 +309,12 @@
* 1 - reduce the number of instantiations to the strict minimum
* 2 - avoid infinite recursion of the instantiations with Block<Block<Block<...> > >
*/
- static bool blocked_lu(int rows, int cols, Scalar* lu_data, int luStride, int* row_transpositions, int& nb_transpositions, int maxBlockSize=256)
+ static bool blocked_lu(Index rows, Index cols, Scalar* lu_data, Index luStride, Index* row_transpositions, Index& nb_transpositions, Index maxBlockSize=256)
{
MapLU lu1(lu_data,StorageOrder==RowMajor?rows:luStride,StorageOrder==RowMajor?luStride:cols);
MatrixType lu(lu1,0,0,rows,cols);
- const int size = std::min(rows,cols);
+ const Index size = std::min(rows,cols);
// if the matrix is too small, no blocking:
if(size<=16)
@@ -321,19 +324,19 @@
// automatically adjust the number of subdivisions to the size
// of the matrix so that there are enough sub-blocks:
- int blockSize;
+ Index blockSize;
{
blockSize = size/8;
blockSize = (blockSize/16)*16;
- blockSize = std::min(std::max(blockSize,8), maxBlockSize);
+ blockSize = std::min(std::max(blockSize,Index(8)), maxBlockSize);
}
nb_transpositions = 0;
- for(int k = 0; k < size; k+=blockSize)
+ for(Index k = 0; k < size; k+=blockSize)
{
- int bs = std::min(size-k,blockSize); // actual size of the block
- int trows = rows - k - bs; // trailing rows
- int tsize = size - k - bs; // trailing size
+ Index bs = std::min(size-k,blockSize); // actual size of the block
+ Index trows = rows - k - bs; // trailing rows
+ Index tsize = size - k - bs; // trailing size
// partition the matrix:
// A00 | A01 | A02
@@ -346,7 +349,7 @@
BlockType A21(lu,k+bs,k,trows,bs);
BlockType A22(lu,k+bs,k+bs,trows,tsize);
- int nb_transpositions_in_panel;
+ Index nb_transpositions_in_panel;
// recursively calls the blocked LU algorithm with a very small
// blocking size:
if(!blocked_lu(trows+bs, bs, &lu.coeffRef(k,k), luStride,
@@ -355,23 +358,23 @@
// end quickly with undefined coefficients, just avoid generating inf/nan values.
// before exiting, make sure to initialize the still uninitialized row_transpositions
// in a sane state without destroying what we already have.
- for(int i=k; i<size; ++i)
+ for(Index i=k; i<size; ++i)
row_transpositions[i] = i;
return false;
}
nb_transpositions += nb_transpositions_in_panel;
// update permutations and apply them to A10
- for(int i=k; i<k+bs; ++i)
+ for(Index i=k; i<k+bs; ++i)
{
- int piv = (row_transpositions[i] += k);
+ Index piv = (row_transpositions[i] += k);
A_0.row(i).swap(A_0.row(piv));
}
if(trows)
{
// apply permutations to A_2
- for(int i=k;i<k+bs; ++i)
+ for(Index i=k;i<k+bs; ++i)
A_2.row(i).swap(A_2.row(row_transpositions[i]));
// A12 = A11^-1 A12
@@ -387,7 +390,7 @@
/** \internal performs the LU decomposition with partial pivoting in-place.
*/
template<typename MatrixType, typename IntVector>
-void ei_partial_lu_inplace(MatrixType& lu, IntVector& row_transpositions, int& nb_transpositions)
+void ei_partial_lu_inplace(MatrixType& lu, IntVector& row_transpositions, typename MatrixType::Index& nb_transpositions)
{
ei_assert(lu.cols() == row_transpositions.size());
ei_assert((&row_transpositions.coeffRef(1)-&row_transpositions.coeffRef(0)) == 1);
@@ -403,16 +406,16 @@
m_lu = matrix;
ei_assert(matrix.rows() == matrix.cols() && "PartialPivLU is only for square (and moreover invertible) matrices");
- const int size = matrix.rows();
+ const Index size = matrix.rows();
m_rowsTranspositions.resize(size);
- int nb_transpositions;
+ Index nb_transpositions;
ei_partial_lu_inplace(m_lu, m_rowsTranspositions, nb_transpositions);
m_det_p = (nb_transpositions%2) ? -1 : 1;
m_p.setIdentity(size);
- for(int k = size-1; k >= 0; --k)
+ for(Index k = size-1; k >= 0; --k)
m_p.applyTranspositionOnTheRight(k, m_rowsTranspositions.coeff(k));
m_isInitialized = true;
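
A sketch of the public path into the decomposition above (assumes <Eigen/LU>; solve() and determinant() are the usual accessors, not shown in this hunk):

    MatrixXd A = MatrixXd::Random(100, 100);       // assumed invertible (random: almost surely)
    VectorXd b = VectorXd::Random(100);
    PartialPivLU<MatrixXd> plu(A);                 // dispatches to blocked_lu / unblocked_lu above
    VectorXd x = plu.solve(b);
    double det = plu.determinant();                // sign comes from m_det_p
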
diff --git a/Eigen/src/QR/ColPivHouseholderQR.h b/Eigen/src/QR/ColPivHouseholderQR.h
index fbc80ad..b4bcfd5 100644
--- a/Eigen/src/QR/ColPivHouseholderQR.h
+++ b/Eigen/src/QR/ColPivHouseholderQR.h
@@ -56,10 +56,11 @@
};
typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::RealScalar RealScalar;
+ typedef typename MatrixType::Index Index;
typedef Matrix<Scalar, RowsAtCompileTime, RowsAtCompileTime, Options, MaxRowsAtCompileTime, MaxRowsAtCompileTime> MatrixQType;
typedef typename ei_plain_diag_type<MatrixType>::type HCoeffsType;
typedef PermutationMatrix<ColsAtCompileTime, MaxColsAtCompileTime> PermutationType;
- typedef typename ei_plain_row_type<MatrixType, int>::type IntRowVectorType;
+ typedef typename ei_plain_row_type<MatrixType, Index>::type IntRowVectorType;
typedef typename ei_plain_row_type<MatrixType>::type RowVectorType;
typedef typename ei_plain_row_type<MatrixType, RealScalar>::type RealRowVectorType;
typedef typename HouseholderSequence<MatrixType,HCoeffsType>::ConjugateReturnType HouseholderSequenceType;
@@ -85,7 +86,7 @@
* according to the specified problem \a size.
* \sa ColPivHouseholderQR()
*/
- ColPivHouseholderQR(int rows, int cols)
+ ColPivHouseholderQR(Index rows, Index cols)
: m_qr(rows, cols),
m_hCoeffs(std::min(rows,cols)),
m_colsPermutation(cols),
@@ -186,12 +187,12 @@
* For that, it uses the threshold value that you can control by calling
* setThreshold(const RealScalar&).
*/
- inline int rank() const
+ inline Index rank() const
{
ei_assert(m_isInitialized && "ColPivHouseholderQR is not initialized.");
RealScalar premultiplied_threshold = ei_abs(m_maxpivot) * threshold();
- int result = 0;
- for(int i = 0; i < m_nonzero_pivots; ++i)
+ Index result = 0;
+ for(Index i = 0; i < m_nonzero_pivots; ++i)
result += (ei_abs(m_qr.coeff(i,i)) > premultiplied_threshold);
return result;
}
@@ -202,7 +203,7 @@
* For that, it uses the threshold value that you can control by calling
* setThreshold(const RealScalar&).
*/
- inline int dimensionOfKernel() const
+ inline Index dimensionOfKernel() const
{
ei_assert(m_isInitialized && "ColPivHouseholderQR is not initialized.");
return cols() - rank();
@@ -260,8 +261,8 @@
(*this, MatrixType::Identity(m_qr.rows(), m_qr.cols()));
}
- inline int rows() const { return m_qr.rows(); }
- inline int cols() const { return m_qr.cols(); }
+ inline Index rows() const { return m_qr.rows(); }
+ inline Index cols() const { return m_qr.cols(); }
const HCoeffsType& hCoeffs() const { return m_hCoeffs; }
/** Allows to prescribe a threshold to be used by certain methods, such as rank(),
@@ -320,7 +321,7 @@
*
* \sa rank()
*/
- inline int nonzeroPivots() const
+ inline Index nonzeroPivots() const
{
ei_assert(m_isInitialized && "LU is not initialized.");
return m_nonzero_pivots;
@@ -340,8 +341,8 @@
RealRowVectorType m_colSqNorms;
bool m_isInitialized, m_usePrescribedThreshold;
RealScalar m_prescribedThreshold, m_maxpivot;
- int m_nonzero_pivots;
- int m_det_pq;
+ Index m_nonzero_pivots;
+ Index m_det_pq;
};
#ifndef EIGEN_HIDE_HEAVY_CODE
@@ -365,9 +366,9 @@
template<typename MatrixType>
ColPivHouseholderQR<MatrixType>& ColPivHouseholderQR<MatrixType>::compute(const MatrixType& matrix)
{
- int rows = matrix.rows();
- int cols = matrix.cols();
- int size = matrix.diagonalSize();
+ Index rows = matrix.rows();
+ Index cols = matrix.cols();
+ Index size = matrix.diagonalSize();
m_qr = matrix;
m_hCoeffs.resize(size);
@@ -375,10 +376,10 @@
m_temp.resize(cols);
m_colsTranspositions.resize(matrix.cols());
- int number_of_transpositions = 0;
+ Index number_of_transpositions = 0;
m_colSqNorms.resize(cols);
- for(int k = 0; k < cols; ++k)
+ for(Index k = 0; k < cols; ++k)
m_colSqNorms.coeffRef(k) = m_qr.col(k).squaredNorm();
RealScalar threshold_helper = m_colSqNorms.maxCoeff() * ei_abs2(NumTraits<Scalar>::epsilon()) / rows;
@@ -386,10 +387,10 @@
m_nonzero_pivots = size; // the generic case is that in which all pivots are nonzero (invertible case)
m_maxpivot = RealScalar(0);
- for(int k = 0; k < size; ++k)
+ for(Index k = 0; k < size; ++k)
{
// first, we look up in our table m_colSqNorms which column has the biggest squared norm
- int biggest_col_index;
+ Index biggest_col_index;
RealScalar biggest_col_sq_norm = m_colSqNorms.tail(cols-k).maxCoeff(&biggest_col_index);
biggest_col_index += k;
@@ -444,7 +445,7 @@
}
m_colsPermutation.setIdentity(cols);
- for(int k = 0; k < m_nonzero_pivots; ++k)
+ for(Index k = 0; k < m_nonzero_pivots; ++k)
m_colsPermutation.applyTranspositionOnTheRight(k, m_colsTranspositions.coeff(k));
m_det_pq = (number_of_transpositions%2) ? -1 : 1;
@@ -461,12 +462,10 @@
template<typename Dest> void evalTo(Dest& dst) const
{
-#ifndef EIGEN_NO_DEBUG
- const int rows = dec().rows();
- ei_assert(rhs().rows() == rows);
-#endif
+ ei_assert(rhs().rows() == dec().rows());
+
- const int cols = dec().cols(),
- nonzero_pivots = dec().nonzeroPivots();
+ const Index cols = dec().cols(),
+ nonzero_pivots = dec().nonzeroPivots();
if(nonzero_pivots == 0)
{
@@ -498,8 +497,8 @@
.template triangularView<Upper>()
* c.topRows(nonzero_pivots);
- for(int i = 0; i < nonzero_pivots; ++i) dst.row(dec().colsPermutation().indices().coeff(i)) = c.row(i);
- for(int i = nonzero_pivots; i < cols; ++i) dst.row(dec().colsPermutation().indices().coeff(i)).setZero();
+ for(Index i = 0; i < nonzero_pivots; ++i) dst.row(dec().colsPermutation().indices().coeff(i)) = c.row(i);
+ for(Index i = nonzero_pivots; i < cols; ++i) dst.row(dec().colsPermutation().indices().coeff(i)).setZero();
}
};
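
A hedged usage sketch for the solver above (assumes <Eigen/QR>):

    MatrixXd A = MatrixXd::Random(6, 3);
    VectorXd b = VectorXd::Random(6);
    ColPivHouseholderQR<MatrixXd> qr(A);
    VectorXd x = qr.solve(b);                      // Q^* b, triangular solve, undo the column permutation
    MatrixXd::Index r = qr.rank();                 // same thresholded pivot count as in FullPivLU
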
diff --git a/Eigen/src/QR/FullPivHouseholderQR.h b/Eigen/src/QR/FullPivHouseholderQR.h
index 0195e13..3b4d02d 100644
--- a/Eigen/src/QR/FullPivHouseholderQR.h
+++ b/Eigen/src/QR/FullPivHouseholderQR.h
@@ -56,11 +56,12 @@
};
typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::RealScalar RealScalar;
+ typedef typename MatrixType::Index Index;
typedef Matrix<Scalar, RowsAtCompileTime, RowsAtCompileTime, Options, MaxRowsAtCompileTime, MaxRowsAtCompileTime> MatrixQType;
typedef typename ei_plain_diag_type<MatrixType>::type HCoeffsType;
- typedef Matrix<int, 1, ColsAtCompileTime, RowMajor, 1, MaxColsAtCompileTime> IntRowVectorType;
+ typedef Matrix<Index, 1, ColsAtCompileTime, RowMajor, 1, MaxColsAtCompileTime> IntRowVectorType;
typedef PermutationMatrix<ColsAtCompileTime, MaxColsAtCompileTime> PermutationType;
- typedef typename ei_plain_col_type<MatrixType, int>::type IntColVectorType;
+ typedef typename ei_plain_col_type<MatrixType, Index>::type IntColVectorType;
typedef typename ei_plain_row_type<MatrixType>::type RowVectorType;
typedef typename ei_plain_col_type<MatrixType>::type ColVectorType;
@@ -84,7 +85,7 @@
* according to the specified problem \a size.
* \sa FullPivHouseholderQR()
*/
- FullPivHouseholderQR(int rows, int cols)
+ FullPivHouseholderQR(Index rows, Index cols)
: m_qr(rows, cols),
m_hCoeffs(std::min(rows,cols)),
m_rows_transpositions(rows),
@@ -188,7 +189,7 @@
* \note This is computed at the time of the construction of the QR decomposition. This
* method does not perform any further computation.
*/
- inline int rank() const
+ inline Index rank() const
{
ei_assert(m_isInitialized && "FullPivHouseholderQR is not initialized.");
return m_rank;
@@ -199,7 +200,7 @@
* \note Since the rank is computed at the time of the construction of the QR decomposition, this
* method almost does not perform any further computation.
*/
- inline int dimensionOfKernel() const
+ inline Index dimensionOfKernel() const
{
ei_assert(m_isInitialized && "FullPivHouseholderQR is not initialized.");
return m_qr.cols() - m_rank;
@@ -253,8 +254,8 @@
(*this, MatrixType::Identity(m_qr.rows(), m_qr.cols()));
}
- inline int rows() const { return m_qr.rows(); }
- inline int cols() const { return m_qr.cols(); }
+ inline Index rows() const { return m_qr.rows(); }
+ inline Index cols() const { return m_qr.cols(); }
const HCoeffsType& hCoeffs() const { return m_hCoeffs; }
protected:
@@ -266,8 +267,8 @@
RowVectorType m_temp;
bool m_isInitialized;
RealScalar m_precision;
- int m_rank;
- int m_det_pq;
+ Index m_rank;
+ Index m_det_pq;
};
#ifndef EIGEN_HIDE_HEAVY_CODE
@@ -291,9 +292,9 @@
template<typename MatrixType>
FullPivHouseholderQR<MatrixType>& FullPivHouseholderQR<MatrixType>::compute(const MatrixType& matrix)
{
- int rows = matrix.rows();
- int cols = matrix.cols();
- int size = std::min(rows,cols);
+ Index rows = matrix.rows();
+ Index cols = matrix.cols();
+ Index size = std::min(rows,cols);
m_rank = size;
m_qr = matrix;
@@ -305,13 +306,13 @@
m_rows_transpositions.resize(matrix.rows());
m_cols_transpositions.resize(matrix.cols());
- int number_of_transpositions = 0;
+ Index number_of_transpositions = 0;
RealScalar biggest(0);
- for (int k = 0; k < size; ++k)
+ for (Index k = 0; k < size; ++k)
{
- int row_of_biggest_in_corner, col_of_biggest_in_corner;
+ Index row_of_biggest_in_corner, col_of_biggest_in_corner;
RealScalar biggest_in_corner;
biggest_in_corner = m_qr.bottomRightCorner(rows-k, cols-k)
@@ -325,7 +326,7 @@
if(ei_isMuchSmallerThan(biggest_in_corner, biggest, m_precision))
{
m_rank = k;
- for(int i = k; i < size; i++)
+ for(Index i = k; i < size; i++)
{
m_rows_transpositions.coeffRef(i) = i;
m_cols_transpositions.coeffRef(i) = i;
@@ -354,7 +355,7 @@
}
m_cols_permutation.setIdentity(cols);
- for(int k = 0; k < size; ++k)
+ for(Index k = 0; k < size; ++k)
m_cols_permutation.applyTranspositionOnTheRight(k, m_cols_transpositions.coeff(k));
m_det_pq = (number_of_transpositions%2) ? -1 : 1;
@@ -371,7 +372,7 @@
template<typename Dest> void evalTo(Dest& dst) const
{
- const int rows = dec().rows(), cols = dec().cols();
+ const Index rows = dec().rows(), cols = dec().cols();
ei_assert(rhs().rows() == rows);
// FIXME introduce nonzeroPivots() and use it here. and more generally,
@@ -385,9 +386,9 @@
typename Rhs::PlainObject c(rhs());
Matrix<Scalar,1,Rhs::ColsAtCompileTime> temp(rhs().cols());
- for (int k = 0; k < dec().rank(); ++k)
+ for (Index k = 0; k < dec().rank(); ++k)
{
- int remainingSize = rows-k;
+ Index remainingSize = rows-k;
c.row(k).swap(c.row(dec().rowsTranspositions().coeff(k)));
c.bottomRightCorner(remainingSize, rhs().cols())
.applyHouseholderOnTheLeft(dec().matrixQR().col(k).tail(remainingSize-1),
@@ -409,8 +410,8 @@
.template triangularView<Upper>()
.solveInPlace(c.topRows(dec().rank()));
- for(int i = 0; i < dec().rank(); ++i) dst.row(dec().colsPermutation().indices().coeff(i)) = c.row(i);
- for(int i = dec().rank(); i < cols; ++i) dst.row(dec().colsPermutation().indices().coeff(i)).setZero();
+ for(Index i = 0; i < dec().rank(); ++i) dst.row(dec().colsPermutation().indices().coeff(i)) = c.row(i);
+ for(Index i = dec().rank(); i < cols; ++i) dst.row(dec().colsPermutation().indices().coeff(i)).setZero();
}
};
@@ -422,12 +423,12 @@
// compute the product H'_0 H'_1 ... H'_n-1,
// where H_k is the k-th Householder transformation I - h_k v_k v_k'
// and v_k is the k-th Householder vector [1,m_qr(k+1,k), m_qr(k+2,k), ...]
- int rows = m_qr.rows();
- int cols = m_qr.cols();
- int size = std::min(rows,cols);
+ Index rows = m_qr.rows();
+ Index cols = m_qr.cols();
+ Index size = std::min(rows,cols);
MatrixQType res = MatrixQType::Identity(rows, rows);
Matrix<Scalar,1,MatrixType::RowsAtCompileTime> temp(rows);
- for (int k = size-1; k >= 0; k--)
+ for (Index k = size-1; k >= 0; k--)
{
res.block(k, k, rows-k, rows-k)
.applyHouseholderOnTheLeft(m_qr.col(k).tail(rows-k-1), ei_conj(m_hCoeffs.coeff(k)), &temp.coeffRef(k));
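
The loop above accumulates Q explicitly; a short sketch of the matrixQ() accessor it implements (assumes <Eigen/QR>):

    MatrixXd A = MatrixXd::Random(5, 5);
    FullPivHouseholderQR<MatrixXd> qr(A);
    MatrixXd Q = qr.matrixQ();                     // the reflector product accumulated above
    // Q.adjoint() * Q is the identity up to roundoff
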
diff --git a/Eigen/src/QR/HouseholderQR.h b/Eigen/src/QR/HouseholderQR.h
index 6a28839..a8caacc 100644
--- a/Eigen/src/QR/HouseholderQR.h
+++ b/Eigen/src/QR/HouseholderQR.h
@@ -60,6 +60,7 @@
};
typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::RealScalar RealScalar;
+ typedef typename MatrixType::Index Index;
typedef Matrix<Scalar, RowsAtCompileTime, RowsAtCompileTime, ei_traits<MatrixType>::Flags&RowMajorBit ? RowMajor : ColMajor, MaxRowsAtCompileTime, MaxRowsAtCompileTime> MatrixQType;
typedef typename ei_plain_diag_type<MatrixType>::type HCoeffsType;
typedef typename ei_plain_row_type<MatrixType>::type RowVectorType;
@@ -79,7 +80,7 @@
* according to the specified problem \a size.
* \sa HouseholderQR()
*/
- HouseholderQR(int rows, int cols)
+ HouseholderQR(Index rows, Index cols)
: m_qr(rows, cols),
m_hCoeffs(std::min(rows,cols)),
m_temp(cols),
@@ -165,8 +166,8 @@
*/
typename MatrixType::RealScalar logAbsDeterminant() const;
- inline int rows() const { return m_qr.rows(); }
- inline int cols() const { return m_qr.cols(); }
+ inline Index rows() const { return m_qr.rows(); }
+ inline Index cols() const { return m_qr.cols(); }
const HCoeffsType& hCoeffs() const { return m_hCoeffs; }
protected:
@@ -197,19 +198,19 @@
template<typename MatrixType>
HouseholderQR<MatrixType>& HouseholderQR<MatrixType>::compute(const MatrixType& matrix)
{
- int rows = matrix.rows();
- int cols = matrix.cols();
- int size = std::min(rows,cols);
+ Index rows = matrix.rows();
+ Index cols = matrix.cols();
+ Index size = std::min(rows,cols);
m_qr = matrix;
m_hCoeffs.resize(size);
m_temp.resize(cols);
- for(int k = 0; k < size; ++k)
+ for(Index k = 0; k < size; ++k)
{
- int remainingRows = rows - k;
- int remainingCols = cols - k - 1;
+ Index remainingRows = rows - k;
+ Index remainingCols = cols - k - 1;
RealScalar beta;
m_qr.col(k).tail(remainingRows).makeHouseholderInPlace(m_hCoeffs.coeffRef(k), beta);
@@ -231,8 +232,8 @@
template<typename Dest> void evalTo(Dest& dst) const
{
- const int rows = dec().rows(), cols = dec().cols();
- const int rank = std::min(rows, cols);
+ const Index rows = dec().rows(), cols = dec().cols();
+ const Index rank = std::min(rows, cols);
ei_assert(rhs().rows() == rows);
typename Rhs::PlainObject c(rhs());
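
The matching public-facing sketch (assumes <Eigen/QR>; solve() is the usual retval entry point):

    MatrixXd A = MatrixXd::Random(8, 4);           // rows >= cols
    HouseholderQR<MatrixXd> qr(A);                 // one reflector per column, as in compute()
    VectorXd x = qr.solve(VectorXd::Random(8));    // triangular solve on the top min(rows,cols) rows
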
diff --git a/Eigen/src/SVD/JacobiSVD.h b/Eigen/src/SVD/JacobiSVD.h
index 9323c01..2925306 100644
--- a/Eigen/src/SVD/JacobiSVD.h
+++ b/Eigen/src/SVD/JacobiSVD.h
@@ -1,7 +1,7 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
-// Copyright (C) 2009 Benoit Jacob <jacob.benoit.1@gmail.com>
+// Copyright (C) 2009-2010 Benoit Jacob <jacob.benoit.1@gmail.com>
//
// Eigen is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
@@ -63,6 +63,7 @@
private:
typedef typename MatrixType::Scalar Scalar;
typedef typename NumTraits<typename MatrixType::Scalar>::Real RealScalar;
+ typedef typename MatrixType::Index Index;
enum {
ComputeU = (Options & SkipU) == 0,
ComputeV = (Options & SkipV) == 0,
@@ -107,7 +108,7 @@
* according to the specified problem \a size.
* \sa JacobiSVD()
*/
- JacobiSVD(int rows, int cols) : m_matrixU(rows, rows),
+ JacobiSVD(Index rows, Index cols) : m_matrixU(rows, rows),
m_matrixV(cols, cols),
m_singularValues(std::min(rows, cols)),
m_workMatrix(rows, cols),
@@ -119,7 +120,7 @@
m_workMatrix(),
m_isInitialized(false)
{
- const int minSize = std::min(matrix.rows(), matrix.cols());
+ const Index minSize = std::min(matrix.rows(), matrix.cols());
m_singularValues.resize(minSize);
m_workMatrix.resize(minSize, minSize);
compute(matrix);
@@ -164,7 +165,8 @@
struct ei_svd_precondition_2x2_block_to_be_real<MatrixType, Options, false>
{
typedef JacobiSVD<MatrixType, Options> SVD;
- static void run(typename SVD::WorkMatrixType&, JacobiSVD<MatrixType, Options>&, int, int) {}
+ typedef typename SVD::Index Index;
+ static void run(typename SVD::WorkMatrixType&, JacobiSVD<MatrixType, Options>&, Index, Index) {}
};
template<typename MatrixType, unsigned int Options>
@@ -173,8 +175,9 @@
typedef JacobiSVD<MatrixType, Options> SVD;
typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::RealScalar RealScalar;
+ typedef typename SVD::Index Index;
enum { ComputeU = SVD::ComputeU, ComputeV = SVD::ComputeV };
- static void run(typename SVD::WorkMatrixType& work_matrix, JacobiSVD<MatrixType, Options>& svd, int p, int q)
+ static void run(typename SVD::WorkMatrixType& work_matrix, JacobiSVD<MatrixType, Options>& svd, Index p, Index q)
{
Scalar z;
PlanarRotation<Scalar> rot;
@@ -210,8 +213,8 @@
}
};
-template<typename MatrixType, typename RealScalar>
-void ei_real_2x2_jacobi_svd(const MatrixType& matrix, int p, int q,
+template<typename MatrixType, typename RealScalar, typename Index>
+void ei_real_2x2_jacobi_svd(const MatrixType& matrix, Index p, Index q,
PlanarRotation<RealScalar> *j_left,
PlanarRotation<RealScalar> *j_right)
{
@@ -250,12 +253,13 @@
typedef JacobiSVD<MatrixType, Options> SVD;
typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::RealScalar RealScalar;
+ typedef typename MatrixType::Index Index;
enum { ComputeU = SVD::ComputeU, ComputeV = SVD::ComputeV };
static bool run(const MatrixType& matrix, typename SVD::WorkMatrixType& work_matrix, SVD& svd)
{
- int rows = matrix.rows();
- int cols = matrix.cols();
- int diagSize = cols;
+ Index rows = matrix.rows();
+ Index cols = matrix.cols();
+ Index diagSize = cols;
if(rows > cols)
{
FullPivHouseholderQR<MatrixType> qr(matrix);
@@ -282,6 +286,7 @@
typedef JacobiSVD<MatrixType, Options> SVD;
typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::RealScalar RealScalar;
+ typedef typename MatrixType::Index Index;
enum {
ComputeU = SVD::ComputeU,
ComputeV = SVD::ComputeV,
@@ -294,9 +299,9 @@
static bool run(const MatrixType& matrix, typename SVD::WorkMatrixType& work_matrix, SVD& svd)
{
- int rows = matrix.rows();
- int cols = matrix.cols();
- int diagSize = rows;
+ Index rows = matrix.rows();
+ Index cols = matrix.cols();
+ Index diagSize = rows;
if(cols > rows)
{
typedef Matrix<Scalar,ColsAtCompileTime,RowsAtCompileTime,
@@ -315,9 +320,9 @@
template<typename MatrixType, unsigned int Options>
JacobiSVD<MatrixType, Options>& JacobiSVD<MatrixType, Options>::compute(const MatrixType& matrix)
{
- int rows = matrix.rows();
- int cols = matrix.cols();
- int diagSize = std::min(rows, cols);
+ Index rows = matrix.rows();
+ Index cols = matrix.cols();
+ Index diagSize = std::min(rows, cols);
m_singularValues.resize(diagSize);
const RealScalar precision = 2 * NumTraits<Scalar>::epsilon();
@@ -333,9 +338,9 @@
while(!finished)
{
finished = true;
- for(int p = 1; p < diagSize; ++p)
+ for(Index p = 1; p < diagSize; ++p)
{
- for(int q = 0; q < p; ++q)
+ for(Index q = 0; q < p; ++q)
{
if(std::max(ei_abs(m_workMatrix.coeff(p,q)),ei_abs(m_workMatrix.coeff(q,p)))
> std::max(ei_abs(m_workMatrix.coeff(p,p)),ei_abs(m_workMatrix.coeff(q,q)))*precision)
@@ -356,16 +361,16 @@
}
}
- for(int i = 0; i < diagSize; ++i)
+ for(Index i = 0; i < diagSize; ++i)
{
RealScalar a = ei_abs(m_workMatrix.coeff(i,i));
m_singularValues.coeffRef(i) = a;
if(ComputeU && (a!=RealScalar(0))) m_matrixU.col(i) *= m_workMatrix.coeff(i,i)/a;
}
- for(int i = 0; i < diagSize; i++)
+ for(Index i = 0; i < diagSize; i++)
{
- int pos;
+ Index pos;
m_singularValues.tail(diagSize-i).maxCoeff(&pos);
if(pos)
{
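
A sketch of the entry point for the sweep-and-sort code above (assumes <Eigen/SVD>; the singularValues() accessor is not shown in this hunk):

    MatrixXf A = MatrixXf::Random(4, 6);
    JacobiSVD<MatrixXf> svd(A);                    // one-sided Jacobi sweeps until 'finished'
    VectorXf s = svd.singularValues();             // nonnegative, sorted decreasing by the pass above
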
diff --git a/Eigen/src/SVD/SVD.h b/Eigen/src/SVD/SVD.h
index a9e22df..7360562 100644
--- a/Eigen/src/SVD/SVD.h
+++ b/Eigen/src/SVD/SVD.h
@@ -46,6 +46,7 @@
typedef _MatrixType MatrixType;
typedef typename MatrixType::Scalar Scalar;
typedef typename NumTraits<typename MatrixType::Scalar>::Real RealScalar;
+ typedef typename MatrixType::Index Index;
enum {
RowsAtCompileTime = MatrixType::RowsAtCompileTime,
@@ -79,7 +80,7 @@
* according to the specified problem \a size.
* \sa JacobiSVD()
*/
- SVD(int rows, int cols) : m_matU(rows, rows),
+ SVD(Index rows, Index cols) : m_matU(rows, rows),
m_matV(cols,cols),
m_sigma(std::min(rows, cols)),
m_workMatrix(rows, cols),
@@ -143,13 +144,13 @@
template<typename ScalingType, typename RotationType>
void computeScalingRotation(ScalingType *positive, RotationType *unitary) const;
- inline int rows() const
+ inline Index rows() const
{
ei_assert(m_isInitialized && "SVD is not initialized.");
return m_rows;
}
- inline int cols() const
+ inline Index cols() const
{
ei_assert(m_isInitialized && "SVD is not initialized.");
return m_cols;
@@ -182,7 +183,7 @@
MatrixType m_workMatrix;
RowVector m_rv1;
bool m_isInitialized;
- int m_rows, m_cols;
+ Index m_rows, m_cols;
};
/** Computes / recomputes the SVD decomposition A = U S V^* of \a matrix
@@ -194,8 +195,8 @@
template<typename MatrixType>
SVD<MatrixType>& SVD<MatrixType>::compute(const MatrixType& matrix)
{
- const int m = m_rows = matrix.rows();
- const int n = m_cols = matrix.cols();
+ const Index m = m_rows = matrix.rows();
+ const Index n = m_cols = matrix.cols();
m_matU.resize(m, m);
m_matU.setZero();
@@ -203,14 +204,14 @@
m_matV.resize(n,n);
m_workMatrix = matrix;
- int max_iters = 30;
+ Index max_iters = 30;
MatrixVType& V = m_matV;
MatrixType& A = m_workMatrix;
SingularValuesType& W = m_sigma;
bool flag;
- int i=0,its=0,j=0,k=0,l=0,nm=0;
+ Index i=0,its=0,j=0,k=0,l=0,nm=0;
Scalar anorm, c, f, g, h, s, scale, x, y, z;
bool convergence = true;
Scalar eps = NumTraits<Scalar>::dummy_precision();
@@ -426,9 +427,9 @@
// sort the singular values:
{
- for (int i=0; i<n; i++)
+ for (Index i=0; i<n; i++)
{
- int k;
+ Index k;
W.tail(n-i).maxCoeff(&k);
if (k != 0)
{
@@ -459,11 +460,11 @@
{
ei_assert(rhs().rows() == dec().rows());
- for (int j=0; j<cols(); ++j)
+ for (Index j=0; j<cols(); ++j)
{
Matrix<Scalar,MatrixType::RowsAtCompileTime,1> aux = dec().matrixU().adjoint() * rhs().col(j);
- for (int i = 0; i < dec().rows(); ++i)
+ for (Index i = 0; i < dec().rows(); ++i)
{
Scalar si = dec().singularValues().coeff(i);
if(si == RealScalar(0))
@@ -471,7 +472,7 @@
else
aux.coeffRef(i) /= si;
}
- const int minsize = std::min(dec().rows(),dec().cols());
+ const Index minsize = std::min(dec().rows(),dec().cols());
dst.col(j).head(minsize) = aux.head(minsize);
if(dec().cols()>dec().rows()) dst.col(j).tail(cols()-minsize).setZero();
dst.col(j) = dec().matrixV() * dst.col(j);
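
And for this solver (assumes <Eigen/SVD>; the matrix-taking constructor forwarding to compute() is an assumption):

    MatrixXd A = MatrixXd::Random(5, 5);
    SVD<MatrixXd> svd(A);
    VectorXd x = svd.solve(VectorXd::Random(5));   // per evalTo: zero singular values contribute nothing
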
diff --git a/Eigen/src/SVD/UpperBidiagonalization.h b/Eigen/src/SVD/UpperBidiagonalization.h
index 53e0407..1e1355b 100644
--- a/Eigen/src/SVD/UpperBidiagonalization.h
+++ b/Eigen/src/SVD/UpperBidiagonalization.h
@@ -37,6 +37,7 @@
};
typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::RealScalar RealScalar;
+ typedef typename MatrixType::Index Index;
typedef Matrix<Scalar, 1, ColsAtCompileTime> RowVectorType;
typedef Matrix<Scalar, RowsAtCompileTime, 1> ColVectorType;
typedef BandMatrix<RealScalar, ColsAtCompileTime, ColsAtCompileTime, 1, 0> BidiagonalType;
@@ -95,8 +96,8 @@
template<typename _MatrixType>
UpperBidiagonalization<_MatrixType>& UpperBidiagonalization<_MatrixType>::compute(const _MatrixType& matrix)
{
- int rows = matrix.rows();
- int cols = matrix.cols();
+ Index rows = matrix.rows();
+ Index cols = matrix.cols();
ei_assert(rows >= cols && "UpperBidiagonalization is only for matrices satisfying rows>=cols.");
@@ -104,10 +105,10 @@
ColVectorType temp(rows);
- for (int k = 0; /* breaks at k==cols-1 below */ ; ++k)
+ for (Index k = 0; /* breaks at k==cols-1 below */ ; ++k)
{
- int remainingRows = rows - k;
- int remainingCols = cols - k - 1;
+ Index remainingRows = rows - k;
+ Index remainingCols = cols - k - 1;
// construct left householder transform in-place in m_householder
m_householder.col(k).tail(remainingRows)
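
A minimal sketch for the class above (assumes <Eigen/SVD>; the matrix-taking constructor is assumed to forward to compute()):

    MatrixXd A = MatrixXd::Random(6, 4);           // rows >= cols, as the assertion requires
    UpperBidiagonalization<MatrixXd> ubd(A);       // runs the alternating Householder loop above
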
diff --git a/Eigen/src/Sparse/AmbiVector.h b/Eigen/src/Sparse/AmbiVector.h
index 1ac2827..7b18f8c 100644
--- a/Eigen/src/Sparse/AmbiVector.h
+++ b/Eigen/src/Sparse/AmbiVector.h
@@ -35,7 +35,8 @@
public:
typedef _Scalar Scalar;
typedef typename NumTraits<Scalar>::Real RealScalar;
- AmbiVector(int size)
+ typedef SparseIndex Index;
+ AmbiVector(Index size)
: m_buffer(0), m_zero(0), m_size(0), m_allocatedSize(0), m_allocatedElements(0), m_mode(-1)
{
resize(size);
@@ -44,40 +45,40 @@
void init(double estimatedDensity);
void init(int mode);
- int nonZeros() const;
+ Index nonZeros() const;
/** Specifies a sub-vector to work on */
- void setBounds(int start, int end) { m_start = start; m_end = end; }
+ void setBounds(Index start, Index end) { m_start = start; m_end = end; }
void setZero();
void restart();
- Scalar& coeffRef(int i);
- Scalar& coeff(int i);
+ Scalar& coeffRef(Index i);
+ Scalar& coeff(Index i);
class Iterator;
~AmbiVector() { delete[] m_buffer; }
- void resize(int size)
+ void resize(Index size)
{
if (m_allocatedSize < size)
reallocate(size);
m_size = size;
}
- int size() const { return m_size; }
+ Index size() const { return m_size; }
protected:
- void reallocate(int size)
+ void reallocate(Index size)
{
// if the size of the matrix is not too large, let's allocate a bit more than needed such
// that we can handle a dense vector even in sparse mode.
delete[] m_buffer;
if (size<1000)
{
- int allocSize = (size * sizeof(ListEl))/sizeof(Scalar);
+ Index allocSize = (size * sizeof(ListEl))/sizeof(Scalar);
m_allocatedElements = (allocSize*sizeof(Scalar))/sizeof(ListEl);
m_buffer = new Scalar[allocSize];
}
@@ -93,9 +94,9 @@
void reallocateSparse()
{
- int copyElements = m_allocatedElements;
- m_allocatedElements = std::min(int(m_allocatedElements*1.5),m_size);
- int allocSize = m_allocatedElements * sizeof(ListEl);
+ Index copyElements = m_allocatedElements;
+ m_allocatedElements = std::min(Index(m_allocatedElements*1.5),m_size);
+ Index allocSize = m_allocatedElements * sizeof(ListEl);
allocSize = allocSize/sizeof(Scalar) + (allocSize%sizeof(Scalar)>0?1:0);
Scalar* newBuffer = new Scalar[allocSize];
memcpy(newBuffer, m_buffer, copyElements * sizeof(ListEl));
@@ -107,30 +108,30 @@
// element type of the linked list
struct ListEl
{
- int next;
- int index;
+ Index next;
+ Index index;
Scalar value;
};
// used to store data in both mode
Scalar* m_buffer;
Scalar m_zero;
- int m_size;
- int m_start;
- int m_end;
- int m_allocatedSize;
- int m_allocatedElements;
- int m_mode;
+ Index m_size;
+ Index m_start;
+ Index m_end;
+ Index m_allocatedSize;
+ Index m_allocatedElements;
+ Index m_mode;
// linked list mode
- int m_llStart;
- int m_llCurrent;
- int m_llSize;
+ Index m_llStart;
+ Index m_llCurrent;
+ Index m_llSize;
};
/** \returns the number of non zeros in the current sub vector */
template<typename Scalar>
-int AmbiVector<Scalar>::nonZeros() const
+SparseIndex AmbiVector<Scalar>::nonZeros() const
{
if (m_mode==IsSparse)
return m_llSize;
@@ -175,7 +176,7 @@
{
if (m_mode==IsDense)
{
- for (int i=m_start; i<m_end; ++i)
+ for (Index i=m_start; i<m_end; ++i)
m_buffer[i] = Scalar(0);
}
else
@@ -187,7 +188,7 @@
}
template<typename Scalar>
-Scalar& AmbiVector<Scalar>::coeffRef(int i)
+Scalar& AmbiVector<Scalar>::coeffRef(Index i)
{
if (m_mode==IsDense)
return m_buffer[i];
@@ -221,7 +222,7 @@
}
else
{
- int nextel = llElements[m_llCurrent].next;
+ Index nextel = llElements[m_llCurrent].next;
ei_assert(i>=llElements[m_llCurrent].index && "you must call restart() before inserting an element with lower or equal index");
while (nextel >= 0 && llElements[nextel].index<=i)
{
@@ -256,7 +257,7 @@
}
template<typename Scalar>
-Scalar& AmbiVector<Scalar>::coeff(int i)
+Scalar& AmbiVector<Scalar>::coeff(Index i)
{
if (m_mode==IsDense)
return m_buffer[i];
@@ -270,7 +271,7 @@
}
else
{
- int elid = m_llStart;
+ Index elid = m_llStart;
while (elid >= 0 && llElements[elid].index<i)
elid = llElements[elid].next;
@@ -327,7 +328,7 @@
}
}
- int index() const { return m_cachedIndex; }
+ Index index() const { return m_cachedIndex; }
Scalar value() const { return m_cachedValue; }
operator bool() const { return m_cachedIndex>=0; }
@@ -365,9 +366,9 @@
protected:
const AmbiVector& m_vector; // the target vector
- int m_currentEl; // the current element in sparse/linked-list mode
+ Index m_currentEl; // the current element in sparse/linked-list mode
RealScalar m_epsilon; // epsilon used to prune zero coefficients
- int m_cachedIndex; // current coordinate
+ Index m_cachedIndex; // current coordinate
Scalar m_cachedValue; // current value
bool m_isDense; // mode of the vector
};
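
AmbiVector is internal; a sketch built only from the interface visible above (the Iterator's one-argument constructor is an assumption):

    AmbiVector<double> v(100);
    v.setBounds(0, 100);                           // work on the whole vector
    v.init(0.01);                                  // low estimated density -> linked-list (sparse) mode
    v.coeffRef(7)  = 2.0;                          // insert with increasing indices,
    v.coeffRef(42) = 1.0;                          // or call restart() to go back
    for (AmbiVector<double>::Iterator it(v); it; ++it)
    {
      // it.index() visits 7 then 42, it.value() yields the stored values
    }
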
diff --git a/Eigen/src/Sparse/CholmodSupport.h b/Eigen/src/Sparse/CholmodSupport.h
index cf40724..82a09f3 100644
--- a/Eigen/src/Sparse/CholmodSupport.h
+++ b/Eigen/src/Sparse/CholmodSupport.h
@@ -114,8 +114,8 @@
{
m_innerSize = cm.nrow;
m_outerSize = cm.ncol;
- m_outerIndex = reinterpret_cast<int*>(cm.p);
- m_innerIndices = reinterpret_cast<int*>(cm.i);
+ m_outerIndex = reinterpret_cast<Index*>(cm.p);
+ m_innerIndices = reinterpret_cast<Index*>(cm.i);
m_values = reinterpret_cast<Scalar*>(cm.x);
m_nnz = m_outerIndex[cm.ncol];
}
@@ -220,7 +220,7 @@
template<typename Derived>
bool SparseLLT<MatrixType,Cholmod>::solveInPlace(MatrixBase<Derived> &b) const
{
- const int size = m_cholmodFactor->n;
+ const Index size = m_cholmodFactor->n;
ei_assert(size==b.rows());
// this uses Eigen's triangular sparse solver
diff --git a/Eigen/src/Sparse/CompressedStorage.h b/Eigen/src/Sparse/CompressedStorage.h
index 4fc1797..37d3376 100644
--- a/Eigen/src/Sparse/CompressedStorage.h
+++ b/Eigen/src/Sparse/CompressedStorage.h
@@ -32,6 +32,7 @@
class CompressedStorage
{
typedef typename NumTraits<Scalar>::Real RealScalar;
+ typedef SparseIndex Index;
public:
CompressedStorage()
: m_values(0), m_indices(0), m_size(0), m_allocatedSize(0)
@@ -53,7 +54,7 @@
{
resize(other.size());
memcpy(m_values, other.m_values, m_size * sizeof(Scalar));
- memcpy(m_indices, other.m_indices, m_size * sizeof(int));
+ memcpy(m_indices, other.m_indices, m_size * sizeof(Index));
return *this;
}
@@ -91,9 +92,9 @@
m_size = size;
}
- void append(const Scalar& v, int i)
+ void append(const Scalar& v, Index i)
{
- int id = static_cast<int>(m_size);
+ Index id = static_cast<Index>(m_size);
resize(m_size+1, 1);
m_values[id] = v;
m_indices[id] = i;
@@ -106,10 +107,10 @@
inline Scalar& value(size_t i) { return m_values[i]; }
inline const Scalar& value(size_t i) const { return m_values[i]; }
- inline int& index(size_t i) { return m_indices[i]; }
- inline const int& index(size_t i) const { return m_indices[i]; }
+ inline Index& index(size_t i) { return m_indices[i]; }
+ inline const Index& index(size_t i) const { return m_indices[i]; }
- static CompressedStorage Map(int* indices, Scalar* values, size_t size)
+ static CompressedStorage Map(Index* indices, Scalar* values, size_t size)
{
CompressedStorage res;
res.m_indices = indices;
@@ -119,13 +120,13 @@
}
/** \returns the largest \c k such that for all \c j in [0,k) index[\c j]\<\a key */
- inline int searchLowerIndex(int key) const
+ inline Index searchLowerIndex(Index key) const
{
return searchLowerIndex(0, m_size, key);
}
/** \returns the largest \c k in [start,end) such that for all \c j in [start,k) index[\c j]\<\a key */
- inline int searchLowerIndex(size_t start, size_t end, int key) const
+ inline Index searchLowerIndex(size_t start, size_t end, Index key) const
{
while(end>start)
{
@@ -135,12 +136,12 @@
else
end = mid;
}
- return static_cast<int>(start);
+ return static_cast<Index>(start);
}
/** \returns the stored value at index \a key
* If the value does not exist, then the value \a defaultValue is returned without any insertion. */
- inline Scalar at(int key, Scalar defaultValue = Scalar(0)) const
+ inline Scalar at(Index key, Scalar defaultValue = Scalar(0)) const
{
if (m_size==0)
return defaultValue;
@@ -153,7 +154,7 @@
}
/** Like at(), but the search is performed in the range [start,end) */
- inline Scalar atInRange(size_t start, size_t end, int key, Scalar defaultValue = Scalar(0)) const
+ inline Scalar atInRange(size_t start, size_t end, Index key, Scalar defaultValue = Scalar(0)) const
{
if (start>=end)
return Scalar(0);
@@ -168,7 +169,7 @@
/** \returns a reference to the value at index \a key
* If the value does not exist, then the value \a defaultValue is inserted
* such that the keys are sorted. */
- inline Scalar& atWithInsertion(int key, Scalar defaultValue = Scalar(0))
+ inline Scalar& atWithInsertion(Index key, Scalar defaultValue = Scalar(0))
{
size_t id = searchLowerIndex(0,m_size,key);
if (id>=m_size || m_indices[id]!=key)
@@ -206,11 +207,11 @@
inline void reallocate(size_t size)
{
Scalar* newValues = new Scalar[size];
- int* newIndices = new int[size];
+ Index* newIndices = new Index[size];
size_t copySize = std::min(size, m_size);
// copy
memcpy(newValues, m_values, copySize * sizeof(Scalar));
- memcpy(newIndices, m_indices, copySize * sizeof(int));
+ memcpy(newIndices, m_indices, copySize * sizeof(Index));
// delete old stuff
delete[] m_values;
delete[] m_indices;
@@ -221,7 +222,7 @@
protected:
Scalar* m_values;
- int* m_indices;
+ Index* m_indices;
size_t m_size;
size_t m_allocatedSize;
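
The loop above is a hand-rolled lower bound; a standalone restatement of its semantics in plain C++ (illustration only, not Eigen code):

    #include <vector>
    #include <cstddef>
    // first position p in idx with idx[p] >= key (idx sorted ascending),
    // i.e. the largest k such that idx[j] < key for all j in [0,k)
    std::size_t searchLowerIndex(const std::vector<int>& idx, int key)
    {
      std::size_t start = 0, end = idx.size();
      while (end > start)
      {
        const std::size_t mid = (start + end) / 2;
        if (idx[mid] < key) start = mid + 1;
        else                end = mid;
      }
      return start;   // e.g. idx = {1,4,4,7}: key 4 -> 1, key 5 -> 3, key 9 -> 4
    }
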
diff --git a/Eigen/src/Sparse/CoreIterators.h b/Eigen/src/Sparse/CoreIterators.h
index 6978045..7ae847d 100644
--- a/Eigen/src/Sparse/CoreIterators.h
+++ b/Eigen/src/Sparse/CoreIterators.h
@@ -38,9 +38,11 @@
template<typename Derived> class DenseBase<Derived>::InnerIterator
{
typedef typename Derived::Scalar Scalar;
+ typedef typename Derived::Index Index;
+
enum { IsRowMajor = (Derived::Flags&RowMajorBit)==RowMajorBit };
public:
- EIGEN_STRONG_INLINE InnerIterator(const Derived& expr, int outer)
+ EIGEN_STRONG_INLINE InnerIterator(const Derived& expr, Index outer)
- : m_expression(expr), m_inner(0), m_outer(outer), m_end(expr.rows())
+ : m_expression(expr), m_inner(0), m_outer(outer), m_end(expr.innerSize())
{}
@@ -52,17 +54,17 @@
EIGEN_STRONG_INLINE InnerIterator& operator++() { m_inner++; return *this; }
- EIGEN_STRONG_INLINE int index() const { return m_inner; }
- inline int row() const { return IsRowMajor ? m_outer : index(); }
- inline int col() const { return IsRowMajor ? index() : m_outer; }
+ EIGEN_STRONG_INLINE Index index() const { return m_inner; }
+ inline Index row() const { return IsRowMajor ? m_outer : index(); }
+ inline Index col() const { return IsRowMajor ? index() : m_outer; }
EIGEN_STRONG_INLINE operator bool() const { return m_inner < m_end && m_inner>=0; }
protected:
const Derived& m_expression;
- int m_inner;
- const int m_outer;
- const int m_end;
+ Index m_inner;
+ const Index m_outer;
+ const Index m_end;
};
#endif // EIGEN_COREITERATORS_H
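
A sketch of walking a dense expression with the iterator above (assumes <Eigen/Core>; MatrixXd inherits InnerIterator from DenseBase):

    MatrixXd m = MatrixXd::Random(3, 3);
    double sum = 0;
    for (MatrixXd::Index j = 0; j < m.cols(); ++j)      // outer dimension (columns here, ColMajor)
      for (MatrixXd::InnerIterator it(m, j); it; ++it)  // visits every coefficient of column j
        sum += it.value();                              // sum == m.sum() up to roundoff
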
diff --git a/Eigen/src/Sparse/DynamicSparseMatrix.h b/Eigen/src/Sparse/DynamicSparseMatrix.h
index fd7c7fb..fea707f 100644
--- a/Eigen/src/Sparse/DynamicSparseMatrix.h
+++ b/Eigen/src/Sparse/DynamicSparseMatrix.h
@@ -75,16 +75,16 @@
typedef DynamicSparseMatrix<Scalar,(Flags&~RowMajorBit)|(IsRowMajor?RowMajorBit:0)> TransposedSparseMatrix;
- int m_innerSize;
+ Index m_innerSize;
std::vector<CompressedStorage<Scalar> > m_data;
public:
- inline int rows() const { return IsRowMajor ? outerSize() : m_innerSize; }
- inline int cols() const { return IsRowMajor ? m_innerSize : outerSize(); }
- inline int innerSize() const { return m_innerSize; }
- inline int outerSize() const { return static_cast<int>(m_data.size()); }
- inline int innerNonZeros(int j) const { return m_data[j].size(); }
+ inline Index rows() const { return IsRowMajor ? outerSize() : m_innerSize; }
+ inline Index cols() const { return IsRowMajor ? m_innerSize : outerSize(); }
+ inline Index innerSize() const { return m_innerSize; }
+ inline Index outerSize() const { return static_cast<Index>(m_data.size()); }
+ inline Index innerNonZeros(Index j) const { return m_data[j].size(); }
std::vector<CompressedStorage<Scalar> >& _data() { return m_data; }
const std::vector<CompressedStorage<Scalar> >& _data() const { return m_data; }
@@ -92,21 +92,21 @@
/** \returns the coefficient value at given position \a row, \a col
* This operation involves a log(rho*outer_size) binary search.
*/
- inline Scalar coeff(int row, int col) const
+ inline Scalar coeff(Index row, Index col) const
{
- const int outer = IsRowMajor ? row : col;
- const int inner = IsRowMajor ? col : row;
+ const Index outer = IsRowMajor ? row : col;
+ const Index inner = IsRowMajor ? col : row;
return m_data[outer].at(inner);
}
/** \returns a reference to the coefficient value at given position \a row, \a col
* This operation involves a log(rho*outer_size) binary search. If the coefficient does not
* exist yet, then a sorted insertion into a sequential buffer is performed.
*/
- inline Scalar& coeffRef(int row, int col)
+ inline Scalar& coeffRef(Index row, Index col)
{
- const int outer = IsRowMajor ? row : col;
- const int inner = IsRowMajor ? col : row;
+ const Index outer = IsRowMajor ? row : col;
+ const Index inner = IsRowMajor ? col : row;
return m_data[outer].atWithInsertion(inner);
}
@@ -114,44 +114,44 @@
void setZero()
{
- for (int j=0; j<outerSize(); ++j)
+ for (Index j=0; j<outerSize(); ++j)
m_data[j].clear();
}
/** \returns the number of non zero coefficients */
- int nonZeros() const
+ Index nonZeros() const
{
- int res = 0;
- for (int j=0; j<outerSize(); ++j)
- res += static_cast<int>(m_data[j].size());
+ Index res = 0;
+ for (Index j=0; j<outerSize(); ++j)
+ res += static_cast<Index>(m_data[j].size());
return res;
}
/** \deprecated
* Set the matrix to zero and reserve the memory for \a reserveSize nonzero coefficients. */
- EIGEN_DEPRECATED void startFill(int reserveSize = 1000)
+ EIGEN_DEPRECATED void startFill(Index reserveSize = 1000)
{
setZero();
reserve(reserveSize);
}
- void reserve(int reserveSize = 1000)
+ void reserve(Index reserveSize = 1000)
{
if (outerSize()>0)
{
- int reserveSizePerVector = std::max(reserveSize/outerSize(),4);
- for (int j=0; j<outerSize(); ++j)
+ Index reserveSizePerVector = std::max(reserveSize/outerSize(),Index(4));
+ for (Index j=0; j<outerSize(); ++j)
{
m_data[j].reserve(reserveSizePerVector);
}
}
}
- inline void startVec(int /*outer*/) {}
+ inline void startVec(Index /*outer*/) {}
- inline Scalar& insertBack(int outer, int inner)
+ inline Scalar& insertBack(Index outer, Index inner)
{
- ei_assert(outer<int(m_data.size()) && inner<m_innerSize && "out of range");
+ ei_assert(outer<Index(m_data.size()) && inner<m_innerSize && "out of range");
ei_assert(((m_data[outer].size()==0) || (m_data[outer].index(m_data[outer].size()-1)<inner))
&& "wrong sorted insertion");
m_data[outer].append(0, inner);
@@ -167,10 +167,10 @@
*
* \see fillrand(), coeffRef()
*/
- EIGEN_DEPRECATED Scalar& fill(int row, int col)
+ EIGEN_DEPRECATED Scalar& fill(Index row, Index col)
{
- const int outer = IsRowMajor ? row : col;
- const int inner = IsRowMajor ? col : row;
+ const Index outer = IsRowMajor ? row : col;
+ const Index inner = IsRowMajor ? col : row;
return insertBack(outer,inner);
}
@@ -179,18 +179,18 @@
* Compared to the generic coeffRef(), the only limitation is that we assume
* the coefficient does not exist yet.
*/
- EIGEN_DEPRECATED Scalar& fillrand(int row, int col)
+ EIGEN_DEPRECATED Scalar& fillrand(Index row, Index col)
{
return insert(row,col);
}
- inline Scalar& insert(int row, int col)
+ inline Scalar& insert(Index row, Index col)
{
- const int outer = IsRowMajor ? row : col;
- const int inner = IsRowMajor ? col : row;
+ const Index outer = IsRowMajor ? row : col;
+ const Index inner = IsRowMajor ? col : row;
- int startId = 0;
- int id = static_cast<int>(m_data[outer].size()) - 1;
+ Index startId = 0;
+ Index id = static_cast<Index>(m_data[outer].size()) - 1;
m_data[outer].resize(id+2,1);
while ( (id >= startId) && (m_data[outer].index(id) > inner) )
@@ -212,27 +212,27 @@
void prune(Scalar reference, RealScalar epsilon = NumTraits<RealScalar>::dummy_precision())
{
- for (int j=0; j<outerSize(); ++j)
+ for (Index j=0; j<outerSize(); ++j)
m_data[j].prune(reference,epsilon);
}
/** Resize the matrix without preserving the data (the matrix is set to zero)
*/
- void resize(int rows, int cols)
+ void resize(Index rows, Index cols)
{
- const int outerSize = IsRowMajor ? rows : cols;
+ const Index outerSize = IsRowMajor ? rows : cols;
m_innerSize = IsRowMajor ? cols : rows;
setZero();
- if (int(m_data.size()) != outerSize)
+ if (Index(m_data.size()) != outerSize)
{
m_data.resize(outerSize);
}
}
- void resizeAndKeepData(int rows, int cols)
+ void resizeAndKeepData(Index rows, Index cols)
{
- const int outerSize = IsRowMajor ? rows : cols;
- const int innerSize = IsRowMajor ? cols : rows;
+ const Index outerSize = IsRowMajor ? rows : cols;
+ const Index innerSize = IsRowMajor ? cols : rows;
if (m_innerSize>innerSize)
{
// remove all coefficients with innerCoord>=innerSize
@@ -252,7 +252,7 @@
ei_assert(innerSize()==0 && outerSize()==0);
}
- inline DynamicSparseMatrix(int rows, int cols)
+ inline DynamicSparseMatrix(Index rows, Index cols)
: m_innerSize(0)
{
resize(rows, cols);
@@ -308,15 +308,15 @@
{
typedef typename SparseVector<Scalar,_Flags>::InnerIterator Base;
public:
- InnerIterator(const DynamicSparseMatrix& mat, int outer)
+ InnerIterator(const DynamicSparseMatrix& mat, Index outer)
: Base(mat.m_data[outer]), m_outer(outer)
{}
- inline int row() const { return IsRowMajor ? m_outer : Base::index(); }
- inline int col() const { return IsRowMajor ? Base::index() : m_outer; }
+ inline Index row() const { return IsRowMajor ? m_outer : Base::index(); }
+ inline Index col() const { return IsRowMajor ? Base::index() : m_outer; }
protected:
- const int m_outer;
+ const Index m_outer;
};
#endif // EIGEN_DYNAMIC_SPARSEMATRIX_H
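
A usage sketch for the class above (assumes <Eigen/Sparse>; the SparseMatrix conversion at the end is the usual compression step):

    DynamicSparseMatrix<double> D(1000, 1000);
    D.reserve(100);                                // spread over the outer vectors, at least 4 each
    D.coeffRef(3, 7) += 1.0;                       // sorted insertion into column 7's buffer
    D.coeffRef(3, 7) += 1.0;                       // now a pure binary search, no insertion
    SparseMatrix<double> S(D);                     // compress once filling is done
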
diff --git a/Eigen/src/Sparse/MappedSparseMatrix.h b/Eigen/src/Sparse/MappedSparseMatrix.h
index 43ac6b3..07233ac 100644
--- a/Eigen/src/Sparse/MappedSparseMatrix.h
+++ b/Eigen/src/Sparse/MappedSparseMatrix.h
@@ -48,40 +48,40 @@
protected:
enum { IsRowMajor = Base::IsRowMajor };
- int m_outerSize;
- int m_innerSize;
- int m_nnz;
- int* m_outerIndex;
- int* m_innerIndices;
+ Index m_outerSize;
+ Index m_innerSize;
+ Index m_nnz;
+ Index* m_outerIndex;
+ Index* m_innerIndices;
Scalar* m_values;
public:
- inline int rows() const { return IsRowMajor ? m_outerSize : m_innerSize; }
- inline int cols() const { return IsRowMajor ? m_innerSize : m_outerSize; }
- inline int innerSize() const { return m_innerSize; }
- inline int outerSize() const { return m_outerSize; }
- inline int innerNonZeros(int j) const { return m_outerIndex[j+1]-m_outerIndex[j]; }
+ inline Index rows() const { return IsRowMajor ? m_outerSize : m_innerSize; }
+ inline Index cols() const { return IsRowMajor ? m_innerSize : m_outerSize; }
+ inline Index innerSize() const { return m_innerSize; }
+ inline Index outerSize() const { return m_outerSize; }
+ inline Index innerNonZeros(Index j) const { return m_outerIndex[j+1]-m_outerIndex[j]; }
//----------------------------------------
// direct access interface
inline const Scalar* _valuePtr() const { return m_values; }
inline Scalar* _valuePtr() { return m_values; }
- inline const int* _innerIndexPtr() const { return m_innerIndices; }
- inline int* _innerIndexPtr() { return m_innerIndices; }
+ inline const Index* _innerIndexPtr() const { return m_innerIndices; }
+ inline Index* _innerIndexPtr() { return m_innerIndices; }
- inline const int* _outerIndexPtr() const { return m_outerIndex; }
- inline int* _outerIndexPtr() { return m_outerIndex; }
+ inline const Index* _outerIndexPtr() const { return m_outerIndex; }
+ inline Index* _outerIndexPtr() { return m_outerIndex; }
//----------------------------------------
- inline Scalar coeff(int row, int col) const
+ inline Scalar coeff(Index row, Index col) const
{
- const int outer = IsRowMajor ? row : col;
- const int inner = IsRowMajor ? col : row;
+ const Index outer = IsRowMajor ? row : col;
+ const Index inner = IsRowMajor ? col : row;
- int start = m_outerIndex[outer];
- int end = m_outerIndex[outer+1];
+ Index start = m_outerIndex[outer];
+ Index end = m_outerIndex[outer+1];
if (start==end)
return Scalar(0);
else if (end>0 && inner==m_innerIndices[end-1])
@@ -89,22 +89,22 @@
// ^^ optimization: let's first check if it is the last coefficient
// (very common in high level algorithms)
- const int* r = std::lower_bound(&m_innerIndices[start],&m_innerIndices[end-1],inner);
- const int id = r-&m_innerIndices[0];
+ const Index* r = std::lower_bound(&m_innerIndices[start],&m_innerIndices[end-1],inner);
+ const Index id = r-&m_innerIndices[0];
return ((*r==inner) && (id<end)) ? m_values[id] : Scalar(0);
}
- inline Scalar& coeffRef(int row, int col)
+ inline Scalar& coeffRef(Index row, Index col)
{
- const int outer = IsRowMajor ? row : col;
- const int inner = IsRowMajor ? col : row;
+ const Index outer = IsRowMajor ? row : col;
+ const Index inner = IsRowMajor ? col : row;
- int start = m_outerIndex[outer];
- int end = m_outerIndex[outer+1];
+ Index start = m_outerIndex[outer];
+ Index end = m_outerIndex[outer+1];
ei_assert(end>=start && "you probably called coeffRef on a non finalized matrix");
ei_assert(end>start && "coeffRef cannot be called on a zero coefficient");
- int* r = std::lower_bound(&m_innerIndices[start],&m_innerIndices[end],inner);
- const int id = r-&m_innerIndices[0];
+ Index* r = std::lower_bound(&m_innerIndices[start],&m_innerIndices[end],inner);
+ const Index id = r-&m_innerIndices[0];
ei_assert((*r==inner) && (id<end) && "coeffRef cannot be called on a zero coefficient");
return m_values[id];
}
@@ -112,9 +112,9 @@
class InnerIterator;
/** \returns the number of non zero coefficients */
- inline int nonZeros() const { return m_nnz; }
+ inline Index nonZeros() const { return m_nnz; }
- inline MappedSparseMatrix(int rows, int cols, int nnz, int* outerIndexPtr, int* innerIndexPtr, Scalar* valuePtr)
+ inline MappedSparseMatrix(Index rows, Index cols, Index nnz, Index* outerIndexPtr, Index* innerIndexPtr, Scalar* valuePtr)
: m_outerSize(IsRowMajor?rows:cols), m_innerSize(IsRowMajor?cols:rows), m_nnz(nnz), m_outerIndex(outerIndexPtr),
m_innerIndices(innerIndexPtr), m_values(valuePtr)
{}
@@ -139,7 +139,7 @@
class MappedSparseMatrix<Scalar,_Flags>::InnerIterator
{
public:
- InnerIterator(const MappedSparseMatrix& mat, int outer)
+ InnerIterator(const MappedSparseMatrix& mat, Index outer)
: m_matrix(mat),
m_outer(outer),
m_id(mat._outerIndexPtr()[outer]),
@@ -148,7 +148,7 @@
{}
template<unsigned int Added, unsigned int Removed>
- InnerIterator(const Flagged<MappedSparseMatrix,Added,Removed>& mat, int outer)
+ InnerIterator(const Flagged<MappedSparseMatrix,Added,Removed>& mat, Index outer)
: m_matrix(mat._expression()), m_id(m_matrix._outerIndexPtr()[outer]),
m_start(m_id), m_end(m_matrix._outerIndexPtr()[outer+1])
{}
@@ -158,18 +158,18 @@
inline Scalar value() const { return m_matrix._valuePtr()[m_id]; }
inline Scalar& valueRef() { return const_cast<Scalar&>(m_matrix._valuePtr()[m_id]); }
- inline int index() const { return m_matrix._innerIndexPtr()[m_id]; }
- inline int row() const { return IsRowMajor ? m_outer : index(); }
- inline int col() const { return IsRowMajor ? index() : m_outer; }
+ inline Index index() const { return m_matrix._innerIndexPtr()[m_id]; }
+ inline Index row() const { return IsRowMajor ? m_outer : index(); }
+ inline Index col() const { return IsRowMajor ? index() : m_outer; }
inline operator bool() const { return (m_id < m_end) && (m_id>=m_start); }
protected:
const MappedSparseMatrix& m_matrix;
- const int m_outer;
- int m_id;
- const int m_start;
- const int m_end;
+ const Index m_outer;
+ Index m_id;
+ const Index m_start;
+ const Index m_end;
};
#endif // EIGEN_MAPPED_SPARSEMATRIX_H
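Illustrative sketch of the updated MappedSparseMatrix constructor (not part of the diff): the point of the signature change is that user-provided CSC/CSR index arrays must now be arrays of Index rather than int. The array contents below are arbitrary.

#include <Eigen/Sparse>
using namespace Eigen;

int main()
{
  typedef MappedSparseMatrix<double> Mapped;  // column-major by default
  typedef Mapped::Index Index;

  // A 3x3 CSC matrix with two nonzeros: (0,0)=1 and (2,1)=2.
  Index outer[4]   = {0, 1, 2, 2};  // cols+1 entries
  Index inner[2]   = {0, 2};        // row index of each stored value
  double values[2] = {1.0, 2.0};

  // Wraps the arrays without copying them.
  Mapped m(3, 3, 2, outer, inner, values);
  return (m.nonZeros() == 2 && m.coeff(2, 1) == 2.0) ? 0 : 1;
}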
diff --git a/Eigen/src/Sparse/RandomSetter.h b/Eigen/src/Sparse/RandomSetter.h
index 76f24cf..abe9881 100644
--- a/Eigen/src/Sparse/RandomSetter.h
+++ b/Eigen/src/Sparse/RandomSetter.h
@@ -166,7 +166,9 @@
,int OuterPacketBits = 6>
class RandomSetter
{
- typedef typename ei_traits<SparseMatrixType>::Scalar Scalar;
+ typedef typename SparseMatrixType::Scalar Scalar;
+ typedef typename SparseMatrixType::Index Index;
+
struct ScalarWrapper
{
ScalarWrapper() : value(0) {}
@@ -194,14 +196,14 @@
inline RandomSetter(SparseMatrixType& target)
: mp_target(&target)
{
- const int outerSize = SwapStorage ? target.innerSize() : target.outerSize();
- const int innerSize = SwapStorage ? target.outerSize() : target.innerSize();
+ const Index outerSize = SwapStorage ? target.innerSize() : target.outerSize();
+ const Index innerSize = SwapStorage ? target.outerSize() : target.innerSize();
m_outerPackets = outerSize >> OuterPacketBits;
if (outerSize&OuterPacketMask)
m_outerPackets += 1;
m_hashmaps = new HashMapType[m_outerPackets];
// compute number of bits needed to store inner indices
- int aux = innerSize - 1;
+ Index aux = innerSize - 1;
m_keyBitsOffset = 0;
while (aux)
{
@@ -209,11 +211,11 @@
aux = aux >> 1;
}
KeyType ik = (1<<(OuterPacketBits+m_keyBitsOffset));
- for (int k=0; k<m_outerPackets; ++k)
+ for (Index k=0; k<m_outerPackets; ++k)
MapTraits<ScalarWrapper>::setInvalidKey(m_hashmaps[k],ik);
// insert current coeffs
- for (int j=0; j<mp_target->outerSize(); ++j)
+ for (Index j=0; j<mp_target->outerSize(); ++j)
for (typename SparseMatrixType::InnerIterator it(*mp_target,j); it; ++it)
(*this)(TargetRowMajor?j:it.index(), TargetRowMajor?it.index():j) = it.value();
}
@@ -226,18 +228,18 @@
{
mp_target->setZero();
mp_target->reserve(nonZeros());
- int prevOuter = -1;
- for (int k=0; k<m_outerPackets; ++k)
+ Index prevOuter = -1;
+ for (Index k=0; k<m_outerPackets; ++k)
{
- const int outerOffset = (1<<OuterPacketBits) * k;
+ const Index outerOffset = (1<<OuterPacketBits) * k;
typename HashMapType::iterator end = m_hashmaps[k].end();
for (typename HashMapType::iterator it = m_hashmaps[k].begin(); it!=end; ++it)
{
- const int outer = (it->first >> m_keyBitsOffset) + outerOffset;
- const int inner = it->first & keyBitsMask;
+ const Index outer = (it->first >> m_keyBitsOffset) + outerOffset;
+ const Index inner = it->first & keyBitsMask;
if (prevOuter!=outer)
{
- for (int j=prevOuter+1;j<=outer;++j)
+ for (Index j=prevOuter+1;j<=outer;++j)
mp_target->startVec(j);
prevOuter = outer;
}
@@ -251,20 +253,20 @@
VectorXi positions(mp_target->outerSize());
positions.setZero();
// pass 1
- for (int k=0; k<m_outerPackets; ++k)
+ for (Index k=0; k<m_outerPackets; ++k)
{
typename HashMapType::iterator end = m_hashmaps[k].end();
for (typename HashMapType::iterator it = m_hashmaps[k].begin(); it!=end; ++it)
{
- const int outer = it->first & keyBitsMask;
+ const Index outer = it->first & keyBitsMask;
++positions[outer];
}
}
// prefix sum
- int count = 0;
- for (int j=0; j<mp_target->outerSize(); ++j)
+ Index count = 0;
+ for (Index j=0; j<mp_target->outerSize(); ++j)
{
- int tmp = positions[j];
+ Index tmp = positions[j];
mp_target->_outerIndexPtr()[j] = count;
positions[j] = count;
count += tmp;
@@ -272,20 +274,20 @@
mp_target->_outerIndexPtr()[mp_target->outerSize()] = count;
mp_target->resizeNonZeros(count);
// pass 2
- for (int k=0; k<m_outerPackets; ++k)
+ for (Index k=0; k<m_outerPackets; ++k)
{
- const int outerOffset = (1<<OuterPacketBits) * k;
+ const Index outerOffset = (1<<OuterPacketBits) * k;
typename HashMapType::iterator end = m_hashmaps[k].end();
for (typename HashMapType::iterator it = m_hashmaps[k].begin(); it!=end; ++it)
{
- const int inner = (it->first >> m_keyBitsOffset) + outerOffset;
- const int outer = it->first & keyBitsMask;
+ const Index inner = (it->first >> m_keyBitsOffset) + outerOffset;
+ const Index outer = it->first & keyBitsMask;
// sorted insertion
// Note that we have to deal with at most 2^OuterPacketBits unsorted coefficients,
// moreover those 2^OuterPacketBits coeffs are likely to be sparse, and so only a
// small fraction of them have to be sorted, whence the following simple procedure:
- int posStart = mp_target->_outerIndexPtr()[outer];
- int i = (positions[outer]++) - 1;
+ Index posStart = mp_target->_outerIndexPtr()[outer];
+ Index i = (positions[outer]++) - 1;
while ( (i >= posStart) && (mp_target->_innerIndexPtr()[i] > inner) )
{
mp_target->_valuePtr()[i+1] = mp_target->_valuePtr()[i];
@@ -301,14 +303,14 @@
}
/** \returns a reference to the coefficient at given coordinates \a row, \a col */
- Scalar& operator() (int row, int col)
+ Scalar& operator() (Index row, Index col)
{
ei_assert(((!IsUpper) || (row<=col)) && "Invalid access to an upper triangular matrix");
ei_assert(((!IsLower) || (col<=row)) && "Invalid access to a lower triangular matrix");
- const int outer = SetterRowMajor ? row : col;
- const int inner = SetterRowMajor ? col : row;
- const int outerMajor = outer >> OuterPacketBits; // index of the packet/map
- const int outerMinor = outer & OuterPacketMask; // index of the inner vector in the packet
+ const Index outer = SetterRowMajor ? row : col;
+ const Index inner = SetterRowMajor ? col : row;
+ const Index outerMajor = outer >> OuterPacketBits; // index of the packet/map
+ const Index outerMinor = outer & OuterPacketMask; // index of the inner vector in the packet
const KeyType key = (KeyType(outerMinor)<<m_keyBitsOffset) | inner;
return m_hashmaps[outerMajor][key].value;
}
@@ -318,11 +320,11 @@
* \note According to the underlying map/hash_map implementation,
* this function might be quite expensive.
*/
- int nonZeros() const
+ Index nonZeros() const
{
- int nz = 0;
- for (int k=0; k<m_outerPackets; ++k)
- nz += static_cast<int>(m_hashmaps[k].size());
+ Index nz = 0;
+ for (Index k=0; k<m_outerPackets; ++k)
+ nz += static_cast<Index>(m_hashmaps[k].size());
return nz;
}
@@ -331,7 +333,7 @@
HashMapType* m_hashmaps;
SparseMatrixType* mp_target;
- int m_outerPackets;
+ Index m_outerPackets;
unsigned char m_keyBitsOffset;
};
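Usage sketch for RandomSetter (illustrative, not part of the diff); coefficients are buffered in the hash maps and written back, sorted, when the setter goes out of scope:

#include <Eigen/Sparse>
using namespace Eigen;

int main()
{
  SparseMatrix<double> m(100, 100);
  {
    // operator()(Index,Index) above is the only write entry point;
    // random-order writes are O(1) amortized here.
    RandomSetter<SparseMatrix<double> > setter(m);
    setter(42, 7) = 1.0;
    setter(7, 42) += 2.0;
  } // destructor flushes the hash maps back into m
  return m.nonZeros() == 2 ? 0 : 1;
}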
diff --git a/Eigen/src/Sparse/SparseBlock.h b/Eigen/src/Sparse/SparseBlock.h
index bdbc460..bf8b5ad 100644
--- a/Eigen/src/Sparse/SparseBlock.h
+++ b/Eigen/src/Sparse/SparseBlock.h
@@ -54,22 +54,22 @@
class InnerIterator: public MatrixType::InnerIterator
{
public:
- inline InnerIterator(const SparseInnerVectorSet& xpr, int outer)
+ inline InnerIterator(const SparseInnerVectorSet& xpr, Index outer)
: MatrixType::InnerIterator(xpr.m_matrix, xpr.m_outerStart + outer), m_outer(outer)
{}
- inline int row() const { return IsRowMajor ? m_outer : this->index(); }
- inline int col() const { return IsRowMajor ? this->index() : m_outer; }
+ inline Index row() const { return IsRowMajor ? m_outer : this->index(); }
+ inline Index col() const { return IsRowMajor ? this->index() : m_outer; }
protected:
- int m_outer;
+ Index m_outer;
};
- inline SparseInnerVectorSet(const MatrixType& matrix, int outerStart, int outerSize)
+ inline SparseInnerVectorSet(const MatrixType& matrix, Index outerStart, Index outerSize)
: m_matrix(matrix), m_outerStart(outerStart), m_outerSize(outerSize)
{
ei_assert( (outerStart>=0) && ((outerStart+outerSize)<=matrix.outerSize()) );
}
- inline SparseInnerVectorSet(const MatrixType& matrix, int outer)
+ inline SparseInnerVectorSet(const MatrixType& matrix, Index outer)
: m_matrix(matrix), m_outerStart(outer), m_outerSize(Size)
{
ei_assert(Size!=Dynamic);
@@ -88,15 +88,14 @@
// return *this;
// }
- EIGEN_STRONG_INLINE int rows() const { return IsRowMajor ? m_outerSize.value() : m_matrix.rows(); }
- EIGEN_STRONG_INLINE int cols() const { return IsRowMajor ? m_matrix.cols() : m_outerSize.value(); }
+ EIGEN_STRONG_INLINE Index rows() const { return IsRowMajor ? m_outerSize.value() : m_matrix.rows(); }
+ EIGEN_STRONG_INLINE Index cols() const { return IsRowMajor ? m_matrix.cols() : m_outerSize.value(); }
protected:
const typename MatrixType::Nested m_matrix;
- int m_outerStart;
- const ei_int_if_dynamic<Size> m_outerSize;
-
+ Index m_outerStart;
+ const ei_variable_if_dynamic<Index, Size> m_outerSize;
};
/***************************************************************************
@@ -116,22 +115,22 @@
class InnerIterator: public MatrixType::InnerIterator
{
public:
- inline InnerIterator(const SparseInnerVectorSet& xpr, int outer)
+ inline InnerIterator(const SparseInnerVectorSet& xpr, Index outer)
: MatrixType::InnerIterator(xpr.m_matrix, xpr.m_outerStart + outer), m_outer(outer)
{}
- inline int row() const { return IsRowMajor ? m_outer : this->index(); }
- inline int col() const { return IsRowMajor ? this->index() : m_outer; }
+ inline Index row() const { return IsRowMajor ? m_outer : this->index(); }
+ inline Index col() const { return IsRowMajor ? this->index() : m_outer; }
protected:
- int m_outer;
+ Index m_outer;
};
- inline SparseInnerVectorSet(const MatrixType& matrix, int outerStart, int outerSize)
+ inline SparseInnerVectorSet(const MatrixType& matrix, Index outerStart, Index outerSize)
: m_matrix(matrix), m_outerStart(outerStart), m_outerSize(outerSize)
{
ei_assert( (outerStart>=0) && ((outerStart+outerSize)<=matrix.outerSize()) );
}
- inline SparseInnerVectorSet(const MatrixType& matrix, int outer)
+ inline SparseInnerVectorSet(const MatrixType& matrix, Index outer)
: m_matrix(matrix), m_outerStart(outer), m_outerSize(Size)
{
ei_assert(Size!=Dynamic);
@@ -150,7 +149,7 @@
else
{
// evaluate/copy vector per vector
- for (int j=0; j<m_outerSize.value(); ++j)
+ for (Index j=0; j<m_outerSize.value(); ++j)
{
SparseVector<Scalar,IsRowMajor ? RowMajorBit : 0> aux(other.innerVector(j));
m_matrix.const_cast_derived()._data()[m_outerStart+j].swap(aux._data());
@@ -164,10 +163,10 @@
return operator=<SparseInnerVectorSet>(other);
}
- int nonZeros() const
+ Index nonZeros() const
{
- int count = 0;
- for (int j=0; j<m_outerSize; ++j)
+ Index count = 0;
+ for (Index j=0; j<m_outerSize; ++j)
count += m_matrix._data()[m_outerStart+j].size();
return count;
}
@@ -185,14 +184,14 @@
// return *this;
// }
- EIGEN_STRONG_INLINE int rows() const { return IsRowMajor ? m_outerSize.value() : m_matrix.rows(); }
- EIGEN_STRONG_INLINE int cols() const { return IsRowMajor ? m_matrix.cols() : m_outerSize.value(); }
+ EIGEN_STRONG_INLINE Index rows() const { return IsRowMajor ? m_outerSize.value() : m_matrix.rows(); }
+ EIGEN_STRONG_INLINE Index cols() const { return IsRowMajor ? m_matrix.cols() : m_outerSize.value(); }
protected:
const typename MatrixType::Nested m_matrix;
- int m_outerStart;
- const ei_int_if_dynamic<Size> m_outerSize;
+ Index m_outerStart;
+ const ei_variable_if_dynamic<Index, Size> m_outerSize;
};
@@ -214,22 +213,22 @@
class InnerIterator: public MatrixType::InnerIterator
{
public:
- inline InnerIterator(const SparseInnerVectorSet& xpr, int outer)
+ inline InnerIterator(const SparseInnerVectorSet& xpr, Index outer)
: MatrixType::InnerIterator(xpr.m_matrix, xpr.m_outerStart + outer), m_outer(outer)
{}
- inline int row() const { return IsRowMajor ? m_outer : this->index(); }
- inline int col() const { return IsRowMajor ? this->index() : m_outer; }
+ inline Index row() const { return IsRowMajor ? m_outer : this->index(); }
+ inline Index col() const { return IsRowMajor ? this->index() : m_outer; }
protected:
- int m_outer;
+ Index m_outer;
};
- inline SparseInnerVectorSet(const MatrixType& matrix, int outerStart, int outerSize)
+ inline SparseInnerVectorSet(const MatrixType& matrix, Index outerStart, Index outerSize)
: m_matrix(matrix), m_outerStart(outerStart), m_outerSize(outerSize)
{
ei_assert( (outerStart>=0) && ((outerStart+outerSize)<=matrix.outerSize()) );
}
- inline SparseInnerVectorSet(const MatrixType& matrix, int outer)
+ inline SparseInnerVectorSet(const MatrixType& matrix, Index outer)
: m_matrix(matrix), m_outerStart(outer), m_outerSize(Size)
{
ei_assert(Size==1);
@@ -248,7 +247,7 @@
else
{
// evaluate/copy vector per vector
- for (int j=0; j<m_outerSize.value(); ++j)
+ for (Index j=0; j<m_outerSize.value(); ++j)
{
SparseVector<Scalar,IsRowMajor ? RowMajorBit : 0> aux(other.innerVector(j));
m_matrix.const_cast_derived()._data()[m_outerStart+j].swap(aux._data());
@@ -267,17 +266,17 @@
inline Scalar* _valuePtr()
{ return m_matrix.const_cast_derived()._valuePtr() + m_matrix._outerIndexPtr()[m_outerStart]; }
- inline const int* _innerIndexPtr() const
+ inline const Index* _innerIndexPtr() const
{ return m_matrix._innerIndexPtr() + m_matrix._outerIndexPtr()[m_outerStart]; }
- inline int* _innerIndexPtr()
+ inline Index* _innerIndexPtr()
{ return m_matrix.const_cast_derived()._innerIndexPtr() + m_matrix._outerIndexPtr()[m_outerStart]; }
- inline const int* _outerIndexPtr() const
+ inline const Index* _outerIndexPtr() const
{ return m_matrix._outerIndexPtr() + m_outerStart; }
- inline int* _outerIndexPtr()
+ inline Index* _outerIndexPtr()
{ return m_matrix.const_cast_derived()._outerIndexPtr() + m_outerStart; }
- int nonZeros() const
+ Index nonZeros() const
{
return size_t(m_matrix._outerIndexPtr()[m_outerStart+m_outerSize.value()])
- size_t(m_matrix._outerIndexPtr()[m_outerStart]); }
@@ -295,14 +294,14 @@
// return *this;
// }
- EIGEN_STRONG_INLINE int rows() const { return IsRowMajor ? m_outerSize.value() : m_matrix.rows(); }
- EIGEN_STRONG_INLINE int cols() const { return IsRowMajor ? m_matrix.cols() : m_outerSize.value(); }
+ EIGEN_STRONG_INLINE Index rows() const { return IsRowMajor ? m_outerSize.value() : m_matrix.rows(); }
+ EIGEN_STRONG_INLINE Index cols() const { return IsRowMajor ? m_matrix.cols() : m_outerSize.value(); }
protected:
const typename MatrixType::Nested m_matrix;
- int m_outerStart;
- const ei_int_if_dynamic<Size> m_outerSize;
+ Index m_outerStart;
+ const ei_variable_if_dynamic<Index, Size> m_outerSize;
};
@@ -310,7 +309,7 @@
/** \returns the i-th row of the matrix \c *this. For row-major matrix only. */
template<typename Derived>
-SparseInnerVectorSet<Derived,1> SparseMatrixBase<Derived>::row(int i)
+SparseInnerVectorSet<Derived,1> SparseMatrixBase<Derived>::row(Index i)
{
EIGEN_STATIC_ASSERT(IsRowMajor,THIS_METHOD_IS_ONLY_FOR_ROW_MAJOR_MATRICES);
return innerVector(i);
@@ -319,7 +318,7 @@
/** \returns the i-th row of the matrix \c *this. For row-major matrix only.
* (read-only version) */
template<typename Derived>
-const SparseInnerVectorSet<Derived,1> SparseMatrixBase<Derived>::row(int i) const
+const SparseInnerVectorSet<Derived,1> SparseMatrixBase<Derived>::row(Index i) const
{
EIGEN_STATIC_ASSERT(IsRowMajor,THIS_METHOD_IS_ONLY_FOR_ROW_MAJOR_MATRICES);
return innerVector(i);
@@ -327,7 +326,7 @@
/** \returns the i-th column of the matrix \c *this. For column-major matrix only. */
template<typename Derived>
-SparseInnerVectorSet<Derived,1> SparseMatrixBase<Derived>::col(int i)
+SparseInnerVectorSet<Derived,1> SparseMatrixBase<Derived>::col(Index i)
{
EIGEN_STATIC_ASSERT(!IsRowMajor,THIS_METHOD_IS_ONLY_FOR_COLUMN_MAJOR_MATRICES);
return innerVector(i);
@@ -336,7 +335,7 @@
/** \returns the i-th column of the matrix \c *this. For column-major matrix only.
* (read-only version) */
template<typename Derived>
-const SparseInnerVectorSet<Derived,1> SparseMatrixBase<Derived>::col(int i) const
+const SparseInnerVectorSet<Derived,1> SparseMatrixBase<Derived>::col(Index i) const
{
EIGEN_STATIC_ASSERT(!IsRowMajor,THIS_METHOD_IS_ONLY_FOR_COLUMN_MAJOR_MATRICES);
return innerVector(i);
@@ -346,21 +345,21 @@
* is col-major (resp. row-major).
*/
template<typename Derived>
-SparseInnerVectorSet<Derived,1> SparseMatrixBase<Derived>::innerVector(int outer)
+SparseInnerVectorSet<Derived,1> SparseMatrixBase<Derived>::innerVector(Index outer)
{ return SparseInnerVectorSet<Derived,1>(derived(), outer); }
/** \returns the \a outer -th column (resp. row) of the matrix \c *this if \c *this
* is col-major (resp. row-major). Read-only.
*/
template<typename Derived>
-const SparseInnerVectorSet<Derived,1> SparseMatrixBase<Derived>::innerVector(int outer) const
+const SparseInnerVectorSet<Derived,1> SparseMatrixBase<Derived>::innerVector(Index outer) const
{ return SparseInnerVectorSet<Derived,1>(derived(), outer); }
//----------
/** \returns the i-th row of the matrix \c *this. For row-major matrix only. */
template<typename Derived>
-SparseInnerVectorSet<Derived,Dynamic> SparseMatrixBase<Derived>::subrows(int start, int size)
+SparseInnerVectorSet<Derived,Dynamic> SparseMatrixBase<Derived>::subrows(Index start, Index size)
{
EIGEN_STATIC_ASSERT(IsRowMajor,THIS_METHOD_IS_ONLY_FOR_ROW_MAJOR_MATRICES);
return innerVectors(start, size);
@@ -369,7 +368,7 @@
/** \returns the i-th row of the matrix \c *this. For row-major matrix only.
* (read-only version) */
template<typename Derived>
-const SparseInnerVectorSet<Derived,Dynamic> SparseMatrixBase<Derived>::subrows(int start, int size) const
+const SparseInnerVectorSet<Derived,Dynamic> SparseMatrixBase<Derived>::subrows(Index start, Index size) const
{
EIGEN_STATIC_ASSERT(IsRowMajor,THIS_METHOD_IS_ONLY_FOR_ROW_MAJOR_MATRICES);
return innerVectors(start, size);
@@ -377,7 +376,7 @@
/** \returns the i-th column of the matrix \c *this. For column-major matrix only. */
template<typename Derived>
-SparseInnerVectorSet<Derived,Dynamic> SparseMatrixBase<Derived>::subcols(int start, int size)
+SparseInnerVectorSet<Derived,Dynamic> SparseMatrixBase<Derived>::subcols(Index start, Index size)
{
EIGEN_STATIC_ASSERT(!IsRowMajor,THIS_METHOD_IS_ONLY_FOR_COLUMN_MAJOR_MATRICES);
return innerVectors(start, size);
@@ -386,7 +385,7 @@
/** \returns the i-th column of the matrix \c *this. For column-major matrix only.
* (read-only version) */
template<typename Derived>
-const SparseInnerVectorSet<Derived,Dynamic> SparseMatrixBase<Derived>::subcols(int start, int size) const
+const SparseInnerVectorSet<Derived,Dynamic> SparseMatrixBase<Derived>::subcols(Index start, Index size) const
{
EIGEN_STATIC_ASSERT(!IsRowMajor,THIS_METHOD_IS_ONLY_FOR_COLUMN_MAJOR_MATRICES);
return innerVectors(start, size);
@@ -396,14 +395,14 @@
* is col-major (resp. row-major).
*/
template<typename Derived>
-SparseInnerVectorSet<Derived,Dynamic> SparseMatrixBase<Derived>::innerVectors(int outerStart, int outerSize)
+SparseInnerVectorSet<Derived,Dynamic> SparseMatrixBase<Derived>::innerVectors(Index outerStart, Index outerSize)
{ return SparseInnerVectorSet<Derived,Dynamic>(derived(), outerStart, outerSize); }
/** \returns the \a outer -th column (resp. row) of the matrix \c *this if \c *this
* is col-major (resp. row-major). Read-only.
*/
template<typename Derived>
-const SparseInnerVectorSet<Derived,Dynamic> SparseMatrixBase<Derived>::innerVectors(int outerStart, int outerSize) const
+const SparseInnerVectorSet<Derived,Dynamic> SparseMatrixBase<Derived>::innerVectors(Index outerStart, Index outerSize) const
{ return SparseInnerVectorSet<Derived,Dynamic>(derived(), outerStart, outerSize); }
#endif // EIGEN_SPARSE_BLOCK_H
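Illustrative sketch of the inner-vector accessors declared above (not part of the diff):

#include <Eigen/Sparse>
using namespace Eigen;

int main()
{
  typedef SparseMatrix<double> SpMat;  // column-major, so innerVector == column
  SpMat m(10, 10);
  m.insert(1, 3) = 1.0;
  m.insert(4, 3) = 2.0;
  m.finalize();

  SpMat::Index a = m.innerVector(3).nonZeros();      // stored entries of column 3
  SpMat::Index b = m.innerVectors(2, 4).nonZeros();  // columns 2..5 as one block
  return (a == 2 && b == 2) ? 0 : 1;
}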
diff --git a/Eigen/src/Sparse/SparseCwiseBinaryOp.h b/Eigen/src/Sparse/SparseCwiseBinaryOp.h
index 91fbcb1..90878fe 100644
--- a/Eigen/src/Sparse/SparseCwiseBinaryOp.h
+++ b/Eigen/src/Sparse/SparseCwiseBinaryOp.h
@@ -68,10 +68,11 @@
: public ei_sparse_cwise_binary_op_inner_iterator_selector<BinaryOp,Lhs,Rhs,typename CwiseBinaryOpImpl<BinaryOp,Lhs,Rhs,Sparse>::InnerIterator>
{
public:
+ typedef typename Lhs::Index Index;
typedef ei_sparse_cwise_binary_op_inner_iterator_selector<
BinaryOp,Lhs,Rhs, InnerIterator> Base;
- EIGEN_STRONG_INLINE InnerIterator(const CwiseBinaryOpImpl& binOp, int outer)
+ EIGEN_STRONG_INLINE InnerIterator(const CwiseBinaryOpImpl& binOp, Index outer)
: Base(binOp.derived(),outer)
{}
};
@@ -95,9 +96,11 @@
typedef typename ei_traits<CwiseBinaryXpr>::_RhsNested _RhsNested;
typedef typename _LhsNested::InnerIterator LhsIterator;
typedef typename _RhsNested::InnerIterator RhsIterator;
+ typedef typename Lhs::Index Index;
+
public:
- EIGEN_STRONG_INLINE ei_sparse_cwise_binary_op_inner_iterator_selector(const CwiseBinaryXpr& xpr, int outer)
+ EIGEN_STRONG_INLINE ei_sparse_cwise_binary_op_inner_iterator_selector(const CwiseBinaryXpr& xpr, Index outer)
: m_lhsIter(xpr.lhs(),outer), m_rhsIter(xpr.rhs(),outer), m_functor(xpr.functor())
{
this->operator++();
@@ -134,9 +137,9 @@
EIGEN_STRONG_INLINE Scalar value() const { return m_value; }
- EIGEN_STRONG_INLINE int index() const { return m_id; }
- EIGEN_STRONG_INLINE int row() const { return Lhs::IsRowMajor ? m_lhsIter.row() : index(); }
- EIGEN_STRONG_INLINE int col() const { return Lhs::IsRowMajor ? index() : m_lhsIter.col(); }
+ EIGEN_STRONG_INLINE Index index() const { return m_id; }
+ EIGEN_STRONG_INLINE Index row() const { return Lhs::IsRowMajor ? m_lhsIter.row() : index(); }
+ EIGEN_STRONG_INLINE Index col() const { return Lhs::IsRowMajor ? index() : m_lhsIter.col(); }
EIGEN_STRONG_INLINE operator bool() const { return m_id>=0; }
@@ -145,7 +148,7 @@
RhsIterator m_rhsIter;
const BinaryOp& m_functor;
Scalar m_value;
- int m_id;
+ Index m_id;
};
// sparse - sparse (product)
@@ -159,9 +162,10 @@
typedef typename _LhsNested::InnerIterator LhsIterator;
typedef typename ei_traits<CwiseBinaryXpr>::_RhsNested _RhsNested;
typedef typename _RhsNested::InnerIterator RhsIterator;
+ typedef typename Lhs::Index Index;
public:
- EIGEN_STRONG_INLINE ei_sparse_cwise_binary_op_inner_iterator_selector(const CwiseBinaryXpr& xpr, int outer)
+ EIGEN_STRONG_INLINE ei_sparse_cwise_binary_op_inner_iterator_selector(const CwiseBinaryXpr& xpr, Index outer)
: m_lhsIter(xpr.lhs(),outer), m_rhsIter(xpr.rhs(),outer), m_functor(xpr.functor())
{
while (m_lhsIter && m_rhsIter && (m_lhsIter.index() != m_rhsIter.index()))
@@ -189,9 +193,9 @@
EIGEN_STRONG_INLINE Scalar value() const { return m_functor(m_lhsIter.value(), m_rhsIter.value()); }
- EIGEN_STRONG_INLINE int index() const { return m_lhsIter.index(); }
- EIGEN_STRONG_INLINE int row() const { return m_lhsIter.row(); }
- EIGEN_STRONG_INLINE int col() const { return m_lhsIter.col(); }
+ EIGEN_STRONG_INLINE Index index() const { return m_lhsIter.index(); }
+ EIGEN_STRONG_INLINE Index row() const { return m_lhsIter.row(); }
+ EIGEN_STRONG_INLINE Index col() const { return m_lhsIter.col(); }
EIGEN_STRONG_INLINE operator bool() const { return (m_lhsIter && m_rhsIter); }
@@ -211,10 +215,11 @@
typedef typename ei_traits<CwiseBinaryXpr>::_LhsNested _LhsNested;
typedef typename ei_traits<CwiseBinaryXpr>::RhsNested RhsNested;
typedef typename _LhsNested::InnerIterator LhsIterator;
+ typedef typename Lhs::Index Index;
enum { IsRowMajor = (int(Lhs::Flags)&RowMajorBit)==RowMajorBit };
public:
- EIGEN_STRONG_INLINE ei_sparse_cwise_binary_op_inner_iterator_selector(const CwiseBinaryXpr& xpr, int outer)
+ EIGEN_STRONG_INLINE ei_sparse_cwise_binary_op_inner_iterator_selector(const CwiseBinaryXpr& xpr, Index outer)
: m_rhs(xpr.rhs()), m_lhsIter(xpr.lhs(),outer), m_functor(xpr.functor()), m_outer(outer)
{}
@@ -228,9 +233,9 @@
{ return m_functor(m_lhsIter.value(),
m_rhs.coeff(IsRowMajor?m_outer:m_lhsIter.index(),IsRowMajor?m_lhsIter.index():m_outer)); }
- EIGEN_STRONG_INLINE int index() const { return m_lhsIter.index(); }
- EIGEN_STRONG_INLINE int row() const { return m_lhsIter.row(); }
- EIGEN_STRONG_INLINE int col() const { return m_lhsIter.col(); }
+ EIGEN_STRONG_INLINE Index index() const { return m_lhsIter.index(); }
+ EIGEN_STRONG_INLINE Index row() const { return m_lhsIter.row(); }
+ EIGEN_STRONG_INLINE Index col() const { return m_lhsIter.col(); }
EIGEN_STRONG_INLINE operator bool() const { return m_lhsIter; }
@@ -238,7 +243,7 @@
const RhsNested m_rhs;
LhsIterator m_lhsIter;
const BinaryFunc m_functor;
- const int m_outer;
+ const Index m_outer;
};
// sparse - dense (product)
@@ -250,10 +255,12 @@
typedef typename CwiseBinaryXpr::Scalar Scalar;
typedef typename ei_traits<CwiseBinaryXpr>::_RhsNested _RhsNested;
typedef typename _RhsNested::InnerIterator RhsIterator;
+ typedef typename Lhs::Index Index;
+
enum { IsRowMajor = (int(Rhs::Flags)&RowMajorBit)==RowMajorBit };
public:
- EIGEN_STRONG_INLINE ei_sparse_cwise_binary_op_inner_iterator_selector(const CwiseBinaryXpr& xpr, int outer)
+ EIGEN_STRONG_INLINE ei_sparse_cwise_binary_op_inner_iterator_selector(const CwiseBinaryXpr& xpr, Index outer)
: m_xpr(xpr), m_rhsIter(xpr.rhs(),outer), m_functor(xpr.functor()), m_outer(outer)
{}
@@ -266,9 +273,9 @@
EIGEN_STRONG_INLINE Scalar value() const
{ return m_functor(m_xpr.lhs().coeff(IsRowMajor?m_outer:m_rhsIter.index(),IsRowMajor?m_rhsIter.index():m_outer), m_rhsIter.value()); }
- EIGEN_STRONG_INLINE int index() const { return m_rhsIter.index(); }
- EIGEN_STRONG_INLINE int row() const { return m_rhsIter.row(); }
- EIGEN_STRONG_INLINE int col() const { return m_rhsIter.col(); }
+ EIGEN_STRONG_INLINE Index index() const { return m_rhsIter.index(); }
+ EIGEN_STRONG_INLINE Index row() const { return m_rhsIter.row(); }
+ EIGEN_STRONG_INLINE Index col() const { return m_rhsIter.col(); }
EIGEN_STRONG_INLINE operator bool() const { return m_rhsIter; }
@@ -276,7 +283,7 @@
const CwiseBinaryXpr& m_xpr;
RhsIterator m_rhsIter;
const BinaryFunc& m_functor;
- const int m_outer;
+ const Index m_outer;
};
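The selectors above are sorted-merge iterators: a union walk over the two index lists for a generic binary op, an intersection walk for a coefficient-wise product. At the API level (illustrative sketch, not part of the diff):

#include <Eigen/Sparse>
using namespace Eigen;

int main()
{
  typedef SparseMatrix<double> SpMat;
  SpMat a(4, 4), b(4, 4);
  a.insert(0, 0) = 1.0; a.finalize();
  b.insert(1, 1) = 2.0; b.finalize();

  SpMat sum  = a + b;               // union of index sets: 2 stored entries
  SpMat prod = a.cwiseProduct(b);   // intersection: empty here
  return (sum.nonZeros() == 2 && prod.nonZeros() == 0) ? 0 : 1;
}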
diff --git a/Eigen/src/Sparse/SparseCwiseUnaryOp.h b/Eigen/src/Sparse/SparseCwiseUnaryOp.h
index f3f8c82..5e12da4 100644
--- a/Eigen/src/Sparse/SparseCwiseUnaryOp.h
+++ b/Eigen/src/Sparse/SparseCwiseUnaryOp.h
@@ -57,9 +57,10 @@
typedef typename CwiseUnaryOpImpl::Scalar Scalar;
typedef typename ei_traits<Derived>::_XprTypeNested _MatrixTypeNested;
typedef typename _MatrixTypeNested::InnerIterator MatrixTypeIterator;
+ typedef typename MatrixType::Index Index;
public:
- EIGEN_STRONG_INLINE InnerIterator(const CwiseUnaryOpImpl& unaryOp, int outer)
+ EIGEN_STRONG_INLINE InnerIterator(const CwiseUnaryOpImpl& unaryOp, Index outer)
: m_iter(unaryOp.derived().nestedExpression(),outer), m_functor(unaryOp.derived().functor())
{}
@@ -68,9 +69,9 @@
EIGEN_STRONG_INLINE Scalar value() const { return m_functor(m_iter.value()); }
- EIGEN_STRONG_INLINE int index() const { return m_iter.index(); }
- EIGEN_STRONG_INLINE int row() const { return m_iter.row(); }
- EIGEN_STRONG_INLINE int col() const { return m_iter.col(); }
+ EIGEN_STRONG_INLINE Index index() const { return m_iter.index(); }
+ EIGEN_STRONG_INLINE Index row() const { return m_iter.row(); }
+ EIGEN_STRONG_INLINE Index col() const { return m_iter.col(); }
EIGEN_STRONG_INLINE operator bool() const { return m_iter; }
@@ -98,9 +99,10 @@
typedef typename CwiseUnaryViewImpl::Scalar Scalar;
typedef typename ei_traits<Derived>::_MatrixTypeNested _MatrixTypeNested;
typedef typename _MatrixTypeNested::InnerIterator MatrixTypeIterator;
+ typedef typename MatrixType::Index Index;
public:
- EIGEN_STRONG_INLINE InnerIterator(const CwiseUnaryViewImpl& unaryView, int outer)
+ EIGEN_STRONG_INLINE InnerIterator(const CwiseUnaryViewImpl& unaryView, Index outer)
: m_iter(unaryView.derived().nestedExpression(),outer), m_functor(unaryView.derived().functor())
{}
@@ -110,9 +112,9 @@
EIGEN_STRONG_INLINE Scalar value() const { return m_functor(m_iter.value()); }
EIGEN_STRONG_INLINE Scalar& valueRef() { return m_functor(m_iter.valueRef()); }
- EIGEN_STRONG_INLINE int index() const { return m_iter.index(); }
- EIGEN_STRONG_INLINE int row() const { return m_iter.row(); }
- EIGEN_STRONG_INLINE int col() const { return m_iter.col(); }
+ EIGEN_STRONG_INLINE Index index() const { return m_iter.index(); }
+ EIGEN_STRONG_INLINE Index row() const { return m_iter.row(); }
+ EIGEN_STRONG_INLINE Index col() const { return m_iter.col(); }
EIGEN_STRONG_INLINE operator bool() const { return m_iter; }
@@ -125,7 +127,7 @@
EIGEN_STRONG_INLINE Derived&
SparseMatrixBase<Derived>::operator*=(const Scalar& other)
{
- for (int j=0; j<outerSize(); ++j)
+ for (Index j=0; j<outerSize(); ++j)
for (typename Derived::InnerIterator i(derived(),j); i; ++i)
i.valueRef() *= other;
return derived();
@@ -135,7 +137,7 @@
EIGEN_STRONG_INLINE Derived&
SparseMatrixBase<Derived>::operator/=(const Scalar& other)
{
- for (int j=0; j<outerSize(); ++j)
+ for (Index j=0; j<outerSize(); ++j)
for (typename Derived::InnerIterator i(derived(),j); i; ++i)
i.valueRef() /= other;
return derived();
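operator*= and operator/= above touch only the stored coefficients through valueRef(); a sketch of the observable behavior (illustrative, not part of the diff):

#include <Eigen/Sparse>
using namespace Eigen;

int main()
{
  typedef SparseMatrix<double> SpMat;
  SpMat m(3, 3);
  m.insert(0, 0) = 2.0;
  m.insert(2, 1) = 4.0;
  m.finalize();

  m *= 0.5;  // scales the two stored values; sparsity pattern is untouched
  // The same traversal by hand, mirroring the implementation above:
  for (SpMat::Index j = 0; j < m.outerSize(); ++j)
    for (SpMat::InnerIterator it(m, j); it; ++it)
      it.valueRef() *= 2.0;  // undoes the scaling
  return (m.coeff(0, 0) == 2.0 && m.nonZeros() == 2) ? 0 : 1;
}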
diff --git a/Eigen/src/Sparse/SparseDiagonalProduct.h b/Eigen/src/Sparse/SparseDiagonalProduct.h
index d0d0517..70b35bc 100644
--- a/Eigen/src/Sparse/SparseDiagonalProduct.h
+++ b/Eigen/src/Sparse/SparseDiagonalProduct.h
@@ -93,8 +93,8 @@
ei_assert(lhs.cols() == rhs.rows() && "invalid sparse matrix * diagonal matrix product");
}
- EIGEN_STRONG_INLINE int rows() const { return m_lhs.rows(); }
- EIGEN_STRONG_INLINE int cols() const { return m_rhs.cols(); }
+ EIGEN_STRONG_INLINE Index rows() const { return m_lhs.rows(); }
+ EIGEN_STRONG_INLINE Index cols() const { return m_rhs.cols(); }
EIGEN_STRONG_INLINE const _LhsNested& lhs() const { return m_lhs; }
EIGEN_STRONG_INLINE const _RhsNested& rhs() const { return m_rhs; }
@@ -111,9 +111,10 @@
: public CwiseUnaryOp<ei_scalar_multiple_op<typename Lhs::Scalar>,Rhs>::InnerIterator
{
typedef typename CwiseUnaryOp<ei_scalar_multiple_op<typename Lhs::Scalar>,Rhs>::InnerIterator Base;
+ typedef typename Lhs::Index Index;
public:
inline ei_sparse_diagonal_product_inner_iterator_selector(
- const SparseDiagonalProductType& expr, int outer)
+ const SparseDiagonalProductType& expr, Index outer)
: Base(expr.rhs()*(expr.lhs().diagonal().coeff(outer)), outer)
{}
};
@@ -130,9 +131,10 @@
ei_scalar_product_op<typename Lhs::Scalar>,
SparseInnerVectorSet<Rhs,1>,
typename Lhs::DiagonalVectorType>::InnerIterator Base;
+ typedef typename Lhs::Index Index;
public:
inline ei_sparse_diagonal_product_inner_iterator_selector(
- const SparseDiagonalProductType& expr, int outer)
+ const SparseDiagonalProductType& expr, Index outer)
: Base(expr.rhs().innerVector(outer) .cwiseProduct(expr.lhs().diagonal()), 0)
{}
};
@@ -143,9 +145,10 @@
: public CwiseUnaryOp<ei_scalar_multiple_op<typename Rhs::Scalar>,Lhs>::InnerIterator
{
typedef typename CwiseUnaryOp<ei_scalar_multiple_op<typename Rhs::Scalar>,Lhs>::InnerIterator Base;
+ typedef typename Lhs::Index Index;
public:
inline ei_sparse_diagonal_product_inner_iterator_selector(
- const SparseDiagonalProductType& expr, int outer)
+ const SparseDiagonalProductType& expr, Index outer)
: Base(expr.lhs()*expr.rhs().diagonal().coeff(outer), outer)
{}
};
@@ -162,9 +165,10 @@
ei_scalar_product_op<typename Rhs::Scalar>,
SparseInnerVectorSet<Lhs,1>,
Transpose<typename Rhs::DiagonalVectorType> >::InnerIterator Base;
+ typedef typename Lhs::Index Index;
public:
inline ei_sparse_diagonal_product_inner_iterator_selector(
- const SparseDiagonalProductType& expr, int outer)
+ const SparseDiagonalProductType& expr, Index outer)
: Base(expr.lhs().innerVector(outer) .cwiseProduct(expr.rhs().diagonal().transpose()), 0)
{}
};
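Illustrative sketch of the diagonal products these selectors implement (not part of the diff; assumes the DiagonalBase * SparseMatrixBase operator* overloads at this revision):

#include <Eigen/Core>
#include <Eigen/Sparse>
using namespace Eigen;

int main()
{
  typedef SparseMatrix<double> SpMat;
  SpMat m(3, 3);
  m.insert(1, 2) = 2.0;
  m.finalize();

  Vector3d d(2.0, 3.0, 4.0);
  SpMat left  = d.asDiagonal() * m;  // scales row i by d(i)
  SpMat right = m * d.asDiagonal(); // scales column j by d(j)
  return (left.coeff(1, 2) == 6.0 && right.coeff(1, 2) == 8.0) ? 0 : 1;
}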
diff --git a/Eigen/src/Sparse/SparseLDLT.h b/Eigen/src/Sparse/SparseLDLT.h
index 28797a6..b6a51c6 100644
--- a/Eigen/src/Sparse/SparseLDLT.h
+++ b/Eigen/src/Sparse/SparseLDLT.h
@@ -78,6 +78,7 @@
{
protected:
typedef typename MatrixType::Scalar Scalar;
+ typedef typename MatrixType::Index Index;
typedef typename NumTraits<typename MatrixType::Scalar>::Real RealScalar;
typedef SparseMatrix<Scalar> CholMatrixType;
typedef Matrix<Scalar,MatrixType::ColsAtCompileTime,1> VectorType;
@@ -188,36 +189,36 @@
void SparseLDLT<MatrixType,Backend>::_symbolic(const MatrixType& a)
{
assert(a.rows()==a.cols());
- const int size = a.rows();
+ const Index size = a.rows();
m_matrix.resize(size, size);
m_parent.resize(size);
m_nonZerosPerCol.resize(size);
- int * tags = ei_aligned_stack_new(int, size);
+ Index * tags = ei_aligned_stack_new(Index, size);
- const int* Ap = a._outerIndexPtr();
- const int* Ai = a._innerIndexPtr();
- int* Lp = m_matrix._outerIndexPtr();
- const int* P = 0;
- int* Pinv = 0;
+ const Index* Ap = a._outerIndexPtr();
+ const Index* Ai = a._innerIndexPtr();
+ Index* Lp = m_matrix._outerIndexPtr();
+ const Index* P = 0;
+ Index* Pinv = 0;
if (P)
{
/* If P is present then compute Pinv, the inverse of P */
- for (int k = 0; k < size; ++k)
+ for (Index k = 0; k < size; ++k)
Pinv[P[k]] = k;
}
- for (int k = 0; k < size; ++k)
+ for (Index k = 0; k < size; ++k)
{
/* L(k,:) pattern: all nodes reachable in etree from nz in A(0:k-1,k) */
m_parent[k] = -1; /* parent of k is not yet known */
tags[k] = k; /* mark node k as visited */
m_nonZerosPerCol[k] = 0; /* count of nonzeros in column k of L */
- int kk = P ? P[k] : k; /* kth original, or permuted, column */
- int p2 = Ap[kk+1];
- for (int p = Ap[kk]; p < p2; ++p)
+ Index kk = P ? P[k] : k; /* kth original, or permuted, column */
+ Index p2 = Ap[kk+1];
+ for (Index p = Ap[kk]; p < p2; ++p)
{
/* A (i,k) is nonzero (original or permuted A) */
- int i = Pinv ? Pinv[Ai[p]] : Ai[p];
+ Index i = Pinv ? Pinv[Ai[p]] : Ai[p];
if (i < k)
{
/* follow path from i to root of etree, stop at flagged node */
@@ -234,53 +235,53 @@
}
/* construct Lp index array from m_nonZerosPerCol column counts */
Lp[0] = 0;
- for (int k = 0; k < size; ++k)
+ for (Index k = 0; k < size; ++k)
Lp[k+1] = Lp[k] + m_nonZerosPerCol[k];
m_matrix.resizeNonZeros(Lp[size]);
- ei_aligned_stack_delete(int, tags, size);
+ ei_aligned_stack_delete(Index, tags, size);
}
template<typename MatrixType, int Backend>
bool SparseLDLT<MatrixType,Backend>::_numeric(const MatrixType& a)
{
assert(a.rows()==a.cols());
- const int size = a.rows();
+ const Index size = a.rows();
assert(m_parent.size()==size);
assert(m_nonZerosPerCol.size()==size);
- const int* Ap = a._outerIndexPtr();
- const int* Ai = a._innerIndexPtr();
+ const Index* Ap = a._outerIndexPtr();
+ const Index* Ai = a._innerIndexPtr();
const Scalar* Ax = a._valuePtr();
- const int* Lp = m_matrix._outerIndexPtr();
- int* Li = m_matrix._innerIndexPtr();
+ const Index* Lp = m_matrix._outerIndexPtr();
+ Index* Li = m_matrix._innerIndexPtr();
Scalar* Lx = m_matrix._valuePtr();
m_diag.resize(size);
Scalar * y = ei_aligned_stack_new(Scalar, size);
- int * pattern = ei_aligned_stack_new(int, size);
- int * tags = ei_aligned_stack_new(int, size);
+ Index * pattern = ei_aligned_stack_new(Index, size);
+ Index * tags = ei_aligned_stack_new(Index, size);
- const int* P = 0;
- const int* Pinv = 0;
+ const Index* P = 0;
+ const Index* Pinv = 0;
bool ok = true;
- for (int k = 0; k < size; ++k)
+ for (Index k = 0; k < size; ++k)
{
/* compute nonzero pattern of kth row of L, in topological order */
y[k] = 0.0; /* Y(0:k) is now all zero */
- int top = size; /* stack for pattern is empty */
+ Index top = size; /* stack for pattern is empty */
tags[k] = k; /* mark node k as visited */
m_nonZerosPerCol[k] = 0; /* count of nonzeros in column k of L */
- int kk = (P) ? (P[k]) : (k); /* kth original, or permuted, column */
- int p2 = Ap[kk+1];
- for (int p = Ap[kk]; p < p2; ++p)
+ Index kk = (P) ? (P[k]) : (k); /* kth original, or permuted, column */
+ Index p2 = Ap[kk+1];
+ for (Index p = Ap[kk]; p < p2; ++p)
{
- int i = Pinv ? Pinv[Ai[p]] : Ai[p]; /* get A(i,k) */
+ Index i = Pinv ? Pinv[Ai[p]] : Ai[p]; /* get A(i,k) */
if (i <= k)
{
y[i] += Ax[p]; /* scatter A(i,k) into Y (sum duplicates) */
- int len;
+ Index len;
for (len = 0; tags[i] != k; i = m_parent[i])
{
pattern[len++] = i; /* L(k,i) is nonzero */
@@ -295,11 +296,11 @@
y[k] = 0.0;
for (; top < size; ++top)
{
- int i = pattern[top]; /* pattern[top:n-1] is pattern of L(:,k) */
+ Index i = pattern[top]; /* pattern[top:n-1] is pattern of L(:,k) */
Scalar yi = y[i]; /* get and clear Y(i) */
y[i] = 0.0;
- int p2 = Lp[i] + m_nonZerosPerCol[i];
- int p;
+ Index p2 = Lp[i] + m_nonZerosPerCol[i];
+ Index p;
for (p = Lp[i]; p < p2; ++p)
y[Li[p]] -= Lx[p] * yi;
Scalar l_ki = yi / m_diag[i]; /* the nonzero entry L(k,i) */
@@ -316,8 +317,8 @@
}
ei_aligned_stack_delete(Scalar, y, size);
- ei_aligned_stack_delete(int, pattern, size);
- ei_aligned_stack_delete(int, tags, size);
+ ei_aligned_stack_delete(Index, pattern, size);
+ ei_aligned_stack_delete(Index, tags, size);
return ok; /* success, diagonal of D is all nonzero */
}
@@ -327,7 +328,7 @@
template<typename Derived>
bool SparseLDLT<MatrixType, Backend>::solveInPlace(MatrixBase<Derived> &b) const
{
- const int size = m_matrix.rows();
+ const Index size = m_matrix.rows();
ei_assert(size==b.rows());
if (!m_succeeded)
return false;
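Usage sketch for SparseLDLT (illustrative, not part of the diff; assumes this revision keeps the usual matrix constructor and succeeded() accessor):

#include <Eigen/Core>
#include <Eigen/Sparse>
using namespace Eigen;

int main()
{
  typedef SparseMatrix<double> SpMat;
  SpMat A(2, 2);            // SPD; diagonal for brevity
  A.insert(0, 0) = 4.0;
  A.insert(1, 1) = 9.0;
  A.finalize();

  SparseLDLT<SpMat> ldlt(A);   // runs _symbolic() then _numeric() above
  Vector2d b(8.0, 18.0);
  if (!ldlt.succeeded() || !ldlt.solveInPlace(b))
    return 1;
  return b.isApprox(Vector2d(2.0, 2.0)) ? 0 : 1;
}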
diff --git a/Eigen/src/Sparse/SparseLLT.h b/Eigen/src/Sparse/SparseLLT.h
index a1c10ba..37c6c3f 100644
--- a/Eigen/src/Sparse/SparseLLT.h
+++ b/Eigen/src/Sparse/SparseLLT.h
@@ -40,6 +40,7 @@
{
protected:
typedef typename MatrixType::Scalar Scalar;
+ typedef typename MatrixType::Index Index;
typedef typename NumTraits<typename MatrixType::Scalar>::Real RealScalar;
typedef SparseMatrix<Scalar> CholMatrixType;
@@ -127,7 +128,7 @@
void SparseLLT<MatrixType,Backend>::compute(const MatrixType& a)
{
assert(a.rows()==a.cols());
- const int size = a.rows();
+ const Index size = a.rows();
m_matrix.resize(size, size);
// allocate a temporary vector for accumulations
@@ -137,7 +138,7 @@
// TODO estimate the number of non zeros
m_matrix.setZero();
m_matrix.reserve(a.nonZeros()*2);
- for (int j = 0; j < size; ++j)
+ for (Index j = 0; j < size; ++j)
{
Scalar x = ei_real(a.coeff(j,j));
@@ -154,7 +155,7 @@
for (; it; ++it)
tempVector.coeffRef(it.index()) = it.value();
}
- for (int k=0; k<j+1; ++k)
+ for (Index k=0; k<j+1; ++k)
{
typename CholMatrixType::InnerIterator it(m_matrix, k);
while (it && it.index()<j)
@@ -190,7 +191,7 @@
template<typename Derived>
bool SparseLLT<MatrixType, Backend>::solveInPlace(MatrixBase<Derived> &b) const
{
- const int size = m_matrix.rows();
+ const Index size = m_matrix.rows();
ei_assert(size==b.rows());
m_matrix.template triangularView<Lower>().solveInPlace(b);
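SparseLLT follows the same pattern, minus the diagonal D (illustrative sketch under the same assumptions as the SparseLDLT one above):

#include <Eigen/Core>
#include <Eigen/Sparse>
using namespace Eigen;

int main()
{
  typedef SparseMatrix<double> SpMat;
  SpMat A(2, 2);
  A.insert(0, 0) = 4.0;
  A.insert(1, 1) = 9.0;
  A.finalize();

  SparseLLT<SpMat> llt(A);   // compute() shown above
  Vector2d b(8.0, 18.0);
  llt.solveInPlace(b);       // triangular solves with the Cholesky factor
  return b.isApprox(Vector2d(2.0, 2.0)) ? 0 : 1;
}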
diff --git a/Eigen/src/Sparse/SparseMatrix.h b/Eigen/src/Sparse/SparseMatrix.h
index 06cc7a9..fd41d73 100644
--- a/Eigen/src/Sparse/SparseMatrix.h
+++ b/Eigen/src/Sparse/SparseMatrix.h
@@ -77,46 +77,46 @@
typedef SparseMatrix<Scalar,(Flags&~RowMajorBit)|(IsRowMajor?RowMajorBit:0)> TransposedSparseMatrix;
- int m_outerSize;
- int m_innerSize;
- int* m_outerIndex;
+ Index m_outerSize;
+ Index m_innerSize;
+ Index* m_outerIndex;
CompressedStorage<Scalar> m_data;
public:
- inline int rows() const { return IsRowMajor ? m_outerSize : m_innerSize; }
- inline int cols() const { return IsRowMajor ? m_innerSize : m_outerSize; }
+ inline Index rows() const { return IsRowMajor ? m_outerSize : m_innerSize; }
+ inline Index cols() const { return IsRowMajor ? m_innerSize : m_outerSize; }
- inline int innerSize() const { return m_innerSize; }
- inline int outerSize() const { return m_outerSize; }
- inline int innerNonZeros(int j) const { return m_outerIndex[j+1]-m_outerIndex[j]; }
+ inline Index innerSize() const { return m_innerSize; }
+ inline Index outerSize() const { return m_outerSize; }
+ inline Index innerNonZeros(Index j) const { return m_outerIndex[j+1]-m_outerIndex[j]; }
inline const Scalar* _valuePtr() const { return &m_data.value(0); }
inline Scalar* _valuePtr() { return &m_data.value(0); }
- inline const int* _innerIndexPtr() const { return &m_data.index(0); }
- inline int* _innerIndexPtr() { return &m_data.index(0); }
+ inline const Index* _innerIndexPtr() const { return &m_data.index(0); }
+ inline Index* _innerIndexPtr() { return &m_data.index(0); }
- inline const int* _outerIndexPtr() const { return m_outerIndex; }
- inline int* _outerIndexPtr() { return m_outerIndex; }
+ inline const Index* _outerIndexPtr() const { return m_outerIndex; }
+ inline Index* _outerIndexPtr() { return m_outerIndex; }
- inline Scalar coeff(int row, int col) const
+ inline Scalar coeff(Index row, Index col) const
{
- const int outer = IsRowMajor ? row : col;
- const int inner = IsRowMajor ? col : row;
+ const Index outer = IsRowMajor ? row : col;
+ const Index inner = IsRowMajor ? col : row;
return m_data.atInRange(m_outerIndex[outer], m_outerIndex[outer+1], inner);
}
- inline Scalar& coeffRef(int row, int col)
+ inline Scalar& coeffRef(Index row, Index col)
{
- const int outer = IsRowMajor ? row : col;
- const int inner = IsRowMajor ? col : row;
+ const Index outer = IsRowMajor ? row : col;
+ const Index inner = IsRowMajor ? col : row;
- int start = m_outerIndex[outer];
- int end = m_outerIndex[outer+1];
+ Index start = m_outerIndex[outer];
+ Index end = m_outerIndex[outer+1];
ei_assert(end>=start && "you probably called coeffRef on a non finalized matrix");
ei_assert(end>start && "coeffRef cannot be called on a zero coefficient");
- const int id = m_data.searchLowerIndex(start,end-1,inner);
+ const Index id = m_data.searchLowerIndex(start,end-1,inner);
ei_assert((id<end) && (m_data.index(id)==inner) && "coeffRef cannot be called on a zero coefficient");
return m_data.value(id);
}
@@ -129,40 +129,40 @@
inline void setZero()
{
m_data.clear();
- memset(m_outerIndex, 0, (m_outerSize+1)*sizeof(int));
+ memset(m_outerIndex, 0, (m_outerSize+1)*sizeof(Index));
}
/** \returns the number of non zero coefficients */
- inline int nonZeros() const { return static_cast<int>(m_data.size()); }
+ inline Index nonZeros() const { return static_cast<Index>(m_data.size()); }
/** \deprecated use setZero() and reserve()
* Initializes the filling process of \c *this.
* \param reserveSize approximate number of nonzeros
* Note that the matrix \c *this is zeroed.
*/
- EIGEN_DEPRECATED void startFill(int reserveSize = 1000)
+ EIGEN_DEPRECATED void startFill(Index reserveSize = 1000)
{
setZero();
m_data.reserve(reserveSize);
}
/** Preallocates \a reserveSize non zeros */
- inline void reserve(int reserveSize)
+ inline void reserve(Index reserveSize)
{
m_data.reserve(reserveSize);
}
/** \deprecated use insert()
*/
- EIGEN_DEPRECATED Scalar& fill(int row, int col)
+ EIGEN_DEPRECATED Scalar& fill(Index row, Index col)
{
- const int outer = IsRowMajor ? row : col;
- const int inner = IsRowMajor ? col : row;
+ const Index outer = IsRowMajor ? row : col;
+ const Index inner = IsRowMajor ? col : row;
if (m_outerIndex[outer+1]==0)
{
// we start a new inner vector
- int i = outer;
+ Index i = outer;
while (i>=0 && m_outerIndex[i]==0)
{
m_outerIndex[i] = m_data.size();
@@ -176,7 +176,7 @@
}
// std::cerr << size_t(m_outerIndex[outer+1]) << " == " << m_data.size() << "\n";
assert(size_t(m_outerIndex[outer+1]) == m_data.size());
- int id = m_outerIndex[outer+1];
+ Index id = m_outerIndex[outer+1];
++m_outerIndex[outer+1];
m_data.append(0, inner);
@@ -185,25 +185,25 @@
//--- low level purely coherent filling ---
- inline Scalar& insertBack(int outer, int inner)
+ inline Scalar& insertBack(Index outer, Index inner)
{
ei_assert(size_t(m_outerIndex[outer+1]) == m_data.size() && "wrong sorted insertion");
ei_assert( (m_outerIndex[outer+1]-m_outerIndex[outer]==0 || m_data.index(m_data.size()-1)<inner) && "wrong sorted insertion");
- int id = m_outerIndex[outer+1];
+ Index id = m_outerIndex[outer+1];
++m_outerIndex[outer+1];
m_data.append(0, inner);
return m_data.value(id);
}
- inline Scalar& insertBackNoCheck(int outer, int inner)
+ inline Scalar& insertBackNoCheck(Index outer, Index inner)
{
- int id = m_outerIndex[outer+1];
+ Index id = m_outerIndex[outer+1];
++m_outerIndex[outer+1];
m_data.append(0, inner);
return m_data.value(id);
}
- inline void startVec(int outer)
+ inline void startVec(Index outer)
{
ei_assert(m_outerIndex[outer]==int(m_data.size()) && "you must call startVec on each inner vec");
ei_assert(m_outerIndex[outer+1]==0 && "you must call startVec on each inner vec");
@@ -215,7 +215,7 @@
/** \deprecated use insert()
* Like fill() but with random inner coordinates.
*/
- EIGEN_DEPRECATED Scalar& fillrand(int row, int col)
+ EIGEN_DEPRECATED Scalar& fillrand(Index row, Index col)
{
return insert(row,col);
}
@@ -228,18 +228,18 @@
*
* After an insertion session, you should call the finalize() function.
*/
- EIGEN_DONT_INLINE Scalar& insert(int row, int col)
+ EIGEN_DONT_INLINE Scalar& insert(Index row, Index col)
{
- const int outer = IsRowMajor ? row : col;
- const int inner = IsRowMajor ? col : row;
+ const Index outer = IsRowMajor ? row : col;
+ const Index inner = IsRowMajor ? col : row;
- int previousOuter = outer;
+ Index previousOuter = outer;
if (m_outerIndex[outer+1]==0)
{
// we start a new inner vector
while (previousOuter>=0 && m_outerIndex[previousOuter]==0)
{
- m_outerIndex[previousOuter] = static_cast<int>(m_data.size());
+ m_outerIndex[previousOuter] = static_cast<Index>(m_data.size());
--previousOuter;
}
m_outerIndex[outer+1] = m_outerIndex[outer];
@@ -285,9 +285,9 @@
{
// oops wrong guess.
// let's correct the outer offsets
- for (int k=0; k<=(outer+1); ++k)
+ for (Index k=0; k<=(outer+1); ++k)
m_outerIndex[k] = 0;
- int k=outer+1;
+ Index k=outer+1;
while(m_outerIndex[k]==0)
m_outerIndex[k++] = 1;
while (k<=m_outerSize && m_outerIndex[k]!=0)
@@ -306,13 +306,13 @@
{
// we are not inserting into the last inner vec
// update outer indices:
- int j = outer+2;
+ Index j = outer+2;
while (j<=m_outerSize && m_outerIndex[j]!=0)
m_outerIndex[j++]++;
--j;
// shift data of last vecs:
- int k = m_outerIndex[j]-1;
- while (k>=int(id))
+ Index k = m_outerIndex[j]-1;
+ while (k>=Index(id))
{
m_data.index(k) = m_data.index(k-1);
m_data.value(k) = m_data.value(k-1);
@@ -338,8 +338,8 @@
*/
inline void finalize()
{
- int size = static_cast<int>(m_data.size());
- int i = m_outerSize;
+ Index size = static_cast<Index>(m_data.size());
+ Index i = m_outerSize;
// find the last filled column
while (i>=0 && m_outerIndex[i]==0)
--i;
@@ -353,13 +353,13 @@
void prune(Scalar reference, RealScalar epsilon = NumTraits<RealScalar>::dummy_precision())
{
- int k = 0;
- for (int j=0; j<m_outerSize; ++j)
+ Index k = 0;
+ for (Index j=0; j<m_outerSize; ++j)
{
- int previousStart = m_outerIndex[j];
+ Index previousStart = m_outerIndex[j];
m_outerIndex[j] = k;
- int end = m_outerIndex[j+1];
- for (int i=previousStart; i<end; ++i)
+ Index end = m_outerIndex[j+1];
+ for (Index i=previousStart; i<end; ++i)
{
if (!ei_isMuchSmallerThan(m_data.value(i), reference, epsilon))
{
@@ -374,22 +374,22 @@
}
/** Resizes the matrix to a \a rows x \a cols matrix and initializes it to zero
- * \sa resizeNonZeros(int), reserve(), setZero()
+ * \sa resizeNonZeros(Index), reserve(), setZero()
*/
- void resize(int rows, int cols)
+ void resize(Index rows, Index cols)
{
- const int outerSize = IsRowMajor ? rows : cols;
+ const Index outerSize = IsRowMajor ? rows : cols;
m_innerSize = IsRowMajor ? cols : rows;
m_data.clear();
if (m_outerSize != outerSize || m_outerSize==0)
{
delete[] m_outerIndex;
- m_outerIndex = new int [outerSize+1];
+ m_outerIndex = new Index [outerSize+1];
m_outerSize = outerSize;
}
- memset(m_outerIndex, 0, (m_outerSize+1)*sizeof(int));
+ memset(m_outerIndex, 0, (m_outerSize+1)*sizeof(Index));
}
- void resizeNonZeros(int size)
+ void resizeNonZeros(Index size)
{
m_data.resize(size);
}
@@ -400,7 +400,7 @@
resize(0, 0);
}
- inline SparseMatrix(int rows, int cols)
+ inline SparseMatrix(Index rows, Index cols)
: m_outerSize(0), m_innerSize(0), m_outerIndex(0)
{
resize(rows, cols);
@@ -438,7 +438,7 @@
else
{
resize(other.rows(), other.cols());
- memcpy(m_outerIndex, other.m_outerIndex, (m_outerSize+1)*sizeof(int));
+ memcpy(m_outerIndex, other.m_outerIndex, (m_outerSize+1)*sizeof(Index));
m_data = other.m_data;
}
return *this;
@@ -465,19 +465,19 @@
OtherCopy otherCopy(other.derived());
resize(other.rows(), other.cols());
- Eigen::Map<VectorXi>(m_outerIndex,outerSize()).setZero();
+ Eigen::Map<Matrix<Index, Dynamic, 1> > (m_outerIndex,outerSize()).setZero();
// pass 1
// FIXME the above copy could be merged with that pass
- for (int j=0; j<otherCopy.outerSize(); ++j)
+ for (Index j=0; j<otherCopy.outerSize(); ++j)
for (typename _OtherCopy::InnerIterator it(otherCopy, j); it; ++it)
++m_outerIndex[it.index()];
// prefix sum
- int count = 0;
+ Index count = 0;
VectorXi positions(outerSize());
- for (int j=0; j<outerSize(); ++j)
+ for (Index j=0; j<outerSize(); ++j)
{
- int tmp = m_outerIndex[j];
+ Index tmp = m_outerIndex[j];
m_outerIndex[j] = count;
positions[j] = count;
count += tmp;
@@ -486,11 +486,11 @@
// alloc
m_data.resize(count);
// pass 2
- for (int j=0; j<otherCopy.outerSize(); ++j)
+ for (Index j=0; j<otherCopy.outerSize(); ++j)
{
for (typename _OtherCopy::InnerIterator it(otherCopy, j); it; ++it)
{
- int pos = positions[it.index()]++;
+ Index pos = positions[it.index()]++;
m_data.index(pos) = j;
m_data.value(pos) = it.value();
}
@@ -508,14 +508,14 @@
{
EIGEN_DBG_SPARSE(
s << "Nonzero entries:\n";
- for (int i=0; i<m.nonZeros(); ++i)
+ for (Index i=0; i<m.nonZeros(); ++i)
{
s << "(" << m.m_data.value(i) << "," << m.m_data.index(i) << ") ";
}
s << std::endl;
s << std::endl;
s << "Column pointers:\n";
- for (int i=0; i<m.outerSize(); ++i)
+ for (Index i=0; i<m.outerSize(); ++i)
{
s << m.m_outerIndex[i] << " ";
}
@@ -540,12 +540,12 @@
class SparseMatrix<Scalar,_Options>::InnerIterator
{
public:
- InnerIterator(const SparseMatrix& mat, int outer)
+ InnerIterator(const SparseMatrix& mat, Index outer)
: m_matrix(mat), m_outer(outer), m_id(mat.m_outerIndex[outer]), m_start(m_id), m_end(mat.m_outerIndex[outer+1])
{}
template<unsigned int Added, unsigned int Removed>
- InnerIterator(const Flagged<SparseMatrix,Added,Removed>& mat, int outer)
+ InnerIterator(const Flagged<SparseMatrix,Added,Removed>& mat, Index outer)
: m_matrix(mat._expression()), m_outer(outer), m_id(m_matrix.m_outerIndex[outer]),
m_start(m_id), m_end(m_matrix.m_outerIndex[outer+1])
{}
@@ -555,19 +555,19 @@
inline Scalar value() const { return m_matrix.m_data.value(m_id); }
inline Scalar& valueRef() { return const_cast<Scalar&>(m_matrix.m_data.value(m_id)); }
- inline int index() const { return m_matrix.m_data.index(m_id); }
- inline int outer() const { return m_outer; }
- inline int row() const { return IsRowMajor ? m_outer : index(); }
- inline int col() const { return IsRowMajor ? index() : m_outer; }
+ inline Index index() const { return m_matrix.m_data.index(m_id); }
+ inline Index outer() const { return m_outer; }
+ inline Index row() const { return IsRowMajor ? m_outer : index(); }
+ inline Index col() const { return IsRowMajor ? index() : m_outer; }
inline operator bool() const { return (m_id < m_end) && (m_id>=m_start); }
protected:
const SparseMatrix& m_matrix;
- const int m_outer;
- int m_id;
- const int m_start;
- const int m_end;
+ const Index m_outer;
+ Index m_id;
+ const Index m_start;
+ const Index m_end;
};
#endif // EIGEN_SPARSEMATRIX_H
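The two filling interfaces touched above, side by side (illustrative sketch, not part of the diff):

#include <Eigen/Sparse>
using namespace Eigen;

int main()
{
  typedef SparseMatrix<double> SpMat;  // column-major

  // Purely coherent filling: outer vectors in order, inner indices sorted.
  SpMat m(4, 4);
  m.reserve(4);
  for (SpMat::Index j = 0; j < 4; ++j)
  {
    m.startVec(j);
    m.insertBack(j, j) = 1.0;  // insertBack(outer, inner)
  }
  m.finalize();

  // insert() tolerates arbitrary order at extra cost; finalize() is
  // still mandatory after the insertion session.
  SpMat n(4, 4);
  n.insert(3, 1) = 2.0;        // insert(row, col)
  n.insert(0, 2) = 3.0;
  n.finalize();
  return (m.nonZeros() == 4 && n.nonZeros() == 2) ? 0 : 1;
}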
diff --git a/Eigen/src/Sparse/SparseMatrixBase.h b/Eigen/src/Sparse/SparseMatrixBase.h
index 65fa19a..a432682 100644
--- a/Eigen/src/Sparse/SparseMatrixBase.h
+++ b/Eigen/src/Sparse/SparseMatrixBase.h
@@ -42,6 +42,9 @@
typedef typename ei_traits<Derived>::Scalar Scalar;
typedef typename ei_packet_traits<Scalar>::type PacketScalar;
+ typedef typename ei_traits<Derived>::StorageKind StorageKind;
+ typedef typename ei_index<StorageKind>::type Index;
+
typedef SparseMatrixBase StorageBaseType;
enum {
@@ -145,15 +148,15 @@
#endif // not EIGEN_PARSED_BY_DOXYGEN
/** \returns the number of rows. \sa cols(), RowsAtCompileTime */
- inline int rows() const { return derived().rows(); }
+ inline Index rows() const { return derived().rows(); }
/** \returns the number of columns. \sa rows(), ColsAtCompileTime*/
- inline int cols() const { return derived().cols(); }
+ inline Index cols() const { return derived().cols(); }
/** \returns the number of coefficients, which is \a rows()*cols().
* \sa rows(), cols(), SizeAtCompileTime. */
- inline int size() const { return rows() * cols(); }
+ inline Index size() const { return rows() * cols(); }
/** \returns the number of nonzero coefficients which is in practice the number
* of stored coefficients. */
- inline int nonZeros() const { return derived().nonZeros(); }
+ inline Index nonZeros() const { return derived().nonZeros(); }
/** \returns true if either the number of rows or the number of columns is equal to 1.
* In other words, this function returns
* \code rows()==1 || cols()==1 \endcode
@@ -161,10 +164,10 @@
inline bool isVector() const { return rows()==1 || cols()==1; }
/** \returns the size of the storage major dimension,
* i.e., the number of columns for a columns major matrix, and the number of rows otherwise */
- int outerSize() const { return (int(Flags)&RowMajorBit) ? this->rows() : this->cols(); }
+ Index outerSize() const { return (int(Flags)&RowMajorBit) ? this->rows() : this->cols(); }
/** \returns the size of the inner dimension according to the storage order,
* i.e., the number of rows for a columns major matrix, and the number of cols otherwise */
- int innerSize() const { return (int(Flags)&RowMajorBit) ? this->cols() : this->rows(); }
+ Index innerSize() const { return (int(Flags)&RowMajorBit) ? this->cols() : this->rows(); }
bool isRValue() const { return m_isRValue; }
Derived& markAsRValue() { m_isRValue = true; return derived(); }
@@ -193,13 +196,13 @@
enum { Flip = (Flags & RowMajorBit) != (OtherDerived::Flags & RowMajorBit) };
- const int outerSize = other.outerSize();
+ const Index outerSize = other.outerSize();
//typedef typename ei_meta_if<transpose, LinkedVectorMatrix<Scalar,Flags&RowMajorBit>, Derived>::ret TempType;
// thanks to shallow copies, we always eval to a temporary
Derived temp(other.rows(), other.cols());
temp.reserve(std::max(this->rows(),this->cols())*2);
- for (int j=0; j<outerSize; ++j)
+ for (Index j=0; j<outerSize; ++j)
{
temp.startVec(j);
for (typename OtherDerived::InnerIterator it(other.derived(), j); it; ++it)
@@ -222,14 +225,14 @@
// std::cout << Flags << " " << OtherDerived::Flags << "\n";
const bool transpose = (Flags & RowMajorBit) != (OtherDerived::Flags & RowMajorBit);
// std::cout << "eval transpose = " << transpose << "\n";
- const int outerSize = (int(OtherDerived::Flags) & RowMajorBit) ? other.rows() : other.cols();
+ const Index outerSize = (int(OtherDerived::Flags) & RowMajorBit) ? other.rows() : other.cols();
if ((!transpose) && other.isRValue())
{
// eval without temporary
derived().resize(other.rows(), other.cols());
derived().setZero();
derived().reserve(std::max(this->rows(),this->cols())*2);
- for (int j=0; j<outerSize; ++j)
+ for (Index j=0; j<outerSize; ++j)
{
derived().startVec(j);
for (typename OtherDerived::InnerIterator it(other.derived(), j); it; ++it)
@@ -258,9 +261,9 @@
{
if (Flags&RowMajorBit)
{
- for (int row=0; row<m.outerSize(); ++row)
+ for (Index row=0; row<m.outerSize(); ++row)
{
- int col = 0;
+ Index col = 0;
for (typename Derived::InnerIterator it(m.derived(), row); it; ++it)
{
for ( ; col<it.index(); ++col)
@@ -276,7 +279,7 @@
else
{
if (m.cols() == 1) {
- int row = 0;
+ Index row = 0;
for (typename Derived::InnerIterator it(m.derived(), 0); it; ++it)
{
for ( ; row<it.index(); ++row)
@@ -405,20 +408,20 @@
const AdjointReturnType adjoint() const { return transpose(); }
// sub-vector
- SparseInnerVectorSet<Derived,1> row(int i);
- const SparseInnerVectorSet<Derived,1> row(int i) const;
- SparseInnerVectorSet<Derived,1> col(int j);
- const SparseInnerVectorSet<Derived,1> col(int j) const;
- SparseInnerVectorSet<Derived,1> innerVector(int outer);
- const SparseInnerVectorSet<Derived,1> innerVector(int outer) const;
+ SparseInnerVectorSet<Derived,1> row(Index i);
+ const SparseInnerVectorSet<Derived,1> row(Index i) const;
+ SparseInnerVectorSet<Derived,1> col(Index j);
+ const SparseInnerVectorSet<Derived,1> col(Index j) const;
+ SparseInnerVectorSet<Derived,1> innerVector(Index outer);
+ const SparseInnerVectorSet<Derived,1> innerVector(Index outer) const;
// set of sub-vectors
- SparseInnerVectorSet<Derived,Dynamic> subrows(int start, int size);
- const SparseInnerVectorSet<Derived,Dynamic> subrows(int start, int size) const;
- SparseInnerVectorSet<Derived,Dynamic> subcols(int start, int size);
- const SparseInnerVectorSet<Derived,Dynamic> subcols(int start, int size) const;
- SparseInnerVectorSet<Derived,Dynamic> innerVectors(int outerStart, int outerSize);
- const SparseInnerVectorSet<Derived,Dynamic> innerVectors(int outerStart, int outerSize) const;
+ SparseInnerVectorSet<Derived,Dynamic> subrows(Index start, Index size);
+ const SparseInnerVectorSet<Derived,Dynamic> subrows(Index start, Index size) const;
+ SparseInnerVectorSet<Derived,Dynamic> subcols(Index start, Index size);
+ const SparseInnerVectorSet<Derived,Dynamic> subcols(Index start, Index size) const;
+ SparseInnerVectorSet<Derived,Dynamic> innerVectors(Index outerStart, Index outerSize);
+ const SparseInnerVectorSet<Derived,Dynamic> innerVectors(Index outerStart, Index outerSize) const;
// typename BlockReturnType<Derived>::Type block(int startRow, int startCol, int blockRows, int blockCols);
// const typename BlockReturnType<Derived>::Type
@@ -493,7 +496,7 @@
void evalTo(MatrixBase<DenseDerived>& dst) const
{
dst.setZero();
- for (int j=0; j<outerSize(); ++j)
+ for (Index j=0; j<outerSize(); ++j)
for (typename Derived::InnerIterator i(derived(),j); i; ++i)
dst.coeffRef(i.row(),i.col()) = i.value();
}
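
Note on the mechanism used throughout this patch: an expression's Index is resolved from its StorageKind through the ei_index trait, so the dense and sparse worlds can pick different index widths independently. A minimal standalone sketch of the pattern follows; the int defaults and the rowsImpl name are illustrative stand-ins, not Eigen's actual definitions.

    // minimal sketch of the ei_index mechanism; stand-in types only
    struct Dense {};
    struct Sparse {};

    template<typename StorageKind> struct ei_index;
    template<> struct ei_index<Dense>  { typedef int type; };  // stand-in default
    template<> struct ei_index<Sparse> { typedef int type; };  // stand-in default

    template<typename Derived> struct ei_traits;  // specialized per expression

    template<typename Derived>
    struct SparseBaseSketch
    {
      typedef typename ei_traits<Derived>::StorageKind StorageKind;
      typedef typename ei_index<StorageKind>::type Index;  // no hard-coded int
      Index rows() const { return static_cast<const Derived&>(*this).rowsImpl(); }
    };

    struct MyExpr;
    template<> struct ei_traits<MyExpr> { typedef Sparse StorageKind; };

    struct MyExpr : SparseBaseSketch<MyExpr>
    {
      Index rowsImpl() const { return 3; }  // Index inherited from the base
    };
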
diff --git a/Eigen/src/Sparse/SparseProduct.h b/Eigen/src/Sparse/SparseProduct.h
index 078ac5f..fb53902 100644
--- a/Eigen/src/Sparse/SparseProduct.h
+++ b/Eigen/src/Sparse/SparseProduct.h
@@ -126,8 +126,8 @@
EIGEN_STATIC_ASSERT(ProductIsValid || SameSizes, INVALID_MATRIX_PRODUCT)
}
- EIGEN_STRONG_INLINE int rows() const { return m_lhs.rows(); }
- EIGEN_STRONG_INLINE int cols() const { return m_rhs.cols(); }
+ EIGEN_STRONG_INLINE Index rows() const { return m_lhs.rows(); }
+ EIGEN_STRONG_INLINE Index cols() const { return m_rhs.cols(); }
EIGEN_STRONG_INLINE const _LhsNested& lhs() const { return m_lhs; }
EIGEN_STRONG_INLINE const _RhsNested& rhs() const { return m_rhs; }
@@ -140,16 +140,17 @@
template<typename Lhs, typename Rhs, typename ResultType>
static void ei_sparse_product_impl2(const Lhs& lhs, const Rhs& rhs, ResultType& res)
{
- typedef typename ei_traits<typename ei_cleantype<Lhs>::type>::Scalar Scalar;
+ typedef typename ei_cleantype<Lhs>::type::Scalar Scalar;
+ typedef typename ei_cleantype<Lhs>::type::Index Index;
// make sure to call innerSize/outerSize since we fake the storage order.
- int rows = lhs.innerSize();
- int cols = rhs.outerSize();
+ Index rows = lhs.innerSize();
+ Index cols = rhs.outerSize();
ei_assert(lhs.outerSize() == rhs.innerSize());
std::vector<bool> mask(rows,false);
Matrix<Scalar,Dynamic,1> values(rows);
- Matrix<int,Dynamic,1> indices(rows);
+ Matrix<Index,Dynamic,1> indices(rows);
// estimate the number of non zero entries
float ratioLhs = float(lhs.nonZeros())/(float(lhs.rows())*float(lhs.cols()));
@@ -160,20 +161,20 @@
// int t = (rows*100)/139;
res.resize(rows, cols);
- res.reserve(int(ratioRes*rows*cols));
+ res.reserve(Index(ratioRes*rows*cols));
// we compute each column of the result, one after the other
- for (int j=0; j<cols; ++j)
+ for (Index j=0; j<cols; ++j)
{
res.startVec(j);
- int nnz = 0;
+ Index nnz = 0;
for (typename Rhs::InnerIterator rhsIt(rhs, j); rhsIt; ++rhsIt)
{
Scalar y = rhsIt.value();
- int k = rhsIt.index();
+ Index k = rhsIt.index();
for (typename Lhs::InnerIterator lhsIt(lhs, k); lhsIt; ++lhsIt)
{
- int i = lhsIt.index();
+ Index i = lhsIt.index();
Scalar x = lhsIt.value();
if(!mask[i])
{
@@ -225,11 +226,12 @@
{
// return ei_sparse_product_impl2(lhs,rhs,res);
- typedef typename ei_traits<typename ei_cleantype<Lhs>::type>::Scalar Scalar;
+ typedef typename ei_cleantype<Lhs>::type::Scalar Scalar;
+ typedef typename ei_cleantype<Lhs>::type::Index Index;
// make sure to call innerSize/outerSize since we fake the storage order.
- int rows = lhs.innerSize();
- int cols = rhs.outerSize();
+ Index rows = lhs.innerSize();
+ Index cols = rhs.outerSize();
//int size = lhs.outerSize();
ei_assert(lhs.outerSize() == rhs.innerSize());
@@ -242,8 +244,8 @@
float ratioRes = std::min(ratioLhs * avgNnzPerRhsColumn, 1.f);
res.resize(rows, cols);
- res.reserve(int(ratioRes*rows*cols));
- for (int j=0; j<cols; ++j)
+ res.reserve(Index(ratioRes*rows*cols));
+ for (Index j=0; j<cols; ++j)
{
// let's do a more accurate determination of the nnz ratio for the current column j of res
//float ratioColRes = std::min(ratioLhs * rhs.innerNonZeros(j), 1.f);
@@ -514,7 +516,7 @@
typedef typename ei_cleantype<Rhs>::type _Rhs;
typedef typename _Lhs::InnerIterator LhsInnerIterator;
enum { LhsIsRowMajor = (_Lhs::Flags&RowMajorBit)==RowMajorBit };
- for(int j=0; j<m_lhs.outerSize(); ++j)
+ for(Index j=0; j<m_lhs.outerSize(); ++j)
{
typename Rhs::Scalar rhs_j = alpha * m_rhs.coeff(j,0);
Block<Dest,1,Dest::ColsAtCompileTime> dest_j(dest.row(LhsIsRowMajor ? j : 0));
@@ -555,7 +557,7 @@
typedef typename ei_cleantype<Rhs>::type _Rhs;
typedef typename _Rhs::InnerIterator RhsInnerIterator;
enum { RhsIsRowMajor = (_Rhs::Flags&RowMajorBit)==RowMajorBit };
- for(int j=0; j<m_rhs.outerSize(); ++j)
+ for(Index j=0; j<m_rhs.outerSize(); ++j)
for(RhsInnerIterator i(m_rhs,j); i; ++i)
dest.col(RhsIsRowMajor ? i.index() : j) += (alpha*i.value()) * m_lhs.col(RhsIsRowMajor ? j : i.index());
}
diff --git a/Eigen/src/Sparse/SparseRedux.h b/Eigen/src/Sparse/SparseRedux.h
index a243be6..ea2ce18 100644
--- a/Eigen/src/Sparse/SparseRedux.h
+++ b/Eigen/src/Sparse/SparseRedux.h
@@ -31,7 +31,7 @@
{
ei_assert(rows()>0 && cols()>0 && "you are using an uninitialized matrix");
Scalar res = 0;
- for (int j=0; j<outerSize(); ++j)
+ for (Index j=0; j<outerSize(); ++j)
for (typename Derived::InnerIterator iter(derived(),j); iter; ++iter)
res += iter.value();
return res;
diff --git a/Eigen/src/Sparse/SparseSelfAdjointView.h b/Eigen/src/Sparse/SparseSelfAdjointView.h
index d477053..703ca64 100644
--- a/Eigen/src/Sparse/SparseSelfAdjointView.h
+++ b/Eigen/src/Sparse/SparseSelfAdjointView.h
@@ -49,7 +49,8 @@
{
public:
- typedef typename ei_traits<MatrixType>::Scalar Scalar;
+ typedef typename MatrixType::Scalar Scalar;
+ typedef typename MatrixType::Index Index;
inline SparseSelfAdjointView(const MatrixType& matrix) : m_matrix(matrix)
{
@@ -57,8 +58,8 @@
ei_assert(rows()==cols() && "SelfAdjointView is only for square matrices");
}
- inline int rows() const { return m_matrix.rows(); }
- inline int cols() const { return m_matrix.cols(); }
+ inline Index rows() const { return m_matrix.rows(); }
+ inline Index cols() const { return m_matrix.cols(); }
/** \internal \returns a reference to the nested matrix */
const MatrixType& matrix() const { return m_matrix; }
@@ -173,7 +174,7 @@
|| ( (UpLo&Lower) && LhsIsRowMajor),
ProcessSecondHalf = !ProcessFirstHalf
};
- for (int j=0; j<m_lhs.outerSize(); ++j)
+ for (Index j=0; j<m_lhs.outerSize(); ++j)
{
LhsInnerIterator i(m_lhs,j);
if (ProcessSecondHalf && i && (i.index()==j))
@@ -184,8 +185,8 @@
Block<Dest,1,Dest::ColsAtCompileTime> dest_j(dest.row(LhsIsRowMajor ? j : 0));
for(; (ProcessFirstHalf ? i && i.index() < j : i) ; ++i)
{
- int a = LhsIsRowMajor ? j : i.index();
- int b = LhsIsRowMajor ? i.index() : j;
+ Index a = LhsIsRowMajor ? j : i.index();
+ Index b = LhsIsRowMajor ? i.index() : j;
typename Lhs::Scalar v = i.value();
dest.row(a) += (v) * m_rhs.row(b);
dest.row(b) += ei_conj(v) * m_rhs.row(a);
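
In the kernel above, a and b are simply the (row, column) position of the stored coefficient, flipped by storage order; typing them Index keeps them homogeneous with the outer counter j. The flip in isolation, as a sketch:

    typedef long Index;  // stand-in

    // row-major: the outer index is the row; column-major: it is the column.
    template<bool LhsIsRowMajor>
    void storedCoeffPosition(Index outer, Index inner, Index& row, Index& col)
    {
      row = LhsIsRowMajor ? outer : inner;
      col = LhsIsRowMajor ? inner : outer;
    }
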
diff --git a/Eigen/src/Sparse/SparseTranspose.h b/Eigen/src/Sparse/SparseTranspose.h
index b8c3861..a94f5ae 100644
--- a/Eigen/src/Sparse/SparseTranspose.h
+++ b/Eigen/src/Sparse/SparseTranspose.h
@@ -35,19 +35,19 @@
class InnerIterator;
class ReverseInnerIterator;
- inline int nonZeros() const { return derived().nestedExpression().nonZeros(); }
+ inline Index nonZeros() const { return derived().nestedExpression().nonZeros(); }
// FIXME should we keep them?
- inline Scalar& coeffRef(int row, int col)
+ inline Scalar& coeffRef(Index row, Index col)
{ return const_cast_derived().nestedExpression().coeffRef(col, row); }
- inline const Scalar coeff(int row, int col) const
+ inline const Scalar coeff(Index row, Index col) const
{ return derived().nestedExpression().coeff(col, row); }
- inline const Scalar coeff(int index) const
+ inline const Scalar coeff(Index index) const
{ return derived().nestedExpression().coeff(index); }
- inline Scalar& coeffRef(int index)
+ inline Scalar& coeffRef(Index index)
{ return const_cast_derived().nestedExpression().coeffRef(index); }
};
@@ -56,11 +56,11 @@
typedef typename MatrixType::InnerIterator Base;
public:
- EIGEN_STRONG_INLINE InnerIterator(const TransposeImpl& trans, int outer)
+ EIGEN_STRONG_INLINE InnerIterator(const TransposeImpl& trans, Index outer)
: Base(trans.derived().nestedExpression(), outer)
{}
- inline int row() const { return Base::col(); }
- inline int col() const { return Base::row(); }
+ inline Index row() const { return Base::col(); }
+ inline Index col() const { return Base::row(); }
};
template<typename MatrixType> class TransposeImpl<MatrixType,Sparse>::ReverseInnerIterator : public MatrixType::ReverseInnerIterator
@@ -68,11 +68,11 @@
typedef typename MatrixType::ReverseInnerIterator Base;
public:
- EIGEN_STRONG_INLINE ReverseInnerIterator(const TransposeImpl& xpr, int outer)
+ EIGEN_STRONG_INLINE ReverseInnerIterator(const TransposeImpl& xpr, Index outer)
: Base(xpr.derived().nestedExpression(), outer)
{}
- inline int row() const { return Base::col(); }
- inline int col() const { return Base::row(); }
+ inline Index row() const { return Base::col(); }
+ inline Index col() const { return Base::row(); }
};
#endif // EIGEN_SPARSETRANSPOSE_H
diff --git a/Eigen/src/Sparse/SparseTriangularView.h b/Eigen/src/Sparse/SparseTriangularView.h
index e713220..2588c36 100644
--- a/Eigen/src/Sparse/SparseTriangularView.h
+++ b/Eigen/src/Sparse/SparseTriangularView.h
@@ -38,11 +38,12 @@
public:
class InnerIterator;
+ typedef typename MatrixType::Scalar Scalar;
+ typedef typename MatrixType::Index Index;
- inline int rows() { return m_matrix.rows(); }
- inline int cols() { return m_matrix.cols(); }
+ inline Index rows() { return m_matrix.rows(); }
+ inline Index cols() { return m_matrix.cols(); }
- typedef typename ei_traits<MatrixType>::Scalar Scalar;
typedef typename ei_meta_if<ei_must_nest_by_value<MatrixType>::ret,
MatrixType, const MatrixType&>::ret MatrixTypeNested;
@@ -68,15 +69,15 @@
typedef typename MatrixType::InnerIterator Base;
public:
- EIGEN_STRONG_INLINE InnerIterator(const SparseTriangularView& view, int outer)
+ EIGEN_STRONG_INLINE InnerIterator(const SparseTriangularView& view, Index outer)
: Base(view.nestedExpression(), outer)
{
if(SkipFirst)
while((*this) && this->index()<outer)
++(*this);
}
- inline int row() const { return Base::row(); }
- inline int col() const { return Base::col(); }
+ inline Index row() const { return Base::row(); }
+ inline Index col() const { return Base::col(); }
EIGEN_STRONG_INLINE operator bool() const
{
diff --git a/Eigen/src/Sparse/SparseUtil.h b/Eigen/src/Sparse/SparseUtil.h
index 782690b..7f10040 100644
--- a/Eigen/src/Sparse/SparseUtil.h
+++ b/Eigen/src/Sparse/SparseUtil.h
@@ -57,16 +57,18 @@
EIGEN_SPARSE_INHERIT_SCALAR_ASSIGNMENT_OPERATOR(Derived, /=)
#define _EIGEN_SPARSE_GENERIC_PUBLIC_INTERFACE(Derived, BaseClass) \
-typedef BaseClass Base; \
-typedef typename Eigen::ei_traits<Derived>::Scalar Scalar; \
-typedef typename Eigen::NumTraits<Scalar>::Real RealScalar; \
-typedef typename Eigen::ei_nested<Derived>::type Nested; \
-enum { RowsAtCompileTime = Eigen::ei_traits<Derived>::RowsAtCompileTime, \
- ColsAtCompileTime = Eigen::ei_traits<Derived>::ColsAtCompileTime, \
- Flags = Eigen::ei_traits<Derived>::Flags, \
- CoeffReadCost = Eigen::ei_traits<Derived>::CoeffReadCost, \
- SizeAtCompileTime = Base::SizeAtCompileTime, \
- IsVectorAtCompileTime = Base::IsVectorAtCompileTime };
+ typedef BaseClass Base; \
+ typedef typename Eigen::ei_traits<Derived>::Scalar Scalar; \
+ typedef typename Eigen::NumTraits<Scalar>::Real RealScalar; \
+ typedef typename Eigen::ei_nested<Derived>::type Nested; \
+ typedef typename Eigen::ei_traits<Derived>::StorageKind StorageKind; \
+ typedef typename Eigen::ei_index<StorageKind>::type Index; \
+ enum { RowsAtCompileTime = Eigen::ei_traits<Derived>::RowsAtCompileTime, \
+ ColsAtCompileTime = Eigen::ei_traits<Derived>::ColsAtCompileTime, \
+ Flags = Eigen::ei_traits<Derived>::Flags, \
+ CoeffReadCost = Eigen::ei_traits<Derived>::CoeffReadCost, \
+ SizeAtCompileTime = Base::SizeAtCompileTime, \
+ IsVectorAtCompileTime = Base::IsVectorAtCompileTime };
#define EIGEN_SPARSE_GENERIC_PUBLIC_INTERFACE(Derived) \
_EIGEN_SPARSE_GENERIC_PUBLIC_INTERFACE(Derived, Eigen::SparseMatrixBase<Derived>)
@@ -76,6 +78,8 @@
typedef typename Eigen::ei_traits<Derived>::Scalar Scalar; \
typedef typename Eigen::NumTraits<Scalar>::Real RealScalar; \
typedef typename Eigen::ei_nested<Derived>::type Nested; \
+ typedef typename Eigen::ei_traits<Derived>::StorageKind StorageKind; \
+ typedef typename Eigen::ei_index<StorageKind>::type Index; \
enum { RowsAtCompileTime = Eigen::ei_traits<Derived>::RowsAtCompileTime, \
ColsAtCompileTime = Eigen::ei_traits<Derived>::ColsAtCompileTime, \
Flags = Eigen::ei_traits<Derived>::Flags, \
@@ -88,6 +92,12 @@
#define EIGEN_SPARSE_PUBLIC_INTERFACE(Derived) \
_EIGEN_SPARSE_PUBLIC_INTERFACE(Derived, Eigen::SparseMatrixBase<Derived>)
+template<>
+struct ei_index<Sparse>
+{ typedef EIGEN_DEFAULT_SPARSE_INDEX_TYPE type; };
+
+typedef typename ei_index<Sparse>::type SparseIndex;
+
enum SparseBackend {
DefaultBackend,
Taucs,
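
ei_index<Sparse> is pinned to EIGEN_DEFAULT_SPARSE_INDEX_TYPE; the macro's default lives elsewhere in this patch and is not shown here. Assuming it follows the usual convention for EIGEN_DEFAULT_* configuration macros (overridable before the first Eigen include), a user wanting 64-bit sparse indices would do something like:

    // sketch, assuming the macro is user-overridable like other EIGEN_DEFAULT_* macros
    #define EIGEN_DEFAULT_SPARSE_INDEX_TYPE long long
    #include <Eigen/Sparse>

    int main()
    {
      Eigen::SparseIndex n = 0;  // now an alias of long long
      return static_cast<int>(n);
    }
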
diff --git a/Eigen/src/Sparse/SparseVector.h b/Eigen/src/Sparse/SparseVector.h
index 6806ab2..4013b4d 100644
--- a/Eigen/src/Sparse/SparseVector.h
+++ b/Eigen/src/Sparse/SparseVector.h
@@ -70,33 +70,33 @@
enum { IsColVector = ei_traits<SparseVector>::IsColVector };
CompressedStorage<Scalar> m_data;
- int m_size;
+ Index m_size;
CompressedStorage<Scalar>& _data() { return m_data; }
const CompressedStorage<Scalar>& _data() const { return m_data; }
public:
- EIGEN_STRONG_INLINE int rows() const { return IsColVector ? m_size : 1; }
- EIGEN_STRONG_INLINE int cols() const { return IsColVector ? 1 : m_size; }
- EIGEN_STRONG_INLINE int innerSize() const { return m_size; }
- EIGEN_STRONG_INLINE int outerSize() const { return 1; }
- EIGEN_STRONG_INLINE int innerNonZeros(int j) const { ei_assert(j==0); return m_size; }
+ EIGEN_STRONG_INLINE Index rows() const { return IsColVector ? m_size : 1; }
+ EIGEN_STRONG_INLINE Index cols() const { return IsColVector ? 1 : m_size; }
+ EIGEN_STRONG_INLINE Index innerSize() const { return m_size; }
+ EIGEN_STRONG_INLINE Index outerSize() const { return 1; }
+ EIGEN_STRONG_INLINE Index innerNonZeros(Index j) const { ei_assert(j==0); return m_size; }
EIGEN_STRONG_INLINE const Scalar* _valuePtr() const { return &m_data.value(0); }
EIGEN_STRONG_INLINE Scalar* _valuePtr() { return &m_data.value(0); }
- EIGEN_STRONG_INLINE const int* _innerIndexPtr() const { return &m_data.index(0); }
- EIGEN_STRONG_INLINE int* _innerIndexPtr() { return &m_data.index(0); }
+ EIGEN_STRONG_INLINE const Index* _innerIndexPtr() const { return &m_data.index(0); }
+ EIGEN_STRONG_INLINE Index* _innerIndexPtr() { return &m_data.index(0); }
- inline Scalar coeff(int row, int col) const
+ inline Scalar coeff(Index row, Index col) const
{
ei_assert((IsColVector ? col : row)==0);
return coeff(IsColVector ? row : col);
}
- inline Scalar coeff(int i) const { return m_data.at(i); }
+ inline Scalar coeff(Index i) const { return m_data.at(i); }
- inline Scalar& coeffRef(int row, int col)
+ inline Scalar& coeffRef(Index row, Index col)
{
ei_assert((IsColVector ? col : row)==0);
return coeffRef(IsColVector ? row : col);
@@ -108,7 +108,7 @@
*
* This insertion might be very costly if the number of nonzeros above \a i is large.
*/
- inline Scalar& coeffRef(int i)
+ inline Scalar& coeffRef(Index i)
{
return m_data.atWithInsertion(i);
}
@@ -120,33 +120,33 @@
inline void setZero() { m_data.clear(); }
/** \returns the number of non zero coefficients */
- inline int nonZeros() const { return static_cast<int>(m_data.size()); }
+ inline Index nonZeros() const { return static_cast<Index>(m_data.size()); }
- inline void startVec(int outer)
+ inline void startVec(Index outer)
{
ei_assert(outer==0);
}
- inline Scalar& insertBack(int outer, int inner)
+ inline Scalar& insertBack(Index outer, Index inner)
{
ei_assert(outer==0);
return insertBack(inner);
}
- inline Scalar& insertBack(int i)
+ inline Scalar& insertBack(Index i)
{
m_data.append(0, i);
return m_data.value(m_data.size()-1);
}
- inline Scalar& insert(int outer, int inner)
+ inline Scalar& insert(Index outer, Index inner)
{
ei_assert(outer==0);
return insert(inner);
}
- Scalar& insert(int i)
+ Scalar& insert(Index i)
{
- int startId = 0;
- int id = m_data.size() - 1;
+ Index startId = 0;
+ Index id = m_data.size() - 1;
// TODO smart realloc
m_data.resize(id+2,1);
@@ -163,38 +163,38 @@
/**
*/
- inline void reserve(int reserveSize) { m_data.reserve(reserveSize); }
+ inline void reserve(Index reserveSize) { m_data.reserve(reserveSize); }
/** \deprecated use setZero() and reserve() */
- EIGEN_DEPRECATED void startFill(int reserve)
+ EIGEN_DEPRECATED void startFill(Index reserve)
{
setZero();
m_data.reserve(reserve);
}
- /** \deprecated use insertBack(int,int) */
- EIGEN_DEPRECATED Scalar& fill(int r, int c)
+ /** \deprecated use insertBack(Index,Index) */
+ EIGEN_DEPRECATED Scalar& fill(Index r, Index c)
{
ei_assert(r==0 || c==0);
return fill(IsColVector ? r : c);
}
- /** \deprecated use insertBack(int) */
- EIGEN_DEPRECATED Scalar& fill(int i)
+ /** \deprecated use insertBack(Index) */
+ EIGEN_DEPRECATED Scalar& fill(Index i)
{
m_data.append(0, i);
return m_data.value(m_data.size()-1);
}
- /** \deprecated use insert(int,int) */
- EIGEN_DEPRECATED Scalar& fillrand(int r, int c)
+ /** \deprecated use insert(Index,Index) */
+ EIGEN_DEPRECATED Scalar& fillrand(Index r, Index c)
{
ei_assert(r==0 || c==0);
return fillrand(IsColVector ? r : c);
}
- /** \deprecated use insert(int) */
- EIGEN_DEPRECATED Scalar& fillrand(int i)
+ /** \deprecated use insert(Index) */
+ EIGEN_DEPRECATED Scalar& fillrand(Index i)
{
return insert(i);
}
@@ -208,25 +208,25 @@
m_data.prune(reference,epsilon);
}
- void resize(int rows, int cols)
+ void resize(Index rows, Index cols)
{
ei_assert(rows==1 || cols==1);
resize(IsColVector ? rows : cols);
}
- void resize(int newSize)
+ void resize(Index newSize)
{
m_size = newSize;
m_data.clear();
}
- void resizeNonZeros(int size) { m_data.resize(size); }
+ void resizeNonZeros(Index size) { m_data.resize(size); }
inline SparseVector() : m_size(0) { resize(0); }
- inline SparseVector(int size) : m_size(0) { resize(size); }
+ inline SparseVector(Index size) : m_size(0) { resize(size); }
- inline SparseVector(int rows, int cols) : m_size(0) { resize(rows,cols); }
+ inline SparseVector(Index rows, Index cols) : m_size(0) { resize(rows,cols); }
template<typename OtherDerived>
inline SparseVector(const MatrixBase<OtherDerived>& other)
@@ -329,7 +329,7 @@
friend std::ostream & operator << (std::ostream & s, const SparseVector& m)
{
- for (unsigned int i=0; i<m.nonZeros(); ++i)
+ for (Index i=0; i<m.nonZeros(); ++i)
s << "(" << m.m_data.value(i) << "," << m.m_data.index(i) << ") ";
s << std::endl;
return s;
@@ -368,18 +368,18 @@
class SparseVector<Scalar,_Options>::InnerIterator
{
public:
- InnerIterator(const SparseVector& vec, int outer=0)
- : m_data(vec.m_data), m_id(0), m_end(static_cast<int>(m_data.size()))
+ InnerIterator(const SparseVector& vec, Index outer=0)
+ : m_data(vec.m_data), m_id(0), m_end(static_cast<Index>(m_data.size()))
{
ei_assert(outer==0);
}
InnerIterator(const CompressedStorage<Scalar>& data)
- : m_data(data), m_id(0), m_end(static_cast<int>(m_data.size()))
+ : m_data(data), m_id(0), m_end(static_cast<Index>(m_data.size()))
{}
template<unsigned int Added, unsigned int Removed>
- InnerIterator(const Flagged<SparseVector,Added,Removed>& vec, int )
+ InnerIterator(const Flagged<SparseVector,Added,Removed>& vec, Index )
: m_data(vec._expression().m_data), m_id(0), m_end(m_data.size())
{}
@@ -388,16 +388,16 @@
inline Scalar value() const { return m_data.value(m_id); }
inline Scalar& valueRef() { return const_cast<Scalar&>(m_data.value(m_id)); }
- inline int index() const { return m_data.index(m_id); }
- inline int row() const { return IsColVector ? index() : 0; }
- inline int col() const { return IsColVector ? 0 : index(); }
+ inline Index index() const { return m_data.index(m_id); }
+ inline Index row() const { return IsColVector ? index() : 0; }
+ inline Index col() const { return IsColVector ? 0 : index(); }
inline operator bool() const { return (m_id < m_end); }
protected:
const CompressedStorage<Scalar>& m_data;
- int m_id;
- const int m_end;
+ Index m_id;
+ const Index m_end;
};
#endif // EIGEN_SPARSEVECTOR_H
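
For anyone tracking the API fallout of the SparseVector changes: index(), row() and col() on the iterator now return Index, so downstream code holding these in int may narrow. A minimal usage sketch, assuming the patched headers are installed:

    #include <Eigen/Sparse>
    #include <iostream>

    int main()
    {
      Eigen::SparseVector<double> v(10);
      v.insert(3) = 1.5;
      v.insert(7) = -2.0;
      for (Eigen::SparseVector<double>::InnerIterator it(v); it; ++it)
      {
        // it.index() is now SparseVector::Index, not int
        Eigen::SparseVector<double>::Index i = it.index();
        std::cout << i << " -> " << it.value() << "\n";
      }
      return 0;
    }
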
diff --git a/Eigen/src/misc/Image.h b/Eigen/src/misc/Image.h
index 1d63d81..32392fd 100644
--- a/Eigen/src/misc/Image.h
+++ b/Eigen/src/misc/Image.h
@@ -48,6 +48,8 @@
{
typedef _DecompositionType DecompositionType;
typedef typename DecompositionType::MatrixType MatrixType;
+ typedef ReturnByValue<ei_image_retval_base> Base;
+ typedef typename Base::Index Index;
ei_image_retval_base(const DecompositionType& dec, const MatrixType& originalMatrix)
: m_dec(dec), m_rank(dec.rank()),
@@ -55,9 +57,9 @@
m_originalMatrix(originalMatrix)
{}
- inline int rows() const { return m_dec.rows(); }
- inline int cols() const { return m_cols; }
- inline int rank() const { return m_rank; }
+ inline Index rows() const { return m_dec.rows(); }
+ inline Index cols() const { return m_cols; }
+ inline Index rank() const { return m_rank; }
inline const DecompositionType& dec() const { return m_dec; }
inline const MatrixType& originalMatrix() const { return m_originalMatrix; }
@@ -68,7 +70,7 @@
protected:
const DecompositionType& m_dec;
- int m_rank, m_cols;
+ Index m_rank, m_cols;
const MatrixType& m_originalMatrix;
};
@@ -76,6 +78,7 @@
typedef typename DecompositionType::MatrixType MatrixType; \
typedef typename MatrixType::Scalar Scalar; \
typedef typename MatrixType::RealScalar RealScalar; \
+ typedef typename MatrixType::Index Index; \
typedef ei_image_retval_base<DecompositionType> Base; \
using Base::dec; \
using Base::originalMatrix; \
diff --git a/Eigen/src/misc/Kernel.h b/Eigen/src/misc/Kernel.h
index 497b42e..38a2d40 100644
--- a/Eigen/src/misc/Kernel.h
+++ b/Eigen/src/misc/Kernel.h
@@ -49,6 +49,8 @@
: public ReturnByValue<ei_kernel_retval_base<_DecompositionType> >
{
typedef _DecompositionType DecompositionType;
+ typedef ReturnByValue<ei_kernel_retval_base> Base;
+ typedef typename Base::Index Index;
ei_kernel_retval_base(const DecompositionType& dec)
: m_dec(dec),
@@ -56,9 +58,9 @@
m_cols(m_rank==dec.cols() ? 1 : dec.cols() - m_rank)
{}
- inline int rows() const { return m_dec.cols(); }
- inline int cols() const { return m_cols; }
- inline int rank() const { return m_rank; }
+ inline Index rows() const { return m_dec.cols(); }
+ inline Index cols() const { return m_cols; }
+ inline Index rank() const { return m_rank; }
inline const DecompositionType& dec() const { return m_dec; }
template<typename Dest> inline void evalTo(Dest& dst) const
@@ -68,13 +70,14 @@
protected:
const DecompositionType& m_dec;
- int m_rank, m_cols;
+ Index m_rank, m_cols;
};
#define EIGEN_MAKE_KERNEL_HELPERS(DecompositionType) \
typedef typename DecompositionType::MatrixType MatrixType; \
typedef typename MatrixType::Scalar Scalar; \
typedef typename MatrixType::RealScalar RealScalar; \
+ typedef typename MatrixType::Index Index; \
typedef ei_kernel_retval_base<DecompositionType> Base; \
using Base::dec; \
using Base::rank; \
diff --git a/Eigen/src/misc/Solve.h b/Eigen/src/misc/Solve.h
index 028716a..d6fc674 100644
--- a/Eigen/src/misc/Solve.h
+++ b/Eigen/src/misc/Solve.h
@@ -45,13 +45,15 @@
{
typedef typename ei_cleantype<typename Rhs::Nested>::type RhsNestedCleaned;
typedef _DecompositionType DecompositionType;
+ typedef ReturnByValue<ei_solve_retval_base> Base;
+ typedef typename Base::Index Index;
ei_solve_retval_base(const DecompositionType& dec, const Rhs& rhs)
: m_dec(dec), m_rhs(rhs)
{}
- inline int rows() const { return m_dec.cols(); }
- inline int cols() const { return m_rhs.cols(); }
+ inline Index rows() const { return m_dec.cols(); }
+ inline Index cols() const { return m_rhs.cols(); }
inline const DecompositionType& dec() const { return m_dec; }
inline const RhsNestedCleaned& rhs() const { return m_rhs; }
@@ -69,6 +71,7 @@
typedef typename DecompositionType::MatrixType MatrixType; \
typedef typename MatrixType::Scalar Scalar; \
typedef typename MatrixType::RealScalar RealScalar; \
+ typedef typename MatrixType::Index Index; \
typedef ei_solve_retval_base<DecompositionType,Rhs> Base; \
using Base::dec; \
using Base::rhs; \
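
Image, Kernel and Solve all acquire Index the same way: the ReturnByValue base is typedef'd to Base and Index is pulled off it, so a decomposition's return object reports rows()/cols() in the same type as the expression it evaluates into. A reduced sketch of the pattern (ei_traits carrying Index directly is an assumption of the sketch, not the actual plumbing):

    template<typename Derived> struct ei_traits;

    template<typename Derived>
    struct ReturnByValueSketch
    {
      typedef typename ei_traits<Derived>::Index Index;
      Index rows() const { return static_cast<const Derived&>(*this).rows(); }
      Index cols() const { return static_cast<const Derived&>(*this).cols(); }
    };

    struct SolveRetvalSketch;
    template<> struct ei_traits<SolveRetvalSketch> { typedef long Index; };

    struct SolveRetvalSketch : ReturnByValueSketch<SolveRetvalSketch>
    {
      typedef ReturnByValueSketch<SolveRetvalSketch> Base;  // mirrors "typedef ReturnByValue<...> Base;"
      typedef Base::Index Index;                            // mirrors "typedef typename Base::Index Index;"
      Index rows() const { return 3; }
      Index cols() const { return 1; }
    };
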
diff --git a/test/array.cpp b/test/array.cpp
index 8006531..df1e1b4 100644
--- a/test/array.cpp
+++ b/test/array.cpp
@@ -127,9 +127,12 @@
// count
VERIFY(((m1.abs()+1)>RealScalar(0.1)).count() == rows*cols);
+
+ typedef Array<typename ArrayType::Index, Dynamic, 1> ArrayOfIndices;
+
// TODO allows colwise/rowwise for array
- VERIFY_IS_APPROX(((m1.abs()+1)>RealScalar(0.1)).colwise().count(), ArrayXi::Constant(cols,rows).transpose());
- VERIFY_IS_APPROX(((m1.abs()+1)>RealScalar(0.1)).rowwise().count(), ArrayXi::Constant(rows, cols));
+ VERIFY_IS_APPROX(((m1.abs()+1)>RealScalar(0.1)).colwise().count(), ArrayOfIndices::Constant(cols,rows).transpose());
+ VERIFY_IS_APPROX(((m1.abs()+1)>RealScalar(0.1)).rowwise().count(), ArrayOfIndices::Constant(rows, cols));
}
template<typename ArrayType> void array_real(const ArrayType& m)
diff --git a/test/array_for_matrix.cpp b/test/array_for_matrix.cpp
index 516c040..477d178 100644
--- a/test/array_for_matrix.cpp
+++ b/test/array_for_matrix.cpp
@@ -124,9 +124,12 @@
// count
VERIFY(((m1.array().abs()+1)>RealScalar(0.1)).count() == rows*cols);
+
+ typedef Matrix<typename MatrixType::Index, Dynamic, 1> VectorOfIndices;
+
// TODO allows colwise/rowwise for array
- VERIFY_IS_APPROX(((m1.array().abs()+1)>RealScalar(0.1)).matrix().colwise().count(), RowVectorXi::Constant(cols,rows));
- VERIFY_IS_APPROX(((m1.array().abs()+1)>RealScalar(0.1)).matrix().rowwise().count(), VectorXi::Constant(rows, cols));
+ VERIFY_IS_APPROX(((m1.array().abs()+1)>RealScalar(0.1)).matrix().colwise().count(), VectorOfIndices::Constant(cols,rows).transpose());
+ VERIFY_IS_APPROX(((m1.array().abs()+1)>RealScalar(0.1)).matrix().rowwise().count(), VectorOfIndices::Constant(rows, cols));
}
template<typename VectorType> void lpNorm(const VectorType& v)
diff --git a/test/nomalloc.cpp b/test/nomalloc.cpp
index a80145f..9eb8d13 100644
--- a/test/nomalloc.cpp
+++ b/test/nomalloc.cpp
@@ -133,7 +133,7 @@
void test_nomalloc()
{
// check that our operator new is indeed called:
- VERIFY_RAISES_ASSERT(MatrixXd dummy = MatrixXd::Random(3,3));
+ VERIFY_RAISES_ASSERT(MatrixXd dummy(MatrixXd::Random(3,3)));
CALL_SUBTEST_1(nomalloc(Matrix<float, 1, 1>()) );
CALL_SUBTEST_2(nomalloc(Matrix4d()) );
CALL_SUBTEST_3(nomalloc(Matrix<float,32,32>()) );
diff --git a/test/qr_colpivoting.cpp b/test/qr_colpivoting.cpp
index a34feed..7064bc2 100644
--- a/test/qr_colpivoting.cpp
+++ b/test/qr_colpivoting.cpp
@@ -38,7 +38,7 @@
MatrixType m1;
createRandomPIMatrixOfRank(rank,rows,cols,m1);
ColPivHouseholderQR<MatrixType> qr(m1);
- VERIFY_IS_APPROX(rank, qr.rank());
+ VERIFY(rank == qr.rank());
VERIFY(cols - qr.rank() == qr.dimensionOfKernel());
VERIFY(!qr.isInjective());
VERIFY(!qr.isInvertible());
@@ -66,7 +66,7 @@
Matrix<Scalar,Rows,Cols> m1;
createRandomPIMatrixOfRank(rank,Rows,Cols,m1);
ColPivHouseholderQR<Matrix<Scalar,Rows,Cols> > qr(m1);
- VERIFY_IS_APPROX(rank, qr.rank());
+ VERIFY(rank == qr.rank());
VERIFY(Cols - qr.rank() == qr.dimensionOfKernel());
VERIFY(qr.isInjective() == (rank == Rows));
VERIFY(qr.isSurjective() == (rank == Cols));
diff --git a/test/qr_fullpivoting.cpp b/test/qr_fullpivoting.cpp
index 82c42c7..33350ce 100644
--- a/test/qr_fullpivoting.cpp
+++ b/test/qr_fullpivoting.cpp
@@ -37,7 +37,7 @@
MatrixType m1;
createRandomPIMatrixOfRank(rank,rows,cols,m1);
FullPivHouseholderQR<MatrixType> qr(m1);
- VERIFY_IS_APPROX(rank, qr.rank());
+ VERIFY(rank == qr.rank());
VERIFY(cols - qr.rank() == qr.dimensionOfKernel());
VERIFY(!qr.isInjective());
VERIFY(!qr.isInvertible());
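
On the test changes here and in qr_colpivoting.cpp above: rank() now returns an Index, an exact integer, so the fuzzy VERIFY_IS_APPROX check was never the right tool (and need not even instantiate for an integer type); exact equality is the correct comparison. The distinction, sketched:

    #include <algorithm>
    #include <cassert>
    #include <cmath>

    // what an isApprox-style check means for reals (a sketch, not Eigen's exact formula)
    bool isApproxSketch(double a, double b, double eps = 1e-12)
    { return std::abs(a - b) <= eps * std::max(std::abs(a), std::abs(b)); }

    int main()
    {
      assert(isApproxSketch(1.0, 1.0 + 1e-15));  // sensible for floating point
      long rank = 3, expected = 3;               // ranks are exact integers...
      assert(rank == expected);                  // ...so the tests now use ==
      return 0;
    }
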
diff --git a/test/sizeof.cpp b/test/sizeof.cpp
index a724359..779f3b5 100644
--- a/test/sizeof.cpp
+++ b/test/sizeof.cpp
@@ -30,7 +30,7 @@
if (MatrixType::RowsAtCompileTime!=Dynamic && MatrixType::ColsAtCompileTime!=Dynamic)
VERIFY(sizeof(MatrixType)==sizeof(Scalar)*MatrixType::SizeAtCompileTime);
else
- VERIFY(sizeof(MatrixType)==sizeof(Scalar*) + 2 * sizeof(int));
+ VERIFY(sizeof(MatrixType)==sizeof(Scalar*) + 2 * sizeof(typename MatrixType::Index));
}
void test_sizeof()
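
The sizeof check encodes the expected layout of a dynamic-size matrix: one data pointer plus two Index-typed dimensions, so the test must name the same Index type as the matrix. The invariant being asserted, restated standalone (DenseIndex here is an assumption for whatever MatrixType::Index resolves to):

    #include <cassert>
    #include <cstddef>

    typedef std::ptrdiff_t DenseIndex;  // assumption: what MatrixType::Index resolves to

    // expected storage of a dynamic-size matrix: one pointer plus two dimensions
    template<typename Scalar>
    struct DynamicMatrixLayoutSketch
    {
      Scalar*    data;
      DenseIndex rows;
      DenseIndex cols;
    };

    int main()
    {
      // the invariant test/sizeof.cpp asserts, restated (ignoring padding corner
      // cases that can arise when pointer and index widths differ):
      assert(sizeof(DynamicMatrixLayoutSketch<double>)
             == sizeof(double*) + 2 * sizeof(DenseIndex));
      return 0;
    }
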
diff --git a/test/visitor.cpp b/test/visitor.cpp
index 65ee60b..1ddabc6 100644
--- a/test/visitor.cpp
+++ b/test/visitor.cpp
@@ -27,22 +27,23 @@
template<typename MatrixType> void matrixVisitor(const MatrixType& p)
{
typedef typename MatrixType::Scalar Scalar;
+ typedef typename MatrixType::Index Index;
- int rows = p.rows();
- int cols = p.cols();
+ Index rows = p.rows();
+ Index cols = p.cols();
// construct a random matrix where all coefficients are different
MatrixType m;
m = MatrixType::Random(rows, cols);
- for(int i = 0; i < m.size(); i++)
- for(int i2 = 0; i2 < i; i2++)
+ for(Index i = 0; i < m.size(); i++)
+ for(Index i2 = 0; i2 < i; i2++)
while(m(i) == m(i2)) // yes, ==
m(i) = ei_random<Scalar>();
Scalar minc = Scalar(1000), maxc = Scalar(-1000);
- int minrow=0,mincol=0,maxrow=0,maxcol=0;
- for(int j = 0; j < cols; j++)
- for(int i = 0; i < rows; i++)
+ Index minrow=0,mincol=0,maxrow=0,maxcol=0;
+ for(Index j = 0; j < cols; j++)
+ for(Index i = 0; i < rows; i++)
{
if(m(i,j) < minc)
{
@@ -57,7 +58,7 @@
maxcol = j;
}
}
- int eigen_minrow, eigen_mincol, eigen_maxrow, eigen_maxcol;
+ Index eigen_minrow, eigen_mincol, eigen_maxrow, eigen_maxcol;
Scalar eigen_minc, eigen_maxc;
eigen_minc = m.minCoeff(&eigen_minrow,&eigen_mincol);
eigen_maxc = m.maxCoeff(&eigen_maxrow,&eigen_maxcol);
@@ -74,20 +75,21 @@
template<typename VectorType> void vectorVisitor(const VectorType& w)
{
typedef typename VectorType::Scalar Scalar;
+ typedef typename VectorType::Index Index;
- int size = w.size();
+ Index size = w.size();
// construct a random vector where all coefficients are different
VectorType v;
v = VectorType::Random(size);
- for(int i = 0; i < size; i++)
- for(int i2 = 0; i2 < i; i2++)
+ for(Index i = 0; i < size; i++)
+ for(Index i2 = 0; i2 < i; i2++)
while(v(i) == v(i2)) // yes, ==
v(i) = ei_random<Scalar>();
Scalar minc = Scalar(1000), maxc = Scalar(-1000);
- int minidx=0,maxidx=0;
- for(int i = 0; i < size; i++)
+ Index minidx=0,maxidx=0;
+ for(Index i = 0; i < size; i++)
{
if(v(i) < minc)
{
@@ -100,7 +102,7 @@
maxidx = i;
}
}
- int eigen_minidx, eigen_maxidx;
+ Index eigen_minidx, eigen_maxidx;
Scalar eigen_minc, eigen_maxc;
eigen_minc = v.minCoeff(&eigen_minidx);
eigen_maxc = v.maxCoeff(&eigen_maxidx);
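
The visitor tests show the user-facing consequence: the minCoeff/maxCoeff overloads taking out-parameters now want pointers to Index, not int. A usage sketch, assuming the patched headers:

    #include <Eigen/Dense>
    #include <iostream>

    int main()
    {
      Eigen::MatrixXd m = Eigen::MatrixXd::Random(4, 4);
      Eigen::MatrixXd::Index row, col;  // was: int row, col;
      double minc = m.minCoeff(&row, &col);
      std::cout << "min " << minc << " at (" << row << "," << col << ")\n";
      return 0;
    }
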
diff --git a/unsupported/Eigen/src/AutoDiff/AutoDiffJacobian.h b/unsupported/Eigen/src/AutoDiff/AutoDiffJacobian.h
index b3983f8..6a6516c 100644
--- a/unsupported/Eigen/src/AutoDiff/AutoDiffJacobian.h
+++ b/unsupported/Eigen/src/AutoDiff/AutoDiffJacobian.h
@@ -51,6 +51,7 @@
typedef typename Functor::ValueType ValueType;
typedef typename Functor::JacobianType JacobianType;
typedef typename JacobianType::Scalar Scalar;
+ typedef typename JacobianType::Index Index;
typedef Matrix<Scalar,InputsAtCompileTime,1> DerivativeType;
typedef AutoDiffScalar<DerivativeType> ActiveScalar;
@@ -74,15 +75,15 @@
ActiveValue av(jac.rows());
if(InputsAtCompileTime==Dynamic)
- for (int j=0; j<jac.rows(); j++)
+ for (Index j=0; j<jac.rows(); j++)
av[j].derivatives().resize(this->inputs());
- for (int i=0; i<jac.cols(); i++)
+ for (Index i=0; i<jac.cols(); i++)
ax[i].derivatives() = DerivativeType::Unit(this->inputs(),i);
Functor::operator()(ax, &av);
- for (int i=0; i<jac.rows(); i++)
+ for (Index i=0; i<jac.rows(); i++)
{
(*v)[i] = av[i].value();
jac.row(i) = av[i].derivatives();
diff --git a/unsupported/Eigen/src/AutoDiff/AutoDiffVector.h b/unsupported/Eigen/src/AutoDiff/AutoDiffVector.h
index c0765d4..9b9dbcc 100644
--- a/unsupported/Eigen/src/AutoDiff/AutoDiffVector.h
+++ b/unsupported/Eigen/src/AutoDiff/AutoDiffVector.h
@@ -53,6 +53,7 @@
typedef AutoDiffScalar<Matrix<BaseScalar,JacobianType::RowsAtCompileTime,1> > ActiveScalar;
typedef ActiveScalar Scalar;
typedef AutoDiffScalar<typename JacobianType::ColXpr> CoeffType;
+ typedef typename JacobianType::Index Index;
inline AutoDiffVector() {}
@@ -63,16 +64,16 @@
}
- CoeffType operator[] (int i) { return CoeffType(m_values[i], m_jacobian.col(i)); }
- const CoeffType operator[] (int i) const { return CoeffType(m_values[i], m_jacobian.col(i)); }
+ CoeffType operator[] (Index i) { return CoeffType(m_values[i], m_jacobian.col(i)); }
+ const CoeffType operator[] (Index i) const { return CoeffType(m_values[i], m_jacobian.col(i)); }
- CoeffType operator() (int i) { return CoeffType(m_values[i], m_jacobian.col(i)); }
- const CoeffType operator() (int i) const { return CoeffType(m_values[i], m_jacobian.col(i)); }
+ CoeffType operator() (Index i) { return CoeffType(m_values[i], m_jacobian.col(i)); }
+ const CoeffType operator() (Index i) const { return CoeffType(m_values[i], m_jacobian.col(i)); }
- CoeffType coeffRef(int i) { return CoeffType(m_values[i], m_jacobian.col(i)); }
- const CoeffType coeffRef(int i) const { return CoeffType(m_values[i], m_jacobian.col(i)); }
+ CoeffType coeffRef(Index i) { return CoeffType(m_values[i], m_jacobian.col(i)); }
+ const CoeffType coeffRef(Index i) const { return CoeffType(m_values[i], m_jacobian.col(i)); }
- int size() const { return m_values.size(); }
+ Index size() const { return m_values.size(); }
// FIXME here we could return an expression of the sum
Scalar sum() const { /*std::cerr << "sum \n\n";*/ /*std::cerr << m_jacobian.rowwise().sum() << "\n\n";*/ return Scalar(m_values.sum(), m_jacobian.rowwise().sum()); }
diff --git a/unsupported/Eigen/src/IterativeSolvers/ConstrainedConjGrad.h b/unsupported/Eigen/src/IterativeSolvers/ConstrainedConjGrad.h
index c2b7763..8f2a353 100644
--- a/unsupported/Eigen/src/IterativeSolvers/ConstrainedConjGrad.h
+++ b/unsupported/Eigen/src/IterativeSolvers/ConstrainedConjGrad.h
@@ -61,17 +61,18 @@
{
// optimizable: copy of the row, precompute C * trans(C).
typedef typename CMatrix::Scalar Scalar;
+ typedef typename CMatrix::Index Index;
// FIXME use sparse vectors ?
typedef Matrix<Scalar,Dynamic,1> TmpVec;
- int rows = C.rows(), cols = C.cols();
+ Index rows = C.rows(), cols = C.cols();
TmpVec d(rows), e(rows), l(cols), p(rows), q(rows), r(rows);
Scalar rho, rho_1, alpha;
d.setZero();
CINV.startFill(); // FIXME estimate the number of non-zeros
- for (int i = 0; i < rows; ++i)
+ for (Index i = 0; i < rows; ++i)
{
d[i] = 1.0;
rho = 1.0;
@@ -94,7 +95,7 @@
l = C.transpose() * e; // l is the i-th row of CINV
// FIXME add a generic "prune/filter" expression converting both dense and sparse objects to sparse
- for (int j=0; j<l.size(); ++j)
+ for (Index j=0; j<l.size(); ++j)
if (l[j]<1e-15)
CINV.fill(i,j) = l[j];
@@ -116,10 +117,11 @@
const VectorB& b, const VectorF& f, IterationController &iter)
{
typedef typename TMatrix::Scalar Scalar;
+ typedef typename TMatrix::Index Index;
typedef Matrix<Scalar,Dynamic,1> TmpVec;
Scalar rho = 1.0, rho_1, lambda, gamma;
- int xSize = x.size();
+ Index xSize = x.size();
TmpVec p(xSize), q(xSize), q2(xSize),
r(xSize), old_z(xSize), z(xSize),
memox(xSize);
@@ -140,7 +142,7 @@
r += A * -x;
z = r;
bool transition = false;
- for (int i = 0; i < C.rows(); ++i)
+ for (Index i = 0; i < C.rows(); ++i)
{
Scalar al = C.row(i).dot(x) - f.coeff(i);
if (al >= -1.0E-15)
@@ -175,7 +177,7 @@
// one-dimensional optimization
q = A * p;
lambda = rho / q.dot(p);
- for (int i = 0; i < C.rows(); ++i)
+ for (Index i = 0; i < C.rows(); ++i)
{
if (!satured[i])
{
diff --git a/unsupported/Eigen/src/MatrixFunctions/MatrixExponential.h b/unsupported/Eigen/src/MatrixFunctions/MatrixExponential.h
index d7eb1b2..5d47f2c 100644
--- a/unsupported/Eigen/src/MatrixFunctions/MatrixExponential.h
+++ b/unsupported/Eigen/src/MatrixFunctions/MatrixExponential.h
@@ -296,6 +296,7 @@
template<typename Derived> struct MatrixExponentialReturnValue
: public ReturnByValue<MatrixExponentialReturnValue<Derived> >
{
+ typedef typename Derived::Index Index;
public:
/** \brief Constructor.
*
@@ -317,8 +318,8 @@
me.compute(result);
}
- int rows() const { return m_src.rows(); }
- int cols() const { return m_src.cols(); }
+ Index rows() const { return m_src.rows(); }
+ Index cols() const { return m_src.cols(); }
protected:
const Derived& m_src;
diff --git a/unsupported/Eigen/src/MatrixFunctions/MatrixFunction.h b/unsupported/Eigen/src/MatrixFunctions/MatrixFunction.h
index 8ed8567..be27c50 100644
--- a/unsupported/Eigen/src/MatrixFunctions/MatrixFunction.h
+++ b/unsupported/Eigen/src/MatrixFunctions/MatrixFunction.h
@@ -131,14 +131,15 @@
private:
typedef ei_traits<MatrixType> Traits;
- typedef typename Traits::Scalar Scalar;
+ typedef typename MatrixType::Scalar Scalar;
+ typedef typename MatrixType::Index Index;
static const int RowsAtCompileTime = Traits::RowsAtCompileTime;
static const int ColsAtCompileTime = Traits::ColsAtCompileTime;
static const int Options = MatrixType::Options;
typedef typename NumTraits<Scalar>::Real RealScalar;
typedef typename ei_stem_function<Scalar>::type StemFunction;
typedef Matrix<Scalar, Traits::RowsAtCompileTime, 1> VectorType;
- typedef Matrix<int, Traits::RowsAtCompileTime, 1> IntVectorType;
+ typedef Matrix<Index, Traits::RowsAtCompileTime, 1> IntVectorType;
typedef std::list<Scalar> Cluster;
typedef std::list<Cluster> ListOfClusters;
typedef Matrix<Scalar, Dynamic, Dynamic, Options, RowsAtCompileTime, ColsAtCompileTime> DynMatrixType;
@@ -157,9 +158,9 @@
void computeBlockStart();
void constructPermutation();
void permuteSchur();
- void swapEntriesInSchur(int index);
+ void swapEntriesInSchur(Index index);
void computeBlockAtomic();
- Block<MatrixType> block(const MatrixType& A, int i, int j);
+ Block<MatrixType> block(const MatrixType& A, Index i, Index j);
void computeOffDiagonal();
DynMatrixType solveTriangularSylvester(const DynMatrixType& A, const DynMatrixType& B, const DynMatrixType& C);
@@ -238,10 +239,10 @@
template <typename MatrixType>
void MatrixFunction<MatrixType,1>::partitionEigenvalues()
{
- const int rows = m_T.rows();
+ const Index rows = m_T.rows();
VectorType diag = m_T.diagonal(); // contains eigenvalues of A
- for (int i=0; i<rows; ++i) {
+ for (Index i=0; i<rows; ++i) {
// Find set containing diag(i), adding a new set if necessary
typename ListOfClusters::iterator qi = findCluster(diag(i));
if (qi == m_clusters.end()) {
@@ -253,7 +254,7 @@
}
// Look for other element to add to the set
- for (int j=i+1; j<rows; ++j) {
+ for (Index j=i+1; j<rows; ++j) {
if (ei_abs(diag(j) - diag(i)) <= separation() && std::find(qi->begin(), qi->end(), diag(j)) == qi->end()) {
typename ListOfClusters::iterator qj = findCluster(diag(j));
if (qj == m_clusters.end()) {
@@ -288,15 +289,15 @@
template <typename MatrixType>
void MatrixFunction<MatrixType,1>::computeClusterSize()
{
- const int rows = m_T.rows();
+ const Index rows = m_T.rows();
VectorType diag = m_T.diagonal();
- const int numClusters = static_cast<int>(m_clusters.size());
+ const Index numClusters = static_cast<Index>(m_clusters.size());
m_clusterSize.setZero(numClusters);
m_eivalToCluster.resize(rows);
- int clusterIndex = 0;
+ Index clusterIndex = 0;
for (typename ListOfClusters::const_iterator cluster = m_clusters.begin(); cluster != m_clusters.end(); ++cluster) {
- for (int i = 0; i < diag.rows(); ++i) {
+ for (Index i = 0; i < diag.rows(); ++i) {
if (std::find(cluster->begin(), cluster->end(), diag(i)) != cluster->end()) {
++m_clusterSize[clusterIndex];
m_eivalToCluster[i] = clusterIndex;
@@ -312,7 +313,7 @@
{
m_blockStart.resize(m_clusterSize.rows());
m_blockStart(0) = 0;
- for (int i = 1; i < m_clusterSize.rows(); i++) {
+ for (Index i = 1; i < m_clusterSize.rows(); i++) {
m_blockStart(i) = m_blockStart(i-1) + m_clusterSize(i-1);
}
}
@@ -323,8 +324,8 @@
{
VectorXi indexNextEntry = m_blockStart;
m_permutation.resize(m_T.rows());
- for (int i = 0; i < m_T.rows(); i++) {
- int cluster = m_eivalToCluster[i];
+ for (Index i = 0; i < m_T.rows(); i++) {
+ Index cluster = m_eivalToCluster[i];
m_permutation[i] = indexNextEntry[cluster];
++indexNextEntry[cluster];
}
@@ -335,13 +336,13 @@
void MatrixFunction<MatrixType,1>::permuteSchur()
{
IntVectorType p = m_permutation;
- for (int i = 0; i < p.rows() - 1; i++) {
- int j;
+ for (Index i = 0; i < p.rows() - 1; i++) {
+ Index j;
for (j = i; j < p.rows(); j++) {
if (p(j) == i) break;
}
ei_assert(p(j) == i);
- for (int k = j-1; k >= i; k--) {
+ for (Index k = j-1; k >= i; k--) {
swapEntriesInSchur(k);
std::swap(p.coeffRef(k), p.coeffRef(k+1));
}
@@ -350,7 +351,7 @@
/** \brief Swap rows \a index and \a index+1 in Schur decomposition in #m_U and #m_T */
template <typename MatrixType>
-void MatrixFunction<MatrixType,1>::swapEntriesInSchur(int index)
+void MatrixFunction<MatrixType,1>::swapEntriesInSchur(Index index)
{
PlanarRotation<Scalar> rotation;
rotation.makeGivens(m_T(index, index+1), m_T(index+1, index+1) - m_T(index, index));
@@ -372,14 +373,14 @@
m_fT.resize(m_T.rows(), m_T.cols());
m_fT.setZero();
MatrixFunctionAtomic<DynMatrixType> mfa(m_f);
- for (int i = 0; i < m_clusterSize.rows(); ++i) {
+ for (Index i = 0; i < m_clusterSize.rows(); ++i) {
block(m_fT, i, i) = mfa.compute(block(m_T, i, i));
}
}
/** \brief Return block of matrix according to blocking given by #m_blockStart */
template <typename MatrixType>
-Block<MatrixType> MatrixFunction<MatrixType,1>::block(const MatrixType& A, int i, int j)
+Block<MatrixType> MatrixFunction<MatrixType,1>::block(const MatrixType& A, Index i, Index j)
{
return A.block(m_blockStart(i), m_blockStart(j), m_clusterSize(i), m_clusterSize(j));
}
@@ -394,14 +395,14 @@
template <typename MatrixType>
void MatrixFunction<MatrixType,1>::computeOffDiagonal()
{
- for (int diagIndex = 1; diagIndex < m_clusterSize.rows(); diagIndex++) {
- for (int blockIndex = 0; blockIndex < m_clusterSize.rows() - diagIndex; blockIndex++) {
+ for (Index diagIndex = 1; diagIndex < m_clusterSize.rows(); diagIndex++) {
+ for (Index blockIndex = 0; blockIndex < m_clusterSize.rows() - diagIndex; blockIndex++) {
// compute (blockIndex, blockIndex+diagIndex) block
DynMatrixType A = block(m_T, blockIndex, blockIndex);
DynMatrixType B = -block(m_T, blockIndex+diagIndex, blockIndex+diagIndex);
DynMatrixType C = block(m_fT, blockIndex, blockIndex) * block(m_T, blockIndex, blockIndex+diagIndex);
C -= block(m_T, blockIndex, blockIndex+diagIndex) * block(m_fT, blockIndex+diagIndex, blockIndex+diagIndex);
- for (int k = blockIndex + 1; k < blockIndex + diagIndex; k++) {
+ for (Index k = blockIndex + 1; k < blockIndex + diagIndex; k++) {
C += block(m_fT, blockIndex, k) * block(m_T, k, blockIndex+diagIndex);
C -= block(m_T, blockIndex, k) * block(m_fT, k, blockIndex+diagIndex);
}
@@ -446,12 +447,12 @@
ei_assert(C.rows() == A.rows());
ei_assert(C.cols() == B.rows());
- int m = A.rows();
- int n = B.rows();
+ Index m = A.rows();
+ Index n = B.rows();
DynMatrixType X(m, n);
- for (int i = m - 1; i >= 0; --i) {
- for (int j = 0; j < n; ++j) {
+ for (Index i = m - 1; i >= 0; --i) {
+ for (Index j = 0; j < n; ++j) {
// Compute AX = \sum_{k=i+1}^m A_{ik} X_{kj}
Scalar AX;
@@ -494,7 +495,8 @@
{
public:
- typedef typename ei_traits<Derived>::Scalar Scalar;
+ typedef typename Derived::Scalar Scalar;
+ typedef typename Derived::Index Index;
typedef typename ei_stem_function<Scalar>::type StemFunction;
/** \brief Constructor.
@@ -518,8 +520,8 @@
mf.compute(result);
}
- int rows() const { return m_A.rows(); }
- int cols() const { return m_A.cols(); }
+ Index rows() const { return m_A.rows(); }
+ Index cols() const { return m_A.cols(); }
private:
const Derived& m_A;
diff --git a/unsupported/Eigen/src/MatrixFunctions/MatrixFunctionAtomic.h b/unsupported/Eigen/src/MatrixFunctions/MatrixFunctionAtomic.h
index 4bcae47..b578c13 100644
--- a/unsupported/Eigen/src/MatrixFunctions/MatrixFunctionAtomic.h
+++ b/unsupported/Eigen/src/MatrixFunctions/MatrixFunctionAtomic.h
@@ -38,11 +38,11 @@
{
public:
- typedef ei_traits<MatrixType> Traits;
- typedef typename Traits::Scalar Scalar;
+ typedef typename MatrixType::Scalar Scalar;
+ typedef typename MatrixType::Index Index;
typedef typename NumTraits<Scalar>::Real RealScalar;
typedef typename ei_stem_function<Scalar>::type StemFunction;
- typedef Matrix<Scalar, Traits::RowsAtCompileTime, 1> VectorType;
+ typedef Matrix<Scalar, MatrixType::RowsAtCompileTime, 1> VectorType;
/** \brief Constructor
* \param[in] f matrix function to compute.
@@ -62,13 +62,13 @@
MatrixFunctionAtomic& operator=(const MatrixFunctionAtomic&);
void computeMu();
- bool taylorConverged(int s, const MatrixType& F, const MatrixType& Fincr, const MatrixType& P);
+ bool taylorConverged(Index s, const MatrixType& F, const MatrixType& Fincr, const MatrixType& P);
/** \brief Pointer to scalar function */
StemFunction* m_f;
/** \brief Size of matrix function */
- int m_Arows;
+ Index m_Arows;
/** \brief Mean of eigenvalues */
Scalar m_avgEival;
@@ -91,7 +91,7 @@
MatrixType F = m_f(m_avgEival, 0) * MatrixType::Identity(m_Arows, m_Arows);
MatrixType P = m_Ashifted;
MatrixType Fincr;
- for (int s = 1; s < 1.1 * m_Arows + 10; s++) { // upper limit is fairly arbitrary
+ for (Index s = 1; s < 1.1 * m_Arows + 10; s++) { // upper limit is fairly arbitrary
Fincr = m_f(m_avgEival, s) * P;
F += Fincr;
P = Scalar(RealScalar(1.0/(s + 1))) * P * m_Ashifted;
@@ -115,18 +115,18 @@
/** \brief Determine whether Taylor series has converged */
template <typename MatrixType>
-bool MatrixFunctionAtomic<MatrixType>::taylorConverged(int s, const MatrixType& F,
+bool MatrixFunctionAtomic<MatrixType>::taylorConverged(Index s, const MatrixType& F,
const MatrixType& Fincr, const MatrixType& P)
{
- const int n = F.rows();
+ const Index n = F.rows();
const RealScalar F_norm = F.cwiseAbs().rowwise().sum().maxCoeff();
const RealScalar Fincr_norm = Fincr.cwiseAbs().rowwise().sum().maxCoeff();
if (Fincr_norm < NumTraits<Scalar>::epsilon() * F_norm) {
RealScalar delta = 0;
RealScalar rfactorial = 1;
- for (int r = 0; r < n; r++) {
+ for (Index r = 0; r < n; r++) {
RealScalar mx = 0;
- for (int i = 0; i < n; i++)
+ for (Index i = 0; i < n; i++)
mx = std::max(mx, std::abs(m_f(m_Ashifted(i, i) + m_avgEival, s+r)));
if (r != 0)
rfactorial *= RealScalar(r);
diff --git a/unsupported/Eigen/src/NonLinearOptimization/HybridNonLinearSolver.h b/unsupported/Eigen/src/NonLinearOptimization/HybridNonLinearSolver.h
index d75b140..aba31b2 100644
--- a/unsupported/Eigen/src/NonLinearOptimization/HybridNonLinearSolver.h
+++ b/unsupported/Eigen/src/NonLinearOptimization/HybridNonLinearSolver.h
@@ -56,6 +56,8 @@
class HybridNonLinearSolver
{
public:
+ typedef DenseIndex Index;
+
HybridNonLinearSolver(FunctorType &_functor)
: functor(_functor) { nfev=njev=iter = 0; fnorm= 0.; useExternalScaling=false;}
@@ -68,10 +70,10 @@
, nb_of_superdiagonals(-1)
, epsfcn(Scalar(0.)) {}
Scalar factor;
- int maxfev; // maximum number of function evaluation
+ Index maxfev; // maximum number of function evaluation
Scalar xtol;
- int nb_of_subdiagonals;
- int nb_of_superdiagonals;
+ Index nb_of_subdiagonals;
+ Index nb_of_superdiagonals;
Scalar epsfcn;
};
typedef Matrix< Scalar, Dynamic, 1 > FVectorType;
@@ -102,24 +104,24 @@
FVectorType fvec, qtf, diag;
JacobianType fjac;
UpperTriangularType R;
- int nfev;
- int njev;
- int iter;
+ Index nfev;
+ Index njev;
+ Index iter;
Scalar fnorm;
bool useExternalScaling;
private:
FunctorType &functor;
- int n;
+ Index n;
Scalar sum;
bool sing;
Scalar temp;
Scalar delta;
bool jeval;
- int ncsuc;
+ Index ncsuc;
Scalar ratio;
Scalar pnorm, xnorm, fnorm1;
- int nslow1, nslow2;
- int ncfail;
+ Index nslow1, nslow2;
+ Index ncfail;
Scalar actred, prered;
FVectorType wa1, wa2, wa3, wa4;
};
@@ -169,7 +171,7 @@
if (n <= 0 || parameters.xtol < 0. || parameters.maxfev <= 0 || parameters.factor <= 0. )
return HybridNonLinearSolverSpace::ImproperInputParameters;
if (useExternalScaling)
- for (int j = 0; j < n; ++j)
+ for (Index j = 0; j < n; ++j)
if (diag[j] <= 0.)
return HybridNonLinearSolverSpace::ImproperInputParameters;
@@ -196,7 +198,7 @@
{
assert(x.size()==n); // check the caller is not cheating us
- int j;
+ Index j;
std::vector<PlanarRotation<Scalar> > v_givens(n), w_givens(n);
jeval = true;
@@ -408,7 +410,7 @@
if (n <= 0 || parameters.xtol < 0. || parameters.maxfev <= 0 || parameters.nb_of_subdiagonals< 0 || parameters.nb_of_superdiagonals< 0 || parameters.factor <= 0. )
return HybridNonLinearSolverSpace::ImproperInputParameters;
if (useExternalScaling)
- for (int j = 0; j < n; ++j)
+ for (Index j = 0; j < n; ++j)
if (diag[j] <= 0.)
return HybridNonLinearSolverSpace::ImproperInputParameters;
@@ -435,7 +437,7 @@
{
assert(x.size()==n); // check the caller is not cheating us
- int j;
+ Index j;
std::vector<PlanarRotation<Scalar> > v_givens(n), w_givens(n);
jeval = true;
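
The nonlinear-optimization module is dense-only, so it pins Index to DenseIndex directly rather than going through a StorageKind. Callers that read the public counters must follow the type change; a sketch, with DenseIndex as a ptrdiff_t stand-in:

    #include <cstddef>

    typedef std::ptrdiff_t DenseIndex;  // assumption: Eigen's dense index type

    // mirrors the counters changed above; callers reading them must follow suit
    struct SolverCountersSketch
    {
      DenseIndex nfev;  // number of function evaluations (was int)
      DenseIndex njev;  // number of jacobian evaluations (was int)
      DenseIndex iter;  // iteration count (was int)
    };

    int main()
    {
      SolverCountersSketch c = { 0, 0, 0 };
      ++c.iter;
      return static_cast<int>(c.nfev);
    }
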
diff --git a/unsupported/Eigen/src/NonLinearOptimization/LevenbergMarquardt.h b/unsupported/Eigen/src/NonLinearOptimization/LevenbergMarquardt.h
index 3d5b6ea..63eb667 100644
--- a/unsupported/Eigen/src/NonLinearOptimization/LevenbergMarquardt.h
+++ b/unsupported/Eigen/src/NonLinearOptimization/LevenbergMarquardt.h
@@ -63,6 +63,8 @@
LevenbergMarquardt(FunctorType &_functor)
: functor(_functor) { nfev = njev = iter = 0; fnorm = gnorm = 0.; useExternalScaling=false; }
+ typedef DenseIndex Index;
+
struct Parameters {
Parameters()
: factor(Scalar(100.))
@@ -72,7 +74,7 @@
, gtol(Scalar(0.))
, epsfcn(Scalar(0.)) {}
Scalar factor;
- int maxfev; // maximum number of function evaluation
+ Index maxfev; // maximum number of function evaluation
Scalar ftol;
Scalar xtol;
Scalar gtol;
@@ -94,7 +96,7 @@
static LevenbergMarquardtSpace::Status lmdif1(
FunctorType &functor,
FVectorType &x,
- int *nfev,
+ Index *nfev,
const Scalar tol = ei_sqrt(NumTraits<Scalar>::epsilon())
);
@@ -113,17 +115,17 @@
FVectorType fvec, qtf, diag;
JacobianType fjac;
PermutationMatrix<Dynamic,Dynamic> permutation;
- int nfev;
- int njev;
- int iter;
+ Index nfev;
+ Index njev;
+ Index iter;
Scalar fnorm, gnorm;
bool useExternalScaling;
Scalar lm_param(void) { return par; }
private:
FunctorType &functor;
- int n;
- int m;
+ Index n;
+ Index m;
FVectorType wa1, wa2, wa3, wa4;
Scalar par, sum;
@@ -194,7 +196,7 @@
return LevenbergMarquardtSpace::ImproperInputParameters;
if (useExternalScaling)
- for (int j = 0; j < n; ++j)
+ for (Index j = 0; j < n; ++j)
if (diag[j] <= 0.)
return LevenbergMarquardtSpace::ImproperInputParameters;
@@ -219,7 +221,7 @@
assert(x.size()==n); // check the caller is not cheating us
/* calculate the jacobian matrix. */
- int df_ret = functor.df(x, fjac);
+ Index df_ret = functor.df(x, fjac);
if (df_ret<0)
return LevenbergMarquardtSpace::UserAsked;
if (df_ret>0)
@@ -237,7 +239,7 @@
/* to the norms of the columns of the initial jacobian. */
if (iter == 1) {
if (!useExternalScaling)
- for (int j = 0; j < n; ++j)
+ for (Index j = 0; j < n; ++j)
diag[j] = (wa2[j]==0.)? 1. : wa2[j];
/* on the first iteration, calculate the norm of the scaled x */
@@ -257,7 +259,7 @@
/* compute the norm of the scaled gradient. */
gnorm = 0.;
if (fnorm != 0.)
- for (int j = 0; j < n; ++j)
+ for (Index j = 0; j < n; ++j)
if (wa2[permutation.indices()[j]] != 0.)
gnorm = std::max(gnorm, ei_abs( fjac.col(j).head(j+1).dot(qtf.head(j+1)/fnorm) / wa2[permutation.indices()[j]]));
@@ -410,7 +412,7 @@
return LevenbergMarquardtSpace::ImproperInputParameters;
if (useExternalScaling)
- for (int j = 0; j < n; ++j)
+ for (Index j = 0; j < n; ++j)
if (diag[j] <= 0.)
return LevenbergMarquardtSpace::ImproperInputParameters;
@@ -435,7 +437,7 @@
{
assert(x.size()==n); // check the caller is not cheating us
- int i, j;
+ Index i, j;
bool sing;
/* compute the qr factorization of the jacobian matrix */
@@ -444,7 +446,7 @@
/* n components in qtf. */
qtf.fill(0.);
fjac.fill(0.);
- int rownb = 2;
+ Index rownb = 2;
for (i = 0; i < m; ++i) {
if (functor.df(x, wa3, rownb) < 0) return LevenbergMarquardtSpace::UserAsked;
ei_rwupdt<Scalar>(fjac, wa3, qtf, fvec[i]);
@@ -471,7 +473,7 @@
fjac.diagonal() = qrfac.hCoeffs();
permutation = qrfac.colsPermutation();
// TODO : avoid this:
- for(int ii=0; ii< fjac.cols(); ii++) fjac.col(ii).segment(ii+1, fjac.rows()-ii-1) *= fjac(ii,ii); // rescale vectors
+ for(Index ii=0; ii< fjac.cols(); ii++) fjac.col(ii).segment(ii+1, fjac.rows()-ii-1) *= fjac(ii,ii); // rescale vectors
for (j = 0; j < n; ++j) {
if (fjac(j,j) != 0.) {
@@ -623,12 +625,12 @@
LevenbergMarquardt<FunctorType,Scalar>::lmdif1(
FunctorType &functor,
FVectorType &x,
- int *nfev,
+ Index *nfev,
const Scalar tol
)
{
- int n = x.size();
- int m = functor.values();
+ Index n = x.size();
+ Index m = functor.values();
/* check the input parameters for errors. */
if (n <= 0 || m < n || tol < 0.)
diff --git a/unsupported/Eigen/src/NonLinearOptimization/chkder.h b/unsupported/Eigen/src/NonLinearOptimization/chkder.h
index 591e8be..4cb4fbd 100644
--- a/unsupported/Eigen/src/NonLinearOptimization/chkder.h
+++ b/unsupported/Eigen/src/NonLinearOptimization/chkder.h
@@ -13,17 +13,19 @@
Matrix< Scalar, Dynamic, 1 > &err
)
{
+ typedef DenseIndex Index;
+
const Scalar eps = ei_sqrt(NumTraits<Scalar>::epsilon());
const Scalar epsf = chkder_factor * NumTraits<Scalar>::epsilon();
const Scalar epslog = chkder_log10e * ei_log(eps);
Scalar temp;
- const int m = fvec.size(), n = x.size();
+ const Index m = fvec.size(), n = x.size();
if (mode != 2) {
/* mode = 1. */
xp.resize(n);
- for (int j = 0; j < n; ++j) {
+ for (Index j = 0; j < n; ++j) {
temp = eps * ei_abs(x[j]);
if (temp == 0.)
temp = eps;
@@ -33,13 +35,13 @@
else {
/* mode = 2. */
err.setZero(m);
- for (int j = 0; j < n; ++j) {
+ for (Index j = 0; j < n; ++j) {
temp = ei_abs(x[j]);
if (temp == 0.)
temp = 1.;
err += temp * fjac.col(j);
}
- for (int i = 0; i < m; ++i) {
+ for (Index i = 0; i < m; ++i) {
temp = 1.;
if (fvec[i] != 0. && fvecp[i] != 0. && ei_abs(fvecp[i] - fvec[i]) >= epsf * ei_abs(fvec[i]))
temp = eps * ei_abs((fvecp[i] - fvec[i]) / eps - err[i]) / (ei_abs(fvec[i]) + ei_abs(fvecp[i]));
diff --git a/unsupported/Eigen/src/NonLinearOptimization/covar.h b/unsupported/Eigen/src/NonLinearOptimization/covar.h
index 7cfaa22..104898a 100644
--- a/unsupported/Eigen/src/NonLinearOptimization/covar.h
+++ b/unsupported/Eigen/src/NonLinearOptimization/covar.h
@@ -5,13 +5,15 @@
const VectorXi &ipvt,
Scalar tol = ei_sqrt(NumTraits<Scalar>::epsilon()) )
{
+ typedef DenseIndex Index;
+
/* Local variables */
- int i, j, k, l, ii, jj;
- int sing;
+ Index i, j, k, l, ii, jj;
+ bool sing;
Scalar temp;
/* Function Body */
- const int n = r.cols();
+ const Index n = r.cols();
const Scalar tolr = tol * ei_abs(r(0,0));
Matrix< Scalar, Dynamic, 1 > wa(n);
assert(ipvt.size()==n);
diff --git a/unsupported/Eigen/src/NonLinearOptimization/dogleg.h b/unsupported/Eigen/src/NonLinearOptimization/dogleg.h
index 9c1d38d..ab01d5c 100644
--- a/unsupported/Eigen/src/NonLinearOptimization/dogleg.h
+++ b/unsupported/Eigen/src/NonLinearOptimization/dogleg.h
@@ -7,15 +7,17 @@
Scalar delta,
Matrix< Scalar, Dynamic, 1 > &x)
{
+ typedef DenseIndex Index;
+
/* Local variables */
- int i, j;
+ Index i, j;
Scalar sum, temp, alpha, bnorm;
Scalar gnorm, qnorm;
Scalar sgnorm;
/* Function Body */
const Scalar epsmch = NumTraits<Scalar>::epsilon();
- const int n = qrfac.cols();
+ const Index n = qrfac.cols();
assert(n==qtb.size());
assert(n==x.size());
assert(n==diag.size());
diff --git a/unsupported/Eigen/src/NonLinearOptimization/fdjac1.h b/unsupported/Eigen/src/NonLinearOptimization/fdjac1.h
index 3dc1e80..74cf53b 100644
--- a/unsupported/Eigen/src/NonLinearOptimization/fdjac1.h
+++ b/unsupported/Eigen/src/NonLinearOptimization/fdjac1.h
@@ -1,24 +1,26 @@
template<typename FunctorType, typename Scalar>
-int ei_fdjac1(
+DenseIndex ei_fdjac1(
const FunctorType &Functor,
Matrix< Scalar, Dynamic, 1 > &x,
Matrix< Scalar, Dynamic, 1 > &fvec,
Matrix< Scalar, Dynamic, Dynamic > &fjac,
- int ml, int mu,
+ DenseIndex ml, DenseIndex mu,
Scalar epsfcn)
{
+ typedef DenseIndex Index;
+
/* Local variables */
Scalar h;
- int j, k;
+ Index j, k;
Scalar eps, temp;
- int msum;
+ Index msum;
int iflag;
- int start, length;
+ Index start, length;
/* Function Body */
const Scalar epsmch = NumTraits<Scalar>::epsilon();
- const int n = x.size();
+ const Index n = x.size();
assert(fvec.size()==n);
Matrix< Scalar, Dynamic, 1 > wa1(n);
Matrix< Scalar, Dynamic, 1 > wa2(n);
@@ -57,7 +59,7 @@
h = eps * ei_abs(wa2[j]);
if (h == 0.) h = eps;
fjac.col(j).setZero();
- start = std::max(0,j-mu);
+ start = std::max<Index>(0,j-mu);
length = std::min(n-1, j+ml) - start + 1;
fjac.col(j).segment(start, length) = ( wa1.segment(start, length)-fvec.segment(start, length))/h;
}
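
The one non-mechanical change in fdjac1 is std::max<Index>(0, j-mu): with the counters widened, the literal 0 is still an int while j-mu is an Index, so template argument deduction for std::max fails unless the type is spelled out. (The std::min on the next line needs no such help because both operands are already Index.) A standalone illustration:

    #include <algorithm>
    #include <cstddef>

    typedef std::ptrdiff_t Index;

    Index clampedStart(Index j, Index mu) {
        // return std::max(0, j - mu);      // would not compile: int vs Index
        return std::max<Index>(0, j - mu);  // 0 is converted to Index explicitly
    }
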
diff --git a/unsupported/Eigen/src/NonLinearOptimization/lmpar.h b/unsupported/Eigen/src/NonLinearOptimization/lmpar.h
index 8500119..27138de 100644
--- a/unsupported/Eigen/src/NonLinearOptimization/lmpar.h
+++ b/unsupported/Eigen/src/NonLinearOptimization/lmpar.h
@@ -9,11 +9,13 @@
Scalar &par,
Matrix< Scalar, Dynamic, 1 > &x)
{
+ typedef DenseIndex Index;
+
/* Local variables */
- int i, j, l;
+ Index i, j, l;
Scalar fp;
Scalar parc, parl;
- int iter;
+ Index iter;
Scalar temp, paru;
Scalar gnorm;
Scalar dxnorm;
@@ -21,7 +23,7 @@
/* Function Body */
const Scalar dwarf = std::numeric_limits<Scalar>::min();
- const int n = r.cols();
+ const Index n = r.cols();
assert(n==diag.size());
assert(n==qtb.size());
assert(n==x.size());
@@ -30,7 +32,7 @@
/* compute and store in x the gauss-newton direction. if the */
/* jacobian is rank-deficient, obtain a least squares solution. */
- int nsing = n-1;
+ Index nsing = n-1;
wa1 = qtb;
for (j = 0; j < n; ++j) {
if (r(j,j) == 0. && nsing == n-1)
@@ -163,11 +165,13 @@
Matrix< Scalar, Dynamic, 1 > &x)
{
+ typedef DenseIndex Index;
+
/* Local variables */
- int j;
+ Index j;
Scalar fp;
Scalar parc, parl;
- int iter;
+ Index iter;
Scalar temp, paru;
Scalar gnorm;
Scalar dxnorm;
@@ -175,7 +179,7 @@
/* Function Body */
const Scalar dwarf = std::numeric_limits<Scalar>::min();
- const int n = qr.matrixQR().cols();
+ const Index n = qr.matrixQR().cols();
assert(n==diag.size());
assert(n==qtb.size());
@@ -184,8 +188,8 @@
/* compute and store in x the gauss-newton direction. if the */
/* jacobian is rank-deficient, obtain a least squares solution. */
-// const int rank = qr.nonzeroPivots(); // exactly double(0.)
- const int rank = qr.rank(); // use a threshold
+// const Index rank = qr.nonzeroPivots(); // exactly double(0.)
+ const Index rank = qr.rank(); // use a threshold
wa1 = qtb;
wa1.tail(n-rank).setZero();
qr.matrixQR().topLeftCorner(rank, rank).template triangularView<Upper>().solveInPlace(wa1.head(rank));
@@ -262,7 +266,7 @@
for (j = 0; j < n; ++j) {
wa1[j] /= sdiag[j];
temp = wa1[j];
- for (int i = j+1; i < n; ++i)
+ for (Index i = j+1; i < n; ++i)
wa1[i] -= s(i,j) * temp;
}
temp = wa1.blueNorm();
diff --git a/unsupported/Eigen/src/NonLinearOptimization/qrsolv.h b/unsupported/Eigen/src/NonLinearOptimization/qrsolv.h
index 205d934..bce8a44 100644
--- a/unsupported/Eigen/src/NonLinearOptimization/qrsolv.h
+++ b/unsupported/Eigen/src/NonLinearOptimization/qrsolv.h
@@ -11,10 +11,12 @@
Matrix< Scalar, Dynamic, 1 > &sdiag)
{
+ typedef DenseIndex Index;
+
/* Local variables */
- int i, j, k, l;
+ Index i, j, k, l;
Scalar temp;
- int n = s.cols();
+ Index n = s.cols();
Matrix< Scalar, Dynamic, 1 > wa(n);
PlanarRotation<Scalar> givens;
@@ -67,7 +69,7 @@
/* solve the triangular system for z. if the system is */
/* singular, then obtain a least squares solution. */
- int nsing;
+ Index nsing;
for (nsing=0; nsing<n && sdiag[nsing]!=0; nsing++);
wa.tail(n-nsing).setZero();
diff --git a/unsupported/Eigen/src/NonLinearOptimization/r1mpyq.h b/unsupported/Eigen/src/NonLinearOptimization/r1mpyq.h
index 855cb7a..ad319d9 100644
--- a/unsupported/Eigen/src/NonLinearOptimization/r1mpyq.h
+++ b/unsupported/Eigen/src/NonLinearOptimization/r1mpyq.h
@@ -2,18 +2,20 @@
// TODO : move this to GivensQR once there's such a thing in Eigen
template <typename Scalar>
-void ei_r1mpyq(int m, int n, Scalar *a, const std::vector<PlanarRotation<Scalar> > &v_givens, const std::vector<PlanarRotation<Scalar> > &w_givens)
+void ei_r1mpyq(DenseIndex m, DenseIndex n, Scalar *a, const std::vector<PlanarRotation<Scalar> > &v_givens, const std::vector<PlanarRotation<Scalar> > &w_givens)
{
+ typedef DenseIndex Index;
+
/* apply the first set of givens rotations to a. */
- for (int j = n-2; j>=0; --j)
- for (int i = 0; i<m; ++i) {
+ for (Index j = n-2; j>=0; --j)
+ for (Index i = 0; i<m; ++i) {
Scalar temp = v_givens[j].c() * a[i+m*j] - v_givens[j].s() * a[i+m*(n-1)];
a[i+m*(n-1)] = v_givens[j].s() * a[i+m*j] + v_givens[j].c() * a[i+m*(n-1)];
a[i+m*j] = temp;
}
/* apply the second set of givens rotations to a. */
- for (int j = 0; j<n-1; ++j)
- for (int i = 0; i<m; ++i) {
+ for (Index j = 0; j<n-1; ++j)
+ for (Index i = 0; i<m; ++i) {
Scalar temp = w_givens[j].c() * a[i+m*j] + w_givens[j].s() * a[i+m*(n-1)];
a[i+m*(n-1)] = -w_givens[j].s() * a[i+m*j] + w_givens[j].c() * a[i+m*(n-1)];
a[i+m*j] = temp;
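
The reverse loop in r1mpyq is a reminder of why the new Index type must stay signed: "for (Index j = n-2; j >= 0; --j)" only terminates because j can go negative. A standalone sketch of the failure mode an unsigned index would introduce:

    #include <cstddef>

    typedef std::ptrdiff_t Index;  // signed, like DenseIndex

    Index countDown(Index n) {
        Index visits = 0;
        // With an unsigned type, "j >= 0" is always true and the decrement
        // wraps around at zero, so the loop would never terminate.
        for (Index j = n - 2; j >= 0; --j)
            ++visits;
        return visits;
    }
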
diff --git a/unsupported/Eigen/src/NonLinearOptimization/r1updt.h b/unsupported/Eigen/src/NonLinearOptimization/r1updt.h
index 3d89788..e01d029 100644
--- a/unsupported/Eigen/src/NonLinearOptimization/r1updt.h
+++ b/unsupported/Eigen/src/NonLinearOptimization/r1updt.h
@@ -9,10 +9,12 @@
Matrix< Scalar, Dynamic, 1> &w,
bool *sing)
{
+ typedef DenseIndex Index;
+
/* Local variables */
- const int m = s.rows();
- const int n = s.cols();
- int i, j=1;
+ const Index m = s.rows();
+ const Index n = s.cols();
+ Index i, j=1;
Scalar temp;
PlanarRotation<Scalar> givens;
diff --git a/unsupported/Eigen/src/NonLinearOptimization/rwupdt.h b/unsupported/Eigen/src/NonLinearOptimization/rwupdt.h
index b0bf729..aa0bf7d 100644
--- a/unsupported/Eigen/src/NonLinearOptimization/rwupdt.h
+++ b/unsupported/Eigen/src/NonLinearOptimization/rwupdt.h
@@ -6,7 +6,9 @@
Matrix< Scalar, Dynamic, 1> &b,
Scalar alpha)
{
- const int n = r.cols();
+ typedef DenseIndex Index;
+
+ const Index n = r.cols();
assert(r.rows()>=n);
std::vector<PlanarRotation<Scalar> > givens(n);
@@ -14,12 +16,12 @@
Scalar temp, rowj;
/* Function Body */
- for (int j = 0; j < n; ++j) {
+ for (Index j = 0; j < n; ++j) {
rowj = w[j];
/* apply the previous transformations to */
/* r(i,j), i=0,1,...,j-1, and to w(j). */
- for (int i = 0; i < j; ++i) {
+ for (Index i = 0; i < j; ++i) {
temp = givens[i].c() * r(i,j) + givens[i].s() * rowj;
rowj = -givens[i].s() * r(i,j) + givens[i].c() * rowj;
r(i,j) = temp;
diff --git a/unsupported/Eigen/src/Polynomials/Companion.h b/unsupported/Eigen/src/Polynomials/Companion.h
index 7c9e9c5..5a3bacc 100644
--- a/unsupported/Eigen/src/Polynomials/Companion.h
+++ b/unsupported/Eigen/src/Polynomials/Companion.h
@@ -68,8 +68,10 @@
typedef Matrix< Scalar, Deg_1, Deg_1 > BottomLeftBlock;
typedef Matrix< Scalar, 1, Deg_1 > LeftBlockFirstRow;
+ typedef DenseIndex Index;
+
public:
- EIGEN_STRONG_INLINE const _Scalar operator()( int row, int col ) const
+ EIGEN_STRONG_INLINE const _Scalar operator()(Index row, Index col ) const
{
if( m_bl_diag.rows() > col )
{
@@ -83,7 +85,7 @@
template<typename VectorType>
void setPolynomial( const VectorType& poly )
{
- const int deg = poly.size()-1;
+ const Index deg = poly.size()-1;
m_monic = -1/poly[deg] * poly.head(deg);
//m_bl_diag.setIdentity( deg-1 );
m_bl_diag.setOnes(deg-1);
@@ -96,8 +98,8 @@
public:
DenseCompanionMatrixType denseMatrix() const
{
- const int deg = m_monic.size();
- const int deg_1 = deg-1;
+ const Index deg = m_monic.size();
+ const Index deg_1 = deg-1;
DenseCompanionMatrixType companion(deg,deg);
companion <<
( LeftBlock(deg,deg_1)
@@ -220,8 +222,8 @@
void ei_companion<_Scalar,_Deg>::balance()
{
EIGEN_STATIC_ASSERT( 1 < Deg, YOU_MADE_A_PROGRAMMING_MISTAKE );
- const int deg = m_monic.size();
- const int deg_1 = deg-1;
+ const Index deg = m_monic.size();
+ const Index deg_1 = deg-1;
bool hasConverged=false;
while( !hasConverged )
@@ -244,7 +246,7 @@
//Middle rows and columns excluding the diagonal
//==============================================
- for( int i=1; i<deg_1; ++i )
+ for( Index i=1; i<deg_1; ++i )
{
// column norm, excluding the diagonal
colNorm = ei_abs(m_bl_diag[i]);
@@ -263,7 +265,7 @@
//Last row, last column excluding the diagonal
//============================================
- const int ebl = m_bl_diag.size()-1;
+ const Index ebl = m_bl_diag.size()-1;
VectorBlock<RightColumn,Deg_1> headMonic( m_monic, 0, deg_1 );
colNorm = headMonic.array().abs().sum();
rowNorm = ei_abs( m_bl_diag[ebl] );
diff --git a/unsupported/Eigen/src/Polynomials/PolynomialSolver.h b/unsupported/Eigen/src/Polynomials/PolynomialSolver.h
index 2078365..ba14b59 100644
--- a/unsupported/Eigen/src/Polynomials/PolynomialSolver.h
+++ b/unsupported/Eigen/src/Polynomials/PolynomialSolver.h
@@ -49,6 +49,8 @@
typedef std::complex<RealScalar> RootType;
typedef Matrix<RootType,_Deg,1> RootsType;
+ typedef DenseIndex Index;
+
protected:
template< typename OtherPolynomial >
inline void setPolynomial( const OtherPolynomial& poly ){
@@ -81,7 +83,7 @@
const RealScalar& absImaginaryThreshold = NumTraits<Scalar>::dummy_precision() ) const
{
bi_seq.clear();
- for( int i=0; i<m_roots.size(); ++i )
+ for(Index i=0; i<m_roots.size(); ++i )
{
if( ei_abs( m_roots[i].imag() ) < absImaginaryThreshold ){
bi_seq.push_back( m_roots[i].real() ); }
@@ -92,9 +94,9 @@
template<typename squaredNormBinaryPredicate>
inline const RootType& selectComplexRoot_withRespectToNorm( squaredNormBinaryPredicate& pred ) const
{
- int res=0;
+ Index res=0;
RealScalar norm2 = ei_abs2( m_roots[0] );
- for( int i=1; i<m_roots.size(); ++i )
+ for( Index i=1; i<m_roots.size(); ++i )
{
const RealScalar currNorm2 = ei_abs2( m_roots[i] );
if( pred( currNorm2, norm2 ) ){
@@ -130,10 +132,10 @@
const RealScalar& absImaginaryThreshold = NumTraits<Scalar>::dummy_precision() ) const
{
hasArealRoot = false;
- int res=0;
+ Index res=0;
RealScalar abs2(0);
- for( int i=0; i<m_roots.size(); ++i )
+ for( Index i=0; i<m_roots.size(); ++i )
{
if( ei_abs( m_roots[i].imag() ) < absImaginaryThreshold )
{
@@ -170,10 +172,10 @@
const RealScalar& absImaginaryThreshold = NumTraits<Scalar>::dummy_precision() ) const
{
hasArealRoot = false;
- int res=0;
+ Index res=0;
RealScalar val(0);
- for( int i=0; i<m_roots.size(); ++i )
+ for( Index i=0; i<m_roots.size(); ++i )
{
if( ei_abs( m_roots[i].imag() ) < absImaginaryThreshold )
{
diff --git a/unsupported/Eigen/src/Polynomials/PolynomialUtils.h b/unsupported/Eigen/src/Polynomials/PolynomialUtils.h
index d78821f..d10b8f4 100644
--- a/unsupported/Eigen/src/Polynomials/PolynomialUtils.h
+++ b/unsupported/Eigen/src/Polynomials/PolynomialUtils.h
@@ -41,7 +41,7 @@
T poly_eval_horner( const Polynomials& poly, const T& x )
{
T val=poly[poly.size()-1];
- for( int i=poly.size()-2; i>=0; --i ){
+ for(DenseIndex i=poly.size()-2; i>=0; --i ){
val = val*x + poly[i]; }
return val;
}
@@ -66,7 +66,7 @@
{
T val=poly[0];
T inv_x = T(1)/x;
- for( int i=1; i<poly.size(); ++i ){
+ for( DenseIndex i=1; i<poly.size(); ++i ){
val = val*inv_x + poly[i]; }
return std::pow(x,(T)(poly.size()-1)) * val;
@@ -94,7 +94,7 @@
const Scalar inv_leading_coeff = Scalar(1)/poly[poly.size()-1];
Real cb(0);
- for( int i=0; i<poly.size()-1; ++i ){
+ for( DenseIndex i=0; i<poly.size()-1; ++i ){
cb += ei_abs(poly[i]*inv_leading_coeff); }
return cb + Real(1);
}
@@ -112,14 +112,14 @@
typedef typename Polynomial::Scalar Scalar;
typedef typename NumTraits<Scalar>::Real Real;
- int i=0;
+ DenseIndex i=0;
while( i<poly.size()-1 && Scalar(0) == poly(i) ){ ++i; }
if( poly.size()-1 == i ){
return Real(1); }
const Scalar inv_min_coeff = Scalar(1)/poly[i];
Real cb(1);
- for( int j=i+1; j<poly.size(); ++j ){
+ for( DenseIndex j=i+1; j<poly.size(); ++j ){
cb += ei_abs(poly[j]*inv_min_coeff); }
return Real(1)/cb;
}
@@ -142,9 +142,9 @@
poly.setZero( rv.size()+1 );
poly[0] = -rv[0]; poly[1] = Scalar(1);
- for( int i=1; i<(int)rv.size(); ++i )
+ for( DenseIndex i=1; i< rv.size(); ++i )
{
- for( int j=i+1; j>0; --j ){ poly[j] = poly[j-1] - rv[i]*poly[j]; }
+ for( DenseIndex j=i+1; j>0; --j ){ poly[j] = poly[j-1] - rv[i]*poly[j]; }
poly[0] = -rv[i]*poly[0];
}
}
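
For reference, the Horner loop touched above, restated as a standalone function over std::vector (illustrative only; the real code operates on Eigen expressions). The cast on size() matters because std::vector reports an unsigned size, while the descending counter must be signed:

    #include <cstddef>
    #include <vector>

    typedef std::ptrdiff_t DenseIndex;

    template <typename T>
    T polyEvalHornerSketch(const std::vector<T>& poly, const T& x) {
        T val = poly[poly.size() - 1];
        // Evaluate a_0 + a_1*x + ... + a_n*x^n from the highest coefficient down.
        for (DenseIndex i = DenseIndex(poly.size()) - 2; i >= 0; --i)
            val = val * x + poly[i];
        return val;
    }
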
diff --git a/unsupported/Eigen/src/Skyline/SkylineInplaceLU.h b/unsupported/Eigen/src/Skyline/SkylineInplaceLU.h
index c8c5f75..fa8f819 100644
--- a/unsupported/Eigen/src/Skyline/SkylineInplaceLU.h
+++ b/unsupported/Eigen/src/Skyline/SkylineInplaceLU.h
@@ -38,6 +38,8 @@
class SkylineInplaceLU {
protected:
typedef typename MatrixType::Scalar Scalar;
+ typedef typename MatrixType::Index Index;
+
typedef typename NumTraits<typename MatrixType::Scalar>::Real RealScalar;
public:
@@ -135,18 +137,18 @@
ei_assert(rows == cols && "We do not (yet) support rectangular LU.");
ei_assert(!m_lu.IsRowMajor && "LU decomposition does not work with rowMajor Storage");
- for (unsigned int row = 0; row < rows; row++) {
+ for (Index row = 0; row < rows; row++) {
const double pivot = m_lu.coeffDiag(row);
//Lower matrix Columns update
- const unsigned int& col = row;
+ const Index& col = row;
for (typename MatrixType::InnerLowerIterator lIt(m_lu, col); lIt; ++lIt) {
lIt.valueRef() /= pivot;
}
//Upper matrix update -> contiguous memory access
typename MatrixType::InnerLowerIterator lIt(m_lu, col);
- for (unsigned int rrow = row + 1; rrow < m_lu.rows(); rrow++) {
+ for (Index rrow = row + 1; rrow < m_lu.rows(); rrow++) {
typename MatrixType::InnerUpperIterator uItPivot(m_lu, row);
typename MatrixType::InnerUpperIterator uIt(m_lu, rrow);
const double coef = lIt.value();
@@ -165,12 +167,12 @@
//Upper matrix update -> non contiguous memory access
typename MatrixType::InnerLowerIterator lIt3(m_lu, col);
- for (unsigned int rrow = row + 1; rrow < m_lu.rows(); rrow++) {
+ for (Index rrow = row + 1; rrow < m_lu.rows(); rrow++) {
typename MatrixType::InnerUpperIterator uItPivot(m_lu, row);
const double coef = lIt3.value();
//update lower part -> non contiguous memory access
- for (unsigned int i = 0; i < rrow - row - 1; i++) {
+ for (Index i = 0; i < rrow - row - 1; i++) {
m_lu.coeffRefLower(rrow, row + i + 1) -= uItPivot.value() * coef;
++uItPivot;
}
@@ -178,7 +180,7 @@
}
//update diag -> contiguous
typename MatrixType::InnerLowerIterator lIt2(m_lu, col);
- for (unsigned int rrow = row + 1; rrow < m_lu.rows(); rrow++) {
+ for (Index rrow = row + 1; rrow < m_lu.rows(); rrow++) {
typename MatrixType::InnerUpperIterator uItPivot(m_lu, row);
typename MatrixType::InnerUpperIterator uIt(m_lu, rrow);
@@ -199,11 +201,11 @@
ei_assert(rows == cols && "We do not (yet) support rectangular LU.");
ei_assert(m_lu.IsRowMajor && "You're trying to apply rowMajor decomposition on a ColMajor matrix !");
- for (unsigned int row = 0; row < rows; row++) {
+ for (Index row = 0; row < rows; row++) {
typename MatrixType::InnerLowerIterator llIt(m_lu, row);
- for (unsigned int col = llIt.col(); col < row; col++) {
+ for (Index col = llIt.col(); col < row; col++) {
if (m_lu.coeffExistLower(row, col)) {
const double diag = m_lu.coeffDiag(col);
@@ -211,10 +213,10 @@
typename MatrixType::InnerUpperIterator uIt(m_lu, col);
- const int offset = lIt.col() - uIt.row();
+ const Index offset = lIt.col() - uIt.row();
- int stop = offset > 0 ? col - lIt.col() : col - uIt.row();
+ Index stop = offset > 0 ? col - lIt.col() : col - uIt.row();
//#define VECTORIZE
#ifdef VECTORIZE
@@ -230,7 +232,7 @@
lIt += -offset;
Scalar newCoeff = m_lu.coeffLower(row, col);
- for (int k = 0; k < stop; ++k) {
+ for (Index k = 0; k < stop; ++k) {
const Scalar tmp = newCoeff;
newCoeff = tmp - lIt.value() * uIt.value();
++lIt;
@@ -243,15 +245,15 @@
}
//Upper matrix update
- const int col = row;
+ const Index col = row;
typename MatrixType::InnerUpperIterator uuIt(m_lu, col);
- for (unsigned int rrow = uuIt.row(); rrow < col; rrow++) {
+ for (Index rrow = uuIt.row(); rrow < col; rrow++) {
typename MatrixType::InnerLowerIterator lIt(m_lu, rrow);
typename MatrixType::InnerUpperIterator uIt(m_lu, col);
- const int offset = lIt.col() - uIt.row();
+ const Index offset = lIt.col() - uIt.row();
- int stop = offset > 0 ? rrow - lIt.col() : rrow - uIt.row();
+ Index stop = offset > 0 ? rrow - lIt.col() : rrow - uIt.row();
#ifdef VECTORIZE
Map<VectorXd > rowVal(lIt.valuePtr() + (offset > 0 ? 0 : -offset), stop);
@@ -264,7 +266,7 @@
else //Skip zero values of uIt
lIt += -offset;
Scalar newCoeff = m_lu.coeffUpper(rrow, col);
- for (int k = 0; k < stop; ++k) {
+ for (Index k = 0; k < stop; ++k) {
const Scalar tmp = newCoeff;
newCoeff = tmp - lIt.value() * uIt.value();
@@ -280,10 +282,10 @@
typename MatrixType::InnerLowerIterator lIt(m_lu, row);
typename MatrixType::InnerUpperIterator uIt(m_lu, row);
- const int offset = lIt.col() - uIt.row();
+ const Index offset = lIt.col() - uIt.row();
- int stop = offset > 0 ? lIt.size() : uIt.size();
+ Index stop = offset > 0 ? lIt.size() : uIt.size();
#ifdef VECTORIZE
Map<VectorXd > rowVal(lIt.valuePtr() + (offset > 0 ? 0 : -offset), stop);
Map<VectorXd > colVal(uIt.valuePtr() + (offset > 0 ? offset : 0), stop);
@@ -294,7 +296,7 @@
else //Skip zero values of uIt
lIt += -offset;
Scalar newCoeff = m_lu.coeffDiag(row);
- for (unsigned int k = 0; k < stop; ++k) {
+ for (Index k = 0; k < stop; ++k) {
const Scalar tmp = newCoeff;
newCoeff = tmp - lIt.value() * uIt.value();
++lIt;
@@ -320,12 +322,12 @@
const size_t cols = m_lu.cols();
- for (int row = 0; row < rows; row++) {
+ for (Index row = 0; row < rows; row++) {
x->coeffRef(row) = b.coeff(row);
Scalar newVal = x->coeff(row);
typename MatrixType::InnerLowerIterator lIt(m_lu, row);
- unsigned int col = lIt.col();
+ Index col = lIt.col();
while (lIt.col() < row) {
newVal -= x->coeff(col++) * lIt.value();
@@ -336,7 +338,7 @@
}
- for (int col = rows - 1; col > 0; col--) {
+ for (Index col = rows - 1; col > 0; col--) {
x->coeffRef(col) = x->coeff(col) / m_lu.coeffDiag(col);
const Scalar x_col = x->coeff(col);
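
In SkylineInplaceLU the patch does more than widen: it removes a signed/unsigned mix. The offsets computed as lIt.col() - uIt.row() are negative by design (the code branches on "offset > 0"), so iterating with unsigned int counters next to them invited wrap-around on conversion. Everything now shares one signed Index. A condensed illustration:

    #include <cstddef>

    typedef std::ptrdiff_t Index;

    Index overlapLength(Index lcol, Index urow, Index col) {
        const Index offset = lcol - urow;  // may legitimately be negative
        // With unsigned types a negative difference would wrap to a huge
        // value and the comparison below would silently take the wrong branch.
        return offset > 0 ? col - lcol : col - urow;
    }
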
diff --git a/unsupported/Eigen/src/Skyline/SkylineMatrix.h b/unsupported/Eigen/src/Skyline/SkylineMatrix.h
index 6dd4f17..20fafaf 100644
--- a/unsupported/Eigen/src/Skyline/SkylineMatrix.h
+++ b/unsupported/Eigen/src/Skyline/SkylineMatrix.h
@@ -46,6 +46,7 @@
template<typename _Scalar, int _Options>
struct ei_traits<SkylineMatrix<_Scalar, _Options> > {
typedef _Scalar Scalar;
+ typedef Sparse StorageKind;
enum {
RowsAtCompileTime = Dynamic,
@@ -71,45 +72,45 @@
typedef SkylineMatrix<Scalar, (Flags&~RowMajorBit) | (IsRowMajor ? RowMajorBit : 0) > TransposedSkylineMatrix;
- int m_outerSize;
- int m_innerSize;
+ Index m_outerSize;
+ Index m_innerSize;
public:
- int* m_colStartIndex;
- int* m_rowStartIndex;
+ Index* m_colStartIndex;
+ Index* m_rowStartIndex;
SkylineStorage<Scalar> m_data;
public:
- inline int rows() const {
+ inline Index rows() const {
return IsRowMajor ? m_outerSize : m_innerSize;
}
- inline int cols() const {
+ inline Index cols() const {
return IsRowMajor ? m_innerSize : m_outerSize;
}
- inline int innerSize() const {
+ inline Index innerSize() const {
return m_innerSize;
}
- inline int outerSize() const {
+ inline Index outerSize() const {
return m_outerSize;
}
- inline int upperNonZeros() const {
+ inline Index upperNonZeros() const {
return m_data.upperSize();
}
- inline int lowerNonZeros() const {
+ inline Index lowerNonZeros() const {
return m_data.lowerSize();
}
- inline int upperNonZeros(int j) const {
+ inline Index upperNonZeros(Index j) const {
return m_colStartIndex[j + 1] - m_colStartIndex[j];
}
- inline int lowerNonZeros(int j) const {
+ inline Index lowerNonZeros(Index j) const {
return m_rowStartIndex[j + 1] - m_rowStartIndex[j];
}
@@ -137,25 +138,25 @@
return &m_data.lower(0);
}
- inline const int* _upperProfilePtr() const {
+ inline const Index* _upperProfilePtr() const {
return &m_data.upperProfile(0);
}
- inline int* _upperProfilePtr() {
+ inline Index* _upperProfilePtr() {
return &m_data.upperProfile(0);
}
- inline const int* _lowerProfilePtr() const {
+ inline const Index* _lowerProfilePtr() const {
return &m_data.lowerProfile(0);
}
- inline int* _lowerProfilePtr() {
+ inline Index* _lowerProfilePtr() {
return &m_data.lowerProfile(0);
}
- inline Scalar coeff(int row, int col) const {
- const int outer = IsRowMajor ? row : col;
- const int inner = IsRowMajor ? col : row;
+ inline Scalar coeff(Index row, Index col) const {
+ const Index outer = IsRowMajor ? row : col;
+ const Index inner = IsRowMajor ? col : row;
ei_assert(outer < outerSize());
ei_assert(inner < innerSize());
@@ -166,7 +167,7 @@
if (IsRowMajor) {
if (inner > outer) //upper matrix
{
- const int minOuterIndex = inner - m_data.upperProfile(inner);
+ const Index minOuterIndex = inner - m_data.upperProfile(inner);
if (outer >= minOuterIndex)
return this->m_data.upper(m_colStartIndex[inner] + outer - (inner - m_data.upperProfile(inner)));
else
@@ -174,7 +175,7 @@
}
if (inner < outer) //lower matrix
{
- const int minInnerIndex = outer - m_data.lowerProfile(outer);
+ const Index minInnerIndex = outer - m_data.lowerProfile(outer);
if (inner >= minInnerIndex)
return this->m_data.lower(m_rowStartIndex[outer] + inner - (outer - m_data.lowerProfile(outer)));
else
@@ -184,7 +185,7 @@
} else {
if (outer > inner) //upper matrix
{
- const int maxOuterIndex = inner + m_data.upperProfile(inner);
+ const Index maxOuterIndex = inner + m_data.upperProfile(inner);
if (outer <= maxOuterIndex)
return this->m_data.upper(m_colStartIndex[inner] + (outer - inner));
else
@@ -192,7 +193,7 @@
}
if (outer < inner) //lower matrix
{
- const int maxInnerIndex = outer + m_data.lowerProfile(outer);
+ const Index maxInnerIndex = outer + m_data.lowerProfile(outer);
if (inner <= maxInnerIndex)
return this->m_data.lower(m_rowStartIndex[outer] + (inner - outer));
@@ -202,9 +203,9 @@
}
}
- inline Scalar& coeffRef(int row, int col) {
- const int outer = IsRowMajor ? row : col;
- const int inner = IsRowMajor ? col : row;
+ inline Scalar& coeffRef(Index row, Index col) {
+ const Index outer = IsRowMajor ? row : col;
+ const Index inner = IsRowMajor ? col : row;
ei_assert(outer < outerSize());
ei_assert(inner < innerSize());
@@ -215,55 +216,55 @@
if (IsRowMajor) {
if (col > row) //upper matrix
{
- const int minOuterIndex = inner - m_data.upperProfile(inner);
+ const Index minOuterIndex = inner - m_data.upperProfile(inner);
ei_assert(outer >= minOuterIndex && "you try to acces a coeff that do not exist in the storage");
return this->m_data.upper(m_colStartIndex[inner] + outer - (inner - m_data.upperProfile(inner)));
}
if (col < row) //lower matrix
{
- const int minInnerIndex = outer - m_data.lowerProfile(outer);
+ const Index minInnerIndex = outer - m_data.lowerProfile(outer);
ei_assert(inner >= minInnerIndex && "you try to acces a coeff that do not exist in the storage");
return this->m_data.lower(m_rowStartIndex[outer] + inner - (outer - m_data.lowerProfile(outer)));
}
} else {
if (outer > inner) //upper matrix
{
- const int maxOuterIndex = inner + m_data.upperProfile(inner);
+ const Index maxOuterIndex = inner + m_data.upperProfile(inner);
ei_assert(outer <= maxOuterIndex && "you try to acces a coeff that do not exist in the storage");
return this->m_data.upper(m_colStartIndex[inner] + (outer - inner));
}
if (outer < inner) //lower matrix
{
- const int maxInnerIndex = outer + m_data.lowerProfile(outer);
+ const Index maxInnerIndex = outer + m_data.lowerProfile(outer);
ei_assert(inner <= maxInnerIndex && "you try to acces a coeff that do not exist in the storage");
return this->m_data.lower(m_rowStartIndex[outer] + (inner - outer));
}
}
}
- inline Scalar coeffDiag(int idx) const {
+ inline Scalar coeffDiag(Index idx) const {
ei_assert(idx < outerSize());
ei_assert(idx < innerSize());
return this->m_data.diag(idx);
}
- inline Scalar coeffLower(int row, int col) const {
- const int outer = IsRowMajor ? row : col;
- const int inner = IsRowMajor ? col : row;
+ inline Scalar coeffLower(Index row, Index col) const {
+ const Index outer = IsRowMajor ? row : col;
+ const Index inner = IsRowMajor ? col : row;
ei_assert(outer < outerSize());
ei_assert(inner < innerSize());
ei_assert(inner != outer);
if (IsRowMajor) {
- const int minInnerIndex = outer - m_data.lowerProfile(outer);
+ const Index minInnerIndex = outer - m_data.lowerProfile(outer);
if (inner >= minInnerIndex)
return this->m_data.lower(m_rowStartIndex[outer] + inner - (outer - m_data.lowerProfile(outer)));
else
return Scalar(0);
} else {
- const int maxInnerIndex = outer + m_data.lowerProfile(outer);
+ const Index maxInnerIndex = outer + m_data.lowerProfile(outer);
if (inner <= maxInnerIndex)
return this->m_data.lower(m_rowStartIndex[outer] + (inner - outer));
else
@@ -271,22 +272,22 @@
}
}
- inline Scalar coeffUpper(int row, int col) const {
- const int outer = IsRowMajor ? row : col;
- const int inner = IsRowMajor ? col : row;
+ inline Scalar coeffUpper(Index row, Index col) const {
+ const Index outer = IsRowMajor ? row : col;
+ const Index inner = IsRowMajor ? col : row;
ei_assert(outer < outerSize());
ei_assert(inner < innerSize());
ei_assert(inner != outer);
if (IsRowMajor) {
- const int minOuterIndex = inner - m_data.upperProfile(inner);
+ const Index minOuterIndex = inner - m_data.upperProfile(inner);
if (outer >= minOuterIndex)
return this->m_data.upper(m_colStartIndex[inner] + outer - (inner - m_data.upperProfile(inner)));
else
return Scalar(0);
} else {
- const int maxOuterIndex = inner + m_data.upperProfile(inner);
+ const Index maxOuterIndex = inner + m_data.upperProfile(inner);
if (outer <= maxOuterIndex)
return this->m_data.upper(m_colStartIndex[inner] + (outer - inner));
else
@@ -294,80 +295,80 @@
}
}
- inline Scalar& coeffRefDiag(int idx) {
+ inline Scalar& coeffRefDiag(Index idx) {
ei_assert(idx < outerSize());
ei_assert(idx < innerSize());
return this->m_data.diag(idx);
}
- inline Scalar& coeffRefLower(int row, int col) {
- const int outer = IsRowMajor ? row : col;
- const int inner = IsRowMajor ? col : row;
+ inline Scalar& coeffRefLower(Index row, Index col) {
+ const Index outer = IsRowMajor ? row : col;
+ const Index inner = IsRowMajor ? col : row;
ei_assert(outer < outerSize());
ei_assert(inner < innerSize());
ei_assert(inner != outer);
if (IsRowMajor) {
- const int minInnerIndex = outer - m_data.lowerProfile(outer);
+ const Index minInnerIndex = outer - m_data.lowerProfile(outer);
ei_assert(inner >= minInnerIndex && "you try to acces a coeff that do not exist in the storage");
return this->m_data.lower(m_rowStartIndex[outer] + inner - (outer - m_data.lowerProfile(outer)));
} else {
- const int maxInnerIndex = outer + m_data.lowerProfile(outer);
+ const Index maxInnerIndex = outer + m_data.lowerProfile(outer);
ei_assert(inner <= maxInnerIndex && "you try to acces a coeff that do not exist in the storage");
return this->m_data.lower(m_rowStartIndex[outer] + (inner - outer));
}
}
- inline bool coeffExistLower(int row, int col) {
- const int outer = IsRowMajor ? row : col;
- const int inner = IsRowMajor ? col : row;
+ inline bool coeffExistLower(Index row, Index col) {
+ const Index outer = IsRowMajor ? row : col;
+ const Index inner = IsRowMajor ? col : row;
ei_assert(outer < outerSize());
ei_assert(inner < innerSize());
ei_assert(inner != outer);
if (IsRowMajor) {
- const int minInnerIndex = outer - m_data.lowerProfile(outer);
+ const Index minInnerIndex = outer - m_data.lowerProfile(outer);
return inner >= minInnerIndex;
} else {
- const int maxInnerIndex = outer + m_data.lowerProfile(outer);
+ const Index maxInnerIndex = outer + m_data.lowerProfile(outer);
return inner <= maxInnerIndex;
}
}
- inline Scalar& coeffRefUpper(int row, int col) {
- const int outer = IsRowMajor ? row : col;
- const int inner = IsRowMajor ? col : row;
+ inline Scalar& coeffRefUpper(Index row, Index col) {
+ const Index outer = IsRowMajor ? row : col;
+ const Index inner = IsRowMajor ? col : row;
ei_assert(outer < outerSize());
ei_assert(inner < innerSize());
ei_assert(inner != outer);
if (IsRowMajor) {
- const int minOuterIndex = inner - m_data.upperProfile(inner);
+ const Index minOuterIndex = inner - m_data.upperProfile(inner);
ei_assert(outer >= minOuterIndex && "you try to acces a coeff that do not exist in the storage");
return this->m_data.upper(m_colStartIndex[inner] + outer - (inner - m_data.upperProfile(inner)));
} else {
- const int maxOuterIndex = inner + m_data.upperProfile(inner);
+ const Index maxOuterIndex = inner + m_data.upperProfile(inner);
ei_assert(outer <= maxOuterIndex && "you try to acces a coeff that do not exist in the storage");
return this->m_data.upper(m_colStartIndex[inner] + (outer - inner));
}
}
- inline bool coeffExistUpper(int row, int col) {
- const int outer = IsRowMajor ? row : col;
- const int inner = IsRowMajor ? col : row;
+ inline bool coeffExistUpper(Index row, Index col) {
+ const Index outer = IsRowMajor ? row : col;
+ const Index inner = IsRowMajor ? col : row;
ei_assert(outer < outerSize());
ei_assert(inner < innerSize());
ei_assert(inner != outer);
if (IsRowMajor) {
- const int minOuterIndex = inner - m_data.upperProfile(inner);
+ const Index minOuterIndex = inner - m_data.upperProfile(inner);
return outer >= minOuterIndex;
} else {
- const int maxOuterIndex = inner + m_data.upperProfile(inner);
+ const Index maxOuterIndex = inner + m_data.upperProfile(inner);
return outer <= maxOuterIndex;
}
}
@@ -385,17 +386,17 @@
/** Removes all non zeros */
inline void setZero() {
m_data.clear();
- memset(m_colStartIndex, 0, (m_outerSize + 1) * sizeof (int));
- memset(m_rowStartIndex, 0, (m_outerSize + 1) * sizeof (int));
+ memset(m_colStartIndex, 0, (m_outerSize + 1) * sizeof (Index));
+ memset(m_rowStartIndex, 0, (m_outerSize + 1) * sizeof (Index));
}
/** \returns the number of non zero coefficients */
- inline int nonZeros() const {
+ inline Index nonZeros() const {
return m_data.diagSize() + m_data.upperSize() + m_data.lowerSize();
}
/** Preallocates \a reserveSize non zeros */
- inline void reserve(int reserveSize, int reserveUpperSize, int reserveLowerSize) {
+ inline void reserve(Index reserveSize, Index reserveUpperSize, Index reserveLowerSize) {
m_data.reserve(reserveSize, reserveUpperSize, reserveLowerSize);
}
@@ -407,9 +408,9 @@
*
* After an insertion session, you should call the finalize() function.
*/
- EIGEN_DONT_INLINE Scalar & insert(int row, int col) {
- const int outer = IsRowMajor ? row : col;
- const int inner = IsRowMajor ? col : row;
+ EIGEN_DONT_INLINE Scalar & insert(Index row, Index col) {
+ const Index outer = IsRowMajor ? row : col;
+ const Index inner = IsRowMajor ? col : row;
ei_assert(outer < outerSize());
ei_assert(inner < innerSize());
@@ -420,27 +421,27 @@
if (IsRowMajor) {
if (outer < inner) //upper matrix
{
- int minOuterIndex = 0;
+ Index minOuterIndex = 0;
minOuterIndex = inner - m_data.upperProfile(inner);
if (outer < minOuterIndex) //The value does not yet exist
{
- const int previousProfile = m_data.upperProfile(inner);
+ const Index previousProfile = m_data.upperProfile(inner);
m_data.upperProfile(inner) = inner - outer;
- const int bandIncrement = m_data.upperProfile(inner) - previousProfile;
+ const Index bandIncrement = m_data.upperProfile(inner) - previousProfile;
//shift data stored after this new one
- const int stop = m_colStartIndex[cols()];
- const int start = m_colStartIndex[inner];
+ const Index stop = m_colStartIndex[cols()];
+ const Index start = m_colStartIndex[inner];
- for (int innerIdx = stop; innerIdx >= start; innerIdx--) {
+ for (Index innerIdx = stop; innerIdx >= start; innerIdx--) {
m_data.upper(innerIdx + bandIncrement) = m_data.upper(innerIdx);
}
- for (int innerIdx = cols(); innerIdx > inner; innerIdx--) {
+ for (Index innerIdx = cols(); innerIdx > inner; innerIdx--) {
m_colStartIndex[innerIdx] += bandIncrement;
}
@@ -455,23 +456,23 @@
if (outer > inner) //lower matrix
{
- const int minInnerIndex = outer - m_data.lowerProfile(outer);
+ const Index minInnerIndex = outer - m_data.lowerProfile(outer);
if (inner < minInnerIndex) //The value does not yet exist
{
- const int previousProfile = m_data.lowerProfile(outer);
+ const Index previousProfile = m_data.lowerProfile(outer);
m_data.lowerProfile(outer) = outer - inner;
- const int bandIncrement = m_data.lowerProfile(outer) - previousProfile;
+ const Index bandIncrement = m_data.lowerProfile(outer) - previousProfile;
//shift data stored after this new one
- const int stop = m_rowStartIndex[rows()];
- const int start = m_rowStartIndex[outer];
+ const Index stop = m_rowStartIndex[rows()];
+ const Index start = m_rowStartIndex[outer];
- for (int innerIdx = stop; innerIdx >= start; innerIdx--) {
+ for (Index innerIdx = stop; innerIdx >= start; innerIdx--) {
m_data.lower(innerIdx + bandIncrement) = m_data.lower(innerIdx);
}
- for (int innerIdx = rows(); innerIdx > outer; innerIdx--) {
+ for (Index innerIdx = rows(); innerIdx > outer; innerIdx--) {
m_rowStartIndex[innerIdx] += bandIncrement;
}
@@ -485,22 +486,22 @@
} else {
if (outer > inner) //upper matrix
{
- const int maxOuterIndex = inner + m_data.upperProfile(inner);
+ const Index maxOuterIndex = inner + m_data.upperProfile(inner);
if (outer > maxOuterIndex) //The value does not yet exist
{
- const int previousProfile = m_data.upperProfile(inner);
+ const Index previousProfile = m_data.upperProfile(inner);
m_data.upperProfile(inner) = outer - inner;
- const int bandIncrement = m_data.upperProfile(inner) - previousProfile;
+ const Index bandIncrement = m_data.upperProfile(inner) - previousProfile;
//shift data stored after this new one
- const int stop = m_rowStartIndex[rows()];
- const int start = m_rowStartIndex[inner + 1];
+ const Index stop = m_rowStartIndex[rows()];
+ const Index start = m_rowStartIndex[inner + 1];
- for (int innerIdx = stop; innerIdx >= start; innerIdx--) {
+ for (Index innerIdx = stop; innerIdx >= start; innerIdx--) {
m_data.upper(innerIdx + bandIncrement) = m_data.upper(innerIdx);
}
- for (int innerIdx = inner + 1; innerIdx < outerSize() + 1; innerIdx++) {
+ for (Index innerIdx = inner + 1; innerIdx < outerSize() + 1; innerIdx++) {
m_rowStartIndex[innerIdx] += bandIncrement;
}
memset(this->_upperPtr() + m_rowStartIndex[inner] + previousProfile + 1, 0, (bandIncrement - 1) * sizeof (Scalar));
@@ -512,22 +513,22 @@
if (outer < inner) //lower matrix
{
- const int maxInnerIndex = outer + m_data.lowerProfile(outer);
+ const Index maxInnerIndex = outer + m_data.lowerProfile(outer);
if (inner > maxInnerIndex) //The value does not yet exist
{
- const int previousProfile = m_data.lowerProfile(outer);
+ const Index previousProfile = m_data.lowerProfile(outer);
m_data.lowerProfile(outer) = inner - outer;
- const int bandIncrement = m_data.lowerProfile(outer) - previousProfile;
+ const Index bandIncrement = m_data.lowerProfile(outer) - previousProfile;
//shift data stored after this new one
- const int stop = m_colStartIndex[cols()];
- const int start = m_colStartIndex[outer + 1];
+ const Index stop = m_colStartIndex[cols()];
+ const Index start = m_colStartIndex[outer + 1];
- for (int innerIdx = stop; innerIdx >= start; innerIdx--) {
+ for (Index innerIdx = stop; innerIdx >= start; innerIdx--) {
m_data.lower(innerIdx + bandIncrement) = m_data.lower(innerIdx);
}
- for (int innerIdx = outer + 1; innerIdx < outerSize() + 1; innerIdx++) {
+ for (Index innerIdx = outer + 1; innerIdx < outerSize() + 1; innerIdx++) {
m_colStartIndex[innerIdx] += bandIncrement;
}
memset(this->_lowerPtr() + m_colStartIndex[outer] + previousProfile + 1, 0, (bandIncrement - 1) * sizeof (Scalar));
@@ -551,16 +552,16 @@
// ei_assert(rows() == cols() && "memory reorganisatrion only works with suare matrix");
//
// Scalar* newArray = new Scalar[m_colStartIndex[cols()] + 1 + m_rowStartIndex[rows()] + 1];
- // unsigned int dataIdx = 0;
- // for (unsigned int row = 0; row < rows(); row++) {
+ // Index dataIdx = 0;
+ // for (Index row = 0; row < rows(); row++) {
//
- // const unsigned int nbLowerElts = m_rowStartIndex[row + 1] - m_rowStartIndex[row];
+ // const Index nbLowerElts = m_rowStartIndex[row + 1] - m_rowStartIndex[row];
// // std::cout << "nbLowerElts" << nbLowerElts << std::endl;
// memcpy(newArray + dataIdx, m_data.m_lower + m_rowStartIndex[row], nbLowerElts * sizeof (Scalar));
// m_rowStartIndex[row] = dataIdx;
// dataIdx += nbLowerElts;
//
- // const unsigned int nbUpperElts = m_colStartIndex[row + 1] - m_colStartIndex[row];
+ // const Index nbUpperElts = m_colStartIndex[row + 1] - m_colStartIndex[row];
// memcpy(newArray + dataIdx, m_data.m_upper + m_colStartIndex[row], nbUpperElts * sizeof (Scalar));
// m_colStartIndex[row] = dataIdx;
// dataIdx += nbUpperElts;
@@ -594,16 +595,16 @@
}
/** Resizes the matrix to a \a rows x \a cols matrix and initializes it to zero
- * \sa resizeNonZeros(int), reserve(), setZero()
+ * \sa resizeNonZeros(Index), reserve(), setZero()
*/
void resize(size_t rows, size_t cols) {
- const int diagSize = rows > cols ? cols : rows;
+ const Index diagSize = rows > cols ? cols : rows;
m_innerSize = IsRowMajor ? cols : rows;
ei_assert(rows == cols && "Skyline matrix must be square matrix");
if (diagSize % 2) { // diagSize is odd
- const int k = (diagSize - 1) / 2;
+ const Index k = (diagSize - 1) / 2;
m_data.resize(diagSize, IsRowMajor ? cols : rows, IsRowMajor ? rows : cols,
2 * k * k + k + 1,
@@ -611,7 +612,7 @@
} else // diagSize is even
{
- const int k = diagSize / 2;
+ const Index k = diagSize / 2;
m_data.resize(diagSize, IsRowMajor ? cols : rows, IsRowMajor ? rows : cols,
2 * k * k - k + 1,
2 * k * k - k + 1);
@@ -621,19 +622,19 @@
delete[] m_colStartIndex;
delete[] m_rowStartIndex;
}
- m_colStartIndex = new int [cols + 1];
- m_rowStartIndex = new int [rows + 1];
+ m_colStartIndex = new Index [cols + 1];
+ m_rowStartIndex = new Index [rows + 1];
m_outerSize = diagSize;
m_data.reset();
m_data.clear();
m_outerSize = diagSize;
- memset(m_colStartIndex, 0, (cols + 1) * sizeof (int));
- memset(m_rowStartIndex, 0, (rows + 1) * sizeof (int));
+ memset(m_colStartIndex, 0, (cols + 1) * sizeof (Index));
+ memset(m_rowStartIndex, 0, (rows + 1) * sizeof (Index));
}
- void resizeNonZeros(int size) {
+ void resizeNonZeros(Index size) {
m_data.resize(size);
}
@@ -673,8 +674,8 @@
swap(other.const_cast_derived());
} else {
resize(other.rows(), other.cols());
- memcpy(m_colStartIndex, other.m_colStartIndex, (m_outerSize + 1) * sizeof (int));
- memcpy(m_rowStartIndex, other.m_rowStartIndex, (m_outerSize + 1) * sizeof (int));
+ memcpy(m_colStartIndex, other.m_colStartIndex, (m_outerSize + 1) * sizeof (Index));
+ memcpy(m_rowStartIndex, other.m_rowStartIndex, (m_outerSize + 1) * sizeof (Index));
m_data = other.m_data;
}
return *this;
@@ -696,34 +697,34 @@
EIGEN_DBG_SKYLINE(
std::cout << "upper elements : " << std::endl;
- for (unsigned int i = 0; i < m.m_data.upperSize(); i++)
+ for (Index i = 0; i < m.m_data.upperSize(); i++)
std::cout << m.m_data.upper(i) << "\t";
std::cout << std::endl;
std::cout << "upper profile : " << std::endl;
- for (unsigned int i = 0; i < m.m_data.upperProfileSize(); i++)
+ for (Index i = 0; i < m.m_data.upperProfileSize(); i++)
std::cout << m.m_data.upperProfile(i) << "\t";
std::cout << std::endl;
std::cout << "lower startIdx : " << std::endl;
- for (unsigned int i = 0; i < m.m_data.upperProfileSize(); i++)
+ for (Index i = 0; i < m.m_data.upperProfileSize(); i++)
std::cout << (IsRowMajor ? m.m_colStartIndex[i] : m.m_rowStartIndex[i]) << "\t";
std::cout << std::endl;
std::cout << "lower elements : " << std::endl;
- for (unsigned int i = 0; i < m.m_data.lowerSize(); i++)
+ for (Index i = 0; i < m.m_data.lowerSize(); i++)
std::cout << m.m_data.lower(i) << "\t";
std::cout << std::endl;
std::cout << "lower profile : " << std::endl;
- for (unsigned int i = 0; i < m.m_data.lowerProfileSize(); i++)
+ for (Index i = 0; i < m.m_data.lowerProfileSize(); i++)
std::cout << m.m_data.lowerProfile(i) << "\t";
std::cout << std::endl;
std::cout << "lower startIdx : " << std::endl;
- for (unsigned int i = 0; i < m.m_data.lowerProfileSize(); i++)
+ for (Index i = 0; i < m.m_data.lowerProfileSize(); i++)
std::cout << (IsRowMajor ? m.m_rowStartIndex[i] : m.m_colStartIndex[i]) << "\t";
std::cout << std::endl;
);
- for (unsigned int rowIdx = 0; rowIdx < m.rows(); rowIdx++) {
- for (unsigned int colIdx = 0; colIdx < m.cols(); colIdx++) {
+ for (Index rowIdx = 0; rowIdx < m.rows(); rowIdx++) {
+ for (Index colIdx = 0; colIdx < m.cols(); colIdx++) {
s << m.coeff(rowIdx, colIdx) << "\t";
}
s << std::endl;
@@ -745,7 +746,7 @@
class SkylineMatrix<Scalar, _Options>::InnerUpperIterator {
public:
- InnerUpperIterator(const SkylineMatrix& mat, int outer)
+ InnerUpperIterator(const SkylineMatrix& mat, Index outer)
: m_matrix(mat), m_outer(outer),
m_id(_Options == RowMajor ? mat.m_colStartIndex[outer] : mat.m_rowStartIndex[outer] + 1),
m_start(m_id),
@@ -757,7 +758,7 @@
return *this;
}
- inline InnerUpperIterator & operator+=(unsigned int shift) {
+ inline InnerUpperIterator & operator+=(Index shift) {
m_id += shift;
return *this;
}
@@ -774,16 +775,16 @@
return const_cast<Scalar&> (m_matrix.m_data.upper(m_id));
}
- inline int index() const {
+ inline Index index() const {
return IsRowMajor ? m_outer - m_matrix.m_data.upperProfile(m_outer) + (m_id - m_start) :
m_outer + (m_id - m_start) + 1;
}
- inline int row() const {
+ inline Index row() const {
return IsRowMajor ? index() : m_outer;
}
- inline int col() const {
+ inline Index col() const {
return IsRowMajor ? m_outer : index();
}
@@ -797,17 +798,17 @@
protected:
const SkylineMatrix& m_matrix;
- const int m_outer;
- int m_id;
- const int m_start;
- const int m_end;
+ const Index m_outer;
+ Index m_id;
+ const Index m_start;
+ const Index m_end;
};
template<typename Scalar, int _Options>
class SkylineMatrix<Scalar, _Options>::InnerLowerIterator {
public:
- InnerLowerIterator(const SkylineMatrix& mat, int outer)
+ InnerLowerIterator(const SkylineMatrix& mat, Index outer)
: m_matrix(mat),
m_outer(outer),
m_id(_Options == RowMajor ? mat.m_rowStartIndex[outer] : mat.m_colStartIndex[outer] + 1),
@@ -820,7 +821,7 @@
return *this;
}
- inline InnerLowerIterator & operator+=(unsigned int shift) {
+ inline InnerLowerIterator & operator+=(Index shift) {
m_id += shift;
return *this;
}
@@ -837,17 +838,17 @@
return const_cast<Scalar&> (m_matrix.m_data.lower(m_id));
}
- inline int index() const {
+ inline Index index() const {
return IsRowMajor ? m_outer - m_matrix.m_data.lowerProfile(m_outer) + (m_id - m_start) :
m_outer + (m_id - m_start) + 1;
;
}
- inline int row() const {
+ inline Index row() const {
return IsRowMajor ? m_outer : index();
}
- inline int col() const {
+ inline Index col() const {
return IsRowMajor ? index() : m_outer;
}
@@ -861,10 +862,10 @@
protected:
const SkylineMatrix& m_matrix;
- const int m_outer;
- int m_id;
- const int m_start;
- const int m_end;
+ const Index m_outer;
+ Index m_id;
+ const Index m_start;
+ const Index m_end;
};
#endif // EIGEN_SkylineMatrix_H
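
A detail worth flagging in SkylineMatrix: the raw memset/memcpy calls had to change in lockstep with the array element type. Once m_colStartIndex and m_rowStartIndex hold Index (8 bytes on LP64) instead of int (4 bytes), a byte count computed with sizeof(int) would clear or copy only half of each entry. Sketch:

    #include <cstddef>
    #include <cstring>

    typedef std::ptrdiff_t Index;

    void clearStarts(Index* colStartIndex, Index outerSize) {
        // sizeof(Index) must match the element type of the array;
        // sizeof(int) here would leave the upper half of each entry stale.
        std::memset(colStartIndex, 0, (outerSize + 1) * sizeof(Index));
    }
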
diff --git a/unsupported/Eigen/src/Skyline/SkylineMatrixBase.h b/unsupported/Eigen/src/Skyline/SkylineMatrixBase.h
index ff20b83..17349c1 100644
--- a/unsupported/Eigen/src/Skyline/SkylineMatrixBase.h
+++ b/unsupported/Eigen/src/Skyline/SkylineMatrixBase.h
@@ -40,6 +40,8 @@
public:
typedef typename ei_traits<Derived>::Scalar Scalar;
+ typedef typename ei_traits<Derived>::StorageKind StorageKind;
+ typedef typename ei_index<StorageKind>::type Index;
enum {
RowsAtCompileTime = ei_traits<Derived>::RowsAtCompileTime,
@@ -113,36 +115,36 @@
#endif // not EIGEN_PARSED_BY_DOXYGEN
/** \returns the number of rows. \sa cols(), RowsAtCompileTime */
- inline int rows() const {
+ inline Index rows() const {
return derived().rows();
}
/** \returns the number of columns. \sa rows(), ColsAtCompileTime*/
- inline int cols() const {
+ inline Index cols() const {
return derived().cols();
}
/** \returns the number of coefficients, which is \a rows()*cols().
* \sa rows(), cols(), SizeAtCompileTime. */
- inline int size() const {
+ inline Index size() const {
return rows() * cols();
}
/** \returns the number of nonzero coefficients which is in practice the number
* of stored coefficients. */
- inline int nonZeros() const {
+ inline Index nonZeros() const {
return derived().nonZeros();
}
/** \returns the size of the storage major dimension,
* i.e., the number of columns for a columns major matrix, and the number of rows otherwise */
- int outerSize() const {
+ Index outerSize() const {
return (int(Flags) & RowMajorBit) ? this->rows() : this->cols();
}
/** \returns the size of the inner dimension according to the storage order,
* i.e., the number of rows for a columns major matrix, and the number of cols otherwise */
- int innerSize() const {
+ Index innerSize() const {
return (int(Flags) & RowMajorBit) ? this->cols() : this->rows();
}
@@ -167,8 +169,8 @@
template<typename OtherDerived>
inline void assignGeneric(const OtherDerived& other) {
derived().resize(other.rows(), other.cols());
- for (unsigned int row = 0; row < rows(); row++)
- for (unsigned int col = 0; col < cols(); col++) {
+ for (Index row = 0; row < rows(); row++)
+ for (Index col = 0; col < cols(); col++) {
if (other.coeff(row, col) != Scalar(0))
derived().insert(row, col) = other.coeff(row, col);
}
@@ -196,8 +198,8 @@
template<typename DenseDerived>
void evalTo(MatrixBase<DenseDerived>& dst) const {
dst.setZero();
- for (unsigned int i = 0; i < rows(); i++)
- for (unsigned int j = 0; j < rows(); j++)
+ for (Index i = 0; i < rows(); i++)
+ for (Index j = 0; j < rows(); j++)
dst(i, j) = derived().coeff(i, j);
}
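
The two typedefs added to SkylineMatrixBase show the dispatch mechanism behind the whole patch: a StorageKind tag exported by the traits selects the index type through ei_index, so dense and sparse/skyline expressions can use different integer widths without touching the generic code. A minimal self-contained sketch (the width chosen for SparseIndex here is an assumption, not taken from the patch):

    #include <cstddef>

    struct Dense  {};
    struct Sparse {};

    typedef std::ptrdiff_t DenseIndex;
    typedef int            SparseIndex;  // assumed width, for illustration only

    // ei_index maps a storage-kind tag to the index type of that family.
    template <typename StorageKind> struct ei_index;
    template <> struct ei_index<Dense>  { typedef DenseIndex  type; };
    template <> struct ei_index<Sparse> { typedef SparseIndex type; };

    template <typename StorageKind>
    struct MatrixBaseSketch {
        typedef typename ei_index<StorageKind>::type Index;
        Index rows() const { return 0; }  // sizes and strides all use Index
    };
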
diff --git a/unsupported/Eigen/src/Skyline/SkylineProduct.h b/unsupported/Eigen/src/Skyline/SkylineProduct.h
index 9586576..e6484ad 100644
--- a/unsupported/Eigen/src/Skyline/SkylineProduct.h
+++ b/unsupported/Eigen/src/Skyline/SkylineProduct.h
@@ -106,11 +106,11 @@
EIGEN_STATIC_ASSERT(ProductIsValid || SameSizes, INVALID_MATRIX_PRODUCT)
}
- EIGEN_STRONG_INLINE int rows() const {
+ EIGEN_STRONG_INLINE Index rows() const {
return m_lhs.rows();
}
- EIGEN_STRONG_INLINE int cols() const {
+ EIGEN_STRONG_INLINE Index cols() const {
return m_rhs.cols();
}
@@ -147,18 +147,18 @@
};
//Use matrix diagonal part <- Improvement : use inner iterator on dense matrix.
- for (unsigned int col = 0; col < rhs.cols(); col++) {
- for (unsigned int row = 0; row < lhs.rows(); row++) {
+ for (Index col = 0; col < rhs.cols(); col++) {
+ for (Index row = 0; row < lhs.rows(); row++) {
dst(row, col) = lhs.coeffDiag(row) * rhs(row, col);
}
}
//Use matrix lower triangular part
- for (unsigned int row = 0; row < lhs.rows(); row++) {
+ for (Index row = 0; row < lhs.rows(); row++) {
typename _Lhs::InnerLowerIterator lIt(lhs, row);
- const int stop = lIt.col() + lIt.size();
- for (unsigned int col = 0; col < rhs.cols(); col++) {
+ const Index stop = lIt.col() + lIt.size();
+ for (Index col = 0; col < rhs.cols(); col++) {
- unsigned int k = lIt.col();
+ Index k = lIt.col();
Scalar tmp = 0;
while (k < stop) {
tmp +=
@@ -173,14 +173,14 @@
}
//Use matrix upper triangular part
- for (unsigned int lhscol = 0; lhscol < lhs.cols(); lhscol++) {
+ for (Index lhscol = 0; lhscol < lhs.cols(); lhscol++) {
typename _Lhs::InnerUpperIterator uIt(lhs, lhscol);
- const int stop = uIt.size() + uIt.row();
- for (unsigned int rhscol = 0; rhscol < rhs.cols(); rhscol++) {
+ const Index stop = uIt.size() + uIt.row();
+ for (Index rhscol = 0; rhscol < rhs.cols(); rhscol++) {
const Scalar rhsCoeff = rhs.coeff(lhscol, rhscol);
- unsigned int k = uIt.row();
+ Index k = uIt.row();
while (k < stop) {
dst(k++, rhscol) +=
uIt.value() *
@@ -210,19 +210,19 @@
};
//Use matrix diagonal part <- Improvement : use inner iterator on dense matrix.
- for (unsigned int col = 0; col < rhs.cols(); col++) {
- for (unsigned int row = 0; row < lhs.rows(); row++) {
+ for (Index col = 0; col < rhs.cols(); col++) {
+ for (Index row = 0; row < lhs.rows(); row++) {
dst(row, col) = lhs.coeffDiag(row) * rhs(row, col);
}
}
//Use matrix upper triangular part
- for (unsigned int row = 0; row < lhs.rows(); row++) {
+ for (Index row = 0; row < lhs.rows(); row++) {
typename _Lhs::InnerUpperIterator uIt(lhs, row);
- const int stop = uIt.col() + uIt.size();
- for (unsigned int col = 0; col < rhs.cols(); col++) {
+ const Index stop = uIt.col() + uIt.size();
+ for (Index col = 0; col < rhs.cols(); col++) {
- unsigned int k = uIt.col();
+ Index k = uIt.col();
Scalar tmp = 0;
while (k < stop) {
tmp +=
@@ -238,13 +238,13 @@
}
//Use matrix lower triangular part
- for (unsigned int lhscol = 0; lhscol < lhs.cols(); lhscol++) {
+ for (Index lhscol = 0; lhscol < lhs.cols(); lhscol++) {
typename _Lhs::InnerLowerIterator lIt(lhs, lhscol);
- const int stop = lIt.size() + lIt.row();
- for (unsigned int rhscol = 0; rhscol < rhs.cols(); rhscol++) {
+ const Index stop = lIt.size() + lIt.row();
+ for (Index rhscol = 0; rhscol < rhs.cols(); rhscol++) {
const Scalar rhsCoeff = rhs.coeff(lhscol, rhscol);
- unsigned int k = lIt.row();
+ Index k = lIt.row();
while (k < stop) {
dst(k++, rhscol) +=
lIt.value() *
diff --git a/unsupported/Eigen/src/Skyline/SkylineStorage.h b/unsupported/Eigen/src/Skyline/SkylineStorage.h
index 641508f..13f3e0c 100644
--- a/unsupported/Eigen/src/Skyline/SkylineStorage.h
+++ b/unsupported/Eigen/src/Skyline/SkylineStorage.h
@@ -34,6 +34,7 @@
template<typename Scalar>
class SkylineStorage {
typedef typename NumTraits<Scalar>::Real RealScalar;
+ typedef SparseIndex Index;
public:
SkylineStorage()
@@ -70,8 +71,8 @@
memcpy(m_diag, other.m_diag, m_diagSize * sizeof (Scalar));
memcpy(m_upper, other.m_upper, other.upperSize() * sizeof (Scalar));
memcpy(m_lower, other.m_lower, other.lowerSize() * sizeof (Scalar));
- memcpy(m_upperProfile, other.m_upperProfile, m_upperProfileSize * sizeof (int));
- memcpy(m_lowerProfile, other.m_lowerProfile, m_lowerProfileSize * sizeof (int));
+ memcpy(m_upperProfile, other.m_upperProfile, m_upperProfileSize * sizeof (Index));
+ memcpy(m_lowerProfile, other.m_lowerProfile, m_lowerProfileSize * sizeof (Index));
return *this;
}
@@ -96,8 +97,8 @@
delete[] m_lowerProfile;
}
- void reserve(size_t size, size_t upperProfileSize, size_t lowerProfileSize, size_t upperSize, size_t lowerSize) {
- int newAllocatedSize = size + upperSize + lowerSize;
+ void reserve(Index size, Index upperProfileSize, Index lowerProfileSize, Index upperSize, Index lowerSize) {
+ Index newAllocatedSize = size + upperSize + lowerSize;
if (newAllocatedSize > m_allocatedSize)
reallocate(size, upperProfileSize, lowerProfileSize, upperSize, lowerSize);
}
@@ -107,9 +108,9 @@
reallocate(m_diagSize, m_upperProfileSize, m_lowerProfileSize, m_upperSize, m_lowerSize);
}
- void resize(size_t diagSize, size_t upperProfileSize, size_t lowerProfileSize, size_t upperSize, size_t lowerSize, float reserveSizeFactor = 0) {
+ void resize(Index diagSize, Index upperProfileSize, Index lowerProfileSize, Index upperSize, Index lowerSize, float reserveSizeFactor = 0) {
if (m_allocatedSize < diagSize + upperSize + lowerSize)
- reallocate(diagSize, upperProfileSize, lowerProfileSize, upperSize + size_t(reserveSizeFactor * upperSize), lowerSize + size_t(reserveSizeFactor * lowerSize));
+ reallocate(diagSize, upperProfileSize, lowerProfileSize, upperSize + Index(reserveSizeFactor * upperSize), lowerSize + Index(reserveSizeFactor * lowerSize));
m_diagSize = diagSize;
m_upperSize = upperSize;
m_lowerSize = lowerSize;
@@ -117,27 +118,27 @@
m_lowerProfileSize = lowerProfileSize;
}
- inline size_t diagSize() const {
+ inline Index diagSize() const {
return m_diagSize;
}
- inline size_t upperSize() const {
+ inline Index upperSize() const {
return m_upperSize;
}
- inline size_t lowerSize() const {
+ inline Index lowerSize() const {
return m_lowerSize;
}
- inline size_t upperProfileSize() const {
+ inline Index upperProfileSize() const {
return m_upperProfileSize;
}
- inline size_t lowerProfileSize() const {
+ inline Index lowerProfileSize() const {
return m_lowerProfileSize;
}
- inline size_t allocatedSize() const {
+ inline Index allocatedSize() const {
return m_allocatedSize;
}
@@ -145,47 +146,47 @@
m_diagSize = 0;
}
- inline Scalar& diag(size_t i) {
+ inline Scalar& diag(Index i) {
return m_diag[i];
}
- inline const Scalar& diag(size_t i) const {
+ inline const Scalar& diag(Index i) const {
return m_diag[i];
}
- inline Scalar& upper(size_t i) {
+ inline Scalar& upper(Index i) {
return m_upper[i];
}
- inline const Scalar& upper(size_t i) const {
+ inline const Scalar& upper(Index i) const {
return m_upper[i];
}
- inline Scalar& lower(size_t i) {
+ inline Scalar& lower(Index i) {
return m_lower[i];
}
- inline const Scalar& lower(size_t i) const {
+ inline const Scalar& lower(Index i) const {
return m_lower[i];
}
- inline int& upperProfile(size_t i) {
+ inline Index& upperProfile(Index i) {
return m_upperProfile[i];
}
- inline const int& upperProfile(size_t i) const {
+ inline const Index& upperProfile(Index i) const {
return m_upperProfile[i];
}
- inline int& lowerProfile(size_t i) {
+ inline Index& lowerProfile(Index i) {
return m_lowerProfile[i];
}
- inline const int& lowerProfile(size_t i) const {
+ inline const Index& lowerProfile(Index i) const {
return m_lowerProfile[i];
}
- static SkylineStorage Map(int* upperProfile, int* lowerProfile, Scalar* diag, Scalar* upper, Scalar* lower, size_t size, size_t upperSize, size_t lowerSize) {
+ static SkylineStorage Map(Index* upperProfile, Index* lowerProfile, Scalar* diag, Scalar* upper, Scalar* lower, Index size, Index upperSize, Index lowerSize) {
SkylineStorage res;
res.m_upperProfile = upperProfile;
res.m_lowerProfile = lowerProfile;
@@ -202,8 +203,8 @@
memset(m_diag, 0, m_diagSize * sizeof (Scalar));
memset(m_upper, 0, m_upperSize * sizeof (Scalar));
memset(m_lower, 0, m_lowerSize * sizeof (Scalar));
- memset(m_upperProfile, 0, m_diagSize * sizeof (int));
- memset(m_lowerProfile, 0, m_diagSize * sizeof (int));
+ memset(m_upperProfile, 0, m_diagSize * sizeof (Index));
+ memset(m_lowerProfile, 0, m_diagSize * sizeof (Index));
}
void prune(Scalar reference, RealScalar epsilon = dummy_precision<RealScalar>()) {
@@ -212,26 +213,26 @@
protected:
- inline void reallocate(size_t diagSize, size_t upperProfileSize, size_t lowerProfileSize, size_t upperSize, size_t lowerSize) {
+ inline void reallocate(Index diagSize, Index upperProfileSize, Index lowerProfileSize, Index upperSize, Index lowerSize) {
Scalar* diag = new Scalar[diagSize];
Scalar* upper = new Scalar[upperSize];
Scalar* lower = new Scalar[lowerSize];
- int* upperProfile = new int[upperProfileSize];
- int* lowerProfile = new int[lowerProfileSize];
+ Index* upperProfile = new Index[upperProfileSize];
+ Index* lowerProfile = new Index[lowerProfileSize];
- size_t copyDiagSize = std::min(diagSize, m_diagSize);
- size_t copyUpperSize = std::min(upperSize, m_upperSize);
- size_t copyLowerSize = std::min(lowerSize, m_lowerSize);
- size_t copyUpperProfileSize = std::min(upperProfileSize, m_upperProfileSize);
- size_t copyLowerProfileSize = std::min(lowerProfileSize, m_lowerProfileSize);
+ Index copyDiagSize = std::min(diagSize, m_diagSize);
+ Index copyUpperSize = std::min(upperSize, m_upperSize);
+ Index copyLowerSize = std::min(lowerSize, m_lowerSize);
+ Index copyUpperProfileSize = std::min(upperProfileSize, m_upperProfileSize);
+ Index copyLowerProfileSize = std::min(lowerProfileSize, m_lowerProfileSize);
// copy
memcpy(diag, m_diag, copyDiagSize * sizeof (Scalar));
memcpy(upper, m_upper, copyUpperSize * sizeof (Scalar));
memcpy(lower, m_lower, copyLowerSize * sizeof (Scalar));
- memcpy(upperProfile, m_upperProfile, copyUpperProfileSize * sizeof (int));
- memcpy(lowerProfile, m_lowerProfile, copyLowerProfileSize * sizeof (int));
+ memcpy(upperProfile, m_upperProfile, copyUpperProfileSize * sizeof (Index));
+ memcpy(lowerProfile, m_lowerProfile, copyLowerProfileSize * sizeof (Index));
@@ -255,14 +256,14 @@
Scalar* m_diag;
Scalar* m_upper;
Scalar* m_lower;
- int* m_upperProfile;
- int* m_lowerProfile;
- size_t m_diagSize;
- size_t m_upperSize;
- size_t m_lowerSize;
- size_t m_upperProfileSize;
- size_t m_lowerProfileSize;
- size_t m_allocatedSize;
+ Index* m_upperProfile;
+ Index* m_lowerProfile;
+ Index m_diagSize;
+ Index m_upperSize;
+ Index m_lowerSize;
+ Index m_upperProfileSize;
+ Index m_lowerProfileSize;
+ Index m_allocatedSize;
};
diff --git a/unsupported/Eigen/src/Skyline/SkylineUtil.h b/unsupported/Eigen/src/Skyline/SkylineUtil.h
index 71563ad..7781c33 100644
--- a/unsupported/Eigen/src/Skyline/SkylineUtil.h
+++ b/unsupported/Eigen/src/Skyline/SkylineUtil.h
@@ -56,20 +56,22 @@
}
#define EIGEN_SKYLINE_INHERIT_ASSIGNMENT_OPERATORS(Derived) \
-EIGEN_SKYLINE_INHERIT_ASSIGNMENT_OPERATOR(Derived, =) \
-EIGEN_SKYLINE_INHERIT_ASSIGNMENT_OPERATOR(Derived, +=) \
-EIGEN_SKYLINE_INHERIT_ASSIGNMENT_OPERATOR(Derived, -=) \
-EIGEN_SKYLINE_INHERIT_SCALAR_ASSIGNMENT_OPERATOR(Derived, *=) \
-EIGEN_SKYLINE_INHERIT_SCALAR_ASSIGNMENT_OPERATOR(Derived, /=)
+ EIGEN_SKYLINE_INHERIT_ASSIGNMENT_OPERATOR(Derived, =) \
+ EIGEN_SKYLINE_INHERIT_ASSIGNMENT_OPERATOR(Derived, +=) \
+ EIGEN_SKYLINE_INHERIT_ASSIGNMENT_OPERATOR(Derived, -=) \
+ EIGEN_SKYLINE_INHERIT_SCALAR_ASSIGNMENT_OPERATOR(Derived, *=) \
+ EIGEN_SKYLINE_INHERIT_SCALAR_ASSIGNMENT_OPERATOR(Derived, /=)
#define _EIGEN_SKYLINE_GENERIC_PUBLIC_INTERFACE(Derived, BaseClass) \
-typedef BaseClass Base; \
-typedef typename Eigen::ei_traits<Derived>::Scalar Scalar; \
-typedef typename Eigen::NumTraits<Scalar>::Real RealScalar; \
-enum { Flags = Eigen::ei_traits<Derived>::Flags, };
+ typedef BaseClass Base; \
+ typedef typename Eigen::ei_traits<Derived>::Scalar Scalar; \
+ typedef typename Eigen::NumTraits<Scalar>::Real RealScalar; \
+ typedef typename Eigen::ei_traits<Derived>::StorageKind StorageKind; \
+ typedef typename Eigen::ei_index<StorageKind>::type Index; \
+ enum { Flags = Eigen::ei_traits<Derived>::Flags, };
#define EIGEN_SKYLINE_GENERIC_PUBLIC_INTERFACE(Derived) \
-_EIGEN_SKYLINE_GENERIC_PUBLIC_INTERFACE(Derived, Eigen::SkylineMatrixBase<Derived>)
+ _EIGEN_SKYLINE_GENERIC_PUBLIC_INTERFACE(Derived, Eigen::SkylineMatrixBase<Derived>)
template<typename Derived> class SkylineMatrixBase;
template<typename _Scalar, int _Flags = 0> class SkylineMatrix;
diff --git a/unsupported/test/NonLinearOptimization.cpp b/unsupported/test/NonLinearOptimization.cpp
index 3867c31..0a609f7 100644
--- a/unsupported/test/NonLinearOptimization.cpp
+++ b/unsupported/test/NonLinearOptimization.cpp
@@ -558,7 +558,7 @@
// do the computation
lmdif_functor functor;
- int nfev;
+ DenseIndex nfev;
info = LevenbergMarquardt<lmdif_functor>::lmdif1(functor, x, &nfev);
// check return value
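
Caller-side impact, as the test change above demonstrates: user code that passed an int* for the evaluation counter must now declare a DenseIndex. A trivial self-contained analog:

    #include <cstddef>

    typedef std::ptrdiff_t DenseIndex;

    int solveSketch(DenseIndex* nfev) {  // was: int* nfev
        *nfev = 0;                       // the solver writes its count here
        return 0;
    }

    int main() {
        DenseIndex nfev;                 // was: int nfev;
        return solveSketch(&nfev);
    }
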