ADD: added other eigen lib
libs/eigen/Eigen/src/SparseCore/AmbiVector.h
@@ -10,6 +10,8 @@
 #ifndef EIGEN_AMBIVECTOR_H
 #define EIGEN_AMBIVECTOR_H
 
+#include "./InternalHeaderCheck.h"
+
 namespace Eigen {
 
 namespace internal {
@@ -19,12 +21,12 @@ namespace internal {
  *
  * See BasicSparseLLT and SparseProduct for usage examples.
  */
-template<typename _Scalar, typename _StorageIndex>
+template<typename Scalar_, typename StorageIndex_>
 class AmbiVector
 {
   public:
-    typedef _Scalar Scalar;
-    typedef _StorageIndex StorageIndex;
+    typedef Scalar_ Scalar;
+    typedef StorageIndex_ StorageIndex;
     typedef typename NumTraits<Scalar>::Real RealScalar;
 
     explicit AmbiVector(Index size)
@@ -125,8 +127,8 @@ class AmbiVector
 };
 
 /** \returns the number of non zeros in the current sub vector */
-template<typename _Scalar,typename _StorageIndex>
-Index AmbiVector<_Scalar,_StorageIndex>::nonZeros() const
+template<typename Scalar_,typename StorageIndex_>
+Index AmbiVector<Scalar_,StorageIndex_>::nonZeros() const
 {
   if (m_mode==IsSparse)
     return m_llSize;
@@ -134,8 +136,8 @@ Index AmbiVector<_Scalar,_StorageIndex>::nonZeros() const
     return m_end - m_start;
 }
 
-template<typename _Scalar,typename _StorageIndex>
-void AmbiVector<_Scalar,_StorageIndex>::init(double estimatedDensity)
+template<typename Scalar_,typename StorageIndex_>
+void AmbiVector<Scalar_,StorageIndex_>::init(double estimatedDensity)
 {
   if (estimatedDensity>0.1)
     init(IsDense);
@@ -143,8 +145,8 @@ void AmbiVector<_Scalar,_StorageIndex>::init(double estimatedDensity)
     init(IsSparse);
 }
 
-template<typename _Scalar,typename _StorageIndex>
-void AmbiVector<_Scalar,_StorageIndex>::init(int mode)
+template<typename Scalar_,typename StorageIndex_>
+void AmbiVector<Scalar_,StorageIndex_>::init(int mode)
 {
   m_mode = mode;
   // This is only necessary in sparse mode, but we set these unconditionally to avoid some maybe-uninitialized warnings
@@ -160,15 +162,15 @@ void AmbiVector<_Scalar,_StorageIndex>::init(int mode)
  *
  * Don't worry, this function is extremely cheap.
  */
-template<typename _Scalar,typename _StorageIndex>
-void AmbiVector<_Scalar,_StorageIndex>::restart()
+template<typename Scalar_,typename StorageIndex_>
+void AmbiVector<Scalar_,StorageIndex_>::restart()
 {
   m_llCurrent = m_llStart;
 }
 
 /** Set all coefficients of current subvector to zero */
-template<typename _Scalar,typename _StorageIndex>
-void AmbiVector<_Scalar,_StorageIndex>::setZero()
+template<typename Scalar_,typename StorageIndex_>
+void AmbiVector<Scalar_,StorageIndex_>::setZero()
 {
   if (m_mode==IsDense)
   {
@@ -183,8 +185,8 @@ void AmbiVector<_Scalar,_StorageIndex>::setZero()
   }
 }
 
-template<typename _Scalar,typename _StorageIndex>
-_Scalar& AmbiVector<_Scalar,_StorageIndex>::coeffRef(Index i)
+template<typename Scalar_,typename StorageIndex_>
+Scalar_& AmbiVector<Scalar_,StorageIndex_>::coeffRef(Index i)
 {
   if (m_mode==IsDense)
     return m_buffer[i];
@@ -252,8 +254,8 @@ _Scalar& AmbiVector<_Scalar,_StorageIndex>::coeffRef(Index i)
   }
 }
 
-template<typename _Scalar,typename _StorageIndex>
-_Scalar& AmbiVector<_Scalar,_StorageIndex>::coeff(Index i)
+template<typename Scalar_,typename StorageIndex_>
+Scalar_& AmbiVector<Scalar_,StorageIndex_>::coeff(Index i)
 {
   if (m_mode==IsDense)
     return m_buffer[i];
@@ -280,11 +282,11 @@ _Scalar& AmbiVector<_Scalar,_StorageIndex>::coeff(Index i)
 }
 
 /** Iterator over the nonzero coefficients */
-template<typename _Scalar,typename _StorageIndex>
-class AmbiVector<_Scalar,_StorageIndex>::Iterator
+template<typename Scalar_,typename StorageIndex_>
+class AmbiVector<Scalar_,StorageIndex_>::Iterator
 {
   public:
-    typedef _Scalar Scalar;
+    typedef Scalar_ Scalar;
     typedef typename NumTraits<Scalar>::Real RealScalar;
 
     /** Default constructor
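Note: the recurring change in this and the following headers is the rename of template parameters from _Scalar/_StorageIndex to Scalar_/StorageIndex_. Identifiers beginning with an underscore followed by an uppercase letter are reserved for the implementation in C++, so the trailing-underscore spelling is the conforming one. A minimal illustration of the pattern (hypothetical type, not part of the diff):

    // Reserved: template<typename _Scalar> may collide with implementation names.
    // Conforming: move the underscore to the end; public aliases keep the old API.
    template <typename Scalar_, typename StorageIndex_ = int>
    class AmbiVectorLike {
     public:
      typedef Scalar_ Scalar;             // user-facing names are unchanged
      typedef StorageIndex_ StorageIndex;
    };

    AmbiVectorLike<double> v;             // Scalar = double, StorageIndex = int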
libs/eigen/Eigen/src/SparseCore/CompressedStorage.h
@@ -10,6 +10,8 @@
 #ifndef EIGEN_COMPRESSED_STORAGE_H
 #define EIGEN_COMPRESSED_STORAGE_H
 
+#include "./InternalHeaderCheck.h"
+
 namespace Eigen {
 
 namespace internal {
@@ -18,13 +20,13 @@ namespace internal {
  * Stores a sparse set of values as a list of values and a list of indices.
  *
  */
-template<typename _Scalar,typename _StorageIndex>
+template<typename Scalar_,typename StorageIndex_>
 class CompressedStorage
 {
   public:
 
-    typedef _Scalar Scalar;
-    typedef _StorageIndex StorageIndex;
+    typedef Scalar_ Scalar;
+    typedef StorageIndex_ StorageIndex;
 
   protected:
 
@@ -69,8 +71,8 @@ class CompressedStorage
 
    ~CompressedStorage()
    {
-      delete[] m_values;
-      delete[] m_indices;
+      conditional_aligned_delete_auto<Scalar, true>(m_values, m_allocatedSize);
+      conditional_aligned_delete_auto<StorageIndex, true>(m_indices, m_allocatedSize);
    }
 
    void reserve(Index size)
@@ -178,24 +180,13 @@ class CompressedStorage
    {
      if (m_allocatedSize<m_size+1)
      {
-        m_allocatedSize = 2*(m_size+1);
-        internal::scoped_array<Scalar> newValues(m_allocatedSize);
-        internal::scoped_array<StorageIndex> newIndices(m_allocatedSize);
-
-        // copy first chunk
-        internal::smart_copy(m_values,  m_values +id, newValues.ptr());
-        internal::smart_copy(m_indices, m_indices+id, newIndices.ptr());
-
-        // copy the rest
-        if(m_size>id)
-        {
-          internal::smart_copy(m_values +id,  m_values +m_size, newValues.ptr() +id+1);
-          internal::smart_copy(m_indices+id,  m_indices+m_size, newIndices.ptr()+id+1);
-        }
-        std::swap(m_values,newValues.ptr());
-        std::swap(m_indices,newIndices.ptr());
+        Index newAllocatedSize = 2 * (m_size + 1);
+        m_values = conditional_aligned_realloc_new_auto<Scalar, true>(m_values, newAllocatedSize, m_allocatedSize);
+        m_indices =
+            conditional_aligned_realloc_new_auto<StorageIndex, true>(m_indices, newAllocatedSize, m_allocatedSize);
+        m_allocatedSize = newAllocatedSize;
      }
-      else if(m_size>id)
+      if(m_size>id)
      {
        internal::smart_memmove(m_values +id, m_values +m_size, m_values +id+1);
        internal::smart_memmove(m_indices+id, m_indices+m_size, m_indices+id+1);
@@ -223,22 +214,6 @@ class CompressedStorage
      }
    }
 
-    void prune(const Scalar& reference, const RealScalar& epsilon = NumTraits<RealScalar>::dummy_precision())
-    {
-      Index k = 0;
-      Index n = size();
-      for (Index i=0; i<n; ++i)
-      {
-        if (!internal::isMuchSmallerThan(value(i), reference, epsilon))
-        {
-          value(k) = value(i);
-          index(k) = index(i);
-          ++k;
-        }
-      }
-      resize(k,0);
-    }
-
  protected:
 
    inline void reallocate(Index size)
@@ -247,15 +222,8 @@ class CompressedStorage
      EIGEN_SPARSE_COMPRESSED_STORAGE_REALLOCATE_PLUGIN
      #endif
      eigen_internal_assert(size!=m_allocatedSize);
-      internal::scoped_array<Scalar> newValues(size);
-      internal::scoped_array<StorageIndex> newIndices(size);
-      Index copySize = (std::min)(size, m_size);
-      if (copySize>0) {
-        internal::smart_copy(m_values, m_values+copySize, newValues.ptr());
-        internal::smart_copy(m_indices, m_indices+copySize, newIndices.ptr());
-      }
-      std::swap(m_values,newValues.ptr());
-      std::swap(m_indices,newIndices.ptr());
+      m_values = conditional_aligned_realloc_new_auto<Scalar, true>(m_values, size, m_allocatedSize);
+      m_indices = conditional_aligned_realloc_new_auto<StorageIndex, true>(m_indices, size, m_allocatedSize);
      m_allocatedSize = size;
    }
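The destructor and both growth paths now go through the conditional_aligned_*_auto allocators, which pair aligned allocation with deallocation and can extend a block in place; that is why the copy-and-swap code disappears and the old "else if(m_size>id)" becomes a plain "if" (realloc already preserved the prefix). A rough sketch of the idea for trivially copyable elements (simplified; Eigen's helpers additionally handle alignment and non-trivial types):

    #include <cstdlib>

    // Grow a buffer in place when possible; realloc preserves the existing
    // elements, so no explicit copy step is required after a successful call.
    static double* growBuffer(double* data, std::size_t newCount) {
      void* p = std::realloc(data, newCount * sizeof(double));
      return static_cast<double*>(p);  // error handling elided in this sketch
    }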
libs/eigen/Eigen/src/SparseCore/ConservativeSparseSparseProduct.h
@@ -10,6 +10,8 @@
 #ifndef EIGEN_CONSERVATIVESPARSESPARSEPRODUCT_H
 #define EIGEN_CONSERVATIVESPARSESPARSEPRODUCT_H
 
+#include "./InternalHeaderCheck.h"
+
 namespace Eigen {
 
 namespace internal {
@@ -17,9 +19,9 @@ namespace internal {
 template<typename Lhs, typename Rhs, typename ResultType>
 static void conservative_sparse_sparse_product_impl(const Lhs& lhs, const Rhs& rhs, ResultType& res, bool sortedInsertion = false)
 {
-  typedef typename remove_all<Lhs>::type::Scalar LhsScalar;
-  typedef typename remove_all<Rhs>::type::Scalar RhsScalar;
-  typedef typename remove_all<ResultType>::type::Scalar ResScalar;
+  typedef typename remove_all_t<Lhs>::Scalar LhsScalar;
+  typedef typename remove_all_t<Rhs>::Scalar RhsScalar;
+  typedef typename remove_all_t<ResultType>::Scalar ResScalar;
 
  // make sure to call innerSize/outerSize since we fake the storage order.
  Index rows = lhs.innerSize();
@@ -124,6 +126,11 @@ static void conservative_sparse_sparse_product_impl(const Lhs& lhs, const Rhs& r
 
 namespace internal {
 
+
+// Helper template to generate new sparse matrix types
+template<class Source, int Order>
+using WithStorageOrder = SparseMatrix<typename Source::Scalar, Order, typename Source::StorageIndex>;
+
 template<typename Lhs, typename Rhs, typename ResultType,
  int LhsStorageOrder = (traits<Lhs>::Flags&RowMajorBit) ? RowMajor : ColMajor,
  int RhsStorageOrder = (traits<Rhs>::Flags&RowMajorBit) ? RowMajor : ColMajor,
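The new WithStorageOrder alias condenses the SparseMatrix typedefs that were repeated in every selector specialization below. Its effect, shown as a hedged standalone sketch (the alias is internal, so this spells out the expansion):

    #include <Eigen/SparseCore>
    #include <type_traits>

    using Mat  = Eigen::SparseMatrix<double, Eigen::ColMajor, int>;
    // WithStorageOrder<Mat, RowMajor>: same Scalar/StorageIndex, row-major order.
    using RowM = Eigen::SparseMatrix<typename Mat::Scalar, Eigen::RowMajor, typename Mat::StorageIndex>;
    static_assert(std::is_same<RowM, Eigen::SparseMatrix<double, Eigen::RowMajor, int>>::value,
                  "the alias expands to exactly this type");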
@@ -133,20 +140,20 @@ struct conservative_sparse_sparse_product_selector;
 template<typename Lhs, typename Rhs, typename ResultType>
 struct conservative_sparse_sparse_product_selector<Lhs,Rhs,ResultType,ColMajor,ColMajor,ColMajor>
 {
-  typedef typename remove_all<Lhs>::type LhsCleaned;
+  typedef remove_all_t<Lhs> LhsCleaned;
  typedef typename LhsCleaned::Scalar Scalar;
 
  static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
  {
-    typedef SparseMatrix<typename ResultType::Scalar,RowMajor,typename ResultType::StorageIndex> RowMajorMatrix;
-    typedef SparseMatrix<typename ResultType::Scalar,ColMajor,typename ResultType::StorageIndex> ColMajorMatrixAux;
-    typedef typename sparse_eval<ColMajorMatrixAux,ResultType::RowsAtCompileTime,ResultType::ColsAtCompileTime,ColMajorMatrixAux::Flags>::type ColMajorMatrix;
+    using RowMajorMatrix = WithStorageOrder<ResultType, RowMajor>;
+    using ColMajorMatrixAux = WithStorageOrder<ResultType, ColMajor>;
 
    // If the result is tall and thin (in the extreme case a column vector)
    // then it is faster to sort the coefficients inplace instead of transposing twice.
    // FIXME, the following heuristic is probably not very good.
    if(lhs.rows()>rhs.cols())
    {
+      using ColMajorMatrix = typename sparse_eval<ColMajorMatrixAux,ResultType::RowsAtCompileTime,ResultType::ColsAtCompileTime,ColMajorMatrixAux::Flags>::type;
      ColMajorMatrix resCol(lhs.rows(),rhs.cols());
      // perform sorted insertion
      internal::conservative_sparse_sparse_product_impl<Lhs,Rhs,ColMajorMatrix>(lhs, rhs, resCol, true);
@@ -168,8 +175,8 @@ struct conservative_sparse_sparse_product_selector<Lhs,Rhs,ResultType,RowMajor,C
 {
  static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
  {
-    typedef SparseMatrix<typename Rhs::Scalar,RowMajor,typename ResultType::StorageIndex> RowMajorRhs;
-    typedef SparseMatrix<typename ResultType::Scalar,RowMajor,typename ResultType::StorageIndex> RowMajorRes;
+    using RowMajorRhs = WithStorageOrder<Rhs, RowMajor>;
+    using RowMajorRes = WithStorageOrder<ResultType, RowMajor>;
    RowMajorRhs rhsRow = rhs;
    RowMajorRes resRow(lhs.rows(), rhs.cols());
    internal::conservative_sparse_sparse_product_impl<RowMajorRhs,Lhs,RowMajorRes>(rhsRow, lhs, resRow);
@@ -182,8 +189,8 @@ struct conservative_sparse_sparse_product_selector<Lhs,Rhs,ResultType,ColMajor,R
 {
  static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
  {
-    typedef SparseMatrix<typename Lhs::Scalar,RowMajor,typename ResultType::StorageIndex> RowMajorLhs;
-    typedef SparseMatrix<typename ResultType::Scalar,RowMajor,typename ResultType::StorageIndex> RowMajorRes;
+    using RowMajorLhs = WithStorageOrder<Lhs, RowMajor>;
+    using RowMajorRes = WithStorageOrder<ResultType, RowMajor>;
    RowMajorLhs lhsRow = lhs;
    RowMajorRes resRow(lhs.rows(), rhs.cols());
    internal::conservative_sparse_sparse_product_impl<Rhs,RowMajorLhs,RowMajorRes>(rhs, lhsRow, resRow);
@@ -196,9 +203,9 @@ struct conservative_sparse_sparse_product_selector<Lhs,Rhs,ResultType,RowMajor,R
 {
  static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
  {
-    typedef SparseMatrix<typename ResultType::Scalar,RowMajor,typename ResultType::StorageIndex> RowMajorMatrix;
-    RowMajorMatrix resRow(lhs.rows(), rhs.cols());
-    internal::conservative_sparse_sparse_product_impl<Rhs,Lhs,RowMajorMatrix>(rhs, lhs, resRow);
+    using RowMajorRes = WithStorageOrder<ResultType, RowMajor>;
+    RowMajorRes resRow(lhs.rows(), rhs.cols());
+    internal::conservative_sparse_sparse_product_impl<Rhs,Lhs,RowMajorRes>(rhs, lhs, resRow);
    res = resRow;
  }
 };
@@ -207,13 +214,13 @@ struct conservative_sparse_sparse_product_selector<Lhs,Rhs,ResultType,RowMajor,R
 template<typename Lhs, typename Rhs, typename ResultType>
 struct conservative_sparse_sparse_product_selector<Lhs,Rhs,ResultType,ColMajor,ColMajor,RowMajor>
 {
-  typedef typename traits<typename remove_all<Lhs>::type>::Scalar Scalar;
+  typedef typename traits<remove_all_t<Lhs>>::Scalar Scalar;
 
  static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
  {
-    typedef SparseMatrix<typename ResultType::Scalar,ColMajor,typename ResultType::StorageIndex> ColMajorMatrix;
-    ColMajorMatrix resCol(lhs.rows(), rhs.cols());
-    internal::conservative_sparse_sparse_product_impl<Lhs,Rhs,ColMajorMatrix>(lhs, rhs, resCol);
+    using ColMajorRes = WithStorageOrder<ResultType, ColMajor>;
+    ColMajorRes resCol(lhs.rows(), rhs.cols());
+    internal::conservative_sparse_sparse_product_impl<Lhs,Rhs,ColMajorRes>(lhs, rhs, resCol);
    res = resCol;
  }
 };
@@ -223,8 +230,8 @@ struct conservative_sparse_sparse_product_selector<Lhs,Rhs,ResultType,RowMajor,C
 {
  static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
  {
-    typedef SparseMatrix<typename Lhs::Scalar,ColMajor,typename ResultType::StorageIndex> ColMajorLhs;
-    typedef SparseMatrix<typename ResultType::Scalar,ColMajor,typename ResultType::StorageIndex> ColMajorRes;
+    using ColMajorLhs = WithStorageOrder<Lhs, ColMajor>;
+    using ColMajorRes = WithStorageOrder<ResultType, ColMajor>;
    ColMajorLhs lhsCol = lhs;
    ColMajorRes resCol(lhs.rows(), rhs.cols());
    internal::conservative_sparse_sparse_product_impl<ColMajorLhs,Rhs,ColMajorRes>(lhsCol, rhs, resCol);
@@ -237,8 +244,8 @@ struct conservative_sparse_sparse_product_selector<Lhs,Rhs,ResultType,ColMajor,R
 {
  static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
  {
-    typedef SparseMatrix<typename Rhs::Scalar,ColMajor,typename ResultType::StorageIndex> ColMajorRhs;
-    typedef SparseMatrix<typename ResultType::Scalar,ColMajor,typename ResultType::StorageIndex> ColMajorRes;
+    using ColMajorRhs = WithStorageOrder<Rhs, ColMajor>;
+    using ColMajorRes = WithStorageOrder<ResultType, ColMajor>;
    ColMajorRhs rhsCol = rhs;
    ColMajorRes resCol(lhs.rows(), rhs.cols());
    internal::conservative_sparse_sparse_product_impl<Lhs,ColMajorRhs,ColMajorRes>(lhs, rhsCol, resCol);
@@ -251,12 +258,12 @@ struct conservative_sparse_sparse_product_selector<Lhs,Rhs,ResultType,RowMajor,R
 {
  static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
  {
-    typedef SparseMatrix<typename ResultType::Scalar,RowMajor,typename ResultType::StorageIndex> RowMajorMatrix;
-    typedef SparseMatrix<typename ResultType::Scalar,ColMajor,typename ResultType::StorageIndex> ColMajorMatrix;
-    RowMajorMatrix resRow(lhs.rows(),rhs.cols());
-    internal::conservative_sparse_sparse_product_impl<Rhs,Lhs,RowMajorMatrix>(rhs, lhs, resRow);
+    using ColMajorRes = WithStorageOrder<ResultType, ColMajor>;
+    using RowMajorRes = WithStorageOrder<ResultType, RowMajor>;
+    RowMajorRes resRow(lhs.rows(),rhs.cols());
+    internal::conservative_sparse_sparse_product_impl<Rhs,Lhs,RowMajorRes>(rhs, lhs, resRow);
    // sort the non zeros:
-    ColMajorMatrix resCol(resRow);
+    ColMajorRes resCol(resRow);
    res = resCol;
  }
 };
@@ -269,8 +276,8 @@ namespace internal {
 template<typename Lhs, typename Rhs, typename ResultType>
 static void sparse_sparse_to_dense_product_impl(const Lhs& lhs, const Rhs& rhs, ResultType& res)
 {
-  typedef typename remove_all<Lhs>::type::Scalar LhsScalar;
-  typedef typename remove_all<Rhs>::type::Scalar RhsScalar;
+  typedef typename remove_all_t<Lhs>::Scalar LhsScalar;
+  typedef typename remove_all_t<Rhs>::Scalar RhsScalar;
  Index cols = rhs.outerSize();
  eigen_assert(lhs.outerSize() == rhs.innerSize());
 
@@ -317,7 +324,7 @@ struct sparse_sparse_to_dense_product_selector<Lhs,Rhs,ResultType,RowMajor,ColMa
 {
  static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
  {
-    typedef SparseMatrix<typename Lhs::Scalar,ColMajor,typename ResultType::StorageIndex> ColMajorLhs;
+    using ColMajorLhs = WithStorageOrder<Lhs, ColMajor>;
    ColMajorLhs lhsCol(lhs);
    internal::sparse_sparse_to_dense_product_impl<ColMajorLhs,Rhs,ResultType>(lhsCol, rhs, res);
  }
@@ -328,7 +335,7 @@ struct sparse_sparse_to_dense_product_selector<Lhs,Rhs,ResultType,ColMajor,RowMa
 {
  static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
  {
-    typedef SparseMatrix<typename Rhs::Scalar,ColMajor,typename ResultType::StorageIndex> ColMajorRhs;
+    using ColMajorRhs = WithStorageOrder<Rhs, ColMajor>;
    ColMajorRhs rhsCol(rhs);
    internal::sparse_sparse_to_dense_product_impl<Lhs,ColMajorRhs,ResultType>(lhs, rhsCol, res);
  }
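Several hunks in this file also migrate "typename remove_all<T>::type" to the remove_all_t<T> alias, mirroring the C++14 std::remove_*_t shorthands. Presumably the alias is declared along these lines in Eigen's meta utilities (sketch, not part of this diff):

    // Alias template: no leading "typename" and no trailing "::type" at use sites.
    template <typename T>
    using remove_all_t = typename remove_all<T>::type;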
libs/eigen/Eigen/src/SparseCore/InternalHeaderCheck.h (new file, +3)
@@ -0,0 +1,3 @@
+#ifndef EIGEN_SPARSECORE_MODULE_H
+#error "Please include Eigen/SparseCore instead of including headers inside the src directory directly."
+#endif
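Each SparseCore header now includes this file first; the guard macro is defined by the umbrella header, so only module-level includes compile. In practice (sketch):

    #include <Eigen/SparseCore>   // OK: defines EIGEN_SPARSECORE_MODULE_H before the src/ headers
    // #include <Eigen/src/SparseCore/SparseMatrix.h>  // standalone: hits the #error above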
libs/eigen/Eigen/src/SparseCore/MappedSparseMatrix.h (deleted)
@@ -1,67 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// Copyright (C) 2008-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-#ifndef EIGEN_MAPPED_SPARSEMATRIX_H
-#define EIGEN_MAPPED_SPARSEMATRIX_H
-
-namespace Eigen {
-
-/** \deprecated Use Map<SparseMatrix<> >
-  * \class MappedSparseMatrix
-  *
-  * \brief Sparse matrix
-  *
-  * \param _Scalar the scalar type, i.e. the type of the coefficients
-  *
-  * See http://www.netlib.org/linalg/html_templates/node91.html for details on the storage scheme.
-  *
-  */
-namespace internal {
-template<typename _Scalar, int _Flags, typename _StorageIndex>
-struct traits<MappedSparseMatrix<_Scalar, _Flags, _StorageIndex> > : traits<SparseMatrix<_Scalar, _Flags, _StorageIndex> >
-{};
-} // end namespace internal
-
-template<typename _Scalar, int _Flags, typename _StorageIndex>
-class MappedSparseMatrix
-  : public Map<SparseMatrix<_Scalar, _Flags, _StorageIndex> >
-{
-    typedef Map<SparseMatrix<_Scalar, _Flags, _StorageIndex> > Base;
-
-  public:
-
-    typedef typename Base::StorageIndex StorageIndex;
-    typedef typename Base::Scalar Scalar;
-
-    inline MappedSparseMatrix(Index rows, Index cols, Index nnz, StorageIndex* outerIndexPtr, StorageIndex* innerIndexPtr, Scalar* valuePtr, StorageIndex* innerNonZeroPtr = 0)
-      : Base(rows, cols, nnz, outerIndexPtr, innerIndexPtr, valuePtr, innerNonZeroPtr)
-    {}
-
-    /** Empty destructor */
-    inline ~MappedSparseMatrix() {}
-};
-
-namespace internal {
-
-template<typename _Scalar, int _Options, typename _StorageIndex>
-struct evaluator<MappedSparseMatrix<_Scalar,_Options,_StorageIndex> >
-  : evaluator<SparseCompressedBase<MappedSparseMatrix<_Scalar,_Options,_StorageIndex> > >
-{
-  typedef MappedSparseMatrix<_Scalar,_Options,_StorageIndex> XprType;
-  typedef evaluator<SparseCompressedBase<XprType> > Base;
-
-  evaluator() : Base() {}
-  explicit evaluator(const XprType &mat) : Base(mat) {}
-};
-
-}
-
-} // end namespace Eigen
-
-#endif // EIGEN_MAPPED_SPARSEMATRIX_H
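MappedSparseMatrix was a long-deprecated wrapper for mapping user-owned compressed storage; Map<SparseMatrix<>> is the drop-in replacement with the same pointer arguments. A hedged migration sketch (buffer names are illustrative):

    #include <Eigen/SparseCore>

    // Wrap existing CSC arrays without copying, as the removed
    // MappedSparseMatrix(rows, cols, nnz, outer, inner, values) used to do:
    void wrapBuffers(int rows, int cols, int nnz,
                     int* outer, int* inner, double* values) {
      Eigen::Map<Eigen::SparseMatrix<double>> view(rows, cols, nnz, outer, inner, values);
      double sum = view.sum();  // behaves like a regular sparse matrix
      (void)sum;
    }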
libs/eigen/Eigen/src/SparseCore/SparseAssign.h
@@ -10,6 +10,8 @@
 #ifndef EIGEN_SPARSEASSIGN_H
 #define EIGEN_SPARSEASSIGN_H
 
+#include "./InternalHeaderCheck.h"
+
 namespace Eigen {
 
 template<typename Derived>
@@ -78,12 +80,18 @@ void assign_sparse_to_sparse(DstXprType &dst, const SrcXprType &src)
 
    const bool transpose = (DstEvaluatorType::Flags & RowMajorBit) != (SrcEvaluatorType::Flags & RowMajorBit);
    const Index outerEvaluationSize = (SrcEvaluatorType::Flags&RowMajorBit) ? src.rows() : src.cols();
+
+    Index reserveSize = 0;
+    for (Index j = 0; j < outerEvaluationSize; ++j)
+      for (typename SrcEvaluatorType::InnerIterator it(srcEvaluator, j); it; ++it)
+        reserveSize++;
+
    if ((!transpose) && src.isRValue())
    {
      // eval without temporary
      dst.resize(src.rows(), src.cols());
      dst.setZero();
-      dst.reserve((std::min)(src.rows()*src.cols(), (std::max)(src.rows(),src.cols())*2));
+      dst.reserve(reserveSize);
      for (Index j=0; j<outerEvaluationSize; ++j)
      {
        dst.startVec(j);
@@ -107,7 +115,7 @@ void assign_sparse_to_sparse(DstXprType &dst, const SrcXprType &src)
 
    DstXprType temp(src.rows(), src.cols());
 
-    temp.reserve((std::min)(src.rows()*src.cols(), (std::max)(src.rows(),src.cols())*2));
+    temp.reserve(reserveSize);
    for (Index j=0; j<outerEvaluationSize; ++j)
    {
      temp.startVec(j);
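The old reserve guess of min(rows*cols, 2*max(rows,cols)) could badly over- or under-shoot; the new code spends one extra evaluator pass to count the source's non-zeros exactly, so the destination allocates once. The user-visible effect, as a sketch (an illustrative conversion that funnels through assign_sparse_to_sparse):

    #include <Eigen/SparseCore>

    // Changing storage order triggers the sparse-to-sparse assignment path,
    // which now reserves exactly src.nonZeros() instead of a heuristic.
    Eigen::SparseMatrix<double, Eigen::RowMajor>
    toRowMajor(const Eigen::SparseMatrix<double>& src) {
      return Eigen::SparseMatrix<double, Eigen::RowMajor>(src);
    }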
@@ -172,7 +180,7 @@ struct assignment_from_dense_op_sparse
  // Specialization for dense1 = sparse + dense2; -> dense1 = dense2; dense1 += sparse;
  template<typename Lhs, typename Rhs, typename Scalar>
  static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
-  typename internal::enable_if<internal::is_same<typename internal::evaluator_traits<Rhs>::Shape,DenseShape>::value>::type
+  std::enable_if_t<internal::is_same<typename internal::evaluator_traits<Rhs>::Shape,DenseShape>::value>
  run(DstXprType &dst, const CwiseBinaryOp<internal::scalar_sum_op<Scalar,Scalar>, const Lhs, const Rhs> &src,
      const internal::assign_op<typename DstXprType::Scalar,Scalar>& /*func*/)
  {
@@ -188,7 +196,7 @@ struct assignment_from_dense_op_sparse
  // Specialization for dense1 = sparse - dense2; -> dense1 = -dense2; dense1 += sparse;
  template<typename Lhs, typename Rhs, typename Scalar>
  static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
-  typename internal::enable_if<internal::is_same<typename internal::evaluator_traits<Rhs>::Shape,DenseShape>::value>::type
+  std::enable_if_t<internal::is_same<typename internal::evaluator_traits<Rhs>::Shape,DenseShape>::value>
  run(DstXprType &dst, const CwiseBinaryOp<internal::scalar_difference_op<Scalar,Scalar>, const Lhs, const Rhs> &src,
      const internal::assign_op<typename DstXprType::Scalar,Scalar>& /*func*/)
  {
@@ -206,8 +214,8 @@ struct assignment_from_dense_op_sparse
  template< typename DstXprType, typename Lhs, typename Rhs, typename Scalar> \
  struct Assignment<DstXprType, CwiseBinaryOp<internal::BINOP<Scalar,Scalar>, const Lhs, const Rhs>, internal::ASSIGN_OP<typename DstXprType::Scalar,Scalar>, \
                    Sparse2Dense, \
-                    typename internal::enable_if< internal::is_same<typename internal::evaluator_traits<Lhs>::Shape,DenseShape>::value \
-                                               || internal::is_same<typename internal::evaluator_traits<Rhs>::Shape,DenseShape>::value>::type> \
+                    std::enable_if_t< internal::is_same<typename internal::evaluator_traits<Lhs>::Shape,DenseShape>::value \
+                                   || internal::is_same<typename internal::evaluator_traits<Rhs>::Shape,DenseShape>::value>> \
    : assignment_from_dense_op_sparse<DstXprType, internal::ASSIGN_OP<typename DstXprType::Scalar,typename Lhs::Scalar>, internal::ASSIGN_OP2<typename DstXprType::Scalar,typename Rhs::Scalar> > \
  {}
 
libs/eigen/Eigen/src/SparseCore/SparseBlock.h
@@ -10,6 +10,8 @@
 #ifndef EIGEN_SPARSE_BLOCK_H
 #define EIGEN_SPARSE_BLOCK_H
 
+#include "./InternalHeaderCheck.h"
+
 namespace Eigen {
 
 // Subset of columns or rows
@@ -17,7 +19,7 @@ template<typename XprType, int BlockRows, int BlockCols>
 class BlockImpl<XprType,BlockRows,BlockCols,true,Sparse>
  : public SparseMatrixBase<Block<XprType,BlockRows,BlockCols,true> >
 {
-    typedef typename internal::remove_all<typename XprType::Nested>::type _MatrixTypeNested;
+    typedef internal::remove_all_t<typename XprType::Nested> MatrixTypeNested_;
    typedef Block<XprType, BlockRows, BlockCols, true> BlockType;
 public:
    enum { IsRowMajor = internal::traits<BlockType>::IsRowMajor };
@@ -96,7 +98,7 @@ template<typename SparseMatrixType, int BlockRows, int BlockCols>
 class sparse_matrix_block_impl
  : public SparseCompressedBase<Block<SparseMatrixType,BlockRows,BlockCols,true> >
 {
-    typedef typename internal::remove_all<typename SparseMatrixType::Nested>::type _MatrixTypeNested;
+    typedef internal::remove_all_t<typename SparseMatrixType::Nested> MatrixTypeNested_;
    typedef Block<SparseMatrixType, BlockRows, BlockCols, true> BlockType;
    typedef SparseCompressedBase<Block<SparseMatrixType,BlockRows,BlockCols,true> > Base;
    using Base::convert_index;
@@ -119,8 +121,8 @@ public:
    template<typename OtherDerived>
    inline BlockType& operator=(const SparseMatrixBase<OtherDerived>& other)
    {
-      typedef typename internal::remove_all<typename SparseMatrixType::Nested>::type _NestedMatrixType;
-      _NestedMatrixType& matrix = m_matrix;
+      typedef internal::remove_all_t<typename SparseMatrixType::Nested> NestedMatrixType_;
+      NestedMatrixType_& matrix = m_matrix;
      // This assignment is slow if this vector set is not empty
      // and/or it is not at the end of the nonzeros of the underlying matrix.
 
@@ -283,13 +285,13 @@ public:
 
 } // namespace internal
 
-template<typename _Scalar, int _Options, typename _StorageIndex, int BlockRows, int BlockCols>
-class BlockImpl<SparseMatrix<_Scalar, _Options, _StorageIndex>,BlockRows,BlockCols,true,Sparse>
-  : public internal::sparse_matrix_block_impl<SparseMatrix<_Scalar, _Options, _StorageIndex>,BlockRows,BlockCols>
+template<typename Scalar_, int Options_, typename StorageIndex_, int BlockRows, int BlockCols>
+class BlockImpl<SparseMatrix<Scalar_, Options_, StorageIndex_>,BlockRows,BlockCols,true,Sparse>
+  : public internal::sparse_matrix_block_impl<SparseMatrix<Scalar_, Options_, StorageIndex_>,BlockRows,BlockCols>
 {
 public:
-  typedef _StorageIndex StorageIndex;
-  typedef SparseMatrix<_Scalar, _Options, _StorageIndex> SparseMatrixType;
+  typedef StorageIndex_ StorageIndex;
+  typedef SparseMatrix<Scalar_, Options_, StorageIndex_> SparseMatrixType;
  typedef internal::sparse_matrix_block_impl<SparseMatrixType,BlockRows,BlockCols> Base;
  inline BlockImpl(SparseMatrixType& xpr, Index i)
    : Base(xpr, i)
@@ -302,13 +304,13 @@ public:
  using Base::operator=;
 };
 
-template<typename _Scalar, int _Options, typename _StorageIndex, int BlockRows, int BlockCols>
-class BlockImpl<const SparseMatrix<_Scalar, _Options, _StorageIndex>,BlockRows,BlockCols,true,Sparse>
-  : public internal::sparse_matrix_block_impl<const SparseMatrix<_Scalar, _Options, _StorageIndex>,BlockRows,BlockCols>
+template<typename Scalar_, int Options_, typename StorageIndex_, int BlockRows, int BlockCols>
+class BlockImpl<const SparseMatrix<Scalar_, Options_, StorageIndex_>,BlockRows,BlockCols,true,Sparse>
+  : public internal::sparse_matrix_block_impl<const SparseMatrix<Scalar_, Options_, StorageIndex_>,BlockRows,BlockCols>
 {
 public:
-  typedef _StorageIndex StorageIndex;
-  typedef const SparseMatrix<_Scalar, _Options, _StorageIndex> SparseMatrixType;
+  typedef StorageIndex_ StorageIndex;
+  typedef const SparseMatrix<Scalar_, Options_, StorageIndex_> SparseMatrixType;
  typedef internal::sparse_matrix_block_impl<SparseMatrixType,BlockRows,BlockCols> Base;
  inline BlockImpl(SparseMatrixType& xpr, Index i)
    : Base(xpr, i)
@@ -340,7 +342,7 @@ public:
    enum { IsRowMajor = internal::traits<BlockType>::IsRowMajor };
    EIGEN_SPARSE_PUBLIC_INTERFACE(BlockType)
 
-    typedef typename internal::remove_all<typename XprType::Nested>::type _MatrixTypeNested;
+    typedef internal::remove_all_t<typename XprType::Nested> MatrixTypeNested_;
 
    /** Column or Row constructor
      */
@@ -429,17 +431,12 @@ struct unary_evaluator<Block<ArgType,BlockRows,BlockCols,InnerPanel>, IteratorBa
 
    enum {
      IsRowMajor = XprType::IsRowMajor,
-
-      OuterVector = (BlockCols==1 && ArgType::IsRowMajor)
-                    | // FIXME | instead of || to please GCC 4.4.0 stupid warning "suggest parentheses around &&".
-                      // revert to || as soon as not needed anymore.
-                    (BlockRows==1 && !ArgType::IsRowMajor),
-
+      OuterVector = (BlockCols == 1 && ArgType::IsRowMajor) || (BlockRows == 1 && !ArgType::IsRowMajor),
      CoeffReadCost = evaluator<ArgType>::CoeffReadCost,
      Flags = XprType::Flags
    };
 
-    typedef typename internal::conditional<OuterVector,OuterVectorInnerIterator,InnerVectorInnerIterator>::type InnerIterator;
+    typedef std::conditional_t<OuterVector,OuterVectorInnerIterator,InnerVectorInnerIterator> InnerIterator;
 
    explicit unary_evaluator(const XprType& op)
      : m_argImpl(op.nestedExpression()), m_block(op)
@@ -467,7 +464,7 @@ template<typename ArgType, int BlockRows, int BlockCols, bool InnerPanel>
 class unary_evaluator<Block<ArgType,BlockRows,BlockCols,InnerPanel>, IteratorBased>::InnerVectorInnerIterator
  : public EvalIterator
 {
-  // NOTE MSVC fails to compile if we don't explicitely "import" IsRowMajor from unary_evaluator
+  // NOTE MSVC fails to compile if we don't explicitly "import" IsRowMajor from unary_evaluator
  // because the base class EvalIterator has a private IsRowMajor enum too. (bug #1786)
  // NOTE We cannot call it IsRowMajor because it would shadow unary_evaluator::IsRowMajor
  enum { XprIsRowMajor = unary_evaluator::IsRowMajor };
@@ -533,8 +530,8 @@ public:
    while(++m_outerPos<m_end)
    {
      // Restart iterator at the next inner-vector:
-      m_it.~EvalIterator();
-      ::new (&m_it) EvalIterator(m_eval.m_argImpl, m_outerPos);
+      internal::destroy_at(&m_it);
+      internal::construct_at(&m_it, m_eval.m_argImpl, m_outerPos);
      // search for the key m_innerIndex in the current outer-vector
      while(m_it && m_it.index() < m_innerIndex) ++m_it;
      if(m_it && m_it.index()==m_innerIndex) break;
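Replacing the explicit destructor call plus placement-new with destroy_at/construct_at expresses the same iterator restart through standard lifetime utilities (std::destroy_at is C++17, std::construct_at is C++20; Eigen routes through internal wrappers so it can support older dialects). The pattern in isolation, with a stand-in type:

    #include <memory>

    struct Cursor {
      int pos;
      explicit Cursor(int p) : pos(p) {}
    };

    void restart(Cursor& c, int newPos) {
      std::destroy_at(&c);                            // end the old object's lifetime
      ::new (static_cast<void*>(&c)) Cursor(newPos);  // construct_at equivalent pre-C++20
    }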
@@ -545,20 +542,20 @@ public:
    inline operator bool() const { return m_outerPos < m_end; }
 };
 
-template<typename _Scalar, int _Options, typename _StorageIndex, int BlockRows, int BlockCols>
-struct unary_evaluator<Block<SparseMatrix<_Scalar, _Options, _StorageIndex>,BlockRows,BlockCols,true>, IteratorBased>
-  : evaluator<SparseCompressedBase<Block<SparseMatrix<_Scalar, _Options, _StorageIndex>,BlockRows,BlockCols,true> > >
+template<typename Scalar_, int Options_, typename StorageIndex_, int BlockRows, int BlockCols>
+struct unary_evaluator<Block<SparseMatrix<Scalar_, Options_, StorageIndex_>,BlockRows,BlockCols,true>, IteratorBased>
+  : evaluator<SparseCompressedBase<Block<SparseMatrix<Scalar_, Options_, StorageIndex_>,BlockRows,BlockCols,true> > >
 {
-  typedef Block<SparseMatrix<_Scalar, _Options, _StorageIndex>,BlockRows,BlockCols,true> XprType;
+  typedef Block<SparseMatrix<Scalar_, Options_, StorageIndex_>,BlockRows,BlockCols,true> XprType;
  typedef evaluator<SparseCompressedBase<XprType> > Base;
  explicit unary_evaluator(const XprType &xpr) : Base(xpr) {}
 };
 
-template<typename _Scalar, int _Options, typename _StorageIndex, int BlockRows, int BlockCols>
-struct unary_evaluator<Block<const SparseMatrix<_Scalar, _Options, _StorageIndex>,BlockRows,BlockCols,true>, IteratorBased>
-  : evaluator<SparseCompressedBase<Block<const SparseMatrix<_Scalar, _Options, _StorageIndex>,BlockRows,BlockCols,true> > >
+template<typename Scalar_, int Options_, typename StorageIndex_, int BlockRows, int BlockCols>
+struct unary_evaluator<Block<const SparseMatrix<Scalar_, Options_, StorageIndex_>,BlockRows,BlockCols,true>, IteratorBased>
+  : evaluator<SparseCompressedBase<Block<const SparseMatrix<Scalar_, Options_, StorageIndex_>,BlockRows,BlockCols,true> > >
 {
-  typedef Block<const SparseMatrix<_Scalar, _Options, _StorageIndex>,BlockRows,BlockCols,true> XprType;
+  typedef Block<const SparseMatrix<Scalar_, Options_, StorageIndex_>,BlockRows,BlockCols,true> XprType;
  typedef evaluator<SparseCompressedBase<XprType> > Base;
  explicit unary_evaluator(const XprType &xpr) : Base(xpr) {}
 };
libs/eigen/Eigen/src/SparseCore/SparseColEtree.h
@@ -31,6 +31,8 @@
 #ifndef SPARSE_COLETREE_H
 #define SPARSE_COLETREE_H
 
+#include "./InternalHeaderCheck.h"
+
 namespace Eigen {
 
 namespace internal {
libs/eigen/Eigen/src/SparseCore/SparseCompressedBase.h
@@ -10,6 +10,8 @@
 #ifndef EIGEN_SPARSE_COMPRESSED_BASE_H
 #define EIGEN_SPARSE_COMPRESSED_BASE_H
 
+#include "./InternalHeaderCheck.h"
+
 namespace Eigen {
 
 template<typename Derived> class SparseCompressedBase;
@@ -20,6 +22,9 @@ template<typename Derived>
 struct traits<SparseCompressedBase<Derived> > : traits<Derived>
 {};
 
+template <typename Derived, class Comp, bool IsVector>
+struct inner_sort_impl;
+
 } // end namespace internal
 
 /** \ingroup SparseCore_Module
@@ -124,6 +129,40 @@ class SparseCompressedBase
    *
    * \sa valuePtr(), isCompressed() */
  Map<Array<Scalar,Dynamic,1> > coeffs() { eigen_assert(isCompressed()); return Array<Scalar,Dynamic,1>::Map(valuePtr(),nonZeros()); }
 
+  /** sorts the inner vectors in the range [begin,end) with respect to `Comp`
+    * \sa innerIndicesAreSorted() */
+  template <class Comp = std::less<>>
+  inline void sortInnerIndices(Index begin, Index end) {
+    eigen_assert(begin >= 0 && end <= derived().outerSize() && end >= begin);
+    internal::inner_sort_impl<Derived, Comp, IsVectorAtCompileTime>::run(*this, begin, end);
+  }
+
+  /** \returns the index of the first inner vector in the range [begin,end) that is not sorted with respect to `Comp`, or `end` if the range is fully sorted
+    * \sa sortInnerIndices() */
+  template <class Comp = std::less<>>
+  inline Index innerIndicesAreSorted(Index begin, Index end) const {
+    eigen_assert(begin >= 0 && end <= derived().outerSize() && end >= begin);
+    return internal::inner_sort_impl<Derived, Comp, IsVectorAtCompileTime>::check(*this, begin, end);
+  }
+
+  /** sorts the inner vectors in the range [0,outerSize) with respect to `Comp`
+    * \sa innerIndicesAreSorted() */
+  template <class Comp = std::less<>>
+  inline void sortInnerIndices() {
+    Index begin = 0;
+    Index end = derived().outerSize();
+    internal::inner_sort_impl<Derived, Comp, IsVectorAtCompileTime>::run(*this, begin, end);
+  }
+
+  /** \returns the index of the first inner vector in the range [0,outerSize) that is not sorted with respect to `Comp`, or `outerSize` if the range is fully sorted
+    * \sa sortInnerIndices() */
+  template<class Comp = std::less<>>
+  inline Index innerIndicesAreSorted() const {
+    Index begin = 0;
+    Index end = derived().outerSize();
+    return internal::inner_sort_impl<Derived, Comp, IsVectorAtCompileTime>::check(*this, begin, end);
+  }
+
  protected:
    /** Default constructor. Do nothing. */
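Usage of the new sorting interface, as a hedged sketch (matrix contents illustrative):

    #include <Eigen/SparseCore>
    #include <cassert>
    #include <functional>

    void normalize(Eigen::SparseMatrix<double>& m) {
      m.sortInnerIndices();  // ascending inner indices in every outer vector
      assert(m.innerIndicesAreSorted() == m.outerSize());  // fully sorted
      // A custom order can be requested through the Comp parameter:
      m.sortInnerIndices<std::greater<>>(0, m.outerSize());
    }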
@@ -194,8 +233,7 @@ class SparseCompressedBase<Derived>::InnerIterator
      }
    }
 
-    explicit InnerIterator(const SparseCompressedBase& mat)
-      : m_values(mat.valuePtr()), m_indices(mat.innerIndexPtr()), m_outer(0), m_id(0), m_end(mat.nonZeros())
+    explicit InnerIterator(const SparseCompressedBase& mat) : InnerIterator(mat, Index(0))
    {
      EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived);
    }
@@ -305,6 +343,138 @@ class SparseCompressedBase<Derived>::ReverseInnerIterator
 
 namespace internal {
 
+// modified from https://artificial-mind.net/blog/2020/11/28/std-sort-multiple-ranges
+template <typename Scalar, typename StorageIndex>
+class CompressedStorageIterator;
+
+// wrapper class analogous to std::pair<StorageIndex&, Scalar&>
+// used to define assignment, swap, and comparison operators for CompressedStorageIterator
+template <typename Scalar, typename StorageIndex>
+class StorageRef
+{
+public:
+  using value_type = std::pair<StorageIndex, Scalar>;
+
+  inline StorageRef& operator=(const StorageRef& other) {
+    *m_innerIndexIterator = *other.m_innerIndexIterator;
+    *m_valueIterator = *other.m_valueIterator;
+    return *this;
+  }
+  inline StorageRef& operator=(const value_type& other) {
+    std::tie(*m_innerIndexIterator, *m_valueIterator) = other;
+    return *this;
+  }
+  inline operator value_type() const { return std::make_pair(*m_innerIndexIterator, *m_valueIterator); }
+  inline friend void swap(const StorageRef& a, const StorageRef& b) {
+    std::iter_swap(a.m_innerIndexIterator, b.m_innerIndexIterator);
+    std::iter_swap(a.m_valueIterator, b.m_valueIterator);
+  }
+
+  inline static const StorageIndex& key(const StorageRef& a) { return *a.m_innerIndexIterator; }
+  inline static const StorageIndex& key(const value_type& a) { return a.first; }
+  #define REF_COMP_REF(OP) inline friend bool operator OP(const StorageRef& a, const StorageRef& b) { return key(a) OP key(b); };
+  #define REF_COMP_VAL(OP) inline friend bool operator OP(const StorageRef& a, const value_type& b) { return key(a) OP key(b); };
+  #define VAL_COMP_REF(OP) inline friend bool operator OP(const value_type& a, const StorageRef& b) { return key(a) OP key(b); };
+  #define MAKE_COMPS(OP) REF_COMP_REF(OP) REF_COMP_VAL(OP) VAL_COMP_REF(OP)
+  MAKE_COMPS(<) MAKE_COMPS(>) MAKE_COMPS(<=) MAKE_COMPS(>=) MAKE_COMPS(==) MAKE_COMPS(!=)
+
+protected:
+  StorageIndex* m_innerIndexIterator;
+  Scalar* m_valueIterator;
+private:
+  StorageRef() = delete;
+  // these constructors are only called by the CompressedStorageIterator constructors for convenience only
+  StorageRef(StorageIndex* innerIndexIterator, Scalar* valueIterator) : m_innerIndexIterator(innerIndexIterator), m_valueIterator(valueIterator) {}
+  StorageRef(const StorageRef& other) : m_innerIndexIterator(other.m_innerIndexIterator), m_valueIterator(other.m_valueIterator) {}
+
+  friend class CompressedStorageIterator<Scalar, StorageIndex>;
+};
+
+// STL-compatible iterator class that operates on inner indices and values
+template<typename Scalar, typename StorageIndex>
+class CompressedStorageIterator
+{
+public:
+  using iterator_category = std::random_access_iterator_tag;
+  using reference = StorageRef<Scalar, StorageIndex>;
+  using difference_type = Index;
+  using value_type = typename reference::value_type;
+  using pointer = value_type*;
+
+  CompressedStorageIterator() = delete;
+  CompressedStorageIterator(difference_type index, StorageIndex* innerIndexPtr, Scalar* valuePtr) : m_index(index), m_data(innerIndexPtr, valuePtr) {}
+  CompressedStorageIterator(difference_type index, reference data) : m_index(index), m_data(data) {}
+  CompressedStorageIterator(const CompressedStorageIterator& other) : m_index(other.m_index), m_data(other.m_data) {}
+  inline CompressedStorageIterator& operator=(const CompressedStorageIterator& other) {
+    m_index = other.m_index;
+    m_data = other.m_data;
+    return *this;
+  }
+
+  inline CompressedStorageIterator operator+(difference_type offset) const { return CompressedStorageIterator(m_index + offset, m_data); }
+  inline CompressedStorageIterator operator-(difference_type offset) const { return CompressedStorageIterator(m_index - offset, m_data); }
+  inline difference_type operator-(const CompressedStorageIterator& other) const { return m_index - other.m_index; }
+  inline CompressedStorageIterator& operator++() { ++m_index; return *this; }
+  inline CompressedStorageIterator& operator--() { --m_index; return *this; }
+  inline CompressedStorageIterator& operator+=(difference_type offset) { m_index += offset; return *this; }
+  inline CompressedStorageIterator& operator-=(difference_type offset) { m_index -= offset; return *this; }
+  inline reference operator*() const { return reference(m_data.m_innerIndexIterator + m_index, m_data.m_valueIterator + m_index); }
+
+  #define MAKE_COMP(OP) inline bool operator OP(const CompressedStorageIterator& other) const { return m_index OP other.m_index; }
+  MAKE_COMP(<) MAKE_COMP(>) MAKE_COMP(>=) MAKE_COMP(<=) MAKE_COMP(!=) MAKE_COMP(==)
+
+protected:
+  difference_type m_index;
+  reference m_data;
+};
+
+template <typename Derived, class Comp, bool IsVector>
+struct inner_sort_impl {
+  typedef typename Derived::Scalar Scalar;
+  typedef typename Derived::StorageIndex StorageIndex;
+  static inline void run(SparseCompressedBase<Derived>& obj, Index begin, Index end) {
+    const bool is_compressed = obj.isCompressed();
+    for (Index outer = begin; outer < end; outer++) {
+      Index begin_offset = obj.outerIndexPtr()[outer];
+      Index end_offset = is_compressed ? obj.outerIndexPtr()[outer + 1] : (begin_offset + obj.innerNonZeroPtr()[outer]);
+      CompressedStorageIterator<Scalar, StorageIndex> begin_it(begin_offset, obj.innerIndexPtr(), obj.valuePtr());
+      CompressedStorageIterator<Scalar, StorageIndex> end_it(end_offset, obj.innerIndexPtr(), obj.valuePtr());
+      std::sort(begin_it, end_it, Comp());
+    }
+  }
+  static inline Index check(const SparseCompressedBase<Derived>& obj, Index begin, Index end) {
+    const bool is_compressed = obj.isCompressed();
+    for (Index outer = begin; outer < end; outer++) {
+      Index begin_offset = obj.outerIndexPtr()[outer];
+      Index end_offset = is_compressed ? obj.outerIndexPtr()[outer + 1] : (begin_offset + obj.innerNonZeroPtr()[outer]);
+      const StorageIndex* begin_it = obj.innerIndexPtr() + begin_offset;
+      const StorageIndex* end_it = obj.innerIndexPtr() + end_offset;
+      bool is_sorted = std::is_sorted(begin_it, end_it, Comp());
+      if (!is_sorted) return outer;
+    }
+    return end;
+  }
+};
+template <typename Derived, class Comp>
+struct inner_sort_impl<Derived, Comp, true> {
+  typedef typename Derived::Scalar Scalar;
+  typedef typename Derived::StorageIndex StorageIndex;
+  static inline void run(SparseCompressedBase<Derived>& obj, Index, Index) {
+    Index begin_offset = 0;
+    Index end_offset = obj.nonZeros();
+    CompressedStorageIterator<Scalar, StorageIndex> begin_it(begin_offset, obj.innerIndexPtr(), obj.valuePtr());
+    CompressedStorageIterator<Scalar, StorageIndex> end_it(end_offset, obj.innerIndexPtr(), obj.valuePtr());
+    std::sort(begin_it, end_it, Comp());
+  }
+  static inline Index check(const SparseCompressedBase<Derived>& obj, Index, Index) {
+    Index begin_offset = 0;
+    Index end_offset = obj.nonZeros();
+    const StorageIndex* begin_it = obj.innerIndexPtr() + begin_offset;
+    const StorageIndex* end_it = obj.innerIndexPtr() + end_offset;
+    return std::is_sorted(begin_it, end_it, Comp()) ? 1 : 0;
+  }
+};
+
 template<typename Derived>
 struct evaluator<SparseCompressedBase<Derived> >
  : evaluator_base<Derived>
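StorageRef and CompressedStorageIterator exist so that std::sort can permute the index and value arrays in lockstep without materializing an array of pairs; the proxy's swap and assignment move both elements together. The same effect, written naively with an explicit permutation for comparison:

    #include <algorithm>
    #include <numeric>
    #include <vector>

    // Sort (idx, val) pairs by idx. Eigen's proxy iterator avoids these
    // temporary vectors by sorting the two arrays in place.
    void sortParallel(std::vector<int>& idx, std::vector<double>& val) {
      std::vector<std::size_t> p(idx.size());
      std::iota(p.begin(), p.end(), std::size_t{0});
      std::sort(p.begin(), p.end(),
                [&](std::size_t a, std::size_t b) { return idx[a] < idx[b]; });
      std::vector<int> idx2(idx.size());
      std::vector<double> val2(val.size());
      for (std::size_t i = 0; i < p.size(); ++i) { idx2[i] = idx[p[i]]; val2[i] = val[p[i]]; }
      idx.swap(idx2);
      val.swap(val2);
    }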
libs/eigen/Eigen/src/SparseCore/SparseCwiseBinaryOp.h
@@ -10,6 +10,8 @@
 #ifndef EIGEN_SPARSE_CWISE_BINARY_OP_H
 #define EIGEN_SPARSE_CWISE_BINARY_OP_H
 
+#include "./InternalHeaderCheck.h"
+
 namespace Eigen {
 
 // Here we have to handle 3 cases:
@@ -40,14 +42,11 @@ class CwiseBinaryOpImpl<BinaryOp, Lhs, Rhs, Sparse>
    typedef CwiseBinaryOp<BinaryOp, Lhs, Rhs> Derived;
    typedef SparseMatrixBase<Derived> Base;
    EIGEN_SPARSE_PUBLIC_INTERFACE(Derived)
-    CwiseBinaryOpImpl()
-    {
-      EIGEN_STATIC_ASSERT((
-                (!internal::is_same<typename internal::traits<Lhs>::StorageKind,
-                                    typename internal::traits<Rhs>::StorageKind>::value)
-            ||  ((internal::evaluator<Lhs>::Flags&RowMajorBit) == (internal::evaluator<Rhs>::Flags&RowMajorBit))),
-            THE_STORAGE_ORDER_OF_BOTH_SIDES_MUST_MATCH);
-    }
+    EIGEN_STATIC_ASSERT((
+              (!internal::is_same<typename internal::traits<Lhs>::StorageKind,
+                                  typename internal::traits<Rhs>::StorageKind>::value)
+          ||  ((internal::evaluator<Lhs>::Flags&RowMajorBit) == (internal::evaluator<Rhs>::Flags&RowMajorBit))),
+          THE_STORAGE_ORDER_OF_BOTH_SIDES_MUST_MATCH)
 };
 
 namespace internal {
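Hoisting the storage-order check from the default constructor to class scope makes it fire on any instantiation of CwiseBinaryOpImpl, not just when the default constructor is odr-used. Reduced to plain C++:

    // Before: only diagnosed if this constructor is actually instantiated.
    template <bool Ok>
    struct CheckedInCtor {
      CheckedInCtor() { static_assert(Ok, "storage order of both sides must match"); }
    };

    // After: diagnosed as soon as the class template is instantiated.
    template <bool Ok>
    struct CheckedAtClassScope {
      static_assert(Ok, "storage order of both sides must match");
    };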
libs/eigen/Eigen/src/SparseCore/SparseCwiseUnaryOp.h
@@ -10,6 +10,8 @@
 #ifndef EIGEN_SPARSE_CWISE_UNARY_OP_H
 #define EIGEN_SPARSE_CWISE_UNARY_OP_H
 
+#include "./InternalHeaderCheck.h"
+
 namespace Eigen {
 
 namespace internal {
libs/eigen/Eigen/src/SparseCore/SparseDenseProduct.h
@@ -10,6 +10,8 @@
 #ifndef EIGEN_SPARSEDENSEPRODUCT_H
 #define EIGEN_SPARSEDENSEPRODUCT_H
 
+#include "./InternalHeaderCheck.h"
+
 namespace Eigen {
 
 namespace internal {
@@ -26,9 +28,9 @@ struct sparse_time_dense_product_impl;
 template<typename SparseLhsType, typename DenseRhsType, typename DenseResType>
 struct sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, typename DenseResType::Scalar, RowMajor, true>
 {
-  typedef typename internal::remove_all<SparseLhsType>::type Lhs;
-  typedef typename internal::remove_all<DenseRhsType>::type Rhs;
-  typedef typename internal::remove_all<DenseResType>::type Res;
+  typedef internal::remove_all_t<SparseLhsType> Lhs;
+  typedef internal::remove_all_t<DenseRhsType> Rhs;
+  typedef internal::remove_all_t<DenseResType> Res;
  typedef typename evaluator<Lhs>::InnerIterator LhsInnerIterator;
  typedef evaluator<Lhs> LhsEval;
  static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const typename Res::Scalar& alpha)
@@ -63,18 +65,26 @@ struct sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, t
 
  static void processRow(const LhsEval& lhsEval, const DenseRhsType& rhs, DenseResType& res, const typename Res::Scalar& alpha, Index i, Index col)
  {
-    typename Res::Scalar tmp(0);
-    for(LhsInnerIterator it(lhsEval,i); it ;++it)
-      tmp += it.value() * rhs.coeff(it.index(),col);
-    res.coeffRef(i,col) += alpha * tmp;
+    // Two accumulators, which breaks the dependency chain on the accumulator
+    // and allows more instruction-level parallelism in the following loop
+    typename Res::Scalar tmp_a(0);
+    typename Res::Scalar tmp_b(0);
+    for(LhsInnerIterator it(lhsEval,i); it ;++it) {
+      tmp_a += it.value() * rhs.coeff(it.index(), col);
+      ++it;
+      if(it) {
+        tmp_b += it.value() * rhs.coeff(it.index(), col);
+      }
+    }
+    res.coeffRef(i, col) += alpha * (tmp_a + tmp_b);
  }
 
 };
 
 // FIXME: what is the purpose of the following specialization? Is it for the BlockedSparse format?
 // -> let's disable it for now as it is conflicting with generic scalar*matrix and matrix*scalar operators
-// template<typename T1, typename T2/*, int _Options, typename _StrideType*/>
-// struct ScalarBinaryOpTraits<T1, Ref<T2/*, _Options, _StrideType*/> >
+// template<typename T1, typename T2/*, int Options_, typename StrideType_*/>
+// struct ScalarBinaryOpTraits<T1, Ref<T2/*, Options_, StrideType_*/> >
 // {
 //   enum {
 //     Defined = 1
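With a single accumulator every update waits on the previous floating-point add, so the loop runs at the latency of that chain; two independent accumulators let the hardware overlap the chains. The same transformation on a plain dot product (note that reassociation can change the rounding of the result):

    // Latency-bound: each += depends on the previous one.
    double dot1(const double* a, const double* b, int n) {
      double t = 0.0;
      for (int i = 0; i < n; ++i) t += a[i] * b[i];
      return t;
    }

    // Two chains advance in parallel in the pipeline.
    double dot2(const double* a, const double* b, int n) {
      double t0 = 0.0, t1 = 0.0;
      int i = 0;
      for (; i + 1 < n; i += 2) {
        t0 += a[i] * b[i];
        t1 += a[i + 1] * b[i + 1];
      }
      if (i < n) t0 += a[i] * b[i];
      return t0 + t1;
    }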
@@ -85,9 +95,9 @@ struct sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, t
 template<typename SparseLhsType, typename DenseRhsType, typename DenseResType, typename AlphaType>
 struct sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, AlphaType, ColMajor, true>
 {
-  typedef typename internal::remove_all<SparseLhsType>::type Lhs;
-  typedef typename internal::remove_all<DenseRhsType>::type Rhs;
-  typedef typename internal::remove_all<DenseResType>::type Res;
+  typedef internal::remove_all_t<SparseLhsType> Lhs;
+  typedef internal::remove_all_t<DenseRhsType> Rhs;
+  typedef internal::remove_all_t<DenseResType> Res;
  typedef evaluator<Lhs> LhsEval;
  typedef typename LhsEval::InnerIterator LhsInnerIterator;
  static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const AlphaType& alpha)
@@ -109,9 +119,9 @@ struct sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, A
 template<typename SparseLhsType, typename DenseRhsType, typename DenseResType>
 struct sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, typename DenseResType::Scalar, RowMajor, false>
 {
-  typedef typename internal::remove_all<SparseLhsType>::type Lhs;
-  typedef typename internal::remove_all<DenseRhsType>::type Rhs;
-  typedef typename internal::remove_all<DenseResType>::type Res;
+  typedef internal::remove_all_t<SparseLhsType> Lhs;
+  typedef internal::remove_all_t<DenseRhsType> Rhs;
+  typedef internal::remove_all_t<DenseResType> Res;
  typedef evaluator<Lhs> LhsEval;
  typedef typename LhsEval::InnerIterator LhsInnerIterator;
  static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const typename Res::Scalar& alpha)
@@ -149,9 +159,9 @@ struct sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, t
 template<typename SparseLhsType, typename DenseRhsType, typename DenseResType>
 struct sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, typename DenseResType::Scalar, ColMajor, false>
 {
-  typedef typename internal::remove_all<SparseLhsType>::type Lhs;
-  typedef typename internal::remove_all<DenseRhsType>::type Rhs;
-  typedef typename internal::remove_all<DenseResType>::type Res;
+  typedef internal::remove_all_t<SparseLhsType> Lhs;
+  typedef internal::remove_all_t<DenseRhsType> Rhs;
+  typedef internal::remove_all_t<DenseResType> Res;
  typedef typename evaluator<Lhs>::InnerIterator LhsInnerIterator;
  static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const typename Res::Scalar& alpha)
  {
@@ -226,16 +236,16 @@ template<typename LhsT, typename RhsT, bool NeedToTranspose>
 struct sparse_dense_outer_product_evaluator
 {
 protected:
-  typedef typename conditional<NeedToTranspose,RhsT,LhsT>::type Lhs1;
-  typedef typename conditional<NeedToTranspose,LhsT,RhsT>::type ActualRhs;
+  typedef std::conditional_t<NeedToTranspose,RhsT,LhsT> Lhs1;
+  typedef std::conditional_t<NeedToTranspose,LhsT,RhsT> ActualRhs;
  typedef Product<LhsT,RhsT,DefaultProduct> ProdXprType;
 
  // if the actual left-hand side is a dense vector,
  // then build a sparse-view so that we can seamlessly iterate over it.
-  typedef typename conditional<is_same<typename internal::traits<Lhs1>::StorageKind,Sparse>::value,
-            Lhs1, SparseView<Lhs1> >::type ActualLhs;
-  typedef typename conditional<is_same<typename internal::traits<Lhs1>::StorageKind,Sparse>::value,
-            Lhs1 const&, SparseView<Lhs1> >::type LhsArg;
+  typedef std::conditional_t<is_same<typename internal::traits<Lhs1>::StorageKind,Sparse>::value,
+            Lhs1, SparseView<Lhs1> > ActualLhs;
+  typedef std::conditional_t<is_same<typename internal::traits<Lhs1>::StorageKind,Sparse>::value,
+            Lhs1 const&, SparseView<Lhs1> > LhsArg;
 
  typedef evaluator<ActualLhs> LhsEval;
  typedef evaluator<ActualRhs> RhsEval;
 
libs/eigen/Eigen/src/SparseCore/SparseDiagonalProduct.h
@@ -10,6 +10,8 @@
 #ifndef EIGEN_SPARSE_DIAGONAL_PRODUCT_H
 #define EIGEN_SPARSE_DIAGONAL_PRODUCT_H
 
+#include "./InternalHeaderCheck.h"
+
 namespace Eigen {
 
 // The product of a diagonal matrix with a sparse matrix can be easily
libs/eigen/Eigen/src/SparseCore/SparseDot.h
@@ -10,6 +10,8 @@
 #ifndef EIGEN_SPARSE_DOT_H
 #define EIGEN_SPARSE_DOT_H
 
+#include "./InternalHeaderCheck.h"
+
 namespace Eigen {
 
 template<typename Derived>
libs/eigen/Eigen/src/SparseCore/SparseFuzzy.h
@@ -10,6 +10,8 @@
 #ifndef EIGEN_SPARSE_FUZZY_H
 #define EIGEN_SPARSE_FUZZY_H
 
+#include "./InternalHeaderCheck.h"
+
 namespace Eigen {
 
 template<typename Derived>
@@ -17,9 +19,9 @@ template<typename OtherDerived>
 bool SparseMatrixBase<Derived>::isApprox(const SparseMatrixBase<OtherDerived>& other, const RealScalar &prec) const
 {
  const typename internal::nested_eval<Derived,2,PlainObject>::type actualA(derived());
-  typename internal::conditional<bool(IsRowMajor)==bool(OtherDerived::IsRowMajor),
+  std::conditional_t<bool(IsRowMajor)==bool(OtherDerived::IsRowMajor),
    const typename internal::nested_eval<OtherDerived,2,PlainObject>::type,
-    const PlainObject>::type actualB(other.derived());
+    const PlainObject> actualB(other.derived());
 
  return (actualA - actualB).squaredNorm() <= prec * prec * numext::mini(actualA.squaredNorm(), actualB.squaredNorm());
 }
libs/eigen/Eigen/src/SparseCore/SparseMap.h
@@ -10,6 +10,8 @@
 #ifndef EIGEN_SPARSE_MAP_H
 #define EIGEN_SPARSE_MAP_H
 
+#include "./InternalHeaderCheck.h"
+
 namespace Eigen {
 
 namespace internal {
@@ -58,12 +60,12 @@ class SparseMapBase<Derived,ReadOnlyAccessors>
    using Base::operator=;
  protected:
 
-    typedef typename internal::conditional<
-                bool(internal::is_lvalue<Derived>::value),
-                Scalar *, const Scalar *>::type ScalarPointer;
-    typedef typename internal::conditional<
-                bool(internal::is_lvalue<Derived>::value),
-                StorageIndex *, const StorageIndex *>::type IndexPointer;
+    typedef std::conditional_t<
+                bool(internal::is_lvalue<Derived>::value),
+                Scalar *, const Scalar *> ScalarPointer;
+    typedef std::conditional_t<
+                bool(internal::is_lvalue<Derived>::value),
+                StorageIndex *, const StorageIndex *> IndexPointer;
 
    Index m_outerSize;
    Index m_innerSize;
@@ -237,6 +239,7 @@ class Map<SparseMatrixType>
    /** Constructs a read-write Map to a sparse matrix of size \a rows x \a cols, containing \a nnz non-zero coefficients,
      * stored as a sparse format as defined by the pointers \a outerIndexPtr, \a innerIndexPtr, and \a valuePtr.
      * If the optional parameter \a innerNonZerosPtr is the null pointer, then a standard compressed format is assumed.
+      * The inner indices must be sorted appropriately.
      *
      * This constructor is available only if \c SparseMatrixType is non-const.
      *
@@ -10,6 +10,8 @@
|
||||
#ifndef EIGEN_SPARSEMATRIX_H
|
||||
#define EIGEN_SPARSEMATRIX_H
|
||||
|
||||
#include "./InternalHeaderCheck.h"
|
||||
|
||||
namespace Eigen {
|
||||
|
||||
/** \ingroup SparseCore_Module
|
||||
@@ -29,10 +31,10 @@ namespace Eigen {
|
||||
*
|
||||
* More details on this storage sceheme are given in the \ref TutorialSparse "manual pages".
|
||||
*
|
||||
* \tparam _Scalar the scalar type, i.e. the type of the coefficients
|
||||
* \tparam _Options Union of bit flags controlling the storage scheme. Currently the only possibility
|
||||
* \tparam Scalar_ the scalar type, i.e. the type of the coefficients
|
||||
* \tparam Options_ Union of bit flags controlling the storage scheme. Currently the only possibility
|
||||
* is ColMajor or RowMajor. The default is 0 which means column-major.
|
||||
* \tparam _StorageIndex the type of the indices. It has to be a \b signed type (e.g., short, int, std::ptrdiff_t). Default is \c int.
|
||||
* \tparam StorageIndex_ the type of the indices. It has to be a \b signed type (e.g., short, int, std::ptrdiff_t). Default is \c int.
|
||||
*
|
||||
* \warning In %Eigen 3.2, the undocumented type \c SparseMatrix::Index was improperly defined as the storage index type (e.g., int),
|
||||
* whereas it is now (starting from %Eigen 3.3) deprecated and always defined as Eigen::Index.
|
||||
@@ -43,11 +45,11 @@ namespace Eigen {
|
||||
*/
|
||||
|
||||
namespace internal {
|
||||
template<typename _Scalar, int _Options, typename _StorageIndex>
|
||||
struct traits<SparseMatrix<_Scalar, _Options, _StorageIndex> >
|
||||
template<typename Scalar_, int Options_, typename StorageIndex_>
|
||||
struct traits<SparseMatrix<Scalar_, Options_, StorageIndex_> >
|
||||
{
|
||||
typedef _Scalar Scalar;
|
||||
typedef _StorageIndex StorageIndex;
|
||||
typedef Scalar_ Scalar;
|
||||
typedef StorageIndex_ StorageIndex;
|
||||
typedef Sparse StorageKind;
|
||||
typedef MatrixXpr XprKind;
|
||||
enum {
|
||||
@@ -55,21 +57,21 @@ struct traits<SparseMatrix<_Scalar, _Options, _StorageIndex> >
|
||||
ColsAtCompileTime = Dynamic,
|
||||
MaxRowsAtCompileTime = Dynamic,
|
||||
MaxColsAtCompileTime = Dynamic,
|
||||
Flags = _Options | NestByRefBit | LvalueBit | CompressedAccessBit,
|
||||
Flags = Options_ | NestByRefBit | LvalueBit | CompressedAccessBit,
|
||||
SupportedAccessPatterns = InnerRandomAccessPattern
|
||||
};
|
||||
};
|
||||
|
||||
template<typename _Scalar, int _Options, typename _StorageIndex, int DiagIndex>
|
||||
struct traits<Diagonal<SparseMatrix<_Scalar, _Options, _StorageIndex>, DiagIndex> >
|
||||
template<typename Scalar_, int Options_, typename StorageIndex_, int DiagIndex>
|
||||
struct traits<Diagonal<SparseMatrix<Scalar_, Options_, StorageIndex_>, DiagIndex> >
|
||||
{
|
||||
typedef SparseMatrix<_Scalar, _Options, _StorageIndex> MatrixType;
|
||||
typedef SparseMatrix<Scalar_, Options_, StorageIndex_> MatrixType;
|
||||
typedef typename ref_selector<MatrixType>::type MatrixTypeNested;
|
||||
typedef typename remove_reference<MatrixTypeNested>::type _MatrixTypeNested;
|
||||
typedef std::remove_reference_t<MatrixTypeNested> MatrixTypeNested_;
|
||||
|
||||
typedef _Scalar Scalar;
|
||||
typedef Scalar_ Scalar;
|
||||
typedef Dense StorageKind;
|
||||
typedef _StorageIndex StorageIndex;
|
||||
typedef StorageIndex_ StorageIndex;
|
||||
typedef MatrixXpr XprKind;
|
||||
|
||||
enum {
|
||||
@@ -81,9 +83,9 @@ struct traits<Diagonal<SparseMatrix<_Scalar, _Options, _StorageIndex>, DiagIndex
|
||||
};
|
||||
};
|
||||
|
||||
template<typename _Scalar, int _Options, typename _StorageIndex, int DiagIndex>
|
||||
struct traits<Diagonal<const SparseMatrix<_Scalar, _Options, _StorageIndex>, DiagIndex> >
|
||||
: public traits<Diagonal<SparseMatrix<_Scalar, _Options, _StorageIndex>, DiagIndex> >
|
||||
template<typename Scalar_, int Options_, typename StorageIndex_, int DiagIndex>
|
||||
struct traits<Diagonal<const SparseMatrix<Scalar_, Options_, StorageIndex_>, DiagIndex> >
|
||||
: public traits<Diagonal<SparseMatrix<Scalar_, Options_, StorageIndex_>, DiagIndex> >
|
||||
{
|
||||
enum {
|
||||
Flags = 0
|
||||
@@ -92,13 +94,13 @@ struct traits<Diagonal<const SparseMatrix<_Scalar, _Options, _StorageIndex>, Dia
|
||||
|
||||
} // end namespace internal
|
||||
|
||||
template<typename _Scalar, int _Options, typename _StorageIndex>
|
||||
template<typename Scalar_, int Options_, typename StorageIndex_>
|
||||
class SparseMatrix
|
||||
: public SparseCompressedBase<SparseMatrix<_Scalar, _Options, _StorageIndex> >
|
||||
: public SparseCompressedBase<SparseMatrix<Scalar_, Options_, StorageIndex_> >
|
||||
{
|
||||
typedef SparseCompressedBase<SparseMatrix> Base;
|
||||
using Base::convert_index;
|
||||
friend class SparseVector<_Scalar,0,_StorageIndex>;
|
||||
friend class SparseVector<Scalar_,0,StorageIndex_>;
|
||||
template<typename, typename, typename, typename, typename>
|
||||
friend struct internal::Assignment;
|
||||
public:
|
||||
@@ -108,7 +110,7 @@ class SparseMatrix
|
||||
using Base::operator+=;
|
||||
using Base::operator-=;
|
||||
|
||||
typedef MappedSparseMatrix<Scalar,Flags> Map;
|
||||
typedef Eigen::Map<SparseMatrix<Scalar,Flags,StorageIndex>> Map;
|
||||
typedef Diagonal<SparseMatrix> DiagonalReturnType;
|
||||
typedef Diagonal<const SparseMatrix> ConstDiagonalReturnType;
|
||||
typedef typename Base::InnerIterator InnerIterator;
|
||||
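
The Map typedef change above follows the deprecation of MappedSparseMatrix in favor of Eigen::Map<SparseMatrix<...>>. A usage sketch of the Map form over raw compressed (CSC) buffers, which is the use case both types serve:

#include <Eigen/SparseCore>

int main() {
  // 3x3 identity in compressed column storage.
  int    outer[]  = {0, 1, 2, 3};   // column start offsets
  int    inner[]  = {0, 1, 2};      // row indices
  double values[] = {1.0, 1.0, 1.0};
  // View the buffers as a SparseMatrix without copying them.
  Eigen::Map<Eigen::SparseMatrix<double>> m(3, 3, 3, outer, inner, values);
  return m.nonZeros() == 3 ? 0 : 1;
}
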
@@ -118,13 +120,13 @@ class SparseMatrix
using Base::IsRowMajor;
typedef internal::CompressedStorage<Scalar,StorageIndex> Storage;
enum {
Options = _Options
Options = Options_
};

typedef typename Base::IndexVector IndexVector;
typedef typename Base::ScalarVector ScalarVector;
protected:
typedef SparseMatrix<Scalar,(Flags&~RowMajorBit)|(IsRowMajor?RowMajorBit:0)> TransposedSparseMatrix;
typedef SparseMatrix<Scalar,(Flags&~RowMajorBit)|(IsRowMajor?RowMajorBit:0),StorageIndex> TransposedSparseMatrix;

Index m_outerSize;
Index m_innerSize;
@@ -253,9 +255,10 @@ class SparseMatrix
inline void setZero()
{
m_data.clear();
memset(m_outerIndex, 0, (m_outerSize+1)*sizeof(StorageIndex));
if(m_innerNonZeros)
memset(m_innerNonZeros, 0, (m_outerSize)*sizeof(StorageIndex));
std::fill_n(m_outerIndex, m_outerSize + 1, StorageIndex(0));
if(m_innerNonZeros) {
std::fill_n(m_innerNonZeros, m_outerSize, StorageIndex(0));
}
}
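
The memset to std::fill_n change in setZero() (and in resize() further down) is behavior-preserving for integral StorageIndex; the gain is that fill_n assigns typed values instead of writing raw bytes, so it stays correct if the index type ever becomes non-trivial. Equivalent standalone form:

#include <algorithm>  // std::fill_n
#include <cstring>    // std::memset

void zero_indices(int* p, std::size_t n) {
  // Old style: std::memset(p, 0, n * sizeof(int));  // raw bytes
  std::fill_n(p, n, 0);                              // typed values, same result here
}
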
/** Preallocates \a reserveSize non zeros.
@@ -285,10 +288,7 @@ class SparseMatrix
#else
template<class SizesType>
inline void reserve(const SizesType& reserveSizes, const typename SizesType::value_type& enableif =
#if (!EIGEN_COMP_MSVC) || (EIGEN_COMP_MSVC>=1500) // MSVC 2005 fails to compile with this typename
typename
#endif
SizesType::value_type())
typename SizesType::value_type())
{
EIGEN_UNUSED_VARIABLE(enableif);
reserveInnerVectors(reserveSizes);
@@ -302,8 +302,7 @@ class SparseMatrix
{
Index totalReserveSize = 0;
// turn the matrix into non-compressed mode
m_innerNonZeros = static_cast<StorageIndex*>(std::malloc(m_outerSize * sizeof(StorageIndex)));
if (!m_innerNonZeros) internal::throw_std_bad_alloc();
m_innerNonZeros = internal::conditional_aligned_new_auto<StorageIndex, true>(m_outerSize);

// temporarily use m_innerSizes to hold the new starting points.
StorageIndex* newOuterIndex = m_innerNonZeros;
@@ -336,8 +335,7 @@ class SparseMatrix
}
else
{
StorageIndex* newOuterIndex = static_cast<StorageIndex*>(std::malloc((m_outerSize+1)*sizeof(StorageIndex)));
if (!newOuterIndex) internal::throw_std_bad_alloc();
StorageIndex* newOuterIndex = internal::conditional_aligned_new_auto<StorageIndex, true>(m_outerSize + 1);

StorageIndex count = 0;
for(Index j=0; j<m_outerSize; ++j)
@@ -365,7 +363,7 @@ class SparseMatrix
}

std::swap(m_outerIndex, newOuterIndex);
std::free(newOuterIndex);
internal::conditional_aligned_delete_auto<StorageIndex, true>(newOuterIndex, m_outerSize + 1);
}

}
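
The malloc/realloc/free to conditional_aligned_new_auto/realloc_new_auto/delete_auto rewrites above (and throughout the rest of the file) delegate allocation to Eigen's own helpers from Eigen/src/Core/util/Memory.h. As I read them, those helpers throw std::bad_alloc on failure themselves, which is why the explicit `if (!ptr) internal::throw_std_bad_alloc();` checks disappear. A rough model of the contract (simplified sketch under that assumption; checked_alloc is a hypothetical stand-in, the real helpers also handle alignment and non-trivial element types):

#include <cstdlib>
#include <new>

template <typename T>
T* checked_alloc(std::size_t n) {
  T* p = static_cast<T*>(std::malloc(n * sizeof(T)));
  if (!p) throw std::bad_alloc();  // subsumes the manual throw_std_bad_alloc() calls
  return p;
}
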
@@ -488,7 +486,7 @@ class SparseMatrix
m_outerIndex[j+1] = m_outerIndex[j] + m_innerNonZeros[j];
oldStart = nextOldStart;
}
std::free(m_innerNonZeros);
internal::conditional_aligned_delete_auto<StorageIndex, true>(m_innerNonZeros, m_outerSize);
m_innerNonZeros = 0;
m_data.resize(m_outerIndex[m_outerSize]);
m_data.squeeze();
@@ -499,7 +497,7 @@ class SparseMatrix
{
if(m_innerNonZeros != 0)
return;
m_innerNonZeros = static_cast<StorageIndex*>(std::malloc(m_outerSize * sizeof(StorageIndex)));
m_innerNonZeros = internal::conditional_aligned_new_auto<StorageIndex, true>(m_outerSize);
for (Index i = 0; i < m_outerSize; i++)
{
m_innerNonZeros[i] = m_outerIndex[i+1] - m_outerIndex[i];
@@ -569,9 +567,8 @@ class SparseMatrix
if (m_innerNonZeros)
{
// Resize m_innerNonZeros
StorageIndex *newInnerNonZeros = static_cast<StorageIndex*>(std::realloc(m_innerNonZeros, (m_outerSize + outerChange) * sizeof(StorageIndex)));
if (!newInnerNonZeros) internal::throw_std_bad_alloc();
m_innerNonZeros = newInnerNonZeros;
m_innerNonZeros = internal::conditional_aligned_realloc_new_auto<StorageIndex, true>(
m_innerNonZeros, m_outerSize + outerChange, m_outerSize);

for(Index i=m_outerSize; i<m_outerSize+outerChange; i++)
m_innerNonZeros[i] = 0;
@@ -579,8 +576,7 @@ class SparseMatrix
else if (innerChange < 0)
{
// Inner size decreased: allocate a new m_innerNonZeros
m_innerNonZeros = static_cast<StorageIndex*>(std::malloc((m_outerSize + outerChange) * sizeof(StorageIndex)));
if (!m_innerNonZeros) internal::throw_std_bad_alloc();
m_innerNonZeros = internal::conditional_aligned_new_auto<StorageIndex, true>(m_outerSize + outerChange);
for(Index i = 0; i < m_outerSize + (std::min)(outerChange, Index(0)); i++)
m_innerNonZeros[i] = m_outerIndex[i+1] - m_outerIndex[i];
for(Index i = m_outerSize; i < m_outerSize + outerChange; i++)
@@ -604,9 +600,8 @@ class SparseMatrix
if (outerChange == 0)
return;

StorageIndex *newOuterIndex = static_cast<StorageIndex*>(std::realloc(m_outerIndex, (m_outerSize + outerChange + 1) * sizeof(StorageIndex)));
if (!newOuterIndex) internal::throw_std_bad_alloc();
m_outerIndex = newOuterIndex;
m_outerIndex = internal::conditional_aligned_realloc_new_auto<StorageIndex, true>(
m_outerIndex, m_outerSize + outerChange + 1, m_outerSize + 1);
if (outerChange > 0)
{
StorageIndex lastIdx = m_outerSize == 0 ? 0 : m_outerIndex[m_outerSize];
@@ -630,18 +625,16 @@ class SparseMatrix
m_data.clear();
if (m_outerSize != outerSize || m_outerSize==0)
{
std::free(m_outerIndex);
m_outerIndex = static_cast<StorageIndex*>(std::malloc((outerSize + 1) * sizeof(StorageIndex)));
if (!m_outerIndex) internal::throw_std_bad_alloc();

m_outerIndex = internal::conditional_aligned_realloc_new_auto<StorageIndex, true>(m_outerIndex, outerSize + 1,
m_outerSize + 1);
m_outerSize = outerSize;
}
if(m_innerNonZeros)
{
std::free(m_innerNonZeros);
internal::conditional_aligned_delete_auto<StorageIndex, true>(m_innerNonZeros, m_outerSize);
m_innerNonZeros = 0;
}
memset(m_outerIndex, 0, (m_outerSize+1)*sizeof(StorageIndex));
std::fill_n(m_outerIndex, m_outerSize + 1, StorageIndex(0));
}

/** \internal
@@ -664,7 +657,6 @@ class SparseMatrix
inline SparseMatrix()
: m_outerSize(-1), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
{
check_template_parameters();
resize(0, 0);
}

@@ -672,7 +664,6 @@ class SparseMatrix
inline SparseMatrix(Index rows, Index cols)
: m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
{
check_template_parameters();
resize(rows, cols);
}

@@ -683,7 +674,6 @@ class SparseMatrix
{
EIGEN_STATIC_ASSERT((internal::is_same<Scalar, typename OtherDerived::Scalar>::value),
YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
check_template_parameters();
const bool needToTranspose = (Flags & RowMajorBit) != (internal::evaluator<OtherDerived>::Flags & RowMajorBit);
if (needToTranspose)
*this = other.derived();
@@ -695,21 +685,24 @@ class SparseMatrix
internal::call_assignment_no_alias(*this, other.derived());
}
}


/** Constructs a sparse matrix from the sparse selfadjoint view \a other */
template<typename OtherDerived, unsigned int UpLo>
inline SparseMatrix(const SparseSelfAdjointView<OtherDerived, UpLo>& other)
: m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
{
check_template_parameters();
Base::operator=(other);
}

inline SparseMatrix(SparseMatrix&& other) : Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
{
*this = other.derived().markAsRValue();
}

/** Copy constructor (it performs a deep copy) */
inline SparseMatrix(const SparseMatrix& other)
: Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
{
check_template_parameters();
*this = other.derived();
}

@@ -718,17 +711,15 @@ class SparseMatrix
SparseMatrix(const ReturnByValue<OtherDerived>& other)
: Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
{
check_template_parameters();
initAssignment(other);
other.evalTo(*this);
}


/** \brief Copy constructor with in-place evaluation */
template<typename OtherDerived>
explicit SparseMatrix(const DiagonalBase<OtherDerived>& other)
: Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
{
check_template_parameters();
*this = other.derived();
}

@@ -753,9 +744,10 @@ class SparseMatrix
Eigen::Map<IndexVector>(this->m_data.indexPtr(), rows()).setLinSpaced(0, StorageIndex(rows()-1));
Eigen::Map<ScalarVector>(this->m_data.valuePtr(), rows()).setOnes();
Eigen::Map<IndexVector>(this->m_outerIndex, rows()+1).setLinSpaced(0, StorageIndex(rows()));
std::free(m_innerNonZeros);
internal::conditional_aligned_delete_auto<StorageIndex, true>(m_innerNonZeros, m_outerSize);
m_innerNonZeros = 0;
}

inline SparseMatrix& operator=(const SparseMatrix& other)
{
if (other.isRValue())
@@ -781,6 +773,10 @@ class SparseMatrix
return *this;
}

inline SparseMatrix& operator=(SparseMatrix&& other) {
return *this = other.derived().markAsRValue();
}
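
The move constructor and move assignment added above reuse the existing rvalue machinery: markAsRValue() flags the source so that the subsequent assignment steals its buffers instead of deep-copying them. Usage sketch:

#include <Eigen/SparseCore>
#include <utility>

Eigen::SparseMatrix<double> build(int n) {
  Eigen::SparseMatrix<double> m(n, n);
  m.insert(0, 0) = 1.0;
  return m;                                      // moved (or elided), not copied
}

int main() {
  Eigen::SparseMatrix<double> a = build(1000);
  Eigen::SparseMatrix<double> b = std::move(a);  // uses the new move constructor
  return b.nonZeros() == 1 ? 0 : 1;
}
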
#ifndef EIGEN_PARSED_BY_DOXYGEN
template<typename OtherDerived>
inline SparseMatrix& operator=(const EigenBase<OtherDerived>& other)
@@ -793,6 +789,7 @@ class SparseMatrix
template<typename OtherDerived>
EIGEN_DONT_INLINE SparseMatrix& operator=(const SparseMatrixBase<OtherDerived>& other);

#ifndef EIGEN_NO_IO
friend std::ostream & operator << (std::ostream & s, const SparseMatrix& m)
{
EIGEN_DBG_SPARSE(
@@ -837,12 +834,13 @@ class SparseMatrix
s << static_cast<const SparseMatrixBase<SparseMatrix>&>(m);
return s;
}
#endif

/** Destructor */
inline ~SparseMatrix()
{
std::free(m_outerIndex);
std::free(m_innerNonZeros);
internal::conditional_aligned_delete_auto<StorageIndex, true>(m_outerIndex, m_outerSize + 1);
internal::conditional_aligned_delete_auto<StorageIndex, true>(m_innerNonZeros, m_outerSize);
}

/** Overloaded for performance */
@@ -860,7 +858,7 @@ protected:
resize(other.rows(), other.cols());
if(m_innerNonZeros)
{
std::free(m_innerNonZeros);
internal::conditional_aligned_delete_auto<StorageIndex, true>(m_innerNonZeros, m_outerSize);
m_innerNonZeros = 0;
}
}
@@ -1012,11 +1010,8 @@ protected:
}

private:
static void check_template_parameters()
{
EIGEN_STATIC_ASSERT(NumTraits<StorageIndex>::IsSigned,THE_INDEX_TYPE_MUST_BE_A_SIGNED_TYPE);
EIGEN_STATIC_ASSERT((Options&(ColMajor|RowMajor))==Options,INVALID_MATRIX_TEMPLATE_PARAMETERS);
}
EIGEN_STATIC_ASSERT(NumTraits<StorageIndex>::IsSigned,THE_INDEX_TYPE_MUST_BE_A_SIGNED_TYPE)
EIGEN_STATIC_ASSERT((Options&(ColMajor|RowMajor))==Options,INVALID_MATRIX_TEMPLATE_PARAMETERS)

struct default_prunning_func {
default_prunning_func(const Scalar& ref, const RealScalar& eps) : reference(ref), epsilon(eps) {}
@@ -1103,11 +1098,11 @@ void set_from_triplets(const InputIterator& begin, const InputIterator& end, Spa
* an abstract iterator over a complex data-structure that would be expensive to evaluate. The triplets should rather
* be explicitly stored into a std::vector for instance.
*/
template<typename Scalar, int _Options, typename _StorageIndex>
template<typename Scalar, int Options_, typename StorageIndex_>
template<typename InputIterators>
void SparseMatrix<Scalar,_Options,_StorageIndex>::setFromTriplets(const InputIterators& begin, const InputIterators& end)
void SparseMatrix<Scalar,Options_,StorageIndex_>::setFromTriplets(const InputIterators& begin, const InputIterators& end)
{
internal::set_from_triplets<InputIterators, SparseMatrix<Scalar,_Options,_StorageIndex> >(begin, end, *this, internal::scalar_sum_op<Scalar,Scalar>());
internal::set_from_triplets<InputIterators, SparseMatrix<Scalar,Options_,StorageIndex_> >(begin, end, *this, internal::scalar_sum_op<Scalar,Scalar>());
}

/** The same as setFromTriplets but when duplicates are met the functor \a dup_func is applied:
@@ -1119,17 +1114,17 @@ void SparseMatrix<Scalar,_Options,_StorageIndex>::setFromTriplets(const InputIte
* mat.setFromTriplets(triplets.begin(), triplets.end(), [] (const Scalar&,const Scalar &b) { return b; });
* \endcode
*/
template<typename Scalar, int _Options, typename _StorageIndex>
template<typename Scalar, int Options_, typename StorageIndex_>
template<typename InputIterators,typename DupFunctor>
void SparseMatrix<Scalar,_Options,_StorageIndex>::setFromTriplets(const InputIterators& begin, const InputIterators& end, DupFunctor dup_func)
void SparseMatrix<Scalar,Options_,StorageIndex_>::setFromTriplets(const InputIterators& begin, const InputIterators& end, DupFunctor dup_func)
{
internal::set_from_triplets<InputIterators, SparseMatrix<Scalar,_Options,_StorageIndex>, DupFunctor>(begin, end, *this, dup_func);
internal::set_from_triplets<InputIterators, SparseMatrix<Scalar,Options_,StorageIndex_>, DupFunctor>(begin, end, *this, dup_func);
}
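
Both setFromTriplets overloads shown above are public API; the rename only touches the template parameter spelling. A usage sketch covering the default (summing) behavior and a custom duplicate functor:

#include <Eigen/SparseCore>
#include <vector>

int main() {
  typedef Eigen::Triplet<double> T;
  std::vector<T> triplets;
  triplets.push_back(T(0, 0, 1.0));
  triplets.push_back(T(0, 0, 2.0));  // duplicate coordinate
  triplets.push_back(T(1, 1, 3.0));

  Eigen::SparseMatrix<double> m(2, 2);
  // Default: duplicates are summed, so m(0,0) == 3.
  m.setFromTriplets(triplets.begin(), triplets.end());
  // With a functor: keep the last value instead, so m(0,0) == 2.
  m.setFromTriplets(triplets.begin(), triplets.end(),
                    [](const double&, const double& b) { return b; });
  return 0;
}
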
/** \internal */
template<typename Scalar, int _Options, typename _StorageIndex>
template<typename Scalar, int Options_, typename StorageIndex_>
template<typename DupFunctor>
void SparseMatrix<Scalar,_Options,_StorageIndex>::collapseDuplicates(DupFunctor dup_func)
void SparseMatrix<Scalar,Options_,StorageIndex_>::collapseDuplicates(DupFunctor dup_func)
{
eigen_assert(!isCompressed());
// TODO, in practice we should be able to use m_innerNonZeros for that task
@@ -1162,14 +1157,14 @@ void SparseMatrix<Scalar,_Options,_StorageIndex>::collapseDuplicates(DupFunctor
m_outerIndex[m_outerSize] = count;

// turn the matrix into compressed form
std::free(m_innerNonZeros);
internal::conditional_aligned_delete_auto<StorageIndex, true>(m_innerNonZeros, m_outerSize);
m_innerNonZeros = 0;
m_data.resize(m_outerIndex[m_outerSize]);
}

template<typename Scalar, int _Options, typename _StorageIndex>
template<typename Scalar, int Options_, typename StorageIndex_>
template<typename OtherDerived>
EIGEN_DONT_INLINE SparseMatrix<Scalar,_Options,_StorageIndex>& SparseMatrix<Scalar,_Options,_StorageIndex>::operator=(const SparseMatrixBase<OtherDerived>& other)
EIGEN_DONT_INLINE SparseMatrix<Scalar,Options_,StorageIndex_>& SparseMatrix<Scalar,Options_,StorageIndex_>::operator=(const SparseMatrixBase<OtherDerived>& other)
{
EIGEN_STATIC_ASSERT((internal::is_same<Scalar, typename OtherDerived::Scalar>::value),
YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
@@ -1189,8 +1184,8 @@ EIGEN_DONT_INLINE SparseMatrix<Scalar,_Options,_StorageIndex>& SparseMatrix<Scal
// 2 - do the actual copy/eval
// Since each coeff of the rhs has to be evaluated twice, let's evaluate it if needed
typedef typename internal::nested_eval<OtherDerived,2,typename internal::plain_matrix_type<OtherDerived>::type >::type OtherCopy;
typedef typename internal::remove_all<OtherCopy>::type _OtherCopy;
typedef internal::evaluator<_OtherCopy> OtherCopyEval;
typedef internal::remove_all_t<OtherCopy> OtherCopy_;
typedef internal::evaluator<OtherCopy_> OtherCopyEval;
OtherCopy otherCopy(other.derived());
OtherCopyEval otherCopyEval(otherCopy);

@@ -1240,8 +1235,8 @@ EIGEN_DONT_INLINE SparseMatrix<Scalar,_Options,_StorageIndex>& SparseMatrix<Scal
}
}

template<typename _Scalar, int _Options, typename _StorageIndex>
typename SparseMatrix<_Scalar,_Options,_StorageIndex>::Scalar& SparseMatrix<_Scalar,_Options,_StorageIndex>::insert(Index row, Index col)
template<typename Scalar_, int Options_, typename StorageIndex_>
typename SparseMatrix<Scalar_,Options_,StorageIndex_>::Scalar& SparseMatrix<Scalar_,Options_,StorageIndex_>::insert(Index row, Index col)
{
eigen_assert(row>=0 && row<rows() && col>=0 && col<cols());

@@ -1257,10 +1252,9 @@ typename SparseMatrix<_Scalar,_Options,_StorageIndex>::Scalar& SparseMatrix<_Sca
m_data.reserve(2*m_innerSize);

// turn the matrix into non-compressed mode
m_innerNonZeros = static_cast<StorageIndex*>(std::malloc(m_outerSize * sizeof(StorageIndex)));
if(!m_innerNonZeros) internal::throw_std_bad_alloc();
m_innerNonZeros = internal::conditional_aligned_new_auto<StorageIndex, true>(m_outerSize);

memset(m_innerNonZeros, 0, (m_outerSize)*sizeof(StorageIndex));
std::fill(m_innerNonZeros, m_innerNonZeros + m_outerSize, StorageIndex(0));

// pack all inner-vectors to the end of the pre-allocated space
// and allocate the entire free-space to the first inner-vector
@@ -1271,8 +1265,7 @@ typename SparseMatrix<_Scalar,_Options,_StorageIndex>::Scalar& SparseMatrix<_Sca
else
{
// turn the matrix into non-compressed mode
m_innerNonZeros = static_cast<StorageIndex*>(std::malloc(m_outerSize * sizeof(StorageIndex)));
if(!m_innerNonZeros) internal::throw_std_bad_alloc();
m_innerNonZeros = internal::conditional_aligned_new_auto<StorageIndex, true>(m_outerSize);
for(Index j=0; j<m_outerSize; ++j)
m_innerNonZeros[j] = m_outerIndex[j+1]-m_outerIndex[j];
}
@@ -1360,8 +1353,8 @@ typename SparseMatrix<_Scalar,_Options,_StorageIndex>::Scalar& SparseMatrix<_Sca
return insertUncompressed(row,col);
}

template<typename _Scalar, int _Options, typename _StorageIndex>
EIGEN_DONT_INLINE typename SparseMatrix<_Scalar,_Options,_StorageIndex>::Scalar& SparseMatrix<_Scalar,_Options,_StorageIndex>::insertUncompressed(Index row, Index col)
template<typename Scalar_, int Options_, typename StorageIndex_>
EIGEN_DONT_INLINE typename SparseMatrix<Scalar_,Options_,StorageIndex_>::Scalar& SparseMatrix<Scalar_,Options_,StorageIndex_>::insertUncompressed(Index row, Index col)
{
eigen_assert(!isCompressed());

@@ -1392,8 +1385,8 @@ EIGEN_DONT_INLINE typename SparseMatrix<_Scalar,_Options,_StorageIndex>::Scalar&
return (m_data.value(p) = Scalar(0));
}

template<typename _Scalar, int _Options, typename _StorageIndex>
EIGEN_DONT_INLINE typename SparseMatrix<_Scalar,_Options,_StorageIndex>::Scalar& SparseMatrix<_Scalar,_Options,_StorageIndex>::insertCompressed(Index row, Index col)
template<typename Scalar_, int Options_, typename StorageIndex_>
EIGEN_DONT_INLINE typename SparseMatrix<Scalar_,Options_,StorageIndex_>::Scalar& SparseMatrix<Scalar_,Options_,StorageIndex_>::insertCompressed(Index row, Index col)
{
eigen_assert(isCompressed());

@@ -1501,18 +1494,138 @@ EIGEN_DONT_INLINE typename SparseMatrix<_Scalar,_Options,_StorageIndex>::Scalar&

namespace internal {

template<typename _Scalar, int _Options, typename _StorageIndex>
struct evaluator<SparseMatrix<_Scalar,_Options,_StorageIndex> >
: evaluator<SparseCompressedBase<SparseMatrix<_Scalar,_Options,_StorageIndex> > >
template<typename Scalar_, int Options_, typename StorageIndex_>
struct evaluator<SparseMatrix<Scalar_,Options_,StorageIndex_> >
: evaluator<SparseCompressedBase<SparseMatrix<Scalar_,Options_,StorageIndex_> > >
{
typedef evaluator<SparseCompressedBase<SparseMatrix<_Scalar,_Options,_StorageIndex> > > Base;
typedef SparseMatrix<_Scalar,_Options,_StorageIndex> SparseMatrixType;
typedef evaluator<SparseCompressedBase<SparseMatrix<Scalar_,Options_,StorageIndex_> > > Base;
typedef SparseMatrix<Scalar_,Options_,StorageIndex_> SparseMatrixType;
evaluator() : Base() {}
explicit evaluator(const SparseMatrixType &mat) : Base(mat) {}
};

}

// Specialization for SparseMatrix.
// Serializes [rows, cols, isCompressed, outerSize, innerBufferSize,
// innerNonZeros, outerIndices, innerIndices, values].
template <typename Scalar, int Options, typename StorageIndex>
class Serializer<SparseMatrix<Scalar, Options, StorageIndex>, void> {
public:
typedef SparseMatrix<Scalar, Options, StorageIndex> SparseMat;

struct Header {
typename SparseMat::Index rows;
typename SparseMat::Index cols;
bool compressed;
Index outer_size;
Index inner_buffer_size;
};

EIGEN_DEVICE_FUNC size_t size(const SparseMat& value) const {
// innerNonZeros.
std::size_t num_storage_indices = value.isCompressed() ? 0 : value.outerSize();
// Outer indices.
num_storage_indices += value.outerSize() + 1;
// Inner indices.
const StorageIndex inner_buffer_size = value.outerIndexPtr()[value.outerSize()];
num_storage_indices += inner_buffer_size;
// Values.
std::size_t num_values = inner_buffer_size;
return sizeof(Header) + sizeof(Scalar) * num_values +
sizeof(StorageIndex) * num_storage_indices;
}

EIGEN_DEVICE_FUNC uint8_t* serialize(uint8_t* dest, uint8_t* end,
const SparseMat& value) {
if (EIGEN_PREDICT_FALSE(dest == nullptr)) return nullptr;
if (EIGEN_PREDICT_FALSE(dest + size(value) > end)) return nullptr;

const size_t header_bytes = sizeof(Header);
Header header = {value.rows(), value.cols(), value.isCompressed(),
value.outerSize(), value.outerIndexPtr()[value.outerSize()]};
EIGEN_USING_STD(memcpy)
memcpy(dest, &header, header_bytes);
dest += header_bytes;

// innerNonZeros.
if (!header.compressed) {
std::size_t data_bytes = sizeof(StorageIndex) * header.outer_size;
memcpy(dest, value.innerNonZeroPtr(), data_bytes);
dest += data_bytes;
}

// Outer indices.
std::size_t data_bytes = sizeof(StorageIndex) * (header.outer_size + 1);
memcpy(dest, value.outerIndexPtr(), data_bytes);
dest += data_bytes;

// Inner indices.
data_bytes = sizeof(StorageIndex) * header.inner_buffer_size;
memcpy(dest, value.innerIndexPtr(), data_bytes);
dest += data_bytes;

// Values.
data_bytes = sizeof(Scalar) * header.inner_buffer_size;
memcpy(dest, value.valuePtr(), data_bytes);
dest += data_bytes;

return dest;
}

EIGEN_DEVICE_FUNC const uint8_t* deserialize(const uint8_t* src,
const uint8_t* end,
SparseMat& value) const {
if (EIGEN_PREDICT_FALSE(src == nullptr)) return nullptr;
if (EIGEN_PREDICT_FALSE(src + sizeof(Header) > end)) return nullptr;

const size_t header_bytes = sizeof(Header);
Header header;
EIGEN_USING_STD(memcpy)
memcpy(&header, src, header_bytes);
src += header_bytes;

value.setZero();
value.resize(header.rows, header.cols);
if (header.compressed) {
value.makeCompressed();
} else {
value.uncompress();
}

// Adjust value ptr size.
value.data().resize(header.inner_buffer_size);

// Initialize compressed state and inner non-zeros.
if (!header.compressed) {
// Inner non-zero counts.
std::size_t data_bytes = sizeof(StorageIndex) * header.outer_size;
if (EIGEN_PREDICT_FALSE(src + data_bytes > end)) return nullptr;
memcpy(value.innerNonZeroPtr(), src, data_bytes);
src += data_bytes;
}

// Outer indices.
std::size_t data_bytes = sizeof(StorageIndex) * (header.outer_size + 1);
if (EIGEN_PREDICT_FALSE(src + data_bytes > end)) return nullptr;
memcpy(value.outerIndexPtr(), src, data_bytes);
src += data_bytes;

// Inner indices.
data_bytes = sizeof(StorageIndex) * header.inner_buffer_size;
if (EIGEN_PREDICT_FALSE(src + data_bytes > end)) return nullptr;
memcpy(value.innerIndexPtr(), src, data_bytes);
src += data_bytes;

// Values.
data_bytes = sizeof(Scalar) * header.inner_buffer_size;
if (EIGEN_PREDICT_FALSE(src + data_bytes > end)) return nullptr;
memcpy(value.valuePtr(), src, data_bytes);
src += data_bytes;
return src;
}
};

} // end namespace Eigen

#endif // EIGEN_SPARSEMATRIX_H
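
A round-trip sketch for the new Serializer specialization, assuming the generic Eigen::Serializer<T> front end exposes exactly the size/serialize/deserialize interface used by the class above:

#include <Eigen/SparseCore>
#include <cstdint>
#include <vector>

int main() {
  Eigen::SparseMatrix<double> src(2, 2), dst;
  src.insert(0, 0) = 1.0;
  src.makeCompressed();

  Eigen::Serializer<Eigen::SparseMatrix<double>> s;
  std::vector<std::uint8_t> buffer(s.size(src));
  std::uint8_t* end = buffer.data() + buffer.size();
  s.serialize(buffer.data(), end, src);    // writes header, indices, values
  s.deserialize(buffer.data(), end, dst);  // rebuilds dst from the byte stream
  return dst.isApprox(src) ? 0 : 1;
}
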
@@ -10,6 +10,8 @@
#ifndef EIGEN_SPARSEMATRIXBASE_H
#define EIGEN_SPARSEMATRIXBASE_H

#include "./InternalHeaderCheck.h"

namespace Eigen {

/** \ingroup SparseCore_Module
@@ -69,8 +71,7 @@ template<typename Derived> class SparseMatrixBase
* \sa MatrixBase::rows(), MatrixBase::cols(), RowsAtCompileTime, SizeAtCompileTime */


SizeAtCompileTime = (internal::size_at_compile_time<internal::traits<Derived>::RowsAtCompileTime,
internal::traits<Derived>::ColsAtCompileTime>::ret),
SizeAtCompileTime = (internal::size_of_xpr_at_compile_time<Derived>::ret),
/**< This is equal to the number of coefficients, i.e. the number of
* rows times the number of columns, or to \a Dynamic if this is not
* known at compile-time. \sa RowsAtCompileTime, ColsAtCompileTime */
@@ -78,8 +79,7 @@ template<typename Derived> class SparseMatrixBase
MaxRowsAtCompileTime = RowsAtCompileTime,
MaxColsAtCompileTime = ColsAtCompileTime,

MaxSizeAtCompileTime = (internal::size_at_compile_time<MaxRowsAtCompileTime,
MaxColsAtCompileTime>::ret),
MaxSizeAtCompileTime = internal::size_at_compile_time(MaxRowsAtCompileTime, MaxColsAtCompileTime),

IsVectorAtCompileTime = RowsAtCompileTime == 1 || ColsAtCompileTime == 1,
/**< This is set to true if either the number of rows or the number of
@@ -103,17 +103,17 @@ template<typename Derived> class SparseMatrixBase
: int(IsRowMajor) ? int(ColsAtCompileTime) : int(RowsAtCompileTime),

#ifndef EIGEN_PARSED_BY_DOXYGEN
_HasDirectAccess = (int(Flags)&DirectAccessBit) ? 1 : 0 // workaround sunCC
HasDirectAccess_ = (int(Flags)&DirectAccessBit) ? 1 : 0 // workaround sunCC
#endif
};

/** \internal the return type of MatrixBase::adjoint() */
typedef typename internal::conditional<NumTraits<Scalar>::IsComplex,
typedef std::conditional_t<NumTraits<Scalar>::IsComplex,
CwiseUnaryOp<internal::scalar_conjugate_op<Scalar>, Eigen::Transpose<const Derived> >,
Transpose<const Derived>
>::type AdjointReturnType;
> AdjointReturnType;
typedef Transpose<Derived> TransposeReturnType;
typedef typename internal::add_const<Transpose<const Derived> >::type ConstTransposeReturnType;
typedef Transpose<const Derived> ConstTransposeReturnType;

// FIXME storage order do not match evaluator storage order
typedef SparseMatrix<Scalar, Flags&RowMajorBit ? RowMajor : ColMajor, StorageIndex> PlainObject;
@@ -129,7 +129,7 @@ template<typename Derived> class SparseMatrixBase

/** \internal the return type of coeff()
*/
typedef typename internal::conditional<_HasDirectAccess, const Scalar&, Scalar>::type CoeffReturnType;
typedef std::conditional_t<HasDirectAccess_, const Scalar&, Scalar> CoeffReturnType;

/** \internal Represents a matrix with all coefficients equal to one another*/
typedef CwiseNullaryOp<internal::scalar_constant_op<Scalar>,Matrix<Scalar,Dynamic,Dynamic> > ConstantReturnType;
@@ -137,8 +137,8 @@ template<typename Derived> class SparseMatrixBase
/** type of the equivalent dense matrix */
typedef Matrix<Scalar,RowsAtCompileTime,ColsAtCompileTime> DenseMatrixType;
/** type of the equivalent square matrix */
typedef Matrix<Scalar,EIGEN_SIZE_MAX(RowsAtCompileTime,ColsAtCompileTime),
EIGEN_SIZE_MAX(RowsAtCompileTime,ColsAtCompileTime)> SquareMatrixType;
typedef Matrix<Scalar, internal::max_size_prefer_dynamic(RowsAtCompileTime, ColsAtCompileTime),
internal::max_size_prefer_dynamic(RowsAtCompileTime, ColsAtCompileTime)> SquareMatrixType;

inline const Derived& derived() const { return *static_cast<const Derived*>(this); }
inline Derived& derived() { return *static_cast<Derived*>(this); }
@@ -214,11 +214,11 @@ template<typename Derived> class SparseMatrixBase
inline void assignGeneric(const OtherDerived& other);

public:

#ifndef EIGEN_NO_IO
friend std::ostream & operator << (std::ostream & s, const SparseMatrixBase& m)
{
typedef typename Derived::Nested Nested;
typedef typename internal::remove_all<Nested>::type NestedCleaned;
typedef internal::remove_all_t<Nested> NestedCleaned;

if (Flags&RowMajorBit)
{
@@ -263,6 +263,7 @@ template<typename Derived> class SparseMatrixBase
}
return s;
}
#endif

template<typename OtherDerived>
Derived& operator+=(const SparseMatrixBase<OtherDerived>& other);

@@ -12,6 +12,8 @@

// This file implements sparse * permutation products

#include "./InternalHeaderCheck.h"

namespace Eigen {

namespace internal {
@@ -20,7 +22,7 @@ template<typename ExpressionType, int Side, bool Transposed>
struct permutation_matrix_product<ExpressionType, Side, Transposed, SparseShape>
{
typedef typename nested_eval<ExpressionType, 1>::type MatrixType;
typedef typename remove_all<MatrixType>::type MatrixTypeCleaned;
typedef remove_all_t<MatrixType> MatrixTypeCleaned;

typedef typename MatrixTypeCleaned::Scalar Scalar;
typedef typename MatrixTypeCleaned::StorageIndex StorageIndex;
@@ -30,9 +32,9 @@ struct permutation_matrix_product<ExpressionType, Side, Transposed, SparseShape>
MoveOuter = SrcStorageOrder==RowMajor ? Side==OnTheLeft : Side==OnTheRight
};

typedef typename internal::conditional<MoveOuter,
typedef std::conditional_t<MoveOuter,
SparseMatrix<Scalar,SrcStorageOrder,StorageIndex>,
SparseMatrix<Scalar,int(SrcStorageOrder)==RowMajor?ColMajor:RowMajor,StorageIndex> >::type ReturnType;
SparseMatrix<Scalar,int(SrcStorageOrder)==RowMajor?ColMajor:RowMajor,StorageIndex> > ReturnType;

template<typename Dest,typename PermutationType>
static inline void run(Dest& dst, const PermutationType& perm, const ExpressionType& xpr)
@@ -107,7 +109,7 @@ struct product_evaluator<Product<Lhs, Rhs, AliasFreeProduct>, ProductTag, Permut
explicit product_evaluator(const XprType& xpr)
: m_result(xpr.rows(), xpr.cols())
{
::new (static_cast<Base*>(this)) Base(m_result);
internal::construct_at<Base>(this, m_result);
generic_product_impl<Lhs, Rhs, PermutationShape, SparseShape, ProductTag>::evalTo(m_result, xpr.lhs(), xpr.rhs());
}
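
The placement-new to internal::construct_at rewrite just above repeats in several evaluators below. internal::construct_at appears to be Eigen's portable spelling of C++20 std::construct_at: construct an object in storage that already exists, without the cast noise. Minimal sketch of the pattern in standard C++ (requires C++20 for std::construct_at):

#include <memory>

struct Base { int v; explicit Base(int x) : v(x) {} };

void rebuild(Base* self) {
  // Classic spelling: ::new (static_cast<void*>(self)) Base(42);
  std::construct_at(self, 42);  // same effect, constexpr-friendly
}
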

@@ -10,6 +10,8 @@
#ifndef EIGEN_SPARSEPRODUCT_H
#define EIGEN_SPARSEPRODUCT_H

#include "./InternalHeaderCheck.h"

namespace Eigen {

/** \returns an expression of the product of two sparse matrices.
@@ -45,19 +47,19 @@ struct generic_product_impl<Lhs, Rhs, SparseShape, SparseShape, ProductType>

// dense += sparse * sparse
template<typename Dest,typename ActualLhs>
static void addTo(Dest& dst, const ActualLhs& lhs, const Rhs& rhs, typename enable_if<is_same<typename evaluator_traits<Dest>::Shape,DenseShape>::value,int*>::type* = 0)
static void addTo(Dest& dst, const ActualLhs& lhs, const Rhs& rhs, std::enable_if_t<is_same<typename evaluator_traits<Dest>::Shape,DenseShape>::value,int*>* = 0)
{
typedef typename nested_eval<ActualLhs,Dynamic>::type LhsNested;
typedef typename nested_eval<Rhs,Dynamic>::type RhsNested;
LhsNested lhsNested(lhs);
RhsNested rhsNested(rhs);
internal::sparse_sparse_to_dense_product_selector<typename remove_all<LhsNested>::type,
typename remove_all<RhsNested>::type, Dest>::run(lhsNested,rhsNested,dst);
internal::sparse_sparse_to_dense_product_selector<remove_all_t<LhsNested>,
remove_all_t<RhsNested>, Dest>::run(lhsNested,rhsNested,dst);
}

// dense -= sparse * sparse
template<typename Dest>
static void subTo(Dest& dst, const Lhs& lhs, const Rhs& rhs, typename enable_if<is_same<typename evaluator_traits<Dest>::Shape,DenseShape>::value,int*>::type* = 0)
static void subTo(Dest& dst, const Lhs& lhs, const Rhs& rhs, std::enable_if_t<is_same<typename evaluator_traits<Dest>::Shape,DenseShape>::value,int*>* = 0)
{
addTo(dst, -lhs, rhs);
}
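
Same modernization as conditional_t: std::enable_if_t collapses `typename enable_if<...>::type` in the SFINAE parameters above. A standalone sketch of the idiom:

#include <type_traits>

// Participates in overload resolution only for integral T.
template <typename T>
std::enable_if_t<std::is_integral<T>::value, T> twice(T x) { return x + x; }

// Pre-C++14 spelling of the complementary constraint.
template <typename T>
typename std::enable_if<std::is_floating_point<T>::value, T>::type
twice(T x) { return T(2) * x; }

int main() { return (twice(2) == 4 && twice(2.0) == 4.0) ? 0 : 1; }
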
@@ -72,8 +74,8 @@ protected:
typedef typename nested_eval<Rhs,Dynamic>::type RhsNested;
LhsNested lhsNested(lhs);
RhsNested rhsNested(rhs);
internal::conservative_sparse_sparse_product_selector<typename remove_all<LhsNested>::type,
typename remove_all<RhsNested>::type, Dest>::run(lhsNested,rhsNested,dst);
internal::conservative_sparse_sparse_product_selector<remove_all_t<LhsNested>,
remove_all_t<RhsNested>, Dest>::run(lhsNested,rhsNested,dst);
}

// dense = sparse * sparse
@@ -147,14 +149,14 @@ struct unary_evaluator<SparseView<Product<Lhs, Rhs, Options> >, IteratorBased>
: m_result(xpr.rows(), xpr.cols())
{
using std::abs;
::new (static_cast<Base*>(this)) Base(m_result);
internal::construct_at<Base>(this, m_result);
typedef typename nested_eval<Lhs,Dynamic>::type LhsNested;
typedef typename nested_eval<Rhs,Dynamic>::type RhsNested;
LhsNested lhsNested(xpr.nestedExpression().lhs());
RhsNested rhsNested(xpr.nestedExpression().rhs());

internal::sparse_sparse_product_with_pruning_selector<typename remove_all<LhsNested>::type,
typename remove_all<RhsNested>::type, PlainObject>::run(lhsNested,rhsNested,m_result,
internal::sparse_sparse_product_with_pruning_selector<remove_all_t<LhsNested>,
remove_all_t<RhsNested>, PlainObject>::run(lhsNested,rhsNested,m_result,
abs(xpr.reference())*xpr.epsilon());
}

@@ -165,9 +167,9 @@ protected:

} // end namespace internal

// sparse matrix = sparse-product (can be sparse*sparse, sparse*perm, etc.)
template<typename Scalar, int _Options, typename _StorageIndex>
template<typename Scalar, int Options_, typename StorageIndex_>
template<typename Lhs, typename Rhs>
SparseMatrix<Scalar,_Options,_StorageIndex>& SparseMatrix<Scalar,_Options,_StorageIndex>::operator=(const Product<Lhs,Rhs,AliasFreeProduct>& src)
SparseMatrix<Scalar,Options_,StorageIndex_>& SparseMatrix<Scalar,Options_,StorageIndex_>::operator=(const Product<Lhs,Rhs,AliasFreeProduct>& src)
{
// std::cout << "in Assignment : " << DstOptions << "\n";
SparseMatrix dst(src.rows(),src.cols());

@@ -10,6 +10,8 @@
#ifndef EIGEN_SPARSEREDUX_H
#define EIGEN_SPARSEREDUX_H

#include "./InternalHeaderCheck.h"

namespace Eigen {

template<typename Derived>
@@ -25,9 +27,9 @@ SparseMatrixBase<Derived>::sum() const
return res;
}

template<typename _Scalar, int _Options, typename _Index>
typename internal::traits<SparseMatrix<_Scalar,_Options,_Index> >::Scalar
SparseMatrix<_Scalar,_Options,_Index>::sum() const
template<typename Scalar_, int Options_, typename Index_>
typename internal::traits<SparseMatrix<Scalar_,Options_,Index_> >::Scalar
SparseMatrix<Scalar_,Options_,Index_>::sum() const
{
eigen_assert(rows()>0 && cols()>0 && "you are using a non initialized matrix");
if(this->isCompressed())
@@ -36,9 +38,9 @@ SparseMatrix<_Scalar,_Options,_Index>::sum() const
return Base::sum();
}

template<typename _Scalar, int _Options, typename _Index>
typename internal::traits<SparseVector<_Scalar,_Options, _Index> >::Scalar
SparseVector<_Scalar,_Options,_Index>::sum() const
template<typename Scalar_, int Options_, typename Index_>
typename internal::traits<SparseVector<Scalar_,Options_, Index_> >::Scalar
SparseVector<Scalar_,Options_,Index_>::sum() const
{
eigen_assert(rows()>0 && cols()>0 && "you are using a non initialized matrix");
return Matrix<Scalar,1,Dynamic>::Map(m_data.valuePtr(), m_data.size()).sum();
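
The sum() specializations above are public API; compressed matrices reduce directly over the contiguous value buffer. Usage sketch:

#include <Eigen/SparseCore>

int main() {
  Eigen::SparseMatrix<double> m(2, 2);
  m.insert(0, 0) = 1.5;
  m.insert(1, 1) = 2.5;
  m.makeCompressed();  // takes the fast path in the specialization above
  return m.sum() == 4.0 ? 0 : 1;
}
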
@@ -10,6 +10,8 @@
|
||||
#ifndef EIGEN_SPARSE_REF_H
|
||||
#define EIGEN_SPARSE_REF_H
|
||||
|
||||
#include "./InternalHeaderCheck.h"
|
||||
|
||||
namespace Eigen {
|
||||
|
||||
enum {
|
||||
@@ -20,13 +22,13 @@ namespace internal {
|
||||
|
||||
template<typename Derived> class SparseRefBase;
|
||||
|
||||
template<typename MatScalar, int MatOptions, typename MatIndex, int _Options, typename _StrideType>
|
||||
struct traits<Ref<SparseMatrix<MatScalar,MatOptions,MatIndex>, _Options, _StrideType> >
|
||||
template<typename MatScalar, int MatOptions, typename MatIndex, int Options_, typename StrideType_>
|
||||
struct traits<Ref<SparseMatrix<MatScalar,MatOptions,MatIndex>, Options_, StrideType_> >
|
||||
: public traits<SparseMatrix<MatScalar,MatOptions,MatIndex> >
|
||||
{
|
||||
typedef SparseMatrix<MatScalar,MatOptions,MatIndex> PlainObjectType;
|
||||
enum {
|
||||
Options = _Options,
|
||||
Options = Options_,
|
||||
Flags = traits<PlainObjectType>::Flags | CompressedAccessBit | NestByRefBit
|
||||
};
|
||||
|
||||
@@ -35,27 +37,27 @@ struct traits<Ref<SparseMatrix<MatScalar,MatOptions,MatIndex>, _Options, _Stride
|
||||
StorageOrderMatch = PlainObjectType::IsVectorAtCompileTime || Derived::IsVectorAtCompileTime || ((PlainObjectType::Flags&RowMajorBit)==(Derived::Flags&RowMajorBit)),
|
||||
MatchAtCompileTime = (Derived::Flags&CompressedAccessBit) && StorageOrderMatch
|
||||
};
|
||||
typedef typename internal::conditional<MatchAtCompileTime,internal::true_type,internal::false_type>::type type;
|
||||
typedef std::conditional_t<MatchAtCompileTime,internal::true_type,internal::false_type> type;
|
||||
};
|
||||
|
||||
};
|
||||
|
||||
template<typename MatScalar, int MatOptions, typename MatIndex, int _Options, typename _StrideType>
|
||||
struct traits<Ref<const SparseMatrix<MatScalar,MatOptions,MatIndex>, _Options, _StrideType> >
|
||||
: public traits<Ref<SparseMatrix<MatScalar,MatOptions,MatIndex>, _Options, _StrideType> >
|
||||
template<typename MatScalar, int MatOptions, typename MatIndex, int Options_, typename StrideType_>
|
||||
struct traits<Ref<const SparseMatrix<MatScalar,MatOptions,MatIndex>, Options_, StrideType_> >
|
||||
: public traits<Ref<SparseMatrix<MatScalar,MatOptions,MatIndex>, Options_, StrideType_> >
|
||||
{
|
||||
enum {
|
||||
Flags = (traits<SparseMatrix<MatScalar,MatOptions,MatIndex> >::Flags | CompressedAccessBit | NestByRefBit) & ~LvalueBit
|
||||
};
|
||||
};
|
||||
|
||||
template<typename MatScalar, int MatOptions, typename MatIndex, int _Options, typename _StrideType>
|
||||
struct traits<Ref<SparseVector<MatScalar,MatOptions,MatIndex>, _Options, _StrideType> >
|
||||
template<typename MatScalar, int MatOptions, typename MatIndex, int Options_, typename StrideType_>
|
||||
struct traits<Ref<SparseVector<MatScalar,MatOptions,MatIndex>, Options_, StrideType_> >
|
||||
: public traits<SparseVector<MatScalar,MatOptions,MatIndex> >
|
||||
{
|
||||
typedef SparseVector<MatScalar,MatOptions,MatIndex> PlainObjectType;
|
||||
enum {
|
||||
Options = _Options,
|
||||
Options = Options_,
|
||||
Flags = traits<PlainObjectType>::Flags | CompressedAccessBit | NestByRefBit
|
||||
};
|
||||
|
||||
@@ -63,14 +65,14 @@ struct traits<Ref<SparseVector<MatScalar,MatOptions,MatIndex>, _Options, _Stride
|
||||
enum {
|
||||
MatchAtCompileTime = (Derived::Flags&CompressedAccessBit) && Derived::IsVectorAtCompileTime
|
||||
};
|
||||
typedef typename internal::conditional<MatchAtCompileTime,internal::true_type,internal::false_type>::type type;
|
||||
typedef std::conditional_t<MatchAtCompileTime,internal::true_type,internal::false_type> type;
|
||||
};
|
||||
|
||||
};
|
||||
|
||||
template<typename MatScalar, int MatOptions, typename MatIndex, int _Options, typename _StrideType>
|
||||
struct traits<Ref<const SparseVector<MatScalar,MatOptions,MatIndex>, _Options, _StrideType> >
|
||||
: public traits<Ref<SparseVector<MatScalar,MatOptions,MatIndex>, _Options, _StrideType> >
|
||||
template<typename MatScalar, int MatOptions, typename MatIndex, int Options_, typename StrideType_>
|
||||
struct traits<Ref<const SparseVector<MatScalar,MatOptions,MatIndex>, Options_, StrideType_> >
|
||||
: public traits<Ref<SparseVector<MatScalar,MatOptions,MatIndex>, Options_, StrideType_> >
|
||||
{
|
||||
enum {
|
||||
Flags = (traits<SparseVector<MatScalar,MatOptions,MatIndex> >::Flags | CompressedAccessBit | NestByRefBit) & ~LvalueBit
|
||||
@@ -98,9 +100,9 @@ protected:
|
||||
void construct(Expression& expr)
|
||||
{
|
||||
if(expr.outerIndexPtr()==0)
|
||||
::new (static_cast<Base*>(this)) Base(expr.size(), expr.nonZeros(), expr.innerIndexPtr(), expr.valuePtr());
|
||||
internal::construct_at<Base>(this, expr.size(), expr.nonZeros(), expr.innerIndexPtr(), expr.valuePtr());
|
||||
else
|
||||
::new (static_cast<Base*>(this)) Base(expr.rows(), expr.cols(), expr.nonZeros(), expr.outerIndexPtr(), expr.innerIndexPtr(), expr.valuePtr(), expr.innerNonZeroPtr());
|
||||
internal::construct_at<Base>(this, expr.rows(), expr.cols(), expr.nonZeros(), expr.outerIndexPtr(), expr.innerIndexPtr(), expr.valuePtr(), expr.innerNonZeroPtr());
|
||||
}
|
||||
};
|
||||
|
||||
@@ -133,7 +135,7 @@ class Ref<SparseMatrixType, Options>
|
||||
template<int OtherOptions>
|
||||
inline Ref(const SparseMatrix<MatScalar,OtherOptions,MatIndex>& expr);
|
||||
template<int OtherOptions>
|
||||
inline Ref(const MappedSparseMatrix<MatScalar,OtherOptions,MatIndex>& expr);
|
||||
inline Ref(const Map<SparseMatrix<MatScalar,OtherOptions,MatIndex>>& expr);
|
||||
public:
|
||||
|
||||
typedef internal::SparseRefBase<Ref> Base;
|
||||
@@ -148,15 +150,15 @@ class Ref<SparseMatrixType, Options>
|
||||
eigen_assert( ((Options & int(StandardCompressedFormat))==0) || (expr.isCompressed()) );
|
||||
Base::construct(expr.derived());
|
||||
}
|
||||
|
||||
|
||||
template<int OtherOptions>
|
||||
inline Ref(MappedSparseMatrix<MatScalar,OtherOptions,MatIndex>& expr)
|
||||
inline Ref(Map<SparseMatrix<MatScalar,OtherOptions,MatIndex> >& expr)
|
||||
{
|
||||
EIGEN_STATIC_ASSERT(bool(Traits::template match<SparseMatrix<MatScalar,OtherOptions,MatIndex> >::MatchAtCompileTime), STORAGE_LAYOUT_DOES_NOT_MATCH);
|
||||
eigen_assert( ((Options & int(StandardCompressedFormat))==0) || (expr.isCompressed()) );
|
||||
Base::construct(expr.derived());
|
||||
}
|
||||
|
||||
|
||||
template<typename Derived>
|
||||
inline Ref(const SparseCompressedBase<Derived>& expr)
|
||||
#else
|
||||
@@ -201,8 +203,7 @@ class Ref<const SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType
|
||||
|
||||
~Ref() {
|
||||
if(m_hasCopy) {
|
||||
TPlainObjectType* obj = reinterpret_cast<TPlainObjectType*>(&m_storage);
|
||||
obj->~TPlainObjectType();
|
||||
internal::destroy_at(reinterpret_cast<TPlainObjectType*>(&m_storage));
|
||||
}
|
||||
}
|
||||
|
||||
@@ -213,8 +214,7 @@ class Ref<const SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType
|
||||
{
|
||||
if((Options & int(StandardCompressedFormat)) && (!expr.isCompressed()))
|
||||
{
|
||||
TPlainObjectType* obj = reinterpret_cast<TPlainObjectType*>(&m_storage);
|
||||
::new (obj) TPlainObjectType(expr);
|
||||
TPlainObjectType* obj = internal::construct_at(reinterpret_cast<TPlainObjectType*>(&m_storage), expr);
|
||||
m_hasCopy = true;
|
||||
Base::construct(*obj);
|
||||
}
|
||||
@@ -227,8 +227,7 @@ class Ref<const SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType
|
||||
template<typename Expression>
|
||||
void construct(const Expression& expr, internal::false_type)
|
||||
{
|
||||
TPlainObjectType* obj = reinterpret_cast<TPlainObjectType*>(&m_storage);
|
||||
::new (obj) TPlainObjectType(expr);
|
||||
TPlainObjectType* obj = internal::construct_at(reinterpret_cast<TPlainObjectType*>(&m_storage), expr);
|
||||
m_hasCopy = true;
|
||||
Base::construct(*obj);
|
||||
}
|
||||
@@ -319,8 +318,7 @@ class Ref<const SparseVector<MatScalar,MatOptions,MatIndex>, Options, StrideType
|
||||
|
||||
~Ref() {
|
||||
if(m_hasCopy) {
|
||||
TPlainObjectType* obj = reinterpret_cast<TPlainObjectType*>(&m_storage);
|
||||
obj->~TPlainObjectType();
|
||||
internal::destroy_at(reinterpret_cast<TPlainObjectType*>(&m_storage));
|
||||
}
|
||||
}
|
||||
|
||||
@@ -335,8 +333,7 @@ class Ref<const SparseVector<MatScalar,MatOptions,MatIndex>, Options, StrideType
|
||||
template<typename Expression>
|
||||
void construct(const Expression& expr, internal::false_type)
|
||||
{
|
||||
TPlainObjectType* obj = reinterpret_cast<TPlainObjectType*>(&m_storage);
|
||||
::new (obj) TPlainObjectType(expr);
|
||||
TPlainObjectType* obj = internal::construct_at(reinterpret_cast<TPlainObjectType*>(&m_storage), expr);
|
||||
m_hasCopy = true;
|
||||
Base::construct(*obj);
|
||||
}
|
||||
@@ -355,7 +352,7 @@ struct evaluator<Ref<SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, Strid
|
||||
: evaluator<SparseCompressedBase<Ref<SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType> > >
|
||||
{
|
||||
typedef evaluator<SparseCompressedBase<Ref<SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType> > > Base;
|
||||
typedef Ref<SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType> XprType;
|
||||
typedef Ref<SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType> XprType;
|
||||
evaluator() : Base() {}
|
||||
explicit evaluator(const XprType &mat) : Base(mat) {}
|
||||
};
|
||||
@@ -365,7 +362,7 @@ struct evaluator<Ref<const SparseMatrix<MatScalar,MatOptions,MatIndex>, Options,
|
||||
: evaluator<SparseCompressedBase<Ref<const SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType> > >
|
||||
{
|
||||
typedef evaluator<SparseCompressedBase<Ref<const SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType> > > Base;
|
||||
typedef Ref<const SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType> XprType;
|
||||
typedef Ref<const SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType> XprType;
|
||||
evaluator() : Base() {}
|
||||
explicit evaluator(const XprType &mat) : Base(mat) {}
|
||||
};
|
||||
|
||||
@@ -10,6 +10,8 @@
#ifndef EIGEN_SPARSE_SELFADJOINTVIEW_H
#define EIGEN_SPARSE_SELFADJOINTVIEW_H

#include "./InternalHeaderCheck.h"

namespace Eigen {

/** \ingroup SparseCore_Module
@@ -40,13 +42,13 @@ void permute_symm_to_fullsymm(const MatrixType& mat, SparseMatrix<typename Matri

}

template<typename MatrixType, unsigned int _Mode> class SparseSelfAdjointView
  : public EigenBase<SparseSelfAdjointView<MatrixType,_Mode> >
template<typename MatrixType, unsigned int Mode_> class SparseSelfAdjointView
  : public EigenBase<SparseSelfAdjointView<MatrixType,Mode_> >
{
  public:

    enum {
      Mode = _Mode,
      Mode = Mode_,
      TransposeMode = ((Mode & Upper) ? Lower : 0) | ((Mode & Lower) ? Upper : 0),
      RowsAtCompileTime = internal::traits<SparseSelfAdjointView>::RowsAtCompileTime,
      ColsAtCompileTime = internal::traits<SparseSelfAdjointView>::ColsAtCompileTime
@@ -57,7 +59,7 @@ template<typename MatrixType, unsigned int _Mode> class SparseSelfAdjointView
    typedef typename MatrixType::StorageIndex StorageIndex;
    typedef Matrix<StorageIndex,Dynamic,1> VectorI;
    typedef typename internal::ref_selector<MatrixType>::non_const_type MatrixTypeNested;
    typedef typename internal::remove_all<MatrixTypeNested>::type _MatrixTypeNested;
    typedef internal::remove_all_t<MatrixTypeNested> MatrixTypeNested_;

    explicit inline SparseSelfAdjointView(MatrixType& matrix) : m_matrix(matrix)
    {
@@ -68,8 +70,8 @@ template<typename MatrixType, unsigned int _Mode> class SparseSelfAdjointView
    inline Index cols() const { return m_matrix.cols(); }

    /** \internal \returns a reference to the nested matrix */
    const _MatrixTypeNested& matrix() const { return m_matrix; }
    typename internal::remove_reference<MatrixTypeNested>::type& matrix() { return m_matrix; }
    const MatrixTypeNested_& matrix() const { return m_matrix; }
    std::remove_reference_t<MatrixTypeNested>& matrix() { return m_matrix; }

    /** \returns an expression of the matrix product between a sparse self-adjoint matrix \c *this and a sparse matrix \a rhs.
     *
@@ -124,9 +126,9 @@ template<typename MatrixType, unsigned int _Mode> class SparseSelfAdjointView

    /** \returns an expression of P H P^-1 */
    // TODO implement twists in a more evaluator friendly fashion
    SparseSymmetricPermutationProduct<_MatrixTypeNested,Mode> twistedBy(const PermutationMatrix<Dynamic,Dynamic,StorageIndex>& perm) const
    SparseSymmetricPermutationProduct<MatrixTypeNested_,Mode> twistedBy(const PermutationMatrix<Dynamic,Dynamic,StorageIndex>& perm) const
    {
      return SparseSymmetricPermutationProduct<_MatrixTypeNested,Mode>(m_matrix, perm);
      return SparseSymmetricPermutationProduct<MatrixTypeNested_,Mode>(m_matrix, perm);
    }

    template<typename SrcMatrixType,int SrcMode>
@@ -260,15 +262,6 @@ struct Assignment<DstXprType, SrcXprType, Functor, SparseSelfAdjoint2Sparse>
    run(tmp, src, AssignOpType());
    dst -= tmp;
  }

  template<typename DestScalar>
  static void run(DynamicSparseMatrix<DestScalar,ColMajor,StorageIndex>& dst, const SrcXprType &src, const AssignOpType&/*func*/)
  {
    // TODO directly evaluate into dst;
    SparseMatrix<DestScalar,ColMajor,StorageIndex> tmp(dst.rows(),dst.cols());
    internal::permute_symm_to_fullsymm<SrcXprType::Mode>(src.matrix(), tmp);
    dst = tmp;
  }
};

} // end namespace internal
@@ -285,7 +278,7 @@ inline void sparse_selfadjoint_time_dense_product(const SparseLhsType& lhs, cons
  EIGEN_ONLY_USED_FOR_DEBUG(alpha);

  typedef typename internal::nested_eval<SparseLhsType,DenseRhsType::MaxColsAtCompileTime>::type SparseLhsTypeNested;
  typedef typename internal::remove_all<SparseLhsTypeNested>::type SparseLhsTypeNestedCleaned;
  typedef internal::remove_all_t<SparseLhsTypeNested> SparseLhsTypeNestedCleaned;
  typedef evaluator<SparseLhsTypeNestedCleaned> LhsEval;
  typedef typename LhsEval::InnerIterator LhsIterator;
  typedef typename SparseLhsType::Scalar LhsScalar;
@@ -347,7 +340,7 @@ struct generic_product_impl<LhsView, Rhs, SparseSelfAdjointShape, DenseShape, Pr
  template<typename Dest>
  static void scaleAndAddTo(Dest& dst, const LhsView& lhsView, const Rhs& rhs, const typename Dest::Scalar& alpha)
  {
    typedef typename LhsView::_MatrixTypeNested Lhs;
    typedef typename LhsView::MatrixTypeNested_ Lhs;
    typedef typename nested_eval<Lhs,Dynamic>::type LhsNested;
    typedef typename nested_eval<Rhs,Dynamic>::type RhsNested;
    LhsNested lhsNested(lhsView.matrix());
@@ -364,7 +357,7 @@ struct generic_product_impl<Lhs, RhsView, DenseShape, SparseSelfAdjointShape, Pr
  template<typename Dest>
  static void scaleAndAddTo(Dest& dst, const Lhs& lhs, const RhsView& rhsView, const typename Dest::Scalar& alpha)
  {
    typedef typename RhsView::_MatrixTypeNested Rhs;
    typedef typename RhsView::MatrixTypeNested_ Rhs;
    typedef typename nested_eval<Lhs,Dynamic>::type LhsNested;
    typedef typename nested_eval<Rhs,Dynamic>::type RhsNested;
    LhsNested lhsNested(lhs);
@@ -390,7 +383,7 @@ struct product_evaluator<Product<LhsView, Rhs, DefaultProduct>, ProductTag, Spar
  product_evaluator(const XprType& xpr)
    : m_lhs(xpr.lhs()), m_result(xpr.rows(), xpr.cols())
  {
    ::new (static_cast<Base*>(this)) Base(m_result);
    internal::construct_at<Base>(this, m_result);
    generic_product_impl<typename Rhs::PlainObject, Rhs, SparseShape, SparseShape, ProductTag>::evalTo(m_result, m_lhs, xpr.rhs());
  }

@@ -516,7 +509,7 @@ void permute_symm_to_fullsymm(const MatrixType& mat, SparseMatrix<typename Matri
    }
  }

template<int _SrcMode,int _DstMode,typename MatrixType,int DstOrder>
template<int SrcMode_,int DstMode_,typename MatrixType,int DstOrder>
void permute_symm_to_symm(const MatrixType& mat, SparseMatrix<typename MatrixType::Scalar,DstOrder,typename MatrixType::StorageIndex>& _dest, const typename MatrixType::StorageIndex* perm)
{
  typedef typename MatrixType::StorageIndex StorageIndex;
@@ -529,8 +522,8 @@ void permute_symm_to_symm(const MatrixType& mat, SparseMatrix<typename MatrixTyp
  enum {
    SrcOrder = MatrixType::IsRowMajor ? RowMajor : ColMajor,
    StorageOrderMatch = int(SrcOrder) == int(DstOrder),
    DstMode = DstOrder==RowMajor ? (_DstMode==Upper ? Lower : Upper) : _DstMode,
    SrcMode = SrcOrder==RowMajor ? (_SrcMode==Upper ? Lower : Upper) : _SrcMode
    DstMode = DstOrder==RowMajor ? (DstMode_==Upper ? Lower : Upper) : DstMode_,
    SrcMode = SrcOrder==RowMajor ? (SrcMode_==Upper ? Lower : Upper) : SrcMode_
  };

  MatEval matEval(mat);
@@ -611,7 +604,7 @@ class SparseSymmetricPermutationProduct
  public:
    typedef Matrix<StorageIndex,Dynamic,1> VectorI;
    typedef typename MatrixType::Nested MatrixTypeNested;
    typedef typename internal::remove_all<MatrixTypeNested>::type NestedExpression;
    typedef internal::remove_all_t<MatrixTypeNested> NestedExpression;

    SparseSymmetricPermutationProduct(const MatrixType& mat, const Perm& perm)
      : m_matrix(mat), m_perm(perm)

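For orientation, a small sketch of the public API these internals serve; the matrix contents are illustrative, not from the commit:

#include <Eigen/SparseCore>

int main() {
  Eigen::SparseMatrix<double> a(4, 4);
  a.insert(0, 0) = 4.0;
  a.insert(1, 0) = 1.0;  // only the lower triangle is stored
  a.insert(1, 1) = 3.0;
  a.makeCompressed();

  // y = A * x, with A viewed as self-adjoint from its lower triangle.
  Eigen::VectorXd x = Eigen::VectorXd::Ones(4);
  Eigen::VectorXd y = a.selfadjointView<Eigen::Lower>() * x;

  // P A P^-1 via twistedBy(), i.e. the SparseSymmetricPermutationProduct path.
  Eigen::PermutationMatrix<Eigen::Dynamic, Eigen::Dynamic, int> p(4);
  p.setIdentity();
  Eigen::SparseMatrix<double> b(4, 4);
  b = a.selfadjointView<Eigen::Lower>().twistedBy(p);
}
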
@@ -10,6 +10,8 @@
#ifndef EIGEN_SPARSESOLVERBASE_H
#define EIGEN_SPARSESOLVERBASE_H

#include "./InternalHeaderCheck.h"

namespace Eigen {

namespace internal {
@@ -19,7 +21,7 @@ namespace internal {
 * The rhs is decomposed into small vertical panels which are solved through dense temporaries.
 */
template<typename Decomposition, typename Rhs, typename Dest>
typename enable_if<Rhs::ColsAtCompileTime!=1 && Dest::ColsAtCompileTime!=1>::type
std::enable_if_t<Rhs::ColsAtCompileTime!=1 && Dest::ColsAtCompileTime!=1>
solve_sparse_through_dense_panels(const Decomposition &dec, const Rhs& rhs, Dest &dest)
{
  EIGEN_STATIC_ASSERT((Dest::Flags&RowMajorBit)==0,THIS_METHOD_IS_ONLY_FOR_COLUMN_MAJOR_MATRICES);
@@ -43,7 +45,7 @@ solve_sparse_through_dense_panels(const Decomposition &dec, const Rhs& rhs, Dest

// Overload for vector as rhs
template<typename Decomposition, typename Rhs, typename Dest>
typename enable_if<Rhs::ColsAtCompileTime==1 || Dest::ColsAtCompileTime==1>::type
std::enable_if_t<Rhs::ColsAtCompileTime==1 || Dest::ColsAtCompileTime==1>
solve_sparse_through_dense_panels(const Decomposition &dec, const Rhs& rhs, Dest &dest)
{
  typedef typename Dest::Scalar DestScalar;
@@ -73,6 +75,8 @@ class SparseSolverBase : internal::noncopyable
      : m_isInitialized(false)
    {}

    SparseSolverBase(SparseSolverBase&& other) : internal::noncopyable{}, m_isInitialized{other.m_isInitialized} {}

    ~SparseSolverBase()
    {}

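A brief sketch of a call that reaches `solve_sparse_through_dense_panels()`: solving against a sparse, multi-column rhs. The solver choice and values are illustrative assumptions:

#include <Eigen/SparseCholesky>
#include <Eigen/SparseCore>

int main() {
  Eigen::SparseMatrix<double> a(2, 2);
  a.insert(0, 0) = 4.0;
  a.insert(1, 1) = 9.0;
  a.makeCompressed();

  Eigen::SimplicialLLT<Eigen::SparseMatrix<double> > llt(a);

  // A sparse rhs with more than one column is solved panel by panel
  // through dense temporaries, as described above.
  Eigen::SparseMatrix<double> b(2, 3), x;
  b.insert(0, 0) = 1.0;
  b.insert(1, 2) = 2.0;
  x = llt.solve(b);
}
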
@@ -10,6 +10,8 @@
#ifndef EIGEN_SPARSESPARSEPRODUCTWITHPRUNING_H
#define EIGEN_SPARSESPARSEPRODUCTWITHPRUNING_H

#include "./InternalHeaderCheck.h"

namespace Eigen {

namespace internal {
@@ -21,9 +23,9 @@ static void sparse_sparse_product_with_pruning_impl(const Lhs& lhs, const Rhs& r
{
  // return sparse_sparse_product_with_pruning_impl2(lhs,rhs,res);

  typedef typename remove_all<Rhs>::type::Scalar RhsScalar;
  typedef typename remove_all<ResultType>::type::Scalar ResScalar;
  typedef typename remove_all<Lhs>::type::StorageIndex StorageIndex;
  typedef typename remove_all_t<Rhs>::Scalar RhsScalar;
  typedef typename remove_all_t<ResultType>::Scalar ResScalar;
  typedef typename remove_all_t<Lhs>::StorageIndex StorageIndex;

  // make sure to call innerSize/outerSize since we fake the storage order.
  Index rows = lhs.innerSize();
@@ -90,7 +92,7 @@ struct sparse_sparse_product_with_pruning_selector<Lhs,Rhs,ResultType,ColMajor,C

  static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res, const RealScalar& tolerance)
  {
    typename remove_all<ResultType>::type _res(res.rows(), res.cols());
    remove_all_t<ResultType> _res(res.rows(), res.cols());
    internal::sparse_sparse_product_with_pruning_impl<Lhs,Rhs,ResultType>(lhs, rhs, _res, tolerance);
    res.swap(_res);
  }
@@ -117,7 +119,7 @@ struct sparse_sparse_product_with_pruning_selector<Lhs,Rhs,ResultType,RowMajor,R
  static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res, const RealScalar& tolerance)
  {
    // let's transpose the product to get a column x column product
    typename remove_all<ResultType>::type _res(res.rows(), res.cols());
    remove_all_t<ResultType> _res(res.rows(), res.cols());
    internal::sparse_sparse_product_with_pruning_impl<Rhs,Lhs,ResultType>(rhs, lhs, _res, tolerance);
    res.swap(_res);
  }

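These selectors are reached through `pruned()` on a sparse product; a small sketch, with illustrative tolerances:

#include <Eigen/SparseCore>

int main() {
  Eigen::SparseMatrix<double> a(3, 3), b(3, 3), c;
  a.insert(0, 1) = 2.0;
  b.insert(1, 2) = 1e-20;

  c = (a * b).pruned();            // prune near-zero result entries on the fly
  c = (a * b).pruned(1.0, 1e-12);  // explicit reference value and tolerance
}
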
@@ -10,6 +10,8 @@
#ifndef EIGEN_SPARSETRANSPOSE_H
#define EIGEN_SPARSETRANSPOSE_H

#include "./InternalHeaderCheck.h"

namespace Eigen {

namespace internal {

@@ -11,6 +11,8 @@
#ifndef EIGEN_SPARSE_TRIANGULARVIEW_H
#define EIGEN_SPARSE_TRIANGULARVIEW_H

#include "./InternalHeaderCheck.h"

namespace Eigen {

/** \ingroup SparseCore_Module
@@ -44,8 +46,8 @@ template<typename MatrixType, unsigned int Mode> class TriangularViewImpl<Matrix
    EIGEN_SPARSE_PUBLIC_INTERFACE(TriangularViewType)

    typedef typename MatrixType::Nested MatrixTypeNested;
    typedef typename internal::remove_reference<MatrixTypeNested>::type MatrixTypeNestedNonRef;
    typedef typename internal::remove_all<MatrixTypeNested>::type MatrixTypeNestedCleaned;
    typedef std::remove_reference_t<MatrixTypeNested> MatrixTypeNestedNonRef;
    typedef internal::remove_all_t<MatrixTypeNested> MatrixTypeNestedCleaned;

    template<typename RhsType, typename DstType>
    EIGEN_DEVICE_FUNC

@@ -10,6 +10,8 @@
#ifndef EIGEN_SPARSEUTIL_H
#define EIGEN_SPARSEUTIL_H

#include "./InternalHeaderCheck.h"

namespace Eigen {

#ifdef NDEBUG
@@ -49,10 +51,8 @@ const int InnerRandomAccessPattern = 0x2 | CoherentAccessPattern;
const int OuterRandomAccessPattern = 0x4 | CoherentAccessPattern;
const int RandomAccessPattern = 0x8 | OuterRandomAccessPattern | InnerRandomAccessPattern;

template<typename _Scalar, int _Flags = 0, typename _StorageIndex = int> class SparseMatrix;
template<typename _Scalar, int _Flags = 0, typename _StorageIndex = int> class DynamicSparseMatrix;
template<typename _Scalar, int _Flags = 0, typename _StorageIndex = int> class SparseVector;
template<typename _Scalar, int _Flags = 0, typename _StorageIndex = int> class MappedSparseMatrix;
template<typename Scalar_, int Flags_ = 0, typename StorageIndex_ = int> class SparseMatrix;
template<typename Scalar_, int Flags_ = 0, typename StorageIndex_ = int> class SparseVector;

template<typename MatrixType, unsigned int UpLo> class SparseSelfAdjointView;
template<typename Lhs, typename Rhs> class SparseDiagonalProduct;
@@ -65,10 +65,10 @@ template<typename Lhs, typename Rhs, bool Transpose> class SparseDenseOuterProdu

template<typename Lhs, typename Rhs> struct SparseSparseProductReturnType;
template<typename Lhs, typename Rhs,
         int InnerSize = EIGEN_SIZE_MIN_PREFER_FIXED(internal::traits<Lhs>::ColsAtCompileTime,internal::traits<Rhs>::RowsAtCompileTime)> struct DenseSparseProductReturnType;
         int InnerSize = internal::min_size_prefer_fixed(internal::traits<Lhs>::ColsAtCompileTime, internal::traits<Rhs>::RowsAtCompileTime)> struct DenseSparseProductReturnType;

template<typename Lhs, typename Rhs,
         int InnerSize = EIGEN_SIZE_MIN_PREFER_FIXED(internal::traits<Lhs>::ColsAtCompileTime,internal::traits<Rhs>::RowsAtCompileTime)> struct SparseDenseProductReturnType;
         int InnerSize = internal::min_size_prefer_fixed(internal::traits<Lhs>::ColsAtCompileTime, internal::traits<Rhs>::RowsAtCompileTime)> struct SparseDenseProductReturnType;
template<typename MatrixType,int UpLo> class SparseSymmetricPermutationProduct;

namespace internal {
@@ -80,41 +80,41 @@ template<typename T> struct eval<T,Sparse>
{};

template<typename T,int Cols,int Flags> struct sparse_eval<T,1,Cols,Flags> {
    typedef typename traits<T>::Scalar _Scalar;
    typedef typename traits<T>::StorageIndex _StorageIndex;
    typedef typename traits<T>::Scalar Scalar_;
    typedef typename traits<T>::StorageIndex StorageIndex_;
  public:
    typedef SparseVector<_Scalar, RowMajor, _StorageIndex> type;
    typedef SparseVector<Scalar_, RowMajor, StorageIndex_> type;
};

template<typename T,int Rows,int Flags> struct sparse_eval<T,Rows,1,Flags> {
    typedef typename traits<T>::Scalar _Scalar;
    typedef typename traits<T>::StorageIndex _StorageIndex;
    typedef typename traits<T>::Scalar Scalar_;
    typedef typename traits<T>::StorageIndex StorageIndex_;
  public:
    typedef SparseVector<_Scalar, ColMajor, _StorageIndex> type;
    typedef SparseVector<Scalar_, ColMajor, StorageIndex_> type;
};

// TODO this seems almost identical to plain_matrix_type<T, Sparse>
template<typename T,int Rows,int Cols,int Flags> struct sparse_eval {
    typedef typename traits<T>::Scalar _Scalar;
    typedef typename traits<T>::StorageIndex _StorageIndex;
    enum { _Options = ((Flags&RowMajorBit)==RowMajorBit) ? RowMajor : ColMajor };
    typedef typename traits<T>::Scalar Scalar_;
    typedef typename traits<T>::StorageIndex StorageIndex_;
    enum { Options_ = ((Flags&RowMajorBit)==RowMajorBit) ? RowMajor : ColMajor };
  public:
    typedef SparseMatrix<_Scalar, _Options, _StorageIndex> type;
    typedef SparseMatrix<Scalar_, Options_, StorageIndex_> type;
};

template<typename T,int Flags> struct sparse_eval<T,1,1,Flags> {
    typedef typename traits<T>::Scalar _Scalar;
    typedef typename traits<T>::Scalar Scalar_;
  public:
    typedef Matrix<_Scalar, 1, 1> type;
    typedef Matrix<Scalar_, 1, 1> type;
};

template<typename T> struct plain_matrix_type<T,Sparse>
{
    typedef typename traits<T>::Scalar _Scalar;
    typedef typename traits<T>::StorageIndex _StorageIndex;
    enum { _Options = ((evaluator<T>::Flags&RowMajorBit)==RowMajorBit) ? RowMajor : ColMajor };
    typedef typename traits<T>::Scalar Scalar_;
    typedef typename traits<T>::StorageIndex StorageIndex_;
    enum { Options_ = ((evaluator<T>::Flags&RowMajorBit)==RowMajorBit) ? RowMajor : ColMajor };
  public:
    typedef SparseMatrix<_Scalar, _Options, _StorageIndex> type;
    typedef SparseMatrix<Scalar_, Options_, StorageIndex_> type;
};

template<typename T>

@@ -10,6 +10,8 @@
#ifndef EIGEN_SPARSEVECTOR_H
#define EIGEN_SPARSEVECTOR_H

#include "./InternalHeaderCheck.h"

namespace Eigen {

/** \ingroup SparseCore_Module
@@ -17,7 +19,7 @@ namespace Eigen {
 *
 * \brief a sparse vector class
 *
 * \tparam _Scalar the scalar type, i.e. the type of the coefficients
 * \tparam Scalar_ the scalar type, i.e. the type of the coefficients
 *
 * See http://www.netlib.org/linalg/html_templates/node91.html for details on the storage scheme.
 *
@@ -26,21 +28,21 @@ namespace Eigen {
 */

namespace internal {
template<typename _Scalar, int _Options, typename _StorageIndex>
struct traits<SparseVector<_Scalar, _Options, _StorageIndex> >
template<typename Scalar_, int Options_, typename StorageIndex_>
struct traits<SparseVector<Scalar_, Options_, StorageIndex_> >
{
  typedef _Scalar Scalar;
  typedef _StorageIndex StorageIndex;
  typedef Scalar_ Scalar;
  typedef StorageIndex_ StorageIndex;
  typedef Sparse StorageKind;
  typedef MatrixXpr XprKind;
  enum {
    IsColVector = (_Options & RowMajorBit) ? 0 : 1,
    IsColVector = (Options_ & RowMajorBit) ? 0 : 1,

    RowsAtCompileTime = IsColVector ? Dynamic : 1,
    ColsAtCompileTime = IsColVector ? 1 : Dynamic,
    MaxRowsAtCompileTime = RowsAtCompileTime,
    MaxColsAtCompileTime = ColsAtCompileTime,
    Flags = _Options | NestByRefBit | LvalueBit | (IsColVector ? 0 : RowMajorBit) | CompressedAccessBit,
    Flags = Options_ | NestByRefBit | LvalueBit | (IsColVector ? 0 : RowMajorBit) | CompressedAccessBit,
    SupportedAccessPatterns = InnerRandomAccessPattern
  };
};
@@ -60,9 +62,9 @@ struct sparse_vector_assign_selector;

}

template<typename _Scalar, int _Options, typename _StorageIndex>
template<typename Scalar_, int Options_, typename StorageIndex_>
class SparseVector
  : public SparseCompressedBase<SparseVector<_Scalar, _Options, _StorageIndex> >
  : public SparseCompressedBase<SparseVector<Scalar_, Options_, StorageIndex_> >
{
    typedef SparseCompressedBase<SparseVector> Base;
    using Base::convert_index;
@@ -75,7 +77,7 @@ class SparseVector
    enum { IsColVector = internal::traits<SparseVector>::IsColVector };

    enum {
      Options = _Options
      Options = Options_
    };

    EIGEN_STRONG_INLINE Index rows() const { return IsColVector ? m_size : 1; }
@@ -207,9 +209,33 @@ class SparseVector
    inline void finalize() {}

    /** \copydoc SparseMatrix::prune(const Scalar&,const RealScalar&) */
    void prune(const Scalar& reference, const RealScalar& epsilon = NumTraits<RealScalar>::dummy_precision())
    Index prune(const Scalar& reference, const RealScalar& epsilon = NumTraits<RealScalar>::dummy_precision()) {
      return prune([&](const Scalar& val){ return !internal::isMuchSmallerThan(val, reference, epsilon); });
    }

    /**
     * \brief Prunes the entries of the vector based on a `predicate`
     * \tparam F Type of the predicate.
     * \param keep_predicate The predicate that is used to test whether a value should be kept. A callable that
     * gets passed a `Scalar` value and returns a boolean. If the predicate returns true, the value is kept.
     * \return The new number of structural non-zeros.
     */
    template<class F>
    Index prune(F&& keep_predicate)
    {
      m_data.prune(reference,epsilon);
      Index k = 0;
      Index n = m_data.size();
      for (Index i = 0; i < n; ++i)
      {
        if (keep_predicate(m_data.value(i)))
        {
          m_data.value(k) = std::move(m_data.value(i));
          m_data.index(k) = m_data.index(i);
          ++k;
        }
      }
      m_data.resize(k);
      return k;
    }

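A usage sketch of the new predicate overload; the vector contents are illustrative:

#include <cmath>
#include <Eigen/SparseCore>

int main() {
  Eigen::SparseVector<double> v(10);
  v.insert(2) = 1.0;
  v.insert(5) = -3.0;
  v.insert(7) = 0.25;

  // Keep only entries with magnitude >= 0.5; prune() returns the new
  // number of structural non-zeros (2 here).
  Eigen::Index kept = v.prune([](const double& x) { return std::abs(x) >= 0.5; });
}
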
    /** Resizes the sparse vector to \a rows x \a cols
@@ -256,11 +282,11 @@ class SparseVector

    void resizeNonZeros(Index size) { m_data.resize(size); }

    inline SparseVector() : m_size(0) { check_template_parameters(); resize(0); }
    inline SparseVector() : m_size(0) { resize(0); }

    explicit inline SparseVector(Index size) : m_size(0) { check_template_parameters(); resize(size); }
    explicit inline SparseVector(Index size) : m_size(0) { resize(size); }

    inline SparseVector(Index rows, Index cols) : m_size(0) { check_template_parameters(); resize(rows,cols); }
    inline SparseVector(Index rows, Index cols) : m_size(0) { resize(rows,cols); }

    template<typename OtherDerived>
    inline SparseVector(const SparseMatrixBase<OtherDerived>& other)
@@ -269,14 +295,12 @@ class SparseVector
      #ifdef EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
        EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
      #endif
      check_template_parameters();
      *this = other.derived();
    }

    inline SparseVector(const SparseVector& other)
      : Base(other), m_size(0)
    {
      check_template_parameters();
      *this = other.derived();
    }

@@ -329,6 +353,7 @@ class SparseVector
    }
#endif

#ifndef EIGEN_NO_IO
    friend std::ostream & operator << (std::ostream & s, const SparseVector& m)
    {
      for (Index i=0; i<m.nonZeros(); ++i)
@@ -336,6 +361,7 @@ class SparseVector
      s << std::endl;
      return s;
    }
#endif

    /** Destructor */
    inline ~SparseVector() {}
@@ -393,30 +419,26 @@ class SparseVector
# endif

  protected:

    static void check_template_parameters()
    {
      EIGEN_STATIC_ASSERT(NumTraits<StorageIndex>::IsSigned,THE_INDEX_TYPE_MUST_BE_A_SIGNED_TYPE);
      EIGEN_STATIC_ASSERT((_Options&(ColMajor|RowMajor))==Options,INVALID_MATRIX_TEMPLATE_PARAMETERS);
    }

    EIGEN_STATIC_ASSERT(NumTraits<StorageIndex>::IsSigned,THE_INDEX_TYPE_MUST_BE_A_SIGNED_TYPE)
    EIGEN_STATIC_ASSERT((Options_&(ColMajor|RowMajor))==Options,INVALID_MATRIX_TEMPLATE_PARAMETERS)

    Storage m_data;
    Index m_size;
};

namespace internal {

template<typename _Scalar, int _Options, typename _Index>
struct evaluator<SparseVector<_Scalar,_Options,_Index> >
  : evaluator_base<SparseVector<_Scalar,_Options,_Index> >
template<typename Scalar_, int Options_, typename Index_>
struct evaluator<SparseVector<Scalar_,Options_,Index_> >
  : evaluator_base<SparseVector<Scalar_,Options_,Index_> >
{
  typedef SparseVector<_Scalar,_Options,_Index> SparseVectorType;
  typedef SparseVector<Scalar_,Options_,Index_> SparseVectorType;
  typedef evaluator_base<SparseVectorType> Base;
  typedef typename SparseVectorType::InnerIterator InnerIterator;
  typedef typename SparseVectorType::ReverseInnerIterator ReverseInnerIterator;

  enum {
    CoeffReadCost = NumTraits<_Scalar>::ReadCost,
    CoeffReadCost = NumTraits<Scalar_>::ReadCost,
    Flags = SparseVectorType::Flags
  };

@@ -473,6 +495,78 @@ struct sparse_vector_assign_selector<Dest,Src,SVA_RuntimeSwitch> {

}

// Specialization for SparseVector.
// Serializes [size, numNonZeros, innerIndices, values].
template <typename Scalar, int Options, typename StorageIndex>
class Serializer<SparseVector<Scalar, Options, StorageIndex>, void> {
 public:
  typedef SparseVector<Scalar, Options, StorageIndex> SparseMat;

  struct Header {
    typename SparseMat::Index size;
    Index num_non_zeros;
  };

  EIGEN_DEVICE_FUNC size_t size(const SparseMat& value) const {
    return sizeof(Header) +
           (sizeof(Scalar) + sizeof(StorageIndex)) * value.nonZeros();
  }

  EIGEN_DEVICE_FUNC uint8_t* serialize(uint8_t* dest, uint8_t* end,
                                       const SparseMat& value) {
    if (EIGEN_PREDICT_FALSE(dest == nullptr)) return nullptr;
    if (EIGEN_PREDICT_FALSE(dest + size(value) > end)) return nullptr;

    const size_t header_bytes = sizeof(Header);
    Header header = {value.innerSize(), value.nonZeros()};
    EIGEN_USING_STD(memcpy)
    memcpy(dest, &header, header_bytes);
    dest += header_bytes;

    // Inner indices.
    std::size_t data_bytes = sizeof(StorageIndex) * header.num_non_zeros;
    memcpy(dest, value.innerIndexPtr(), data_bytes);
    dest += data_bytes;

    // Values.
    data_bytes = sizeof(Scalar) * header.num_non_zeros;
    memcpy(dest, value.valuePtr(), data_bytes);
    dest += data_bytes;

    return dest;
  }

  EIGEN_DEVICE_FUNC const uint8_t* deserialize(const uint8_t* src,
                                               const uint8_t* end,
                                               SparseMat& value) const {
    if (EIGEN_PREDICT_FALSE(src == nullptr)) return nullptr;
    if (EIGEN_PREDICT_FALSE(src + sizeof(Header) > end)) return nullptr;

    const size_t header_bytes = sizeof(Header);
    Header header;
    EIGEN_USING_STD(memcpy)
    memcpy(&header, src, header_bytes);
    src += header_bytes;

    value.setZero();
    value.resize(header.size);
    value.resizeNonZeros(header.num_non_zeros);

    // Inner indices.
    std::size_t data_bytes = sizeof(StorageIndex) * header.num_non_zeros;
    if (EIGEN_PREDICT_FALSE(src + data_bytes > end)) return nullptr;
    memcpy(value.innerIndexPtr(), src, data_bytes);
    src += data_bytes;

    // Values.
    data_bytes = sizeof(Scalar) * header.num_non_zeros;
    if (EIGEN_PREDICT_FALSE(src + data_bytes > end)) return nullptr;
    memcpy(value.valuePtr(), src, data_bytes);
    src += data_bytes;
    return src;
  }
};

} // end namespace Eigen

#endif // EIGEN_SPARSEVECTOR_H

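A round-trip sketch of the new `Serializer` specialization, assuming `Eigen::Serializer` is visible through the core headers; the buffer handling mirrors the bounds-checked API above:

#include <cstdint>
#include <vector>
#include <Eigen/SparseCore>

int main() {
  Eigen::SparseVector<double> v(5), w;
  v.insert(1) = 2.0;
  v.insert(3) = -1.0;

  Eigen::Serializer<Eigen::SparseVector<double> > s;
  std::vector<std::uint8_t> buffer(s.size(v));
  std::uint8_t* end = buffer.data() + buffer.size();
  s.serialize(buffer.data(), end, v);    // writes [size, nnz, indices, values]
  s.deserialize(buffer.data(), end, w);  // w reconstructs v
}
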
@@ -11,6 +11,8 @@
#ifndef EIGEN_SPARSEVIEW_H
#define EIGEN_SPARSEVIEW_H

#include "./InternalHeaderCheck.h"

namespace Eigen {

namespace internal {
@@ -45,11 +47,11 @@ template<typename MatrixType>
class SparseView : public SparseMatrixBase<SparseView<MatrixType> >
{
  typedef typename MatrixType::Nested MatrixTypeNested;
  typedef typename internal::remove_all<MatrixTypeNested>::type _MatrixTypeNested;
  typedef internal::remove_all_t<MatrixTypeNested> MatrixTypeNested_;
  typedef SparseMatrixBase<SparseView > Base;
public:
  EIGEN_SPARSE_PUBLIC_INTERFACE(SparseView)
  typedef typename internal::remove_all<MatrixType>::type NestedExpression;
  typedef internal::remove_all_t<MatrixType> NestedExpression;

  explicit SparseView(const MatrixType& mat, const Scalar& reference = Scalar(0),
                      const RealScalar &epsilon = NumTraits<Scalar>::dummy_precision())
@@ -62,7 +64,7 @@ public:
  inline Index outerSize() const { return m_matrix.outerSize(); }

  /** \returns the nested expression */
  const typename internal::remove_all<MatrixTypeNested>::type&
  const internal::remove_all_t<MatrixTypeNested>&
  nestedExpression() const { return m_matrix; }

  Scalar reference() const { return m_reference; }

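The `SparseView` expression is normally obtained through `sparseView()`; a small sketch with illustrative threshold values:

#include <Eigen/Dense>
#include <Eigen/SparseCore>

int main() {
  Eigen::MatrixXd d(2, 2);
  d << 1.0, 1e-18,
       0.0, 2.0;

  // Entries "much smaller" than the reference (w.r.t. epsilon) are
  // treated as structural zeros by the view.
  Eigen::SparseMatrix<double> s = d.sparseView();          // default reference 0
  Eigen::SparseMatrix<double> t = d.sparseView(1.0, 1e-9); // reference, epsilon
}
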
@@ -10,6 +10,8 @@
#ifndef EIGEN_SPARSETRIANGULARSOLVER_H
#define EIGEN_SPARSETRIANGULARSOLVER_H

#include "./InternalHeaderCheck.h"

namespace Eigen {

namespace internal {
@@ -114,7 +116,7 @@ struct sparse_solve_triangular_selector<Lhs,Rhs,Mode,Lower,ColMajor>
      for(Index i=0; i<lhs.cols(); ++i)
      {
        Scalar& tmp = other.coeffRef(i,col);
        if (tmp!=Scalar(0)) // optimization when other is actually sparse
        if (!numext::is_exactly_zero(tmp)) // optimization when other is actually sparse
        {
          LhsIterator it(lhsEval, i);
          while(it && it.index()<i)
@@ -149,7 +151,7 @@ struct sparse_solve_triangular_selector<Lhs,Rhs,Mode,Upper,ColMajor>
      for(Index i=lhs.cols()-1; i>=0; --i)
      {
        Scalar& tmp = other.coeffRef(i,col);
        if (tmp!=Scalar(0)) // optimization when other is actually sparse
        if (!numext::is_exactly_zero(tmp)) // optimization when other is actually sparse
        {
          if(!(Mode & UnitDiag))
          {
@@ -182,11 +184,11 @@ void TriangularViewImpl<ExpressionType,Mode,Sparse>::solveInPlace(MatrixBase<Oth

  enum { copy = internal::traits<OtherDerived>::Flags & RowMajorBit };

  typedef typename internal::conditional<copy,
    typename internal::plain_matrix_type_column_major<OtherDerived>::type, OtherDerived&>::type OtherCopy;
  typedef std::conditional_t<copy,
    typename internal::plain_matrix_type_column_major<OtherDerived>::type, OtherDerived&> OtherCopy;
  OtherCopy otherCopy(other.derived());

  internal::sparse_solve_triangular_selector<ExpressionType, typename internal::remove_reference<OtherCopy>::type, Mode>::run(derived().nestedExpression(), otherCopy);
  internal::sparse_solve_triangular_selector<ExpressionType, std::remove_reference_t<OtherCopy>, Mode>::run(derived().nestedExpression(), otherCopy);

  if (copy)
    other = otherCopy;
@@ -239,7 +241,7 @@ struct sparse_solve_triangular_sparse_selector<Lhs,Rhs,Mode,UpLo,ColMajor>
    {
      tempVector.restart();
      Scalar& ci = tempVector.coeffRef(i);
      if (ci!=Scalar(0))
      if (!numext::is_exactly_zero(ci))
      {
        // find
        typename Lhs::InnerIterator it(lhs, i);
@@ -270,11 +272,11 @@ struct sparse_solve_triangular_sparse_selector<Lhs,Rhs,Mode,UpLo,ColMajor>
    }


    Index count = 0;
    // Index count = 0;
    // FIXME compute a reference value to filter zeros
    for (typename AmbiVector<Scalar,StorageIndex>::Iterator it(tempVector/*,1e-12*/); it; ++it)
    {
      ++ count;
      // ++ count;
      // std::cerr << "fill " << it.index() << ", " << col << "\n";
      // std::cout << it.value() << " ";
      // FIXME use insertBack
@@ -299,8 +301,8 @@ void TriangularViewImpl<ExpressionType,Mode,Sparse>::solveInPlace(SparseMatrixBa

  // enum { copy = internal::traits<OtherDerived>::Flags & RowMajorBit };

  // typedef typename internal::conditional<copy,
  //   typename internal::plain_matrix_type_column_major<OtherDerived>::type, OtherDerived&>::type OtherCopy;
  // typedef std::conditional_t<copy,
  //   typename internal::plain_matrix_type_column_major<OtherDerived>::type, OtherDerived&> OtherCopy;
  // OtherCopy otherCopy(other.derived());

  internal::sparse_solve_triangular_sparse_selector<ExpressionType, OtherDerived, Mode>::run(derived().nestedExpression(), other.derived());

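Finally, a sketch of the solver entry point these selectors implement; the matrix and rhs are illustrative:

#include <Eigen/Dense>
#include <Eigen/SparseCore>

int main() {
  Eigen::SparseMatrix<double> a(2, 2);
  a.insert(0, 0) = 2.0;
  a.insert(1, 0) = 1.0;
  a.insert(1, 1) = 4.0;
  a.makeCompressed();

  Eigen::VectorXd b(2);
  b << 2.0, 9.0;

  // Runs the column-major lower solver above; the is_exactly_zero() test
  // skips columns whose current rhs entry is exactly zero.
  a.triangularView<Eigen::Lower>().solveInPlace(b);  // b becomes x = (1, 2)
}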