ADD: added other eigen lib
@@ -10,6 +10,8 @@
#ifndef EIGEN_AUTODIFF_JACOBIAN_H
#define EIGEN_AUTODIFF_JACOBIAN_H

#include "./InternalHeaderCheck.h"

namespace Eigen
{

@@ -20,17 +22,8 @@ public:
AutoDiffJacobian(const Functor& f) : Functor(f) {}

// forward constructors
#if EIGEN_HAS_VARIADIC_TEMPLATES
template<typename... T>
AutoDiffJacobian(const T& ...Values) : Functor(Values...) {}
#else
template<typename T0>
AutoDiffJacobian(const T0& a0) : Functor(a0) {}
template<typename T0, typename T1>
AutoDiffJacobian(const T0& a0, const T1& a1) : Functor(a0, a1) {}
template<typename T0, typename T1, typename T2>
AutoDiffJacobian(const T0& a0, const T1& a1, const T2& a2) : Functor(a0, a1, a2) {}
#endif

typedef typename Functor::InputType InputType;
typedef typename Functor::ValueType ValueType;
@@ -50,7 +43,6 @@ public:
typedef Matrix<ActiveScalar, InputsAtCompileTime, 1> ActiveInput;
typedef Matrix<ActiveScalar, ValuesAtCompileTime, 1> ActiveValue;

#if EIGEN_HAS_VARIADIC_TEMPLATES
// Some compilers don't accept variadic parameters after a default parameter,
// i.e., we can't just write _jac=0 but we need to overload operator():
EIGEN_STRONG_INLINE
@@ -61,19 +53,12 @@ public:
template<typename... ParamsType>
void operator() (const InputType& x, ValueType* v, JacobianType* _jac,
const ParamsType&... Params) const
#else
void operator() (const InputType& x, ValueType* v, JacobianType* _jac=0) const
#endif
{
eigen_assert(v!=0);

if (!_jac)
{
#if EIGEN_HAS_VARIADIC_TEMPLATES
Functor::operator()(x, v, Params...);
#else
Functor::operator()(x, v);
#endif
return;
}

@@ -89,11 +74,7 @@ public:
for (Index i=0; i<jac.cols(); i++)
ax[i].derivatives() = DerivativeType::Unit(x.rows(),i);

#if EIGEN_HAS_VARIADIC_TEMPLATES
Functor::operator()(ax, &av, Params...);
#else
Functor::operator()(ax, &av);
#endif

for (Index i=0; i<jac.rows(); i++)
{

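For readers skimming this hunk, here is a minimal usage sketch of the AutoDiffJacobian wrapper being updated above. It is illustrative only and not part of the commit: the functor name, the polynomial it evaluates, and the test values are invented.

#include <unsupported/Eigen/AutoDiff>
#include <Eigen/Core>
#include <iostream>

// Illustrative functor: f(x) = (x0*x1, x0*x0 + 3*x1).
struct DemoFunctor {
  typedef Eigen::Vector2d InputType;
  typedef Eigen::Vector2d ValueType;

  // Must be callable both with plain doubles and with AutoDiffScalar.
  template <typename T>
  void operator()(const Eigen::Matrix<T, 2, 1>& x, Eigen::Matrix<T, 2, 1>* v) const {
    (*v)(0) = x(0) * x(1);
    (*v)(1) = x(0) * x(0) + T(3) * x(1);
  }
};

int main() {
  DemoFunctor f;
  Eigen::AutoDiffJacobian<DemoFunctor> adf(f);       // uses the AutoDiffJacobian(const Functor&) constructor shown above
  Eigen::Vector2d x(1.0, 2.0), v;
  Eigen::AutoDiffJacobian<DemoFunctor>::JacobianType J;
  adf(x, &v, &J);                                    // v = f(x), J = df/dx at x
  std::cout << J << std::endl;                       // expected: [[2, 1], [2, 3]]
}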
libs/eigen/unsupported/Eigen/src/AutoDiff/AutoDiffScalar.h (101 lines, Executable file → Normal file)
@@ -10,6 +10,8 @@
#ifndef EIGEN_AUTODIFF_SCALAR_H
#define EIGEN_AUTODIFF_SCALAR_H

#include "./InternalHeaderCheck.h"

namespace Eigen {

namespace internal {
@@ -66,14 +68,14 @@ inline AutoDiffScalar<NewDerType> MakeAutoDiffScalar(const typename NewDerType::
template<typename DerivativeType>
class AutoDiffScalar
: public internal::auto_diff_special_op
<DerivativeType, !internal::is_same<typename internal::traits<typename internal::remove_all<DerivativeType>::type>::Scalar,
typename NumTraits<typename internal::traits<typename internal::remove_all<DerivativeType>::type>::Scalar>::Real>::value>
<DerivativeType, !internal::is_same<typename internal::traits<internal::remove_all_t<DerivativeType>>::Scalar,
typename NumTraits<typename internal::traits<internal::remove_all_t<DerivativeType>>::Scalar>::Real>::value>
{
public:
typedef internal::auto_diff_special_op
<DerivativeType, !internal::is_same<typename internal::traits<typename internal::remove_all<DerivativeType>::type>::Scalar,
typename NumTraits<typename internal::traits<typename internal::remove_all<DerivativeType>::type>::Scalar>::Real>::value> Base;
typedef typename internal::remove_all<DerivativeType>::type DerType;
<DerivativeType, !internal::is_same<typename internal::traits<internal::remove_all_t<DerivativeType>>::Scalar,
typename NumTraits<typename internal::traits<internal::remove_all_t<DerivativeType>>::Scalar>::Real>::value> Base;
typedef internal::remove_all_t<DerivativeType> DerType;
typedef typename internal::traits<DerType>::Scalar Scalar;
typedef typename NumTraits<Scalar>::Real Real;

@@ -108,9 +110,9 @@ class AutoDiffScalar
template<typename OtherDerType>
AutoDiffScalar(const AutoDiffScalar<OtherDerType>& other
#ifndef EIGEN_PARSED_BY_DOXYGEN
, typename internal::enable_if<
internal::is_same<Scalar, typename internal::traits<typename internal::remove_all<OtherDerType>::type>::Scalar>::value
&& internal::is_convertible<OtherDerType,DerType>::value , void*>::type = 0
, std::enable_if_t<
internal::is_same<Scalar, typename internal::traits<internal::remove_all_t<OtherDerType>>::Scalar>::value
&& internal::is_convertible<OtherDerType,DerType>::value , void*> = 0
#endif
)
: m_value(other.value()), m_derivatives(other.derivatives())
@@ -178,12 +180,12 @@ class AutoDiffScalar
template<typename OtherDerType> inline bool operator==(const AutoDiffScalar<OtherDerType>& b) const { return m_value == b.value(); }
template<typename OtherDerType> inline bool operator!=(const AutoDiffScalar<OtherDerType>& b) const { return m_value != b.value(); }

inline const AutoDiffScalar<DerType&> operator+(const Scalar& other) const
inline AutoDiffScalar<DerType&> operator+(const Scalar& other) const
{
return AutoDiffScalar<DerType&>(m_value + other, m_derivatives);
}

friend inline const AutoDiffScalar<DerType&> operator+(const Scalar& a, const AutoDiffScalar& b)
friend inline AutoDiffScalar<DerType&> operator+(const Scalar& a, const AutoDiffScalar& b)
{
return AutoDiffScalar<DerType&>(a + b.value(), b.derivatives());
}
@@ -205,11 +207,11 @@ class AutoDiffScalar
}

template<typename OtherDerType>
inline const AutoDiffScalar<CwiseBinaryOp<internal::scalar_sum_op<Scalar>,const DerType,const typename internal::remove_all<OtherDerType>::type> >
inline AutoDiffScalar<CwiseBinaryOp<internal::scalar_sum_op<Scalar>,const DerType,const internal::remove_all_t<OtherDerType>> >
operator+(const AutoDiffScalar<OtherDerType>& other) const
{
internal::make_coherent(m_derivatives, other.derivatives());
return AutoDiffScalar<CwiseBinaryOp<internal::scalar_sum_op<Scalar>,const DerType,const typename internal::remove_all<OtherDerType>::type> >(
return AutoDiffScalar<CwiseBinaryOp<internal::scalar_sum_op<Scalar>,const DerType,const internal::remove_all_t<OtherDerType>> >(
m_value + other.value(),
m_derivatives + other.derivatives());
}
@@ -222,12 +224,12 @@ class AutoDiffScalar
return *this;
}

inline const AutoDiffScalar<DerType&> operator-(const Scalar& b) const
inline AutoDiffScalar<DerType&> operator-(const Scalar& b) const
{
return AutoDiffScalar<DerType&>(m_value - b, m_derivatives);
}

friend inline const AutoDiffScalar<CwiseUnaryOp<internal::scalar_opposite_op<Scalar>, const DerType> >
friend inline AutoDiffScalar<CwiseUnaryOp<internal::scalar_opposite_op<Scalar>, const DerType> >
operator-(const Scalar& a, const AutoDiffScalar& b)
{
return AutoDiffScalar<CwiseUnaryOp<internal::scalar_opposite_op<Scalar>, const DerType> >
@@ -241,11 +243,11 @@ class AutoDiffScalar
}

template<typename OtherDerType>
inline const AutoDiffScalar<CwiseBinaryOp<internal::scalar_difference_op<Scalar>, const DerType,const typename internal::remove_all<OtherDerType>::type> >
inline AutoDiffScalar<CwiseBinaryOp<internal::scalar_difference_op<Scalar>, const DerType,const internal::remove_all_t<OtherDerType>> >
operator-(const AutoDiffScalar<OtherDerType>& other) const
{
internal::make_coherent(m_derivatives, other.derivatives());
return AutoDiffScalar<CwiseBinaryOp<internal::scalar_difference_op<Scalar>, const DerType,const typename internal::remove_all<OtherDerType>::type> >(
return AutoDiffScalar<CwiseBinaryOp<internal::scalar_difference_op<Scalar>, const DerType,const internal::remove_all_t<OtherDerType>> >(
m_value - other.value(),
m_derivatives - other.derivatives());
}
@@ -258,7 +260,7 @@ class AutoDiffScalar
return *this;
}

inline const AutoDiffScalar<CwiseUnaryOp<internal::scalar_opposite_op<Scalar>, const DerType> >
inline AutoDiffScalar<CwiseUnaryOp<internal::scalar_opposite_op<Scalar>, const DerType> >
operator-() const
{
return AutoDiffScalar<CwiseUnaryOp<internal::scalar_opposite_op<Scalar>, const DerType> >(
@@ -266,13 +268,13 @@ class AutoDiffScalar
-m_derivatives);
}

inline const AutoDiffScalar<EIGEN_EXPR_BINARYOP_SCALAR_RETURN_TYPE(DerType,Scalar,product) >
inline AutoDiffScalar<EIGEN_EXPR_BINARYOP_SCALAR_RETURN_TYPE(DerType,Scalar,product) >
operator*(const Scalar& other) const
{
return MakeAutoDiffScalar(m_value * other, m_derivatives * other);
}

friend inline const AutoDiffScalar<EIGEN_EXPR_BINARYOP_SCALAR_RETURN_TYPE(DerType,Scalar,product) >
friend inline AutoDiffScalar<EIGEN_EXPR_BINARYOP_SCALAR_RETURN_TYPE(DerType,Scalar,product) >
operator*(const Scalar& other, const AutoDiffScalar& a)
{
return MakeAutoDiffScalar(a.value() * other, a.derivatives() * other);
@@ -294,13 +296,13 @@ class AutoDiffScalar
// a.derivatives() * other);
// }

inline const AutoDiffScalar<EIGEN_EXPR_BINARYOP_SCALAR_RETURN_TYPE(DerType,Scalar,product) >
inline AutoDiffScalar<EIGEN_EXPR_BINARYOP_SCALAR_RETURN_TYPE(DerType,Scalar,product) >
operator/(const Scalar& other) const
{
return MakeAutoDiffScalar(m_value / other, (m_derivatives * (Scalar(1)/other)));
}

friend inline const AutoDiffScalar<EIGEN_EXPR_BINARYOP_SCALAR_RETURN_TYPE(DerType,Scalar,product) >
friend inline AutoDiffScalar<EIGEN_EXPR_BINARYOP_SCALAR_RETURN_TYPE(DerType,Scalar,product) >
operator/(const Scalar& other, const AutoDiffScalar& a)
{
return MakeAutoDiffScalar(other / a.value(), a.derivatives() * (Scalar(-other) / (a.value()*a.value())));
@@ -323,10 +325,10 @@ class AutoDiffScalar
// }

template<typename OtherDerType>
inline const AutoDiffScalar<EIGEN_EXPR_BINARYOP_SCALAR_RETURN_TYPE(
inline AutoDiffScalar<EIGEN_EXPR_BINARYOP_SCALAR_RETURN_TYPE(
CwiseBinaryOp<internal::scalar_difference_op<Scalar> EIGEN_COMMA
const EIGEN_EXPR_BINARYOP_SCALAR_RETURN_TYPE(DerType,Scalar,product) EIGEN_COMMA
const EIGEN_EXPR_BINARYOP_SCALAR_RETURN_TYPE(typename internal::remove_all<OtherDerType>::type,Scalar,product) >,Scalar,product) >
const EIGEN_EXPR_BINARYOP_SCALAR_RETURN_TYPE(internal::remove_all_t<OtherDerType>,Scalar,product) >,Scalar,product) >
operator/(const AutoDiffScalar<OtherDerType>& other) const
{
internal::make_coherent(m_derivatives, other.derivatives());
@@ -337,9 +339,9 @@ class AutoDiffScalar
}

template<typename OtherDerType>
inline const AutoDiffScalar<CwiseBinaryOp<internal::scalar_sum_op<Scalar>,
inline AutoDiffScalar<CwiseBinaryOp<internal::scalar_sum_op<Scalar>,
const EIGEN_EXPR_BINARYOP_SCALAR_RETURN_TYPE(DerType,Scalar,product),
const EIGEN_EXPR_BINARYOP_SCALAR_RETURN_TYPE(typename internal::remove_all<OtherDerType>::type,Scalar,product) > >
const EIGEN_EXPR_BINARYOP_SCALAR_RETURN_TYPE(internal::remove_all_t<OtherDerType>,Scalar,product) > >
operator*(const AutoDiffScalar<OtherDerType>& other) const
{
internal::make_coherent(m_derivatives, other.derivatives());
@@ -387,7 +389,7 @@ struct auto_diff_special_op<DerivativeType, true>
// : auto_diff_scalar_op<DerivativeType, typename NumTraits<Scalar>::Real,
// is_same<Scalar,typename NumTraits<Scalar>::Real>::value>
{
typedef typename remove_all<DerivativeType>::type DerType;
typedef remove_all_t<DerivativeType> DerType;
typedef typename traits<DerType>::Scalar Scalar;
typedef typename NumTraits<Scalar>::Real Real;

@@ -405,12 +407,12 @@ struct auto_diff_special_op<DerivativeType, true>
AutoDiffScalar<DerivativeType>& derived() { return *static_cast<AutoDiffScalar<DerivativeType>*>(this); }


inline const AutoDiffScalar<DerType&> operator+(const Real& other) const
inline AutoDiffScalar<DerType&> operator+(const Real& other) const
{
return AutoDiffScalar<DerType&>(derived().value() + other, derived().derivatives());
}

friend inline const AutoDiffScalar<DerType&> operator+(const Real& a, const AutoDiffScalar<DerivativeType>& b)
friend inline AutoDiffScalar<DerType&> operator+(const Real& a, const AutoDiffScalar<DerivativeType>& b)
{
return AutoDiffScalar<DerType&>(a + b.value(), b.derivatives());
}
@@ -422,7 +424,7 @@ struct auto_diff_special_op<DerivativeType, true>
}


inline const AutoDiffScalar<typename CwiseUnaryOp<bind2nd_op<scalar_product_op<Scalar,Real> >, DerType>::Type >
inline AutoDiffScalar<typename CwiseUnaryOp<bind2nd_op<scalar_product_op<Scalar,Real> >, DerType>::Type >
operator*(const Real& other) const
{
return AutoDiffScalar<typename CwiseUnaryOp<bind2nd_op<scalar_product_op<Scalar,Real> >, DerType>::Type >(
@@ -430,7 +432,7 @@ struct auto_diff_special_op<DerivativeType, true>
derived().derivatives() * other);
}

friend inline const AutoDiffScalar<typename CwiseUnaryOp<bind1st_op<scalar_product_op<Real,Scalar> >, DerType>::Type >
friend inline AutoDiffScalar<typename CwiseUnaryOp<bind1st_op<scalar_product_op<Real,Scalar> >, DerType>::Type >
operator*(const Real& other, const AutoDiffScalar<DerivativeType>& a)
{
return AutoDiffScalar<typename CwiseUnaryOp<bind1st_op<scalar_product_op<Real,Scalar> >, DerType>::Type >(
@@ -556,18 +558,18 @@ struct ScalarBinaryOpTraits<typename DerType::Scalar,AutoDiffScalar<DerType>, Bi

#define EIGEN_AUTODIFF_DECLARE_GLOBAL_UNARY(FUNC,CODE) \
template<typename DerType> \
inline const Eigen::AutoDiffScalar< \
EIGEN_EXPR_BINARYOP_SCALAR_RETURN_TYPE(typename Eigen::internal::remove_all<DerType>::type, typename Eigen::internal::traits<typename Eigen::internal::remove_all<DerType>::type>::Scalar, product) > \
inline Eigen::AutoDiffScalar< \
EIGEN_EXPR_BINARYOP_SCALAR_RETURN_TYPE(Eigen::internal::remove_all_t<DerType>, typename Eigen::internal::traits<Eigen::internal::remove_all_t<DerType>>::Scalar, product) > \
FUNC(const Eigen::AutoDiffScalar<DerType>& x) { \
using namespace Eigen; \
typedef typename Eigen::internal::traits<typename Eigen::internal::remove_all<DerType>::type>::Scalar Scalar; \
typedef typename Eigen::internal::traits<Eigen::internal::remove_all_t<DerType>>::Scalar Scalar; \
EIGEN_UNUSED_VARIABLE(sizeof(Scalar)); \
CODE; \
}

template<typename DerType>
struct CleanedUpDerType {
typedef AutoDiffScalar<typename Eigen::internal::remove_all<DerType>::type::PlainObject> type;
typedef AutoDiffScalar<typename Eigen::internal::remove_all_t<DerType>::PlainObject> type;
};

template<typename DerType>
@@ -639,9 +641,9 @@ EIGEN_AUTODIFF_DECLARE_GLOBAL_UNARY(log,
return Eigen::MakeAutoDiffScalar(log(x.value()),x.derivatives() * (Scalar(1)/x.value()));)

template<typename DerType>
inline const Eigen::AutoDiffScalar<
EIGEN_EXPR_BINARYOP_SCALAR_RETURN_TYPE(typename internal::remove_all<DerType>::type,typename internal::traits<typename internal::remove_all<DerType>::type>::Scalar,product) >
pow(const Eigen::AutoDiffScalar<DerType> &x, const typename internal::traits<typename internal::remove_all<DerType>::type>::Scalar &y)
inline Eigen::AutoDiffScalar<
EIGEN_EXPR_BINARYOP_SCALAR_RETURN_TYPE(internal::remove_all_t<DerType>, typename internal::traits<internal::remove_all_t<DerType>>::Scalar,product) >
pow(const Eigen::AutoDiffScalar<DerType> &x, const typename internal::traits<internal::remove_all_t<DerType>>::Scalar &y)
{
using namespace Eigen;
using std::pow;
@@ -650,11 +652,11 @@ pow(const Eigen::AutoDiffScalar<DerType> &x, const typename internal::traits<typ


template<typename DerTypeA,typename DerTypeB>
inline const AutoDiffScalar<Matrix<typename internal::traits<typename internal::remove_all<DerTypeA>::type>::Scalar,Dynamic,1> >
inline AutoDiffScalar<Matrix<typename internal::traits<internal::remove_all_t<DerTypeA>>::Scalar,Dynamic,1> >
atan2(const AutoDiffScalar<DerTypeA>& a, const AutoDiffScalar<DerTypeB>& b)
{
using std::atan2;
typedef typename internal::traits<typename internal::remove_all<DerTypeA>::type>::Scalar Scalar;
typedef typename internal::traits<internal::remove_all_t<DerTypeA>>::Scalar Scalar;
typedef AutoDiffScalar<Matrix<Scalar,Dynamic,1> > PlainADS;
PlainADS ret;
ret.value() = atan2(a.value(), b.value());
@@ -700,9 +702,9 @@ EIGEN_AUTODIFF_DECLARE_GLOBAL_UNARY(cosh,
#undef EIGEN_AUTODIFF_DECLARE_GLOBAL_UNARY

template<typename DerType> struct NumTraits<AutoDiffScalar<DerType> >
: NumTraits< typename NumTraits<typename internal::remove_all<DerType>::type::Scalar>::Real >
: NumTraits< typename NumTraits<typename internal::remove_all_t<DerType>::Scalar>::Real >
{
typedef typename internal::remove_all<DerType>::type DerTypeCleaned;
typedef internal::remove_all_t<DerType> DerTypeCleaned;
typedef AutoDiffScalar<Matrix<typename NumTraits<typename DerTypeCleaned::Scalar>::Real,DerTypeCleaned::RowsAtCompileTime,DerTypeCleaned::ColsAtCompileTime,
0, DerTypeCleaned::MaxRowsAtCompileTime, DerTypeCleaned::MaxColsAtCompileTime> > Real;
typedef AutoDiffScalar<DerType> NonInteger;
@@ -713,6 +715,23 @@ template<typename DerType> struct NumTraits<AutoDiffScalar<DerType> >
};
};

namespace internal {
template<typename DerivativeType>
struct is_identically_zero_impl<AutoDiffScalar<DerivativeType>> {
static inline bool run(const AutoDiffScalar<DerivativeType>& s)
{
const DerivativeType& derivatives = s.derivatives();
for(int i=0; i<derivatives.size(); ++i)
{
if(!numext::is_exactly_zero(derivatives[i]))
{
return false;
}
}
return numext::is_exactly_zero(s.value());
}
};
}
}

namespace std {

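As a quick orientation for the AutoDiffScalar changes above, a minimal forward-mode sketch (editorial, not part of the diff; variable names and seed values are invented):

#include <unsupported/Eigen/AutoDiff>
#include <Eigen/Core>
#include <iostream>

int main() {
  // AutoDiffScalar carries a value plus a vector of partial derivatives.
  typedef Eigen::AutoDiffScalar<Eigen::Vector2d> AD;
  AD x(1.0, 2, 0);                  // value 1.0, 2 derivatives, seeded in slot 0
  AD y(2.0, 2, 1);                  // value 2.0, 2 derivatives, seeded in slot 1
  AD f = x * y + sin(x);            // sin() comes from the global unary helpers above
  std::cout << "f(x,y)       = " << f.value() << "\n";
  std::cout << "df/dx, df/dy = " << f.derivatives().transpose() << "\n"; // [y + cos(x), x]
}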
@@ -10,6 +10,8 @@
#ifndef EIGEN_AUTODIFF_VECTOR_H
#define EIGEN_AUTODIFF_VECTOR_H

#include "./InternalHeaderCheck.h"

namespace Eigen {

/* \class AutoDiffScalar

@@ -0,0 +1,3 @@
#ifndef EIGEN_AUTODIFF_MODULE_H
#error "Please include unsupported/Eigen/AutoDiff instead of including headers inside the src directory directly."
#endif
@@ -10,6 +10,8 @@
#ifndef EIGEN_BVALGORITHMS_H
#define EIGEN_BVALGORITHMS_H

#include "./InternalHeaderCheck.h"

namespace Eigen {

namespace internal {

@@ -0,0 +1,3 @@
#ifndef EIGEN_BVH_MODULE_H
#error "Please include unsupported/Eigen/BVH instead of including headers inside the src directory directly."
#endif
@@ -10,6 +10,8 @@
#ifndef KDBVH_H_INCLUDED
#define KDBVH_H_INCLUDED

#include "./InternalHeaderCheck.h"

namespace Eigen {

namespace internal {
@@ -55,23 +57,23 @@ struct get_boxes_helper<ObjectList, VolumeList, int> {
/** \class KdBVH
* \brief A simple bounding volume hierarchy based on AlignedBox
*
* \param _Scalar The underlying scalar type of the bounding boxes
* \param _Dim The dimension of the space in which the hierarchy lives
* \param Scalar_ The underlying scalar type of the bounding boxes
* \param Dim_ The dimension of the space in which the hierarchy lives
* \param _Object The object type that lives in the hierarchy. It must have value semantics. Either bounding_box(_Object) must
* be defined and return an AlignedBox<_Scalar, _Dim> or bounding boxes must be provided to the tree initializer.
* be defined and return an AlignedBox<Scalar_, Dim_> or bounding boxes must be provided to the tree initializer.
*
* This class provides a simple (as opposed to optimized) implementation of a bounding volume hierarchy analogous to a Kd-tree.
* Given a sequence of objects, it computes their bounding boxes, constructs a Kd-tree of their centers
* and builds a BVH with the structure of that Kd-tree. When the elements of the tree are too expensive to be copied around,
* it is useful for _Object to be a pointer.
*/
template<typename _Scalar, int _Dim, typename _Object> class KdBVH
template<typename Scalar_, int Dim_, typename _Object> class KdBVH
{
public:
enum { Dim = _Dim };
enum { Dim = Dim_ };
typedef _Object Object;
typedef std::vector<Object, aligned_allocator<Object> > ObjectList;
typedef _Scalar Scalar;
typedef Scalar_ Scalar;
typedef AlignedBox<Scalar, Dim> Volume;
typedef std::vector<Volume, aligned_allocator<Volume> > VolumeList;
typedef int Index;

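The KdBVH documentation above describes the required bounding_box() hook and the query protocol. A minimal sketch of how this is typically wired up with BVIntersect (editorial, assuming C++17 and the intersector interface from BVAlgorithms.h; all names and values are illustrative):

#include <unsupported/Eigen/BVH>
#include <Eigen/Geometry>
#include <vector>
#include <iostream>

namespace Eigen {
// bounding_box must be visible for Object = Vector2d (degenerate box around the point).
AlignedBox2d bounding_box(const Vector2d& p) { return AlignedBox2d(p, p); }
}

// Intersector that counts stored points falling inside a query box.
struct PointsInBox {
  Eigen::AlignedBox2d query;
  int hits = 0;
  bool intersectVolume(const Eigen::AlignedBox2d& box) { return query.intersects(box); }
  bool intersectObject(const Eigen::Vector2d& p) {
    if (query.contains(p)) ++hits;
    return false;  // false: keep traversing
  }
};

int main() {
  std::vector<Eigen::Vector2d> pts;
  pts.push_back(Eigen::Vector2d(0.1, 0.2));
  pts.push_back(Eigen::Vector2d(0.5, 0.5));
  pts.push_back(Eigen::Vector2d(2.0, 2.0));

  Eigen::KdBVH<double, 2, Eigen::Vector2d> tree(pts.begin(), pts.end());
  PointsInBox q;
  q.query = Eigen::AlignedBox2d(Eigen::Vector2d(0, 0), Eigen::Vector2d(1, 1));
  Eigen::BVIntersect(tree, q);
  std::cout << q.hits << " points inside the unit box\n";  // expected: 2
}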
@@ -12,6 +12,8 @@

#include "../../../../Eigen/Dense"

#include "./InternalHeaderCheck.h"

namespace Eigen {

namespace internal {

@@ -0,0 +1,3 @@
#ifndef EIGEN_EIGENVALUES_MODULE_H
#error "Please include unsupported/Eigen/Eigenvalues instead of including headers inside the src directory directly."
#endif
@@ -10,6 +10,8 @@
#ifndef EIGEN_EULERANGLESCLASS_H// TODO: Fix previous "EIGEN_EULERANGLES_H" definition?
#define EIGEN_EULERANGLESCLASS_H

#include "./InternalHeaderCheck.h"

namespace Eigen
{
/** \class EulerAngles
@@ -92,18 +94,18 @@ namespace Eigen
*
* More information about Euler angles: https://en.wikipedia.org/wiki/Euler_angles
*
* \tparam _Scalar the scalar type, i.e. the type of the angles.
* \tparam Scalar_ the scalar type, i.e. the type of the angles.
*
* \tparam _System the EulerSystem to use, which represents the axes of rotation.
*/
template <typename _Scalar, class _System>
class EulerAngles : public RotationBase<EulerAngles<_Scalar, _System>, 3>
template <typename Scalar_, class _System>
class EulerAngles : public RotationBase<EulerAngles<Scalar_, _System>, 3>
{
public:
typedef RotationBase<EulerAngles<_Scalar, _System>, 3> Base;
typedef RotationBase<EulerAngles<Scalar_, _System>, 3> Base;

/** the scalar type of the angles */
typedef _Scalar Scalar;
typedef Scalar_ Scalar;
typedef typename NumTraits<Scalar>::Real RealScalar;

/** the EulerSystem to use, which represents the axes of rotation. */
@@ -322,10 +324,10 @@ EIGEN_EULER_ANGLES_TYPEDEFS(double, d)

namespace internal
{
template<typename _Scalar, class _System>
struct traits<EulerAngles<_Scalar, _System> >
template<typename Scalar_, class _System>
struct traits<EulerAngles<Scalar_, _System> >
{
typedef _Scalar Scalar;
typedef Scalar_ Scalar;
};

// set from a rotation matrix

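For context, a minimal sketch of how the EulerAngles class touched above is typically used, relying on the predefined EulerAnglesZYXd typedef from the EIGEN_EULER_ANGLES_TYPEDEFS(double, d) expansion referenced in the hunk (editorial example, angles are arbitrary):

#include <unsupported/Eigen/EulerAngles>
#include <iostream>

int main() {
  // Z-Y-X (yaw-pitch-roll) convention, double precision.
  Eigen::EulerAnglesZYXd e(0.5, 0.1, -0.3);        // alpha, beta, gamma in radians
  Eigen::Matrix3d R = e.toRotationMatrix();        // compose the rotation
  Eigen::EulerAnglesZYXd back(R);                  // recover angles from the matrix
  std::cout << back.angles().transpose() << std::endl;
}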
@@ -10,10 +10,12 @@
#ifndef EIGEN_EULERSYSTEM_H
#define EIGEN_EULERSYSTEM_H

#include "./InternalHeaderCheck.h"

namespace Eigen
{
// Forward declarations
template <typename _Scalar, class _System>
template <typename Scalar_, class _System>
class EulerAngles;

namespace internal
@@ -130,13 +132,13 @@ namespace Eigen
// that enum is not guerantee to support negative numbers

/** The first rotation axis */
static const int AlphaAxis = _AlphaAxis;
static constexpr int AlphaAxis = _AlphaAxis;

/** The second rotation axis */
static const int BetaAxis = _BetaAxis;
static constexpr int BetaAxis = _BetaAxis;

/** The third rotation axis */
static const int GammaAxis = _GammaAxis;
static constexpr int GammaAxis = _GammaAxis;

enum
{
@@ -260,7 +262,7 @@ namespace Eigen
{
CalcEulerAngles_imp(
res.angles(), mat,
typename internal::conditional<IsTaitBryan, internal::true_type, internal::false_type>::type());
std::conditional_t<IsTaitBryan, internal::true_type, internal::false_type>());

if (IsAlphaOpposite)
res.alpha() = -res.alpha();
@@ -272,7 +274,7 @@ namespace Eigen
res.gamma() = -res.gamma();
}

template <typename _Scalar, class _System>
template <typename Scalar_, class _System>
friend class Eigen::EulerAngles;

template<typename System,

@@ -0,0 +1,3 @@
#ifndef EIGEN_EULERANGLES_MODULE_H
#error "Please include unsupported/Eigen/EulerAngles instead of including headers inside the src directory directly."
#endif
@@ -0,0 +1,3 @@
#ifndef EIGEN_FFT_MODULE_H
#error "Please include unsupported/Eigen/FFT instead of including headers inside the src directory directly."
#endif
@@ -7,6 +7,8 @@
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#include "./InternalHeaderCheck.h"

namespace Eigen {

namespace internal {
@@ -173,10 +175,10 @@ namespace internal {
}
};

template <typename _Scalar>
template <typename Scalar_>
struct fftw_impl
{
typedef _Scalar Scalar;
typedef Scalar_ Scalar;
typedef std::complex<Scalar> Complex;

inline
libs/eigen/unsupported/Eigen/src/FFT/ei_imklfft_impl.h (288 lines, Normal file)
@@ -0,0 +1,288 @@
|
||||
// This file is part of Eigen, a lightweight C++ template library
|
||||
// for linear algebra.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla
|
||||
// Public License v. 2.0. If a copy of the MPL was not distributed
|
||||
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
#include <mkl_dfti.h>
|
||||
|
||||
#include "./InternalHeaderCheck.h"
|
||||
|
||||
#include <complex>
|
||||
|
||||
namespace Eigen {
|
||||
namespace internal {
|
||||
namespace imklfft {
|
||||
|
||||
#define RUN_OR_ASSERT(EXPR, ERROR_MSG) \
|
||||
{ \
|
||||
MKL_LONG status = (EXPR); \
|
||||
eigen_assert(status == DFTI_NO_ERROR && (ERROR_MSG)); \
|
||||
};
|
||||
|
||||
inline MKL_Complex16* complex_cast(const std::complex<double>* p) {
|
||||
return const_cast<MKL_Complex16*>(reinterpret_cast<const MKL_Complex16*>(p));
|
||||
}
|
||||
|
||||
inline MKL_Complex8* complex_cast(const std::complex<float>* p) {
|
||||
return const_cast<MKL_Complex8*>(reinterpret_cast<const MKL_Complex8*>(p));
|
||||
}
|
||||
|
||||
/*
|
||||
* Parameters:
|
||||
* precision: enum, Precision of the transform: DFTI_SINGLE or DFTI_DOUBLE.
|
||||
* forward_domain: enum, Forward domain of the transform: DFTI_COMPLEX or
|
||||
* DFTI_REAL. dimension: MKL_LONG Dimension of the transform. sizes: MKL_LONG if
|
||||
* dimension = 1.Length of the transform for a one-dimensional transform. sizes:
|
||||
* Array of type MKL_LONG otherwise. Lengths of each dimension for a
|
||||
* multi-dimensional transform.
|
||||
*/
|
||||
inline void configure_descriptor(DFTI_DESCRIPTOR_HANDLE* handl,
|
||||
enum DFTI_CONFIG_VALUE precision,
|
||||
enum DFTI_CONFIG_VALUE forward_domain,
|
||||
MKL_LONG dimension, MKL_LONG* sizes) {
|
||||
eigen_assert(dimension == 1 ||
|
||||
dimension == 2 &&
|
||||
"Transformation dimension must be less than 3.");
|
||||
|
||||
if (dimension == 1) {
|
||||
RUN_OR_ASSERT(DftiCreateDescriptor(handl, precision, forward_domain,
|
||||
dimension, *sizes),
|
||||
"DftiCreateDescriptor failed.")
|
||||
if (forward_domain == DFTI_REAL) {
|
||||
// Set CCE storage
|
||||
RUN_OR_ASSERT(DftiSetValue(*handl, DFTI_CONJUGATE_EVEN_STORAGE,
|
||||
DFTI_COMPLEX_COMPLEX),
|
||||
"DftiSetValue failed.")
|
||||
}
|
||||
} else {
|
||||
RUN_OR_ASSERT(
|
||||
DftiCreateDescriptor(handl, precision, DFTI_COMPLEX, dimension, sizes),
|
||||
"DftiCreateDescriptor failed.")
|
||||
}
|
||||
|
||||
RUN_OR_ASSERT(DftiSetValue(*handl, DFTI_PLACEMENT, DFTI_NOT_INPLACE),
|
||||
"DftiSetValue failed.")
|
||||
RUN_OR_ASSERT(DftiCommitDescriptor(*handl), "DftiCommitDescriptor failed.")
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
struct plan {};
|
||||
|
||||
template <>
|
||||
struct plan<float> {
|
||||
typedef float scalar_type;
|
||||
typedef MKL_Complex8 complex_type;
|
||||
|
||||
DFTI_DESCRIPTOR_HANDLE m_plan;
|
||||
|
||||
plan() : m_plan(0) {}
|
||||
~plan() {
|
||||
if (m_plan) DftiFreeDescriptor(&m_plan);
|
||||
};
|
||||
|
||||
enum DFTI_CONFIG_VALUE precision = DFTI_SINGLE;
|
||||
|
||||
inline void forward(complex_type* dst, complex_type* src, MKL_LONG nfft) {
|
||||
if (m_plan == 0) {
|
||||
configure_descriptor(&m_plan, precision, DFTI_COMPLEX, 1, &nfft);
|
||||
}
|
||||
RUN_OR_ASSERT(DftiComputeForward(m_plan, src, dst),
|
||||
"DftiComputeForward failed.")
|
||||
}
|
||||
|
||||
inline void inverse(complex_type* dst, complex_type* src, MKL_LONG nfft) {
|
||||
if (m_plan == 0) {
|
||||
configure_descriptor(&m_plan, precision, DFTI_COMPLEX, 1, &nfft);
|
||||
}
|
||||
RUN_OR_ASSERT(DftiComputeBackward(m_plan, src, dst),
|
||||
"DftiComputeBackward failed.")
|
||||
}
|
||||
|
||||
inline void forward(complex_type* dst, scalar_type* src, MKL_LONG nfft) {
|
||||
if (m_plan == 0) {
|
||||
configure_descriptor(&m_plan, precision, DFTI_REAL, 1, &nfft);
|
||||
}
|
||||
RUN_OR_ASSERT(DftiComputeForward(m_plan, src, dst),
|
||||
"DftiComputeForward failed.")
|
||||
}
|
||||
|
||||
inline void inverse(scalar_type* dst, complex_type* src, MKL_LONG nfft) {
|
||||
if (m_plan == 0) {
|
||||
configure_descriptor(&m_plan, precision, DFTI_REAL, 1, &nfft);
|
||||
}
|
||||
RUN_OR_ASSERT(DftiComputeBackward(m_plan, src, dst),
|
||||
"DftiComputeBackward failed.")
|
||||
}
|
||||
|
||||
inline void forward2(complex_type* dst, complex_type* src, int n0, int n1) {
|
||||
if (m_plan == 0) {
|
||||
MKL_LONG sizes[2] = {n0, n1};
|
||||
configure_descriptor(&m_plan, precision, DFTI_COMPLEX, 2, sizes);
|
||||
}
|
||||
RUN_OR_ASSERT(DftiComputeForward(m_plan, src, dst),
|
||||
"DftiComputeForward failed.")
|
||||
}
|
||||
|
||||
inline void inverse2(complex_type* dst, complex_type* src, int n0, int n1) {
|
||||
if (m_plan == 0) {
|
||||
MKL_LONG sizes[2] = {n0, n1};
|
||||
configure_descriptor(&m_plan, precision, DFTI_COMPLEX, 2, sizes);
|
||||
}
|
||||
RUN_OR_ASSERT(DftiComputeBackward(m_plan, src, dst),
|
||||
"DftiComputeBackward failed.")
|
||||
}
|
||||
};
|
||||
|
||||
template <>
|
||||
struct plan<double> {
|
||||
typedef double scalar_type;
|
||||
typedef MKL_Complex16 complex_type;
|
||||
|
||||
DFTI_DESCRIPTOR_HANDLE m_plan;
|
||||
|
||||
plan() : m_plan(0) {}
|
||||
~plan() {
|
||||
if (m_plan) DftiFreeDescriptor(&m_plan);
|
||||
};
|
||||
|
||||
enum DFTI_CONFIG_VALUE precision = DFTI_DOUBLE;
|
||||
|
||||
inline void forward(complex_type* dst, complex_type* src, MKL_LONG nfft) {
|
||||
if (m_plan == 0) {
|
||||
configure_descriptor(&m_plan, precision, DFTI_COMPLEX, 1, &nfft);
|
||||
}
|
||||
RUN_OR_ASSERT(DftiComputeForward(m_plan, src, dst),
|
||||
"DftiComputeForward failed.")
|
||||
}
|
||||
|
||||
inline void inverse(complex_type* dst, complex_type* src, MKL_LONG nfft) {
|
||||
if (m_plan == 0) {
|
||||
configure_descriptor(&m_plan, precision, DFTI_COMPLEX, 1, &nfft);
|
||||
}
|
||||
RUN_OR_ASSERT(DftiComputeBackward(m_plan, src, dst),
|
||||
"DftiComputeBackward failed.")
|
||||
}
|
||||
|
||||
inline void forward(complex_type* dst, scalar_type* src, MKL_LONG nfft) {
|
||||
if (m_plan == 0) {
|
||||
configure_descriptor(&m_plan, precision, DFTI_REAL, 1, &nfft);
|
||||
}
|
||||
RUN_OR_ASSERT(DftiComputeForward(m_plan, src, dst),
|
||||
"DftiComputeForward failed.")
|
||||
}
|
||||
|
||||
inline void inverse(scalar_type* dst, complex_type* src, MKL_LONG nfft) {
|
||||
if (m_plan == 0) {
|
||||
configure_descriptor(&m_plan, precision, DFTI_REAL, 1, &nfft);
|
||||
}
|
||||
RUN_OR_ASSERT(DftiComputeBackward(m_plan, src, dst),
|
||||
"DftiComputeBackward failed.")
|
||||
}
|
||||
|
||||
inline void forward2(complex_type* dst, complex_type* src, int n0, int n1) {
|
||||
if (m_plan == 0) {
|
||||
MKL_LONG sizes[2] = {n0, n1};
|
||||
configure_descriptor(&m_plan, precision, DFTI_COMPLEX, 2, sizes);
|
||||
}
|
||||
RUN_OR_ASSERT(DftiComputeForward(m_plan, src, dst),
|
||||
"DftiComputeForward failed.")
|
||||
}
|
||||
|
||||
inline void inverse2(complex_type* dst, complex_type* src, int n0, int n1) {
|
||||
if (m_plan == 0) {
|
||||
MKL_LONG sizes[2] = {n0, n1};
|
||||
configure_descriptor(&m_plan, precision, DFTI_COMPLEX, 2, sizes);
|
||||
}
|
||||
RUN_OR_ASSERT(DftiComputeBackward(m_plan, src, dst),
|
||||
"DftiComputeBackward failed.")
|
||||
}
|
||||
};
|
||||
|
||||
template <typename Scalar_>
|
||||
struct imklfft_impl {
|
||||
typedef Scalar_ Scalar;
|
||||
typedef std::complex<Scalar> Complex;
|
||||
|
||||
inline void clear() { m_plans.clear(); }
|
||||
|
||||
// complex-to-complex forward FFT
|
||||
inline void fwd(Complex* dst, const Complex* src, int nfft) {
|
||||
MKL_LONG size = nfft;
|
||||
get_plan(nfft, dst, src)
|
||||
.forward(complex_cast(dst), complex_cast(src), size);
|
||||
}
|
||||
|
||||
// real-to-complex forward FFT
|
||||
inline void fwd(Complex* dst, const Scalar* src, int nfft) {
|
||||
MKL_LONG size = nfft;
|
||||
get_plan(nfft, dst, src)
|
||||
.forward(complex_cast(dst), const_cast<Scalar*>(src), nfft);
|
||||
}
|
||||
|
||||
// 2-d complex-to-complex
|
||||
inline void fwd2(Complex* dst, const Complex* src, int n0, int n1) {
|
||||
get_plan(n0, n1, dst, src)
|
||||
.forward2(complex_cast(dst), complex_cast(src), n0, n1);
|
||||
}
|
||||
|
||||
// inverse complex-to-complex
|
||||
inline void inv(Complex* dst, const Complex* src, int nfft) {
|
||||
MKL_LONG size = nfft;
|
||||
get_plan(nfft, dst, src)
|
||||
.inverse(complex_cast(dst), complex_cast(src), nfft);
|
||||
}
|
||||
|
||||
// half-complex to scalar
|
||||
inline void inv(Scalar* dst, const Complex* src, int nfft) {
|
||||
MKL_LONG size = nfft;
|
||||
get_plan(nfft, dst, src)
|
||||
.inverse(const_cast<Scalar*>(dst), complex_cast(src), nfft);
|
||||
}
|
||||
|
||||
// 2-d complex-to-complex
|
||||
inline void inv2(Complex* dst, const Complex* src, int n0, int n1) {
|
||||
get_plan(n0, n1, dst, src)
|
||||
.inverse2(complex_cast(dst), complex_cast(src), n0, n1);
|
||||
}
|
||||
|
||||
private:
|
||||
std::map<int64_t, plan<Scalar>> m_plans;
|
||||
|
||||
inline plan<Scalar>& get_plan(int nfft, void* dst,
|
||||
const void* src) {
|
||||
int inplace = dst == src ? 1 : 0;
|
||||
int aligned = ((reinterpret_cast<size_t>(src) & 15) |
|
||||
(reinterpret_cast<size_t>(dst) & 15)) == 0
|
||||
? 1
|
||||
: 0;
|
||||
int64_t key = ((nfft << 2) | (inplace << 1) | aligned)
|
||||
<< 1;
|
||||
|
||||
// Create element if key does not exist.
|
||||
return m_plans[key];
|
||||
}
|
||||
|
||||
inline plan<Scalar>& get_plan(int n0, int n1, void* dst,
|
||||
const void* src) {
|
||||
int inplace = (dst == src) ? 1 : 0;
|
||||
int aligned = ((reinterpret_cast<size_t>(src) & 15) |
|
||||
(reinterpret_cast<size_t>(dst) & 15)) == 0
|
||||
? 1
|
||||
: 0;
|
||||
int64_t key = (((((int64_t)n0) << 31) | (n1 << 2) |
|
||||
(inplace << 1) | aligned)
|
||||
<< 1) +
|
||||
1;
|
||||
|
||||
// Create element if key does not exist.
|
||||
return m_plans[key];
|
||||
}
|
||||
};
|
||||
|
||||
#undef RUN_OR_ASSERT
|
||||
|
||||
} // namespace imklfft
|
||||
} // namespace internal
|
||||
} // namespace Eigen
|
||||
@@ -7,6 +7,8 @@
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#include "./InternalHeaderCheck.h"

namespace Eigen {

namespace internal {
@@ -14,10 +16,10 @@ namespace internal {
// This FFT implementation was derived from kissfft http:sourceforge.net/projects/kissfft
// Copyright 2003-2009 Mark Borgerding

template <typename _Scalar>
template <typename Scalar_>
struct kiss_cpx_fft
{
typedef _Scalar Scalar;
typedef Scalar_ Scalar;
typedef std::complex<Scalar> Complex;
std::vector<Complex> m_twiddles;
std::vector<int> m_stageRadix;
@@ -90,9 +92,9 @@ struct kiss_cpx_fft
}while(n>1);
}

template <typename _Src>
template <typename Src_>
inline
void work( int stage,Complex * xout, const _Src * xin, size_t fstride,size_t in_stride)
void work( int stage,Complex * xout, const Src_ * xin, size_t fstride,size_t in_stride)
{
int p = m_stageRadix[stage];
int m = m_stageRemainder[stage];
@@ -292,10 +294,10 @@ struct kiss_cpx_fft
}
};

template <typename _Scalar>
template <typename Scalar_>
struct kissfft_impl
{
typedef _Scalar Scalar;
typedef Scalar_ Scalar;
typedef std::complex<Scalar> Complex;

void clear()

libs/eigen/unsupported/Eigen/src/FFT/ei_pocketfft_impl.h (69 lines, Normal file)
@@ -0,0 +1,69 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

using namespace pocketfft;
using namespace pocketfft::detail;

namespace Eigen {

namespace internal {

template<typename _Scalar>
struct pocketfft_impl
{
typedef _Scalar Scalar;
typedef std::complex<Scalar> Complex;

inline void clear() {}

inline void fwd(Complex* dst, const Scalar* src, int nfft){
const shape_t shape_{ static_cast<size_t>(nfft) };
const shape_t axes_{ 0 };
const stride_t stride_in{ sizeof(Scalar) };
const stride_t stride_out{ sizeof(Complex) };
r2c(shape_, stride_in, stride_out, axes_, FORWARD, src, dst, static_cast<Scalar>(1));
}

inline void fwd(Complex* dst, const Complex* src, int nfft){
const shape_t shape_{ static_cast<size_t>(nfft) };
const shape_t axes_{ 0 };
const stride_t stride_{ sizeof(Complex) };
c2c(shape_, stride_, stride_, axes_, FORWARD, src, dst, static_cast<Scalar>(1));
}

inline void inv(Scalar* dst, const Complex* src, int nfft){
const shape_t shape_{ static_cast<size_t>(nfft) };
const shape_t axes_{ 0 };
const stride_t stride_in{ sizeof(Complex) };
const stride_t stride_out{ sizeof(Scalar) };
c2r(shape_, stride_in, stride_out, axes_, BACKWARD, src, dst, static_cast<Scalar>(1));
}

inline void inv(Complex* dst, const Complex* src, int nfft){
const shape_t shape_{ static_cast<size_t>(nfft) };
const shape_t axes_{ 0 };
const stride_t stride_{ sizeof(Complex) };
c2c(shape_, stride_, stride_, axes_, BACKWARD, src, dst, static_cast<Scalar>(1));
}

inline void fwd2(Complex* dst, const Complex* src, int nfft0, int nfft1){
const shape_t shape_{ static_cast<size_t>(nfft0), static_cast<size_t>(nfft1) };
const shape_t axes_{ 0, 1 };
const stride_t stride_{ static_cast<ptrdiff_t>(sizeof(Complex)*nfft1), static_cast<ptrdiff_t>(sizeof(Complex)) };
c2c(shape_, stride_, stride_, axes_, FORWARD, src, dst, static_cast<Scalar>(1));
}

inline void inv2(Complex* dst, const Complex* src, int nfft0, int nfft1){
const shape_t shape_{ static_cast<size_t>(nfft0), static_cast<size_t>(nfft1) };
const shape_t axes_{ 0, 1 };
const stride_t stride_{ static_cast<ptrdiff_t>(sizeof(Complex)*nfft1), static_cast<ptrdiff_t>(sizeof(Complex)) };
c2c(shape_, stride_, stride_, axes_, BACKWARD, src, dst, static_cast<Scalar>(1));
}
};

} // namespace internal
} // namespace Eigen
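All of the FFT backends touched in this commit (fftw, MKL, kissfft, pocketfft) sit behind the same Eigen::FFT front end, so a short usage sketch may help orient the reader (editorial, not part of the diff; the signal is arbitrary):

#include <unsupported/Eigen/FFT>
#include <vector>
#include <complex>
#include <cmath>
#include <iostream>

int main() {
  Eigen::FFT<float> fft;   // kissfft backend by default; other backends are chosen at build time
  std::vector<float> signal(8);
  for (std::size_t i = 0; i < signal.size(); ++i) signal[i] = std::cos(0.25f * float(i));

  std::vector<std::complex<float> > spectrum;
  fft.fwd(spectrum, signal);      // real-to-complex forward transform
  std::vector<float> roundtrip;
  fft.inv(roundtrip, spectrum);   // complex-to-real inverse, rescaled
  std::cout << roundtrip[0] << std::endl;   // ~signal[0]
}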
libs/eigen/unsupported/Eigen/src/IterativeSolvers/BiCGSTABL.h (339 lines, Normal file)
@@ -0,0 +1,339 @@
|
||||
// This file is part of Eigen, a lightweight C++ template library
|
||||
// for linear algebra.
|
||||
//
|
||||
// Copyright (C) 2020 Chris Schoutrop <c.e.m.schoutrop@tue.nl>
|
||||
// Copyright (C) 2020 Jens Wehner <j.wehner@esciencecenter.nl>
|
||||
// Copyright (C) 2020 Jan van Dijk <j.v.dijk@tue.nl>
|
||||
// Copyright (C) 2020 Adithya Vijaykumar
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla
|
||||
// Public License v. 2.0. If a copy of the MPL was not distributed
|
||||
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
/*
|
||||
|
||||
This implementation of BiCGStab(L) is based on the papers
|
||||
General algorithm:
|
||||
1. G.L.G. Sleijpen, D.R. Fokkema. (1993). BiCGstab(l) for linear equations
|
||||
involving unsymmetric matrices with complex spectrum. Electronic Transactions
|
||||
on Numerical Analysis. Polynomial step update:
|
||||
2. G.L.G. Sleijpen, M.B. Van Gijzen. (2010) Exploiting BiCGstab(l)
|
||||
strategies to induce dimension reduction SIAM Journal on Scientific Computing.
|
||||
3. Fokkema, Diederik R. Enhanced implementation of BiCGstab (l) for
|
||||
solving linear systems of equations. Universiteit Utrecht. Mathematisch
|
||||
Instituut, 1996
|
||||
4. Sleijpen, G. L., & van der Vorst, H. A. (1996). Reliable updated
|
||||
residuals in hybrid Bi-CG methods. Computing, 56(2), 141-163.
|
||||
*/
|
||||
|
||||
#ifndef EIGEN_BICGSTABL_H
|
||||
#define EIGEN_BICGSTABL_H
|
||||
|
||||
namespace Eigen {
|
||||
|
||||
namespace internal {
|
||||
/** \internal Low-level bi conjugate gradient stabilized algorithm with L
|
||||
additional residual minimization steps \param mat The matrix A \param rhs The
|
||||
right hand side vector b \param x On input and initial solution, on output
|
||||
the computed solution. \param precond A preconditioner being able to
|
||||
efficiently solve for an approximation of Ax=b (regardless of b) \param iters
|
||||
On input the max number of iteration, on output the number of performed
|
||||
iterations. \param tol_error On input the tolerance error, on output an
|
||||
estimation of the relative error. \param L On input Number of additional
|
||||
GMRES steps to take. If L is too large (~20) instabilities occur. \return
|
||||
false in the case of numerical issue, for example a break down of BiCGSTABL.
|
||||
*/
|
||||
template <typename MatrixType, typename Rhs, typename Dest, typename Preconditioner>
|
||||
bool bicgstabl(const MatrixType &mat, const Rhs &rhs, Dest &x, const Preconditioner &precond, Index &iters,
|
||||
typename Dest::RealScalar &tol_error, Index L) {
|
||||
using numext::abs;
|
||||
using numext::sqrt;
|
||||
typedef typename Dest::RealScalar RealScalar;
|
||||
typedef typename Dest::Scalar Scalar;
|
||||
const Index N = rhs.size();
|
||||
L = L < x.rows() ? L : x.rows();
|
||||
|
||||
Index k = 0;
|
||||
|
||||
const RealScalar tol = tol_error;
|
||||
const Index maxIters = iters;
|
||||
|
||||
typedef Matrix<Scalar, Dynamic, 1> VectorType;
|
||||
typedef Matrix<Scalar, Dynamic, Dynamic, ColMajor> DenseMatrixType;
|
||||
|
||||
DenseMatrixType rHat(N, L + 1);
|
||||
DenseMatrixType uHat(N, L + 1);
|
||||
|
||||
// We start with an initial guess x_0 and let us set r_0 as (residual
|
||||
// calculated from x_0)
|
||||
VectorType x0 = x;
|
||||
rHat.col(0) = rhs - mat * x0; // r_0
|
||||
|
||||
x.setZero(); // This will contain the updates to the solution.
|
||||
// rShadow is arbritary, but must never be orthogonal to any residual.
|
||||
VectorType rShadow = VectorType::Random(N);
|
||||
|
||||
VectorType x_prime = x;
|
||||
|
||||
// Redundant: x is already set to 0
|
||||
// x.setZero();
|
||||
VectorType b_prime = rHat.col(0);
|
||||
|
||||
// Other vectors and scalars initialization
|
||||
Scalar rho0 = 1.0;
|
||||
Scalar alpha = 0.0;
|
||||
Scalar omega = 1.0;
|
||||
|
||||
uHat.col(0).setZero();
|
||||
|
||||
bool bicg_convergence = false;
|
||||
|
||||
const RealScalar normb = rhs.stableNorm();
|
||||
if (internal::isApprox(normb, RealScalar(0))) {
|
||||
x.setZero();
|
||||
iters = 0;
|
||||
return true;
|
||||
}
|
||||
RealScalar normr = rHat.col(0).stableNorm();
|
||||
RealScalar Mx = normr;
|
||||
RealScalar Mr = normr;
|
||||
|
||||
// Keep track of the solution with the lowest residual
|
||||
RealScalar normr_min = normr;
|
||||
VectorType x_min = x_prime + x;
|
||||
|
||||
// Criterion for when to apply the group-wise update, conform ref 3.
|
||||
const RealScalar delta = 0.01;
|
||||
|
||||
bool compute_res = false;
|
||||
bool update_app = false;
|
||||
|
||||
while (normr > tol * normb && k < maxIters) {
|
||||
rho0 *= -omega;
|
||||
|
||||
for (Index j = 0; j < L; ++j) {
|
||||
const Scalar rho1 = rShadow.dot(rHat.col(j));
|
||||
|
||||
if (!(numext::isfinite)(rho1) || rho0 == RealScalar(0.0)) {
|
||||
// We cannot continue computing, return the best solution found.
|
||||
x += x_prime;
|
||||
|
||||
// Check if x is better than the best stored solution thus far.
|
||||
normr = (rhs - mat * (precond.solve(x) + x0)).stableNorm();
|
||||
|
||||
if (normr > normr_min || !(numext::isfinite)(normr)) {
|
||||
// x_min is a better solution than x, return x_min
|
||||
x = x_min;
|
||||
normr = normr_min;
|
||||
}
|
||||
tol_error = normr / normb;
|
||||
iters = k;
|
||||
// x contains the updates to x0, add those back to obtain the solution
|
||||
x = precond.solve(x);
|
||||
x += x0;
|
||||
return (normr < tol * normb);
|
||||
}
|
||||
|
||||
const Scalar beta = alpha * (rho1 / rho0);
|
||||
rho0 = rho1;
|
||||
// Update search directions
|
||||
uHat.leftCols(j + 1) = rHat.leftCols(j + 1) - beta * uHat.leftCols(j + 1);
|
||||
uHat.col(j + 1) = mat * precond.solve(uHat.col(j));
|
||||
const Scalar sigma = rShadow.dot(uHat.col(j + 1));
|
||||
alpha = rho1 / sigma;
|
||||
// Update residuals
|
||||
rHat.leftCols(j + 1) -= alpha * uHat.middleCols(1, j + 1);
|
||||
rHat.col(j + 1) = mat * precond.solve(rHat.col(j));
|
||||
// Complete BiCG iteration by updating x
|
||||
x += alpha * uHat.col(0);
|
||||
normr = rHat.col(0).stableNorm();
|
||||
// Check for early exit
|
||||
if (normr < tol * normb) {
|
||||
/*
|
||||
Convergence was achieved during BiCG step.
|
||||
Without this check BiCGStab(L) fails for trivial matrices, such as
|
||||
when the preconditioner already is the inverse, or the input matrix is
|
||||
identity.
|
||||
*/
|
||||
bicg_convergence = true;
|
||||
break;
|
||||
} else if (normr < normr_min) {
|
||||
// We found an x with lower residual, keep this one.
|
||||
x_min = x + x_prime;
|
||||
normr_min = normr;
|
||||
}
|
||||
}
|
||||
if (!bicg_convergence) {
|
||||
/*
|
||||
The polynomial/minimize residual step.
|
||||
|
||||
QR Householder method for argmin is more stable than (modified)
|
||||
Gram-Schmidt, in the sense that there is less loss of orthogonality. It
|
||||
is more accurate than solving the normal equations, since the normal
|
||||
equations scale with condition number squared.
|
||||
*/
|
||||
const VectorType gamma = rHat.rightCols(L).householderQr().solve(rHat.col(0));
|
||||
x += rHat.leftCols(L) * gamma;
|
||||
rHat.col(0) -= rHat.rightCols(L) * gamma;
|
||||
uHat.col(0) -= uHat.rightCols(L) * gamma;
|
||||
normr = rHat.col(0).stableNorm();
|
||||
omega = gamma(L - 1);
|
||||
}
|
||||
if (normr < normr_min) {
|
||||
// We found an x with lower residual, keep this one.
|
||||
x_min = x + x_prime;
|
||||
normr_min = normr;
|
||||
}
|
||||
|
||||
k++;
|
||||
|
||||
/*
|
||||
Reliable update part
|
||||
|
||||
The recursively computed residual can deviate from the actual residual
|
||||
after several iterations. However, computing the residual from the
|
||||
definition costs extra MVs and should not be done at each iteration. The
|
||||
reliable update strategy computes the true residual from the definition:
|
||||
r=b-A*x at strategic intervals. Furthermore a "group wise update" strategy
|
||||
is used to combine updates, which improves accuracy.
|
||||
*/
|
||||
|
||||
// Maximum norm of residuals since last update of x.
|
||||
Mx = numext::maxi(Mx, normr);
|
||||
// Maximum norm of residuals since last computation of the true residual.
|
||||
Mr = numext::maxi(Mr, normr);
|
||||
|
||||
if (normr < delta * normb && normb <= Mx) {
|
||||
update_app = true;
|
||||
}
|
||||
|
||||
if (update_app || (normr < delta * Mr && normb <= Mr)) {
|
||||
compute_res = true;
|
||||
}
|
||||
|
||||
if (bicg_convergence) {
|
||||
update_app = true;
|
||||
compute_res = true;
|
||||
bicg_convergence = false;
|
||||
}
|
||||
|
||||
if (compute_res) {
|
||||
// Explicitly compute residual from the definition
|
||||
|
||||
// This is equivalent to the shifted version of rhs - mat *
|
||||
// (precond.solve(x)+x0)
|
||||
rHat.col(0) = b_prime - mat * precond.solve(x);
|
||||
normr = rHat.col(0).stableNorm();
|
||||
Mr = normr;
|
||||
|
||||
if (update_app) {
|
||||
// After the group wise update, the original problem is translated to a
|
||||
// shifted one.
|
||||
x_prime += x;
|
||||
x.setZero();
|
||||
b_prime = rHat.col(0);
|
||||
Mx = normr;
|
||||
}
|
||||
}
|
||||
if (normr < normr_min) {
|
||||
// We found an x with lower residual, keep this one.
|
||||
x_min = x + x_prime;
|
||||
normr_min = normr;
|
||||
}
|
||||
|
||||
compute_res = false;
|
||||
update_app = false;
|
||||
}
|
||||
|
||||
// Convert internal variable to the true solution vector x
|
||||
x += x_prime;
|
||||
|
||||
normr = (rhs - mat * (precond.solve(x) + x0)).stableNorm();
|
||||
if (normr > normr_min || !(numext::isfinite)(normr)) {
|
||||
// x_min is a better solution than x, return x_min
|
||||
x = x_min;
|
||||
normr = normr_min;
|
||||
}
|
||||
tol_error = normr / normb;
|
||||
iters = k;
|
||||
|
||||
// x contains the updates to x0, add those back to obtain the solution
|
||||
x = precond.solve(x);
|
||||
x += x0;
|
||||
return true;
|
||||
}
|
||||
|
||||
} // namespace internal
|
||||
|
||||
template <typename MatrixType_, typename Preconditioner_ = DiagonalPreconditioner<typename MatrixType_::Scalar>>
|
||||
class BiCGSTABL;
|
||||
|
||||
namespace internal {
|
||||
|
||||
template <typename MatrixType_, typename Preconditioner_>
|
||||
struct traits<Eigen::BiCGSTABL<MatrixType_, Preconditioner_>> {
|
||||
typedef MatrixType_ MatrixType;
|
||||
typedef Preconditioner_ Preconditioner;
|
||||
};
|
||||
|
||||
} // namespace internal
|
||||
|
||||
template <typename MatrixType_, typename Preconditioner_>
|
||||
class BiCGSTABL : public IterativeSolverBase<BiCGSTABL<MatrixType_, Preconditioner_>> {
|
||||
typedef IterativeSolverBase<BiCGSTABL> Base;
|
||||
using Base::m_error;
|
||||
using Base::m_info;
|
||||
using Base::m_isInitialized;
|
||||
using Base::m_iterations;
|
||||
using Base::matrix;
|
||||
Index m_L;
|
||||
|
||||
public:
|
||||
typedef MatrixType_ MatrixType;
|
||||
typedef typename MatrixType::Scalar Scalar;
|
||||
typedef typename MatrixType::RealScalar RealScalar;
|
||||
typedef Preconditioner_ Preconditioner;
|
||||
|
||||
/** Default constructor. */
|
||||
BiCGSTABL() : m_L(2) {}
|
||||
|
||||
/**
|
||||
Initialize the solver with matrix \a A for further \c Ax=b solving.
|
||||
|
||||
This constructor is a shortcut for the default constructor followed
|
||||
by a call to compute().
|
||||
|
||||
\warning this class stores a reference to the matrix A as well as some
|
||||
precomputed values that depend on it. Therefore, if \a A is changed
|
||||
this class becomes invalid. Call compute() to update it with the new
|
||||
matrix A, or modify a copy of A.
|
||||
*/
|
||||
template <typename MatrixDerived>
|
||||
explicit BiCGSTABL(const EigenBase<MatrixDerived> &A) : Base(A.derived()), m_L(2) {}
|
||||
|
||||
/** \internal */
|
||||
/** Loops over the number of columns of b and does the following:
|
||||
1. sets the tolerence and maxIterations
|
||||
2. Calls the function that has the core solver routine
|
||||
*/
|
||||
template <typename Rhs, typename Dest>
|
||||
void _solve_vector_with_guess_impl(const Rhs &b, Dest &x) const {
|
||||
m_iterations = Base::maxIterations();
|
||||
|
||||
m_error = Base::m_tolerance;
|
||||
|
||||
bool ret = internal::bicgstabl(matrix(), b, x, Base::m_preconditioner, m_iterations, m_error, m_L);
|
||||
m_info = (!ret) ? NumericalIssue : m_error <= Base::m_tolerance ? Success : NoConvergence;
|
||||
}
|
||||
|
||||
/** Sets the parameter L, indicating how many minimize residual steps are
|
||||
* used. Default: 2 */
|
||||
void setL(Index L) {
|
||||
eigen_assert(L >= 1 && "L needs to be positive");
|
||||
m_L = L;
|
||||
}
|
||||
};
|
||||
|
||||
} // namespace Eigen
|
||||
|
||||
#endif /* EIGEN_BICGSTABL_H */
|
||||
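Since BiCGSTABL follows the standard IterativeSolverBase interface, a brief usage sketch may be useful here (editorial; it assumes the solver is exported through the unsupported IterativeSolvers module header as in upstream Eigen, and the 1-D Laplacian test matrix is invented):

#include <unsupported/Eigen/IterativeSolvers>
#include <Eigen/Sparse>
#include <vector>
#include <iostream>

int main() {
  typedef Eigen::SparseMatrix<double> SpMat;
  const int n = 100;
  // Simple 1-D Laplacian as an illustrative test matrix.
  std::vector<Eigen::Triplet<double> > trip;
  for (int i = 0; i < n; ++i) {
    trip.push_back(Eigen::Triplet<double>(i, i, 2.0));
    if (i > 0)     trip.push_back(Eigen::Triplet<double>(i, i - 1, -1.0));
    if (i < n - 1) trip.push_back(Eigen::Triplet<double>(i, i + 1, -1.0));
  }
  SpMat A(n, n);
  A.setFromTriplets(trip.begin(), trip.end());
  Eigen::VectorXd b = Eigen::VectorXd::Ones(n);

  Eigen::BiCGSTABL<SpMat> solver(A);
  solver.setL(4);                        // more minimize-residual steps than the default L = 2
  Eigen::VectorXd x = solver.solve(b);
  std::cout << "#iters: " << solver.iterations()
            << "  error: " << solver.error() << std::endl;
}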
@@ -33,6 +33,8 @@

#include "../../../../Eigen/Core"

#include "./InternalHeaderCheck.h"

namespace Eigen {

namespace internal {
@@ -163,7 +165,7 @@ void constrained_cg(const TMatrix& A, const CMatrix& C, VectorX& x,
p = z + gamma*p;

++iter;
// one dimensionnal optimization
// one dimensional optimization
q = A * p;
lambda = rho / q.dot(p);
for (Index i = 0; i < C.rows(); ++i)

@@ -12,19 +12,21 @@

#include "../../../../Eigen/Eigenvalues"

#include "./InternalHeaderCheck.h"

namespace Eigen {

template< typename _MatrixType,
typename _Preconditioner = DiagonalPreconditioner<typename _MatrixType::Scalar> >
template< typename MatrixType_,
typename Preconditioner_ = DiagonalPreconditioner<typename MatrixType_::Scalar> >
class DGMRES;

namespace internal {

template< typename _MatrixType, typename _Preconditioner>
struct traits<DGMRES<_MatrixType,_Preconditioner> >
template< typename MatrixType_, typename Preconditioner_>
struct traits<DGMRES<MatrixType_,Preconditioner_> >
{
typedef _MatrixType MatrixType;
typedef _Preconditioner Preconditioner;
typedef MatrixType_ MatrixType;
typedef Preconditioner_ Preconditioner;
};

/** \brief Computes a permutation vector to have a sorted sequence
@@ -68,8 +70,8 @@ void sortWithPermutation (VectorType& vec, IndexType& perm, typename IndexType::
* the IncompleteLUT for instance. The preconditioner is applied
* at right of the matrix and the combination is multiplicative.
*
* \tparam _MatrixType the type of the sparse matrix A, can be a dense or a sparse matrix.
* \tparam _Preconditioner the type of the preconditioner. Default is DiagonalPreconditioner
* \tparam MatrixType_ the type of the sparse matrix A, can be a dense or a sparse matrix.
* \tparam Preconditioner_ the type of the preconditioner. Default is DiagonalPreconditioner
* Typical usage :
* \code
* SparseMatrix<double> A;
@@ -97,8 +99,8 @@ void sortWithPermutation (VectorType& vec, IndexType& perm, typename IndexType::

*
*/
template< typename _MatrixType, typename _Preconditioner>
class DGMRES : public IterativeSolverBase<DGMRES<_MatrixType,_Preconditioner> >
template< typename MatrixType_, typename Preconditioner_>
class DGMRES : public IterativeSolverBase<DGMRES<MatrixType_,Preconditioner_> >
{
typedef IterativeSolverBase<DGMRES> Base;
using Base::matrix;
@@ -110,11 +112,11 @@ class DGMRES : public IterativeSolverBase<DGMRES<_MatrixType,_Preconditioner> >
public:
using Base::_solve_impl;
using Base::_solve_with_guess_impl;
typedef _MatrixType MatrixType;
typedef MatrixType_ MatrixType;
typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::StorageIndex StorageIndex;
typedef typename MatrixType::RealScalar RealScalar;
typedef _Preconditioner Preconditioner;
typedef Preconditioner_ Preconditioner;
typedef Matrix<Scalar,Dynamic,Dynamic> DenseMatrix;
typedef Matrix<RealScalar,Dynamic,Dynamic> DenseRealMatrix;
typedef Matrix<Scalar,Dynamic,1> DenseVector;
@@ -223,9 +225,9 @@ class DGMRES : public IterativeSolverBase<DGMRES<_MatrixType,_Preconditioner> >
* A right preconditioner is used combined with deflation.
*
*/
template< typename _MatrixType, typename _Preconditioner>
template< typename MatrixType_, typename Preconditioner_>
template<typename Rhs, typename Dest>
void DGMRES<_MatrixType, _Preconditioner>::dgmres(const MatrixType& mat,const Rhs& rhs, Dest& x,
void DGMRES<MatrixType_, Preconditioner_>::dgmres(const MatrixType& mat,const Rhs& rhs, Dest& x,
const Preconditioner& precond) const
{
const RealScalar considerAsZero = (std::numeric_limits<RealScalar>::min)();
@@ -281,9 +283,9 @@ void DGMRES<_MatrixType, _Preconditioner>::dgmres(const MatrixType& mat,const Rh
* \param normRhs The norm of the right hand side vector
* \param nbIts The number of iterations
*/
template< typename _MatrixType, typename _Preconditioner>
template< typename MatrixType_, typename Preconditioner_>
template<typename Dest>
Index DGMRES<_MatrixType, _Preconditioner>::dgmresCycle(const MatrixType& mat, const Preconditioner& precond, Dest& x, DenseVector& r0, RealScalar& beta, const RealScalar& normRhs, Index& nbIts) const
Index DGMRES<MatrixType_, Preconditioner_>::dgmresCycle(const MatrixType& mat, const Preconditioner& precond, Dest& x, DenseVector& r0, RealScalar& beta, const RealScalar& normRhs, Index& nbIts) const
{
//Initialization
DenseVector g(m_restart+1); // Right hand side of the least square problem
@@ -374,8 +376,8 @@ Index DGMRES<_MatrixType, _Preconditioner>::dgmresCycle(const MatrixType& mat, c
}


template< typename _MatrixType, typename _Preconditioner>
void DGMRES<_MatrixType, _Preconditioner>::dgmresInitDeflation(Index& rows) const
template< typename MatrixType_, typename Preconditioner_>
void DGMRES<MatrixType_, Preconditioner_>::dgmresInitDeflation(Index& rows) const
{
m_U.resize(rows, m_maxNeig);
m_MU.resize(rows, m_maxNeig);
@@ -384,14 +386,14 @@ void DGMRES<_MatrixType, _Preconditioner>::dgmresInitDeflation(Index& rows) cons
m_isDeflAllocated = true;
}

template< typename _MatrixType, typename _Preconditioner>
inline typename DGMRES<_MatrixType, _Preconditioner>::ComplexVector DGMRES<_MatrixType, _Preconditioner>::schurValues(const ComplexSchur<DenseMatrix>& schurofH) const
|
||||
template< typename MatrixType_, typename Preconditioner_>
|
||||
inline typename DGMRES<MatrixType_, Preconditioner_>::ComplexVector DGMRES<MatrixType_, Preconditioner_>::schurValues(const ComplexSchur<DenseMatrix>& schurofH) const
|
||||
{
|
||||
return schurofH.matrixT().diagonal();
|
||||
}
|
||||
|
||||
template< typename _MatrixType, typename _Preconditioner>
|
||||
inline typename DGMRES<_MatrixType, _Preconditioner>::ComplexVector DGMRES<_MatrixType, _Preconditioner>::schurValues(const RealSchur<DenseMatrix>& schurofH) const
|
||||
template< typename MatrixType_, typename Preconditioner_>
|
||||
inline typename DGMRES<MatrixType_, Preconditioner_>::ComplexVector DGMRES<MatrixType_, Preconditioner_>::schurValues(const RealSchur<DenseMatrix>& schurofH) const
|
||||
{
|
||||
const DenseMatrix& T = schurofH.matrixT();
|
||||
Index it = T.rows();
|
||||
@@ -415,11 +417,11 @@ inline typename DGMRES<_MatrixType, _Preconditioner>::ComplexVector DGMRES<_Matr
|
||||
return eig;
|
||||
}
|
||||
|
||||
template< typename _MatrixType, typename _Preconditioner>
|
||||
Index DGMRES<_MatrixType, _Preconditioner>::dgmresComputeDeflationData(const MatrixType& mat, const Preconditioner& precond, const Index& it, StorageIndex& neig) const
|
||||
template< typename MatrixType_, typename Preconditioner_>
|
||||
Index DGMRES<MatrixType_, Preconditioner_>::dgmresComputeDeflationData(const MatrixType& mat, const Preconditioner& precond, const Index& it, StorageIndex& neig) const
|
||||
{
|
||||
// First, find the Schur form of the Hessenberg matrix H
|
||||
typename internal::conditional<NumTraits<Scalar>::IsComplex, ComplexSchur<DenseMatrix>, RealSchur<DenseMatrix> >::type schurofH;
|
||||
std::conditional_t<NumTraits<Scalar>::IsComplex, ComplexSchur<DenseMatrix>, RealSchur<DenseMatrix> > schurofH;
|
||||
bool computeU = true;
|
||||
DenseMatrix matrixQ(it,it);
|
||||
matrixQ.setIdentity();
|
||||
@@ -498,9 +500,9 @@ Index DGMRES<_MatrixType, _Preconditioner>::dgmresComputeDeflationData(const Mat
|
||||
m_isDeflInitialized = true;
|
||||
return 0;
|
||||
}
|
||||
template<typename _MatrixType, typename _Preconditioner>
|
||||
template<typename MatrixType_, typename Preconditioner_>
|
||||
template<typename RhsType, typename DestType>
|
||||
Index DGMRES<_MatrixType, _Preconditioner>::dgmresApplyDeflation(const RhsType &x, DestType &y) const
|
||||
Index DGMRES<MatrixType_, Preconditioner_>::dgmresApplyDeflation(const RhsType &x, DestType &y) const
|
||||
{
|
||||
DenseVector x1 = m_U.leftCols(m_r).transpose() * x;
|
||||
y = x + m_U.leftCols(m_r) * ( m_lambdaN * m_luT.solve(x1) - x1);
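To complement the renamed template parameters in this file, a hedged usage sketch for DGMRES with the right-applied IncompleteLUT preconditioner mentioned in its documentation; A and b are assumed to be an assembled SparseMatrix<double> and VectorXd, and nothing below is taken from the commit itself:

#include <Eigen/Sparse>
#include <unsupported/Eigen/IterativeSolvers>

Eigen::DGMRES<Eigen::SparseMatrix<double>, Eigen::IncompleteLUT<double> > solver;
solver.setMaxIterations(500);  // inherited from IterativeSolverBase
solver.setTolerance(1e-8);
solver.compute(A);             // also sets up the IncompleteLUT preconditioner
Eigen::VectorXd x = solver.solve(b);
// solver.iterations() and solver.error() report the work done and the final relative residual.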
|
||||
|
||||
@@ -11,6 +11,8 @@
|
||||
#ifndef EIGEN_GMRES_H
|
||||
#define EIGEN_GMRES_H
|
||||
|
||||
#include "./InternalHeaderCheck.h"
|
||||
|
||||
namespace Eigen {
|
||||
|
||||
namespace internal {
|
||||
@@ -216,17 +218,17 @@ bool gmres(const MatrixType & mat, const Rhs & rhs, Dest & x, const Precondition
|
||||
|
||||
}
|
||||
|
||||
template< typename _MatrixType,
|
||||
typename _Preconditioner = DiagonalPreconditioner<typename _MatrixType::Scalar> >
|
||||
template< typename MatrixType_,
|
||||
typename Preconditioner_ = DiagonalPreconditioner<typename MatrixType_::Scalar> >
|
||||
class GMRES;
|
||||
|
||||
namespace internal {
|
||||
|
||||
template< typename _MatrixType, typename _Preconditioner>
|
||||
struct traits<GMRES<_MatrixType,_Preconditioner> >
|
||||
template< typename MatrixType_, typename Preconditioner_>
|
||||
struct traits<GMRES<MatrixType_,Preconditioner_> >
|
||||
{
|
||||
typedef _MatrixType MatrixType;
|
||||
typedef _Preconditioner Preconditioner;
|
||||
typedef MatrixType_ MatrixType;
|
||||
typedef Preconditioner_ Preconditioner;
|
||||
};
|
||||
|
||||
}
|
||||
@@ -237,8 +239,8 @@ struct traits<GMRES<_MatrixType,_Preconditioner> >
|
||||
* This class allows to solve for A.x = b sparse linear problems using a generalized minimal
|
||||
* residual method. The vectors x and b can be either dense or sparse.
|
||||
*
|
||||
* \tparam _MatrixType the type of the sparse matrix A, can be a dense or a sparse matrix.
|
||||
* \tparam _Preconditioner the type of the preconditioner. Default is DiagonalPreconditioner
|
||||
* \tparam MatrixType_ the type of the sparse matrix A, can be a dense or a sparse matrix.
|
||||
* \tparam Preconditioner_ the type of the preconditioner. Default is DiagonalPreconditioner
|
||||
*
|
||||
* The maximal number of iterations and tolerance value can be controlled via the setMaxIterations()
|
||||
* and setTolerance() methods. The defaults are the size of the problem for the maximal number of iterations
|
||||
@@ -265,8 +267,8 @@ struct traits<GMRES<_MatrixType,_Preconditioner> >
|
||||
*
|
||||
* \sa class SimplicialCholesky, DiagonalPreconditioner, IdentityPreconditioner
|
||||
*/
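As a concrete illustration of the setMaxIterations()/setTolerance() interface described in the documentation above, a sketch under the assumption that a SparseMatrix<double> A, a VectorXd b, and a user-supplied initial guess x0 exist (not part of the commit):

#include <Eigen/Sparse>
#include <unsupported/Eigen/IterativeSolvers>

Eigen::GMRES<Eigen::SparseMatrix<double> > gmres;
gmres.setMaxIterations(2 * A.rows());  // the default would be the problem size
gmres.setTolerance(1e-9);              // relative residual |Ax-b|/|b|
gmres.compute(A);
Eigen::VectorXd x = gmres.solveWithGuess(b, x0);  // x0: user-provided starting point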
|
||||
template< typename _MatrixType, typename _Preconditioner>
|
||||
class GMRES : public IterativeSolverBase<GMRES<_MatrixType,_Preconditioner> >
|
||||
template< typename MatrixType_, typename Preconditioner_>
|
||||
class GMRES : public IterativeSolverBase<GMRES<MatrixType_,Preconditioner_> >
|
||||
{
|
||||
typedef IterativeSolverBase<GMRES> Base;
|
||||
using Base::matrix;
|
||||
@@ -280,10 +282,10 @@ private:
|
||||
|
||||
public:
|
||||
using Base::_solve_impl;
|
||||
typedef _MatrixType MatrixType;
|
||||
typedef MatrixType_ MatrixType;
|
||||
typedef typename MatrixType::Scalar Scalar;
|
||||
typedef typename MatrixType::RealScalar RealScalar;
|
||||
typedef _Preconditioner Preconditioner;
|
||||
typedef Preconditioner_ Preconditioner;
|
||||
|
||||
public:
|
||||
|
||||
|
||||
684
libs/eigen/unsupported/Eigen/src/IterativeSolvers/IDRS.h
Executable file → Normal file
@@ -9,427 +9,385 @@
|
||||
// Public License v. 2.0. If a copy of the MPL was not distributed
|
||||
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
|
||||
#ifndef EIGEN_IDRS_H
|
||||
#define EIGEN_IDRS_H
|
||||
|
||||
namespace Eigen
|
||||
{
|
||||
#include "./InternalHeaderCheck.h"
|
||||
|
||||
namespace internal
|
||||
{
|
||||
/** \internal Low-level Induced Dimension Reduction algoritm
|
||||
\param A The matrix A
|
||||
\param b The right hand side vector b
|
||||
\param x On input and initial solution, on output the computed solution.
|
||||
\param precond A preconditioner being able to efficiently solve for an
|
||||
approximation of Ax=b (regardless of b)
|
||||
\param iter On input the max number of iteration, on output the number of performed iterations.
|
||||
\param relres On input the tolerance error, on output an estimation of the relative error.
|
||||
\param S On input Number of the dimension of the shadow space.
|
||||
\param smoothing switches residual smoothing on.
|
||||
\param angle small omega lead to faster convergence at the expense of numerical stability
|
||||
\param replacement switches on a residual replacement strategy to increase accuracy of residual at the expense of more Mat*vec products
|
||||
\return false in the case of numerical issue, for example a break down of IDRS.
|
||||
*/
|
||||
template<typename Vector, typename RealScalar>
|
||||
typename Vector::Scalar omega(const Vector& t, const Vector& s, RealScalar angle)
|
||||
{
|
||||
using numext::abs;
|
||||
typedef typename Vector::Scalar Scalar;
|
||||
const RealScalar ns = s.norm();
|
||||
const RealScalar nt = t.norm();
|
||||
const Scalar ts = t.dot(s);
|
||||
const RealScalar rho = abs(ts / (nt * ns));
|
||||
namespace Eigen {
|
||||
|
||||
if (rho < angle) {
|
||||
if (ts == Scalar(0)) {
|
||||
return Scalar(0);
|
||||
}
|
||||
// Original relation for om is given by
|
||||
// om = om * angle / rho;
|
||||
// To alleviate potential (near) division by zero this can be rewritten as
|
||||
// om = angle * (ns / nt) * (ts / abs(ts)) = angle * (ns / nt) * sgn(ts)
|
||||
return angle * (ns / nt) * (ts / abs(ts));
|
||||
}
|
||||
return ts / (nt * nt);
|
||||
}
|
||||
namespace internal {
|
||||
/** \internal Low-level Induced Dimension Reduction algorithm
|
||||
\param A The matrix A
|
||||
\param b The right hand side vector b
|
||||
\param x On input and initial solution, on output the computed solution.
|
||||
\param precond A preconditioner being able to efficiently solve for an
|
||||
approximation of Ax=b (regardless of b)
|
||||
\param iter On input the max number of iteration, on output the number of performed iterations.
|
||||
\param relres On input the tolerance error, on output an estimation of the relative error.
|
||||
\param S On input the dimension of the shadow space.
|
||||
\param smoothing switches residual smoothing on.
|
||||
\param angle small omega lead to faster convergence at the expense of numerical stability
|
||||
\param replacement switches on a residual replacement strategy to increase accuracy of residual at the
|
||||
expense of more Mat*vec products \return false in the case of numerical issue, for example a break down of IDRS.
|
||||
*/
|
||||
template <typename Vector, typename RealScalar>
|
||||
typename Vector::Scalar omega(const Vector& t, const Vector& s, RealScalar angle) {
|
||||
using numext::abs;
|
||||
typedef typename Vector::Scalar Scalar;
|
||||
const RealScalar ns = s.stableNorm();
|
||||
const RealScalar nt = t.stableNorm();
|
||||
const Scalar ts = t.dot(s);
|
||||
const RealScalar rho = abs(ts / (nt * ns));
|
||||
|
||||
template <typename MatrixType, typename Rhs, typename Dest, typename Preconditioner>
|
||||
bool idrs(const MatrixType& A, const Rhs& b, Dest& x, const Preconditioner& precond,
|
||||
Index& iter,
|
||||
typename Dest::RealScalar& relres, Index S, bool smoothing, typename Dest::RealScalar angle, bool replacement)
|
||||
{
|
||||
typedef typename Dest::RealScalar RealScalar;
|
||||
typedef typename Dest::Scalar Scalar;
|
||||
typedef Matrix<Scalar, Dynamic, 1> VectorType;
|
||||
typedef Matrix<Scalar, Dynamic, Dynamic, ColMajor> DenseMatrixType;
|
||||
const Index N = b.size();
|
||||
S = S < x.rows() ? S : x.rows();
|
||||
const RealScalar tol = relres;
|
||||
const Index maxit = iter;
|
||||
if (rho < angle) {
|
||||
if (ts == Scalar(0)) {
|
||||
return Scalar(0);
|
||||
}
|
||||
// Original relation for om is given by
|
||||
// om = om * angle / rho;
|
||||
// To alleviate potential (near) division by zero this can be rewritten as
|
||||
// om = angle * (ns / nt) * (ts / abs(ts)) = angle * (ns / nt) * sgn(ts)
|
||||
return angle * (ns / nt) * (ts / abs(ts));
|
||||
}
|
||||
return ts / (nt * nt);
|
||||
}
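// Added gloss on the rewrite above (not part of the commit): the unclamped value is
// om = ts / nt^2, and rho = |ts| / (nt * ns), so
//   om * angle / rho = (ts / nt^2) * angle * (nt * ns / |ts|) = angle * (ns / nt) * sgn(ts),
// which is exactly the expression returned in the rho < angle branch.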
|
||||
|
||||
Index replacements = 0;
|
||||
bool trueres = false;
|
||||
template <typename MatrixType, typename Rhs, typename Dest, typename Preconditioner>
|
||||
bool idrs(const MatrixType& A, const Rhs& b, Dest& x, const Preconditioner& precond, Index& iter,
|
||||
typename Dest::RealScalar& relres, Index S, bool smoothing, typename Dest::RealScalar angle,
|
||||
bool replacement) {
|
||||
typedef typename Dest::RealScalar RealScalar;
|
||||
typedef typename Dest::Scalar Scalar;
|
||||
typedef Matrix<Scalar, Dynamic, 1> VectorType;
|
||||
typedef Matrix<Scalar, Dynamic, Dynamic, ColMajor> DenseMatrixType;
|
||||
const Index N = b.size();
|
||||
S = S < x.rows() ? S : x.rows();
|
||||
const RealScalar tol = relres;
|
||||
const Index maxit = iter;
|
||||
|
||||
FullPivLU<DenseMatrixType> lu_solver;
|
||||
bool trueres = false;
|
||||
|
||||
DenseMatrixType P;
|
||||
{
|
||||
HouseholderQR<DenseMatrixType> qr(DenseMatrixType::Random(N, S));
|
||||
P = (qr.householderQ() * DenseMatrixType::Identity(N, S));
|
||||
}
|
||||
FullPivLU<DenseMatrixType> lu_solver;
|
||||
|
||||
const RealScalar normb = b.norm();
|
||||
DenseMatrixType P;
|
||||
{
|
||||
HouseholderQR<DenseMatrixType> qr(DenseMatrixType::Random(N, S));
|
||||
P = (qr.householderQ() * DenseMatrixType::Identity(N, S));
|
||||
}
|
||||
|
||||
if (internal::isApprox(normb, RealScalar(0)))
|
||||
{
|
||||
//Solution is the zero vector
|
||||
x.setZero();
|
||||
iter = 0;
|
||||
relres = 0;
|
||||
return true;
|
||||
}
|
||||
// from http://homepage.tudelft.nl/1w5b5/IDRS/manual.pdf
|
||||
// A peak in the residual is considered dangerously high if‖ri‖/‖b‖> C(tol/epsilon).
|
||||
// With epsilon the
|
||||
// relative machine precision. The factor tol/epsilon corresponds to the size of a
|
||||
// finite precision number that is so large that the absolute round-off error in
|
||||
// this number, when propagated through the process, makes it impossible to
|
||||
// achieve the required accuracy.The factor C accounts for the accumulation of
|
||||
// round-off errors. This parameter has beenset to 10−3.
|
||||
// mp is epsilon/C
|
||||
// 10^3 * eps is very conservative, so normally no residual replacements will take place.
|
||||
// It only happens if things go very wrong. Too many restarts may ruin the convergence.
|
||||
const RealScalar mp = RealScalar(1e3) * NumTraits<Scalar>::epsilon();
|
||||
const RealScalar normb = b.stableNorm();
|
||||
|
||||
if (internal::isApprox(normb, RealScalar(0))) {
|
||||
// Solution is the zero vector
|
||||
x.setZero();
|
||||
iter = 0;
|
||||
relres = 0;
|
||||
return true;
|
||||
}
|
||||
// from http://homepage.tudelft.nl/1w5b5/IDRS/manual.pdf
|
||||
// A peak in the residual is considered dangerously high if ‖r_i‖/‖b‖ > C(tol/epsilon).
|
||||
// With epsilon the relative machine precision. The factor tol/epsilon corresponds
|
||||
// to the size of a finite precision number that is so large that the absolute
|
||||
// round-off error in this number, when propagated through the process, makes it
|
||||
// impossible to achieve the required accuracy. The factor C accounts for the
|
||||
// accumulation of round-off errors. This parameter has been set to 10^{-3}.
|
||||
// mp is epsilon/C 10^3 * eps is very conservative, so normally no residual
|
||||
// replacements will take place. It only happens if things go very wrong. Too many
|
||||
// restarts may ruin the convergence.
|
||||
const RealScalar mp = RealScalar(1e3) * NumTraits<Scalar>::epsilon();
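// Illustrative numbers (added, not in the commit): for double precision
// NumTraits<double>::epsilon() is about 2.2e-16, so mp is roughly 2.2e-13 and the
// replacement test "normr > tolb / mp" only triggers once the residual exceeds the
// target tolb by about twelve orders of magnitude, i.e. only when things go very wrong.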
|
||||
|
||||
// Compute initial residual
|
||||
const RealScalar tolb = tol * normb; // Relative tolerance
|
||||
VectorType r = b - A * x;
|
||||
|
||||
//Compute initial residual
|
||||
const RealScalar tolb = tol * normb; //Relative tolerance
|
||||
VectorType r = b - A * x;
|
||||
VectorType x_s, r_s;
|
||||
|
||||
VectorType x_s, r_s;
|
||||
if (smoothing) {
|
||||
x_s = x;
|
||||
r_s = r;
|
||||
}
|
||||
|
||||
if (smoothing)
|
||||
{
|
||||
x_s = x;
|
||||
r_s = r;
|
||||
}
|
||||
RealScalar normr = r.stableNorm();
|
||||
|
||||
RealScalar normr = r.norm();
|
||||
if (normr <= tolb) {
|
||||
// Initial guess is a good enough solution
|
||||
iter = 0;
|
||||
relres = normr / normb;
|
||||
return true;
|
||||
}
|
||||
|
||||
if (normr <= tolb)
|
||||
{
|
||||
//Initial guess is a good enough solution
|
||||
iter = 0;
|
||||
relres = normr / normb;
|
||||
return true;
|
||||
}
|
||||
DenseMatrixType G = DenseMatrixType::Zero(N, S);
|
||||
DenseMatrixType U = DenseMatrixType::Zero(N, S);
|
||||
DenseMatrixType M = DenseMatrixType::Identity(S, S);
|
||||
VectorType t(N), v(N);
|
||||
Scalar om = 1.;
|
||||
|
||||
DenseMatrixType G = DenseMatrixType::Zero(N, S);
|
||||
DenseMatrixType U = DenseMatrixType::Zero(N, S);
|
||||
DenseMatrixType M = DenseMatrixType::Identity(S, S);
|
||||
VectorType t(N), v(N);
|
||||
Scalar om = 1.;
|
||||
// Main iteration loop, build G-spaces:
|
||||
iter = 0;
|
||||
|
||||
//Main iteration loop, guild G-spaces:
|
||||
iter = 0;
|
||||
while (normr > tolb && iter < maxit) {
|
||||
// New right hand size for small system:
|
||||
VectorType f = (r.adjoint() * P).adjoint();
|
||||
|
||||
while (normr > tolb && iter < maxit)
|
||||
{
|
||||
//New right hand size for small system:
|
||||
VectorType f = (r.adjoint() * P).adjoint();
|
||||
for (Index k = 0; k < S; ++k) {
|
||||
// Solve small system and make v orthogonal to P:
|
||||
// c = M(k:s,k:s)\f(k:s);
|
||||
lu_solver.compute(M.block(k, k, S - k, S - k));
|
||||
VectorType c = lu_solver.solve(f.segment(k, S - k));
|
||||
// v = r - G(:,k:s)*c;
|
||||
v = r - G.rightCols(S - k) * c;
|
||||
// Preconditioning
|
||||
v = precond.solve(v);
|
||||
|
||||
for (Index k = 0; k < S; ++k)
|
||||
{
|
||||
//Solve small system and make v orthogonal to P:
|
||||
//c = M(k:s,k:s)\f(k:s);
|
||||
lu_solver.compute(M.block(k , k , S -k, S - k ));
|
||||
VectorType c = lu_solver.solve(f.segment(k , S - k ));
|
||||
//v = r - G(:,k:s)*c;
|
||||
v = r - G.rightCols(S - k ) * c;
|
||||
//Preconditioning
|
||||
v = precond.solve(v);
|
||||
// Compute new U(:,k) and G(:,k), G(:,k) is in space G_j
|
||||
U.col(k) = U.rightCols(S - k) * c + om * v;
|
||||
G.col(k) = A * U.col(k);
|
||||
|
||||
//Compute new U(:,k) and G(:,k), G(:,k) is in space G_j
|
||||
U.col(k) = U.rightCols(S - k ) * c + om * v;
|
||||
G.col(k) = A * U.col(k );
|
||||
// Bi-Orthogonalise the new basis vectors:
|
||||
for (Index i = 0; i < k - 1; ++i) {
|
||||
// alpha = ( P(:,i)'*G(:,k) )/M(i,i);
|
||||
Scalar alpha = P.col(i).dot(G.col(k)) / M(i, i);
|
||||
G.col(k) = G.col(k) - alpha * G.col(i);
|
||||
U.col(k) = U.col(k) - alpha * U.col(i);
|
||||
}
|
||||
|
||||
//Bi-Orthogonalise the new basis vectors:
|
||||
for (Index i = 0; i < k-1 ; ++i)
|
||||
{
|
||||
//alpha = ( P(:,i)'*G(:,k) )/M(i,i);
|
||||
Scalar alpha = P.col(i ).dot(G.col(k )) / M(i, i );
|
||||
G.col(k ) = G.col(k ) - alpha * G.col(i );
|
||||
U.col(k ) = U.col(k ) - alpha * U.col(i );
|
||||
}
|
||||
// New column of M = P'*G (first k-1 entries are zero)
|
||||
// M(k:s,k) = (G(:,k)'*P(:,k:s))';
|
||||
M.block(k, k, S - k, 1) = (G.col(k).adjoint() * P.rightCols(S - k)).adjoint();
|
||||
|
||||
//New column of M = P'*G (first k-1 entries are zero)
|
||||
//M(k:s,k) = (G(:,k)'*P(:,k:s))';
|
||||
M.block(k , k , S - k , 1) = (G.col(k ).adjoint() * P.rightCols(S - k )).adjoint();
|
||||
if (internal::isApprox(M(k, k), Scalar(0))) {
|
||||
return false;
|
||||
}
|
||||
|
||||
if (internal::isApprox(M(k,k), Scalar(0)))
|
||||
{
|
||||
return false;
|
||||
}
|
||||
// Make r orthogonal to q_i, i = 0..k-1
|
||||
Scalar beta = f(k) / M(k, k);
|
||||
r = r - beta * G.col(k);
|
||||
x = x + beta * U.col(k);
|
||||
normr = r.stableNorm();
|
||||
|
||||
//Make r orthogonal to q_i, i = 0..k-1
|
||||
Scalar beta = f(k ) / M(k , k );
|
||||
r = r - beta * G.col(k );
|
||||
x = x + beta * U.col(k );
|
||||
normr = r.norm();
|
||||
if (replacement && normr > tolb / mp) {
|
||||
trueres = true;
|
||||
}
|
||||
|
||||
if (replacement && normr > tolb / mp)
|
||||
{
|
||||
trueres = true;
|
||||
}
|
||||
// Smoothing:
|
||||
if (smoothing) {
|
||||
t = r_s - r;
|
||||
// gamma is a Scalar, but the conversion is not allowed
|
||||
Scalar gamma = t.dot(r_s) / t.stableNorm();
|
||||
r_s = r_s - gamma * t;
|
||||
x_s = x_s - gamma * (x_s - x);
|
||||
normr = r_s.stableNorm();
|
||||
}
|
||||
|
||||
//Smoothing:
|
||||
if (smoothing)
|
||||
{
|
||||
t = r_s - r;
|
||||
//gamma is a Scalar, but the conversion is not allowed
|
||||
Scalar gamma = t.dot(r_s) / t.norm();
|
||||
r_s = r_s - gamma * t;
|
||||
x_s = x_s - gamma * (x_s - x);
|
||||
normr = r_s.norm();
|
||||
}
|
||||
if (normr < tolb || iter == maxit) {
|
||||
break;
|
||||
}
|
||||
|
||||
if (normr < tolb || iter == maxit)
|
||||
{
|
||||
break;
|
||||
}
|
||||
// New f = P'*r (first k components are zero)
|
||||
if (k < S - 1) {
|
||||
f.segment(k + 1, S - (k + 1)) = f.segment(k + 1, S - (k + 1)) - beta * M.block(k + 1, k, S - (k + 1), 1);
|
||||
}
|
||||
} // end for
|
||||
|
||||
//New f = P'*r (first k components are zero)
|
||||
if (k < S-1)
|
||||
{
|
||||
f.segment(k + 1, S - (k + 1) ) = f.segment(k + 1 , S - (k + 1)) - beta * M.block(k + 1 , k , S - (k + 1), 1);
|
||||
}
|
||||
}//end for
|
||||
if (normr < tolb || iter == maxit) {
|
||||
break;
|
||||
}
|
||||
|
||||
if (normr < tolb || iter == maxit)
|
||||
{
|
||||
break;
|
||||
}
|
||||
// Now we have sufficient vectors in G_j to compute residual in G_j+1
|
||||
// Note: r is already perpendicular to P so v = r
|
||||
// Preconditioning
|
||||
v = r;
|
||||
v = precond.solve(v);
|
||||
|
||||
//Now we have sufficient vectors in G_j to compute residual in G_j+1
|
||||
//Note: r is already perpendicular to P so v = r
|
||||
//Preconditioning
|
||||
v = r;
|
||||
v = precond.solve(v);
|
||||
// Matrix-vector multiplication:
|
||||
t = A * v;
|
||||
|
||||
//Matrix-vector multiplication:
|
||||
t = A * v;
|
||||
// Computation of a new omega
|
||||
om = internal::omega(t, r, angle);
|
||||
|
||||
//Computation of a new omega
|
||||
om = internal::omega(t, r, angle);
|
||||
if (om == RealScalar(0.0)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
if (om == RealScalar(0.0))
|
||||
{
|
||||
return false;
|
||||
}
|
||||
r = r - om * t;
|
||||
x = x + om * v;
|
||||
normr = r.stableNorm();
|
||||
|
||||
r = r - om * t;
|
||||
x = x + om * v;
|
||||
normr = r.norm();
|
||||
if (replacement && normr > tolb / mp) {
|
||||
trueres = true;
|
||||
}
|
||||
|
||||
if (replacement && normr > tolb / mp)
|
||||
{
|
||||
trueres = true;
|
||||
}
|
||||
// Residual replacement?
|
||||
if (trueres && normr < normb) {
|
||||
r = b - A * x;
|
||||
trueres = false;
|
||||
}
|
||||
|
||||
//Residual replacement?
|
||||
if (trueres && normr < normb)
|
||||
{
|
||||
r = b - A * x;
|
||||
trueres = false;
|
||||
replacements++;
|
||||
}
|
||||
// Smoothing:
|
||||
if (smoothing) {
|
||||
t = r_s - r;
|
||||
Scalar gamma = t.dot(r_s) / t.stableNorm();
|
||||
r_s = r_s - gamma * t;
|
||||
x_s = x_s - gamma * (x_s - x);
|
||||
normr = r_s.stableNorm();
|
||||
}
|
||||
|
||||
//Smoothing:
|
||||
if (smoothing)
|
||||
{
|
||||
t = r_s - r;
|
||||
Scalar gamma = t.dot(r_s) /t.norm();
|
||||
r_s = r_s - gamma * t;
|
||||
x_s = x_s - gamma * (x_s - x);
|
||||
normr = r_s.norm();
|
||||
}
|
||||
iter++;
|
||||
|
||||
iter++;
|
||||
} // end while
|
||||
|
||||
}//end while
|
||||
if (smoothing) {
|
||||
x = x_s;
|
||||
}
|
||||
relres = normr / normb;
|
||||
return true;
|
||||
}
|
||||
|
||||
if (smoothing)
|
||||
{
|
||||
x = x_s;
|
||||
}
|
||||
relres=normr/normb;
|
||||
return true;
|
||||
}
|
||||
} // namespace internal
|
||||
|
||||
} // namespace internal
|
||||
template <typename MatrixType_, typename Preconditioner_ = DiagonalPreconditioner<typename MatrixType_::Scalar> >
|
||||
class IDRS;
|
||||
|
||||
template <typename _MatrixType, typename _Preconditioner = DiagonalPreconditioner<typename _MatrixType::Scalar> >
|
||||
class IDRS;
|
||||
namespace internal {
|
||||
|
||||
namespace internal
|
||||
{
|
||||
|
||||
template <typename _MatrixType, typename _Preconditioner>
|
||||
struct traits<Eigen::IDRS<_MatrixType, _Preconditioner> >
|
||||
{
|
||||
typedef _MatrixType MatrixType;
|
||||
typedef _Preconditioner Preconditioner;
|
||||
};
|
||||
|
||||
} // namespace internal
|
||||
template <typename MatrixType_, typename Preconditioner_>
|
||||
struct traits<Eigen::IDRS<MatrixType_, Preconditioner_> > {
|
||||
typedef MatrixType_ MatrixType;
|
||||
typedef Preconditioner_ Preconditioner;
|
||||
};
|
||||
|
||||
} // namespace internal
|
||||
|
||||
/** \ingroup IterativeLinearSolvers_Module
|
||||
* \brief The Induced Dimension Reduction method (IDR(s)) is a short-recurrences Krylov method for sparse square problems.
|
||||
*
|
||||
* This class allows to solve for A.x = b sparse linear problems. The vectors x and b can be either dense or sparse.
|
||||
* he Induced Dimension Reduction method, IDR(), is a robust and efficient short-recurrence Krylov subspace method for
|
||||
* solving large nonsymmetric systems of linear equations.
|
||||
*
|
||||
* For indefinite systems IDR(S) outperforms both BiCGStab and BiCGStab(L). Additionally, IDR(S) can handle matrices
|
||||
* with complex eigenvalues more efficiently than BiCGStab.
|
||||
*
|
||||
* Many problems that do not converge for BiCGSTAB converge for IDR(s) (for larger values of s). And if both methods
|
||||
* converge the convergence for IDR(s) is typically much faster for difficult systems (for example indefinite problems).
|
||||
*
|
||||
* IDR(s) is a limited memory finite termination method. In exact arithmetic it converges in at most N+N/s iterations,
|
||||
* with N the system size. It uses a fixed number of 4+3s vector. In comparison, BiCGSTAB terminates in 2N iterations
|
||||
* and uses 7 vectors. GMRES terminates in at most N iterations, and uses I+3 vectors, with I the number of iterations.
|
||||
* Restarting GMRES limits the memory consumption, but destroys the finite termination property.
|
||||
*
|
||||
* \tparam _MatrixType the type of the sparse matrix A, can be a dense or a sparse matrix.
|
||||
* \tparam _Preconditioner the type of the preconditioner. Default is DiagonalPreconditioner
|
||||
*
|
||||
* \implsparsesolverconcept
|
||||
*
|
||||
* The maximal number of iterations and tolerance value can be controlled via the setMaxIterations()
|
||||
* and setTolerance() methods. The defaults are the size of the problem for the maximal number of iterations
|
||||
* and NumTraits<Scalar>::epsilon() for the tolerance.
|
||||
*
|
||||
* The tolerance corresponds to the relative residual error: |Ax-b|/|b|
|
||||
*
|
||||
* \b Performance: when using sparse matrices, best performance is achied for a row-major sparse matrix format.
|
||||
* Moreover, in this case multi-threading can be exploited if the user code is compiled with OpenMP enabled.
|
||||
* See \ref TopicMultiThreading for details.
|
||||
*
|
||||
* By default the iterations start with x=0 as an initial guess of the solution.
|
||||
* One can control the start using the solveWithGuess() method.
|
||||
*
|
||||
* IDR(s) can also be used in a matrix-free context, see the following \link MatrixfreeSolverExample example \endlink.
|
||||
*
|
||||
* \sa class SimplicialCholesky, DiagonalPreconditioner, IdentityPreconditioner
|
||||
* \brief The Induced Dimension Reduction method (IDR(s)) is a short-recurrences Krylov method for sparse square
|
||||
* problems.
|
||||
*
|
||||
* This class allows to solve for A.x = b sparse linear problems. The vectors x and b can be either dense or sparse.
|
||||
* The Induced Dimension Reduction method, IDR(s), is a robust and efficient short-recurrence Krylov subspace method for
|
||||
* solving large nonsymmetric systems of linear equations.
|
||||
*
|
||||
* For indefinite systems IDR(S) outperforms both BiCGStab and BiCGStab(L). Additionally, IDR(S) can handle matrices
|
||||
* with complex eigenvalues more efficiently than BiCGStab.
|
||||
*
|
||||
* Many problems that do not converge for BiCGSTAB converge for IDR(s) (for larger values of s). And if both methods
|
||||
* converge the convergence for IDR(s) is typically much faster for difficult systems (for example indefinite problems).
|
||||
*
|
||||
* IDR(s) is a limited memory finite termination method. In exact arithmetic it converges in at most N+N/s iterations,
|
||||
* with N the system size. It uses a fixed number of 4+3s vector. In comparison, BiCGSTAB terminates in 2N iterations
|
||||
* and uses 7 vectors. GMRES terminates in at most N iterations, and uses I+3 vectors, with I the number of iterations.
|
||||
* Restarting GMRES limits the memory consumption, but destroys the finite termination property.
|
||||
*
|
||||
* \tparam MatrixType_ the type of the sparse matrix A, can be a dense or a sparse matrix.
|
||||
* \tparam Preconditioner_ the type of the preconditioner. Default is DiagonalPreconditioner
|
||||
*
|
||||
* \implsparsesolverconcept
|
||||
*
|
||||
* The maximal number of iterations and tolerance value can be controlled via the setMaxIterations()
|
||||
* and setTolerance() methods. The defaults are the size of the problem for the maximal number of iterations
|
||||
* and NumTraits<Scalar>::epsilon() for the tolerance.
|
||||
*
|
||||
* The tolerance corresponds to the relative residual error: |Ax-b|/|b|
|
||||
*
|
||||
* \b Performance: when using sparse matrices, best performance is achieved for a row-major sparse matrix format.
|
||||
* Moreover, in this case multi-threading can be exploited if the user code is compiled with OpenMP enabled.
|
||||
* See \ref TopicMultiThreading for details.
|
||||
*
|
||||
* By default the iterations start with x=0 as an initial guess of the solution.
|
||||
* One can control the start using the solveWithGuess() method.
|
||||
*
|
||||
* IDR(s) can also be used in a matrix-free context, see the following \link MatrixfreeSolverExample example \endlink.
|
||||
*
|
||||
* \sa class SimplicialCholesky, DiagonalPreconditioner, IdentityPreconditioner
|
||||
*/
|
||||
template <typename MatrixType_, typename Preconditioner_>
|
||||
class IDRS : public IterativeSolverBase<IDRS<MatrixType_, Preconditioner_> > {
|
||||
public:
|
||||
typedef MatrixType_ MatrixType;
|
||||
typedef typename MatrixType::Scalar Scalar;
|
||||
typedef typename MatrixType::RealScalar RealScalar;
|
||||
typedef Preconditioner_ Preconditioner;
|
||||
|
||||
private:
|
||||
typedef IterativeSolverBase<IDRS> Base;
|
||||
using Base::m_error;
|
||||
using Base::m_info;
|
||||
using Base::m_isInitialized;
|
||||
using Base::m_iterations;
|
||||
using Base::matrix;
|
||||
Index m_S;
|
||||
bool m_smoothing;
|
||||
RealScalar m_angle;
|
||||
bool m_residual;
|
||||
|
||||
public:
|
||||
/** Default constructor. */
|
||||
IDRS() : m_S(4), m_smoothing(false), m_angle(RealScalar(0.7)), m_residual(false) {}
|
||||
|
||||
/** Initialize the solver with matrix \a A for further \c Ax=b solving.
|
||||
|
||||
This constructor is a shortcut for the default constructor followed
|
||||
by a call to compute().
|
||||
|
||||
\warning this class stores a reference to the matrix A as well as some
|
||||
precomputed values that depend on it. Therefore, if \a A is changed
|
||||
this class becomes invalid. Call compute() to update it with the new
|
||||
matrix A, or modify a copy of A.
|
||||
*/
|
||||
template <typename _MatrixType, typename _Preconditioner>
|
||||
class IDRS : public IterativeSolverBase<IDRS<_MatrixType, _Preconditioner> >
|
||||
{
|
||||
template <typename MatrixDerived>
|
||||
explicit IDRS(const EigenBase<MatrixDerived>& A)
|
||||
: Base(A.derived()), m_S(4), m_smoothing(false), m_angle(RealScalar(0.7)), m_residual(false) {}
|
||||
|
||||
public:
|
||||
typedef _MatrixType MatrixType;
|
||||
typedef typename MatrixType::Scalar Scalar;
|
||||
typedef typename MatrixType::RealScalar RealScalar;
|
||||
typedef _Preconditioner Preconditioner;
|
||||
/** \internal */
|
||||
/** Loops over the number of columns of b and does the following:
|
||||
1. sets the tolerance and maxIterations
|
||||
2. Calls the function that has the core solver routine
|
||||
*/
|
||||
template <typename Rhs, typename Dest>
|
||||
void _solve_vector_with_guess_impl(const Rhs& b, Dest& x) const {
|
||||
m_iterations = Base::maxIterations();
|
||||
m_error = Base::m_tolerance;
|
||||
|
||||
private:
|
||||
typedef IterativeSolverBase<IDRS> Base;
|
||||
using Base::m_error;
|
||||
using Base::m_info;
|
||||
using Base::m_isInitialized;
|
||||
using Base::m_iterations;
|
||||
using Base::matrix;
|
||||
Index m_S;
|
||||
bool m_smoothing;
|
||||
RealScalar m_angle;
|
||||
bool m_residual;
|
||||
bool ret = internal::idrs(matrix(), b, x, Base::m_preconditioner, m_iterations, m_error, m_S, m_smoothing, m_angle,
|
||||
m_residual);
|
||||
|
||||
public:
|
||||
/** Default constructor. */
|
||||
IDRS(): m_S(4), m_smoothing(false), m_angle(RealScalar(0.7)), m_residual(false) {}
|
||||
m_info = (!ret) ? NumericalIssue : m_error <= Base::m_tolerance ? Success : NoConvergence;
|
||||
}
|
||||
|
||||
/** Initialize the solver with matrix \a A for further \c Ax=b solving.
|
||||
/** Sets the parameter S, indicating the dimension of the shadow space. Default is 4*/
|
||||
void setS(Index S) {
|
||||
if (S < 1) {
|
||||
S = 4;
|
||||
}
|
||||
|
||||
This constructor is a shortcut for the default constructor followed
|
||||
by a call to compute().
|
||||
m_S = S;
|
||||
}
|
||||
|
||||
\warning this class stores a reference to the matrix A as well as some
|
||||
precomputed values that depend on it. Therefore, if \a A is changed
|
||||
this class becomes invalid. Call compute() to update it with the new
|
||||
matrix A, or modify a copy of A.
|
||||
*/
|
||||
template <typename MatrixDerived>
|
||||
explicit IDRS(const EigenBase<MatrixDerived>& A) : Base(A.derived()), m_S(4), m_smoothing(false),
|
||||
m_angle(RealScalar(0.7)), m_residual(false) {}
|
||||
/** Switches off and on smoothing.
|
||||
Residual smoothing results in monotonically decreasing residual norms at
|
||||
the expense of two extra vectors of storage and a few extra vector
|
||||
operations. Although monotonic decrease of the residual norms is a
|
||||
desirable property, the rate of convergence of the unsmoothed process and
|
||||
the smoothed process is basically the same. Default is off */
|
||||
void setSmoothing(bool smoothing) { m_smoothing = smoothing; }
|
||||
|
||||
/** The angle must be a real scalar. In IDR(s), a value for the
|
||||
iteration parameter omega must be chosen in every s+1th step. The most
|
||||
natural choice is to select a value to minimize the norm of the next residual.
|
||||
This corresponds to the parameter omega = 0. In practice, this may lead to
|
||||
values of omega that are so small that the other iteration parameters
|
||||
cannot be computed with sufficient accuracy. In such cases it is better to
|
||||
increase the value of omega sufficiently such that a compromise is reached
|
||||
between accurate computations and reduction of the residual norm. The
|
||||
parameter angle = 0.7 ("maintaining the convergence strategy")
|
||||
results in such a compromise. */
|
||||
void setAngle(RealScalar angle) { m_angle = angle; }
|
||||
|
||||
/** \internal */
|
||||
/** Loops over the number of columns of b and does the following:
|
||||
1. sets the tolerence and maxIterations
|
||||
2. Calls the function that has the core solver routine
|
||||
*/
|
||||
template <typename Rhs, typename Dest>
|
||||
void _solve_vector_with_guess_impl(const Rhs& b, Dest& x) const
|
||||
{
|
||||
m_iterations = Base::maxIterations();
|
||||
m_error = Base::m_tolerance;
|
||||
|
||||
bool ret = internal::idrs(matrix(), b, x, Base::m_preconditioner, m_iterations, m_error, m_S,m_smoothing,m_angle,m_residual);
|
||||
|
||||
m_info = (!ret) ? NumericalIssue : m_error <= Base::m_tolerance ? Success : NoConvergence;
|
||||
}
|
||||
|
||||
/** Sets the parameter S, indicating the dimension of the shadow space. Default is 4*/
|
||||
void setS(Index S)
|
||||
{
|
||||
if (S < 1)
|
||||
{
|
||||
S = 4;
|
||||
}
|
||||
|
||||
m_S = S;
|
||||
}
|
||||
|
||||
/** Switches off and on smoothing.
|
||||
Residual smoothing results in monotonically decreasing residual norms at
|
||||
the expense of two extra vectors of storage and a few extra vector
|
||||
operations. Although monotonic decrease of the residual norms is a
|
||||
desirable property, the rate of convergence of the unsmoothed process and
|
||||
the smoothed process is basically the same. Default is off */
|
||||
void setSmoothing(bool smoothing)
|
||||
{
|
||||
m_smoothing=smoothing;
|
||||
}
|
||||
|
||||
/** The angle must be a real scalar. In IDR(s), a value for the
|
||||
iteration parameter omega must be chosen in every s+1th step. The most
|
||||
natural choice is to select a value to minimize the norm of the next residual.
|
||||
This corresponds to the parameter omega = 0. In practice, this may lead to
|
||||
values of omega that are so small that the other iteration parameters
|
||||
cannot be computed with sufficient accuracy. In such cases it is better to
|
||||
increase the value of omega sufficiently such that a compromise is reached
|
||||
between accurate computations and reduction of the residual norm. The
|
||||
parameter angle =0.7 (”maintaining the convergence strategy”)
|
||||
results in such a compromise. */
|
||||
void setAngle(RealScalar angle)
|
||||
{
|
||||
m_angle=angle;
|
||||
}
|
||||
|
||||
/** The parameter replace is a logical that determines whether a
|
||||
residual replacement strategy is employed to increase the accuracy of the
|
||||
solution. */
|
||||
void setResidualUpdate(bool update)
|
||||
{
|
||||
m_residual=update;
|
||||
}
|
||||
|
||||
};
|
||||
/** The parameter replace is a logical that determines whether a
|
||||
residual replacement strategy is employed to increase the accuracy of the
|
||||
solution. */
|
||||
void setResidualUpdate(bool update) { m_residual = update; }
|
||||
};
|
||||
|
||||
} // namespace Eigen
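A usage sketch tying together the setters defined in the class above (it assumes an already assembled SparseMatrix<double> A and VectorXd b; the parameter values are illustrative, not recommendations from the commit):

#include <Eigen/Sparse>
#include <unsupported/Eigen/IterativeSolvers>

Eigen::IDRS<Eigen::SparseMatrix<double> > idrs;
idrs.setS(8);                  // larger shadow space than the default 4
idrs.setSmoothing(true);       // monotonically decreasing residual norms
idrs.setAngle(0.7);            // the "maintaining the convergence" compromise described above
idrs.setResidualUpdate(true);  // enable the residual replacement strategy
idrs.compute(A);
Eigen::VectorXd x = idrs.solve(b);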
|
||||
|
||||
|
||||
476
libs/eigen/unsupported/Eigen/src/IterativeSolvers/IDRSTABL.h
Normal file
@@ -0,0 +1,476 @@
|
||||
// This file is part of Eigen, a lightweight C++ template library
|
||||
// for linear algebra.
|
||||
//
|
||||
// Copyright (C) 2020 Chris Schoutrop <c.e.m.schoutrop@tue.nl>
|
||||
// Copyright (C) 2020 Mischa Senders <m.j.senders@student.tue.nl>
|
||||
// Copyright (C) 2020 Lex Kuijpers <l.kuijpers@student.tue.nl>
|
||||
// Copyright (C) 2020 Jens Wehner <j.wehner@esciencecenter.nl>
|
||||
// Copyright (C) 2020 Jan van Dijk <j.v.dijk@tue.nl>
|
||||
// Copyright (C) 2020 Adithya Vijaykumar
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla
|
||||
// Public License v. 2.0. If a copy of the MPL was not distributed
|
||||
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
/*
|
||||
|
||||
The IDR(S)Stab(L) method is a combination of IDR(S) and BiCGStab(L)
|
||||
|
||||
This implementation of IDRSTABL is based on
|
||||
1. Aihara, K., Abe, K., & Ishiwata, E. (2014). A variant of IDRstab with
|
||||
reliable update strategies for solving sparse linear systems. Journal of
|
||||
Computational and Applied Mathematics, 259, 244-258.
|
||||
doi:10.1016/j.cam.2013.08.028
|
||||
2. Aihara, K., Abe, K., & Ishiwata, E. (2015). Preconditioned
|
||||
IDRSTABL Algorithms for Solving Nonsymmetric Linear Systems. International
|
||||
Journal of Applied Mathematics, 45(3).
|
||||
3. Saad, Y. (2003). Iterative Methods for Sparse Linear Systems:
|
||||
Second Edition. Philadelphia, PA: SIAM.
|
||||
4. Sonneveld, P., & Van Gijzen, M. B. (2009). IDR(s): A Family
|
||||
of Simple and Fast Algorithms for Solving Large Nonsymmetric Systems of Linear
|
||||
Equations. SIAM Journal on Scientific Computing, 31(2), 1035-1062.
|
||||
doi:10.1137/070685804
|
||||
5. Sonneveld, P. (2012). On the convergence behavior of IDR (s)
|
||||
and related methods. SIAM Journal on Scientific Computing, 34(5), A2576-A2598.
|
||||
|
||||
Right-preconditioning based on Ref. 3 is implemented here.
|
||||
*/
|
||||
|
||||
#ifndef EIGEN_IDRSTABL_H
|
||||
#define EIGEN_IDRSTABL_H
|
||||
|
||||
namespace Eigen {
|
||||
|
||||
namespace internal {
|
||||
|
||||
template <typename MatrixType, typename Rhs, typename Dest, typename Preconditioner>
|
||||
bool idrstabl(const MatrixType &mat, const Rhs &rhs, Dest &x, const Preconditioner &precond, Index &iters,
|
||||
typename Dest::RealScalar &tol_error, Index L, Index S) {
|
||||
/*
|
||||
Setup and type definitions.
|
||||
*/
|
||||
using numext::abs;
|
||||
using numext::sqrt;
|
||||
typedef typename Dest::Scalar Scalar;
|
||||
typedef typename Dest::RealScalar RealScalar;
|
||||
typedef Matrix<Scalar, Dynamic, 1> VectorType;
|
||||
typedef Matrix<Scalar, Dynamic, Dynamic, ColMajor> DenseMatrixType;
|
||||
|
||||
const Index N = x.rows();
|
||||
|
||||
Index k = 0; // Iteration counter
|
||||
const Index maxIters = iters;
|
||||
|
||||
const RealScalar rhs_norm = rhs.stableNorm();
|
||||
const RealScalar tol = tol_error * rhs_norm;
|
||||
|
||||
if (rhs_norm == 0) {
|
||||
/*
|
||||
If b==0, then the exact solution is x=0.
|
||||
rhs_norm is needed for other calculations anyway, so this exit is a freebie.
|
||||
*/
|
||||
x.setZero();
|
||||
tol_error = 0.0;
|
||||
return true;
|
||||
}
|
||||
// Construct decomposition objects beforehand.
|
||||
FullPivLU<DenseMatrixType> lu_solver;
|
||||
|
||||
if (S >= N || L >= N) {
|
||||
/*
|
||||
The matrix is very small, or the choice of L and S is very poor;
|
||||
in that case solving directly will be best.
|
||||
*/
|
||||
lu_solver.compute(DenseMatrixType(mat));
|
||||
x = lu_solver.solve(rhs);
|
||||
tol_error = (rhs - mat * x).stableNorm() / rhs_norm;
|
||||
return true;
|
||||
}
|
||||
|
||||
// Define maximum sizes to prevent any reallocation later on.
|
||||
DenseMatrixType u(N, L + 1);
|
||||
DenseMatrixType r(N, L + 1);
|
||||
|
||||
DenseMatrixType V(N * (L + 1), S);
|
||||
|
||||
VectorType alpha(S);
|
||||
VectorType gamma(L);
|
||||
VectorType update(N);
|
||||
|
||||
/*
|
||||
Main IDRSTABL algorithm
|
||||
*/
|
||||
// Set up the initial residual
|
||||
VectorType x0 = x;
|
||||
r.col(0) = rhs - mat * x;
|
||||
x.setZero(); // The final solution will be x0+x
|
||||
|
||||
tol_error = r.col(0).stableNorm();
|
||||
|
||||
// FOM = Full orthogonalisation method
|
||||
DenseMatrixType h_FOM = DenseMatrixType::Zero(S, S - 1);
|
||||
|
||||
// Construct an initial U matrix of size N x S
|
||||
DenseMatrixType U(N * (L + 1), S);
|
||||
for (Index col_index = 0; col_index < S; ++col_index) {
|
||||
// Arnoldi-like process to generate a set of orthogonal vectors spanning
|
||||
// {u,A*u,A*A*u,...,A^(S-1)*u}. This construction can be combined with the
|
||||
// Full Orthogonalization Method (FOM) from Ref.3 to provide a possible
|
||||
// early exit with no additional MV.
|
||||
if (col_index != 0) {
|
||||
/*
|
||||
Modified Gram-Schmidt strategy:
|
||||
*/
|
||||
VectorType w = mat * precond.solve(u.col(0));
|
||||
for (Index i = 0; i < col_index; ++i) {
|
||||
auto v = U.col(i).head(N);
|
||||
h_FOM(i, col_index - 1) = v.dot(w);
|
||||
w -= h_FOM(i, col_index - 1) * v;
|
||||
}
|
||||
u.col(0) = w;
|
||||
h_FOM(col_index, col_index - 1) = u.col(0).stableNorm();
|
||||
|
||||
if (abs(h_FOM(col_index, col_index - 1)) != RealScalar(0)) {
|
||||
/*
|
||||
This only happens if u is NOT exactly zero. In case it is exactly zero
|
||||
it would imply that this u has no component in the direction of the
|
||||
current residual.
|
||||
|
||||
By then setting u to zero it will not contribute any further (as it
|
||||
should). Whereas attempting to normalize results in division by zero.
|
||||
|
||||
Such cases occur if:
|
||||
1. The basis of dimension <S is sufficient to exactly solve the linear
|
||||
system. I.e. the current residual is in span{r,Ar,...A^{m-1}r}, where
|
||||
(m-1)<=S.
|
||||
2. Two vectors generated from r, Ar,... are (numerically)
|
||||
parallel.
|
||||
|
||||
In case 1, the exact solution to the system can be obtained from the
|
||||
"Full Orthogonalization Method" (Algorithm 6.4 in the book of Saad),
|
||||
without any additional MV.
|
||||
|
||||
Contrary to what one would suspect, the comparison with ==0.0 for
|
||||
floating-point types is intended here. Any arbitrary non-zero u is fine
|
||||
to continue, however if u contains either NaN or Inf the algorithm will
|
||||
break down.
|
||||
*/
|
||||
u.col(0) /= h_FOM(col_index, col_index - 1);
|
||||
}
|
||||
} else {
|
||||
u.col(0) = r.col(0);
|
||||
u.col(0).normalize();
|
||||
}
|
||||
|
||||
U.col(col_index).head(N) = u.col(0);
|
||||
}
|
||||
|
||||
if (S > 1) {
|
||||
// Check for early FOM exit.
|
||||
Scalar beta = r.col(0).stableNorm();
|
||||
VectorType e1 = VectorType::Zero(S - 1);
|
||||
e1(0) = beta;
|
||||
lu_solver.compute(h_FOM.topLeftCorner(S - 1, S - 1));
|
||||
VectorType y = lu_solver.solve(e1);
|
||||
VectorType x2 = x + U.topLeftCorner(N, S - 1) * y;
|
||||
|
||||
// Using proposition 6.7 in Saad, one MV can be saved to calculate the
|
||||
// residual
|
||||
RealScalar FOM_residual = (h_FOM(S - 1, S - 2) * y(S - 2) * U.col(S - 1).head(N)).stableNorm();
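// Added gloss (not in the commit): by Saad's proposition 6.7 the FOM residual equals
// h_FOM(S-1, S-2) * y(S-2) times a (near) unit basis vector, so taking the stable norm
// of that scaled column yields |h_{m+1,m} * e_m^T y| without forming rhs - mat * x2 explicitly.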
|
||||
|
||||
if (FOM_residual < tol) {
|
||||
// Exit, the FOM algorithm was already accurate enough
|
||||
iters = k;
|
||||
// Convert back to the unpreconditioned solution
|
||||
x = precond.solve(x2);
|
||||
// x contains the updates to x0, add those back to obtain the solution
|
||||
x += x0;
|
||||
tol_error = FOM_residual / rhs_norm;
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
Select an initial (N x S) matrix R0.
|
||||
1. Generate random R0, orthonormalize the result.
|
||||
2. This results in R0, however to save memory and compute we only need the
|
||||
adjoint of R0. This is given by the matrix R_T. Additionally, the matrix
|
||||
(mat.adjoint()*R_tilde).adjoint()=R_tilde.adjoint()*mat by the
|
||||
anti-distributivity property of the adjoint. This results in AR_T, which is
|
||||
constant if R_T does not have to be regenerated and can be precomputed.
|
||||
Based on reference 4, this has zero probability in exact arithmetic.
|
||||
*/
|
||||
|
||||
// Original IDRSTABL and Kensuke choose S random vectors:
|
||||
const HouseholderQR<DenseMatrixType> qr(DenseMatrixType::Random(N, S));
|
||||
DenseMatrixType R_T = (qr.householderQ() * DenseMatrixType::Identity(N, S)).adjoint();
|
||||
DenseMatrixType AR_T = DenseMatrixType(R_T * mat);
|
||||
|
||||
// Pre-allocate sigma.
|
||||
DenseMatrixType sigma(S, S);
|
||||
|
||||
bool reset_while = false; // Should the while loop be reset for some reason?
|
||||
|
||||
while (k < maxIters) {
|
||||
for (Index j = 1; j <= L; ++j) {
|
||||
/*
|
||||
The IDR Step
|
||||
*/
|
||||
// Construction of the sigma-matrix, and the decomposition of sigma.
|
||||
for (Index i = 0; i < S; ++i) {
|
||||
sigma.col(i).noalias() = AR_T * precond.solve(U.block(N * (j - 1), i, N, 1));
|
||||
}
|
||||
|
||||
lu_solver.compute(sigma);
|
||||
// Obtain the update coefficients alpha
|
||||
if (j == 1) {
|
||||
// alpha=inverse(sigma)*(R_T*r_0);
|
||||
alpha.noalias() = lu_solver.solve(R_T * r.col(0));
|
||||
} else {
|
||||
// alpha=inverse(sigma)*(AR_T*r_{j-2})
|
||||
alpha.noalias() = lu_solver.solve(AR_T * precond.solve(r.col(j - 2)));
|
||||
}
|
||||
|
||||
// Obtain new solution and residual from this update
|
||||
update.noalias() = U.topRows(N) * alpha;
|
||||
r.col(0) -= mat * precond.solve(update);
|
||||
x += update;
|
||||
|
||||
for (Index i = 1; i <= j - 2; ++i) {
|
||||
// This only affects the case L>2
|
||||
r.col(i) -= U.block(N * (i + 1), 0, N, S) * alpha;
|
||||
}
|
||||
if (j > 1) {
|
||||
// r=[r;A*r_{j-2}]
|
||||
r.col(j - 1).noalias() = mat * precond.solve(r.col(j - 2));
|
||||
}
|
||||
tol_error = r.col(0).stableNorm();
|
||||
|
||||
if (tol_error < tol) {
|
||||
// If at this point the algorithm has converged, exit.
|
||||
reset_while = true;
|
||||
break;
|
||||
}
|
||||
|
||||
bool break_normalization = false;
|
||||
for (Index q = 1; q <= S; ++q) {
|
||||
if (q == 1) {
|
||||
// u = r;
|
||||
u.leftCols(j + 1) = r.leftCols(j + 1);
|
||||
} else {
|
||||
// u=[u_1;u_2;...;u_j]
|
||||
u.leftCols(j) = u.middleCols(1, j);
|
||||
}
|
||||
|
||||
// Obtain the update coefficients beta implicitly
|
||||
// beta=lu_sigma.solve(AR_T * u.block(N * (j - 1), 0, N, 1)
|
||||
u.reshaped().head(u.rows() * j) -= U.topRows(N * j) * lu_solver.solve(AR_T * precond.solve(u.col(j - 1)));
|
||||
|
||||
// u=[u;Au_{j-1}]
|
||||
u.col(j).noalias() = mat * precond.solve(u.col(j - 1));
|
||||
|
||||
// Orthonormalize u_j to the columns of V_j(:,1:q-1)
|
||||
if (q > 1) {
|
||||
/*
|
||||
Modified Gram-Schmidt-like procedure to make u orthogonal to the
|
||||
columns of V from Ref. 1.
|
||||
|
||||
The vector mu from Ref. 1 is obtained implicitly:
|
||||
mu=V.block(N * j, 0, N, q - 1).adjoint() * u.block(N * j, 0, N, 1).
|
||||
*/
|
||||
for (Index i = 0; i <= q - 2; ++i) {
|
||||
auto v = V.col(i).segment(N * j, N);
|
||||
Scalar h = v.squaredNorm();
|
||||
h = v.dot(u.col(j)) / h;
|
||||
u.reshaped().head(u.rows() * (j + 1)) -= h * V.block(0, i, N * (j + 1), 1);
|
||||
}
|
||||
}
|
||||
// Normalize u and assign to a column of V
|
||||
Scalar normalization_constant = u.col(j).stableNorm();
|
||||
// If u is exactly zero, this will lead to a NaN. Small, non-zero u is
|
||||
// fine.
|
||||
if (normalization_constant == RealScalar(0.0)) {
|
||||
break_normalization = true;
|
||||
break;
|
||||
} else {
|
||||
u.leftCols(j + 1) /= normalization_constant;
|
||||
}
|
||||
|
||||
V.block(0, q - 1, N * (j + 1), 1).noalias() = u.reshaped().head(u.rows() * (j + 1));
|
||||
}
|
||||
|
||||
if (break_normalization == false) {
|
||||
U = V;
|
||||
}
|
||||
}
|
||||
if (reset_while) {
|
||||
break;
|
||||
}
|
||||
|
||||
// r=[r;mat*r_{L-1}]
|
||||
r.col(L).noalias() = mat * precond.solve(r.col(L - 1));
|
||||
|
||||
/*
|
||||
The polynomial step
|
||||
*/
|
||||
ColPivHouseholderQR<DenseMatrixType> qr_solver(r.rightCols(L));
|
||||
gamma.noalias() = qr_solver.solve(r.col(0));
|
||||
|
||||
// Update solution and residual using the "minimized residual coefficients"
|
||||
update.noalias() = r.leftCols(L) * gamma;
|
||||
x += update;
|
||||
r.col(0) -= mat * precond.solve(update);
|
||||
|
||||
// Update iteration info
|
||||
++k;
|
||||
tol_error = r.col(0).stableNorm();
|
||||
|
||||
if (tol_error < tol) {
|
||||
// Slightly early exit by moving the criterion before the update of U,
|
||||
// after the main while loop the result of that calculation would not be
|
||||
// needed.
|
||||
break;
|
||||
}
|
||||
|
||||
/*
|
||||
U=U0-sum(gamma_j*U_j)
|
||||
Consider the first iteration. Then U only contains U0, so at the start of
|
||||
the while-loop U should be U0. Therefore only the first N rows of U have to
|
||||
be updated.
|
||||
*/
|
||||
for (Index i = 1; i <= L; ++i) {
|
||||
U.topRows(N) -= U.block(N * i, 0, N, S) * gamma(i - 1);
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
Exit after the while loop terminated.
|
||||
*/
|
||||
iters = k;
|
||||
// Convert back to the unpreconditioned solution
|
||||
x = precond.solve(x);
|
||||
// x contains the updates to x0, add those back to obtain the solution
|
||||
x += x0;
|
||||
tol_error = tol_error / rhs_norm;
|
||||
return true;
|
||||
}
|
||||
|
||||
} // namespace internal
|
||||
|
||||
template <typename MatrixType_, typename Preconditioner_ = DiagonalPreconditioner<typename MatrixType_::Scalar>>
|
||||
class IDRSTABL;
|
||||
|
||||
namespace internal {
|
||||
|
||||
template <typename MatrixType_, typename Preconditioner_>
|
||||
struct traits<IDRSTABL<MatrixType_, Preconditioner_>> {
|
||||
typedef MatrixType_ MatrixType;
|
||||
typedef Preconditioner_ Preconditioner;
|
||||
};
|
||||
|
||||
} // namespace internal
|
||||
|
||||
/** \ingroup IterativeLinearSolvers_Module
|
||||
* \brief The IDR(s)STAB(l) is a combination of IDR(s) and BiCGSTAB(l). It is a
|
||||
* short-recurrences Krylov method for sparse square problems. It can outperform
|
||||
* both IDR(s) and BiCGSTAB(l). IDR(s)STAB(l) generally closely follows the
|
||||
* optimal GMRES convergence in terms of the number of Matrix-Vector products.
|
||||
* However, without the increasing cost per iteration of GMRES. IDR(s)STAB(l) is
|
||||
* suitable for both indefinite systems and systems with complex eigenvalues.
|
||||
*
|
||||
* This class allows solving for A.x = b sparse linear problems. The vectors x
|
||||
* and b can be either dense or sparse.
|
||||
*
|
||||
* \tparam MatrixType_ the type of the sparse matrix A, can be a dense or a
|
||||
* sparse matrix. \tparam Preconditioner_ the type of the preconditioner.
|
||||
* Default is DiagonalPreconditioner
|
||||
*
|
||||
* \implsparsesolverconcept
|
||||
*
|
||||
* The maximum number of iterations and tolerance value can be controlled via
|
||||
* the setMaxIterations() and setTolerance() methods. The defaults are the size
|
||||
* of the problem for the maximum number of iterations and
|
||||
* NumTraits<Scalar>::epsilon() for the tolerance.
|
||||
*
|
||||
* The tolerance is the maximum relative residual error: |Ax-b|/|b| for which
|
||||
* the linear system is considered solved.
|
||||
*
|
||||
* \b Performance: When using sparse matrices, best performance is achieved for
|
||||
* a row-major sparse matrix format. Moreover, in this case multi-threading can
|
||||
* be exploited if the user code is compiled with OpenMP enabled. See \ref
|
||||
* TopicMultiThreading for details.
|
||||
*
|
||||
* By default the iterations start with x=0 as an initial guess of the solution.
|
||||
* One can control the start using the solveWithGuess() method.
|
||||
*
|
||||
* IDR(s)STAB(l) can also be used in a matrix-free context, see the following
|
||||
* \link MatrixfreeSolverExample example \endlink.
|
||||
*
|
||||
* \sa class SimplicialCholesky, DiagonalPreconditioner, IdentityPreconditioner
|
||||
*/
|
||||
|
||||
template <typename MatrixType_, typename Preconditioner_>
class IDRSTABL : public IterativeSolverBase<IDRSTABL<MatrixType_, Preconditioner_>> {
  typedef IterativeSolverBase<IDRSTABL> Base;
  using Base::m_error;
  using Base::m_info;
  using Base::m_isInitialized;
  using Base::m_iterations;
  using Base::matrix;
  Index m_L;
  Index m_S;

 public:
  typedef MatrixType_ MatrixType;
  typedef typename MatrixType::Scalar Scalar;
  typedef typename MatrixType::RealScalar RealScalar;
  typedef Preconditioner_ Preconditioner;

 public:
  /** Default constructor. */
  IDRSTABL() : m_L(2), m_S(4) {}

  /** Initialize the solver with matrix \a A for further \c Ax=b solving.

      This constructor is a shortcut for the default constructor followed
      by a call to compute().

      \warning this class stores a reference to the matrix A as well as some
      precomputed values that depend on it. Therefore, if \a A is changed
      this class becomes invalid. Call compute() to update it with the new
      matrix A, or modify a copy of A.
  */
  template <typename MatrixDerived>
  explicit IDRSTABL(const EigenBase<MatrixDerived> &A) : Base(A.derived()), m_L(2), m_S(4) {}

  /** \internal */
  /** Loops over the number of columns of b and does the following:
      1. sets the tolerance and maxIterations
      2. Calls the function that has the core solver routine
  */
  template <typename Rhs, typename Dest>
  void _solve_vector_with_guess_impl(const Rhs &b, Dest &x) const {
    m_iterations = Base::maxIterations();
    m_error = Base::m_tolerance;
    bool ret = internal::idrstabl(matrix(), b, x, Base::m_preconditioner, m_iterations, m_error, m_L, m_S);

    m_info = (!ret) ? NumericalIssue : m_error <= 10 * Base::m_tolerance ? Success : NoConvergence;
  }

  /** Sets the parameter L, specifying the number of minimal residual steps
    * that are used. */
  void setL(Index L) {
    eigen_assert(L >= 1 && "L needs to be positive");
    m_L = L;
  }
  /** Sets the parameter S, specifying the dimension of the shadow residual
    * space. */
  void setS(Index S) {
    eigen_assert(S >= 1 && "S needs to be positive");
    m_S = S;
  }
};

} // namespace Eigen

#endif /* EIGEN_IDRSTABL_H */
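
For orientation, here is a minimal usage sketch of the solver defined above. It is not part of the commit; it assumes IDRSTABL is exported through unsupported/Eigen/IterativeSolvers like the other solvers of this module, and that A and b are filled elsewhere.

    #include <Eigen/SparseCore>
    #include <unsupported/Eigen/IterativeSolvers>

    // Sketch: solve A.x = b with IDR(s)STAB(l); the L and S values are illustrative.
    Eigen::SparseMatrix<double> A;   // assumed to be filled elsewhere
    Eigen::VectorXd b;               // assumed to be filled elsewhere
    Eigen::IDRSTABL<Eigen::SparseMatrix<double>> solver;
    solver.setL(2);                  // number of minimal residual steps
    solver.setS(4);                  // dimension of the shadow residual space
    solver.setTolerance(1e-10);      // relative residual |Ax-b|/|b|
    solver.compute(A);
    Eigen::VectorXd x = solver.solve(b);
    Eigen::VectorXd x2 = solver.solveWithGuess(b, x);  // optionally restart from a guess

setTolerance(), compute(), solve() and solveWithGuess() come from IterativeSolverBase; only setL() and setS() are specific to this class.
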
@@ -10,16 +10,18 @@
#ifndef EIGEN_INCOMPLETE_LU_H
#define EIGEN_INCOMPLETE_LU_H

#include "./InternalHeaderCheck.h"

namespace Eigen {

template <typename _Scalar>
class IncompleteLU : public SparseSolverBase<IncompleteLU<_Scalar> >
template <typename Scalar_>
class IncompleteLU : public SparseSolverBase<IncompleteLU<Scalar_> >
{
protected:
typedef SparseSolverBase<IncompleteLU<_Scalar> > Base;
typedef SparseSolverBase<IncompleteLU<Scalar_> > Base;
using Base::m_isInitialized;

typedef _Scalar Scalar;
typedef Scalar_ Scalar;
typedef Matrix<Scalar,Dynamic,1> Vector;
typedef typename Vector::Index Index;
typedef SparseMatrix<Scalar,RowMajor> FactorType;

@@ -0,0 +1,3 @@
#ifndef EIGEN_ITERATIVE_SOLVERS_MODULE_H
#error "Please include unsupported/Eigen/IterativeSolvers instead of including headers inside the src directory directly."
#endif
@@ -58,6 +58,8 @@
#ifndef EIGEN_ITERATION_CONTROLLER_H
#define EIGEN_ITERATION_CONTROLLER_H

#include "./InternalHeaderCheck.h"

namespace Eigen {

/** \ingroup IterativeLinearSolvers_Module

@@ -10,10 +10,12 @@
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.


#ifndef EIGEN_MINRES_H_
#define EIGEN_MINRES_H_
#ifndef EIGEN_MINRES_H
#define EIGEN_MINRES_H


#include "./InternalHeaderCheck.h"

namespace Eigen {

namespace internal {
@@ -138,17 +140,17 @@ namespace Eigen {

}

template< typename _MatrixType, int _UpLo=Lower,
typename _Preconditioner = IdentityPreconditioner>
template< typename MatrixType_, int UpLo_=Lower,
typename Preconditioner_ = IdentityPreconditioner>
class MINRES;

namespace internal {

template< typename _MatrixType, int _UpLo, typename _Preconditioner>
struct traits<MINRES<_MatrixType,_UpLo,_Preconditioner> >
template< typename MatrixType_, int UpLo_, typename Preconditioner_>
struct traits<MINRES<MatrixType_,UpLo_,Preconditioner_> >
{
typedef _MatrixType MatrixType;
typedef _Preconditioner Preconditioner;
typedef MatrixType_ MatrixType;
typedef Preconditioner_ Preconditioner;
};

}
@@ -160,10 +162,10 @@ namespace Eigen {
* of Paige and Saunders (1975). The sparse matrix A must be symmetric (possibly indefinite).
* The vectors x and b can be either dense or sparse.
*
* \tparam _MatrixType the type of the sparse matrix A, can be a dense or a sparse matrix.
* \tparam _UpLo the triangular part that will be used for the computations. It can be Lower,
* \tparam MatrixType_ the type of the sparse matrix A, can be a dense or a sparse matrix.
* \tparam UpLo_ the triangular part that will be used for the computations. It can be Lower,
* Upper, or Lower|Upper in which the full matrix entries will be considered. Default is Lower.
* \tparam _Preconditioner the type of the preconditioner. Default is DiagonalPreconditioner
* \tparam Preconditioner_ the type of the preconditioner. Default is DiagonalPreconditioner
*
* The maximal number of iterations and tolerance value can be controlled via the setMaxIterations()
* and setTolerance() methods. The defaults are the size of the problem for the maximal number of iterations
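
A rough usage sketch for the MINRES solver described in the comment block above (illustrative only; A is assumed to be a symmetric, possibly indefinite, sparse matrix and b a matching right-hand side):

    Eigen::MINRES<Eigen::SparseMatrix<double>, Eigen::Lower,
                  Eigen::IdentityPreconditioner> mr;
    mr.setMaxIterations(500);
    mr.setTolerance(1e-9);
    mr.compute(A);                     // only the lower triangular part of A is referenced
    Eigen::VectorXd x = mr.solve(b);
    if (mr.info() != Eigen::Success) {
      // no convergence within 500 iterations at the requested tolerance
    }
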
@@ -191,8 +193,8 @@ namespace Eigen {
*
* \sa class ConjugateGradient, BiCGSTAB, SimplicialCholesky, DiagonalPreconditioner, IdentityPreconditioner
*/
template< typename _MatrixType, int _UpLo, typename _Preconditioner>
class MINRES : public IterativeSolverBase<MINRES<_MatrixType,_UpLo,_Preconditioner> >
template< typename MatrixType_, int UpLo_, typename Preconditioner_>
class MINRES : public IterativeSolverBase<MINRES<MatrixType_,UpLo_,Preconditioner_> >
{

typedef IterativeSolverBase<MINRES> Base;
@@ -203,12 +205,12 @@ namespace Eigen {
using Base::m_isInitialized;
public:
using Base::_solve_impl;
typedef _MatrixType MatrixType;
typedef MatrixType_ MatrixType;
typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::RealScalar RealScalar;
typedef _Preconditioner Preconditioner;
typedef Preconditioner_ Preconditioner;

enum {UpLo = _UpLo};
enum {UpLo = UpLo_};

public:

@@ -243,12 +245,12 @@ namespace Eigen {
&& (!MatrixType::IsRowMajor)
&& (!NumTraits<Scalar>::IsComplex)
};
typedef typename internal::conditional<TransposeInput,Transpose<const ActualMatrixType>, ActualMatrixType const&>::type RowMajorWrapper;
EIGEN_STATIC_ASSERT(EIGEN_IMPLIES(MatrixWrapper::MatrixFree,UpLo==(Lower|Upper)),MATRIX_FREE_CONJUGATE_GRADIENT_IS_COMPATIBLE_WITH_UPPER_UNION_LOWER_MODE_ONLY);
typedef typename internal::conditional<UpLo==(Lower|Upper),
typedef std::conditional_t<TransposeInput,Transpose<const ActualMatrixType>, ActualMatrixType const&> RowMajorWrapper;
EIGEN_STATIC_ASSERT(internal::check_implication(MatrixWrapper::MatrixFree, UpLo==(Lower|Upper)),MATRIX_FREE_CONJUGATE_GRADIENT_IS_COMPATIBLE_WITH_UPPER_UNION_LOWER_MODE_ONLY);
typedef std::conditional_t<UpLo==(Lower|Upper),
RowMajorWrapper,
typename MatrixWrapper::template ConstSelfAdjointViewReturnType<UpLo>::Type
>::type SelfAdjointWrapper;
> SelfAdjointWrapper;

m_iterations = Base::maxIterations();
m_error = Base::m_tolerance;

@@ -10,6 +10,8 @@
#ifndef EIGEN_ITERSCALING_H
#define EIGEN_ITERSCALING_H

#include "./InternalHeaderCheck.h"

namespace Eigen {

/**
@@ -38,17 +40,17 @@ namespace Eigen {
* x = scal.RightScaling().cwiseProduct(x);
* \endcode
*
* \tparam _MatrixType the type of the matrix. It should be a real square sparsematrix
* \tparam MatrixType_ the type of the matrix. It should be a real square sparsematrix
*
* References : D. Ruiz and B. Ucar, A Symmetry Preserving Algorithm for Matrix Scaling, INRIA Research report RR-7552
*
* \sa \ref IncompleteLUT
*/
template<typename _MatrixType>
template<typename MatrixType_>
class IterScaling
{
public:
typedef _MatrixType MatrixType;
typedef MatrixType_ MatrixType;
typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::Index Index;


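The usage snippet in the comment above only shows the final rescaling of x; the full pattern, assuming the usual compute()/LeftScaling() counterparts of the RightScaling() call shown there, is roughly:

    Eigen::IterScaling<Eigen::SparseMatrix<double>> scal;
    scal.computeRef(A);                          // scales A in place (assumed API)
    b = scal.LeftScaling().cwiseProduct(b);      // scale the right-hand side
    // ... solve the scaled system A x = b with any solver ...
    x = scal.RightScaling().cwiseProduct(x);     // map the solution back
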
@@ -0,0 +1,3 @@
#ifndef EIGEN_KRONECKER_PRODUCT_MODULE_H
#error "Please include unsupported/Eigen/KroneckerProduct instead of including headers inside the src directory directly."
#endif
@@ -12,6 +12,8 @@
|
||||
#ifndef KRONECKER_TENSOR_PRODUCT_H
|
||||
#define KRONECKER_TENSOR_PRODUCT_H
|
||||
|
||||
#include "./InternalHeaderCheck.h"
|
||||
|
||||
namespace Eigen {
|
||||
|
||||
/*!
|
||||
@@ -152,10 +154,10 @@ void KroneckerProductSparse<Lhs,Rhs>::evalTo(Dest& dst) const
|
||||
|
||||
// 1 - evaluate the operands if needed:
|
||||
typedef typename internal::nested_eval<Lhs,Dynamic>::type Lhs1;
|
||||
typedef typename internal::remove_all<Lhs1>::type Lhs1Cleaned;
|
||||
typedef internal::remove_all_t<Lhs1> Lhs1Cleaned;
|
||||
const Lhs1 lhs1(m_A);
|
||||
typedef typename internal::nested_eval<Rhs,Dynamic>::type Rhs1;
|
||||
typedef typename internal::remove_all<Rhs1>::type Rhs1Cleaned;
|
||||
typedef internal::remove_all_t<Rhs1> Rhs1Cleaned;
|
||||
const Rhs1 rhs1(m_B);
|
||||
|
||||
// 2 - construct respective iterators
|
||||
@@ -198,30 +200,30 @@ void KroneckerProductSparse<Lhs,Rhs>::evalTo(Dest& dst) const
|
||||
|
||||
namespace internal {
|
||||
|
||||
template<typename _Lhs, typename _Rhs>
|
||||
struct traits<KroneckerProduct<_Lhs,_Rhs> >
|
||||
template<typename Lhs_, typename Rhs_>
|
||||
struct traits<KroneckerProduct<Lhs_,Rhs_> >
|
||||
{
|
||||
typedef typename remove_all<_Lhs>::type Lhs;
|
||||
typedef typename remove_all<_Rhs>::type Rhs;
|
||||
typedef remove_all_t<Lhs_> Lhs;
|
||||
typedef remove_all_t<Rhs_> Rhs;
|
||||
typedef typename ScalarBinaryOpTraits<typename Lhs::Scalar, typename Rhs::Scalar>::ReturnType Scalar;
|
||||
typedef typename promote_index_type<typename Lhs::StorageIndex, typename Rhs::StorageIndex>::type StorageIndex;
|
||||
|
||||
enum {
|
||||
Rows = size_at_compile_time<traits<Lhs>::RowsAtCompileTime, traits<Rhs>::RowsAtCompileTime>::ret,
|
||||
Cols = size_at_compile_time<traits<Lhs>::ColsAtCompileTime, traits<Rhs>::ColsAtCompileTime>::ret,
|
||||
MaxRows = size_at_compile_time<traits<Lhs>::MaxRowsAtCompileTime, traits<Rhs>::MaxRowsAtCompileTime>::ret,
|
||||
MaxCols = size_at_compile_time<traits<Lhs>::MaxColsAtCompileTime, traits<Rhs>::MaxColsAtCompileTime>::ret
|
||||
Rows = size_at_compile_time(traits<Lhs>::RowsAtCompileTime, traits<Rhs>::RowsAtCompileTime),
|
||||
Cols = size_at_compile_time(traits<Lhs>::ColsAtCompileTime, traits<Rhs>::ColsAtCompileTime),
|
||||
MaxRows = size_at_compile_time(traits<Lhs>::MaxRowsAtCompileTime, traits<Rhs>::MaxRowsAtCompileTime),
|
||||
MaxCols = size_at_compile_time(traits<Lhs>::MaxColsAtCompileTime, traits<Rhs>::MaxColsAtCompileTime)
|
||||
};
|
||||
|
||||
typedef Matrix<Scalar,Rows,Cols> ReturnType;
|
||||
};
|
||||
|
||||
template<typename _Lhs, typename _Rhs>
|
||||
struct traits<KroneckerProductSparse<_Lhs,_Rhs> >
|
||||
template<typename Lhs_, typename Rhs_>
|
||||
struct traits<KroneckerProductSparse<Lhs_,Rhs_> >
|
||||
{
|
||||
typedef MatrixXpr XprKind;
|
||||
typedef typename remove_all<_Lhs>::type Lhs;
|
||||
typedef typename remove_all<_Rhs>::type Rhs;
|
||||
typedef remove_all_t<Lhs_> Lhs;
|
||||
typedef remove_all_t<Rhs_> Rhs;
|
||||
typedef typename ScalarBinaryOpTraits<typename Lhs::Scalar, typename Rhs::Scalar>::ReturnType Scalar;
|
||||
typedef typename cwise_promote_storage_type<typename traits<Lhs>::StorageKind, typename traits<Rhs>::StorageKind, scalar_product_op<typename Lhs::Scalar, typename Rhs::Scalar> >::ret StorageKind;
|
||||
typedef typename promote_index_type<typename Lhs::StorageIndex, typename Rhs::StorageIndex>::type StorageIndex;
|
||||
@@ -230,10 +232,10 @@ struct traits<KroneckerProductSparse<_Lhs,_Rhs> >
|
||||
LhsFlags = Lhs::Flags,
|
||||
RhsFlags = Rhs::Flags,
|
||||
|
||||
RowsAtCompileTime = size_at_compile_time<traits<Lhs>::RowsAtCompileTime, traits<Rhs>::RowsAtCompileTime>::ret,
|
||||
ColsAtCompileTime = size_at_compile_time<traits<Lhs>::ColsAtCompileTime, traits<Rhs>::ColsAtCompileTime>::ret,
|
||||
MaxRowsAtCompileTime = size_at_compile_time<traits<Lhs>::MaxRowsAtCompileTime, traits<Rhs>::MaxRowsAtCompileTime>::ret,
|
||||
MaxColsAtCompileTime = size_at_compile_time<traits<Lhs>::MaxColsAtCompileTime, traits<Rhs>::MaxColsAtCompileTime>::ret,
|
||||
RowsAtCompileTime = size_at_compile_time(traits<Lhs>::RowsAtCompileTime, traits<Rhs>::RowsAtCompileTime),
|
||||
ColsAtCompileTime = size_at_compile_time(traits<Lhs>::ColsAtCompileTime, traits<Rhs>::ColsAtCompileTime),
|
||||
MaxRowsAtCompileTime = size_at_compile_time(traits<Lhs>::MaxRowsAtCompileTime, traits<Rhs>::MaxRowsAtCompileTime),
|
||||
MaxColsAtCompileTime = size_at_compile_time(traits<Lhs>::MaxColsAtCompileTime, traits<Rhs>::MaxColsAtCompileTime),
|
||||
|
||||
EvalToRowMajor = (int(LhsFlags) & int(RhsFlags) & RowMajorBit),
|
||||
RemovedBits = ~(EvalToRowMajor ? 0 : RowMajorBit),
|
||||
|
||||
@@ -0,0 +1,3 @@
|
||||
#ifndef EIGEN_LEVENBERGMARQUARDT_MODULE_H
|
||||
#error "Please include unsupported/Eigen/LevenbergMarquardt instead of including headers inside the src directory directly."
|
||||
#endif
|
||||
@@ -12,6 +12,8 @@
|
||||
#ifndef EIGEN_LMCOVAR_H
|
||||
#define EIGEN_LMCOVAR_H
|
||||
|
||||
#include "./InternalHeaderCheck.h"
|
||||
|
||||
namespace Eigen {
|
||||
|
||||
namespace internal {
|
||||
|
||||
@@ -14,6 +14,8 @@
|
||||
#ifndef EIGEN_LMONESTEP_H
|
||||
#define EIGEN_LMONESTEP_H
|
||||
|
||||
#include "./InternalHeaderCheck.h"
|
||||
|
||||
namespace Eigen {
|
||||
|
||||
template<typename FunctorType>
|
||||
|
||||
@@ -12,6 +12,8 @@
|
||||
#ifndef EIGEN_LMPAR_H
|
||||
#define EIGEN_LMPAR_H
|
||||
|
||||
#include "./InternalHeaderCheck.h"
|
||||
|
||||
namespace Eigen {
|
||||
|
||||
namespace internal {
|
||||
|
||||
@@ -15,6 +15,8 @@
|
||||
#ifndef EIGEN_LMQRSOLV_H
|
||||
#define EIGEN_LMQRSOLV_H
|
||||
|
||||
#include "./InternalHeaderCheck.h"
|
||||
|
||||
namespace Eigen {
|
||||
|
||||
namespace internal {
|
||||
@@ -98,9 +100,9 @@ void lmqrsolv(
|
||||
x = iPerm * wa;
|
||||
}
|
||||
|
||||
template <typename Scalar, int _Options, typename Index>
|
||||
template <typename Scalar, int Options_, typename Index>
|
||||
void lmqrsolv(
|
||||
SparseMatrix<Scalar,_Options,Index> &s,
|
||||
SparseMatrix<Scalar,Options_,Index> &s,
|
||||
const PermutationMatrix<Dynamic,Dynamic> &iPerm,
|
||||
const Matrix<Scalar,Dynamic,1> &diag,
|
||||
const Matrix<Scalar,Dynamic,1> &qtb,
|
||||
|
||||
@@ -20,6 +20,8 @@
|
||||
#define EIGEN_LEVENBERGMARQUARDT_H
|
||||
|
||||
|
||||
#include "./InternalHeaderCheck.h"
|
||||
|
||||
namespace Eigen {
|
||||
namespace LevenbergMarquardtSpace {
|
||||
enum Status {
|
||||
@@ -38,10 +40,10 @@ namespace LevenbergMarquardtSpace {
|
||||
};
|
||||
}
|
||||
|
||||
template <typename _Scalar, int NX=Dynamic, int NY=Dynamic>
|
||||
template <typename Scalar_, int NX=Dynamic, int NY=Dynamic>
|
||||
struct DenseFunctor
|
||||
{
|
||||
typedef _Scalar Scalar;
|
||||
typedef Scalar_ Scalar;
|
||||
enum {
|
||||
InputsAtCompileTime = NX,
|
||||
ValuesAtCompileTime = NY
|
||||
@@ -65,11 +67,11 @@ struct DenseFunctor
|
||||
// should be defined in derived classes
|
||||
};
|
||||
|
||||
template <typename _Scalar, typename _Index>
|
||||
template <typename Scalar_, typename Index_>
|
||||
struct SparseFunctor
|
||||
{
|
||||
typedef _Scalar Scalar;
|
||||
typedef _Index Index;
|
||||
typedef Scalar_ Scalar;
|
||||
typedef Index_ Index;
|
||||
typedef Matrix<Scalar,Dynamic,1> InputType;
|
||||
typedef Matrix<Scalar,Dynamic,1> ValueType;
|
||||
typedef SparseMatrix<Scalar, ColMajor, Index> JacobianType;
|
||||
@@ -106,11 +108,11 @@ void lmpar2(const QRSolver &qr, const VectorType &diag, const VectorType &qtb,
|
||||
* Check wikipedia for more information.
|
||||
* http://en.wikipedia.org/wiki/Levenberg%E2%80%93Marquardt_algorithm
|
||||
*/
|
||||
template<typename _FunctorType>
|
||||
template<typename FunctorType_>
|
||||
class LevenbergMarquardt : internal::no_assignment_operator
|
||||
{
|
||||
public:
|
||||
typedef _FunctorType FunctorType;
|
||||
typedef FunctorType_ FunctorType;
|
||||
typedef typename FunctorType::QRSolver QRSolver;
|
||||
typedef typename FunctorType::JacobianType JacobianType;
|
||||
typedef typename JacobianType::Scalar Scalar;
|
||||
|
||||
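
To make the DenseFunctor/LevenbergMarquardt interface renamed above concrete, here is a small hypothetical least-squares sketch; the residual function, its Jacobian, and the starting point are invented for illustration, and DenseFunctor is assumed to supply the InputType/ValueType/JacobianType/QRSolver typedefs the solver expects:

    // Hypothetical residuals r(p) = (p0 - 1, p1 - 2, p0*p1 - 2), minimized over p.
    struct MyLMFunctor : Eigen::DenseFunctor<double> {
      MyLMFunctor() : Eigen::DenseFunctor<double>(2, 3) {}
      int operator()(const InputType &p, ValueType &r) const {
        r(0) = p(0) - 1.0;
        r(1) = p(1) - 2.0;
        r(2) = p(0) * p(1) - 2.0;
        return 0;
      }
      int df(const InputType &p, JacobianType &J) const {
        J << 1.0,  0.0,
             0.0,  1.0,
             p(1), p(0);
        return 0;
      }
    };

    MyLMFunctor functor;
    Eigen::LevenbergMarquardt<MyLMFunctor> lm(functor);
    Eigen::VectorXd p = Eigen::VectorXd::Zero(2);
    Eigen::LevenbergMarquardtSpace::Status status = lm.minimize(p);
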
@@ -0,0 +1,3 @@
|
||||
#ifndef EIGEN_MATRIX_FUNCTIONS_MODULE_H
|
||||
#error "Please include unsupported/Eigen/MatrixFunctions instead of including headers inside the src directory directly."
|
||||
#endif
|
||||
@@ -13,6 +13,8 @@
|
||||
|
||||
#include "StemFunction.h"
|
||||
|
||||
#include "./InternalHeaderCheck.h"
|
||||
|
||||
namespace Eigen {
|
||||
namespace internal {
|
||||
|
||||
|
||||
@@ -13,6 +13,8 @@
|
||||
#include "StemFunction.h"
|
||||
|
||||
|
||||
#include "./InternalHeaderCheck.h"
|
||||
|
||||
namespace Eigen {
|
||||
|
||||
namespace internal {
|
||||
@@ -494,7 +496,7 @@ template<typename Derived> class MatrixFunctionReturnValue
|
||||
inline void evalTo(ResultType& result) const
|
||||
{
|
||||
typedef typename internal::nested_eval<Derived, 10>::type NestedEvalType;
|
||||
typedef typename internal::remove_all<NestedEvalType>::type NestedEvalTypeClean;
|
||||
typedef internal::remove_all_t<NestedEvalType> NestedEvalTypeClean;
|
||||
typedef internal::traits<NestedEvalTypeClean> Traits;
|
||||
typedef std::complex<typename NumTraits<Scalar>::Real> ComplexScalar;
|
||||
typedef Matrix<ComplexScalar, Dynamic, Dynamic, 0, Traits::RowsAtCompileTime, Traits::ColsAtCompileTime> DynMatrixType;
|
||||
|
||||
@@ -11,6 +11,8 @@
|
||||
#ifndef EIGEN_MATRIX_LOGARITHM
|
||||
#define EIGEN_MATRIX_LOGARITHM
|
||||
|
||||
#include "./InternalHeaderCheck.h"
|
||||
|
||||
namespace Eigen {
|
||||
|
||||
namespace internal {
|
||||
@@ -134,7 +136,7 @@ void matrix_log_compute_pade(MatrixType& result, const MatrixType& T, int degree
|
||||
typedef typename NumTraits<typename MatrixType::Scalar>::Real RealScalar;
|
||||
const int minPadeDegree = 3;
|
||||
const int maxPadeDegree = 11;
|
||||
assert(degree >= minPadeDegree && degree <= maxPadeDegree);
|
||||
eigen_assert(degree >= minPadeDegree && degree <= maxPadeDegree);
|
||||
// FIXME this creates float-conversion-warnings if these are enabled.
|
||||
// Either manually convert each value, or disable the warning locally
|
||||
const RealScalar nodes[][maxPadeDegree] = {
|
||||
@@ -332,7 +334,7 @@ public:
|
||||
inline void evalTo(ResultType& result) const
|
||||
{
|
||||
typedef typename internal::nested_eval<Derived, 10>::type DerivedEvalType;
|
||||
typedef typename internal::remove_all<DerivedEvalType>::type DerivedEvalTypeClean;
|
||||
typedef internal::remove_all_t<DerivedEvalType> DerivedEvalTypeClean;
|
||||
typedef internal::traits<DerivedEvalTypeClean> Traits;
|
||||
typedef std::complex<typename NumTraits<Scalar>::Real> ComplexScalar;
|
||||
typedef Matrix<ComplexScalar, Dynamic, Dynamic, 0, Traits::RowsAtCompileTime, Traits::ColsAtCompileTime> DynMatrixType;
|
||||
|
||||
@@ -10,6 +10,8 @@
|
||||
#ifndef EIGEN_MATRIX_POWER
|
||||
#define EIGEN_MATRIX_POWER
|
||||
|
||||
#include "./InternalHeaderCheck.h"
|
||||
|
||||
namespace Eigen {
|
||||
|
||||
template<typename MatrixType> class MatrixPower;
|
||||
|
||||
@@ -10,6 +10,8 @@
|
||||
#ifndef EIGEN_MATRIX_SQUARE_ROOT
|
||||
#define EIGEN_MATRIX_SQUARE_ROOT
|
||||
|
||||
#include "./InternalHeaderCheck.h"
|
||||
|
||||
namespace Eigen {
|
||||
|
||||
namespace internal {
|
||||
@@ -336,7 +338,7 @@ template<typename Derived> class MatrixSquareRootReturnValue
|
||||
inline void evalTo(ResultType& result) const
|
||||
{
|
||||
typedef typename internal::nested_eval<Derived, 10>::type DerivedEvalType;
|
||||
typedef typename internal::remove_all<DerivedEvalType>::type DerivedEvalTypeClean;
|
||||
typedef internal::remove_all_t<DerivedEvalType> DerivedEvalTypeClean;
|
||||
DerivedEvalType tmp(m_src);
|
||||
internal::matrix_sqrt_compute<DerivedEvalTypeClean>::run(tmp, result);
|
||||
}
|
||||
|
||||
@@ -10,6 +10,8 @@
|
||||
#ifndef EIGEN_STEM_FUNCTION
|
||||
#define EIGEN_STEM_FUNCTION
|
||||
|
||||
#include "./InternalHeaderCheck.h"
|
||||
|
||||
namespace Eigen {
|
||||
|
||||
namespace internal {
|
||||
|
||||
@@ -0,0 +1,3 @@
|
||||
#ifndef EIGEN_MOREVECTORIZATION_MODULE_H
|
||||
#error "Please include unsupported/Eigen/MoreVectorization instead of including headers inside the src directory directly."
|
||||
#endif
|
||||
@@ -11,6 +11,8 @@
|
||||
#ifndef EIGEN_MOREVECTORIZATION_MATHFUNCTIONS_H
|
||||
#define EIGEN_MOREVECTORIZATION_MATHFUNCTIONS_H
|
||||
|
||||
#include "./InternalHeaderCheck.h"
|
||||
|
||||
namespace Eigen {
|
||||
|
||||
namespace internal {
|
||||
@@ -22,20 +24,20 @@ template<typename Packet> inline static Packet pasin(Packet a) { return std::asi
|
||||
|
||||
template<> EIGEN_DONT_INLINE Packet4f pasin(Packet4f x)
|
||||
{
|
||||
_EIGEN_DECLARE_CONST_Packet4f(half, 0.5);
|
||||
_EIGEN_DECLARE_CONST_Packet4f(minus_half, -0.5);
|
||||
_EIGEN_DECLARE_CONST_Packet4f(3half, 1.5);
|
||||
EIGEN_DECLARE_CONST_Packet4f(half, 0.5);
|
||||
EIGEN_DECLARE_CONST_Packet4f(minus_half, -0.5);
|
||||
EIGEN_DECLARE_CONST_Packet4f(3half, 1.5);
|
||||
|
||||
_EIGEN_DECLARE_CONST_Packet4f_FROM_INT(sign_mask, 0x80000000);
|
||||
EIGEN_DECLARE_CONST_Packet4f_FROM_INT(sign_mask, 0x80000000);
|
||||
|
||||
_EIGEN_DECLARE_CONST_Packet4f(pi, 3.141592654);
|
||||
_EIGEN_DECLARE_CONST_Packet4f(pi_over_2, 3.141592654*0.5);
|
||||
EIGEN_DECLARE_CONST_Packet4f(pi, 3.141592654);
|
||||
EIGEN_DECLARE_CONST_Packet4f(pi_over_2, 3.141592654*0.5);
|
||||
|
||||
_EIGEN_DECLARE_CONST_Packet4f(asin1, 4.2163199048E-2);
|
||||
_EIGEN_DECLARE_CONST_Packet4f(asin2, 2.4181311049E-2);
|
||||
_EIGEN_DECLARE_CONST_Packet4f(asin3, 4.5470025998E-2);
|
||||
_EIGEN_DECLARE_CONST_Packet4f(asin4, 7.4953002686E-2);
|
||||
_EIGEN_DECLARE_CONST_Packet4f(asin5, 1.6666752422E-1);
|
||||
EIGEN_DECLARE_CONST_Packet4f(asin1, 4.2163199048E-2);
|
||||
EIGEN_DECLARE_CONST_Packet4f(asin2, 2.4181311049E-2);
|
||||
EIGEN_DECLARE_CONST_Packet4f(asin3, 4.5470025998E-2);
|
||||
EIGEN_DECLARE_CONST_Packet4f(asin4, 7.4953002686E-2);
|
||||
EIGEN_DECLARE_CONST_Packet4f(asin5, 1.6666752422E-1);
|
||||
|
||||
Packet4f a = pabs(x);//got the absolute value
|
||||
|
||||
|
||||
@@ -13,6 +13,8 @@
|
||||
#ifndef EIGEN_HYBRIDNONLINEARSOLVER_H
|
||||
#define EIGEN_HYBRIDNONLINEARSOLVER_H
|
||||
|
||||
#include "./InternalHeaderCheck.h"
|
||||
|
||||
namespace Eigen {
|
||||
|
||||
namespace HybridNonLinearSolverSpace {
|
||||
@@ -428,7 +430,7 @@ HybridNonLinearSolver<FunctorType,Scalar>::solveNumericalDiffOneStep(FVectorType
|
||||
using std::sqrt;
|
||||
using std::abs;
|
||||
|
||||
assert(x.size()==n); // check the caller is not cheating us
|
||||
eigen_assert(x.size()==n); // check the caller is not cheating us
|
||||
|
||||
Index j;
|
||||
std::vector<JacobiRotation<Scalar> > v_givens(n), w_givens(n);
|
||||
|
||||
@@ -0,0 +1,3 @@
|
||||
#ifndef EIGEN_NONLINEAROPTIMIZATION_MODULE_H
|
||||
#error "Please include unsupported/Eigen/NonLinearOptimization instead of including headers inside the src directory directly."
|
||||
#endif
|
||||
@@ -13,6 +13,8 @@
|
||||
#ifndef EIGEN_LEVENBERGMARQUARDT__H
|
||||
#define EIGEN_LEVENBERGMARQUARDT__H
|
||||
|
||||
#include "./InternalHeaderCheck.h"
|
||||
|
||||
namespace Eigen {
|
||||
|
||||
namespace LevenbergMarquardtSpace {
|
||||
|
||||
@@ -1,6 +1,8 @@
|
||||
#define chkder_log10e 0.43429448190325182765
|
||||
#define chkder_factor 100.
|
||||
|
||||
#include "./InternalHeaderCheck.h"
|
||||
|
||||
namespace Eigen {
|
||||
|
||||
namespace internal {
|
||||
|
||||
@@ -1,3 +1,5 @@
|
||||
#include "./InternalHeaderCheck.h"
|
||||
|
||||
namespace Eigen {
|
||||
|
||||
namespace internal {
|
||||
|
||||
@@ -1,3 +1,5 @@
|
||||
#include "./InternalHeaderCheck.h"
|
||||
|
||||
namespace Eigen {
|
||||
|
||||
namespace internal {
|
||||
|
||||
@@ -1,3 +1,5 @@
|
||||
#include "./InternalHeaderCheck.h"
|
||||
|
||||
namespace Eigen {
|
||||
|
||||
namespace internal {
|
||||
|
||||
@@ -1,3 +1,5 @@
|
||||
#include "./InternalHeaderCheck.h"
|
||||
|
||||
namespace Eigen {
|
||||
|
||||
namespace internal {
|
||||
|
||||
@@ -1,3 +1,5 @@
|
||||
#include "./InternalHeaderCheck.h"
|
||||
|
||||
namespace Eigen {
|
||||
|
||||
namespace internal {
|
||||
|
||||
@@ -1,3 +1,5 @@
|
||||
#include "./InternalHeaderCheck.h"
|
||||
|
||||
namespace Eigen {
|
||||
|
||||
namespace internal {
|
||||
|
||||
@@ -1,3 +1,5 @@
|
||||
#include "./InternalHeaderCheck.h"
|
||||
|
||||
namespace Eigen {
|
||||
|
||||
namespace internal {
|
||||
|
||||
@@ -1,3 +1,5 @@
|
||||
#include "./InternalHeaderCheck.h"
|
||||
|
||||
namespace Eigen {
|
||||
|
||||
namespace internal {
|
||||
|
||||
@@ -0,0 +1,3 @@
|
||||
#ifndef EIGEN_NUMERICALDIFF_MODULE_H
|
||||
#error "Please include unsupported/Eigen/NumericalDiff instead of including headers inside the src directory directly."
|
||||
#endif
|
||||
@@ -13,6 +13,8 @@
|
||||
#ifndef EIGEN_NUMERICAL_DIFF_H
|
||||
#define EIGEN_NUMERICAL_DIFF_H
|
||||
|
||||
#include "./InternalHeaderCheck.h"
|
||||
|
||||
namespace Eigen {
|
||||
|
||||
enum NumericalDiffMode {
|
||||
@@ -32,11 +34,11 @@ enum NumericalDiffMode {
|
||||
*
|
||||
* Currently only "Forward" and "Central" scheme are implemented.
|
||||
*/
|
||||
template<typename _Functor, NumericalDiffMode mode=Forward>
|
||||
class NumericalDiff : public _Functor
|
||||
template<typename Functor_, NumericalDiffMode mode=Forward>
|
||||
class NumericalDiff : public Functor_
|
||||
{
|
||||
public:
|
||||
typedef _Functor Functor;
|
||||
typedef Functor_ Functor;
|
||||
typedef typename Functor::Scalar Scalar;
|
||||
typedef typename Functor::InputType InputType;
|
||||
typedef typename Functor::ValueType ValueType;
|
||||
|
||||
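
To make the renamed NumericalDiff interface above concrete, a small sketch (the functor, its sizes, and the evaluation point are invented for illustration):

    #include <cmath>
    #include <Eigen/Dense>
    #include <unsupported/Eigen/NumericalDiff>

    // Hypothetical functor f(x) = (x0*x1, sin(x0)); NumericalDiff adds a df() member.
    struct MyFunctor {
      typedef double Scalar;
      enum { InputsAtCompileTime = 2, ValuesAtCompileTime = 2 };
      typedef Eigen::Vector2d InputType;
      typedef Eigen::Vector2d ValueType;
      typedef Eigen::Matrix2d JacobianType;
      int inputs() const { return 2; }
      int values() const { return 2; }
      int operator()(const InputType &x, ValueType &f) const {
        f(0) = x(0) * x(1);
        f(1) = std::sin(x(0));
        return 0;
      }
    };

    Eigen::NumericalDiff<MyFunctor, Eigen::Central> nd{MyFunctor()};
    Eigen::Vector2d x(1.0, 2.0);
    Eigen::Matrix2d J;
    nd.df(x, J);   // central-difference approximation of the Jacobian of f at x
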
@@ -14,6 +14,8 @@
|
||||
// * Eigen/Core
|
||||
// * Eigen/src/PolynomialSolver.h
|
||||
|
||||
#include "./InternalHeaderCheck.h"
|
||||
|
||||
namespace Eigen {
|
||||
|
||||
namespace internal {
|
||||
@@ -29,32 +31,32 @@ struct decrement_if_fixed_size
|
||||
|
||||
#endif
|
||||
|
||||
template< typename _Scalar, int _Deg >
|
||||
template< typename Scalar_, int Deg_ >
|
||||
class companion
|
||||
{
|
||||
public:
|
||||
EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_Deg==Dynamic ? Dynamic : _Deg)
|
||||
EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(Scalar_,Deg_==Dynamic ? Dynamic : Deg_)
|
||||
|
||||
enum {
|
||||
Deg = _Deg,
|
||||
Deg = Deg_,
|
||||
Deg_1=decrement_if_fixed_size<Deg>::ret
|
||||
};
|
||||
|
||||
typedef _Scalar Scalar;
|
||||
typedef Scalar_ Scalar;
|
||||
typedef typename NumTraits<Scalar>::Real RealScalar;
|
||||
typedef Matrix<Scalar, Deg, 1> RightColumn;
|
||||
//typedef DiagonalMatrix< Scalar, Deg_1, Deg_1 > BottomLeftDiagonal;
|
||||
typedef Matrix<Scalar, Deg_1, 1> BottomLeftDiagonal;
|
||||
|
||||
typedef Matrix<Scalar, Deg, Deg> DenseCompanionMatrixType;
|
||||
typedef Matrix< Scalar, _Deg, Deg_1 > LeftBlock;
|
||||
typedef Matrix< Scalar, Deg_, Deg_1 > LeftBlock;
|
||||
typedef Matrix< Scalar, Deg_1, Deg_1 > BottomLeftBlock;
|
||||
typedef Matrix< Scalar, 1, Deg_1 > LeftBlockFirstRow;
|
||||
|
||||
typedef DenseIndex Index;
|
||||
|
||||
public:
|
||||
EIGEN_STRONG_INLINE const _Scalar operator()(Index row, Index col ) const
|
||||
EIGEN_STRONG_INLINE const Scalar_ operator()(Index row, Index col ) const
|
||||
{
|
||||
if( m_bl_diag.rows() > col )
|
||||
{
|
||||
@@ -130,9 +132,9 @@ class companion
|
||||
|
||||
|
||||
|
||||
template< typename _Scalar, int _Deg >
|
||||
template< typename Scalar_, int Deg_ >
|
||||
inline
|
||||
bool companion<_Scalar,_Deg>::balanced( RealScalar colNorm, RealScalar rowNorm,
|
||||
bool companion<Scalar_,Deg_>::balanced( RealScalar colNorm, RealScalar rowNorm,
|
||||
bool& isBalanced, RealScalar& colB, RealScalar& rowB )
|
||||
{
|
||||
if( RealScalar(0) == colNorm || RealScalar(0) == rowNorm
|
||||
@@ -184,9 +186,9 @@ bool companion<_Scalar,_Deg>::balanced( RealScalar colNorm, RealScalar rowNorm,
|
||||
}
|
||||
}
|
||||
|
||||
template< typename _Scalar, int _Deg >
|
||||
template< typename Scalar_, int Deg_ >
|
||||
inline
|
||||
bool companion<_Scalar,_Deg>::balancedR( RealScalar colNorm, RealScalar rowNorm,
|
||||
bool companion<Scalar_,Deg_>::balancedR( RealScalar colNorm, RealScalar rowNorm,
|
||||
bool& isBalanced, RealScalar& colB, RealScalar& rowB )
|
||||
{
|
||||
if( RealScalar(0) == colNorm || RealScalar(0) == rowNorm ){ return true; }
|
||||
@@ -197,7 +199,7 @@ bool companion<_Scalar,_Deg>::balancedR( RealScalar colNorm, RealScalar rowNorm,
|
||||
* of the row and column norm
|
||||
*/
|
||||
const RealScalar q = colNorm/rowNorm;
|
||||
if( !isApprox( q, _Scalar(1) ) )
|
||||
if( !isApprox( q, Scalar_(1) ) )
|
||||
{
|
||||
rowB = sqrt( colNorm/rowNorm );
|
||||
colB = RealScalar(1)/rowB;
|
||||
@@ -211,8 +213,8 @@ bool companion<_Scalar,_Deg>::balancedR( RealScalar colNorm, RealScalar rowNorm,
|
||||
}
|
||||
|
||||
|
||||
template< typename _Scalar, int _Deg >
|
||||
void companion<_Scalar,_Deg>::balance()
|
||||
template< typename Scalar_, int Deg_ >
|
||||
void companion<Scalar_,Deg_>::balance()
|
||||
{
|
||||
using std::abs;
|
||||
EIGEN_STATIC_ASSERT( Deg == Dynamic || 1 < Deg, YOU_MADE_A_PROGRAMMING_MISTAKE );
|
||||
|
||||
@@ -0,0 +1,3 @@
|
||||
#ifndef EIGEN_POLYNOMIALS_MODULE_H
|
||||
#error "Please include unsupported/Eigen/Polynomials instead of including headers inside the src directory directly."
|
||||
#endif
|
||||
@@ -10,6 +10,8 @@
|
||||
#ifndef EIGEN_POLYNOMIAL_SOLVER_H
|
||||
#define EIGEN_POLYNOMIAL_SOLVER_H
|
||||
|
||||
#include "./InternalHeaderCheck.h"
|
||||
|
||||
namespace Eigen {
|
||||
|
||||
/** \ingroup Polynomials_Module
|
||||
@@ -25,16 +27,16 @@ namespace Eigen {
|
||||
* It stores the set of roots as a vector of complexes.
|
||||
*
|
||||
*/
|
||||
template< typename _Scalar, int _Deg >
|
||||
template< typename Scalar_, int Deg_ >
|
||||
class PolynomialSolverBase
|
||||
{
|
||||
public:
|
||||
EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_Deg==Dynamic ? Dynamic : _Deg)
|
||||
EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(Scalar_,Deg_==Dynamic ? Dynamic : Deg_)
|
||||
|
||||
typedef _Scalar Scalar;
|
||||
typedef Scalar_ Scalar;
|
||||
typedef typename NumTraits<Scalar>::Real RealScalar;
|
||||
typedef std::complex<RealScalar> RootType;
|
||||
typedef Matrix<RootType,_Deg,1> RootsType;
|
||||
typedef Matrix<RootType,Deg_,1> RootsType;
|
||||
|
||||
typedef DenseIndex Index;
|
||||
|
||||
@@ -59,7 +61,7 @@ class PolynomialSolverBase
|
||||
* i.e. the real part of the complex roots that have an imaginary part whose
|
||||
* absolute value is smaller than absImaginaryThreshold.
|
||||
* absImaginaryThreshold takes the dummy_precision associated
|
||||
* with the _Scalar template parameter of the PolynomialSolver class as the default value.
|
||||
* with the Scalar_ template parameter of the PolynomialSolver class as the default value.
|
||||
*
|
||||
* \param[out] bi_seq : the back insertion sequence (stl concept)
|
||||
* \param[in] absImaginaryThreshold : the maximum bound of the imaginary part of a complex
|
||||
@@ -200,7 +202,7 @@ class PolynomialSolverBase
|
||||
* A real root is defined as the real part of a complex root with absolute imaginary
|
||||
* part smallest than absImaginaryThreshold.
|
||||
* absImaginaryThreshold takes the dummy_precision associated
|
||||
* with the _Scalar template parameter of the PolynomialSolver class as the default value.
|
||||
* with the Scalar_ template parameter of the PolynomialSolver class as the default value.
|
||||
* If no real root is found the boolean hasArealRoot is set to false and the real part of
|
||||
* the root with smallest absolute imaginary part is returned instead.
|
||||
*
|
||||
@@ -223,7 +225,7 @@ class PolynomialSolverBase
|
||||
* A real root is defined as the real part of a complex root with absolute imaginary
|
||||
* part smallest than absImaginaryThreshold.
|
||||
* absImaginaryThreshold takes the dummy_precision associated
|
||||
* with the _Scalar template parameter of the PolynomialSolver class as the default value.
|
||||
* with the Scalar_ template parameter of the PolynomialSolver class as the default value.
|
||||
* If no real root is found the boolean hasArealRoot is set to false and the real part of
|
||||
* the root with smallest absolute imaginary part is returned instead.
|
||||
*
|
||||
@@ -246,7 +248,7 @@ class PolynomialSolverBase
|
||||
* A real root is defined as the real part of a complex root with absolute imaginary
|
||||
* part smallest than absImaginaryThreshold.
|
||||
* absImaginaryThreshold takes the dummy_precision associated
|
||||
* with the _Scalar template parameter of the PolynomialSolver class as the default value.
|
||||
* with the Scalar_ template parameter of the PolynomialSolver class as the default value.
|
||||
* If no real root is found the boolean hasArealRoot is set to false and the real part of
|
||||
* the root with smallest absolute imaginary part is returned instead.
|
||||
*
|
||||
@@ -269,7 +271,7 @@ class PolynomialSolverBase
|
||||
* A real root is defined as the real part of a complex root with absolute imaginary
|
||||
* part smallest than absImaginaryThreshold.
|
||||
* absImaginaryThreshold takes the dummy_precision associated
|
||||
* with the _Scalar template parameter of the PolynomialSolver class as the default value.
|
||||
* with the Scalar_ template parameter of the PolynomialSolver class as the default value.
|
||||
* If no real root is found the boolean hasArealRoot is set to false and the real part of
|
||||
* the root with smallest absolute imaginary part is returned instead.
|
||||
*
|
||||
@@ -306,9 +308,9 @@ class PolynomialSolverBase
|
||||
*
|
||||
* Computes the complex roots of a real polynomial.
|
||||
*
|
||||
* \param _Scalar the scalar type, i.e., the type of the polynomial coefficients
|
||||
* \param _Deg the degree of the polynomial, can be a compile time value or Dynamic.
|
||||
* Notice that the number of polynomial coefficients is _Deg+1.
|
||||
* \param Scalar_ the scalar type, i.e., the type of the polynomial coefficients
|
||||
* \param Deg_ the degree of the polynomial, can be a compile time value or Dynamic.
|
||||
* Notice that the number of polynomial coefficients is Deg_+1.
|
||||
*
|
||||
* This class implements a polynomial solver and provides convenient methods such as
|
||||
* - real roots,
|
||||
@@ -327,20 +329,20 @@ class PolynomialSolverBase
|
||||
* However, almost always, correct accuracy is reached even in these cases for 64bit
|
||||
* (double) floating types and small polynomial degree (<20).
|
||||
*/
|
||||
template<typename _Scalar, int _Deg>
|
||||
class PolynomialSolver : public PolynomialSolverBase<_Scalar,_Deg>
|
||||
template<typename Scalar_, int Deg_>
|
||||
class PolynomialSolver : public PolynomialSolverBase<Scalar_,Deg_>
|
||||
{
|
||||
public:
|
||||
EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_Deg==Dynamic ? Dynamic : _Deg)
|
||||
EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(Scalar_,Deg_==Dynamic ? Dynamic : Deg_)
|
||||
|
||||
typedef PolynomialSolverBase<_Scalar,_Deg> PS_Base;
|
||||
typedef PolynomialSolverBase<Scalar_,Deg_> PS_Base;
|
||||
EIGEN_POLYNOMIAL_SOLVER_BASE_INHERITED_TYPES( PS_Base )
|
||||
|
||||
typedef Matrix<Scalar,_Deg,_Deg> CompanionMatrixType;
|
||||
typedef typename internal::conditional<NumTraits<Scalar>::IsComplex,
|
||||
ComplexEigenSolver<CompanionMatrixType>,
|
||||
EigenSolver<CompanionMatrixType> >::type EigenSolverType;
|
||||
typedef typename internal::conditional<NumTraits<Scalar>::IsComplex, Scalar, std::complex<Scalar> >::type ComplexScalar;
|
||||
typedef Matrix<Scalar,Deg_,Deg_> CompanionMatrixType;
|
||||
typedef std::conditional_t<NumTraits<Scalar>::IsComplex,
|
||||
ComplexEigenSolver<CompanionMatrixType>,
|
||||
EigenSolver<CompanionMatrixType> > EigenSolverType;
|
||||
typedef std::conditional_t<NumTraits<Scalar>::IsComplex, Scalar, std::complex<Scalar> > ComplexScalar;
|
||||
|
||||
public:
|
||||
/** Computes the complex roots of a new polynomial. */
|
||||
@@ -351,7 +353,7 @@ class PolynomialSolver : public PolynomialSolverBase<_Scalar,_Deg>
|
||||
eigen_assert( poly.size() > 1 );
|
||||
if(poly.size() > 2 )
|
||||
{
|
||||
internal::companion<Scalar,_Deg> companion( poly );
|
||||
internal::companion<Scalar,Deg_> companion( poly );
|
||||
companion.balance();
|
||||
m_eigenSolver.compute( companion.denseMatrix() );
|
||||
m_roots = m_eigenSolver.eigenvalues();
|
||||
@@ -395,11 +397,11 @@ class PolynomialSolver : public PolynomialSolverBase<_Scalar,_Deg>
|
||||
};
|
||||
|
||||
|
||||
template< typename _Scalar >
|
||||
class PolynomialSolver<_Scalar,1> : public PolynomialSolverBase<_Scalar,1>
|
||||
template< typename Scalar_ >
|
||||
class PolynomialSolver<Scalar_,1> : public PolynomialSolverBase<Scalar_,1>
|
||||
{
|
||||
public:
|
||||
typedef PolynomialSolverBase<_Scalar,1> PS_Base;
|
||||
typedef PolynomialSolverBase<Scalar_,1> PS_Base;
|
||||
EIGEN_POLYNOMIAL_SOLVER_BASE_INHERITED_TYPES( PS_Base )
|
||||
|
||||
public:
|
||||
|
||||
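
As a quick reference for the PolynomialSolver interface touched in the hunks above, a small usage sketch (coefficients are given by increasing degree; the polynomial is an arbitrary example):

    #include <iostream>
    #include <vector>
    #include <unsupported/Eigen/Polynomials>

    // p(x) = -6 + 11x - 6x^2 + x^3 = (x - 1)(x - 2)(x - 3)
    Eigen::Vector4d coeffs(-6.0, 11.0, -6.0, 1.0);
    Eigen::PolynomialSolver<double, 3> psolve(coeffs);
    std::cout << psolve.roots().transpose() << "\n";  // the three complex roots

    std::vector<double> realRoots;
    psolve.realRoots(realRoots);  // keeps only roots with (near-)zero imaginary part
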
@@ -10,6 +10,8 @@
|
||||
#ifndef EIGEN_POLYNOMIAL_UTILS_H
|
||||
#define EIGEN_POLYNOMIAL_UTILS_H
|
||||
|
||||
#include "./InternalHeaderCheck.h"
|
||||
|
||||
namespace Eigen {
|
||||
|
||||
/** \ingroup Polynomials_Module
|
||||
|
||||
@@ -0,0 +1,3 @@
|
||||
#ifndef EIGEN_SKYLINE_MODULE_H
|
||||
#error "Please include unsupported/Eigen/Skyline instead of including headers inside the src directory directly."
|
||||
#endif
|
||||
@@ -10,6 +10,8 @@
|
||||
#ifndef EIGEN_SKYLINEINPLACELU_H
|
||||
#define EIGEN_SKYLINEINPLACELU_H
|
||||
|
||||
#include "./InternalHeaderCheck.h"
|
||||
|
||||
namespace Eigen {
|
||||
|
||||
/** \ingroup Skyline_Module
|
||||
@@ -116,7 +118,7 @@ protected:
|
||||
* using the default algorithm.
|
||||
*/
|
||||
template<typename MatrixType>
|
||||
//template<typename _Scalar>
|
||||
//template<typename Scalar_>
|
||||
void SkylineInplaceLU<MatrixType>::compute() {
|
||||
const size_t rows = m_lu.rows();
|
||||
const size_t cols = m_lu.cols();
|
||||
|
||||
@@ -13,6 +13,8 @@
|
||||
#include "SkylineStorage.h"
|
||||
#include "SkylineMatrixBase.h"
|
||||
|
||||
#include "./InternalHeaderCheck.h"
|
||||
|
||||
namespace Eigen {
|
||||
|
||||
/** \ingroup Skyline_Module
|
||||
@@ -24,16 +26,16 @@ namespace Eigen {
|
||||
* This class implements a skyline matrix using the very uncommon storage
|
||||
* scheme.
|
||||
*
|
||||
* \param _Scalar the scalar type, i.e. the type of the coefficients
|
||||
* \param _Options Union of bit flags controlling the storage scheme. Currently the only possibility
|
||||
* \param Scalar_ the scalar type, i.e. the type of the coefficients
|
||||
* \param Options_ Union of bit flags controlling the storage scheme. Currently the only possibility
|
||||
* is RowMajor. The default is 0 which means column-major.
|
||||
*
|
||||
*
|
||||
*/
|
||||
namespace internal {
|
||||
template<typename _Scalar, int _Options>
|
||||
struct traits<SkylineMatrix<_Scalar, _Options> > {
|
||||
typedef _Scalar Scalar;
|
||||
template<typename Scalar_, int Options_>
|
||||
struct traits<SkylineMatrix<Scalar_, Options_> > {
|
||||
typedef Scalar_ Scalar;
|
||||
typedef Sparse StorageKind;
|
||||
|
||||
enum {
|
||||
@@ -41,15 +43,15 @@ struct traits<SkylineMatrix<_Scalar, _Options> > {
|
||||
ColsAtCompileTime = Dynamic,
|
||||
MaxRowsAtCompileTime = Dynamic,
|
||||
MaxColsAtCompileTime = Dynamic,
|
||||
Flags = SkylineBit | _Options,
|
||||
Flags = SkylineBit | Options_,
|
||||
CoeffReadCost = NumTraits<Scalar>::ReadCost,
|
||||
};
|
||||
};
|
||||
}
|
||||
|
||||
template<typename _Scalar, int _Options>
|
||||
template<typename Scalar_, int Options_>
|
||||
class SkylineMatrix
|
||||
: public SkylineMatrixBase<SkylineMatrix<_Scalar, _Options> > {
|
||||
: public SkylineMatrixBase<SkylineMatrix<Scalar_, Options_> > {
|
||||
public:
|
||||
EIGEN_SKYLINE_GENERIC_PUBLIC_INTERFACE(SkylineMatrix)
|
||||
EIGEN_SKYLINE_INHERIT_ASSIGNMENT_OPERATOR(SkylineMatrix, +=)
|
||||
@@ -375,8 +377,8 @@ public:
|
||||
/** Removes all non zeros */
|
||||
inline void setZero() {
|
||||
m_data.clear();
|
||||
memset(m_colStartIndex, 0, (m_outerSize + 1) * sizeof (Index));
|
||||
memset(m_rowStartIndex, 0, (m_outerSize + 1) * sizeof (Index));
|
||||
std::fill_n(m_colStartIndex, m_outerSize + 1, Index(0));
|
||||
std::fill_n(m_rowStartIndex, m_outerSize + 1, Index(0));
|
||||
}
|
||||
|
||||
/** \returns the number of non zero coefficients */
|
||||
@@ -435,7 +437,7 @@ public:
|
||||
}
|
||||
|
||||
//zeros new data
|
||||
memset(this->_upperPtr() + start, 0, (bandIncrement - 1) * sizeof (Scalar));
|
||||
std::fill_n(this->_upperPtr() + start, bandIncrement - 1, Scalar(0));
|
||||
|
||||
return m_data.upper(m_colStartIndex[inner]);
|
||||
} else {
|
||||
@@ -466,7 +468,7 @@ public:
|
||||
}
|
||||
|
||||
//zeros new data
|
||||
memset(this->_lowerPtr() + start, 0, (bandIncrement - 1) * sizeof (Scalar));
|
||||
std::fill_n(this->_lowerPtr() + start, bandIncrement - 1, Scalar(0));
|
||||
return m_data.lower(m_rowStartIndex[outer]);
|
||||
} else {
|
||||
return m_data.lower(m_rowStartIndex[outer] + inner - (outer - m_data.lowerProfile(outer)));
|
||||
@@ -493,7 +495,7 @@ public:
|
||||
for (Index innerIdx = inner + 1; innerIdx < outerSize() + 1; innerIdx++) {
|
||||
m_rowStartIndex[innerIdx] += bandIncrement;
|
||||
}
|
||||
memset(this->_upperPtr() + m_rowStartIndex[inner] + previousProfile + 1, 0, (bandIncrement - 1) * sizeof (Scalar));
|
||||
std::fill_n(this->_upperPtr() + m_rowStartIndex[inner] + previousProfile + 1, bandIncrement - 1, Scalar(0));
|
||||
return m_data.upper(m_rowStartIndex[inner] + m_data.upperProfile(inner));
|
||||
} else {
|
||||
return m_data.upper(m_rowStartIndex[inner] + (outer - inner));
|
||||
@@ -520,7 +522,7 @@ public:
|
||||
for (Index innerIdx = outer + 1; innerIdx < outerSize() + 1; innerIdx++) {
|
||||
m_colStartIndex[innerIdx] += bandIncrement;
|
||||
}
|
||||
memset(this->_lowerPtr() + m_colStartIndex[outer] + previousProfile + 1, 0, (bandIncrement - 1) * sizeof (Scalar));
|
||||
std::fill_n(this->_lowerPtr() + m_colStartIndex[outer] + previousProfile + 1, bandIncrement - 1, Scalar(0));
|
||||
return m_data.lower(m_colStartIndex[outer] + m_data.lowerProfile(outer));
|
||||
} else {
|
||||
return m_data.lower(m_colStartIndex[outer] + (inner - outer));
|
||||
@@ -619,8 +621,8 @@ public:
|
||||
m_data.clear();
|
||||
|
||||
m_outerSize = diagSize;
|
||||
memset(m_colStartIndex, 0, (cols + 1) * sizeof (Index));
|
||||
memset(m_rowStartIndex, 0, (rows + 1) * sizeof (Index));
|
||||
std::fill_n(m_colStartIndex, cols + 1, Index(0));
|
||||
std::fill_n(m_rowStartIndex, rows + 1, Index(0));
|
||||
}
|
||||
|
||||
void resizeNonZeros(Index size) {
|
||||
@@ -731,15 +733,15 @@ public:
|
||||
Scalar sum() const;
|
||||
};
|
||||
|
||||
template<typename Scalar, int _Options>
|
||||
class SkylineMatrix<Scalar, _Options>::InnerUpperIterator {
|
||||
template<typename Scalar, int Options_>
|
||||
class SkylineMatrix<Scalar, Options_>::InnerUpperIterator {
|
||||
public:
|
||||
|
||||
InnerUpperIterator(const SkylineMatrix& mat, Index outer)
|
||||
: m_matrix(mat), m_outer(outer),
|
||||
m_id(_Options == RowMajor ? mat.m_colStartIndex[outer] : mat.m_rowStartIndex[outer] + 1),
|
||||
m_id(Options_ == RowMajor ? mat.m_colStartIndex[outer] : mat.m_rowStartIndex[outer] + 1),
|
||||
m_start(m_id),
|
||||
m_end(_Options == RowMajor ? mat.m_colStartIndex[outer + 1] : mat.m_rowStartIndex[outer + 1] + 1) {
|
||||
m_end(Options_ == RowMajor ? mat.m_colStartIndex[outer + 1] : mat.m_rowStartIndex[outer + 1] + 1) {
|
||||
}
|
||||
|
||||
inline InnerUpperIterator & operator++() {
|
||||
@@ -793,16 +795,16 @@ protected:
|
||||
const Index m_end;
|
||||
};
|
||||
|
||||
template<typename Scalar, int _Options>
|
||||
class SkylineMatrix<Scalar, _Options>::InnerLowerIterator {
|
||||
template<typename Scalar, int Options_>
|
||||
class SkylineMatrix<Scalar, Options_>::InnerLowerIterator {
|
||||
public:
|
||||
|
||||
InnerLowerIterator(const SkylineMatrix& mat, Index outer)
|
||||
: m_matrix(mat),
|
||||
m_outer(outer),
|
||||
m_id(_Options == RowMajor ? mat.m_rowStartIndex[outer] : mat.m_colStartIndex[outer] + 1),
|
||||
m_id(Options_ == RowMajor ? mat.m_rowStartIndex[outer] : mat.m_colStartIndex[outer] + 1),
|
||||
m_start(m_id),
|
||||
m_end(_Options == RowMajor ? mat.m_rowStartIndex[outer + 1] : mat.m_colStartIndex[outer + 1] + 1) {
|
||||
m_end(Options_ == RowMajor ? mat.m_rowStartIndex[outer + 1] : mat.m_colStartIndex[outer + 1] + 1) {
|
||||
}
|
||||
|
||||
inline InnerLowerIterator & operator++() {
|
||||
|
||||
@@ -12,6 +12,8 @@
|
||||
|
||||
#include "SkylineUtil.h"
|
||||
|
||||
#include "./InternalHeaderCheck.h"
|
||||
|
||||
namespace Eigen {
|
||||
|
||||
/** \ingroup Skyline_Module
|
||||
@@ -44,8 +46,7 @@ public:
|
||||
* \sa MatrixBase::rows(), MatrixBase::cols(), RowsAtCompileTime, SizeAtCompileTime */
|
||||
|
||||
|
||||
SizeAtCompileTime = (internal::size_at_compile_time<internal::traits<Derived>::RowsAtCompileTime,
|
||||
internal::traits<Derived>::ColsAtCompileTime>::ret),
|
||||
SizeAtCompileTime = (internal::size_of_xpr_at_compile_time<Derived>::ret),
|
||||
/**< This is equal to the number of coefficients, i.e. the number of
|
||||
* rows times the number of columns, or to \a Dynamic if this is not
|
||||
* known at compile-time. \sa RowsAtCompileTime, ColsAtCompileTime */
|
||||
@@ -53,8 +54,8 @@ public:
|
||||
MaxRowsAtCompileTime = RowsAtCompileTime,
|
||||
MaxColsAtCompileTime = ColsAtCompileTime,
|
||||
|
||||
MaxSizeAtCompileTime = (internal::size_at_compile_time<MaxRowsAtCompileTime,
|
||||
MaxColsAtCompileTime>::ret),
|
||||
MaxSizeAtCompileTime = (internal::size_at_compile_time(MaxRowsAtCompileTime,
|
||||
MaxColsAtCompileTime)),
|
||||
|
||||
IsVectorAtCompileTime = RowsAtCompileTime == 1 || ColsAtCompileTime == 1,
|
||||
/**< This is set to true if either the number of rows or the number of
|
||||
@@ -85,8 +86,8 @@ public:
|
||||
typedef typename NumTraits<Scalar>::Real RealScalar;
|
||||
|
||||
/** type of the equivalent square matrix */
|
||||
typedef Matrix<Scalar, EIGEN_SIZE_MAX(RowsAtCompileTime, ColsAtCompileTime),
|
||||
EIGEN_SIZE_MAX(RowsAtCompileTime, ColsAtCompileTime) > SquareMatrixType;
|
||||
typedef Matrix<Scalar, internal::max_size_prefer_dynamic(RowsAtCompileTime, ColsAtCompileTime),
|
||||
internal::max_size_prefer_dynamic(RowsAtCompileTime, ColsAtCompileTime) > SquareMatrixType;
|
||||
|
||||
inline const Derived& derived() const {
|
||||
return *static_cast<const Derived*> (this);
|
||||
|
||||
@@ -10,6 +10,8 @@
|
||||
#ifndef EIGEN_SKYLINEPRODUCT_H
|
||||
#define EIGEN_SKYLINEPRODUCT_H
|
||||
|
||||
#include "./InternalHeaderCheck.h"
|
||||
|
||||
namespace Eigen {
|
||||
|
||||
template<typename Lhs, typename Rhs, int ProductMode>
|
||||
@@ -23,22 +25,22 @@ struct SkylineProductReturnType {
|
||||
template<typename LhsNested, typename RhsNested, int ProductMode>
|
||||
struct internal::traits<SkylineProduct<LhsNested, RhsNested, ProductMode> > {
|
||||
// clean the nested types:
|
||||
typedef typename internal::remove_all<LhsNested>::type _LhsNested;
|
||||
typedef typename internal::remove_all<RhsNested>::type _RhsNested;
|
||||
typedef typename _LhsNested::Scalar Scalar;
|
||||
typedef internal::remove_all_t<LhsNested> LhsNested_;
|
||||
typedef internal::remove_all_t<RhsNested> RhsNested_;
|
||||
typedef typename LhsNested_::Scalar Scalar;
|
||||
|
||||
enum {
|
||||
LhsCoeffReadCost = _LhsNested::CoeffReadCost,
|
||||
RhsCoeffReadCost = _RhsNested::CoeffReadCost,
|
||||
LhsFlags = _LhsNested::Flags,
|
||||
RhsFlags = _RhsNested::Flags,
|
||||
LhsCoeffReadCost = LhsNested_::CoeffReadCost,
|
||||
RhsCoeffReadCost = RhsNested_::CoeffReadCost,
|
||||
LhsFlags = LhsNested_::Flags,
|
||||
RhsFlags = RhsNested_::Flags,
|
||||
|
||||
RowsAtCompileTime = _LhsNested::RowsAtCompileTime,
|
||||
ColsAtCompileTime = _RhsNested::ColsAtCompileTime,
|
||||
InnerSize = EIGEN_SIZE_MIN_PREFER_FIXED(_LhsNested::ColsAtCompileTime, _RhsNested::RowsAtCompileTime),
|
||||
RowsAtCompileTime = LhsNested_::RowsAtCompileTime,
|
||||
ColsAtCompileTime = RhsNested_::ColsAtCompileTime,
|
||||
InnerSize = internal::min_size_prefer_fixed(LhsNested_::ColsAtCompileTime, RhsNested_::RowsAtCompileTime),
|
||||
|
||||
MaxRowsAtCompileTime = _LhsNested::MaxRowsAtCompileTime,
|
||||
MaxColsAtCompileTime = _RhsNested::MaxColsAtCompileTime,
|
||||
MaxRowsAtCompileTime = LhsNested_::MaxRowsAtCompileTime,
|
||||
MaxColsAtCompileTime = RhsNested_::MaxColsAtCompileTime,
|
||||
|
||||
EvalToRowMajor = (RhsFlags & LhsFlags & RowMajorBit),
|
||||
ResultIsSkyline = ProductMode == SkylineTimeSkylineProduct,
|
||||
@@ -52,9 +54,9 @@ struct internal::traits<SkylineProduct<LhsNested, RhsNested, ProductMode> > {
|
||||
CoeffReadCost = HugeCost
|
||||
};
|
||||
|
||||
typedef typename internal::conditional<ResultIsSkyline,
|
||||
typedef std::conditional_t<ResultIsSkyline,
|
||||
SkylineMatrixBase<SkylineProduct<LhsNested, RhsNested, ProductMode> >,
|
||||
MatrixBase<SkylineProduct<LhsNested, RhsNested, ProductMode> > >::type Base;
|
||||
MatrixBase<SkylineProduct<LhsNested, RhsNested, ProductMode> > > Base;
|
||||
};
|
||||
|
||||
namespace internal {
|
||||
@@ -67,8 +69,8 @@ public:
|
||||
|
||||
private:
|
||||
|
||||
typedef typename traits<SkylineProduct>::_LhsNested _LhsNested;
|
||||
typedef typename traits<SkylineProduct>::_RhsNested _RhsNested;
|
||||
typedef typename traits<SkylineProduct>::LhsNested_ LhsNested_;
|
||||
typedef typename traits<SkylineProduct>::RhsNested_ RhsNested_;
|
||||
|
||||
public:
|
||||
|
||||
@@ -78,11 +80,11 @@ public:
|
||||
eigen_assert(lhs.cols() == rhs.rows());
|
||||
|
||||
enum {
|
||||
ProductIsValid = _LhsNested::ColsAtCompileTime == Dynamic
|
||||
|| _RhsNested::RowsAtCompileTime == Dynamic
|
||||
|| int(_LhsNested::ColsAtCompileTime) == int(_RhsNested::RowsAtCompileTime),
|
||||
AreVectors = _LhsNested::IsVectorAtCompileTime && _RhsNested::IsVectorAtCompileTime,
|
||||
SameSizes = EIGEN_PREDICATE_SAME_MATRIX_SIZE(_LhsNested, _RhsNested)
|
||||
ProductIsValid = LhsNested_::ColsAtCompileTime == Dynamic
|
||||
|| RhsNested_::RowsAtCompileTime == Dynamic
|
||||
|| int(LhsNested_::ColsAtCompileTime) == int(RhsNested_::RowsAtCompileTime),
|
||||
AreVectors = LhsNested_::IsVectorAtCompileTime && RhsNested_::IsVectorAtCompileTime,
|
||||
SameSizes = EIGEN_PREDICATE_SAME_MATRIX_SIZE(LhsNested_, RhsNested_)
|
||||
};
|
||||
// note to the lost user:
|
||||
// * for a dot product use: v1.dot(v2)
|
||||
@@ -102,11 +104,11 @@ public:
|
||||
return m_rhs.cols();
|
||||
}
|
||||
|
||||
EIGEN_STRONG_INLINE const _LhsNested& lhs() const {
|
||||
EIGEN_STRONG_INLINE const LhsNested_& lhs() const {
|
||||
return m_lhs;
|
||||
}
|
||||
|
||||
EIGEN_STRONG_INLINE const _RhsNested& rhs() const {
|
||||
EIGEN_STRONG_INLINE const RhsNested_& rhs() const {
|
||||
return m_rhs;
|
||||
}
|
||||
|
||||
@@ -120,17 +122,17 @@ protected:
|
||||
|
||||
template<typename Lhs, typename Rhs, typename Dest>
|
||||
EIGEN_DONT_INLINE void skyline_row_major_time_dense_product(const Lhs& lhs, const Rhs& rhs, Dest& dst) {
|
||||
typedef typename remove_all<Lhs>::type _Lhs;
|
||||
typedef typename remove_all<Rhs>::type _Rhs;
|
||||
typedef remove_all_t<Lhs> Lhs_;
|
||||
typedef remove_all_t<Rhs> Rhs_;
|
||||
typedef typename traits<Lhs>::Scalar Scalar;
|
||||
|
||||
enum {
|
||||
LhsIsRowMajor = (_Lhs::Flags & RowMajorBit) == RowMajorBit,
|
||||
LhsIsSelfAdjoint = (_Lhs::Flags & SelfAdjointBit) == SelfAdjointBit,
|
||||
LhsIsRowMajor = (Lhs_::Flags & RowMajorBit) == RowMajorBit,
|
||||
LhsIsSelfAdjoint = (Lhs_::Flags & SelfAdjointBit) == SelfAdjointBit,
|
||||
ProcessFirstHalf = LhsIsSelfAdjoint
|
||||
&& (((_Lhs::Flags & (UpperTriangularBit | LowerTriangularBit)) == 0)
|
||||
|| ((_Lhs::Flags & UpperTriangularBit) && !LhsIsRowMajor)
|
||||
|| ((_Lhs::Flags & LowerTriangularBit) && LhsIsRowMajor)),
|
||||
&& (((Lhs_::Flags & (UpperTriangularBit | LowerTriangularBit)) == 0)
|
||||
|| ((Lhs_::Flags & UpperTriangularBit) && !LhsIsRowMajor)
|
||||
|| ((Lhs_::Flags & LowerTriangularBit) && LhsIsRowMajor)),
|
||||
ProcessSecondHalf = LhsIsSelfAdjoint && (!ProcessFirstHalf)
|
||||
};
|
||||
|
||||
@@ -142,7 +144,7 @@ EIGEN_DONT_INLINE void skyline_row_major_time_dense_product(const Lhs& lhs, cons
}

//Use matrix lower triangular part
for (Index row = 0; row < lhs.rows(); row++) {
typename _Lhs::InnerLowerIterator lIt(lhs, row);
typename Lhs_::InnerLowerIterator lIt(lhs, row);
const Index stop = lIt.col() + lIt.size();
for (Index col = 0; col < rhs.cols(); col++) {

@@ -162,7 +164,7 @@ EIGEN_DONT_INLINE void skyline_row_major_time_dense_product(const Lhs& lhs, cons

//Use matrix upper triangular part
for (Index lhscol = 0; lhscol < lhs.cols(); lhscol++) {
typename _Lhs::InnerUpperIterator uIt(lhs, lhscol);
typename Lhs_::InnerUpperIterator uIt(lhs, lhscol);
const Index stop = uIt.size() + uIt.row();
for (Index rhscol = 0; rhscol < rhs.cols(); rhscol++) {

@@ -183,17 +185,17 @@ EIGEN_DONT_INLINE void skyline_row_major_time_dense_product(const Lhs& lhs, cons

template<typename Lhs, typename Rhs, typename Dest>
EIGEN_DONT_INLINE void skyline_col_major_time_dense_product(const Lhs& lhs, const Rhs& rhs, Dest& dst) {
typedef typename remove_all<Lhs>::type _Lhs;
typedef typename remove_all<Rhs>::type _Rhs;
typedef remove_all_t<Lhs> Lhs_;
typedef remove_all_t<Rhs> Rhs_;
typedef typename traits<Lhs>::Scalar Scalar;

enum {
LhsIsRowMajor = (_Lhs::Flags & RowMajorBit) == RowMajorBit,
LhsIsSelfAdjoint = (_Lhs::Flags & SelfAdjointBit) == SelfAdjointBit,
LhsIsRowMajor = (Lhs_::Flags & RowMajorBit) == RowMajorBit,
LhsIsSelfAdjoint = (Lhs_::Flags & SelfAdjointBit) == SelfAdjointBit,
ProcessFirstHalf = LhsIsSelfAdjoint
&& (((_Lhs::Flags & (UpperTriangularBit | LowerTriangularBit)) == 0)
|| ((_Lhs::Flags & UpperTriangularBit) && !LhsIsRowMajor)
|| ((_Lhs::Flags & LowerTriangularBit) && LhsIsRowMajor)),
&& (((Lhs_::Flags & (UpperTriangularBit | LowerTriangularBit)) == 0)
|| ((Lhs_::Flags & UpperTriangularBit) && !LhsIsRowMajor)
|| ((Lhs_::Flags & LowerTriangularBit) && LhsIsRowMajor)),
ProcessSecondHalf = LhsIsSelfAdjoint && (!ProcessFirstHalf)
};

@@ -206,7 +208,7 @@ EIGEN_DONT_INLINE void skyline_col_major_time_dense_product(const Lhs& lhs, cons

//Use matrix upper triangular part
for (Index row = 0; row < lhs.rows(); row++) {
typename _Lhs::InnerUpperIterator uIt(lhs, row);
typename Lhs_::InnerUpperIterator uIt(lhs, row);
const Index stop = uIt.col() + uIt.size();
for (Index col = 0; col < rhs.cols(); col++) {

@@ -227,7 +229,7 @@ EIGEN_DONT_INLINE void skyline_col_major_time_dense_product(const Lhs& lhs, cons

//Use matrix lower triangular part
for (Index lhscol = 0; lhscol < lhs.cols(); lhscol++) {
typename _Lhs::InnerLowerIterator lIt(lhs, lhscol);
typename Lhs_::InnerLowerIterator lIt(lhs, lhscol);
const Index stop = lIt.size() + lIt.row();
for (Index rhscol = 0; rhscol < rhs.cols(); rhscol++) {

@@ -251,7 +253,7 @@ template<typename Lhs, typename Rhs, typename ResultType,

template<typename Lhs, typename Rhs, typename ResultType>
struct skyline_product_selector<Lhs, Rhs, ResultType, RowMajor> {
typedef typename traits<typename remove_all<Lhs>::type>::Scalar Scalar;
typedef typename traits<remove_all_t<Lhs>>::Scalar Scalar;

static void run(const Lhs& lhs, const Rhs& rhs, ResultType & res) {
skyline_row_major_time_dense_product<Lhs, Rhs, ResultType > (lhs, rhs, res);
@@ -260,7 +262,7 @@ struct skyline_product_selector<Lhs, Rhs, ResultType, RowMajor> {

template<typename Lhs, typename Rhs, typename ResultType>
struct skyline_product_selector<Lhs, Rhs, ResultType, ColMajor> {
typedef typename traits<typename remove_all<Lhs>::type>::Scalar Scalar;
typedef typename traits<remove_all_t<Lhs>>::Scalar Scalar;

static void run(const Lhs& lhs, const Rhs& rhs, ResultType & res) {
skyline_col_major_time_dense_product<Lhs, Rhs, ResultType > (lhs, rhs, res);
@@ -272,9 +274,9 @@ struct skyline_product_selector<Lhs, Rhs, ResultType, ColMajor> {
// template<typename Derived>
// template<typename Lhs, typename Rhs >
// Derived & MatrixBase<Derived>::lazyAssign(const SkylineProduct<Lhs, Rhs, SkylineTimeDenseProduct>& product) {
// typedef typename internal::remove_all<Lhs>::type _Lhs;
// internal::skyline_product_selector<typename internal::remove_all<Lhs>::type,
// typename internal::remove_all<Rhs>::type,
// typedef internal::remove_all_t<Lhs> Lhs_;
// internal::skyline_product_selector<internal::remove_all_t<Lhs>,
// internal::remove_all_t<Rhs>,
// Derived>::run(product.lhs(), product.rhs(), derived());
//
// return derived();

@@ -10,6 +10,8 @@
#ifndef EIGEN_SKYLINE_STORAGE_H
#define EIGEN_SKYLINE_STORAGE_H

#include "./InternalHeaderCheck.h"

namespace Eigen {

/** Stores a skyline set of values in three structures :
@@ -187,11 +189,11 @@ public:
}

inline void reset() {
memset(m_diag, 0, m_diagSize * sizeof (Scalar));
memset(m_upper, 0, m_upperSize * sizeof (Scalar));
memset(m_lower, 0, m_lowerSize * sizeof (Scalar));
memset(m_upperProfile, 0, m_diagSize * sizeof (Index));
memset(m_lowerProfile, 0, m_diagSize * sizeof (Index));
std::fill_n(m_diag, m_diagSize, Scalar(0));
std::fill_n(m_upper, m_upperSize, Scalar(0));
std::fill_n(m_lower, m_lowerSize, Scalar(0));
std::fill_n(m_upperProfile, m_diagSize, Index(0));
std::fill_n(m_lowerProfile, m_diagSize, Index(0));
}

void prune(Scalar reference, RealScalar epsilon = dummy_precision<RealScalar>()) {

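A note on the reset() hunk above: std::fill_n value-initializes each element, so it stays well-defined if Scalar is a non-trivial type such as std::complex, whereas memset only byte-zeroes raw storage. A minimal sketch of the idiom, not taken from the patch (container and names are hypothetical):

```cpp
#include <algorithm>
#include <complex>
#include <vector>

int main() {
  std::vector<std::complex<double>> diag(8);
  // Same intent as the old memset(m_diag, 0, ...), but expressed as value
  // assignment, which is correct for any element type, not just PODs.
  std::fill_n(diag.begin(), diag.size(), std::complex<double>(0));
  return 0;
}
```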
@@ -10,6 +10,8 @@
#ifndef EIGEN_SKYLINEUTIL_H
#define EIGEN_SKYLINEUTIL_H

#include "./InternalHeaderCheck.h"

namespace Eigen {

#ifdef NDEBUG
@@ -49,7 +51,7 @@ EIGEN_STRONG_INLINE Derived& operator Op(const Other& scalar) \
EIGEN_SKYLINE_INHERIT_SCALAR_ASSIGNMENT_OPERATOR(Derived, *=) \
EIGEN_SKYLINE_INHERIT_SCALAR_ASSIGNMENT_OPERATOR(Derived, /=)

#define _EIGEN_SKYLINE_GENERIC_PUBLIC_INTERFACE(Derived, BaseClass) \
#define EIGEN_SKYLINE_GENERIC_PUBLIC_INTERFACE_(Derived, BaseClass) \
typedef BaseClass Base; \
typedef typename Eigen::internal::traits<Derived>::Scalar Scalar; \
typedef typename Eigen::NumTraits<Scalar>::Real RealScalar; \
@@ -58,13 +60,13 @@ EIGEN_STRONG_INLINE Derived& operator Op(const Other& scalar) \
enum { Flags = Eigen::internal::traits<Derived>::Flags, };

#define EIGEN_SKYLINE_GENERIC_PUBLIC_INTERFACE(Derived) \
_EIGEN_SKYLINE_GENERIC_PUBLIC_INTERFACE(Derived, Eigen::SkylineMatrixBase<Derived>)
EIGEN_SKYLINE_GENERIC_PUBLIC_INTERFACE_(Derived, Eigen::SkylineMatrixBase<Derived>)

template<typename Derived> class SkylineMatrixBase;
template<typename _Scalar, int _Flags = 0> class SkylineMatrix;
template<typename _Scalar, int _Flags = 0> class DynamicSkylineMatrix;
template<typename _Scalar, int _Flags = 0> class SkylineVector;
template<typename _Scalar, int _Flags = 0> class MappedSkylineMatrix;
template<typename Scalar_, int Flags_ = 0> class SkylineMatrix;
template<typename Scalar_, int Flags_ = 0> class DynamicSkylineMatrix;
template<typename Scalar_, int Flags_ = 0> class SkylineVector;
template<typename Scalar_, int Flags_ = 0> class MappedSkylineMatrix;

namespace internal {

@@ -73,13 +75,13 @@ template<typename Lhs, typename Rhs, int ProductMode = skyline_product_mode<Lhs,

template<typename T> class eval<T,IsSkyline>
{
typedef typename traits<T>::Scalar _Scalar;
typedef typename traits<T>::Scalar Scalar_;
enum {
_Flags = traits<T>::Flags
Flags_ = traits<T>::Flags
};

public:
typedef SkylineMatrix<_Scalar, _Flags> type;
typedef SkylineMatrix<Scalar_, Flags_> type;
};

} // end namespace internal

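For context on the renames above (leading underscores moved to trailing position, e.g. `_EIGEN_SKYLINE_GENERIC_PUBLIC_INTERFACE` becoming `EIGEN_SKYLINE_GENERIC_PUBLIC_INTERFACE_`, `_Scalar` becoming `Scalar_`): identifiers that begin with an underscore followed by an uppercase letter are reserved to the C++ implementation, so the trailing underscore keeps the names in library space. A hedged, purely illustrative sketch with hypothetical names:

```cpp
// Reserved spelling: starts with underscore + uppercase letter; the standard
// library or compiler may already use it, so clashing is undefined behaviour.
//   #define _MY_PUBLIC_INTERFACE(Derived) ...
//   template<typename _Scalar> class MyMatrix;

// Safe spelling: same meaning, name stays outside the reserved set.
#define MY_PUBLIC_INTERFACE_(Derived) typedef Derived Self;
template<typename Scalar_> class MyMatrix;
```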
@@ -1,122 +0,0 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_SPARSE_BLOCKFORDYNAMICMATRIX_H
#define EIGEN_SPARSE_BLOCKFORDYNAMICMATRIX_H

namespace Eigen {

#if 0

// NOTE Have to be reimplemented as a specialization of BlockImpl< DynamicSparseMatrix<_Scalar, _Options, _Index>, ... >
// See SparseBlock.h for an example


/***************************************************************************
* specialisation for DynamicSparseMatrix
***************************************************************************/

template<typename _Scalar, int _Options, typename _Index, int Size>
class SparseInnerVectorSet<DynamicSparseMatrix<_Scalar, _Options, _Index>, Size>
: public SparseMatrixBase<SparseInnerVectorSet<DynamicSparseMatrix<_Scalar, _Options, _Index>, Size> >
{
typedef DynamicSparseMatrix<_Scalar, _Options, _Index> MatrixType;
public:

enum { IsRowMajor = internal::traits<SparseInnerVectorSet>::IsRowMajor };

EIGEN_SPARSE_PUBLIC_INTERFACE(SparseInnerVectorSet)
class InnerIterator: public MatrixType::InnerIterator
{
public:
inline InnerIterator(const SparseInnerVectorSet& xpr, Index outer)
: MatrixType::InnerIterator(xpr.m_matrix, xpr.m_outerStart + outer), m_outer(outer)
{}
inline Index row() const { return IsRowMajor ? m_outer : this->index(); }
inline Index col() const { return IsRowMajor ? this->index() : m_outer; }
protected:
Index m_outer;
};

inline SparseInnerVectorSet(const MatrixType& matrix, Index outerStart, Index outerSize)
: m_matrix(matrix), m_outerStart(outerStart), m_outerSize(outerSize)
{
eigen_assert( (outerStart>=0) && ((outerStart+outerSize)<=matrix.outerSize()) );
}

inline SparseInnerVectorSet(const MatrixType& matrix, Index outer)
: m_matrix(matrix), m_outerStart(outer), m_outerSize(Size)
{
eigen_assert(Size!=Dynamic);
eigen_assert( (outer>=0) && (outer<matrix.outerSize()) );
}

template<typename OtherDerived>
inline SparseInnerVectorSet& operator=(const SparseMatrixBase<OtherDerived>& other)
{
if (IsRowMajor != ((OtherDerived::Flags&RowMajorBit)==RowMajorBit))
{
// need to transpose => perform a block evaluation followed by a big swap
DynamicSparseMatrix<Scalar,IsRowMajor?RowMajorBit:0> aux(other);
*this = aux.markAsRValue();
}
else
{
// evaluate/copy vector per vector
for (Index j=0; j<m_outerSize.value(); ++j)
{
SparseVector<Scalar,IsRowMajor ? RowMajorBit : 0> aux(other.innerVector(j));
m_matrix.const_cast_derived()._data()[m_outerStart+j].swap(aux._data());
}
}
return *this;
}

inline SparseInnerVectorSet& operator=(const SparseInnerVectorSet& other)
{
return operator=<SparseInnerVectorSet>(other);
}

Index nonZeros() const
{
Index count = 0;
for (Index j=0; j<m_outerSize.value(); ++j)
count += m_matrix._data()[m_outerStart+j].size();
return count;
}

const Scalar& lastCoeff() const
{
EIGEN_STATIC_ASSERT_VECTOR_ONLY(SparseInnerVectorSet);
eigen_assert(m_matrix.data()[m_outerStart].size()>0);
return m_matrix.data()[m_outerStart].vale(m_matrix.data()[m_outerStart].size()-1);
}

// template<typename Sparse>
// inline SparseInnerVectorSet& operator=(const SparseMatrixBase<OtherDerived>& other)
// {
// return *this;
// }

EIGEN_STRONG_INLINE Index rows() const { return IsRowMajor ? m_outerSize.value() : m_matrix.rows(); }
EIGEN_STRONG_INLINE Index cols() const { return IsRowMajor ? m_matrix.cols() : m_outerSize.value(); }

protected:

const typename MatrixType::Nested m_matrix;
Index m_outerStart;
const internal::variable_if_dynamic<Index, Size> m_outerSize;

};

#endif

} // end namespace Eigen

#endif // EIGEN_SPARSE_BLOCKFORDYNAMICMATRIX_H

@@ -11,6 +11,8 @@
#ifndef EIGEN_SPARSEBLOCKMATRIX_H
#define EIGEN_SPARSEBLOCKMATRIX_H

#include "./InternalHeaderCheck.h"

namespace Eigen {
/** \ingroup SparseCore_Module
*
@@ -46,21 +48,21 @@ namespace Eigen {
* It is obviously required to describe the block layout beforehand by calling either
* setBlockSize() for fixed-size blocks or setBlockLayout for variable-size blocks.
*
* \tparam _Scalar The Scalar type
* \tparam Scalar_ The Scalar type
* \tparam _BlockAtCompileTime The block layout option. It takes the following values
* Dynamic : block size known at runtime
* a numeric number : fixed-size block known at compile time
*/
template<typename _Scalar, int _BlockAtCompileTime=Dynamic, int _Options=ColMajor, typename _StorageIndex=int> class BlockSparseMatrix;
template<typename Scalar_, int _BlockAtCompileTime=Dynamic, int Options_=ColMajor, typename StorageIndex_=int> class BlockSparseMatrix;

template<typename BlockSparseMatrixT> class BlockSparseMatrixView;

namespace internal {
template<typename _Scalar, int _BlockAtCompileTime, int _Options, typename _Index>
struct traits<BlockSparseMatrix<_Scalar,_BlockAtCompileTime,_Options, _Index> >
template<typename Scalar_, int _BlockAtCompileTime, int Options_, typename Index_>
struct traits<BlockSparseMatrix<Scalar_,_BlockAtCompileTime,Options_, Index_> >
{
typedef _Scalar Scalar;
typedef _Index Index;
typedef Scalar_ Scalar;
typedef Index_ Index;
typedef Sparse StorageKind; // FIXME Where is it used ??
typedef MatrixXpr XprKind;
enum {
@@ -69,7 +71,7 @@ struct traits<BlockSparseMatrix<_Scalar,_BlockAtCompileTime,_Options, _Index> >
MaxRowsAtCompileTime = Dynamic,
MaxColsAtCompileTime = Dynamic,
BlockSize = _BlockAtCompileTime,
Flags = _Options | NestByRefBit | LvalueBit,
Flags = Options_ | NestByRefBit | LvalueBit,
CoeffReadCost = NumTraits<Scalar>::ReadCost,
SupportedAccessPatterns = InnerRandomAccessPattern
};
@@ -280,17 +282,17 @@ class BlockSparseTimeDenseProduct
BlockSparseTimeDenseProduct& operator=(const BlockSparseTimeDenseProduct&);
};

template<typename _Scalar, int _BlockAtCompileTime, int _Options, typename _StorageIndex>
class BlockSparseMatrix : public SparseMatrixBase<BlockSparseMatrix<_Scalar,_BlockAtCompileTime, _Options,_StorageIndex> >
template<typename Scalar_, int _BlockAtCompileTime, int Options_, typename StorageIndex_>
class BlockSparseMatrix : public SparseMatrixBase<BlockSparseMatrix<Scalar_,_BlockAtCompileTime, Options_,StorageIndex_> >
{
public:
typedef _Scalar Scalar;
typedef Scalar_ Scalar;
typedef typename NumTraits<Scalar>::Real RealScalar;
typedef _StorageIndex StorageIndex;
typedef typename internal::ref_selector<BlockSparseMatrix<_Scalar, _BlockAtCompileTime, _Options, _StorageIndex> >::type Nested;
typedef StorageIndex_ StorageIndex;
typedef typename internal::ref_selector<BlockSparseMatrix<Scalar_, _BlockAtCompileTime, Options_, StorageIndex_> >::type Nested;

enum {
Options = _Options,
Options = Options_,
Flags = Options,
BlockSize=_BlockAtCompileTime,
RowsAtCompileTime = Dynamic,
@@ -302,7 +304,7 @@ class BlockSparseMatrix : public SparseMatrixBase<BlockSparseMatrix<_Scalar,_Blo
};
typedef Matrix<Scalar, _BlockAtCompileTime, _BlockAtCompileTime,IsColMajor ? ColMajor : RowMajor> BlockScalar;
typedef Matrix<RealScalar, _BlockAtCompileTime, _BlockAtCompileTime,IsColMajor ? ColMajor : RowMajor> BlockRealScalar;
typedef typename internal::conditional<_BlockAtCompileTime==Dynamic, Scalar, BlockScalar>::type BlockScalarReturnType;
typedef std::conditional_t<_BlockAtCompileTime==Dynamic, Scalar, BlockScalar> BlockScalarReturnType;
typedef BlockSparseMatrix<Scalar, BlockSize, IsColMajor ? ColMajor : RowMajor, StorageIndex> PlainObject;
public:
// Default constructor
@@ -937,7 +939,7 @@ class BlockSparseMatrix : public SparseMatrixBase<BlockSparseMatrix<_Scalar,_Blo
{
if(m_blockSize == Dynamic) return m_blockPtr[id];
else return id * m_blockSize * m_blockSize;
//return blockDynIdx(id, typename internal::conditional<(BlockSize==Dynamic), internal::true_type, internal::false_type>::type());
//return blockDynIdx(id, std::conditional_t<(BlockSize==Dynamic), internal::true_type, internal::false_type>());
}


@@ -968,13 +970,13 @@ class BlockSparseMatrix : public SparseMatrixBase<BlockSparseMatrix<_Scalar,_Blo
Index m_blockSize; // Size of a block for fixed-size blocks, otherwise -1
};

template<typename _Scalar, int _BlockAtCompileTime, int _Options, typename _StorageIndex>
class BlockSparseMatrix<_Scalar, _BlockAtCompileTime, _Options, _StorageIndex>::BlockInnerIterator
template<typename Scalar_, int _BlockAtCompileTime, int Options_, typename StorageIndex_>
class BlockSparseMatrix<Scalar_, _BlockAtCompileTime, Options_, StorageIndex_>::BlockInnerIterator
{
public:

enum{
Flags = _Options
Flags = Options_
};

BlockInnerIterator(const BlockSparseMatrix& mat, const Index outer)
@@ -1010,14 +1012,14 @@ class BlockSparseMatrix<_Scalar, _BlockAtCompileTime, _Options, _StorageIndex>::
inline operator bool() const { return (m_id < m_end); }

protected:
const BlockSparseMatrix<_Scalar, _BlockAtCompileTime, _Options, StorageIndex>& m_mat;
const BlockSparseMatrix<Scalar_, _BlockAtCompileTime, Options_, StorageIndex>& m_mat;
const Index m_outer;
Index m_id;
Index m_end;
};

template<typename _Scalar, int _BlockAtCompileTime, int _Options, typename _StorageIndex>
class BlockSparseMatrix<_Scalar, _BlockAtCompileTime, _Options, _StorageIndex>::InnerIterator
template<typename Scalar_, int _BlockAtCompileTime, int Options_, typename StorageIndex_>
class BlockSparseMatrix<Scalar_, _BlockAtCompileTime, Options_, StorageIndex_>::InnerIterator
{
public:
InnerIterator(const BlockSparseMatrix& mat, Index outer)

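The BlockSparseMatrix hunks above replace Eigen's pre-C++11 `internal::conditional<...>::type` with the standard alias. The two spellings are equivalent; a minimal sketch (hypothetical names, not from the patch) of what the change amounts to:

```cpp
#include <type_traits>

template <int BlockSize>
struct Example {
  // Old spelling:
  //   typedef typename internal::conditional<BlockSize == -1, double, float>::type T;
  // New spelling, same resulting type, no 'typename'/'::type' boilerplate:
  using T = std::conditional_t<BlockSize == -1, double, float>;
};
```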
@@ -1,404 +0,0 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_DYNAMIC_SPARSEMATRIX_H
#define EIGEN_DYNAMIC_SPARSEMATRIX_H

namespace Eigen {

/** \deprecated use a SparseMatrix in an uncompressed mode
*
* \class DynamicSparseMatrix
*
* \brief A sparse matrix class designed for matrix assembly purpose
*
* \param _Scalar the scalar type, i.e. the type of the coefficients
*
* Unlike SparseMatrix, this class provides a much higher degree of flexibility. In particular, it allows
* random read/write accesses in log(rho*outer_size) where \c rho is the probability that a coefficient is
* nonzero and outer_size is the number of columns if the matrix is column-major and the number of rows
* otherwise.
*
* Internally, the data are stored as a std::vector of compressed vector. The performances of random writes might
* decrease as the number of nonzeros per inner-vector increase. In practice, we observed very good performance
* till about 100 nonzeros/vector, and the performance remains relatively good till 500 nonzeros/vectors.
*
* \see SparseMatrix
*/

namespace internal {
template<typename _Scalar, int _Options, typename _StorageIndex>
struct traits<DynamicSparseMatrix<_Scalar, _Options, _StorageIndex> >
{
typedef _Scalar Scalar;
typedef _StorageIndex StorageIndex;
typedef Sparse StorageKind;
typedef MatrixXpr XprKind;
enum {
RowsAtCompileTime = Dynamic,
ColsAtCompileTime = Dynamic,
MaxRowsAtCompileTime = Dynamic,
MaxColsAtCompileTime = Dynamic,
Flags = _Options | NestByRefBit | LvalueBit,
CoeffReadCost = NumTraits<Scalar>::ReadCost,
SupportedAccessPatterns = OuterRandomAccessPattern
};
};
}

template<typename _Scalar, int _Options, typename _StorageIndex>
class DynamicSparseMatrix
: public SparseMatrixBase<DynamicSparseMatrix<_Scalar, _Options, _StorageIndex> >
{
typedef SparseMatrixBase<DynamicSparseMatrix> Base;
using Base::convert_index;
public:
EIGEN_SPARSE_PUBLIC_INTERFACE(DynamicSparseMatrix)
// FIXME: why are these operator already alvailable ???
// EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(DynamicSparseMatrix, +=)
// EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(DynamicSparseMatrix, -=)
typedef MappedSparseMatrix<Scalar,Flags> Map;
using Base::IsRowMajor;
using Base::operator=;
enum {
Options = _Options
};

protected:

typedef DynamicSparseMatrix<Scalar,(Flags&~RowMajorBit)|(IsRowMajor?RowMajorBit:0), StorageIndex> TransposedSparseMatrix;

Index m_innerSize;
std::vector<internal::CompressedStorage<Scalar,StorageIndex> > m_data;

public:

inline Index rows() const { return IsRowMajor ? outerSize() : m_innerSize; }
inline Index cols() const { return IsRowMajor ? m_innerSize : outerSize(); }
inline Index innerSize() const { return m_innerSize; }
inline Index outerSize() const { return convert_index(m_data.size()); }
inline Index innerNonZeros(Index j) const { return m_data[j].size(); }

std::vector<internal::CompressedStorage<Scalar,StorageIndex> >& _data() { return m_data; }
const std::vector<internal::CompressedStorage<Scalar,StorageIndex> >& _data() const { return m_data; }

/** \returns the coefficient value at given position \a row, \a col
* This operation involes a log(rho*outer_size) binary search.
*/
inline Scalar coeff(Index row, Index col) const
{
const Index outer = IsRowMajor ? row : col;
const Index inner = IsRowMajor ? col : row;
return m_data[outer].at(inner);
}

/** \returns a reference to the coefficient value at given position \a row, \a col
* This operation involes a log(rho*outer_size) binary search. If the coefficient does not
* exist yet, then a sorted insertion into a sequential buffer is performed.
*/
inline Scalar& coeffRef(Index row, Index col)
{
const Index outer = IsRowMajor ? row : col;
const Index inner = IsRowMajor ? col : row;
return m_data[outer].atWithInsertion(inner);
}

class InnerIterator;
class ReverseInnerIterator;

void setZero()
{
for (Index j=0; j<outerSize(); ++j)
m_data[j].clear();
}

/** \returns the number of non zero coefficients */
Index nonZeros() const
{
Index res = 0;
for (Index j=0; j<outerSize(); ++j)
res += m_data[j].size();
return res;
}



void reserve(Index reserveSize = 1000)
{
if (outerSize()>0)
{
Index reserveSizePerVector = (std::max)(reserveSize/outerSize(),Index(4));
for (Index j=0; j<outerSize(); ++j)
{
m_data[j].reserve(reserveSizePerVector);
}
}
}

/** Does nothing: provided for compatibility with SparseMatrix */
inline void startVec(Index /*outer*/) {}

/** \returns a reference to the non zero coefficient at position \a row, \a col assuming that:
* - the nonzero does not already exist
* - the new coefficient is the last one of the given inner vector.
*
* \sa insert, insertBackByOuterInner */
inline Scalar& insertBack(Index row, Index col)
{
return insertBackByOuterInner(IsRowMajor?row:col, IsRowMajor?col:row);
}

/** \sa insertBack */
inline Scalar& insertBackByOuterInner(Index outer, Index inner)
{
eigen_assert(outer<Index(m_data.size()) && inner<m_innerSize && "out of range");
eigen_assert(((m_data[outer].size()==0) || (m_data[outer].index(m_data[outer].size()-1)<inner))
&& "wrong sorted insertion");
m_data[outer].append(0, inner);
return m_data[outer].value(m_data[outer].size()-1);
}

inline Scalar& insert(Index row, Index col)
{
const Index outer = IsRowMajor ? row : col;
const Index inner = IsRowMajor ? col : row;

Index startId = 0;
Index id = static_cast<Index>(m_data[outer].size()) - 1;
m_data[outer].resize(id+2,1);

while ( (id >= startId) && (m_data[outer].index(id) > inner) )
{
m_data[outer].index(id+1) = m_data[outer].index(id);
m_data[outer].value(id+1) = m_data[outer].value(id);
--id;
}
m_data[outer].index(id+1) = inner;
m_data[outer].value(id+1) = 0;
return m_data[outer].value(id+1);
}

/** Does nothing: provided for compatibility with SparseMatrix */
inline void finalize() {}

/** Suppress all nonzeros which are smaller than \a reference under the tolerance \a epsilon */
void prune(Scalar reference, RealScalar epsilon = NumTraits<RealScalar>::dummy_precision())
{
for (Index j=0; j<outerSize(); ++j)
m_data[j].prune(reference,epsilon);
}

/** Resize the matrix without preserving the data (the matrix is set to zero)
*/
void resize(Index rows, Index cols)
{
const Index outerSize = IsRowMajor ? rows : cols;
m_innerSize = convert_index(IsRowMajor ? cols : rows);
setZero();
if (Index(m_data.size()) != outerSize)
{
m_data.resize(outerSize);
}
}

void resizeAndKeepData(Index rows, Index cols)
{
const Index outerSize = IsRowMajor ? rows : cols;
const Index innerSize = IsRowMajor ? cols : rows;
if (m_innerSize>innerSize)
{
// remove all coefficients with innerCoord>=innerSize
// TODO
//std::cerr << "not implemented yet\n";
exit(2);
}
if (m_data.size() != outerSize)
{
m_data.resize(outerSize);
}
}

/** The class DynamicSparseMatrix is deprecated */
EIGEN_DEPRECATED inline DynamicSparseMatrix()
: m_innerSize(0), m_data(0)
{
#ifdef EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
#endif
eigen_assert(innerSize()==0 && outerSize()==0);
}

/** The class DynamicSparseMatrix is deprecated */
EIGEN_DEPRECATED inline DynamicSparseMatrix(Index rows, Index cols)
: m_innerSize(0)
{
#ifdef EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
#endif
resize(rows, cols);
}

/** The class DynamicSparseMatrix is deprecated */
template<typename OtherDerived>
EIGEN_DEPRECATED explicit inline DynamicSparseMatrix(const SparseMatrixBase<OtherDerived>& other)
: m_innerSize(0)
{
#ifdef EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
#endif
Base::operator=(other.derived());
}

inline DynamicSparseMatrix(const DynamicSparseMatrix& other)
: Base(), m_innerSize(0)
{
#ifdef EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
#endif
*this = other.derived();
}

inline void swap(DynamicSparseMatrix& other)
{
//EIGEN_DBG_SPARSE(std::cout << "SparseMatrix:: swap\n");
std::swap(m_innerSize, other.m_innerSize);
//std::swap(m_outerSize, other.m_outerSize);
m_data.swap(other.m_data);
}

inline DynamicSparseMatrix& operator=(const DynamicSparseMatrix& other)
{
if (other.isRValue())
{
swap(other.const_cast_derived());
}
else
{
resize(other.rows(), other.cols());
m_data = other.m_data;
}
return *this;
}

/** Destructor */
inline ~DynamicSparseMatrix() {}

public:

/** \deprecated
* Set the matrix to zero and reserve the memory for \a reserveSize nonzero coefficients. */
EIGEN_DEPRECATED void startFill(Index reserveSize = 1000)
{
setZero();
reserve(reserveSize);
}

/** \deprecated use insert()
* inserts a nonzero coefficient at given coordinates \a row, \a col and returns its reference assuming that:
* 1 - the coefficient does not exist yet
* 2 - this the coefficient with greater inner coordinate for the given outer coordinate.
* In other words, assuming \c *this is column-major, then there must not exists any nonzero coefficient of coordinates
* \c i \c x \a col such that \c i >= \a row. Otherwise the matrix is invalid.
*
* \see fillrand(), coeffRef()
*/
EIGEN_DEPRECATED Scalar& fill(Index row, Index col)
{
const Index outer = IsRowMajor ? row : col;
const Index inner = IsRowMajor ? col : row;
return insertBack(outer,inner);
}

/** \deprecated use insert()
* Like fill() but with random inner coordinates.
* Compared to the generic coeffRef(), the unique limitation is that we assume
* the coefficient does not exist yet.
*/
EIGEN_DEPRECATED Scalar& fillrand(Index row, Index col)
{
return insert(row,col);
}

/** \deprecated use finalize()
* Does nothing. Provided for compatibility with SparseMatrix. */
EIGEN_DEPRECATED void endFill() {}

# ifdef EIGEN_DYNAMICSPARSEMATRIX_PLUGIN
# include EIGEN_DYNAMICSPARSEMATRIX_PLUGIN
# endif
};

template<typename Scalar, int _Options, typename _StorageIndex>
class DynamicSparseMatrix<Scalar,_Options,_StorageIndex>::InnerIterator : public SparseVector<Scalar,_Options,_StorageIndex>::InnerIterator
{
typedef typename SparseVector<Scalar,_Options,_StorageIndex>::InnerIterator Base;
public:
InnerIterator(const DynamicSparseMatrix& mat, Index outer)
: Base(mat.m_data[outer]), m_outer(outer)
{}

inline Index row() const { return IsRowMajor ? m_outer : Base::index(); }
inline Index col() const { return IsRowMajor ? Base::index() : m_outer; }
inline Index outer() const { return m_outer; }

protected:
const Index m_outer;
};

template<typename Scalar, int _Options, typename _StorageIndex>
class DynamicSparseMatrix<Scalar,_Options,_StorageIndex>::ReverseInnerIterator : public SparseVector<Scalar,_Options,_StorageIndex>::ReverseInnerIterator
{
typedef typename SparseVector<Scalar,_Options,_StorageIndex>::ReverseInnerIterator Base;
public:
ReverseInnerIterator(const DynamicSparseMatrix& mat, Index outer)
: Base(mat.m_data[outer]), m_outer(outer)
{}

inline Index row() const { return IsRowMajor ? m_outer : Base::index(); }
inline Index col() const { return IsRowMajor ? Base::index() : m_outer; }
inline Index outer() const { return m_outer; }

protected:
const Index m_outer;
};

namespace internal {

template<typename _Scalar, int _Options, typename _StorageIndex>
struct evaluator<DynamicSparseMatrix<_Scalar,_Options,_StorageIndex> >
: evaluator_base<DynamicSparseMatrix<_Scalar,_Options,_StorageIndex> >
{
typedef _Scalar Scalar;
typedef DynamicSparseMatrix<_Scalar,_Options,_StorageIndex> SparseMatrixType;
typedef typename SparseMatrixType::InnerIterator InnerIterator;
typedef typename SparseMatrixType::ReverseInnerIterator ReverseInnerIterator;

enum {
CoeffReadCost = NumTraits<_Scalar>::ReadCost,
Flags = SparseMatrixType::Flags
};

evaluator() : m_matrix(0) {}
evaluator(const SparseMatrixType &mat) : m_matrix(&mat) {}

operator SparseMatrixType&() { return m_matrix->const_cast_derived(); }
operator const SparseMatrixType&() const { return *m_matrix; }

Scalar coeff(Index row, Index col) const { return m_matrix->coeff(row,col); }

Index nonZerosEstimate() const { return m_matrix->nonZeros(); }

const SparseMatrixType *m_matrix;
};

}

} // end namespace Eigen

#endif // EIGEN_DYNAMIC_SPARSEMATRIX_H

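The removed DynamicSparseMatrix class was documented as deprecated in favour of assembling into a SparseMatrix kept in uncompressed mode. A hedged sketch of that replacement pattern (sizes, reserve counts and coefficients are made up):

```cpp
#include <Eigen/Sparse>

int main() {
  Eigen::SparseMatrix<double> m(100, 100);
  // Roughly 4 expected nonzeros per column; keeps insertions cheap.
  m.reserve(Eigen::VectorXi::Constant(100, 4));
  // Random-access assembly: the matrix stays uncompressed while inserting.
  m.coeffRef(3, 7) += 1.0;   // creates the entry if it does not exist yet
  m.insert(10, 2) = 2.5;     // entry must not already exist
  m.makeCompressed();        // switch to compressed (CSC) storage when done
  return 0;
}
```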
@@ -0,0 +1,3 @@
#ifndef EIGEN_SPARSE_EXTRA_MODULE_H
#error "Please include unsupported/Eigen/SparseExtra instead of including headers inside the src directory directly."
#endif
@@ -14,6 +14,8 @@
#include <iostream>
#include <vector>

#include "./InternalHeaderCheck.h"

namespace Eigen {

namespace internal
@@ -47,14 +49,14 @@ namespace internal
}

template <typename RealScalar>
inline void GetVectorElt (const std::string& line, RealScalar& val)
inline void GetDenseElt (const std::string& line, RealScalar& val)
{
std::istringstream newline(line);
newline >> val;
}

template <typename RealScalar>
inline void GetVectorElt (const std::string& line, std::complex<RealScalar>& val)
inline void GetDenseElt (const std::string& line, std::complex<RealScalar>& val)
{
RealScalar valR, valI;
std::istringstream newline(line);
@@ -94,23 +96,34 @@ namespace internal


template<typename Scalar>
inline void putVectorElt(Scalar value, std::ofstream& out)
inline void putDenseElt(Scalar value, std::ofstream& out)
{
out << value << "\n";
}
template<typename Scalar>
inline void putVectorElt(std::complex<Scalar> value, std::ofstream& out)
inline void putDenseElt(std::complex<Scalar> value, std::ofstream& out)
{
out << value.real() << " " << value.imag()<< "\n";
}

} // end namespace internal

inline bool getMarketHeader(const std::string& filename, int& sym, bool& iscomplex, bool& isvector)

/**
* \ingroup SparseExtra_Module
* @brief Reads the header of a matrixmarket file and determines the properties of a matrix
*
* @param filename of the file
* @param sym if the matrix is hermitian, symmetric or none of the latter (sym=0)
* @param iscomplex if the matrix has complex or real coefficients
* @param isdense if the matrix is dense or sparse
* @return true if the file was found
*/
inline bool getMarketHeader(const std::string& filename, int& sym, bool& iscomplex, bool& isdense)
{
sym = 0;
iscomplex = false;
isvector = false;
isdense = false;
std::ifstream in(filename.c_str(),std::ios::in);
if(!in)
return false;
@@ -122,14 +135,22 @@ inline bool getMarketHeader(const std::string& filename, int& sym, bool& iscompl
std::stringstream fmtline(line);
std::string substr[5];
fmtline>> substr[0] >> substr[1] >> substr[2] >> substr[3] >> substr[4];
if(substr[2].compare("array") == 0) isvector = true;
if(substr[2].compare("array") == 0) isdense = true;
if(substr[3].compare("complex") == 0) iscomplex = true;
if(substr[4].compare("symmetric") == 0) sym = Symmetric;
else if (substr[4].compare("Hermitian") == 0) sym = SelfAdjoint;

return true;
}

/**
* \ingroup SparseExtra_Module
* @brief Loads a sparse matrix from a matrixmarket format file.
*
* @tparam SparseMatrixType to read into, symmetries are not supported
* @param mat SparseMatrix to read into, current values are overwritten
* @param filename to parse matrix from
* @return returns true if file exists. Returns false if the parsing did not succeed.
*/
template<typename SparseMatrixType>
bool loadMarket(SparseMatrixType& mat, const std::string& filename)
{
@@ -184,50 +205,108 @@ bool loadMarket(SparseMatrixType& mat, const std::string& filename)
elements.push_back(T(i,j,value));
}
else
std::cerr << "Invalid read: " << i << "," << j << "\n";
{
std::cerr << "Invalid read: " << i << "," << j << "\n";
return false;
}
}
}

mat.setFromTriplets(elements.begin(), elements.end());
if(count!=NNZ)
if(count!=NNZ){
std::cerr << count << "!=" << NNZ << "\n";

return false;
}
input.close();
return true;
}

template<typename VectorType>
bool loadMarketVector(VectorType& vec, const std::string& filename)

/**
* \ingroup SparseExtra_Module
* @brief Loads a dense Matrix or Vector from a matrixmarket file. If a statically sized matrix has to be parsed and the file contains the wrong dimensions it is undefined behaviour.
*
* @tparam DenseMatrixType to read into
* @param mat DenseMatrix to read into, current values are overwritten, symmetries are not supported
* @param filename to parse matrix from
* @return true if parsing was successful. Returns false if the parsing did not succeed.
*/
template<typename DenseType>
bool loadMarketDense(DenseType& mat, const std::string& filename)
{
typedef typename VectorType::Scalar Scalar;
typedef typename DenseType::Scalar Scalar;
std::ifstream in(filename.c_str(), std::ios::in);
if(!in)
return false;

std::string line;
int n(0), col(0);
Index rows(0), cols(0);
do
{ // Skip comments
std::getline(in, line); eigen_assert(in.good());
} while (line[0] == '%');
std::istringstream newline(line);
newline >> n >> col;
eigen_assert(n>0 && col>0);
vec.resize(n);
int i = 0;
newline >> rows >> cols;

bool sizes_not_positive=(rows<1 || cols<1);
bool wrong_input_rows = (DenseType::MaxRowsAtCompileTime != Dynamic && rows > DenseType::MaxRowsAtCompileTime) ||
(DenseType::RowsAtCompileTime!=Dynamic && rows!=DenseType::RowsAtCompileTime);
bool wrong_input_cols = (DenseType::MaxColsAtCompileTime != Dynamic && cols > DenseType::MaxColsAtCompileTime) ||
(DenseType::ColsAtCompileTime!=Dynamic && cols!=DenseType::ColsAtCompileTime);

if(sizes_not_positive || wrong_input_rows || wrong_input_cols){
if(sizes_not_positive){
std::cerr<< "non-positive row or column size in file " << filename << "\n";
}else{
std::cerr<< "Input matrix can not be resized to "<<rows<<" x "<<cols<<" as given in " << filename << "\n";
}
in.close();
return false;
}

mat.resize(rows,cols);
Index row = 0;
Index col = 0;
Index n=0;
Scalar value;
while ( std::getline(in, line) && (i < n) ){
internal::GetVectorElt(line, value);
vec(i++) = value;
while ( std::getline(in, line) && (row < rows) && (col < cols)){
internal::GetDenseElt(line, value);
//matrixmarket format is column major
mat(row,col) = value;
row++;
if(row==rows){
row=0;
col++;
}
n++;
}
in.close();
if (i!=n){
if (n!=mat.size()){
std::cerr<< "Unable to read all elements from file " << filename << "\n";
return false;
}
return true;
}
/**
* \ingroup SparseExtra_Module
* @brief Same functionality as loadMarketDense, deprecated
*/
template<typename VectorType>
bool loadMarketVector(VectorType& vec, const std::string& filename)
{
return loadMarketDense(vec, filename);
}

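A hedged usage sketch for the dense loader added above (the file name and dimensions are made up; `loadMarketDense` is introduced by this very patch):

```cpp
#include <unsupported/Eigen/SparseExtra>
#include <Eigen/Dense>
#include <iostream>

int main() {
  Eigen::MatrixXd A;                         // dynamic size, so any file dimensions fit
  if (!Eigen::loadMarketDense(A, "A.mtx")) { // false on missing file, bad sizes, or parse error
    std::cerr << "could not read A.mtx\n";
    return 1;
  }
  std::cout << "read " << A.rows() << " x " << A.cols() << " matrix\n";
  return 0;
}
```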
/**
* \ingroup SparseExtra_Module
* @brief writes a sparse Matrix to a matrixmarket format file
*
* @tparam SparseMatrixType to write to file
* @param mat matrix to write to file
* @param filename filename to write to
* @param sym at the moment no symmetry operations are supported
* @return true if writing succeeded
*/
template<typename SparseMatrixType>
bool saveMarket(const SparseMatrixType& mat, const std::string& filename, int sym = 0)
{
@@ -254,11 +333,22 @@ bool saveMarket(const SparseMatrixType& mat, const std::string& filename, int sy
return true;
}

template<typename VectorType>
bool saveMarketVector (const VectorType& vec, const std::string& filename)

/**
* \ingroup SparseExtra_Module
* @brief writes a dense Matrix or vector to a matrixmarket format file
*
* @tparam DenseMatrixType to write to file
* @param mat matrix to write to file
* @param filename filename to write to
* @return true if writing succeeded
*/

template<typename DenseType>
bool saveMarketDense (const DenseType& mat, const std::string& filename)
{
typedef typename VectorType::Scalar Scalar;
typedef typename VectorType::RealScalar RealScalar;
typedef typename DenseType::Scalar Scalar;
typedef typename DenseType::RealScalar RealScalar;
std::ofstream out(filename.c_str(),std::ios::out);
if(!out)
return false;
@@ -269,14 +359,26 @@ bool saveMarketVector (const VectorType& vec, const std::string& filename)
out << "%%MatrixMarket matrix array complex general\n";
else
out << "%%MatrixMarket matrix array real general\n";
out << vec.size() << " "<< 1 << "\n";
for (int i=0; i < vec.size(); i++){
internal::putVectorElt(vec(i), out);
out << mat.rows() << " "<< mat.cols() << "\n";
for (Index i=0; i < mat.cols(); i++){
for (Index j=0; j < mat.rows(); j++){
internal::putDenseElt(mat(j,i), out);
}
}
out.close();
return true;
}

/**
* \ingroup SparseExtra_Module
* @brief Same functionality as saveMarketDense, deprecated
*/
template<typename VectorType>
bool saveMarketVector (const VectorType& vec, const std::string& filename)
{
return saveMarketDense(vec, filename);
}

} // end namespace Eigen

#endif // EIGEN_SPARSE_MARKET_IO_H

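A companion sketch for the writers (sparse `saveMarket` is pre-existing, `saveMarketDense` is added by this patch; the matrices and file names are illustrative only):

```cpp
#include <unsupported/Eigen/SparseExtra>
#include <Eigen/Sparse>

int main() {
  Eigen::SparseMatrix<double> S(4, 4);
  S.insert(0, 0) = 1.0;
  S.insert(2, 3) = -2.0;
  S.makeCompressed();
  Eigen::saveMarket(S, "S.mtx");        // coordinate (sparse) Matrix Market file

  Eigen::MatrixXd D = Eigen::MatrixXd::Identity(3, 3);
  Eigen::saveMarketDense(D, "D.mtx");   // array (dense) format, written column major
  return 0;
}
```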
@@ -11,6 +11,8 @@
#ifndef EIGEN_BROWSE_MATRICES_H
#define EIGEN_BROWSE_MATRICES_H

#include "./InternalHeaderCheck.h"

namespace Eigen {

enum {

@@ -16,6 +16,8 @@
namespace google {}
#endif

#include "./InternalHeaderCheck.h"

namespace Eigen {

/** Represents a std::map
@@ -33,21 +35,8 @@ template<typename Scalar> struct StdMapTraits
static void setInvalidKey(Type&, const KeyType&) {}
};

#ifdef EIGEN_UNORDERED_MAP_SUPPORT

/** Represents a std::unordered_map
*
* To use it you need to both define EIGEN_UNORDERED_MAP_SUPPORT and include the unordered_map header file
* yourself making sure that unordered_map is defined in the std namespace.
*
* For instance, with current version of gcc you can either enable C++0x standard (-std=c++0x) or do:
* \code
* #include <tr1/unordered_map>
* #define EIGEN_UNORDERED_MAP_SUPPORT
* namespace std {
* using std::tr1::unordered_map;
* }
* \endcode
*
* \see RandomSetter
*/
template<typename Scalar> struct StdUnorderedMapTraits
@@ -60,7 +49,6 @@ template<typename Scalar> struct StdUnorderedMapTraits

static void setInvalidKey(Type&, const KeyType&) {}
};
#endif // EIGEN_UNORDERED_MAP_SUPPORT

#if defined(EIGEN_GOOGLEHASH_SUPPORT)

@@ -115,7 +103,7 @@ template<typename Scalar> struct GoogleSparseHashMapTraits
#endif

/** \class RandomSetter
*
* \ingroup SparseExtra_Module
* \brief The RandomSetter is a wrapper object allowing to set/update a sparse matrix with random access
*
* \tparam SparseMatrixType the type of the sparse matrix we are updating
@@ -149,12 +137,12 @@ template<typename Scalar> struct GoogleSparseHashMapTraits
*
* The possible values for the template parameter MapTraits are:
* - \b StdMapTraits: corresponds to std::map. (does not perform very well)
* - \b GnuHashMapTraits: corresponds to __gnu_cxx::hash_map (available only with GCC)
* - \b StdUnorderedMapTraits: corresponds to std::unordered_map
* - \b GoogleDenseHashMapTraits: corresponds to google::dense_hash_map (best efficiency, reasonable memory consumption)
* - \b GoogleSparseHashMapTraits: corresponds to google::sparse_hash_map (best memory consumption, relatively good performance)
*
* The default map implementation depends on the availability, and the preferred order is:
* GoogleSparseHashMapTraits, GnuHashMapTraits, and finally StdMapTraits.
* GoogleSparseHashMapTraits, StdUnorderedMapTraits, and finally StdMapTraits.
*
* For performance and memory consumption reasons it is highly recommended to use one of
* Google's hash_map implementations. To enable the support for them, you must define
@@ -167,10 +155,8 @@ template<typename SparseMatrixType,
template <typename T> class MapTraits =
#if defined(EIGEN_GOOGLEHASH_SUPPORT)
GoogleDenseHashMapTraits
#elif defined(_HASH_MAP)
GnuHashMapTraits
#else
StdMapTraits
StdUnorderedMapTraits
#endif
,int OuterPacketBits = 6>
class RandomSetter
@@ -185,7 +171,7 @@ class RandomSetter
};
typedef typename MapTraits<ScalarWrapper>::KeyType KeyType;
typedef typename MapTraits<ScalarWrapper>::Type HashMapType;
static const int OuterPacketMask = (1 << OuterPacketBits) - 1;
static constexpr int OuterPacketMask = (1 << OuterPacketBits) - 1;
enum {
SwapStorage = 1 - MapTraits<ScalarWrapper>::IsSorted,
TargetRowMajor = (SparseMatrixType::Flags & RowMajorBit) ? 1 : 0,

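A hedged sketch of how RandomSetter is typically used (the collected entries are flushed into the wrapped matrix when the setter goes out of scope; sizes and coefficients here are made up):

```cpp
#include <unsupported/Eigen/SparseExtra>
#include <Eigen/Sparse>

int main() {
  Eigen::SparseMatrix<double> m(1000, 1000);
  {
    Eigen::RandomSetter<Eigen::SparseMatrix<double> > setter(m);
    // Updates may arrive in any order and may repeat; they accumulate in the map.
    setter(42, 7)  += 1.0;
    setter(7, 42)  += 2.0;
    setter(999, 0) += 0.5;
  } // destructor writes the pending entries back into m
  return 0;
}
```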
231
libs/eigen/unsupported/Eigen/src/SparseExtra/SparseInverse.h
Normal file
@@ -0,0 +1,231 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2022 Julian Kent <jkflying@gmail.com>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_SPARSEINVERSE_H
#define EIGEN_SPARSEINVERSE_H

#include "./InternalHeaderCheck.h"

#include "../../../../Eigen/Sparse"
#include "../../../../Eigen/SparseLU"

namespace Eigen {

/**
* @brief Kahan algorithm based accumulator
*
* The Kahan sum algorithm guarantees to bound the error from floating point
* accumulation to a fixed value, regardless of the number of accumulations
* performed. Naive accumulation accumulates errors O(N), and pairwise O(logN).
* However pairwise also requires O(logN) memory while Kahan summation requires
* O(1) memory, but 4x the operations / latency.
*
* NB! Do not enable associative math optimizations, they may cause the Kahan
* summation to be optimized out leaving you with naive summation again.
*
*/
template <typename Scalar>
class KahanSum {
// Straightforward Kahan summation for accurate accumulation of a sum of numbers
Scalar _sum{};
Scalar _correction{};

public:
Scalar value() { return _sum; }

void operator+=(Scalar increment) {
const Scalar correctedIncrement = increment + _correction;
const Scalar previousSum = _sum;
_sum += correctedIncrement;
_correction = correctedIncrement - (_sum - previousSum);
}
};
template <typename Scalar, Index Width = 16>
class FABSum {
// https://epubs.siam.org/doi/pdf/10.1137/19M1257780
// Fast and Accurate Blocked Summation
// Uses naive summation for the fast sum, and Kahan summation for the accurate sum
// Theoretically SIMD sum could be changed to a tree sum which would improve accuracy
// over naive summation
KahanSum<Scalar> _totalSum;
Matrix<Scalar, Width, 1> _block;
Index _blockUsed{};

public:
Scalar value() { return _block.topRows(_blockUsed).sum() + _totalSum.value(); }

void operator+=(Scalar increment) {
_block(_blockUsed++, 0) = increment;
if (_blockUsed == Width) {
_totalSum += _block.sum();
_blockUsed = 0;
}
}
};

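To make the compensation step above concrete, a small hedged numeric illustration (float is used so the effect is visible at small scale; as the file's comment warns, this must be built without associative-math optimizations such as -ffast-math):

```cpp
#include <cstdio>

int main() {
  // Add ten million increments of 1e-8 on top of 1.0f.
  // Each increment is below half an ulp of 1.0f, so naive accumulation never moves.
  float naive = 1.0f;
  float sum = 1.0f, correction = 0.0f;            // Kahan state
  for (int i = 0; i < 10000000; ++i) {
    const float inc = 1e-8f;
    naive += inc;                                 // stays at 1.0f
    const float corrected = inc + correction;     // re-inject the error lost so far
    const float previous = sum;
    sum += corrected;
    correction = corrected - (sum - previous);    // capture this step's rounding error
  }
  std::printf("naive=%.7f kahan=%.7f exact=1.1\n", naive, sum);
  return 0;
}
```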
/**
* @brief computes an accurate dot product on two sparse vectors
*
* Uses an accurate summation algorithm for the accumulator in order to
* compute an accurate dot product for two sparse vectors.
*
*/
template <typename Derived, typename OtherDerived>
typename Derived::Scalar accurateDot(const SparseMatrixBase<Derived>& A, const SparseMatrixBase<OtherDerived>& other) {
typedef typename Derived::Scalar Scalar;
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
EIGEN_STATIC_ASSERT_VECTOR_ONLY(OtherDerived)
EIGEN_STATIC_ASSERT_SAME_VECTOR_SIZE(Derived, OtherDerived)
static_assert(internal::is_same<Scalar, typename OtherDerived::Scalar>::value, "mismatched types");

internal::evaluator<Derived> thisEval(A.derived());
typename Derived::ReverseInnerIterator i(thisEval, 0);

internal::evaluator<OtherDerived> otherEval(other.derived());
typename OtherDerived::ReverseInnerIterator j(otherEval, 0);

FABSum<Scalar> res;
while (i && j) {
if (i.index() == j.index()) {
res += numext::conj(i.value()) * j.value();
--i;
--j;
} else if (i.index() > j.index())
--i;
else
--j;
}
return res.value();
}

/**
* @brief calculate sparse subset of inverse of sparse matrix
*
* This class returns a sparse subset of the inverse of the input matrix.
* The nonzeros correspond to the nonzeros of the input, plus any additional
* elements required due to fill-in of the internal LU factorization. This
* is minimized by applying a fill-reducing permutation as part of the LU
* factorization.
*
* If there are specific entries of the input matrix which you need inverse
* values for, which are zero for the input, you need to insert entries into
* the input sparse matrix for them to be calculated.
*
* Due to the sensitive nature of matrix inversion, particularly on large
* matrices which are made possible via sparsity, high accuracy dot products
* based on Kahan summation are used to reduce numerical error. If you still
* encounter numerical errors you may wish to equilibrate your matrix before
* calculating the inverse, as well as making sure it is actually full rank.
*/
template <typename Scalar>
class SparseInverse {
public:
typedef SparseMatrix<Scalar, ColMajor> MatrixType;
typedef SparseMatrix<Scalar, RowMajor> RowMatrixType;

SparseInverse() {}

/**
* @brief This Constructor is for if you already have a factored SparseLU and would like to use it to calculate a
* sparse inverse.
*
* Just call this constructor with your already factored SparseLU class and you can directly call the .inverse()
* method to get the result.
*/
SparseInverse(const SparseLU<MatrixType>& slu) { _result = computeInverse(slu); }

/**
* @brief Calculate the sparse inverse from a given sparse input
*/
SparseInverse& compute(const SparseMatrix<Scalar>& A) {
SparseLU<MatrixType> slu;
slu.compute(A);
_result = computeInverse(slu);
return *this;
}

/**
* @brief return the already-calculated sparse inverse, or a 0x0 matrix if it could not be computed
*/
const MatrixType& inverse() const { return _result; }

/**
* @brief Internal function to calculate the sparse inverse in a functional way
* @return A sparse inverse representation, or, if the decomposition didn't complete, a 0x0 matrix.
*/
static MatrixType computeInverse(const SparseLU<MatrixType>& slu) {
if (slu.info() != Success) {
return MatrixType(0, 0);
}

// Extract from SparseLU and decompose into L, inverse D and U terms
Matrix<Scalar, Dynamic, 1> invD;
RowMatrixType Upper;
{
RowMatrixType DU = slu.matrixU().toSparse();
invD = DU.diagonal().cwiseInverse();
Upper = (invD.asDiagonal() * DU).template triangularView<StrictlyUpper>();
}
MatrixType Lower = slu.matrixL().toSparse().template triangularView<StrictlyLower>();

// Compute the inverse and reapply the permutation matrix from the LU decomposition
return slu.colsPermutation().transpose() * computeInverse(Upper, invD, Lower) * slu.rowsPermutation();
}

/**
* @brief Internal function to calculate the inverse from strictly upper, diagonal and strictly lower components
*/
static MatrixType computeInverse(const RowMatrixType& Upper, const Matrix<Scalar, Dynamic, 1>& inverseDiagonal,
const MatrixType& Lower) {
// Calculate the 'minimal set', which is the nonzeros of (L+U).transpose()
// It could be zeroed, but we will overwrite all non-zeros anyways.
MatrixType colInv = Lower.transpose().template triangularView<UnitUpper>();
colInv += Upper.transpose();

// We also need rowmajor representation in order to do efficient row-wise dot products
RowMatrixType rowInv = Upper.transpose().template triangularView<UnitLower>();
rowInv += Lower.transpose();

// Use the Takahashi algorithm to build the supporting elements of the inverse
// upwards and to the left, from the bottom right element, 1 col/row at a time
for (Index recurseLevel = Upper.cols() - 1; recurseLevel >= 0; recurseLevel--) {
const auto& col = Lower.col(recurseLevel);
const auto& row = Upper.row(recurseLevel);

// Calculate the inverse values for the nonzeros in this column
typename MatrixType::ReverseInnerIterator colIter(colInv, recurseLevel);
for (; recurseLevel < colIter.index(); --colIter) {
const Scalar element = -accurateDot(col, rowInv.row(colIter.index()));
colIter.valueRef() = element;
rowInv.coeffRef(colIter.index(), recurseLevel) = element;
}

// Calculate the inverse values for the nonzeros in this row
typename RowMatrixType::ReverseInnerIterator rowIter(rowInv, recurseLevel);
for (; recurseLevel < rowIter.index(); --rowIter) {
const Scalar element = -accurateDot(row, colInv.col(rowIter.index()));
rowIter.valueRef() = element;
colInv.coeffRef(recurseLevel, rowIter.index()) = element;
}

// And finally the diagonal, which corresponds to both row and col iterator now
const Scalar diag = inverseDiagonal(recurseLevel) - accurateDot(row, colInv.col(recurseLevel));
rowIter.valueRef() = diag;
colIter.valueRef() = diag;
}

return colInv;
}

private:
MatrixType _result;
};

} // namespace Eigen
#endif

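A hedged usage sketch for the new SparseInverse helper; the matrix contents are made up, and the include path is an assumption based on the InternalHeaderCheck added in this patch (the class ships inside the SparseExtra module directory):

```cpp
#include <unsupported/Eigen/SparseExtra>   // assumption: exports SparseInverse
#include <Eigen/Sparse>
#include <iostream>

int main() {
  Eigen::SparseMatrix<double> A(3, 3);
  A.insert(0, 0) = 4.0;
  A.insert(1, 1) = 2.0;
  A.insert(2, 2) = 1.0;
  A.insert(0, 2) = 1.0;
  A.makeCompressed();

  Eigen::SparseInverse<double> inv;
  inv.compute(A);                                   // factors with SparseLU internally
  const Eigen::SparseMatrix<double>& Ainv = inv.inverse();
  if (Ainv.rows() == 0)
    std::cerr << "factorization failed\n";          // 0x0 result signals failure
  else
    std::cout << Eigen::MatrixXd(Ainv) << "\n";     // only the sparse subset is stored
  return 0;
}
```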
@@ -11,6 +11,8 @@
#ifndef EIGEN_BESSELFUNCTIONS_ARRAYAPI_H
#define EIGEN_BESSELFUNCTIONS_ARRAYAPI_H

#include "./InternalHeaderCheck.h"

namespace Eigen {

/** \returns an expression of the coefficient-wise i0(\a x) to the given

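A hedged sketch of the coefficient-wise Bessel array API this header declares (the include path and sample values are assumptions, not part of the patch):

```cpp
#include <unsupported/Eigen/SpecialFunctions>  // assumption: pulls in the Bessel array API
#include <Eigen/Dense>
#include <iostream>

int main() {
  Eigen::ArrayXd x = Eigen::ArrayXd::LinSpaced(5, 0.5, 2.5);
  Eigen::ArrayXd y = Eigen::bessel_i0(x);      // coefficient-wise modified Bessel I0
  std::cout << y.transpose() << "\n";
  return 0;
}
```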
@@ -8,6 +8,8 @@
#ifndef EIGEN_BESSELFUNCTIONS_BFLOAT16_H
#define EIGEN_BESSELFUNCTIONS_BFLOAT16_H

#include "./InternalHeaderCheck.h"

namespace Eigen {
namespace numext {

@@ -11,6 +11,8 @@
#ifndef EIGEN_BESSELFUNCTIONS_FUNCTORS_H
#define EIGEN_BESSELFUNCTIONS_FUNCTORS_H

#include "./InternalHeaderCheck.h"

namespace Eigen {

namespace internal {

@@ -22,7 +24,6 @@ namespace internal {
 */
template <typename Scalar>
struct scalar_bessel_i0_op {
EIGEN_EMPTY_STRUCT_CTOR(scalar_bessel_i0_op)
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar operator()(const Scalar& x) const {
using numext::bessel_i0;
return bessel_i0(x);

@@ -50,7 +51,6 @@ struct functor_traits<scalar_bessel_i0_op<Scalar> > {
 */
template <typename Scalar>
struct scalar_bessel_i0e_op {
EIGEN_EMPTY_STRUCT_CTOR(scalar_bessel_i0e_op)
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar operator()(const Scalar& x) const {
using numext::bessel_i0e;
return bessel_i0e(x);

@@ -77,7 +77,6 @@ struct functor_traits<scalar_bessel_i0e_op<Scalar> > {
 */
template <typename Scalar>
struct scalar_bessel_i1_op {
EIGEN_EMPTY_STRUCT_CTOR(scalar_bessel_i1_op)
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar operator()(const Scalar& x) const {
using numext::bessel_i1;
return bessel_i1(x);

@@ -105,7 +104,6 @@ struct functor_traits<scalar_bessel_i1_op<Scalar> > {
 */
template <typename Scalar>
struct scalar_bessel_i1e_op {
EIGEN_EMPTY_STRUCT_CTOR(scalar_bessel_i1e_op)
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar operator()(const Scalar& x) const {
using numext::bessel_i1e;
return bessel_i1e(x);

@@ -132,7 +130,6 @@ struct functor_traits<scalar_bessel_i1e_op<Scalar> > {
 */
template <typename Scalar>
struct scalar_bessel_j0_op {
EIGEN_EMPTY_STRUCT_CTOR(scalar_bessel_j0_op)
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar operator()(const Scalar& x) const {
using numext::bessel_j0;
return bessel_j0(x);

@@ -160,7 +157,6 @@ struct functor_traits<scalar_bessel_j0_op<Scalar> > {
 */
template <typename Scalar>
struct scalar_bessel_y0_op {
EIGEN_EMPTY_STRUCT_CTOR(scalar_bessel_y0_op)
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar operator()(const Scalar& x) const {
using numext::bessel_y0;
return bessel_y0(x);

@@ -188,7 +184,6 @@ struct functor_traits<scalar_bessel_y0_op<Scalar> > {
 */
template <typename Scalar>
struct scalar_bessel_j1_op {
EIGEN_EMPTY_STRUCT_CTOR(scalar_bessel_j1_op)
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar operator()(const Scalar& x) const {
using numext::bessel_j1;
return bessel_j1(x);

@@ -216,7 +211,6 @@ struct functor_traits<scalar_bessel_j1_op<Scalar> > {
 */
template <typename Scalar>
struct scalar_bessel_y1_op {
EIGEN_EMPTY_STRUCT_CTOR(scalar_bessel_y1_op)
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar operator()(const Scalar& x) const {
using numext::bessel_y1;
return bessel_y1(x);

@@ -244,7 +238,6 @@ struct functor_traits<scalar_bessel_y1_op<Scalar> > {
 */
template <typename Scalar>
struct scalar_bessel_k0_op {
EIGEN_EMPTY_STRUCT_CTOR(scalar_bessel_k0_op)
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar operator()(const Scalar& x) const {
using numext::bessel_k0;
return bessel_k0(x);

@@ -272,7 +265,6 @@ struct functor_traits<scalar_bessel_k0_op<Scalar> > {
 */
template <typename Scalar>
struct scalar_bessel_k0e_op {
EIGEN_EMPTY_STRUCT_CTOR(scalar_bessel_k0e_op)
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar operator()(const Scalar& x) const {
using numext::bessel_k0e;
return bessel_k0e(x);

@@ -300,7 +292,6 @@ struct functor_traits<scalar_bessel_k0e_op<Scalar> > {
 */
template <typename Scalar>
struct scalar_bessel_k1_op {
EIGEN_EMPTY_STRUCT_CTOR(scalar_bessel_k1_op)
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar operator()(const Scalar& x) const {
using numext::bessel_k1;
return bessel_k1(x);

@@ -328,7 +319,6 @@ struct functor_traits<scalar_bessel_k1_op<Scalar> > {
 */
template <typename Scalar>
struct scalar_bessel_k1e_op {
EIGEN_EMPTY_STRUCT_CTOR(scalar_bessel_k1e_op)
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar operator()(const Scalar& x) const {
using numext::bessel_k1e;
return bessel_k1e(x);
@@ -8,6 +8,8 @@
#ifndef EIGEN_BESSELFUNCTIONS_HALF_H
#define EIGEN_BESSELFUNCTIONS_HALF_H

#include "./InternalHeaderCheck.h"

namespace Eigen {
namespace numext {
@@ -10,6 +10,8 @@
#ifndef EIGEN_BESSEL_FUNCTIONS_H
#define EIGEN_BESSEL_FUNCTIONS_H

#include "./InternalHeaderCheck.h"

namespace Eigen {
namespace internal {

@@ -48,10 +50,10 @@ struct bessel_i0e_retval {

template <typename T, typename ScalarType = typename unpacket_traits<T>::type>
struct generic_i0e {
EIGEN_DEVICE_FUNC
static EIGEN_STRONG_INLINE T run(const T&) {
EIGEN_STATIC_ASSERT((internal::is_same<T, T>::value == false),
                    THIS_TYPE_IS_NOT_SUPPORTED);
EIGEN_STATIC_ASSERT((internal::is_same<T, T>::value == false),
                    THIS_TYPE_IS_NOT_SUPPORTED)

EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE T run(const T&) {
return ScalarType(0);
}
};
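// The hunks in this file fold the always-false EIGEN_STATIC_ASSERT into the
// combined run() declaration. The underlying pattern is a primary template
// whose assert only fires when an unsupported type is actually instantiated;
// a minimal standalone equivalent (hypothetical names) looks like this:
#include <type_traits>

template <typename T>
struct dependent_false : std::false_type {};

template <typename T>
struct generic_fallback {
  static T run(const T&) {
    // Fires only if generic_fallback<T>::run is instantiated for a T with no specialization.
    static_assert(dependent_false<T>::value, "THIS_TYPE_IS_NOT_SUPPORTED");
    return T(0);
  }
};

// Supported scalar types get explicit specializations instead of the fallback.
template <>
struct generic_fallback<float> {
  static float run(const float& x) { return x; }
};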
@@ -239,10 +241,10 @@ struct bessel_i1e_retval {

template <typename T, typename ScalarType = typename unpacket_traits<T>::type >
struct generic_i1e {
EIGEN_DEVICE_FUNC
static EIGEN_STRONG_INLINE T run(const T&) {
EIGEN_STATIC_ASSERT((internal::is_same<T, T>::value == false),
                    THIS_TYPE_IS_NOT_SUPPORTED);
EIGEN_STATIC_ASSERT((internal::is_same<T, T>::value == false),
                    THIS_TYPE_IS_NOT_SUPPORTED)

EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE T run(const T&) {
return ScalarType(0);
}
};

@@ -434,10 +436,10 @@ struct bessel_k0e_retval {

template <typename T, typename ScalarType = typename unpacket_traits<T>::type>
struct generic_k0e {
EIGEN_DEVICE_FUNC
static EIGEN_STRONG_INLINE T run(const T&) {
EIGEN_STATIC_ASSERT((internal::is_same<T, T>::value == false),
                    THIS_TYPE_IS_NOT_SUPPORTED);
EIGEN_STATIC_ASSERT((internal::is_same<T, T>::value == false),
                    THIS_TYPE_IS_NOT_SUPPORTED)

EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE T run(const T&) {
return ScalarType(0);
}
};

@@ -597,10 +599,10 @@ struct bessel_k0_retval {

template <typename T, typename ScalarType = typename unpacket_traits<T>::type>
struct generic_k0 {
EIGEN_DEVICE_FUNC
static EIGEN_STRONG_INLINE T run(const T&) {
EIGEN_STATIC_ASSERT((internal::is_same<T, T>::value == false),
                    THIS_TYPE_IS_NOT_SUPPORTED);
EIGEN_STATIC_ASSERT((internal::is_same<T, T>::value == false),
                    THIS_TYPE_IS_NOT_SUPPORTED)

EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE T run(const T&) {
return ScalarType(0);
}
};

@@ -769,10 +771,10 @@ struct bessel_k1e_retval {

template <typename T, typename ScalarType = typename unpacket_traits<T>::type>
struct generic_k1e {
EIGEN_DEVICE_FUNC
static EIGEN_STRONG_INLINE T run(const T&) {
EIGEN_STATIC_ASSERT((internal::is_same<T, T>::value == false),
                    THIS_TYPE_IS_NOT_SUPPORTED);
EIGEN_STATIC_ASSERT((internal::is_same<T, T>::value == false),
                    THIS_TYPE_IS_NOT_SUPPORTED)

EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE T run(const T&) {
return ScalarType(0);
}
};

@@ -925,10 +927,10 @@ struct bessel_k1_retval {

template <typename T, typename ScalarType = typename unpacket_traits<T>::type>
struct generic_k1 {
EIGEN_DEVICE_FUNC
static EIGEN_STRONG_INLINE T run(const T&) {
EIGEN_STATIC_ASSERT((internal::is_same<T, T>::value == false),
                    THIS_TYPE_IS_NOT_SUPPORTED);
EIGEN_STATIC_ASSERT((internal::is_same<T, T>::value == false),
                    THIS_TYPE_IS_NOT_SUPPORTED)

EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE T run(const T&) {
return ScalarType(0);
}
};

@@ -1091,10 +1093,10 @@ struct bessel_j0_retval {

template <typename T, typename ScalarType = typename unpacket_traits<T>::type>
struct generic_j0 {
EIGEN_DEVICE_FUNC
static EIGEN_STRONG_INLINE T run(const T&) {
EIGEN_STATIC_ASSERT((internal::is_same<T, T>::value == false),
                    THIS_TYPE_IS_NOT_SUPPORTED);
EIGEN_STATIC_ASSERT((internal::is_same<T, T>::value == false),
                    THIS_TYPE_IS_NOT_SUPPORTED)

EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE T run(const T&) {
return ScalarType(0);
}
};

@@ -1291,10 +1293,10 @@ struct bessel_y0_retval {

template <typename T, typename ScalarType = typename unpacket_traits<T>::type>
struct generic_y0 {
EIGEN_DEVICE_FUNC
static EIGEN_STRONG_INLINE T run(const T&) {
EIGEN_STATIC_ASSERT((internal::is_same<T, T>::value == false),
                    THIS_TYPE_IS_NOT_SUPPORTED);
EIGEN_STATIC_ASSERT((internal::is_same<T, T>::value == false),
                    THIS_TYPE_IS_NOT_SUPPORTED)

EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE T run(const T&) {
return ScalarType(0);
}
};

@@ -1489,10 +1491,10 @@ struct bessel_j1_retval {

template <typename T, typename ScalarType = typename unpacket_traits<T>::type>
struct generic_j1 {
EIGEN_DEVICE_FUNC
static EIGEN_STRONG_INLINE T run(const T&) {
EIGEN_STATIC_ASSERT((internal::is_same<T, T>::value == false),
                    THIS_TYPE_IS_NOT_SUPPORTED);
EIGEN_STATIC_ASSERT((internal::is_same<T, T>::value == false),
                    THIS_TYPE_IS_NOT_SUPPORTED)

EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE T run(const T&) {
return ScalarType(0);
}
};

@@ -1680,10 +1682,10 @@ struct bessel_y1_retval {

template <typename T, typename ScalarType = typename unpacket_traits<T>::type>
struct generic_y1 {
EIGEN_DEVICE_FUNC
static EIGEN_STRONG_INLINE T run(const T&) {
EIGEN_STATIC_ASSERT((internal::is_same<T, T>::value == false),
                    THIS_TYPE_IS_NOT_SUPPORTED);
EIGEN_STATIC_ASSERT((internal::is_same<T, T>::value == false),
                    THIS_TYPE_IS_NOT_SUPPORTED)

EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE T run(const T&) {
return ScalarType(0);
}
};
@@ -10,6 +10,8 @@
#ifndef EIGEN_BESSELFUNCTIONS_PACKETMATH_H
#define EIGEN_BESSELFUNCTIONS_PACKETMATH_H

#include "./InternalHeaderCheck.h"

namespace Eigen {

namespace internal {
@@ -5,6 +5,8 @@ namespace hip_impl {
template <typename, typename, unsigned int> struct Scalar_accessor;
} // end namespace hip_impl

#include "./InternalHeaderCheck.h"

namespace Eigen {
namespace internal {
@@ -0,0 +1,3 @@
#ifndef EIGEN_SPECIALFUNCTIONS_MODULE_H
#error "Please include unsupported/Eigen/SpecialFunctions instead of including headers inside the src directory directly."
#endif
@@ -11,6 +11,8 @@
#ifndef EIGEN_SPECIALFUNCTIONS_ARRAYAPI_H
#define EIGEN_SPECIALFUNCTIONS_ARRAYAPI_H

#include "./InternalHeaderCheck.h"

namespace Eigen {

/** \cpp11 \returns an expression of the coefficient-wise igamma(\a a, \a x) to the given arrays.
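// A usage sketch for the coefficient-wise igamma documented above; the same
// pattern applies to igammac, betainc, zeta, and the other array-API functions.
#include <unsupported/Eigen/SpecialFunctions>
#include <iostream>

int main() {
  Eigen::ArrayXd a = Eigen::ArrayXd::Constant(4, 2.0);
  Eigen::ArrayXd x = Eigen::ArrayXd::LinSpaced(4, 0.5, 3.5);
  std::cout << Eigen::igamma(a, x) << "\n";  // element-wise incomplete gamma
}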
@@ -8,6 +8,8 @@
#ifndef EIGEN_SPECIALFUNCTIONS_BFLOAT16_H
#define EIGEN_SPECIALFUNCTIONS_BFLOAT16_H

#include "./InternalHeaderCheck.h"

namespace Eigen {
namespace numext {
@@ -11,6 +11,8 @@
#ifndef EIGEN_SPECIALFUNCTIONS_FUNCTORS_H
#define EIGEN_SPECIALFUNCTIONS_FUNCTORS_H

#include "./InternalHeaderCheck.h"

namespace Eigen {

namespace internal {

@@ -23,7 +25,6 @@ namespace internal {
 */
template<typename Scalar> struct scalar_igamma_op : binary_op_base<Scalar,Scalar>
{
EIGEN_EMPTY_STRUCT_CTOR(scalar_igamma_op)
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& a, const Scalar& x) const {
using numext::igamma; return igamma(a, x);
}

@@ -49,7 +50,6 @@ struct functor_traits<scalar_igamma_op<Scalar> > {
 */
template <typename Scalar>
struct scalar_igamma_der_a_op {
EIGEN_EMPTY_STRUCT_CTOR(scalar_igamma_der_a_op)
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar operator()(const Scalar& a, const Scalar& x) const {
using numext::igamma_der_a;
return igamma_der_a(a, x);

@@ -77,7 +77,6 @@ struct functor_traits<scalar_igamma_der_a_op<Scalar> > {
 */
template <typename Scalar>
struct scalar_gamma_sample_der_alpha_op {
EIGEN_EMPTY_STRUCT_CTOR(scalar_gamma_sample_der_alpha_op)
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar operator()(const Scalar& alpha, const Scalar& sample) const {
using numext::gamma_sample_der_alpha;
return gamma_sample_der_alpha(alpha, sample);

@@ -103,7 +102,6 @@ struct functor_traits<scalar_gamma_sample_der_alpha_op<Scalar> > {
 */
template<typename Scalar> struct scalar_igammac_op : binary_op_base<Scalar,Scalar>
{
EIGEN_EMPTY_STRUCT_CTOR(scalar_igammac_op)
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& a, const Scalar& x) const {
using numext::igammac; return igammac(a, x);
}

@@ -128,7 +126,6 @@ struct functor_traits<scalar_igammac_op<Scalar> > {
 *
 */
template<typename Scalar> struct scalar_betainc_op {
EIGEN_EMPTY_STRUCT_CTOR(scalar_betainc_op)
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& x, const Scalar& a, const Scalar& b) const {
using numext::betainc; return betainc(x, a, b);
}

@@ -154,7 +151,6 @@ struct functor_traits<scalar_betainc_op<Scalar> > {
 * \sa class CwiseUnaryOp, Cwise::lgamma()
 */
template<typename Scalar> struct scalar_lgamma_op {
EIGEN_EMPTY_STRUCT_CTOR(scalar_lgamma_op)
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& a) const {
using numext::lgamma; return lgamma(a);
}

@@ -176,7 +172,6 @@ struct functor_traits<scalar_lgamma_op<Scalar> >
 * \sa class CwiseUnaryOp, Cwise::digamma()
 */
template<typename Scalar> struct scalar_digamma_op {
EIGEN_EMPTY_STRUCT_CTOR(scalar_digamma_op)
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& a) const {
using numext::digamma; return digamma(a);
}

@@ -198,7 +193,6 @@ struct functor_traits<scalar_digamma_op<Scalar> >
 * \sa class CwiseUnaryOp, Cwise::zeta()
 */
template<typename Scalar> struct scalar_zeta_op {
EIGEN_EMPTY_STRUCT_CTOR(scalar_zeta_op)
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& x, const Scalar& q) const {
using numext::zeta; return zeta(x, q);
}

@@ -220,7 +214,6 @@ struct functor_traits<scalar_zeta_op<Scalar> >
 * \sa class CwiseUnaryOp, Cwise::polygamma()
 */
template<typename Scalar> struct scalar_polygamma_op {
EIGEN_EMPTY_STRUCT_CTOR(scalar_polygamma_op)
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& n, const Scalar& x) const {
using numext::polygamma; return polygamma(n, x);
}

@@ -242,7 +235,6 @@ struct functor_traits<scalar_polygamma_op<Scalar> >
 * \sa class CwiseUnaryOp, ArrayBase::erf()
 */
template<typename Scalar> struct scalar_erf_op {
EIGEN_EMPTY_STRUCT_CTOR(scalar_erf_op)
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar
operator()(const Scalar& a) const {
return numext::erf(a);

@@ -281,7 +273,6 @@ struct functor_traits<scalar_erf_op<Scalar> > {
 * \sa class CwiseUnaryOp, Cwise::erfc()
 */
template<typename Scalar> struct scalar_erfc_op {
EIGEN_EMPTY_STRUCT_CTOR(scalar_erfc_op)
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& a) const {
using numext::erfc; return erfc(a);
}

@@ -304,7 +295,6 @@ struct functor_traits<scalar_erfc_op<Scalar> >
 * \sa class CwiseUnaryOp, Cwise::ndtri()
 */
template<typename Scalar> struct scalar_ndtri_op {
EIGEN_EMPTY_STRUCT_CTOR(scalar_ndtri_op)
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& a) const {
using numext::ndtri; return ndtri(a);
}
@@ -8,6 +8,8 @@
#ifndef EIGEN_SPECIALFUNCTIONS_HALF_H
#define EIGEN_SPECIALFUNCTIONS_HALF_H

#include "./InternalHeaderCheck.h"

namespace Eigen {
namespace numext {
@@ -10,6 +10,8 @@
#ifndef EIGEN_SPECIAL_FUNCTIONS_H
#define EIGEN_SPECIAL_FUNCTIONS_H

#include "./InternalHeaderCheck.h"

namespace Eigen {
namespace internal {

@@ -43,10 +45,10 @@ namespace internal {

template <typename Scalar>
struct lgamma_impl {
EIGEN_DEVICE_FUNC
static EIGEN_STRONG_INLINE Scalar run(const Scalar) {
EIGEN_STATIC_ASSERT((internal::is_same<Scalar, Scalar>::value == false),
                    THIS_TYPE_IS_NOT_SUPPORTED);
EIGEN_STATIC_ASSERT((internal::is_same<Scalar, Scalar>::value == false),
                    THIS_TYPE_IS_NOT_SUPPORTED)

EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Scalar run(const Scalar) {
return Scalar(0);
}
};

@@ -126,10 +128,10 @@ struct digamma_retval {
 */
template <typename Scalar>
struct digamma_impl_maybe_poly {
EIGEN_DEVICE_FUNC
static EIGEN_STRONG_INLINE Scalar run(const Scalar) {
EIGEN_STATIC_ASSERT((internal::is_same<Scalar, Scalar>::value == false),
                    THIS_TYPE_IS_NOT_SUPPORTED);
EIGEN_STATIC_ASSERT((internal::is_same<Scalar, Scalar>::value == false),
                    THIS_TYPE_IS_NOT_SUPPORTED)

EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Scalar run(const Scalar) {
return Scalar(0);
}
};

@@ -390,10 +392,10 @@ struct erf_impl<double> {

template <typename Scalar>
struct erfc_impl {
EIGEN_DEVICE_FUNC
static EIGEN_STRONG_INLINE Scalar run(const Scalar) {
EIGEN_STATIC_ASSERT((internal::is_same<Scalar, Scalar>::value == false),
                    THIS_TYPE_IS_NOT_SUPPORTED);
EIGEN_STATIC_ASSERT((internal::is_same<Scalar, Scalar>::value == false),
                    THIS_TYPE_IS_NOT_SUPPORTED)

EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Scalar run(const Scalar) {
return Scalar(0);
}
};

@@ -599,13 +601,12 @@ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T generic_ndtri_lt_exp_neg_two(
ScalarType(6.79019408009981274425e-9)
};
const T eight = pset1<T>(ScalarType(8.0));
const T one = pset1<T>(ScalarType(1));
const T neg_two = pset1<T>(ScalarType(-2));
T x, x0, x1, z;

x = psqrt(pmul(neg_two, plog(b)));
x0 = psub(x, pdiv(plog(x), x));
z = pdiv(one, x);
z = preciprocal(x);
x1 = pmul(
z, pselect(
pcmp_lt(x, eight),

@@ -650,10 +651,10 @@ struct ndtri_retval {

template <typename Scalar>
struct ndtri_impl {
EIGEN_DEVICE_FUNC
static EIGEN_STRONG_INLINE Scalar run(const Scalar) {
EIGEN_STATIC_ASSERT((internal::is_same<Scalar, Scalar>::value == false),
                    THIS_TYPE_IS_NOT_SUPPORTED);
EIGEN_STATIC_ASSERT((internal::is_same<Scalar, Scalar>::value == false),
                    THIS_TYPE_IS_NOT_SUPPORTED)

EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Scalar run(const Scalar) {
return Scalar(0);
}
};

@@ -684,11 +685,11 @@ struct igammac_retval {
template <typename Scalar>
struct cephes_helper {
EIGEN_DEVICE_FUNC
static EIGEN_STRONG_INLINE Scalar machep() { assert(false && "machep not supported for this type"); return 0.0; }
static EIGEN_STRONG_INLINE Scalar machep() { eigen_assert(false && "machep not supported for this type"); return 0.0; }
EIGEN_DEVICE_FUNC
static EIGEN_STRONG_INLINE Scalar big() { assert(false && "big not supported for this type"); return 0.0; }
static EIGEN_STRONG_INLINE Scalar big() { eigen_assert(false && "big not supported for this type"); return 0.0; }
EIGEN_DEVICE_FUNC
static EIGEN_STRONG_INLINE Scalar biginv() { assert(false && "biginv not supported for this type"); return 0.0; }
static EIGEN_STRONG_INLINE Scalar biginv() { eigen_assert(false && "biginv not supported for this type"); return 0.0; }
};

template <>

@@ -786,7 +787,7 @@ struct igammac_cf_impl {
Scalar ax = main_igamma_term<Scalar>(a, x);
// This is independent of mode. If this value is zero,
// then the function value is zero. If the function value is zero,
// then we are in a neighborhood where the function value evalutes to zero,
// then we are in a neighborhood where the function value evaluates to zero,
// so the derivative is zero.
if (ax == zero) {
return zero;

@@ -897,7 +898,7 @@ struct igamma_series_impl {

// This is independent of mode. If this value is zero,
// then the function value is zero. If the function value is zero,
// then we are in a neighborhood where the function value evalutes to zero,
// then we are in a neighborhood where the function value evaluates to zero,
// so the derivative is zero.
if (ax == zero) {
return zero;

@@ -952,10 +953,10 @@ struct igamma_series_impl {

template <typename Scalar>
struct igammac_impl {
EIGEN_DEVICE_FUNC
static Scalar run(Scalar a, Scalar x) {
EIGEN_STATIC_ASSERT((internal::is_same<Scalar, Scalar>::value == false),
                    THIS_TYPE_IS_NOT_SUPPORTED);
EIGEN_STATIC_ASSERT((internal::is_same<Scalar, Scalar>::value == false),
                    THIS_TYPE_IS_NOT_SUPPORTED)

EIGEN_DEVICE_FUNC static Scalar run(Scalar a, Scalar x) {
return Scalar(0);
}
};

@@ -1051,10 +1052,10 @@ struct igammac_impl {

template <typename Scalar, IgammaComputationMode mode>
struct igamma_generic_impl {
EIGEN_DEVICE_FUNC
static EIGEN_STRONG_INLINE Scalar run(Scalar a, Scalar x) {
EIGEN_STATIC_ASSERT((internal::is_same<Scalar, Scalar>::value == false),
                    THIS_TYPE_IS_NOT_SUPPORTED);
EIGEN_STATIC_ASSERT((internal::is_same<Scalar, Scalar>::value == false),
                    THIS_TYPE_IS_NOT_SUPPORTED)

EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Scalar run(Scalar a, Scalar x) {
return Scalar(0);
}
};

@@ -1255,10 +1256,10 @@ struct zeta_retval {

template <typename Scalar>
struct zeta_impl_series {
EIGEN_DEVICE_FUNC
static EIGEN_STRONG_INLINE Scalar run(const Scalar) {
EIGEN_STATIC_ASSERT((internal::is_same<Scalar, Scalar>::value == false),
                    THIS_TYPE_IS_NOT_SUPPORTED);
EIGEN_STATIC_ASSERT((internal::is_same<Scalar, Scalar>::value == false),
                    THIS_TYPE_IS_NOT_SUPPORTED)

EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Scalar run(const Scalar) {
return Scalar(0);
}
};

@@ -1387,7 +1388,7 @@ struct zeta_impl {
};

const Scalar maxnum = NumTraits<Scalar>::infinity();
const Scalar zero = 0.0, half = 0.5, one = 1.0;
const Scalar zero = Scalar(0.0), half = Scalar(0.5), one = Scalar(1.0);
const Scalar machep = cephes_helper<Scalar>::machep();
const Scalar nan = NumTraits<Scalar>::quiet_NaN();

@@ -1429,11 +1430,19 @@ struct zeta_impl {
return s;
}

// If b is zero, then the tail sum will also end up being zero.
// Exiting early here can prevent NaNs for some large inputs, where
// the tail sum computed below has term `a` which can overflow to `inf`.
if (numext::equal_strict(b, zero)) {
return s;
}

w = a;
s += b*w/(x-one);
s -= half * b;
a = one;
k = zero;

for( i=0; i<12; i++ )
{
a *= x + k;
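// Why the new equal_strict(b, zero) early return above helps: once the term
// `a` overflows to +inf, the tail contribution b*w/(x-one) evaluates 0*inf,
// which is NaN, unless the b == 0 case is short-circuited first. A minimal
// standalone illustration:
#include <cstdio>
#include <limits>

int main() {
  const double a = std::numeric_limits<double>::infinity();  // stands in for the overflowed term
  const double b = 0.0;
  std::printf("unguarded tail: %f\n", b * a);                   // prints nan
  std::printf("guarded tail  : %f\n", b == 0.0 ? 0.0 : b * a);  // prints 0
}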
@@ -1466,10 +1475,10 @@ struct polygamma_retval {

template <typename Scalar>
struct polygamma_impl {
EIGEN_DEVICE_FUNC
static EIGEN_STRONG_INLINE Scalar run(Scalar n, Scalar x) {
EIGEN_STATIC_ASSERT((internal::is_same<Scalar, Scalar>::value == false),
                    THIS_TYPE_IS_NOT_SUPPORTED);
EIGEN_STATIC_ASSERT((internal::is_same<Scalar, Scalar>::value == false),
                    THIS_TYPE_IS_NOT_SUPPORTED)

EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Scalar run(Scalar n, Scalar x) {
return Scalar(0);
}
};

@@ -1515,10 +1524,10 @@ struct betainc_retval {

template <typename Scalar>
struct betainc_impl {
EIGEN_DEVICE_FUNC
static EIGEN_STRONG_INLINE Scalar run(Scalar a, Scalar b, Scalar x) {
EIGEN_STATIC_ASSERT((internal::is_same<Scalar, Scalar>::value == false),
                    THIS_TYPE_IS_NOT_SUPPORTED);
EIGEN_STATIC_ASSERT((internal::is_same<Scalar, Scalar>::value == false),
                    THIS_TYPE_IS_NOT_SUPPORTED)

EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Scalar run(Scalar a, Scalar b, Scalar x) {
return Scalar(0);
}
};

@@ -1527,8 +1536,10 @@ struct betainc_impl {

template <typename Scalar>
struct betainc_impl {
EIGEN_DEVICE_FUNC
static EIGEN_STRONG_INLINE Scalar run(Scalar, Scalar, Scalar) {
EIGEN_STATIC_ASSERT((internal::is_same<Scalar, Scalar>::value == false),
                    THIS_TYPE_IS_NOT_SUPPORTED)

EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Scalar run(Scalar, Scalar, Scalar) {
/* betaincf.c
 *
 * Incomplete beta integral

@@ -1597,9 +1608,6 @@ struct betainc_impl {
 * incbet domain x<0, x>1 nan
 * incbet underflow nan
 */

EIGEN_STATIC_ASSERT((internal::is_same<Scalar, Scalar>::value == false),
                    THIS_TYPE_IS_NOT_SUPPORTED);
return Scalar(0);
}
};

@@ -1609,11 +1617,11 @@ struct betainc_impl {
 */
template <typename Scalar>
struct incbeta_cfe {
EIGEN_DEVICE_FUNC
static EIGEN_STRONG_INLINE Scalar run(Scalar a, Scalar b, Scalar x, bool small_branch) {
EIGEN_STATIC_ASSERT((internal::is_same<Scalar, float>::value ||
                     internal::is_same<Scalar, double>::value),
                    THIS_TYPE_IS_NOT_SUPPORTED);
EIGEN_STATIC_ASSERT((internal::is_same<Scalar, float>::value ||
                     internal::is_same<Scalar, double>::value),
                    THIS_TYPE_IS_NOT_SUPPORTED)

EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Scalar run(Scalar a, Scalar b, Scalar x, bool small_branch) {
const Scalar big = cephes_helper<Scalar>::big();
const Scalar machep = cephes_helper<Scalar>::machep();
const Scalar biginv = cephes_helper<Scalar>::biginv();
@@ -10,6 +10,8 @@
#ifndef EIGEN_SPECIALFUNCTIONS_PACKETMATH_H
#define EIGEN_SPECIALFUNCTIONS_PACKETMATH_H

#include "./InternalHeaderCheck.h"

namespace Eigen {

namespace internal {
@@ -4,6 +4,9 @@
namespace Eigen {
namespace internal {

// Bessel functions only available for some compilers.
#if EIGEN_HAS_AVX512_MATH

F16_PACKET_FUNCTION(Packet16f, Packet16h, pbessel_i0)
BF16_PACKET_FUNCTION(Packet16f, Packet16bf, pbessel_i0)

@@ -40,6 +43,8 @@ BF16_PACKET_FUNCTION(Packet16f, Packet16bf, pbessel_y0)
F16_PACKET_FUNCTION(Packet16f, Packet16h, pbessel_y1)
BF16_PACKET_FUNCTION(Packet16f, Packet16bf, pbessel_y1)

#endif

} // namespace internal
} // namespace Eigen
@@ -0,0 +1,3 @@
#ifndef EIGEN_SPLINES_MODULE_H
#error "Please include unsupported/Eigen/Splines instead of including headers inside the src directory directly."
#endif
@@ -10,6 +10,8 @@
#ifndef EIGEN_SPLINE_H
#define EIGEN_SPLINE_H

#include "./InternalHeaderCheck.h"

#include "SplineFwd.h"

namespace Eigen

@@ -25,19 +27,19 @@ namespace Eigen
 * C(u) & = \sum_{i=0}^{n}N_{i,p}(u)P_i
 * \f}
 *
 * \tparam _Scalar The underlying data type (typically float or double)
 * \tparam _Dim The curve dimension (e.g. 2 or 3)
 * \tparam _Degree Per default set to Dynamic; could be set to the actual desired
 * \tparam Scalar_ The underlying data type (typically float or double)
 * \tparam Dim_ The curve dimension (e.g. 2 or 3)
 * \tparam Degree_ Per default set to Dynamic; could be set to the actual desired
 * degree for optimization purposes (would result in stack allocation
 * of several temporary variables).
 **/
template <typename _Scalar, int _Dim, int _Degree>
template <typename Scalar_, int Dim_, int Degree_>
class Spline
{
public:
typedef _Scalar Scalar; /*!< The spline curve's scalar type. */
enum { Dimension = _Dim /*!< The spline curve's dimension. */ };
enum { Degree = _Degree /*!< The spline curve's degree. */ };
typedef Scalar_ Scalar; /*!< The spline curve's scalar type. */
enum { Dimension = Dim_ /*!< The spline curve's dimension. */ };
enum { Degree = Degree_ /*!< The spline curve's degree. */ };

/** \brief The point type the spline is representing. */
typedef typename SplineTraits<Spline>::PointType PointType;

@@ -223,18 +225,18 @@ namespace Eigen

template <typename DerivativeType>
static void BasisFunctionDerivativesImpl(
const typename Spline<_Scalar, _Dim, _Degree>::Scalar u,
const typename Spline<Scalar_, Dim_, Degree_>::Scalar u,
const DenseIndex order,
const DenseIndex p,
const typename Spline<_Scalar, _Dim, _Degree>::KnotVectorType& U,
const DenseIndex p,
const typename Spline<Scalar_, Dim_, Degree_>::KnotVectorType& U,
DerivativeType& N_);
};

template <typename _Scalar, int _Dim, int _Degree>
DenseIndex Spline<_Scalar, _Dim, _Degree>::Span(
typename SplineTraits< Spline<_Scalar, _Dim, _Degree> >::Scalar u,
template <typename Scalar_, int Dim_, int Degree_>
DenseIndex Spline<Scalar_, Dim_, Degree_>::Span(
typename SplineTraits< Spline<Scalar_, Dim_, Degree_> >::Scalar u,
DenseIndex degree,
const typename SplineTraits< Spline<_Scalar, _Dim, _Degree> >::KnotVectorType& knots)
const typename SplineTraits< Spline<Scalar_, Dim_, Degree_> >::KnotVectorType& knots)
{
// Piegl & Tiller, "The NURBS Book", A2.1 (p. 68)
if (u <= knots(0)) return degree;

@@ -242,12 +244,12 @@ namespace Eigen
return static_cast<DenseIndex>( std::distance(knots.data(), pos) - 1 );
}

template <typename _Scalar, int _Dim, int _Degree>
typename Spline<_Scalar, _Dim, _Degree>::BasisVectorType
Spline<_Scalar, _Dim, _Degree>::BasisFunctions(
typename Spline<_Scalar, _Dim, _Degree>::Scalar u,
template <typename Scalar_, int Dim_, int Degree_>
typename Spline<Scalar_, Dim_, Degree_>::BasisVectorType
Spline<Scalar_, Dim_, Degree_>::BasisFunctions(
typename Spline<Scalar_, Dim_, Degree_>::Scalar u,
DenseIndex degree,
const typename Spline<_Scalar, _Dim, _Degree>::KnotVectorType& knots)
const typename Spline<Scalar_, Dim_, Degree_>::KnotVectorType& knots)
{
const DenseIndex p = degree;
const DenseIndex i = Spline::Span(u, degree, knots);

@@ -276,23 +278,23 @@ namespace Eigen
return N;
}

template <typename _Scalar, int _Dim, int _Degree>
DenseIndex Spline<_Scalar, _Dim, _Degree>::degree() const
template <typename Scalar_, int Dim_, int Degree_>
DenseIndex Spline<Scalar_, Dim_, Degree_>::degree() const
{
if (_Degree == Dynamic)
if (Degree_ == Dynamic)
return m_knots.size() - m_ctrls.cols() - 1;
else
return _Degree;
return Degree_;
}

template <typename _Scalar, int _Dim, int _Degree>
DenseIndex Spline<_Scalar, _Dim, _Degree>::span(Scalar u) const
template <typename Scalar_, int Dim_, int Degree_>
DenseIndex Spline<Scalar_, Dim_, Degree_>::span(Scalar u) const
{
return Spline::Span(u, degree(), knots());
}

template <typename _Scalar, int _Dim, int _Degree>
typename Spline<_Scalar, _Dim, _Degree>::PointType Spline<_Scalar, _Dim, _Degree>::operator()(Scalar u) const
template <typename Scalar_, int Dim_, int Degree_>
typename Spline<Scalar_, Dim_, Degree_>::PointType Spline<Scalar_, Dim_, Degree_>::operator()(Scalar u) const
{
enum { Order = SplineTraits<Spline>::OrderAtCompileTime };

@@ -337,28 +339,28 @@ namespace Eigen
}
}

template <typename _Scalar, int _Dim, int _Degree>
typename SplineTraits< Spline<_Scalar, _Dim, _Degree> >::DerivativeType
Spline<_Scalar, _Dim, _Degree>::derivatives(Scalar u, DenseIndex order) const
template <typename Scalar_, int Dim_, int Degree_>
typename SplineTraits< Spline<Scalar_, Dim_, Degree_> >::DerivativeType
Spline<Scalar_, Dim_, Degree_>::derivatives(Scalar u, DenseIndex order) const
{
typename SplineTraits< Spline >::DerivativeType res;
derivativesImpl(*this, u, order, res);
return res;
}

template <typename _Scalar, int _Dim, int _Degree>
template <typename Scalar_, int Dim_, int Degree_>
template <int DerivativeOrder>
typename SplineTraits< Spline<_Scalar, _Dim, _Degree>, DerivativeOrder >::DerivativeType
Spline<_Scalar, _Dim, _Degree>::derivatives(Scalar u, DenseIndex order) const
typename SplineTraits< Spline<Scalar_, Dim_, Degree_>, DerivativeOrder >::DerivativeType
Spline<Scalar_, Dim_, Degree_>::derivatives(Scalar u, DenseIndex order) const
{
typename SplineTraits< Spline, DerivativeOrder >::DerivativeType res;
derivativesImpl(*this, u, order, res);
return res;
}

template <typename _Scalar, int _Dim, int _Degree>
typename SplineTraits< Spline<_Scalar, _Dim, _Degree> >::BasisVectorType
Spline<_Scalar, _Dim, _Degree>::basisFunctions(Scalar u) const
template <typename Scalar_, int Dim_, int Degree_>
typename SplineTraits< Spline<Scalar_, Dim_, Degree_> >::BasisVectorType
Spline<Scalar_, Dim_, Degree_>::basisFunctions(Scalar u) const
{
return Spline::BasisFunctions(u, degree(), knots());
}

@@ -366,16 +368,16 @@ namespace Eigen
/* --------------------------------------------------------------------------------------------- */

template <typename _Scalar, int _Dim, int _Degree>
template <typename Scalar_, int Dim_, int Degree_>
template <typename DerivativeType>
void Spline<_Scalar, _Dim, _Degree>::BasisFunctionDerivativesImpl(
const typename Spline<_Scalar, _Dim, _Degree>::Scalar u,
void Spline<Scalar_, Dim_, Degree_>::BasisFunctionDerivativesImpl(
const typename Spline<Scalar_, Dim_, Degree_>::Scalar u,
const DenseIndex order,
const DenseIndex p,
const typename Spline<_Scalar, _Dim, _Degree>::KnotVectorType& U,
const typename Spline<Scalar_, Dim_, Degree_>::KnotVectorType& U,
DerivativeType& N_)
{
typedef Spline<_Scalar, _Dim, _Degree> SplineType;
typedef Spline<Scalar_, Dim_, Degree_> SplineType;
enum { Order = SplineTraits<SplineType>::OrderAtCompileTime };

const DenseIndex span = SplineType::Span(u, p, U);

@@ -471,32 +473,32 @@ namespace Eigen
}
}

template <typename _Scalar, int _Dim, int _Degree>
typename SplineTraits< Spline<_Scalar, _Dim, _Degree> >::BasisDerivativeType
Spline<_Scalar, _Dim, _Degree>::basisFunctionDerivatives(Scalar u, DenseIndex order) const
template <typename Scalar_, int Dim_, int Degree_>
typename SplineTraits< Spline<Scalar_, Dim_, Degree_> >::BasisDerivativeType
Spline<Scalar_, Dim_, Degree_>::basisFunctionDerivatives(Scalar u, DenseIndex order) const
{
typename SplineTraits<Spline<_Scalar, _Dim, _Degree> >::BasisDerivativeType der;
typename SplineTraits<Spline<Scalar_, Dim_, Degree_> >::BasisDerivativeType der;
BasisFunctionDerivativesImpl(u, order, degree(), knots(), der);
return der;
}

template <typename _Scalar, int _Dim, int _Degree>
template <typename Scalar_, int Dim_, int Degree_>
template <int DerivativeOrder>
typename SplineTraits< Spline<_Scalar, _Dim, _Degree>, DerivativeOrder >::BasisDerivativeType
Spline<_Scalar, _Dim, _Degree>::basisFunctionDerivatives(Scalar u, DenseIndex order) const
typename SplineTraits< Spline<Scalar_, Dim_, Degree_>, DerivativeOrder >::BasisDerivativeType
Spline<Scalar_, Dim_, Degree_>::basisFunctionDerivatives(Scalar u, DenseIndex order) const
{
typename SplineTraits< Spline<_Scalar, _Dim, _Degree>, DerivativeOrder >::BasisDerivativeType der;
typename SplineTraits< Spline<Scalar_, Dim_, Degree_>, DerivativeOrder >::BasisDerivativeType der;
BasisFunctionDerivativesImpl(u, order, degree(), knots(), der);
return der;
}

template <typename _Scalar, int _Dim, int _Degree>
typename SplineTraits<Spline<_Scalar, _Dim, _Degree> >::BasisDerivativeType
Spline<_Scalar, _Dim, _Degree>::BasisFunctionDerivatives(
const typename Spline<_Scalar, _Dim, _Degree>::Scalar u,
template <typename Scalar_, int Dim_, int Degree_>
typename SplineTraits<Spline<Scalar_, Dim_, Degree_> >::BasisDerivativeType
Spline<Scalar_, Dim_, Degree_>::BasisFunctionDerivatives(
const typename Spline<Scalar_, Dim_, Degree_>::Scalar u,
const DenseIndex order,
const DenseIndex degree,
const typename Spline<_Scalar, _Dim, _Degree>::KnotVectorType& knots)
const typename Spline<Scalar_, Dim_, Degree_>::KnotVectorType& knots)
{
typename SplineTraits<Spline>::BasisDerivativeType der;
BasisFunctionDerivativesImpl(u, order, degree, knots, der);
@@ -15,11 +15,14 @@
#include <numeric>
#include <vector>

#include "./InternalHeaderCheck.h"

#include "SplineFwd.h"

#include "../../../../Eigen/LU"
#include "../../../../Eigen/QR"

namespace Eigen
{
/**
@@ -10,6 +10,7 @@
#ifndef EIGEN_SPLINES_FWD_H
#define EIGEN_SPLINES_FWD_H

#include "./InternalHeaderCheck.h"
#include "../../../../Eigen/Core"

namespace Eigen

@@ -22,14 +23,14 @@ namespace Eigen
 * \ingroup Splines_Module
 * \brief Compile-time attributes of the Spline class for Dynamic degree.
 **/
template <typename _Scalar, int _Dim, int _Degree>
struct SplineTraits< Spline<_Scalar, _Dim, _Degree>, Dynamic >
template <typename Scalar_, int Dim_, int Degree_>
struct SplineTraits< Spline<Scalar_, Dim_, Degree_>, Dynamic >
{
typedef _Scalar Scalar; /*!< The spline curve's scalar type. */
enum { Dimension = _Dim /*!< The spline curve's dimension. */ };
enum { Degree = _Degree /*!< The spline curve's degree. */ };
typedef Scalar_ Scalar; /*!< The spline curve's scalar type. */
enum { Dimension = Dim_ /*!< The spline curve's dimension. */ };
enum { Degree = Degree_ /*!< The spline curve's degree. */ };

enum { OrderAtCompileTime = _Degree==Dynamic ? Dynamic : _Degree+1 /*!< The spline curve's order at compile-time. */ };
enum { OrderAtCompileTime = Degree_==Dynamic ? Dynamic : Degree_+1 /*!< The spline curve's order at compile-time. */ };
enum { NumOfDerivativesAtCompileTime = OrderAtCompileTime /*!< The number of derivatives defined for the current spline. */ };

enum { DerivativeMemoryLayout = Dimension==1 ? RowMajor : ColMajor /*!< The derivative type's memory layout. */ };

@@ -62,19 +63,19 @@ namespace Eigen
 *
 * The traits class inherits all attributes from the SplineTraits of Dynamic degree.
 **/
template < typename _Scalar, int _Dim, int _Degree, int _DerivativeOrder >
struct SplineTraits< Spline<_Scalar, _Dim, _Degree>, _DerivativeOrder > : public SplineTraits< Spline<_Scalar, _Dim, _Degree> >
template < typename Scalar_, int Dim_, int Degree_, int _DerivativeOrder >
struct SplineTraits< Spline<Scalar_, Dim_, Degree_>, _DerivativeOrder > : public SplineTraits< Spline<Scalar_, Dim_, Degree_> >
{
enum { OrderAtCompileTime = _Degree==Dynamic ? Dynamic : _Degree+1 /*!< The spline curve's order at compile-time. */ };
enum { OrderAtCompileTime = Degree_==Dynamic ? Dynamic : Degree_+1 /*!< The spline curve's order at compile-time. */ };
enum { NumOfDerivativesAtCompileTime = _DerivativeOrder==Dynamic ? Dynamic : _DerivativeOrder+1 /*!< The number of derivatives defined for the current spline. */ };

enum { DerivativeMemoryLayout = _Dim==1 ? RowMajor : ColMajor /*!< The derivative type's memory layout. */ };
enum { DerivativeMemoryLayout = Dim_==1 ? RowMajor : ColMajor /*!< The derivative type's memory layout. */ };

/** \brief The data type used to store the values of the basis function derivatives. */
typedef Array<_Scalar,Dynamic,Dynamic,RowMajor,NumOfDerivativesAtCompileTime,OrderAtCompileTime> BasisDerivativeType;
typedef Array<Scalar_,Dynamic,Dynamic,RowMajor,NumOfDerivativesAtCompileTime,OrderAtCompileTime> BasisDerivativeType;

/** \brief The data type used to store the spline's derivative values. */
typedef Array<_Scalar,_Dim,Dynamic,DerivativeMemoryLayout,_Dim,NumOfDerivativesAtCompileTime> DerivativeType;
typedef Array<Scalar_,Dim_,Dynamic,DerivativeMemoryLayout,Dim_,NumOfDerivativesAtCompileTime> DerivativeType;
};

/** \brief 2D float B-spline with dynamic degree. */
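// A quick usage sketch of the renamed Spline/SplineTraits templates, assuming
// the standard unsupported Splines API (SplineFitting<...>::Interpolate) is
// available alongside them.
#include <unsupported/Eigen/Splines>
#include <iostream>

int main() {
  typedef Eigen::Spline<double, 2> Spline2d;

  // Five 2D points, one per column.
  Eigen::MatrixXd pts(2, 5);
  pts << 0.0, 1.0, 2.0, 3.0, 4.0,
         0.0, 1.0, 0.0, 1.0, 0.0;

  // Fit a cubic interpolating spline, then evaluate it and its first derivative.
  Spline2d s = Eigen::SplineFitting<Spline2d>::Interpolate(pts, 3);
  std::cout << "C(0.5)  =\n" << s(0.5) << "\n";
  std::cout << "C'(0.5) =\n" << s.derivatives(0.5, 1).col(1) << "\n";
}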