ADD: added other eigen lib
@@ -10,9 +10,6 @@ if(CMAKE_COMPILER_IS_GNUCXX)
|
||||
endif()
|
||||
endif()
|
||||
|
||||
# some examples and snippets needs c++11, so let's check it once
|
||||
check_cxx_compiler_flag("-std=c++11" EIGEN_COMPILER_SUPPORT_CPP11)
|
||||
|
||||
option(EIGEN_INTERNAL_DOCUMENTATION "Build internal documentation" OFF)
|
||||
option(EIGEN_DOC_USE_MATHJAX "Use MathJax for rendering math in HTML docs" ON)
|
||||
|
||||
|
||||
@@ -1600,8 +1600,6 @@ PREDEFINED = EIGEN_EMPTY_STRUCT \
|
||||
EIGEN_QT_SUPPORT \
|
||||
EIGEN_STRONG_INLINE=inline \
|
||||
EIGEN_DEVICE_FUNC= \
|
||||
EIGEN_HAS_CXX11=1 \
|
||||
EIGEN_HAS_CXX11_MATH=1 \
|
||||
"EIGEN_MAKE_CWISE_BINARY_OP(METHOD,FUNCTOR)=template<typename OtherDerived> const CwiseBinaryOp<FUNCTOR<Scalar>, const Derived, const OtherDerived> METHOD(const EIGEN_CURRENT_STORAGE_BASE_CLASS<OtherDerived> &other) const;" \
|
||||
"EIGEN_CWISE_PRODUCT_RETURN_TYPE(LHS,RHS)=CwiseBinaryOp<internal::scalar_product_op<LHS::Scalar,RHS::Scalar>, const LHS, const RHS>"\
|
||||
"EIGEN_CAT2(a,b)= a ## b"\
|
||||
|
||||
@@ -88,7 +88,7 @@ You may wonder, isn't it overengineering to have the storage in a separate class
|
||||
|
||||
Let's look at this constructor, in src/Core/DenseStorage.h. You can see that there are many partial template specializations of DenseStorage here, treating separately the cases where dimensions are Dynamic or fixed at compile-time. The partial specialization that we are looking at is:
|
||||
\code
|
||||
template<typename T, int _Cols> class DenseStorage<T, Dynamic, Dynamic, _Cols>
|
||||
template<typename T, int Cols_> class DenseStorage<T, Dynamic, Dynamic, Cols_>
|
||||
\endcode
|
||||
|
||||
Here, the constructor called is DenseStorage::DenseStorage(int size, int rows, int columns)
|
||||
@@ -101,7 +101,7 @@ inline DenseStorage(int size, int rows, int) : m_data(internal::aligned_new<T>(s
|
||||
|
||||
Here, the \a m_data member is the actual array of coefficients of the matrix. As you can see, it is dynamically allocated. Rather than calling new[] or malloc(), we have our own internal::aligned_new defined in src/Core/util/Memory.h. If vectorization is enabled, it uses a platform-specific call to allocate a 128-bit-aligned array, as that is very useful for vectorization with both SSE2 and AltiVec. If vectorization is disabled, it amounts to the standard new[].
|
||||
|
||||
As you can see, the constructor also sets the \a m_rows member to \a size. Notice that there is no \a m_columns member: indeed, in this partial specialization of DenseStorage, we know the number of columns at compile-time, since the _Cols template parameter is different from Dynamic. Namely, in our case, _Cols is 1, which is to say that our vector is just a matrix with 1 column. Hence, there is no need to store the number of columns as a runtime variable.
|
||||
As you can see, the constructor also sets the \a m_rows member to \a size. Notice that there is no \a m_columns member: indeed, in this partial specialization of DenseStorage, we know the number of columns at compile-time, since the Cols_ template parameter is different from Dynamic. Namely, in our case, Cols_ is 1, which is to say that our vector is just a matrix with 1 column. Hence, there is no need to store the number of columns as a runtime variable.
|
||||
|
||||
When you call VectorXf::data() to get the pointer to the array of coefficients, it returns DenseStorage::data() which returns the \a m_data member.
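To make this concrete, here is a simplified, hedged sketch of such a partial specialization (illustrative only; the name \c DenseStorageSketch is made up and %Eigen's real code is more involved): the number of rows is stored at run time, while the column count is carried by the template parameter.
\code
template<typename T, int Cols_>
class DenseStorageSketch          // fixed Cols_, dynamic rows
{
    T*  m_data;
    int m_rows;                   // run-time row count
    // no m_cols member: Cols_ is known at compile time
  public:
    DenseStorageSketch(int size, int rows, int /*cols*/)
      : m_data(new T[size]), m_rows(rows) {}
    ~DenseStorageSketch() { delete[] m_data; }
    const T* data() const { return m_data; }
    int rows() const { return m_rows; }
    int cols() const { return Cols_; }
};
\endcode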
@@ -55,29 +55,15 @@ By default, %Eigen strive to automatically detect and enable language features a
|
||||
the information provided by the compiler.
|
||||
|
||||
- \b EIGEN_MAX_CPP_VER - disables usage of C++ features requiring a version greater than EIGEN_MAX_CPP_VER.
|
||||
Possible values are: 03, 11, 14, 17, etc. If not defined (the default), %Eigen enables all features supported
|
||||
Possible values are: 14, 17, etc. If not defined (the default), %Eigen enables all features supported
|
||||
by the compiler.
|
||||
|
||||
Individual features can be explicitly enabled or disabled by defining the following token to 0 or 1 respectively.
|
||||
For instance, one might limit the C++ version to C++03 by defining EIGEN_MAX_CPP_VER=03, but still enable C99 math
|
||||
For instance, one might limit the C++ version to C++14 by defining EIGEN_MAX_CPP_VER=14, but still enable C99 math
|
||||
functions by defining EIGEN_HAS_C99_MATH=1.
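For instance, a translation unit could pin these tokens before including any %Eigen header (a hedged sketch; the chosen values are only an example):
\code
// Define the tokens *before* the first Eigen include.
#define EIGEN_MAX_CPP_VER 14   // do not use language features beyond C++14
#define EIGEN_HAS_C99_MATH 1   // but still enable the C99 math functions
#include <Eigen/Core>
\endcode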
- \b EIGEN_HAS_C99_MATH - controls the usage of C99 math functions such as erf, erfc, lgamma, etc.
|
||||
Automatic detection disabled if EIGEN_MAX_CPP_VER<11.
|
||||
- \b EIGEN_HAS_CXX11_MATH - controls the implementation of some functions such as round, log1p, isinf, isnan, etc.
|
||||
Automatic detection disabled if EIGEN_MAX_CPP_VER<11.
|
||||
- \b EIGEN_HAS_RVALUE_REFERENCES - defines whether rvalue references are supported
|
||||
Automatic detection disabled if EIGEN_MAX_CPP_VER<11.
|
||||
- \b EIGEN_HAS_STD_RESULT_OF - defines whether std::result_of is supported
|
||||
Automatic detection disabled if EIGEN_MAX_CPP_VER<11.
|
||||
- \b EIGEN_HAS_VARIADIC_TEMPLATES - defines whether variadic templates are supported
|
||||
Automatic detection disabled if EIGEN_MAX_CPP_VER<11.
|
||||
- \b EIGEN_HAS_CONSTEXPR - defines whether relaxed constant expressions are supported
|
||||
Automatic detection disabled if EIGEN_MAX_CPP_VER<14.
|
||||
- \b EIGEN_HAS_CXX11_CONTAINERS - defines whether the STL's containers follow the C++11 specification
|
||||
Automatic detection disabled if EIGEN_MAX_CPP_VER<11.
|
||||
- \b EIGEN_HAS_CXX11_NOEXCEPT - defines whether noexcept is supported
|
||||
Automatic detection disabled if EIGEN_MAX_CPP_VER<11.
|
||||
- \b EIGEN_NO_IO - Disables any usage and support for `<iostreams>`.
|
||||
|
||||
\section TopicPreprocessorDirectivesAssertions Assertions
|
||||
@@ -104,7 +90,7 @@ run time. However, these assertions do cost time and can thus be turned off.
|
||||
- \b \c EIGEN_MAX_ALIGN_BYTES - Must be a power of two, or 0. Defines an upper bound on the memory boundary in bytes on which dynamically and statically allocated data may be aligned by %Eigen. If not defined, a default value is automatically computed based on architecture, compiler, and OS.
|
||||
This option is typically used to enforce binary compatibility between code/libraries compiled with different SIMD options. For instance, one may compile AVX code and enforce ABI compatibility with existing SSE code by defining \c EIGEN_MAX_ALIGN_BYTES=16. The other way round, since by default AVX implies 32-byte alignment for best performance, one can compile SSE code to be ABI compatible with AVX code by defining \c EIGEN_MAX_ALIGN_BYTES=32.
|
||||
- \b \c EIGEN_MAX_STATIC_ALIGN_BYTES - Same as \c EIGEN_MAX_ALIGN_BYTES but for statically allocated data only. By default, if only \c EIGEN_MAX_ALIGN_BYTES is defined, then \c EIGEN_MAX_STATIC_ALIGN_BYTES == \c EIGEN_MAX_ALIGN_BYTES, otherwise a default value is automatically computed based on architecture, compiler, and OS (can be smaller than the default value of EIGEN_MAX_ALIGN_BYTES on architectures that do not support stack alignment).
|
||||
Let us emphasize that \c EIGEN_MAX_*_ALIGN_BYTES define only a diserable upper bound. In practice data is aligned to largest power-of-two common divisor of \c EIGEN_MAX_STATIC_ALIGN_BYTES and the size of the data, such that memory is not wasted.
|
||||
Let us emphasize that \c EIGEN_MAX_*_ALIGN_BYTES define only a desirable upper bound. In practice data is aligned to largest power-of-two common divisor of \c EIGEN_MAX_STATIC_ALIGN_BYTES and the size of the data, such that memory is not wasted.
|
||||
- \b \c EIGEN_DONT_PARALLELIZE - if defined, this disables multi-threading. This is only relevant if you enabled OpenMP.
|
||||
See \ref TopicMultiThreading for details.
|
||||
- \b \c EIGEN_DONT_VECTORIZE - disables explicit vectorization when defined. Not defined by default, unless
|
||||
@@ -131,8 +117,11 @@ run time. However, these assertions do cost time and can thus be turned off.
|
||||
- \b \c EIGEN_DEFAULT_L2_CACHE_SIZE - Sets the default L2 cache size that is used in Eigen's GEBP kernel when the correct cache size cannot be determined at runtime.
|
||||
- \b \c EIGEN_DEFAULT_L3_CACHE_SIZE - Sets the default L3 cache size that is used in Eigen's GEBP kernel when the correct cache size cannot be determined at runtime.
|
||||
|
||||
- \c EIGEN_DONT_ALIGN - Deprecated, it is a synonym for \c EIGEN_MAX_ALIGN_BYTES=0. It disables alignment completely. %Eigen will not try to align its objects and does not expect that any objects passed to it are aligned. This will turn off vectorization if \b \c EIGEN_UNALIGNED_VECTORIZE=1. Not defined by default.
|
||||
- \c EIGEN_DONT_ALIGN_STATICALLY - Deprecated, it is a synonym for \c EIGEN_MAX_STATIC_ALIGN_BYTES=0. It disables alignment of arrays on the stack. Not defined by default, unless \c EIGEN_DONT_ALIGN is defined.
|
||||
- \b \c EIGEN_DONT_ALIGN - Deprecated, it is a synonym for \c EIGEN_MAX_ALIGN_BYTES=0. It disables alignment completely. %Eigen will not try to align its objects and does not expect that any objects passed to it are aligned. This will turn off vectorization if \b \c EIGEN_UNALIGNED_VECTORIZE=1. Not defined by default.
|
||||
- \b \c EIGEN_DONT_ALIGN_STATICALLY - Deprecated, it is a synonym for \c EIGEN_MAX_STATIC_ALIGN_BYTES=0. It disables alignment of arrays on the stack. Not defined by default, unless \c EIGEN_DONT_ALIGN is defined.
|
||||
- \b \c EIGEN_ALTIVEC_ENABLE_MMA_DYNAMIC_DISPATCH - Controls whether to use Eigen's dynamic dispatching for Altivec MMA or not.
|
||||
- \b \c EIGEN_ALTIVEC_DISABLE_MMA - Overrides the usage of Altivec MMA instructions.
|
||||
- \b \c EIGEN_ALTIVEC_USE_CUSTOM_PACK - Controls whether to use Eigen's custom packing for Altivec or not.
|
||||
|
||||
|
||||
\section TopicPreprocessorDirectivesPlugins Plugins
|
||||
|
||||
@@ -367,7 +367,8 @@ vec2 = vec1.normalized(); vec1.normalize(); // inplace \endcode
|
||||
<tr class="alt"><td>
|
||||
\link MatrixBase::cross() cross product \endlink \matrixworld</td><td>\code
|
||||
#include <Eigen/Geometry>
|
||||
vec3 = vec1.cross(vec2);\endcode</td></tr>
|
||||
v3c = v3a.cross(v3b); // size-3 vectors
|
||||
scalar = v2a.cross(v2b); // size-2 vectors \endcode</td></tr>
|
||||
</table>
|
||||
|
||||
<a href="#" class="top">top</a>
|
||||
|
||||
@@ -22,11 +22,11 @@ We will explain the program after telling you how to compile it.
|
||||
|
||||
\section GettingStartedCompiling Compiling and running your first program
|
||||
|
||||
There is no library to link to. The only thing that you need to keep in mind when compiling the above program is that the compiler must be able to find the Eigen header files. The directory in which you placed Eigen's source code must be in the include path. With GCC you use the -I option to achieve this, so you can compile the program with a command like this:
|
||||
There is no library to link to. The only thing that you need to keep in mind when compiling the above program is that the compiler must be able to find the Eigen header files. The directory in which you placed Eigen's source code must be in the include path. With GCC you use the \c -I option to achieve this, so you can compile the program with a command like this:
|
||||
|
||||
\code g++ -I /path/to/eigen/ my_program.cpp -o my_program \endcode
|
||||
|
||||
On Linux or Mac OS X, another option is to symlink or copy the Eigen folder into /usr/local/include/. This way, you can compile the program with:
|
||||
On Linux or Mac OS X, another option is to symlink or copy the Eigen folder into \c /usr/local/include/. This way, you can compile the program with:
|
||||
|
||||
\code g++ my_program.cpp -o my_program \endcode
|
||||
|
||||
|
||||
@@ -13,24 +13,20 @@ They are summarized in the following tables:
|
||||
|
||||
<table class="manual">
|
||||
<tr><th>Class</th><th>Solver kind</th><th>Matrix kind</th><th>Features related to performance</th>
|
||||
<th>License</th><th class="width20em"><p>Notes</p></th></tr>
|
||||
<th class="width20em"><p>Notes</p></th></tr>
|
||||
|
||||
<tr><td>SimplicialLLT \n <tt>\#include<Eigen/\link SparseCholesky_Module SparseCholesky\endlink></tt></td><td>Direct LLt factorization</td><td>SPD</td><td>Fill-in reducing</td>
|
||||
<td>LGPL</td>
|
||||
<td>SimplicialLDLT is often preferable</td></tr>
|
||||
|
||||
<tr><td>SimplicialLDLT \n <tt>\#include<Eigen/\link SparseCholesky_Module SparseCholesky\endlink></tt></td><td>Direct LDLt factorization</td><td>SPD</td><td>Fill-in reducing</td>
|
||||
<td>LGPL</td>
|
||||
<td>Recommended for very sparse and not too large problems (e.g., 2D Poisson eq.)</td></tr>
|
||||
|
||||
<tr><td>SparseLU \n <tt>\#include<Eigen/\link SparseLU_Module SparseLU\endlink></tt></td> <td>LU factorization </td>
|
||||
<td>Square </td><td>Fill-in reducing, Leverage fast dense algebra</td>
|
||||
<td>MPL2</td>
|
||||
<td>optimized for small and large problems with irregular patterns </td></tr>
|
||||
|
||||
<tr><td>SparseQR \n <tt>\#include<Eigen/\link SparseQR_Module SparseQR\endlink></tt></td> <td> QR factorization</td>
|
||||
<td>Any, rectangular</td><td> Fill-in reducing</td>
|
||||
<td>MPL2</td>
|
||||
<td>recommended for least-square problems, has a basic rank-revealing feature</td></tr>
|
||||
</table>
|
||||
|
||||
@@ -38,21 +34,18 @@ They are summarized in the following tables:
|
||||
|
||||
<table class="manual">
|
||||
<tr><th>Class</th><th>Solver kind</th><th>Matrix kind</th><th>Supported preconditioners, [default]</th>
|
||||
<th>License</th><th class="width20em"><p>Notes</p></th></tr>
|
||||
<th class="width20em"><p>Notes</p></th></tr>
|
||||
|
||||
<tr><td>ConjugateGradient \n <tt>\#include<Eigen/\link IterativeLinearSolvers_Module IterativeLinearSolvers\endlink></tt></td> <td>Classic iterative CG</td><td>SPD</td>
|
||||
<td>IdentityPreconditioner, [DiagonalPreconditioner], IncompleteCholesky</td>
|
||||
<td>MPL2</td>
|
||||
<td>Recommended for large symmetric problems (e.g., 3D Poisson eq.)</td></tr>
|
||||
|
||||
<tr><td>LeastSquaresConjugateGradient \n <tt>\#include<Eigen/\link IterativeLinearSolvers_Module IterativeLinearSolvers\endlink></tt></td><td>CG for rectangular least-square problem</td><td>Rectangular</td>
|
||||
<td>IdentityPreconditioner, [LeastSquareDiagonalPreconditioner]</td>
|
||||
<td>MPL2</td>
|
||||
<td>Solve for min |A'Ax-b|^2 without forming A'A</td></tr>
|
||||
<td>Solve for min |Ax-b|^2 without forming A'A</td></tr>
|
||||
|
||||
<tr><td>BiCGSTAB \n <tt>\#include<Eigen/\link IterativeLinearSolvers_Module IterativeLinearSolvers\endlink></tt></td><td>Iterative stabilized bi-conjugate gradient</td><td>Square</td>
|
||||
<td>IdentityPreconditioner, [DiagonalPreconditioner], IncompleteLUT</td>
|
||||
<td>MPL2</td>
|
||||
<td>To speed up the convergence, try it with the \ref IncompleteLUT preconditioner.</td></tr>
|
||||
</table>
|
||||
|
||||
@@ -82,6 +75,9 @@ They are summarized in the following tables:
|
||||
<tr><td>PardisoLLT \n PardisoLDLT \n PardisoLU</td><td>\link PardisoSupport_Module PardisoSupport \endlink</td><td>Direct LLt, LDLt, LU factorizations</td><td>SPD \n SPD \n Square</td><td>Fill-in reducing, Leverage fast dense algebra, Multithreading</td>
|
||||
<td>Requires the <a href="http://eigen.tuxfamily.org/Counter/redirect_to_mkl.php">Intel MKL</a> package, \b Proprietary </td>
|
||||
<td>optimized for tough problems patterns, see also \link TopicUsingIntelMKL using MKL with Eigen \endlink</td></tr>
|
||||
<tr><td>AccelerateLLT \n AccelerateLDLT \n AccelerateQR</td><td>\link AccelerateSupport_Module AccelerateSupport \endlink</td><td>Direct LLt, LDLt, QR factorizations</td><td>SPD \n SPD \n Rectangular</td><td>Fill-in reducing, Leverage fast dense algebra, Multithreading</td>
|
||||
<td>Requires the <a href="https://developer.apple.com/documentation/accelerate">Apple Accelerate</a> package, \b Proprietary </td>
|
||||
<td></td></tr>
|
||||
</table>
|
||||
|
||||
Here \c SPD means symmetric positive definite.
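As a minimal, hedged illustration of the usage pattern shared by the built-in solvers listed above (the helper name \c solve_spd is made up, and \c A and \c b are assumed to be filled elsewhere):
\code
#include <Eigen/Sparse>

Eigen::VectorXd solve_spd(const Eigen::SparseMatrix<double>& A, const Eigen::VectorXd& b)
{
  Eigen::SimplicialLDLT<Eigen::SparseMatrix<double> > solver;
  solver.compute(A);                        // factorize the SPD matrix
  if (solver.info() != Eigen::Success) {
    // the decomposition failed: handle the error here
  }
  return solver.solve(b);
}
\endcode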
@@ -137,7 +133,7 @@ x1 = solver.solve(b1);
|
||||
x2 = solver.solve(b2);
|
||||
...
|
||||
\endcode
|
||||
The compute() method is equivalent to calling both analyzePattern() and factorize().
|
||||
The `compute()` method is equivalent to calling both `analyzePattern()` and `factorize()`.
|
||||
|
||||
Each solver provides some specific features, such as the determinant, access to the factors, control of the iterations, and so on.
|
||||
More details are available in the documentations of the respective classes.
|
||||
@@ -145,9 +141,9 @@ More details are available in the documentations of the respective classes.
|
||||
Finally, most of the iterative solvers can also be used in a \b matrix-free context; see the following \link MatrixfreeSolverExample example \endlink.
|
||||
|
||||
\section TheSparseCompute The Compute Step
|
||||
In the compute() function, the matrix is generally factorized: LLT for self-adjoint matrices, LDLT for general hermitian matrices, LU for non hermitian matrices and QR for rectangular matrices. These are the results of using direct solvers. For this class of solvers precisely, the compute step is further subdivided into analyzePattern() and factorize().
|
||||
In the `compute()` function, the matrix is generally factorized: LLT for self-adjoint matrices, LDLT for general Hermitian matrices, LU for non-Hermitian matrices, and QR for rectangular matrices. These factorizations are the result of using direct solvers. For this class of solvers precisely, the compute step is further subdivided into `analyzePattern()` and `factorize()`.
|
||||
|
||||
The goal of analyzePattern() is to reorder the nonzero elements of the matrix, such that the factorization step creates less fill-in. This step exploits only the structure of the matrix. Hence, the results of this step can be used for other linear systems where the matrix has the same structure. Note however that sometimes, some external solvers (like SuperLU) require that the values of the matrix are set in this step, for instance to equilibrate the rows and columns of the matrix. In this situation, the results of this step should not be used with other matrices.
|
||||
The goal of `analyzePattern()` is to reorder the nonzero elements of the matrix, such that the factorization step creates less fill-in. This step exploits only the structure of the matrix. Hence, the results of this step can be used for other linear systems where the matrix has the same structure. Note however that sometimes, some external solvers (like SuperLU) require that the values of the matrix are set in this step, for instance to equilibrate the rows and columns of the matrix. In this situation, the results of this step should not be used with other matrices.
|
||||
|
||||
Eigen provides a limited set of methods to reorder the matrix in this step, either built-in (COLAMD, AMD) or external (METIS). These methods are set in the template parameter list of the solver:
|
||||
\code
|
||||
@@ -156,21 +152,21 @@ DirectSolverClassName<SparseMatrix<double>, OrderingMethod<IndexType> > solver;
|
||||
|
||||
See the \link OrderingMethods_Module OrderingMethods module \endlink for the list of available methods and the associated options.
|
||||
|
||||
In factorize(), the factors of the coefficient matrix are computed. This step should be called each time the values of the matrix change. However, the structural pattern of the matrix should not change between multiple calls.
|
||||
In `factorize()`, the factors of the coefficient matrix are computed. This step should be called each time the values of the matrix change. However, the structural pattern of the matrix should not change between multiple calls.
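A hedged sketch of how this split is typically exploited when only the numerical values of the matrix change between solves (the matrix \c A and the vectors are assumed to be declared elsewhere):
\code
Eigen::SparseLU<Eigen::SparseMatrix<double> > solver;
solver.analyzePattern(A);   // structure-only step, done once
solver.factorize(A);        // numerical factorization
x1 = solver.solve(b1);
// ... update the values of A, keeping the same sparsity pattern ...
solver.factorize(A);        // only the numerical step is redone
x2 = solver.solve(b2);
\endcode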
For iterative solvers, the compute step is used to set up the preconditioner, if any. For instance, with the ILUT preconditioner, the incomplete factors L and U are computed in this step. Remember that, basically, the goal of the preconditioner is to speed up the convergence of an iterative method by solving a modified linear system where the coefficient matrix has more clustered eigenvalues. For real problems, an iterative solver should always be used with a preconditioner. In Eigen, a preconditioner is selected by simply adding it as a template parameter to the iterative solver object.
|
||||
\code
|
||||
IterativeSolverClassName<SparseMatrix<double>, PreconditionerName<SparseMatrix<double> > > solver;
|
||||
\endcode
|
||||
The member function preconditioner() returns a read-write reference to the preconditioner
|
||||
The member function `preconditioner()` returns a read-write reference to the preconditioner
|
||||
to directly interact with it. See the \link IterativeLinearSolvers_Module Iterative solvers module \endlink and the documentation of each class for the list of available methods.
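For example, a conjugate gradient solver combined with an incomplete Cholesky preconditioner could be set up as follows (a hedged sketch; \c A, \c b and \c x are assumed to be declared elsewhere):
\code
Eigen::ConjugateGradient<Eigen::SparseMatrix<double>,
                         Eigen::Lower|Eigen::Upper,
                         Eigen::IncompleteCholesky<double> > cg;
cg.compute(A);        // sets up the incomplete Cholesky factors
x = cg.solve(b);
\endcode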
\section TheSparseSolve The Solve step
|
||||
The solve() function computes the solution of the linear systems with one or many right hand sides.
|
||||
The `solve()` function computes the solution of the linear systems with one or many right hand sides.
|
||||
\code
|
||||
X = solver.solve(B);
|
||||
\endcode
|
||||
Here, B can be a vector or a matrix where the columns form the different right hand sides. The solve() function can be called several times as well, for instance when all the right hand sides are not available at once.
|
||||
Here, B can be a vector or a matrix where the columns form the different right hand sides. The `solve()` function can be called several times as well, for instance when all the right hand sides are not available at once.
|
||||
\code
|
||||
x1 = solver.solve(b1);
|
||||
// Get the second right hand side b2
|
||||
@@ -180,7 +176,7 @@ x2 = solver.solve(b2);
|
||||
For direct methods, the solution is computed at machine precision. Sometimes, the solution need not be that accurate. In this case, the iterative methods are more suitable and the desired accuracy can be set before the solve step using \b setTolerance(). For all the available functions, please refer to the documentation of the \link IterativeLinearSolvers_Module Iterative solvers module \endlink.
|
||||
|
||||
\section BenchmarkRoutine
|
||||
Most of the time, all you need is to know how much time it will take to solve your system, and hopefully, what is the most suitable solver. In Eigen, we provide a benchmark routine that can be used for this purpose. It is very easy to use. In the build directory, navigate to bench/spbench and compile the routine by typing \b make \e spbenchsolver. Run it with --help option to get the list of all available options. Basically, the matrices to test should be in <a href="http://math.nist.gov/MatrixMarket/formats.html">MatrixMarket Coordinate format</a>, and the routine returns the statistics from all available solvers in Eigen.
|
||||
Most of the time, all you need is to know how much time it will take to solve your system, and hopefully, which solver is the most suitable. In Eigen, we provide a benchmark routine that can be used for this purpose. It is very easy to use. In the build directory, navigate to `bench/spbench` and compile the routine by typing `make spbenchsolver`. Run it with the `--help` option to get the list of all available options. Basically, the matrices to test should be in <a href="http://math.nist.gov/MatrixMarket/formats.html">MatrixMarket Coordinate format</a>, and the routine returns the statistics from all available solvers in Eigen.
|
||||
|
||||
To export your matrices and right-hand-side vectors in the matrix-market format, you can use the unsupported SparseExtra module:
|
||||
\code
|
||||
|
||||
@@ -249,7 +249,7 @@ sm1.outerIndexPtr(); // Pointer to the beginning of each inner vector
|
||||
\endcode
|
||||
</td>
|
||||
<td>
|
||||
If the matrix is not in compressed form, makeCompressed() should be called before.\n
|
||||
If the matrix is not in compressed form, `makeCompressed()` should be called before.\n
|
||||
Note that these functions are mostly provided for interoperability purposes with external libraries.\n
|
||||
A better access to the values of the matrix is done by using the InnerIterator class as described in \link TutorialSparse the Tutorial Sparse \endlink section</td>
|
||||
</tr>
|
||||
|
||||
@@ -80,7 +80,7 @@ But AVX instructions (at least the ones that %Eigen uses, which are the fast one
|
||||
Otherwise you get a segmentation fault.
|
||||
|
||||
For this reason, %Eigen takes care by itself to require 256-bit alignment for Eigen::Vector4d, by doing two things:
|
||||
\li %Eigen requires 256-bit alignment for the Eigen::Vector4d's array (of 4 doubles). With \cpp11 this is done with the <a href="https://en.cppreference.com/w/cpp/keyword/alignas">alignas</a> keyword, or compiler's extensions for c++98/03.
|
||||
\li %Eigen requires 256-bit alignment for the Eigen::Vector4d's array (of 4 doubles). This is done with the <a href="https://en.cppreference.com/w/cpp/keyword/alignas">alignas</a> keyword.
|
||||
\li %Eigen overloads the `operator new` of Eigen::Vector4d so it will always return 256-bit aligned pointers. (removed in \cpp17)
|
||||
|
||||
Thus, normally, you don't have to worry about anything: %Eigen handles the alignment of operator new for you...
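For instance, both of the following end up properly aligned without any user intervention (hedged illustration):
\code
Eigen::Vector4d  v(1.0, 2.0, 3.0, 4.0);        // automatic storage: aligned via alignas
Eigen::Vector4d* p = new Eigen::Vector4d(v);   // heap storage: aligned via the overloaded operator new
delete p;
\endcode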
@@ -272,7 +272,7 @@ To get an overview of the true relative speed of the different decompositions, c
|
||||
<dt><b>Blocking</b></dt>
|
||||
<dd>Means the algorithm can work per block, whence guaranteeing a good scaling of the performance for large matrices.</dd>
|
||||
<dt><b>Implicit Multi Threading (MT)</b></dt>
|
||||
<dd>Means the algorithm can take advantage of multicore processors via OpenMP. "Implicit" means the algortihm itself is not parallelized, but that it relies on parallelized matrix-matrix product routines.</dd>
|
||||
<dd>Means the algorithm can take advantage of multicore processors via OpenMP. "Implicit" means the algorithm itself is not parallelized, but that it relies on parallelized matrix-matrix product routines.</dd>
|
||||
<dt><b>Explicit Multi Threading (MT)</b></dt>
|
||||
<dd>Means the algorithm is explicitly parallelized to take advantage of multicore processors via OpenMP.</dd>
|
||||
<dt><b>Meta-unroller</b></dt>
|
||||
|
||||
@@ -158,7 +158,7 @@ For dot product and cross product, you need the \link MatrixBase::dot() dot()\en
|
||||
\verbinclude tut_arithmetic_dot_cross.out
|
||||
</td></tr></table>
|
||||
|
||||
Remember that cross product is only for vectors of size 3. Dot product is for vectors of any sizes.
|
||||
Cross product is defined in Eigen not only for vectors of size 3 but also for those of size 2; check \link MatrixBase::cross() the doc\endlink for details. Dot product is for vectors of any size.
|
||||
When using complex numbers, Eigen's dot product is conjugate-linear in the first variable and linear in the
|
||||
second variable.
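A small, hedged illustration of both operations (the cross product requires the Geometry module):
\code
#include <Eigen/Geometry>

Eigen::Vector3d a(1, 0, 0), b(0, 1, 0);
Eigen::Vector3d c = a.cross(b);   // (0, 0, 1)
double d = a.dot(b);              // 0

Eigen::Vector2d u(1, 0), w(0, 1);
double z = u.cross(w);            // the size-2 cross product returns a scalar: 1
\endcode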
@@ -111,9 +111,9 @@ Vector4d c(5.0, 6.0, 7.0, 8.0);
|
||||
|
||||
If C++11 is enabled, fixed-size column or row vectors of arbitrary size can be initialized by passing an arbitrary number of coefficients:
|
||||
\code
|
||||
Vector2i a(1, 2); // A column vector containing the elements {1, 2}
|
||||
Matrix<int, 5, 1> b {1, 2, 3, 4, 5}; // A row-vector containing the elements {1, 2, 3, 4, 5}
|
||||
Matrix<int, 1, 5> c = {1, 2, 3, 4, 5}; // A column vector containing the elements {1, 2, 3, 4, 5}
|
||||
Vector2i a(1, 2); // A column-vector containing the elements {1, 2}
|
||||
Matrix<int, 5, 1> b {1, 2, 3, 4, 5}; // A column-vector containing the elements {1, 2, 3, 4, 5}
|
||||
Matrix<int, 1, 5> c = {1, 2, 3, 4, 5}; // A row-vector containing the elements {1, 2, 3, 4, 5}
|
||||
\endcode
|
||||
|
||||
In the general case of matrices and vectors with either fixed or runtime sizes,
|
||||
@@ -151,14 +151,14 @@ The numbering starts at 0. This example is self-explanatory:
|
||||
\verbinclude tut_matrix_coefficient_accessors.out
|
||||
</td></tr></table>
|
||||
|
||||
Note that the syntax <tt> m(index) </tt>
|
||||
Note that the syntax `m(index)`
|
||||
is not restricted to vectors, it is also available for general matrices, meaning index-based access
|
||||
in the array of coefficients. This however depends on the matrix's storage order. All Eigen matrices default to
|
||||
column-major storage order, but this can be changed to row-major, see \ref TopicStorageOrders "Storage orders".
|
||||
|
||||
The operator[] is also overloaded for index-based access in vectors, but keep in mind that C++ doesn't allow operator[] to
|
||||
take more than one argument. We restrict operator[] to vectors, because an awkwardness in the C++ language
|
||||
would make matrix[i,j] compile to the same thing as matrix[j] !
|
||||
The `operator[]` is also overloaded for index-based access in vectors, but keep in mind that C++ doesn't allow `operator[]` to
|
||||
take more than one argument. We restrict `operator[]` to vectors, because an awkwardness in the C++ language
|
||||
would make `matrix[i,j]` compile to the same thing as `matrix[j]`!
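A brief, hedged illustration of the accessors discussed above:
\code
Eigen::MatrixXd m(2, 2);
m(0, 0) = 1;  m(1, 0) = 2;    // two-argument operator() on a matrix
m(2)    = 3;                  // index-based access: third coefficient in storage order

Eigen::VectorXd v(3);
v[0] = 1;  v(1) = 2;          // operator[] and operator() are equivalent on vectors
// m[0, 1] would not do what you expect: the comma operator makes it the same as m[1]
\endcode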
\section TutorialMatrixCommaInitializer Comma-initialization
|
||||
|
||||
@@ -186,8 +186,8 @@ The current size of a matrix can be retrieved by \link EigenBase::rows() rows()\
|
||||
<td>\verbinclude tut_matrix_resize.out </td>
|
||||
</tr></table>
|
||||
|
||||
The resize() method is a no-operation if the actual matrix size doesn't change; otherwise it is destructive: the values of the coefficients may change.
|
||||
If you want a conservative variant of resize() which does not change the coefficients, use \link PlainObjectBase::conservativeResize() conservativeResize()\endlink, see \ref TopicResizing "this page" for more details.
|
||||
The `resize()` method is a no-operation if the actual matrix size doesn't change; otherwise it is destructive: the values of the coefficients may change.
|
||||
If you want a conservative variant of `resize()` which does not change the coefficients, use \link PlainObjectBase::conservativeResize() conservativeResize()\endlink, see \ref TopicResizing "this page" for more details.
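A short, hedged illustration of the difference:
\code
Eigen::MatrixXd m = Eigen::MatrixXd::Constant(2, 2, 1.0);
m.conservativeResize(3, 3);   // the original 2x2 block is preserved, new coefficients are uninitialized
m.resize(5, 5);               // destructive: the coefficient values may change
m.resize(5, 5);               // no-operation: the size does not change
\endcode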
All these methods are still available on fixed-size matrices, for the sake of API uniformity. Of course, you can't actually
|
||||
resize a fixed-size matrix. Trying to change a fixed size to an actually different value will trigger an assertion failure;
|
||||
@@ -234,7 +234,7 @@ is always allocated on the heap, so doing
|
||||
\code MatrixXf mymatrix(rows,columns); \endcode
|
||||
amounts to doing
|
||||
\code float *mymatrix = new float[rows*columns]; \endcode
|
||||
and in addition to that, the MatrixXf object stores its number of rows and columns as
|
||||
and in addition to that, the \c MatrixXf object stores its number of rows and columns as
|
||||
member variables.
|
||||
|
||||
The limitation of using fixed sizes, of course, is that this is only possible
|
||||
@@ -276,14 +276,16 @@ Matrix<typename Scalar,
|
||||
\section TutorialMatrixTypedefs Convenience typedefs
|
||||
|
||||
Eigen defines the following Matrix typedefs:
|
||||
\li MatrixNt for Matrix<type, N, N>. For example, MatrixXi for Matrix<int, Dynamic, Dynamic>.
|
||||
\li VectorNt for Matrix<type, N, 1>. For example, Vector2f for Matrix<float, 2, 1>.
|
||||
\li RowVectorNt for Matrix<type, 1, N>. For example, RowVector3d for Matrix<double, 1, 3>.
|
||||
\li \c MatrixNt for `Matrix<type, N, N>`. For example, \c MatrixXi for `Matrix<int, Dynamic, Dynamic>`.
|
||||
\li \c MatrixXNt for `Matrix<type, Dynamic, N>`. For example, \c MatrixX3i for `Matrix<int, Dynamic, 3>`.
|
||||
\li \c MatrixNXt for `Matrix<type, N, Dynamic>`. For example, \c Matrix4Xd for `Matrix<d, 4, Dynamic>`.
|
||||
\li \c VectorNt for `Matrix<type, N, 1>`. For example, \c Vector2f for `Matrix<float, 2, 1>`.
|
||||
\li \c RowVectorNt for `Matrix<type, 1, N>`. For example, \c RowVector3d for `Matrix<double, 1, 3>`.
|
||||
|
||||
Where:
|
||||
\li N can be any one of \c 2, \c 3, \c 4, or \c X (meaning \c Dynamic).
|
||||
\li t can be any one of \c i (meaning int), \c f (meaning float), \c d (meaning double),
|
||||
\c cf (meaning complex<float>), or \c cd (meaning complex<double>). The fact that typedefs are only
|
||||
\li \c N can be any one of \c 2, \c 3, \c 4, or \c X (meaning \c Dynamic).
|
||||
\li \c t can be any one of \c i (meaning \c int), \c f (meaning \c float), \c d (meaning \c double),
|
||||
\c cf (meaning `complex<float>`), or \c cd (meaning `complex<double>`). The fact that `typedef`s are only
|
||||
defined for these five types doesn't mean that they are the only supported scalar types. For example,
|
||||
all standard integer types are supported, see \ref TopicScalarTypes "Scalar types".
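A few of these typedefs side by side (hedged illustration):
\code
Eigen::Matrix3f    A;          // Matrix<float, 3, 3>
Eigen::MatrixX3i   B(4, 3);    // Matrix<int, Dynamic, 3>
Eigen::Matrix4Xd   C(4, 7);    // Matrix<double, 4, Dynamic>
Eigen::Vector2f    v;          // Matrix<float, 2, 1>
Eigen::RowVector3d r;          // Matrix<double, 1, 3>
Eigen::VectorXcd   z(10);      // Matrix<std::complex<double>, Dynamic, 1>
\endcode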
@@ -3,7 +3,7 @@ namespace Eigen {
|
||||
/** \eigenManualPage TutorialReshape Reshape
|
||||
|
||||
Since version 3.4, %Eigen exposes convenient methods to reshape a matrix to another matrix of different sizes, or to a vector.
|
||||
All cases are handled via the DenseBase::reshaped(NRowsType,NColsType) and DenseBase::reshaped() functions.
|
||||
All cases are handled via the `DenseBase::reshaped(NRowsType,NColsType)` and `DenseBase::reshaped()` functions.
|
||||
Those functions do not perform in-place reshaping, but instead return a <i> view </i> on the input expression.
|
||||
|
||||
\eigenAutoToc
|
||||
@@ -23,7 +23,7 @@ Here is an example reshaping a 4x4 matrix to a 2x8 one:
|
||||
</td></tr></table>
|
||||
|
||||
By default, the input coefficients are always interpreted in column-major order regardless of the storage order of the input expression.
|
||||
For more control on ordering, compile-time sizes, and automatic size deduction, please see de documentation of DenseBase::reshaped(NRowsType,NColsType) that contains all the details with many examples.
|
||||
For more control on ordering, compile-time sizes, and automatic size deduction, please see the documentation of `DenseBase::reshaped(NRowsType,NColsType)`, which contains all the details with many examples.
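For example (a hedged sketch; the printing assumes `<iostream>` is included):
\code
Eigen::MatrixXi M = Eigen::MatrixXi::Random(4, 4);
std::cout << M.reshaped(2, 8) << "\n";    // 2x8 view, coefficients read in column-major order
Eigen::VectorXi v = M.reshaped();         // 1D linear view copied into a vector of 16 coefficients
\endcode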
\section TutorialReshapeMat2Vec 1D linear views
|
||||
|
||||
@@ -15,7 +15,7 @@ All the aforementioned operations are handled through the generic DenseBase::ope
|
||||
Each argument can be:
|
||||
- An integer indexing a single row or column, including symbolic indices.
|
||||
- The symbol Eigen::all representing the whole set of respective rows or columns in increasing order.
|
||||
- An ArithmeticSequence as constructed by the Eigen::seq, Eigen::seqN, or Eigen::lastN functions.
|
||||
- An ArithmeticSequence as constructed by the Eigen::seq, Eigen::seqN, or Eigen::placeholders::lastN functions.
|
||||
- Any 1D vector/array of integers including %Eigen's vector/array, expressions, std::vector, std::array, as well as plain C arrays: `int[N]`.
|
||||
|
||||
More generally, it can accept any object exposing the following two member functions:
|
||||
@@ -72,7 +72,7 @@ Here are some examples for a 2D array/matrix \c A and a 1D array/vector \c v.
|
||||
</tr>
|
||||
<tr>
|
||||
<td>%Block starting at \c i,j having \c m rows, and \c n columns</td>
|
||||
<td>\code A(seqN(i,m), seqN(i,n) \endcode</td>
|
||||
<td>\code A(seqN(i,m), seqN(j,n)) \endcode</td>
|
||||
<td>\code A.block(i,j,m,n) \endcode</td>
|
||||
</tr>
|
||||
<tr>
|
||||
@@ -112,9 +112,10 @@ Here are some examples for a 2D array/matrix \c A and a 1D array/vector \c v.
|
||||
</tr>
|
||||
</table>
|
||||
|
||||
As seen in the last exemple, referencing the <i> last n </i> elements (or rows/columns) is a bit cumbersome to write.
|
||||
As seen in the last example, referencing the <i> last n </i> elements (or rows/columns) is a bit cumbersome to write.
|
||||
This becomes even more tricky and error prone with a non-default increment.
|
||||
Here comes \link Eigen::lastN(SizeType) Eigen::lastN(size) \endlink, and \link Eigen::lastN(SizeType,IncrType) Eigen::lastN(size,incr) \endlink:
|
||||
Here comes \link Eigen::placeholders::lastN(SizeType) Eigen::placeholders::lastN(size) \endlink, and
|
||||
\link Eigen::placeholders::lastN(SizeType,IncrType) Eigen::placeholders::lastN(size,incr) \endlink:
|
||||
|
||||
<table class="manual">
|
||||
<tr>
|
||||
@@ -129,12 +130,12 @@ Here comes \link Eigen::lastN(SizeType) Eigen::lastN(size) \endlink, and \link E
|
||||
</tr>
|
||||
<tr>
|
||||
<td>Bottom-right corner of A of size \c m times \c n</td>
|
||||
<td>\code v(lastN(m), lastN(n)) \endcode</td>
|
||||
<td>\code A(lastN(m), lastN(n)) \endcode</td>
|
||||
<td>\code A.bottomRightCorner(m,n) \endcode</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>Bottom-right corner of A of size \c m times \c n</td>
|
||||
<td>\code v(lastN(m), lastN(n)) \endcode</td>
|
||||
<td>\code A(lastN(m), lastN(n)) \endcode</td>
|
||||
<td>\code A.bottomRightCorner(m,n) \endcode</td>
|
||||
</tr>
|
||||
<tr>
|
||||
@@ -221,7 +222,7 @@ i = ind[i];
|
||||
\endcode
|
||||
|
||||
This means you can easily build your own fancy sequence generator and pass it to `operator()`.
|
||||
Here is an exemple enlarging a given matrix while padding the additional first rows and columns through repetition:
|
||||
Here is an example enlarging a given matrix while padding the additional first rows and columns through repetition:
|
||||
|
||||
<table class="example">
|
||||
<tr><th>Example:</th><th>Output:</th></tr>
|
||||
|
||||
@@ -44,8 +44,8 @@ This storage scheme is better explained on an example. The following matrix
|
||||
|
||||
and one of its possible sparse, \b column \b major representation:
|
||||
<table class="manual">
|
||||
<tr><td>Values:</td> <td>22</td><td>7</td><td>_</td><td>3</td><td>5</td><td>14</td><td>_</td><td>_</td><td>1</td><td>_</td><td>17</td><td>8</td></tr>
|
||||
<tr><td>InnerIndices:</td> <td> 1</td><td>2</td><td>_</td><td>0</td><td>2</td><td> 4</td><td>_</td><td>_</td><td>2</td><td>_</td><td> 1</td><td>4</td></tr>
|
||||
<tr><td>Values:</td> <td>22</td><td>7</td><td>_</td><td>3</td><td>5</td><td>_</td><td>14</td><td>_</td><td>1</td><td>_</td><td>17</td><td>8</td></tr>
|
||||
<tr><td>InnerIndices:</td> <td> 1</td><td>2</td><td>_</td><td>0</td><td>2</td><td>_</td><td>4</td><td>_</td><td>2</td><td>_</td><td> 1</td><td>4</td></tr>
|
||||
</table>
|
||||
<table class="manual">
|
||||
<tr><td>OuterStarts:</td><td>0</td><td>3</td><td>5</td><td>8</td><td>10</td><td>\em 12 </td></tr>
|
||||
@@ -54,13 +54,13 @@ and one of its possible sparse, \b column \b major representation:
|
||||
|
||||
Currently the elements of a given inner vector are guaranteed to be always sorted by increasing inner indices.
|
||||
The \c "_" indicates available free space to quickly insert new elements.
|
||||
Assuming no reallocation is needed, the insertion of a random element is therefore in O(nnz_j) where nnz_j is the number of nonzeros of the respective inner vector.
|
||||
On the other hand, inserting elements with increasing inner indices in a given inner vector is much more efficient since this only requires to increase the respective \c InnerNNZs entry that is a O(1) operation.
|
||||
Assuming no reallocation is needed, the insertion of a random element is therefore in `O(nnz_j)` where `nnz_j` is the number of nonzeros of the respective inner vector.
|
||||
On the other hand, inserting elements with increasing inner indices in a given inner vector is much more efficient since this only requires to increase the respective \c InnerNNZs entry that is a `O(1)` operation.
|
||||
|
||||
The case where no empty space is available is a special case, and is referred to as the \em compressed mode.
|
||||
It corresponds to the widely used Compressed Column (or Row) Storage schemes (CCS or CRS).
|
||||
Any SparseMatrix can be turned to this form by calling the SparseMatrix::makeCompressed() function.
|
||||
In this case, one can remark that the \c InnerNNZs array is redundant with \c OuterStarts because we have the equality: \c InnerNNZs[j] = \c OuterStarts[j+1]-\c OuterStarts[j].
|
||||
In this case, one can remark that the \c InnerNNZs array is redundant with \c OuterStarts because we have the equality: `InnerNNZs[j] == OuterStarts[j+1] - OuterStarts[j]`.
|
||||
Therefore, in practice a call to SparseMatrix::makeCompressed() frees this buffer.
|
||||
|
||||
It is worth noting that most of our wrappers to external libraries require compressed matrices as input.
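A hedged sketch of how the compressed arrays can be inspected once the matrix is in compressed mode (the entries below are arbitrary):
\code
Eigen::SparseMatrix<double> A(5, 5);
A.insert(1, 0) = 22;  A.insert(2, 0) = 7;  A.insert(4, 2) = 14;
A.makeCompressed();                        // free space removed, InnerNNZs becomes redundant

const double* values = A.valuePtr();       // "Values" array
const int*    inner  = A.innerIndexPtr();  // "InnerIndices" array
const int*    outer  = A.outerIndexPtr();  // "OuterStarts" array
for (int j = 0; j < A.outerSize(); ++j)
  for (int k = outer[j]; k < outer[j+1]; ++k)
    std::cout << "(" << inner[k] << "," << j << ") = " << values[k] << "\n";
\endcode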
@@ -221,9 +221,9 @@ A typical scenario of this approach is illustrated below:
|
||||
5: mat.makeCompressed(); // optional
|
||||
\endcode
|
||||
|
||||
- The key ingredient here is the line 2 where we reserve room for 6 non-zeros per column. In many cases, the number of non-zeros per column or row can easily be known in advance. If it varies significantly for each inner vector, then it is possible to specify a reserve size for each inner vector by providing a vector object with an operator[](int j) returning the reserve size of the \c j-th inner vector (e.g., via a VectorXi or std::vector<int>). If only a rought estimate of the number of nonzeros per inner-vector can be obtained, it is highly recommended to overestimate it rather than the opposite. If this line is omitted, then the first insertion of a new element will reserve room for 2 elements per inner vector.
|
||||
- The key ingredient here is line 2, where we reserve room for 6 non-zeros per column. In many cases, the number of non-zeros per column or row can easily be known in advance. If it varies significantly for each inner vector, then it is possible to specify a reserve size for each inner vector by providing a vector object with an `operator[](int j)` returning the reserve size of the \c j-th inner vector (e.g., via a `VectorXi` or `std::vector<int>`). If only a rough estimate of the number of nonzeros per inner-vector can be obtained, it is highly recommended to overestimate it rather than the opposite. If this line is omitted, then the first insertion of a new element will reserve room for 2 elements per inner vector.
|
||||
- Line 4 performs a sorted insertion. In this example, the ideal case is when the \c j-th column is not full and contains non-zeros whose inner-indices are smaller than \c i. In this case, this operation boils down to a trivial O(1) operation.
|
||||
- When calling insert(i,j) the element \c i \c ,j must not already exists, otherwise use the coeffRef(i,j) method that will allow to, e.g., accumulate values. This method first performs a binary search and finally calls insert(i,j) if the element does not already exist. It is more flexible than insert() but also more costly.
|
||||
- When calling `insert(i,j)`, the element `(i,j)` must not already exist; otherwise use the `coeffRef(i,j)` method, which allows one to, e.g., accumulate values. This method first performs a binary search and finally calls `insert(i,j)` if the element does not already exist. It is more flexible than `insert()` but also more costly.
|
||||
- Line 5 removes the remaining empty space and transforms the matrix into compressed column storage; a complete sketch of this pattern follows this list.
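Putting these points together, a minimal, hedged sketch (the function name \c build, the sizes, and the fill pattern are made up for illustration):
\code
#include <Eigen/Sparse>
#include <algorithm>

Eigen::SparseMatrix<double> build(int rows, int cols)
{
  Eigen::SparseMatrix<double> mat(rows, cols);
  mat.reserve(Eigen::VectorXi::Constant(cols, 6));   // line 2: ~6 non-zeros per column
  for (int j = 0; j < cols; ++j)
    for (int i = j; i < std::min(j + 3, rows); ++i)  // arbitrary banded pattern
      mat.insert(i, j) = i + j + 1.0;                // line 4: sorted insertion, (i,j) not yet present
  mat.makeCompressed();                              // line 5: optional compression
  return mat;
}
\endcode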
@@ -259,7 +259,7 @@ sm2 = sm1.cwiseProduct(dm1);
|
||||
dm2 = sm1 + dm1;
|
||||
dm2 = dm1 - sm1;
|
||||
\endcode
|
||||
Performance-wise, the adding/subtracting sparse and dense matrices is better performed in two steps. For instance, instead of doing <tt>dm2 = sm1 + dm1</tt>, better write:
|
||||
Performance-wise, the adding/subtracting sparse and dense matrices is better performed in two steps. For instance, instead of doing `dm2 = sm1 + dm1`, better write:
|
||||
\code
|
||||
dm2 = dm1;
|
||||
dm2 += sm1;
|
||||
@@ -272,7 +272,7 @@ This version has the advantage to fully exploit the higher performance of dense
|
||||
sm1 = sm2.transpose();
|
||||
sm1 = sm2.adjoint();
|
||||
\endcode
|
||||
However, there is no transposeInPlace() method.
|
||||
However, there is no `transposeInPlace()` method.
|
||||
|
||||
|
||||
\subsection TutorialSparse_Products Matrix products
|
||||
@@ -284,18 +284,18 @@ dv2 = sm1 * dv1;
|
||||
dm2 = dm1 * sm1.adjoint();
|
||||
dm2 = 2. * sm1 * dm1;
|
||||
\endcode
|
||||
- \b symmetric \b sparse-dense. The product of a sparse symmetric matrix with a dense matrix (or vector) can also be optimized by specifying the symmetry with selfadjointView():
|
||||
- \b symmetric \b sparse-dense. The product of a sparse symmetric matrix with a dense matrix (or vector) can also be optimized by specifying the symmetry with `selfadjointView()`:
|
||||
\code
|
||||
dm2 = sm1.selfadjointView<>() * dm1; // if all coefficients of A are stored
|
||||
dm2 = A.selfadjointView<Upper>() * dm1; // if only the upper part of A is stored
|
||||
dm2 = A.selfadjointView<Lower>() * dm1; // if only the lower part of A is stored
|
||||
dm2 = sm1.selfadjointView<>() * dm1; // if all coefficients of sm1 are stored
|
||||
dm2 = sm1.selfadjointView<Upper>() * dm1; // if only the upper part of sm1 is stored
|
||||
dm2 = sm1.selfadjointView<Lower>() * dm1; // if only the lower part of sm1 is stored
|
||||
\endcode
|
||||
- \b sparse-sparse. For sparse-sparse products, two different algorithms are available. The default one is conservative and preserves the explicit zeros that might appear:
|
||||
\code
|
||||
sm3 = sm1 * sm2;
|
||||
sm3 = 4 * sm1.adjoint() * sm2;
|
||||
\endcode
|
||||
The second algorithm prunes on the fly the explicit zeros, or the values smaller than a given threshold. It is enabled and controlled through the prune() functions:
|
||||
The second algorithm prunes on the fly the explicit zeros, or the values smaller than a given threshold. It is enabled and controlled through the `prune()` functions:
|
||||
\code
|
||||
sm3 = (sm1 * sm2).pruned(); // removes numerical zeros
|
||||
sm3 = (sm1 * sm2).pruned(ref); // removes elements much smaller than ref
|
||||
@@ -314,7 +314,7 @@ sm2 = sm1.transpose() * P;
|
||||
\subsection TutorialSparse_SubMatrices Block operations
|
||||
|
||||
Regarding read-access, sparse matrices expose the same API as dense matrices to access sub-matrices such as blocks, columns, and rows. See \ref TutorialBlockOperations for a detailed introduction.
|
||||
However, for performance reasons, writing to a sub-sparse-matrix is much more limited, and currently only contiguous sets of columns (resp. rows) of a column-major (resp. row-major) SparseMatrix are writable. Moreover, this information has to be known at compile-time, leaving out methods such as <tt>block(...)</tt> and <tt>corner*(...)</tt>. The available API for write-access to a SparseMatrix are summarized below:
|
||||
However, for performance reasons, writing to a sub-sparse-matrix is much more limited, and currently only contiguous sets of columns (resp. rows) of a column-major (resp. row-major) SparseMatrix are writable. Moreover, this information has to be known at compile-time, leaving out methods such as `block(...)` and `corner*(...)`. The available API for write-access to a SparseMatrix are summarized below:
|
||||
\code
|
||||
SparseMatrix<double,ColMajor> sm1;
|
||||
sm1.col(j) = ...;
|
||||
@@ -329,22 +329,22 @@ sm2.middleRows(i,nrows) = ...;
|
||||
sm2.bottomRows(nrows) = ...;
|
||||
\endcode
|
||||
|
||||
In addition, sparse matrices expose the SparseMatrixBase::innerVector() and SparseMatrixBase::innerVectors() methods, which are aliases to the col/middleCols methods for a column-major storage, and to the row/middleRows methods for a row-major storage.
|
||||
In addition, sparse matrices expose the `SparseMatrixBase::innerVector()` and `SparseMatrixBase::innerVectors()` methods, which are aliases to the `col`/`middleCols` methods for a column-major storage, and to the `row`/`middleRows` methods for a row-major storage.
|
||||
|
||||
\subsection TutorialSparse_TriangularSelfadjoint Triangular and selfadjoint views
|
||||
|
||||
Just as with dense matrices, the triangularView() function can be used to address a triangular part of the matrix, and perform triangular solves with a dense right hand side:
|
||||
Just as with dense matrices, the `triangularView()` function can be used to address a triangular part of the matrix, and perform triangular solves with a dense right hand side:
|
||||
\code
|
||||
dm2 = sm1.triangularView<Lower>(dm1);
|
||||
dv2 = sm1.transpose().triangularView<Upper>(dv1);
|
||||
\endcode
|
||||
|
||||
The selfadjointView() function permits various operations:
|
||||
The `selfadjointView()` function permits various operations:
|
||||
- optimized sparse-dense matrix products:
|
||||
\code
|
||||
dm2 = sm1.selfadjointView<>() * dm1; // if all coefficients of A are stored
|
||||
dm2 = A.selfadjointView<Upper>() * dm1; // if only the upper part of A is stored
|
||||
dm2 = A.selfadjointView<Lower>() * dm1; // if only the lower part of A is stored
|
||||
dm2 = sm1.selfadjointView<>() * dm1; // if all coefficients of sm1 are stored
|
||||
dm2 = sm1.selfadjointView<Upper>() * dm1; // if only the upper part of sm1 is stored
|
||||
dm2 = sm1.selfadjointView<Lower>() * dm1; // if only the lower part of sm1 is stored
|
||||
\endcode
|
||||
- copy of triangular parts:
|
||||
\code
|
||||
|
||||
@@ -101,11 +101,17 @@ m1.colPivHouseholderQr();
|
||||
?geqp3
|
||||
\endcode</td></tr>
|
||||
<tr class="alt"><td>Singular value decomposition \n \c EIGEN_USE_LAPACKE </td><td>\code
|
||||
JacobiSVD<MatrixXd> svd;
|
||||
svd.compute(m1, ComputeThinV);
|
||||
JacobiSVD<MatrixXd, ComputeThinV> svd;
|
||||
svd.compute(m1);
|
||||
\endcode</td><td>\code
|
||||
?gesvd
|
||||
\endcode</td></tr>
|
||||
<tr class="alt"><td>Singular value decomposition \n \c EIGEN_USE_LAPACKE \n \c EIGEN_USE_LAPACKE_STRICT </td><td>\code
|
||||
BDCSVD<MatrixXd> svd;
|
||||
svd.compute(m1);
|
||||
\endcode</td><td>\code
|
||||
?gesdd
|
||||
\endcode</td></tr>
|
||||
<tr><td>Eigen-value decompositions \n \c EIGEN_USE_LAPACKE \n \c EIGEN_USE_LAPACKE_STRICT </td><td>\code
|
||||
EigenSolver<MatrixXd> es(m1);
|
||||
ComplexEigenSolver<MatrixXcd> ces(m1);
|
||||
|
||||
@@ -62,23 +62,161 @@ function getNode(o, po)
|
||||
}
|
||||
}
|
||||
|
||||
// Overloaded to adjust the size of the navtree wrt the toc
|
||||
function resizeHeight()
|
||||
{
|
||||
var header = $("#top");
|
||||
var sidenav = $("#side-nav");
|
||||
var content = $("#doc-content");
|
||||
var navtree = $("#nav-tree");
|
||||
var footer = $("#nav-path");
|
||||
var toc = $("#nav-toc");
|
||||
/*
|
||||
@licstart The following is the entire license notice for the JavaScript code in this file.
|
||||
|
||||
var headerHeight = header.outerHeight();
|
||||
var footerHeight = footer.outerHeight();
|
||||
var tocHeight = toc.height();
|
||||
var windowHeight = $(window).height() - headerHeight - footerHeight;
|
||||
content.css({height:windowHeight + "px"});
|
||||
navtree.css({height:(windowHeight-tocHeight) + "px"});
|
||||
sidenav.css({height:windowHeight + "px"});
|
||||
The MIT License (MIT)
|
||||
|
||||
Copyright (C) 1997-2020 by Dimitri van Heesch
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy of this software
|
||||
and associated documentation files (the "Software"), to deal in the Software without restriction,
|
||||
including without limitation the rights to use, copy, modify, merge, publish, distribute,
|
||||
sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all copies or
|
||||
substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
|
||||
BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
|
||||
DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
|
||||
@licend The above is the entire license notice for the JavaScript code in this file
|
||||
*/
|
||||
// We need to override entire resizable just so we can change the height to account for the TOC.
|
||||
function initResizable()
|
||||
{
|
||||
var cookie_namespace = 'doxygen';
|
||||
var sidenav,navtree,content,header,collapsed,collapsedWidth=0,barWidth=6,desktop_vp=768,titleHeight;
|
||||
|
||||
function readCookie(cookie)
|
||||
{
|
||||
var myCookie = cookie_namespace+"_"+cookie+"=";
|
||||
if (document.cookie) {
|
||||
var index = document.cookie.indexOf(myCookie);
|
||||
if (index != -1) {
|
||||
var valStart = index + myCookie.length;
|
||||
var valEnd = document.cookie.indexOf(";", valStart);
|
||||
if (valEnd == -1) {
|
||||
valEnd = document.cookie.length;
|
||||
}
|
||||
var val = document.cookie.substring(valStart, valEnd);
|
||||
return val;
|
||||
}
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
function writeCookie(cookie, val, expiration)
|
||||
{
|
||||
if (val==undefined) return;
|
||||
if (expiration == null) {
|
||||
var date = new Date();
|
||||
date.setTime(date.getTime()+(10*365*24*60*60*1000)); // default expiration is roughly ten years
|
||||
expiration = date.toGMTString();
|
||||
}
|
||||
document.cookie = cookie_namespace + "_" + cookie + "=" + val + "; expires=" + expiration+"; path=/";
}

function resizeWidth()
{
var windowWidth = $(window).width() + "px";
var sidenavWidth = $(sidenav).outerWidth();
content.css({marginLeft:parseInt(sidenavWidth)+"px"});
writeCookie('width',sidenavWidth-barWidth, null);
}

function restoreWidth(navWidth)
{
var windowWidth = $(window).width() + "px";
content.css({marginLeft:parseInt(navWidth)+barWidth+"px"});
sidenav.css({width:navWidth + "px"});
}

function resizeHeight()
{
var headerHeight = header.outerHeight();
var footerHeight = footer.outerHeight();
var windowHeight = $(window).height() - headerHeight - footerHeight;
//==========================================================================
// MODIFICATION:
// This small section is the only portion modified within initResizable().
// The rest is copy-pasted from the doxygen-generated resize.js.
//
// Adjust nav height to make room for TOC.
var toc = $("#nav-toc");
var tocHeight = toc.height();
var navHeight = windowHeight;
// tocHeight is not always defined (e.g. if empty)
if (tocHeight) {
navHeight = windowHeight - tocHeight;
}
//==========================================================================

content.css({height:windowHeight + "px"});
navtree.css({height:navHeight + "px"});
sidenav.css({height:windowHeight + "px"});

var width=$(window).width();
if (width!=collapsedWidth) {
if (width<desktop_vp && collapsedWidth>=desktop_vp) {
if (!collapsed) {
collapseExpand();
}
} else if (width>desktop_vp && collapsedWidth<desktop_vp) {
if (collapsed) {
collapseExpand();
}
}
collapsedWidth=width;
}
if (location.hash.slice(1)) {
(document.getElementById(location.hash.slice(1))||document.body).scrollIntoView();
}
}

function collapseExpand()
{
if (sidenav.width()>0) {
restoreWidth(0);
collapsed=true;
}
else {
var width = readCookie('width');
if (width>200 && width<$(window).width()) { restoreWidth(width); } else { restoreWidth(200); }
collapsed=false;
}
}
header = $("#top");
sidenav = $("#side-nav");
content = $("#doc-content");
navtree = $("#nav-tree");
footer = $("#nav-path");

$(".side-nav-resizable").resizable({resize: function(e, ui) { resizeWidth(); } });
$(sidenav).resizable({ minWidth: 0 });
$(window).resize(function() { resizeHeight(); });
var device = navigator.userAgent.toLowerCase();
var touch_device = device.match(/(iphone|ipod|ipad|android)/);
if (touch_device) { /* wider split bar for touch only devices */
$(sidenav).css({ paddingRight:'20px' });
$('.ui-resizable-e').css({ width:'20px' });
$('#nav-sync').css({ right:'34px' });
barWidth=20;
}
var width = readCookie('width');
if (width) { restoreWidth(width); } else { resizeWidth(); }
resizeHeight();
var url = location.href;
var i=url.indexOf("#");
if (i>=0) window.location.hash=url.substr(i);
var _preventDefault = function(evt) { evt.preventDefault(); };
$("#splitbar").bind("dragstart", _preventDefault).bind("selectstart", _preventDefault);
$(".ui-resizable-handle").dblclick(collapseExpand);
$(window).on('load',resizeHeight);
}

// Overloaded to save the root node into global_navtree_object
@@ -241,7 +379,4 @@ $(document).ready(function() {
setTimeout(arguments.callee, 10);
}
})();

$(window).on("load", resizeHeight);
});

@@ -160,9 +160,7 @@ div.toc {
margin:0;
padding: 0.3em 0 0 0;
width:100%;
float:none;
position:absolute;
bottom:0;
float: none;
border-radius:0px;
border-style: solid none none none;
max-height:50%;

@@ -21,8 +21,6 @@ $mathjax
</head>
<body>

<div style="background:#FFDDDD;font-size:120%;text-align:center;margin:0;padding:5px">Please, help us to better know about our user community by answering the following short survey: <a href="https://forms.gle/wpyrxWi18ox9Z5ae9">https://forms.gle/wpyrxWi18ox9Z5ae9</a></div>

<div id="top"><!-- do not remove this div, it is closed by doxygen! -->

<!--BEGIN TITLEAREA-->

@@ -14,7 +14,3 @@ foreach(example_src ${examples_SRCS})
)
add_dependencies(all_examples ${example})
endforeach()

if(EIGEN_COMPILER_SUPPORT_CPP11)
ei_add_target_property(nullary_indexing COMPILE_FLAGS "-std=c++11")
endif()
@@ -1,9 +1,8 @@
|
||||
#include <Eigen/Core>
|
||||
#include <unsupported/Eigen/SpecialFunctions>
|
||||
#include <iostream>
|
||||
using namespace Eigen;
|
||||
int main()
|
||||
{
|
||||
Array4d v(-0.5,2,0,-7);
|
||||
Eigen::Array4d v(-0.5,2,0,-7);
|
||||
std::cout << v.erf() << std::endl;
|
||||
}
|
||||
|
||||
@@ -1,9 +1,8 @@
|
||||
#include <Eigen/Core>
|
||||
#include <unsupported/Eigen/SpecialFunctions>
|
||||
#include <iostream>
|
||||
using namespace Eigen;
|
||||
int main()
|
||||
{
|
||||
Array4d v(-0.5,2,0,-7);
|
||||
Eigen::Array4d v(-0.5,2,0,-7);
|
||||
std::cout << v.erfc() << std::endl;
|
||||
}
|
||||
|
||||
@@ -1,9 +1,8 @@
|
||||
#include <Eigen/Core>
|
||||
#include <unsupported/Eigen/SpecialFunctions>
|
||||
#include <iostream>
|
||||
using namespace Eigen;
|
||||
int main()
|
||||
{
|
||||
Array4d v(0.5,10,0,-1);
|
||||
Eigen::Array4d v(0.5,10,0,-1);
|
||||
std::cout << v.lgamma() << std::endl;
|
||||
}
|
||||
|
||||
@@ -1,15 +1,12 @@
|
||||
#include <Eigen/Core>
|
||||
#include <iostream>
|
||||
|
||||
using namespace Eigen;
|
||||
using namespace std;
|
||||
|
||||
int main(void)
|
||||
int main()
|
||||
{
|
||||
int const N = 5;
|
||||
MatrixXi A(N,N);
|
||||
Eigen::MatrixXi A(N,N);
|
||||
A.setRandom();
|
||||
cout << "A =\n" << A << '\n' << endl;
|
||||
cout << "A(1..3,:) =\n" << A.middleCols(1,3) << endl;
|
||||
std::cout << "A =\n" << A << '\n' << std::endl;
|
||||
std::cout << "A(1..3,:) =\n" << A.middleCols(1,3) << std::endl;
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -1,15 +1,12 @@
|
||||
#include <Eigen/Core>
|
||||
#include <iostream>
|
||||
|
||||
using namespace Eigen;
|
||||
using namespace std;
|
||||
|
||||
int main(void)
|
||||
int main()
|
||||
{
|
||||
int const N = 5;
|
||||
MatrixXi A(N,N);
|
||||
Eigen::MatrixXi A(N,N);
|
||||
A.setRandom();
|
||||
cout << "A =\n" << A << '\n' << endl;
|
||||
cout << "A(2..3,:) =\n" << A.middleRows(2,2) << endl;
|
||||
std::cout << "A =\n" << A << '\n' << std::endl;
|
||||
std::cout << "A(2..3,:) =\n" << A.middleRows(2,2) << std::endl;
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -1,15 +1,12 @@
|
||||
#include <Eigen/Core>
|
||||
#include <iostream>
|
||||
|
||||
using namespace Eigen;
|
||||
using namespace std;
|
||||
|
||||
int main(void)
|
||||
int main()
|
||||
{
|
||||
int const N = 5;
|
||||
MatrixXi A(N,N);
|
||||
Eigen::MatrixXi A(N,N);
|
||||
A.setRandom();
|
||||
cout << "A =\n" << A << '\n' << endl;
|
||||
cout << "A(:,1..3) =\n" << A.middleCols<3>(1) << endl;
|
||||
std::cout << "A =\n" << A << '\n' << std::endl;
|
||||
std::cout << "A(:,1..3) =\n" << A.middleCols<3>(1) << std::endl;
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -1,15 +1,12 @@
|
||||
#include <Eigen/Core>
|
||||
#include <iostream>
|
||||
|
||||
using namespace Eigen;
|
||||
using namespace std;
|
||||
|
||||
int main(void)
|
||||
int main()
|
||||
{
|
||||
int const N = 5;
|
||||
MatrixXi A(N,N);
|
||||
Eigen::MatrixXi A(N,N);
|
||||
A.setRandom();
|
||||
cout << "A =\n" << A << '\n' << endl;
|
||||
cout << "A(1..3,:) =\n" << A.middleRows<3>(1) << endl;
|
||||
std::cout << "A =\n" << A << '\n' << std::endl;
|
||||
std::cout << "A(1..3,:) =\n" << A.middleRows<3>(1) << std::endl;
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -1,15 +1,15 @@
|
||||
#include <iostream>
|
||||
#include <Eigen/Dense>
|
||||
|
||||
using namespace Eigen;
|
||||
using namespace std;
|
||||
using Eigen::MatrixXd;
|
||||
using Eigen::VectorXd;
|
||||
|
||||
int main()
|
||||
{
|
||||
MatrixXd m = MatrixXd::Random(3,3);
|
||||
m = (m + MatrixXd::Constant(3,3,1.2)) * 50;
|
||||
cout << "m =" << endl << m << endl;
|
||||
std::cout << "m =" << std::endl << m << std::endl;
|
||||
VectorXd v(3);
|
||||
v << 1, 2, 3;
|
||||
cout << "m * v =" << endl << m * v << endl;
|
||||
std::cout << "m * v =" << std::endl << m * v << std::endl;
|
||||
}
|
||||
|
||||
@@ -1,15 +1,15 @@
|
||||
#include <iostream>
|
||||
#include <Eigen/Dense>
|
||||
|
||||
using namespace Eigen;
|
||||
using namespace std;
|
||||
using Eigen::Matrix3d;
|
||||
using Eigen::Vector3d;
|
||||
|
||||
int main()
|
||||
{
|
||||
Matrix3d m = Matrix3d::Random();
|
||||
m = (m + Matrix3d::Constant(1.2)) * 50;
|
||||
cout << "m =" << endl << m << endl;
|
||||
std::cout << "m =" << std::endl << m << std::endl;
|
||||
Vector3d v(1,2,3);
|
||||
|
||||
cout << "m * v =" << endl << m * v << endl;
|
||||
std::cout << "m * v =" << std::endl << m * v << std::endl;
|
||||
}
|
||||
|
||||
@@ -1,19 +1,17 @@
|
||||
#include <Eigen/Dense>
|
||||
#include <iostream>
|
||||
|
||||
using namespace Eigen;
|
||||
|
||||
template <typename Derived1, typename Derived2>
|
||||
void copyUpperTriangularPart(MatrixBase<Derived1>& dst, const MatrixBase<Derived2>& src)
|
||||
void copyUpperTriangularPart(Eigen::MatrixBase<Derived1>& dst, const Eigen::MatrixBase<Derived2>& src)
|
||||
{
|
||||
/* Note the 'template' keywords in the following line! */
|
||||
dst.template triangularView<Upper>() = src.template triangularView<Upper>();
|
||||
dst.template triangularView<Eigen::Upper>() = src.template triangularView<Eigen::Upper>();
|
||||
}
|
||||
|
||||
int main()
|
||||
{
|
||||
MatrixXi m1 = MatrixXi::Ones(5,5);
|
||||
MatrixXi m2 = MatrixXi::Random(4,4);
|
||||
Eigen::MatrixXi m1 = Eigen::MatrixXi::Ones(5,5);
|
||||
Eigen::MatrixXi m2 = Eigen::MatrixXi::Random(4,4);
|
||||
std::cout << "m2 before copy:" << std::endl;
|
||||
std::cout << m2 << std::endl << std::endl;
|
||||
copyUpperTriangularPart(m2, m1.topLeftCorner(4,4));
|
||||
|
||||
@@ -1,11 +1,11 @@
|
||||
#include <Eigen/Dense>
|
||||
#include <iostream>
|
||||
|
||||
using namespace Eigen;
|
||||
using Eigen::MatrixXf;
|
||||
|
||||
void copyUpperTriangularPart(MatrixXf& dst, const MatrixXf& src)
|
||||
{
|
||||
dst.triangularView<Upper>() = src.triangularView<Upper>();
|
||||
dst.triangularView<Eigen::Upper>() = src.triangularView<Eigen::Upper>();
|
||||
}
|
||||
|
||||
int main()
|
||||
|
||||
@@ -1,61 +1,57 @@
|
||||
#include <iostream>
|
||||
struct init {
|
||||
init() { std::cout << "[" << "init" << "]" << std::endl; }
|
||||
init() { std::cout << "[init]\n"; }
|
||||
};
|
||||
init init_obj;
|
||||
// [init]
|
||||
#include <iostream>
|
||||
#include <Eigen/Dense>
|
||||
|
||||
using namespace std;
|
||||
using namespace Eigen;
|
||||
|
||||
int main()
|
||||
{
|
||||
MatrixXd A(2,2);
|
||||
Eigen::MatrixXd A(2,2);
|
||||
A << 2, -1, 1, 3;
|
||||
cout << "Here is the input matrix A before decomposition:\n" << A << endl;
|
||||
cout << "[init]" << endl;
|
||||
std::cout << "Here is the input matrix A before decomposition:\n" << A << "\n";
|
||||
std::cout << "[init]\n";
|
||||
|
||||
cout << "[declaration]" << endl;
|
||||
PartialPivLU<Ref<MatrixXd> > lu(A);
|
||||
cout << "Here is the input matrix A after decomposition:\n" << A << endl;
|
||||
cout << "[declaration]" << endl;
|
||||
std::cout << "[declaration]\n";
|
||||
Eigen::PartialPivLU<Eigen::Ref<Eigen::MatrixXd> > lu(A);
|
||||
std::cout << "Here is the input matrix A after decomposition:\n" << A << "\n";
|
||||
std::cout << "[declaration]\n";
|
||||
|
||||
cout << "[matrixLU]" << endl;
|
||||
cout << "Here is the matrix storing the L and U factors:\n" << lu.matrixLU() << endl;
|
||||
cout << "[matrixLU]" << endl;
|
||||
std::cout << "[matrixLU]\n";
|
||||
std::cout << "Here is the matrix storing the L and U factors:\n" << lu.matrixLU() << "\n";
|
||||
std::cout << "[matrixLU]\n";
|
||||
|
||||
cout << "[solve]" << endl;
|
||||
MatrixXd A0(2,2); A0 << 2, -1, 1, 3;
|
||||
VectorXd b(2); b << 1, 2;
|
||||
VectorXd x = lu.solve(b);
|
||||
cout << "Residual: " << (A0 * x - b).norm() << endl;
|
||||
cout << "[solve]" << endl;
|
||||
std::cout << "[solve]\n";
|
||||
Eigen::MatrixXd A0(2,2); A0 << 2, -1, 1, 3;
|
||||
Eigen::VectorXd b(2); b << 1, 2;
|
||||
Eigen::VectorXd x = lu.solve(b);
|
||||
std::cout << "Residual: " << (A0 * x - b).norm() << "\n";
|
||||
std::cout << "[solve]\n";
|
||||
|
||||
cout << "[modifyA]" << endl;
|
||||
std::cout << "[modifyA]\n";
|
||||
A << 3, 4, -2, 1;
|
||||
x = lu.solve(b);
|
||||
cout << "Residual: " << (A0 * x - b).norm() << endl;
|
||||
cout << "[modifyA]" << endl;
|
||||
std::cout << "Residual: " << (A0 * x - b).norm() << "\n";
|
||||
std::cout << "[modifyA]\n";
|
||||
|
||||
cout << "[recompute]" << endl;
|
||||
std::cout << "[recompute]\n";
|
||||
A0 = A; // save A
|
||||
lu.compute(A);
|
||||
x = lu.solve(b);
|
||||
cout << "Residual: " << (A0 * x - b).norm() << endl;
|
||||
cout << "[recompute]" << endl;
|
||||
std::cout << "Residual: " << (A0 * x - b).norm() << "\n";
|
||||
std::cout << "[recompute]\n";
|
||||
|
||||
cout << "[recompute_bis0]" << endl;
|
||||
MatrixXd A1(2,2);
|
||||
std::cout << "[recompute_bis0]\n";
|
||||
Eigen::MatrixXd A1(2,2);
|
||||
A1 << 5,-2,3,4;
|
||||
lu.compute(A1);
|
||||
cout << "Here is the input matrix A1 after decomposition:\n" << A1 << endl;
|
||||
cout << "[recompute_bis0]" << endl;
|
||||
std::cout << "Here is the input matrix A1 after decomposition:\n" << A1 << "\n";
|
||||
std::cout << "[recompute_bis0]\n";
|
||||
|
||||
cout << "[recompute_bis1]" << endl;
|
||||
std::cout << "[recompute_bis1]\n";
|
||||
x = lu.solve(b);
|
||||
cout << "Residual: " << (A1 * x - b).norm() << endl;
|
||||
cout << "[recompute_bis1]" << endl;
|
||||
std::cout << "Residual: " << (A1 * x - b).norm() << "\n";
|
||||
std::cout << "[recompute_bis1]\n";
|
||||
|
||||
}
|
||||
|
||||
@@ -1,23 +1,20 @@
|
||||
#include <iostream>
|
||||
#include <Eigen/Dense>
|
||||
|
||||
using namespace std;
|
||||
using namespace Eigen;
|
||||
|
||||
int main()
|
||||
{
|
||||
Matrix2f A, b;
|
||||
LLT<Matrix2f> llt;
|
||||
Eigen::Matrix2f A, b;
|
||||
Eigen::LLT<Eigen::Matrix2f> llt;
|
||||
A << 2, -1, -1, 3;
|
||||
b << 1, 2, 3, 1;
|
||||
cout << "Here is the matrix A:\n" << A << endl;
|
||||
cout << "Here is the right hand side b:\n" << b << endl;
|
||||
cout << "Computing LLT decomposition..." << endl;
|
||||
std::cout << "Here is the matrix A:\n" << A << std::endl;
|
||||
std::cout << "Here is the right hand side b:\n" << b << std::endl;
|
||||
std::cout << "Computing LLT decomposition..." << std::endl;
|
||||
llt.compute(A);
|
||||
cout << "The solution is:\n" << llt.solve(b) << endl;
|
||||
std::cout << "The solution is:\n" << llt.solve(b) << std::endl;
|
||||
A(1,1)++;
|
||||
cout << "The matrix A is now:\n" << A << endl;
|
||||
cout << "Computing LLT decomposition..." << endl;
|
||||
std::cout << "The matrix A is now:\n" << A << std::endl;
|
||||
std::cout << "Computing LLT decomposition..." << std::endl;
|
||||
llt.compute(A);
|
||||
cout << "The solution is now:\n" << llt.solve(b) << endl;
|
||||
std::cout << "The solution is now:\n" << llt.solve(b) << std::endl;
|
||||
}
|
||||
|
||||
@@ -1,8 +1,7 @@
|
||||
#include <iostream>
|
||||
#include <Eigen/Dense>
|
||||
|
||||
using namespace std;
|
||||
using namespace Eigen;
|
||||
using Eigen::MatrixXd;
|
||||
|
||||
int main()
|
||||
{
|
||||
@@ -10,5 +9,5 @@ int main()
|
||||
MatrixXd b = MatrixXd::Random(100,50);
|
||||
MatrixXd x = A.fullPivLu().solve(b);
|
||||
double relative_error = (A*x - b).norm() / b.norm(); // norm() is L2 norm
|
||||
cout << "The relative error is:\n" << relative_error << endl;
|
||||
std::cout << "The relative error is:\n" << relative_error << std::endl;
|
||||
}
|
||||
|
||||
@@ -1,17 +1,14 @@
|
||||
#include <iostream>
|
||||
#include <Eigen/Dense>
|
||||
|
||||
using namespace std;
|
||||
using namespace Eigen;
|
||||
|
||||
int main()
|
||||
{
|
||||
Matrix3f A;
|
||||
Vector3f b;
|
||||
Eigen::Matrix3f A;
|
||||
Eigen::Vector3f b;
|
||||
A << 1,2,3, 4,5,6, 7,8,10;
|
||||
b << 3, 3, 4;
|
||||
cout << "Here is the matrix A:\n" << A << endl;
|
||||
cout << "Here is the vector b:\n" << b << endl;
|
||||
Vector3f x = A.colPivHouseholderQr().solve(b);
|
||||
cout << "The solution is:\n" << x << endl;
|
||||
std::cout << "Here is the matrix A:\n" << A << std::endl;
|
||||
std::cout << "Here is the vector b:\n" << b << std::endl;
|
||||
Eigen::Vector3f x = A.colPivHouseholderQr().solve(b);
|
||||
std::cout << "The solution is:\n" << x << std::endl;
|
||||
}
|
||||
|
||||
@@ -1,16 +1,13 @@
|
||||
#include <iostream>
|
||||
#include <Eigen/Dense>
|
||||
|
||||
using namespace std;
|
||||
using namespace Eigen;
|
||||
|
||||
int main()
|
||||
{
|
||||
Matrix2f A, b;
|
||||
Eigen::Matrix2f A, b;
|
||||
A << 2, -1, -1, 3;
|
||||
b << 1, 2, 3, 1;
|
||||
cout << "Here is the matrix A:\n" << A << endl;
|
||||
cout << "Here is the right hand side b:\n" << b << endl;
|
||||
Matrix2f x = A.ldlt().solve(b);
|
||||
cout << "The solution is:\n" << x << endl;
|
||||
std::cout << "Here is the matrix A:\n" << A << std::endl;
|
||||
std::cout << "Here is the right hand side b:\n" << b << std::endl;
|
||||
Eigen::Matrix2f x = A.ldlt().solve(b);
|
||||
std::cout << "The solution is:\n" << x << std::endl;
|
||||
}
|
||||
|
||||
@@ -1,16 +1,13 @@
|
||||
#include <iostream>
|
||||
#include <Eigen/Dense>
|
||||
|
||||
using namespace std;
|
||||
using namespace Eigen;
|
||||
|
||||
int main()
|
||||
{
|
||||
Matrix3f A;
|
||||
Eigen::Matrix3f A;
|
||||
A << 1, 2, 1,
|
||||
2, 1, 0,
|
||||
-1, 1, 2;
|
||||
cout << "Here is the matrix A:\n" << A << endl;
|
||||
cout << "The determinant of A is " << A.determinant() << endl;
|
||||
cout << "The inverse of A is:\n" << A.inverse() << endl;
|
||||
std::cout << "Here is the matrix A:\n" << A << std::endl;
|
||||
std::cout << "The determinant of A is " << A.determinant() << std::endl;
|
||||
std::cout << "The inverse of A is:\n" << A.inverse() << std::endl;
|
||||
}
|
||||
|
||||
@@ -1,20 +1,17 @@
|
||||
#include <iostream>
|
||||
#include <Eigen/Dense>
|
||||
|
||||
using namespace std;
|
||||
using namespace Eigen;
|
||||
|
||||
int main()
|
||||
{
|
||||
Matrix3f A;
|
||||
Eigen::Matrix3f A;
|
||||
A << 1, 2, 5,
|
||||
2, 1, 4,
|
||||
3, 0, 3;
|
||||
cout << "Here is the matrix A:\n" << A << endl;
|
||||
FullPivLU<Matrix3f> lu_decomp(A);
|
||||
cout << "The rank of A is " << lu_decomp.rank() << endl;
|
||||
cout << "Here is a matrix whose columns form a basis of the null-space of A:\n"
|
||||
<< lu_decomp.kernel() << endl;
|
||||
cout << "Here is a matrix whose columns form a basis of the column-space of A:\n"
|
||||
<< lu_decomp.image(A) << endl; // yes, have to pass the original A
|
||||
std::cout << "Here is the matrix A:\n" << A << std::endl;
|
||||
Eigen::FullPivLU<Eigen::Matrix3f> lu_decomp(A);
|
||||
std::cout << "The rank of A is " << lu_decomp.rank() << std::endl;
|
||||
std::cout << "Here is a matrix whose columns form a basis of the null-space of A:\n"
|
||||
<< lu_decomp.kernel() << std::endl;
|
||||
std::cout << "Here is a matrix whose columns form a basis of the column-space of A:\n"
|
||||
<< lu_decomp.image(A) << std::endl; // yes, have to pass the original A
|
||||
}
|
||||
|
||||
@@ -1,15 +1,12 @@
|
||||
#include <iostream>
|
||||
#include <Eigen/Dense>
|
||||
|
||||
using namespace std;
|
||||
using namespace Eigen;
|
||||
|
||||
int main()
|
||||
{
|
||||
MatrixXf A = MatrixXf::Random(3, 2);
|
||||
cout << "Here is the matrix A:\n" << A << endl;
|
||||
VectorXf b = VectorXf::Random(3);
|
||||
cout << "Here is the right hand side b:\n" << b << endl;
|
||||
cout << "The least-squares solution is:\n"
|
||||
<< A.bdcSvd(ComputeThinU | ComputeThinV).solve(b) << endl;
|
||||
Eigen::MatrixXf A = Eigen::MatrixXf::Random(3, 2);
|
||||
std::cout << "Here is the matrix A:\n" << A << std::endl;
|
||||
Eigen::VectorXf b = Eigen::VectorXf::Random(3);
|
||||
std::cout << "Here is the right hand side b:\n" << b << std::endl;
|
||||
std::cout << "The least-squares solution is:\n"
|
||||
<< A.template bdcSvd<Eigen::ComputeThinU | Eigen::ComputeThinV>().solve(b) << std::endl;
|
||||
}
|
||||
|
||||
@@ -1,18 +1,15 @@
|
||||
#include <iostream>
|
||||
#include <Eigen/Dense>
|
||||
|
||||
using namespace std;
|
||||
using namespace Eigen;
|
||||
|
||||
int main()
|
||||
{
|
||||
Matrix2f A;
|
||||
Eigen::Matrix2f A;
|
||||
A << 1, 2, 2, 3;
|
||||
cout << "Here is the matrix A:\n" << A << endl;
|
||||
SelfAdjointEigenSolver<Matrix2f> eigensolver(A);
|
||||
if (eigensolver.info() != Success) abort();
|
||||
cout << "The eigenvalues of A are:\n" << eigensolver.eigenvalues() << endl;
|
||||
cout << "Here's a matrix whose columns are eigenvectors of A \n"
|
||||
std::cout << "Here is the matrix A:\n" << A << std::endl;
|
||||
Eigen::SelfAdjointEigenSolver<Eigen::Matrix2f> eigensolver(A);
|
||||
if (eigensolver.info() != Eigen::Success) abort();
|
||||
std::cout << "The eigenvalues of A are:\n" << eigensolver.eigenvalues() << std::endl;
|
||||
std::cout << "Here's a matrix whose columns are eigenvectors of A \n"
|
||||
<< "corresponding to these eigenvalues:\n"
|
||||
<< eigensolver.eigenvectors() << endl;
|
||||
<< eigensolver.eigenvectors() << std::endl;
|
||||
}
|
||||
|
||||
@@ -1,16 +1,13 @@
|
||||
#include <iostream>
|
||||
#include <Eigen/Dense>
|
||||
|
||||
using namespace std;
|
||||
using namespace Eigen;
|
||||
|
||||
int main()
|
||||
{
|
||||
Matrix2d A;
|
||||
Eigen::Matrix2d A;
|
||||
A << 2, 1,
|
||||
2, 0.9999999999;
|
||||
FullPivLU<Matrix2d> lu(A);
|
||||
cout << "By default, the rank of A is found to be " << lu.rank() << endl;
|
||||
Eigen::FullPivLU<Eigen::Matrix2d> lu(A);
|
||||
std::cout << "By default, the rank of A is found to be " << lu.rank() << std::endl;
|
||||
lu.setThreshold(1e-5);
|
||||
cout << "With threshold 1e-5, the rank of A is found to be " << lu.rank() << endl;
|
||||
std::cout << "With threshold 1e-5, the rank of A is found to be " << lu.rank() << std::endl;
|
||||
}
|
||||
|
||||
@@ -1,24 +1,21 @@
|
||||
#include <Eigen/Dense>
|
||||
#include <iostream>
|
||||
|
||||
using namespace Eigen;
|
||||
using namespace std;
|
||||
|
||||
int main()
|
||||
{
|
||||
ArrayXXf m(2,2);
|
||||
Eigen::ArrayXXf m(2,2);
|
||||
|
||||
// assign some values coefficient by coefficient
|
||||
m(0,0) = 1.0; m(0,1) = 2.0;
|
||||
m(1,0) = 3.0; m(1,1) = m(0,1) + m(1,0);
|
||||
|
||||
// print values to standard output
|
||||
cout << m << endl << endl;
|
||||
std::cout << m << std::endl << std::endl;
|
||||
|
||||
// using the comma-initializer is also allowed
|
||||
m << 1.0,2.0,
|
||||
3.0,4.0;
|
||||
|
||||
// print values to standard output
|
||||
cout << m << endl;
|
||||
std::cout << m << std::endl;
|
||||
}
|
||||
|
||||
@@ -1,13 +1,10 @@
|
||||
#include <Eigen/Dense>
|
||||
#include <iostream>
|
||||
|
||||
using namespace Eigen;
|
||||
using namespace std;
|
||||
|
||||
int main()
|
||||
{
|
||||
ArrayXXf a(3,3);
|
||||
ArrayXXf b(3,3);
|
||||
Eigen::ArrayXXf a(3,3);
|
||||
Eigen::ArrayXXf b(3,3);
|
||||
a << 1,2,3,
|
||||
4,5,6,
|
||||
7,8,9;
|
||||
@@ -16,8 +13,8 @@ int main()
|
||||
1,2,3;
|
||||
|
||||
// Adding two arrays
|
||||
cout << "a + b = " << endl << a + b << endl << endl;
|
||||
std::cout << "a + b = " << std::endl << a + b << std::endl << std::endl;
|
||||
|
||||
// Subtracting a scalar from an array
|
||||
cout << "a - 2 = " << endl << a - 2 << endl;
|
||||
std::cout << "a - 2 = " << std::endl << a - 2 << std::endl;
|
||||
}
|
||||
|
||||
@@ -1,19 +1,16 @@
|
||||
#include <Eigen/Dense>
|
||||
#include <iostream>
|
||||
|
||||
using namespace Eigen;
|
||||
using namespace std;
|
||||
|
||||
int main()
|
||||
{
|
||||
ArrayXf a = ArrayXf::Random(5);
|
||||
Eigen::ArrayXf a = Eigen::ArrayXf::Random(5);
|
||||
a *= 2;
|
||||
cout << "a =" << endl
|
||||
<< a << endl;
|
||||
cout << "a.abs() =" << endl
|
||||
<< a.abs() << endl;
|
||||
cout << "a.abs().sqrt() =" << endl
|
||||
<< a.abs().sqrt() << endl;
|
||||
cout << "a.min(a.abs().sqrt()) =" << endl
|
||||
<< a.min(a.abs().sqrt()) << endl;
|
||||
std::cout << "a =" << std::endl
|
||||
<< a << std::endl;
|
||||
std::cout << "a.abs() =" << std::endl
|
||||
<< a.abs() << std::endl;
|
||||
std::cout << "a.abs().sqrt() =" << std::endl
|
||||
<< a.abs().sqrt() << std::endl;
|
||||
std::cout << "a.min(a.abs().sqrt()) =" << std::endl
|
||||
<< a.min(a.abs().sqrt()) << std::endl;
|
||||
}
|
||||
|
||||
@@ -1,8 +1,7 @@
|
||||
#include <Eigen/Dense>
|
||||
#include <iostream>
|
||||
|
||||
using namespace Eigen;
|
||||
using namespace std;
|
||||
using Eigen::MatrixXf;
|
||||
|
||||
int main()
|
||||
{
|
||||
@@ -16,7 +15,7 @@ int main()
|
||||
7,8;
|
||||
|
||||
result = (m.array() + 4).matrix() * m;
|
||||
cout << "-- Combination 1: --" << endl << result << endl << endl;
|
||||
std::cout << "-- Combination 1: --\n" << result << "\n\n";
|
||||
result = (m.array() * n.array()).matrix() * m;
|
||||
cout << "-- Combination 2: --" << endl << result << endl << endl;
|
||||
std::cout << "-- Combination 2: --\n" << result << "\n\n";
|
||||
}
|
||||
|
||||
@@ -1,8 +1,7 @@
|
||||
#include <Eigen/Dense>
|
||||
#include <iostream>
|
||||
|
||||
using namespace Eigen;
|
||||
using namespace std;
|
||||
using Eigen::MatrixXf;
|
||||
|
||||
int main()
|
||||
{
|
||||
@@ -16,11 +15,11 @@ int main()
|
||||
7,8;
|
||||
|
||||
result = m * n;
|
||||
cout << "-- Matrix m*n: --" << endl << result << endl << endl;
|
||||
std::cout << "-- Matrix m*n: --\n" << result << "\n\n";
|
||||
result = m.array() * n.array();
|
||||
cout << "-- Array m*n: --" << endl << result << endl << endl;
|
||||
std::cout << "-- Array m*n: --\n" << result << "\n\n";
|
||||
result = m.cwiseProduct(n);
|
||||
cout << "-- With cwiseProduct: --" << endl << result << endl << endl;
|
||||
std::cout << "-- With cwiseProduct: --\n" << result << "\n\n";
|
||||
result = m.array() + 4;
|
||||
cout << "-- Array m + 4: --" << endl << result << endl << endl;
|
||||
std::cout << "-- Array m + 4: --\n" << result << "\n\n";
|
||||
}
|
||||
|
||||
@@ -1,16 +1,13 @@
|
||||
#include <Eigen/Dense>
|
||||
#include <iostream>
|
||||
|
||||
using namespace Eigen;
|
||||
using namespace std;
|
||||
|
||||
int main()
|
||||
{
|
||||
ArrayXXf a(2,2);
|
||||
ArrayXXf b(2,2);
|
||||
Eigen::ArrayXXf a(2,2);
|
||||
Eigen::ArrayXXf b(2,2);
|
||||
a << 1,2,
|
||||
3,4;
|
||||
b << 5,6,
|
||||
7,8;
|
||||
cout << "a * b = " << endl << a * b << endl;
|
||||
std::cout << "a * b = " << std::endl << a * b << std::endl;
|
||||
}
|
||||
|
||||
@@ -1,18 +1,15 @@
|
||||
#include <Eigen/Dense>
|
||||
#include <iostream>
|
||||
|
||||
using namespace std;
|
||||
using namespace Eigen;
|
||||
|
||||
int main()
|
||||
{
|
||||
Array22f m;
|
||||
Eigen::Array22f m;
|
||||
m << 1,2,
|
||||
3,4;
|
||||
Array44f a = Array44f::Constant(0.6);
|
||||
cout << "Here is the array a:" << endl << a << endl << endl;
|
||||
Eigen::Array44f a = Eigen::Array44f::Constant(0.6);
|
||||
std::cout << "Here is the array a:\n" << a << "\n\n";
|
||||
a.block<2,2>(1,1) = m;
|
||||
cout << "Here is now a with m copied into its central 2x2 block:" << endl << a << endl << endl;
|
||||
std::cout << "Here is now a with m copied into its central 2x2 block:\n" << a << "\n\n";
|
||||
a.block(0,0,2,3) = a.block(2,1,2,3);
|
||||
cout << "Here is now a with bottom-right 2x3 block copied into top-left 2x3 block:" << endl << a << endl << endl;
|
||||
std::cout << "Here is now a with bottom-right 2x3 block copied into top-left 2x3 block:\n" << a << "\n\n";
|
||||
}
|
||||
|
||||
@@ -2,17 +2,14 @@
|
||||
#include <Eigen/LU>
|
||||
#include <iostream>
|
||||
|
||||
using namespace std;
|
||||
using namespace Eigen;
|
||||
|
||||
int main()
|
||||
{
|
||||
Matrix3f A;
|
||||
Vector3f b;
|
||||
Eigen::Matrix3f A;
|
||||
Eigen::Vector3f b;
|
||||
A << 1,2,3, 4,5,6, 7,8,10;
|
||||
b << 3, 3, 4;
|
||||
cout << "Here is the matrix A:" << endl << A << endl;
|
||||
cout << "Here is the vector b:" << endl << b << endl;
|
||||
Vector3f x = A.lu().solve(b);
|
||||
cout << "The solution is:" << endl << x << endl;
|
||||
std::cout << "Here is the matrix A:" << std::endl << A << std::endl;
|
||||
std::cout << "Here is the vector b:" << std::endl << b << std::endl;
|
||||
Eigen::Vector3f x = A.lu().solve(b);
|
||||
std::cout << "The solution is:" << std::endl << x << std::endl;
|
||||
}
|
||||
|
||||
@@ -1,9 +1,6 @@
|
||||
#include <iostream>
|
||||
#include <Eigen/Dense>
|
||||
|
||||
using namespace std;
|
||||
using namespace Eigen;
|
||||
|
||||
int main()
|
||||
{
|
||||
Eigen::MatrixXf m(2,4);
|
||||
@@ -15,10 +12,10 @@ int main()
|
||||
v << 2,
|
||||
3;
|
||||
|
||||
MatrixXf::Index index;
|
||||
Eigen::Index index;
|
||||
// find nearest neighbour
|
||||
(m.colwise() - v).colwise().squaredNorm().minCoeff(&index);
|
||||
|
||||
cout << "Nearest neighbour is column " << index << ":" << endl;
|
||||
cout << m.col(index) << endl;
|
||||
std::cout << "Nearest neighbour is column " << index << ":" << std::endl;
|
||||
std::cout << m.col(index) << std::endl;
|
||||
}
|
||||
|
||||
@@ -1,15 +1,13 @@
#include <iostream>
#include <Eigen/Dense>

using namespace std;
using namespace Eigen;
int main()
{
MatrixXf mat(2,4);
Eigen::MatrixXf mat(2,4);
mat << 1, 2, 6, 9,
3, 1, 7, 2;

MatrixXf::Index maxIndex;
Eigen::Index maxIndex;
float maxNorm = mat.colwise().sum().maxCoeff(&maxIndex);

std::cout << "Maximum sum at position " << maxIndex << std::endl;

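// Eigen::Index is the library-wide signed index type (by default std::ptrdiff_t,
// configurable through EIGEN_DEFAULT_DENSE_INDEX_TYPE), which is why it replaces
// the older per-matrix MatrixXf::Index typedef above. A self-contained sketch,
// illustrative only and not part of this commit:
#include <Eigen/Dense>
#include <iostream>
int main()
{
  Eigen::MatrixXf mat(2,4);
  mat << 1, 2, 6, 9,
         3, 1, 7, 2;
  Eigen::Index minIndex, maxIndex;   // the same index type works for any dense object
  mat.colwise().sum().minCoeff(&minIndex);
  mat.colwise().sum().maxCoeff(&maxIndex);
  std::cout << "min column " << minIndex << ", max column " << maxIndex << "\n";
}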
@@ -1,21 +1,18 @@
|
||||
#include <Eigen/Dense>
|
||||
#include <iostream>
|
||||
|
||||
using namespace std;
|
||||
using namespace Eigen;
|
||||
|
||||
int main()
|
||||
{
|
||||
ArrayXXf a(2,2);
|
||||
Eigen::ArrayXXf a(2,2);
|
||||
|
||||
a << 1,2,
|
||||
3,4;
|
||||
|
||||
cout << "(a > 0).all() = " << (a > 0).all() << endl;
|
||||
cout << "(a > 0).any() = " << (a > 0).any() << endl;
|
||||
cout << "(a > 0).count() = " << (a > 0).count() << endl;
|
||||
cout << endl;
|
||||
cout << "(a > 2).all() = " << (a > 2).all() << endl;
|
||||
cout << "(a > 2).any() = " << (a > 2).any() << endl;
|
||||
cout << "(a > 2).count() = " << (a > 2).count() << endl;
|
||||
std::cout << "(a > 0).all() = " << (a > 0).all() << std::endl;
|
||||
std::cout << "(a > 0).any() = " << (a > 0).any() << std::endl;
|
||||
std::cout << "(a > 0).count() = " << (a > 0).count() << std::endl;
|
||||
std::cout << std::endl;
|
||||
std::cout << "(a > 2).all() = " << (a > 2).all() << std::endl;
|
||||
std::cout << "(a > 2).any() = " << (a > 2).any() << std::endl;
|
||||
std::cout << "(a > 2).count() = " << (a > 2).count() << std::endl;
|
||||
}
|
||||
|
||||
@@ -1,13 +1,10 @@
|
||||
#include <Eigen/Dense>
|
||||
#include <iostream>
|
||||
|
||||
using namespace std;
|
||||
using namespace Eigen;
|
||||
|
||||
int main()
|
||||
{
|
||||
VectorXf v(2);
|
||||
MatrixXf m(2,2), n(2,2);
|
||||
Eigen::VectorXf v(2);
|
||||
Eigen::MatrixXf m(2,2), n(2,2);
|
||||
|
||||
v << -1,
|
||||
2;
|
||||
@@ -15,14 +12,14 @@ int main()
|
||||
m << 1,-2,
|
||||
-3,4;
|
||||
|
||||
cout << "v.squaredNorm() = " << v.squaredNorm() << endl;
|
||||
cout << "v.norm() = " << v.norm() << endl;
|
||||
cout << "v.lpNorm<1>() = " << v.lpNorm<1>() << endl;
|
||||
cout << "v.lpNorm<Infinity>() = " << v.lpNorm<Infinity>() << endl;
|
||||
std::cout << "v.squaredNorm() = " << v.squaredNorm() << std::endl;
|
||||
std::cout << "v.norm() = " << v.norm() << std::endl;
|
||||
std::cout << "v.lpNorm<1>() = " << v.lpNorm<1>() << std::endl;
|
||||
std::cout << "v.lpNorm<Infinity>() = " << v.lpNorm<Eigen::Infinity>() << std::endl;
|
||||
|
||||
cout << endl;
|
||||
cout << "m.squaredNorm() = " << m.squaredNorm() << endl;
|
||||
cout << "m.norm() = " << m.norm() << endl;
|
||||
cout << "m.lpNorm<1>() = " << m.lpNorm<1>() << endl;
|
||||
cout << "m.lpNorm<Infinity>() = " << m.lpNorm<Infinity>() << endl;
|
||||
std::cout << std::endl;
|
||||
std::cout << "m.squaredNorm() = " << m.squaredNorm() << std::endl;
|
||||
std::cout << "m.norm() = " << m.norm() << std::endl;
|
||||
std::cout << "m.lpNorm<1>() = " << m.lpNorm<1>() << std::endl;
|
||||
std::cout << "m.lpNorm<Infinity>() = " << m.lpNorm<Eigen::Infinity>() << std::endl;
|
||||
}
|
||||
|
||||
@@ -1,18 +1,15 @@
|
||||
#include <Eigen/Dense>
|
||||
#include <iostream>
|
||||
|
||||
using namespace Eigen;
|
||||
using namespace std;
|
||||
|
||||
int main()
|
||||
{
|
||||
MatrixXf m(2,2);
|
||||
Eigen::MatrixXf m(2,2);
|
||||
m << 1,-2,
|
||||
-3,4;
|
||||
|
||||
cout << "1-norm(m) = " << m.cwiseAbs().colwise().sum().maxCoeff()
|
||||
<< " == " << m.colwise().lpNorm<1>().maxCoeff() << endl;
|
||||
std::cout << "1-norm(m) = " << m.cwiseAbs().colwise().sum().maxCoeff()
|
||||
<< " == " << m.colwise().lpNorm<1>().maxCoeff() << std::endl;
|
||||
|
||||
cout << "infty-norm(m) = " << m.cwiseAbs().rowwise().sum().maxCoeff()
|
||||
<< " == " << m.rowwise().lpNorm<1>().maxCoeff() << endl;
|
||||
std::cout << "infty-norm(m) = " << m.cwiseAbs().rowwise().sum().maxCoeff()
|
||||
<< " == " << m.rowwise().lpNorm<1>().maxCoeff() << std::endl;
|
||||
}
|
||||
|
||||
@@ -1,9 +1,6 @@
|
||||
#include <iostream>
|
||||
#include <Eigen/Dense>
|
||||
|
||||
using namespace std;
|
||||
using namespace Eigen;
|
||||
|
||||
int main()
|
||||
{
|
||||
Eigen::MatrixXf m(2,2);
|
||||
@@ -12,15 +9,15 @@ int main()
|
||||
3, 4;
|
||||
|
||||
//get location of maximum
|
||||
MatrixXf::Index maxRow, maxCol;
|
||||
Eigen::Index maxRow, maxCol;
|
||||
float max = m.maxCoeff(&maxRow, &maxCol);
|
||||
|
||||
//get location of minimum
|
||||
MatrixXf::Index minRow, minCol;
|
||||
Eigen::Index minRow, minCol;
|
||||
float min = m.minCoeff(&minRow, &minCol);
|
||||
|
||||
cout << "Max: " << max << ", at: " <<
|
||||
maxRow << "," << maxCol << endl;
|
||||
cout << "Min: " << min << ", at: " <<
|
||||
minRow << "," << minCol << endl;
|
||||
std::cout << "Max: " << max << ", at: " <<
|
||||
maxRow << "," << maxCol << std::endl;
|
||||
std:: cout << "Min: " << min << ", at: " <<
|
||||
minRow << "," << minCol << std::endl;
|
||||
}
|
||||
|
||||
@@ -1,13 +1,11 @@
|
||||
#include <Eigen/Core>
|
||||
#include <iostream>
|
||||
|
||||
using namespace Eigen;
|
||||
|
||||
int main()
|
||||
{
|
||||
for (int size=1; size<=4; ++size)
|
||||
{
|
||||
MatrixXi m(size,size+1); // a (size)x(size+1)-matrix of int's
|
||||
Eigen::MatrixXi m(size,size+1); // a (size)x(size+1)-matrix of int's
|
||||
for (int j=0; j<m.cols(); ++j) // loop over columns
|
||||
for (int i=0; i<m.rows(); ++i) // loop over rows
|
||||
m(i,j) = i+j*size; // to access matrix coefficients,
|
||||
@@ -15,7 +13,7 @@ int main()
|
||||
std::cout << m << "\n\n";
|
||||
}
|
||||
|
||||
VectorXf v(4); // a vector of 4 float's
|
||||
Eigen::VectorXf v(4); // a vector of 4 float's
|
||||
// to access vector coefficients, use either operator () or operator []
|
||||
v[0] = 1; v[1] = 2; v(2) = 3; v(3) = 4;
|
||||
std::cout << "\nv:\n" << v << std::endl;
|
||||
|
||||
@@ -1,14 +1,12 @@
|
||||
#include <Eigen/Core>
|
||||
#include <iostream>
|
||||
|
||||
using namespace Eigen;
|
||||
|
||||
int main()
|
||||
{
|
||||
Matrix3f m3;
|
||||
Eigen::Matrix3f m3;
|
||||
m3 << 1, 2, 3, 4, 5, 6, 7, 8, 9;
|
||||
Matrix4f m4 = Matrix4f::Identity();
|
||||
Vector4i v4(1, 2, 3, 4);
|
||||
Eigen::Matrix4f m4 = Eigen::Matrix4f::Identity();
|
||||
Eigen::Vector4i v4(1, 2, 3, 4);
|
||||
|
||||
std::cout << "m3\n" << m3 << "\nm4:\n"
|
||||
<< m4 << "\nv4:\n" << v4 << std::endl;
|
||||
|
||||
@@ -1,27 +1,25 @@
|
||||
#include <Eigen/Core>
|
||||
#include <iostream>
|
||||
using namespace Eigen;
|
||||
using namespace std;
|
||||
|
||||
template<typename Derived>
|
||||
Eigen::Block<Derived>
|
||||
topLeftCorner(MatrixBase<Derived>& m, int rows, int cols)
|
||||
topLeftCorner(Eigen::MatrixBase<Derived>& m, int rows, int cols)
|
||||
{
|
||||
return Eigen::Block<Derived>(m.derived(), 0, 0, rows, cols);
|
||||
}
|
||||
|
||||
template<typename Derived>
|
||||
const Eigen::Block<const Derived>
|
||||
topLeftCorner(const MatrixBase<Derived>& m, int rows, int cols)
|
||||
topLeftCorner(const Eigen::MatrixBase<Derived>& m, int rows, int cols)
|
||||
{
|
||||
return Eigen::Block<const Derived>(m.derived(), 0, 0, rows, cols);
|
||||
}
|
||||
|
||||
int main(int, char**)
|
||||
{
|
||||
Matrix4d m = Matrix4d::Identity();
|
||||
cout << topLeftCorner(4*m, 2, 3) << endl; // calls the const version
|
||||
Eigen::Matrix4d m = Eigen::Matrix4d::Identity();
|
||||
std::cout << topLeftCorner(4*m, 2, 3) << std::endl; // calls the const version
|
||||
topLeftCorner(m, 2, 3) *= 5; // calls the non-const version
|
||||
cout << "Now the matrix m is:" << endl << m << endl;
|
||||
std::cout << "Now the matrix m is:" << std::endl << m << std::endl;
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -1,18 +1,17 @@
|
||||
#include <Eigen/Core>
|
||||
#include <iostream>
|
||||
using namespace Eigen;
|
||||
using namespace std;
|
||||
|
||||
using Eigen::Matrix4d;
|
||||
|
||||
// define a custom template binary functor
|
||||
template<typename Scalar> struct MakeComplexOp {
|
||||
EIGEN_EMPTY_STRUCT_CTOR(MakeComplexOp)
|
||||
typedef complex<Scalar> result_type;
|
||||
complex<Scalar> operator()(const Scalar& a, const Scalar& b) const { return complex<Scalar>(a,b); }
|
||||
typedef std::complex<Scalar> result_type;
|
||||
result_type operator()(const Scalar& a, const Scalar& b) const { return result_type(a,b); }
|
||||
};
|
||||
|
||||
int main(int, char**)
|
||||
{
|
||||
Matrix4d m1 = Matrix4d::Random(), m2 = Matrix4d::Random();
|
||||
cout << m1.binaryExpr(m2, MakeComplexOp<double>()) << endl;
|
||||
std::cout << m1.binaryExpr(m2, MakeComplexOp<double>()) << std::endl;
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -1,7 +1,5 @@
|
||||
#include <Eigen/Core>
|
||||
#include <iostream>
|
||||
using namespace Eigen;
|
||||
using namespace std;
|
||||
|
||||
// define a custom template unary functor
|
||||
template<typename Scalar>
|
||||
@@ -13,7 +11,7 @@ struct CwiseClampOp {
|
||||
|
||||
int main(int, char**)
|
||||
{
|
||||
Matrix4d m1 = Matrix4d::Random();
|
||||
cout << m1 << endl << "becomes: " << endl << m1.unaryExpr(CwiseClampOp<double>(-0.5,0.5)) << endl;
|
||||
Eigen::Matrix4d m1 = Eigen::Matrix4d::Random();
|
||||
std::cout << m1 << std::endl << "becomes: " << std::endl << m1.unaryExpr(CwiseClampOp<double>(-0.5,0.5)) << std::endl;
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -1,7 +1,5 @@
|
||||
#include <Eigen/Core>
|
||||
#include <iostream>
|
||||
using namespace Eigen;
|
||||
using namespace std;
|
||||
|
||||
// define function to be applied coefficient-wise
|
||||
double ramp(double x)
|
||||
@@ -14,7 +12,7 @@ double ramp(double x)
|
||||
|
||||
int main(int, char**)
|
||||
{
|
||||
Matrix4d m1 = Matrix4d::Random();
|
||||
cout << m1 << endl << "becomes: " << endl << m1.unaryExpr(ptr_fun(ramp)) << endl;
|
||||
Eigen::Matrix4d m1 = Eigen::Matrix4d::Random();
|
||||
std::cout << m1 << std::endl << "becomes: " << std::endl << m1.unaryExpr(std::ptr_fun(ramp)) << std::endl;
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -1,27 +1,25 @@
|
||||
#include <Eigen/Core>
|
||||
#include <iostream>
|
||||
using namespace Eigen;
|
||||
using namespace std;
|
||||
|
||||
template<typename Derived>
|
||||
Eigen::Block<Derived, 2, 2>
|
||||
topLeft2x2Corner(MatrixBase<Derived>& m)
|
||||
topLeft2x2Corner(Eigen::MatrixBase<Derived>& m)
|
||||
{
|
||||
return Eigen::Block<Derived, 2, 2>(m.derived(), 0, 0);
|
||||
}
|
||||
|
||||
template<typename Derived>
|
||||
const Eigen::Block<const Derived, 2, 2>
|
||||
topLeft2x2Corner(const MatrixBase<Derived>& m)
|
||||
topLeft2x2Corner(const Eigen::MatrixBase<Derived>& m)
|
||||
{
|
||||
return Eigen::Block<const Derived, 2, 2>(m.derived(), 0, 0);
|
||||
}
|
||||
|
||||
int main(int, char**)
|
||||
{
|
||||
Matrix3d m = Matrix3d::Identity();
|
||||
cout << topLeft2x2Corner(4*m) << endl; // calls the const version
|
||||
Eigen::Matrix3d m = Eigen::Matrix3d::Identity();
|
||||
std::cout << topLeft2x2Corner(4*m) << std::endl; // calls the const version
|
||||
topLeft2x2Corner(m) *= 2; // calls the non-const version
|
||||
cout << "Now the matrix m is:" << endl << m << endl;
|
||||
std::cout << "Now the matrix m is:" << std::endl << m << std::endl;
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -1,22 +1,20 @@
|
||||
#include <Eigen/Core>
|
||||
#include <iostream>
|
||||
using namespace Eigen;
|
||||
using namespace std;
|
||||
|
||||
template<typename Derived>
|
||||
Eigen::Reshaped<Derived, 4, 2>
|
||||
reshape_helper(MatrixBase<Derived>& m)
|
||||
reshape_helper(Eigen::MatrixBase<Derived>& m)
|
||||
{
|
||||
return Eigen::Reshaped<Derived, 4, 2>(m.derived());
|
||||
}
|
||||
|
||||
int main(int, char**)
|
||||
{
|
||||
MatrixXd m(2, 4);
|
||||
Eigen::MatrixXd m(2, 4);
|
||||
m << 1, 2, 3, 4,
|
||||
5, 6, 7, 8;
|
||||
MatrixXd n = reshape_helper(m);
|
||||
cout << "matrix m is:" << endl << m << endl;
|
||||
cout << "matrix n is:" << endl << n << endl;
|
||||
Eigen::MatrixXd n = reshape_helper(m);
|
||||
std::cout << "matrix m is:" << std::endl << m << std::endl;
|
||||
std::cout << "matrix n is:" << std::endl << n << std::endl;
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -1,27 +1,25 @@
|
||||
#include <Eigen/Core>
|
||||
#include <iostream>
|
||||
using namespace Eigen;
|
||||
using namespace std;
|
||||
|
||||
template<typename Derived>
|
||||
Eigen::VectorBlock<Derived, 2>
|
||||
firstTwo(MatrixBase<Derived>& v)
|
||||
firstTwo(Eigen::MatrixBase<Derived>& v)
|
||||
{
|
||||
return Eigen::VectorBlock<Derived, 2>(v.derived(), 0);
|
||||
}
|
||||
|
||||
template<typename Derived>
|
||||
const Eigen::VectorBlock<const Derived, 2>
|
||||
firstTwo(const MatrixBase<Derived>& v)
|
||||
firstTwo(const Eigen::MatrixBase<Derived>& v)
|
||||
{
|
||||
return Eigen::VectorBlock<const Derived, 2>(v.derived(), 0);
|
||||
}
|
||||
|
||||
int main(int, char**)
|
||||
{
|
||||
Matrix<int,1,6> v; v << 1,2,3,4,5,6;
|
||||
cout << firstTwo(4*v) << endl; // calls the const version
|
||||
Eigen::Matrix<int,1,6> v; v << 1,2,3,4,5,6;
|
||||
std::cout << firstTwo(4*v) << std::endl; // calls the const version
|
||||
firstTwo(v) *= 2; // calls the non-const version
|
||||
cout << "Now the vector v is:" << endl << v << endl;
|
||||
std::cout << "Now the vector v is:" << std::endl << v << std::endl;
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -1,23 +1,21 @@
|
||||
#include <Eigen/Core>
|
||||
#include <iostream>
|
||||
using namespace std;
|
||||
using namespace Eigen;
|
||||
|
||||
template<typename Derived>
|
||||
const Reshaped<const Derived>
|
||||
reshape_helper(const MatrixBase<Derived>& m, int rows, int cols)
|
||||
const Eigen::Reshaped<const Derived>
|
||||
reshape_helper(const Eigen::MatrixBase<Derived>& m, int rows, int cols)
|
||||
{
|
||||
return Reshaped<const Derived>(m.derived(), rows, cols);
|
||||
return Eigen::Reshaped<const Derived>(m.derived(), rows, cols);
|
||||
}
|
||||
|
||||
int main(int, char**)
|
||||
{
|
||||
MatrixXd m(3, 4);
|
||||
Eigen::MatrixXd m(3, 4);
|
||||
m << 1, 4, 7, 10,
|
||||
2, 5, 8, 11,
|
||||
3, 6, 9, 12;
|
||||
cout << m << endl;
|
||||
Ref<const MatrixXd> n = reshape_helper(m, 2, 6);
|
||||
cout << "Matrix m is:" << endl << m << endl;
|
||||
cout << "Matrix n is:" << endl << n << endl;
|
||||
std::cout << m << std::endl;
|
||||
Eigen::Ref<const Eigen::MatrixXd> n = reshape_helper(m, 2, 6);
|
||||
std::cout << "Matrix m is:" << std::endl << m << std::endl;
|
||||
std::cout << "Matrix n is:" << std::endl << n << std::endl;
|
||||
}
|
||||
|
||||
@@ -1,27 +1,25 @@
|
||||
#include <Eigen/Core>
|
||||
#include <iostream>
|
||||
using namespace Eigen;
|
||||
using namespace std;
|
||||
|
||||
template<typename Derived>
|
||||
Eigen::VectorBlock<Derived>
|
||||
segmentFromRange(MatrixBase<Derived>& v, int start, int end)
|
||||
segmentFromRange(Eigen::MatrixBase<Derived>& v, int start, int end)
|
||||
{
|
||||
return Eigen::VectorBlock<Derived>(v.derived(), start, end-start);
|
||||
}
|
||||
|
||||
template<typename Derived>
|
||||
const Eigen::VectorBlock<const Derived>
|
||||
segmentFromRange(const MatrixBase<Derived>& v, int start, int end)
|
||||
segmentFromRange(const Eigen::MatrixBase<Derived>& v, int start, int end)
|
||||
{
|
||||
return Eigen::VectorBlock<const Derived>(v.derived(), start, end-start);
|
||||
}
|
||||
|
||||
int main(int, char**)
|
||||
{
|
||||
Matrix<int,1,6> v; v << 1,2,3,4,5,6;
|
||||
cout << segmentFromRange(2*v, 2, 4) << endl; // calls the const version
|
||||
Eigen::Matrix<int,1,6> v; v << 1,2,3,4,5,6;
|
||||
std::cout << segmentFromRange(2*v, 2, 4) << std::endl; // calls the const version
|
||||
segmentFromRange(v, 1, 3) *= 5; // calls the non-const version
|
||||
cout << "Now the vector v is:" << endl << v << endl;
|
||||
std::cout << "Now the vector v is:" << std::endl << v << std::endl;
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -1,9 +1,8 @@
|
||||
#include <iostream>
|
||||
#include <Eigen/Core>
|
||||
using namespace Eigen;
|
||||
|
||||
template <typename Derived>
|
||||
void print_size(const EigenBase<Derived>& b)
|
||||
void print_size(const Eigen::EigenBase<Derived>& b)
|
||||
{
|
||||
std::cout << "size (rows, cols): " << b.size() << " (" << b.rows()
|
||||
<< ", " << b.cols() << ")" << std::endl;
|
||||
@@ -11,7 +10,7 @@ void print_size(const EigenBase<Derived>& b)
|
||||
|
||||
int main()
|
||||
{
|
||||
Vector3f v;
|
||||
Eigen::Vector3f v;
|
||||
print_size(v);
|
||||
// v.asDiagonal() returns a 3x3 diagonal matrix pseudo-expression
|
||||
print_size(v.asDiagonal());
|
||||
|
||||
@@ -1,19 +1,17 @@
|
||||
#include <iostream>
|
||||
#include <Eigen/SVD>
|
||||
using namespace Eigen;
|
||||
using namespace std;
|
||||
|
||||
float inv_cond(const Ref<const MatrixXf>& a)
|
||||
float inv_cond(const Eigen::Ref<const Eigen::MatrixXf>& a)
|
||||
{
|
||||
const VectorXf sing_vals = a.jacobiSvd().singularValues();
|
||||
const Eigen::VectorXf sing_vals = a.jacobiSvd().singularValues();
|
||||
return sing_vals(sing_vals.size()-1) / sing_vals(0);
|
||||
}
|
||||
|
||||
int main()
|
||||
{
|
||||
Matrix4f m = Matrix4f::Random();
|
||||
cout << "matrix m:" << endl << m << endl << endl;
|
||||
cout << "inv_cond(m): " << inv_cond(m) << endl;
|
||||
cout << "inv_cond(m(1:3,1:3)): " << inv_cond(m.topLeftCorner(3,3)) << endl;
|
||||
cout << "inv_cond(m+I): " << inv_cond(m+Matrix4f::Identity()) << endl;
|
||||
Eigen::MatrixXf m = Eigen::MatrixXf::Random(4, 4);
|
||||
std::cout << "matrix m:\n" << m << "\n\n";
|
||||
std::cout << "inv_cond(m): " << inv_cond(m) << "\n";
|
||||
std::cout << "inv_cond(m(1:3,1:3)): " << inv_cond(m.topLeftCorner(3,3)) << "\n";
|
||||
std::cout << "inv_cond(m+I): " << inv_cond(m+Eigen::MatrixXf::Identity(4, 4)) << "\n";
|
||||
}
|
||||
|
||||
@@ -6,7 +6,7 @@ namespace Eigen {
|
||||
{
|
||||
typedef Circulant<ArgType> XprType;
|
||||
typedef typename nested_eval<ArgType, XprType::ColsAtCompileTime>::type ArgTypeNested;
|
||||
typedef typename remove_all<ArgTypeNested>::type ArgTypeNestedCleaned;
|
||||
typedef remove_all_t<ArgTypeNested> ArgTypeNestedCleaned;
|
||||
typedef typename XprType::CoeffReturnType CoeffReturnType;
|
||||
|
||||
enum {
|
||||
|
||||
@@ -1,8 +1,6 @@
|
||||
#include <Eigen/Core>
|
||||
#include <iostream>
|
||||
|
||||
using namespace Eigen;
|
||||
|
||||
// [circulant_func]
|
||||
template<class ArgType>
|
||||
class circulant_functor {
|
||||
@@ -10,8 +8,8 @@ class circulant_functor {
|
||||
public:
|
||||
circulant_functor(const ArgType& arg) : m_vec(arg) {}
|
||||
|
||||
const typename ArgType::Scalar& operator() (Index row, Index col) const {
|
||||
Index index = row - col;
|
||||
const typename ArgType::Scalar& operator() (Eigen::Index row, Eigen::Index col) const {
|
||||
Eigen::Index index = row - col;
|
||||
if (index < 0) index += m_vec.size();
|
||||
return m_vec(index);
|
||||
}
|
||||
@@ -21,10 +19,10 @@ public:
|
||||
// [square]
|
||||
template<class ArgType>
|
||||
struct circulant_helper {
|
||||
typedef Matrix<typename ArgType::Scalar,
|
||||
typedef Eigen::Matrix<typename ArgType::Scalar,
|
||||
ArgType::SizeAtCompileTime,
|
||||
ArgType::SizeAtCompileTime,
|
||||
ColMajor,
|
||||
Eigen::ColMajor,
|
||||
ArgType::MaxSizeAtCompileTime,
|
||||
ArgType::MaxSizeAtCompileTime> MatrixType;
|
||||
};
|
||||
@@ -32,7 +30,7 @@ struct circulant_helper {
|
||||
|
||||
// [makeCirculant]
|
||||
template <class ArgType>
|
||||
CwiseNullaryOp<circulant_functor<ArgType>, typename circulant_helper<ArgType>::MatrixType>
|
||||
Eigen::CwiseNullaryOp<circulant_functor<ArgType>, typename circulant_helper<ArgType>::MatrixType>
|
||||
makeCirculant(const Eigen::MatrixBase<ArgType>& arg)
|
||||
{
|
||||
typedef typename circulant_helper<ArgType>::MatrixType MatrixType;
|
||||
|
||||
@@ -1,8 +1,6 @@
|
||||
#include <Eigen/Core>
|
||||
#include <iostream>
|
||||
|
||||
using namespace Eigen;
|
||||
|
||||
// [functor]
|
||||
template<class ArgType, class RowIndexType, class ColIndexType>
|
||||
class indexing_functor {
|
||||
@@ -10,10 +8,10 @@ class indexing_functor {
|
||||
const RowIndexType &m_rowIndices;
|
||||
const ColIndexType &m_colIndices;
|
||||
public:
|
||||
typedef Matrix<typename ArgType::Scalar,
|
||||
typedef Eigen::Matrix<typename ArgType::Scalar,
|
||||
RowIndexType::SizeAtCompileTime,
|
||||
ColIndexType::SizeAtCompileTime,
|
||||
ArgType::Flags&RowMajorBit?RowMajor:ColMajor,
|
||||
ArgType::Flags&Eigen::RowMajorBit?Eigen::RowMajor:Eigen::ColMajor,
|
||||
RowIndexType::MaxSizeAtCompileTime,
|
||||
ColIndexType::MaxSizeAtCompileTime> MatrixType;
|
||||
|
||||
@@ -21,7 +19,7 @@ public:
|
||||
: m_arg(arg), m_rowIndices(row_indices), m_colIndices(col_indices)
|
||||
{}
|
||||
|
||||
const typename ArgType::Scalar& operator() (Index row, Index col) const {
|
||||
const typename ArgType::Scalar& operator() (Eigen::Index row, Eigen::Index col) const {
|
||||
return m_arg(m_rowIndices[row], m_colIndices[col]);
|
||||
}
|
||||
};
|
||||
@@ -29,7 +27,7 @@ public:
|
||||
|
||||
// [function]
|
||||
template <class ArgType, class RowIndexType, class ColIndexType>
|
||||
CwiseNullaryOp<indexing_functor<ArgType,RowIndexType,ColIndexType>, typename indexing_functor<ArgType,RowIndexType,ColIndexType>::MatrixType>
|
||||
Eigen::CwiseNullaryOp<indexing_functor<ArgType,RowIndexType,ColIndexType>, typename indexing_functor<ArgType,RowIndexType,ColIndexType>::MatrixType>
|
||||
mat_indexing(const Eigen::MatrixBase<ArgType>& arg, const RowIndexType& row_indices, const ColIndexType& col_indices)
|
||||
{
|
||||
typedef indexing_functor<ArgType,RowIndexType,ColIndexType> Func;
|
||||
@@ -43,8 +41,8 @@ int main()
|
||||
{
|
||||
std::cout << "[main1]\n";
|
||||
Eigen::MatrixXi A = Eigen::MatrixXi::Random(4,4);
|
||||
Array3i ri(1,2,1);
|
||||
ArrayXi ci(6); ci << 3,2,1,0,0,2;
|
||||
Eigen::Array3i ri(1,2,1);
|
||||
Eigen::ArrayXi ci(6); ci << 3,2,1,0,0,2;
|
||||
Eigen::MatrixXi B = mat_indexing(A, ri, ci);
|
||||
std::cout << "A =" << std::endl;
|
||||
std::cout << A << std::endl << std::endl;
|
||||
@@ -56,11 +54,9 @@ int main()
|
||||
B = mat_indexing(A, ri+1, ci);
|
||||
std::cout << "A(ri+1,ci) =" << std::endl;
|
||||
std::cout << B << std::endl << std::endl;
|
||||
#if EIGEN_COMP_CXXVER >= 11
|
||||
B = mat_indexing(A, ArrayXi::LinSpaced(13,0,12).unaryExpr([](int x){return x%4;}), ArrayXi::LinSpaced(4,0,3));
|
||||
B = mat_indexing(A, Eigen::ArrayXi::LinSpaced(13,0,12).unaryExpr([](int x){return x%4;}), Eigen::ArrayXi::LinSpaced(4,0,3));
|
||||
std::cout << "A(ArrayXi::LinSpaced(13,0,12).unaryExpr([](int x){return x%4;}), ArrayXi::LinSpaced(4,0,3)) =" << std::endl;
|
||||
std::cout << B << std::endl << std::endl;
|
||||
#endif
|
||||
std::cout << "[main2]\n";
|
||||
}
|
||||
|
||||
|
||||
@@ -1,14 +1,12 @@
|
||||
#include <iostream>
|
||||
#include <Eigen/Dense>
|
||||
|
||||
using namespace Eigen;
|
||||
|
||||
int main()
|
||||
{
|
||||
Matrix2d a;
|
||||
Eigen::Matrix2d a;
|
||||
a << 1, 2,
|
||||
3, 4;
|
||||
MatrixXd b(2,2);
|
||||
Eigen::MatrixXd b(2,2);
|
||||
b << 2, 3,
|
||||
1, 4;
|
||||
std::cout << "a + b =\n" << a + b << std::endl;
|
||||
@@ -16,7 +14,7 @@ int main()
|
||||
std::cout << "Doing a += b;" << std::endl;
|
||||
a += b;
|
||||
std::cout << "Now a =\n" << a << std::endl;
|
||||
Vector3d v(1,2,3);
|
||||
Vector3d w(1,0,0);
|
||||
Eigen::Vector3d v(1,2,3);
|
||||
Eigen::Vector3d w(1,0,0);
|
||||
std::cout << "-v + w - v =\n" << -v + w - v << std::endl;
|
||||
}
|
||||
|
||||
@@ -1,15 +1,18 @@
|
||||
#include <iostream>
|
||||
#include <Eigen/Dense>
|
||||
|
||||
using namespace Eigen;
|
||||
using namespace std;
|
||||
int main()
|
||||
{
|
||||
Vector3d v(1,2,3);
|
||||
Vector3d w(0,1,2);
|
||||
Eigen::Vector3d v(1,2,3);
|
||||
Eigen::Vector3d w(0,1,2);
|
||||
|
||||
cout << "Dot product: " << v.dot(w) << endl;
|
||||
std::cout << "Dot product: " << v.dot(w) << std::endl;
|
||||
double dp = v.adjoint()*w; // automatic conversion of the inner product to a scalar
|
||||
cout << "Dot product via a matrix product: " << dp << endl;
|
||||
cout << "Cross product:\n" << v.cross(w) << endl;
|
||||
std::cout << "Dot product via a matrix product: " << dp << std::endl;
|
||||
|
||||
std::cout << "Cross product:\n" << v.cross(w) << std::endl;
|
||||
Eigen::Vector2d v2(1,2);
|
||||
Eigen::Vector2d w2(0,1);
|
||||
double cp = v2.cross(w2); // returning a scalar between size-2 vectors
|
||||
std::cout << "Cross product for 2D vectors: " << cp << std::endl;
|
||||
}
|
||||
|
||||
@@ -1,13 +1,12 @@
|
||||
#include <iostream>
|
||||
#include <Eigen/Dense>
|
||||
|
||||
using namespace Eigen;
|
||||
int main()
|
||||
{
|
||||
Matrix2d mat;
|
||||
Eigen::Matrix2d mat;
|
||||
mat << 1, 2,
|
||||
3, 4;
|
||||
Vector2d u(-1,1), v(2,0);
|
||||
Eigen::Vector2d u(-1,1), v(2,0);
|
||||
std::cout << "Here is mat*mat:\n" << mat*mat << std::endl;
|
||||
std::cout << "Here is mat*u:\n" << mat*u << std::endl;
|
||||
std::cout << "Here is u^T*mat:\n" << u.transpose()*mat << std::endl;
|
||||
|
||||
@@ -1,14 +1,12 @@
|
||||
#include <iostream>
|
||||
#include <Eigen/Dense>
|
||||
|
||||
using namespace Eigen;
|
||||
|
||||
int main()
|
||||
{
|
||||
Matrix2d a;
|
||||
Eigen::Matrix2d a;
|
||||
a << 1, 2,
|
||||
3, 4;
|
||||
Vector3d v(1,2,3);
|
||||
Eigen::Vector3d v(1,2,3);
|
||||
std::cout << "a * 2.5 =\n" << a * 2.5 << std::endl;
|
||||
std::cout << "0.1 * v =\n" << 0.1 * v << std::endl;
|
||||
std::cout << "Doing v *= 2;" << std::endl;
|
||||
|
||||
@@ -1,17 +1,15 @@
|
||||
#include <iostream>
|
||||
#include <Eigen/Dense>
|
||||
|
||||
using namespace Eigen;
|
||||
|
||||
int main()
|
||||
{
|
||||
MatrixXd m(2,2);
|
||||
Eigen::MatrixXd m(2,2);
|
||||
m(0,0) = 3;
|
||||
m(1,0) = 2.5;
|
||||
m(0,1) = -1;
|
||||
m(1,1) = m(1,0) + m(0,1);
|
||||
std::cout << "Here is the matrix m:\n" << m << std::endl;
|
||||
VectorXd v(2);
|
||||
Eigen::VectorXd v(2);
|
||||
v(0) = 4;
|
||||
v(1) = v(0) - 1;
|
||||
std::cout << "Here is the vector v:\n" << v << std::endl;
|
||||
|
||||
@@ -1,16 +1,14 @@
|
||||
#include <iostream>
|
||||
#include <Eigen/Dense>
|
||||
|
||||
using namespace Eigen;
|
||||
|
||||
int main()
|
||||
{
|
||||
MatrixXd m(2,5);
|
||||
Eigen::MatrixXd m(2,5);
|
||||
m.resize(4,3);
|
||||
std::cout << "The matrix m is of size "
|
||||
<< m.rows() << "x" << m.cols() << std::endl;
|
||||
std::cout << "It has " << m.size() << " coefficients" << std::endl;
|
||||
VectorXd v(2);
|
||||
Eigen::VectorXd v(2);
|
||||
v.resize(5);
|
||||
std::cout << "The vector v is of size " << v.size() << std::endl;
|
||||
std::cout << "As a matrix, v is of size "
|
||||
|
||||
@@ -1,11 +1,9 @@
|
||||
#include <iostream>
|
||||
#include <Eigen/Dense>
|
||||
|
||||
using namespace Eigen;
|
||||
|
||||
int main()
|
||||
{
|
||||
Matrix4d m;
|
||||
Eigen::Matrix4d m;
|
||||
m.resize(4,4); // no operation
|
||||
std::cout << "The matrix m is of size "
|
||||
<< m.rows() << "x" << m.cols() << std::endl;
|
||||
|
||||
@@ -6,31 +6,26 @@ foreach(snippet_src ${snippets_SRCS})
get_filename_component(snippet ${snippet_src} NAME_WE)
set(compile_snippet_target compile_${snippet})
set(compile_snippet_src ${compile_snippet_target}.cpp)
if((NOT ${snippet_src} MATCHES "cxx11") OR EIGEN_COMPILER_SUPPORT_CPP11)
file(READ ${snippet_src} snippet_source_code)
configure_file(${CMAKE_CURRENT_SOURCE_DIR}/compile_snippet.cpp.in
${CMAKE_CURRENT_BINARY_DIR}/${compile_snippet_src})
add_executable(${compile_snippet_target}
${CMAKE_CURRENT_BINARY_DIR}/${compile_snippet_src})
if(EIGEN_STANDARD_LIBRARIES_TO_LINK_TO)
target_link_libraries(${compile_snippet_target} ${EIGEN_STANDARD_LIBRARIES_TO_LINK_TO})
endif()
if(${snippet_src} MATCHES "cxx11")
set_target_properties(${compile_snippet_target} PROPERTIES COMPILE_FLAGS "-std=c++11")
endif()
if(${snippet_src} MATCHES "deprecated")
set_target_properties(${compile_snippet_target} PROPERTIES COMPILE_FLAGS "-DEIGEN_NO_DEPRECATED_WARNING")
endif()
add_custom_command(
TARGET ${compile_snippet_target}
POST_BUILD
COMMAND ${compile_snippet_target}
ARGS >${CMAKE_CURRENT_BINARY_DIR}/${snippet}.out
)
add_dependencies(all_snippets ${compile_snippet_target})
set_source_files_properties(${CMAKE_CURRENT_BINARY_DIR}/${compile_snippet_src}
PROPERTIES OBJECT_DEPENDS ${snippet_src})
else()
message("skip snippet ${snippet_src} because compiler does not support C++11")

file(READ ${snippet_src} snippet_source_code)
configure_file(${CMAKE_CURRENT_SOURCE_DIR}/compile_snippet.cpp.in
${CMAKE_CURRENT_BINARY_DIR}/${compile_snippet_src})
add_executable(${compile_snippet_target}
${CMAKE_CURRENT_BINARY_DIR}/${compile_snippet_src})
if(EIGEN_STANDARD_LIBRARIES_TO_LINK_TO)
target_link_libraries(${compile_snippet_target} ${EIGEN_STANDARD_LIBRARIES_TO_LINK_TO})
endif()

if(${snippet_src} MATCHES "deprecated")
set_target_properties(${compile_snippet_target} PROPERTIES COMPILE_FLAGS "-DEIGEN_NO_DEPRECATED_WARNING")
endif()
add_custom_command(
TARGET ${compile_snippet_target}
POST_BUILD
COMMAND ${compile_snippet_target}
ARGS >${CMAKE_CURRENT_BINARY_DIR}/${snippet}.out
)
add_dependencies(all_snippets ${compile_snippet_target})
set_source_files_properties(${CMAKE_CURRENT_BINARY_DIR}/${compile_snippet_src}
PROPERTIES OBJECT_DEPENDS ${snippet_src})
endforeach()

libs/eigen/doc/snippets/Cwise_array_atan2_array.cpp (new file, 4 lines)
@@ -0,0 +1,4 @@
Array<double,1,3> x(8,-25,3),
y(1./3.,0.5,-2.);
cout << "atan2([" << x << "], [" << y << "]) = " << x.atan2(y) << endl; // using ArrayBase::atan2
cout << "atan2([" << x << "], [" << y << "]) = " << atan2(x,y) << endl; // using Eigen::atan2
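// The snippet above is compiled inside compile_snippet.cpp.in, which supplies the
// includes and using-directives. A self-contained equivalent would look roughly
// like this (illustrative only; assumes an Eigen version providing array atan2,
// as the new snippet itself does):
#include <Eigen/Core>
#include <iostream>
int main()
{
  Eigen::Array<double,1,3> x(8,-25,3), y(1./3.,0.5,-2.);
  std::cout << x.atan2(y) << "\n";   // member form, ArrayBase::atan2
  std::cout << atan2(x, y) << "\n";  // free-function form, found in namespace Eigen via ADL
}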
@@ -1,6 +1,6 @@
MatrixXf m = MatrixXf::Random(3,2);
cout << "Here is the matrix m:" << endl << m << endl;
JacobiSVD<MatrixXf> svd(m, ComputeThinU | ComputeThinV);
JacobiSVD<MatrixXf, ComputeThinU | ComputeThinV> svd(m);
cout << "Its singular values are:" << endl << svd.singularValues() << endl;
cout << "Its left singular vectors are the columns of the thin U matrix:" << endl << svd.matrixU() << endl;
cout << "Its right singular vectors are the columns of the thin V matrix:" << endl << svd.matrixV() << endl;

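// The hunk above moves the SVD options from a runtime constructor argument to a
// compile-time template parameter, matching the newer Eigen API. A self-contained
// sketch, assuming an Eigen version that accepts the options template argument
// (illustrative only, not part of this commit):
#include <Eigen/SVD>
#include <iostream>
int main()
{
  Eigen::MatrixXf m = Eigen::MatrixXf::Random(3,2);
  Eigen::JacobiSVD<Eigen::MatrixXf, Eigen::ComputeThinU | Eigen::ComputeThinV> svd(m);
  Eigen::VectorXf rhs = Eigen::VectorXf::Random(3);
  std::cout << "singular values:\n" << svd.singularValues() << "\n";
  std::cout << "least-squares solve:\n" << svd.solve(rhs) << "\n";
}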
@@ -3,7 +3,7 @@ MatrixXd A = X + X.transpose();
cout << "Here is a random symmetric matrix, A:" << endl << A << endl;
X = MatrixXd::Random(5,5);
MatrixXd B = X * X.transpose();
cout << "and a random postive-definite matrix, B:" << endl << B << endl << endl;
cout << "and a random positive-definite matrix, B:" << endl << B << endl << endl;

GeneralizedSelfAdjointEigenSolver<MatrixXd> es(A,B);
cout << "The eigenvalues of the pencil (A,B) are:" << endl << es.eigenvalues() << endl;

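// A self-contained version of the generalized eigenproblem snippet above
// (illustrative only; the doc snippet gets its includes and using-directives
// from compile_snippet.cpp.in):
#include <Eigen/Eigenvalues>
#include <iostream>
int main()
{
  Eigen::MatrixXd X = Eigen::MatrixXd::Random(5,5);
  Eigen::MatrixXd A = X + X.transpose();   // symmetric
  X = Eigen::MatrixXd::Random(5,5);
  Eigen::MatrixXd B = X * X.transpose();   // positive-definite (with probability 1)
  Eigen::GeneralizedSelfAdjointEigenSolver<Eigen::MatrixXd> es(A, B);
  std::cout << "Eigenvalues of the pencil (A,B):\n" << es.eigenvalues() << "\n";
}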
@@ -1,4 +1,4 @@
ArrayXi ind(5); ind<<4,2,5,5,3;
MatrixXi A = MatrixXi::Random(4,6);
cout << "Initial matrix A:\n" << A << "\n\n";
cout << "A(all,ind-1):\n" << A(all,ind-1) << "\n\n";
cout << "A(all,ind-1):\n" << A(Eigen::placeholders::all,ind-1) << "\n\n";

@@ -1,5 +1,3 @@
#if EIGEN_HAS_STATIC_ARRAY_TEMPLATE
MatrixXi A = MatrixXi::Random(4,6);
cout << "Initial matrix A:\n" << A << "\n\n";
cout << "A(all,{4,2,5,5,3}):\n" << A(all,{4,2,5,5,3}) << "\n\n";
#endif
cout << "A(all,{4,2,5,5,3}):\n" << A(Eigen::placeholders::all,{4,2,5,5,3}) << "\n\n";

@@ -1,4 +1,4 @@
std::vector<int> ind{4,2,5,5,3};
MatrixXi A = MatrixXi::Random(4,6);
cout << "Initial matrix A:\n" << A << "\n\n";
cout << "A(all,ind):\n" << A(all,ind) << "\n\n";
cout << "A(all,ind):\n" << A(Eigen::placeholders::all,ind) << "\n\n";

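// Eigen::placeholders::all selects every row or column of the indexed dimension;
// the snippets above now spell it out because they no longer assume
// "using namespace Eigen". A self-contained sketch (illustrative only):
#include <Eigen/Dense>
#include <iostream>
#include <vector>
int main()
{
  Eigen::MatrixXi A = Eigen::MatrixXi::Random(4,6);
  std::vector<int> cols{4,2,5,5,3};
  // all rows, columns picked (with repetition) by the index collection
  std::cout << A(Eigen::placeholders::all, cols) << "\n";
}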
@@ -19,16 +19,13 @@ if(QT4_FOUND)
add_dependencies(all_examples Tutorial_sparse_example)
endif()

if(EIGEN_COMPILER_SUPPORT_CPP11)
add_executable(random_cpp11 random_cpp11.cpp)
target_link_libraries(random_cpp11 ${EIGEN_STANDARD_LIBRARIES_TO_LINK_TO})
add_dependencies(all_examples random_cpp11)
ei_add_target_property(random_cpp11 COMPILE_FLAGS "-std=c++11")
add_executable(random_cpp11 random_cpp11.cpp)
target_link_libraries(random_cpp11 ${EIGEN_STANDARD_LIBRARIES_TO_LINK_TO})
add_dependencies(all_examples random_cpp11)

add_custom_command(
TARGET random_cpp11
POST_BUILD
COMMAND random_cpp11
ARGS >${CMAKE_CURRENT_BINARY_DIR}/random_cpp11.out
)
endif()
add_custom_command(
TARGET random_cpp11
POST_BUILD
COMMAND random_cpp11
ARGS >${CMAKE_CURRENT_BINARY_DIR}/random_cpp11.out
)

@@ -2,13 +2,11 @@
#include <iostream>
#include <random>

using namespace Eigen;

int main() {
std::default_random_engine generator;
std::poisson_distribution<int> distribution(4.1);
auto poisson = [&] () {return distribution(generator);};

RowVectorXi v = RowVectorXi::NullaryExpr(10, poisson );
Eigen::RowVectorXi v = Eigen::RowVectorXi::NullaryExpr(10, poisson );
std::cout << v << "\n";
}