ADD: added other eigen lib

Henry Winkel
2022-12-21 16:19:04 +01:00
parent a570766dc6
commit 9e56c7f2c0
832 changed files with 36586 additions and 20006 deletions

@@ -84,9 +84,12 @@ public :
     for (int j=0;j<N;j++){
       for (int i=0;i<N;i++){
         somme=0.0;
+        if(i>=j)
+        {
         for (int k=0;k<N;k++)
           somme += A[i][k]*A[j][k];
         X[j][i]=somme;
+        }
       }
     }
   }
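The added if(i>=j) guard restricts the hand-written product to one triangle: X = A*Aᵀ is symmetric, so only half the entries need computing. A minimal sketch of the same idea using Eigen's own API (the function name is illustrative, not part of this commit):

    #include <Eigen/Dense>

    // Accumulate A * A^T into the lower triangle of X only; the strictly
    // upper part of X is left untouched, just like the guarded loop above.
    Eigen::MatrixXd ata_lower_triangle(const Eigen::MatrixXd& A) {
      Eigen::MatrixXd X = Eigen::MatrixXd::Zero(A.rows(), A.rows());
      X.selfadjointView<Eigen::Lower>().rankUpdate(A);  // X += A * A.transpose()
      return X;
    }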


@@ -38,8 +38,6 @@ void bench(int id, int rows, int size = Size)
   A = A*A.adjoint();

   BenchTimer t_llt, t_ldlt, t_lu, t_fplu, t_qr, t_cpqr, t_cod, t_fpqr, t_jsvd, t_bdcsvd;
-  int svd_opt = ComputeThinU|ComputeThinV;
-
   int tries = 5;
   int rep = 1000/size;
   if(rep==0) rep = 1;
@@ -53,8 +51,8 @@ void bench(int id, int rows, int size = Size)
   ColPivHouseholderQR<Mat> cpqr(A.rows(),A.cols());
   CompleteOrthogonalDecomposition<Mat> cod(A.rows(),A.cols());
   FullPivHouseholderQR<Mat> fpqr(A.rows(),A.cols());
-  JacobiSVD<MatDyn> jsvd(A.rows(),A.cols());
-  BDCSVD<MatDyn> bdcsvd(A.rows(),A.cols());
+  JacobiSVD<MatDyn, ComputeThinU|ComputeThinV> jsvd(A.rows(),A.cols());
+  BDCSVD<MatDyn, ComputeThinU|ComputeThinV> bdcsvd(A.rows(),A.cols());

   BENCH(t_llt, tries, rep, compute_norm_equation(llt,A));
   BENCH(t_ldlt, tries, rep, compute_norm_equation(ldlt,A));
@@ -67,9 +65,9 @@ void bench(int id, int rows, int size = Size)
   if(size*rows<=10000000)
     BENCH(t_fpqr, tries, rep, compute(fpqr,A));
   if(size<500) // JacobiSVD is really too slow for too large matrices
-    BENCH(t_jsvd, tries, rep, jsvd.compute(A,svd_opt));
+    BENCH(t_jsvd, tries, rep, jsvd.compute(A));
   // if(size*rows<=20000000)
-    BENCH(t_bdcsvd, tries, rep, bdcsvd.compute(A,svd_opt));
+    BENCH(t_bdcsvd, tries, rep, bdcsvd.compute(A));

   results["LLT"][id] = t_llt.best();
   results["LDLT"][id] = t_ldlt.best();


@@ -1,7 +1,7 @@
-set(BLAS_FOUND TRUE)
-set(LAPACK_FOUND TRUE)
+set(BLAS_FOUND EIGEN_BUILD_BLAS)
+set(LAPACK_FOUND EIGEN_BUILD_LAPACK)
 set(BLAS_LIBRARIES eigen_blas_static)
 set(LAPACK_LIBRARIES eigen_lapack_static)


@@ -219,14 +219,8 @@ template <typename Device, typename T> class BenchmarkSuite {
     size_b[1] = m_;
     TensorMap<Tensor<T, 2>, Eigen::Aligned> B(b_, size_b);
-#if defined(EIGEN_HAS_INDEX_LIST)
     Eigen::IndexPairList<Eigen::type2indexpair<0, 0>,
                          Eigen::type2indexpair<2, 1> > paddings;
-#else
-    Eigen::array<Eigen::IndexPair<TensorIndex>, 2> paddings;
-    paddings[0] = Eigen::IndexPair<TensorIndex>(0, 0);
-    paddings[1] = Eigen::IndexPair<TensorIndex>(2, 1);
-#endif
 #ifdef EIGEN_USE_SYCL // warmup for sycl
     for (int iter = 0; iter < 10; ++iter) {
       B.device(device_) = A.pad(paddings);
@@ -251,15 +245,7 @@ template <typename Device, typename T> class BenchmarkSuite {
     size_b[1] = k_/2;
     TensorMap<Tensor<T, 2>, Eigen::Aligned> B(b_, size_b);
-#ifndef EIGEN_HAS_INDEX_LIST
-    Eigen::array<TensorIndex, 2> strides;
-    strides[0] = 1;
-    strides[1] = 2;
-#else
-    // Take advantage of cxx11 to give the compiler information it can use to
-    // optimize the code.
     Eigen::IndexList<Eigen::type2index<1>, Eigen::type2index<2> > strides;
-#endif
 #ifdef EIGEN_USE_SYCL // warmup for sycl
     for (int iter = 0; iter < 10; ++iter) {
@@ -284,17 +270,8 @@ template <typename Device, typename T> class BenchmarkSuite {
     size_c[0] = m_;
     size_c[1] = n_;
     TensorMap<Tensor<T, 2>, Eigen::Aligned> C(c_, size_c);
-#ifndef EIGEN_HAS_INDEX_LIST
-    Eigen::array<int, 2> broadcast;
-    broadcast[0] = 1;
-    broadcast[1] = n_;
-#else
-    // Take advantage of cxx11 to give the compiler information it can use to
-    // optimize the code.
     Eigen::IndexList<Eigen::type2index<1>, int> broadcast;
     broadcast.set(1, n_);
-#endif
 #ifdef EIGEN_USE_SYCL // warmup for sycl
     for (int iter = 0; iter < 10; ++iter) {
@@ -385,15 +362,7 @@ for (int iter = 0; iter < 10; ++iter) {
     Eigen::array<TensorIndex, 1> output_size;
     output_size[0] = n_;
     TensorMap<Tensor<T, 1, 0, TensorIndex>, Eigen::Aligned> C(c_, output_size);
-#ifndef EIGEN_HAS_INDEX_LIST
-    Eigen::array<TensorIndex, 1> sum_along_dim;
-    sum_along_dim[0] = 0;
-#else
-    // Take advantage of cxx11 to give the compiler information it can use to
-    // optimize the code.
     Eigen::IndexList<Eigen::type2index<0>> sum_along_dim;
-#endif
 #ifdef EIGEN_USE_SYCL // warmup for sycl
     for (int iter = 0; iter < 10; ++iter) {
       C.device(device_) = B.sum(sum_along_dim);
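The four hunks above all delete the pre-C++11 fallback branches: EIGEN_HAS_INDEX_LIST is now always available, so the IndexList/IndexPairList forms survive unconditionally. The point of those types is to encode dimensions in the type itself rather than in a runtime array; a small self-contained sketch (the function is illustrative, not from the patch):

    #include <unsupported/Eigen/CXX11/Tensor>

    // Sum a rank-2 tensor along dimension 0. Encoding the axis as
    // type2index<0> lets the compiler specialize the reduction instead of
    // reading the axis from a runtime array.
    Eigen::Tensor<float, 1> sum_rows(const Eigen::Tensor<float, 2>& B) {
      Eigen::IndexList<Eigen::type2index<0>> sum_along_dim;
      Eigen::Tensor<float, 1> C = B.sum(sum_along_dim);
      return C;
    }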
@@ -564,9 +533,9 @@ for (int iter = 0; iter < 10; ++iter) {
     // Initialize the content of the memory pools to prevent asan from
     // complaining.
-    device_.memset(a_, 12, m_ * k_ * sizeof(T));
-    device_.memset(b_, 23, k_ * n_ * sizeof(T));
-    device_.memset(c_, 31, m_ * n_ * sizeof(T));
+    device_.fill(a_, a_ + m_ * k_, T(12));
+    device_.fill(b_, b_ + k_ * n_, T(23));
+    device_.fill(c_, c_ + m_ * n_, T(31));
   }
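The memset-to-fill change replaces a byte pattern with typed values: memset sets every byte to 12, which touches the memory (enough to silence asan) but yields meaningless scalars, while fill writes a well-defined T into each element. The plain-pointer equivalent, for comparison (standard C++, not the device API itself):

    #include <algorithm>
    #include <cstddef>
    #include <cstring>

    void init_pool(float* a, std::size_t n) {
      // Byte-wise: every byte of every float becomes 0x0C.
      std::memset(a, 12, n * sizeof(float));
      // Value-wise: every element becomes 12.0f, valid for any scalar type.
      std::fill(a, a + n, 12.0f);
    }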


@@ -56,9 +56,9 @@ void contraction(const Device& device_, TensorIndex num_iters, TensorIndex m_, T
   // Initialize the content of the memory pools to prevent asan from
   // complaining.
-  device_.memset(a_, 12, m_ * k_ * sizeof(T));
-  device_.memset(b_, 23, k_ * n_ * sizeof(T));
-  device_.memset(c_, 31, m_ * n_ * sizeof(T));
+  device_.fill(a_, m_ * k_, T(12));
+  device_.fill(b_, k_ * n_, T(23));
+  device_.fill(c_, m_ * n_, T(31));

   Eigen::array<TensorIndex, 2> sizeA;
   sizeA[0] = m_;