update
52  2025.09.22_cpp_with_eigen_package/Eigen/AccelerateSupport  Normal file
@@ -0,0 +1,52 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_ACCELERATESUPPORT_MODULE_H
#define EIGEN_ACCELERATESUPPORT_MODULE_H

#include "SparseCore"

#include "src/Core/util/DisableStupidWarnings.h"

/** \ingroup Support_modules
 * \defgroup AccelerateSupport_Module AccelerateSupport module
 *
 * This module provides an interface to the Apple Accelerate library.
 * It provides the following seven main factorization classes:
 * - class AccelerateLLT: a Cholesky (LL^T) factorization.
 * - class AccelerateLDLT: the default LDL^T factorization.
 * - class AccelerateLDLTUnpivoted: a Cholesky-like LDL^T factorization with only 1x1 pivots and no pivoting
 * - class AccelerateLDLTSBK: an LDL^T factorization with Supernode Bunch-Kaufman and static pivoting
 * - class AccelerateLDLTTPP: an LDL^T factorization with full threshold partial pivoting
 * - class AccelerateQR: a QR factorization
 * - class AccelerateCholeskyAtA: a QR factorization without storing Q (equivalent to A^TA = R^T R)
 *
 * \code
 * #include <Eigen/AccelerateSupport>
 * \endcode
 *
 * In order to use this module, the Accelerate headers must be accessible from
 * the include paths, and your binary must be linked to the Accelerate framework.
 * The Accelerate library is only available on Apple hardware.
 *
 * Note that many of the algorithms can be influenced by the UpLo template
 * argument. All matrices are assumed to be symmetric. For example, the following
 * creates an LDLT factorization where your matrix is symmetric (implicit) and
 * uses the lower triangle:
 *
 * \code
 * AccelerateLDLT<SparseMatrix<float>, Lower> ldlt;
 * \endcode
 */

// IWYU pragma: begin_exports
#include "src/AccelerateSupport/AccelerateSupport.h"
// IWYU pragma: end_exports

#include "src/Core/util/ReenableStupidWarnings.h"

#endif  // EIGEN_ACCELERATESUPPORT_MODULE_H
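A minimal usage sketch for this module (not part of the committed file; it assumes macOS with the Accelerate framework linked, and a symmetric positive definite sparse matrix A):

// Sketch only: solveWithAccelerate is a hypothetical helper, not an Eigen API.
#include <Eigen/AccelerateSupport>
#include <Eigen/SparseCore>

bool solveWithAccelerate(const Eigen::SparseMatrix<float>& A,
                         const Eigen::VectorXf& b, Eigen::VectorXf& x) {
  Eigen::AccelerateLDLT<Eigen::SparseMatrix<float>, Eigen::Lower> ldlt;
  ldlt.compute(A);                       // analyze pattern + numeric factorization
  if (ldlt.info() != Eigen::Success) return false;
  x = ldlt.solve(b);                     // standard sparse-solver interface
  return ldlt.info() == Eigen::Success;
}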
43  2025.09.22_cpp_with_eigen_package/Eigen/Cholesky  Normal file
@@ -0,0 +1,43 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_CHOLESKY_MODULE_H
#define EIGEN_CHOLESKY_MODULE_H

#include "Core"
#include "Jacobi"

#include "src/Core/util/DisableStupidWarnings.h"

/** \defgroup Cholesky_Module Cholesky module
 *
 *
 *
 * This module provides two variants of the Cholesky decomposition for selfadjoint (hermitian) matrices.
 * Those decompositions are also accessible via the following methods:
 *  - MatrixBase::llt()
 *  - MatrixBase::ldlt()
 *  - SelfAdjointView::llt()
 *  - SelfAdjointView::ldlt()
 *
 * \code
 * #include <Eigen/Cholesky>
 * \endcode
 */

// IWYU pragma: begin_exports
#include "src/Cholesky/LLT.h"
#include "src/Cholesky/LDLT.h"
#ifdef EIGEN_USE_LAPACKE
#include "src/misc/lapacke_helpers.h"
#include "src/Cholesky/LLT_LAPACKE.h"
#endif
// IWYU pragma: end_exports

#include "src/Core/util/ReenableStupidWarnings.h"

#endif  // EIGEN_CHOLESKY_MODULE_H
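A short, self-contained example of the two decompositions named above (illustrative only; the matrix is made SPD by construction):

#include <Eigen/Cholesky>
#include <Eigen/Core>
#include <iostream>

int main() {
  Eigen::Matrix3d M = Eigen::Matrix3d::Random();
  Eigen::Matrix3d A = M * M.transpose() + 3.0 * Eigen::Matrix3d::Identity();  // SPD by construction
  Eigen::Vector3d b(1.0, 2.0, 3.0);

  Eigen::LLT<Eigen::Matrix3d> llt(A);    // same as A.llt()
  Eigen::Vector3d x = llt.solve(b);

  Eigen::LDLT<Eigen::Matrix3d> ldlt(A);  // same as A.ldlt(); also handles semidefinite A
  Eigen::Vector3d y = ldlt.solve(b);

  std::cout << (A * x - b).norm() << " " << (A * y - b).norm() << "\n";  // both ~0
}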
48  2025.09.22_cpp_with_eigen_package/Eigen/CholmodSupport  Normal file
@@ -0,0 +1,48 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_CHOLMODSUPPORT_MODULE_H
#define EIGEN_CHOLMODSUPPORT_MODULE_H

#include "SparseCore"

#include "src/Core/util/DisableStupidWarnings.h"

#include <cholmod.h>

/** \ingroup Support_modules
 * \defgroup CholmodSupport_Module CholmodSupport module
 *
 * This module provides an interface to the Cholmod library which is part of the <a
 * href="http://www.suitesparse.com">suitesparse</a> package. It provides the two following main factorization classes:
 * - class CholmodSupernodalLLT: a supernodal LLT Cholesky factorization.
 * - class CholmodDecomposition: a general L(D)LT Cholesky factorization with automatic or explicit runtime selection of
 * the underlying factorization method (supernodal or simplicial).
 *
 * For the sake of completeness, this module also proposes the following two classes:
 * - class CholmodSimplicialLLT
 * - class CholmodSimplicialLDLT
 * Note that these classes do not bring any particular advantage compared to the built-in
 * SimplicialLLT and SimplicialLDLT factorization classes.
 *
 * \code
 * #include <Eigen/CholmodSupport>
 * \endcode
 *
 * In order to use this module, the cholmod headers must be accessible from the include paths, and your binary must be
 * linked to the cholmod library and its dependencies. The dependencies depend on how cholmod has been compiled. For a
 * cmake based project, you can use our FindCholmod.cmake module to help you in this task.
 *
 */

// IWYU pragma: begin_exports
#include "src/CholmodSupport/CholmodSupport.h"
// IWYU pragma: end_exports

#include "src/Core/util/ReenableStupidWarnings.h"

#endif  // EIGEN_CHOLMODSUPPORT_MODULE_H
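A minimal sketch of the solver interface described above (assumes CHOLMOD from SuiteSparse is installed and linked; solveWithCholmod is a hypothetical helper):

#include <Eigen/CholmodSupport>
#include <Eigen/SparseCore>

bool solveWithCholmod(const Eigen::SparseMatrix<double>& A,  // symmetric positive definite
                      const Eigen::VectorXd& b, Eigen::VectorXd& x) {
  Eigen::CholmodDecomposition<Eigen::SparseMatrix<double>, Eigen::Lower> solver;
  solver.compute(A);                      // analyzePattern + factorize via CHOLMOD
  if (solver.info() != Eigen::Success) return false;
  x = solver.solve(b);
  return solver.info() == Eigen::Success;
}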
446  2025.09.22_cpp_with_eigen_package/Eigen/Core  Normal file
@@ -0,0 +1,446 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
// Copyright (C) 2007-2011 Benoit Jacob <jacob.benoit.1@gmail.com>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_CORE_MODULE_H
#define EIGEN_CORE_MODULE_H

// Eigen version information.
#include "Version"

// first thing Eigen does: stop the compiler from reporting useless warnings.
#include "src/Core/util/DisableStupidWarnings.h"

// then include this file where all our macros are defined. It's really important to do it first because
// it's where we do all the compiler/OS/arch detections and define most defaults.
#include "src/Core/util/Macros.h"

// This detects SSE/AVX/NEON/etc. and configures alignment settings
#include "src/Core/util/ConfigureVectorization.h"

// We need cuda_runtime.h/hip_runtime.h to ensure that
// the EIGEN_USING_STD macro works properly on the device side
#if defined(EIGEN_CUDACC)
#include <cuda_runtime.h>
#elif defined(EIGEN_HIPCC)
#include <hip/hip_runtime.h>
#endif

#ifdef EIGEN_EXCEPTIONS
#include <new>
#endif

// Disable the ipa-cp-clone optimization flag with MinGW 6.x or older (enabled by default with -O3)
// See http://eigen.tuxfamily.org/bz/show_bug.cgi?id=556 for details.
#if EIGEN_COMP_MINGW && EIGEN_GNUC_STRICT_LESS_THAN(6, 0, 0)
#pragma GCC optimize("-fno-ipa-cp-clone")
#endif

// Prevent ICC from specializing std::complex operators that silently fail
// on device. This allows us to use our own device-compatible specializations
// instead.
#if EIGEN_COMP_ICC && defined(EIGEN_GPU_COMPILE_PHASE) && !defined(_OVERRIDE_COMPLEX_SPECIALIZATION_)
#define _OVERRIDE_COMPLEX_SPECIALIZATION_ 1
#endif
#include <complex>

// this include file manages BLAS and MKL related macros
// and inclusion of their respective header files
#include "src/Core/util/MKL_support.h"

#if defined(EIGEN_HAS_CUDA_FP16) || defined(EIGEN_HAS_HIP_FP16)
#define EIGEN_HAS_GPU_FP16
#endif

#if defined(EIGEN_HAS_CUDA_BF16) || defined(EIGEN_HAS_HIP_BF16)
#define EIGEN_HAS_GPU_BF16
#endif

#if (defined _OPENMP) && (!defined EIGEN_DONT_PARALLELIZE)
#define EIGEN_HAS_OPENMP
#endif

#ifdef EIGEN_HAS_OPENMP
#include <atomic>
#include <omp.h>
#endif

// MSVC for windows mobile does not have the errno.h file
#if !(EIGEN_COMP_MSVC && EIGEN_OS_WINCE) && !EIGEN_COMP_ARM
#define EIGEN_HAS_ERRNO
#endif

#ifdef EIGEN_HAS_ERRNO
#include <cerrno>
#endif
#include <cstddef>
#include <cstdlib>
#include <cmath>
#include <functional>
#ifndef EIGEN_NO_IO
#include <sstream>
#include <iosfwd>
#endif
#include <cstring>
#include <string>
#include <limits>
#include <climits>  // for CHAR_BIT
// for min/max:
#include <algorithm>

#include <array>
#include <memory>
#include <vector>

// for std::is_nothrow_move_assignable
#include <type_traits>

// for std::this_thread::yield().
#if !defined(EIGEN_USE_BLAS) && (defined(EIGEN_HAS_OPENMP) || defined(EIGEN_GEMM_THREADPOOL))
#include <thread>
#endif

// for std::bit_cast()
#if defined(__cpp_lib_bit_cast) && __cpp_lib_bit_cast >= 201806L
#include <bit>
#endif

// for outputting debug info
#ifdef EIGEN_DEBUG_ASSIGN
#include <iostream>
#endif

// required for __cpuid, needs to be included after cmath
// also required for _BitScanReverse on Windows on ARM
#if EIGEN_COMP_MSVC && (EIGEN_ARCH_i386_OR_x86_64 || EIGEN_ARCH_ARM64) && !EIGEN_OS_WINCE
#include <intrin.h>
#endif

#if defined(EIGEN_USE_SYCL)
#undef min
#undef max
#undef isnan
#undef isinf
#undef isfinite
#include <CL/sycl.hpp>
#include <map>
#include <thread>
#include <utility>
#ifndef EIGEN_SYCL_LOCAL_THREAD_DIM0
#define EIGEN_SYCL_LOCAL_THREAD_DIM0 16
#endif
#ifndef EIGEN_SYCL_LOCAL_THREAD_DIM1
#define EIGEN_SYCL_LOCAL_THREAD_DIM1 16
#endif
#endif

#if defined EIGEN2_SUPPORT_STAGE40_FULL_EIGEN3_STRICTNESS || defined EIGEN2_SUPPORT_STAGE30_FULL_EIGEN3_API || \
    defined EIGEN2_SUPPORT_STAGE20_RESOLVE_API_CONFLICTS || defined EIGEN2_SUPPORT_STAGE10_FULL_EIGEN2_API || \
    defined EIGEN2_SUPPORT
// This will generate an error message:
#error Eigen2-support is only available up to version 3.2. Please go to "http://eigen.tuxfamily.org/index.php?title=Eigen2" for further information
#endif

namespace Eigen {

// we use size_t frequently and we'll never remember to prepend it with std:: every time just to
// ensure QNX/QCC support
using std::size_t;
// gcc 4.6.0 wants std:: for ptrdiff_t
using std::ptrdiff_t;

}  // namespace Eigen

/** \defgroup Core_Module Core module
 * This is the main module of Eigen providing dense matrix and vector support
 * (both fixed and dynamic size) with all the features corresponding to a BLAS library
 * and much more...
 *
 * \code
 * #include <Eigen/Core>
 * \endcode
 */

#ifdef EIGEN_USE_LAPACKE
#ifdef EIGEN_USE_MKL
#include "mkl_lapacke.h"
#else
#include "src/misc/lapacke.h"
#endif
#endif

// IWYU pragma: begin_exports
#include "src/Core/util/Constants.h"
#include "src/Core/util/Meta.h"
#include "src/Core/util/Assert.h"
#include "src/Core/util/ForwardDeclarations.h"
#include "src/Core/util/StaticAssert.h"
#include "src/Core/util/XprHelper.h"
#include "src/Core/util/Memory.h"
#include "src/Core/util/IntegralConstant.h"
#include "src/Core/util/Serializer.h"
#include "src/Core/util/SymbolicIndex.h"
#include "src/Core/util/EmulateArray.h"
#include "src/Core/util/MoreMeta.h"

#include "src/Core/NumTraits.h"
#include "src/Core/MathFunctions.h"
#include "src/Core/RandomImpl.h"
#include "src/Core/GenericPacketMath.h"
#include "src/Core/MathFunctionsImpl.h"
#include "src/Core/arch/Default/ConjHelper.h"
// Generic half float support
#include "src/Core/arch/Default/Half.h"
#include "src/Core/arch/Default/BFloat16.h"
#include "src/Core/arch/Default/GenericPacketMathFunctionsFwd.h"

#if defined EIGEN_VECTORIZE_AVX512
#include "src/Core/arch/SSE/PacketMath.h"
#include "src/Core/arch/SSE/Reductions.h"
#include "src/Core/arch/AVX/PacketMath.h"
#include "src/Core/arch/AVX/Reductions.h"
#include "src/Core/arch/AVX512/PacketMath.h"
#include "src/Core/arch/AVX512/Reductions.h"
#if defined EIGEN_VECTORIZE_AVX512FP16
#include "src/Core/arch/AVX512/PacketMathFP16.h"
#endif
#include "src/Core/arch/SSE/TypeCasting.h"
#include "src/Core/arch/AVX/TypeCasting.h"
#include "src/Core/arch/AVX512/TypeCasting.h"
#if defined EIGEN_VECTORIZE_AVX512FP16
#include "src/Core/arch/AVX512/TypeCastingFP16.h"
#endif
#include "src/Core/arch/SSE/Complex.h"
#include "src/Core/arch/AVX/Complex.h"
#include "src/Core/arch/AVX512/Complex.h"
#include "src/Core/arch/SSE/MathFunctions.h"
#include "src/Core/arch/AVX/MathFunctions.h"
#include "src/Core/arch/AVX512/MathFunctions.h"
#if defined EIGEN_VECTORIZE_AVX512FP16
#include "src/Core/arch/AVX512/MathFunctionsFP16.h"
#endif
#include "src/Core/arch/AVX512/TrsmKernel.h"
#elif defined EIGEN_VECTORIZE_AVX
// Use AVX for floats and doubles, SSE for integers
#include "src/Core/arch/SSE/PacketMath.h"
#include "src/Core/arch/SSE/Reductions.h"
#include "src/Core/arch/SSE/TypeCasting.h"
#include "src/Core/arch/SSE/Complex.h"
#include "src/Core/arch/AVX/PacketMath.h"
#include "src/Core/arch/AVX/Reductions.h"
#include "src/Core/arch/AVX/TypeCasting.h"
#include "src/Core/arch/AVX/Complex.h"
#include "src/Core/arch/SSE/MathFunctions.h"
#include "src/Core/arch/AVX/MathFunctions.h"
#elif defined EIGEN_VECTORIZE_SSE
#include "src/Core/arch/SSE/PacketMath.h"
#include "src/Core/arch/SSE/Reductions.h"
#include "src/Core/arch/SSE/TypeCasting.h"
#include "src/Core/arch/SSE/MathFunctions.h"
#include "src/Core/arch/SSE/Complex.h"
#endif

#if defined(EIGEN_VECTORIZE_ALTIVEC) || defined(EIGEN_VECTORIZE_VSX)
#include "src/Core/arch/AltiVec/PacketMath.h"
#include "src/Core/arch/AltiVec/TypeCasting.h"
#include "src/Core/arch/AltiVec/MathFunctions.h"
#include "src/Core/arch/AltiVec/Complex.h"
#elif defined EIGEN_VECTORIZE_NEON
#include "src/Core/arch/NEON/PacketMath.h"
#include "src/Core/arch/NEON/TypeCasting.h"
#include "src/Core/arch/NEON/MathFunctions.h"
#include "src/Core/arch/NEON/Complex.h"
#elif defined EIGEN_VECTORIZE_LSX
#include "src/Core/arch/LSX/PacketMath.h"
#include "src/Core/arch/LSX/TypeCasting.h"
#include "src/Core/arch/LSX/MathFunctions.h"
#include "src/Core/arch/LSX/Complex.h"
#elif defined EIGEN_VECTORIZE_SVE
#include "src/Core/arch/SVE/PacketMath.h"
#include "src/Core/arch/SVE/TypeCasting.h"
#include "src/Core/arch/SVE/MathFunctions.h"
#elif defined EIGEN_VECTORIZE_ZVECTOR
#include "src/Core/arch/ZVector/PacketMath.h"
#include "src/Core/arch/ZVector/MathFunctions.h"
#include "src/Core/arch/ZVector/Complex.h"
#elif defined EIGEN_VECTORIZE_MSA
#include "src/Core/arch/MSA/PacketMath.h"
#include "src/Core/arch/MSA/MathFunctions.h"
#include "src/Core/arch/MSA/Complex.h"
#elif defined EIGEN_VECTORIZE_HVX
#include "src/Core/arch/HVX/PacketMath.h"
#endif

#if defined EIGEN_VECTORIZE_GPU
#include "src/Core/arch/GPU/PacketMath.h"
#include "src/Core/arch/GPU/MathFunctions.h"
#include "src/Core/arch/GPU/TypeCasting.h"
#endif

#if defined(EIGEN_USE_SYCL)
#include "src/Core/arch/SYCL/InteropHeaders.h"
#if !defined(EIGEN_DONT_VECTORIZE_SYCL)
#include "src/Core/arch/SYCL/PacketMath.h"
#include "src/Core/arch/SYCL/MathFunctions.h"
#include "src/Core/arch/SYCL/TypeCasting.h"
#endif
#endif

#include "src/Core/arch/Default/Settings.h"
// This file provides generic implementations valid for scalar as well
#include "src/Core/arch/Default/GenericPacketMathFunctions.h"

#include "src/Core/functors/TernaryFunctors.h"
#include "src/Core/functors/BinaryFunctors.h"
#include "src/Core/functors/UnaryFunctors.h"
#include "src/Core/functors/NullaryFunctors.h"
#include "src/Core/functors/StlFunctors.h"
#include "src/Core/functors/AssignmentFunctors.h"

// Specialized functors for GPU.
#ifdef EIGEN_GPUCC
#include "src/Core/arch/GPU/Complex.h"
#endif

// Specializations of vectorized activation functions for NEON.
#ifdef EIGEN_VECTORIZE_NEON
#include "src/Core/arch/NEON/UnaryFunctors.h"
#endif

#include "src/Core/util/IndexedViewHelper.h"
#include "src/Core/util/ReshapedHelper.h"
#include "src/Core/ArithmeticSequence.h"
#ifndef EIGEN_NO_IO
#include "src/Core/IO.h"
#endif
#include "src/Core/DenseCoeffsBase.h"
#include "src/Core/DenseBase.h"
#include "src/Core/MatrixBase.h"
#include "src/Core/EigenBase.h"

#include "src/Core/Product.h"
#include "src/Core/CoreEvaluators.h"
#include "src/Core/AssignEvaluator.h"
#include "src/Core/RealView.h"
#include "src/Core/Assign.h"

#include "src/Core/ArrayBase.h"
#include "src/Core/util/BlasUtil.h"
#include "src/Core/DenseStorage.h"
#include "src/Core/NestByValue.h"

// #include "src/Core/ForceAlignedAccess.h"

#include "src/Core/ReturnByValue.h"
#include "src/Core/NoAlias.h"
#include "src/Core/PlainObjectBase.h"
#include "src/Core/Matrix.h"
#include "src/Core/Array.h"
#include "src/Core/Fill.h"
#include "src/Core/CwiseTernaryOp.h"
#include "src/Core/CwiseBinaryOp.h"
#include "src/Core/CwiseUnaryOp.h"
#include "src/Core/CwiseNullaryOp.h"
#include "src/Core/CwiseUnaryView.h"
#include "src/Core/SelfCwiseBinaryOp.h"
#include "src/Core/InnerProduct.h"
#include "src/Core/Dot.h"
#include "src/Core/StableNorm.h"
#include "src/Core/Stride.h"
#include "src/Core/MapBase.h"
#include "src/Core/Map.h"
#include "src/Core/Ref.h"
#include "src/Core/Block.h"
#include "src/Core/VectorBlock.h"
#include "src/Core/IndexedView.h"
#include "src/Core/Reshaped.h"
#include "src/Core/Transpose.h"
#include "src/Core/DiagonalMatrix.h"
#include "src/Core/Diagonal.h"
#include "src/Core/DiagonalProduct.h"
#include "src/Core/SkewSymmetricMatrix3.h"
#include "src/Core/Redux.h"
#include "src/Core/Visitor.h"
#include "src/Core/FindCoeff.h"
#include "src/Core/Fuzzy.h"
#include "src/Core/Swap.h"
#include "src/Core/CommaInitializer.h"
#include "src/Core/GeneralProduct.h"
#include "src/Core/Solve.h"
#include "src/Core/Inverse.h"
#include "src/Core/SolverBase.h"
#include "src/Core/PermutationMatrix.h"
#include "src/Core/Transpositions.h"
#include "src/Core/TriangularMatrix.h"
#include "src/Core/SelfAdjointView.h"
#include "src/Core/products/GeneralBlockPanelKernel.h"
#include "src/Core/DeviceWrapper.h"
#ifdef EIGEN_GEMM_THREADPOOL
#include "ThreadPool"
#endif
#include "src/Core/products/Parallelizer.h"
#include "src/Core/ProductEvaluators.h"
#include "src/Core/products/GeneralMatrixVector.h"
#include "src/Core/products/GeneralMatrixMatrix.h"
#include "src/Core/SolveTriangular.h"
#include "src/Core/products/GeneralMatrixMatrixTriangular.h"
#include "src/Core/products/SelfadjointMatrixVector.h"
#include "src/Core/products/SelfadjointMatrixMatrix.h"
#include "src/Core/products/SelfadjointProduct.h"
#include "src/Core/products/SelfadjointRank2Update.h"
#include "src/Core/products/TriangularMatrixVector.h"
#include "src/Core/products/TriangularMatrixMatrix.h"
#include "src/Core/products/TriangularSolverMatrix.h"
#include "src/Core/products/TriangularSolverVector.h"
#include "src/Core/BandMatrix.h"
#include "src/Core/CoreIterators.h"
#include "src/Core/ConditionEstimator.h"

#if defined(EIGEN_VECTORIZE_VSX)
#include "src/Core/arch/AltiVec/MatrixProduct.h"
#elif defined EIGEN_VECTORIZE_NEON
#include "src/Core/arch/NEON/GeneralBlockPanelKernel.h"
#elif defined EIGEN_VECTORIZE_LSX
#include "src/Core/arch/LSX/GeneralBlockPanelKernel.h"
#endif

#if defined(EIGEN_VECTORIZE_AVX512)
#include "src/Core/arch/AVX512/GemmKernel.h"
#endif

#include "src/Core/Select.h"
#include "src/Core/VectorwiseOp.h"
#include "src/Core/PartialReduxEvaluator.h"
#include "src/Core/Random.h"
#include "src/Core/Replicate.h"
#include "src/Core/Reverse.h"
#include "src/Core/ArrayWrapper.h"
#include "src/Core/StlIterators.h"

#ifdef EIGEN_USE_BLAS
#include "src/Core/products/GeneralMatrixMatrix_BLAS.h"
#include "src/Core/products/GeneralMatrixVector_BLAS.h"
#include "src/Core/products/GeneralMatrixMatrixTriangular_BLAS.h"
#include "src/Core/products/SelfadjointMatrixMatrix_BLAS.h"
#include "src/Core/products/SelfadjointMatrixVector_BLAS.h"
#include "src/Core/products/TriangularMatrixMatrix_BLAS.h"
#include "src/Core/products/TriangularMatrixVector_BLAS.h"
#include "src/Core/products/TriangularSolverMatrix_BLAS.h"
#endif  // EIGEN_USE_BLAS

#ifdef EIGEN_USE_MKL_VML
#include "src/Core/Assign_MKL.h"
#endif

#include "src/Core/GlobalFunctions.h"
// IWYU pragma: end_exports

#include "src/Core/util/ReenableStupidWarnings.h"

#endif  // EIGEN_CORE_MODULE_H
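A tiny, self-contained example of the dense matrix/vector support the Core module documents (illustrative only):

#include <Eigen/Core>
#include <iostream>

int main() {
  Eigen::Matrix3f A;
  A << 1, 2, 3,
       4, 5, 6,
       7, 8, 10;                          // comma-initializer from this module
  Eigen::Vector3f v(1, 0, -1);
  Eigen::Vector3f w = A * v;              // matrix-vector product
  Eigen::Matrix3f S = A + A.transpose();  // expression templates, evaluated on assignment
  std::cout << w.transpose() << "\n" << S.trace() << "\n";
}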
7  2025.09.22_cpp_with_eigen_package/Eigen/Dense  Normal file
@@ -0,0 +1,7 @@
#include "Core"
#include "LU"
#include "Cholesky"
#include "QR"
#include "SVD"
#include "Geometry"
#include "Eigenvalues"
2  2025.09.22_cpp_with_eigen_package/Eigen/Eigen  Normal file
@@ -0,0 +1,2 @@
#include "Dense"
#include "Sparse"
63  2025.09.22_cpp_with_eigen_package/Eigen/Eigenvalues  Normal file
@@ -0,0 +1,63 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_EIGENVALUES_MODULE_H
#define EIGEN_EIGENVALUES_MODULE_H

#include "Core"

#include "Cholesky"
#include "Jacobi"
#include "Householder"
#include "LU"
#include "Geometry"

#include "src/Core/util/DisableStupidWarnings.h"

/** \defgroup Eigenvalues_Module Eigenvalues module
 *
 *
 *
 * This module mainly provides various eigenvalue solvers.
 * This module also provides some MatrixBase methods, including:
 *  - MatrixBase::eigenvalues(),
 *  - MatrixBase::operatorNorm()
 *
 * \code
 * #include <Eigen/Eigenvalues>
 * \endcode
 */

#include "src/misc/RealSvd2x2.h"

// IWYU pragma: begin_exports
#include "src/Eigenvalues/Tridiagonalization.h"
#include "src/Eigenvalues/RealSchur.h"
#include "src/Eigenvalues/EigenSolver.h"
#include "src/Eigenvalues/SelfAdjointEigenSolver.h"
#include "src/Eigenvalues/GeneralizedSelfAdjointEigenSolver.h"
#include "src/Eigenvalues/HessenbergDecomposition.h"
#include "src/Eigenvalues/ComplexSchur.h"
#include "src/Eigenvalues/ComplexEigenSolver.h"
#include "src/Eigenvalues/RealQZ.h"
#include "src/Eigenvalues/GeneralizedEigenSolver.h"
#include "src/Eigenvalues/MatrixBaseEigenvalues.h"
#ifdef EIGEN_USE_LAPACKE
#ifdef EIGEN_USE_MKL
#include "mkl_lapacke.h"
#else
#include "src/misc/lapacke.h"
#endif
#include "src/Eigenvalues/RealSchur_LAPACKE.h"
#include "src/Eigenvalues/ComplexSchur_LAPACKE.h"
#include "src/Eigenvalues/SelfAdjointEigenSolver_LAPACKE.h"
#endif
// IWYU pragma: end_exports

#include "src/Core/util/ReenableStupidWarnings.h"

#endif  // EIGEN_EIGENVALUES_MODULE_H
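A small worked example of one of the solvers listed above (illustrative only):

#include <Eigen/Eigenvalues>
#include <iostream>

int main() {
  Eigen::Matrix2d A;
  A << 2, 1,
       1, 2;                                               // symmetric, eigenvalues 1 and 3
  Eigen::SelfAdjointEigenSolver<Eigen::Matrix2d> es(A);
  if (es.info() != Eigen::Success) return 1;
  std::cout << "eigenvalues: " << es.eigenvalues().transpose() << "\n";
  std::cout << "first eigenvector: " << es.eigenvectors().col(0).transpose() << "\n";
}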
59  2025.09.22_cpp_with_eigen_package/Eigen/Geometry  Normal file
@@ -0,0 +1,59 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_GEOMETRY_MODULE_H
#define EIGEN_GEOMETRY_MODULE_H

#include "Core"

#include "SVD"
#include "LU"
#include <limits>

#include "src/Core/util/DisableStupidWarnings.h"

/** \defgroup Geometry_Module Geometry module
 *
 * This module provides support for:
 *  - fixed-size homogeneous transformations
 *  - translation, scaling, 2D and 3D rotations
 *  - \link Quaternion quaternions \endlink
 *  - cross products (\ref MatrixBase::cross(), \ref MatrixBase::cross3())
 *  - orthogonal vector generation (MatrixBase::unitOrthogonal)
 *  - some linear components: \link ParametrizedLine parametrized-lines \endlink and \link Hyperplane hyperplanes \endlink
 *  - \link AlignedBox axis aligned bounding boxes \endlink
 *  - \link umeyama() least-square transformation fitting \endlink
 * \code
 * #include <Eigen/Geometry>
 * \endcode
 */

// IWYU pragma: begin_exports
#include "src/Geometry/OrthoMethods.h"
#include "src/Geometry/EulerAngles.h"
#include "src/Geometry/Homogeneous.h"
#include "src/Geometry/RotationBase.h"
#include "src/Geometry/Rotation2D.h"
#include "src/Geometry/Quaternion.h"
#include "src/Geometry/AngleAxis.h"
#include "src/Geometry/Transform.h"
#include "src/Geometry/Translation.h"
#include "src/Geometry/Scaling.h"
#include "src/Geometry/Hyperplane.h"
#include "src/Geometry/ParametrizedLine.h"
#include "src/Geometry/AlignedBox.h"
#include "src/Geometry/Umeyama.h"

// Use the SSE optimized version whenever possible.
#if (defined EIGEN_VECTORIZE_SSE) || (defined EIGEN_VECTORIZE_NEON)
#include "src/Geometry/arch/Geometry_SIMD.h"
#endif
// IWYU pragma: end_exports

#include "src/Core/util/ReenableStupidWarnings.h"

#endif  // EIGEN_GEOMETRY_MODULE_H
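A short sketch combining the rotation and transform types listed above (illustrative only):

#include <Eigen/Geometry>
#include <iostream>

int main() {
  using namespace Eigen;
  const double kPi = 3.141592653589793;
  AngleAxisd rot(kPi / 2.0, Vector3d::UnitZ());  // 90 degrees about z
  Quaterniond q(rot);                            // same rotation as a quaternion
  Affine3d T = Translation3d(1, 0, 0) * q;       // rotate, then translate along x
  Vector3d p(1, 0, 0);
  std::cout << (T * p).transpose() << "\n";      // approximately (1, 1, 0)
}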
31  2025.09.22_cpp_with_eigen_package/Eigen/Householder  Normal file
@@ -0,0 +1,31 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_HOUSEHOLDER_MODULE_H
#define EIGEN_HOUSEHOLDER_MODULE_H

#include "Core"

#include "src/Core/util/DisableStupidWarnings.h"

/** \defgroup Householder_Module Householder module
 * This module provides Householder transformations.
 *
 * \code
 * #include <Eigen/Householder>
 * \endcode
 */

// IWYU pragma: begin_exports
#include "src/Householder/Householder.h"
#include "src/Householder/HouseholderSequence.h"
#include "src/Householder/BlockHouseholder.h"
// IWYU pragma: end_exports

#include "src/Core/util/ReenableStupidWarnings.h"

#endif  // EIGEN_HOUSEHOLDER_MODULE_H
52  2025.09.22_cpp_with_eigen_package/Eigen/IterativeLinearSolvers  Normal file
@@ -0,0 +1,52 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_ITERATIVELINEARSOLVERS_MODULE_H
#define EIGEN_ITERATIVELINEARSOLVERS_MODULE_H

#include "SparseCore"
#include "OrderingMethods"

#include "src/Core/util/DisableStupidWarnings.h"

/**
 * \defgroup IterativeLinearSolvers_Module IterativeLinearSolvers module
 *
 * This module currently provides iterative methods to solve problems of the form \c A \c x = \c b, where \c A is a
 square matrix, usually very large and sparse.
 * Those solvers are accessible via the following classes:
 *  - ConjugateGradient for selfadjoint (hermitian) matrices,
 *  - LeastSquaresConjugateGradient for rectangular least-square problems,
 *  - BiCGSTAB for general square matrices.
 *
 * These iterative solvers are associated with some preconditioners:
 *  - IdentityPreconditioner - not really useful
 *  - DiagonalPreconditioner - also called Jacobi preconditioner, works very well on diagonally dominant matrices.
 *  - IncompleteLUT - incomplete LU factorization with dual thresholding
 *
 * Such problems can also be solved using the direct sparse decomposition modules: SparseCholesky, CholmodSupport,
 UmfPackSupport, SuperLUSupport, AccelerateSupport.
 *
 \code
 #include <Eigen/IterativeLinearSolvers>
 \endcode
 */

// IWYU pragma: begin_exports
#include "src/IterativeLinearSolvers/SolveWithGuess.h"
#include "src/IterativeLinearSolvers/IterativeSolverBase.h"
#include "src/IterativeLinearSolvers/BasicPreconditioners.h"
#include "src/IterativeLinearSolvers/ConjugateGradient.h"
#include "src/IterativeLinearSolvers/LeastSquareConjugateGradient.h"
#include "src/IterativeLinearSolvers/BiCGSTAB.h"
#include "src/IterativeLinearSolvers/IncompleteLUT.h"
#include "src/IterativeLinearSolvers/IncompleteCholesky.h"
// IWYU pragma: end_exports

#include "src/Core/util/ReenableStupidWarnings.h"

#endif  // EIGEN_ITERATIVELINEARSOLVERS_MODULE_H
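A runnable sketch of the ConjugateGradient solver with the DiagonalPreconditioner mentioned above, on a small SPD system built from triplets (illustrative only):

#include <Eigen/IterativeLinearSolvers>
#include <Eigen/SparseCore>
#include <iostream>
#include <vector>

int main() {
  const int n = 100;                                     // 1D Laplacian, SPD and tridiagonal
  std::vector<Eigen::Triplet<double>> coeffs;
  for (int i = 0; i < n; ++i) {
    coeffs.emplace_back(i, i, 2.0);
    if (i + 1 < n) {
      coeffs.emplace_back(i, i + 1, -1.0);
      coeffs.emplace_back(i + 1, i, -1.0);
    }
  }
  Eigen::SparseMatrix<double> A(n, n);
  A.setFromTriplets(coeffs.begin(), coeffs.end());
  Eigen::VectorXd b = Eigen::VectorXd::Ones(n);

  Eigen::ConjugateGradient<Eigen::SparseMatrix<double>, Eigen::Lower | Eigen::Upper,
                           Eigen::DiagonalPreconditioner<double>> cg;
  cg.setTolerance(1e-10);
  cg.compute(A);
  Eigen::VectorXd x = cg.solve(b);
  std::cout << "iterations: " << cg.iterations() << ", error: " << cg.error() << "\n";
}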
33  2025.09.22_cpp_with_eigen_package/Eigen/Jacobi  Normal file
@@ -0,0 +1,33 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_JACOBI_MODULE_H
#define EIGEN_JACOBI_MODULE_H

#include "Core"

#include "src/Core/util/DisableStupidWarnings.h"

/** \defgroup Jacobi_Module Jacobi module
 * This module provides Jacobi and Givens rotations.
 *
 * \code
 * #include <Eigen/Jacobi>
 * \endcode
 *
 * In addition to the listed classes, it defines the two following MatrixBase methods to apply a Jacobi or Givens rotation:
 *  - MatrixBase::applyOnTheLeft()
 *  - MatrixBase::applyOnTheRight().
 */

// IWYU pragma: begin_exports
#include "src/Jacobi/Jacobi.h"
// IWYU pragma: end_exports

#include "src/Core/util/ReenableStupidWarnings.h"

#endif  // EIGEN_JACOBI_MODULE_H
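A small example of a Givens rotation applied through applyOnTheLeft(), as documented above (illustrative only):

#include <Eigen/Jacobi>
#include <Eigen/Core>
#include <iostream>

int main() {
  Eigen::Vector2f v(3.0f, 4.0f);
  Eigen::JacobiRotation<float> G;
  G.makeGivens(v.x(), v.y());           // choose c, s so that the second entry is annihilated
  v.applyOnTheLeft(0, 1, G.adjoint());  // rotate entries 0 and 1
  std::cout << v.transpose() << "\n";   // approximately (5, 0)
}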
43  2025.09.22_cpp_with_eigen_package/Eigen/KLUSupport  Normal file
@@ -0,0 +1,43 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_KLUSUPPORT_MODULE_H
#define EIGEN_KLUSUPPORT_MODULE_H

#include "SparseCore"

#include "src/Core/util/DisableStupidWarnings.h"

extern "C" {
#include <btf.h>
#include <klu.h>
}

/** \ingroup Support_modules
 * \defgroup KLUSupport_Module KLUSupport module
 *
 * This module provides an interface to the KLU library which is part of the <a
 * href="http://www.suitesparse.com">suitesparse</a> package. It provides the following factorization class:
 * - class KLU: a sparse LU factorization, well-suited for circuit simulation.
 *
 * \code
 * #include <Eigen/KLUSupport>
 * \endcode
 *
 * In order to use this module, the klu and btf headers must be accessible from the include paths, and your binary must
 * be linked to the klu library and its dependencies. The dependencies depend on how KLU has been compiled. For a
 * cmake based project, you can use our FindKLU.cmake module to help you in this task.
 *
 */

// IWYU pragma: begin_exports
#include "src/KLUSupport/KLUSupport.h"
// IWYU pragma: end_exports

#include "src/Core/util/ReenableStupidWarnings.h"

#endif  // EIGEN_KLUSUPPORT_MODULE_H
46  2025.09.22_cpp_with_eigen_package/Eigen/LU  Normal file
@@ -0,0 +1,46 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_LU_MODULE_H
#define EIGEN_LU_MODULE_H

#include "Core"

#include "src/Core/util/DisableStupidWarnings.h"

/** \defgroup LU_Module LU module
 * This module includes %LU decomposition and related notions such as matrix inversion and determinant.
 * This module defines the following MatrixBase methods:
 *  - MatrixBase::inverse()
 *  - MatrixBase::determinant()
 *
 * \code
 * #include <Eigen/LU>
 * \endcode
 */

#include "src/misc/Kernel.h"
#include "src/misc/Image.h"

// IWYU pragma: begin_exports
#include "src/LU/FullPivLU.h"
#include "src/LU/PartialPivLU.h"
#ifdef EIGEN_USE_LAPACKE
#include "src/misc/lapacke_helpers.h"
#include "src/LU/PartialPivLU_LAPACKE.h"
#endif
#include "src/LU/Determinant.h"
#include "src/LU/InverseImpl.h"

#if defined EIGEN_VECTORIZE_SSE || defined EIGEN_VECTORIZE_NEON
#include "src/LU/arch/InverseSize4.h"
#endif
// IWYU pragma: end_exports

#include "src/Core/util/ReenableStupidWarnings.h"

#endif  // EIGEN_LU_MODULE_H
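A brief example of the LU-based facilities named above, i.e. a pivoted solve plus inverse() and determinant() (illustrative only):

#include <Eigen/LU>
#include <Eigen/Core>
#include <iostream>

int main() {
  Eigen::Matrix3d A;
  A << 1, 2, 3,
       4, 5, 6,
       7, 8, 10;
  Eigen::Vector3d b(3, 3, 4);

  Eigen::PartialPivLU<Eigen::Matrix3d> lu(A);  // A is invertible, partial pivoting suffices
  Eigen::Vector3d x = lu.solve(b);

  std::cout << "det(A)   = " << A.determinant() << "\n"
            << "A^{-1} b = " << (A.inverse() * b).transpose() << "\n"
            << "x        = " << x.transpose() << "\n";
}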
35  2025.09.22_cpp_with_eigen_package/Eigen/MetisSupport  Normal file
@@ -0,0 +1,35 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_METISSUPPORT_MODULE_H
#define EIGEN_METISSUPPORT_MODULE_H

#include "SparseCore"

#include "src/Core/util/DisableStupidWarnings.h"

extern "C" {
#include <metis.h>
}

/** \ingroup Support_modules
 * \defgroup MetisSupport_Module MetisSupport module
 *
 * \code
 * #include <Eigen/MetisSupport>
 * \endcode
 * This module defines an interface to the METIS reordering package (http://glaros.dtc.umn.edu/gkhome/views/metis).
 * It can be used just as any other built-in method as explained in \link OrderingMethods_Module here. \endlink
 */

// IWYU pragma: begin_exports
#include "src/MetisSupport/MetisSupport.h"
// IWYU pragma: end_exports

#include "src/Core/util/ReenableStupidWarnings.h"

#endif  // EIGEN_METISSUPPORT_MODULE_H
73  2025.09.22_cpp_with_eigen_package/Eigen/OrderingMethods  Normal file
@@ -0,0 +1,73 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_ORDERINGMETHODS_MODULE_H
#define EIGEN_ORDERINGMETHODS_MODULE_H

#include "SparseCore"

#include "src/Core/util/DisableStupidWarnings.h"

/**
 * \defgroup OrderingMethods_Module OrderingMethods module
 *
 * This module is currently for internal use only
 *
 * It defines various built-in and external ordering methods for sparse matrices.
 * They are typically used to reduce the number of elements during
 * the sparse matrix decomposition (LLT, LU, QR).
 * Precisely, in a preprocessing step, a permutation matrix P is computed using
 * those ordering methods and applied to the columns of the matrix.
 * Using for instance the sparse Cholesky decomposition, it is expected that
 * the number of nonzero elements in LLT(A*P) will be much smaller than in LLT(A).
 *
 *
 * Usage:
 * \code
 * #include <Eigen/OrderingMethods>
 * \endcode
 *
 * A simple usage is as a template parameter in the sparse decomposition classes:
 *
 * \code
 * SparseLU<MatrixType, COLAMDOrdering<int> > solver;
 * \endcode
 *
 * \code
 * SparseQR<MatrixType, COLAMDOrdering<int> > solver;
 * \endcode
 *
 * It is also possible to call a particular ordering method directly for your own purpose:
 * \code
 * AMDOrdering<int> ordering;
 * PermutationMatrix<Dynamic, Dynamic, int> perm;
 * SparseMatrix<double> A;
 * //Fill the matrix ...
 *
 * ordering(A, perm); // Call AMD
 * \endcode
 *
 * \note Some of these methods (like AMD or METIS), need the sparsity pattern
 * of the input matrix to be symmetric. When the matrix is structurally unsymmetric,
 * Eigen computes internally the pattern of \f$A^T*A\f$ before calling the method.
 * If your matrix is already symmetric (at least in structure), you can avoid that
 * by calling the method with a SelfAdjointView type.
 *
 * \code
 * // Call the ordering on the pattern of the lower triangular matrix A
 * ordering(A.selfadjointView<Lower>(), perm);
 * \endcode
 */

// IWYU pragma: begin_exports
#include "src/OrderingMethods/Amd.h"
#include "src/OrderingMethods/Ordering.h"
// IWYU pragma: end_exports

#include "src/Core/util/ReenableStupidWarnings.h"

#endif  // EIGEN_ORDERINGMETHODS_MODULE_H
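A compilable version of the AMD snippet from the documentation above, run on the self-adjoint view of a small pattern (illustrative only):

#include <Eigen/OrderingMethods>
#include <Eigen/SparseCore>
#include <iostream>
#include <vector>

int main() {
  // Small symmetric sparsity pattern assembled from triplets.
  std::vector<Eigen::Triplet<double>> t = {{0, 0, 4.0}, {1, 1, 4.0}, {2, 2, 4.0},
                                           {0, 2, 1.0}, {2, 0, 1.0}, {1, 2, 1.0}, {2, 1, 1.0}};
  Eigen::SparseMatrix<double> A(3, 3);
  A.setFromTriplets(t.begin(), t.end());

  Eigen::AMDOrdering<int> ordering;
  Eigen::PermutationMatrix<Eigen::Dynamic, Eigen::Dynamic, int> perm;
  ordering(A.selfadjointView<Eigen::Lower>(), perm);  // fill-reducing permutation
  std::cout << perm.indices().transpose() << "\n";
}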
51  2025.09.22_cpp_with_eigen_package/Eigen/PaStiXSupport  Normal file
@@ -0,0 +1,51 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_PASTIXSUPPORT_MODULE_H
#define EIGEN_PASTIXSUPPORT_MODULE_H

#include "SparseCore"

#include "src/Core/util/DisableStupidWarnings.h"

extern "C" {
#include <pastix_nompi.h>
#include <pastix.h>
}

#ifdef complex
#undef complex
#endif

/** \ingroup Support_modules
 * \defgroup PaStiXSupport_Module PaStiXSupport module
 *
 * This module provides an interface to the <a href="http://pastix.gforge.inria.fr/">PaSTiX</a> library.
 * PaSTiX is a general \b supernodal, \b parallel and \b opensource sparse solver.
 * It provides the two following main factorization classes:
 * - class PastixLLT : a supernodal, parallel LLt Cholesky factorization.
 * - class PastixLDLT: a supernodal, parallel LDLt Cholesky factorization.
 * - class PastixLU : a supernodal, parallel LU factorization (optimized for a symmetric pattern).
 *
 * \code
 * #include <Eigen/PaStiXSupport>
 * \endcode
 *
 * In order to use this module, the PaSTiX headers must be accessible from the include paths, and your binary must be
 * linked to the PaSTiX library and its dependencies. This wrapper requires PaStiX version 5.x compiled without MPI
 * support. The dependencies depend on how PaSTiX has been compiled. For a cmake based project, you can use our
 * FindPaSTiX.cmake module to help you in this task.
 *
 */

// IWYU pragma: begin_exports
#include "src/PaStiXSupport/PaStiXSupport.h"
// IWYU pragma: end_exports

#include "src/Core/util/ReenableStupidWarnings.h"

#endif  // EIGEN_PASTIXSUPPORT_MODULE_H
38  2025.09.22_cpp_with_eigen_package/Eigen/PardisoSupport  Normal file
@@ -0,0 +1,38 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_PARDISOSUPPORT_MODULE_H
#define EIGEN_PARDISOSUPPORT_MODULE_H

#include "SparseCore"

#include "src/Core/util/DisableStupidWarnings.h"

#include <mkl_pardiso.h>

/** \ingroup Support_modules
 * \defgroup PardisoSupport_Module PardisoSupport module
 *
 * This module brings support for the Intel(R) MKL PARDISO direct sparse solvers.
 *
 * \code
 * #include <Eigen/PardisoSupport>
 * \endcode
 *
 * In order to use this module, the MKL headers must be accessible from the include paths, and your binary must be
 * linked to the MKL library and its dependencies. See this \ref TopicUsingIntelMKL "page" for more information on
 * MKL-Eigen integration.
 *
 */

// IWYU pragma: begin_exports
#include "src/PardisoSupport/PardisoSupport.h"
// IWYU pragma: end_exports

#include "src/Core/util/ReenableStupidWarnings.h"

#endif  // EIGEN_PARDISOSUPPORT_MODULE_H
48  2025.09.22_cpp_with_eigen_package/Eigen/QR  Normal file
@@ -0,0 +1,48 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_QR_MODULE_H
#define EIGEN_QR_MODULE_H

#include "Core"

#include "Cholesky"
#include "Jacobi"
#include "Householder"

#include "src/Core/util/DisableStupidWarnings.h"

/** \defgroup QR_Module QR module
 *
 *
 *
 * This module provides various QR decompositions.
 * This module also provides some MatrixBase methods, including:
 *  - MatrixBase::householderQr()
 *  - MatrixBase::colPivHouseholderQr()
 *  - MatrixBase::fullPivHouseholderQr()
 *
 * \code
 * #include <Eigen/QR>
 * \endcode
 */

// IWYU pragma: begin_exports
#include "src/QR/HouseholderQR.h"
#include "src/QR/FullPivHouseholderQR.h"
#include "src/QR/ColPivHouseholderQR.h"
#include "src/QR/CompleteOrthogonalDecomposition.h"
#ifdef EIGEN_USE_LAPACKE
#include "src/misc/lapacke_helpers.h"
#include "src/QR/HouseholderQR_LAPACKE.h"
#include "src/QR/ColPivHouseholderQR_LAPACKE.h"
#endif
// IWYU pragma: end_exports

#include "src/Core/util/ReenableStupidWarnings.h"

#endif  // EIGEN_QR_MODULE_H
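A quick least-squares example using the column-pivoting QR decomposition listed above (illustrative only):

#include <Eigen/QR>
#include <Eigen/Core>
#include <iostream>

int main() {
  // Overdetermined system: 6 equations, 3 unknowns.
  Eigen::MatrixXd A = Eigen::MatrixXd::Random(6, 3);
  Eigen::VectorXd b = Eigen::VectorXd::Random(6);

  Eigen::ColPivHouseholderQR<Eigen::MatrixXd> qr(A);  // same as A.colPivHouseholderQr()
  Eigen::VectorXd x = qr.solve(b);                    // minimizes |A x - b|
  std::cout << "rank: " << qr.rank() << ", residual: " << (A * x - b).norm() << "\n";
}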
32  2025.09.22_cpp_with_eigen_package/Eigen/QtAlignedMalloc  Normal file
@@ -0,0 +1,32 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_QTMALLOC_MODULE_H
#define EIGEN_QTMALLOC_MODULE_H

#include "Core"

#if (!EIGEN_MALLOC_ALREADY_ALIGNED)

#include "src/Core/util/DisableStupidWarnings.h"

void *qMalloc(std::size_t size) { return Eigen::internal::aligned_malloc(size); }

void qFree(void *ptr) { Eigen::internal::aligned_free(ptr); }

void *qRealloc(void *ptr, std::size_t size) {
  void *newPtr = Eigen::internal::aligned_malloc(size);
  std::memcpy(newPtr, ptr, size);
  Eigen::internal::aligned_free(ptr);
  return newPtr;
}

#include "src/Core/util/ReenableStupidWarnings.h"

#endif

#endif  // EIGEN_QTMALLOC_MODULE_H
41  2025.09.22_cpp_with_eigen_package/Eigen/SPQRSupport  Normal file
@@ -0,0 +1,41 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_SPQRSUPPORT_MODULE_H
#define EIGEN_SPQRSUPPORT_MODULE_H

#include "SparseCore"

#include "src/Core/util/DisableStupidWarnings.h"

#include "SuiteSparseQR.hpp"

/** \ingroup Support_modules
 * \defgroup SPQRSupport_Module SuiteSparseQR module
 *
 * This module provides an interface to the SPQR library, which is part of the <a
 * href="http://www.suitesparse.com">suitesparse</a> package.
 *
 * \code
 * #include <Eigen/SPQRSupport>
 * \endcode
 *
 * In order to use this module, the SPQR headers must be accessible from the include paths, and your binary must be
 * linked to the SPQR library and its dependencies (Cholmod, AMD, COLAMD, ...). For a cmake based project, you can use
 * our FindSPQR.cmake and FindCholmod.cmake modules.
 *
 */

#include "CholmodSupport"

// IWYU pragma: begin_exports
#include "src/SPQRSupport/SuiteSparseQRSupport.h"
// IWYU pragma: end_exports

#include "src/Core/util/ReenableStupidWarnings.h"

#endif
56  2025.09.22_cpp_with_eigen_package/Eigen/SVD  Normal file
@@ -0,0 +1,56 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_SVD_MODULE_H
#define EIGEN_SVD_MODULE_H

#include "QR"
#include "Householder"
#include "Jacobi"

#include "src/Core/util/DisableStupidWarnings.h"

/** \defgroup SVD_Module SVD module
 *
 *
 *
 * This module provides SVD decomposition for matrices (both real and complex).
 * Two decomposition algorithms are provided:
 *  - JacobiSVD implementing two-sided Jacobi iterations is numerically very accurate, fast for small matrices, but very
 * slow for larger ones.
 *  - BDCSVD implementing a recursive divide & conquer strategy on top of an upper-bidiagonalization which remains fast
 * for large problems. These decompositions are accessible via the respective classes and the following MatrixBase methods:
 *  - MatrixBase::jacobiSvd()
 *  - MatrixBase::bdcSvd()
 *
 * \code
 * #include <Eigen/SVD>
 * \endcode
 */

// IWYU pragma: begin_exports
#include "src/misc/RealSvd2x2.h"
#include "src/SVD/UpperBidiagonalization.h"
#include "src/SVD/SVDBase.h"
#include "src/SVD/JacobiSVD.h"
#include "src/SVD/BDCSVD.h"
#ifdef EIGEN_USE_LAPACKE
#ifdef EIGEN_USE_MKL
#include "mkl_lapacke.h"
#else
#include "src/misc/lapacke.h"
#endif
#ifndef EIGEN_USE_LAPACKE_STRICT
#include "src/SVD/JacobiSVD_LAPACKE.h"
#endif
#include "src/SVD/BDCSVD_LAPACKE.h"
#endif
// IWYU pragma: end_exports

#include "src/Core/util/ReenableStupidWarnings.h"

#endif  // EIGEN_SVD_MODULE_H
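A small BDCSVD example of the least-squares solve described above (illustrative only; it uses the Eigen 3.4-style runtime computation options, which newer releases may also expose as a template argument):

#include <Eigen/SVD>
#include <Eigen/Core>
#include <iostream>

int main() {
  Eigen::MatrixXd A = Eigen::MatrixXd::Random(5, 3);
  Eigen::VectorXd b = Eigen::VectorXd::Random(5);

  // BDCSVD scales to large problems; JacobiSVD is a good choice for small ones.
  Eigen::BDCSVD<Eigen::MatrixXd> svd(A, Eigen::ComputeThinU | Eigen::ComputeThinV);
  Eigen::VectorXd x = svd.solve(b);   // least-squares solution
  std::cout << "singular values: " << svd.singularValues().transpose() << "\n"
            << "residual: " << (A * x - b).norm() << "\n";
}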
33  2025.09.22_cpp_with_eigen_package/Eigen/Sparse  Normal file
@@ -0,0 +1,33 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_SPARSE_MODULE_H
#define EIGEN_SPARSE_MODULE_H

/** \defgroup Sparse_Module Sparse meta-module
 *
 * Meta-module including all related modules:
 * - \ref SparseCore_Module
 * - \ref OrderingMethods_Module
 * - \ref SparseCholesky_Module
 * - \ref SparseLU_Module
 * - \ref SparseQR_Module
 * - \ref IterativeLinearSolvers_Module
 *
 \code
 #include <Eigen/Sparse>
 \endcode
 */

#include "SparseCore"
#include "OrderingMethods"
#include "SparseCholesky"
#include "SparseLU"
#include "SparseQR"
#include "IterativeLinearSolvers"

#endif  // EIGEN_SPARSE_MODULE_H
40  2025.09.22_cpp_with_eigen_package/Eigen/SparseCholesky  Normal file
@@ -0,0 +1,40 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2013 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_SPARSECHOLESKY_MODULE_H
#define EIGEN_SPARSECHOLESKY_MODULE_H

#include "SparseCore"
#include "OrderingMethods"

#include "src/Core/util/DisableStupidWarnings.h"

/**
 * \defgroup SparseCholesky_Module SparseCholesky module
 *
 * This module currently provides two variants of the direct sparse Cholesky decomposition for selfadjoint (hermitian)
 * matrices. Those decompositions are accessible via the following classes:
 *  - SimplicialLLT,
 *  - SimplicialLDLT
 *
 * Such problems can also be solved using the ConjugateGradient solver from the IterativeLinearSolvers module.
 *
 * \code
 * #include <Eigen/SparseCholesky>
 * \endcode
 */

// IWYU pragma: begin_exports
#include "src/SparseCholesky/SimplicialCholesky.h"
#include "src/SparseCholesky/SimplicialCholesky_impl.h"
// IWYU pragma: end_exports

#include "src/Core/util/ReenableStupidWarnings.h"

#endif  // EIGEN_SPARSECHOLESKY_MODULE_H
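A compact example of the SimplicialLDLT variant named above, applied to a small SPD tridiagonal system (illustrative only):

#include <Eigen/SparseCholesky>
#include <Eigen/SparseCore>
#include <iostream>
#include <vector>

int main() {
  const int n = 5;                                   // SPD tridiagonal test matrix
  std::vector<Eigen::Triplet<double>> t;
  for (int i = 0; i < n; ++i) {
    t.emplace_back(i, i, 2.0);
    if (i > 0) { t.emplace_back(i, i - 1, -1.0); t.emplace_back(i - 1, i, -1.0); }
  }
  Eigen::SparseMatrix<double> A(n, n);
  A.setFromTriplets(t.begin(), t.end());
  Eigen::VectorXd b = Eigen::VectorXd::Ones(n);

  Eigen::SimplicialLDLT<Eigen::SparseMatrix<double>> solver;
  solver.compute(A);                                 // symbolic + numeric factorization
  if (solver.info() != Eigen::Success) return 1;
  Eigen::VectorXd x = solver.solve(b);
  std::cout << (A * x - b).norm() << "\n";           // ~0
}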
70
2025.09.22_cpp_with_eigen_package/Eigen/SparseCore
Normal file
70
2025.09.22_cpp_with_eigen_package/Eigen/SparseCore
Normal file
@@ -0,0 +1,70 @@
|
||||
// This file is part of Eigen, a lightweight C++ template library
|
||||
// for linear algebra.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla
|
||||
// Public License v. 2.0. If a copy of the MPL was not distributed
|
||||
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
#ifndef EIGEN_SPARSECORE_MODULE_H
|
||||
#define EIGEN_SPARSECORE_MODULE_H
|
||||
|
||||
#include "Core"
|
||||
|
||||
#include "src/Core/util/DisableStupidWarnings.h"
|
||||
|
||||
#include <vector>
|
||||
#include <map>
|
||||
#include <cstdlib>
|
||||
#include <cstring>
|
||||
#include <algorithm>
|
||||
#include <numeric>
|
||||
|
||||
/**
|
||||
* \defgroup SparseCore_Module SparseCore module
|
||||
*
|
||||
* This module provides a sparse matrix representation, and basic associated matrix manipulations
|
||||
* and operations.
|
||||
*
|
||||
* See the \ref TutorialSparse "Sparse tutorial"
|
||||
*
|
||||
* \code
|
||||
* #include <Eigen/SparseCore>
|
||||
* \endcode
|
||||
*
|
||||
* This module depends on: Core.
|
||||
*/
|
||||
|
||||
// IWYU pragma: begin_exports
|
||||
#include "src/SparseCore/SparseUtil.h"
|
||||
#include "src/SparseCore/SparseMatrixBase.h"
|
||||
#include "src/SparseCore/SparseAssign.h"
|
||||
#include "src/SparseCore/CompressedStorage.h"
|
||||
#include "src/SparseCore/AmbiVector.h"
|
||||
#include "src/SparseCore/SparseCompressedBase.h"
|
||||
#include "src/SparseCore/SparseMatrix.h"
|
||||
#include "src/SparseCore/SparseMap.h"
|
||||
#include "src/SparseCore/SparseVector.h"
|
||||
#include "src/SparseCore/SparseRef.h"
|
||||
#include "src/SparseCore/SparseCwiseUnaryOp.h"
|
||||
#include "src/SparseCore/SparseCwiseBinaryOp.h"
|
||||
#include "src/SparseCore/SparseTranspose.h"
|
||||
#include "src/SparseCore/SparseBlock.h"
|
||||
#include "src/SparseCore/SparseDot.h"
|
||||
#include "src/SparseCore/SparseRedux.h"
|
||||
#include "src/SparseCore/SparseView.h"
|
||||
#include "src/SparseCore/SparseDiagonalProduct.h"
|
||||
#include "src/SparseCore/ConservativeSparseSparseProduct.h"
|
||||
#include "src/SparseCore/SparseSparseProductWithPruning.h"
|
||||
#include "src/SparseCore/SparseProduct.h"
|
||||
#include "src/SparseCore/SparseDenseProduct.h"
|
||||
#include "src/SparseCore/SparseSelfAdjointView.h"
|
||||
#include "src/SparseCore/SparseTriangularView.h"
|
||||
#include "src/SparseCore/TriangularSolver.h"
|
||||
#include "src/SparseCore/SparsePermutation.h"
|
||||
#include "src/SparseCore/SparseFuzzy.h"
|
||||
#include "src/SparseCore/SparseSolverBase.h"
|
||||
// IWYU pragma: end_exports
|
||||
|
||||
#include "src/Core/util/ReenableStupidWarnings.h"
|
||||
|
||||
#endif // EIGEN_SPARSECORE_MODULE_H
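
To make the compressed-storage representation concrete, here is a small sketch (mine, not from the Eigen sources) that fills a SparseMatrix from triplets and then walks its nonzeros outer index by outer index with InnerIterator.

#include <Eigen/SparseCore>
#include <cstdio>
#include <vector>

void demoSparseCore() {
  std::vector<Eigen::Triplet<double>> t;
  t.emplace_back(0, 0, 1.0);
  t.emplace_back(2, 1, 5.0);
  Eigen::SparseMatrix<double> A(3, 3);    // column-major by default
  A.setFromTriplets(t.begin(), t.end());

  for (int k = 0; k < A.outerSize(); ++k)  // one outer index per column here
    for (Eigen::SparseMatrix<double>::InnerIterator it(A, k); it; ++it)
      std::printf("A(%ld,%ld) = %g\n", static_cast<long>(it.row()), static_cast<long>(it.col()), it.value());
}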
50
2025.09.22_cpp_with_eigen_package/Eigen/SparseLU
Normal file
@@ -0,0 +1,50 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2012 Désiré Nuentsa-Wakam <desire.nuentsa_wakam@inria.fr>
// Copyright (C) 2012 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_SPARSELU_MODULE_H
#define EIGEN_SPARSELU_MODULE_H

#include "SparseCore"

/**
 * \defgroup SparseLU_Module SparseLU module
 * This module defines a supernodal factorization of general sparse matrices.
 * The code is fully optimized for supernode-panel updates with specialized kernels.
 * Please see the documentation of the SparseLU class for more details.
 */

// Ordering interface
#include "OrderingMethods"

#include "src/Core/util/DisableStupidWarnings.h"

// IWYU pragma: begin_exports
#include "src/SparseLU/SparseLU_Structs.h"
#include "src/SparseLU/SparseLU_SupernodalMatrix.h"
#include "src/SparseLU/SparseLUImpl.h"
#include "src/SparseCore/SparseColEtree.h"
#include "src/SparseLU/SparseLU_Memory.h"
#include "src/SparseLU/SparseLU_heap_relax_snode.h"
#include "src/SparseLU/SparseLU_relax_snode.h"
#include "src/SparseLU/SparseLU_pivotL.h"
#include "src/SparseLU/SparseLU_panel_dfs.h"
#include "src/SparseLU/SparseLU_kernel_bmod.h"
#include "src/SparseLU/SparseLU_panel_bmod.h"
#include "src/SparseLU/SparseLU_column_dfs.h"
#include "src/SparseLU/SparseLU_column_bmod.h"
#include "src/SparseLU/SparseLU_copy_to_ucol.h"
#include "src/SparseLU/SparseLU_pruneL.h"
#include "src/SparseLU/SparseLU_Utils.h"
#include "src/SparseLU/SparseLU.h"
// IWYU pragma: end_exports

#include "src/Core/util/ReenableStupidWarnings.h"

#endif  // EIGEN_SPARSELU_MODULE_H
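
A usage sketch for the supernodal LU solver (my own example, not from the sources), assuming a square, generally unsymmetric sparse matrix in compressed column storage:

#include <Eigen/SparseLU>
#include <Eigen/SparseCore>

Eigen::VectorXd solveGeneral(Eigen::SparseMatrix<double>& A, const Eigen::VectorXd& b) {
  A.makeCompressed();  // SparseLU expects compressed (CCS) storage
  Eigen::SparseLU<Eigen::SparseMatrix<double>, Eigen::COLAMDOrdering<int>> solver;
  solver.analyzePattern(A);  // fill-reducing ordering and symbolic factorization
  solver.factorize(A);       // numeric factorization
  if (solver.info() != Eigen::Success) return Eigen::VectorXd();
  return solver.solve(b);
}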
38
2025.09.22_cpp_with_eigen_package/Eigen/SparseQR
Normal file
@@ -0,0 +1,38 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_SPARSEQR_MODULE_H
#define EIGEN_SPARSEQR_MODULE_H

#include "SparseCore"
#include "OrderingMethods"
#include "src/Core/util/DisableStupidWarnings.h"

/** \defgroup SparseQR_Module SparseQR module
 * \brief Provides QR decomposition for sparse matrices
 *
 * This module provides a simplicial version of the left-looking Sparse QR decomposition.
 * The columns of the input matrix should be reordered to limit the fill-in during the
 * decomposition. Built-in methods (COLAMD, AMD) or external methods (METIS) can be used to this end.
 * See the \link OrderingMethods_Module OrderingMethods\endlink module for the list
 * of built-in and external ordering methods.
 *
 * \code
 * #include <Eigen/SparseQR>
 * \endcode
 *
 *
 */

// IWYU pragma: begin_exports
#include "src/SparseCore/SparseColEtree.h"
#include "src/SparseQR/SparseQR.h"
// IWYU pragma: end_exports

#include "src/Core/util/ReenableStupidWarnings.h"

#endif  // EIGEN_SPARSEQR_MODULE_H
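
The following sketch (mine, not part of the sources) uses the module for a sparse least-squares solve; COLAMDOrdering is one of the built-in ordering methods mentioned above.

#include <Eigen/SparseQR>
#include <Eigen/SparseCore>

// Minimize ||A x - b||_2 for a (possibly rectangular) sparse A.
Eigen::VectorXd leastSquares(Eigen::SparseMatrix<double>& A, const Eigen::VectorXd& b) {
  A.makeCompressed();  // SparseQR requires compressed storage
  Eigen::SparseQR<Eigen::SparseMatrix<double>, Eigen::COLAMDOrdering<int>> qr(A);
  if (qr.info() != Eigen::Success) return Eigen::VectorXd();
  return qr.solve(b);
}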
30
2025.09.22_cpp_with_eigen_package/Eigen/StdDeque
Normal file
@@ -0,0 +1,30 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>
// Copyright (C) 2009 Hauke Heibel <hauke.heibel@googlemail.com>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_STDDEQUE_MODULE_H
#define EIGEN_STDDEQUE_MODULE_H

#include "Core"
#include <deque>

#if EIGEN_COMP_MSVC && EIGEN_OS_WIN64 && \
    (EIGEN_MAX_STATIC_ALIGN_BYTES <= 16) /* MSVC auto aligns up to 16 bytes in 64 bit builds */

#define EIGEN_DEFINE_STL_DEQUE_SPECIALIZATION(...)

#else

// IWYU pragma: begin_exports
#include "src/StlSupport/StdDeque.h"
// IWYU pragma: end_exports

#endif

#endif  // EIGEN_STDDEQUE_MODULE_H
29
2025.09.22_cpp_with_eigen_package/Eigen/StdList
Normal file
@@ -0,0 +1,29 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2009 Hauke Heibel <hauke.heibel@googlemail.com>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_STDLIST_MODULE_H
#define EIGEN_STDLIST_MODULE_H

#include "Core"
#include <list>

#if EIGEN_COMP_MSVC && EIGEN_OS_WIN64 && \
    (EIGEN_MAX_STATIC_ALIGN_BYTES <= 16) /* MSVC auto aligns up to 16 bytes in 64 bit builds */

#define EIGEN_DEFINE_STL_LIST_SPECIALIZATION(...)

#else

// IWYU pragma: begin_exports
#include "src/StlSupport/StdList.h"
// IWYU pragma: end_exports

#endif

#endif  // EIGEN_STDLIST_MODULE_H
30
2025.09.22_cpp_with_eigen_package/Eigen/StdVector
Normal file
@@ -0,0 +1,30 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>
// Copyright (C) 2009 Hauke Heibel <hauke.heibel@googlemail.com>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_STDVECTOR_MODULE_H
#define EIGEN_STDVECTOR_MODULE_H

#include "Core"
#include <vector>

#if EIGEN_COMP_MSVC && EIGEN_OS_WIN64 && \
    (EIGEN_MAX_STATIC_ALIGN_BYTES <= 16) /* MSVC auto aligns up to 16 bytes in 64 bit builds */

#define EIGEN_DEFINE_STL_VECTOR_SPECIALIZATION(...)

#else

// IWYU pragma: begin_exports
#include "src/StlSupport/StdVector.h"
// IWYU pragma: end_exports

#endif

#endif  // EIGEN_STDVECTOR_MODULE_H
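
The three Std* headers exist so that fixed-size vectorizable Eigen types can be kept in the corresponding standard containers even on compilers with limited alignment support. A typical pattern (my own sketch, not from the sources) pairs the container with Eigen::aligned_allocator; the same idea applies to StdDeque and StdList.

#include <Eigen/StdVector>
#include <vector>

void demoAlignedVector() {
  // Vector4f is a fixed-size vectorizable type, so request an aligned allocator explicitly.
  std::vector<Eigen::Vector4f, Eigen::aligned_allocator<Eigen::Vector4f>> pts;
  pts.emplace_back(1.f, 2.f, 3.f, 4.f);
  pts.push_back(Eigen::Vector4f::Zero());
}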
70
2025.09.22_cpp_with_eigen_package/Eigen/SuperLUSupport
Normal file
@@ -0,0 +1,70 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_SUPERLUSUPPORT_MODULE_H
#define EIGEN_SUPERLUSUPPORT_MODULE_H

#include "SparseCore"

#include "src/Core/util/DisableStupidWarnings.h"

#ifdef EMPTY
#define EIGEN_EMPTY_WAS_ALREADY_DEFINED
#endif

typedef int int_t;
#include <slu_Cnames.h>
#include <supermatrix.h>
#include <slu_util.h>

// slu_util.h defines a preprocessor token named EMPTY which is really polluting,
// so we remove it in favor of a SUPERLU_EMPTY token.
// If EMPTY was already defined then we don't undef it.

#if defined(EIGEN_EMPTY_WAS_ALREADY_DEFINED)
#undef EIGEN_EMPTY_WAS_ALREADY_DEFINED
#elif defined(EMPTY)
#undef EMPTY
#endif

#define SUPERLU_EMPTY (-1)

namespace Eigen {
struct SluMatrix;
}

/** \ingroup Support_modules
 * \defgroup SuperLUSupport_Module SuperLUSupport module
 *
 * This module provides an interface to the <a href="http://crd-legacy.lbl.gov/~xiaoye/SuperLU/">SuperLU</a> library.
 * It provides the following factorization classes:
 * - class SuperLU: a supernodal sequential LU factorization.
 * - class SuperILU: a supernodal sequential incomplete LU factorization (to be used as a preconditioner for iterative
 * methods).
 *
 * \warning This wrapper requires at least version 4.0 of SuperLU. The 3.x versions are not supported.
 *
 * \warning When including this module, you have to use SUPERLU_EMPTY instead of EMPTY which is no longer defined
 * because it is too polluting.
 *
 * \code
 * #include <Eigen/SuperLUSupport>
 * \endcode
 *
 * In order to use this module, the superlu headers must be accessible from the include paths, and your binary must be
 * linked to the superlu library and its dependencies. The dependencies depend on how superlu has been compiled. For a
 * cmake based project, you can use our FindSuperLU.cmake module to help you in this task.
 *
 */

// IWYU pragma: begin_exports
#include "src/SuperLUSupport/SuperLUSupport.h"
// IWYU pragma: end_exports

#include "src/Core/util/ReenableStupidWarnings.h"

#endif  // EIGEN_SUPERLUSUPPORT_MODULE_H
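
A hedged usage sketch (mine, not part of the sources) showing the usual sparse-solver workflow with this wrapper; it only builds when the SuperLU headers and library are available, as noted above.

#include <Eigen/SuperLUSupport>
#include <Eigen/SparseCore>

// Requires SuperLU (>= 4.0) at build and link time.
Eigen::VectorXd solveWithSuperLU(const Eigen::SparseMatrix<double>& A, const Eigen::VectorXd& b) {
  Eigen::SuperLU<Eigen::SparseMatrix<double>> solver;
  solver.compute(A);  // symbolic + numeric LU factorization
  if (solver.info() != Eigen::Success) return Eigen::VectorXd();
  return solver.solve(b);
}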
80
2025.09.22_cpp_with_eigen_package/Eigen/ThreadPool
Normal file
@@ -0,0 +1,80 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2016 Benoit Steiner <benoit.steiner.goog@gmail.com>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_THREADPOOL_MODULE_H
#define EIGEN_THREADPOOL_MODULE_H

#include "Core"

#include "src/Core/util/DisableStupidWarnings.h"

/** \defgroup ThreadPool_Module ThreadPool Module
 *
 * This module provides two thread pool implementations:
 *  - a simple reference implementation
 *  - a faster non-blocking implementation
 *
 * \code
 * #include <Eigen/ThreadPool>
 * \endcode
 */

#include <cstddef>
#include <cstring>
#include <time.h>

#include <vector>
#include <atomic>
#include <condition_variable>
#include <deque>
#include <mutex>
#include <thread>
#include <functional>
#include <memory>
#include <utility>

// There are non-parenthesized calls to "max" in the <unordered_map> header,
// which trigger a check in test/main.h causing compilation to fail.
// We work around the check here by removing the check for max in
// the case where we have to emulate thread_local.
#ifdef max
#undef max
#endif
#include <unordered_map>

#include "src/Core/util/Meta.h"
#include "src/Core/util/MaxSizeVector.h"

#ifndef EIGEN_MUTEX
#define EIGEN_MUTEX std::mutex
#endif
#ifndef EIGEN_MUTEX_LOCK
#define EIGEN_MUTEX_LOCK std::unique_lock<std::mutex>
#endif
#ifndef EIGEN_CONDVAR
#define EIGEN_CONDVAR std::condition_variable
#endif

// IWYU pragma: begin_exports
#include "src/ThreadPool/ThreadLocal.h"
#include "src/ThreadPool/ThreadYield.h"
#include "src/ThreadPool/ThreadCancel.h"
#include "src/ThreadPool/EventCount.h"
#include "src/ThreadPool/RunQueue.h"
#include "src/ThreadPool/ThreadPoolInterface.h"
#include "src/ThreadPool/ThreadEnvironment.h"
#include "src/ThreadPool/Barrier.h"
#include "src/ThreadPool/NonBlockingThreadPool.h"
#include "src/ThreadPool/CoreThreadPoolDevice.h"
#include "src/ThreadPool/ForkJoin.h"
// IWYU pragma: end_exports

#include "src/Core/util/ReenableStupidWarnings.h"

#endif  // EIGEN_THREADPOOL_MODULE_H
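
A small scheduling sketch (my own, assuming the ThreadPool and Barrier classes exported above keep their usual Schedule()/Notify()/Wait() interfaces):

#include <Eigen/ThreadPool>
#include <atomic>
#include <cstdio>

void demoThreadPool() {
  const int kTasks = 8;
  Eigen::ThreadPool pool(4);    // non-blocking pool with 4 worker threads
  Eigen::Barrier done(kTasks);  // counts down as tasks finish
  std::atomic<int> sum{0};

  for (int i = 0; i < kTasks; ++i) {
    pool.Schedule([i, &sum, &done] {
      sum.fetch_add(i, std::memory_order_relaxed);
      done.Notify();
    });
  }
  done.Wait();  // block until every task has notified
  std::printf("sum = %d\n", sum.load());
}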
42
2025.09.22_cpp_with_eigen_package/Eigen/UmfPackSupport
Normal file
@@ -0,0 +1,42 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_UMFPACKSUPPORT_MODULE_H
#define EIGEN_UMFPACKSUPPORT_MODULE_H

#include "SparseCore"

#include "src/Core/util/DisableStupidWarnings.h"

extern "C" {
#include <umfpack.h>
}

/** \ingroup Support_modules
 * \defgroup UmfPackSupport_Module UmfPackSupport module
 *
 * This module provides an interface to the UmfPack library which is part of the <a
 * href="http://www.suitesparse.com">suitesparse</a> package. It provides the following factorization class:
 * - class UmfPackLU: a multifrontal sequential LU factorization.
 *
 * \code
 * #include <Eigen/UmfPackSupport>
 * \endcode
 *
 * In order to use this module, the umfpack headers must be accessible from the include paths, and your binary must be
 * linked to the umfpack library and its dependencies. The dependencies depend on how umfpack has been compiled. For a
 * cmake based project, you can use our FindUmfPack.cmake module to help you in this task.
 *
 */

// IWYU pragma: begin_exports
#include "src/UmfPackSupport/UmfPackSupport.h"
// IWYU pragma: end_exports

#include "src/Core/util/ReenableStupidWarnings.h"

#endif  // EIGEN_UMFPACKSUPPORT_MODULE_H
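
A hedged usage sketch (mine, not from the sources); it follows the same SparseSolverBase workflow as the other wrappers and assumes UMFPACK is installed and linked.

#include <Eigen/UmfPackSupport>
#include <Eigen/SparseCore>

// Requires UMFPACK (SuiteSparse); the scalar type should be double or std::complex<double>.
Eigen::VectorXd solveWithUmfPack(const Eigen::SparseMatrix<double>& A, const Eigen::VectorXd& b) {
  Eigen::UmfPackLU<Eigen::SparseMatrix<double>> solver;
  solver.compute(A);  // multifrontal LU factorization
  if (solver.info() != Eigen::Success) return Eigen::VectorXd();
  return solver.solve(b);
}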
14
2025.09.22_cpp_with_eigen_package/Eigen/Version
Normal file
@@ -0,0 +1,14 @@
#ifndef EIGEN_VERSION_H
#define EIGEN_VERSION_H

// The "WORLD" version will forever remain "3" for the "Eigen3" library.
#define EIGEN_WORLD_VERSION 3
// As of Eigen3 5.0.0, we have moved to Semantic Versioning (semver.org).
#define EIGEN_MAJOR_VERSION 5
#define EIGEN_MINOR_VERSION 0
#define EIGEN_PATCH_VERSION 0
#define EIGEN_PRERELEASE_VERSION "dev"
#define EIGEN_BUILD_VERSION "master"
#define EIGEN_VERSION_STRING "5.0.0-dev+master"

#endif  // EIGEN_VERSION_H
|
||||
@@ -0,0 +1,423 @@
|
||||
#ifndef EIGEN_ACCELERATESUPPORT_H
|
||||
#define EIGEN_ACCELERATESUPPORT_H
|
||||
|
||||
#include <Accelerate/Accelerate.h>
|
||||
|
||||
#include <Eigen/Sparse>
|
||||
|
||||
namespace Eigen {
|
||||
|
||||
template <typename MatrixType_, int UpLo_, SparseFactorization_t Solver_, bool EnforceSquare_>
|
||||
class AccelerateImpl;
|
||||
|
||||
/** \ingroup AccelerateSupport_Module
|
||||
* \typedef AccelerateLLT
|
||||
* \brief A direct Cholesky (LLT) factorization and solver based on Accelerate
|
||||
*
|
||||
* \warning Only single and double precision real scalar types are supported by Accelerate
|
||||
*
|
||||
* \tparam MatrixType_ the type of the sparse matrix A, it must be a SparseMatrix<>
|
||||
* \tparam UpLo_ additional information about the matrix structure. Default is Lower.
|
||||
*
|
||||
* \sa \ref TutorialSparseSolverConcept, class AccelerateLLT
|
||||
*/
|
||||
template <typename MatrixType, int UpLo = Lower>
|
||||
using AccelerateLLT = AccelerateImpl<MatrixType, UpLo | Symmetric, SparseFactorizationCholesky, true>;
|
||||
|
||||
/** \ingroup AccelerateSupport_Module
|
||||
* \typedef AccelerateLDLT
|
||||
* \brief The default Cholesky (LDLT) factorization and solver based on Accelerate
|
||||
*
|
||||
* \warning Only single and double precision real scalar types are supported by Accelerate
|
||||
*
|
||||
* \tparam MatrixType_ the type of the sparse matrix A, it must be a SparseMatrix<>
|
||||
* \tparam UpLo_ additional information about the matrix structure. Default is Lower.
|
||||
*
|
||||
* \sa \ref TutorialSparseSolverConcept, class AccelerateLDLT
|
||||
*/
|
||||
template <typename MatrixType, int UpLo = Lower>
|
||||
using AccelerateLDLT = AccelerateImpl<MatrixType, UpLo | Symmetric, SparseFactorizationLDLT, true>;
|
||||
|
||||
/** \ingroup AccelerateSupport_Module
|
||||
* \typedef AccelerateLDLTUnpivoted
|
||||
* \brief A direct Cholesky-like LDL^T factorization and solver based on Accelerate with only 1x1 pivots and no pivoting
|
||||
*
|
||||
* \warning Only single and double precision real scalar types are supported by Accelerate
|
||||
*
|
||||
* \tparam MatrixType_ the type of the sparse matrix A, it must be a SparseMatrix<>
|
||||
* \tparam UpLo_ additional information about the matrix structure. Default is Lower.
|
||||
*
|
||||
* \sa \ref TutorialSparseSolverConcept, class AccelerateLDLTUnpivoted
|
||||
*/
|
||||
template <typename MatrixType, int UpLo = Lower>
|
||||
using AccelerateLDLTUnpivoted = AccelerateImpl<MatrixType, UpLo | Symmetric, SparseFactorizationLDLTUnpivoted, true>;
|
||||
|
||||
/** \ingroup AccelerateSupport_Module
|
||||
* \typedef AccelerateLDLTSBK
|
||||
* \brief A direct Cholesky (LDLT) factorization and solver based on Accelerate with Supernode Bunch-Kaufman and static
|
||||
* pivoting
|
||||
*
|
||||
* \warning Only single and double precision real scalar types are supported by Accelerate
|
||||
*
|
||||
* \tparam MatrixType_ the type of the sparse matrix A, it must be a SparseMatrix<>
|
||||
* \tparam UpLo_ additional information about the matrix structure. Default is Lower.
|
||||
*
|
||||
* \sa \ref TutorialSparseSolverConcept, class AccelerateLDLTSBK
|
||||
*/
|
||||
template <typename MatrixType, int UpLo = Lower>
|
||||
using AccelerateLDLTSBK = AccelerateImpl<MatrixType, UpLo | Symmetric, SparseFactorizationLDLTSBK, true>;
|
||||
|
||||
/** \ingroup AccelerateSupport_Module
|
||||
* \typedef AccelerateLDLTTPP
|
||||
* \brief A direct Cholesky (LDLT) factorization and solver based on Accelerate with full threshold partial pivoting
|
||||
*
|
||||
* \warning Only single and double precision real scalar types are supported by Accelerate
|
||||
*
|
||||
* \tparam MatrixType_ the type of the sparse matrix A, it must be a SparseMatrix<>
|
||||
* \tparam UpLo_ additional information about the matrix structure. Default is Lower.
|
||||
*
|
||||
* \sa \ref TutorialSparseSolverConcept, class AccelerateLDLTTPP
|
||||
*/
|
||||
template <typename MatrixType, int UpLo = Lower>
|
||||
using AccelerateLDLTTPP = AccelerateImpl<MatrixType, UpLo | Symmetric, SparseFactorizationLDLTTPP, true>;
|
||||
|
||||
/** \ingroup AccelerateSupport_Module
|
||||
* \typedef AccelerateQR
|
||||
* \brief A QR factorization and solver based on Accelerate
|
||||
*
|
||||
* \warning Only single and double precision real scalar types are supported by Accelerate
|
||||
*
|
||||
* \tparam MatrixType_ the type of the sparse matrix A, it must be a SparseMatrix<>
|
||||
*
|
||||
* \sa \ref TutorialSparseSolverConcept, class AccelerateQR
|
||||
*/
|
||||
template <typename MatrixType>
|
||||
using AccelerateQR = AccelerateImpl<MatrixType, 0, SparseFactorizationQR, false>;
|
||||
|
||||
/** \ingroup AccelerateSupport_Module
|
||||
* \typedef AccelerateCholeskyAtA
|
||||
* \brief A QR factorization and solver based on Accelerate without storing Q (equivalent to A^TA = R^T R)
|
||||
*
|
||||
* \warning Only single and double precision real scalar types are supported by Accelerate
|
||||
*
|
||||
* \tparam MatrixType_ the type of the sparse matrix A, it must be a SparseMatrix<>
|
||||
*
|
||||
* \sa \ref TutorialSparseSolverConcept, class AccelerateCholeskyAtA
|
||||
*/
|
||||
template <typename MatrixType>
|
||||
using AccelerateCholeskyAtA = AccelerateImpl<MatrixType, 0, SparseFactorizationCholeskyAtA, false>;
|
||||
|
||||
namespace internal {
|
||||
template <typename T>
|
||||
struct AccelFactorizationDeleter {
|
||||
void operator()(T* sym) {
|
||||
if (sym) {
|
||||
SparseCleanup(*sym);
|
||||
delete sym;
|
||||
sym = nullptr;
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
template <typename DenseVecT, typename DenseMatT, typename SparseMatT, typename NumFactT>
|
||||
struct SparseTypesTraitBase {
|
||||
typedef DenseVecT AccelDenseVector;
|
||||
typedef DenseMatT AccelDenseMatrix;
|
||||
typedef SparseMatT AccelSparseMatrix;
|
||||
|
||||
typedef SparseOpaqueSymbolicFactorization SymbolicFactorization;
|
||||
typedef NumFactT NumericFactorization;
|
||||
|
||||
typedef AccelFactorizationDeleter<SymbolicFactorization> SymbolicFactorizationDeleter;
|
||||
typedef AccelFactorizationDeleter<NumericFactorization> NumericFactorizationDeleter;
|
||||
};
|
||||
|
||||
template <typename Scalar>
|
||||
struct SparseTypesTrait {};
|
||||
|
||||
template <>
|
||||
struct SparseTypesTrait<double> : SparseTypesTraitBase<DenseVector_Double, DenseMatrix_Double, SparseMatrix_Double,
|
||||
SparseOpaqueFactorization_Double> {};
|
||||
|
||||
template <>
|
||||
struct SparseTypesTrait<float>
|
||||
: SparseTypesTraitBase<DenseVector_Float, DenseMatrix_Float, SparseMatrix_Float, SparseOpaqueFactorization_Float> {
|
||||
};
|
||||
|
||||
} // end namespace internal
|
||||
|
||||
template <typename MatrixType_, int UpLo_, SparseFactorization_t Solver_, bool EnforceSquare_>
|
||||
class AccelerateImpl : public SparseSolverBase<AccelerateImpl<MatrixType_, UpLo_, Solver_, EnforceSquare_> > {
|
||||
protected:
|
||||
using Base = SparseSolverBase<AccelerateImpl>;
|
||||
using Base::derived;
|
||||
using Base::m_isInitialized;
|
||||
|
||||
public:
|
||||
using Base::_solve_impl;
|
||||
|
||||
typedef MatrixType_ MatrixType;
|
||||
typedef typename MatrixType::Scalar Scalar;
|
||||
typedef typename MatrixType::StorageIndex StorageIndex;
|
||||
enum { ColsAtCompileTime = Dynamic, MaxColsAtCompileTime = Dynamic };
|
||||
enum { UpLo = UpLo_ };
|
||||
|
||||
using AccelDenseVector = typename internal::SparseTypesTrait<Scalar>::AccelDenseVector;
|
||||
using AccelDenseMatrix = typename internal::SparseTypesTrait<Scalar>::AccelDenseMatrix;
|
||||
using AccelSparseMatrix = typename internal::SparseTypesTrait<Scalar>::AccelSparseMatrix;
|
||||
using SymbolicFactorization = typename internal::SparseTypesTrait<Scalar>::SymbolicFactorization;
|
||||
using NumericFactorization = typename internal::SparseTypesTrait<Scalar>::NumericFactorization;
|
||||
using SymbolicFactorizationDeleter = typename internal::SparseTypesTrait<Scalar>::SymbolicFactorizationDeleter;
|
||||
using NumericFactorizationDeleter = typename internal::SparseTypesTrait<Scalar>::NumericFactorizationDeleter;
|
||||
|
||||
AccelerateImpl() {
|
||||
m_isInitialized = false;
|
||||
|
||||
auto check_flag_set = [](int value, int flag) { return ((value & flag) == flag); };
|
||||
|
||||
if (check_flag_set(UpLo_, Symmetric)) {
|
||||
m_sparseKind = SparseSymmetric;
|
||||
m_triType = (UpLo_ & Lower) ? SparseLowerTriangle : SparseUpperTriangle;
|
||||
} else if (check_flag_set(UpLo_, UnitLower)) {
|
||||
m_sparseKind = SparseUnitTriangular;
|
||||
m_triType = SparseLowerTriangle;
|
||||
} else if (check_flag_set(UpLo_, UnitUpper)) {
|
||||
m_sparseKind = SparseUnitTriangular;
|
||||
m_triType = SparseUpperTriangle;
|
||||
} else if (check_flag_set(UpLo_, StrictlyLower)) {
|
||||
m_sparseKind = SparseTriangular;
|
||||
m_triType = SparseLowerTriangle;
|
||||
} else if (check_flag_set(UpLo_, StrictlyUpper)) {
|
||||
m_sparseKind = SparseTriangular;
|
||||
m_triType = SparseUpperTriangle;
|
||||
} else if (check_flag_set(UpLo_, Lower)) {
|
||||
m_sparseKind = SparseTriangular;
|
||||
m_triType = SparseLowerTriangle;
|
||||
} else if (check_flag_set(UpLo_, Upper)) {
|
||||
m_sparseKind = SparseTriangular;
|
||||
m_triType = SparseUpperTriangle;
|
||||
} else {
|
||||
m_sparseKind = SparseOrdinary;
|
||||
m_triType = (UpLo_ & Lower) ? SparseLowerTriangle : SparseUpperTriangle;
|
||||
}
|
||||
|
||||
m_order = SparseOrderDefault;
|
||||
}
|
||||
|
||||
explicit AccelerateImpl(const MatrixType& matrix) : AccelerateImpl() { compute(matrix); }
|
||||
|
||||
~AccelerateImpl() {}
|
||||
|
||||
inline Index cols() const { return m_nCols; }
|
||||
inline Index rows() const { return m_nRows; }
|
||||
|
||||
ComputationInfo info() const {
|
||||
eigen_assert(m_isInitialized && "Decomposition is not initialized.");
|
||||
return m_info;
|
||||
}
|
||||
|
||||
void analyzePattern(const MatrixType& matrix);
|
||||
|
||||
void factorize(const MatrixType& matrix);
|
||||
|
||||
void compute(const MatrixType& matrix);
|
||||
|
||||
template <typename Rhs, typename Dest>
|
||||
void _solve_impl(const MatrixBase<Rhs>& b, MatrixBase<Dest>& dest) const;
|
||||
|
||||
/** Sets the ordering algorithm to use. */
|
||||
void setOrder(SparseOrder_t order) { m_order = order; }
|
||||
|
||||
private:
|
||||
template <typename T>
|
||||
void buildAccelSparseMatrix(const SparseMatrix<T>& a, AccelSparseMatrix& A, std::vector<long>& columnStarts) {
|
||||
const Index nColumnsStarts = a.cols() + 1;
|
||||
|
||||
columnStarts.resize(nColumnsStarts);
|
||||
|
||||
for (Index i = 0; i < nColumnsStarts; i++) columnStarts[i] = a.outerIndexPtr()[i];
|
||||
|
||||
SparseAttributes_t attributes{};
|
||||
attributes.transpose = false;
|
||||
attributes.triangle = m_triType;
|
||||
attributes.kind = m_sparseKind;
|
||||
|
||||
SparseMatrixStructure structure{};
|
||||
structure.attributes = attributes;
|
||||
structure.rowCount = static_cast<int>(a.rows());
|
||||
structure.columnCount = static_cast<int>(a.cols());
|
||||
structure.blockSize = 1;
|
||||
structure.columnStarts = columnStarts.data();
|
||||
structure.rowIndices = const_cast<int*>(a.innerIndexPtr());
|
||||
|
||||
A.structure = structure;
|
||||
A.data = const_cast<T*>(a.valuePtr());
|
||||
}
|
||||
|
||||
void doAnalysis(AccelSparseMatrix& A) {
|
||||
m_numericFactorization.reset(nullptr);
|
||||
|
||||
SparseSymbolicFactorOptions opts{};
|
||||
opts.control = SparseDefaultControl;
|
||||
opts.orderMethod = m_order;
|
||||
opts.order = nullptr;
|
||||
opts.ignoreRowsAndColumns = nullptr;
|
||||
opts.malloc = malloc;
|
||||
opts.free = free;
|
||||
opts.reportError = nullptr;
|
||||
|
||||
m_symbolicFactorization.reset(new SymbolicFactorization(SparseFactor(Solver_, A.structure, opts)));
|
||||
|
||||
SparseStatus_t status = m_symbolicFactorization->status;
|
||||
|
||||
updateInfoStatus(status);
|
||||
|
||||
if (status != SparseStatusOK) m_symbolicFactorization.reset(nullptr);
|
||||
}
|
||||
|
||||
void doFactorization(AccelSparseMatrix& A) {
|
||||
SparseStatus_t status = SparseStatusReleased;
|
||||
|
||||
if (m_symbolicFactorization) {
|
||||
m_numericFactorization.reset(new NumericFactorization(SparseFactor(*m_symbolicFactorization, A)));
|
||||
|
||||
status = m_numericFactorization->status;
|
||||
|
||||
if (status != SparseStatusOK) m_numericFactorization.reset(nullptr);
|
||||
}
|
||||
|
||||
updateInfoStatus(status);
|
||||
}
|
||||
|
||||
protected:
|
||||
void updateInfoStatus(SparseStatus_t status) const {
|
||||
switch (status) {
|
||||
case SparseStatusOK:
|
||||
m_info = Success;
|
||||
break;
|
||||
case SparseFactorizationFailed:
|
||||
case SparseMatrixIsSingular:
|
||||
m_info = NumericalIssue;
|
||||
break;
|
||||
case SparseInternalError:
|
||||
case SparseParameterError:
|
||||
case SparseStatusReleased:
|
||||
default:
|
||||
m_info = InvalidInput;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
mutable ComputationInfo m_info;
|
||||
Index m_nRows, m_nCols;
|
||||
std::unique_ptr<SymbolicFactorization, SymbolicFactorizationDeleter> m_symbolicFactorization;
|
||||
std::unique_ptr<NumericFactorization, NumericFactorizationDeleter> m_numericFactorization;
|
||||
SparseKind_t m_sparseKind;
|
||||
SparseTriangle_t m_triType;
|
||||
SparseOrder_t m_order;
|
||||
};
|
||||
|
||||
/** Computes the symbolic and numeric decomposition of matrix \a a */
|
||||
template <typename MatrixType_, int UpLo_, SparseFactorization_t Solver_, bool EnforceSquare_>
|
||||
void AccelerateImpl<MatrixType_, UpLo_, Solver_, EnforceSquare_>::compute(const MatrixType& a) {
|
||||
if (EnforceSquare_) eigen_assert(a.rows() == a.cols());
|
||||
|
||||
m_nRows = a.rows();
|
||||
m_nCols = a.cols();
|
||||
|
||||
AccelSparseMatrix A{};
|
||||
std::vector<long> columnStarts;
|
||||
|
||||
buildAccelSparseMatrix(a, A, columnStarts);
|
||||
|
||||
doAnalysis(A);
|
||||
|
||||
if (m_symbolicFactorization) doFactorization(A);
|
||||
|
||||
m_isInitialized = true;
|
||||
}
|
||||
|
||||
/** Performs a symbolic decomposition on the sparsity pattern of matrix \a a.
|
||||
*
|
||||
* This function is particularly useful when solving for several problems having the same structure.
|
||||
*
|
||||
* \sa factorize()
|
||||
*/
|
||||
template <typename MatrixType_, int UpLo_, SparseFactorization_t Solver_, bool EnforceSquare_>
|
||||
void AccelerateImpl<MatrixType_, UpLo_, Solver_, EnforceSquare_>::analyzePattern(const MatrixType& a) {
|
||||
if (EnforceSquare_) eigen_assert(a.rows() == a.cols());
|
||||
|
||||
m_nRows = a.rows();
|
||||
m_nCols = a.cols();
|
||||
|
||||
AccelSparseMatrix A{};
|
||||
std::vector<long> columnStarts;
|
||||
|
||||
buildAccelSparseMatrix(a, A, columnStarts);
|
||||
|
||||
doAnalysis(A);
|
||||
|
||||
m_isInitialized = true;
|
||||
}
|
||||
|
||||
/** Performs a numeric decomposition of matrix \a a.
|
||||
*
|
||||
* The given matrix must have the same sparsity pattern as the matrix on which the symbolic decomposition has been
|
||||
* performed.
|
||||
*
|
||||
* \sa analyzePattern()
|
||||
*/
|
||||
template <typename MatrixType_, int UpLo_, SparseFactorization_t Solver_, bool EnforceSquare_>
|
||||
void AccelerateImpl<MatrixType_, UpLo_, Solver_, EnforceSquare_>::factorize(const MatrixType& a) {
|
||||
eigen_assert(m_symbolicFactorization && "You must first call analyzePattern()");
|
||||
eigen_assert(m_nRows == a.rows() && m_nCols == a.cols());
|
||||
|
||||
if (EnforceSquare_) eigen_assert(a.rows() == a.cols());
|
||||
|
||||
AccelSparseMatrix A{};
|
||||
std::vector<long> columnStarts;
|
||||
|
||||
buildAccelSparseMatrix(a, A, columnStarts);
|
||||
|
||||
doFactorization(A);
|
||||
}
|
||||
|
||||
template <typename MatrixType_, int UpLo_, SparseFactorization_t Solver_, bool EnforceSquare_>
|
||||
template <typename Rhs, typename Dest>
|
||||
void AccelerateImpl<MatrixType_, UpLo_, Solver_, EnforceSquare_>::_solve_impl(const MatrixBase<Rhs>& b,
|
||||
MatrixBase<Dest>& x) const {
|
||||
if (!m_numericFactorization) {
|
||||
m_info = InvalidInput;
|
||||
return;
|
||||
}
|
||||
|
||||
eigen_assert(m_nRows == b.rows());
|
||||
eigen_assert(((b.cols() == 1) || b.outerStride() == b.rows()));
|
||||
|
||||
SparseStatus_t status = SparseStatusOK;
|
||||
|
||||
Scalar* b_ptr = const_cast<Scalar*>(b.derived().data());
|
||||
Scalar* x_ptr = const_cast<Scalar*>(x.derived().data());
|
||||
|
||||
AccelDenseMatrix xmat{};
|
||||
xmat.attributes = SparseAttributes_t();
|
||||
xmat.columnCount = static_cast<int>(x.cols());
|
||||
xmat.rowCount = static_cast<int>(x.rows());
|
||||
xmat.columnStride = xmat.rowCount;
|
||||
xmat.data = x_ptr;
|
||||
|
||||
AccelDenseMatrix bmat{};
|
||||
bmat.attributes = SparseAttributes_t();
|
||||
bmat.columnCount = static_cast<int>(b.cols());
|
||||
bmat.rowCount = static_cast<int>(b.rows());
|
||||
bmat.columnStride = bmat.rowCount;
|
||||
bmat.data = b_ptr;
|
||||
|
||||
SparseSolve(*m_numericFactorization, bmat, xmat);
|
||||
|
||||
updateInfoStatus(status);
|
||||
}
|
||||
|
||||
} // end namespace Eigen
|
||||
|
||||
#endif // EIGEN_ACCELERATESUPPORT_H
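
To connect the typedefs above to the common sparse-solver workflow, here is a hedged usage sketch (my own, not part of the header); it only applies on Apple platforms and to single or double precision real scalars, as the warnings above state.

#include <Eigen/AccelerateSupport>
#include <Eigen/SparseCore>

// Solve A x = b for a selfadjoint A whose lower triangle is stored.
Eigen::VectorXd solveWithAccelerate(const Eigen::SparseMatrix<double>& A, const Eigen::VectorXd& b) {
  Eigen::AccelerateLDLT<Eigen::SparseMatrix<double>, Eigen::Lower> ldlt;
  ldlt.compute(A);  // analyzePattern() + factorize()
  if (ldlt.info() != Eigen::Success) return Eigen::VectorXd();
  return ldlt.solve(b);
}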
|
||||
@@ -0,0 +1,3 @@
|
||||
#ifndef EIGEN_ACCELERATESUPPORT_MODULE_H
|
||||
#error "Please include Eigen/AccelerateSupport instead of including headers inside the src directory directly."
|
||||
#endif
|
||||
@@ -0,0 +1,3 @@
|
||||
#ifndef EIGEN_CHOLESKY_MODULE_H
|
||||
#error "Please include Eigen/Cholesky instead of including headers inside the src directory directly."
|
||||
#endif
|
||||
649
2025.09.22_cpp_with_eigen_package/Eigen/src/Cholesky/LDLT.h
Normal file
@@ -0,0 +1,649 @@
|
||||
// This file is part of Eigen, a lightweight C++ template library
|
||||
// for linear algebra.
|
||||
//
|
||||
// Copyright (C) 2008-2011 Gael Guennebaud <gael.guennebaud@inria.fr>
|
||||
// Copyright (C) 2009 Keir Mierle <mierle@gmail.com>
|
||||
// Copyright (C) 2009 Benoit Jacob <jacob.benoit.1@gmail.com>
|
||||
// Copyright (C) 2011 Timothy E. Holy <tim.holy@gmail.com >
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla
|
||||
// Public License v. 2.0. If a copy of the MPL was not distributed
|
||||
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
#ifndef EIGEN_LDLT_H
|
||||
#define EIGEN_LDLT_H
|
||||
|
||||
// IWYU pragma: private
|
||||
#include "./InternalHeaderCheck.h"
|
||||
|
||||
namespace Eigen {
|
||||
|
||||
namespace internal {
|
||||
template <typename MatrixType_, int UpLo_>
|
||||
struct traits<LDLT<MatrixType_, UpLo_> > : traits<MatrixType_> {
|
||||
typedef MatrixXpr XprKind;
|
||||
typedef SolverStorage StorageKind;
|
||||
typedef int StorageIndex;
|
||||
enum { Flags = 0 };
|
||||
};
|
||||
|
||||
template <typename MatrixType, int UpLo>
|
||||
struct LDLT_Traits;
|
||||
|
||||
// PositiveSemiDef means positive semi-definite and non-zero; same for NegativeSemiDef
|
||||
enum SignMatrix { PositiveSemiDef, NegativeSemiDef, ZeroSign, Indefinite };
|
||||
} // namespace internal
|
||||
|
||||
/** \ingroup Cholesky_Module
|
||||
*
|
||||
* \class LDLT
|
||||
*
|
||||
* \brief Robust Cholesky decomposition of a matrix with pivoting
|
||||
*
|
||||
* \tparam MatrixType_ the type of the matrix of which to compute the LDL^T Cholesky decomposition
|
||||
* \tparam UpLo_ the triangular part that will be used for the decomposition: Lower (default) or Upper.
|
||||
* The other triangular part won't be read.
|
||||
*
|
||||
* Perform a robust Cholesky decomposition of a positive semidefinite or negative semidefinite
|
||||
* matrix \f$ A \f$ such that \f$ A = P^TLDL^*P \f$, where P is a permutation matrix, L
|
||||
* is lower triangular with a unit diagonal and D is a diagonal matrix.
|
||||
*
|
||||
* The decomposition uses pivoting to ensure stability, so that D will have
|
||||
* zeros in the bottom right rank(A) - n submatrix. Avoiding the square root
|
||||
* on D also stabilizes the computation.
|
||||
*
|
||||
* Remember that Cholesky decompositions are not rank-revealing. Also, do not use a Cholesky
|
||||
* decomposition to determine whether a system of equations has a solution.
|
||||
*
|
||||
* This class supports the \link InplaceDecomposition inplace decomposition \endlink mechanism.
|
||||
*
|
||||
* \sa MatrixBase::ldlt(), SelfAdjointView::ldlt(), class LLT
|
||||
*/
|
||||
template <typename MatrixType_, int UpLo_>
|
||||
class LDLT : public SolverBase<LDLT<MatrixType_, UpLo_> > {
|
||||
public:
|
||||
typedef MatrixType_ MatrixType;
|
||||
typedef SolverBase<LDLT> Base;
|
||||
friend class SolverBase<LDLT>;
|
||||
|
||||
EIGEN_GENERIC_PUBLIC_INTERFACE(LDLT)
|
||||
enum {
|
||||
MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
|
||||
MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime,
|
||||
UpLo = UpLo_
|
||||
};
|
||||
typedef Matrix<Scalar, RowsAtCompileTime, 1, 0, MaxRowsAtCompileTime, 1> TmpMatrixType;
|
||||
|
||||
typedef Transpositions<RowsAtCompileTime, MaxRowsAtCompileTime> TranspositionType;
|
||||
typedef PermutationMatrix<RowsAtCompileTime, MaxRowsAtCompileTime> PermutationType;
|
||||
|
||||
typedef internal::LDLT_Traits<MatrixType, UpLo> Traits;
|
||||
|
||||
/** \brief Default Constructor.
|
||||
*
|
||||
* The default constructor is useful in cases in which the user intends to
|
||||
* perform decompositions via LDLT::compute(const MatrixType&).
|
||||
*/
|
||||
LDLT() : m_matrix(), m_transpositions(), m_sign(internal::ZeroSign), m_isInitialized(false) {}
|
||||
|
||||
/** \brief Default Constructor with memory preallocation
|
||||
*
|
||||
* Like the default constructor but with preallocation of the internal data
|
||||
* according to the specified problem \a size.
|
||||
* \sa LDLT()
|
||||
*/
|
||||
explicit LDLT(Index size)
|
||||
: m_matrix(size, size),
|
||||
m_transpositions(size),
|
||||
m_temporary(size),
|
||||
m_sign(internal::ZeroSign),
|
||||
m_isInitialized(false) {}
|
||||
|
||||
/** \brief Constructor with decomposition
|
||||
*
|
||||
* This calculates the decomposition for the input \a matrix.
|
||||
*
|
||||
* \sa LDLT(Index size)
|
||||
*/
|
||||
template <typename InputType>
|
||||
explicit LDLT(const EigenBase<InputType>& matrix)
|
||||
: m_matrix(matrix.rows(), matrix.cols()),
|
||||
m_transpositions(matrix.rows()),
|
||||
m_temporary(matrix.rows()),
|
||||
m_sign(internal::ZeroSign),
|
||||
m_isInitialized(false) {
|
||||
compute(matrix.derived());
|
||||
}
|
||||
|
||||
* \brief Constructs an LDLT factorization from a given matrix
|
||||
*
|
||||
* This overloaded constructor is provided for \link InplaceDecomposition inplace decomposition \endlink when \c
|
||||
* MatrixType is an Eigen::Ref.
|
||||
*
|
||||
* \sa LDLT(const EigenBase&)
|
||||
*/
|
||||
template <typename InputType>
|
||||
explicit LDLT(EigenBase<InputType>& matrix)
|
||||
: m_matrix(matrix.derived()),
|
||||
m_transpositions(matrix.rows()),
|
||||
m_temporary(matrix.rows()),
|
||||
m_sign(internal::ZeroSign),
|
||||
m_isInitialized(false) {
|
||||
compute(matrix.derived());
|
||||
}
|
||||
|
||||
/** Clear any existing decomposition
|
||||
* \sa rankUpdate(w,sigma)
|
||||
*/
|
||||
void setZero() { m_isInitialized = false; }
|
||||
|
||||
/** \returns a view of the upper triangular matrix U */
|
||||
inline typename Traits::MatrixU matrixU() const {
|
||||
eigen_assert(m_isInitialized && "LDLT is not initialized.");
|
||||
return Traits::getU(m_matrix);
|
||||
}
|
||||
|
||||
/** \returns a view of the lower triangular matrix L */
|
||||
inline typename Traits::MatrixL matrixL() const {
|
||||
eigen_assert(m_isInitialized && "LDLT is not initialized.");
|
||||
return Traits::getL(m_matrix);
|
||||
}
|
||||
|
||||
/** \returns the permutation matrix P as a transposition sequence.
|
||||
*/
|
||||
inline const TranspositionType& transpositionsP() const {
|
||||
eigen_assert(m_isInitialized && "LDLT is not initialized.");
|
||||
return m_transpositions;
|
||||
}
|
||||
|
||||
/** \returns the coefficients of the diagonal matrix D */
|
||||
inline Diagonal<const MatrixType> vectorD() const {
|
||||
eigen_assert(m_isInitialized && "LDLT is not initialized.");
|
||||
return m_matrix.diagonal();
|
||||
}
|
||||
|
||||
/** \returns true if the matrix is positive (semidefinite) */
|
||||
inline bool isPositive() const {
|
||||
eigen_assert(m_isInitialized && "LDLT is not initialized.");
|
||||
return m_sign == internal::PositiveSemiDef || m_sign == internal::ZeroSign;
|
||||
}
|
||||
|
||||
/** \returns true if the matrix is negative (semidefinite) */
|
||||
inline bool isNegative(void) const {
|
||||
eigen_assert(m_isInitialized && "LDLT is not initialized.");
|
||||
return m_sign == internal::NegativeSemiDef || m_sign == internal::ZeroSign;
|
||||
}
|
||||
|
||||
#ifdef EIGEN_PARSED_BY_DOXYGEN
|
||||
/** \returns a solution x of \f$ A x = b \f$ using the current decomposition of A.
|
||||
*
|
||||
* This function also supports in-place solves using the syntax <tt>x = decompositionObject.solve(x)</tt> .
|
||||
*
|
||||
* \note_about_checking_solutions
|
||||
*
|
||||
* More precisely, this method solves \f$ A x = b \f$ using the decomposition \f$ A = P^T L D L^* P \f$
|
||||
* by solving the systems \f$ P^T y_1 = b \f$, \f$ L y_2 = y_1 \f$, \f$ D y_3 = y_2 \f$,
|
||||
* \f$ L^* y_4 = y_3 \f$ and \f$ P x = y_4 \f$ in succession. If the matrix \f$ A \f$ is singular, then
|
||||
* \f$ D \f$ will also be singular (all the other matrices are invertible). In that case, the
|
||||
* least-square solution of \f$ D y_3 = y_2 \f$ is computed. This does not mean that this function
|
||||
* computes the least-square solution of \f$ A x = b \f$ if \f$ A \f$ is singular.
|
||||
*
|
||||
* \sa MatrixBase::ldlt(), SelfAdjointView::ldlt()
|
||||
*/
|
||||
template <typename Rhs>
|
||||
inline const Solve<LDLT, Rhs> solve(const MatrixBase<Rhs>& b) const;
|
||||
#endif
|
||||
|
||||
template <typename Derived>
|
||||
bool solveInPlace(MatrixBase<Derived>& bAndX) const;
|
||||
|
||||
template <typename InputType>
|
||||
LDLT& compute(const EigenBase<InputType>& matrix);
|
||||
|
||||
/** \returns an estimate of the reciprocal condition number of the matrix of
|
||||
* which \c *this is the LDLT decomposition.
|
||||
*/
|
||||
RealScalar rcond() const {
|
||||
eigen_assert(m_isInitialized && "LDLT is not initialized.");
|
||||
return internal::rcond_estimate_helper(m_l1_norm, *this);
|
||||
}
|
||||
|
||||
template <typename Derived>
|
||||
LDLT& rankUpdate(const MatrixBase<Derived>& w, const RealScalar& alpha = 1);
|
||||
|
||||
/** \returns the internal LDLT decomposition matrix
|
||||
*
|
||||
* TODO: document the storage layout
|
||||
*/
|
||||
inline const MatrixType& matrixLDLT() const {
|
||||
eigen_assert(m_isInitialized && "LDLT is not initialized.");
|
||||
return m_matrix;
|
||||
}
|
||||
|
||||
MatrixType reconstructedMatrix() const;
|
||||
|
||||
/** \returns the adjoint of \c *this, that is, a const reference to the decomposition itself as the underlying matrix
|
||||
* is self-adjoint.
|
||||
*
|
||||
* This method is provided for compatibility with other matrix decompositions, thus enabling generic code such as:
|
||||
* \code x = decomposition.adjoint().solve(b) \endcode
|
||||
*/
|
||||
const LDLT& adjoint() const { return *this; }
|
||||
|
||||
EIGEN_DEVICE_FUNC constexpr Index rows() const noexcept { return m_matrix.rows(); }
|
||||
EIGEN_DEVICE_FUNC constexpr Index cols() const noexcept { return m_matrix.cols(); }
|
||||
|
||||
/** \brief Reports whether previous computation was successful.
|
||||
*
|
||||
* \returns \c Success if computation was successful,
|
||||
* \c NumericalIssue if the factorization failed because of a zero pivot.
|
||||
*/
|
||||
ComputationInfo info() const {
|
||||
eigen_assert(m_isInitialized && "LDLT is not initialized.");
|
||||
return m_info;
|
||||
}
|
||||
|
||||
#ifndef EIGEN_PARSED_BY_DOXYGEN
|
||||
template <typename RhsType, typename DstType>
|
||||
void _solve_impl(const RhsType& rhs, DstType& dst) const;
|
||||
|
||||
template <bool Conjugate, typename RhsType, typename DstType>
|
||||
void _solve_impl_transposed(const RhsType& rhs, DstType& dst) const;
|
||||
#endif
|
||||
|
||||
protected:
|
||||
EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar)
|
||||
|
||||
/** \internal
|
||||
* Used to compute and store the Cholesky decomposition A = L D L^* = U^* D U.
|
||||
* The strict upper part is used during the decomposition, the strict lower
|
||||
* part correspond to the coefficients of L (its diagonal is equal to 1 and
|
||||
* is not stored), and the diagonal entries correspond to D.
|
||||
*/
|
||||
MatrixType m_matrix;
|
||||
RealScalar m_l1_norm;
|
||||
TranspositionType m_transpositions;
|
||||
TmpMatrixType m_temporary;
|
||||
internal::SignMatrix m_sign;
|
||||
bool m_isInitialized;
|
||||
ComputationInfo m_info;
|
||||
};
|
||||
|
||||
namespace internal {
|
||||
|
||||
template <int UpLo>
|
||||
struct ldlt_inplace;
|
||||
|
||||
template <>
|
||||
struct ldlt_inplace<Lower> {
|
||||
template <typename MatrixType, typename TranspositionType, typename Workspace>
|
||||
static bool unblocked(MatrixType& mat, TranspositionType& transpositions, Workspace& temp, SignMatrix& sign) {
|
||||
using std::abs;
|
||||
typedef typename MatrixType::Scalar Scalar;
|
||||
typedef typename MatrixType::RealScalar RealScalar;
|
||||
typedef typename TranspositionType::StorageIndex IndexType;
|
||||
eigen_assert(mat.rows() == mat.cols());
|
||||
const Index size = mat.rows();
|
||||
bool found_zero_pivot = false;
|
||||
bool ret = true;
|
||||
|
||||
if (size <= 1) {
|
||||
transpositions.setIdentity();
|
||||
if (size == 0)
|
||||
sign = ZeroSign;
|
||||
else if (numext::real(mat.coeff(0, 0)) > static_cast<RealScalar>(0))
|
||||
sign = PositiveSemiDef;
|
||||
else if (numext::real(mat.coeff(0, 0)) < static_cast<RealScalar>(0))
|
||||
sign = NegativeSemiDef;
|
||||
else
|
||||
sign = ZeroSign;
|
||||
return true;
|
||||
}
|
||||
|
||||
for (Index k = 0; k < size; ++k) {
|
||||
// Find largest diagonal element
|
||||
Index index_of_biggest_in_corner;
|
||||
mat.diagonal().tail(size - k).cwiseAbs().maxCoeff(&index_of_biggest_in_corner);
|
||||
index_of_biggest_in_corner += k;
|
||||
|
||||
transpositions.coeffRef(k) = IndexType(index_of_biggest_in_corner);
|
||||
if (k != index_of_biggest_in_corner) {
|
||||
// apply the transposition while taking care to consider only
|
||||
// the lower triangular part
|
||||
Index s = size - index_of_biggest_in_corner - 1; // trailing size after the biggest element
|
||||
mat.row(k).head(k).swap(mat.row(index_of_biggest_in_corner).head(k));
|
||||
mat.col(k).tail(s).swap(mat.col(index_of_biggest_in_corner).tail(s));
|
||||
std::swap(mat.coeffRef(k, k), mat.coeffRef(index_of_biggest_in_corner, index_of_biggest_in_corner));
|
||||
for (Index i = k + 1; i < index_of_biggest_in_corner; ++i) {
|
||||
Scalar tmp = mat.coeffRef(i, k);
|
||||
mat.coeffRef(i, k) = numext::conj(mat.coeffRef(index_of_biggest_in_corner, i));
|
||||
mat.coeffRef(index_of_biggest_in_corner, i) = numext::conj(tmp);
|
||||
}
|
||||
if (NumTraits<Scalar>::IsComplex)
|
||||
mat.coeffRef(index_of_biggest_in_corner, k) = numext::conj(mat.coeff(index_of_biggest_in_corner, k));
|
||||
}
|
||||
|
||||
// partition the matrix:
|
||||
// A00 | - | -
|
||||
// lu = A10 | A11 | -
|
||||
// A20 | A21 | A22
|
||||
Index rs = size - k - 1;
|
||||
Block<MatrixType, Dynamic, 1> A21(mat, k + 1, k, rs, 1);
|
||||
Block<MatrixType, 1, Dynamic> A10(mat, k, 0, 1, k);
|
||||
Block<MatrixType, Dynamic, Dynamic> A20(mat, k + 1, 0, rs, k);
|
||||
|
||||
if (k > 0) {
|
||||
temp.head(k) = mat.diagonal().real().head(k).asDiagonal() * A10.adjoint();
|
||||
mat.coeffRef(k, k) -= (A10 * temp.head(k)).value();
|
||||
if (rs > 0) A21.noalias() -= A20 * temp.head(k);
|
||||
}
|
||||
|
||||
// In some previous versions of Eigen (e.g., 3.2.1), the scaling was omitted if the pivot
|
||||
// was smaller than the cutoff value. However, since LDLT is not rank-revealing
|
||||
// we should only make sure that we do not introduce INF or NaN values.
|
||||
// Remark that LAPACK also uses 0 as the cutoff value.
|
||||
RealScalar realAkk = numext::real(mat.coeffRef(k, k));
|
||||
bool pivot_is_valid = (abs(realAkk) > RealScalar(0));
|
||||
|
||||
if (k == 0 && !pivot_is_valid) {
|
||||
// The entire diagonal is zero, there is nothing more to do
|
||||
// except filling the transpositions, and checking whether the matrix is zero.
|
||||
sign = ZeroSign;
|
||||
for (Index j = 0; j < size; ++j) {
|
||||
transpositions.coeffRef(j) = IndexType(j);
|
||||
ret = ret && (mat.col(j).tail(size - j - 1).array() == Scalar(0)).all();
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
if ((rs > 0) && pivot_is_valid)
|
||||
A21 /= realAkk;
|
||||
else if (rs > 0)
|
||||
ret = ret && (A21.array() == Scalar(0)).all();
|
||||
|
||||
if (found_zero_pivot && pivot_is_valid)
|
||||
ret = false; // factorization failed
|
||||
else if (!pivot_is_valid)
|
||||
found_zero_pivot = true;
|
||||
|
||||
if (sign == PositiveSemiDef) {
|
||||
if (realAkk < static_cast<RealScalar>(0)) sign = Indefinite;
|
||||
} else if (sign == NegativeSemiDef) {
|
||||
if (realAkk > static_cast<RealScalar>(0)) sign = Indefinite;
|
||||
} else if (sign == ZeroSign) {
|
||||
if (realAkk > static_cast<RealScalar>(0))
|
||||
sign = PositiveSemiDef;
|
||||
else if (realAkk < static_cast<RealScalar>(0))
|
||||
sign = NegativeSemiDef;
|
||||
}
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
// Reference for the algorithm: Davis and Hager, "Multiple Rank
|
||||
// Modifications of a Sparse Cholesky Factorization" (Algorithm 1)
|
||||
// Trivial rearrangements of their computations (Timothy E. Holy)
|
||||
// allow their algorithm to work for rank-1 updates even if the
|
||||
// original matrix is not of full rank.
|
||||
// Here only rank-1 updates are implemented, to reduce the
|
||||
// requirement for intermediate storage and improve accuracy
|
||||
template <typename MatrixType, typename WDerived>
|
||||
static bool updateInPlace(MatrixType& mat, MatrixBase<WDerived>& w,
|
||||
const typename MatrixType::RealScalar& sigma = 1) {
|
||||
using numext::isfinite;
|
||||
typedef typename MatrixType::Scalar Scalar;
|
||||
typedef typename MatrixType::RealScalar RealScalar;
|
||||
|
||||
const Index size = mat.rows();
|
||||
eigen_assert(mat.cols() == size && w.size() == size);
|
||||
|
||||
RealScalar alpha = 1;
|
||||
|
||||
// Apply the update
|
||||
for (Index j = 0; j < size; j++) {
|
||||
// Check for termination due to an original decomposition of low-rank
|
||||
if (!(isfinite)(alpha)) break;
|
||||
|
||||
// Update the diagonal terms
|
||||
RealScalar dj = numext::real(mat.coeff(j, j));
|
||||
Scalar wj = w.coeff(j);
|
||||
RealScalar swj2 = sigma * numext::abs2(wj);
|
||||
RealScalar gamma = dj * alpha + swj2;
|
||||
|
||||
mat.coeffRef(j, j) += swj2 / alpha;
|
||||
alpha += swj2 / dj;
|
||||
|
||||
// Update the terms of L
|
||||
Index rs = size - j - 1;
|
||||
w.tail(rs) -= wj * mat.col(j).tail(rs);
|
||||
if (!numext::is_exactly_zero(gamma)) mat.col(j).tail(rs) += (sigma * numext::conj(wj) / gamma) * w.tail(rs);
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
template <typename MatrixType, typename TranspositionType, typename Workspace, typename WType>
|
||||
static bool update(MatrixType& mat, const TranspositionType& transpositions, Workspace& tmp, const WType& w,
|
||||
const typename MatrixType::RealScalar& sigma = 1) {
|
||||
// Apply the permutation to the input w
|
||||
tmp = transpositions * w;
|
||||
|
||||
return ldlt_inplace<Lower>::updateInPlace(mat, tmp, sigma);
|
||||
}
|
||||
};
|
||||
|
||||
template <>
|
||||
struct ldlt_inplace<Upper> {
|
||||
template <typename MatrixType, typename TranspositionType, typename Workspace>
|
||||
static EIGEN_STRONG_INLINE bool unblocked(MatrixType& mat, TranspositionType& transpositions, Workspace& temp,
|
||||
SignMatrix& sign) {
|
||||
Transpose<MatrixType> matt(mat);
|
||||
return ldlt_inplace<Lower>::unblocked(matt, transpositions, temp, sign);
|
||||
}
|
||||
|
||||
template <typename MatrixType, typename TranspositionType, typename Workspace, typename WType>
|
||||
static EIGEN_STRONG_INLINE bool update(MatrixType& mat, TranspositionType& transpositions, Workspace& tmp, WType& w,
|
||||
const typename MatrixType::RealScalar& sigma = 1) {
|
||||
Transpose<MatrixType> matt(mat);
|
||||
return ldlt_inplace<Lower>::update(matt, transpositions, tmp, w.conjugate(), sigma);
|
||||
}
|
||||
};
|
||||
|
||||
template <typename MatrixType>
|
||||
struct LDLT_Traits<MatrixType, Lower> {
|
||||
typedef const TriangularView<const MatrixType, UnitLower> MatrixL;
|
||||
typedef const TriangularView<const typename MatrixType::AdjointReturnType, UnitUpper> MatrixU;
|
||||
static inline MatrixL getL(const MatrixType& m) { return MatrixL(m); }
|
||||
static inline MatrixU getU(const MatrixType& m) { return MatrixU(m.adjoint()); }
|
||||
};
|
||||
|
||||
template <typename MatrixType>
|
||||
struct LDLT_Traits<MatrixType, Upper> {
|
||||
typedef const TriangularView<const typename MatrixType::AdjointReturnType, UnitLower> MatrixL;
|
||||
typedef const TriangularView<const MatrixType, UnitUpper> MatrixU;
|
||||
static inline MatrixL getL(const MatrixType& m) { return MatrixL(m.adjoint()); }
|
||||
static inline MatrixU getU(const MatrixType& m) { return MatrixU(m); }
|
||||
};
|
||||
|
||||
} // end namespace internal
|
||||
|
||||
/** Compute / recompute the LDLT decomposition A = L D L^* = U^* D U of \a matrix
|
||||
*/
|
||||
template <typename MatrixType, int UpLo_>
|
||||
template <typename InputType>
|
||||
LDLT<MatrixType, UpLo_>& LDLT<MatrixType, UpLo_>::compute(const EigenBase<InputType>& a) {
|
||||
eigen_assert(a.rows() == a.cols());
|
||||
const Index size = a.rows();
|
||||
|
||||
m_matrix = a.derived();
|
||||
|
||||
// Compute matrix L1 norm = max abs column sum.
|
||||
m_l1_norm = RealScalar(0);
|
||||
// TODO move this code to SelfAdjointView
|
||||
for (Index col = 0; col < size; ++col) {
|
||||
RealScalar abs_col_sum;
|
||||
if (UpLo_ == Lower)
|
||||
abs_col_sum =
|
||||
m_matrix.col(col).tail(size - col).template lpNorm<1>() + m_matrix.row(col).head(col).template lpNorm<1>();
|
||||
else
|
||||
abs_col_sum =
|
||||
m_matrix.col(col).head(col).template lpNorm<1>() + m_matrix.row(col).tail(size - col).template lpNorm<1>();
|
||||
if (abs_col_sum > m_l1_norm) m_l1_norm = abs_col_sum;
|
||||
}
|
||||
|
||||
m_transpositions.resize(size);
|
||||
m_isInitialized = false;
|
||||
m_temporary.resize(size);
|
||||
m_sign = internal::ZeroSign;
|
||||
|
||||
m_info = internal::ldlt_inplace<UpLo>::unblocked(m_matrix, m_transpositions, m_temporary, m_sign) ? Success
|
||||
: NumericalIssue;
|
||||
|
||||
m_isInitialized = true;
|
||||
return *this;
|
||||
}
|
||||
|
||||
/** Update the LDLT decomposition: given A = L D L^T, efficiently compute the decomposition of A + sigma w w^T.
|
||||
* \param w a vector to be incorporated into the decomposition.
|
||||
* \param sigma a scalar, +1 for updates and -1 for "downdates," which correspond to removing previously-added column
|
||||
* vectors. Optional; default value is +1. \sa setZero()
|
||||
*/
|
||||
template <typename MatrixType, int UpLo_>
|
||||
template <typename Derived>
|
||||
LDLT<MatrixType, UpLo_>& LDLT<MatrixType, UpLo_>::rankUpdate(
|
||||
const MatrixBase<Derived>& w, const typename LDLT<MatrixType, UpLo_>::RealScalar& sigma) {
|
||||
typedef typename TranspositionType::StorageIndex IndexType;
|
||||
const Index size = w.rows();
|
||||
if (m_isInitialized) {
|
||||
eigen_assert(m_matrix.rows() == size);
|
||||
} else {
|
||||
m_matrix.resize(size, size);
|
||||
m_matrix.setZero();
|
||||
m_transpositions.resize(size);
|
||||
for (Index i = 0; i < size; i++) m_transpositions.coeffRef(i) = IndexType(i);
|
||||
m_temporary.resize(size);
|
||||
m_sign = sigma >= 0 ? internal::PositiveSemiDef : internal::NegativeSemiDef;
|
||||
m_isInitialized = true;
|
||||
}
|
||||
|
||||
internal::ldlt_inplace<UpLo>::update(m_matrix, m_transpositions, m_temporary, w, sigma);
|
||||
|
||||
return *this;
|
||||
}
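// Illustrative usage sketch, not part of Eigen's sources: update an existing LDLT
// factorization after a rank-one modification of A instead of refactorizing from
// scratch. All names below are hypothetical.
#include <Eigen/Dense>

inline Eigen::VectorXd solve_after_rank_update(const Eigen::MatrixXd& A, const Eigen::VectorXd& w,
                                               const Eigen::VectorXd& b) {
  Eigen::LDLT<Eigen::MatrixXd> ldlt(A);  // O(n^3) factorization of the symmetric matrix A
  ldlt.rankUpdate(w, 1.0);               // now factors A + w w^T, obtained in O(n^2)
  return ldlt.solve(b);                  // solves (A + w w^T) x = b
}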
|
||||
|
||||
#ifndef EIGEN_PARSED_BY_DOXYGEN
|
||||
template <typename MatrixType_, int UpLo_>
|
||||
template <typename RhsType, typename DstType>
|
||||
void LDLT<MatrixType_, UpLo_>::_solve_impl(const RhsType& rhs, DstType& dst) const {
|
||||
_solve_impl_transposed<true>(rhs, dst);
|
||||
}
|
||||
|
||||
template <typename MatrixType_, int UpLo_>
|
||||
template <bool Conjugate, typename RhsType, typename DstType>
|
||||
void LDLT<MatrixType_, UpLo_>::_solve_impl_transposed(const RhsType& rhs, DstType& dst) const {
|
||||
// dst = P b
|
||||
dst = m_transpositions * rhs;
|
||||
|
||||
// dst = L^-1 (P b)
|
||||
// dst = L^-*T (P b)
|
||||
matrixL().template conjugateIf<!Conjugate>().solveInPlace(dst);
|
||||
|
||||
// dst = D^-* (L^-1 P b)
|
||||
// dst = D^-1 (L^-*T P b)
|
||||
// more precisely, use pseudo-inverse of D (see bug 241)
|
||||
using std::abs;
|
||||
const typename Diagonal<const MatrixType>::RealReturnType vecD(vectorD());
|
||||
// In some previous versions, tolerance was set to the max of 1/highest (or rather numeric_limits::min())
|
||||
// and the maximal diagonal entry * epsilon as motivated by LAPACK's xGELSS:
|
||||
// RealScalar tolerance = numext::maxi(vecD.array().abs().maxCoeff() * NumTraits<RealScalar>::epsilon(),RealScalar(1)
|
||||
// / NumTraits<RealScalar>::highest()); However, LDLT is not rank revealing, and so adjusting the tolerance w.r.t. the
|
||||
// highest diagonal element is not well justified and leads to numerical issues in some cases. Moreover, Lapack's
|
||||
// xSYTRS routines use 0 for the tolerance. Using numeric_limits::min() gives us more robustness to denormals.
|
||||
RealScalar tolerance = (std::numeric_limits<RealScalar>::min)();
|
||||
for (Index i = 0; i < vecD.size(); ++i) {
|
||||
if (abs(vecD(i)) > tolerance)
|
||||
dst.row(i) /= vecD(i);
|
||||
else
|
||||
dst.row(i).setZero();
|
||||
}
|
||||
|
||||
// dst = L^-* (D^-* L^-1 P b)
|
||||
// dst = L^-T (D^-1 L^-*T P b)
|
||||
matrixL().transpose().template conjugateIf<Conjugate>().solveInPlace(dst);
|
||||
|
||||
// dst = P^T (L^-* D^-* L^-1 P b) = A^-1 b
|
||||
// dst = P^-T (L^-T D^-1 L^-*T P b) = A^-1 b
|
||||
dst = m_transpositions.transpose() * dst;
|
||||
}
|
||||
#endif
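// Illustrative sketch, not part of Eigen's sources: the solve pipeline implemented above
// (permute, forward-substitute with L, scale by the pseudo-inverse of D, back-substitute
// with L^*, permute back) is exactly what a plain call to solve() runs. Names are hypothetical.
#include <Eigen/Dense>

inline Eigen::VectorXd ldlt_solve_example(const Eigen::MatrixXd& A, const Eigen::VectorXd& b) {
  Eigen::LDLT<Eigen::MatrixXd> ldlt(A);
  if (ldlt.info() != Eigen::Success) {
    // The factorization ran into a numerical issue; report or fall back to another decomposition.
    return Eigen::VectorXd::Zero(b.size());
  }
  return ldlt.solve(b);  // applies P, L^-1, D^+ (pseudo-inverse of D), L^-*, P^T to b
}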
|
||||
|
||||
/** \internal use x = ldlt_object.solve(x);
|
||||
*
|
||||
* This is the \em in-place version of solve().
|
||||
*
|
||||
* \param bAndX represents both the right-hand side matrix b and result x.
|
||||
*
|
||||
* \returns true always! If you need to check for existence of solutions, use another decomposition like LU, QR, or SVD.
|
||||
*
|
||||
* This version avoids a copy when the right hand side matrix b is not
|
||||
* needed anymore.
|
||||
*
|
||||
* \sa LDLT::solve(), MatrixBase::ldlt()
|
||||
*/
|
||||
template <typename MatrixType, int UpLo_>
|
||||
template <typename Derived>
|
||||
bool LDLT<MatrixType, UpLo_>::solveInPlace(MatrixBase<Derived>& bAndX) const {
|
||||
eigen_assert(m_isInitialized && "LDLT is not initialized.");
|
||||
eigen_assert(m_matrix.rows() == bAndX.rows());
|
||||
|
||||
bAndX = this->solve(bAndX);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
/** \returns the matrix represented by the decomposition,
|
||||
* i.e., it returns the product: P^T L D L^* P.
|
||||
* This function is provided for debug purpose. */
|
||||
template <typename MatrixType, int UpLo_>
|
||||
MatrixType LDLT<MatrixType, UpLo_>::reconstructedMatrix() const {
|
||||
eigen_assert(m_isInitialized && "LDLT is not initialized.");
|
||||
const Index size = m_matrix.rows();
|
||||
MatrixType res(size, size);
|
||||
|
||||
// P
|
||||
res.setIdentity();
|
||||
res = transpositionsP() * res;
|
||||
// L^* P
|
||||
res = matrixU() * res;
|
||||
// D(L^*P)
|
||||
res = vectorD().real().asDiagonal() * res;
|
||||
// L(DL^*P)
|
||||
res = matrixL() * res;
|
||||
// P^T (LDL^*P)
|
||||
res = transpositionsP().transpose() * res;
|
||||
|
||||
return res;
|
||||
}
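// Illustrative sketch, not part of Eigen's sources: a debug check comparing the
// reconstructed product P^T L D L^* P against the original matrix. Names are hypothetical.
#include <Eigen/Dense>

inline bool ldlt_reconstruction_ok(const Eigen::MatrixXd& A, double tol = 1e-10) {
  Eigen::LDLT<Eigen::MatrixXd> ldlt(A);
  // selfadjointView<Lower>() mirrors the lower triangle, matching what LDLT actually read.
  Eigen::MatrixXd Afull = Eigen::MatrixXd(A.selfadjointView<Eigen::Lower>());
  return (ldlt.reconstructedMatrix() - Afull).norm() <= tol * Afull.norm();
}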
|
||||
|
||||
/** \cholesky_module
|
||||
* \returns the Cholesky decomposition with full pivoting without square root of \c *this
|
||||
* \sa MatrixBase::ldlt()
|
||||
*/
|
||||
template <typename MatrixType, unsigned int UpLo>
|
||||
inline const LDLT<typename SelfAdjointView<MatrixType, UpLo>::PlainObject, UpLo>
|
||||
SelfAdjointView<MatrixType, UpLo>::ldlt() const {
|
||||
return LDLT<PlainObject, UpLo>(m_matrix);
|
||||
}
|
||||
|
||||
/** \cholesky_module
|
||||
* \returns the Cholesky decomposition with full pivoting without square root of \c *this
|
||||
* \sa SelfAdjointView::ldlt()
|
||||
*/
|
||||
template <typename Derived>
|
||||
inline const LDLT<typename MatrixBase<Derived>::PlainObject> MatrixBase<Derived>::ldlt() const {
|
||||
return LDLT<PlainObject>(derived());
|
||||
}
|
||||
|
||||
} // end namespace Eigen
|
||||
|
||||
#endif // EIGEN_LDLT_H
|
||||
514
2025.09.22_cpp_with_eigen_package/Eigen/src/Cholesky/LLT.h
Normal file
@@ -0,0 +1,514 @@
|
||||
// This file is part of Eigen, a lightweight C++ template library
|
||||
// for linear algebra.
|
||||
//
|
||||
// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla
|
||||
// Public License v. 2.0. If a copy of the MPL was not distributed
|
||||
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
#ifndef EIGEN_LLT_H
|
||||
#define EIGEN_LLT_H
|
||||
|
||||
// IWYU pragma: private
|
||||
#include "./InternalHeaderCheck.h"
|
||||
|
||||
namespace Eigen {
|
||||
|
||||
namespace internal {
|
||||
|
||||
template <typename MatrixType_, int UpLo_>
|
||||
struct traits<LLT<MatrixType_, UpLo_> > : traits<MatrixType_> {
|
||||
typedef MatrixXpr XprKind;
|
||||
typedef SolverStorage StorageKind;
|
||||
typedef int StorageIndex;
|
||||
enum { Flags = 0 };
|
||||
};
|
||||
|
||||
template <typename MatrixType, int UpLo>
|
||||
struct LLT_Traits;
|
||||
} // namespace internal
|
||||
|
||||
/** \ingroup Cholesky_Module
|
||||
*
|
||||
* \class LLT
|
||||
*
|
||||
* \brief Standard Cholesky decomposition (LL^T) of a matrix and associated features
|
||||
*
|
||||
* \tparam MatrixType_ the type of the matrix of which we are computing the LL^T Cholesky decomposition
|
||||
* \tparam UpLo_ the triangular part that will be used for the decomposition: Lower (default) or Upper.
|
||||
* The other triangular part won't be read.
|
||||
*
|
||||
* This class performs a LL^T Cholesky decomposition of a symmetric, positive definite
|
||||
* matrix A such that A = LL^* = U^*U, where L is lower triangular.
|
||||
*
|
||||
* While the Cholesky decomposition is particularly useful for solving selfadjoint problems like D^*D x = b,
* for that purpose we recommend the Cholesky decomposition without square root (LDLT), which is more stable
* and even faster. Nevertheless, this standard Cholesky decomposition remains useful in many other
* situations like generalised eigenproblems with hermitian matrices.
|
||||
*
|
||||
* Remember that Cholesky decompositions are not rank-revealing. This LLT decomposition is only stable on positive
* definite matrices; use LDLT instead for the semidefinite case. Also, do not use a Cholesky decomposition to determine
|
||||
* whether a system of equations has a solution.
|
||||
*
|
||||
* Example: \include LLT_example.cpp
|
||||
* Output: \verbinclude LLT_example.out
|
||||
*
|
||||
* \b Performance: for best performance, it is recommended to use a column-major storage format
|
||||
* with the Lower triangular part (the default), or, equivalently, a row-major storage format
|
||||
* with the Upper triangular part. Otherwise, you might get a 20% slowdown for the full factorization
|
||||
* step, and rank-updates can be up to 3 times slower.
|
||||
*
|
||||
* This class supports the \link InplaceDecomposition inplace decomposition \endlink mechanism.
|
||||
*
|
||||
* Note that during the decomposition, only the lower (or upper, as defined by UpLo_) triangular part of A is
|
||||
* considered. Therefore, the strict lower part does not have to store correct values.
|
||||
*
|
||||
* \sa MatrixBase::llt(), SelfAdjointView::llt(), class LDLT
|
||||
*/
|
||||
template <typename MatrixType_, int UpLo_>
|
||||
class LLT : public SolverBase<LLT<MatrixType_, UpLo_> > {
|
||||
public:
|
||||
typedef MatrixType_ MatrixType;
|
||||
typedef SolverBase<LLT> Base;
|
||||
friend class SolverBase<LLT>;
|
||||
|
||||
EIGEN_GENERIC_PUBLIC_INTERFACE(LLT)
|
||||
enum { MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime };
|
||||
|
||||
enum { PacketSize = internal::packet_traits<Scalar>::size, AlignmentMask = int(PacketSize) - 1, UpLo = UpLo_ };
|
||||
|
||||
typedef internal::LLT_Traits<MatrixType, UpLo> Traits;
|
||||
|
||||
/**
|
||||
* \brief Default Constructor.
|
||||
*
|
||||
* The default constructor is useful in cases in which the user intends to
|
||||
* perform decompositions via LLT::compute(const MatrixType&).
|
||||
*/
|
||||
LLT() : m_matrix(), m_isInitialized(false) {}
|
||||
|
||||
/** \brief Default Constructor with memory preallocation
|
||||
*
|
||||
* Like the default constructor but with preallocation of the internal data
|
||||
* according to the specified problem \a size.
|
||||
* \sa LLT()
|
||||
*/
|
||||
explicit LLT(Index size) : m_matrix(size, size), m_isInitialized(false) {}
|
||||
|
||||
template <typename InputType>
|
||||
explicit LLT(const EigenBase<InputType>& matrix) : m_matrix(matrix.rows(), matrix.cols()), m_isInitialized(false) {
|
||||
compute(matrix.derived());
|
||||
}
|
||||
|
||||
/** \brief Constructs a LLT factorization from a given matrix
|
||||
*
|
||||
* This overloaded constructor is provided for \link InplaceDecomposition inplace decomposition \endlink when
|
||||
* \c MatrixType is a Eigen::Ref.
|
||||
*
|
||||
* \sa LLT(const EigenBase&)
|
||||
*/
|
||||
template <typename InputType>
|
||||
explicit LLT(EigenBase<InputType>& matrix) : m_matrix(matrix.derived()), m_isInitialized(false) {
|
||||
compute(matrix.derived());
|
||||
}
|
||||
|
||||
/** \returns a view of the upper triangular matrix U */
|
||||
inline typename Traits::MatrixU matrixU() const {
|
||||
eigen_assert(m_isInitialized && "LLT is not initialized.");
|
||||
return Traits::getU(m_matrix);
|
||||
}
|
||||
|
||||
/** \returns a view of the lower triangular matrix L */
|
||||
inline typename Traits::MatrixL matrixL() const {
|
||||
eigen_assert(m_isInitialized && "LLT is not initialized.");
|
||||
return Traits::getL(m_matrix);
|
||||
}
|
||||
|
||||
#ifdef EIGEN_PARSED_BY_DOXYGEN
|
||||
/** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A.
|
||||
*
|
||||
* Since this LLT class assumes anyway that the matrix A is invertible, the solution
|
||||
* theoretically exists and is unique regardless of b.
|
||||
*
|
||||
* Example: \include LLT_solve.cpp
|
||||
* Output: \verbinclude LLT_solve.out
|
||||
*
|
||||
* \sa solveInPlace(), MatrixBase::llt(), SelfAdjointView::llt()
|
||||
*/
|
||||
template <typename Rhs>
|
||||
inline const Solve<LLT, Rhs> solve(const MatrixBase<Rhs>& b) const;
|
||||
#endif
|
||||
|
||||
template <typename Derived>
|
||||
void solveInPlace(const MatrixBase<Derived>& bAndX) const;
|
||||
|
||||
template <typename InputType>
|
||||
LLT& compute(const EigenBase<InputType>& matrix);
|
||||
|
||||
/** \returns an estimate of the reciprocal condition number of the matrix of
|
||||
* which \c *this is the Cholesky decomposition.
|
||||
*/
|
||||
RealScalar rcond() const {
|
||||
eigen_assert(m_isInitialized && "LLT is not initialized.");
|
||||
eigen_assert(m_info == Success && "LLT failed because matrix appears to be negative");
|
||||
return internal::rcond_estimate_helper(m_l1_norm, *this);
|
||||
}
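// Illustrative sketch, not part of Eigen's sources: using rcond() to flag an
// ill-conditioned (though technically positive definite) system. Names and the
// threshold are hypothetical.
#include <Eigen/Dense>

inline bool llt_is_well_conditioned(const Eigen::MatrixXd& A, double threshold = 1e-12) {
  Eigen::LLT<Eigen::MatrixXd> llt(A);
  if (llt.info() != Eigen::Success) return false;  // not (numerically) positive definite
  return llt.rcond() > threshold;                  // reciprocal condition number estimate
}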
|
||||
|
||||
/** \returns the LLT decomposition matrix
|
||||
*
|
||||
* TODO: document the storage layout
|
||||
*/
|
||||
inline const MatrixType& matrixLLT() const {
|
||||
eigen_assert(m_isInitialized && "LLT is not initialized.");
|
||||
return m_matrix;
|
||||
}
|
||||
|
||||
MatrixType reconstructedMatrix() const;
|
||||
|
||||
/** \brief Reports whether previous computation was successful.
|
||||
*
|
||||
* \returns \c Success if computation was successful,
|
||||
* \c NumericalIssue if the matrix appears not to be positive definite.
|
||||
*/
|
||||
ComputationInfo info() const {
|
||||
eigen_assert(m_isInitialized && "LLT is not initialized.");
|
||||
return m_info;
|
||||
}
|
||||
|
||||
/** \returns the adjoint of \c *this, that is, a const reference to the decomposition itself as the underlying matrix
|
||||
* is self-adjoint.
|
||||
*
|
||||
* This method is provided for compatibility with other matrix decompositions, thus enabling generic code such as:
|
||||
* \code x = decomposition.adjoint().solve(b) \endcode
|
||||
*/
|
||||
const LLT& adjoint() const noexcept { return *this; }
|
||||
|
||||
constexpr Index rows() const noexcept { return m_matrix.rows(); }
|
||||
constexpr Index cols() const noexcept { return m_matrix.cols(); }
|
||||
|
||||
template <typename VectorType>
|
||||
LLT& rankUpdate(const VectorType& vec, const RealScalar& sigma = 1);
|
||||
|
||||
#ifndef EIGEN_PARSED_BY_DOXYGEN
|
||||
template <typename RhsType, typename DstType>
|
||||
void _solve_impl(const RhsType& rhs, DstType& dst) const;
|
||||
|
||||
template <bool Conjugate, typename RhsType, typename DstType>
|
||||
void _solve_impl_transposed(const RhsType& rhs, DstType& dst) const;
|
||||
#endif
|
||||
|
||||
protected:
|
||||
EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar)
|
||||
|
||||
/** \internal
|
||||
* Used to compute and store L
|
||||
* The strict upper part is not used and even not initialized.
|
||||
*/
|
||||
MatrixType m_matrix;
|
||||
RealScalar m_l1_norm;
|
||||
bool m_isInitialized;
|
||||
ComputationInfo m_info;
|
||||
};
|
||||
|
||||
namespace internal {
|
||||
|
||||
template <typename Scalar, int UpLo>
|
||||
struct llt_inplace;
|
||||
|
||||
template <typename MatrixType, typename VectorType>
|
||||
static Index llt_rank_update_lower(MatrixType& mat, const VectorType& vec,
|
||||
const typename MatrixType::RealScalar& sigma) {
|
||||
using std::sqrt;
|
||||
typedef typename MatrixType::Scalar Scalar;
|
||||
typedef typename MatrixType::RealScalar RealScalar;
|
||||
typedef typename MatrixType::ColXpr ColXpr;
|
||||
typedef internal::remove_all_t<ColXpr> ColXprCleaned;
|
||||
typedef typename ColXprCleaned::SegmentReturnType ColXprSegment;
|
||||
typedef Matrix<Scalar, Dynamic, 1> TempVectorType;
|
||||
typedef typename TempVectorType::SegmentReturnType TempVecSegment;
|
||||
|
||||
Index n = mat.cols();
|
||||
eigen_assert(mat.rows() == n && vec.size() == n);
|
||||
|
||||
TempVectorType temp;
|
||||
|
||||
if (sigma > 0) {
|
||||
// This version is based on Givens rotations.
|
||||
// It is faster than the other one below, but only works for updates,
|
||||
// i.e., for sigma > 0
|
||||
temp = sqrt(sigma) * vec;
|
||||
|
||||
for (Index i = 0; i < n; ++i) {
|
||||
JacobiRotation<Scalar> g;
|
||||
g.makeGivens(mat(i, i), -temp(i), &mat(i, i));
|
||||
|
||||
Index rs = n - i - 1;
|
||||
if (rs > 0) {
|
||||
ColXprSegment x(mat.col(i).tail(rs));
|
||||
TempVecSegment y(temp.tail(rs));
|
||||
apply_rotation_in_the_plane(x, y, g);
|
||||
}
|
||||
}
|
||||
} else {
|
||||
temp = vec;
|
||||
RealScalar beta = 1;
|
||||
for (Index j = 0; j < n; ++j) {
|
||||
RealScalar Ljj = numext::real(mat.coeff(j, j));
|
||||
RealScalar dj = numext::abs2(Ljj);
|
||||
Scalar wj = temp.coeff(j);
|
||||
RealScalar swj2 = sigma * numext::abs2(wj);
|
||||
RealScalar gamma = dj * beta + swj2;
|
||||
|
||||
RealScalar x = dj + swj2 / beta;
|
||||
if (x <= RealScalar(0)) return j;
|
||||
RealScalar nLjj = sqrt(x);
|
||||
mat.coeffRef(j, j) = nLjj;
|
||||
beta += swj2 / dj;
|
||||
|
||||
// Update the terms of L
|
||||
Index rs = n - j - 1;
|
||||
if (rs) {
|
||||
temp.tail(rs) -= (wj / Ljj) * mat.col(j).tail(rs);
|
||||
if (!numext::is_exactly_zero(gamma))
|
||||
mat.col(j).tail(rs) =
|
||||
(nLjj / Ljj) * mat.col(j).tail(rs) + (nLjj * sigma * numext::conj(wj) / gamma) * temp.tail(rs);
|
||||
}
|
||||
}
|
||||
}
|
||||
return -1;
|
||||
}
|
||||
|
||||
template <typename Scalar>
|
||||
struct llt_inplace<Scalar, Lower> {
|
||||
typedef typename NumTraits<Scalar>::Real RealScalar;
|
||||
template <typename MatrixType>
|
||||
static Index unblocked(MatrixType& mat) {
|
||||
using std::sqrt;
|
||||
|
||||
eigen_assert(mat.rows() == mat.cols());
|
||||
const Index size = mat.rows();
|
||||
for (Index k = 0; k < size; ++k) {
|
||||
Index rs = size - k - 1; // remaining size
|
||||
|
||||
Block<MatrixType, Dynamic, 1> A21(mat, k + 1, k, rs, 1);
|
||||
Block<MatrixType, 1, Dynamic> A10(mat, k, 0, 1, k);
|
||||
Block<MatrixType, Dynamic, Dynamic> A20(mat, k + 1, 0, rs, k);
|
||||
|
||||
RealScalar x = numext::real(mat.coeff(k, k));
|
||||
if (k > 0) x -= A10.squaredNorm();
|
||||
if (x <= RealScalar(0)) return k;
|
||||
mat.coeffRef(k, k) = x = sqrt(x);
|
||||
if (k > 0 && rs > 0) A21.noalias() -= A20 * A10.adjoint();
|
||||
if (rs > 0) A21 /= x;
|
||||
}
|
||||
return -1;
|
||||
}
|
||||
|
||||
template <typename MatrixType>
|
||||
static Index blocked(MatrixType& m) {
|
||||
eigen_assert(m.rows() == m.cols());
|
||||
Index size = m.rows();
|
||||
if (size < 32) return unblocked(m);
|
||||
|
||||
Index blockSize = size / 8;
|
||||
blockSize = (blockSize / 16) * 16;
|
||||
blockSize = (std::min)((std::max)(blockSize, Index(8)), Index(128));
|
||||
|
||||
for (Index k = 0; k < size; k += blockSize) {
|
||||
// partition the matrix:
|
||||
// A00 | - | -
|
||||
// lu = A10 | A11 | -
|
||||
// A20 | A21 | A22
|
||||
Index bs = (std::min)(blockSize, size - k);
|
||||
Index rs = size - k - bs;
|
||||
Block<MatrixType, Dynamic, Dynamic> A11(m, k, k, bs, bs);
|
||||
Block<MatrixType, Dynamic, Dynamic> A21(m, k + bs, k, rs, bs);
|
||||
Block<MatrixType, Dynamic, Dynamic> A22(m, k + bs, k + bs, rs, rs);
|
||||
|
||||
Index ret;
|
||||
if ((ret = unblocked(A11)) >= 0) return k + ret;
|
||||
if (rs > 0) A11.adjoint().template triangularView<Upper>().template solveInPlace<OnTheRight>(A21);
|
||||
if (rs > 0)
|
||||
A22.template selfadjointView<Lower>().rankUpdate(A21,
|
||||
typename NumTraits<RealScalar>::Literal(-1)); // bottleneck
|
||||
}
|
||||
return -1;
|
||||
}
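// Illustrative sketch only, not Eigen's implementation: the same blocked recursion as
// above (factor A11, triangular-solve for A21, rank-update A22), written with high-level
// Eigen calls on a concrete double matrix. Names are hypothetical and the block size is
// a plausible guess, not Eigen's tuned heuristic.
#include <Eigen/Dense>
#include <algorithm>

inline bool blocked_cholesky_lower_sketch(Eigen::MatrixXd& m, Eigen::Index blockSize = 64) {
  using namespace Eigen;
  const Index size = m.rows();
  for (Index k = 0; k < size; k += blockSize) {
    const Index bs = (std::min)(blockSize, size - k);
    const Index rs = size - k - bs;
    // Factor the diagonal block A11 and write its lower Cholesky factor back in place.
    LLT<MatrixXd> llt(m.block(k, k, bs, bs));
    if (llt.info() != Success) return false;
    m.block(k, k, bs, bs) = MatrixXd(llt.matrixL());
    if (rs > 0) {
      auto A21 = m.block(k + bs, k, rs, bs);
      // A21 <- A21 * L11^-T : triangular solve applied from the right.
      m.block(k, k, bs, bs).adjoint().triangularView<Upper>().solveInPlace<OnTheRight>(A21);
      // A22 <- A22 - A21 * A21^T : symmetric rank update of the trailing block (the bottleneck).
      m.block(k + bs, k + bs, rs, rs).selfadjointView<Lower>().rankUpdate(A21, -1.0);
    }
  }
  return true;
}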
|
||||
|
||||
template <typename MatrixType, typename VectorType>
|
||||
static Index rankUpdate(MatrixType& mat, const VectorType& vec, const RealScalar& sigma) {
|
||||
return Eigen::internal::llt_rank_update_lower(mat, vec, sigma);
|
||||
}
|
||||
};
|
||||
|
||||
template <typename Scalar>
|
||||
struct llt_inplace<Scalar, Upper> {
|
||||
typedef typename NumTraits<Scalar>::Real RealScalar;
|
||||
|
||||
template <typename MatrixType>
|
||||
static EIGEN_STRONG_INLINE Index unblocked(MatrixType& mat) {
|
||||
Transpose<MatrixType> matt(mat);
|
||||
return llt_inplace<Scalar, Lower>::unblocked(matt);
|
||||
}
|
||||
template <typename MatrixType>
|
||||
static EIGEN_STRONG_INLINE Index blocked(MatrixType& mat) {
|
||||
Transpose<MatrixType> matt(mat);
|
||||
return llt_inplace<Scalar, Lower>::blocked(matt);
|
||||
}
|
||||
template <typename MatrixType, typename VectorType>
|
||||
static Index rankUpdate(MatrixType& mat, const VectorType& vec, const RealScalar& sigma) {
|
||||
Transpose<MatrixType> matt(mat);
|
||||
return llt_inplace<Scalar, Lower>::rankUpdate(matt, vec.conjugate(), sigma);
|
||||
}
|
||||
};
|
||||
|
||||
template <typename MatrixType>
|
||||
struct LLT_Traits<MatrixType, Lower> {
|
||||
typedef const TriangularView<const MatrixType, Lower> MatrixL;
|
||||
typedef const TriangularView<const typename MatrixType::AdjointReturnType, Upper> MatrixU;
|
||||
static inline MatrixL getL(const MatrixType& m) { return MatrixL(m); }
|
||||
static inline MatrixU getU(const MatrixType& m) { return MatrixU(m.adjoint()); }
|
||||
static bool inplace_decomposition(MatrixType& m) {
|
||||
return llt_inplace<typename MatrixType::Scalar, Lower>::blocked(m) == -1;
|
||||
}
|
||||
};
|
||||
|
||||
template <typename MatrixType>
|
||||
struct LLT_Traits<MatrixType, Upper> {
|
||||
typedef const TriangularView<const typename MatrixType::AdjointReturnType, Lower> MatrixL;
|
||||
typedef const TriangularView<const MatrixType, Upper> MatrixU;
|
||||
static inline MatrixL getL(const MatrixType& m) { return MatrixL(m.adjoint()); }
|
||||
static inline MatrixU getU(const MatrixType& m) { return MatrixU(m); }
|
||||
static bool inplace_decomposition(MatrixType& m) {
|
||||
return llt_inplace<typename MatrixType::Scalar, Upper>::blocked(m) == -1;
|
||||
}
|
||||
};
|
||||
|
||||
} // end namespace internal
|
||||
|
||||
/** Computes / recomputes the Cholesky decomposition A = LL^* = U^*U of \a matrix
|
||||
*
|
||||
* \returns a reference to *this
|
||||
*
|
||||
* Example: \include TutorialLinAlgComputeTwice.cpp
|
||||
* Output: \verbinclude TutorialLinAlgComputeTwice.out
|
||||
*/
|
||||
template <typename MatrixType, int UpLo_>
|
||||
template <typename InputType>
|
||||
LLT<MatrixType, UpLo_>& LLT<MatrixType, UpLo_>::compute(const EigenBase<InputType>& a) {
|
||||
eigen_assert(a.rows() == a.cols());
|
||||
const Index size = a.rows();
|
||||
m_matrix.resize(size, size);
|
||||
if (!internal::is_same_dense(m_matrix, a.derived())) m_matrix = a.derived();
|
||||
|
||||
// Compute matrix L1 norm = max abs column sum.
|
||||
m_l1_norm = RealScalar(0);
|
||||
// TODO move this code to SelfAdjointView
|
||||
for (Index col = 0; col < size; ++col) {
|
||||
RealScalar abs_col_sum;
|
||||
if (UpLo_ == Lower)
|
||||
abs_col_sum =
|
||||
m_matrix.col(col).tail(size - col).template lpNorm<1>() + m_matrix.row(col).head(col).template lpNorm<1>();
|
||||
else
|
||||
abs_col_sum =
|
||||
m_matrix.col(col).head(col).template lpNorm<1>() + m_matrix.row(col).tail(size - col).template lpNorm<1>();
|
||||
if (abs_col_sum > m_l1_norm) m_l1_norm = abs_col_sum;
|
||||
}
|
||||
|
||||
m_isInitialized = true;
|
||||
bool ok = Traits::inplace_decomposition(m_matrix);
|
||||
m_info = ok ? Success : NumericalIssue;
|
||||
|
||||
return *this;
|
||||
}
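// Illustrative sketch, not part of Eigen's sources: reusing one LLT object to factor
// several matrices of the same size, so the internal storage is allocated only once.
// Names are hypothetical.
#include <Eigen/Dense>

inline void llt_compute_twice(const Eigen::MatrixXd& A1, const Eigen::MatrixXd& A2,
                              const Eigen::VectorXd& b, Eigen::VectorXd& x1, Eigen::VectorXd& x2) {
  Eigen::LLT<Eigen::MatrixXd> llt(A1.rows());  // preallocates the internal matrix
  llt.compute(A1);
  x1 = llt.solve(b);
  llt.compute(A2);  // recomputes in the same storage
  x2 = llt.solve(b);
}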
|
||||
|
||||
/** Performs a rank one update (or downdate) of the current decomposition.
|
||||
* If A = LL^* before the rank one update,
|
||||
* then after it we have LL^* = A + sigma * v v^* where \a v must be a vector
|
||||
* of same dimension.
|
||||
*/
|
||||
template <typename MatrixType_, int UpLo_>
|
||||
template <typename VectorType>
|
||||
LLT<MatrixType_, UpLo_>& LLT<MatrixType_, UpLo_>::rankUpdate(const VectorType& v, const RealScalar& sigma) {
|
||||
EIGEN_STATIC_ASSERT_VECTOR_ONLY(VectorType);
|
||||
eigen_assert(v.size() == m_matrix.cols());
|
||||
eigen_assert(m_isInitialized);
|
||||
if (internal::llt_inplace<typename MatrixType::Scalar, UpLo>::rankUpdate(m_matrix, v, sigma) >= 0)
|
||||
m_info = NumericalIssue;
|
||||
else
|
||||
m_info = Success;
|
||||
|
||||
return *this;
|
||||
}
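// Illustrative sketch, not part of Eigen's sources: a rank-one update followed by a
// downdate of an LLT factorization. A downdate can fail (info() == NumericalIssue) when
// the result is no longer positive definite. Names are hypothetical.
#include <Eigen/Dense>

inline bool llt_update_then_downdate(const Eigen::MatrixXd& A, const Eigen::VectorXd& v) {
  Eigen::LLT<Eigen::MatrixXd> llt(A);
  if (llt.info() != Eigen::Success) return false;
  llt.rankUpdate(v, 1.0);   // factors A + v v^*
  llt.rankUpdate(v, -1.0);  // downdate: factors A again (up to rounding error)
  return llt.info() == Eigen::Success;
}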
|
||||
|
||||
#ifndef EIGEN_PARSED_BY_DOXYGEN
|
||||
template <typename MatrixType_, int UpLo_>
|
||||
template <typename RhsType, typename DstType>
|
||||
void LLT<MatrixType_, UpLo_>::_solve_impl(const RhsType& rhs, DstType& dst) const {
|
||||
_solve_impl_transposed<true>(rhs, dst);
|
||||
}
|
||||
|
||||
template <typename MatrixType_, int UpLo_>
|
||||
template <bool Conjugate, typename RhsType, typename DstType>
|
||||
void LLT<MatrixType_, UpLo_>::_solve_impl_transposed(const RhsType& rhs, DstType& dst) const {
|
||||
dst = rhs;
|
||||
|
||||
matrixL().template conjugateIf<!Conjugate>().solveInPlace(dst);
|
||||
matrixU().template conjugateIf<!Conjugate>().solveInPlace(dst);
|
||||
}
|
||||
#endif
|
||||
|
||||
/** \internal use x = llt_object.solve(x);
|
||||
*
|
||||
* This is the \em in-place version of solve().
|
||||
*
|
||||
* \param bAndX represents both the right-hand side matrix b and result x.
|
||||
*
|
||||
* This version avoids a copy when the right hand side matrix b is not needed anymore.
|
||||
*
|
||||
* \warning The parameter is only marked 'const' to make the C++ compiler accept a temporary expression here.
|
||||
* This function will const_cast it, so constness isn't honored here.
|
||||
*
|
||||
* \sa LLT::solve(), MatrixBase::llt()
|
||||
*/
|
||||
template <typename MatrixType, int UpLo_>
|
||||
template <typename Derived>
|
||||
void LLT<MatrixType, UpLo_>::solveInPlace(const MatrixBase<Derived>& bAndX) const {
|
||||
eigen_assert(m_isInitialized && "LLT is not initialized.");
|
||||
eigen_assert(m_matrix.rows() == bAndX.rows());
|
||||
matrixL().solveInPlace(bAndX);
|
||||
matrixU().solveInPlace(bAndX);
|
||||
}
|
||||
|
||||
/** \returns the matrix represented by the decomposition,
|
||||
* i.e., it returns the product: L L^*.
|
||||
* This function is provided for debug purpose. */
|
||||
template <typename MatrixType, int UpLo_>
|
||||
MatrixType LLT<MatrixType, UpLo_>::reconstructedMatrix() const {
|
||||
eigen_assert(m_isInitialized && "LLT is not initialized.");
|
||||
return matrixL() * matrixL().adjoint().toDenseMatrix();
|
||||
}
|
||||
|
||||
/** \cholesky_module
|
||||
* \returns the LLT decomposition of \c *this
|
||||
* \sa SelfAdjointView::llt()
|
||||
*/
|
||||
template <typename Derived>
|
||||
inline const LLT<typename MatrixBase<Derived>::PlainObject> MatrixBase<Derived>::llt() const {
|
||||
return LLT<PlainObject>(derived());
|
||||
}
|
||||
|
||||
/** \cholesky_module
|
||||
* \returns the LLT decomposition of \c *this
|
||||
* \sa SelfAdjointView::llt()
|
||||
*/
|
||||
template <typename MatrixType, unsigned int UpLo>
|
||||
inline const LLT<typename SelfAdjointView<MatrixType, UpLo>::PlainObject, UpLo> SelfAdjointView<MatrixType, UpLo>::llt()
|
||||
const {
|
||||
return LLT<PlainObject, UpLo>(m_matrix);
|
||||
}
|
||||
|
||||
} // end namespace Eigen
|
||||
|
||||
#endif // EIGEN_LLT_H
|
||||
@@ -0,0 +1,124 @@
|
||||
/*
|
||||
Copyright (c) 2011, Intel Corporation. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without modification,
|
||||
are permitted provided that the following conditions are met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright notice, this
|
||||
list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above copyright notice,
|
||||
this list of conditions and the following disclaimer in the documentation
|
||||
and/or other materials provided with the distribution.
|
||||
* Neither the name of Intel Corporation nor the names of its contributors may
|
||||
be used to endorse or promote products derived from this software without
|
||||
specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
|
||||
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
||||
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
|
||||
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
|
||||
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
|
||||
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
|
||||
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
||||
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
********************************************************************************
|
||||
* Content : Eigen bindings to LAPACKe
|
||||
* LLt decomposition based on LAPACKE_?potrf function.
|
||||
********************************************************************************
|
||||
*/
|
||||
|
||||
#ifndef EIGEN_LLT_LAPACKE_H
|
||||
#define EIGEN_LLT_LAPACKE_H
|
||||
|
||||
// IWYU pragma: private
|
||||
#include "./InternalHeaderCheck.h"
|
||||
|
||||
namespace Eigen {
|
||||
|
||||
namespace internal {
|
||||
|
||||
namespace lapacke_helpers {
|
||||
// -------------------------------------------------------------------------------------------------------------------
|
||||
// Dispatch for rank update handling upper and lower parts
|
||||
// -------------------------------------------------------------------------------------------------------------------
|
||||
|
||||
template <UpLoType Mode>
|
||||
struct rank_update {};
|
||||
|
||||
template <>
|
||||
struct rank_update<Lower> {
|
||||
template <typename MatrixType, typename VectorType>
|
||||
static Index run(MatrixType &mat, const VectorType &vec, const typename MatrixType::RealScalar &sigma) {
|
||||
return Eigen::internal::llt_rank_update_lower(mat, vec, sigma);
|
||||
}
|
||||
};
|
||||
|
||||
template <>
|
||||
struct rank_update<Upper> {
|
||||
template <typename MatrixType, typename VectorType>
|
||||
static Index run(MatrixType &mat, const VectorType &vec, const typename MatrixType::RealScalar &sigma) {
|
||||
Transpose<MatrixType> matt(mat);
|
||||
return Eigen::internal::llt_rank_update_lower(matt, vec.conjugate(), sigma);
|
||||
}
|
||||
};
|
||||
|
||||
// -------------------------------------------------------------------------------------------------------------------
|
||||
// Generic LAPACKE LLT implementation that hands off to the dispatchers above
|
||||
// -------------------------------------------------------------------------------------------------------------------
|
||||
|
||||
template <typename Scalar, UpLoType Mode>
|
||||
struct lapacke_llt {
|
||||
EIGEN_STATIC_ASSERT(((Mode == Lower) || (Mode == Upper)), MODE_MUST_BE_UPPER_OR_LOWER)
|
||||
template <typename MatrixType>
|
||||
static Index blocked(MatrixType &m) {
|
||||
eigen_assert(m.rows() == m.cols());
|
||||
if (m.rows() == 0) {
|
||||
return -1;
|
||||
}
|
||||
/* Set up parameters for ?potrf */
|
||||
lapack_int size = to_lapack(m.rows());
|
||||
lapack_int matrix_order = lapack_storage_of(m);
|
||||
constexpr char uplo = Mode == Upper ? 'U' : 'L';
|
||||
Scalar *a = &(m.coeffRef(0, 0));
|
||||
lapack_int lda = to_lapack(m.outerStride());
|
||||
|
||||
lapack_int info = potrf(matrix_order, uplo, size, to_lapack(a), lda);
|
||||
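// Map LAPACK's convention (0 = success; i > 0 = the leading minor of order i is not
// positive definite, 1-based; i < 0 = invalid argument) onto Eigen's convention
// (-1 = success, otherwise a 0-based failure index; invalid arguments map to `size`).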
info = (info == 0) ? -1 : info > 0 ? info - 1 : size;
|
||||
return info;
|
||||
}
|
||||
|
||||
template <typename MatrixType, typename VectorType>
|
||||
static Index rankUpdate(MatrixType &mat, const VectorType &vec, const typename MatrixType::RealScalar &sigma) {
|
||||
return rank_update<Mode>::run(mat, vec, sigma);
|
||||
}
|
||||
};
|
||||
} // namespace lapacke_helpers
|
||||
|
||||
|
||||
/*
|
||||
* Here, we just put the generic implementation from lapacke_llt into a full specialization of the llt_inplace
|
||||
* type. By being a full specialization, the versions defined here take precedence over the generic implementation
* in LLT.h for the double, float, std::complex<double>, and std::complex<float> scalar types.
|
||||
*/
|
||||
|
||||
#define EIGEN_LAPACKE_LLT(EIGTYPE) \
|
||||
template <> \
|
||||
struct llt_inplace<EIGTYPE, Lower> : public lapacke_helpers::lapacke_llt<EIGTYPE, Lower> {}; \
|
||||
template <> \
|
||||
struct llt_inplace<EIGTYPE, Upper> : public lapacke_helpers::lapacke_llt<EIGTYPE, Upper> {};
|
||||
|
||||
EIGEN_LAPACKE_LLT(double)
|
||||
EIGEN_LAPACKE_LLT(float)
|
||||
EIGEN_LAPACKE_LLT(std::complex<double>)
|
||||
EIGEN_LAPACKE_LLT(std::complex<float>)
|
||||
|
||||
#undef EIGEN_LAPACKE_LLT
|
||||
|
||||
} // end namespace internal
|
||||
|
||||
} // end namespace Eigen
|
||||
|
||||
#endif // EIGEN_LLT_LAPACKE_H
|
||||
@@ -0,0 +1,738 @@
|
||||
// This file is part of Eigen, a lightweight C++ template library
|
||||
// for linear algebra.
|
||||
//
|
||||
// Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla
|
||||
// Public License v. 2.0. If a copy of the MPL was not distributed
|
||||
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
#ifndef EIGEN_CHOLMODSUPPORT_H
|
||||
#define EIGEN_CHOLMODSUPPORT_H
|
||||
|
||||
// IWYU pragma: private
|
||||
#include "./InternalHeaderCheck.h"
|
||||
|
||||
namespace Eigen {
|
||||
|
||||
namespace internal {
|
||||
|
||||
template <typename Scalar>
|
||||
struct cholmod_configure_matrix;
|
||||
|
||||
template <>
|
||||
struct cholmod_configure_matrix<double> {
|
||||
template <typename CholmodType>
|
||||
static void run(CholmodType& mat) {
|
||||
mat.xtype = CHOLMOD_REAL;
|
||||
mat.dtype = CHOLMOD_DOUBLE;
|
||||
}
|
||||
};
|
||||
|
||||
template <>
|
||||
struct cholmod_configure_matrix<std::complex<double> > {
|
||||
template <typename CholmodType>
|
||||
static void run(CholmodType& mat) {
|
||||
mat.xtype = CHOLMOD_COMPLEX;
|
||||
mat.dtype = CHOLMOD_DOUBLE;
|
||||
}
|
||||
};
|
||||
|
||||
// Other scalar types are not yet supported by Cholmod
|
||||
// template<> struct cholmod_configure_matrix<float> {
|
||||
// template<typename CholmodType>
|
||||
// static void run(CholmodType& mat) {
|
||||
// mat.xtype = CHOLMOD_REAL;
|
||||
// mat.dtype = CHOLMOD_SINGLE;
|
||||
// }
|
||||
// };
|
||||
//
|
||||
// template<> struct cholmod_configure_matrix<std::complex<float> > {
|
||||
// template<typename CholmodType>
|
||||
// static void run(CholmodType& mat) {
|
||||
// mat.xtype = CHOLMOD_COMPLEX;
|
||||
// mat.dtype = CHOLMOD_SINGLE;
|
||||
// }
|
||||
// };
|
||||
|
||||
} // namespace internal
|
||||
|
||||
/** Wraps the Eigen sparse matrix \a mat into a Cholmod sparse matrix object.
|
||||
* Note that the data are shared.
|
||||
*/
|
||||
template <typename Scalar_, int Options_, typename StorageIndex_>
|
||||
cholmod_sparse viewAsCholmod(Ref<SparseMatrix<Scalar_, Options_, StorageIndex_> > mat) {
|
||||
cholmod_sparse res;
|
||||
res.nzmax = mat.nonZeros();
|
||||
res.nrow = mat.rows();
|
||||
res.ncol = mat.cols();
|
||||
res.p = mat.outerIndexPtr();
|
||||
res.i = mat.innerIndexPtr();
|
||||
res.x = mat.valuePtr();
|
||||
res.z = 0;
|
||||
res.sorted = 1;
|
||||
if (mat.isCompressed()) {
|
||||
res.packed = 1;
|
||||
res.nz = 0;
|
||||
} else {
|
||||
res.packed = 0;
|
||||
res.nz = mat.innerNonZeroPtr();
|
||||
}
|
||||
|
||||
res.dtype = 0;
|
||||
res.stype = -1;
|
||||
|
||||
if (internal::is_same<StorageIndex_, int>::value) {
|
||||
res.itype = CHOLMOD_INT;
|
||||
} else if (internal::is_same<StorageIndex_, SuiteSparse_long>::value) {
|
||||
res.itype = CHOLMOD_LONG;
|
||||
} else {
|
||||
eigen_assert(false && "Index type not supported yet");
|
||||
}
|
||||
|
||||
// setup res.xtype
|
||||
internal::cholmod_configure_matrix<Scalar_>::run(res);
|
||||
|
||||
res.stype = 0;
|
||||
|
||||
return res;
|
||||
}
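// Illustrative sketch, not part of Eigen's sources: wrapping an Eigen sparse matrix so it
// can be handed to a raw CHOLMOD routine. The wrapper shares the matrix data, so `A` must
// stay alive while `Acm` is in use. cholmod_print_sparse is from CHOLMOD's Check module
// and stands in for any routine taking a cholmod_sparse*. Names are hypothetical.
#include <Eigen/CholmodSupport>
#include <Eigen/SparseCore>

inline void print_via_cholmod(const Eigen::SparseMatrix<double>& A) {
  cholmod_common c;
  cholmod_start(&c);
  cholmod_sparse Acm = Eigen::viewAsCholmod(A);  // shared view, no copy of the values
  cholmod_print_sparse(&Acm, "A", &c);
  cholmod_finish(&c);
}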
|
||||
|
||||
template <typename Scalar_, int Options_, typename Index_>
|
||||
const cholmod_sparse viewAsCholmod(const SparseMatrix<Scalar_, Options_, Index_>& mat) {
|
||||
cholmod_sparse res = viewAsCholmod(Ref<SparseMatrix<Scalar_, Options_, Index_> >(mat.const_cast_derived()));
|
||||
return res;
|
||||
}
|
||||
|
||||
template <typename Scalar_, int Options_, typename Index_>
|
||||
const cholmod_sparse viewAsCholmod(const SparseVector<Scalar_, Options_, Index_>& mat) {
|
||||
cholmod_sparse res = viewAsCholmod(Ref<SparseMatrix<Scalar_, Options_, Index_> >(mat.const_cast_derived()));
|
||||
return res;
|
||||
}
|
||||
|
||||
/** Returns a view of the Eigen sparse matrix \a mat as Cholmod sparse matrix.
|
||||
* The data are not copied but shared. */
|
||||
template <typename Scalar_, int Options_, typename Index_, unsigned int UpLo>
|
||||
cholmod_sparse viewAsCholmod(const SparseSelfAdjointView<const SparseMatrix<Scalar_, Options_, Index_>, UpLo>& mat) {
|
||||
cholmod_sparse res = viewAsCholmod(Ref<SparseMatrix<Scalar_, Options_, Index_> >(mat.matrix().const_cast_derived()));
|
||||
|
||||
if (UpLo == Upper) res.stype = 1;
|
||||
if (UpLo == Lower) res.stype = -1;
|
||||
// swap stype for rowmajor matrices (only works for real matrices)
|
||||
EIGEN_STATIC_ASSERT((Options_ & RowMajorBit) == 0 || NumTraits<Scalar_>::IsComplex == 0,
|
||||
THIS_METHOD_IS_ONLY_FOR_COLUMN_MAJOR_MATRICES);
|
||||
if (Options_ & RowMajorBit) res.stype *= -1;
|
||||
|
||||
return res;
|
||||
}
|
||||
|
||||
/** Returns a view of the Eigen \b dense matrix \a mat as Cholmod dense matrix.
|
||||
* The data are not copied but shared. */
|
||||
template <typename Derived>
|
||||
cholmod_dense viewAsCholmod(MatrixBase<Derived>& mat) {
|
||||
EIGEN_STATIC_ASSERT((internal::traits<Derived>::Flags & RowMajorBit) == 0,
|
||||
THIS_METHOD_IS_ONLY_FOR_COLUMN_MAJOR_MATRICES);
|
||||
typedef typename Derived::Scalar Scalar;
|
||||
|
||||
cholmod_dense res;
|
||||
res.nrow = mat.rows();
|
||||
res.ncol = mat.cols();
|
||||
res.nzmax = res.nrow * res.ncol;
|
||||
res.d = Derived::IsVectorAtCompileTime ? mat.derived().size() : mat.derived().outerStride();
|
||||
res.x = (void*)(mat.derived().data());
|
||||
res.z = 0;
|
||||
|
||||
internal::cholmod_configure_matrix<Scalar>::run(res);
|
||||
|
||||
return res;
|
||||
}
|
||||
|
||||
/** Returns a view of the Cholmod sparse matrix \a cm as an Eigen sparse matrix.
|
||||
* The data are not copied but shared. */
|
||||
template <typename Scalar, typename StorageIndex>
|
||||
Map<const SparseMatrix<Scalar, ColMajor, StorageIndex> > viewAsEigen(cholmod_sparse& cm) {
|
||||
return Map<const SparseMatrix<Scalar, ColMajor, StorageIndex> >(
|
||||
cm.nrow, cm.ncol, static_cast<StorageIndex*>(cm.p)[cm.ncol], static_cast<StorageIndex*>(cm.p),
|
||||
static_cast<StorageIndex*>(cm.i), static_cast<Scalar*>(cm.x));
|
||||
}
|
||||
|
||||
/** Returns a view of the Cholmod sparse matrix factor \a cm as an Eigen sparse matrix.
|
||||
* The data are not copied but shared. */
|
||||
template <typename Scalar, typename StorageIndex>
|
||||
Map<const SparseMatrix<Scalar, ColMajor, StorageIndex> > viewAsEigen(cholmod_factor& cm) {
|
||||
return Map<const SparseMatrix<Scalar, ColMajor, StorageIndex> >(
|
||||
cm.n, cm.n, static_cast<StorageIndex*>(cm.p)[cm.n], static_cast<StorageIndex*>(cm.p),
|
||||
static_cast<StorageIndex*>(cm.i), static_cast<Scalar*>(cm.x));
|
||||
}
|
||||
|
||||
namespace internal {
|
||||
|
||||
// template specializations for int and long that call the correct cholmod method
|
||||
|
||||
#define EIGEN_CHOLMOD_SPECIALIZE0(ret, name) \
|
||||
template <typename StorageIndex_> \
|
||||
inline ret cm_##name(cholmod_common& Common) { \
|
||||
return cholmod_##name(&Common); \
|
||||
} \
|
||||
template <> \
|
||||
inline ret cm_##name<SuiteSparse_long>(cholmod_common & Common) { \
|
||||
return cholmod_l_##name(&Common); \
|
||||
}
|
||||
|
||||
#define EIGEN_CHOLMOD_SPECIALIZE1(ret, name, t1, a1) \
|
||||
template <typename StorageIndex_> \
|
||||
inline ret cm_##name(t1& a1, cholmod_common& Common) { \
|
||||
return cholmod_##name(&a1, &Common); \
|
||||
} \
|
||||
template <> \
|
||||
inline ret cm_##name<SuiteSparse_long>(t1 & a1, cholmod_common & Common) { \
|
||||
return cholmod_l_##name(&a1, &Common); \
|
||||
}
|
||||
|
||||
EIGEN_CHOLMOD_SPECIALIZE0(int, start)
|
||||
EIGEN_CHOLMOD_SPECIALIZE0(int, finish)
|
||||
|
||||
EIGEN_CHOLMOD_SPECIALIZE1(int, free_factor, cholmod_factor*, L)
|
||||
EIGEN_CHOLMOD_SPECIALIZE1(int, free_dense, cholmod_dense*, X)
|
||||
EIGEN_CHOLMOD_SPECIALIZE1(int, free_sparse, cholmod_sparse*, A)
|
||||
|
||||
EIGEN_CHOLMOD_SPECIALIZE1(cholmod_factor*, analyze, cholmod_sparse, A)
|
||||
EIGEN_CHOLMOD_SPECIALIZE1(cholmod_sparse*, factor_to_sparse, cholmod_factor, L)
|
||||
|
||||
template <typename StorageIndex_>
|
||||
inline cholmod_dense* cm_solve(int sys, cholmod_factor& L, cholmod_dense& B, cholmod_common& Common) {
|
||||
return cholmod_solve(sys, &L, &B, &Common);
|
||||
}
|
||||
template <>
|
||||
inline cholmod_dense* cm_solve<SuiteSparse_long>(int sys, cholmod_factor& L, cholmod_dense& B, cholmod_common& Common) {
|
||||
return cholmod_l_solve(sys, &L, &B, &Common);
|
||||
}
|
||||
|
||||
template <typename StorageIndex_>
|
||||
inline cholmod_sparse* cm_spsolve(int sys, cholmod_factor& L, cholmod_sparse& B, cholmod_common& Common) {
|
||||
return cholmod_spsolve(sys, &L, &B, &Common);
|
||||
}
|
||||
template <>
|
||||
inline cholmod_sparse* cm_spsolve<SuiteSparse_long>(int sys, cholmod_factor& L, cholmod_sparse& B,
|
||||
cholmod_common& Common) {
|
||||
return cholmod_l_spsolve(sys, &L, &B, &Common);
|
||||
}
|
||||
|
||||
template <typename StorageIndex_>
|
||||
inline int cm_factorize_p(cholmod_sparse* A, double beta[2], StorageIndex_* fset, std::size_t fsize, cholmod_factor* L,
|
||||
cholmod_common& Common) {
|
||||
return cholmod_factorize_p(A, beta, fset, fsize, L, &Common);
|
||||
}
|
||||
template <>
|
||||
inline int cm_factorize_p<SuiteSparse_long>(cholmod_sparse* A, double beta[2], SuiteSparse_long* fset,
|
||||
std::size_t fsize, cholmod_factor* L, cholmod_common& Common) {
|
||||
return cholmod_l_factorize_p(A, beta, fset, fsize, L, &Common);
|
||||
}
|
||||
|
||||
#undef EIGEN_CHOLMOD_SPECIALIZE0
|
||||
#undef EIGEN_CHOLMOD_SPECIALIZE1
|
||||
|
||||
} // namespace internal
|
||||
|
||||
enum CholmodMode { CholmodAuto, CholmodSimplicialLLt, CholmodSupernodalLLt, CholmodLDLt };
|
||||
|
||||
/** \ingroup CholmodSupport_Module
|
||||
* \class CholmodBase
|
||||
* \brief The base class for the direct Cholesky factorization of Cholmod
|
||||
* \sa class CholmodSupernodalLLT, class CholmodSimplicialLDLT, class CholmodSimplicialLLT
|
||||
*/
|
||||
template <typename MatrixType_, int UpLo_, typename Derived>
|
||||
class CholmodBase : public SparseSolverBase<Derived> {
|
||||
protected:
|
||||
typedef SparseSolverBase<Derived> Base;
|
||||
using Base::derived;
|
||||
using Base::m_isInitialized;
|
||||
|
||||
public:
|
||||
typedef MatrixType_ MatrixType;
|
||||
enum { UpLo = UpLo_ };
|
||||
typedef typename MatrixType::Scalar Scalar;
|
||||
typedef typename MatrixType::RealScalar RealScalar;
|
||||
typedef MatrixType CholMatrixType;
|
||||
typedef typename MatrixType::StorageIndex StorageIndex;
|
||||
enum { ColsAtCompileTime = MatrixType::ColsAtCompileTime, MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime };
|
||||
|
||||
public:
|
||||
CholmodBase() : m_cholmodFactor(0), m_info(Success), m_factorizationIsOk(false), m_analysisIsOk(false) {
|
||||
EIGEN_STATIC_ASSERT((internal::is_same<double, RealScalar>::value), CHOLMOD_SUPPORTS_DOUBLE_PRECISION_ONLY);
|
||||
m_shiftOffset[0] = m_shiftOffset[1] = 0.0;
|
||||
internal::cm_start<StorageIndex>(m_cholmod);
|
||||
}
|
||||
|
||||
explicit CholmodBase(const MatrixType& matrix)
|
||||
: m_cholmodFactor(0), m_info(Success), m_factorizationIsOk(false), m_analysisIsOk(false) {
|
||||
EIGEN_STATIC_ASSERT((internal::is_same<double, RealScalar>::value), CHOLMOD_SUPPORTS_DOUBLE_PRECISION_ONLY);
|
||||
m_shiftOffset[0] = m_shiftOffset[1] = 0.0;
|
||||
internal::cm_start<StorageIndex>(m_cholmod);
|
||||
compute(matrix);
|
||||
}
|
||||
|
||||
~CholmodBase() {
|
||||
if (m_cholmodFactor) internal::cm_free_factor<StorageIndex>(m_cholmodFactor, m_cholmod);
|
||||
internal::cm_finish<StorageIndex>(m_cholmod);
|
||||
}
|
||||
|
||||
inline StorageIndex cols() const { return internal::convert_index<StorageIndex, Index>(m_cholmodFactor->n); }
|
||||
inline StorageIndex rows() const { return internal::convert_index<StorageIndex, Index>(m_cholmodFactor->n); }
|
||||
|
||||
/** \brief Reports whether previous computation was successful.
|
||||
*
|
||||
* \returns \c Success if computation was successful,
|
||||
* \c NumericalIssue if the matrix appears to be negative.
|
||||
*/
|
||||
ComputationInfo info() const {
|
||||
eigen_assert(m_isInitialized && "Decomposition is not initialized.");
|
||||
return m_info;
|
||||
}
|
||||
|
||||
/** Computes the sparse Cholesky decomposition of \a matrix */
|
||||
Derived& compute(const MatrixType& matrix) {
|
||||
analyzePattern(matrix);
|
||||
factorize(matrix);
|
||||
return derived();
|
||||
}
|
||||
|
||||
/** Performs a symbolic decomposition on the sparsity pattern of \a matrix.
|
||||
*
|
||||
* This function is particularly useful when solving for several problems having the same structure.
|
||||
*
|
||||
* \sa factorize()
|
||||
*/
|
||||
void analyzePattern(const MatrixType& matrix) {
|
||||
if (m_cholmodFactor) {
|
||||
internal::cm_free_factor<StorageIndex>(m_cholmodFactor, m_cholmod);
|
||||
m_cholmodFactor = 0;
|
||||
}
|
||||
cholmod_sparse A = viewAsCholmod(matrix.template selfadjointView<UpLo>());
|
||||
m_cholmodFactor = internal::cm_analyze<StorageIndex>(A, m_cholmod);
|
||||
|
||||
this->m_isInitialized = true;
|
||||
this->m_info = Success;
|
||||
m_analysisIsOk = true;
|
||||
m_factorizationIsOk = false;
|
||||
}
|
||||
|
||||
/** Performs a numeric decomposition of \a matrix
|
||||
*
|
||||
* The given matrix must have the same sparsity pattern as the matrix on which the symbolic decomposition has been
|
||||
* performed.
|
||||
*
|
||||
* \sa analyzePattern()
|
||||
*/
|
||||
void factorize(const MatrixType& matrix) {
|
||||
eigen_assert(m_analysisIsOk && "You must first call analyzePattern()");
|
||||
cholmod_sparse A = viewAsCholmod(matrix.template selfadjointView<UpLo>());
|
||||
internal::cm_factorize_p<StorageIndex>(&A, m_shiftOffset, 0, 0, m_cholmodFactor, m_cholmod);
|
||||
|
||||
// If the factorization failed, either the input matrix was zero (so m_cholmodFactor == nullptr), or minor is the
|
||||
// column at which it failed. On success minor == n.
|
||||
this->m_info =
|
||||
(m_cholmodFactor != nullptr && m_cholmodFactor->minor == m_cholmodFactor->n ? Success : NumericalIssue);
|
||||
m_factorizationIsOk = true;
|
||||
}
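// Illustrative sketch, not part of Eigen's sources: when several matrices share the same
// sparsity pattern (e.g. successive Newton steps), the symbolic analysis can be done once
// and only the numerical factorization repeated. The matrices are assumed symmetric;
// only the triangular part selected by UpLo is read. Names are hypothetical.
#include <Eigen/CholmodSupport>
#include <Eigen/SparseCore>

inline Eigen::VectorXd solve_sequence(const Eigen::SparseMatrix<double>& A0,
                                      const Eigen::SparseMatrix<double>& A1,
                                      const Eigen::VectorXd& b) {
  Eigen::CholmodSupernodalLLT<Eigen::SparseMatrix<double> > solver;
  solver.analyzePattern(A0);  // symbolic step, reused below
  solver.factorize(A0);       // numeric step for A0
  Eigen::VectorXd x = solver.solve(b);
  solver.factorize(A1);       // A1 has the same pattern: no re-analysis needed
  x += solver.solve(b);
  return x;
}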
|
||||
|
||||
/** Returns a reference to the Cholmod configuration structure, providing full control over the performed operations.
|
||||
* See the Cholmod user guide for details. */
|
||||
cholmod_common& cholmod() { return m_cholmod; }
|
||||
|
||||
#ifndef EIGEN_PARSED_BY_DOXYGEN
|
||||
/** \internal */
|
||||
template <typename Rhs, typename Dest>
|
||||
void _solve_impl(const MatrixBase<Rhs>& b, MatrixBase<Dest>& dest) const {
|
||||
eigen_assert(m_factorizationIsOk &&
|
||||
"The decomposition is not in a valid state for solving, you must first call either compute() or "
|
||||
"symbolic()/numeric()");
|
||||
const Index size = m_cholmodFactor->n;
|
||||
EIGEN_UNUSED_VARIABLE(size);
|
||||
eigen_assert(size == b.rows());
|
||||
|
||||
// Cholmod needs column-major storage without inner-stride, which corresponds to the default behavior of Ref.
|
||||
Ref<const Matrix<typename Rhs::Scalar, Dynamic, Dynamic, ColMajor> > b_ref(b.derived());
|
||||
|
||||
cholmod_dense b_cd = viewAsCholmod(b_ref);
|
||||
cholmod_dense* x_cd = internal::cm_solve<StorageIndex>(CHOLMOD_A, *m_cholmodFactor, b_cd, m_cholmod);
|
||||
if (!x_cd) {
|
||||
this->m_info = NumericalIssue;
|
||||
return;
|
||||
}
|
||||
// TODO optimize this copy by swapping when possible (be careful with alignment, etc.)
|
||||
// NOTE Actually, the copy can be avoided by calling cholmod_solve2 instead of cholmod_solve
|
||||
dest = Matrix<Scalar, Dest::RowsAtCompileTime, Dest::ColsAtCompileTime>::Map(reinterpret_cast<Scalar*>(x_cd->x),
|
||||
b.rows(), b.cols());
|
||||
internal::cm_free_dense<StorageIndex>(x_cd, m_cholmod);
|
||||
}
|
||||
|
||||
/** \internal */
|
||||
template <typename RhsDerived, typename DestDerived>
|
||||
void _solve_impl(const SparseMatrixBase<RhsDerived>& b, SparseMatrixBase<DestDerived>& dest) const {
|
||||
eigen_assert(m_factorizationIsOk &&
|
||||
"The decomposition is not in a valid state for solving, you must first call either compute() or "
|
||||
"symbolic()/numeric()");
|
||||
const Index size = m_cholmodFactor->n;
|
||||
EIGEN_UNUSED_VARIABLE(size);
|
||||
eigen_assert(size == b.rows());
|
||||
|
||||
// note: cs stands for Cholmod Sparse
|
||||
Ref<SparseMatrix<typename RhsDerived::Scalar, ColMajor, typename RhsDerived::StorageIndex> > b_ref(
|
||||
b.const_cast_derived());
|
||||
cholmod_sparse b_cs = viewAsCholmod(b_ref);
|
||||
cholmod_sparse* x_cs = internal::cm_spsolve<StorageIndex>(CHOLMOD_A, *m_cholmodFactor, b_cs, m_cholmod);
|
||||
if (!x_cs) {
|
||||
this->m_info = NumericalIssue;
|
||||
return;
|
||||
}
|
||||
// TODO optimize this copy by swapping when possible (be careful with alignment, etc.)
|
||||
// NOTE cholmod_spsolve in fact just calls the dense solver for blocks of 4 columns at a time (similar to Eigen's
|
||||
// sparse solver)
|
||||
dest.derived() = viewAsEigen<typename DestDerived::Scalar, typename DestDerived::StorageIndex>(*x_cs);
|
||||
internal::cm_free_sparse<StorageIndex>(x_cs, m_cholmod);
|
||||
}
|
||||
#endif // EIGEN_PARSED_BY_DOXYGEN
|
||||
|
||||
/** Sets the shift parameter that will be used to adjust the diagonal coefficients during the numerical factorization.
|
||||
*
|
||||
* During the numerical factorization, an offset term is added to the diagonal coefficients:\n
|
||||
* \c d_ii = \a offset + \c d_ii
|
||||
*
|
||||
* The default is \a offset=0.
|
||||
*
|
||||
* \returns a reference to \c *this.
|
||||
*/
|
||||
Derived& setShift(const RealScalar& offset) {
|
||||
m_shiftOffset[0] = double(offset);
|
||||
return derived();
|
||||
}
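// Illustrative sketch, not part of Eigen's sources: adding a small diagonal shift before
// the numerical factorization to regularize a nearly semidefinite matrix. The shift value
// and all names are hypothetical.
#include <Eigen/CholmodSupport>
#include <Eigen/SparseCore>

inline Eigen::VectorXd solve_with_shift(const Eigen::SparseMatrix<double>& A, const Eigen::VectorXd& b) {
  Eigen::CholmodSimplicialLDLT<Eigen::SparseMatrix<double> > solver;
  solver.setShift(1e-8);  // d_ii <- 1e-8 + d_ii during factorize()
  solver.compute(A);      // analyzePattern() followed by factorize()
  return solver.solve(b);
}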
|
||||
|
||||
/** \returns the determinant of the underlying matrix from the current factorization */
|
||||
Scalar determinant() const {
|
||||
using std::exp;
|
||||
return exp(logDeterminant());
|
||||
}
|
||||
|
||||
/** \returns the log determinant of the underlying matrix from the current factorization */
|
||||
Scalar logDeterminant() const {
|
||||
using numext::real;
|
||||
using std::log;
|
||||
eigen_assert(m_factorizationIsOk &&
|
||||
"The decomposition is not in a valid state for solving, you must first call either compute() or "
|
||||
"symbolic()/numeric()");
|
||||
|
||||
RealScalar logDet = 0;
|
||||
Scalar* x = static_cast<Scalar*>(m_cholmodFactor->x);
|
||||
if (m_cholmodFactor->is_super) {
|
||||
// Supernodal factorization stored as a packed list of dense column-major blocks,
|
||||
// as described by the following structure:
|
||||
|
||||
// super[k] == index of the first column of the j-th super node
|
||||
StorageIndex* super = static_cast<StorageIndex*>(m_cholmodFactor->super);
|
||||
// pi[k] == offset to the description of row indices
|
||||
StorageIndex* pi = static_cast<StorageIndex*>(m_cholmodFactor->pi);
|
||||
// px[k] == offset to the respective dense block
|
||||
StorageIndex* px = static_cast<StorageIndex*>(m_cholmodFactor->px);
|
||||
|
||||
Index nb_super_nodes = m_cholmodFactor->nsuper;
|
||||
for (Index k = 0; k < nb_super_nodes; ++k) {
|
||||
StorageIndex ncols = super[k + 1] - super[k];
|
||||
StorageIndex nrows = pi[k + 1] - pi[k];
|
||||
|
||||
Map<const Array<Scalar, 1, Dynamic>, 0, InnerStride<> > sk(x + px[k], ncols, InnerStride<>(nrows + 1));
|
||||
logDet += sk.real().log().sum();
|
||||
}
|
||||
} else {
|
||||
// Simplicial factorization stored as standard CSC matrix.
|
||||
StorageIndex* p = static_cast<StorageIndex*>(m_cholmodFactor->p);
|
||||
Index size = m_cholmodFactor->n;
|
||||
for (Index k = 0; k < size; ++k) logDet += log(real(x[p[k]]));
|
||||
}
|
||||
if (m_cholmodFactor->is_ll) logDet *= 2.0;
|
||||
return logDet;
|
||||
}
|
||||
|
||||
template <typename Stream>
|
||||
void dumpMemory(Stream& /*s*/) {}
|
||||
|
||||
protected:
|
||||
mutable cholmod_common m_cholmod;
|
||||
cholmod_factor* m_cholmodFactor;
|
||||
double m_shiftOffset[2];
|
||||
mutable ComputationInfo m_info;
|
||||
int m_factorizationIsOk;
|
||||
int m_analysisIsOk;
|
||||
};
|
||||
|
||||
/** \ingroup CholmodSupport_Module
|
||||
* \class CholmodSimplicialLLT
|
||||
* \brief A simplicial direct Cholesky (LLT) factorization and solver based on Cholmod
|
||||
*
|
||||
* This class allows solving A.X = B sparse linear problems via a simplicial LL^T Cholesky factorization
|
||||
* using the Cholmod library.
|
||||
* This simplicial variant is equivalent to Eigen's built-in SimplicialLLT class. Therefore, it has little practical
|
||||
* interest. The sparse matrix A must be selfadjoint and positive definite. The vectors or matrices X and B can be
|
||||
* either dense or sparse.
|
||||
*
|
||||
* \tparam MatrixType_ the type of the sparse matrix A, it must be a SparseMatrix<>
|
||||
* \tparam UpLo_ the triangular part that will be used for the computations. It can be Lower
|
||||
* or Upper. Default is Lower.
|
||||
*
|
||||
* \implsparsesolverconcept
|
||||
*
|
||||
* This class supports all kind of SparseMatrix<>: row or column major; upper, lower, or both; compressed or non
|
||||
* compressed.
|
||||
*
|
||||
* \warning Only double precision real and complex scalar types are supported by Cholmod.
|
||||
*
|
||||
* \sa \ref TutorialSparseSolverConcept, class CholmodSupernodalLLT, class SimplicialLLT
|
||||
*/
|
||||
template <typename MatrixType_, int UpLo_ = Lower>
|
||||
class CholmodSimplicialLLT : public CholmodBase<MatrixType_, UpLo_, CholmodSimplicialLLT<MatrixType_, UpLo_> > {
|
||||
typedef CholmodBase<MatrixType_, UpLo_, CholmodSimplicialLLT> Base;
|
||||
using Base::m_cholmod;
|
||||
|
||||
public:
|
||||
typedef MatrixType_ MatrixType;
|
||||
typedef typename MatrixType::Scalar Scalar;
|
||||
typedef typename MatrixType::RealScalar RealScalar;
|
||||
typedef typename MatrixType::StorageIndex StorageIndex;
|
||||
typedef TriangularView<const MatrixType, Eigen::Lower> MatrixL;
|
||||
typedef TriangularView<const typename MatrixType::AdjointReturnType, Eigen::Upper> MatrixU;
|
||||
|
||||
CholmodSimplicialLLT() : Base() { init(); }
|
||||
|
||||
CholmodSimplicialLLT(const MatrixType& matrix) : Base() {
|
||||
init();
|
||||
this->compute(matrix);
|
||||
}
|
||||
|
||||
~CholmodSimplicialLLT() {}
|
||||
|
||||
/** \returns an expression of the factor L */
|
||||
inline MatrixL matrixL() const { return viewAsEigen<Scalar, StorageIndex>(*Base::m_cholmodFactor); }
|
||||
|
||||
/** \returns an expression of the factor U (= L^*) */
|
||||
inline MatrixU matrixU() const { return matrixL().adjoint(); }
|
||||
|
||||
protected:
|
||||
void init() {
|
||||
m_cholmod.final_asis = 0;
|
||||
m_cholmod.supernodal = CHOLMOD_SIMPLICIAL;
|
||||
m_cholmod.final_ll = 1;
|
||||
}
|
||||
};
|
||||
|
||||
/** \ingroup CholmodSupport_Module
|
||||
* \class CholmodSimplicialLDLT
|
||||
* \brief A simplicial direct Cholesky (LDLT) factorization and solver based on Cholmod
|
||||
*
|
||||
* This class allows solving A.X = B sparse linear problems via a simplicial LDL^T Cholesky factorization
|
||||
* using the Cholmod library.
|
||||
* This simplicial variant is equivalent to Eigen's built-in SimplicialLDLT class. Therefore, it has little practical
|
||||
* interest. The sparse matrix A must be selfadjoint and positive definite. The vectors or matrices X and B can be
|
||||
* either dense or sparse.
|
||||
*
|
||||
* \tparam MatrixType_ the type of the sparse matrix A, it must be a SparseMatrix<>
|
||||
* \tparam UpLo_ the triangular part that will be used for the computations. It can be Lower
|
||||
* or Upper. Default is Lower.
|
||||
*
|
||||
* \implsparsesolverconcept
|
||||
*
|
||||
* This class supports all kind of SparseMatrix<>: row or column major; upper, lower, or both; compressed or non
|
||||
* compressed.
|
||||
*
|
||||
* \warning Only double precision real and complex scalar types are supported by Cholmod.
|
||||
*
|
||||
* \sa \ref TutorialSparseSolverConcept, class CholmodSupernodalLLT, class SimplicialLDLT
|
||||
*/
|
||||
template <typename MatrixType_, int UpLo_ = Lower>
|
||||
class CholmodSimplicialLDLT : public CholmodBase<MatrixType_, UpLo_, CholmodSimplicialLDLT<MatrixType_, UpLo_> > {
|
||||
typedef CholmodBase<MatrixType_, UpLo_, CholmodSimplicialLDLT> Base;
|
||||
using Base::m_cholmod;
|
||||
|
||||
public:
|
||||
typedef MatrixType_ MatrixType;
|
||||
typedef typename MatrixType::Scalar Scalar;
|
||||
typedef typename MatrixType::RealScalar RealScalar;
|
||||
typedef typename MatrixType::StorageIndex StorageIndex;
|
||||
typedef Matrix<Scalar, Dynamic, 1> VectorType;
|
||||
typedef TriangularView<const MatrixType, Eigen::UnitLower> MatrixL;
|
||||
typedef TriangularView<const typename MatrixType::AdjointReturnType, Eigen::UnitUpper> MatrixU;
|
||||
|
||||
CholmodSimplicialLDLT() : Base() { init(); }
|
||||
|
||||
CholmodSimplicialLDLT(const MatrixType& matrix) : Base() {
|
||||
init();
|
||||
this->compute(matrix);
|
||||
}
|
||||
|
||||
~CholmodSimplicialLDLT() {}
|
||||
|
||||
/** \returns a vector expression of the diagonal D */
|
||||
inline VectorType vectorD() const {
|
||||
auto cholmodL = viewAsEigen<Scalar, StorageIndex>(*Base::m_cholmodFactor);
|
||||
|
||||
VectorType D{cholmodL.rows()};
|
||||
|
||||
for (Index k = 0; k < cholmodL.outerSize(); ++k) {
|
||||
typename decltype(cholmodL)::InnerIterator it{cholmodL, k};
|
||||
D(k) = it.value();
|
||||
}
|
||||
|
||||
return D;
|
||||
}
|
||||
|
||||
/** \returns an expression of the factor L */
|
||||
inline MatrixL matrixL() const { return viewAsEigen<Scalar, StorageIndex>(*Base::m_cholmodFactor); }
|
||||
|
||||
/** \returns an expression of the factor U (= L^*) */
|
||||
inline MatrixU matrixU() const { return matrixL().adjoint(); }
|
||||
|
||||
protected:
|
||||
void init() {
|
||||
m_cholmod.final_asis = 1;
|
||||
m_cholmod.supernodal = CHOLMOD_SIMPLICIAL;
|
||||
}
|
||||
};
|
||||
|
||||
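// Illustrative sketch (same assumptions as above): the LDL^T variant additionally exposes the
// diagonal D through vectorD().
//
//   CholmodSimplicialLDLT<SparseMatrix<double> > ldlt(A);
//   VectorXd d = ldlt.vectorD();   // diagonal of the LDL^T factorization
//   VectorXd x = ldlt.solve(b);    // solve A * x = b
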
/** \ingroup CholmodSupport_Module
 * \class CholmodSupernodalLLT
 * \brief A supernodal Cholesky (LLT) factorization and solver based on Cholmod
 *
 * This class solves A.X = B sparse linear problems via a supernodal LL^T Cholesky factorization
 * using the Cholmod library.
 * This supernodal variant performs best on dense enough problems, e.g., 3D FEM, or very high order 2D FEM.
 * The sparse matrix A must be selfadjoint and positive definite. The vectors or matrices
 * X and B can be either dense or sparse.
 *
 * \tparam MatrixType_ the type of the sparse matrix A, it must be a SparseMatrix<>
 * \tparam UpLo_ the triangular part that will be used for the computations. It can be Lower
 *               or Upper. Default is Lower.
 *
 * \implsparsesolverconcept
 *
 * This class supports all kinds of SparseMatrix<>: row or column major; upper, lower, or both; compressed or
 * non-compressed.
 *
 * \warning Only double precision real and complex scalar types are supported by Cholmod.
 *
 * \sa \ref TutorialSparseSolverConcept
 */
template <typename MatrixType_, int UpLo_ = Lower>
class CholmodSupernodalLLT : public CholmodBase<MatrixType_, UpLo_, CholmodSupernodalLLT<MatrixType_, UpLo_> > {
  typedef CholmodBase<MatrixType_, UpLo_, CholmodSupernodalLLT> Base;
  using Base::m_cholmod;

 public:
  typedef MatrixType_ MatrixType;
  typedef typename MatrixType::Scalar Scalar;
  typedef typename MatrixType::RealScalar RealScalar;
  typedef typename MatrixType::StorageIndex StorageIndex;

  CholmodSupernodalLLT() : Base() { init(); }

  CholmodSupernodalLLT(const MatrixType& matrix) : Base() {
    init();
    this->compute(matrix);
  }

  ~CholmodSupernodalLLT() {}

  /** \returns an expression of the factor L */
  inline MatrixType matrixL() const {
    // Convert the Cholmod factor's supernodal storage format to Eigen's CSC storage format
    cholmod_sparse* cholmodL = internal::cm_factor_to_sparse(*Base::m_cholmodFactor, m_cholmod);
    MatrixType L = viewAsEigen<Scalar, StorageIndex>(*cholmodL);
    internal::cm_free_sparse<StorageIndex>(cholmodL, m_cholmod);

    return L;
  }

  /** \returns an expression of the factor U (= L^*) */
  inline MatrixType matrixU() const { return matrixL().adjoint(); }

 protected:
  void init() {
    m_cholmod.final_asis = 1;
    m_cholmod.supernodal = CHOLMOD_SUPERNODAL;
  }
};

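// Illustrative sketch (same assumptions as above): unlike the simplicial variants, matrixL()
// here returns a plain SparseMatrix copy converted from the supernodal factor.
//
//   CholmodSupernodalLLT<SparseMatrix<double> > llt(A);
//   SparseMatrix<double> L = llt.matrixL();   // CSC copy of the supernodal factor
//   VectorXd x = llt.solve(b);
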
/** \ingroup CholmodSupport_Module
 * \class CholmodDecomposition
 * \brief A general Cholesky factorization and solver based on Cholmod
 *
 * This class solves A.X = B sparse linear problems via an LL^T or LDL^T Cholesky factorization
 * using the Cholmod library. The sparse matrix A must be selfadjoint and positive definite. The vectors or matrices
 * X and B can be either dense or sparse.
 *
 * This variant permits changing the underlying Cholesky method at runtime.
 * On the other hand, it does not provide access to the result of the factorization.
 * The default is to let Cholmod automatically choose between a simplicial and a supernodal factorization.
 *
 * \tparam MatrixType_ the type of the sparse matrix A, it must be a SparseMatrix<>
 * \tparam UpLo_ the triangular part that will be used for the computations. It can be Lower
 *               or Upper. Default is Lower.
 *
 * \implsparsesolverconcept
 *
 * This class supports all kinds of SparseMatrix<>: row or column major; upper, lower, or both; compressed or
 * non-compressed.
 *
 * \warning Only double precision real and complex scalar types are supported by Cholmod.
 *
 * \sa \ref TutorialSparseSolverConcept
 */
template <typename MatrixType_, int UpLo_ = Lower>
class CholmodDecomposition : public CholmodBase<MatrixType_, UpLo_, CholmodDecomposition<MatrixType_, UpLo_> > {
  typedef CholmodBase<MatrixType_, UpLo_, CholmodDecomposition> Base;
  using Base::m_cholmod;

 public:
  typedef MatrixType_ MatrixType;

  CholmodDecomposition() : Base() { init(); }

  CholmodDecomposition(const MatrixType& matrix) : Base() {
    init();
    this->compute(matrix);
  }

  ~CholmodDecomposition() {}

  void setMode(CholmodMode mode) {
    switch (mode) {
      case CholmodAuto:
        m_cholmod.final_asis = 1;
        m_cholmod.supernodal = CHOLMOD_AUTO;
        break;
      case CholmodSimplicialLLt:
        m_cholmod.final_asis = 0;
        m_cholmod.supernodal = CHOLMOD_SIMPLICIAL;
        m_cholmod.final_ll = 1;
        break;
      case CholmodSupernodalLLt:
        m_cholmod.final_asis = 1;
        m_cholmod.supernodal = CHOLMOD_SUPERNODAL;
        break;
      case CholmodLDLt:
        m_cholmod.final_asis = 1;
        m_cholmod.supernodal = CHOLMOD_SIMPLICIAL;
        break;
      default:
        break;
    }
  }

 protected:
  void init() {
    m_cholmod.final_asis = 1;
    m_cholmod.supernodal = CHOLMOD_AUTO;
  }
};

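// Illustrative sketch (same assumptions as above): the decomposition mode can be switched at
// runtime before factorizing.
//
//   CholmodDecomposition<SparseMatrix<double> > chol;
//   chol.setMode(CholmodSupernodalLLt);   // or CholmodAuto, CholmodSimplicialLLt, CholmodLDLt
//   chol.compute(A);
//   VectorXd x = chol.solve(b);
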
}  // end namespace Eigen

#endif  // EIGEN_CHOLMODSUPPORT_H
@@ -0,0 +1,3 @@
#ifndef EIGEN_CHOLMODSUPPORT_MODULE_H
#error "Please include Eigen/CholmodSupport instead of including headers inside the src directory directly."
#endif
@@ -0,0 +1,239 @@
|
||||
// This file is part of Eigen, a lightweight C++ template library
|
||||
// for linear algebra.
|
||||
//
|
||||
// Copyright (C) 2017 Gael Guennebaud <gael.guennebaud@inria.fr>
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla
|
||||
// Public License v. 2.0. If a copy of the MPL was not distributed
|
||||
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
#ifndef EIGEN_ARITHMETIC_SEQUENCE_H
|
||||
#define EIGEN_ARITHMETIC_SEQUENCE_H
|
||||
|
||||
// IWYU pragma: private
|
||||
#include "./InternalHeaderCheck.h"
|
||||
|
||||
namespace Eigen {
|
||||
|
||||
namespace internal {
|
||||
|
||||
// Helper to cleanup the type of the increment:
|
||||
template <typename T>
|
||||
struct cleanup_seq_incr {
|
||||
typedef typename cleanup_index_type<T, DynamicIndex>::type type;
|
||||
};
|
||||
|
||||
} // namespace internal
|
||||
|
||||
//--------------------------------------------------------------------------------
|
||||
// seq(first,last,incr) and seqN(first,size,incr)
|
||||
//--------------------------------------------------------------------------------
|
||||
|
||||
template <typename FirstType = Index, typename SizeType = Index, typename IncrType = internal::FixedInt<1> >
|
||||
class ArithmeticSequence;
|
||||
|
||||
template <typename FirstType, typename SizeType, typename IncrType>
|
||||
ArithmeticSequence<typename internal::cleanup_index_type<FirstType>::type,
|
||||
typename internal::cleanup_index_type<SizeType>::type,
|
||||
typename internal::cleanup_seq_incr<IncrType>::type>
|
||||
seqN(FirstType first, SizeType size, IncrType incr);
|
||||
|
||||
/** \class ArithmeticSequence
|
||||
* \ingroup Core_Module
|
||||
*
|
||||
* This class represents an arithmetic progression \f$ a_0, a_1, a_2, ..., a_{n-1}\f$ defined by
|
||||
* its \em first value \f$ a_0 \f$, its \em size (aka length) \em n, and the \em increment (aka stride)
|
||||
* that is equal to \f$ a_{i+1}-a_{i}\f$ for any \em i.
|
||||
*
|
||||
* It is internally used as the return type of the Eigen::seq and Eigen::seqN functions, and as the input arguments
|
||||
* of DenseBase::operator()(const RowIndices&, const ColIndices&), and most of the time this is the
|
||||
* only way it is used.
|
||||
*
|
||||
* \tparam FirstType type of the first element, usually an Index,
|
||||
* but internally it can be a symbolic expression
|
||||
* \tparam SizeType type representing the size of the sequence, usually an Index
|
||||
* or a compile time integral constant. Internally, it can also be a symbolic expression
|
||||
* \tparam IncrType type of the increment, can be a runtime Index, or a compile time integral constant (default is
|
||||
* compile-time 1)
|
||||
*
|
||||
* \sa Eigen::seq, Eigen::seqN, DenseBase::operator()(const RowIndices&, const ColIndices&), class IndexedView
|
||||
*/
|
||||
template <typename FirstType, typename SizeType, typename IncrType>
|
||||
class ArithmeticSequence {
|
||||
public:
|
||||
constexpr ArithmeticSequence() = default;
|
||||
constexpr ArithmeticSequence(FirstType first, SizeType size) : m_first(first), m_size(size) {}
|
||||
constexpr ArithmeticSequence(FirstType first, SizeType size, IncrType incr)
|
||||
: m_first(first), m_size(size), m_incr(incr) {}
|
||||
|
||||
enum {
|
||||
// SizeAtCompileTime = internal::get_fixed_value<SizeType>::value,
|
||||
IncrAtCompileTime = internal::get_fixed_value<IncrType, DynamicIndex>::value
|
||||
};
|
||||
|
||||
/** \returns the size, i.e., number of elements, of the sequence */
|
||||
constexpr Index size() const { return m_size; }
|
||||
|
||||
/** \returns the first element \f$ a_0 \f$ in the sequence */
|
||||
constexpr Index first() const { return m_first; }
|
||||
|
||||
/** \returns the value \f$ a_i \f$ at index \a i in the sequence. */
|
||||
constexpr Index operator[](Index i) const { return m_first + i * m_incr; }
|
||||
|
||||
constexpr const FirstType& firstObject() const { return m_first; }
|
||||
constexpr const SizeType& sizeObject() const { return m_size; }
|
||||
constexpr const IncrType& incrObject() const { return m_incr; }
|
||||
|
||||
protected:
|
||||
FirstType m_first;
|
||||
SizeType m_size;
|
||||
IncrType m_incr;
|
||||
|
||||
public:
|
||||
constexpr auto reverse() const -> decltype(Eigen::seqN(m_first + (m_size + fix<-1>()) * m_incr, m_size, -m_incr)) {
|
||||
return seqN(m_first + (m_size + fix<-1>()) * m_incr, m_size, -m_incr);
|
||||
}
|
||||
};
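// Illustrative sketch (hypothetical values): reverse() flips an ArithmeticSequence without
// spelling out the mirrored seqN by hand.
//
//   auto s = seqN(0, 5, 2);    // indices 0, 2, 4, 6, 8
//   auto r = s.reverse();      // indices 8, 6, 4, 2, 0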
|
||||
|
||||
/** \returns an ArithmeticSequence starting at \a first, of length \a size, and increment \a incr
|
||||
*
|
||||
* \sa seqN(FirstType,SizeType), seq(FirstType,LastType,IncrType) */
|
||||
template <typename FirstType, typename SizeType, typename IncrType>
|
||||
ArithmeticSequence<typename internal::cleanup_index_type<FirstType>::type,
|
||||
typename internal::cleanup_index_type<SizeType>::type,
|
||||
typename internal::cleanup_seq_incr<IncrType>::type>
|
||||
seqN(FirstType first, SizeType size, IncrType incr) {
|
||||
return ArithmeticSequence<typename internal::cleanup_index_type<FirstType>::type,
|
||||
typename internal::cleanup_index_type<SizeType>::type,
|
||||
typename internal::cleanup_seq_incr<IncrType>::type>(first, size, incr);
|
||||
}
|
||||
|
||||
/** \returns an ArithmeticSequence starting at \a first, of length \a size, and unit increment
|
||||
*
|
||||
* \sa seqN(FirstType,SizeType,IncrType), seq(FirstType,LastType) */
|
||||
template <typename FirstType, typename SizeType>
|
||||
ArithmeticSequence<typename internal::cleanup_index_type<FirstType>::type,
|
||||
typename internal::cleanup_index_type<SizeType>::type>
|
||||
seqN(FirstType first, SizeType size) {
|
||||
return ArithmeticSequence<typename internal::cleanup_index_type<FirstType>::type,
|
||||
typename internal::cleanup_index_type<SizeType>::type>(first, size);
|
||||
}
|
||||
|
||||
#ifdef EIGEN_PARSED_BY_DOXYGEN
|
||||
|
||||
/** \returns an ArithmeticSequence starting at \a f, up (or down) to \a l, and with positive (or negative) increment \a
|
||||
* incr
|
||||
*
|
||||
* It is essentially an alias to:
|
||||
* \code
|
||||
* seqN(f, (l-f+incr)/incr, incr);
|
||||
* \endcode
|
||||
*
|
||||
* \sa seqN(FirstType,SizeType,IncrType), seq(FirstType,LastType)
|
||||
*/
|
||||
template <typename FirstType, typename LastType, typename IncrType>
|
||||
auto seq(FirstType f, LastType l, IncrType incr);
|
||||
|
||||
/** \returns an ArithmeticSequence starting at \a f, up (or down) to \a l, and unit increment
|
||||
*
|
||||
* It is essentially an alias to:
|
||||
* \code
|
||||
* seqN(f,l-f+1);
|
||||
* \endcode
|
||||
*
|
||||
* \sa seqN(FirstType,SizeType), seq(FirstType,LastType,IncrType)
|
||||
*/
|
||||
template <typename FirstType, typename LastType>
|
||||
auto seq(FirstType f, LastType l);
|
||||
|
||||
#else // EIGEN_PARSED_BY_DOXYGEN
|
||||
|
||||
template <typename FirstType, typename LastType>
|
||||
auto seq(FirstType f, LastType l)
|
||||
-> decltype(seqN(typename internal::cleanup_index_type<FirstType>::type(f),
|
||||
(typename internal::cleanup_index_type<LastType>::type(l) -
|
||||
typename internal::cleanup_index_type<FirstType>::type(f) + fix<1>()))) {
|
||||
return seqN(typename internal::cleanup_index_type<FirstType>::type(f),
|
||||
(typename internal::cleanup_index_type<LastType>::type(l) -
|
||||
typename internal::cleanup_index_type<FirstType>::type(f) + fix<1>()));
|
||||
}
|
||||
|
||||
template <typename FirstType, typename LastType, typename IncrType>
|
||||
auto seq(FirstType f, LastType l, IncrType incr)
|
||||
-> decltype(seqN(typename internal::cleanup_index_type<FirstType>::type(f),
|
||||
(typename internal::cleanup_index_type<LastType>::type(l) -
|
||||
typename internal::cleanup_index_type<FirstType>::type(f) +
|
||||
typename internal::cleanup_seq_incr<IncrType>::type(incr)) /
|
||||
typename internal::cleanup_seq_incr<IncrType>::type(incr),
|
||||
typename internal::cleanup_seq_incr<IncrType>::type(incr))) {
|
||||
typedef typename internal::cleanup_seq_incr<IncrType>::type CleanedIncrType;
|
||||
return seqN(typename internal::cleanup_index_type<FirstType>::type(f),
|
||||
(typename internal::cleanup_index_type<LastType>::type(l) -
|
||||
typename internal::cleanup_index_type<FirstType>::type(f) + CleanedIncrType(incr)) /
|
||||
CleanedIncrType(incr),
|
||||
CleanedIncrType(incr));
|
||||
}
|
||||
|
||||
#endif // EIGEN_PARSED_BY_DOXYGEN
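// Illustrative sketch (hypothetical values): seq/seqN are typically passed to operator() for
// slicing dense objects.
//
//   MatrixXd M = MatrixXd::Random(10, 10);
//   auto block   = M(seq(2, 7), seqN(0, 3));      // rows 2..7, first 3 columns
//   auto strided = M(seqN(1, 5, 2), seq(0, 9));   // rows 1,3,5,7,9 and all 10 columns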
|
||||
|
||||
namespace placeholders {
|
||||
|
||||
/** \cpp11
|
||||
* \returns a symbolic ArithmeticSequence representing the last \a size elements with increment \a incr.
|
||||
*
|
||||
* It is a shortcut for: \code seqN(last-(size-fix<1>)*incr, size, incr) \endcode
|
||||
*
|
||||
* \sa lastN(SizeType), seqN(FirstType,SizeType), seq(FirstType,LastType,IncrType) */
|
||||
template <typename SizeType, typename IncrType>
|
||||
auto lastN(SizeType size, IncrType incr)
|
||||
-> decltype(seqN(Eigen::placeholders::last - (size - fix<1>()) * incr, size, incr)) {
|
||||
return seqN(Eigen::placeholders::last - (size - fix<1>()) * incr, size, incr);
|
||||
}
|
||||
|
||||
/** \cpp11
|
||||
* \returns a symbolic ArithmeticSequence representing the last \a size elements with a unit increment.
|
||||
*
|
||||
* It is a shortcut for: \code seq(last+fix<1>-size, last) \endcode
|
||||
*
|
||||
* \sa lastN(SizeType,IncrType), seqN(FirstType,SizeType), seq(FirstType,LastType) */
|
||||
template <typename SizeType>
|
||||
auto lastN(SizeType size) -> decltype(seqN(Eigen::placeholders::last + fix<1>() - size, size)) {
|
||||
return seqN(Eigen::placeholders::last + fix<1>() - size, size);
|
||||
}
|
||||
|
||||
} // namespace placeholders
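// Illustrative sketch (hypothetical values): lastN selects a trailing slice without knowing the
// vector's size at the call site.
//
//   VectorXd v = VectorXd::LinSpaced(10, 0.0, 9.0);
//   auto tail3      = v(placeholders::lastN(3));              // coefficients 7, 8, 9
//   auto everyOther = v(placeholders::lastN(5, fix<2>()));    // coefficients 1, 3, 5, 7, 9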
|
||||
|
||||
/** \namespace Eigen::indexing
|
||||
* \ingroup Core_Module
|
||||
*
|
||||
* The sole purpose of this namespace is to be able to import all functions
|
||||
* and symbols that are expected to be used within operator() for indexing
|
||||
* and slicing. If you already imported the whole Eigen namespace:
|
||||
* \code using namespace Eigen; \endcode
|
||||
* then you are already all set. Otherwise, if you don't want/cannot import
|
||||
* the whole Eigen namespace, the following line:
|
||||
* \code using namespace Eigen::indexing; \endcode
|
||||
* is equivalent to:
|
||||
* \code
|
||||
using Eigen::fix;
|
||||
using Eigen::seq;
|
||||
using Eigen::seqN;
|
||||
using Eigen::placeholders::all;
|
||||
using Eigen::placeholders::last;
|
||||
using Eigen::placeholders::lastN; // c++11 only
|
||||
using Eigen::placeholders::lastp1;
|
||||
\endcode
|
||||
*/
|
||||
namespace indexing {
|
||||
using Eigen::fix;
|
||||
using Eigen::seq;
|
||||
using Eigen::seqN;
|
||||
using Eigen::placeholders::all;
|
||||
using Eigen::placeholders::last;
|
||||
using Eigen::placeholders::lastN;
|
||||
using Eigen::placeholders::lastp1;
|
||||
} // namespace indexing
|
||||
|
||||
} // end namespace Eigen
|
||||
|
||||
#endif // EIGEN_ARITHMETIC_SEQUENCE_H
|
||||
376
2025.09.22_cpp_with_eigen_package/Eigen/src/Core/Array.h
Normal file
376
2025.09.22_cpp_with_eigen_package/Eigen/src/Core/Array.h
Normal file
@@ -0,0 +1,376 @@
|
||||
// This file is part of Eigen, a lightweight C++ template library
|
||||
// for linear algebra.
|
||||
//
|
||||
// Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla
|
||||
// Public License v. 2.0. If a copy of the MPL was not distributed
|
||||
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
#ifndef EIGEN_ARRAY_H
|
||||
#define EIGEN_ARRAY_H
|
||||
|
||||
// IWYU pragma: private
|
||||
#include "./InternalHeaderCheck.h"
|
||||
|
||||
namespace Eigen {
|
||||
|
||||
namespace internal {
|
||||
template <typename Scalar_, int Rows_, int Cols_, int Options_, int MaxRows_, int MaxCols_>
|
||||
struct traits<Array<Scalar_, Rows_, Cols_, Options_, MaxRows_, MaxCols_>>
|
||||
: traits<Matrix<Scalar_, Rows_, Cols_, Options_, MaxRows_, MaxCols_>> {
|
||||
typedef ArrayXpr XprKind;
|
||||
typedef ArrayBase<Array<Scalar_, Rows_, Cols_, Options_, MaxRows_, MaxCols_>> XprBase;
|
||||
};
|
||||
} // namespace internal
|
||||
|
||||
/** \class Array
|
||||
* \ingroup Core_Module
|
||||
*
|
||||
* \brief General-purpose arrays with easy API for coefficient-wise operations
|
||||
*
|
||||
* The %Array class is very similar to the Matrix class. It provides
|
||||
* general-purpose one- and two-dimensional arrays. The difference between the
|
||||
* %Array and the %Matrix class is primarily in the API: the API for the
|
||||
* %Array class provides easy access to coefficient-wise operations, while the
|
||||
* API for the %Matrix class provides easy access to linear-algebra
|
||||
* operations.
|
||||
*
|
||||
* See documentation of class Matrix for detailed information on the template parameters
|
||||
* and storage layout.
|
||||
*
|
||||
* This class can be extended with the help of the plugin mechanism described on the page
|
||||
* \ref TopicCustomizing_Plugins by defining the preprocessor symbol \c EIGEN_ARRAY_PLUGIN.
|
||||
*
|
||||
* \sa \blank \ref TutorialArrayClass, \ref TopicClassHierarchy
|
||||
*/
|
||||
template <typename Scalar_, int Rows_, int Cols_, int Options_, int MaxRows_, int MaxCols_>
|
||||
class Array : public PlainObjectBase<Array<Scalar_, Rows_, Cols_, Options_, MaxRows_, MaxCols_>> {
|
||||
public:
|
||||
typedef PlainObjectBase<Array> Base;
|
||||
EIGEN_DENSE_PUBLIC_INTERFACE(Array)
|
||||
|
||||
enum { Options = Options_ };
|
||||
typedef typename Base::PlainObject PlainObject;
|
||||
|
||||
protected:
|
||||
template <typename Derived, typename OtherDerived, bool IsVector>
|
||||
friend struct internal::conservative_resize_like_impl;
|
||||
|
||||
using Base::m_storage;
|
||||
|
||||
public:
|
||||
using Base::base;
|
||||
using Base::coeff;
|
||||
using Base::coeffRef;
|
||||
|
||||
/**
|
||||
* The usage of
|
||||
* using Base::operator=;
|
||||
* fails on MSVC. Since the code below is working with GCC and MSVC, we skipped
|
||||
* the usage of 'using'. This should be done only for operator=.
|
||||
*/
|
||||
template <typename OtherDerived>
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Array& operator=(const EigenBase<OtherDerived>& other) {
|
||||
return Base::operator=(other);
|
||||
}
|
||||
|
||||
/** Set all the entries to \a value.
|
||||
* \sa DenseBase::setConstant(), DenseBase::fill()
|
||||
*/
|
||||
/* This overload is needed because the usage of
|
||||
* using Base::operator=;
|
||||
* fails on MSVC. Since the code below is working with GCC and MSVC, we skipped
|
||||
* the usage of 'using'. This should be done only for operator=.
|
||||
*/
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Array& operator=(const Scalar& value) {
|
||||
Base::setConstant(value);
|
||||
return *this;
|
||||
}
|
||||
|
||||
/** Copies the value of the expression \a other into \c *this with automatic resizing.
|
||||
*
|
||||
* *this might be resized to match the dimensions of \a other. If *this was a null matrix (not already initialized),
|
||||
* it will be initialized.
|
||||
*
|
||||
* Note that copying a row-vector into a vector (and conversely) is allowed.
|
||||
* The resizing, if any, is then done in the appropriate way so that row-vectors
|
||||
* remain row-vectors and vectors remain vectors.
|
||||
*/
|
||||
template <typename OtherDerived>
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Array& operator=(const DenseBase<OtherDerived>& other) {
|
||||
return Base::_set(other);
|
||||
}
|
||||
|
||||
/**
|
||||
* \brief Assigns arrays to each other.
|
||||
*
|
||||
* \note This is a special case of the templated operator=. Its purpose is
|
||||
* to prevent a default operator= from hiding the templated operator=.
|
||||
*
|
||||
* \callgraph
|
||||
*/
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Array& operator=(const Array& other) { return Base::_set(other); }
|
||||
|
||||
/** Default constructor.
|
||||
*
|
||||
* For fixed-size matrices, does nothing.
|
||||
*
|
||||
* For dynamic-size matrices, creates an empty matrix of size 0. Does not allocate any array. Such a matrix
|
||||
* is called a null matrix. This constructor is the unique way to create null matrices: resizing
|
||||
* a matrix to 0 is not supported.
|
||||
*
|
||||
* \sa resize(Index,Index)
|
||||
*/
|
||||
#ifdef EIGEN_INITIALIZE_COEFFS
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Array() : Base() { EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED }
|
||||
#else
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Array() = default;
|
||||
#endif
|
||||
/** \brief Move constructor */
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Array(Array&&) = default;
|
||||
EIGEN_DEVICE_FUNC Array& operator=(Array&& other) noexcept(std::is_nothrow_move_assignable<Scalar>::value) {
|
||||
Base::operator=(std::move(other));
|
||||
return *this;
|
||||
}
|
||||
|
||||
/** \brief Construct a row or column vector with fixed size from an arbitrary number of coefficients.
|
||||
*
|
||||
* \only_for_vectors
|
||||
*
|
||||
* This constructor is for 1D array or vectors with more than 4 coefficients.
|
||||
*
|
||||
* \warning To construct a column (resp. row) vector of fixed length, the number of values passed to this
|
||||
* constructor must match the fixed number of rows (resp. columns) of \c *this.
|
||||
*
|
||||
*
|
||||
* Example: \include Array_variadic_ctor_cxx11.cpp
|
||||
* Output: \verbinclude Array_variadic_ctor_cxx11.out
|
||||
*
|
||||
* \sa Array(const std::initializer_list<std::initializer_list<Scalar>>&)
|
||||
* \sa Array(const Scalar&), Array(const Scalar&,const Scalar&)
|
||||
*/
|
||||
template <typename... ArgTypes>
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Array(const Scalar& a0, const Scalar& a1, const Scalar& a2, const Scalar& a3,
|
||||
const ArgTypes&... args)
|
||||
: Base(a0, a1, a2, a3, args...) {}
|
||||
|
||||
/** \brief Constructs an array and initializes it from the coefficients given as initializer-lists grouped by row.
|
||||
* \cpp11
|
||||
*
|
||||
* In the general case, the constructor takes a list of rows, each row being represented as a list of coefficients:
|
||||
*
|
||||
* Example: \include Array_initializer_list_23_cxx11.cpp
|
||||
* Output: \verbinclude Array_initializer_list_23_cxx11.out
|
||||
*
|
||||
* Each of the inner initializer lists must contain the exact same number of elements, otherwise an assertion is
|
||||
* triggered.
|
||||
*
|
||||
* In the case of a compile-time column 1D array, implicit transposition from a single row is allowed.
|
||||
* Therefore <code> Array<int,Dynamic,1>{{1,2,3,4,5}}</code> is legal and the more verbose syntax
|
||||
* <code>Array<int,Dynamic,1>{{1},{2},{3},{4},{5}}</code> can be avoided:
|
||||
*
|
||||
* Example: \include Array_initializer_list_vector_cxx11.cpp
|
||||
* Output: \verbinclude Array_initializer_list_vector_cxx11.out
|
||||
*
|
||||
* In the case of fixed-sized arrays, the initializer list sizes must exactly match the array sizes,
|
||||
* and implicit transposition is allowed for compile-time 1D arrays only.
|
||||
*
|
||||
* \sa Array(const Scalar& a0, const Scalar& a1, const Scalar& a2, const Scalar& a3, const ArgTypes&... args)
|
||||
*/
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Array(
|
||||
const std::initializer_list<std::initializer_list<Scalar>>& list)
|
||||
: Base(list) {}
|
||||
|
||||
#ifndef EIGEN_PARSED_BY_DOXYGEN
|
||||
template <typename T>
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit Array(const T& x) {
|
||||
Base::template _init1<T>(x);
|
||||
}
|
||||
|
||||
template <typename T0, typename T1>
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Array(const T0& val0, const T1& val1) {
|
||||
this->template _init2<T0, T1>(val0, val1);
|
||||
}
|
||||
|
||||
#else
|
||||
/** \brief Constructs a fixed-sized array initialized with coefficients starting at \a data */
|
||||
EIGEN_DEVICE_FUNC explicit Array(const Scalar* data);
|
||||
/** Constructs a vector or row-vector with given dimension. \only_for_vectors
|
||||
*
|
||||
* Note that this is only useful for dynamic-size vectors. For fixed-size vectors,
|
||||
* it is redundant to pass the dimension here, so it makes more sense to use the default
|
||||
* constructor Array() instead.
|
||||
*/
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit Array(Index dim);
|
||||
/** constructs an initialized 1x1 Array with the given coefficient
|
||||
* \sa const Scalar& a0, const Scalar& a1, const Scalar& a2, const Scalar& a3, const ArgTypes&... args */
|
||||
Array(const Scalar& value);
|
||||
/** constructs an uninitialized array with \a rows rows and \a cols columns.
|
||||
*
|
||||
* This is useful for dynamic-size arrays. For fixed-size arrays,
|
||||
* it is redundant to pass these parameters, so one should use the default constructor
|
||||
* Array() instead. */
|
||||
Array(Index rows, Index cols);
|
||||
/** constructs an initialized 2D vector with given coefficients
|
||||
* \sa Array(const Scalar& a0, const Scalar& a1, const Scalar& a2, const Scalar& a3, const ArgTypes&... args) */
|
||||
Array(const Scalar& val0, const Scalar& val1);
|
||||
#endif // end EIGEN_PARSED_BY_DOXYGEN
|
||||
|
||||
/** constructs an initialized 3D vector with given coefficients
|
||||
* \sa Array(const Scalar& a0, const Scalar& a1, const Scalar& a2, const Scalar& a3, const ArgTypes&... args)
|
||||
*/
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Array(const Scalar& val0, const Scalar& val1, const Scalar& val2) {
|
||||
EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(Array, 3)
|
||||
m_storage.data()[0] = val0;
|
||||
m_storage.data()[1] = val1;
|
||||
m_storage.data()[2] = val2;
|
||||
}
|
||||
/** constructs an initialized 4D vector with given coefficients
|
||||
* \sa Array(const Scalar& a0, const Scalar& a1, const Scalar& a2, const Scalar& a3, const ArgTypes&... args)
|
||||
*/
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Array(const Scalar& val0, const Scalar& val1, const Scalar& val2,
|
||||
const Scalar& val3) {
|
||||
EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(Array, 4)
|
||||
m_storage.data()[0] = val0;
|
||||
m_storage.data()[1] = val1;
|
||||
m_storage.data()[2] = val2;
|
||||
m_storage.data()[3] = val3;
|
||||
}
|
||||
|
||||
/** Copy constructor */
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Array(const Array&) = default;
|
||||
|
||||
private:
|
||||
struct PrivateType {};
|
||||
|
||||
public:
|
||||
/** \sa MatrixBase::operator=(const EigenBase<OtherDerived>&) */
|
||||
template <typename OtherDerived>
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Array(
|
||||
const EigenBase<OtherDerived>& other,
|
||||
std::enable_if_t<internal::is_convertible<typename OtherDerived::Scalar, Scalar>::value, PrivateType> =
|
||||
PrivateType())
|
||||
: Base(other.derived()) {}
|
||||
|
||||
EIGEN_DEVICE_FUNC constexpr Index innerStride() const noexcept { return 1; }
|
||||
EIGEN_DEVICE_FUNC constexpr Index outerStride() const noexcept { return this->innerSize(); }
|
||||
|
||||
#ifdef EIGEN_ARRAY_PLUGIN
|
||||
#include EIGEN_ARRAY_PLUGIN
|
||||
#endif
|
||||
|
||||
private:
|
||||
template <typename MatrixType, typename OtherDerived, bool SwapPointers>
|
||||
friend struct internal::matrix_swap_impl;
|
||||
};
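// Illustrative sketch (hypothetical values): Array performs coefficient-wise arithmetic, and
// .matrix() switches back to linear-algebra semantics.
//
//   ArrayXXf a = ArrayXXf::Random(3, 3);
//   ArrayXXf b = a * a + 1.0f;               // element-wise product plus a scalar
//   MatrixXf p = b.matrix() * b.matrix();    // genuine matrix product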
|
||||
|
||||
/** \defgroup arraytypedefs Global array typedefs
|
||||
* \ingroup Core_Module
|
||||
*
|
||||
* %Eigen defines several typedef shortcuts for most common 1D and 2D array types.
|
||||
*
|
||||
* The general patterns are the following:
|
||||
*
|
||||
* \c ArrayRowsColsType where \c Rows and \c Cols can be \c 2,\c 3,\c 4 for fixed-size square arrays or \c X for
|
||||
* dynamic size, and where \c Type can be \c i for integer, \c f for float, \c d for double, \c cf for complex float, \c
|
||||
* cd for complex double.
|
||||
*
|
||||
* For example, \c Array33d is a fixed-size 3x3 array type of doubles, and \c ArrayXXf is a dynamic-size array of
|
||||
* floats.
|
||||
*
|
||||
* There are also \c ArraySizeType which are self-explanatory. For example, \c Array4cf is
|
||||
* a fixed-size 1D array of 4 complex floats.
|
||||
*
|
||||
* With \cpp11, template aliases are also defined for common sizes.
|
||||
* They follow the same pattern as above except that the scalar type suffix is replaced by a
|
||||
* template parameter, i.e.:
|
||||
* - `ArrayRowsCols<Type>` where `Rows` and `Cols` can be \c 2,\c 3,\c 4, or \c X for fixed or dynamic size.
|
||||
* - `ArraySize<Type>` where `Size` can be \c 2,\c 3,\c 4 or \c X for fixed or dynamic size 1D arrays.
|
||||
*
|
||||
* \sa class Array
|
||||
*/
|
||||
|
||||
#define EIGEN_MAKE_ARRAY_TYPEDEFS(Type, TypeSuffix, Size, SizeSuffix) \
|
||||
/** \ingroup arraytypedefs */ \
|
||||
typedef Array<Type, Size, Size> Array##SizeSuffix##SizeSuffix##TypeSuffix; \
|
||||
/** \ingroup arraytypedefs */ \
|
||||
typedef Array<Type, Size, 1> Array##SizeSuffix##TypeSuffix;
|
||||
|
||||
#define EIGEN_MAKE_ARRAY_FIXED_TYPEDEFS(Type, TypeSuffix, Size) \
|
||||
/** \ingroup arraytypedefs */ \
|
||||
typedef Array<Type, Size, Dynamic> Array##Size##X##TypeSuffix; \
|
||||
/** \ingroup arraytypedefs */ \
|
||||
typedef Array<Type, Dynamic, Size> Array##X##Size##TypeSuffix;
|
||||
|
||||
#define EIGEN_MAKE_ARRAY_TYPEDEFS_ALL_SIZES(Type, TypeSuffix) \
|
||||
EIGEN_MAKE_ARRAY_TYPEDEFS(Type, TypeSuffix, 2, 2) \
|
||||
EIGEN_MAKE_ARRAY_TYPEDEFS(Type, TypeSuffix, 3, 3) \
|
||||
EIGEN_MAKE_ARRAY_TYPEDEFS(Type, TypeSuffix, 4, 4) \
|
||||
EIGEN_MAKE_ARRAY_TYPEDEFS(Type, TypeSuffix, Dynamic, X) \
|
||||
EIGEN_MAKE_ARRAY_FIXED_TYPEDEFS(Type, TypeSuffix, 2) \
|
||||
EIGEN_MAKE_ARRAY_FIXED_TYPEDEFS(Type, TypeSuffix, 3) \
|
||||
EIGEN_MAKE_ARRAY_FIXED_TYPEDEFS(Type, TypeSuffix, 4)
|
||||
|
||||
EIGEN_MAKE_ARRAY_TYPEDEFS_ALL_SIZES(int, i)
|
||||
EIGEN_MAKE_ARRAY_TYPEDEFS_ALL_SIZES(float, f)
|
||||
EIGEN_MAKE_ARRAY_TYPEDEFS_ALL_SIZES(double, d)
|
||||
EIGEN_MAKE_ARRAY_TYPEDEFS_ALL_SIZES(std::complex<float>, cf)
|
||||
EIGEN_MAKE_ARRAY_TYPEDEFS_ALL_SIZES(std::complex<double>, cd)
|
||||
|
||||
#undef EIGEN_MAKE_ARRAY_TYPEDEFS_ALL_SIZES
|
||||
#undef EIGEN_MAKE_ARRAY_TYPEDEFS
|
||||
#undef EIGEN_MAKE_ARRAY_FIXED_TYPEDEFS
|
||||
|
||||
#define EIGEN_MAKE_ARRAY_TYPEDEFS(Size, SizeSuffix) \
|
||||
/** \ingroup arraytypedefs */ \
|
||||
/** \brief \cpp11 */ \
|
||||
template <typename Type> \
|
||||
using Array##SizeSuffix##SizeSuffix = Array<Type, Size, Size>; \
|
||||
/** \ingroup arraytypedefs */ \
|
||||
/** \brief \cpp11 */ \
|
||||
template <typename Type> \
|
||||
using Array##SizeSuffix = Array<Type, Size, 1>;
|
||||
|
||||
#define EIGEN_MAKE_ARRAY_FIXED_TYPEDEFS(Size) \
|
||||
/** \ingroup arraytypedefs */ \
|
||||
/** \brief \cpp11 */ \
|
||||
template <typename Type> \
|
||||
using Array##Size##X = Array<Type, Size, Dynamic>; \
|
||||
/** \ingroup arraytypedefs */ \
|
||||
/** \brief \cpp11 */ \
|
||||
template <typename Type> \
|
||||
using Array##X##Size = Array<Type, Dynamic, Size>;
|
||||
|
||||
EIGEN_MAKE_ARRAY_TYPEDEFS(2, 2)
|
||||
EIGEN_MAKE_ARRAY_TYPEDEFS(3, 3)
|
||||
EIGEN_MAKE_ARRAY_TYPEDEFS(4, 4)
|
||||
EIGEN_MAKE_ARRAY_TYPEDEFS(Dynamic, X)
|
||||
EIGEN_MAKE_ARRAY_FIXED_TYPEDEFS(2)
|
||||
EIGEN_MAKE_ARRAY_FIXED_TYPEDEFS(3)
|
||||
EIGEN_MAKE_ARRAY_FIXED_TYPEDEFS(4)
|
||||
|
||||
#undef EIGEN_MAKE_ARRAY_TYPEDEFS
|
||||
#undef EIGEN_MAKE_ARRAY_FIXED_TYPEDEFS
|
||||
|
||||
#define EIGEN_USING_ARRAY_TYPEDEFS_FOR_TYPE_AND_SIZE(TypeSuffix, SizeSuffix) \
|
||||
using Eigen::Matrix##SizeSuffix##TypeSuffix; \
|
||||
using Eigen::Vector##SizeSuffix##TypeSuffix; \
|
||||
using Eigen::RowVector##SizeSuffix##TypeSuffix;
|
||||
|
||||
#define EIGEN_USING_ARRAY_TYPEDEFS_FOR_TYPE(TypeSuffix) \
|
||||
EIGEN_USING_ARRAY_TYPEDEFS_FOR_TYPE_AND_SIZE(TypeSuffix, 2) \
|
||||
EIGEN_USING_ARRAY_TYPEDEFS_FOR_TYPE_AND_SIZE(TypeSuffix, 3) \
|
||||
EIGEN_USING_ARRAY_TYPEDEFS_FOR_TYPE_AND_SIZE(TypeSuffix, 4) \
|
||||
EIGEN_USING_ARRAY_TYPEDEFS_FOR_TYPE_AND_SIZE(TypeSuffix, X)
|
||||
|
||||
#define EIGEN_USING_ARRAY_TYPEDEFS \
|
||||
EIGEN_USING_ARRAY_TYPEDEFS_FOR_TYPE(i) \
|
||||
EIGEN_USING_ARRAY_TYPEDEFS_FOR_TYPE(f) \
|
||||
EIGEN_USING_ARRAY_TYPEDEFS_FOR_TYPE(d) \
|
||||
EIGEN_USING_ARRAY_TYPEDEFS_FOR_TYPE(cf) \
|
||||
EIGEN_USING_ARRAY_TYPEDEFS_FOR_TYPE(cd)
|
||||
|
||||
} // end namespace Eigen
|
||||
|
||||
#endif // EIGEN_ARRAY_H
|
||||
213
2025.09.22_cpp_with_eigen_package/Eigen/src/Core/ArrayBase.h
Normal file
213
2025.09.22_cpp_with_eigen_package/Eigen/src/Core/ArrayBase.h
Normal file
@@ -0,0 +1,213 @@
|
||||
// This file is part of Eigen, a lightweight C++ template library
|
||||
// for linear algebra.
|
||||
//
|
||||
// Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla
|
||||
// Public License v. 2.0. If a copy of the MPL was not distributed
|
||||
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
#ifndef EIGEN_ARRAYBASE_H
|
||||
#define EIGEN_ARRAYBASE_H
|
||||
|
||||
// IWYU pragma: private
|
||||
#include "./InternalHeaderCheck.h"
|
||||
|
||||
namespace Eigen {
|
||||
|
||||
template <typename ExpressionType>
|
||||
class MatrixWrapper;
|
||||
|
||||
/** \class ArrayBase
|
||||
* \ingroup Core_Module
|
||||
*
|
||||
* \brief Base class for all 1D and 2D arrays, and related expressions
|
||||
*
|
||||
* An array is similar to a dense vector or matrix. While matrices are mathematical
|
||||
* objects with well defined linear algebra operators, an array is just a collection
|
||||
* of scalar values arranged in a one or two dimensional fashion. As the main consequence,
|
||||
* all operations applied to an array are performed coefficient wise. Furthermore,
|
||||
* arrays support scalar math functions of the c++ standard library (e.g., std::sin(x)), and convenient
|
||||
* constructors allowing to easily write generic code working for both scalar values
|
||||
* and arrays.
|
||||
*
|
||||
* This class is the base that is inherited by all array expression types.
|
||||
*
|
||||
* \tparam Derived is the derived type, e.g., an array or an expression type.
|
||||
*
|
||||
* This class can be extended with the help of the plugin mechanism described on the page
|
||||
* \ref TopicCustomizing_Plugins by defining the preprocessor symbol \c EIGEN_ARRAYBASE_PLUGIN.
|
||||
*
|
||||
* \sa class MatrixBase, \ref TopicClassHierarchy
|
||||
*/
|
||||
template <typename Derived>
|
||||
class ArrayBase : public DenseBase<Derived> {
|
||||
public:
|
||||
#ifndef EIGEN_PARSED_BY_DOXYGEN
|
||||
/** The base class for a given storage type. */
|
||||
typedef ArrayBase StorageBaseType;
|
||||
|
||||
typedef ArrayBase Eigen_BaseClassForSpecializationOfGlobalMathFuncImpl;
|
||||
|
||||
typedef typename internal::traits<Derived>::StorageKind StorageKind;
|
||||
typedef typename internal::traits<Derived>::Scalar Scalar;
|
||||
typedef typename internal::packet_traits<Scalar>::type PacketScalar;
|
||||
typedef typename NumTraits<Scalar>::Real RealScalar;
|
||||
|
||||
typedef DenseBase<Derived> Base;
|
||||
using Base::ColsAtCompileTime;
|
||||
using Base::Flags;
|
||||
using Base::IsVectorAtCompileTime;
|
||||
using Base::MaxColsAtCompileTime;
|
||||
using Base::MaxRowsAtCompileTime;
|
||||
using Base::MaxSizeAtCompileTime;
|
||||
using Base::RowsAtCompileTime;
|
||||
using Base::SizeAtCompileTime;
|
||||
|
||||
using Base::coeff;
|
||||
using Base::coeffRef;
|
||||
using Base::cols;
|
||||
using Base::const_cast_derived;
|
||||
using Base::derived;
|
||||
using Base::lazyAssign;
|
||||
using Base::rows;
|
||||
using Base::size;
|
||||
using Base::operator-;
|
||||
using Base::operator=;
|
||||
using Base::operator+=;
|
||||
using Base::operator-=;
|
||||
using Base::operator*=;
|
||||
using Base::operator/=;
|
||||
|
||||
typedef typename Base::CoeffReturnType CoeffReturnType;
|
||||
|
||||
typedef typename Base::PlainObject PlainObject;
|
||||
|
||||
/** \internal Represents a matrix with all coefficients equal to one another*/
|
||||
typedef CwiseNullaryOp<internal::scalar_constant_op<Scalar>, PlainObject> ConstantReturnType;
|
||||
#endif // not EIGEN_PARSED_BY_DOXYGEN
|
||||
|
||||
#define EIGEN_CURRENT_STORAGE_BASE_CLASS Eigen::ArrayBase
|
||||
#define EIGEN_DOC_UNARY_ADDONS(X, Y)
|
||||
#include "../plugins/MatrixCwiseUnaryOps.inc"
|
||||
#include "../plugins/ArrayCwiseUnaryOps.inc"
|
||||
#include "../plugins/CommonCwiseBinaryOps.inc"
|
||||
#include "../plugins/MatrixCwiseBinaryOps.inc"
|
||||
#include "../plugins/ArrayCwiseBinaryOps.inc"
|
||||
#ifdef EIGEN_ARRAYBASE_PLUGIN
|
||||
#include EIGEN_ARRAYBASE_PLUGIN
|
||||
#endif
|
||||
#undef EIGEN_CURRENT_STORAGE_BASE_CLASS
|
||||
#undef EIGEN_DOC_UNARY_ADDONS
|
||||
|
||||
/** Special case of the template operator=, in order to prevent the compiler
|
||||
* from generating a default operator= (issue hit with g++ 4.1)
|
||||
*/
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& operator=(const ArrayBase& other) {
|
||||
internal::call_assignment(derived(), other.derived());
|
||||
return derived();
|
||||
}
|
||||
|
||||
/** Set all the entries to \a value.
|
||||
* \sa DenseBase::setConstant(), DenseBase::fill() */
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& operator=(const Scalar& value) {
|
||||
Base::setConstant(value);
|
||||
return derived();
|
||||
}
|
||||
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& operator+=(const Scalar& other) {
|
||||
internal::call_assignment(this->derived(), PlainObject::Constant(rows(), cols(), other),
|
||||
internal::add_assign_op<Scalar, Scalar>());
|
||||
return derived();
|
||||
}
|
||||
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& operator-=(const Scalar& other) {
|
||||
internal::call_assignment(this->derived(), PlainObject::Constant(rows(), cols(), other),
|
||||
internal::sub_assign_op<Scalar, Scalar>());
|
||||
return derived();
|
||||
}
|
||||
|
||||
/** replaces \c *this by \c *this + \a other.
|
||||
*
|
||||
* \returns a reference to \c *this
|
||||
*/
|
||||
template <typename OtherDerived>
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& operator+=(const ArrayBase<OtherDerived>& other) {
|
||||
call_assignment(derived(), other.derived(), internal::add_assign_op<Scalar, typename OtherDerived::Scalar>());
|
||||
return derived();
|
||||
}
|
||||
|
||||
/** replaces \c *this by \c *this - \a other.
|
||||
*
|
||||
* \returns a reference to \c *this
|
||||
*/
|
||||
template <typename OtherDerived>
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& operator-=(const ArrayBase<OtherDerived>& other) {
|
||||
call_assignment(derived(), other.derived(), internal::sub_assign_op<Scalar, typename OtherDerived::Scalar>());
|
||||
return derived();
|
||||
}
|
||||
|
||||
/** replaces \c *this by \c *this * \a other coefficient wise.
|
||||
*
|
||||
* \returns a reference to \c *this
|
||||
*/
|
||||
template <typename OtherDerived>
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& operator*=(const ArrayBase<OtherDerived>& other) {
|
||||
call_assignment(derived(), other.derived(), internal::mul_assign_op<Scalar, typename OtherDerived::Scalar>());
|
||||
return derived();
|
||||
}
|
||||
|
||||
/** replaces \c *this by \c *this / \a other coefficient wise.
|
||||
*
|
||||
* \returns a reference to \c *this
|
||||
*/
|
||||
template <typename OtherDerived>
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& operator/=(const ArrayBase<OtherDerived>& other) {
|
||||
call_assignment(derived(), other.derived(), internal::div_assign_op<Scalar, typename OtherDerived::Scalar>());
|
||||
return derived();
|
||||
}
|
||||
|
||||
public:
|
||||
EIGEN_DEVICE_FUNC ArrayBase<Derived>& array() { return *this; }
|
||||
EIGEN_DEVICE_FUNC const ArrayBase<Derived>& array() const { return *this; }
|
||||
|
||||
/** \returns an \link Eigen::MatrixBase Matrix \endlink expression of this array
|
||||
* \sa MatrixBase::array() */
|
||||
EIGEN_DEVICE_FUNC MatrixWrapper<Derived> matrix() { return MatrixWrapper<Derived>(derived()); }
|
||||
EIGEN_DEVICE_FUNC const MatrixWrapper<const Derived> matrix() const {
|
||||
return MatrixWrapper<const Derived>(derived());
|
||||
}
|
||||
|
||||
// template<typename Dest>
|
||||
// inline void evalTo(Dest& dst) const { dst = matrix(); }
|
||||
|
||||
protected:
|
||||
EIGEN_DEFAULT_COPY_CONSTRUCTOR(ArrayBase)
|
||||
EIGEN_DEFAULT_EMPTY_CONSTRUCTOR_AND_DESTRUCTOR(ArrayBase)
|
||||
|
||||
private:
|
||||
explicit ArrayBase(Index);
|
||||
ArrayBase(Index, Index);
|
||||
template <typename OtherDerived>
|
||||
explicit ArrayBase(const ArrayBase<OtherDerived>&);
|
||||
|
||||
protected:
|
||||
// mixing arrays and matrices is not legal
|
||||
template <typename OtherDerived>
|
||||
Derived& operator+=(const MatrixBase<OtherDerived>&) {
|
||||
EIGEN_STATIC_ASSERT(std::ptrdiff_t(sizeof(typename OtherDerived::Scalar)) == -1,
|
||||
YOU_CANNOT_MIX_ARRAYS_AND_MATRICES);
|
||||
return *this;
|
||||
}
|
||||
// mixing arrays and matrices is not legal
|
||||
template <typename OtherDerived>
|
||||
Derived& operator-=(const MatrixBase<OtherDerived>&) {
|
||||
EIGEN_STATIC_ASSERT(std::ptrdiff_t(sizeof(typename OtherDerived::Scalar)) == -1,
|
||||
YOU_CANNOT_MIX_ARRAYS_AND_MATRICES);
|
||||
return *this;
|
||||
}
|
||||
};
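// Illustrative sketch (hypothetical values): the compound operators declared above act
// coefficient-wise, and matrix() reinterprets the result for linear-algebra use.
//
//   ArrayXd x = ArrayXd::LinSpaced(5, 0.0, 1.0);
//   x += 1.0;                      // scalar added to every coefficient
//   ArrayXd y = x.square() * x;    // chained coefficient-wise expressions
//   VectorXd v = y.matrix();       // assign to a vector via the MatrixWrapper view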
|
||||
|
||||
} // end namespace Eigen
|
||||
|
||||
#endif // EIGEN_ARRAYBASE_H
|
||||
165
2025.09.22_cpp_with_eigen_package/Eigen/src/Core/ArrayWrapper.h
Normal file
165
2025.09.22_cpp_with_eigen_package/Eigen/src/Core/ArrayWrapper.h
Normal file
@@ -0,0 +1,165 @@
|
||||
// This file is part of Eigen, a lightweight C++ template library
|
||||
// for linear algebra.
|
||||
//
|
||||
// Copyright (C) 2009-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla
|
||||
// Public License v. 2.0. If a copy of the MPL was not distributed
|
||||
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
#ifndef EIGEN_ARRAYWRAPPER_H
|
||||
#define EIGEN_ARRAYWRAPPER_H
|
||||
|
||||
// IWYU pragma: private
|
||||
#include "./InternalHeaderCheck.h"
|
||||
|
||||
namespace Eigen {
|
||||
|
||||
/** \class ArrayWrapper
|
||||
* \ingroup Core_Module
|
||||
*
|
||||
* \brief Expression of a mathematical vector or matrix as an array object
|
||||
*
|
||||
* This class is the return type of MatrixBase::array(), and most of the time
|
||||
* this is the only way it is used.
|
||||
*
|
||||
* \sa MatrixBase::array(), class MatrixWrapper
|
||||
*/
|
||||
|
||||
namespace internal {
|
||||
template <typename ExpressionType>
|
||||
struct traits<ArrayWrapper<ExpressionType> > : public traits<remove_all_t<typename ExpressionType::Nested> > {
|
||||
typedef ArrayXpr XprKind;
|
||||
// Let's remove NestByRefBit
|
||||
enum {
|
||||
Flags0 = traits<remove_all_t<typename ExpressionType::Nested> >::Flags,
|
||||
LvalueBitFlag = is_lvalue<ExpressionType>::value ? LvalueBit : 0,
|
||||
Flags = (Flags0 & ~(NestByRefBit | LvalueBit)) | LvalueBitFlag
|
||||
};
|
||||
};
|
||||
} // namespace internal
|
||||
|
||||
template <typename ExpressionType>
|
||||
class ArrayWrapper : public ArrayBase<ArrayWrapper<ExpressionType> > {
|
||||
public:
|
||||
typedef ArrayBase<ArrayWrapper> Base;
|
||||
EIGEN_DENSE_PUBLIC_INTERFACE(ArrayWrapper)
|
||||
EIGEN_INHERIT_ASSIGNMENT_OPERATORS(ArrayWrapper)
|
||||
typedef internal::remove_all_t<ExpressionType> NestedExpression;
|
||||
|
||||
typedef std::conditional_t<internal::is_lvalue<ExpressionType>::value, Scalar, const Scalar>
|
||||
ScalarWithConstIfNotLvalue;
|
||||
|
||||
typedef typename internal::ref_selector<ExpressionType>::non_const_type NestedExpressionType;
|
||||
|
||||
using Base::coeffRef;
|
||||
|
||||
EIGEN_DEVICE_FUNC explicit EIGEN_STRONG_INLINE ArrayWrapper(ExpressionType& matrix) : m_expression(matrix) {}
|
||||
|
||||
EIGEN_DEVICE_FUNC constexpr Index rows() const noexcept { return m_expression.rows(); }
|
||||
EIGEN_DEVICE_FUNC constexpr Index cols() const noexcept { return m_expression.cols(); }
|
||||
EIGEN_DEVICE_FUNC constexpr Index outerStride() const noexcept { return m_expression.outerStride(); }
|
||||
EIGEN_DEVICE_FUNC constexpr Index innerStride() const noexcept { return m_expression.innerStride(); }
|
||||
|
||||
EIGEN_DEVICE_FUNC constexpr ScalarWithConstIfNotLvalue* data() { return m_expression.data(); }
|
||||
EIGEN_DEVICE_FUNC constexpr const Scalar* data() const { return m_expression.data(); }
|
||||
|
||||
EIGEN_DEVICE_FUNC inline const Scalar& coeffRef(Index rowId, Index colId) const {
|
||||
return m_expression.coeffRef(rowId, colId);
|
||||
}
|
||||
|
||||
EIGEN_DEVICE_FUNC inline const Scalar& coeffRef(Index index) const { return m_expression.coeffRef(index); }
|
||||
|
||||
template <typename Dest>
|
||||
EIGEN_DEVICE_FUNC inline void evalTo(Dest& dst) const {
|
||||
dst = m_expression;
|
||||
}
|
||||
|
||||
EIGEN_DEVICE_FUNC const internal::remove_all_t<NestedExpressionType>& nestedExpression() const {
|
||||
return m_expression;
|
||||
}
|
||||
|
||||
/** Forwards the resizing request to the nested expression
|
||||
* \sa DenseBase::resize(Index) */
|
||||
EIGEN_DEVICE_FUNC void resize(Index newSize) { m_expression.resize(newSize); }
|
||||
/** Forwards the resizing request to the nested expression
|
||||
* \sa DenseBase::resize(Index,Index)*/
|
||||
EIGEN_DEVICE_FUNC void resize(Index rows, Index cols) { m_expression.resize(rows, cols); }
|
||||
|
||||
protected:
|
||||
NestedExpressionType m_expression;
|
||||
};
|
||||
|
||||
/** \class MatrixWrapper
|
||||
* \ingroup Core_Module
|
||||
*
|
||||
* \brief Expression of an array as a mathematical vector or matrix
|
||||
*
|
||||
* This class is the return type of ArrayBase::matrix(), and most of the time
|
||||
* this is the only way it is used.
|
||||
*
|
||||
* \sa MatrixBase::matrix(), class ArrayWrapper
|
||||
*/
|
||||
|
||||
namespace internal {
|
||||
template <typename ExpressionType>
|
||||
struct traits<MatrixWrapper<ExpressionType> > : public traits<remove_all_t<typename ExpressionType::Nested> > {
|
||||
typedef MatrixXpr XprKind;
|
||||
// Let's remove NestByRefBit
|
||||
enum {
|
||||
Flags0 = traits<remove_all_t<typename ExpressionType::Nested> >::Flags,
|
||||
LvalueBitFlag = is_lvalue<ExpressionType>::value ? LvalueBit : 0,
|
||||
Flags = (Flags0 & ~(NestByRefBit | LvalueBit)) | LvalueBitFlag
|
||||
};
|
||||
};
|
||||
} // namespace internal
|
||||
|
||||
template <typename ExpressionType>
|
||||
class MatrixWrapper : public MatrixBase<MatrixWrapper<ExpressionType> > {
|
||||
public:
|
||||
typedef MatrixBase<MatrixWrapper<ExpressionType> > Base;
|
||||
EIGEN_DENSE_PUBLIC_INTERFACE(MatrixWrapper)
|
||||
EIGEN_INHERIT_ASSIGNMENT_OPERATORS(MatrixWrapper)
|
||||
typedef internal::remove_all_t<ExpressionType> NestedExpression;
|
||||
|
||||
typedef std::conditional_t<internal::is_lvalue<ExpressionType>::value, Scalar, const Scalar>
|
||||
ScalarWithConstIfNotLvalue;
|
||||
|
||||
typedef typename internal::ref_selector<ExpressionType>::non_const_type NestedExpressionType;
|
||||
|
||||
using Base::coeffRef;
|
||||
|
||||
EIGEN_DEVICE_FUNC explicit inline MatrixWrapper(ExpressionType& matrix) : m_expression(matrix) {}
|
||||
|
||||
EIGEN_DEVICE_FUNC constexpr Index rows() const noexcept { return m_expression.rows(); }
|
||||
EIGEN_DEVICE_FUNC constexpr Index cols() const noexcept { return m_expression.cols(); }
|
||||
EIGEN_DEVICE_FUNC constexpr Index outerStride() const noexcept { return m_expression.outerStride(); }
|
||||
EIGEN_DEVICE_FUNC constexpr Index innerStride() const noexcept { return m_expression.innerStride(); }
|
||||
|
||||
EIGEN_DEVICE_FUNC constexpr ScalarWithConstIfNotLvalue* data() { return m_expression.data(); }
|
||||
EIGEN_DEVICE_FUNC constexpr const Scalar* data() const { return m_expression.data(); }
|
||||
|
||||
EIGEN_DEVICE_FUNC inline const Scalar& coeffRef(Index rowId, Index colId) const {
|
||||
return m_expression.derived().coeffRef(rowId, colId);
|
||||
}
|
||||
|
||||
EIGEN_DEVICE_FUNC inline const Scalar& coeffRef(Index index) const { return m_expression.coeffRef(index); }
|
||||
|
||||
EIGEN_DEVICE_FUNC const internal::remove_all_t<NestedExpressionType>& nestedExpression() const {
|
||||
return m_expression;
|
||||
}
|
||||
|
||||
/** Forwards the resizing request to the nested expression
|
||||
* \sa DenseBase::resize(Index) */
|
||||
EIGEN_DEVICE_FUNC void resize(Index newSize) { m_expression.resize(newSize); }
|
||||
/** Forwards the resizing request to the nested expression
|
||||
* \sa DenseBase::resize(Index,Index)*/
|
||||
EIGEN_DEVICE_FUNC void resize(Index rows, Index cols) { m_expression.resize(rows, cols); }
|
||||
|
||||
protected:
|
||||
NestedExpressionType m_expression;
|
||||
};
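// Illustrative sketch (hypothetical values): these wrappers are what .array() and .matrix()
// return, so mixing the two semantics never requires a copy.
//
//   MatrixXd m = MatrixXd::Random(3, 3);
//   m.array() += 1.0;                          // ArrayWrapper: coefficient-wise in-place add
//   double s = (m.array() * m.array()).sum();  // element-wise square, then reduce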
|
||||
|
||||
} // end namespace Eigen
|
||||
|
||||
#endif // EIGEN_ARRAYWRAPPER_H
|
||||
80
2025.09.22_cpp_with_eigen_package/Eigen/src/Core/Assign.h
Normal file
80
2025.09.22_cpp_with_eigen_package/Eigen/src/Core/Assign.h
Normal file
@@ -0,0 +1,80 @@
|
||||
// This file is part of Eigen, a lightweight C++ template library
|
||||
// for linear algebra.
|
||||
//
|
||||
// Copyright (C) 2007 Michael Olbrich <michael.olbrich@gmx.net>
|
||||
// Copyright (C) 2006-2010 Benoit Jacob <jacob.benoit.1@gmail.com>
|
||||
// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla
|
||||
// Public License v. 2.0. If a copy of the MPL was not distributed
|
||||
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
#ifndef EIGEN_ASSIGN_H
|
||||
#define EIGEN_ASSIGN_H
|
||||
|
||||
// IWYU pragma: private
|
||||
#include "./InternalHeaderCheck.h"
|
||||
|
||||
namespace Eigen {
|
||||
|
||||
template <typename Derived>
|
||||
template <typename OtherDerived>
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& DenseBase<Derived>::lazyAssign(const DenseBase<OtherDerived>& other) {
|
||||
enum { SameType = internal::is_same<typename Derived::Scalar, typename OtherDerived::Scalar>::value };
|
||||
|
||||
EIGEN_STATIC_ASSERT_LVALUE(Derived)
|
||||
EIGEN_STATIC_ASSERT_SAME_MATRIX_SIZE(Derived, OtherDerived)
|
||||
EIGEN_STATIC_ASSERT(
|
||||
SameType,
|
||||
YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
|
||||
|
||||
eigen_assert(rows() == other.rows() && cols() == other.cols());
|
||||
internal::call_assignment_no_alias(derived(), other.derived());
|
||||
|
||||
return derived();
|
||||
}
|
||||
|
||||
template <typename Derived>
|
||||
template <typename OtherDerived>
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& DenseBase<Derived>::operator=(const DenseBase<OtherDerived>& other) {
|
||||
internal::call_assignment(derived(), other.derived());
|
||||
return derived();
|
||||
}
|
||||
|
||||
template <typename Derived>
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& DenseBase<Derived>::operator=(const DenseBase& other) {
|
||||
internal::call_assignment(derived(), other.derived());
|
||||
return derived();
|
||||
}
|
||||
|
||||
template <typename Derived>
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& MatrixBase<Derived>::operator=(const MatrixBase& other) {
|
||||
internal::call_assignment(derived(), other.derived());
|
||||
return derived();
|
||||
}
|
||||
|
||||
template <typename Derived>
|
||||
template <typename OtherDerived>
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& MatrixBase<Derived>::operator=(const DenseBase<OtherDerived>& other) {
|
||||
internal::call_assignment(derived(), other.derived());
|
||||
return derived();
|
||||
}
|
||||
|
||||
template <typename Derived>
|
||||
template <typename OtherDerived>
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& MatrixBase<Derived>::operator=(const EigenBase<OtherDerived>& other) {
|
||||
internal::call_assignment(derived(), other.derived());
|
||||
return derived();
|
||||
}
|
||||
|
||||
template <typename Derived>
|
||||
template <typename OtherDerived>
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& MatrixBase<Derived>::operator=(
|
||||
const ReturnByValue<OtherDerived>& other) {
|
||||
other.derived().evalTo(derived());
|
||||
return derived();
|
||||
}
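// Illustrative note (hypothetical snippet): operator= above routes through
// internal::call_assignment, which handles aliasing by evaluating into a temporary when needed;
// callers can opt out of that protection explicitly.
//
//   MatrixXd a = MatrixXd::Random(3, 3), b = MatrixXd::Random(3, 3);
//   a = a * b;             // product assigned through a temporary (aliasing is assumed)
//   a.noalias() = b * b;   // caller guarantees no aliasing, temporary avoided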
|
||||
|
||||
} // end namespace Eigen
|
||||
|
||||
#endif // EIGEN_ASSIGN_H
|
||||
1057
2025.09.22_cpp_with_eigen_package/Eigen/src/Core/AssignEvaluator.h
Normal file
1057
2025.09.22_cpp_with_eigen_package/Eigen/src/Core/AssignEvaluator.h
Normal file
File diff suppressed because it is too large
183
2025.09.22_cpp_with_eigen_package/Eigen/src/Core/Assign_MKL.h
Normal file
183
2025.09.22_cpp_with_eigen_package/Eigen/src/Core/Assign_MKL.h
Normal file
@@ -0,0 +1,183 @@
|
||||
/*
|
||||
Copyright (c) 2011, Intel Corporation. All rights reserved.
|
||||
Copyright (C) 2015 Gael Guennebaud <gael.guennebaud@inria.fr>
|
||||
|
||||
Redistribution and use in source and binary forms, with or without modification,
|
||||
are permitted provided that the following conditions are met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright notice, this
|
||||
list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above copyright notice,
|
||||
this list of conditions and the following disclaimer in the documentation
|
||||
and/or other materials provided with the distribution.
|
||||
* Neither the name of Intel Corporation nor the names of its contributors may
|
||||
be used to endorse or promote products derived from this software without
|
||||
specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
|
||||
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
||||
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
|
||||
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
|
||||
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
|
||||
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
|
||||
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
||||
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
********************************************************************************
|
||||
* Content : Eigen bindings to Intel(R) MKL
|
||||
* MKL VML support for coefficient-wise unary Eigen expressions like a=b.sin()
|
||||
********************************************************************************
|
||||
*/
|
||||
|
||||
#ifndef EIGEN_ASSIGN_VML_H
|
||||
#define EIGEN_ASSIGN_VML_H
|
||||
|
||||
// IWYU pragma: private
|
||||
#include "./InternalHeaderCheck.h"
|
||||
|
||||
namespace Eigen {
|
||||
|
||||
namespace internal {
|
||||
|
||||
template <typename Dst, typename Src>
|
||||
class vml_assign_traits {
|
||||
private:
|
||||
enum {
|
||||
DstHasDirectAccess = Dst::Flags & DirectAccessBit,
|
||||
SrcHasDirectAccess = Src::Flags & DirectAccessBit,
|
||||
StorageOrdersAgree = (int(Dst::IsRowMajor) == int(Src::IsRowMajor)),
|
||||
InnerSize = int(Dst::IsVectorAtCompileTime) ? int(Dst::SizeAtCompileTime)
|
||||
: int(Dst::Flags) & RowMajorBit ? int(Dst::ColsAtCompileTime)
|
||||
: int(Dst::RowsAtCompileTime),
|
||||
InnerMaxSize = int(Dst::IsVectorAtCompileTime) ? int(Dst::MaxSizeAtCompileTime)
|
||||
: int(Dst::Flags) & RowMajorBit ? int(Dst::MaxColsAtCompileTime)
|
||||
: int(Dst::MaxRowsAtCompileTime),
|
||||
MaxSizeAtCompileTime = Dst::SizeAtCompileTime,
|
||||
|
||||
MightEnableVml = StorageOrdersAgree && DstHasDirectAccess && SrcHasDirectAccess &&
|
||||
Src::InnerStrideAtCompileTime == 1 && Dst::InnerStrideAtCompileTime == 1,
|
||||
MightLinearize = MightEnableVml && (int(Dst::Flags) & int(Src::Flags) & LinearAccessBit),
|
||||
VmlSize = MightLinearize ? MaxSizeAtCompileTime : InnerMaxSize,
|
||||
LargeEnough = VmlSize == Dynamic || VmlSize >= EIGEN_MKL_VML_THRESHOLD
|
||||
};
|
||||
|
||||
public:
|
||||
enum { EnableVml = MightEnableVml && LargeEnough, Traversal = MightLinearize ? LinearTraversal : DefaultTraversal };
|
||||
};
|
||||
|
||||
#define EIGEN_PP_EXPAND(ARG) ARG
|
||||
#if !defined(EIGEN_FAST_MATH) || (EIGEN_FAST_MATH != 1)
|
||||
#define EIGEN_VMLMODE_EXPAND_xLA , VML_HA
|
||||
#else
|
||||
#define EIGEN_VMLMODE_EXPAND_xLA , VML_LA
|
||||
#endif
|
||||
|
||||
#define EIGEN_VMLMODE_EXPAND_x_
|
||||
|
||||
#define EIGEN_VMLMODE_PREFIX_xLA vm
|
||||
#define EIGEN_VMLMODE_PREFIX_x_ v
|
||||
#define EIGEN_VMLMODE_PREFIX(VMLMODE) EIGEN_CAT(EIGEN_VMLMODE_PREFIX_x, VMLMODE)
|
||||
|
||||
#define EIGEN_MKL_VML_DECLARE_UNARY_CALL(EIGENOP, VMLOP, EIGENTYPE, VMLTYPE, VMLMODE) \
|
||||
template <typename DstXprType, typename SrcXprNested> \
|
||||
struct Assignment<DstXprType, CwiseUnaryOp<scalar_##EIGENOP##_op<EIGENTYPE>, SrcXprNested>, \
|
||||
assign_op<EIGENTYPE, EIGENTYPE>, Dense2Dense, \
|
||||
std::enable_if_t<vml_assign_traits<DstXprType, SrcXprNested>::EnableVml>> { \
|
||||
typedef CwiseUnaryOp<scalar_##EIGENOP##_op<EIGENTYPE>, SrcXprNested> SrcXprType; \
|
||||
static void run(DstXprType &dst, const SrcXprType &src, const assign_op<EIGENTYPE, EIGENTYPE> &func) { \
|
||||
resize_if_allowed(dst, src, func); \
|
||||
eigen_assert(dst.rows() == src.rows() && dst.cols() == src.cols()); \
|
||||
if (vml_assign_traits<DstXprType, SrcXprNested>::Traversal == (int)LinearTraversal) { \
|
||||
VMLOP(dst.size(), (const VMLTYPE *)src.nestedExpression().data(), \
|
||||
(VMLTYPE *)dst.data() EIGEN_PP_EXPAND(EIGEN_VMLMODE_EXPAND_x##VMLMODE)); \
|
||||
} else { \
|
||||
const Index outerSize = dst.outerSize(); \
|
||||
for (Index outer = 0; outer < outerSize; ++outer) { \
|
||||
const EIGENTYPE *src_ptr = src.IsRowMajor ? &(src.nestedExpression().coeffRef(outer, 0)) \
|
||||
: &(src.nestedExpression().coeffRef(0, outer)); \
|
||||
EIGENTYPE *dst_ptr = dst.IsRowMajor ? &(dst.coeffRef(outer, 0)) : &(dst.coeffRef(0, outer)); \
|
||||
VMLOP(dst.innerSize(), (const VMLTYPE *)src_ptr, \
|
||||
(VMLTYPE *)dst_ptr EIGEN_PP_EXPAND(EIGEN_VMLMODE_EXPAND_x##VMLMODE)); \
|
||||
} \
|
||||
} \
|
||||
} \
|
||||
};
|
||||
|
||||
#define EIGEN_MKL_VML_DECLARE_UNARY_CALLS_REAL(EIGENOP, VMLOP, VMLMODE) \
|
||||
EIGEN_MKL_VML_DECLARE_UNARY_CALL(EIGENOP, EIGEN_CAT(EIGEN_VMLMODE_PREFIX(VMLMODE), s##VMLOP), float, float, VMLMODE) \
|
||||
EIGEN_MKL_VML_DECLARE_UNARY_CALL(EIGENOP, EIGEN_CAT(EIGEN_VMLMODE_PREFIX(VMLMODE), d##VMLOP), double, double, VMLMODE)
|
||||
|
||||
#define EIGEN_MKL_VML_DECLARE_UNARY_CALLS_CPLX(EIGENOP, VMLOP, VMLMODE) \
|
||||
EIGEN_MKL_VML_DECLARE_UNARY_CALL(EIGENOP, EIGEN_CAT(EIGEN_VMLMODE_PREFIX(VMLMODE), c##VMLOP), scomplex, \
|
||||
MKL_Complex8, VMLMODE) \
|
||||
EIGEN_MKL_VML_DECLARE_UNARY_CALL(EIGENOP, EIGEN_CAT(EIGEN_VMLMODE_PREFIX(VMLMODE), z##VMLOP), dcomplex, \
|
||||
MKL_Complex16, VMLMODE)
|
||||
|
||||
#define EIGEN_MKL_VML_DECLARE_UNARY_CALLS(EIGENOP, VMLOP, VMLMODE) \
|
||||
EIGEN_MKL_VML_DECLARE_UNARY_CALLS_REAL(EIGENOP, VMLOP, VMLMODE) \
|
||||
EIGEN_MKL_VML_DECLARE_UNARY_CALLS_CPLX(EIGENOP, VMLOP, VMLMODE)
|
||||
|
||||
EIGEN_MKL_VML_DECLARE_UNARY_CALLS(sin, Sin, LA)
|
||||
EIGEN_MKL_VML_DECLARE_UNARY_CALLS(asin, Asin, LA)
|
||||
EIGEN_MKL_VML_DECLARE_UNARY_CALLS(sinh, Sinh, LA)
|
||||
EIGEN_MKL_VML_DECLARE_UNARY_CALLS(cos, Cos, LA)
|
||||
EIGEN_MKL_VML_DECLARE_UNARY_CALLS(acos, Acos, LA)
|
||||
EIGEN_MKL_VML_DECLARE_UNARY_CALLS(cosh, Cosh, LA)
|
||||
EIGEN_MKL_VML_DECLARE_UNARY_CALLS(tan, Tan, LA)
|
||||
EIGEN_MKL_VML_DECLARE_UNARY_CALLS(atan, Atan, LA)
|
||||
EIGEN_MKL_VML_DECLARE_UNARY_CALLS(tanh, Tanh, LA)
|
||||
// EIGEN_MKL_VML_DECLARE_UNARY_CALLS(abs, Abs, _)
|
||||
EIGEN_MKL_VML_DECLARE_UNARY_CALLS(exp, Exp, LA)
|
||||
EIGEN_MKL_VML_DECLARE_UNARY_CALLS(log, Ln, LA)
|
||||
EIGEN_MKL_VML_DECLARE_UNARY_CALLS(log10, Log10, LA)
|
||||
EIGEN_MKL_VML_DECLARE_UNARY_CALLS(sqrt, Sqrt, _)
|
||||
|
||||
EIGEN_MKL_VML_DECLARE_UNARY_CALLS_REAL(square, Sqr, _)
|
||||
EIGEN_MKL_VML_DECLARE_UNARY_CALLS_CPLX(arg, Arg, _)
|
||||
EIGEN_MKL_VML_DECLARE_UNARY_CALLS_REAL(round, Round, _)
|
||||
EIGEN_MKL_VML_DECLARE_UNARY_CALLS_REAL(floor, Floor, _)
|
||||
EIGEN_MKL_VML_DECLARE_UNARY_CALLS_REAL(ceil, Ceil, _)
|
||||
EIGEN_MKL_VML_DECLARE_UNARY_CALLS_REAL(cbrt, Cbrt, _)
|
||||
|
||||
#define EIGEN_MKL_VML_DECLARE_POW_CALL(EIGENOP, VMLOP, EIGENTYPE, VMLTYPE, VMLMODE) \
|
||||
template <typename DstXprType, typename SrcXprNested, typename Plain> \
|
||||
struct Assignment<DstXprType, \
|
||||
CwiseBinaryOp<scalar_##EIGENOP##_op<EIGENTYPE, EIGENTYPE>, SrcXprNested, \
|
||||
const CwiseNullaryOp<internal::scalar_constant_op<EIGENTYPE>, Plain>>, \
|
||||
assign_op<EIGENTYPE, EIGENTYPE>, Dense2Dense, \
|
||||
std::enable_if_t<vml_assign_traits<DstXprType, SrcXprNested>::EnableVml>> { \
|
||||
typedef CwiseBinaryOp<scalar_##EIGENOP##_op<EIGENTYPE, EIGENTYPE>, SrcXprNested, \
|
||||
const CwiseNullaryOp<internal::scalar_constant_op<EIGENTYPE>, Plain>> \
|
||||
SrcXprType; \
|
||||
static void run(DstXprType &dst, const SrcXprType &src, const assign_op<EIGENTYPE, EIGENTYPE> &func) { \
|
||||
resize_if_allowed(dst, src, func); \
|
||||
eigen_assert(dst.rows() == src.rows() && dst.cols() == src.cols()); \
|
||||
VMLTYPE exponent = reinterpret_cast<const VMLTYPE &>(src.rhs().functor().m_other); \
|
||||
if (vml_assign_traits<DstXprType, SrcXprNested>::Traversal == LinearTraversal) { \
|
||||
VMLOP(dst.size(), (const VMLTYPE *)src.lhs().data(), exponent, \
|
||||
(VMLTYPE *)dst.data() EIGEN_PP_EXPAND(EIGEN_VMLMODE_EXPAND_x##VMLMODE)); \
|
||||
} else { \
|
||||
const Index outerSize = dst.outerSize(); \
|
||||
for (Index outer = 0; outer < outerSize; ++outer) { \
|
||||
const EIGENTYPE *src_ptr = \
|
||||
src.IsRowMajor ? &(src.lhs().coeffRef(outer, 0)) : &(src.lhs().coeffRef(0, outer)); \
|
||||
EIGENTYPE *dst_ptr = dst.IsRowMajor ? &(dst.coeffRef(outer, 0)) : &(dst.coeffRef(0, outer)); \
|
||||
VMLOP(dst.innerSize(), (const VMLTYPE *)src_ptr, exponent, \
|
||||
(VMLTYPE *)dst_ptr EIGEN_PP_EXPAND(EIGEN_VMLMODE_EXPAND_x##VMLMODE)); \
|
||||
} \
|
||||
} \
|
||||
} \
|
||||
};
|
||||
|
||||
EIGEN_MKL_VML_DECLARE_POW_CALL(pow, vmsPowx, float, float, LA)
|
||||
EIGEN_MKL_VML_DECLARE_POW_CALL(pow, vmdPowx, double, double, LA)
|
||||
EIGEN_MKL_VML_DECLARE_POW_CALL(pow, vmcPowx, scomplex, MKL_Complex8, LA)
|
||||
EIGEN_MKL_VML_DECLARE_POW_CALL(pow, vmzPowx, dcomplex, MKL_Complex16, LA)
|
||||
|
||||
} // end namespace internal
|
||||
|
||||
} // end namespace Eigen
|
||||
|
||||
#endif // EIGEN_ASSIGN_VML_H
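// A hedged sketch of how the bindings above are typically exercised: when the
// build defines EIGEN_USE_MKL_VML (assumed here) and links against MKL, large
// coefficient-wise unary expressions such as a = b.sin() become eligible for
// the VML kernels declared by EIGEN_MKL_VML_DECLARE_UNARY_CALLS (e.g. vmsSin
// for float). Without that define, the same code simply uses Eigen's own
// vectorized math.
#include <Eigen/Dense>

int main() {
  Eigen::ArrayXf b = Eigen::ArrayXf::Random(1 << 16);  // large enough to pass EIGEN_MKL_VML_THRESHOLD
  Eigen::ArrayXf a = b.sin();  // routed through the Assignment specialization above when VML is enabled
  return (a.size() == b.size()) ? 0 : 1;
}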
338
2025.09.22_cpp_with_eigen_package/Eigen/src/Core/BandMatrix.h
Normal file
@@ -0,0 +1,338 @@
// This file is part of Eigen, a lightweight C++ template library
|
||||
// for linear algebra.
|
||||
//
|
||||
// Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla
|
||||
// Public License v. 2.0. If a copy of the MPL was not distributed
|
||||
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
#ifndef EIGEN_BANDMATRIX_H
|
||||
#define EIGEN_BANDMATRIX_H
|
||||
|
||||
// IWYU pragma: private
|
||||
#include "./InternalHeaderCheck.h"
|
||||
|
||||
namespace Eigen {
|
||||
|
||||
namespace internal {
|
||||
|
||||
template <typename Derived>
|
||||
class BandMatrixBase : public EigenBase<Derived> {
|
||||
public:
|
||||
enum {
|
||||
Flags = internal::traits<Derived>::Flags,
|
||||
CoeffReadCost = internal::traits<Derived>::CoeffReadCost,
|
||||
RowsAtCompileTime = internal::traits<Derived>::RowsAtCompileTime,
|
||||
ColsAtCompileTime = internal::traits<Derived>::ColsAtCompileTime,
|
||||
MaxRowsAtCompileTime = internal::traits<Derived>::MaxRowsAtCompileTime,
|
||||
MaxColsAtCompileTime = internal::traits<Derived>::MaxColsAtCompileTime,
|
||||
Supers = internal::traits<Derived>::Supers,
|
||||
Subs = internal::traits<Derived>::Subs,
|
||||
Options = internal::traits<Derived>::Options
|
||||
};
|
||||
typedef typename internal::traits<Derived>::Scalar Scalar;
|
||||
typedef Matrix<Scalar, RowsAtCompileTime, ColsAtCompileTime> DenseMatrixType;
|
||||
typedef typename DenseMatrixType::StorageIndex StorageIndex;
|
||||
typedef typename internal::traits<Derived>::CoefficientsType CoefficientsType;
|
||||
typedef EigenBase<Derived> Base;
|
||||
|
||||
protected:
|
||||
enum {
|
||||
DataRowsAtCompileTime = ((Supers != Dynamic) && (Subs != Dynamic)) ? 1 + Supers + Subs : Dynamic,
|
||||
SizeAtCompileTime = min_size_prefer_dynamic(RowsAtCompileTime, ColsAtCompileTime)
|
||||
};
|
||||
|
||||
public:
|
||||
using Base::cols;
|
||||
using Base::derived;
|
||||
using Base::rows;
|
||||
|
||||
/** \returns the number of super diagonals */
|
||||
inline Index supers() const { return derived().supers(); }
|
||||
|
||||
/** \returns the number of sub diagonals */
|
||||
inline Index subs() const { return derived().subs(); }
|
||||
|
||||
/** \returns an expression of the underlying coefficient matrix */
|
||||
inline const CoefficientsType& coeffs() const { return derived().coeffs(); }
|
||||
|
||||
/** \returns an expression of the underlying coefficient matrix */
|
||||
inline CoefficientsType& coeffs() { return derived().coeffs(); }
|
||||
|
||||
/** \returns a vector expression of the \a i -th column,
|
||||
* only the meaningful part is returned.
|
||||
* \warning the internal storage must be column major. */
|
||||
inline Block<CoefficientsType, Dynamic, 1> col(Index i) {
|
||||
EIGEN_STATIC_ASSERT((int(Options) & int(RowMajor)) == 0, THIS_METHOD_IS_ONLY_FOR_COLUMN_MAJOR_MATRICES);
|
||||
Index start = 0;
|
||||
Index len = coeffs().rows();
|
||||
if (i <= supers()) {
|
||||
start = supers() - i;
|
||||
len = (std::min)(rows(), std::max<Index>(0, coeffs().rows() - (supers() - i)));
|
||||
} else if (i >= rows() - subs())
|
||||
len = std::max<Index>(0, coeffs().rows() - (i + 1 - rows() + subs()));
|
||||
return Block<CoefficientsType, Dynamic, 1>(coeffs(), start, i, len, 1);
|
||||
}
|
||||
|
||||
/** \returns a vector expression of the main diagonal */
|
||||
inline Block<CoefficientsType, 1, SizeAtCompileTime> diagonal() {
|
||||
return Block<CoefficientsType, 1, SizeAtCompileTime>(coeffs(), supers(), 0, 1, (std::min)(rows(), cols()));
|
||||
}
|
||||
|
||||
/** \returns a vector expression of the main diagonal (const version) */
|
||||
inline const Block<const CoefficientsType, 1, SizeAtCompileTime> diagonal() const {
|
||||
return Block<const CoefficientsType, 1, SizeAtCompileTime>(coeffs(), supers(), 0, 1, (std::min)(rows(), cols()));
|
||||
}
|
||||
|
||||
template <int Index>
|
||||
struct DiagonalIntReturnType {
|
||||
enum {
|
||||
ReturnOpposite =
|
||||
(int(Options) & int(SelfAdjoint)) && (((Index) > 0 && Supers == 0) || ((Index) < 0 && Subs == 0)),
|
||||
Conjugate = ReturnOpposite && NumTraits<Scalar>::IsComplex,
|
||||
ActualIndex = ReturnOpposite ? -Index : Index,
|
||||
DiagonalSize =
|
||||
(RowsAtCompileTime == Dynamic || ColsAtCompileTime == Dynamic)
|
||||
? Dynamic
|
||||
: (ActualIndex < 0 ? min_size_prefer_dynamic(ColsAtCompileTime, RowsAtCompileTime + ActualIndex)
|
||||
: min_size_prefer_dynamic(RowsAtCompileTime, ColsAtCompileTime - ActualIndex))
|
||||
};
|
||||
typedef Block<CoefficientsType, 1, DiagonalSize> BuildType;
|
||||
typedef std::conditional_t<Conjugate, CwiseUnaryOp<internal::scalar_conjugate_op<Scalar>, BuildType>, BuildType>
|
||||
Type;
|
||||
};
|
||||
|
||||
/** \returns a vector expression of the \a N -th sub or super diagonal */
|
||||
template <int N>
|
||||
inline typename DiagonalIntReturnType<N>::Type diagonal() {
|
||||
return typename DiagonalIntReturnType<N>::BuildType(coeffs(), supers() - N, (std::max)(0, N), 1, diagonalLength(N));
|
||||
}
|
||||
|
||||
/** \returns a vector expression of the \a N -th sub or super diagonal */
|
||||
template <int N>
|
||||
inline const typename DiagonalIntReturnType<N>::Type diagonal() const {
|
||||
return typename DiagonalIntReturnType<N>::BuildType(coeffs(), supers() - N, (std::max)(0, N), 1, diagonalLength(N));
|
||||
}
|
||||
|
||||
/** \returns a vector expression of the \a i -th sub or super diagonal */
|
||||
inline Block<CoefficientsType, 1, Dynamic> diagonal(Index i) {
|
||||
eigen_assert((i < 0 && -i <= subs()) || (i >= 0 && i <= supers()));
|
||||
return Block<CoefficientsType, 1, Dynamic>(coeffs(), supers() - i, std::max<Index>(0, i), 1, diagonalLength(i));
|
||||
}
|
||||
|
||||
/** \returns a vector expression of the \a i -th sub or super diagonal */
|
||||
inline const Block<const CoefficientsType, 1, Dynamic> diagonal(Index i) const {
|
||||
eigen_assert((i < 0 && -i <= subs()) || (i >= 0 && i <= supers()));
|
||||
return Block<const CoefficientsType, 1, Dynamic>(coeffs(), supers() - i, std::max<Index>(0, i), 1,
|
||||
diagonalLength(i));
|
||||
}
|
||||
|
||||
template <typename Dest>
|
||||
inline void evalTo(Dest& dst) const {
|
||||
dst.resize(rows(), cols());
|
||||
dst.setZero();
|
||||
dst.diagonal() = diagonal();
|
||||
for (Index i = 1; i <= supers(); ++i) dst.diagonal(i) = diagonal(i);
|
||||
for (Index i = 1; i <= subs(); ++i) dst.diagonal(-i) = diagonal(-i);
|
||||
}
|
||||
|
||||
DenseMatrixType toDenseMatrix() const {
|
||||
DenseMatrixType res(rows(), cols());
|
||||
evalTo(res);
|
||||
return res;
|
||||
}
|
||||
|
||||
protected:
|
||||
inline Index diagonalLength(Index i) const {
|
||||
return i < 0 ? (std::min)(cols(), rows() + i) : (std::min)(rows(), cols() - i);
|
||||
}
|
||||
};
|
||||
|
||||
/**
 * \class BandMatrix
 * \ingroup Core_Module
 *
 * \brief Represents a rectangular matrix with a banded storage
 *
 * \tparam Scalar_ Numeric type, i.e. float, double, int
 * \tparam Rows_ Number of rows, or \b Dynamic
 * \tparam Cols_ Number of columns, or \b Dynamic
 * \tparam Supers_ Number of super diagonals
 * \tparam Subs_ Number of sub diagonals
 * \tparam Options_ A combination of either \b #RowMajor or \b #ColMajor, and of \b #SelfAdjoint
 *                  The former controls \ref TopicStorageOrders "storage order", and defaults to
 *                  column-major. The latter controls whether the matrix represents a selfadjoint
 *                  matrix, in which case either Supers_ or Subs_ has to be zero.
 *
 * \sa class TridiagonalMatrix
 */

template <typename Scalar_, int Rows_, int Cols_, int Supers_, int Subs_, int Options_>
|
||||
struct traits<BandMatrix<Scalar_, Rows_, Cols_, Supers_, Subs_, Options_> > {
|
||||
typedef Scalar_ Scalar;
|
||||
typedef Dense StorageKind;
|
||||
typedef Eigen::Index StorageIndex;
|
||||
enum {
|
||||
CoeffReadCost = NumTraits<Scalar>::ReadCost,
|
||||
RowsAtCompileTime = Rows_,
|
||||
ColsAtCompileTime = Cols_,
|
||||
MaxRowsAtCompileTime = Rows_,
|
||||
MaxColsAtCompileTime = Cols_,
|
||||
Flags = LvalueBit,
|
||||
Supers = Supers_,
|
||||
Subs = Subs_,
|
||||
Options = Options_,
|
||||
DataRowsAtCompileTime = ((Supers != Dynamic) && (Subs != Dynamic)) ? 1 + Supers + Subs : Dynamic
|
||||
};
|
||||
typedef Matrix<Scalar, DataRowsAtCompileTime, ColsAtCompileTime, int(Options) & int(RowMajor) ? RowMajor : ColMajor>
|
||||
CoefficientsType;
|
||||
};
|
||||
|
||||
template <typename Scalar_, int Rows, int Cols, int Supers, int Subs, int Options>
|
||||
class BandMatrix : public BandMatrixBase<BandMatrix<Scalar_, Rows, Cols, Supers, Subs, Options> > {
|
||||
public:
|
||||
typedef typename internal::traits<BandMatrix>::Scalar Scalar;
|
||||
typedef typename internal::traits<BandMatrix>::StorageIndex StorageIndex;
|
||||
typedef typename internal::traits<BandMatrix>::CoefficientsType CoefficientsType;
|
||||
|
||||
explicit inline BandMatrix(Index rows = Rows, Index cols = Cols, Index supers = Supers, Index subs = Subs)
|
||||
: m_coeffs(1 + supers + subs, cols), m_rows(rows), m_supers(supers), m_subs(subs) {}
|
||||
|
||||
  /** \returns the number of rows */
  constexpr Index rows() const { return m_rows.value(); }

  /** \returns the number of columns */
  constexpr Index cols() const { return m_coeffs.cols(); }
|
||||
|
||||
/** \returns the number of super diagonals */
|
||||
constexpr Index supers() const { return m_supers.value(); }
|
||||
|
||||
/** \returns the number of sub diagonals */
|
||||
constexpr Index subs() const { return m_subs.value(); }
|
||||
|
||||
inline const CoefficientsType& coeffs() const { return m_coeffs; }
|
||||
inline CoefficientsType& coeffs() { return m_coeffs; }
|
||||
|
||||
protected:
|
||||
CoefficientsType m_coeffs;
|
||||
internal::variable_if_dynamic<Index, Rows> m_rows;
|
||||
internal::variable_if_dynamic<Index, Supers> m_supers;
|
||||
internal::variable_if_dynamic<Index, Subs> m_subs;
|
||||
};
|
||||
|
||||
template <typename CoefficientsType_, int Rows_, int Cols_, int Supers_, int Subs_, int Options_>
|
||||
class BandMatrixWrapper;
|
||||
|
||||
template <typename CoefficientsType_, int Rows_, int Cols_, int Supers_, int Subs_, int Options_>
|
||||
struct traits<BandMatrixWrapper<CoefficientsType_, Rows_, Cols_, Supers_, Subs_, Options_> > {
|
||||
typedef typename CoefficientsType_::Scalar Scalar;
|
||||
typedef typename CoefficientsType_::StorageKind StorageKind;
|
||||
typedef typename CoefficientsType_::StorageIndex StorageIndex;
|
||||
enum {
|
||||
CoeffReadCost = internal::traits<CoefficientsType_>::CoeffReadCost,
|
||||
RowsAtCompileTime = Rows_,
|
||||
ColsAtCompileTime = Cols_,
|
||||
MaxRowsAtCompileTime = Rows_,
|
||||
MaxColsAtCompileTime = Cols_,
|
||||
Flags = LvalueBit,
|
||||
Supers = Supers_,
|
||||
Subs = Subs_,
|
||||
Options = Options_,
|
||||
DataRowsAtCompileTime = ((Supers != Dynamic) && (Subs != Dynamic)) ? 1 + Supers + Subs : Dynamic
|
||||
};
|
||||
typedef CoefficientsType_ CoefficientsType;
|
||||
};
|
||||
|
||||
template <typename CoefficientsType_, int Rows_, int Cols_, int Supers_, int Subs_, int Options_>
|
||||
class BandMatrixWrapper
|
||||
: public BandMatrixBase<BandMatrixWrapper<CoefficientsType_, Rows_, Cols_, Supers_, Subs_, Options_> > {
|
||||
public:
|
||||
typedef typename internal::traits<BandMatrixWrapper>::Scalar Scalar;
|
||||
typedef typename internal::traits<BandMatrixWrapper>::CoefficientsType CoefficientsType;
|
||||
typedef typename internal::traits<BandMatrixWrapper>::StorageIndex StorageIndex;
|
||||
|
||||
explicit inline BandMatrixWrapper(const CoefficientsType& coeffs, Index rows = Rows_, Index cols = Cols_,
|
||||
Index supers = Supers_, Index subs = Subs_)
|
||||
: m_coeffs(coeffs), m_rows(rows), m_supers(supers), m_subs(subs) {
|
||||
EIGEN_UNUSED_VARIABLE(cols);
|
||||
// eigen_assert(coeffs.cols()==cols() && (supers()+subs()+1)==coeffs.rows());
|
||||
}
|
||||
|
||||
  /** \returns the number of rows */
  constexpr Index rows() const { return m_rows.value(); }

  /** \returns the number of columns */
  constexpr Index cols() const { return m_coeffs.cols(); }
|
||||
|
||||
/** \returns the number of super diagonals */
|
||||
constexpr Index supers() const { return m_supers.value(); }
|
||||
|
||||
/** \returns the number of sub diagonals */
|
||||
constexpr Index subs() const { return m_subs.value(); }
|
||||
|
||||
inline const CoefficientsType& coeffs() const { return m_coeffs; }
|
||||
|
||||
protected:
|
||||
const CoefficientsType& m_coeffs;
|
||||
internal::variable_if_dynamic<Index, Rows_> m_rows;
|
||||
internal::variable_if_dynamic<Index, Supers_> m_supers;
|
||||
internal::variable_if_dynamic<Index, Subs_> m_subs;
|
||||
};
|
||||
|
||||
/**
|
||||
* \class TridiagonalMatrix
|
||||
* \ingroup Core_Module
|
||||
*
|
||||
* \brief Represents a tridiagonal matrix with a compact banded storage
|
||||
*
|
||||
* \tparam Scalar Numeric type, i.e. float, double, int
|
||||
* \tparam Size Number of rows and cols, or \b Dynamic
|
||||
* \tparam Options Can be 0 or \b SelfAdjoint
|
||||
*
|
||||
* \sa class BandMatrix
|
||||
*/
|
||||
template <typename Scalar, int Size, int Options>
|
||||
class TridiagonalMatrix : public BandMatrix<Scalar, Size, Size, Options & SelfAdjoint ? 0 : 1, 1, Options | RowMajor> {
|
||||
typedef BandMatrix<Scalar, Size, Size, Options & SelfAdjoint ? 0 : 1, 1, Options | RowMajor> Base;
|
||||
typedef typename Base::StorageIndex StorageIndex;
|
||||
|
||||
public:
|
||||
explicit TridiagonalMatrix(Index size = Size) : Base(size, size, Options & SelfAdjoint ? 0 : 1, 1) {}
|
||||
|
||||
inline typename Base::template DiagonalIntReturnType<1>::Type super() { return Base::template diagonal<1>(); }
|
||||
inline const typename Base::template DiagonalIntReturnType<1>::Type super() const {
|
||||
return Base::template diagonal<1>();
|
||||
}
|
||||
inline typename Base::template DiagonalIntReturnType<-1>::Type sub() { return Base::template diagonal<-1>(); }
|
||||
inline const typename Base::template DiagonalIntReturnType<-1>::Type sub() const {
|
||||
return Base::template diagonal<-1>();
|
||||
}
|
||||
|
||||
protected:
|
||||
};
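// For the common tridiagonal case, a hedged usage sketch (this is an internal
// helper, so the calls below are illustrative rather than a public guarantee):
//
//   Eigen::internal::TridiagonalMatrix<double, Eigen::Dynamic, 0> T(4);
//   T.diagonal().setConstant(2.0);   // main diagonal
//   T.super().setConstant(-1.0);     // super-diagonal
//   T.sub().setConstant(-1.0);       // sub-diagonal
//   Eigen::MatrixXd dense = T.toDenseMatrix();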
|
||||
|
||||
struct BandShape {};
|
||||
|
||||
template <typename Scalar_, int Rows_, int Cols_, int Supers_, int Subs_, int Options_>
|
||||
struct evaluator_traits<BandMatrix<Scalar_, Rows_, Cols_, Supers_, Subs_, Options_> >
|
||||
: public evaluator_traits_base<BandMatrix<Scalar_, Rows_, Cols_, Supers_, Subs_, Options_> > {
|
||||
typedef BandShape Shape;
|
||||
};
|
||||
|
||||
template <typename CoefficientsType_, int Rows_, int Cols_, int Supers_, int Subs_, int Options_>
|
||||
struct evaluator_traits<BandMatrixWrapper<CoefficientsType_, Rows_, Cols_, Supers_, Subs_, Options_> >
|
||||
: public evaluator_traits_base<BandMatrixWrapper<CoefficientsType_, Rows_, Cols_, Supers_, Subs_, Options_> > {
|
||||
typedef BandShape Shape;
|
||||
};
|
||||
|
||||
template <>
|
||||
struct AssignmentKind<DenseShape, BandShape> {
|
||||
typedef EigenBase2EigenBase Kind;
|
||||
};
|
||||
|
||||
} // end namespace internal
|
||||
|
||||
} // end namespace Eigen
|
||||
|
||||
#endif // EIGEN_BANDMATRIX_H
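// A small self-contained sketch, based on the interface above, of filling a
// BandMatrix and expanding it to a dense matrix. BandMatrix lives in
// Eigen::internal and is mainly used by Eigen itself, so treat this as
// illustrative only.
#include <iostream>
#include <Eigen/Dense>

int main() {
  // 5x5 band matrix with 1 super-diagonal and 2 sub-diagonals.
  Eigen::internal::BandMatrix<double, 5, 5, 1, 2, 0> bm(5, 5, 1, 2);
  bm.diagonal().setConstant(2.0);     // main diagonal
  bm.diagonal(1).setConstant(-1.0);   // first super-diagonal
  bm.diagonal(-1).setConstant(-1.0);  // first sub-diagonal
  bm.diagonal(-2).setConstant(0.5);   // second sub-diagonal
  std::cout << bm.toDenseMatrix() << std::endl;  // zero outside the band
  return 0;
}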
429
2025.09.22_cpp_with_eigen_package/Eigen/src/Core/Block.h
Normal file
@@ -0,0 +1,429 @@
// This file is part of Eigen, a lightweight C++ template library
|
||||
// for linear algebra.
|
||||
//
|
||||
// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
|
||||
// Copyright (C) 2006-2010 Benoit Jacob <jacob.benoit.1@gmail.com>
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla
|
||||
// Public License v. 2.0. If a copy of the MPL was not distributed
|
||||
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
#ifndef EIGEN_BLOCK_H
|
||||
#define EIGEN_BLOCK_H
|
||||
|
||||
// IWYU pragma: private
|
||||
#include "./InternalHeaderCheck.h"
|
||||
|
||||
namespace Eigen {
|
||||
|
||||
namespace internal {
|
||||
template <typename XprType_, int BlockRows, int BlockCols, bool InnerPanel_>
|
||||
struct traits<Block<XprType_, BlockRows, BlockCols, InnerPanel_>> : traits<XprType_> {
|
||||
typedef typename traits<XprType_>::Scalar Scalar;
|
||||
typedef typename traits<XprType_>::StorageKind StorageKind;
|
||||
typedef typename traits<XprType_>::XprKind XprKind;
|
||||
typedef typename ref_selector<XprType_>::type XprTypeNested;
|
||||
typedef std::remove_reference_t<XprTypeNested> XprTypeNested_;
|
||||
enum {
|
||||
MatrixRows = traits<XprType_>::RowsAtCompileTime,
|
||||
MatrixCols = traits<XprType_>::ColsAtCompileTime,
|
||||
RowsAtCompileTime = MatrixRows == 0 ? 0 : BlockRows,
|
||||
ColsAtCompileTime = MatrixCols == 0 ? 0 : BlockCols,
|
||||
MaxRowsAtCompileTime = BlockRows == 0 ? 0
|
||||
: RowsAtCompileTime != Dynamic ? int(RowsAtCompileTime)
|
||||
: int(traits<XprType_>::MaxRowsAtCompileTime),
|
||||
MaxColsAtCompileTime = BlockCols == 0 ? 0
|
||||
: ColsAtCompileTime != Dynamic ? int(ColsAtCompileTime)
|
||||
: int(traits<XprType_>::MaxColsAtCompileTime),
|
||||
|
||||
XprTypeIsRowMajor = (int(traits<XprType_>::Flags) & RowMajorBit) != 0,
|
||||
IsRowMajor = (MaxRowsAtCompileTime == 1 && MaxColsAtCompileTime != 1) ? 1
|
||||
: (MaxColsAtCompileTime == 1 && MaxRowsAtCompileTime != 1) ? 0
|
||||
: XprTypeIsRowMajor,
|
||||
HasSameStorageOrderAsXprType = (IsRowMajor == XprTypeIsRowMajor),
|
||||
InnerSize = IsRowMajor ? int(ColsAtCompileTime) : int(RowsAtCompileTime),
|
||||
InnerStrideAtCompileTime = HasSameStorageOrderAsXprType ? int(inner_stride_at_compile_time<XprType_>::ret)
|
||||
: int(outer_stride_at_compile_time<XprType_>::ret),
|
||||
OuterStrideAtCompileTime = HasSameStorageOrderAsXprType ? int(outer_stride_at_compile_time<XprType_>::ret)
|
||||
: int(inner_stride_at_compile_time<XprType_>::ret),
|
||||
|
||||
    // FIXME: this traits class is rather specialized for dense objects and needs to be cleaned up further
|
||||
FlagsLvalueBit = is_lvalue<XprType_>::value ? LvalueBit : 0,
|
||||
FlagsRowMajorBit = IsRowMajor ? RowMajorBit : 0,
|
||||
Flags = (traits<XprType_>::Flags & (DirectAccessBit | (InnerPanel_ ? CompressedAccessBit : 0))) | FlagsLvalueBit |
|
||||
FlagsRowMajorBit,
|
||||
// FIXME DirectAccessBit should not be handled by expressions
|
||||
//
|
||||
// Alignment is needed by MapBase's assertions
|
||||
    // We can safely set it to false here. Internal alignment errors will be detected by an eigen_internal_assert in the
|
||||
// respective evaluator
|
||||
Alignment = 0,
|
||||
InnerPanel = InnerPanel_ ? 1 : 0
|
||||
};
|
||||
};
|
||||
|
||||
template <typename XprType, int BlockRows = Dynamic, int BlockCols = Dynamic, bool InnerPanel = false,
|
||||
bool HasDirectAccess = internal::has_direct_access<XprType>::ret>
|
||||
class BlockImpl_dense;
|
||||
|
||||
} // end namespace internal
|
||||
|
||||
template <typename XprType, int BlockRows, int BlockCols, bool InnerPanel, typename StorageKind>
|
||||
class BlockImpl;
|
||||
|
||||
/** \class Block
|
||||
* \ingroup Core_Module
|
||||
*
|
||||
* \brief Expression of a fixed-size or dynamic-size block
|
||||
*
|
||||
* \tparam XprType the type of the expression in which we are taking a block
|
||||
* \tparam BlockRows the number of rows of the block we are taking at compile time (optional)
|
||||
* \tparam BlockCols the number of columns of the block we are taking at compile time (optional)
|
||||
 * \tparam InnerPanel is true, if the block maps to a set of rows of a row major matrix or
 *        to a set of columns of a column major matrix (optional). The parameter allows one to determine
 *        at compile time whether aligned access is possible on the block expression.
|
||||
*
|
||||
* This class represents an expression of either a fixed-size or dynamic-size block. It is the return
|
||||
* type of DenseBase::block(Index,Index,Index,Index) and DenseBase::block<int,int>(Index,Index) and
|
||||
* most of the time this is the only way it is used.
|
||||
*
|
||||
* However, if you want to directly manipulate block expressions,
|
||||
* for instance if you want to write a function returning such an expression, you
|
||||
* will need to use this class.
|
||||
*
|
||||
* Here is an example illustrating the dynamic case:
|
||||
* \include class_Block.cpp
|
||||
* Output: \verbinclude class_Block.out
|
||||
*
|
||||
* \note Even though this expression has dynamic size, in the case where \a XprType
|
||||
* has fixed size, this expression inherits a fixed maximal size which means that evaluating
|
||||
* it does not cause a dynamic memory allocation.
|
||||
*
|
||||
* Here is an example illustrating the fixed-size case:
|
||||
* \include class_FixedBlock.cpp
|
||||
* Output: \verbinclude class_FixedBlock.out
|
||||
*
|
||||
* \sa DenseBase::block(Index,Index,Index,Index), DenseBase::block(Index,Index), class VectorBlock
|
||||
*/
|
||||
template <typename XprType, int BlockRows, int BlockCols, bool InnerPanel>
|
||||
class Block
|
||||
: public BlockImpl<XprType, BlockRows, BlockCols, InnerPanel, typename internal::traits<XprType>::StorageKind> {
|
||||
typedef BlockImpl<XprType, BlockRows, BlockCols, InnerPanel, typename internal::traits<XprType>::StorageKind> Impl;
|
||||
using BlockHelper = internal::block_xpr_helper<Block>;
|
||||
|
||||
public:
|
||||
// typedef typename Impl::Base Base;
|
||||
typedef Impl Base;
|
||||
EIGEN_GENERIC_PUBLIC_INTERFACE(Block)
|
||||
EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Block)
|
||||
|
||||
typedef internal::remove_all_t<XprType> NestedExpression;
|
||||
|
||||
/** Column or Row constructor
|
||||
*/
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Block(XprType& xpr, Index i) : Impl(xpr, i) {
|
||||
eigen_assert((i >= 0) && (((BlockRows == 1) && (BlockCols == XprType::ColsAtCompileTime) && i < xpr.rows()) ||
|
||||
((BlockRows == XprType::RowsAtCompileTime) && (BlockCols == 1) && i < xpr.cols())));
|
||||
}
|
||||
|
||||
/** Fixed-size constructor
|
||||
*/
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Block(XprType& xpr, Index startRow, Index startCol)
|
||||
: Impl(xpr, startRow, startCol) {
|
||||
EIGEN_STATIC_ASSERT(RowsAtCompileTime != Dynamic && ColsAtCompileTime != Dynamic,
|
||||
THIS_METHOD_IS_ONLY_FOR_FIXED_SIZE)
|
||||
eigen_assert(startRow >= 0 && BlockRows >= 0 && startRow + BlockRows <= xpr.rows() && startCol >= 0 &&
|
||||
BlockCols >= 0 && startCol + BlockCols <= xpr.cols());
|
||||
}
|
||||
|
||||
/** Dynamic-size constructor
|
||||
*/
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Block(XprType& xpr, Index startRow, Index startCol, Index blockRows,
|
||||
Index blockCols)
|
||||
: Impl(xpr, startRow, startCol, blockRows, blockCols) {
|
||||
eigen_assert((RowsAtCompileTime == Dynamic || RowsAtCompileTime == blockRows) &&
|
||||
(ColsAtCompileTime == Dynamic || ColsAtCompileTime == blockCols));
|
||||
eigen_assert(startRow >= 0 && blockRows >= 0 && startRow <= xpr.rows() - blockRows && startCol >= 0 &&
|
||||
blockCols >= 0 && startCol <= xpr.cols() - blockCols);
|
||||
}
|
||||
|
||||
// convert nested blocks (e.g. Block<Block<MatrixType>>) to a simple block expression (Block<MatrixType>)
|
||||
|
||||
using ConstUnwindReturnType = Block<const typename BlockHelper::BaseType, BlockRows, BlockCols, InnerPanel>;
|
||||
using UnwindReturnType = Block<typename BlockHelper::BaseType, BlockRows, BlockCols, InnerPanel>;
|
||||
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE ConstUnwindReturnType unwind() const {
|
||||
return ConstUnwindReturnType(BlockHelper::base(*this), BlockHelper::row(*this, 0), BlockHelper::col(*this, 0),
|
||||
this->rows(), this->cols());
|
||||
}
|
||||
|
||||
template <typename T = Block, typename EnableIf = std::enable_if_t<!std::is_const<T>::value>>
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE UnwindReturnType unwind() {
|
||||
return UnwindReturnType(BlockHelper::base(*this), BlockHelper::row(*this, 0), BlockHelper::col(*this, 0),
|
||||
this->rows(), this->cols());
|
||||
}
|
||||
};
|
||||
|
||||
// The generic default implementation for dense block simply forward to the internal::BlockImpl_dense
|
||||
// that must be specialized for direct and non-direct access...
|
||||
template <typename XprType, int BlockRows, int BlockCols, bool InnerPanel>
|
||||
class BlockImpl<XprType, BlockRows, BlockCols, InnerPanel, Dense>
|
||||
: public internal::BlockImpl_dense<XprType, BlockRows, BlockCols, InnerPanel> {
|
||||
typedef internal::BlockImpl_dense<XprType, BlockRows, BlockCols, InnerPanel> Impl;
|
||||
typedef typename XprType::StorageIndex StorageIndex;
|
||||
|
||||
public:
|
||||
typedef Impl Base;
|
||||
EIGEN_INHERIT_ASSIGNMENT_OPERATORS(BlockImpl)
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE BlockImpl(XprType& xpr, Index i) : Impl(xpr, i) {}
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE BlockImpl(XprType& xpr, Index startRow, Index startCol)
|
||||
: Impl(xpr, startRow, startCol) {}
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE BlockImpl(XprType& xpr, Index startRow, Index startCol, Index blockRows,
|
||||
Index blockCols)
|
||||
: Impl(xpr, startRow, startCol, blockRows, blockCols) {}
|
||||
};
|
||||
|
||||
namespace internal {
|
||||
|
||||
/** \internal Internal implementation of dense Blocks in the general case. */
|
||||
template <typename XprType, int BlockRows, int BlockCols, bool InnerPanel, bool HasDirectAccess>
|
||||
class BlockImpl_dense : public internal::dense_xpr_base<Block<XprType, BlockRows, BlockCols, InnerPanel>>::type {
|
||||
typedef Block<XprType, BlockRows, BlockCols, InnerPanel> BlockType;
|
||||
typedef typename internal::ref_selector<XprType>::non_const_type XprTypeNested;
|
||||
|
||||
public:
|
||||
typedef typename internal::dense_xpr_base<BlockType>::type Base;
|
||||
EIGEN_DENSE_PUBLIC_INTERFACE(BlockType)
|
||||
EIGEN_INHERIT_ASSIGNMENT_OPERATORS(BlockImpl_dense)
|
||||
|
||||
// class InnerIterator; // FIXME apparently never used
|
||||
|
||||
/** Column or Row constructor
|
||||
*/
|
||||
EIGEN_DEVICE_FUNC inline BlockImpl_dense(XprType& xpr, Index i)
|
||||
: m_xpr(xpr),
|
||||
// It is a row if and only if BlockRows==1 and BlockCols==XprType::ColsAtCompileTime,
|
||||
// and it is a column if and only if BlockRows==XprType::RowsAtCompileTime and BlockCols==1,
|
||||
// all other cases are invalid.
|
||||
        // The case of a 1x1 matrix seems ambiguous, but the result is the same anyway.
|
||||
m_startRow((BlockRows == 1) && (BlockCols == XprType::ColsAtCompileTime) ? i : 0),
|
||||
m_startCol((BlockRows == XprType::RowsAtCompileTime) && (BlockCols == 1) ? i : 0),
|
||||
m_blockRows(BlockRows == 1 ? 1 : xpr.rows()),
|
||||
m_blockCols(BlockCols == 1 ? 1 : xpr.cols()) {}
|
||||
|
||||
/** Fixed-size constructor
|
||||
*/
|
||||
EIGEN_DEVICE_FUNC inline BlockImpl_dense(XprType& xpr, Index startRow, Index startCol)
|
||||
: m_xpr(xpr), m_startRow(startRow), m_startCol(startCol), m_blockRows(BlockRows), m_blockCols(BlockCols) {}
|
||||
|
||||
/** Dynamic-size constructor
|
||||
*/
|
||||
EIGEN_DEVICE_FUNC inline BlockImpl_dense(XprType& xpr, Index startRow, Index startCol, Index blockRows,
|
||||
Index blockCols)
|
||||
: m_xpr(xpr), m_startRow(startRow), m_startCol(startCol), m_blockRows(blockRows), m_blockCols(blockCols) {}
|
||||
|
||||
EIGEN_DEVICE_FUNC inline Index rows() const { return m_blockRows.value(); }
|
||||
EIGEN_DEVICE_FUNC inline Index cols() const { return m_blockCols.value(); }
|
||||
|
||||
EIGEN_DEVICE_FUNC inline Scalar& coeffRef(Index rowId, Index colId) {
|
||||
EIGEN_STATIC_ASSERT_LVALUE(XprType)
|
||||
return m_xpr.coeffRef(rowId + m_startRow.value(), colId + m_startCol.value());
|
||||
}
|
||||
|
||||
EIGEN_DEVICE_FUNC inline const Scalar& coeffRef(Index rowId, Index colId) const {
|
||||
return m_xpr.derived().coeffRef(rowId + m_startRow.value(), colId + m_startCol.value());
|
||||
}
|
||||
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const CoeffReturnType coeff(Index rowId, Index colId) const {
|
||||
return m_xpr.coeff(rowId + m_startRow.value(), colId + m_startCol.value());
|
||||
}
|
||||
|
||||
EIGEN_DEVICE_FUNC inline Scalar& coeffRef(Index index) {
|
||||
EIGEN_STATIC_ASSERT_LVALUE(XprType)
|
||||
return m_xpr.coeffRef(m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index),
|
||||
m_startCol.value() + (RowsAtCompileTime == 1 ? index : 0));
|
||||
}
|
||||
|
||||
EIGEN_DEVICE_FUNC inline const Scalar& coeffRef(Index index) const {
|
||||
return m_xpr.coeffRef(m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index),
|
||||
m_startCol.value() + (RowsAtCompileTime == 1 ? index : 0));
|
||||
}
|
||||
|
||||
EIGEN_DEVICE_FUNC inline const CoeffReturnType coeff(Index index) const {
|
||||
return m_xpr.coeff(m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index),
|
||||
m_startCol.value() + (RowsAtCompileTime == 1 ? index : 0));
|
||||
}
|
||||
|
||||
template <int LoadMode>
|
||||
EIGEN_DEVICE_FUNC inline PacketScalar packet(Index rowId, Index colId) const {
|
||||
return m_xpr.template packet<Unaligned>(rowId + m_startRow.value(), colId + m_startCol.value());
|
||||
}
|
||||
|
||||
template <int LoadMode>
|
||||
EIGEN_DEVICE_FUNC inline void writePacket(Index rowId, Index colId, const PacketScalar& val) {
|
||||
m_xpr.template writePacket<Unaligned>(rowId + m_startRow.value(), colId + m_startCol.value(), val);
|
||||
}
|
||||
|
||||
template <int LoadMode>
|
||||
EIGEN_DEVICE_FUNC inline PacketScalar packet(Index index) const {
|
||||
return m_xpr.template packet<Unaligned>(m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index),
|
||||
m_startCol.value() + (RowsAtCompileTime == 1 ? index : 0));
|
||||
}
|
||||
|
||||
template <int LoadMode>
|
||||
EIGEN_DEVICE_FUNC inline void writePacket(Index index, const PacketScalar& val) {
|
||||
m_xpr.template writePacket<Unaligned>(m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index),
|
||||
m_startCol.value() + (RowsAtCompileTime == 1 ? index : 0), val);
|
||||
}
|
||||
|
||||
#ifdef EIGEN_PARSED_BY_DOXYGEN
|
||||
/** \sa MapBase::data() */
|
||||
EIGEN_DEVICE_FUNC constexpr const Scalar* data() const;
|
||||
EIGEN_DEVICE_FUNC inline Index innerStride() const;
|
||||
EIGEN_DEVICE_FUNC inline Index outerStride() const;
|
||||
#endif
|
||||
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const internal::remove_all_t<XprTypeNested>& nestedExpression() const {
|
||||
return m_xpr;
|
||||
}
|
||||
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE XprType& nestedExpression() { return m_xpr; }
|
||||
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr StorageIndex startRow() const noexcept { return m_startRow.value(); }
|
||||
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr StorageIndex startCol() const noexcept { return m_startCol.value(); }
|
||||
|
||||
protected:
|
||||
XprTypeNested m_xpr;
|
||||
const internal::variable_if_dynamic<StorageIndex, (XprType::RowsAtCompileTime == 1 && BlockRows == 1) ? 0 : Dynamic>
|
||||
m_startRow;
|
||||
const internal::variable_if_dynamic<StorageIndex, (XprType::ColsAtCompileTime == 1 && BlockCols == 1) ? 0 : Dynamic>
|
||||
m_startCol;
|
||||
const internal::variable_if_dynamic<StorageIndex, RowsAtCompileTime> m_blockRows;
|
||||
const internal::variable_if_dynamic<StorageIndex, ColsAtCompileTime> m_blockCols;
|
||||
};
|
||||
|
||||
/** \internal Internal implementation of dense Blocks in the direct access case.*/
|
||||
template <typename XprType, int BlockRows, int BlockCols, bool InnerPanel>
|
||||
class BlockImpl_dense<XprType, BlockRows, BlockCols, InnerPanel, true>
|
||||
: public MapBase<Block<XprType, BlockRows, BlockCols, InnerPanel>> {
|
||||
typedef Block<XprType, BlockRows, BlockCols, InnerPanel> BlockType;
|
||||
typedef typename internal::ref_selector<XprType>::non_const_type XprTypeNested;
|
||||
enum { XprTypeIsRowMajor = (int(traits<XprType>::Flags) & RowMajorBit) != 0 };
|
||||
|
||||
/** \internal Returns base+offset (unless base is null, in which case returns null).
|
||||
* Adding an offset to nullptr is undefined behavior, so we must avoid it.
|
||||
*/
|
||||
template <typename Scalar>
|
||||
EIGEN_DEVICE_FUNC constexpr EIGEN_ALWAYS_INLINE static Scalar* add_to_nullable_pointer(Scalar* base, Index offset) {
|
||||
return base != nullptr ? base + offset : nullptr;
|
||||
}
|
||||
|
||||
public:
|
||||
typedef MapBase<BlockType> Base;
|
||||
EIGEN_DENSE_PUBLIC_INTERFACE(BlockType)
|
||||
EIGEN_INHERIT_ASSIGNMENT_OPERATORS(BlockImpl_dense)
|
||||
|
||||
/** Column or Row constructor
|
||||
*/
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE BlockImpl_dense(XprType& xpr, Index i)
|
||||
: Base((BlockRows == 0 || BlockCols == 0)
|
||||
? nullptr
|
||||
: add_to_nullable_pointer(
|
||||
xpr.data(),
|
||||
i * (((BlockRows == 1) && (BlockCols == XprType::ColsAtCompileTime) && (!XprTypeIsRowMajor)) ||
|
||||
((BlockRows == XprType::RowsAtCompileTime) && (BlockCols == 1) &&
|
||||
(XprTypeIsRowMajor))
|
||||
? xpr.innerStride()
|
||||
: xpr.outerStride())),
|
||||
BlockRows == 1 ? 1 : xpr.rows(), BlockCols == 1 ? 1 : xpr.cols()),
|
||||
m_xpr(xpr),
|
||||
m_startRow((BlockRows == 1) && (BlockCols == XprType::ColsAtCompileTime) ? i : 0),
|
||||
m_startCol((BlockRows == XprType::RowsAtCompileTime) && (BlockCols == 1) ? i : 0) {
|
||||
init();
|
||||
}
|
||||
|
||||
/** Fixed-size constructor
|
||||
*/
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE BlockImpl_dense(XprType& xpr, Index startRow, Index startCol)
|
||||
: Base((BlockRows == 0 || BlockCols == 0)
|
||||
? nullptr
|
||||
: add_to_nullable_pointer(xpr.data(),
|
||||
xpr.innerStride() * (XprTypeIsRowMajor ? startCol : startRow) +
|
||||
xpr.outerStride() * (XprTypeIsRowMajor ? startRow : startCol))),
|
||||
m_xpr(xpr),
|
||||
m_startRow(startRow),
|
||||
m_startCol(startCol) {
|
||||
init();
|
||||
}
|
||||
|
||||
/** Dynamic-size constructor
|
||||
*/
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE BlockImpl_dense(XprType& xpr, Index startRow, Index startCol, Index blockRows,
|
||||
Index blockCols)
|
||||
: Base((blockRows == 0 || blockCols == 0)
|
||||
? nullptr
|
||||
: add_to_nullable_pointer(xpr.data(),
|
||||
xpr.innerStride() * (XprTypeIsRowMajor ? startCol : startRow) +
|
||||
xpr.outerStride() * (XprTypeIsRowMajor ? startRow : startCol)),
|
||||
blockRows, blockCols),
|
||||
m_xpr(xpr),
|
||||
m_startRow(startRow),
|
||||
m_startCol(startCol) {
|
||||
init();
|
||||
}
|
||||
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const internal::remove_all_t<XprTypeNested>& nestedExpression() const noexcept {
|
||||
return m_xpr;
|
||||
}
|
||||
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE XprType& nestedExpression() { return m_xpr; }
|
||||
|
||||
/** \sa MapBase::innerStride() */
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Index innerStride() const noexcept {
|
||||
return internal::traits<BlockType>::HasSameStorageOrderAsXprType ? m_xpr.innerStride() : m_xpr.outerStride();
|
||||
}
|
||||
|
||||
/** \sa MapBase::outerStride() */
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Index outerStride() const noexcept {
|
||||
return internal::traits<BlockType>::HasSameStorageOrderAsXprType ? m_xpr.outerStride() : m_xpr.innerStride();
|
||||
}
|
||||
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr StorageIndex startRow() const noexcept { return m_startRow.value(); }
|
||||
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr StorageIndex startCol() const noexcept { return m_startCol.value(); }
|
||||
|
||||
#ifndef __SUNPRO_CC
|
||||
// FIXME sunstudio is not friendly with the above friend...
|
||||
// META-FIXME there is no 'friend' keyword around here. Is this obsolete?
|
||||
protected:
|
||||
#endif
|
||||
|
||||
#ifndef EIGEN_PARSED_BY_DOXYGEN
|
||||
/** \internal used by allowAligned() */
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE BlockImpl_dense(XprType& xpr, const Scalar* data, Index blockRows,
|
||||
Index blockCols)
|
||||
: Base(data, blockRows, blockCols), m_xpr(xpr) {
|
||||
init();
|
||||
}
|
||||
#endif
|
||||
|
||||
protected:
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void init() {
|
||||
m_outerStride =
|
||||
internal::traits<BlockType>::HasSameStorageOrderAsXprType ? m_xpr.outerStride() : m_xpr.innerStride();
|
||||
}
|
||||
|
||||
XprTypeNested m_xpr;
|
||||
const internal::variable_if_dynamic<StorageIndex, (XprType::RowsAtCompileTime == 1 && BlockRows == 1) ? 0 : Dynamic>
|
||||
m_startRow;
|
||||
const internal::variable_if_dynamic<StorageIndex, (XprType::ColsAtCompileTime == 1 && BlockCols == 1) ? 0 : Dynamic>
|
||||
m_startCol;
|
||||
Index m_outerStride;
|
||||
};
|
||||
|
||||
} // end namespace internal
|
||||
|
||||
} // end namespace Eigen
|
||||
|
||||
#endif // EIGEN_BLOCK_H
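// A short sketch of the Block expression from the user side, assuming
// <Eigen/Dense>: .block() returns a Block<> view into the original matrix,
// so writing through it modifies the underlying storage without any copy.
#include <Eigen/Dense>

int main() {
  Eigen::MatrixXd m = Eigen::MatrixXd::Zero(4, 4);
  // Dynamic-size block: a 2x2 view starting at row 1, column 1.
  Eigen::Block<Eigen::MatrixXd> b = m.block(1, 1, 2, 2);
  b.setOnes();  // writes into m
  // Fixed-size variant of the same kind of view.
  m.block<2, 2>(0, 2) = Eigen::Matrix2d::Identity();
  return (m(1, 1) == 1.0 && m(0, 2) == 1.0) ? 0 : 1;
}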
149
2025.09.22_cpp_with_eigen_package/Eigen/src/Core/CommaInitializer.h
Normal file
@@ -0,0 +1,149 @@
// This file is part of Eigen, a lightweight C++ template library
|
||||
// for linear algebra.
|
||||
//
|
||||
// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
|
||||
// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla
|
||||
// Public License v. 2.0. If a copy of the MPL was not distributed
|
||||
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
#ifndef EIGEN_COMMAINITIALIZER_H
|
||||
#define EIGEN_COMMAINITIALIZER_H
|
||||
|
||||
// IWYU pragma: private
|
||||
#include "./InternalHeaderCheck.h"
|
||||
|
||||
namespace Eigen {
|
||||
|
||||
/** \class CommaInitializer
|
||||
* \ingroup Core_Module
|
||||
*
|
||||
* \brief Helper class used by the comma initializer operator
|
||||
*
|
||||
* This class is internally used to implement the comma initializer feature. It is
|
||||
* the return type of MatrixBase::operator<<, and most of the time this is the only
|
||||
* way it is used.
|
||||
*
|
||||
* \sa \blank \ref MatrixBaseCommaInitRef "MatrixBase::operator<<", CommaInitializer::finished()
|
||||
*/
|
||||
template <typename XprType>
|
||||
struct CommaInitializer {
|
||||
typedef typename XprType::Scalar Scalar;
|
||||
|
||||
EIGEN_DEVICE_FUNC inline CommaInitializer(XprType& xpr, const Scalar& s)
|
||||
: m_xpr(xpr), m_row(0), m_col(1), m_currentBlockRows(1) {
|
||||
eigen_assert(m_xpr.rows() > 0 && m_xpr.cols() > 0 && "Cannot comma-initialize a 0x0 matrix (operator<<)");
|
||||
m_xpr.coeffRef(0, 0) = s;
|
||||
}
|
||||
|
||||
template <typename OtherDerived>
|
||||
EIGEN_DEVICE_FUNC inline CommaInitializer(XprType& xpr, const DenseBase<OtherDerived>& other)
|
||||
: m_xpr(xpr), m_row(0), m_col(other.cols()), m_currentBlockRows(other.rows()) {
|
||||
eigen_assert(m_xpr.rows() >= other.rows() && m_xpr.cols() >= other.cols() &&
|
||||
"Cannot comma-initialize a 0x0 matrix (operator<<)");
|
||||
m_xpr.template block<OtherDerived::RowsAtCompileTime, OtherDerived::ColsAtCompileTime>(0, 0, other.rows(),
|
||||
other.cols()) = other;
|
||||
}
|
||||
|
||||
  /* Copy/Move constructor which transfers ownership. This is crucial in the
   * absence of return value optimization to avoid assertions during destruction. */
|
||||
// FIXME in C++11 mode this could be replaced by a proper RValue constructor
|
||||
EIGEN_DEVICE_FUNC inline CommaInitializer(const CommaInitializer& o)
|
||||
: m_xpr(o.m_xpr), m_row(o.m_row), m_col(o.m_col), m_currentBlockRows(o.m_currentBlockRows) {
|
||||
// Mark original object as finished. In absence of R-value references we need to const_cast:
|
||||
const_cast<CommaInitializer&>(o).m_row = m_xpr.rows();
|
||||
const_cast<CommaInitializer&>(o).m_col = m_xpr.cols();
|
||||
const_cast<CommaInitializer&>(o).m_currentBlockRows = 0;
|
||||
}
|
||||
|
||||
/* inserts a scalar value in the target matrix */
|
||||
EIGEN_DEVICE_FUNC CommaInitializer &operator,(const Scalar& s) {
|
||||
if (m_col == m_xpr.cols()) {
|
||||
m_row += m_currentBlockRows;
|
||||
m_col = 0;
|
||||
m_currentBlockRows = 1;
|
||||
eigen_assert(m_row < m_xpr.rows() && "Too many rows passed to comma initializer (operator<<)");
|
||||
}
|
||||
eigen_assert(m_col < m_xpr.cols() && "Too many coefficients passed to comma initializer (operator<<)");
|
||||
eigen_assert(m_currentBlockRows == 1);
|
||||
m_xpr.coeffRef(m_row, m_col++) = s;
|
||||
return *this;
|
||||
}
|
||||
|
||||
/* inserts a matrix expression in the target matrix */
|
||||
template <typename OtherDerived>
|
||||
EIGEN_DEVICE_FUNC CommaInitializer &operator,(const DenseBase<OtherDerived>& other) {
|
||||
if (m_col == m_xpr.cols() && (other.cols() != 0 || other.rows() != m_currentBlockRows)) {
|
||||
m_row += m_currentBlockRows;
|
||||
m_col = 0;
|
||||
m_currentBlockRows = other.rows();
|
||||
eigen_assert(m_row + m_currentBlockRows <= m_xpr.rows() &&
|
||||
"Too many rows passed to comma initializer (operator<<)");
|
||||
}
|
||||
eigen_assert((m_col + other.cols() <= m_xpr.cols()) &&
|
||||
"Too many coefficients passed to comma initializer (operator<<)");
|
||||
eigen_assert(m_currentBlockRows == other.rows());
|
||||
m_xpr.template block<OtherDerived::RowsAtCompileTime, OtherDerived::ColsAtCompileTime>(m_row, m_col, other.rows(),
|
||||
other.cols()) = other;
|
||||
m_col += other.cols();
|
||||
return *this;
|
||||
}
|
||||
|
||||
EIGEN_DEVICE_FUNC inline ~CommaInitializer()
|
||||
#if defined VERIFY_RAISES_ASSERT && (!defined EIGEN_NO_ASSERTION_CHECKING) && defined EIGEN_EXCEPTIONS
|
||||
noexcept(false) // Eigen::eigen_assert_exception
|
||||
#endif
|
||||
{
|
||||
finished();
|
||||
}
|
||||
|
||||
/** \returns the built matrix once all its coefficients have been set.
|
||||
* Calling finished is 100% optional. Its purpose is to write expressions
|
||||
* like this:
|
||||
* \code
|
||||
* quaternion.fromRotationMatrix((Matrix3f() << axis0, axis1, axis2).finished());
|
||||
* \endcode
|
||||
*/
|
||||
EIGEN_DEVICE_FUNC inline XprType& finished() {
|
||||
eigen_assert(((m_row + m_currentBlockRows) == m_xpr.rows() || m_xpr.cols() == 0) && m_col == m_xpr.cols() &&
|
||||
"Too few coefficients passed to comma initializer (operator<<)");
|
||||
return m_xpr;
|
||||
}
|
||||
|
||||
XprType& m_xpr; // target expression
|
||||
Index m_row; // current row id
|
||||
Index m_col; // current col id
|
||||
Index m_currentBlockRows; // current block height
|
||||
};
|
||||
|
||||
/** \anchor MatrixBaseCommaInitRef
|
||||
* Convenient operator to set the coefficients of a matrix.
|
||||
*
|
||||
* The coefficients must be provided in a row major order and exactly match
|
||||
* the size of the matrix. Otherwise an assertion is raised.
|
||||
*
|
||||
* Example: \include MatrixBase_set.cpp
|
||||
* Output: \verbinclude MatrixBase_set.out
|
||||
*
|
||||
 * \note According to the C++ standard, the argument expressions of this comma initializer are evaluated in arbitrary
|
||||
* order.
|
||||
*
|
||||
* \sa CommaInitializer::finished(), class CommaInitializer
|
||||
*/
|
||||
template <typename Derived>
|
||||
EIGEN_DEVICE_FUNC inline CommaInitializer<Derived> DenseBase<Derived>::operator<<(const Scalar& s) {
|
||||
return CommaInitializer<Derived>(*static_cast<Derived*>(this), s);
|
||||
}
|
||||
|
||||
/** \sa operator<<(const Scalar&) */
|
||||
template <typename Derived>
|
||||
template <typename OtherDerived>
|
||||
EIGEN_DEVICE_FUNC inline CommaInitializer<Derived> DenseBase<Derived>::operator<<(
|
||||
const DenseBase<OtherDerived>& other) {
|
||||
return CommaInitializer<Derived>(*static_cast<Derived*>(this), other);
|
||||
}
|
||||
|
||||
} // end namespace Eigen
|
||||
|
||||
#endif // EIGEN_COMMAINITIALIZER_H
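// A minimal sketch of the comma initializer in practice, assuming
// <Eigen/Dense>: coefficients are consumed row by row, must exactly fill the
// matrix, and .finished() returns the completed expression so it can be used
// inline.
#include <Eigen/Dense>

int main() {
  Eigen::Matrix3d m;
  m << 1, 2, 3,
       4, 5, 6,
       7, 8, 9;  // an assertion fires if too few or too many values are given
  Eigen::Vector3d v = (Eigen::Vector3d() << 1, 2, 3).finished();
  return (m(2, 2) == 9.0 && v(1) == 2.0) ? 0 : 1;
}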
173
2025.09.22_cpp_with_eigen_package/Eigen/src/Core/ConditionEstimator.h
Normal file
@@ -0,0 +1,173 @@
// This file is part of Eigen, a lightweight C++ template library
|
||||
// for linear algebra.
|
||||
//
|
||||
// Copyright (C) 2016 Rasmus Munk Larsen (rmlarsen@google.com)
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla
|
||||
// Public License v. 2.0. If a copy of the MPL was not distributed
|
||||
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
#ifndef EIGEN_CONDITIONESTIMATOR_H
|
||||
#define EIGEN_CONDITIONESTIMATOR_H
|
||||
|
||||
// IWYU pragma: private
|
||||
#include "./InternalHeaderCheck.h"
|
||||
|
||||
namespace Eigen {
|
||||
|
||||
namespace internal {
|
||||
|
||||
template <typename Vector, typename RealVector, bool IsComplex>
|
||||
struct rcond_compute_sign {
|
||||
static inline Vector run(const Vector& v) {
|
||||
const RealVector v_abs = v.cwiseAbs();
|
||||
return (v_abs.array() == static_cast<typename Vector::RealScalar>(0))
|
||||
.select(Vector::Ones(v.size()), v.cwiseQuotient(v_abs));
|
||||
}
|
||||
};
|
||||
|
||||
// Partial specialization to avoid elementwise division for real vectors.
|
||||
template <typename Vector>
|
||||
struct rcond_compute_sign<Vector, Vector, false> {
|
||||
static inline Vector run(const Vector& v) {
|
||||
return (v.array() < static_cast<typename Vector::RealScalar>(0))
|
||||
.select(-Vector::Ones(v.size()), Vector::Ones(v.size()));
|
||||
}
|
||||
};
|
||||
|
||||
/**
|
||||
* \returns an estimate of ||inv(matrix)||_1 given a decomposition of
|
||||
* \a matrix that implements .solve() and .adjoint().solve() methods.
|
||||
*
|
||||
* This function implements Algorithms 4.1 and 5.1 from
|
||||
* http://www.maths.manchester.ac.uk/~higham/narep/narep135.pdf
|
||||
* which also forms the basis for the condition number estimators in
|
||||
* LAPACK. Since at most 10 calls to the solve method of dec are
|
||||
* performed, the total cost is O(dims^2), as opposed to O(dims^3)
|
||||
* needed to compute the inverse matrix explicitly.
|
||||
*
|
||||
* The most common usage is in estimating the condition number
|
||||
* ||matrix||_1 * ||inv(matrix)||_1. The first term ||matrix||_1 can be
|
||||
* computed directly in O(n^2) operations.
|
||||
*
|
||||
* Supports the following decompositions: FullPivLU, PartialPivLU, LDLT, and
|
||||
* LLT.
|
||||
*
|
||||
* \sa FullPivLU, PartialPivLU, LDLT, LLT.
|
||||
*/
|
||||
template <typename Decomposition>
|
||||
typename Decomposition::RealScalar rcond_invmatrix_L1_norm_estimate(const Decomposition& dec) {
|
||||
typedef typename Decomposition::MatrixType MatrixType;
|
||||
typedef typename Decomposition::Scalar Scalar;
|
||||
typedef typename Decomposition::RealScalar RealScalar;
|
||||
typedef typename internal::plain_col_type<MatrixType>::type Vector;
|
||||
typedef typename internal::plain_col_type<MatrixType, RealScalar>::type RealVector;
|
||||
const bool is_complex = (NumTraits<Scalar>::IsComplex != 0);
|
||||
|
||||
eigen_assert(dec.rows() == dec.cols());
|
||||
const Index n = dec.rows();
|
||||
if (n == 0) return 0;
|
||||
|
||||
// Disable Index to float conversion warning
|
||||
#ifdef __INTEL_COMPILER
|
||||
#pragma warning push
|
||||
#pragma warning(disable : 2259)
|
||||
#endif
|
||||
Vector v = dec.solve(Vector::Ones(n) / Scalar(n));
|
||||
#ifdef __INTEL_COMPILER
|
||||
#pragma warning pop
|
||||
#endif
|
||||
|
||||
// lower_bound is a lower bound on
|
||||
// ||inv(matrix)||_1 = sup_v ||inv(matrix) v||_1 / ||v||_1
|
||||
// and is the objective maximized by the ("super-") gradient ascent
|
||||
// algorithm below.
|
||||
RealScalar lower_bound = v.template lpNorm<1>();
|
||||
if (n == 1) return lower_bound;
|
||||
|
||||
// Gradient ascent algorithm follows: We know that the optimum is achieved at
|
||||
// one of the simplices v = e_i, so in each iteration we follow a
|
||||
// super-gradient to move towards the optimal one.
|
||||
RealScalar old_lower_bound = lower_bound;
|
||||
Vector sign_vector(n);
|
||||
Vector old_sign_vector;
|
||||
Index v_max_abs_index = -1;
|
||||
Index old_v_max_abs_index = v_max_abs_index;
|
||||
for (int k = 0; k < 4; ++k) {
|
||||
sign_vector = internal::rcond_compute_sign<Vector, RealVector, is_complex>::run(v);
|
||||
if (k > 0 && !is_complex && sign_vector == old_sign_vector) {
|
||||
// Break if the solution stagnated.
|
||||
break;
|
||||
}
|
||||
// v_max_abs_index = argmax |real( inv(matrix)^T * sign_vector )|
|
||||
v = dec.adjoint().solve(sign_vector);
|
||||
v.real().cwiseAbs().maxCoeff(&v_max_abs_index);
|
||||
if (v_max_abs_index == old_v_max_abs_index) {
|
||||
// Break if the solution stagnated.
|
||||
break;
|
||||
}
|
||||
// Move to the new simplex e_j, where j = v_max_abs_index.
|
||||
v = dec.solve(Vector::Unit(n, v_max_abs_index)); // v = inv(matrix) * e_j.
|
||||
lower_bound = v.template lpNorm<1>();
|
||||
if (lower_bound <= old_lower_bound) {
|
||||
// Break if the gradient step did not increase the lower_bound.
|
||||
break;
|
||||
}
|
||||
if (!is_complex) {
|
||||
old_sign_vector = sign_vector;
|
||||
}
|
||||
old_v_max_abs_index = v_max_abs_index;
|
||||
old_lower_bound = lower_bound;
|
||||
}
|
||||
// The following calculates an independent estimate of ||matrix||_1 by
|
||||
// multiplying matrix by a vector with entries of slowly increasing
|
||||
// magnitude and alternating sign:
|
||||
// v_i = (-1)^{i} (1 + (i / (dim-1))), i = 0,...,dim-1.
|
||||
// This improvement to Hager's algorithm above is due to Higham. It was
|
||||
// added to make the algorithm more robust in certain corner cases where
|
||||
// large elements in the matrix might otherwise escape detection due to
|
||||
// exact cancellation (especially when op and op_adjoint correspond to a
|
||||
// sequence of backsubstitutions and permutations), which could cause
|
||||
// Hager's algorithm to vastly underestimate ||matrix||_1.
|
||||
Scalar alternating_sign(RealScalar(1));
|
||||
for (Index i = 0; i < n; ++i) {
|
||||
// The static_cast is needed when Scalar is a complex and RealScalar implements expression templates
|
||||
v[i] = alternating_sign * static_cast<RealScalar>(RealScalar(1) + (RealScalar(i) / (RealScalar(n - 1))));
|
||||
alternating_sign = -alternating_sign;
|
||||
}
|
||||
v = dec.solve(v);
|
||||
const RealScalar alternate_lower_bound = (2 * v.template lpNorm<1>()) / (3 * RealScalar(n));
|
||||
return numext::maxi(lower_bound, alternate_lower_bound);
|
||||
}
|
||||
|
||||
/** \brief Reciprocal condition number estimator.
|
||||
*
|
||||
* Computing a decomposition of a dense matrix takes O(n^3) operations, while
|
||||
* this method estimates the condition number quickly and reliably in O(n^2)
|
||||
* operations.
|
||||
*
|
||||
* \returns an estimate of the reciprocal condition number
|
||||
* (1 / (||matrix||_1 * ||inv(matrix)||_1)) of matrix, given ||matrix||_1 and
|
||||
* its decomposition. Supports the following decompositions: FullPivLU,
|
||||
* PartialPivLU, LDLT, and LLT.
|
||||
*
|
||||
* \sa FullPivLU, PartialPivLU, LDLT, LLT.
|
||||
*/
|
||||
template <typename Decomposition>
|
||||
typename Decomposition::RealScalar rcond_estimate_helper(typename Decomposition::RealScalar matrix_norm,
|
||||
const Decomposition& dec) {
|
||||
typedef typename Decomposition::RealScalar RealScalar;
|
||||
eigen_assert(dec.rows() == dec.cols());
|
||||
if (dec.rows() == 0) return NumTraits<RealScalar>::infinity();
|
||||
if (numext::is_exactly_zero(matrix_norm)) return RealScalar(0);
|
||||
if (dec.rows() == 1) return RealScalar(1);
|
||||
const RealScalar inverse_matrix_norm = rcond_invmatrix_L1_norm_estimate(dec);
|
||||
return (numext::is_exactly_zero(inverse_matrix_norm) ? RealScalar(0)
|
||||
: (RealScalar(1) / inverse_matrix_norm) / matrix_norm);
|
||||
}
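/* A minimal usage sketch, assuming <Eigen/Dense> is available and the matrix is
 * square: in recent Eigen versions the public rcond() members of PartialPivLU,
 * FullPivLU, LDLT and LLT forward to rcond_estimate_helper() with the cached
 * L1 norm of the original matrix, so the whole estimate stays O(n^2).
 *
 * \code
 * #include <Eigen/Dense>
 * #include <iostream>
 *
 * int main() {
 *   Eigen::MatrixXd A = Eigen::MatrixXd::Random(100, 100);
 *   Eigen::PartialPivLU<Eigen::MatrixXd> lu(A);
 *   std::cout << "estimated reciprocal condition number: " << lu.rcond() << std::endl;
 * }
 * \endcode
 */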
|
||||
|
||||
} // namespace internal
|
||||
|
||||
} // namespace Eigen
|
||||
|
||||
#endif
|
||||
2007
2025.09.22_cpp_with_eigen_package/Eigen/src/Core/CoreEvaluators.h
Normal file
2007
2025.09.22_cpp_with_eigen_package/Eigen/src/Core/CoreEvaluators.h
Normal file
File diff suppressed because it is too large
141
2025.09.22_cpp_with_eigen_package/Eigen/src/Core/CoreIterators.h
Normal file
141
2025.09.22_cpp_with_eigen_package/Eigen/src/Core/CoreIterators.h
Normal file
@@ -0,0 +1,141 @@
|
||||
// This file is part of Eigen, a lightweight C++ template library
|
||||
// for linear algebra.
|
||||
//
|
||||
// Copyright (C) 2008-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla
|
||||
// Public License v. 2.0. If a copy of the MPL was not distributed
|
||||
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
#ifndef EIGEN_COREITERATORS_H
|
||||
#define EIGEN_COREITERATORS_H
|
||||
|
||||
// IWYU pragma: private
|
||||
#include "./InternalHeaderCheck.h"
|
||||
|
||||
namespace Eigen {
|
||||
|
||||
/* This file contains the respective InnerIterator definition of the expressions defined in Eigen/Core
|
||||
*/
|
||||
|
||||
namespace internal {
|
||||
|
||||
template <typename XprType, typename EvaluatorKind>
|
||||
class inner_iterator_selector;
|
||||
|
||||
}
|
||||
|
||||
/** \class InnerIterator
|
||||
* \brief An InnerIterator allows looping over the elements of any matrix expression.
|
||||
*
|
||||
* \warning To be used with care, because an evaluator is constructed every time an InnerIterator is
|
||||
* constructed.
|
||||
*
|
||||
* A usage example is sketched right after the class definition below.
|
||||
*/
|
||||
template <typename XprType>
|
||||
class InnerIterator {
|
||||
protected:
|
||||
typedef internal::inner_iterator_selector<XprType, typename internal::evaluator_traits<XprType>::Kind> IteratorType;
|
||||
typedef internal::evaluator<XprType> EvaluatorType;
|
||||
typedef typename internal::traits<XprType>::Scalar Scalar;
|
||||
|
||||
public:
|
||||
/** Construct an iterator over the \a outerId -th row or column of \a xpr */
|
||||
InnerIterator(const XprType &xpr, const Index &outerId) : m_eval(xpr), m_iter(m_eval, outerId, xpr.innerSize()) {}
|
||||
|
||||
/// \returns the value of the current coefficient.
|
||||
EIGEN_STRONG_INLINE Scalar value() const { return m_iter.value(); }
|
||||
/** Increment the iterator \c *this to the next non-zero coefficient.
|
||||
* Explicit zeros are not skipped over. To skip explicit zeros, see class SparseView
|
||||
*/
|
||||
EIGEN_STRONG_INLINE InnerIterator &operator++() {
|
||||
m_iter.operator++();
|
||||
return *this;
|
||||
}
|
||||
EIGEN_STRONG_INLINE InnerIterator &operator+=(Index i) {
|
||||
m_iter.operator+=(i);
|
||||
return *this;
|
||||
}
|
||||
EIGEN_STRONG_INLINE InnerIterator operator+(Index i) {
|
||||
InnerIterator result(*this);
|
||||
result += i;
|
||||
return result;
|
||||
}
|
||||
|
||||
/// \returns the column or row index of the current coefficient.
|
||||
EIGEN_STRONG_INLINE Index index() const { return m_iter.index(); }
|
||||
/// \returns the row index of the current coefficient.
|
||||
EIGEN_STRONG_INLINE Index row() const { return m_iter.row(); }
|
||||
/// \returns the column index of the current coefficient.
|
||||
EIGEN_STRONG_INLINE Index col() const { return m_iter.col(); }
|
||||
/// \returns \c true if the iterator \c *this still references a valid coefficient.
|
||||
EIGEN_STRONG_INLINE operator bool() const { return m_iter; }
|
||||
|
||||
protected:
|
||||
EvaluatorType m_eval;
|
||||
IteratorType m_iter;
|
||||
|
||||
private:
|
||||
// If you get here, then you're not using the right InnerIterator type, e.g.:
|
||||
// SparseMatrix<double,RowMajor> A;
|
||||
// SparseMatrix<double>::InnerIterator it(A,0);
|
||||
template <typename T>
|
||||
InnerIterator(const EigenBase<T> &, Index outer);
|
||||
};
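/* A minimal usage sketch, assuming <Eigen/Dense>: iterate over the coefficients of
 * one inner vector (a column, for the default column-major storage) of a dense
 * expression. For sparse matrices, prefer SparseMatrix<...>::InnerIterator instead.
 *
 * \code
 * #include <Eigen/Dense>
 * #include <iostream>
 *
 * int main() {
 *   Eigen::Matrix3d m = Eigen::Matrix3d::Random();
 *   for (Eigen::InnerIterator<Eigen::Matrix3d> it(m, 0); it; ++it)
 *     std::cout << "(" << it.row() << ", " << it.col() << ") = " << it.value() << "\n";
 * }
 * \endcode
 */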
|
||||
|
||||
namespace internal {
|
||||
|
||||
// Generic inner iterator implementation for dense objects
|
||||
template <typename XprType>
|
||||
class inner_iterator_selector<XprType, IndexBased> {
|
||||
protected:
|
||||
typedef evaluator<XprType> EvaluatorType;
|
||||
typedef typename traits<XprType>::Scalar Scalar;
|
||||
enum { IsRowMajor = (XprType::Flags & RowMajorBit) == RowMajorBit };
|
||||
|
||||
public:
|
||||
EIGEN_STRONG_INLINE inner_iterator_selector(const EvaluatorType &eval, const Index &outerId, const Index &innerSize)
|
||||
: m_eval(eval), m_inner(0), m_outer(outerId), m_end(innerSize) {}
|
||||
|
||||
EIGEN_STRONG_INLINE Scalar value() const {
|
||||
return (IsRowMajor) ? m_eval.coeff(m_outer, m_inner) : m_eval.coeff(m_inner, m_outer);
|
||||
}
|
||||
|
||||
EIGEN_STRONG_INLINE inner_iterator_selector &operator++() {
|
||||
m_inner++;
|
||||
return *this;
|
||||
}
|
||||
|
||||
EIGEN_STRONG_INLINE Index index() const { return m_inner; }
|
||||
inline Index row() const { return IsRowMajor ? m_outer : index(); }
|
||||
inline Index col() const { return IsRowMajor ? index() : m_outer; }
|
||||
|
||||
EIGEN_STRONG_INLINE operator bool() const { return m_inner < m_end && m_inner >= 0; }
|
||||
|
||||
protected:
|
||||
const EvaluatorType &m_eval;
|
||||
Index m_inner;
|
||||
const Index m_outer;
|
||||
const Index m_end;
|
||||
};
|
||||
|
||||
// For iterator-based evaluator, inner-iterator is already implemented as
|
||||
// evaluator<>::InnerIterator
|
||||
template <typename XprType>
|
||||
class inner_iterator_selector<XprType, IteratorBased> : public evaluator<XprType>::InnerIterator {
|
||||
protected:
|
||||
typedef typename evaluator<XprType>::InnerIterator Base;
|
||||
typedef evaluator<XprType> EvaluatorType;
|
||||
|
||||
public:
|
||||
EIGEN_STRONG_INLINE inner_iterator_selector(const EvaluatorType &eval, const Index &outerId,
|
||||
const Index & /*innerSize*/)
|
||||
: Base(eval, outerId) {}
|
||||
};
|
||||
|
||||
} // end namespace internal
|
||||
|
||||
} // end namespace Eigen
|
||||
|
||||
#endif // EIGEN_COREITERATORS_H
|
||||
166
2025.09.22_cpp_with_eigen_package/Eigen/src/Core/CwiseBinaryOp.h
Normal file
166
2025.09.22_cpp_with_eigen_package/Eigen/src/Core/CwiseBinaryOp.h
Normal file
@@ -0,0 +1,166 @@
|
||||
// This file is part of Eigen, a lightweight C++ template library
|
||||
// for linear algebra.
|
||||
//
|
||||
// Copyright (C) 2008-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
|
||||
// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla
|
||||
// Public License v. 2.0. If a copy of the MPL was not distributed
|
||||
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
#ifndef EIGEN_CWISE_BINARY_OP_H
|
||||
#define EIGEN_CWISE_BINARY_OP_H
|
||||
|
||||
// IWYU pragma: private
|
||||
#include "./InternalHeaderCheck.h"
|
||||
|
||||
namespace Eigen {
|
||||
|
||||
namespace internal {
|
||||
template <typename BinaryOp, typename Lhs, typename Rhs>
|
||||
struct traits<CwiseBinaryOp<BinaryOp, Lhs, Rhs>> {
|
||||
// we must not inherit from traits<Lhs> since it has
|
||||
// the potential to cause problems with MSVC
|
||||
typedef remove_all_t<Lhs> Ancestor;
|
||||
typedef typename traits<Ancestor>::XprKind XprKind;
|
||||
enum {
|
||||
RowsAtCompileTime = traits<Ancestor>::RowsAtCompileTime,
|
||||
ColsAtCompileTime = traits<Ancestor>::ColsAtCompileTime,
|
||||
MaxRowsAtCompileTime = traits<Ancestor>::MaxRowsAtCompileTime,
|
||||
MaxColsAtCompileTime = traits<Ancestor>::MaxColsAtCompileTime
|
||||
};
|
||||
|
||||
// even though we require Lhs and Rhs to have the same scalar type (see CwiseBinaryOp constructor),
|
||||
// we still want to handle the case when the result type is different.
|
||||
typedef typename result_of<BinaryOp(const typename Lhs::Scalar&, const typename Rhs::Scalar&)>::type Scalar;
|
||||
typedef typename cwise_promote_storage_type<typename traits<Lhs>::StorageKind, typename traits<Rhs>::StorageKind,
|
||||
BinaryOp>::ret StorageKind;
|
||||
typedef typename promote_index_type<typename traits<Lhs>::StorageIndex, typename traits<Rhs>::StorageIndex>::type
|
||||
StorageIndex;
|
||||
typedef typename Lhs::Nested LhsNested;
|
||||
typedef typename Rhs::Nested RhsNested;
|
||||
typedef std::remove_reference_t<LhsNested> LhsNested_;
|
||||
typedef std::remove_reference_t<RhsNested> RhsNested_;
|
||||
enum {
|
||||
Flags = cwise_promote_storage_order<typename traits<Lhs>::StorageKind, typename traits<Rhs>::StorageKind,
|
||||
LhsNested_::Flags & RowMajorBit, RhsNested_::Flags & RowMajorBit>::value
|
||||
};
|
||||
};
|
||||
} // end namespace internal
|
||||
|
||||
template <typename BinaryOp, typename Lhs, typename Rhs, typename StorageKind>
|
||||
class CwiseBinaryOpImpl;
|
||||
|
||||
/** \class CwiseBinaryOp
|
||||
* \ingroup Core_Module
|
||||
*
|
||||
* \brief Generic expression where a coefficient-wise binary operator is applied to two expressions
|
||||
*
|
||||
* \tparam BinaryOp template functor implementing the operator
|
||||
* \tparam LhsType the type of the left-hand side
|
||||
* \tparam RhsType the type of the right-hand side
|
||||
*
|
||||
* This class represents an expression where a coefficient-wise binary operator is applied to two expressions.
|
||||
* It is the return type of binary operators, by which we mean only those binary operators where
|
||||
* both the left-hand side and the right-hand side are Eigen expressions.
|
||||
* For example, the return type of matrix1+matrix2 is a CwiseBinaryOp.
|
||||
*
|
||||
* Most of the time, this is the only way that it is used, so you typically don't have to name
|
||||
* CwiseBinaryOp types explicitly.
|
||||
*
|
||||
* \sa MatrixBase::binaryExpr(const MatrixBase<OtherDerived> &,const CustomBinaryOp &) const, class CwiseUnaryOp, class
|
||||
* CwiseNullaryOp
|
||||
*/
|
||||
template <typename BinaryOp, typename LhsType, typename RhsType>
|
||||
class CwiseBinaryOp : public CwiseBinaryOpImpl<BinaryOp, LhsType, RhsType,
|
||||
typename internal::cwise_promote_storage_type<
|
||||
typename internal::traits<LhsType>::StorageKind,
|
||||
typename internal::traits<RhsType>::StorageKind, BinaryOp>::ret>,
|
||||
internal::no_assignment_operator {
|
||||
public:
|
||||
typedef internal::remove_all_t<BinaryOp> Functor;
|
||||
typedef internal::remove_all_t<LhsType> Lhs;
|
||||
typedef internal::remove_all_t<RhsType> Rhs;
|
||||
|
||||
typedef typename CwiseBinaryOpImpl<
|
||||
BinaryOp, LhsType, RhsType,
|
||||
typename internal::cwise_promote_storage_type<typename internal::traits<LhsType>::StorageKind,
|
||||
typename internal::traits<Rhs>::StorageKind, BinaryOp>::ret>::Base
|
||||
Base;
|
||||
EIGEN_GENERIC_PUBLIC_INTERFACE(CwiseBinaryOp)
|
||||
|
||||
EIGEN_CHECK_BINARY_COMPATIBILIY(BinaryOp, typename Lhs::Scalar, typename Rhs::Scalar)
|
||||
EIGEN_STATIC_ASSERT_SAME_MATRIX_SIZE(Lhs, Rhs)
|
||||
|
||||
typedef typename internal::ref_selector<LhsType>::type LhsNested;
|
||||
typedef typename internal::ref_selector<RhsType>::type RhsNested;
|
||||
typedef std::remove_reference_t<LhsNested> LhsNested_;
|
||||
typedef std::remove_reference_t<RhsNested> RhsNested_;
|
||||
|
||||
#if EIGEN_COMP_MSVC
|
||||
// Required for Visual Studio, otherwise the copy constructor will probably not get inlined!
|
||||
EIGEN_STRONG_INLINE CwiseBinaryOp(const CwiseBinaryOp<BinaryOp, LhsType, RhsType>&) = default;
|
||||
#endif
|
||||
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CwiseBinaryOp(const Lhs& aLhs, const Rhs& aRhs,
|
||||
const BinaryOp& func = BinaryOp())
|
||||
: m_lhs(aLhs), m_rhs(aRhs), m_functor(func) {
|
||||
eigen_assert(aLhs.rows() == aRhs.rows() && aLhs.cols() == aRhs.cols());
|
||||
}
|
||||
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Index rows() const noexcept {
|
||||
// return the fixed size type if available to enable compile time optimizations
|
||||
return internal::traits<internal::remove_all_t<LhsNested>>::RowsAtCompileTime == Dynamic ? m_rhs.rows()
|
||||
: m_lhs.rows();
|
||||
}
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Index cols() const noexcept {
|
||||
// return the fixed size type if available to enable compile time optimizations
|
||||
return internal::traits<internal::remove_all_t<LhsNested>>::ColsAtCompileTime == Dynamic ? m_rhs.cols()
|
||||
: m_lhs.cols();
|
||||
}
|
||||
|
||||
/** \returns the left hand side nested expression */
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const LhsNested_& lhs() const { return m_lhs; }
|
||||
/** \returns the right hand side nested expression */
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const RhsNested_& rhs() const { return m_rhs; }
|
||||
/** \returns the functor representing the binary operation */
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const BinaryOp& functor() const { return m_functor; }
|
||||
|
||||
protected:
|
||||
LhsNested m_lhs;
|
||||
RhsNested m_rhs;
|
||||
const BinaryOp m_functor;
|
||||
};
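/* A minimal usage sketch, assuming <Eigen/Dense>: both the built-in operators and
 * binaryExpr() with a custom functor return a CwiseBinaryOp expression, which is
 * only evaluated when it is assigned to a concrete matrix or array.
 *
 * \code
 * #include <Eigen/Dense>
 * #include <iostream>
 *
 * int main() {
 *   Eigen::ArrayXd a = Eigen::ArrayXd::LinSpaced(5, 0.0, 4.0);
 *   Eigen::ArrayXd b = Eigen::ArrayXd::Constant(5, 10.0);
 *   Eigen::ArrayXd sum = a + b;  // a + b is a coefficient-wise CwiseBinaryOp
 *   Eigen::ArrayXd custom = a.binaryExpr(b, [](double x, double y) { return x * y + 1.0; });
 *   std::cout << sum.transpose() << "\n" << custom.transpose() << std::endl;
 * }
 * \endcode
 */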
|
||||
|
||||
// Generic API dispatcher
|
||||
template <typename BinaryOp, typename Lhs, typename Rhs, typename StorageKind>
|
||||
class CwiseBinaryOpImpl : public internal::generic_xpr_base<CwiseBinaryOp<BinaryOp, Lhs, Rhs>>::type {
|
||||
public:
|
||||
typedef typename internal::generic_xpr_base<CwiseBinaryOp<BinaryOp, Lhs, Rhs>>::type Base;
|
||||
};
|
||||
|
||||
/** replaces \c *this by \c *this - \a other.
|
||||
*
|
||||
* \returns a reference to \c *this
|
||||
*/
|
||||
template <typename Derived>
|
||||
template <typename OtherDerived>
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& MatrixBase<Derived>::operator-=(const MatrixBase<OtherDerived>& other) {
|
||||
call_assignment(derived(), other.derived(), internal::sub_assign_op<Scalar, typename OtherDerived::Scalar>());
|
||||
return derived();
|
||||
}
|
||||
|
||||
/** replaces \c *this by \c *this + \a other.
|
||||
*
|
||||
* \returns a reference to \c *this
|
||||
*/
|
||||
template <typename Derived>
|
||||
template <typename OtherDerived>
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& MatrixBase<Derived>::operator+=(const MatrixBase<OtherDerived>& other) {
|
||||
call_assignment(derived(), other.derived(), internal::add_assign_op<Scalar, typename OtherDerived::Scalar>());
|
||||
return derived();
|
||||
}
|
||||
|
||||
} // end namespace Eigen
|
||||
|
||||
#endif // EIGEN_CWISE_BINARY_OP_H
|
||||
977
2025.09.22_cpp_with_eigen_package/Eigen/src/Core/CwiseNullaryOp.h
Normal file
977
2025.09.22_cpp_with_eigen_package/Eigen/src/Core/CwiseNullaryOp.h
Normal file
@@ -0,0 +1,977 @@
|
||||
// This file is part of Eigen, a lightweight C++ template library
|
||||
// for linear algebra.
|
||||
//
|
||||
// Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla
|
||||
// Public License v. 2.0. If a copy of the MPL was not distributed
|
||||
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
#ifndef EIGEN_CWISE_NULLARY_OP_H
|
||||
#define EIGEN_CWISE_NULLARY_OP_H
|
||||
|
||||
// IWYU pragma: private
|
||||
#include "./InternalHeaderCheck.h"
|
||||
|
||||
namespace Eigen {
|
||||
|
||||
namespace internal {
|
||||
template <typename NullaryOp, typename PlainObjectType>
|
||||
struct traits<CwiseNullaryOp<NullaryOp, PlainObjectType> > : traits<PlainObjectType> {
|
||||
enum { Flags = traits<PlainObjectType>::Flags & RowMajorBit };
|
||||
};
|
||||
|
||||
} // namespace internal
|
||||
|
||||
/** \class CwiseNullaryOp
|
||||
* \ingroup Core_Module
|
||||
*
|
||||
* \brief Generic expression of a matrix where all coefficients are defined by a functor
|
||||
*
|
||||
* \tparam NullaryOp template functor implementing the operator
|
||||
* \tparam PlainObjectType the underlying plain matrix/array type
|
||||
*
|
||||
* This class represents an expression of a generic nullary operator.
|
||||
* It is the return type of the Ones(), Zero(), Constant(), Identity() and Random() methods,
|
||||
* and most of the time this is the only way it is used.
|
||||
*
|
||||
* However, if you want to write a function returning such an expression, you
|
||||
* will need to use this class.
|
||||
*
|
||||
* The functor NullaryOp must expose one of the following method:
|
||||
<table class="manual">
|
||||
<tr ><td>\c operator()() </td><td>if the procedural generation does not depend on the coefficient entries
|
||||
(e.g., random numbers)</td></tr> <tr class="alt"><td>\c operator()(Index i)</td><td>if the procedural generation makes
|
||||
sense for vectors only and depends on the coefficient index \c i (e.g., linspace) </td></tr> <tr ><td>\c
|
||||
operator()(Index i,Index j)</td><td>if the procedural generation depends on the matrix coordinates \c i, \c j (e.g.,
|
||||
to generate a checkerboard with 0 and 1)</td></tr>
|
||||
</table>
|
||||
* It is also possible to expose the last two operators if the generation makes sense for matrices but can be optimized
|
||||
for vectors.
|
||||
*
|
||||
* See DenseBase::NullaryExpr(Index,const CustomNullaryOp&) for an example binding
|
||||
* C++11 random number generators.
|
||||
*
|
||||
* A nullary expression can also be used to implement custom sophisticated matrix manipulations
|
||||
* that cannot be covered by the existing set of natively supported matrix manipulations.
|
||||
* See this \ref TopicCustomizing_NullaryExpr "page" for some examples and additional explanations
|
||||
* on the behavior of CwiseNullaryOp.
|
||||
*
|
||||
* \sa class CwiseUnaryOp, class CwiseBinaryOp, DenseBase::NullaryExpr
|
||||
*/
|
||||
template <typename NullaryOp, typename PlainObjectType>
|
||||
class CwiseNullaryOp : public internal::dense_xpr_base<CwiseNullaryOp<NullaryOp, PlainObjectType> >::type,
|
||||
internal::no_assignment_operator {
|
||||
public:
|
||||
typedef typename internal::dense_xpr_base<CwiseNullaryOp>::type Base;
|
||||
EIGEN_DENSE_PUBLIC_INTERFACE(CwiseNullaryOp)
|
||||
|
||||
EIGEN_DEVICE_FUNC CwiseNullaryOp(Index rows, Index cols, const NullaryOp& func = NullaryOp())
|
||||
: m_rows(rows), m_cols(cols), m_functor(func) {
|
||||
eigen_assert(rows >= 0 && (RowsAtCompileTime == Dynamic || RowsAtCompileTime == rows) && cols >= 0 &&
|
||||
(ColsAtCompileTime == Dynamic || ColsAtCompileTime == cols));
|
||||
}
|
||||
EIGEN_DEVICE_FUNC CwiseNullaryOp(Index size, const NullaryOp& func = NullaryOp())
|
||||
: CwiseNullaryOp(RowsAtCompileTime == 1 ? 1 : size, RowsAtCompileTime == 1 ? size : 1, func) {
|
||||
EIGEN_STATIC_ASSERT(CwiseNullaryOp::IsVectorAtCompileTime, YOU_TRIED_CALLING_A_VECTOR_METHOD_ON_A_MATRIX);
|
||||
}
|
||||
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Index rows() const { return m_rows.value(); }
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Index cols() const { return m_cols.value(); }
|
||||
|
||||
/** \returns the functor representing the nullary operation */
|
||||
EIGEN_DEVICE_FUNC const NullaryOp& functor() const { return m_functor; }
|
||||
|
||||
protected:
|
||||
const internal::variable_if_dynamic<Index, RowsAtCompileTime> m_rows;
|
||||
const internal::variable_if_dynamic<Index, ColsAtCompileTime> m_cols;
|
||||
const NullaryOp m_functor;
|
||||
};
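/* A minimal usage sketch, assuming <Eigen/Dense>: a custom functor that exposes
 * operator()(Index,Index) can be turned into an expression with NullaryExpr(),
 * e.g. the 0/1 checkerboard mentioned in the table above.
 *
 * \code
 * #include <Eigen/Dense>
 * #include <iostream>
 *
 * struct Checkerboard {
 *   double operator()(Eigen::Index i, Eigen::Index j) const { return double((i + j) % 2); }
 * };
 *
 * int main() {
 *   Eigen::MatrixXd board = Eigen::MatrixXd::NullaryExpr(4, 4, Checkerboard());
 *   std::cout << board << std::endl;
 * }
 * \endcode
 */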
|
||||
|
||||
/** \returns an expression of a matrix defined by a custom functor \a func
|
||||
*
|
||||
* The parameters \a rows and \a cols are the number of rows and of columns of
|
||||
* the returned matrix. Must be compatible with this MatrixBase type.
|
||||
*
|
||||
* This variant is meant to be used for dynamic-size matrix types. For fixed-size types,
|
||||
* it is redundant to pass \a rows and \a cols as arguments, so Zero() should be used
|
||||
* instead.
|
||||
*
|
||||
* The template parameter \a CustomNullaryOp is the type of the functor.
|
||||
*
|
||||
* \sa class CwiseNullaryOp
|
||||
*/
|
||||
template <typename Derived>
|
||||
template <typename CustomNullaryOp>
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
|
||||
#ifndef EIGEN_PARSED_BY_DOXYGEN
|
||||
const CwiseNullaryOp<CustomNullaryOp, typename DenseBase<Derived>::PlainObject>
|
||||
#else
|
||||
const CwiseNullaryOp<CustomNullaryOp, PlainObject>
|
||||
#endif
|
||||
DenseBase<Derived>::NullaryExpr(Index rows, Index cols, const CustomNullaryOp& func) {
|
||||
return CwiseNullaryOp<CustomNullaryOp, PlainObject>(rows, cols, func);
|
||||
}
|
||||
|
||||
/** \returns an expression of a matrix defined by a custom functor \a func
|
||||
*
|
||||
* The parameter \a size is the size of the returned vector.
|
||||
* Must be compatible with this MatrixBase type.
|
||||
*
|
||||
* \only_for_vectors
|
||||
*
|
||||
* This variant is meant to be used for dynamic-size vector types. For fixed-size types,
|
||||
* it is redundant to pass \a size as argument, so Zero() should be used
|
||||
* instead.
|
||||
*
|
||||
* The template parameter \a CustomNullaryOp is the type of the functor.
|
||||
*
|
||||
* Here is an example with C++11 random generators: \include random_cpp11.cpp
|
||||
* Output: \verbinclude random_cpp11.out
|
||||
*
|
||||
* \sa class CwiseNullaryOp
|
||||
*/
|
||||
template <typename Derived>
|
||||
template <typename CustomNullaryOp>
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
|
||||
#ifndef EIGEN_PARSED_BY_DOXYGEN
|
||||
const CwiseNullaryOp<CustomNullaryOp, typename DenseBase<Derived>::PlainObject>
|
||||
#else
|
||||
const CwiseNullaryOp<CustomNullaryOp, PlainObject>
|
||||
#endif
|
||||
DenseBase<Derived>::NullaryExpr(Index size, const CustomNullaryOp& func) {
|
||||
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
|
||||
if (RowsAtCompileTime == 1)
|
||||
return CwiseNullaryOp<CustomNullaryOp, PlainObject>(1, size, func);
|
||||
else
|
||||
return CwiseNullaryOp<CustomNullaryOp, PlainObject>(size, 1, func);
|
||||
}
|
||||
|
||||
/** \returns an expression of a matrix defined by a custom functor \a func
|
||||
*
|
||||
* This variant is only for fixed-size DenseBase types. For dynamic-size types, you
|
||||
* need to use the variants taking size arguments.
|
||||
*
|
||||
* The template parameter \a CustomNullaryOp is the type of the functor.
|
||||
*
|
||||
* \sa class CwiseNullaryOp
|
||||
*/
|
||||
template <typename Derived>
|
||||
template <typename CustomNullaryOp>
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
|
||||
#ifndef EIGEN_PARSED_BY_DOXYGEN
|
||||
const CwiseNullaryOp<CustomNullaryOp, typename DenseBase<Derived>::PlainObject>
|
||||
#else
|
||||
const CwiseNullaryOp<CustomNullaryOp, PlainObject>
|
||||
#endif
|
||||
DenseBase<Derived>::NullaryExpr(const CustomNullaryOp& func) {
|
||||
return CwiseNullaryOp<CustomNullaryOp, PlainObject>(RowsAtCompileTime, ColsAtCompileTime, func);
|
||||
}
|
||||
|
||||
/** \returns an expression of a constant matrix of value \a value
|
||||
*
|
||||
* The parameters \a rows and \a cols are the number of rows and of columns of
|
||||
* the returned matrix. Must be compatible with this DenseBase type.
|
||||
*
|
||||
* This variant is meant to be used for dynamic-size matrix types. For fixed-size types,
|
||||
* it is redundant to pass \a rows and \a cols as arguments, so Zero() should be used
|
||||
* instead.
|
||||
*
|
||||
* The template parameter \a CustomNullaryOp is the type of the functor.
|
||||
*
|
||||
* \sa class CwiseNullaryOp
|
||||
*/
|
||||
template <typename Derived>
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename DenseBase<Derived>::ConstantReturnType
|
||||
DenseBase<Derived>::Constant(Index rows, Index cols, const Scalar& value) {
|
||||
return DenseBase<Derived>::NullaryExpr(rows, cols, internal::scalar_constant_op<Scalar>(value));
|
||||
}
|
||||
|
||||
/** \returns an expression of a constant matrix of value \a value
|
||||
*
|
||||
* The parameter \a size is the size of the returned vector.
|
||||
* Must be compatible with this DenseBase type.
|
||||
*
|
||||
* \only_for_vectors
|
||||
*
|
||||
* This variant is meant to be used for dynamic-size vector types. For fixed-size types,
|
||||
* it is redundant to pass \a size as argument, so Zero() should be used
|
||||
* instead.
|
||||
*
|
||||
* The template parameter \a CustomNullaryOp is the type of the functor.
|
||||
*
|
||||
* \sa class CwiseNullaryOp
|
||||
*/
|
||||
template <typename Derived>
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename DenseBase<Derived>::ConstantReturnType
|
||||
DenseBase<Derived>::Constant(Index size, const Scalar& value) {
|
||||
return DenseBase<Derived>::NullaryExpr(size, internal::scalar_constant_op<Scalar>(value));
|
||||
}
|
||||
|
||||
/** \returns an expression of a constant matrix of value \a value
|
||||
*
|
||||
* This variant is only for fixed-size DenseBase types. For dynamic-size types, you
|
||||
* need to use the variants taking size arguments.
|
||||
*
|
||||
* The template parameter \a CustomNullaryOp is the type of the functor.
|
||||
*
|
||||
* \sa class CwiseNullaryOp
|
||||
*/
|
||||
template <typename Derived>
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename DenseBase<Derived>::ConstantReturnType
|
||||
DenseBase<Derived>::Constant(const Scalar& value) {
|
||||
EIGEN_STATIC_ASSERT_FIXED_SIZE(Derived)
|
||||
return DenseBase<Derived>::NullaryExpr(RowsAtCompileTime, ColsAtCompileTime,
|
||||
internal::scalar_constant_op<Scalar>(value));
|
||||
}
|
||||
|
||||
/** \deprecated because of accuracy loss. In Eigen 3.3, it is an alias for LinSpaced(Index,const Scalar&,const Scalar&)
|
||||
*
|
||||
* \only_for_vectors
|
||||
*
|
||||
* Example: \include DenseBase_LinSpaced_seq_deprecated.cpp
|
||||
* Output: \verbinclude DenseBase_LinSpaced_seq_deprecated.out
|
||||
*
|
||||
* \sa LinSpaced(Index,const Scalar&, const Scalar&), setLinSpaced(Index,const Scalar&,const Scalar&)
|
||||
*/
|
||||
template <typename Derived>
|
||||
EIGEN_DEPRECATED EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename DenseBase<
|
||||
Derived>::RandomAccessLinSpacedReturnType
|
||||
DenseBase<Derived>::LinSpaced(Sequential_t, Index size, const Scalar& low, const Scalar& high) {
|
||||
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
|
||||
return DenseBase<Derived>::NullaryExpr(size, internal::linspaced_op<Scalar>(low, high, size));
|
||||
}
|
||||
|
||||
/** \deprecated because of accuracy loss. In Eigen 3.3, it is an alias for LinSpaced(const Scalar&,const Scalar&)
|
||||
*
|
||||
* \sa LinSpaced(const Scalar&, const Scalar&)
|
||||
*/
|
||||
template <typename Derived>
|
||||
EIGEN_DEPRECATED EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename DenseBase<
|
||||
Derived>::RandomAccessLinSpacedReturnType
|
||||
DenseBase<Derived>::LinSpaced(Sequential_t, const Scalar& low, const Scalar& high) {
|
||||
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
|
||||
EIGEN_STATIC_ASSERT_FIXED_SIZE(Derived)
|
||||
return DenseBase<Derived>::NullaryExpr(Derived::SizeAtCompileTime,
|
||||
internal::linspaced_op<Scalar>(low, high, Derived::SizeAtCompileTime));
|
||||
}
|
||||
|
||||
/**
|
||||
* \brief Sets a linearly spaced vector.
|
||||
*
|
||||
* The function generates 'size' equally spaced values in the closed interval [low,high].
|
||||
* When size is set to 1, a vector of length 1 containing 'high' is returned.
|
||||
*
|
||||
* \only_for_vectors
|
||||
*
|
||||
* Example: \include DenseBase_LinSpaced.cpp
|
||||
* Output: \verbinclude DenseBase_LinSpaced.out
|
||||
*
|
||||
* For integer scalar types, an even spacing is possible if and only if the length of the range,
|
||||
* i.e., \c high-low is a scalar multiple of \c size-1, or if \c size is a scalar multiple of the
|
||||
* number of values \c high-low+1 (meaning each value can be repeated the same number of time).
|
||||
* If one of these two considions is not satisfied, then \c high is lowered to the largest value
|
||||
* satisfying one of this constraint.
|
||||
* Here are some examples:
|
||||
*
|
||||
* Example: \include DenseBase_LinSpacedInt.cpp
|
||||
* Output: \verbinclude DenseBase_LinSpacedInt.out
|
||||
*
|
||||
* \sa setLinSpaced(Index,const Scalar&,const Scalar&), CwiseNullaryOp
|
||||
*/
|
||||
template <typename Derived>
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename DenseBase<Derived>::RandomAccessLinSpacedReturnType
|
||||
DenseBase<Derived>::LinSpaced(Index size, const Scalar& low, const Scalar& high) {
|
||||
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
|
||||
return DenseBase<Derived>::NullaryExpr(size, internal::linspaced_op<Scalar>(low, high, size));
|
||||
}
|
||||
|
||||
/**
|
||||
* \copydoc DenseBase::LinSpaced(Index, const DenseBase::Scalar&, const DenseBase::Scalar&)
|
||||
* Special version for fixed size types which does not require the size parameter.
|
||||
*/
|
||||
template <typename Derived>
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename DenseBase<Derived>::RandomAccessLinSpacedReturnType
|
||||
DenseBase<Derived>::LinSpaced(const Scalar& low, const Scalar& high) {
|
||||
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
|
||||
EIGEN_STATIC_ASSERT_FIXED_SIZE(Derived)
|
||||
return DenseBase<Derived>::NullaryExpr(Derived::SizeAtCompileTime,
|
||||
internal::linspaced_op<Scalar>(low, high, Derived::SizeAtCompileTime));
|
||||
}
|
||||
|
||||
template <typename Derived>
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename DenseBase<Derived>::RandomAccessEqualSpacedReturnType
|
||||
DenseBase<Derived>::EqualSpaced(Index size, const Scalar& low, const Scalar& step) {
|
||||
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
|
||||
return DenseBase<Derived>::NullaryExpr(size, internal::equalspaced_op<Scalar>(low, step));
|
||||
}
|
||||
|
||||
template <typename Derived>
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename DenseBase<Derived>::RandomAccessEqualSpacedReturnType
|
||||
DenseBase<Derived>::EqualSpaced(const Scalar& low, const Scalar& step) {
|
||||
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
|
||||
return DenseBase<Derived>::NullaryExpr(Derived::SizeAtCompileTime, internal::equalspaced_op<Scalar>(low, step));
|
||||
}
|
||||
|
||||
/** \returns true if all coefficients in this matrix are approximately equal to \a val, to within precision \a prec */
|
||||
template <typename Derived>
|
||||
EIGEN_DEVICE_FUNC bool DenseBase<Derived>::isApproxToConstant(const Scalar& val, const RealScalar& prec) const {
|
||||
typename internal::nested_eval<Derived, 1>::type self(derived());
|
||||
for (Index j = 0; j < cols(); ++j)
|
||||
for (Index i = 0; i < rows(); ++i)
|
||||
if (!internal::isApprox(self.coeff(i, j), val, prec)) return false;
|
||||
return true;
|
||||
}
|
||||
|
||||
/** This is just an alias for isApproxToConstant().
|
||||
*
|
||||
* \returns true if all coefficients in this matrix are approximately equal to \a value, to within precision \a prec */
|
||||
template <typename Derived>
|
||||
EIGEN_DEVICE_FUNC bool DenseBase<Derived>::isConstant(const Scalar& val, const RealScalar& prec) const {
|
||||
return isApproxToConstant(val, prec);
|
||||
}
|
||||
|
||||
/** Alias for setConstant(): sets all coefficients in this expression to \a val.
|
||||
*
|
||||
* \sa setConstant(), Constant(), class CwiseNullaryOp
|
||||
*/
|
||||
template <typename Derived>
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void DenseBase<Derived>::fill(const Scalar& val) {
|
||||
setConstant(val);
|
||||
}
|
||||
|
||||
/** Sets all coefficients in this expression to value \a val.
|
||||
*
|
||||
* \sa fill(), setConstant(Index,const Scalar&), setConstant(Index,Index,const Scalar&), setZero(), setOnes(),
|
||||
* Constant(), class CwiseNullaryOp, setZero(), setOnes()
|
||||
*/
|
||||
template <typename Derived>
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& DenseBase<Derived>::setConstant(const Scalar& val) {
|
||||
internal::eigen_fill_impl<Derived>::run(derived(), val);
|
||||
return derived();
|
||||
}
|
||||
|
||||
/** Resizes to the given \a size, and sets all coefficients in this expression to the given value \a val.
|
||||
*
|
||||
* \only_for_vectors
|
||||
*
|
||||
* Example: \include Matrix_setConstant_int.cpp
|
||||
* Output: \verbinclude Matrix_setConstant_int.out
|
||||
*
|
||||
* \sa MatrixBase::setConstant(const Scalar&), setConstant(Index,Index,const Scalar&), class CwiseNullaryOp,
|
||||
* MatrixBase::Constant(const Scalar&)
|
||||
*/
|
||||
template <typename Derived>
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& PlainObjectBase<Derived>::setConstant(Index size, const Scalar& val) {
|
||||
resize(size);
|
||||
return setConstant(val);
|
||||
}
|
||||
|
||||
/** Resizes to the given size, and sets all coefficients in this expression to the given value \a val.
|
||||
*
|
||||
* \param rows the new number of rows
|
||||
* \param cols the new number of columns
|
||||
* \param val the value to which all coefficients are set
|
||||
*
|
||||
* Example: \include Matrix_setConstant_int_int.cpp
|
||||
* Output: \verbinclude Matrix_setConstant_int_int.out
|
||||
*
|
||||
* \sa MatrixBase::setConstant(const Scalar&), setConstant(Index,const Scalar&), class CwiseNullaryOp,
|
||||
* MatrixBase::Constant(const Scalar&)
|
||||
*/
|
||||
template <typename Derived>
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& PlainObjectBase<Derived>::setConstant(Index rows, Index cols,
|
||||
const Scalar& val) {
|
||||
resize(rows, cols);
|
||||
return setConstant(val);
|
||||
}
|
||||
|
||||
/** Resizes to the given size, changing only the number of columns, and sets all
|
||||
* coefficients in this expression to the given value \a val. For the parameter
|
||||
* of type NoChange_t, just pass the special value \c NoChange.
|
||||
*
|
||||
* \sa MatrixBase::setConstant(const Scalar&), setConstant(Index,const Scalar&), class CwiseNullaryOp,
|
||||
* MatrixBase::Constant(const Scalar&)
|
||||
*/
|
||||
template <typename Derived>
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& PlainObjectBase<Derived>::setConstant(NoChange_t, Index cols,
|
||||
const Scalar& val) {
|
||||
return setConstant(rows(), cols, val);
|
||||
}
|
||||
|
||||
/** Resizes to the given size, changing only the number of rows, and sets all
|
||||
* coefficients in this expression to the given value \a val. For the parameter
|
||||
* of type NoChange_t, just pass the special value \c NoChange.
|
||||
*
|
||||
* \sa MatrixBase::setConstant(const Scalar&), setConstant(Index,const Scalar&), class CwiseNullaryOp,
|
||||
* MatrixBase::Constant(const Scalar&)
|
||||
*/
|
||||
template <typename Derived>
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& PlainObjectBase<Derived>::setConstant(Index rows, NoChange_t,
|
||||
const Scalar& val) {
|
||||
return setConstant(rows, cols(), val);
|
||||
}
|
||||
|
||||
/**
|
||||
* \brief Sets a linearly spaced vector.
|
||||
*
|
||||
* The function generates 'size' equally spaced values in the closed interval [low,high].
|
||||
* When size is set to 1, a vector of length 1 containing 'high' is returned.
|
||||
*
|
||||
* \only_for_vectors
|
||||
*
|
||||
* Example: \include DenseBase_setLinSpaced.cpp
|
||||
* Output: \verbinclude DenseBase_setLinSpaced.out
|
||||
*
|
||||
* For integer scalar types, do not miss the explanations on the definition
|
||||
* of \link LinSpaced(Index,const Scalar&,const Scalar&) even spacing \endlink.
|
||||
*
|
||||
* \sa LinSpaced(Index,const Scalar&,const Scalar&), CwiseNullaryOp
|
||||
*/
|
||||
template <typename Derived>
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& DenseBase<Derived>::setLinSpaced(Index newSize, const Scalar& low,
|
||||
const Scalar& high) {
|
||||
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
|
||||
return derived() = Derived::NullaryExpr(newSize, internal::linspaced_op<Scalar>(low, high, newSize));
|
||||
}
|
||||
|
||||
/**
|
||||
* \brief Sets a linearly spaced vector.
|
||||
*
|
||||
* The function fills \c *this with equally spaced values in the closed interval [low,high].
|
||||
* When size is set to 1, a vector of length 1 containing 'high' is returned.
|
||||
*
|
||||
* \only_for_vectors
|
||||
*
|
||||
* For integer scalar types, do not miss the explanations on the definition
|
||||
* of \link LinSpaced(Index,const Scalar&,const Scalar&) even spacing \endlink.
|
||||
*
|
||||
* \sa LinSpaced(Index,const Scalar&,const Scalar&), setLinSpaced(Index, const Scalar&, const Scalar&), CwiseNullaryOp
|
||||
*/
|
||||
template <typename Derived>
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& DenseBase<Derived>::setLinSpaced(const Scalar& low, const Scalar& high) {
|
||||
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
|
||||
return setLinSpaced(size(), low, high);
|
||||
}
|
||||
|
||||
template <typename Derived>
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& DenseBase<Derived>::setEqualSpaced(Index newSize, const Scalar& low,
|
||||
const Scalar& step) {
|
||||
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
|
||||
return derived() = Derived::NullaryExpr(newSize, internal::equalspaced_op<Scalar>(low, step));
|
||||
}
|
||||
template <typename Derived>
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& DenseBase<Derived>::setEqualSpaced(const Scalar& low,
|
||||
const Scalar& step) {
|
||||
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
|
||||
return setEqualSpaced(size(), low, step);
|
||||
}
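/* A minimal usage sketch, assuming <Eigen/Dense>: EqualSpaced()/setEqualSpaced()
 * fill a vector with low, low + step, low + 2*step, ... (no endpoint is enforced,
 * unlike LinSpaced()).
 *
 * \code
 * #include <Eigen/Dense>
 * #include <iostream>
 *
 * int main() {
 *   Eigen::VectorXd v = Eigen::VectorXd::EqualSpaced(5, 1.0, 0.5);  // 1 1.5 2 2.5 3
 *   Eigen::VectorXd w(4);
 *   w.setEqualSpaced(0.0, 2.0);                                     // 0 2 4 6
 *   std::cout << v.transpose() << "\n" << w.transpose() << std::endl;
 * }
 * \endcode
 */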
|
||||
|
||||
// zero:
|
||||
|
||||
/** \returns an expression of a zero matrix.
|
||||
*
|
||||
* The parameters \a rows and \a cols are the number of rows and of columns of
|
||||
* the returned matrix. Must be compatible with this MatrixBase type.
|
||||
*
|
||||
* This variant is meant to be used for dynamic-size matrix types. For fixed-size types,
|
||||
* it is redundant to pass \a rows and \a cols as arguments, so Zero() should be used
|
||||
* instead.
|
||||
*
|
||||
* Example: \include MatrixBase_zero_int_int.cpp
|
||||
* Output: \verbinclude MatrixBase_zero_int_int.out
|
||||
*
|
||||
* \sa Zero(), Zero(Index)
|
||||
*/
|
||||
template <typename Derived>
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename DenseBase<Derived>::ZeroReturnType DenseBase<Derived>::Zero(
|
||||
Index rows, Index cols) {
|
||||
return ZeroReturnType(rows, cols);
|
||||
}
|
||||
|
||||
/** \returns an expression of a zero vector.
|
||||
*
|
||||
* The parameter \a size is the size of the returned vector.
|
||||
* Must be compatible with this MatrixBase type.
|
||||
*
|
||||
* \only_for_vectors
|
||||
*
|
||||
* This variant is meant to be used for dynamic-size vector types. For fixed-size types,
|
||||
* it is redundant to pass \a size as argument, so Zero() should be used
|
||||
* instead.
|
||||
*
|
||||
* Example: \include MatrixBase_zero_int.cpp
|
||||
* Output: \verbinclude MatrixBase_zero_int.out
|
||||
*
|
||||
* \sa Zero(), Zero(Index,Index)
|
||||
*/
|
||||
template <typename Derived>
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename DenseBase<Derived>::ZeroReturnType DenseBase<Derived>::Zero(
|
||||
Index size) {
|
||||
return ZeroReturnType(size);
|
||||
}
|
||||
|
||||
/** \returns an expression of a fixed-size zero matrix or vector.
|
||||
*
|
||||
* This variant is only for fixed-size MatrixBase types. For dynamic-size types, you
|
||||
* need to use the variants taking size arguments.
|
||||
*
|
||||
* Example: \include MatrixBase_zero.cpp
|
||||
* Output: \verbinclude MatrixBase_zero.out
|
||||
*
|
||||
* \sa Zero(Index), Zero(Index,Index)
|
||||
*/
|
||||
template <typename Derived>
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename DenseBase<Derived>::ZeroReturnType DenseBase<Derived>::Zero() {
|
||||
return ZeroReturnType(RowsAtCompileTime, ColsAtCompileTime);
|
||||
}
|
||||
|
||||
/** \returns true if *this is approximately equal to the zero matrix,
|
||||
* within the precision given by \a prec.
|
||||
*
|
||||
* Example: \include MatrixBase_isZero.cpp
|
||||
* Output: \verbinclude MatrixBase_isZero.out
|
||||
*
|
||||
* \sa class CwiseNullaryOp, Zero()
|
||||
*/
|
||||
template <typename Derived>
|
||||
EIGEN_DEVICE_FUNC bool DenseBase<Derived>::isZero(const RealScalar& prec) const {
|
||||
typename internal::nested_eval<Derived, 1>::type self(derived());
|
||||
for (Index j = 0; j < cols(); ++j)
|
||||
for (Index i = 0; i < rows(); ++i)
|
||||
if (!internal::isMuchSmallerThan(self.coeff(i, j), static_cast<Scalar>(1), prec)) return false;
|
||||
return true;
|
||||
}
|
||||
|
||||
/** Sets all coefficients in this expression to zero.
|
||||
*
|
||||
* Example: \include MatrixBase_setZero.cpp
|
||||
* Output: \verbinclude MatrixBase_setZero.out
|
||||
*
|
||||
* \sa class CwiseNullaryOp, Zero()
|
||||
*/
|
||||
template <typename Derived>
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& DenseBase<Derived>::setZero() {
|
||||
internal::eigen_zero_impl<Derived>::run(derived());
|
||||
return derived();
|
||||
}
|
||||
|
||||
/** Resizes to the given \a size, and sets all coefficients in this expression to zero.
|
||||
*
|
||||
* \only_for_vectors
|
||||
*
|
||||
* Example: \include Matrix_setZero_int.cpp
|
||||
* Output: \verbinclude Matrix_setZero_int.out
|
||||
*
|
||||
* \sa DenseBase::setZero(), setZero(Index,Index), class CwiseNullaryOp, DenseBase::Zero()
|
||||
*/
|
||||
template <typename Derived>
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& PlainObjectBase<Derived>::setZero(Index newSize) {
|
||||
resize(newSize);
|
||||
return setZero();
|
||||
}
|
||||
|
||||
/** Resizes to the given size, and sets all coefficients in this expression to zero.
|
||||
*
|
||||
* \param rows the new number of rows
|
||||
* \param cols the new number of columns
|
||||
*
|
||||
* Example: \include Matrix_setZero_int_int.cpp
|
||||
* Output: \verbinclude Matrix_setZero_int_int.out
|
||||
*
|
||||
* \sa DenseBase::setZero(), setZero(Index), class CwiseNullaryOp, DenseBase::Zero()
|
||||
*/
|
||||
template <typename Derived>
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& PlainObjectBase<Derived>::setZero(Index rows, Index cols) {
|
||||
resize(rows, cols);
|
||||
return setZero();
|
||||
}
|
||||
|
||||
/** Resizes to the given size, changing only the number of columns, and sets all
|
||||
* coefficients in this expression to zero. For the parameter of type NoChange_t,
|
||||
* just pass the special value \c NoChange.
|
||||
*
|
||||
* \sa DenseBase::setZero(), setZero(Index), setZero(Index, Index), setZero(Index, NoChange_t), class CwiseNullaryOp,
|
||||
* DenseBase::Zero()
|
||||
*/
|
||||
template <typename Derived>
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& PlainObjectBase<Derived>::setZero(NoChange_t, Index cols) {
|
||||
return setZero(rows(), cols);
|
||||
}
|
||||
|
||||
/** Resizes to the given size, changing only the number of rows, and sets all
|
||||
* coefficients in this expression to zero. For the parameter of type NoChange_t,
|
||||
* just pass the special value \c NoChange.
|
||||
*
|
||||
* \sa DenseBase::setZero(), setZero(Index), setZero(Index, Index), setZero(NoChange_t, Index), class CwiseNullaryOp,
|
||||
* DenseBase::Zero()
|
||||
*/
|
||||
template <typename Derived>
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& PlainObjectBase<Derived>::setZero(Index rows, NoChange_t) {
|
||||
return setZero(rows, cols());
|
||||
}
|
||||
|
||||
// ones:
|
||||
|
||||
/** \returns an expression of a matrix where all coefficients equal one.
|
||||
*
|
||||
* The parameters \a rows and \a cols are the number of rows and of columns of
|
||||
* the returned matrix. Must be compatible with this MatrixBase type.
|
||||
*
|
||||
* This variant is meant to be used for dynamic-size matrix types. For fixed-size types,
|
||||
* it is redundant to pass \a rows and \a cols as arguments, so Ones() should be used
|
||||
* instead.
|
||||
*
|
||||
* Example: \include MatrixBase_ones_int_int.cpp
|
||||
* Output: \verbinclude MatrixBase_ones_int_int.out
|
||||
*
|
||||
* \sa Ones(), Ones(Index), isOnes(), class Ones
|
||||
*/
|
||||
template <typename Derived>
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename DenseBase<Derived>::ConstantReturnType DenseBase<Derived>::Ones(
|
||||
Index rows, Index cols) {
|
||||
return Constant(rows, cols, Scalar(1));
|
||||
}
|
||||
|
||||
/** \returns an expression of a vector where all coefficients equal one.
|
||||
*
|
||||
* The parameter \a newSize is the size of the returned vector.
|
||||
* Must be compatible with this MatrixBase type.
|
||||
*
|
||||
* \only_for_vectors
|
||||
*
|
||||
* This variant is meant to be used for dynamic-size vector types. For fixed-size types,
|
||||
* it is redundant to pass \a size as argument, so Ones() should be used
|
||||
* instead.
|
||||
*
|
||||
* Example: \include MatrixBase_ones_int.cpp
|
||||
* Output: \verbinclude MatrixBase_ones_int.out
|
||||
*
|
||||
* \sa Ones(), Ones(Index,Index), isOnes(), class Ones
|
||||
*/
|
||||
template <typename Derived>
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename DenseBase<Derived>::ConstantReturnType DenseBase<Derived>::Ones(
|
||||
Index newSize) {
|
||||
return Constant(newSize, Scalar(1));
|
||||
}
|
||||
|
||||
/** \returns an expression of a fixed-size matrix or vector where all coefficients equal one.
|
||||
*
|
||||
* This variant is only for fixed-size MatrixBase types. For dynamic-size types, you
|
||||
* need to use the variants taking size arguments.
|
||||
*
|
||||
* Example: \include MatrixBase_ones.cpp
|
||||
* Output: \verbinclude MatrixBase_ones.out
|
||||
*
|
||||
* \sa Ones(Index), Ones(Index,Index), isOnes(), class Ones
|
||||
*/
|
||||
template <typename Derived>
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename DenseBase<Derived>::ConstantReturnType DenseBase<Derived>::Ones() {
|
||||
return Constant(Scalar(1));
|
||||
}
|
||||
|
||||
/** \returns true if *this is approximately equal to the matrix where all coefficients
|
||||
* are equal to 1, within the precision given by \a prec.
|
||||
*
|
||||
* Example: \include MatrixBase_isOnes.cpp
|
||||
* Output: \verbinclude MatrixBase_isOnes.out
|
||||
*
|
||||
* \sa class CwiseNullaryOp, Ones()
|
||||
*/
|
||||
template <typename Derived>
|
||||
EIGEN_DEVICE_FUNC bool DenseBase<Derived>::isOnes(const RealScalar& prec) const {
|
||||
return isApproxToConstant(Scalar(1), prec);
|
||||
}
|
||||
|
||||
/** Sets all coefficients in this expression to one.
|
||||
*
|
||||
* Example: \include MatrixBase_setOnes.cpp
|
||||
* Output: \verbinclude MatrixBase_setOnes.out
|
||||
*
|
||||
* \sa class CwiseNullaryOp, Ones()
|
||||
*/
|
||||
template <typename Derived>
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& DenseBase<Derived>::setOnes() {
|
||||
return setConstant(Scalar(1));
|
||||
}
|
||||
|
||||
/** Resizes to the given \a newSize, and sets all coefficients in this expression to one.
|
||||
*
|
||||
* \only_for_vectors
|
||||
*
|
||||
* Example: \include Matrix_setOnes_int.cpp
|
||||
* Output: \verbinclude Matrix_setOnes_int.out
|
||||
*
|
||||
* \sa MatrixBase::setOnes(), setOnes(Index,Index), class CwiseNullaryOp, MatrixBase::Ones()
|
||||
*/
|
||||
template <typename Derived>
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& PlainObjectBase<Derived>::setOnes(Index newSize) {
|
||||
resize(newSize);
|
||||
return setConstant(Scalar(1));
|
||||
}
|
||||
|
||||
/** Resizes to the given size, and sets all coefficients in this expression to one.
|
||||
*
|
||||
* \param rows the new number of rows
|
||||
* \param cols the new number of columns
|
||||
*
|
||||
* Example: \include Matrix_setOnes_int_int.cpp
|
||||
* Output: \verbinclude Matrix_setOnes_int_int.out
|
||||
*
|
||||
* \sa MatrixBase::setOnes(), setOnes(Index), class CwiseNullaryOp, MatrixBase::Ones()
|
||||
*/
|
||||
template <typename Derived>
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& PlainObjectBase<Derived>::setOnes(Index rows, Index cols) {
|
||||
resize(rows, cols);
|
||||
return setConstant(Scalar(1));
|
||||
}
|
||||
|
||||
/** Resizes to the given size, changing only the number of rows, and sets all
|
||||
* coefficients in this expression to one. For the parameter of type NoChange_t,
|
||||
* just pass the special value \c NoChange.
|
||||
*
|
||||
* \sa MatrixBase::setOnes(), setOnes(Index), setOnes(Index, Index), setOnes(NoChange_t, Index), class CwiseNullaryOp,
|
||||
* MatrixBase::Ones()
|
||||
*/
|
||||
template <typename Derived>
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& PlainObjectBase<Derived>::setOnes(Index rows, NoChange_t) {
|
||||
return setOnes(rows, cols());
|
||||
}
|
||||
|
||||
/** Resizes to the given size, changing only the number of columns, and sets all
|
||||
* coefficients in this expression to one. For the parameter of type NoChange_t,
|
||||
* just pass the special value \c NoChange.
|
||||
*
|
||||
* \sa MatrixBase::setOnes(), setOnes(Index), setOnes(Index, Index), setOnes(Index, NoChange_t) class CwiseNullaryOp,
|
||||
* MatrixBase::Ones()
|
||||
*/
|
||||
template <typename Derived>
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& PlainObjectBase<Derived>::setOnes(NoChange_t, Index cols) {
|
||||
return setOnes(rows(), cols);
|
||||
}
|
||||
|
||||
// Identity:
|
||||
|
||||
/** \returns an expression of the identity matrix (not necessarily square).
|
||||
*
|
||||
* The parameters \a rows and \a cols are the number of rows and of columns of
|
||||
* the returned matrix. Must be compatible with this MatrixBase type.
|
||||
*
|
||||
* This variant is meant to be used for dynamic-size matrix types. For fixed-size types,
|
||||
* it is redundant to pass \a rows and \a cols as arguments, so Identity() should be used
|
||||
* instead.
|
||||
*
|
||||
* Example: \include MatrixBase_identity_int_int.cpp
|
||||
* Output: \verbinclude MatrixBase_identity_int_int.out
|
||||
*
|
||||
* \sa Identity(), setIdentity(), isIdentity()
|
||||
*/
|
||||
template <typename Derived>
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::IdentityReturnType
|
||||
MatrixBase<Derived>::Identity(Index rows, Index cols) {
|
||||
return DenseBase<Derived>::NullaryExpr(rows, cols, internal::scalar_identity_op<Scalar>());
|
||||
}
|
||||
|
||||
/** \returns an expression of the identity matrix (not necessarily square).
|
||||
*
|
||||
* This variant is only for fixed-size MatrixBase types. For dynamic-size types, you
|
||||
* need to use the variant taking size arguments.
|
||||
*
|
||||
* Example: \include MatrixBase_identity.cpp
|
||||
* Output: \verbinclude MatrixBase_identity.out
|
||||
*
|
||||
* \sa Identity(Index,Index), setIdentity(), isIdentity()
|
||||
*/
|
||||
template <typename Derived>
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::IdentityReturnType
|
||||
MatrixBase<Derived>::Identity() {
|
||||
EIGEN_STATIC_ASSERT_FIXED_SIZE(Derived)
|
||||
return MatrixBase<Derived>::NullaryExpr(RowsAtCompileTime, ColsAtCompileTime, internal::scalar_identity_op<Scalar>());
|
||||
}
|
||||
|
||||
/** \returns true if *this is approximately equal to the identity matrix
|
||||
* (not necessarily square),
|
||||
* within the precision given by \a prec.
|
||||
*
|
||||
* Example: \include MatrixBase_isIdentity.cpp
|
||||
* Output: \verbinclude MatrixBase_isIdentity.out
|
||||
*
|
||||
* \sa class CwiseNullaryOp, Identity(), Identity(Index,Index), setIdentity()
|
||||
*/
|
||||
template <typename Derived>
|
||||
bool MatrixBase<Derived>::isIdentity(const RealScalar& prec) const {
|
||||
typename internal::nested_eval<Derived, 1>::type self(derived());
|
||||
for (Index j = 0; j < cols(); ++j) {
|
||||
for (Index i = 0; i < rows(); ++i) {
|
||||
if (i == j) {
|
||||
if (!internal::isApprox(self.coeff(i, j), static_cast<Scalar>(1), prec)) return false;
|
||||
} else {
|
||||
if (!internal::isMuchSmallerThan(self.coeff(i, j), static_cast<RealScalar>(1), prec)) return false;
|
||||
}
|
||||
}
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
namespace internal {
|
||||
|
||||
template <typename Derived, bool Big = (Derived::SizeAtCompileTime >= 16)>
|
||||
struct setIdentity_impl {
|
||||
EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Derived& run(Derived& m) {
|
||||
return m = Derived::Identity(m.rows(), m.cols());
|
||||
}
|
||||
};
|
||||
|
||||
template <typename Derived>
|
||||
struct setIdentity_impl<Derived, true> {
|
||||
EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Derived& run(Derived& m) {
|
||||
m.setZero();
|
||||
const Index size = numext::mini(m.rows(), m.cols());
|
||||
for (Index i = 0; i < size; ++i) m.coeffRef(i, i) = typename Derived::Scalar(1);
|
||||
return m;
|
||||
}
|
||||
};
|
||||
|
||||
} // end namespace internal
|
||||
|
||||
/** Writes the identity expression (not necessarily square) into *this.
|
||||
*
|
||||
* Example: \include MatrixBase_setIdentity.cpp
|
||||
* Output: \verbinclude MatrixBase_setIdentity.out
|
||||
*
|
||||
* \sa class CwiseNullaryOp, Identity(), Identity(Index,Index), isIdentity()
|
||||
*/
|
||||
template <typename Derived>
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& MatrixBase<Derived>::setIdentity() {
|
||||
return internal::setIdentity_impl<Derived>::run(derived());
|
||||
}
|
||||
|
||||
/** \brief Resizes to the given size, and writes the identity expression (not necessarily square) into *this.
|
||||
*
|
||||
* \param rows the new number of rows
|
||||
* \param cols the new number of columns
|
||||
*
|
||||
* Example: \include Matrix_setIdentity_int_int.cpp
|
||||
* Output: \verbinclude Matrix_setIdentity_int_int.out
|
||||
*
|
||||
* \sa MatrixBase::setIdentity(), class CwiseNullaryOp, MatrixBase::Identity()
|
||||
*/
|
||||
template <typename Derived>
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& MatrixBase<Derived>::setIdentity(Index rows, Index cols) {
|
||||
derived().resize(rows, cols);
|
||||
return setIdentity();
|
||||
}
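// Illustrative usage sketch (editorial, not from the upstream Eigen sources): setIdentity(),
// the resizing overload defined just above, and the isIdentity() test from earlier in this
// file. Assumes <Eigen/Dense>; names are hypothetical.
//
//   #include <Eigen/Dense>
//
//   void set_identity_example() {
//     Eigen::MatrixXd m(3, 3);
//     m.setIdentity();      // keeps the current 3x3 size
//     m.setIdentity(4, 7);  // resizes first, then writes the (non-square) identity pattern
//     bool ok = Eigen::Matrix3d::Identity().isIdentity();  // true within the default precision
//     (void)ok;
//   }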
|
||||
|
||||
/** \returns an expression of the i-th unit (basis) vector.
|
||||
*
|
||||
* \only_for_vectors
|
||||
*
|
||||
* \sa MatrixBase::Unit(Index), MatrixBase::UnitX(), MatrixBase::UnitY(), MatrixBase::UnitZ(), MatrixBase::UnitW()
|
||||
*/
|
||||
template <typename Derived>
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::BasisReturnType MatrixBase<Derived>::Unit(
|
||||
Index newSize, Index i) {
|
||||
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
|
||||
return BasisReturnType(SquareMatrixType::Identity(newSize, newSize), i);
|
||||
}
|
||||
|
||||
/** \returns an expression of the i-th unit (basis) vector.
|
||||
*
|
||||
* \only_for_vectors
|
||||
*
|
||||
* This variant is for fixed-size vector only.
|
||||
*
|
||||
* \sa MatrixBase::Unit(Index,Index), MatrixBase::UnitX(), MatrixBase::UnitY(), MatrixBase::UnitZ(), MatrixBase::UnitW()
|
||||
*/
|
||||
template <typename Derived>
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::BasisReturnType MatrixBase<Derived>::Unit(
|
||||
Index i) {
|
||||
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
|
||||
return BasisReturnType(SquareMatrixType::Identity(), i);
|
||||
}
|
||||
|
||||
/** \returns an expression of the X axis unit vector (1{,0}^*)
|
||||
*
|
||||
* \only_for_vectors
|
||||
*
|
||||
* \sa MatrixBase::Unit(Index,Index), MatrixBase::Unit(Index), MatrixBase::UnitY(), MatrixBase::UnitZ(),
|
||||
* MatrixBase::UnitW()
|
||||
*/
|
||||
template <typename Derived>
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::BasisReturnType MatrixBase<Derived>::UnitX() {
|
||||
return Derived::Unit(0);
|
||||
}
|
||||
|
||||
/** \returns an expression of the Y axis unit vector (0,1{,0}^*)
|
||||
*
|
||||
* \only_for_vectors
|
||||
*
|
||||
* \sa MatrixBase::Unit(Index,Index), MatrixBase::Unit(Index), MatrixBase::UnitX(), MatrixBase::UnitZ(),
|
||||
* MatrixBase::UnitW()
|
||||
*/
|
||||
template <typename Derived>
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::BasisReturnType MatrixBase<Derived>::UnitY() {
|
||||
return Derived::Unit(1);
|
||||
}
|
||||
|
||||
/** \returns an expression of the Z axis unit vector (0,0,1{,0}^*)
|
||||
*
|
||||
* \only_for_vectors
|
||||
*
|
||||
* \sa MatrixBase::Unit(Index,Index), MatrixBase::Unit(Index), MatrixBase::UnitX(), MatrixBase::UnitY(),
|
||||
* MatrixBase::UnitW()
|
||||
*/
|
||||
template <typename Derived>
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::BasisReturnType MatrixBase<Derived>::UnitZ() {
|
||||
return Derived::Unit(2);
|
||||
}
|
||||
|
||||
/** \returns an expression of the W axis unit vector (0,0,0,1)
|
||||
*
|
||||
* \only_for_vectors
|
||||
*
|
||||
* \sa MatrixBase::Unit(Index,Index), MatrixBase::Unit(Index), MatrixBase::UnitX(), MatrixBase::UnitY(),
|
||||
* MatrixBase::UnitZ()
|
||||
*/
|
||||
template <typename Derived>
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::BasisReturnType MatrixBase<Derived>::UnitW() {
|
||||
return Derived::Unit(3);
|
||||
}
|
||||
|
||||
/** \brief Set the coefficients of \c *this to the i-th unit (basis) vector
|
||||
*
|
||||
* \param i index of the unique coefficient to be set to 1
|
||||
*
|
||||
* \only_for_vectors
|
||||
*
|
||||
* \sa MatrixBase::setIdentity(), class CwiseNullaryOp, MatrixBase::Unit(Index,Index)
|
||||
*/
|
||||
template <typename Derived>
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& MatrixBase<Derived>::setUnit(Index i) {
|
||||
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived);
|
||||
eigen_assert(i < size());
|
||||
derived().setZero();
|
||||
derived().coeffRef(i) = Scalar(1);
|
||||
return derived();
|
||||
}
|
||||
|
||||
/** \brief Resizes to the given \a newSize, and writes the i-th unit (basis) vector into *this.
|
||||
*
|
||||
* \param newSize the new size of the vector
|
||||
* \param i index of the unique coefficient to be set to 1
|
||||
*
|
||||
* \only_for_vectors
|
||||
*
|
||||
* \sa MatrixBase::setIdentity(), class CwiseNullaryOp, MatrixBase::Unit(Index,Index)
|
||||
*/
|
||||
template <typename Derived>
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& MatrixBase<Derived>::setUnit(Index newSize, Index i) {
|
||||
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived);
|
||||
eigen_assert(i < newSize);
|
||||
derived().resize(newSize);
|
||||
return setUnit(i);
|
||||
}
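// Illustrative usage sketch (editorial, not from the upstream Eigen sources) for the
// unit-vector helpers above. Assumes <Eigen/Dense>; names are hypothetical.
//
//   #include <Eigen/Dense>
//
//   void unit_vector_example() {
//     Eigen::Vector4f e2 = Eigen::Vector4f::Unit(2);     // fixed-size variant: (0, 0, 1, 0)
//     Eigen::VectorXd e1 = Eigen::VectorXd::Unit(5, 1);  // dynamic-size variant: length 5, 1 at index 1
//     Eigen::Vector3f y  = Eigen::Vector3f::UnitY();     // (0, 1, 0)
//     Eigen::VectorXf v;
//     v.setUnit(6, 3);                                   // resize to 6, then write the basis vector e_3
//   }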
|
||||
|
||||
} // end namespace Eigen
|
||||
|
||||
#endif // EIGEN_CWISE_NULLARY_OP_H
|
||||
@@ -0,0 +1,171 @@
|
||||
// This file is part of Eigen, a lightweight C++ template library
|
||||
// for linear algebra.
|
||||
//
|
||||
// Copyright (C) 2008-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
|
||||
// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
|
||||
// Copyright (C) 2016 Eugene Brevdo <ebrevdo@gmail.com>
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla
|
||||
// Public License v. 2.0. If a copy of the MPL was not distributed
|
||||
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
#ifndef EIGEN_CWISE_TERNARY_OP_H
|
||||
#define EIGEN_CWISE_TERNARY_OP_H
|
||||
|
||||
// IWYU pragma: private
|
||||
#include "./InternalHeaderCheck.h"
|
||||
|
||||
namespace Eigen {
|
||||
|
||||
namespace internal {
|
||||
template <typename TernaryOp, typename Arg1, typename Arg2, typename Arg3>
|
||||
struct traits<CwiseTernaryOp<TernaryOp, Arg1, Arg2, Arg3>> {
|
||||
// we must not inherit from traits<Arg1> since it has
|
||||
// the potential to cause problems with MSVC
|
||||
typedef remove_all_t<Arg1> Ancestor;
|
||||
typedef typename traits<Ancestor>::XprKind XprKind;
|
||||
enum {
|
||||
RowsAtCompileTime = traits<Ancestor>::RowsAtCompileTime,
|
||||
ColsAtCompileTime = traits<Ancestor>::ColsAtCompileTime,
|
||||
MaxRowsAtCompileTime = traits<Ancestor>::MaxRowsAtCompileTime,
|
||||
MaxColsAtCompileTime = traits<Ancestor>::MaxColsAtCompileTime
|
||||
};
|
||||
|
||||
// even though we require Arg1, Arg2, and Arg3 to have the same scalar type
|
||||
// (see CwiseTernaryOp constructor),
|
||||
// we still want to handle the case when the result type is different.
|
||||
typedef typename result_of<TernaryOp(const typename Arg1::Scalar&, const typename Arg2::Scalar&,
|
||||
const typename Arg3::Scalar&)>::type Scalar;
|
||||
|
||||
typedef typename internal::traits<Arg1>::StorageKind StorageKind;
|
||||
typedef typename internal::traits<Arg1>::StorageIndex StorageIndex;
|
||||
|
||||
typedef typename Arg1::Nested Arg1Nested;
|
||||
typedef typename Arg2::Nested Arg2Nested;
|
||||
typedef typename Arg3::Nested Arg3Nested;
|
||||
typedef std::remove_reference_t<Arg1Nested> Arg1Nested_;
|
||||
typedef std::remove_reference_t<Arg2Nested> Arg2Nested_;
|
||||
typedef std::remove_reference_t<Arg3Nested> Arg3Nested_;
|
||||
enum { Flags = Arg1Nested_::Flags & RowMajorBit };
|
||||
};
|
||||
} // end namespace internal
|
||||
|
||||
template <typename TernaryOp, typename Arg1, typename Arg2, typename Arg3, typename StorageKind>
|
||||
class CwiseTernaryOpImpl;
|
||||
|
||||
/** \class CwiseTernaryOp
|
||||
* \ingroup Core_Module
|
||||
*
|
||||
* \brief Generic expression where a coefficient-wise ternary operator is
|
||||
* applied to three expressions
|
||||
*
|
||||
* \tparam TernaryOp template functor implementing the operator
|
||||
* \tparam Arg1Type the type of the first argument
|
||||
* \tparam Arg2Type the type of the second argument
|
||||
* \tparam Arg3Type the type of the third argument
|
||||
*
|
||||
* This class represents an expression where a coefficient-wise ternary
|
||||
* operator is applied to three expressions.
|
||||
* It is the return type of ternary operators, by which we mean only those
|
||||
* ternary operators where
|
||||
* all three arguments are Eigen expressions.
|
||||
* For example, the return type of betainc(matrix1, matrix2, matrix3) is a
|
||||
* CwiseTernaryOp.
|
||||
*
|
||||
* Most of the time, this is the only way that it is used, so you typically
|
||||
* don't have to name
|
||||
* CwiseTernaryOp types explicitly.
|
||||
*
|
||||
* \sa MatrixBase::ternaryExpr(const MatrixBase<Argument2> &, const
|
||||
* MatrixBase<Argument3> &, const CustomTernaryOp &) const, class CwiseBinaryOp,
|
||||
* class CwiseUnaryOp, class CwiseNullaryOp
|
||||
*/
|
||||
template <typename TernaryOp, typename Arg1Type, typename Arg2Type, typename Arg3Type>
|
||||
class CwiseTernaryOp : public CwiseTernaryOpImpl<TernaryOp, Arg1Type, Arg2Type, Arg3Type,
|
||||
typename internal::traits<Arg1Type>::StorageKind>,
|
||||
internal::no_assignment_operator {
|
||||
public:
|
||||
typedef internal::remove_all_t<Arg1Type> Arg1;
|
||||
typedef internal::remove_all_t<Arg2Type> Arg2;
|
||||
typedef internal::remove_all_t<Arg3Type> Arg3;
|
||||
|
||||
// require the sizes to match
|
||||
EIGEN_STATIC_ASSERT_SAME_MATRIX_SIZE(Arg1, Arg2)
|
||||
EIGEN_STATIC_ASSERT_SAME_MATRIX_SIZE(Arg1, Arg3)
|
||||
|
||||
// The index types should match
|
||||
EIGEN_STATIC_ASSERT((internal::is_same<typename internal::traits<Arg1Type>::StorageKind,
|
||||
typename internal::traits<Arg2Type>::StorageKind>::value),
|
||||
STORAGE_KIND_MUST_MATCH)
|
||||
EIGEN_STATIC_ASSERT((internal::is_same<typename internal::traits<Arg1Type>::StorageKind,
|
||||
typename internal::traits<Arg3Type>::StorageKind>::value),
|
||||
STORAGE_KIND_MUST_MATCH)
|
||||
|
||||
typedef typename CwiseTernaryOpImpl<TernaryOp, Arg1Type, Arg2Type, Arg3Type,
|
||||
typename internal::traits<Arg1Type>::StorageKind>::Base Base;
|
||||
EIGEN_GENERIC_PUBLIC_INTERFACE(CwiseTernaryOp)
|
||||
|
||||
typedef typename internal::ref_selector<Arg1Type>::type Arg1Nested;
|
||||
typedef typename internal::ref_selector<Arg2Type>::type Arg2Nested;
|
||||
typedef typename internal::ref_selector<Arg3Type>::type Arg3Nested;
|
||||
typedef std::remove_reference_t<Arg1Nested> Arg1Nested_;
|
||||
typedef std::remove_reference_t<Arg2Nested> Arg2Nested_;
|
||||
typedef std::remove_reference_t<Arg3Nested> Arg3Nested_;
|
||||
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CwiseTernaryOp(const Arg1& a1, const Arg2& a2, const Arg3& a3,
|
||||
const TernaryOp& func = TernaryOp())
|
||||
: m_arg1(a1), m_arg2(a2), m_arg3(a3), m_functor(func) {
|
||||
eigen_assert(a1.rows() == a2.rows() && a1.cols() == a2.cols() && a1.rows() == a3.rows() && a1.cols() == a3.cols());
|
||||
}
|
||||
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index rows() const {
|
||||
// return the fixed size type if available to enable compile time
|
||||
// optimizations
|
||||
if (internal::traits<internal::remove_all_t<Arg1Nested>>::RowsAtCompileTime == Dynamic &&
|
||||
internal::traits<internal::remove_all_t<Arg2Nested>>::RowsAtCompileTime == Dynamic)
|
||||
return m_arg3.rows();
|
||||
else if (internal::traits<internal::remove_all_t<Arg1Nested>>::RowsAtCompileTime == Dynamic &&
|
||||
internal::traits<internal::remove_all_t<Arg3Nested>>::RowsAtCompileTime == Dynamic)
|
||||
return m_arg2.rows();
|
||||
else
|
||||
return m_arg1.rows();
|
||||
}
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index cols() const {
|
||||
// return the fixed size type if available to enable compile time
|
||||
// optimizations
|
||||
if (internal::traits<internal::remove_all_t<Arg1Nested>>::ColsAtCompileTime == Dynamic &&
|
||||
internal::traits<internal::remove_all_t<Arg2Nested>>::ColsAtCompileTime == Dynamic)
|
||||
return m_arg3.cols();
|
||||
else if (internal::traits<internal::remove_all_t<Arg1Nested>>::ColsAtCompileTime == Dynamic &&
|
||||
internal::traits<internal::remove_all_t<Arg3Nested>>::ColsAtCompileTime == Dynamic)
|
||||
return m_arg2.cols();
|
||||
else
|
||||
return m_arg1.cols();
|
||||
}
|
||||
|
||||
/** \returns the first argument nested expression */
|
||||
EIGEN_DEVICE_FUNC const Arg1Nested_& arg1() const { return m_arg1; }
|
||||
/** \returns the second argument nested expression */
|
||||
EIGEN_DEVICE_FUNC const Arg2Nested_& arg2() const { return m_arg2; }
|
||||
/** \returns the third argument nested expression */
|
||||
EIGEN_DEVICE_FUNC const Arg3Nested_& arg3() const { return m_arg3; }
|
||||
/** \returns the functor representing the ternary operation */
|
||||
EIGEN_DEVICE_FUNC const TernaryOp& functor() const { return m_functor; }
|
||||
|
||||
protected:
|
||||
Arg1Nested m_arg1;
|
||||
Arg2Nested m_arg2;
|
||||
Arg3Nested m_arg3;
|
||||
const TernaryOp m_functor;
|
||||
};
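// Illustrative usage sketch (editorial, not from the upstream Eigen sources): one user-facing
// API whose return type is a CwiseTernaryOp is DenseBase::select(), declared in DenseBase.h
// later in this commit. The snippet keeps the positive entries of an array and zeroes the rest.
// Assumes <Eigen/Dense>; names are hypothetical.
//
//   #include <Eigen/Dense>
//
//   void ternary_example() {
//     Eigen::ArrayXf x = Eigen::ArrayXf::LinSpaced(5, -2.f, 2.f);
//     Eigen::ArrayXf clipped = (x > 0.f).select(x, 0.f);  // per coefficient: x_i if x_i > 0, else 0
//   }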
|
||||
|
||||
// Generic API dispatcher
|
||||
template <typename TernaryOp, typename Arg1, typename Arg2, typename Arg3, typename StorageKind>
|
||||
class CwiseTernaryOpImpl : public internal::generic_xpr_base<CwiseTernaryOp<TernaryOp, Arg1, Arg2, Arg3>>::type {
|
||||
public:
|
||||
typedef typename internal::generic_xpr_base<CwiseTernaryOp<TernaryOp, Arg1, Arg2, Arg3>>::type Base;
|
||||
};
|
||||
|
||||
} // end namespace Eigen
|
||||
|
||||
#endif // EIGEN_CWISE_TERNARY_OP_H
|
||||
@@ -0,0 +1,91 @@
|
||||
// This file is part of Eigen, a lightweight C++ template library
|
||||
// for linear algebra.
|
||||
//
|
||||
// Copyright (C) 2008-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
|
||||
// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla
|
||||
// Public License v. 2.0. If a copy of the MPL was not distributed
|
||||
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
#ifndef EIGEN_CWISE_UNARY_OP_H
|
||||
#define EIGEN_CWISE_UNARY_OP_H
|
||||
|
||||
// IWYU pragma: private
|
||||
#include "./InternalHeaderCheck.h"
|
||||
|
||||
namespace Eigen {
|
||||
|
||||
namespace internal {
|
||||
template <typename UnaryOp, typename XprType>
|
||||
struct traits<CwiseUnaryOp<UnaryOp, XprType> > : traits<XprType> {
|
||||
typedef typename result_of<UnaryOp(const typename XprType::Scalar&)>::type Scalar;
|
||||
typedef typename XprType::Nested XprTypeNested;
|
||||
typedef std::remove_reference_t<XprTypeNested> XprTypeNested_;
|
||||
enum { Flags = XprTypeNested_::Flags & RowMajorBit };
|
||||
};
|
||||
} // namespace internal
|
||||
|
||||
template <typename UnaryOp, typename XprType, typename StorageKind>
|
||||
class CwiseUnaryOpImpl;
|
||||
|
||||
/** \class CwiseUnaryOp
|
||||
* \ingroup Core_Module
|
||||
*
|
||||
* \brief Generic expression where a coefficient-wise unary operator is applied to an expression
|
||||
*
|
||||
* \tparam UnaryOp template functor implementing the operator
|
||||
* \tparam XprType the type of the expression to which we are applying the unary operator
|
||||
*
|
||||
* This class represents an expression where a unary operator is applied to an expression.
|
||||
* It is the return type of all operations taking exactly 1 input expression, regardless of the
|
||||
* presence of other inputs such as scalars. For example, the operator* in the expression 3*matrix
|
||||
* is considered unary, because only the right-hand side is an expression, and its
|
||||
* return type is a specialization of CwiseUnaryOp.
|
||||
*
|
||||
* Most of the time, this is the only way that it is used, so you typically don't have to name
|
||||
* CwiseUnaryOp types explicitly.
|
||||
*
|
||||
* \sa MatrixBase::unaryExpr(const CustomUnaryOp &) const, class CwiseBinaryOp, class CwiseNullaryOp
|
||||
*/
|
||||
template <typename UnaryOp, typename XprType>
|
||||
class CwiseUnaryOp : public CwiseUnaryOpImpl<UnaryOp, XprType, typename internal::traits<XprType>::StorageKind>,
|
||||
internal::no_assignment_operator {
|
||||
public:
|
||||
typedef typename CwiseUnaryOpImpl<UnaryOp, XprType, typename internal::traits<XprType>::StorageKind>::Base Base;
|
||||
EIGEN_GENERIC_PUBLIC_INTERFACE(CwiseUnaryOp)
|
||||
typedef typename internal::ref_selector<XprType>::type XprTypeNested;
|
||||
typedef internal::remove_all_t<XprType> NestedExpression;
|
||||
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit CwiseUnaryOp(const XprType& xpr, const UnaryOp& func = UnaryOp())
|
||||
: m_xpr(xpr), m_functor(func) {}
|
||||
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Index rows() const noexcept { return m_xpr.rows(); }
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Index cols() const noexcept { return m_xpr.cols(); }
|
||||
|
||||
/** \returns the functor representing the unary operation */
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const UnaryOp& functor() const { return m_functor; }
|
||||
|
||||
/** \returns the nested expression */
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const internal::remove_all_t<XprTypeNested>& nestedExpression() const {
|
||||
return m_xpr;
|
||||
}
|
||||
|
||||
/** \returns the nested expression */
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE internal::remove_all_t<XprTypeNested>& nestedExpression() { return m_xpr; }
|
||||
|
||||
protected:
|
||||
XprTypeNested m_xpr;
|
||||
const UnaryOp m_functor;
|
||||
};
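// Illustrative usage sketch (editorial, not from the upstream Eigen sources): unaryExpr(),
// referenced in the \sa above, is the usual way to build a CwiseUnaryOp from a custom functor
// or lambda. Assumes <Eigen/Dense>; names are hypothetical.
//
//   #include <Eigen/Dense>
//
//   void unary_example() {
//     Eigen::MatrixXd m = Eigen::MatrixXd::Random(3, 3);
//     Eigen::MatrixXd clipped = m.unaryExpr([](double x) { return x < 0.0 ? 0.0 : x; });
//   }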
|
||||
|
||||
// Generic API dispatcher
|
||||
template <typename UnaryOp, typename XprType, typename StorageKind>
|
||||
class CwiseUnaryOpImpl : public internal::generic_xpr_base<CwiseUnaryOp<UnaryOp, XprType> >::type {
|
||||
public:
|
||||
typedef typename internal::generic_xpr_base<CwiseUnaryOp<UnaryOp, XprType> >::type Base;
|
||||
};
|
||||
|
||||
} // end namespace Eigen
|
||||
|
||||
#endif // EIGEN_CWISE_UNARY_OP_H
|
||||
@@ -0,0 +1,167 @@
|
||||
// This file is part of Eigen, a lightweight C++ template library
|
||||
// for linear algebra.
|
||||
//
|
||||
// Copyright (C) 2009-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla
|
||||
// Public License v. 2.0. If a copy of the MPL was not distributed
|
||||
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
#ifndef EIGEN_CWISE_UNARY_VIEW_H
|
||||
#define EIGEN_CWISE_UNARY_VIEW_H
|
||||
|
||||
// IWYU pragma: private
|
||||
#include "./InternalHeaderCheck.h"
|
||||
|
||||
namespace Eigen {
|
||||
|
||||
namespace internal {
|
||||
template <typename ViewOp, typename MatrixType, typename StrideType>
|
||||
struct traits<CwiseUnaryView<ViewOp, MatrixType, StrideType> > : traits<MatrixType> {
|
||||
typedef typename result_of<ViewOp(typename traits<MatrixType>::Scalar&)>::type ScalarRef;
|
||||
static_assert(std::is_reference<ScalarRef>::value, "Views must return a reference type.");
|
||||
typedef remove_all_t<ScalarRef> Scalar;
|
||||
typedef typename MatrixType::Nested MatrixTypeNested;
|
||||
typedef remove_all_t<MatrixTypeNested> MatrixTypeNested_;
|
||||
enum {
|
||||
FlagsLvalueBit = is_lvalue<MatrixType>::value ? LvalueBit : 0,
|
||||
Flags =
|
||||
traits<MatrixTypeNested_>::Flags &
|
||||
(RowMajorBit | FlagsLvalueBit | DirectAccessBit), // FIXME DirectAccessBit should not be handled by expressions
|
||||
MatrixTypeInnerStride = inner_stride_at_compile_time<MatrixType>::ret,
|
||||
// need to cast the sizeof's from size_t to int explicitly, otherwise:
|
||||
// "error: no integral type can represent all of the enumerator values
|
||||
InnerStrideAtCompileTime =
|
||||
StrideType::InnerStrideAtCompileTime == 0
|
||||
? (MatrixTypeInnerStride == Dynamic
|
||||
? int(Dynamic)
|
||||
: int(MatrixTypeInnerStride) * int(sizeof(typename traits<MatrixType>::Scalar) / sizeof(Scalar)))
|
||||
: int(StrideType::InnerStrideAtCompileTime),
|
||||
|
||||
OuterStrideAtCompileTime = StrideType::OuterStrideAtCompileTime == 0
|
||||
? (outer_stride_at_compile_time<MatrixType>::ret == Dynamic
|
||||
? int(Dynamic)
|
||||
: outer_stride_at_compile_time<MatrixType>::ret *
|
||||
int(sizeof(typename traits<MatrixType>::Scalar) / sizeof(Scalar)))
|
||||
: int(StrideType::OuterStrideAtCompileTime)
|
||||
};
|
||||
};
|
||||
|
||||
// Generic API dispatcher
|
||||
template <typename ViewOp, typename XprType, typename StrideType, typename StorageKind,
|
||||
bool Mutable = !std::is_const<XprType>::value>
|
||||
class CwiseUnaryViewImpl : public generic_xpr_base<CwiseUnaryView<ViewOp, XprType, StrideType> >::type {
|
||||
public:
|
||||
typedef typename generic_xpr_base<CwiseUnaryView<ViewOp, XprType, StrideType> >::type Base;
|
||||
};
|
||||
|
||||
template <typename ViewOp, typename MatrixType, typename StrideType>
|
||||
class CwiseUnaryViewImpl<ViewOp, MatrixType, StrideType, Dense, false>
|
||||
: public dense_xpr_base<CwiseUnaryView<ViewOp, MatrixType, StrideType> >::type {
|
||||
public:
|
||||
typedef CwiseUnaryView<ViewOp, MatrixType, StrideType> Derived;
|
||||
typedef typename dense_xpr_base<CwiseUnaryView<ViewOp, MatrixType, StrideType> >::type Base;
|
||||
EIGEN_DENSE_PUBLIC_INTERFACE(Derived)
|
||||
EIGEN_INHERIT_ASSIGNMENT_OPERATORS(CwiseUnaryViewImpl)
|
||||
|
||||
EIGEN_DEVICE_FUNC inline const Scalar* data() const { return &(this->coeffRef(0)); }
|
||||
|
||||
EIGEN_DEVICE_FUNC constexpr Index innerStride() const {
|
||||
return StrideType::InnerStrideAtCompileTime != 0 ? int(StrideType::InnerStrideAtCompileTime)
|
||||
: derived().nestedExpression().innerStride() *
|
||||
sizeof(typename traits<MatrixType>::Scalar) / sizeof(Scalar);
|
||||
}
|
||||
|
||||
EIGEN_DEVICE_FUNC constexpr Index outerStride() const {
|
||||
return StrideType::OuterStrideAtCompileTime != 0 ? int(StrideType::OuterStrideAtCompileTime)
|
||||
: derived().nestedExpression().outerStride() *
|
||||
sizeof(typename traits<MatrixType>::Scalar) / sizeof(Scalar);
|
||||
}
|
||||
|
||||
protected:
|
||||
EIGEN_DEFAULT_EMPTY_CONSTRUCTOR_AND_DESTRUCTOR(CwiseUnaryViewImpl)
|
||||
|
||||
// Allow const access to coeffRef for the case of direct access being enabled.
|
||||
EIGEN_DEVICE_FUNC inline const Scalar& coeffRef(Index index) const {
|
||||
return internal::evaluator<Derived>(derived()).coeffRef(index);
|
||||
}
|
||||
|
||||
EIGEN_DEVICE_FUNC inline const Scalar& coeffRef(Index row, Index col) const {
|
||||
return internal::evaluator<Derived>(derived()).coeffRef(row, col);
|
||||
}
|
||||
};
|
||||
|
||||
template <typename ViewOp, typename MatrixType, typename StrideType>
|
||||
class CwiseUnaryViewImpl<ViewOp, MatrixType, StrideType, Dense, true>
|
||||
: public CwiseUnaryViewImpl<ViewOp, MatrixType, StrideType, Dense, false> {
|
||||
public:
|
||||
typedef CwiseUnaryViewImpl<ViewOp, MatrixType, StrideType, Dense, false> Base;
|
||||
typedef CwiseUnaryView<ViewOp, MatrixType, StrideType> Derived;
|
||||
EIGEN_DENSE_PUBLIC_INTERFACE(Derived)
|
||||
EIGEN_INHERIT_ASSIGNMENT_OPERATORS(CwiseUnaryViewImpl)
|
||||
|
||||
using Base::data;
|
||||
EIGEN_DEVICE_FUNC inline Scalar* data() { return &(this->coeffRef(0)); }
|
||||
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(Index row, Index col) {
|
||||
return internal::evaluator<Derived>(derived()).coeffRef(row, col);
|
||||
}
|
||||
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(Index index) {
|
||||
return internal::evaluator<Derived>(derived()).coeffRef(index);
|
||||
}
|
||||
|
||||
protected:
|
||||
EIGEN_DEFAULT_EMPTY_CONSTRUCTOR_AND_DESTRUCTOR(CwiseUnaryViewImpl)
|
||||
};
|
||||
|
||||
} // namespace internal
|
||||
|
||||
/** \class CwiseUnaryView
|
||||
* \ingroup Core_Module
|
||||
*
|
||||
* \brief Generic lvalue expression of a coefficient-wise unary operator of a matrix or a vector
|
||||
*
|
||||
* \tparam ViewOp template functor implementing the view
|
||||
* \tparam MatrixType the type of the matrix to which we are applying the unary operator
|
||||
*
|
||||
* This class represents an lvalue expression of a generic unary view operator of a matrix or a vector.
|
||||
* It is the return type of real() and imag(), and most of the time this is the only way it is used.
|
||||
*
|
||||
* \sa MatrixBase::unaryViewExpr(const CustomUnaryOp &) const, class CwiseUnaryOp
|
||||
*/
|
||||
template <typename ViewOp, typename MatrixType, typename StrideType>
|
||||
class CwiseUnaryView : public internal::CwiseUnaryViewImpl<ViewOp, MatrixType, StrideType,
|
||||
typename internal::traits<MatrixType>::StorageKind> {
|
||||
public:
|
||||
typedef typename internal::CwiseUnaryViewImpl<ViewOp, MatrixType, StrideType,
|
||||
typename internal::traits<MatrixType>::StorageKind>::Base Base;
|
||||
EIGEN_GENERIC_PUBLIC_INTERFACE(CwiseUnaryView)
|
||||
typedef typename internal::ref_selector<MatrixType>::non_const_type MatrixTypeNested;
|
||||
typedef internal::remove_all_t<MatrixType> NestedExpression;
|
||||
|
||||
explicit EIGEN_DEVICE_FUNC inline CwiseUnaryView(MatrixType& mat, const ViewOp& func = ViewOp())
|
||||
: m_matrix(mat), m_functor(func) {}
|
||||
|
||||
EIGEN_INHERIT_ASSIGNMENT_OPERATORS(CwiseUnaryView)
|
||||
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Index rows() const noexcept { return m_matrix.rows(); }
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Index cols() const noexcept { return m_matrix.cols(); }
|
||||
|
||||
/** \returns the functor representing unary operation */
|
||||
EIGEN_DEVICE_FUNC const ViewOp& functor() const { return m_functor; }
|
||||
|
||||
/** \returns the nested expression */
|
||||
EIGEN_DEVICE_FUNC const internal::remove_all_t<MatrixTypeNested>& nestedExpression() const { return m_matrix; }
|
||||
|
||||
/** \returns the nested expression */
|
||||
EIGEN_DEVICE_FUNC std::remove_reference_t<MatrixTypeNested>& nestedExpression() { return m_matrix; }
|
||||
|
||||
protected:
|
||||
MatrixTypeNested m_matrix;
|
||||
ViewOp m_functor;
|
||||
};
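// Illustrative usage sketch (editorial, not from the upstream Eigen sources): real() and
// imag() on a complex matrix return writable CwiseUnaryView expressions, as described in the
// class documentation above. Assumes <Eigen/Dense>; names are hypothetical.
//
//   #include <Eigen/Dense>
//
//   void view_example() {
//     Eigen::MatrixXcf m = Eigen::MatrixXcf::Random(2, 2);
//     m.imag().setZero();             // write through the view: zero all imaginary parts
//     Eigen::MatrixXf re = m.real();  // read through the same kind of view
//   }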
|
||||
|
||||
} // namespace Eigen
|
||||
|
||||
#endif // EIGEN_CWISE_UNARY_VIEW_H
|
||||
673
2025.09.22_cpp_with_eigen_package/Eigen/src/Core/DenseBase.h
Normal file
673
2025.09.22_cpp_with_eigen_package/Eigen/src/Core/DenseBase.h
Normal file
@@ -0,0 +1,673 @@
|
||||
// This file is part of Eigen, a lightweight C++ template library
|
||||
// for linear algebra.
|
||||
//
|
||||
// Copyright (C) 2007-2010 Benoit Jacob <jacob.benoit.1@gmail.com>
|
||||
// Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla
|
||||
// Public License v. 2.0. If a copy of the MPL was not distributed
|
||||
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
#ifndef EIGEN_DENSEBASE_H
|
||||
#define EIGEN_DENSEBASE_H
|
||||
|
||||
// IWYU pragma: private
|
||||
#include "./InternalHeaderCheck.h"
|
||||
|
||||
namespace Eigen {
|
||||
|
||||
// The index type defined by EIGEN_DEFAULT_DENSE_INDEX_TYPE must be a signed type.
|
||||
EIGEN_STATIC_ASSERT(NumTraits<DenseIndex>::IsSigned, THE_INDEX_TYPE_MUST_BE_A_SIGNED_TYPE)
|
||||
|
||||
/** \class DenseBase
|
||||
* \ingroup Core_Module
|
||||
*
|
||||
* \brief Base class for all dense matrices, vectors, and arrays
|
||||
*
|
||||
* This class is the base that is inherited by all dense objects (matrix, vector, arrays,
|
||||
* and related expression types). The common Eigen API for dense objects is contained in this class.
|
||||
*
|
||||
* \tparam Derived is the derived type, e.g., a matrix type or an expression.
|
||||
*
|
||||
* This class can be extended with the help of the plugin mechanism described on the page
|
||||
* \ref TopicCustomizing_Plugins by defining the preprocessor symbol \c EIGEN_DENSEBASE_PLUGIN.
|
||||
*
|
||||
* \sa \blank \ref TopicClassHierarchy
|
||||
*/
|
||||
template <typename Derived>
|
||||
class DenseBase
|
||||
#ifndef EIGEN_PARSED_BY_DOXYGEN
|
||||
: public DenseCoeffsBase<Derived, internal::accessors_level<Derived>::value>
|
||||
#else
|
||||
: public DenseCoeffsBase<Derived, DirectWriteAccessors>
|
||||
#endif // not EIGEN_PARSED_BY_DOXYGEN
|
||||
{
|
||||
public:
|
||||
/** Inner iterator type to iterate over the coefficients of a row or column.
|
||||
* \sa class InnerIterator
|
||||
*/
|
||||
typedef Eigen::InnerIterator<Derived> InnerIterator;
|
||||
|
||||
typedef typename internal::traits<Derived>::StorageKind StorageKind;
|
||||
|
||||
/**
|
||||
* \brief The type used to store indices
|
||||
* \details This typedef is relevant for types that store multiple indices such as
|
||||
* PermutationMatrix or Transpositions, otherwise it defaults to Eigen::Index
|
||||
* \sa \blank \ref TopicPreprocessorDirectives, Eigen::Index, SparseMatrixBase.
|
||||
*/
|
||||
typedef typename internal::traits<Derived>::StorageIndex StorageIndex;
|
||||
|
||||
/** The numeric type of the expression's coefficients, e.g. float, double, int or std::complex<float>, etc. */
|
||||
typedef typename internal::traits<Derived>::Scalar Scalar;
|
||||
|
||||
/** The numeric type of the expression's coefficients, e.g. float, double, int or std::complex<float>, etc.
|
||||
*
|
||||
* It is an alias for the Scalar type */
|
||||
typedef Scalar value_type;
|
||||
|
||||
typedef typename NumTraits<Scalar>::Real RealScalar;
|
||||
typedef DenseCoeffsBase<Derived, internal::accessors_level<Derived>::value> Base;
|
||||
|
||||
using Base::coeff;
|
||||
using Base::coeffByOuterInner;
|
||||
using Base::colIndexByOuterInner;
|
||||
using Base::cols;
|
||||
using Base::const_cast_derived;
|
||||
using Base::derived;
|
||||
using Base::rowIndexByOuterInner;
|
||||
using Base::rows;
|
||||
using Base::size;
|
||||
using Base::operator();
|
||||
using Base::operator[];
|
||||
using Base::colStride;
|
||||
using Base::innerStride;
|
||||
using Base::outerStride;
|
||||
using Base::rowStride;
|
||||
using Base::stride;
|
||||
using Base::w;
|
||||
using Base::x;
|
||||
using Base::y;
|
||||
using Base::z;
|
||||
typedef typename Base::CoeffReturnType CoeffReturnType;
|
||||
|
||||
enum {
|
||||
|
||||
RowsAtCompileTime = internal::traits<Derived>::RowsAtCompileTime,
|
||||
/**< The number of rows at compile-time. This is just a copy of the value provided
|
||||
* by the \a Derived type. If a value is not known at compile-time,
|
||||
* it is set to the \a Dynamic constant.
|
||||
* \sa MatrixBase::rows(), MatrixBase::cols(), ColsAtCompileTime, SizeAtCompileTime */
|
||||
|
||||
ColsAtCompileTime = internal::traits<Derived>::ColsAtCompileTime,
|
||||
/**< The number of columns at compile-time. This is just a copy of the value provided
|
||||
* by the \a Derived type. If a value is not known at compile-time,
|
||||
* it is set to the \a Dynamic constant.
|
||||
* \sa MatrixBase::rows(), MatrixBase::cols(), RowsAtCompileTime, SizeAtCompileTime */
|
||||
|
||||
SizeAtCompileTime = (internal::size_of_xpr_at_compile_time<Derived>::ret),
|
||||
/**< This is equal to the number of coefficients, i.e. the number of
|
||||
* rows times the number of columns, or to \a Dynamic if this is not
|
||||
* known at compile-time. \sa RowsAtCompileTime, ColsAtCompileTime */
|
||||
|
||||
MaxRowsAtCompileTime = internal::traits<Derived>::MaxRowsAtCompileTime,
|
||||
/**< This value is equal to the maximum possible number of rows that this expression
|
||||
* might have. If this expression might have an arbitrarily high number of rows,
|
||||
* this value is set to \a Dynamic.
|
||||
*
|
||||
* This value is useful to know when evaluating an expression, in order to determine
|
||||
* whether it is possible to avoid doing a dynamic memory allocation.
|
||||
*
|
||||
* \sa RowsAtCompileTime, MaxColsAtCompileTime, MaxSizeAtCompileTime
|
||||
*/
|
||||
|
||||
MaxColsAtCompileTime = internal::traits<Derived>::MaxColsAtCompileTime,
|
||||
/**< This value is equal to the maximum possible number of columns that this expression
|
||||
* might have. If this expression might have an arbitrarily high number of columns,
|
||||
* this value is set to \a Dynamic.
|
||||
*
|
||||
* This value is useful to know when evaluating an expression, in order to determine
|
||||
* whether it is possible to avoid doing a dynamic memory allocation.
|
||||
*
|
||||
* \sa ColsAtCompileTime, MaxRowsAtCompileTime, MaxSizeAtCompileTime
|
||||
*/
|
||||
|
||||
MaxSizeAtCompileTime = internal::size_at_compile_time(internal::traits<Derived>::MaxRowsAtCompileTime,
|
||||
internal::traits<Derived>::MaxColsAtCompileTime),
|
||||
/**< This value is equal to the maximum possible number of coefficients that this expression
|
||||
* might have. If this expression might have an arbitrarily high number of coefficients,
|
||||
* this value is set to \a Dynamic.
|
||||
*
|
||||
* This value is useful to know when evaluating an expression, in order to determine
|
||||
* whether it is possible to avoid doing a dynamic memory allocation.
|
||||
*
|
||||
* \sa SizeAtCompileTime, MaxRowsAtCompileTime, MaxColsAtCompileTime
|
||||
*/
|
||||
|
||||
IsVectorAtCompileTime =
|
||||
internal::traits<Derived>::RowsAtCompileTime == 1 || internal::traits<Derived>::ColsAtCompileTime == 1,
|
||||
/**< This is set to true if either the number of rows or the number of
|
||||
* columns is known at compile-time to be equal to 1. Indeed, in that case,
|
||||
* we are dealing with a column-vector (if there is only one column) or with
|
||||
* a row-vector (if there is only one row). */
|
||||
|
||||
NumDimensions = int(MaxSizeAtCompileTime) == 1 ? 0
|
||||
: bool(IsVectorAtCompileTime) ? 1
|
||||
: 2,
|
||||
/**< This value is equal to Tensor::NumDimensions, i.e. 0 for scalars, 1 for vectors,
|
||||
* and 2 for matrices.
|
||||
*/
|
||||
|
||||
Flags = internal::traits<Derived>::Flags,
|
||||
/**< This stores expression \ref flags flags which may or may not be inherited by new expressions
|
||||
* constructed from this one. See the \ref flags "list of flags".
|
||||
*/
|
||||
|
||||
IsRowMajor = int(Flags) & RowMajorBit, /**< True if this expression has row-major storage order. */
|
||||
|
||||
InnerSizeAtCompileTime = int(IsVectorAtCompileTime) ? int(SizeAtCompileTime)
|
||||
: int(IsRowMajor) ? int(ColsAtCompileTime)
|
||||
: int(RowsAtCompileTime),
|
||||
|
||||
InnerStrideAtCompileTime = internal::inner_stride_at_compile_time<Derived>::ret,
|
||||
OuterStrideAtCompileTime = internal::outer_stride_at_compile_time<Derived>::ret
|
||||
};
|
||||
|
||||
typedef typename internal::find_best_packet<Scalar, SizeAtCompileTime>::type PacketScalar;
|
||||
|
||||
enum { IsPlainObjectBase = 0 };
|
||||
|
||||
/** The plain matrix type corresponding to this expression.
|
||||
* \sa PlainObject */
|
||||
typedef Matrix<typename internal::traits<Derived>::Scalar, internal::traits<Derived>::RowsAtCompileTime,
|
||||
internal::traits<Derived>::ColsAtCompileTime,
|
||||
AutoAlign | (internal::traits<Derived>::Flags & RowMajorBit ? RowMajor : ColMajor),
|
||||
internal::traits<Derived>::MaxRowsAtCompileTime, internal::traits<Derived>::MaxColsAtCompileTime>
|
||||
PlainMatrix;
|
||||
|
||||
/** The plain array type corresponding to this expression.
|
||||
* \sa PlainObject */
|
||||
typedef Array<typename internal::traits<Derived>::Scalar, internal::traits<Derived>::RowsAtCompileTime,
|
||||
internal::traits<Derived>::ColsAtCompileTime,
|
||||
AutoAlign | (internal::traits<Derived>::Flags & RowMajorBit ? RowMajor : ColMajor),
|
||||
internal::traits<Derived>::MaxRowsAtCompileTime, internal::traits<Derived>::MaxColsAtCompileTime>
|
||||
PlainArray;
|
||||
|
||||
/** \brief The plain matrix or array type corresponding to this expression.
|
||||
*
|
||||
* This is not necessarily exactly the return type of eval(). In the case of plain matrices,
|
||||
* the return type of eval() is a const reference to a matrix, not a matrix! It is however guaranteed
|
||||
* that the return type of eval() is either PlainObject or const PlainObject&.
|
||||
*/
|
||||
typedef std::conditional_t<internal::is_same<typename internal::traits<Derived>::XprKind, MatrixXpr>::value,
|
||||
PlainMatrix, PlainArray>
|
||||
PlainObject;
|
||||
|
||||
/** \returns the outer size.
|
||||
*
|
||||
* \note For a vector, this returns just 1. For a matrix (non-vector), this is the major dimension
|
||||
* with respect to the \ref TopicStorageOrders "storage order", i.e., the number of columns for a
|
||||
* column-major matrix, and the number of rows for a row-major matrix. */
|
||||
EIGEN_DEVICE_FUNC constexpr Index outerSize() const {
|
||||
return IsVectorAtCompileTime ? 1 : int(IsRowMajor) ? this->rows() : this->cols();
|
||||
}
|
||||
|
||||
/** \returns the inner size.
|
||||
*
|
||||
* \note For a vector, this is just the size. For a matrix (non-vector), this is the minor dimension
|
||||
* with respect to the \ref TopicStorageOrders "storage order", i.e., the number of rows for a
|
||||
* column-major matrix, and the number of columns for a row-major matrix. */
|
||||
EIGEN_DEVICE_FUNC constexpr Index innerSize() const {
|
||||
return IsVectorAtCompileTime ? this->size() : int(IsRowMajor) ? this->cols() : this->rows();
|
||||
}
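// Illustrative sketch (editorial, not from the upstream Eigen sources) of how innerSize()
// and outerSize() depend on the storage order. Names are hypothetical.
//
//   #include <Eigen/Dense>
//
//   void storage_order_example() {
//     Eigen::Matrix<double, 3, 4> a;                          // column-major (the default)
//     Eigen::Matrix<double, 3, 4, Eigen::RowMajor> b;
//     // a.innerSize() == 3 (rows), a.outerSize() == 4 (cols)
//     // b.innerSize() == 4 (cols), b.outerSize() == 3 (rows)
//   }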
|
||||
|
||||
/** Only plain matrices/arrays, not expressions, may be resized; therefore the only useful resize methods are
|
||||
* Matrix::resize() and Array::resize(). The present method only asserts that the new size equals the old size, and
|
||||
* does nothing else.
|
||||
*/
|
||||
EIGEN_DEVICE_FUNC void resize(Index newSize) {
|
||||
EIGEN_ONLY_USED_FOR_DEBUG(newSize);
|
||||
eigen_assert(newSize == this->size() && "DenseBase::resize() does not actually allow to resize.");
|
||||
}
|
||||
/** Only plain matrices/arrays, not expressions, may be resized; therefore the only useful resize methods are
|
||||
* Matrix::resize() and Array::resize(). The present method only asserts that the new size equals the old size, and
|
||||
* does nothing else.
|
||||
*/
|
||||
EIGEN_DEVICE_FUNC void resize(Index rows, Index cols) {
|
||||
EIGEN_ONLY_USED_FOR_DEBUG(rows);
|
||||
EIGEN_ONLY_USED_FOR_DEBUG(cols);
|
||||
eigen_assert(rows == this->rows() && cols == this->cols() &&
|
||||
"DenseBase::resize() does not actually allow to resize.");
|
||||
}
|
||||
|
||||
#ifndef EIGEN_PARSED_BY_DOXYGEN
|
||||
/** \internal Represents a matrix with all coefficients equal to one another */
|
||||
typedef CwiseNullaryOp<internal::scalar_constant_op<Scalar>, PlainObject> ConstantReturnType;
|
||||
/** \internal Represents a matrix with all coefficients equal to zero*/
|
||||
typedef CwiseNullaryOp<internal::scalar_zero_op<Scalar>, PlainObject> ZeroReturnType;
|
||||
/** \internal \deprecated Represents a vector with linearly spaced coefficients that allows sequential access only. */
|
||||
EIGEN_DEPRECATED typedef CwiseNullaryOp<internal::linspaced_op<Scalar>, PlainObject> SequentialLinSpacedReturnType;
|
||||
/** \internal Represents a vector with linearly spaced coefficients that allows random access. */
|
||||
typedef CwiseNullaryOp<internal::linspaced_op<Scalar>, PlainObject> RandomAccessLinSpacedReturnType;
|
||||
/** \internal Represents a vector with equally spaced coefficients that allows random access. */
|
||||
typedef CwiseNullaryOp<internal::equalspaced_op<Scalar>, PlainObject> RandomAccessEqualSpacedReturnType;
|
||||
/** \internal the return type of MatrixBase::eigenvalues() */
|
||||
typedef Matrix<typename NumTraits<typename internal::traits<Derived>::Scalar>::Real,
|
||||
internal::traits<Derived>::ColsAtCompileTime, 1>
|
||||
EigenvaluesReturnType;
|
||||
|
||||
#endif // not EIGEN_PARSED_BY_DOXYGEN
|
||||
|
||||
/** Copies \a other into *this. \returns a reference to *this. */
|
||||
template <typename OtherDerived>
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& operator=(const DenseBase<OtherDerived>& other);
|
||||
|
||||
/** Special case of the template operator=, in order to prevent the compiler
|
||||
* from generating a default operator= (issue hit with g++ 4.1)
|
||||
*/
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& operator=(const DenseBase& other);
|
||||
|
||||
template <typename OtherDerived>
|
||||
EIGEN_DEVICE_FUNC Derived& operator=(const EigenBase<OtherDerived>& other);
|
||||
|
||||
template <typename OtherDerived>
|
||||
EIGEN_DEVICE_FUNC Derived& operator+=(const EigenBase<OtherDerived>& other);
|
||||
|
||||
template <typename OtherDerived>
|
||||
EIGEN_DEVICE_FUNC Derived& operator-=(const EigenBase<OtherDerived>& other);
|
||||
|
||||
template <typename OtherDerived>
|
||||
EIGEN_DEVICE_FUNC Derived& operator=(const ReturnByValue<OtherDerived>& func);
|
||||
|
||||
/** \internal
|
||||
* Copies \a other into *this without evaluating other. \returns a reference to *this. */
|
||||
template <typename OtherDerived>
|
||||
/** \deprecated */
|
||||
EIGEN_DEPRECATED EIGEN_DEVICE_FUNC Derived& lazyAssign(const DenseBase<OtherDerived>& other);
|
||||
|
||||
EIGEN_DEVICE_FUNC CommaInitializer<Derived> operator<<(const Scalar& s);
|
||||
|
||||
template <unsigned int Added, unsigned int Removed>
|
||||
/** \deprecated it now returns \c *this */
|
||||
EIGEN_DEPRECATED const Derived& flagged() const {
|
||||
return derived();
|
||||
}
|
||||
|
||||
template <typename OtherDerived>
|
||||
EIGEN_DEVICE_FUNC CommaInitializer<Derived> operator<<(const DenseBase<OtherDerived>& other);
|
||||
|
||||
typedef Transpose<Derived> TransposeReturnType;
|
||||
EIGEN_DEVICE_FUNC TransposeReturnType transpose();
|
||||
typedef Transpose<const Derived> ConstTransposeReturnType;
|
||||
EIGEN_DEVICE_FUNC const ConstTransposeReturnType transpose() const;
|
||||
EIGEN_DEVICE_FUNC void transposeInPlace();
|
||||
|
||||
EIGEN_DEVICE_FUNC static const ConstantReturnType Constant(Index rows, Index cols, const Scalar& value);
|
||||
EIGEN_DEVICE_FUNC static const ConstantReturnType Constant(Index size, const Scalar& value);
|
||||
EIGEN_DEVICE_FUNC static const ConstantReturnType Constant(const Scalar& value);
|
||||
|
||||
EIGEN_DEPRECATED EIGEN_DEVICE_FUNC static const RandomAccessLinSpacedReturnType LinSpaced(Sequential_t, Index size,
|
||||
const Scalar& low,
|
||||
const Scalar& high);
|
||||
EIGEN_DEPRECATED EIGEN_DEVICE_FUNC static const RandomAccessLinSpacedReturnType LinSpaced(Sequential_t,
|
||||
const Scalar& low,
|
||||
const Scalar& high);
|
||||
|
||||
EIGEN_DEVICE_FUNC static const RandomAccessLinSpacedReturnType LinSpaced(Index size, const Scalar& low,
|
||||
const Scalar& high);
|
||||
EIGEN_DEVICE_FUNC static const RandomAccessLinSpacedReturnType LinSpaced(const Scalar& low, const Scalar& high);
|
||||
|
||||
EIGEN_DEVICE_FUNC static const RandomAccessEqualSpacedReturnType EqualSpaced(Index size, const Scalar& low,
|
||||
const Scalar& step);
|
||||
EIGEN_DEVICE_FUNC static const RandomAccessEqualSpacedReturnType EqualSpaced(const Scalar& low, const Scalar& step);
|
||||
|
||||
template <typename CustomNullaryOp>
|
||||
EIGEN_DEVICE_FUNC static const CwiseNullaryOp<CustomNullaryOp, PlainObject> NullaryExpr(Index rows, Index cols,
|
||||
const CustomNullaryOp& func);
|
||||
template <typename CustomNullaryOp>
|
||||
EIGEN_DEVICE_FUNC static const CwiseNullaryOp<CustomNullaryOp, PlainObject> NullaryExpr(Index size,
|
||||
const CustomNullaryOp& func);
|
||||
template <typename CustomNullaryOp>
|
||||
EIGEN_DEVICE_FUNC static const CwiseNullaryOp<CustomNullaryOp, PlainObject> NullaryExpr(const CustomNullaryOp& func);
|
||||
|
||||
EIGEN_DEVICE_FUNC static const ZeroReturnType Zero(Index rows, Index cols);
|
||||
EIGEN_DEVICE_FUNC static const ZeroReturnType Zero(Index size);
|
||||
EIGEN_DEVICE_FUNC static const ZeroReturnType Zero();
|
||||
EIGEN_DEVICE_FUNC static const ConstantReturnType Ones(Index rows, Index cols);
|
||||
EIGEN_DEVICE_FUNC static const ConstantReturnType Ones(Index size);
|
||||
EIGEN_DEVICE_FUNC static const ConstantReturnType Ones();
|
||||
|
||||
EIGEN_DEVICE_FUNC void fill(const Scalar& value);
|
||||
EIGEN_DEVICE_FUNC Derived& setConstant(const Scalar& value);
|
||||
EIGEN_DEVICE_FUNC Derived& setLinSpaced(Index size, const Scalar& low, const Scalar& high);
|
||||
EIGEN_DEVICE_FUNC Derived& setLinSpaced(const Scalar& low, const Scalar& high);
|
||||
EIGEN_DEVICE_FUNC Derived& setEqualSpaced(Index size, const Scalar& low, const Scalar& step);
|
||||
EIGEN_DEVICE_FUNC Derived& setEqualSpaced(const Scalar& low, const Scalar& step);
|
||||
EIGEN_DEVICE_FUNC Derived& setZero();
|
||||
EIGEN_DEVICE_FUNC Derived& setOnes();
|
||||
EIGEN_DEVICE_FUNC Derived& setRandom();
|
||||
|
||||
template <typename OtherDerived>
|
||||
EIGEN_DEVICE_FUNC bool isApprox(const DenseBase<OtherDerived>& other,
|
||||
const RealScalar& prec = NumTraits<Scalar>::dummy_precision()) const;
|
||||
EIGEN_DEVICE_FUNC bool isMuchSmallerThan(const RealScalar& other,
|
||||
const RealScalar& prec = NumTraits<Scalar>::dummy_precision()) const;
|
||||
template <typename OtherDerived>
|
||||
EIGEN_DEVICE_FUNC bool isMuchSmallerThan(const DenseBase<OtherDerived>& other,
|
||||
const RealScalar& prec = NumTraits<Scalar>::dummy_precision()) const;
|
||||
|
||||
EIGEN_DEVICE_FUNC bool isApproxToConstant(const Scalar& value,
|
||||
const RealScalar& prec = NumTraits<Scalar>::dummy_precision()) const;
|
||||
EIGEN_DEVICE_FUNC bool isConstant(const Scalar& value,
|
||||
const RealScalar& prec = NumTraits<Scalar>::dummy_precision()) const;
|
||||
EIGEN_DEVICE_FUNC bool isZero(const RealScalar& prec = NumTraits<Scalar>::dummy_precision()) const;
|
||||
EIGEN_DEVICE_FUNC bool isOnes(const RealScalar& prec = NumTraits<Scalar>::dummy_precision()) const;
|
||||
|
||||
EIGEN_DEVICE_FUNC inline bool hasNaN() const;
|
||||
EIGEN_DEVICE_FUNC inline bool allFinite() const;
|
||||
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& operator*=(const Scalar& other);
|
||||
template <bool Enable = !internal::is_same<Scalar, RealScalar>::value, typename = std::enable_if_t<Enable>>
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& operator*=(const RealScalar& other);
|
||||
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& operator/=(const Scalar& other);
|
||||
template <bool Enable = !internal::is_same<Scalar, RealScalar>::value, typename = std::enable_if_t<Enable>>
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& operator/=(const RealScalar& other);
|
||||
|
||||
typedef internal::add_const_on_value_type_t<typename internal::eval<Derived>::type> EvalReturnType;
|
||||
/** \returns the matrix or vector obtained by evaluating this expression.
|
||||
*
|
||||
* Notice that in the case of a plain matrix or vector (not an expression) this function just returns
|
||||
* a const reference, in order to avoid a useless copy.
|
||||
*
|
||||
* \warning Be careful with eval() and the auto C++ keyword, as detailed in this \link TopicPitfalls_auto_keyword page
|
||||
* \endlink.
|
||||
*/
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EvalReturnType eval() const {
|
||||
// Even though MSVC does not honor strong inlining when the return type
|
||||
// is a dynamic matrix, we desperately need strong inlining for fixed
|
||||
// size types on MSVC.
|
||||
return typename internal::eval<Derived>::type(derived());
|
||||
}
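// Illustrative sketch (editorial, not from the upstream Eigen sources) of the eval()/auto
// pitfall referenced in the warning above. Assumes <Eigen/Dense>; names are hypothetical.
//
//   #include <Eigen/Dense>
//
//   void eval_example() {
//     Eigen::MatrixXd a = Eigen::MatrixXd::Random(3, 3), b = Eigen::MatrixXd::Random(3, 3);
//     auto lazy = a + b;                     // expression template; re-evaluated at every use
//     Eigen::MatrixXd sum = (a + b).eval();  // forces evaluation into a concrete matrix
//   }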
|
||||
|
||||
/** swaps *this with the expression \a other.
|
||||
*
|
||||
*/
|
||||
template <typename OtherDerived>
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void swap(const DenseBase<OtherDerived>& other) {
|
||||
EIGEN_STATIC_ASSERT(!OtherDerived::IsPlainObjectBase, THIS_EXPRESSION_IS_NOT_A_LVALUE__IT_IS_READ_ONLY);
|
||||
eigen_assert(rows() == other.rows() && cols() == other.cols());
|
||||
call_assignment(derived(), other.const_cast_derived(), internal::swap_assign_op<Scalar>());
|
||||
}
|
||||
|
||||
/** swaps *this with the matrix or array \a other.
|
||||
*
|
||||
*/
|
||||
template <typename OtherDerived>
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void swap(PlainObjectBase<OtherDerived>& other) {
|
||||
eigen_assert(rows() == other.rows() && cols() == other.cols());
|
||||
call_assignment(derived(), other.derived(), internal::swap_assign_op<Scalar>());
|
||||
}
|
||||
|
||||
EIGEN_DEVICE_FUNC inline const NestByValue<Derived> nestByValue() const;
|
||||
EIGEN_DEVICE_FUNC inline const ForceAlignedAccess<Derived> forceAlignedAccess() const;
|
||||
EIGEN_DEVICE_FUNC inline ForceAlignedAccess<Derived> forceAlignedAccess();
|
||||
template <bool Enable>
|
||||
EIGEN_DEVICE_FUNC inline const std::conditional_t<Enable, ForceAlignedAccess<Derived>, Derived&>
|
||||
forceAlignedAccessIf() const;
|
||||
template <bool Enable>
|
||||
EIGEN_DEVICE_FUNC inline std::conditional_t<Enable, ForceAlignedAccess<Derived>, Derived&> forceAlignedAccessIf();
|
||||
|
||||
EIGEN_DEVICE_FUNC Scalar sum() const;
|
||||
EIGEN_DEVICE_FUNC Scalar mean() const;
|
||||
EIGEN_DEVICE_FUNC Scalar trace() const;
|
||||
|
||||
EIGEN_DEVICE_FUNC Scalar prod() const;
|
||||
|
||||
template <int NaNPropagation>
|
||||
EIGEN_DEVICE_FUNC typename internal::traits<Derived>::Scalar minCoeff() const;
|
||||
template <int NaNPropagation>
|
||||
EIGEN_DEVICE_FUNC typename internal::traits<Derived>::Scalar maxCoeff() const;
|
||||
|
||||
// By default, the fastest version with undefined NaN propagation semantics is
|
||||
// used.
|
||||
// TODO(rmlarsen): Replace with default template argument when we move to
|
||||
// c++11 or beyond.
|
||||
EIGEN_DEVICE_FUNC inline typename internal::traits<Derived>::Scalar minCoeff() const {
|
||||
return minCoeff<PropagateFast>();
|
||||
}
|
||||
EIGEN_DEVICE_FUNC inline typename internal::traits<Derived>::Scalar maxCoeff() const {
|
||||
return maxCoeff<PropagateFast>();
|
||||
}
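// Illustrative sketch (editorial, not from the upstream Eigen sources) of the NaN-propagation
// variants declared above, assuming the Eigen::PropagateNumbers / Eigen::PropagateNaN tags
// defined elsewhere in the library. Names are hypothetical.
//
//   #include <Eigen/Dense>
//   #include <limits>
//
//   void nan_propagation_example() {
//     Eigen::ArrayXf a(3);
//     a << 1.f, std::numeric_limits<float>::quiet_NaN(), 3.f;
//     float fast = a.minCoeff();                           // fastest; NaN handling unspecified
//     float nums = a.minCoeff<Eigen::PropagateNumbers>();  // ignores the NaN, yields 1.f
//     float nan  = a.minCoeff<Eigen::PropagateNaN>();      // yields NaN if any coefficient is NaN
//     (void)fast; (void)nums; (void)nan;
//   }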
|
||||
|
||||
template <int NaNPropagation, typename IndexType>
|
||||
EIGEN_DEVICE_FUNC typename internal::traits<Derived>::Scalar minCoeff(IndexType* row, IndexType* col) const;
|
||||
template <int NaNPropagation, typename IndexType>
|
||||
EIGEN_DEVICE_FUNC typename internal::traits<Derived>::Scalar maxCoeff(IndexType* row, IndexType* col) const;
|
||||
template <int NaNPropagation, typename IndexType>
|
||||
EIGEN_DEVICE_FUNC typename internal::traits<Derived>::Scalar minCoeff(IndexType* index) const;
|
||||
template <int NaNPropagation, typename IndexType>
|
||||
EIGEN_DEVICE_FUNC typename internal::traits<Derived>::Scalar maxCoeff(IndexType* index) const;
|
||||
|
||||
// TODO(rmlarsen): Replace these methods with a default template argument.
|
||||
template <typename IndexType>
|
||||
EIGEN_DEVICE_FUNC inline typename internal::traits<Derived>::Scalar minCoeff(IndexType* row, IndexType* col) const {
|
||||
return minCoeff<PropagateFast>(row, col);
|
||||
}
|
||||
template <typename IndexType>
|
||||
EIGEN_DEVICE_FUNC inline typename internal::traits<Derived>::Scalar maxCoeff(IndexType* row, IndexType* col) const {
|
||||
return maxCoeff<PropagateFast>(row, col);
|
||||
}
|
||||
template <typename IndexType>
|
||||
EIGEN_DEVICE_FUNC inline typename internal::traits<Derived>::Scalar minCoeff(IndexType* index) const {
|
||||
return minCoeff<PropagateFast>(index);
|
||||
}
|
||||
template <typename IndexType>
|
||||
EIGEN_DEVICE_FUNC inline typename internal::traits<Derived>::Scalar maxCoeff(IndexType* index) const {
|
||||
return maxCoeff<PropagateFast>(index);
|
||||
}
|
||||
|
||||
template <typename BinaryOp>
|
||||
EIGEN_DEVICE_FUNC Scalar redux(const BinaryOp& func) const;
|
||||
|
||||
template <typename Visitor>
|
||||
EIGEN_DEVICE_FUNC void visit(Visitor& func) const;
|
||||
|
||||
* \returns a WithFormat proxy object that allows printing a matrix with the given
|
||||
* format \a fmt.
|
||||
*
|
||||
* See class IOFormat for some examples.
|
||||
*
|
||||
* \sa class IOFormat, class WithFormat
|
||||
*/
|
||||
inline const WithFormat<Derived> format(const IOFormat& fmt) const { return WithFormat<Derived>(derived(), fmt); }
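// Illustrative sketch (editorial, not from the upstream Eigen sources) of format() with a
// custom IOFormat, as mentioned above. Assumes <Eigen/Dense>; names are hypothetical.
//
//   #include <Eigen/Dense>
//   #include <iostream>
//
//   void format_example() {
//     Eigen::IOFormat clean(4, 0, ", ", "\n", "[", "]");  // precision, flags, separators, row prefix/suffix
//     Eigen::MatrixXd m = Eigen::MatrixXd::Random(2, 2);
//     std::cout << m.format(clean) << "\n";
//   }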
|
||||
|
||||
/** \returns the unique coefficient of a 1x1 expression */
|
||||
EIGEN_DEVICE_FUNC CoeffReturnType value() const {
|
||||
EIGEN_STATIC_ASSERT_SIZE_1x1(Derived) eigen_assert(this->rows() == 1 && this->cols() == 1);
|
||||
return derived().coeff(0, 0);
|
||||
}
|
||||
|
||||
EIGEN_DEVICE_FUNC bool all() const;
|
||||
EIGEN_DEVICE_FUNC bool any() const;
|
||||
EIGEN_DEVICE_FUNC Index count() const;
|
||||
|
||||
typedef VectorwiseOp<Derived, Horizontal> RowwiseReturnType;
|
||||
typedef const VectorwiseOp<const Derived, Horizontal> ConstRowwiseReturnType;
|
||||
typedef VectorwiseOp<Derived, Vertical> ColwiseReturnType;
|
||||
typedef const VectorwiseOp<const Derived, Vertical> ConstColwiseReturnType;
|
||||
|
||||
/** \returns a VectorwiseOp wrapper of *this for broadcasting and partial reductions
|
||||
*
|
||||
* Example: \include MatrixBase_rowwise.cpp
|
||||
* Output: \verbinclude MatrixBase_rowwise.out
|
||||
*
|
||||
* \sa colwise(), class VectorwiseOp, \ref TutorialReductionsVisitorsBroadcasting
|
||||
*/
|
||||
// Code moved here due to a CUDA compiler bug
|
||||
EIGEN_DEVICE_FUNC inline ConstRowwiseReturnType rowwise() const { return ConstRowwiseReturnType(derived()); }
|
||||
EIGEN_DEVICE_FUNC RowwiseReturnType rowwise();
|
||||
|
||||
/** \returns a VectorwiseOp wrapper of *this for broadcasting and partial reductions
|
||||
*
|
||||
* Example: \include MatrixBase_colwise.cpp
|
||||
* Output: \verbinclude MatrixBase_colwise.out
|
||||
*
|
||||
* \sa rowwise(), class VectorwiseOp, \ref TutorialReductionsVisitorsBroadcasting
|
||||
*/
|
||||
EIGEN_DEVICE_FUNC inline ConstColwiseReturnType colwise() const { return ConstColwiseReturnType(derived()); }
|
||||
EIGEN_DEVICE_FUNC ColwiseReturnType colwise();
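// Illustrative sketch (editorial, not from the upstream Eigen sources) of the rowwise()/colwise()
// partial reductions documented above. Assumes <Eigen/Dense>; names are hypothetical.
//
//   #include <Eigen/Dense>
//
//   void partial_reduction_example() {
//     Eigen::MatrixXd m = Eigen::MatrixXd::Random(3, 4);
//     Eigen::RowVectorXd colSums = m.colwise().sum();   // 1 x 4: one sum per column
//     Eigen::VectorXd rowMax = m.rowwise().maxCoeff();  // 3 x 1: one maximum per row
//   }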
|
||||
|
||||
typedef CwiseNullaryOp<internal::scalar_random_op<Scalar>, PlainObject> RandomReturnType;
|
||||
static const RandomReturnType Random(Index rows, Index cols);
|
||||
static const RandomReturnType Random(Index size);
|
||||
static const RandomReturnType Random();
|
||||
|
||||
template <typename ThenDerived, typename ElseDerived>
|
||||
inline EIGEN_DEVICE_FUNC
|
||||
CwiseTernaryOp<internal::scalar_boolean_select_op<typename DenseBase<ThenDerived>::Scalar,
|
||||
typename DenseBase<ElseDerived>::Scalar, Scalar>,
|
||||
ThenDerived, ElseDerived, Derived>
|
||||
select(const DenseBase<ThenDerived>& thenMatrix, const DenseBase<ElseDerived>& elseMatrix) const;
|
||||
|
||||
template <typename ThenDerived>
|
||||
inline EIGEN_DEVICE_FUNC
|
||||
CwiseTernaryOp<internal::scalar_boolean_select_op<typename DenseBase<ThenDerived>::Scalar,
|
||||
typename DenseBase<ThenDerived>::Scalar, Scalar>,
|
||||
ThenDerived, typename DenseBase<ThenDerived>::ConstantReturnType, Derived>
|
||||
select(const DenseBase<ThenDerived>& thenMatrix, const typename DenseBase<ThenDerived>::Scalar& elseScalar) const;
|
||||
|
||||
template <typename ElseDerived>
|
||||
inline EIGEN_DEVICE_FUNC
|
||||
CwiseTernaryOp<internal::scalar_boolean_select_op<typename DenseBase<ElseDerived>::Scalar,
|
||||
typename DenseBase<ElseDerived>::Scalar, Scalar>,
|
||||
typename DenseBase<ElseDerived>::ConstantReturnType, ElseDerived, Derived>
|
||||
select(const typename DenseBase<ElseDerived>::Scalar& thenScalar, const DenseBase<ElseDerived>& elseMatrix) const;
|
||||
|
||||
template <int p>
|
||||
RealScalar lpNorm() const;
|
||||
|
||||
template <int RowFactor, int ColFactor>
|
||||
EIGEN_DEVICE_FUNC const Replicate<Derived, RowFactor, ColFactor> replicate() const;
|
||||
/**
|
||||
* \return an expression of the replication of \c *this
|
||||
*
|
||||
* Example: \include MatrixBase_replicate_int_int.cpp
|
||||
* Output: \verbinclude MatrixBase_replicate_int_int.out
|
||||
*
|
||||
* \sa VectorwiseOp::replicate(), DenseBase::replicate<int,int>(), class Replicate
|
||||
*/
|
||||
// Code moved here due to a CUDA compiler bug
|
||||
EIGEN_DEVICE_FUNC const Replicate<Derived, Dynamic, Dynamic> replicate(Index rowFactor, Index colFactor) const {
|
||||
return Replicate<Derived, Dynamic, Dynamic>(derived(), rowFactor, colFactor);
|
||||
}
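// Illustrative sketch (editorial, not from the upstream Eigen sources) of the two replicate()
// variants above. Assumes <Eigen/Dense>; names are hypothetical.
//
//   #include <Eigen/Dense>
//
//   void replicate_example() {
//     Eigen::Vector3d v(1.0, 2.0, 3.0);
//     Eigen::MatrixXd fourCols = v.replicate(1, 4);  // 3 x 4: v repeated as four columns
//     Eigen::MatrixXd tiled = v.replicate<2, 2>();   // 6 x 2: replication factors fixed at compile time
//   }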
|
||||
|
||||
typedef Reverse<Derived, BothDirections> ReverseReturnType;
|
||||
typedef const Reverse<const Derived, BothDirections> ConstReverseReturnType;
|
||||
EIGEN_DEVICE_FUNC ReverseReturnType reverse();
|
||||
/** This is the const version of reverse(). */
|
||||
// Code moved here due to a CUDA compiler bug
|
||||
EIGEN_DEVICE_FUNC ConstReverseReturnType reverse() const { return ConstReverseReturnType(derived()); }
|
||||
EIGEN_DEVICE_FUNC void reverseInPlace();

#ifdef EIGEN_PARSED_BY_DOXYGEN
/** STL-like <a href="https://en.cppreference.com/w/cpp/named_req/RandomAccessIterator">RandomAccessIterator</a>
 * iterator type as returned by the begin() and end() methods.
 */
typedef random_access_iterator_type iterator;
/** This is the const version of iterator (aka read-only) */
typedef random_access_iterator_type const_iterator;
#else
typedef std::conditional_t<(Flags & DirectAccessBit) == DirectAccessBit,
                           internal::pointer_based_stl_iterator<Derived>,
                           internal::generic_randaccess_stl_iterator<Derived> >
    iterator_type;

typedef std::conditional_t<(Flags & DirectAccessBit) == DirectAccessBit,
                           internal::pointer_based_stl_iterator<const Derived>,
                           internal::generic_randaccess_stl_iterator<const Derived> >
    const_iterator_type;

// Stl-style iterators are supported only for vectors.

typedef std::conditional_t<IsVectorAtCompileTime, iterator_type, void> iterator;

typedef std::conditional_t<IsVectorAtCompileTime, const_iterator_type, void> const_iterator;
#endif

inline iterator begin();
inline const_iterator begin() const;
inline const_iterator cbegin() const;
inline iterator end();
inline const_iterator end() const;
inline const_iterator cend() const;
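
// Illustrative sketch (not part of the original sources), assuming <algorithm> and <numeric> are included:
// \code
// Eigen::VectorXd v = Eigen::VectorXd::LinSpaced(4, 0.0, 3.0);
// double sum = std::accumulate(v.begin(), v.end(), 0.0);  // 0 + 1 + 2 + 3 = 6
// std::sort(v.begin(), v.end());                          // the iterators are random-access
// for (double x : v) { /* range-for works because v is a compile-time vector */ }
// \endcode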

using RealViewReturnType = std::conditional_t<NumTraits<Scalar>::IsComplex, RealView<Derived>, Derived&>;
using ConstRealViewReturnType =
    std::conditional_t<NumTraits<Scalar>::IsComplex, RealView<const Derived>, const Derived&>;

EIGEN_DEVICE_FUNC RealViewReturnType realView();
EIGEN_DEVICE_FUNC ConstRealViewReturnType realView() const;
|
||||
|
||||
#define EIGEN_CURRENT_STORAGE_BASE_CLASS Eigen::DenseBase
|
||||
#define EIGEN_DOC_BLOCK_ADDONS_NOT_INNER_PANEL
|
||||
#define EIGEN_DOC_BLOCK_ADDONS_INNER_PANEL_IF(COND)
|
||||
#define EIGEN_DOC_UNARY_ADDONS(X, Y)
|
||||
#include "../plugins/CommonCwiseUnaryOps.inc"
|
||||
#include "../plugins/BlockMethods.inc"
|
||||
#include "../plugins/IndexedViewMethods.inc"
|
||||
#include "../plugins/ReshapedMethods.inc"
|
||||
#ifdef EIGEN_DENSEBASE_PLUGIN
|
||||
#include EIGEN_DENSEBASE_PLUGIN
|
||||
#endif
|
||||
#undef EIGEN_CURRENT_STORAGE_BASE_CLASS
|
||||
#undef EIGEN_DOC_BLOCK_ADDONS_NOT_INNER_PANEL
|
||||
#undef EIGEN_DOC_BLOCK_ADDONS_INNER_PANEL_IF
|
||||
#undef EIGEN_DOC_UNARY_ADDONS
|
||||
|
||||
// disable the use of evalTo for dense objects with a nice compilation error
|
||||
template <typename Dest>
|
||||
EIGEN_DEVICE_FUNC inline void evalTo(Dest&) const {
|
||||
EIGEN_STATIC_ASSERT((internal::is_same<Dest, void>::value),
|
||||
THE_EVAL_EVALTO_FUNCTION_SHOULD_NEVER_BE_CALLED_FOR_DENSE_OBJECTS);
|
||||
}
|
||||
|
||||
protected:
|
||||
EIGEN_DEFAULT_COPY_CONSTRUCTOR(DenseBase)
|
||||
/** Default constructor. Do nothing. */
|
||||
#ifdef EIGEN_INTERNAL_DEBUGGING
|
||||
EIGEN_DEVICE_FUNC constexpr DenseBase() {
|
||||
/* Just checks for self-consistency of the flags.
|
||||
* Only do it when debugging Eigen, as this borders on paranoia and could slow compilation down
|
||||
*/
|
||||
EIGEN_STATIC_ASSERT(
|
||||
(internal::check_implication(MaxRowsAtCompileTime == 1 && MaxColsAtCompileTime != 1, int(IsRowMajor)) &&
|
||||
internal::check_implication(MaxColsAtCompileTime == 1 && MaxRowsAtCompileTime != 1, int(!IsRowMajor))),
|
||||
INVALID_STORAGE_ORDER_FOR_THIS_VECTOR_EXPRESSION)
|
||||
}
|
||||
#else
|
||||
EIGEN_DEVICE_FUNC constexpr DenseBase() = default;
|
||||
#endif
|
||||
|
||||
private:
|
||||
EIGEN_DEVICE_FUNC explicit DenseBase(int);
|
||||
EIGEN_DEVICE_FUNC DenseBase(int, int);
|
||||
template <typename OtherDerived>
|
||||
EIGEN_DEVICE_FUNC explicit DenseBase(const DenseBase<OtherDerived>&);
|
||||
};
|
||||
|
||||
/** Free-function swap.
 */
template <typename DerivedA, typename DerivedB>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
    // Use forwarding references to capture all combinations of cv-qualified l+r-value cases.
    std::enable_if_t<std::is_base_of<DenseBase<std::decay_t<DerivedA>>, std::decay_t<DerivedA>>::value &&
                         std::is_base_of<DenseBase<std::decay_t<DerivedB>>, std::decay_t<DerivedB>>::value,
                     void>
    swap(DerivedA&& a, DerivedB&& b) {
  a.swap(b);
}
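
// Illustrative sketch (not part of the original sources):
// \code
// Eigen::MatrixXf a = Eigen::MatrixXf::Zero(2, 2);
// Eigen::MatrixXf b = Eigen::MatrixXf::Ones(3, 3);
// swap(a, b);  // found via ADL; equivalent to a.swap(b)
// // a is now the 3x3 matrix of ones, b the 2x2 matrix of zeros.
// \endcode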
|
||||
|
||||
} // end namespace Eigen
|
||||
|
||||
#endif // EIGEN_DENSEBASE_H
|
||||
@@ -0,0 +1,568 @@
|
||||
// This file is part of Eigen, a lightweight C++ template library
|
||||
// for linear algebra.
|
||||
//
|
||||
// Copyright (C) 2006-2010 Benoit Jacob <jacob.benoit.1@gmail.com>
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla
|
||||
// Public License v. 2.0. If a copy of the MPL was not distributed
|
||||
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
#ifndef EIGEN_DENSECOEFFSBASE_H
|
||||
#define EIGEN_DENSECOEFFSBASE_H
|
||||
|
||||
// IWYU pragma: private
|
||||
#include "./InternalHeaderCheck.h"
|
||||
|
||||
namespace Eigen {
|
||||
|
||||
namespace internal {
|
||||
template <typename T>
|
||||
struct add_const_on_value_type_if_arithmetic {
|
||||
typedef std::conditional_t<is_arithmetic<T>::value, T, add_const_on_value_type_t<T>> type;
|
||||
};
|
||||
} // namespace internal
|
||||
|
||||
/** \brief Base class providing read-only coefficient access to matrices and arrays.
|
||||
* \ingroup Core_Module
|
||||
* \tparam Derived Type of the derived class
|
||||
*
|
||||
* \note #ReadOnlyAccessors Constant indicating read-only access
|
||||
*
|
||||
* This class defines the \c operator() \c const function and friends, which can be used to read specific
|
||||
* entries of a matrix or array.
|
||||
*
|
||||
* \sa DenseCoeffsBase<Derived, WriteAccessors>, DenseCoeffsBase<Derived, DirectAccessors>,
|
||||
* \ref TopicClassHierarchy
|
||||
*/
|
||||
template <typename Derived>
|
||||
class DenseCoeffsBase<Derived, ReadOnlyAccessors> : public EigenBase<Derived> {
|
||||
public:
|
||||
typedef typename internal::traits<Derived>::StorageKind StorageKind;
|
||||
typedef typename internal::traits<Derived>::Scalar Scalar;
|
||||
typedef typename internal::packet_traits<Scalar>::type PacketScalar;
|
||||
|
||||
// Explanation for this CoeffReturnType typedef.
// - This is the return type of the coeff() method.
// - The LvalueBit means exactly that we can offer a coeffRef() method, which means exactly that we can get references
// to coeffs, which means exactly that we can have coeff() return a const reference (as opposed to returning a value).
// - The DirectAccessBit means exactly that the underlying data of coefficients can be directly accessed as a plain
// strided array, which means exactly that the underlying data of coefficients does exist in memory, which means
// exactly that the coefficients are const-referenceable, which means exactly that we can have coeff() return a const
// reference. For example, Map<const Matrix> has DirectAccessBit but not LvalueBit, so Map<const Matrix>.coeff()
// points to a const Scalar& which exists in memory, while it does not allow coeffRef() as that would not provide an
// lvalue. Notice that DirectAccessBit and LvalueBit are orthogonal to each other.
// - The is_arithmetic check is required since "const int", "const double", etc. will cause warnings on some systems
// while the declaration of "const T", where T is a non-arithmetic type, does not. Always returning "const Scalar&" is
// not possible, since the underlying expressions might not offer a valid address for the reference to refer to.
typedef std::conditional_t<bool(internal::traits<Derived>::Flags&(LvalueBit | DirectAccessBit)), const Scalar&,
                           std::conditional_t<internal::is_arithmetic<Scalar>::value, Scalar, const Scalar>>
    CoeffReturnType;

typedef typename internal::add_const_on_value_type_if_arithmetic<typename internal::packet_traits<Scalar>::type>::type
    PacketReturnType;
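
// Illustrative sketch (not part of the original sources) of how CoeffReturnType resolves,
// assuming <type_traits> is included:
// \code
// static_assert(std::is_same<Eigen::MatrixXd::CoeffReturnType, const double&>::value,
//               "plain objects carry LvalueBit, so coeff() returns a const reference");
// // An expression such as `m + m` has no LvalueBit and an arithmetic Scalar,
// // so its CoeffReturnType is a plain `double` returned by value.
// \endcode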
|
||||
|
||||
typedef EigenBase<Derived> Base;
|
||||
using Base::cols;
|
||||
using Base::derived;
|
||||
using Base::rows;
|
||||
using Base::size;
|
||||
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index rowIndexByOuterInner(Index outer, Index inner) const {
|
||||
return int(Derived::RowsAtCompileTime) == 1 ? 0
|
||||
: int(Derived::ColsAtCompileTime) == 1 ? inner
|
||||
: int(Derived::Flags) & RowMajorBit ? outer
|
||||
: inner;
|
||||
}
|
||||
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index colIndexByOuterInner(Index outer, Index inner) const {
|
||||
return int(Derived::ColsAtCompileTime) == 1 ? 0
|
||||
: int(Derived::RowsAtCompileTime) == 1 ? inner
|
||||
: int(Derived::Flags) & RowMajorBit ? inner
|
||||
: outer;
|
||||
}
|
||||
|
||||
/** Short version: don't use this function, use
|
||||
* \link operator()(Index,Index) const \endlink instead.
|
||||
*
|
||||
* Long version: this function is similar to
|
||||
* \link operator()(Index,Index) const \endlink, but without the assertion.
|
||||
* Use this for limiting the performance cost of debugging code when doing
|
||||
* repeated coefficient access. Only use this when it is guaranteed that the
|
||||
* parameters \a row and \a col are in range.
|
||||
*
|
||||
* If EIGEN_INTERNAL_DEBUGGING is defined, an assertion will be made, making this
|
||||
* function equivalent to \link operator()(Index,Index) const \endlink.
|
||||
*
|
||||
* \sa operator()(Index,Index) const, coeffRef(Index,Index), coeff(Index) const
|
||||
*/
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr CoeffReturnType coeff(Index row, Index col) const {
|
||||
eigen_internal_assert(row >= 0 && row < rows() && col >= 0 && col < cols());
|
||||
return internal::evaluator<Derived>(derived()).coeff(row, col);
|
||||
}
|
||||
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr CoeffReturnType coeffByOuterInner(Index outer, Index inner) const {
|
||||
return coeff(rowIndexByOuterInner(outer, inner), colIndexByOuterInner(outer, inner));
|
||||
}
|
||||
|
||||
/** \returns the coefficient at the given row and column.
 *
 * \sa operator()(Index,Index), operator[](Index)
 */
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr CoeffReturnType operator()(Index row, Index col) const {
  eigen_assert(row >= 0 && row < rows() && col >= 0 && col < cols());
  return coeff(row, col);
}
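
// Illustrative sketch (not part of the original sources) contrasting checked and unchecked access:
// \code
// Eigen::Matrix2d m;
// m << 1, 2,
//      3, 4;
// double a = m(1, 0);        // range-checked by eigen_assert in debug builds
// double b = m.coeff(1, 0);  // unchecked: only use when the indices are known to be in range
// \endcode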
|
||||
|
||||
/** Short version: don't use this function, use
|
||||
* \link operator[](Index) const \endlink instead.
|
||||
*
|
||||
* Long version: this function is similar to
|
||||
* \link operator[](Index) const \endlink, but without the assertion.
|
||||
* Use this for limiting the performance cost of debugging code when doing
|
||||
* repeated coefficient access. Only use this when it is guaranteed that the
|
||||
* parameter \a index is in range.
|
||||
*
|
||||
* If EIGEN_INTERNAL_DEBUGGING is defined, an assertion will be made, making this
|
||||
* function equivalent to \link operator[](Index) const \endlink.
|
||||
*
|
||||
* \sa operator[](Index) const, coeffRef(Index), coeff(Index,Index) const
|
||||
*/
|
||||
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr CoeffReturnType coeff(Index index) const {
|
||||
EIGEN_STATIC_ASSERT(internal::evaluator<Derived>::Flags & LinearAccessBit,
|
||||
THIS_COEFFICIENT_ACCESSOR_TAKING_ONE_ACCESS_IS_ONLY_FOR_EXPRESSIONS_ALLOWING_LINEAR_ACCESS)
|
||||
eigen_internal_assert(index >= 0 && index < size());
|
||||
return internal::evaluator<Derived>(derived()).coeff(index);
|
||||
}
|
||||
|
||||
/** \returns the coefficient at the given index.
|
||||
*
|
||||
* This method is allowed only for vector expressions, and for matrix expressions having the LinearAccessBit.
|
||||
*
|
||||
* \sa operator[](Index), operator()(Index,Index) const, x() const, y() const,
|
||||
* z() const, w() const
|
||||
*/
|
||||
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr CoeffReturnType operator[](Index index) const {
|
||||
EIGEN_STATIC_ASSERT(Derived::IsVectorAtCompileTime,
|
||||
THE_BRACKET_OPERATOR_IS_ONLY_FOR_VECTORS__USE_THE_PARENTHESIS_OPERATOR_INSTEAD)
|
||||
eigen_assert(index >= 0 && index < size());
|
||||
return coeff(index);
|
||||
}
|
||||
|
||||
/** \returns the coefficient at the given index.
|
||||
*
|
||||
* This is synonymous to operator[](Index) const.
|
||||
*
|
||||
* This method is allowed only for vector expressions, and for matrix expressions having the LinearAccessBit.
|
||||
*
|
||||
* \sa operator[](Index), operator()(Index,Index) const, x() const, y() const,
|
||||
* z() const, w() const
|
||||
*/
|
||||
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr CoeffReturnType operator()(Index index) const {
|
||||
eigen_assert(index >= 0 && index < size());
|
||||
return coeff(index);
|
||||
}
|
||||
|
||||
/** equivalent to operator[](0). */
|
||||
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr CoeffReturnType x() const { return (*this)[0]; }
|
||||
|
||||
/** equivalent to operator[](1). */
|
||||
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr CoeffReturnType y() const {
|
||||
EIGEN_STATIC_ASSERT(Derived::SizeAtCompileTime == -1 || Derived::SizeAtCompileTime >= 2, OUT_OF_RANGE_ACCESS);
|
||||
return (*this)[1];
|
||||
}
|
||||
|
||||
/** equivalent to operator[](2). */
|
||||
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr CoeffReturnType z() const {
|
||||
EIGEN_STATIC_ASSERT(Derived::SizeAtCompileTime == -1 || Derived::SizeAtCompileTime >= 3, OUT_OF_RANGE_ACCESS);
|
||||
return (*this)[2];
|
||||
}
|
||||
|
||||
/** equivalent to operator[](3). */
|
||||
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr CoeffReturnType w() const {
|
||||
EIGEN_STATIC_ASSERT(Derived::SizeAtCompileTime == -1 || Derived::SizeAtCompileTime >= 4, OUT_OF_RANGE_ACCESS);
|
||||
return (*this)[3];
|
||||
}
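
// Illustrative sketch (not part of the original sources):
// \code
// Eigen::Vector4f v(1.f, 2.f, 3.f, 4.f);
// float sum = v.x() + v.y() + v.z() + v.w();  // 10
// \endcode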
|
||||
|
||||
/** \internal
|
||||
* \returns the packet of coefficients starting at the given row and column. It is your responsibility
|
||||
* to ensure that a packet really starts there. This method is only available on expressions having the
|
||||
* PacketAccessBit.
|
||||
*
|
||||
* The \a LoadMode parameter may have the value \a #Aligned or \a #Unaligned. Its effect is to select
|
||||
* the appropriate vectorization instruction. Aligned access is faster, but is only possible for packets
|
||||
* starting at an address which is a multiple of the packet size.
|
||||
*/
|
||||
|
||||
template <int LoadMode>
|
||||
EIGEN_STRONG_INLINE PacketReturnType packet(Index row, Index col) const {
|
||||
typedef typename internal::packet_traits<Scalar>::type DefaultPacketType;
|
||||
eigen_internal_assert(row >= 0 && row < rows() && col >= 0 && col < cols());
|
||||
return internal::evaluator<Derived>(derived()).template packet<LoadMode, DefaultPacketType>(row, col);
|
||||
}
|
||||
|
||||
/** \internal */
|
||||
template <int LoadMode>
|
||||
EIGEN_STRONG_INLINE PacketReturnType packetByOuterInner(Index outer, Index inner) const {
|
||||
return packet<LoadMode>(rowIndexByOuterInner(outer, inner), colIndexByOuterInner(outer, inner));
|
||||
}
|
||||
|
||||
/** \internal
|
||||
* \returns the packet of coefficients starting at the given index. It is your responsibility
|
||||
* to ensure that a packet really starts there. This method is only available on expressions having the
|
||||
* PacketAccessBit and the LinearAccessBit.
|
||||
*
|
||||
* The \a LoadMode parameter may have the value \a #Aligned or \a #Unaligned. Its effect is to select
|
||||
* the appropriate vectorization instruction. Aligned access is faster, but is only possible for packets
|
||||
* starting at an address which is a multiple of the packet size.
|
||||
*/
|
||||
|
||||
template <int LoadMode>
|
||||
EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const {
|
||||
EIGEN_STATIC_ASSERT(internal::evaluator<Derived>::Flags & LinearAccessBit,
|
||||
THIS_COEFFICIENT_ACCESSOR_TAKING_ONE_ACCESS_IS_ONLY_FOR_EXPRESSIONS_ALLOWING_LINEAR_ACCESS)
|
||||
typedef typename internal::packet_traits<Scalar>::type DefaultPacketType;
|
||||
eigen_internal_assert(index >= 0 && index < size());
|
||||
return internal::evaluator<Derived>(derived()).template packet<LoadMode, DefaultPacketType>(index);
|
||||
}
|
||||
|
||||
protected:
|
||||
// explanation: DenseBase is doing "using ..." on the methods from DenseCoeffsBase.
// But some methods are only available in the DirectAccess case.
// So we add dummy methods here with these names, so that "using ..." doesn't fail.
// They are not private so that the child class DenseBase can access them, and not public
// either since they are an implementation detail, so they have to be protected.
|
||||
void coeffRef();
|
||||
void coeffRefByOuterInner();
|
||||
void writePacket();
|
||||
void writePacketByOuterInner();
|
||||
void copyCoeff();
|
||||
void copyCoeffByOuterInner();
|
||||
void copyPacket();
|
||||
void copyPacketByOuterInner();
|
||||
void stride();
|
||||
void innerStride();
|
||||
void outerStride();
|
||||
void rowStride();
|
||||
void colStride();
|
||||
};
|
||||
|
||||
/** \brief Base class providing read/write coefficient access to matrices and arrays.
|
||||
* \ingroup Core_Module
|
||||
* \tparam Derived Type of the derived class
|
||||
*
|
||||
* \note #WriteAccessors Constant indicating read/write access
|
||||
*
|
||||
* This class defines the non-const \c operator() function and friends, which can be used to write specific
|
||||
* entries of a matrix or array. This class inherits DenseCoeffsBase<Derived, ReadOnlyAccessors> which
|
||||
* defines the const variant for reading specific entries.
|
||||
*
|
||||
* \sa DenseCoeffsBase<Derived, DirectAccessors>, \ref TopicClassHierarchy
|
||||
*/
|
||||
template <typename Derived>
|
||||
class DenseCoeffsBase<Derived, WriteAccessors> : public DenseCoeffsBase<Derived, ReadOnlyAccessors> {
|
||||
public:
|
||||
typedef DenseCoeffsBase<Derived, ReadOnlyAccessors> Base;
|
||||
|
||||
typedef typename internal::traits<Derived>::StorageKind StorageKind;
|
||||
typedef typename internal::traits<Derived>::Scalar Scalar;
|
||||
typedef typename internal::packet_traits<Scalar>::type PacketScalar;
|
||||
typedef typename NumTraits<Scalar>::Real RealScalar;
|
||||
|
||||
using Base::coeff;
|
||||
using Base::colIndexByOuterInner;
|
||||
using Base::cols;
|
||||
using Base::derived;
|
||||
using Base::rowIndexByOuterInner;
|
||||
using Base::rows;
|
||||
using Base::size;
|
||||
using Base::operator[];
|
||||
using Base::operator();
|
||||
using Base::w;
|
||||
using Base::x;
|
||||
using Base::y;
|
||||
using Base::z;
|
||||
|
||||
/** Short version: don't use this function, use
|
||||
* \link operator()(Index,Index) \endlink instead.
|
||||
*
|
||||
* Long version: this function is similar to
|
||||
* \link operator()(Index,Index) \endlink, but without the assertion.
|
||||
* Use this for limiting the performance cost of debugging code when doing
|
||||
* repeated coefficient access. Only use this when it is guaranteed that the
|
||||
* parameters \a row and \a col are in range.
|
||||
*
|
||||
* If EIGEN_INTERNAL_DEBUGGING is defined, an assertion will be made, making this
|
||||
* function equivalent to \link operator()(Index,Index) \endlink.
|
||||
*
|
||||
* \sa operator()(Index,Index), coeff(Index, Index) const, coeffRef(Index)
|
||||
*/
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Scalar& coeffRef(Index row, Index col) {
|
||||
eigen_internal_assert(row >= 0 && row < rows() && col >= 0 && col < cols());
|
||||
return internal::evaluator<Derived>(derived()).coeffRef(row, col);
|
||||
}
|
||||
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRefByOuterInner(Index outer, Index inner) {
|
||||
return coeffRef(rowIndexByOuterInner(outer, inner), colIndexByOuterInner(outer, inner));
|
||||
}
|
||||
|
||||
/** \returns a reference to the coefficient at the given row and column.
|
||||
*
|
||||
* \sa operator[](Index)
|
||||
*/
|
||||
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Scalar& operator()(Index row, Index col) {
|
||||
eigen_assert(row >= 0 && row < rows() && col >= 0 && col < cols());
|
||||
return coeffRef(row, col);
|
||||
}
|
||||
|
||||
/** Short version: don't use this function, use
|
||||
* \link operator[](Index) \endlink instead.
|
||||
*
|
||||
* Long version: this function is similar to
|
||||
* \link operator[](Index) \endlink, but without the assertion.
|
||||
* Use this for limiting the performance cost of debugging code when doing
|
||||
* repeated coefficient access. Only use this when it is guaranteed that the
|
||||
* parameter \a index is in range.
|
||||
*
|
||||
* If EIGEN_INTERNAL_DEBUGGING is defined, an assertion will be made, making this
|
||||
* function equivalent to \link operator[](Index) \endlink.
|
||||
*
|
||||
* \sa operator[](Index), coeff(Index) const, coeffRef(Index,Index)
|
||||
*/
|
||||
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Scalar& coeffRef(Index index) {
|
||||
EIGEN_STATIC_ASSERT(internal::evaluator<Derived>::Flags & LinearAccessBit,
|
||||
THIS_COEFFICIENT_ACCESSOR_TAKING_ONE_ACCESS_IS_ONLY_FOR_EXPRESSIONS_ALLOWING_LINEAR_ACCESS)
|
||||
eigen_internal_assert(index >= 0 && index < size());
|
||||
return internal::evaluator<Derived>(derived()).coeffRef(index);
|
||||
}
|
||||
|
||||
/** \returns a reference to the coefficient at given index.
|
||||
*
|
||||
* This method is allowed only for vector expressions, and for matrix expressions having the LinearAccessBit.
|
||||
*
|
||||
* \sa operator[](Index) const, operator()(Index,Index), x(), y(), z(), w()
|
||||
*/
|
||||
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Scalar& operator[](Index index) {
|
||||
EIGEN_STATIC_ASSERT(Derived::IsVectorAtCompileTime,
|
||||
THE_BRACKET_OPERATOR_IS_ONLY_FOR_VECTORS__USE_THE_PARENTHESIS_OPERATOR_INSTEAD)
|
||||
eigen_assert(index >= 0 && index < size());
|
||||
return coeffRef(index);
|
||||
}
|
||||
|
||||
/** \returns a reference to the coefficient at given index.
|
||||
*
|
||||
* This is synonymous to operator[](Index).
|
||||
*
|
||||
* This method is allowed only for vector expressions, and for matrix expressions having the LinearAccessBit.
|
||||
*
|
||||
* \sa operator[](Index) const, operator()(Index,Index), x(), y(), z(), w()
|
||||
*/
|
||||
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Scalar& operator()(Index index) {
|
||||
eigen_assert(index >= 0 && index < size());
|
||||
return coeffRef(index);
|
||||
}
|
||||
|
||||
/** equivalent to operator[](0). */
|
||||
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Scalar& x() { return (*this)[0]; }
|
||||
|
||||
/** equivalent to operator[](1). */
|
||||
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Scalar& y() {
|
||||
EIGEN_STATIC_ASSERT(Derived::SizeAtCompileTime == -1 || Derived::SizeAtCompileTime >= 2, OUT_OF_RANGE_ACCESS);
|
||||
return (*this)[1];
|
||||
}
|
||||
|
||||
/** equivalent to operator[](2). */
|
||||
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Scalar& z() {
|
||||
EIGEN_STATIC_ASSERT(Derived::SizeAtCompileTime == -1 || Derived::SizeAtCompileTime >= 3, OUT_OF_RANGE_ACCESS);
|
||||
return (*this)[2];
|
||||
}
|
||||
|
||||
/** equivalent to operator[](3). */
|
||||
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Scalar& w() {
|
||||
EIGEN_STATIC_ASSERT(Derived::SizeAtCompileTime == -1 || Derived::SizeAtCompileTime >= 4, OUT_OF_RANGE_ACCESS);
|
||||
return (*this)[3];
|
||||
}
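
// Illustrative sketch (not part of the original sources):
// \code
// Eigen::Vector3f v = Eigen::Vector3f::Zero();
// v(0) = 1.f;           // range-checked write access
// v.coeffRef(1) = 2.f;  // unchecked write access
// v.z() = 3.f;          // v is now (1, 2, 3)
// \endcode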
|
||||
};
|
||||
|
||||
/** \brief Base class providing direct read-only coefficient access to matrices and arrays.
|
||||
* \ingroup Core_Module
|
||||
* \tparam Derived Type of the derived class
|
||||
*
|
||||
* \note #DirectAccessors Constant indicating direct access
|
||||
*
|
||||
* This class defines functions to work with strides which can be used to access entries directly. This class
|
||||
* inherits DenseCoeffsBase<Derived, ReadOnlyAccessors> which defines functions to access entries read-only using
|
||||
* \c operator() .
|
||||
*
|
||||
* \sa \blank \ref TopicClassHierarchy
|
||||
*/
|
||||
template <typename Derived>
|
||||
class DenseCoeffsBase<Derived, DirectAccessors> : public DenseCoeffsBase<Derived, ReadOnlyAccessors> {
|
||||
public:
|
||||
typedef DenseCoeffsBase<Derived, ReadOnlyAccessors> Base;
|
||||
typedef typename internal::traits<Derived>::Scalar Scalar;
|
||||
typedef typename NumTraits<Scalar>::Real RealScalar;
|
||||
|
||||
using Base::cols;
|
||||
using Base::derived;
|
||||
using Base::rows;
|
||||
using Base::size;
|
||||
|
||||
/** \returns the pointer increment between two consecutive elements within a slice in the inner direction.
|
||||
*
|
||||
* \sa outerStride(), rowStride(), colStride()
|
||||
*/
|
||||
EIGEN_DEVICE_FUNC constexpr Index innerStride() const { return derived().innerStride(); }
|
||||
|
||||
/** \returns the pointer increment between two consecutive inner slices (for example, between two consecutive columns
|
||||
* in a column-major matrix).
|
||||
*
|
||||
* \sa innerStride(), rowStride(), colStride()
|
||||
*/
|
||||
EIGEN_DEVICE_FUNC constexpr Index outerStride() const { return derived().outerStride(); }
|
||||
|
||||
// FIXME shall we remove it ?
|
||||
constexpr Index stride() const { return Derived::IsVectorAtCompileTime ? innerStride() : outerStride(); }
|
||||
|
||||
/** \returns the pointer increment between two consecutive rows.
|
||||
*
|
||||
* \sa innerStride(), outerStride(), colStride()
|
||||
*/
|
||||
EIGEN_DEVICE_FUNC constexpr Index rowStride() const { return Derived::IsRowMajor ? outerStride() : innerStride(); }
|
||||
|
||||
/** \returns the pointer increment between two consecutive columns.
|
||||
*
|
||||
* \sa innerStride(), outerStride(), rowStride()
|
||||
*/
|
||||
EIGEN_DEVICE_FUNC constexpr Index colStride() const { return Derived::IsRowMajor ? innerStride() : outerStride(); }
|
||||
};
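
// Illustrative sketch (not part of the original sources), assuming <cassert> is included:
// \code
// Eigen::MatrixXd m(4, 5);              // column-major by default
// assert(m.innerStride() == 1);         // consecutive entries within a column
// assert(m.outerStride() == 4);         // jump between two columns equals the number of rows
// assert(m.row(1).innerStride() == 4);  // a row of a column-major matrix is strided
// assert(m.col(2).innerStride() == 1);
// \endcode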
|
||||
|
||||
/** \brief Base class providing direct read/write coefficient access to matrices and arrays.
|
||||
* \ingroup Core_Module
|
||||
* \tparam Derived Type of the derived class
|
||||
*
|
||||
* \note #DirectWriteAccessors Constant indicating direct access
|
||||
*
|
||||
* This class defines functions to work with strides which can be used to access entries directly. This class
|
||||
* inherits DenseCoeffsBase<Derived, WriteAccessors> which defines functions to access entries read/write using
|
||||
* \c operator().
|
||||
*
|
||||
* \sa \blank \ref TopicClassHierarchy
|
||||
*/
|
||||
template <typename Derived>
|
||||
class DenseCoeffsBase<Derived, DirectWriteAccessors> : public DenseCoeffsBase<Derived, WriteAccessors> {
|
||||
public:
|
||||
typedef DenseCoeffsBase<Derived, WriteAccessors> Base;
|
||||
typedef typename internal::traits<Derived>::Scalar Scalar;
|
||||
typedef typename NumTraits<Scalar>::Real RealScalar;
|
||||
|
||||
using Base::cols;
|
||||
using Base::derived;
|
||||
using Base::rows;
|
||||
using Base::size;
|
||||
|
||||
/** \returns the pointer increment between two consecutive elements within a slice in the inner direction.
|
||||
*
|
||||
* \sa outerStride(), rowStride(), colStride()
|
||||
*/
|
||||
EIGEN_DEVICE_FUNC constexpr Index innerStride() const noexcept { return derived().innerStride(); }
|
||||
|
||||
/** \returns the pointer increment between two consecutive inner slices (for example, between two consecutive columns
|
||||
* in a column-major matrix).
|
||||
*
|
||||
* \sa innerStride(), rowStride(), colStride()
|
||||
*/
|
||||
EIGEN_DEVICE_FUNC constexpr Index outerStride() const noexcept { return derived().outerStride(); }
|
||||
|
||||
// FIXME shall we remove it ?
|
||||
constexpr Index stride() const noexcept { return Derived::IsVectorAtCompileTime ? innerStride() : outerStride(); }
|
||||
|
||||
/** \returns the pointer increment between two consecutive rows.
|
||||
*
|
||||
* \sa innerStride(), outerStride(), colStride()
|
||||
*/
|
||||
EIGEN_DEVICE_FUNC constexpr Index rowStride() const noexcept {
|
||||
return Derived::IsRowMajor ? outerStride() : innerStride();
|
||||
}
|
||||
|
||||
/** \returns the pointer increment between two consecutive columns.
|
||||
*
|
||||
* \sa innerStride(), outerStride(), rowStride()
|
||||
*/
|
||||
EIGEN_DEVICE_FUNC constexpr Index colStride() const noexcept {
|
||||
return Derived::IsRowMajor ? innerStride() : outerStride();
|
||||
}
|
||||
};
|
||||
|
||||
namespace internal {
|
||||
|
||||
template <int Alignment, typename Derived, bool JustReturnZero>
|
||||
struct first_aligned_impl {
|
||||
static constexpr Index run(const Derived&) noexcept { return 0; }
|
||||
};
|
||||
|
||||
template <int Alignment, typename Derived>
|
||||
struct first_aligned_impl<Alignment, Derived, false> {
|
||||
static inline Index run(const Derived& m) { return internal::first_aligned<Alignment>(m.data(), m.size()); }
|
||||
};
|
||||
|
||||
/** \internal \returns the index of the first element of the array stored by \a m that is properly aligned with respect
|
||||
* to \a Alignment for vectorization.
|
||||
*
|
||||
* \tparam Alignment requested alignment in Bytes.
|
||||
*
|
||||
* There is also the variant first_aligned(const Scalar*, Integer) defined in Memory.h. See it for more
|
||||
* documentation.
|
||||
*/
|
||||
template <int Alignment, typename Derived>
|
||||
static inline Index first_aligned(const DenseBase<Derived>& m) {
|
||||
enum { ReturnZero = (int(evaluator<Derived>::Alignment) >= Alignment) || !(Derived::Flags & DirectAccessBit) };
|
||||
return first_aligned_impl<Alignment, Derived, ReturnZero>::run(m.derived());
|
||||
}
|
||||
|
||||
template <typename Derived>
|
||||
static inline Index first_default_aligned(const DenseBase<Derived>& m) {
|
||||
typedef typename Derived::Scalar Scalar;
|
||||
typedef typename packet_traits<Scalar>::type DefaultPacketType;
|
||||
return internal::first_aligned<int(unpacket_traits<DefaultPacketType>::alignment), Derived>(m);
|
||||
}
|
||||
|
||||
template <typename Derived, bool HasDirectAccess = has_direct_access<Derived>::ret>
|
||||
struct inner_stride_at_compile_time {
|
||||
enum { ret = traits<Derived>::InnerStrideAtCompileTime };
|
||||
};
|
||||
|
||||
template <typename Derived>
|
||||
struct inner_stride_at_compile_time<Derived, false> {
|
||||
enum { ret = 0 };
|
||||
};
|
||||
|
||||
template <typename Derived, bool HasDirectAccess = has_direct_access<Derived>::ret>
|
||||
struct outer_stride_at_compile_time {
|
||||
enum { ret = traits<Derived>::OuterStrideAtCompileTime };
|
||||
};
|
||||
|
||||
template <typename Derived>
|
||||
struct outer_stride_at_compile_time<Derived, false> {
|
||||
enum { ret = 0 };
|
||||
};
|
||||
|
||||
} // end namespace internal
|
||||
|
||||
} // end namespace Eigen
|
||||
|
||||
#endif // EIGEN_DENSECOEFFSBASE_H
|
||||
578
2025.09.22_cpp_with_eigen_package/Eigen/src/Core/DenseStorage.h
Normal file
@@ -0,0 +1,578 @@
|
||||
// This file is part of Eigen, a lightweight C++ template library
|
||||
// for linear algebra.
|
||||
//
|
||||
// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
|
||||
// Copyright (C) 2006-2009 Benoit Jacob <jacob.benoit.1@gmail.com>
|
||||
// Copyright (C) 2010-2013 Hauke Heibel <hauke.heibel@gmail.com>
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla
|
||||
// Public License v. 2.0. If a copy of the MPL was not distributed
|
||||
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
#ifndef EIGEN_MATRIXSTORAGE_H
|
||||
#define EIGEN_MATRIXSTORAGE_H
|
||||
|
||||
#ifdef EIGEN_DENSE_STORAGE_CTOR_PLUGIN
|
||||
#define EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN(X) \
|
||||
X; \
|
||||
EIGEN_DENSE_STORAGE_CTOR_PLUGIN;
|
||||
#else
|
||||
#define EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN(X)
|
||||
#endif
|
||||
|
||||
// IWYU pragma: private
|
||||
#include "./InternalHeaderCheck.h"
|
||||
|
||||
namespace Eigen {
|
||||
|
||||
namespace internal {
|
||||
|
||||
#if defined(EIGEN_DISABLE_UNALIGNED_ARRAY_ASSERT)
|
||||
#define EIGEN_MAKE_UNALIGNED_ARRAY_ASSERT(Alignment)
|
||||
#else
|
||||
#define EIGEN_MAKE_UNALIGNED_ARRAY_ASSERT(Alignment) \
|
||||
eigen_assert((is_constant_evaluated() || (std::uintptr_t(array) % Alignment == 0)) && \
|
||||
"this assertion is explained here: " \
|
||||
"http://eigen.tuxfamily.org/dox-devel/group__TopicUnalignedArrayAssert.html" \
|
||||
" **** READ THIS WEB PAGE !!! ****");
|
||||
#endif
|
||||
|
||||
#if EIGEN_STACK_ALLOCATION_LIMIT
|
||||
#define EIGEN_MAKE_STACK_ALLOCATION_ASSERT(X) \
|
||||
EIGEN_STATIC_ASSERT(X <= EIGEN_STACK_ALLOCATION_LIMIT, OBJECT_ALLOCATED_ON_STACK_IS_TOO_BIG)
|
||||
#else
|
||||
#define EIGEN_MAKE_STACK_ALLOCATION_ASSERT(X)
|
||||
#endif
|
||||
|
||||
/** \internal
 * Static array. If the MatrixOrArrayOptions require auto-alignment, the array will be automatically
 * aligned: to a 16-byte boundary if the total size is a multiple of 16 bytes.
 */
|
||||
|
||||
template <typename T, int Size, int MatrixOrArrayOptions,
|
||||
int Alignment = (MatrixOrArrayOptions & DontAlign) ? 0 : compute_default_alignment<T, Size>::value>
|
||||
struct plain_array {
|
||||
EIGEN_ALIGN_TO_BOUNDARY(Alignment) T array[Size];
|
||||
#if defined(EIGEN_NO_DEBUG) || defined(EIGEN_TESTING_PLAINOBJECT_CTOR)
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr plain_array() = default;
|
||||
#else
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr plain_array() {
|
||||
EIGEN_MAKE_UNALIGNED_ARRAY_ASSERT(Alignment)
|
||||
EIGEN_MAKE_STACK_ALLOCATION_ASSERT(Size * sizeof(T))
|
||||
}
|
||||
#endif
|
||||
};
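
// Illustrative sketch (not part of the original sources); it only holds for builds where
// EIGEN_MAX_STATIC_ALIGN_BYTES is at least 16 (e.g. SSE or NEON enabled):
// \code
// #if EIGEN_MAX_STATIC_ALIGN_BYTES >= 16
// static_assert(alignof(Eigen::Vector4f) >= 16,
//               "16-byte fixed-size objects receive over-aligned static storage");
// #endif
// \endcode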
|
||||
|
||||
template <typename T, int Size, int MatrixOrArrayOptions>
|
||||
struct plain_array<T, Size, MatrixOrArrayOptions, 0> {
|
||||
T array[Size];
|
||||
#if defined(EIGEN_NO_DEBUG) || defined(EIGEN_TESTING_PLAINOBJECT_CTOR)
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr plain_array() = default;
|
||||
#else
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr plain_array() { EIGEN_MAKE_STACK_ALLOCATION_ASSERT(Size * sizeof(T)) }
|
||||
#endif
|
||||
};
|
||||
|
||||
template <typename T, int MatrixOrArrayOptions, int Alignment>
|
||||
struct plain_array<T, 0, MatrixOrArrayOptions, Alignment> {
|
||||
T array[1];
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr plain_array() = default;
|
||||
};
|
||||
|
||||
template <typename T, int Size, int Options, int Alignment>
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr void swap_plain_array(plain_array<T, Size, Options, Alignment>& a,
|
||||
plain_array<T, Size, Options, Alignment>& b,
|
||||
Index a_size, Index b_size) {
|
||||
Index common_size = numext::mini(a_size, b_size);
|
||||
std::swap_ranges(a.array, a.array + common_size, b.array);
|
||||
if (a_size > b_size)
|
||||
smart_copy(a.array + common_size, a.array + a_size, b.array + common_size);
|
||||
else if (b_size > a_size)
|
||||
smart_copy(b.array + common_size, b.array + b_size, a.array + common_size);
|
||||
}
|
||||
|
||||
template <typename T, int Size, int Rows, int Cols, int Options>
|
||||
class DenseStorage_impl {
|
||||
plain_array<T, Size, Options> m_data;
|
||||
|
||||
public:
|
||||
#ifndef EIGEN_DENSE_STORAGE_CTOR_PLUGIN
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage_impl() = default;
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage_impl(const DenseStorage_impl&) = default;
|
||||
#else
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage_impl() {
|
||||
EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN(Index size = Size)
|
||||
}
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage_impl(const DenseStorage_impl& other) {
|
||||
EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN(Index size = Size)
|
||||
smart_copy(other.m_data.array, other.m_data.array + Size, m_data.array);
|
||||
}
|
||||
#endif
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage_impl(Index /*size*/, Index /*rows*/, Index /*cols*/) {}
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage_impl& operator=(const DenseStorage_impl&) = default;
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr void swap(DenseStorage_impl& other) {
|
||||
numext::swap(m_data, other.m_data);
|
||||
}
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr void conservativeResize(Index /*size*/, Index /*rows*/,
|
||||
Index /*cols*/) {}
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr void resize(Index /*size*/, Index /*rows*/, Index /*cols*/) {}
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Index rows() const { return Rows; }
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Index cols() const { return Cols; }
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Index size() const { return Rows * Cols; }
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr T* data() { return m_data.array; }
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr const T* data() const { return m_data.array; }
|
||||
};
|
||||
template <typename T, int Size, int Cols, int Options>
|
||||
class DenseStorage_impl<T, Size, Dynamic, Cols, Options> {
|
||||
plain_array<T, Size, Options> m_data;
|
||||
Index m_rows = 0;
|
||||
|
||||
public:
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage_impl() = default;
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage_impl(const DenseStorage_impl& other)
|
||||
: m_rows(other.m_rows) {
|
||||
EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN(Index size = other.size())
|
||||
smart_copy(other.m_data.array, other.m_data.array + other.size(), m_data.array);
|
||||
}
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage_impl(Index size, Index rows, Index /*cols*/)
|
||||
: m_rows(rows) {
|
||||
EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN({})
|
||||
EIGEN_UNUSED_VARIABLE(size)
|
||||
}
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage_impl& operator=(const DenseStorage_impl& other) {
|
||||
smart_copy(other.m_data.array, other.m_data.array + other.size(), m_data.array);
|
||||
m_rows = other.m_rows;
|
||||
return *this;
|
||||
}
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr void swap(DenseStorage_impl& other) {
|
||||
swap_plain_array(m_data, other.m_data, size(), other.size());
|
||||
numext::swap(m_rows, other.m_rows);
|
||||
}
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr void conservativeResize(Index /*size*/, Index rows, Index /*cols*/) {
|
||||
m_rows = rows;
|
||||
}
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr void resize(Index /*size*/, Index rows, Index /*cols*/) {
|
||||
m_rows = rows;
|
||||
}
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Index rows() const { return m_rows; }
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Index cols() const { return Cols; }
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Index size() const { return m_rows * Cols; }
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr T* data() { return m_data.array; }
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr const T* data() const { return m_data.array; }
|
||||
};
|
||||
template <typename T, int Size, int Rows, int Options>
|
||||
class DenseStorage_impl<T, Size, Rows, Dynamic, Options> {
|
||||
plain_array<T, Size, Options> m_data;
|
||||
Index m_cols = 0;
|
||||
|
||||
public:
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage_impl() = default;
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage_impl(const DenseStorage_impl& other)
|
||||
: m_cols(other.m_cols) {
|
||||
EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN(Index size = other.size())
|
||||
smart_copy(other.m_data.array, other.m_data.array + other.size(), m_data.array);
|
||||
}
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage_impl(Index size, Index /*rows*/, Index cols)
|
||||
: m_cols(cols) {
|
||||
EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN({})
|
||||
EIGEN_UNUSED_VARIABLE(size)
|
||||
}
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage_impl& operator=(const DenseStorage_impl& other) {
|
||||
smart_copy(other.m_data.array, other.m_data.array + other.size(), m_data.array);
|
||||
m_cols = other.m_cols;
|
||||
return *this;
|
||||
}
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr void swap(DenseStorage_impl& other) {
|
||||
swap_plain_array(m_data, other.m_data, size(), other.size());
|
||||
numext::swap(m_cols, other.m_cols);
|
||||
}
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr void conservativeResize(Index /*size*/, Index /*rows*/, Index cols) {
|
||||
m_cols = cols;
|
||||
}
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr void resize(Index /*size*/, Index /*rows*/, Index cols) {
|
||||
m_cols = cols;
|
||||
}
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Index rows() const { return Rows; }
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Index cols() const { return m_cols; }
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Index size() const { return Rows * m_cols; }
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr T* data() { return m_data.array; }
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr const T* data() const { return m_data.array; }
|
||||
};
|
||||
template <typename T, int Size, int Options>
|
||||
class DenseStorage_impl<T, Size, Dynamic, Dynamic, Options> {
|
||||
plain_array<T, Size, Options> m_data;
|
||||
Index m_rows = 0;
|
||||
Index m_cols = 0;
|
||||
|
||||
public:
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage_impl() = default;
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage_impl(const DenseStorage_impl& other)
|
||||
: m_rows(other.m_rows), m_cols(other.m_cols) {
|
||||
EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN(Index size = other.size())
|
||||
smart_copy(other.m_data.array, other.m_data.array + other.size(), m_data.array);
|
||||
}
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage_impl(Index size, Index rows, Index cols)
|
||||
: m_rows(rows), m_cols(cols) {
|
||||
EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN({})
|
||||
EIGEN_UNUSED_VARIABLE(size)
|
||||
}
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage_impl& operator=(const DenseStorage_impl& other) {
|
||||
smart_copy(other.m_data.array, other.m_data.array + other.size(), m_data.array);
|
||||
m_rows = other.m_rows;
|
||||
m_cols = other.m_cols;
|
||||
return *this;
|
||||
}
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr void swap(DenseStorage_impl& other) {
|
||||
swap_plain_array(m_data, other.m_data, size(), other.size());
|
||||
numext::swap(m_rows, other.m_rows);
|
||||
numext::swap(m_cols, other.m_cols);
|
||||
}
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr void conservativeResize(Index /*size*/, Index rows, Index cols) {
|
||||
m_rows = rows;
|
||||
m_cols = cols;
|
||||
}
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr void resize(Index /*size*/, Index rows, Index cols) {
|
||||
m_rows = rows;
|
||||
m_cols = cols;
|
||||
}
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Index rows() const { return m_rows; }
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Index cols() const { return m_cols; }
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Index size() const { return m_rows * m_cols; }
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr T* data() { return m_data.array; }
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr const T* data() const { return m_data.array; }
|
||||
};
|
||||
// null matrix variants
|
||||
template <typename T, int Rows, int Cols, int Options>
|
||||
class DenseStorage_impl<T, 0, Rows, Cols, Options> {
|
||||
public:
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage_impl() = default;
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage_impl(const DenseStorage_impl&) = default;
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage_impl(Index /*size*/, Index /*rows*/, Index /*cols*/) {}
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage_impl& operator=(const DenseStorage_impl&) = default;
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr void swap(DenseStorage_impl&) {}
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr void conservativeResize(Index /*size*/, Index /*rows*/,
|
||||
Index /*cols*/) {}
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr void resize(Index /*size*/, Index /*rows*/, Index /*cols*/) {}
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Index rows() const { return Rows; }
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Index cols() const { return Cols; }
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Index size() const { return Rows * Cols; }
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr T* data() { return nullptr; }
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr const T* data() const { return nullptr; }
|
||||
};
|
||||
template <typename T, int Cols, int Options>
|
||||
class DenseStorage_impl<T, 0, Dynamic, Cols, Options> {
|
||||
Index m_rows = 0;
|
||||
|
||||
public:
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage_impl() = default;
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage_impl(const DenseStorage_impl&) = default;
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage_impl(Index /*size*/, Index rows, Index /*cols*/)
|
||||
: m_rows(rows) {}
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage_impl& operator=(const DenseStorage_impl&) = default;
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr void swap(DenseStorage_impl& other) noexcept {
|
||||
numext::swap(m_rows, other.m_rows);
|
||||
}
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr void conservativeResize(Index /*size*/, Index rows, Index /*cols*/) {
|
||||
m_rows = rows;
|
||||
}
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr void resize(Index /*size*/, Index rows, Index /*cols*/) {
|
||||
m_rows = rows;
|
||||
}
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Index rows() const { return m_rows; }
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Index cols() const { return Cols; }
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Index size() const { return m_rows * Cols; }
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr T* data() { return nullptr; }
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr const T* data() const { return nullptr; }
|
||||
};
|
||||
template <typename T, int Rows, int Options>
|
||||
class DenseStorage_impl<T, 0, Rows, Dynamic, Options> {
|
||||
Index m_cols = 0;
|
||||
|
||||
public:
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage_impl() = default;
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage_impl(const DenseStorage_impl&) = default;
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage_impl(Index /*size*/, Index /*rows*/, Index cols)
|
||||
: m_cols(cols) {}
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage_impl& operator=(const DenseStorage_impl&) = default;
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr void swap(DenseStorage_impl& other) noexcept {
|
||||
numext::swap(m_cols, other.m_cols);
|
||||
}
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr void conservativeResize(Index /*size*/, Index /*rows*/, Index cols) {
|
||||
m_cols = cols;
|
||||
}
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr void resize(Index /*size*/, Index /*rows*/, Index cols) {
|
||||
m_cols = cols;
|
||||
}
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Index rows() const { return Rows; }
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Index cols() const { return m_cols; }
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Index size() const { return Rows * m_cols; }
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr T* data() { return nullptr; }
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr const T* data() const { return nullptr; }
|
||||
};
|
||||
template <typename T, int Options>
|
||||
class DenseStorage_impl<T, 0, Dynamic, Dynamic, Options> {
|
||||
Index m_rows = 0;
|
||||
Index m_cols = 0;
|
||||
|
||||
public:
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage_impl() = default;
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage_impl(const DenseStorage_impl&) = default;
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage_impl(Index /*size*/, Index rows, Index cols)
|
||||
: m_rows(rows), m_cols(cols) {}
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage_impl& operator=(const DenseStorage_impl&) = default;
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr void swap(DenseStorage_impl& other) noexcept {
|
||||
numext::swap(m_rows, other.m_rows);
|
||||
numext::swap(m_cols, other.m_cols);
|
||||
}
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr void conservativeResize(Index /*size*/, Index rows, Index cols) {
|
||||
m_rows = rows;
|
||||
m_cols = cols;
|
||||
}
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr void resize(Index /*size*/, Index rows, Index cols) {
|
||||
m_rows = rows;
|
||||
m_cols = cols;
|
||||
}
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Index rows() const { return m_rows; }
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Index cols() const { return m_cols; }
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Index size() const { return m_rows * m_cols; }
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr T* data() { return nullptr; }
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr const T* data() const { return nullptr; }
|
||||
};
|
||||
// fixed-size matrix with dynamic memory allocation not currently supported
|
||||
template <typename T, int Rows, int Cols, int Options>
|
||||
class DenseStorage_impl<T, Dynamic, Rows, Cols, Options> {};
|
||||
// dynamic-sized variants
|
||||
template <typename T, int Cols, int Options>
|
||||
class DenseStorage_impl<T, Dynamic, Dynamic, Cols, Options> {
|
||||
static constexpr bool Align = (Options & DontAlign) == 0;
|
||||
T* m_data = nullptr;
|
||||
Index m_rows = 0;
|
||||
|
||||
public:
|
||||
static constexpr int Size = Dynamic;
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage_impl() = default;
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage_impl(const DenseStorage_impl& other)
|
||||
: m_data(conditional_aligned_new_auto<T, Align>(other.size())), m_rows(other.m_rows) {
|
||||
EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN(Index size = other.size())
|
||||
smart_copy(other.m_data, other.m_data + other.size(), m_data);
|
||||
}
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage_impl(Index size, Index rows, Index /*cols*/)
|
||||
: m_data(conditional_aligned_new_auto<T, Align>(size)), m_rows(rows) {
|
||||
EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN({})
|
||||
}
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage_impl(DenseStorage_impl&& other) noexcept
|
||||
: m_data(other.m_data), m_rows(other.m_rows) {
|
||||
other.m_data = nullptr;
|
||||
other.m_rows = 0;
|
||||
}
|
||||
EIGEN_DEVICE_FUNC ~DenseStorage_impl() { conditional_aligned_delete_auto<T, Align>(m_data, size()); }
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage_impl& operator=(const DenseStorage_impl& other) {
|
||||
resize(other.size(), other.rows(), other.cols());
|
||||
smart_copy(other.m_data, other.m_data + other.size(), m_data);
|
||||
return *this;
|
||||
}
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage_impl& operator=(DenseStorage_impl&& other) noexcept {
|
||||
this->swap(other);
|
||||
return *this;
|
||||
}
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr void swap(DenseStorage_impl& other) noexcept {
|
||||
numext::swap(m_data, other.m_data);
|
||||
numext::swap(m_rows, other.m_rows);
|
||||
}
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr void conservativeResize(Index size, Index rows, Index /*cols*/) {
|
||||
m_data = conditional_aligned_realloc_new_auto<T, Align>(m_data, size, this->size());
|
||||
m_rows = rows;
|
||||
}
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr void resize(Index size, Index rows, Index /*cols*/) {
|
||||
Index oldSize = this->size();
|
||||
if (oldSize != size) {
|
||||
conditional_aligned_delete_auto<T, Align>(m_data, oldSize);
|
||||
m_data = conditional_aligned_new_auto<T, Align>(size);
|
||||
EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN({})
|
||||
}
|
||||
m_rows = rows;
|
||||
}
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Index rows() const { return m_rows; }
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Index cols() const { return Cols; }
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Index size() const { return m_rows * Cols; }
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr T* data() { return m_data; }
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr const T* data() const { return m_data; }
|
||||
};
|
||||
template <typename T, int Rows, int Options>
|
||||
class DenseStorage_impl<T, Dynamic, Rows, Dynamic, Options> {
|
||||
static constexpr bool Align = (Options & DontAlign) == 0;
|
||||
T* m_data = nullptr;
|
||||
Index m_cols = 0;
|
||||
|
||||
public:
|
||||
static constexpr int Size = Dynamic;
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage_impl() = default;
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage_impl(const DenseStorage_impl& other)
|
||||
: m_data(conditional_aligned_new_auto<T, Align>(other.size())), m_cols(other.m_cols) {
|
||||
EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN(Index size = other.size())
|
||||
smart_copy(other.m_data, other.m_data + other.size(), m_data);
|
||||
}
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage_impl(Index size, Index /*rows*/, Index cols)
|
||||
: m_data(conditional_aligned_new_auto<T, Align>(size)), m_cols(cols) {
|
||||
EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN({})
|
||||
}
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage_impl(DenseStorage_impl&& other) noexcept
|
||||
: m_data(other.m_data), m_cols(other.m_cols) {
|
||||
other.m_data = nullptr;
|
||||
other.m_cols = 0;
|
||||
}
|
||||
EIGEN_DEVICE_FUNC ~DenseStorage_impl() { conditional_aligned_delete_auto<T, Align>(m_data, size()); }
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage_impl& operator=(const DenseStorage_impl& other) {
|
||||
resize(other.size(), other.rows(), other.cols());
|
||||
smart_copy(other.m_data, other.m_data + other.size(), m_data);
|
||||
return *this;
|
||||
}
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage_impl& operator=(DenseStorage_impl&& other) noexcept {
|
||||
this->swap(other);
|
||||
return *this;
|
||||
}
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr void swap(DenseStorage_impl& other) noexcept {
|
||||
numext::swap(m_data, other.m_data);
|
||||
numext::swap(m_cols, other.m_cols);
|
||||
}
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr void conservativeResize(Index size, Index /*rows*/, Index cols) {
|
||||
m_data = conditional_aligned_realloc_new_auto<T, Align>(m_data, size, this->size());
|
||||
m_cols = cols;
|
||||
}
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr void resize(Index size, Index /*rows*/, Index cols) {
|
||||
Index oldSize = this->size();
|
||||
if (oldSize != size) {
|
||||
conditional_aligned_delete_auto<T, Align>(m_data, oldSize);
|
||||
m_data = conditional_aligned_new_auto<T, Align>(size);
|
||||
EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN({})
|
||||
}
|
||||
m_cols = cols;
|
||||
}
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Index rows() const { return Rows; }
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Index cols() const { return m_cols; }
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Index size() const { return Rows * m_cols; }
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr T* data() { return m_data; }
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr const T* data() const { return m_data; }
|
||||
};
|
||||
template <typename T, int Options>
|
||||
class DenseStorage_impl<T, Dynamic, Dynamic, Dynamic, Options> {
|
||||
static constexpr bool Align = (Options & DontAlign) == 0;
|
||||
T* m_data = nullptr;
|
||||
Index m_rows = 0;
|
||||
Index m_cols = 0;
|
||||
|
||||
public:
|
||||
static constexpr int Size = Dynamic;
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage_impl() = default;
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage_impl(const DenseStorage_impl& other)
|
||||
: m_data(conditional_aligned_new_auto<T, Align>(other.size())), m_rows(other.m_rows), m_cols(other.m_cols) {
|
||||
EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN(Index size = other.size())
|
||||
smart_copy(other.m_data, other.m_data + other.size(), m_data);
|
||||
}
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage_impl(Index size, Index rows, Index cols)
|
||||
: m_data(conditional_aligned_new_auto<T, Align>(size)), m_rows(rows), m_cols(cols) {
|
||||
EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN({})
|
||||
}
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage_impl(DenseStorage_impl&& other) noexcept
|
||||
: m_data(other.m_data), m_rows(other.m_rows), m_cols(other.m_cols) {
|
||||
other.m_data = nullptr;
|
||||
other.m_rows = 0;
|
||||
other.m_cols = 0;
|
||||
}
|
||||
EIGEN_DEVICE_FUNC ~DenseStorage_impl() { conditional_aligned_delete_auto<T, Align>(m_data, size()); }
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage_impl& operator=(const DenseStorage_impl& other) {
|
||||
resize(other.size(), other.rows(), other.cols());
|
||||
smart_copy(other.m_data, other.m_data + other.size(), m_data);
|
||||
return *this;
|
||||
}
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage_impl& operator=(DenseStorage_impl&& other) noexcept {
|
||||
this->swap(other);
|
||||
return *this;
|
||||
}
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr void swap(DenseStorage_impl& other) noexcept {
|
||||
numext::swap(m_data, other.m_data);
|
||||
numext::swap(m_rows, other.m_rows);
|
||||
numext::swap(m_cols, other.m_cols);
|
||||
}
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr void conservativeResize(Index size, Index rows, Index cols) {
|
||||
m_data = conditional_aligned_realloc_new_auto<T, Align>(m_data, size, this->size());
|
||||
m_rows = rows;
|
||||
m_cols = cols;
|
||||
}
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr void resize(Index size, Index rows, Index cols) {
|
||||
Index oldSize = this->size();
|
||||
if (oldSize != size) {
|
||||
conditional_aligned_delete_auto<T, Align>(m_data, oldSize);
|
||||
m_data = conditional_aligned_new_auto<T, Align>(size);
|
||||
EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN({})
|
||||
}
|
||||
m_rows = rows;
|
||||
m_cols = cols;
|
||||
}
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Index rows() const { return m_rows; }
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Index cols() const { return m_cols; }
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Index size() const { return m_rows * m_cols; }
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr T* data() { return m_data; }
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr const T* data() const { return m_data; }
|
||||
};
|
||||
template <typename T, int Size, int Rows, int Cols>
|
||||
struct use_default_move {
|
||||
static constexpr bool DynamicObject = Size == Dynamic;
|
||||
static constexpr bool TrivialObject =
|
||||
(!NumTraits<T>::RequireInitialization) && (Rows >= 0) && (Cols >= 0) && (Size == Rows * Cols);
|
||||
static constexpr bool value = DynamicObject || TrivialObject;
|
||||
};
|
||||
} // end namespace internal
|
||||
|
||||
/** \internal
|
||||
*
|
||||
* \class DenseStorage_impl
|
||||
* \ingroup Core_Module
|
||||
*
|
||||
* \brief Stores the data of a matrix
|
||||
*
|
||||
* This class stores the data of fixed-size, dynamic-size or mixed matrices
|
||||
* in a way as compact as possible.
|
||||
*
|
||||
* \sa Matrix
|
||||
*/
|
||||
template <typename T, int Size, int Rows, int Cols, int Options,
|
||||
bool Trivial = internal::use_default_move<T, Size, Rows, Cols>::value>
|
||||
class DenseStorage : public internal::DenseStorage_impl<T, Size, Rows, Cols, Options> {
|
||||
using Base = internal::DenseStorage_impl<T, Size, Rows, Cols, Options>;
|
||||
|
||||
public:
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage() = default;
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage(const DenseStorage&) = default;
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage(Index size, Index rows, Index cols)
|
||||
: Base(size, rows, cols) {}
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage& operator=(const DenseStorage&) = default;
|
||||
// if DenseStorage meets the requirements of use_default_move, then use the move construction and move assignment
|
||||
// operations defined in DenseStorage_impl, or the compiler-generated versions if none are defined
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage(DenseStorage&&) = default;
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage& operator=(DenseStorage&&) = default;
|
||||
};
|
||||
template <typename T, int Size, int Rows, int Cols, int Options>
|
||||
class DenseStorage<T, Size, Rows, Cols, Options, false>
|
||||
: public internal::DenseStorage_impl<T, Size, Rows, Cols, Options> {
|
||||
using Base = internal::DenseStorage_impl<T, Size, Rows, Cols, Options>;
|
||||
|
||||
public:
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage() = default;
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage(const DenseStorage&) = default;
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage(Index size, Index rows, Index cols)
|
||||
: Base(size, rows, cols) {}
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage& operator=(const DenseStorage&) = default;
|
||||
// if DenseStorage does not meet the requirements of use_default_move, then defer to the copy construction and copy
|
||||
// assignment behavior
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage(DenseStorage&& other)
|
||||
: DenseStorage(static_cast<const DenseStorage&>(other)) {}
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr DenseStorage& operator=(DenseStorage&& other) {
|
||||
*this = other;
|
||||
return *this;
|
||||
}
|
||||
};
|
||||
|
||||
} // end namespace Eigen
|
||||
|
||||
#endif // EIGEN_MATRIX_H
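// Illustrative sketch (editorial addition, not part of the header above): how the
// use_default_move distinction plays out through the public API. Only documented
// Eigen types are used; the expected outputs in the comments are what the storage
// implementation above is designed to guarantee.
#include <Eigen/Dense>
#include <iostream>
#include <utility>

int main() {
  // Dynamic-size storage: the move constructor above steals the heap buffer,
  // so the destination ends up with the source's data pointer.
  Eigen::MatrixXd a = Eigen::MatrixXd::Random(100, 100);
  const double* before = a.data();
  Eigen::MatrixXd b = std::move(a);
  std::cout << (b.data() == before) << "\n";  // expected: 1 (buffer stolen, not copied)

  // Fixed-size storage of a trivial scalar: use_default_move is true and "moving"
  // is just a coefficient copy; the source stays valid and unchanged.
  Eigen::Matrix3d c = Eigen::Matrix3d::Identity();
  Eigen::Matrix3d d = std::move(c);
  std::cout << d(0, 0) << "\n";  // expected: 1
}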
|
||||
153
2025.09.22_cpp_with_eigen_package/Eigen/src/Core/DeviceWrapper.h
Normal file
@@ -0,0 +1,153 @@
|
||||
// This file is part of Eigen, a lightweight C++ template library
|
||||
// for linear algebra.
|
||||
//
|
||||
// Copyright (C) 2023 Charlie Schlosser <cs.schlosser@gmail.com>
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla
|
||||
// Public License v. 2.0. If a copy of the MPL was not distributed
|
||||
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
#ifndef EIGEN_DEVICEWRAPPER_H
|
||||
#define EIGEN_DEVICEWRAPPER_H
|
||||
|
||||
namespace Eigen {
|
||||
template <typename Derived, typename Device>
|
||||
struct DeviceWrapper {
|
||||
using Base = EigenBase<internal::remove_all_t<Derived>>;
|
||||
using Scalar = typename Derived::Scalar;
|
||||
|
||||
EIGEN_DEVICE_FUNC DeviceWrapper(Base& xpr, Device& device) : m_xpr(xpr.derived()), m_device(device) {}
|
||||
EIGEN_DEVICE_FUNC DeviceWrapper(const Base& xpr, Device& device) : m_xpr(xpr.derived()), m_device(device) {}
|
||||
|
||||
template <typename OtherDerived>
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& operator=(const EigenBase<OtherDerived>& other) {
|
||||
using AssignOp = internal::assign_op<Scalar, typename OtherDerived::Scalar>;
|
||||
internal::call_assignment(*this, other.derived(), AssignOp());
|
||||
return m_xpr;
|
||||
}
|
||||
template <typename OtherDerived>
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& operator+=(const EigenBase<OtherDerived>& other) {
|
||||
using AddAssignOp = internal::add_assign_op<Scalar, typename OtherDerived::Scalar>;
|
||||
internal::call_assignment(*this, other.derived(), AddAssignOp());
|
||||
return m_xpr;
|
||||
}
|
||||
template <typename OtherDerived>
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& operator-=(const EigenBase<OtherDerived>& other) {
|
||||
using SubAssignOp = internal::sub_assign_op<Scalar, typename OtherDerived::Scalar>;
|
||||
internal::call_assignment(*this, other.derived(), SubAssignOp());
|
||||
return m_xpr;
|
||||
}
|
||||
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& derived() { return m_xpr; }
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Device& device() { return m_device; }
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE NoAlias<DeviceWrapper, EigenBase> noalias() {
|
||||
return NoAlias<DeviceWrapper, EigenBase>(*this);
|
||||
}
|
||||
|
||||
Derived& m_xpr;
|
||||
Device& m_device;
|
||||
};
|
||||
|
||||
namespace internal {
|
||||
|
||||
// this is where we differentiate between lazy assignment and specialized kernels (e.g. matrix products)
|
||||
template <typename DstXprType, typename SrcXprType, typename Functor, typename Device,
|
||||
typename Kind = typename AssignmentKind<typename evaluator_traits<DstXprType>::Shape,
|
||||
typename evaluator_traits<SrcXprType>::Shape>::Kind,
|
||||
typename EnableIf = void>
|
||||
struct AssignmentWithDevice;
|
||||
|
||||
// unless otherwise specified, use the default product implementation
|
||||
template <typename DstXprType, typename Lhs, typename Rhs, int Options, typename Functor, typename Device,
|
||||
typename Weak>
|
||||
struct AssignmentWithDevice<DstXprType, Product<Lhs, Rhs, Options>, Functor, Device, Dense2Dense, Weak> {
|
||||
using SrcXprType = Product<Lhs, Rhs, Options>;
|
||||
using Base = Assignment<DstXprType, SrcXprType, Functor>;
|
||||
static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void run(DstXprType& dst, const SrcXprType& src, const Functor& func,
|
||||
Device&) {
|
||||
Base::run(dst, src, func);
|
||||
}
|
||||
};
|
||||
|
||||
// specialization for coefficient-wise assignment
|
||||
template <typename DstXprType, typename SrcXprType, typename Functor, typename Device, typename Weak>
|
||||
struct AssignmentWithDevice<DstXprType, SrcXprType, Functor, Device, Dense2Dense, Weak> {
|
||||
static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void run(DstXprType& dst, const SrcXprType& src, const Functor& func,
|
||||
Device& device) {
|
||||
#ifndef EIGEN_NO_DEBUG
|
||||
internal::check_for_aliasing(dst, src);
|
||||
#endif
|
||||
|
||||
call_dense_assignment_loop(dst, src, func, device);
|
||||
}
|
||||
};
|
||||
|
||||
// this allows us to use the default evaluation scheme if it is not specialized for the device
|
||||
template <typename Kernel, typename Device, int Traversal = Kernel::AssignmentTraits::Traversal,
|
||||
int Unrolling = Kernel::AssignmentTraits::Unrolling>
|
||||
struct dense_assignment_loop_with_device {
|
||||
using Base = dense_assignment_loop<Kernel, Traversal, Unrolling>;
|
||||
static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr void run(Kernel& kernel, Device&) { Base::run(kernel); }
|
||||
};
|
||||
|
||||
// entry point for a generic expression with device
|
||||
template <typename Dst, typename Src, typename Func, typename Device>
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr void call_assignment_no_alias(DeviceWrapper<Dst, Device> dst,
|
||||
const Src& src, const Func& func) {
|
||||
enum {
|
||||
NeedToTranspose = ((int(Dst::RowsAtCompileTime) == 1 && int(Src::ColsAtCompileTime) == 1) ||
|
||||
(int(Dst::ColsAtCompileTime) == 1 && int(Src::RowsAtCompileTime) == 1)) &&
|
||||
int(Dst::SizeAtCompileTime) != 1
|
||||
};
|
||||
|
||||
using ActualDstTypeCleaned = std::conditional_t<NeedToTranspose, Transpose<Dst>, Dst>;
|
||||
using ActualDstType = std::conditional_t<NeedToTranspose, Transpose<Dst>, Dst&>;
|
||||
ActualDstType actualDst(dst.derived());
|
||||
|
||||
// TODO check whether this is the right place to perform these checks:
|
||||
EIGEN_STATIC_ASSERT_LVALUE(Dst)
|
||||
EIGEN_STATIC_ASSERT_SAME_MATRIX_SIZE(ActualDstTypeCleaned, Src)
|
||||
EIGEN_CHECK_BINARY_COMPATIBILIY(Func, typename ActualDstTypeCleaned::Scalar, typename Src::Scalar);
|
||||
|
||||
// this provides a mechanism for specializing simple assignments, matrix products, etc
|
||||
AssignmentWithDevice<ActualDstTypeCleaned, Src, Func, Device>::run(actualDst, src, func, dst.device());
|
||||
}
|
||||
|
||||
// copied from AssignEvaluator, except that the device is forwarded to the kernel
|
||||
template <typename DstXprType, typename SrcXprType, typename Functor, typename Device>
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr void call_dense_assignment_loop(DstXprType& dst, const SrcXprType& src,
|
||||
const Functor& func, Device& device) {
|
||||
using DstEvaluatorType = evaluator<DstXprType>;
|
||||
using SrcEvaluatorType = evaluator<SrcXprType>;
|
||||
|
||||
SrcEvaluatorType srcEvaluator(src);
|
||||
|
||||
// NOTE To properly handle A = (A*A.transpose())/s with A rectangular,
|
||||
// we need to resize the destination after the source evaluator has been created.
|
||||
resize_if_allowed(dst, src, func);
|
||||
|
||||
DstEvaluatorType dstEvaluator(dst);
|
||||
|
||||
using Kernel = generic_dense_assignment_kernel<DstEvaluatorType, SrcEvaluatorType, Functor>;
|
||||
|
||||
Kernel kernel(dstEvaluator, srcEvaluator, func, dst.const_cast_derived());
|
||||
|
||||
dense_assignment_loop_with_device<Kernel, Device>::run(kernel, device);
|
||||
}
|
||||
|
||||
} // namespace internal
|
||||
|
||||
template <typename Derived>
|
||||
template <typename Device>
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE DeviceWrapper<Derived, Device> EigenBase<Derived>::device(Device& device) {
|
||||
return DeviceWrapper<Derived, Device>(derived(), device);
|
||||
}
|
||||
|
||||
template <typename Derived>
|
||||
template <typename Device>
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE DeviceWrapper<const Derived, Device> EigenBase<Derived>::device(
|
||||
Device& device) const {
|
||||
return DeviceWrapper<const Derived, Device>(derived(), device);
|
||||
}
|
||||
} // namespace Eigen
|
||||
#endif
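// Illustrative sketch (editorial addition, not part of the header above): the public
// entry point is EigenBase::device(device), defined in this file, which wraps the
// destination so the assignment machinery above can forward the device to the kernel.
// The concrete device object is an assumption about the rest of the build (e.g. a
// thread-pool device); the plain assignment below is the equivalent default path.
#include <Eigen/Dense>

void deviceWrapperSketch() {
  Eigen::MatrixXf a = Eigen::MatrixXf::Random(512, 512);
  Eigen::MatrixXf b = Eigen::MatrixXf::Random(512, 512);
  Eigen::MatrixXf dst(512, 512);

  // Default path: coefficient-wise assignment evaluated on the calling thread.
  dst = a + b;

  // Device path (hypothetical device object `dev`, assuming a suitable device type is
  // available in this build): routes the same assignment through AssignmentWithDevice
  // and dense_assignment_loop_with_device above.
  // dst.device(dev) = a + b;
}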
|
||||
219
2025.09.22_cpp_with_eigen_package/Eigen/src/Core/Diagonal.h
Normal file
@@ -0,0 +1,219 @@
|
||||
// This file is part of Eigen, a lightweight C++ template library
|
||||
// for linear algebra.
|
||||
//
|
||||
// Copyright (C) 2007-2009 Benoit Jacob <jacob.benoit.1@gmail.com>
|
||||
// Copyright (C) 2009-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla
|
||||
// Public License v. 2.0. If a copy of the MPL was not distributed
|
||||
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
#ifndef EIGEN_DIAGONAL_H
|
||||
#define EIGEN_DIAGONAL_H
|
||||
|
||||
// IWYU pragma: private
|
||||
#include "./InternalHeaderCheck.h"
|
||||
|
||||
namespace Eigen {
|
||||
|
||||
/** \class Diagonal
|
||||
* \ingroup Core_Module
|
||||
*
|
||||
* \brief Expression of a diagonal/subdiagonal/superdiagonal in a matrix
|
||||
*
|
||||
* \tparam MatrixType the type of the object in which we are taking a sub/main/super diagonal
|
||||
* \tparam DiagIndex the index of the sub/super diagonal. The default is 0 and it means the main diagonal.
|
||||
* A positive value means a superdiagonal, a negative value means a subdiagonal.
|
||||
* You can also use DynamicIndex so the index can be set at runtime.
|
||||
*
|
||||
* The matrix is not required to be square.
|
||||
*
|
||||
* This class represents an expression of the main diagonal, or any sub/super diagonal
|
||||
 * of a matrix. It is the return type of MatrixBase::diagonal() and MatrixBase::diagonal(Index) and most of the
|
||||
* time this is the only way it is used.
|
||||
*
|
||||
* \sa MatrixBase::diagonal(), MatrixBase::diagonal(Index)
|
||||
*/
|
||||
|
||||
namespace internal {
|
||||
template <typename MatrixType, int DiagIndex>
|
||||
struct traits<Diagonal<MatrixType, DiagIndex> > : traits<MatrixType> {
|
||||
typedef typename ref_selector<MatrixType>::type MatrixTypeNested;
|
||||
typedef std::remove_reference_t<MatrixTypeNested> MatrixTypeNested_;
|
||||
typedef typename MatrixType::StorageKind StorageKind;
|
||||
enum {
|
||||
RowsAtCompileTime = (int(DiagIndex) == DynamicIndex || int(MatrixType::SizeAtCompileTime) == Dynamic)
|
||||
? Dynamic
|
||||
: (plain_enum_min(MatrixType::RowsAtCompileTime - plain_enum_max(-DiagIndex, 0),
|
||||
MatrixType::ColsAtCompileTime - plain_enum_max(DiagIndex, 0))),
|
||||
ColsAtCompileTime = 1,
|
||||
MaxRowsAtCompileTime =
|
||||
int(MatrixType::MaxSizeAtCompileTime) == Dynamic ? Dynamic
|
||||
: DiagIndex == DynamicIndex
|
||||
? min_size_prefer_fixed(MatrixType::MaxRowsAtCompileTime, MatrixType::MaxColsAtCompileTime)
|
||||
: (plain_enum_min(MatrixType::MaxRowsAtCompileTime - plain_enum_max(-DiagIndex, 0),
|
||||
MatrixType::MaxColsAtCompileTime - plain_enum_max(DiagIndex, 0))),
|
||||
MaxColsAtCompileTime = 1,
|
||||
MaskLvalueBit = is_lvalue<MatrixType>::value ? LvalueBit : 0,
|
||||
Flags = (unsigned int)MatrixTypeNested_::Flags & (RowMajorBit | MaskLvalueBit | DirectAccessBit) &
|
||||
~RowMajorBit, // FIXME DirectAccessBit should not be handled by expressions
|
||||
MatrixTypeOuterStride = outer_stride_at_compile_time<MatrixType>::ret,
|
||||
InnerStrideAtCompileTime = MatrixTypeOuterStride == Dynamic ? Dynamic : MatrixTypeOuterStride + 1,
|
||||
OuterStrideAtCompileTime = 0
|
||||
};
|
||||
};
|
||||
} // namespace internal
|
||||
|
||||
template <typename MatrixType, int DiagIndex_>
|
||||
class Diagonal : public internal::dense_xpr_base<Diagonal<MatrixType, DiagIndex_> >::type {
|
||||
public:
|
||||
enum { DiagIndex = DiagIndex_ };
|
||||
typedef typename internal::dense_xpr_base<Diagonal>::type Base;
|
||||
EIGEN_DENSE_PUBLIC_INTERFACE(Diagonal)
|
||||
|
||||
EIGEN_DEVICE_FUNC explicit inline Diagonal(MatrixType& matrix, Index a_index = DiagIndex)
|
||||
: m_matrix(matrix), m_index(a_index) {
|
||||
eigen_assert(a_index <= m_matrix.cols() && -a_index <= m_matrix.rows());
|
||||
}
|
||||
|
||||
EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Diagonal)
|
||||
|
||||
EIGEN_DEVICE_FUNC inline Index rows() const {
|
||||
return m_index.value() < 0 ? numext::mini<Index>(m_matrix.cols(), m_matrix.rows() + m_index.value())
|
||||
: numext::mini<Index>(m_matrix.rows(), m_matrix.cols() - m_index.value());
|
||||
}
|
||||
|
||||
EIGEN_DEVICE_FUNC constexpr Index cols() const noexcept { return 1; }
|
||||
|
||||
EIGEN_DEVICE_FUNC constexpr Index innerStride() const noexcept { return m_matrix.outerStride() + 1; }
|
||||
|
||||
EIGEN_DEVICE_FUNC constexpr Index outerStride() const noexcept { return 0; }
|
||||
|
||||
typedef std::conditional_t<internal::is_lvalue<MatrixType>::value, Scalar, const Scalar> ScalarWithConstIfNotLvalue;
|
||||
|
||||
EIGEN_DEVICE_FUNC inline ScalarWithConstIfNotLvalue* data() { return &(m_matrix.coeffRef(rowOffset(), colOffset())); }
|
||||
EIGEN_DEVICE_FUNC inline const Scalar* data() const { return &(m_matrix.coeffRef(rowOffset(), colOffset())); }
|
||||
|
||||
EIGEN_DEVICE_FUNC inline Scalar& coeffRef(Index row, Index) {
|
||||
EIGEN_STATIC_ASSERT_LVALUE(MatrixType)
|
||||
return m_matrix.coeffRef(row + rowOffset(), row + colOffset());
|
||||
}
|
||||
|
||||
EIGEN_DEVICE_FUNC inline const Scalar& coeffRef(Index row, Index) const {
|
||||
return m_matrix.coeffRef(row + rowOffset(), row + colOffset());
|
||||
}
|
||||
|
||||
EIGEN_DEVICE_FUNC inline CoeffReturnType coeff(Index row, Index) const {
|
||||
return m_matrix.coeff(row + rowOffset(), row + colOffset());
|
||||
}
|
||||
|
||||
EIGEN_DEVICE_FUNC inline Scalar& coeffRef(Index idx) {
|
||||
EIGEN_STATIC_ASSERT_LVALUE(MatrixType)
|
||||
return m_matrix.coeffRef(idx + rowOffset(), idx + colOffset());
|
||||
}
|
||||
|
||||
EIGEN_DEVICE_FUNC inline const Scalar& coeffRef(Index idx) const {
|
||||
return m_matrix.coeffRef(idx + rowOffset(), idx + colOffset());
|
||||
}
|
||||
|
||||
EIGEN_DEVICE_FUNC inline CoeffReturnType coeff(Index idx) const {
|
||||
return m_matrix.coeff(idx + rowOffset(), idx + colOffset());
|
||||
}
|
||||
|
||||
EIGEN_DEVICE_FUNC inline const internal::remove_all_t<typename MatrixType::Nested>& nestedExpression() const {
|
||||
return m_matrix;
|
||||
}
|
||||
|
||||
EIGEN_DEVICE_FUNC inline Index index() const { return m_index.value(); }
|
||||
|
||||
protected:
|
||||
typename internal::ref_selector<MatrixType>::non_const_type m_matrix;
|
||||
const internal::variable_if_dynamicindex<Index, DiagIndex> m_index;
|
||||
|
||||
private:
|
||||
// some compilers may fail to optimize std::max etc in case of compile-time constants...
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Index absDiagIndex() const noexcept {
|
||||
return m_index.value() > 0 ? m_index.value() : -m_index.value();
|
||||
}
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Index rowOffset() const noexcept {
|
||||
return m_index.value() > 0 ? 0 : -m_index.value();
|
||||
}
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Index colOffset() const noexcept {
|
||||
return m_index.value() > 0 ? m_index.value() : 0;
|
||||
}
|
||||
// trigger a compile-time error if someone tries to call packet
|
||||
template <int LoadMode>
|
||||
typename MatrixType::PacketReturnType packet(Index) const;
|
||||
template <int LoadMode>
|
||||
typename MatrixType::PacketReturnType packet(Index, Index) const;
|
||||
};
|
||||
|
||||
/** \returns an expression of the main diagonal of the matrix \c *this
|
||||
*
|
||||
* \c *this is not required to be square.
|
||||
*
|
||||
* Example: \include MatrixBase_diagonal.cpp
|
||||
* Output: \verbinclude MatrixBase_diagonal.out
|
||||
*
|
||||
* \sa class Diagonal */
|
||||
template <typename Derived>
|
||||
EIGEN_DEVICE_FUNC inline typename MatrixBase<Derived>::DiagonalReturnType MatrixBase<Derived>::diagonal() {
|
||||
return DiagonalReturnType(derived());
|
||||
}
|
||||
|
||||
/** This is the const version of diagonal(). */
|
||||
template <typename Derived>
|
||||
EIGEN_DEVICE_FUNC inline const typename MatrixBase<Derived>::ConstDiagonalReturnType MatrixBase<Derived>::diagonal()
|
||||
const {
|
||||
return ConstDiagonalReturnType(derived());
|
||||
}
|
||||
|
||||
/** \returns an expression of the \a DiagIndex-th sub or super diagonal of the matrix \c *this
|
||||
*
|
||||
* \c *this is not required to be square.
|
||||
*
|
||||
 * The template parameter \a DiagIndex represents a super diagonal if \a DiagIndex > 0
|
||||
* and a sub diagonal otherwise. \a DiagIndex == 0 is equivalent to the main diagonal.
|
||||
*
|
||||
* Example: \include MatrixBase_diagonal_int.cpp
|
||||
* Output: \verbinclude MatrixBase_diagonal_int.out
|
||||
*
|
||||
* \sa MatrixBase::diagonal(), class Diagonal */
|
||||
template <typename Derived>
|
||||
EIGEN_DEVICE_FUNC inline Diagonal<Derived, DynamicIndex> MatrixBase<Derived>::diagonal(Index index) {
|
||||
return Diagonal<Derived, DynamicIndex>(derived(), index);
|
||||
}
|
||||
|
||||
/** This is the const version of diagonal(Index). */
|
||||
template <typename Derived>
|
||||
EIGEN_DEVICE_FUNC inline const Diagonal<const Derived, DynamicIndex> MatrixBase<Derived>::diagonal(Index index) const {
|
||||
return Diagonal<const Derived, DynamicIndex>(derived(), index);
|
||||
}
|
||||
|
||||
/** \returns an expression of the \a DiagIndex-th sub or super diagonal of the matrix \c *this
|
||||
*
|
||||
* \c *this is not required to be square.
|
||||
*
|
||||
 * The template parameter \a DiagIndex represents a super diagonal if \a DiagIndex > 0
|
||||
* and a sub diagonal otherwise. \a DiagIndex == 0 is equivalent to the main diagonal.
|
||||
*
|
||||
* Example: \include MatrixBase_diagonal_template_int.cpp
|
||||
* Output: \verbinclude MatrixBase_diagonal_template_int.out
|
||||
*
|
||||
* \sa MatrixBase::diagonal(), class Diagonal */
|
||||
template <typename Derived>
|
||||
template <int Index_>
|
||||
EIGEN_DEVICE_FUNC inline Diagonal<Derived, Index_> MatrixBase<Derived>::diagonal() {
|
||||
return Diagonal<Derived, Index_>(derived());
|
||||
}
|
||||
|
||||
/** This is the const version of diagonal<int>(). */
|
||||
template <typename Derived>
|
||||
template <int Index_>
|
||||
EIGEN_DEVICE_FUNC inline const Diagonal<const Derived, Index_> MatrixBase<Derived>::diagonal() const {
|
||||
return Diagonal<const Derived, Index_>(derived());
|
||||
}
|
||||
|
||||
} // end namespace Eigen
|
||||
|
||||
#endif // EIGEN_DIAGONAL_H
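// Illustrative sketch (editorial addition, not part of the header above): the
// diagonal() entry points implemented in this file, using only documented public API.
#include <Eigen/Dense>
#include <iostream>

int main() {
  Eigen::Matrix4d m = Eigen::Matrix4d::Constant(1.0);
  m.diagonal().setConstant(5.0);      // main diagonal, writable since m is an lvalue
  m.diagonal(1).setZero();            // first superdiagonal, index chosen at runtime
  m.diagonal<-1>().setConstant(2.0);  // first subdiagonal, index fixed at compile time
  std::cout << m << "\n";
  // An offset diagonal is shorter than the matrix dimension:
  std::cout << m.diagonal(1).size() << "\n";  // prints 3 for a 4x4 matrix
}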
|
||||
420
2025.09.22_cpp_with_eigen_package/Eigen/src/Core/DiagonalMatrix.h
Normal file
@@ -0,0 +1,420 @@
|
||||
// This file is part of Eigen, a lightweight C++ template library
|
||||
// for linear algebra.
|
||||
//
|
||||
// Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>
|
||||
// Copyright (C) 2007-2009 Benoit Jacob <jacob.benoit.1@gmail.com>
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla
|
||||
// Public License v. 2.0. If a copy of the MPL was not distributed
|
||||
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
#ifndef EIGEN_DIAGONALMATRIX_H
|
||||
#define EIGEN_DIAGONALMATRIX_H
|
||||
|
||||
// IWYU pragma: private
|
||||
#include "./InternalHeaderCheck.h"
|
||||
|
||||
namespace Eigen {
|
||||
|
||||
/** \class DiagonalBase
|
||||
* \ingroup Core_Module
|
||||
*
|
||||
* \brief Base class for diagonal matrices and expressions
|
||||
*
|
||||
* This is the base class that is inherited by diagonal matrix and related expression
|
||||
* types, which internally use a vector for storing the diagonal entries. Diagonal
|
||||
* types always represent square matrices.
|
||||
*
|
||||
* \tparam Derived is the derived type, a DiagonalMatrix or DiagonalWrapper.
|
||||
*
|
||||
* \sa class DiagonalMatrix, class DiagonalWrapper
|
||||
*/
|
||||
template <typename Derived>
|
||||
class DiagonalBase : public EigenBase<Derived> {
|
||||
public:
|
||||
typedef typename internal::traits<Derived>::DiagonalVectorType DiagonalVectorType;
|
||||
typedef typename DiagonalVectorType::Scalar Scalar;
|
||||
typedef typename DiagonalVectorType::RealScalar RealScalar;
|
||||
typedef typename internal::traits<Derived>::StorageKind StorageKind;
|
||||
typedef typename internal::traits<Derived>::StorageIndex StorageIndex;
|
||||
|
||||
enum {
|
||||
RowsAtCompileTime = DiagonalVectorType::SizeAtCompileTime,
|
||||
ColsAtCompileTime = DiagonalVectorType::SizeAtCompileTime,
|
||||
MaxRowsAtCompileTime = DiagonalVectorType::MaxSizeAtCompileTime,
|
||||
MaxColsAtCompileTime = DiagonalVectorType::MaxSizeAtCompileTime,
|
||||
IsVectorAtCompileTime = 0,
|
||||
Flags = NoPreferredStorageOrderBit
|
||||
};
|
||||
|
||||
typedef Matrix<Scalar, RowsAtCompileTime, ColsAtCompileTime, 0, MaxRowsAtCompileTime, MaxColsAtCompileTime>
|
||||
DenseMatrixType;
|
||||
typedef DenseMatrixType DenseType;
|
||||
typedef DiagonalMatrix<Scalar, DiagonalVectorType::SizeAtCompileTime, DiagonalVectorType::MaxSizeAtCompileTime>
|
||||
PlainObject;
|
||||
|
||||
/** \returns a const reference to the derived object. */
|
||||
EIGEN_DEVICE_FUNC inline const Derived& derived() const { return *static_cast<const Derived*>(this); }
|
||||
/** \returns a reference to the derived object. */
|
||||
EIGEN_DEVICE_FUNC inline Derived& derived() { return *static_cast<Derived*>(this); }
|
||||
|
||||
/**
|
||||
   * Constructs a dense matrix from \c *this. Note that this directly returns a dense matrix type,
   * not an expression.
   * \returns A dense matrix, with its diagonal entries set from the derived object. */
|
||||
EIGEN_DEVICE_FUNC DenseMatrixType toDenseMatrix() const { return derived(); }
|
||||
|
||||
/** \returns a const reference to the derived object's vector of diagonal coefficients. */
|
||||
EIGEN_DEVICE_FUNC inline const DiagonalVectorType& diagonal() const { return derived().diagonal(); }
|
||||
/** \returns a reference to the derived object's vector of diagonal coefficients. */
|
||||
EIGEN_DEVICE_FUNC inline DiagonalVectorType& diagonal() { return derived().diagonal(); }
|
||||
|
||||
/** \returns the value of the coefficient as if \c *this was a dense matrix. */
|
||||
EIGEN_DEVICE_FUNC inline Scalar coeff(Index row, Index col) const {
|
||||
eigen_assert(row >= 0 && col >= 0 && row < rows() && col < cols());
|
||||
return row == col ? diagonal().coeff(row) : Scalar(0);
|
||||
}
|
||||
|
||||
/** \returns the number of rows. */
|
||||
EIGEN_DEVICE_FUNC constexpr Index rows() const { return diagonal().size(); }
|
||||
/** \returns the number of columns. */
|
||||
EIGEN_DEVICE_FUNC constexpr Index cols() const { return diagonal().size(); }
|
||||
|
||||
/** \returns the diagonal matrix product of \c *this by the dense matrix, \a matrix */
|
||||
template <typename MatrixDerived>
|
||||
EIGEN_DEVICE_FUNC const Product<Derived, MatrixDerived, LazyProduct> operator*(
|
||||
const MatrixBase<MatrixDerived>& matrix) const {
|
||||
return Product<Derived, MatrixDerived, LazyProduct>(derived(), matrix.derived());
|
||||
}
|
||||
|
||||
template <typename OtherDerived>
|
||||
using DiagonalProductReturnType = DiagonalWrapper<const EIGEN_CWISE_BINARY_RETURN_TYPE(
|
||||
DiagonalVectorType, typename OtherDerived::DiagonalVectorType, product)>;
|
||||
|
||||
/** \returns the diagonal matrix product of \c *this by the diagonal matrix \a other */
|
||||
template <typename OtherDerived>
|
||||
EIGEN_DEVICE_FUNC const DiagonalProductReturnType<OtherDerived> operator*(
|
||||
const DiagonalBase<OtherDerived>& other) const {
|
||||
return diagonal().cwiseProduct(other.diagonal()).asDiagonal();
|
||||
}
|
||||
|
||||
using DiagonalInverseReturnType =
|
||||
DiagonalWrapper<const CwiseUnaryOp<internal::scalar_inverse_op<Scalar>, const DiagonalVectorType>>;
|
||||
|
||||
/** \returns the inverse of \c *this. Computed as the coefficient-wise inverse of the diagonal. */
|
||||
EIGEN_DEVICE_FUNC inline const DiagonalInverseReturnType inverse() const {
|
||||
return diagonal().cwiseInverse().asDiagonal();
|
||||
}
|
||||
|
||||
using DiagonalScaleReturnType =
|
||||
DiagonalWrapper<const EIGEN_EXPR_BINARYOP_SCALAR_RETURN_TYPE(DiagonalVectorType, Scalar, product)>;
|
||||
|
||||
/** \returns the product of \c *this by the scalar \a scalar */
|
||||
EIGEN_DEVICE_FUNC inline const DiagonalScaleReturnType operator*(const Scalar& scalar) const {
|
||||
return (diagonal() * scalar).asDiagonal();
|
||||
}
|
||||
|
||||
using ScaleDiagonalReturnType =
|
||||
DiagonalWrapper<const EIGEN_SCALAR_BINARYOP_EXPR_RETURN_TYPE(Scalar, DiagonalVectorType, product)>;
|
||||
|
||||
/** \returns the product of a scalar and the diagonal matrix \a other */
|
||||
EIGEN_DEVICE_FUNC friend inline const ScaleDiagonalReturnType operator*(const Scalar& scalar,
|
||||
const DiagonalBase& other) {
|
||||
return (scalar * other.diagonal()).asDiagonal();
|
||||
}
|
||||
|
||||
template <typename OtherDerived>
|
||||
using DiagonalSumReturnType = DiagonalWrapper<const EIGEN_CWISE_BINARY_RETURN_TYPE(
|
||||
DiagonalVectorType, typename OtherDerived::DiagonalVectorType, sum)>;
|
||||
|
||||
/** \returns the sum of \c *this and the diagonal matrix \a other */
|
||||
template <typename OtherDerived>
|
||||
EIGEN_DEVICE_FUNC inline const DiagonalSumReturnType<OtherDerived> operator+(
|
||||
const DiagonalBase<OtherDerived>& other) const {
|
||||
return (diagonal() + other.diagonal()).asDiagonal();
|
||||
}
|
||||
|
||||
template <typename OtherDerived>
|
||||
using DiagonalDifferenceReturnType = DiagonalWrapper<const EIGEN_CWISE_BINARY_RETURN_TYPE(
|
||||
DiagonalVectorType, typename OtherDerived::DiagonalVectorType, difference)>;
|
||||
|
||||
/** \returns the difference of \c *this and the diagonal matrix \a other */
|
||||
template <typename OtherDerived>
|
||||
EIGEN_DEVICE_FUNC inline const DiagonalDifferenceReturnType<OtherDerived> operator-(
|
||||
const DiagonalBase<OtherDerived>& other) const {
|
||||
return (diagonal() - other.diagonal()).asDiagonal();
|
||||
}
|
||||
};
|
||||
|
||||
/** \class DiagonalMatrix
|
||||
* \ingroup Core_Module
|
||||
*
|
||||
* \brief Represents a diagonal matrix with its storage
|
||||
*
|
||||
* \tparam Scalar_ the type of coefficients
|
||||
* \tparam SizeAtCompileTime the dimension of the matrix, or Dynamic
|
||||
* \tparam MaxSizeAtCompileTime the dimension of the matrix, or Dynamic. This parameter is optional and defaults
|
||||
* to SizeAtCompileTime. Most of the time, you do not need to specify it.
|
||||
*
|
||||
* \sa class DiagonalBase, class DiagonalWrapper
|
||||
*/
|
||||
|
||||
namespace internal {
|
||||
template <typename Scalar_, int SizeAtCompileTime, int MaxSizeAtCompileTime>
|
||||
struct traits<DiagonalMatrix<Scalar_, SizeAtCompileTime, MaxSizeAtCompileTime>>
|
||||
: traits<Matrix<Scalar_, SizeAtCompileTime, SizeAtCompileTime, 0, MaxSizeAtCompileTime, MaxSizeAtCompileTime>> {
|
||||
typedef Matrix<Scalar_, SizeAtCompileTime, 1, 0, MaxSizeAtCompileTime, 1> DiagonalVectorType;
|
||||
typedef DiagonalShape StorageKind;
|
||||
enum { Flags = LvalueBit | NoPreferredStorageOrderBit | NestByRefBit };
|
||||
};
|
||||
} // namespace internal
|
||||
template <typename Scalar_, int SizeAtCompileTime, int MaxSizeAtCompileTime>
|
||||
class DiagonalMatrix : public DiagonalBase<DiagonalMatrix<Scalar_, SizeAtCompileTime, MaxSizeAtCompileTime>> {
|
||||
public:
|
||||
#ifndef EIGEN_PARSED_BY_DOXYGEN
|
||||
typedef typename internal::traits<DiagonalMatrix>::DiagonalVectorType DiagonalVectorType;
|
||||
typedef const DiagonalMatrix& Nested;
|
||||
typedef Scalar_ Scalar;
|
||||
typedef typename internal::traits<DiagonalMatrix>::StorageKind StorageKind;
|
||||
typedef typename internal::traits<DiagonalMatrix>::StorageIndex StorageIndex;
|
||||
#endif
|
||||
|
||||
protected:
|
||||
DiagonalVectorType m_diagonal;
|
||||
|
||||
public:
|
||||
/** const version of diagonal(). */
|
||||
EIGEN_DEVICE_FUNC inline const DiagonalVectorType& diagonal() const { return m_diagonal; }
|
||||
/** \returns a reference to the stored vector of diagonal coefficients. */
|
||||
EIGEN_DEVICE_FUNC inline DiagonalVectorType& diagonal() { return m_diagonal; }
|
||||
|
||||
/** Default constructor without initialization */
|
||||
EIGEN_DEVICE_FUNC inline DiagonalMatrix() {}
|
||||
|
||||
/** Constructs a diagonal matrix with given dimension */
|
||||
EIGEN_DEVICE_FUNC explicit inline DiagonalMatrix(Index dim) : m_diagonal(dim) {}
|
||||
|
||||
/** 2D constructor. */
|
||||
EIGEN_DEVICE_FUNC inline DiagonalMatrix(const Scalar& x, const Scalar& y) : m_diagonal(x, y) {}
|
||||
|
||||
/** 3D constructor. */
|
||||
EIGEN_DEVICE_FUNC inline DiagonalMatrix(const Scalar& x, const Scalar& y, const Scalar& z) : m_diagonal(x, y, z) {}
|
||||
|
||||
/** \brief Construct a diagonal matrix with fixed size from an arbitrary number of coefficients.
|
||||
*
|
||||
* \warning To construct a diagonal matrix of fixed size, the number of values passed to this
|
||||
* constructor must match the fixed dimension of \c *this.
|
||||
*
|
||||
* \sa DiagonalMatrix(const Scalar&, const Scalar&)
|
||||
* \sa DiagonalMatrix(const Scalar&, const Scalar&, const Scalar&)
|
||||
*/
|
||||
template <typename... ArgTypes>
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE DiagonalMatrix(const Scalar& a0, const Scalar& a1, const Scalar& a2,
|
||||
const ArgTypes&... args)
|
||||
: m_diagonal(a0, a1, a2, args...) {}
|
||||
|
||||
/** \brief Constructs a DiagonalMatrix and initializes it by elements given by an initializer list of initializer
|
||||
* lists \cpp11
|
||||
*/
|
||||
EIGEN_DEVICE_FUNC explicit EIGEN_STRONG_INLINE DiagonalMatrix(
|
||||
const std::initializer_list<std::initializer_list<Scalar>>& list)
|
||||
: m_diagonal(list) {}
|
||||
|
||||
/** \brief Constructs a DiagonalMatrix from an r-value diagonal vector type */
|
||||
EIGEN_DEVICE_FUNC explicit inline DiagonalMatrix(DiagonalVectorType&& diag) : m_diagonal(std::move(diag)) {}
|
||||
|
||||
/** Copy constructor. */
|
||||
template <typename OtherDerived>
|
||||
EIGEN_DEVICE_FUNC inline DiagonalMatrix(const DiagonalBase<OtherDerived>& other) : m_diagonal(other.diagonal()) {}
|
||||
|
||||
#ifndef EIGEN_PARSED_BY_DOXYGEN
|
||||
/** Copy constructor; prevents a default copy constructor from hiding the templated constructor above. */
|
||||
inline DiagonalMatrix(const DiagonalMatrix& other) : m_diagonal(other.diagonal()) {}
|
||||
#endif
|
||||
|
||||
/** generic constructor from expression of the diagonal coefficients */
|
||||
template <typename OtherDerived>
|
||||
EIGEN_DEVICE_FUNC explicit inline DiagonalMatrix(const MatrixBase<OtherDerived>& other) : m_diagonal(other) {}
|
||||
|
||||
/** Copy operator. */
|
||||
template <typename OtherDerived>
|
||||
EIGEN_DEVICE_FUNC DiagonalMatrix& operator=(const DiagonalBase<OtherDerived>& other) {
|
||||
m_diagonal = other.diagonal();
|
||||
return *this;
|
||||
}
|
||||
|
||||
#ifndef EIGEN_PARSED_BY_DOXYGEN
|
||||
/** This is a special case of the templated operator=. Its purpose is to
|
||||
* prevent a default operator= from hiding the templated operator=.
|
||||
*/
|
||||
EIGEN_DEVICE_FUNC DiagonalMatrix& operator=(const DiagonalMatrix& other) {
|
||||
m_diagonal = other.diagonal();
|
||||
return *this;
|
||||
}
|
||||
#endif
|
||||
|
||||
typedef DiagonalWrapper<const CwiseNullaryOp<internal::scalar_constant_op<Scalar>, DiagonalVectorType>>
|
||||
InitializeReturnType;
|
||||
|
||||
typedef DiagonalWrapper<const CwiseNullaryOp<internal::scalar_zero_op<Scalar>, DiagonalVectorType>>
|
||||
ZeroInitializeReturnType;
|
||||
|
||||
/** Initializes a diagonal matrix of size SizeAtCompileTime with coefficients set to zero */
|
||||
EIGEN_DEVICE_FUNC static const ZeroInitializeReturnType Zero() { return DiagonalVectorType::Zero().asDiagonal(); }
|
||||
/** Initializes a diagonal matrix of size dim with coefficients set to zero */
|
||||
EIGEN_DEVICE_FUNC static const ZeroInitializeReturnType Zero(Index size) {
|
||||
return DiagonalVectorType::Zero(size).asDiagonal();
|
||||
}
|
||||
/** Initializes an identity matrix of size SizeAtCompileTime */
|
||||
EIGEN_DEVICE_FUNC static const InitializeReturnType Identity() { return DiagonalVectorType::Ones().asDiagonal(); }
|
||||
/** Initializes an identity matrix of size dim */
|
||||
EIGEN_DEVICE_FUNC static const InitializeReturnType Identity(Index size) {
|
||||
return DiagonalVectorType::Ones(size).asDiagonal();
|
||||
}
|
||||
|
||||
/** Resizes to given size. */
|
||||
EIGEN_DEVICE_FUNC inline void resize(Index size) { m_diagonal.resize(size); }
|
||||
/** Sets all coefficients to zero. */
|
||||
EIGEN_DEVICE_FUNC inline void setZero() { m_diagonal.setZero(); }
|
||||
/** Resizes and sets all coefficients to zero. */
|
||||
EIGEN_DEVICE_FUNC inline void setZero(Index size) { m_diagonal.setZero(size); }
|
||||
/** Sets this matrix to be the identity matrix of the current size. */
|
||||
EIGEN_DEVICE_FUNC inline void setIdentity() { m_diagonal.setOnes(); }
|
||||
/** Sets this matrix to be the identity matrix of the given size. */
|
||||
EIGEN_DEVICE_FUNC inline void setIdentity(Index size) { m_diagonal.setOnes(size); }
|
||||
};
|
||||
|
||||
/** \class DiagonalWrapper
|
||||
* \ingroup Core_Module
|
||||
*
|
||||
* \brief Expression of a diagonal matrix
|
||||
*
|
||||
* \tparam DiagonalVectorType_ the type of the vector of diagonal coefficients
|
||||
*
|
||||
* This class is an expression of a diagonal matrix, but not storing its own vector of diagonal coefficients,
|
||||
* instead wrapping an existing vector expression. It is the return type of MatrixBase::asDiagonal()
|
||||
* and most of the time this is the only way that it is used.
|
||||
*
|
||||
* \sa class DiagonalMatrix, class DiagonalBase, MatrixBase::asDiagonal()
|
||||
*/
|
||||
|
||||
namespace internal {
|
||||
template <typename DiagonalVectorType_>
|
||||
struct traits<DiagonalWrapper<DiagonalVectorType_>> {
|
||||
typedef DiagonalVectorType_ DiagonalVectorType;
|
||||
typedef typename DiagonalVectorType::Scalar Scalar;
|
||||
typedef typename DiagonalVectorType::StorageIndex StorageIndex;
|
||||
typedef DiagonalShape StorageKind;
|
||||
typedef typename traits<DiagonalVectorType>::XprKind XprKind;
|
||||
enum {
|
||||
RowsAtCompileTime = DiagonalVectorType::SizeAtCompileTime,
|
||||
ColsAtCompileTime = DiagonalVectorType::SizeAtCompileTime,
|
||||
MaxRowsAtCompileTime = DiagonalVectorType::MaxSizeAtCompileTime,
|
||||
MaxColsAtCompileTime = DiagonalVectorType::MaxSizeAtCompileTime,
|
||||
Flags = (traits<DiagonalVectorType>::Flags & LvalueBit) | NoPreferredStorageOrderBit
|
||||
};
|
||||
};
|
||||
} // namespace internal
|
||||
|
||||
template <typename DiagonalVectorType_>
|
||||
class DiagonalWrapper : public DiagonalBase<DiagonalWrapper<DiagonalVectorType_>>, internal::no_assignment_operator {
|
||||
public:
|
||||
#ifndef EIGEN_PARSED_BY_DOXYGEN
|
||||
typedef DiagonalVectorType_ DiagonalVectorType;
|
||||
typedef DiagonalWrapper Nested;
|
||||
#endif
|
||||
|
||||
/** Constructor from expression of diagonal coefficients to wrap. */
|
||||
EIGEN_DEVICE_FUNC explicit inline DiagonalWrapper(DiagonalVectorType& a_diagonal) : m_diagonal(a_diagonal) {}
|
||||
|
||||
/** \returns a const reference to the wrapped expression of diagonal coefficients. */
|
||||
EIGEN_DEVICE_FUNC const DiagonalVectorType& diagonal() const { return m_diagonal; }
|
||||
|
||||
protected:
|
||||
typename DiagonalVectorType::Nested m_diagonal;
|
||||
};
|
||||
|
||||
/** \returns a pseudo-expression of a diagonal matrix with *this as vector of diagonal coefficients
|
||||
*
|
||||
* \only_for_vectors
|
||||
*
|
||||
* Example: \include MatrixBase_asDiagonal.cpp
|
||||
* Output: \verbinclude MatrixBase_asDiagonal.out
|
||||
*
|
||||
* \sa class DiagonalWrapper, class DiagonalMatrix, diagonal(), isDiagonal()
|
||||
**/
|
||||
template <typename Derived>
|
||||
EIGEN_DEVICE_FUNC inline const DiagonalWrapper<const Derived> MatrixBase<Derived>::asDiagonal() const {
|
||||
return DiagonalWrapper<const Derived>(derived());
|
||||
}
|
||||
|
||||
/** \returns true if *this is approximately equal to a diagonal matrix,
|
||||
* within the precision given by \a prec.
|
||||
*
|
||||
* Example: \include MatrixBase_isDiagonal.cpp
|
||||
* Output: \verbinclude MatrixBase_isDiagonal.out
|
||||
*
|
||||
* \sa asDiagonal()
|
||||
*/
|
||||
template <typename Derived>
|
||||
bool MatrixBase<Derived>::isDiagonal(const RealScalar& prec) const {
|
||||
if (cols() != rows()) return false;
|
||||
RealScalar maxAbsOnDiagonal = static_cast<RealScalar>(-1);
|
||||
for (Index j = 0; j < cols(); ++j) {
|
||||
RealScalar absOnDiagonal = numext::abs(coeff(j, j));
|
||||
if (absOnDiagonal > maxAbsOnDiagonal) maxAbsOnDiagonal = absOnDiagonal;
|
||||
}
|
||||
for (Index j = 0; j < cols(); ++j)
|
||||
for (Index i = 0; i < j; ++i) {
|
||||
if (!internal::isMuchSmallerThan(coeff(i, j), maxAbsOnDiagonal, prec)) return false;
|
||||
if (!internal::isMuchSmallerThan(coeff(j, i), maxAbsOnDiagonal, prec)) return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
namespace internal {
|
||||
|
||||
template <>
|
||||
struct storage_kind_to_shape<DiagonalShape> {
|
||||
typedef DiagonalShape Shape;
|
||||
};
|
||||
|
||||
struct Diagonal2Dense {};
|
||||
|
||||
template <>
|
||||
struct AssignmentKind<DenseShape, DiagonalShape> {
|
||||
typedef Diagonal2Dense Kind;
|
||||
};
|
||||
|
||||
// Diagonal matrix to Dense assignment
|
||||
template <typename DstXprType, typename SrcXprType, typename Functor>
|
||||
struct Assignment<DstXprType, SrcXprType, Functor, Diagonal2Dense> {
|
||||
static EIGEN_DEVICE_FUNC void run(
|
||||
DstXprType& dst, const SrcXprType& src,
|
||||
const internal::assign_op<typename DstXprType::Scalar, typename SrcXprType::Scalar>& /*func*/) {
|
||||
Index dstRows = src.rows();
|
||||
Index dstCols = src.cols();
|
||||
if ((dst.rows() != dstRows) || (dst.cols() != dstCols)) dst.resize(dstRows, dstCols);
|
||||
|
||||
dst.setZero();
|
||||
dst.diagonal() = src.diagonal();
|
||||
}
|
||||
|
||||
static EIGEN_DEVICE_FUNC void run(
|
||||
DstXprType& dst, const SrcXprType& src,
|
||||
const internal::add_assign_op<typename DstXprType::Scalar, typename SrcXprType::Scalar>& /*func*/) {
|
||||
dst.diagonal() += src.diagonal();
|
||||
}
|
||||
|
||||
static EIGEN_DEVICE_FUNC void run(
|
||||
DstXprType& dst, const SrcXprType& src,
|
||||
const internal::sub_assign_op<typename DstXprType::Scalar, typename SrcXprType::Scalar>& /*func*/) {
|
||||
dst.diagonal() -= src.diagonal();
|
||||
}
|
||||
};
|
||||
|
||||
} // namespace internal
|
||||
|
||||
} // end namespace Eigen
|
||||
|
||||
#endif // EIGEN_DIAGONALMATRIX_H
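// Illustrative sketch (editorial addition, not part of the header above): DiagonalMatrix
// stores its own diagonal, while DiagonalWrapper (via asDiagonal()) only references a
// vector expression. Only documented public API is used.
#include <Eigen/Dense>
#include <iostream>

int main() {
  Eigen::DiagonalMatrix<double, 3> d(1.0, 2.0, 3.0);  // fixed-size, owns its diagonal
  Eigen::Matrix3d m = Eigen::Matrix3d::Ones();

  std::cout << Eigen::Matrix3d(d * m) << "\n";             // row i of m scaled by d(i)
  std::cout << d.inverse().diagonal().transpose() << "\n"; // 1 0.5 0.333...

  Eigen::Matrix3d dense = d;  // Diagonal2Dense assignment above: zero fill + diagonal copy
  std::cout << dense.isDiagonal() << "\n";  // prints 1
}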
|
||||
30
2025.09.22_cpp_with_eigen_package/Eigen/src/Core/DiagonalProduct.h
Normal file
@@ -0,0 +1,30 @@
|
||||
// This file is part of Eigen, a lightweight C++ template library
|
||||
// for linear algebra.
|
||||
//
|
||||
// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
|
||||
// Copyright (C) 2007-2009 Benoit Jacob <jacob.benoit.1@gmail.com>
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla
|
||||
// Public License v. 2.0. If a copy of the MPL was not distributed
|
||||
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
#ifndef EIGEN_DIAGONALPRODUCT_H
|
||||
#define EIGEN_DIAGONALPRODUCT_H
|
||||
|
||||
// IWYU pragma: private
|
||||
#include "./InternalHeaderCheck.h"
|
||||
|
||||
namespace Eigen {
|
||||
|
||||
/** \returns the diagonal matrix product of \c *this by the diagonal matrix \a diagonal.
|
||||
*/
|
||||
template <typename Derived>
|
||||
template <typename DiagonalDerived>
|
||||
EIGEN_DEVICE_FUNC inline const Product<Derived, DiagonalDerived, LazyProduct> MatrixBase<Derived>::operator*(
|
||||
const DiagonalBase<DiagonalDerived> &a_diagonal) const {
|
||||
return Product<Derived, DiagonalDerived, LazyProduct>(derived(), a_diagonal.derived());
|
||||
}
|
||||
|
||||
} // end namespace Eigen
|
||||
|
||||
#endif // EIGEN_DIAGONALPRODUCT_H
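// Illustrative sketch (editorial addition, not part of the header above): the operator*
// defined in this file multiplies a dense expression by a diagonal expression, which
// scales the columns without forming a dense diagonal matrix.
#include <Eigen/Dense>
#include <iostream>

int main() {
  Eigen::Matrix3d m = Eigen::Matrix3d::Ones();
  Eigen::Vector3d s(1.0, 2.0, 3.0);
  // m * s.asDiagonal(): column j of m is scaled by s(j).
  std::cout << Eigen::Matrix3d(m * s.asDiagonal()) << "\n";
}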
|
||||
268
2025.09.22_cpp_with_eigen_package/Eigen/src/Core/Dot.h
Normal file
@@ -0,0 +1,268 @@
|
||||
// This file is part of Eigen, a lightweight C++ template library
|
||||
// for linear algebra.
|
||||
//
|
||||
// Copyright (C) 2006-2008, 2010 Benoit Jacob <jacob.benoit.1@gmail.com>
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla
|
||||
// Public License v. 2.0. If a copy of the MPL was not distributed
|
||||
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
#ifndef EIGEN_DOT_H
|
||||
#define EIGEN_DOT_H
|
||||
|
||||
// IWYU pragma: private
|
||||
#include "./InternalHeaderCheck.h"
|
||||
|
||||
namespace Eigen {
|
||||
|
||||
namespace internal {
|
||||
|
||||
template <typename Derived, typename Scalar = typename traits<Derived>::Scalar>
|
||||
struct squared_norm_impl {
|
||||
using Real = typename NumTraits<Scalar>::Real;
|
||||
static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Real run(const Derived& a) {
|
||||
Scalar result = a.unaryExpr(squared_norm_functor<Scalar>()).sum();
|
||||
return numext::real(result) + numext::imag(result);
|
||||
}
|
||||
};
|
||||
|
||||
template <typename Derived>
|
||||
struct squared_norm_impl<Derived, bool> {
|
||||
static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool run(const Derived& a) { return a.any(); }
|
||||
};
|
||||
|
||||
} // end namespace internal
|
||||
|
||||
/** \fn MatrixBase::dot
|
||||
* \returns the dot product of *this with other.
|
||||
*
|
||||
* \only_for_vectors
|
||||
*
|
||||
* \note If the scalar type is complex numbers, then this function returns the hermitian
|
||||
* (sesquilinear) dot product, conjugate-linear in the first variable and linear in the
|
||||
* second variable.
|
||||
*
|
||||
* \sa squaredNorm(), norm()
|
||||
*/
|
||||
template <typename Derived>
|
||||
template <typename OtherDerived>
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
|
||||
typename ScalarBinaryOpTraits<typename internal::traits<Derived>::Scalar,
|
||||
typename internal::traits<OtherDerived>::Scalar>::ReturnType
|
||||
MatrixBase<Derived>::dot(const MatrixBase<OtherDerived>& other) const {
|
||||
return internal::dot_impl<Derived, OtherDerived>::run(derived(), other.derived());
|
||||
}
|
||||
|
||||
//---------- implementation of L2 norm and related functions ----------
|
||||
|
||||
/** \returns, for vectors, the squared \em l2 norm of \c *this, and for matrices the squared Frobenius norm.
|
||||
 * In both cases, it is the sum of the squares of all the matrix entries.
 * For vectors, this also equals the dot product of \c *this with itself.
|
||||
*
|
||||
* \sa dot(), norm(), lpNorm()
|
||||
*/
|
||||
template <typename Derived>
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename NumTraits<typename internal::traits<Derived>::Scalar>::Real
|
||||
MatrixBase<Derived>::squaredNorm() const {
|
||||
return internal::squared_norm_impl<Derived>::run(derived());
|
||||
}
|
||||
|
||||
/** \returns, for vectors, the \em l2 norm of \c *this, and for matrices the Frobenius norm.
|
||||
 * In both cases, it is the square root of the sum of the squares of all the matrix entries.
 * For vectors, this also equals the square root of the dot product of \c *this with itself.
|
||||
*
|
||||
* \sa lpNorm(), dot(), squaredNorm()
|
||||
*/
|
||||
template <typename Derived>
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename NumTraits<typename internal::traits<Derived>::Scalar>::Real
|
||||
MatrixBase<Derived>::norm() const {
|
||||
return numext::sqrt(squaredNorm());
|
||||
}
|
||||
|
||||
/** \returns an expression of the quotient of \c *this by its own norm.
|
||||
*
|
||||
* \warning If the input vector is too small (i.e., this->norm()==0),
|
||||
* then this function returns a copy of the input.
|
||||
*
|
||||
* \only_for_vectors
|
||||
*
|
||||
* \sa norm(), normalize()
|
||||
*/
|
||||
template <typename Derived>
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::PlainObject MatrixBase<Derived>::normalized()
|
||||
const {
|
||||
typedef typename internal::nested_eval<Derived, 2>::type Nested_;
|
||||
Nested_ n(derived());
|
||||
RealScalar z = n.squaredNorm();
|
||||
// NOTE: after extensive benchmarking, this conditional does not impact performance, at least on recent x86 CPU
|
||||
if (z > RealScalar(0))
|
||||
return n / numext::sqrt(z);
|
||||
else
|
||||
return n;
|
||||
}
|
||||
|
||||
/** Normalizes the vector, i.e. divides it by its own norm.
|
||||
*
|
||||
* \only_for_vectors
|
||||
*
|
||||
* \warning If the input vector is too small (i.e., this->norm()==0), then \c *this is left unchanged.
|
||||
*
|
||||
* \sa norm(), normalized()
|
||||
*/
|
||||
template <typename Derived>
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void MatrixBase<Derived>::normalize() {
|
||||
RealScalar z = squaredNorm();
|
||||
// NOTE: after extensive benchmarking, this conditional does not impact performance, at least on recent x86 CPU
|
||||
if (z > RealScalar(0)) derived() /= numext::sqrt(z);
|
||||
}
|
||||
|
||||
/** \returns an expression of the quotient of \c *this by its own norm while avoiding underflow and overflow.
|
||||
*
|
||||
* \only_for_vectors
|
||||
*
|
||||
 * This method is analogous to the normalized() method, but it reduces the risk of
|
||||
* underflow and overflow when computing the norm.
|
||||
*
|
||||
* \warning If the input vector is too small (i.e., this->norm()==0),
|
||||
* then this function returns a copy of the input.
|
||||
*
|
||||
* \sa stableNorm(), stableNormalize(), normalized()
|
||||
*/
|
||||
template <typename Derived>
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::PlainObject
|
||||
MatrixBase<Derived>::stableNormalized() const {
|
||||
typedef typename internal::nested_eval<Derived, 3>::type Nested_;
|
||||
Nested_ n(derived());
|
||||
RealScalar w = n.cwiseAbs().maxCoeff();
|
||||
RealScalar z = (n / w).squaredNorm();
|
||||
if (z > RealScalar(0))
|
||||
return n / (numext::sqrt(z) * w);
|
||||
else
|
||||
return n;
|
||||
}
|
||||
|
||||
/** Normalizes the vector while avoiding underflow and overflow
|
||||
*
|
||||
* \only_for_vectors
|
||||
*
|
||||
 * This method is analogous to the normalize() method, but it reduces the risk of
|
||||
* underflow and overflow when computing the norm.
|
||||
*
|
||||
* \warning If the input vector is too small (i.e., this->norm()==0), then \c *this is left unchanged.
|
||||
*
|
||||
* \sa stableNorm(), stableNormalized(), normalize()
|
||||
*/
|
||||
template <typename Derived>
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void MatrixBase<Derived>::stableNormalize() {
|
||||
RealScalar w = cwiseAbs().maxCoeff();
|
||||
RealScalar z = (derived() / w).squaredNorm();
|
||||
if (z > RealScalar(0)) derived() /= numext::sqrt(z) * w;
|
||||
}
|
||||
|
||||
//---------- implementation of other norms ----------
|
||||
|
||||
namespace internal {
|
||||
|
||||
template <typename Derived, int p>
|
||||
struct lpNorm_selector {
|
||||
typedef typename NumTraits<typename traits<Derived>::Scalar>::Real RealScalar;
|
||||
EIGEN_DEVICE_FUNC static inline RealScalar run(const MatrixBase<Derived>& m) {
|
||||
EIGEN_USING_STD(pow)
|
||||
return pow(m.cwiseAbs().array().pow(p).sum(), RealScalar(1) / p);
|
||||
}
|
||||
};
|
||||
|
||||
template <typename Derived>
|
||||
struct lpNorm_selector<Derived, 1> {
|
||||
EIGEN_DEVICE_FUNC static inline typename NumTraits<typename traits<Derived>::Scalar>::Real run(
|
||||
const MatrixBase<Derived>& m) {
|
||||
return m.cwiseAbs().sum();
|
||||
}
|
||||
};
|
||||
|
||||
template <typename Derived>
|
||||
struct lpNorm_selector<Derived, 2> {
|
||||
EIGEN_DEVICE_FUNC static inline typename NumTraits<typename traits<Derived>::Scalar>::Real run(
|
||||
const MatrixBase<Derived>& m) {
|
||||
return m.norm();
|
||||
}
|
||||
};
|
||||
|
||||
template <typename Derived>
|
||||
struct lpNorm_selector<Derived, Infinity> {
|
||||
typedef typename NumTraits<typename traits<Derived>::Scalar>::Real RealScalar;
|
||||
EIGEN_DEVICE_FUNC static inline RealScalar run(const MatrixBase<Derived>& m) {
|
||||
if (Derived::SizeAtCompileTime == 0 || (Derived::SizeAtCompileTime == Dynamic && m.size() == 0))
|
||||
return RealScalar(0);
|
||||
return m.cwiseAbs().maxCoeff();
|
||||
}
|
||||
};
|
||||
|
||||
} // end namespace internal
|
||||
|
||||
/** \returns the \b coefficient-wise \f$ \ell^p \f$ norm of \c *this, that is, returns the p-th root of the sum of the
|
||||
* p-th powers of the absolute values of the coefficients of \c *this. If \a p is the special value \a Eigen::Infinity,
|
||||
* this function returns the \f$ \ell^\infty \f$ norm, that is the maximum of the absolute values of the coefficients of
|
||||
* \c *this.
|
||||
*
|
||||
* In all cases, if \c *this is empty, then the value 0 is returned.
|
||||
*
|
||||
* \note For matrices, this function does not compute the <a
|
||||
* href="https://en.wikipedia.org/wiki/Operator_norm">operator-norm</a>. That is, if \c *this is a matrix, then its
|
||||
* coefficients are interpreted as a 1D vector. Nonetheless, you can easily compute the 1-norm and \f$\infty\f$-norm
|
||||
* matrix operator norms using \link TutorialReductionsVisitorsBroadcastingReductionsNorm partial reductions \endlink.
|
||||
*
|
||||
* \sa norm()
|
||||
*/
|
||||
template <typename Derived>
|
||||
template <int p>
|
||||
#ifndef EIGEN_PARSED_BY_DOXYGEN
|
||||
EIGEN_DEVICE_FUNC inline typename NumTraits<typename internal::traits<Derived>::Scalar>::Real
|
||||
#else
|
||||
EIGEN_DEVICE_FUNC MatrixBase<Derived>::RealScalar
|
||||
#endif
|
||||
MatrixBase<Derived>::lpNorm() const {
|
||||
return internal::lpNorm_selector<Derived, p>::run(*this);
|
||||
}
|
||||
|
||||
//---------- implementation of isOrthogonal / isUnitary ----------
|
||||
|
||||
/** \returns true if *this is approximately orthogonal to \a other,
|
||||
* within the precision given by \a prec.
|
||||
*
|
||||
* Example: \include MatrixBase_isOrthogonal.cpp
|
||||
* Output: \verbinclude MatrixBase_isOrthogonal.out
|
||||
*/
|
||||
template <typename Derived>
|
||||
template <typename OtherDerived>
|
||||
bool MatrixBase<Derived>::isOrthogonal(const MatrixBase<OtherDerived>& other, const RealScalar& prec) const {
|
||||
typename internal::nested_eval<Derived, 2>::type nested(derived());
|
||||
typename internal::nested_eval<OtherDerived, 2>::type otherNested(other.derived());
|
||||
return numext::abs2(nested.dot(otherNested)) <= prec * prec * nested.squaredNorm() * otherNested.squaredNorm();
|
||||
}
|
||||
|
||||
/** \returns true if *this is approximately a unitary matrix,
|
||||
* within the precision given by \a prec. In the case where the \a Scalar
|
||||
* type is real numbers, a unitary matrix is an orthogonal matrix, whence the name.
|
||||
*
|
||||
* \note This can be used to check whether a family of vectors forms an orthonormal basis.
|
||||
* Indeed, \c m.isUnitary() returns true if and only if the columns (equivalently, the rows) of m form an
|
||||
* orthonormal basis.
|
||||
*
|
||||
* Example: \include MatrixBase_isUnitary.cpp
|
||||
* Output: \verbinclude MatrixBase_isUnitary.out
|
||||
*/
|
||||
template <typename Derived>
|
||||
bool MatrixBase<Derived>::isUnitary(const RealScalar& prec) const {
|
||||
typename internal::nested_eval<Derived, 1>::type self(derived());
|
||||
for (Index i = 0; i < cols(); ++i) {
|
||||
if (!internal::isApprox(self.col(i).squaredNorm(), static_cast<RealScalar>(1), prec)) return false;
|
||||
for (Index j = 0; j < i; ++j)
|
||||
if (!internal::isMuchSmallerThan(self.col(i).dot(self.col(j)), static_cast<Scalar>(1), prec)) return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
} // end namespace Eigen
|
||||
|
||||
#endif // EIGEN_DOT_H
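// Illustrative sketch (editorial addition, not part of the header above): the dot/norm
// family implemented in this file, with the expected values spelled out in comments.
#include <Eigen/Dense>
#include <iostream>

int main() {
  Eigen::Vector3d a(1.0, 2.0, 2.0), b(3.0, 0.0, 4.0);
  std::cout << a.dot(b) << "\n";          // 1*3 + 2*0 + 2*4 = 11
  std::cout << a.squaredNorm() << "\n";   // 1 + 4 + 4 = 9
  std::cout << a.norm() << "\n";          // sqrt(9) = 3
  std::cout << a.normalized().transpose() << "\n";  // a / 3
  std::cout << a.lpNorm<1>() << "\n";                    // |1| + |2| + |2| = 5
  std::cout << a.lpNorm<Eigen::Infinity>() << "\n";      // max |coeff| = 2
  std::cout << Eigen::Matrix2d::Identity().isUnitary() << "\n";  // 1: columns are orthonormal
}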
|
||||
149
2025.09.22_cpp_with_eigen_package/Eigen/src/Core/EigenBase.h
Normal file
@@ -0,0 +1,149 @@
|
||||
// This file is part of Eigen, a lightweight C++ template library
|
||||
// for linear algebra.
|
||||
//
|
||||
// Copyright (C) 2009 Benoit Jacob <jacob.benoit.1@gmail.com>
|
||||
// Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla
|
||||
// Public License v. 2.0. If a copy of the MPL was not distributed
|
||||
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
#ifndef EIGEN_EIGENBASE_H
|
||||
#define EIGEN_EIGENBASE_H
|
||||
|
||||
// IWYU pragma: private
|
||||
#include "./InternalHeaderCheck.h"
|
||||
|
||||
namespace Eigen {
|
||||
|
||||
/** \class EigenBase
 * \ingroup Core_Module
 *
 * Common base class for all classes T such that MatrixBase has an operator=(T) and a constructor MatrixBase(T).
 *
 * In other words, an EigenBase object is an object that can be copied into a MatrixBase.
 *
 * Besides MatrixBase-derived classes, this also includes special matrix classes such as diagonal matrices, etc.
 *
 * Notice that this class is trivial; it is only used to disambiguate overloaded functions.
 *
 * \sa \blank \ref TopicClassHierarchy
 */
|
||||
template <typename Derived>
|
||||
struct EigenBase {
|
||||
// typedef typename internal::plain_matrix_type<Derived>::type PlainObject;
|
||||
|
||||
/** \brief The interface type of indices
|
||||
* \details To change this, \c \#define the preprocessor symbol \c EIGEN_DEFAULT_DENSE_INDEX_TYPE.
|
||||
* \sa StorageIndex, \ref TopicPreprocessorDirectives.
|
||||
 * DEPRECATED: since Eigen 3.3, use Eigen::Index instead.
 * The deprecation is not marked with a doxygen \deprecated tag or a deprecation attribute because there are too
 * many existing usages.
|
||||
*/
|
||||
typedef Eigen::Index Index;
|
||||
|
||||
// FIXME is it needed?
|
||||
typedef typename internal::traits<Derived>::StorageKind StorageKind;
|
||||
|
||||
/** \returns a reference to the derived object */
|
||||
EIGEN_DEVICE_FUNC constexpr Derived& derived() { return *static_cast<Derived*>(this); }
|
||||
/** \returns a const reference to the derived object */
|
||||
EIGEN_DEVICE_FUNC constexpr const Derived& derived() const { return *static_cast<const Derived*>(this); }
|
||||
|
||||
EIGEN_DEVICE_FUNC inline constexpr Derived& const_cast_derived() const {
|
||||
return *static_cast<Derived*>(const_cast<EigenBase*>(this));
|
||||
}
|
||||
EIGEN_DEVICE_FUNC inline const Derived& const_derived() const { return *static_cast<const Derived*>(this); }
|
||||
|
||||
/** \returns the number of rows. \sa cols(), RowsAtCompileTime */
|
||||
EIGEN_DEVICE_FUNC constexpr Index rows() const noexcept { return derived().rows(); }
|
||||
/** \returns the number of columns. \sa rows(), ColsAtCompileTime*/
|
||||
EIGEN_DEVICE_FUNC constexpr Index cols() const noexcept { return derived().cols(); }
|
||||
/** \returns the number of coefficients, which is rows()*cols().
|
||||
* \sa rows(), cols(), SizeAtCompileTime. */
|
||||
EIGEN_DEVICE_FUNC constexpr Index size() const noexcept { return rows() * cols(); }
|
||||
|
||||
/** \internal Don't use it, but do the equivalent: \code dst = *this; \endcode */
|
||||
template <typename Dest>
|
||||
EIGEN_DEVICE_FUNC inline void evalTo(Dest& dst) const {
|
||||
derived().evalTo(dst);
|
||||
}
|
||||
|
||||
/** \internal Don't use it, but do the equivalent: \code dst += *this; \endcode */
|
||||
template <typename Dest>
|
||||
EIGEN_DEVICE_FUNC inline void addTo(Dest& dst) const {
|
||||
// This is the default implementation,
|
||||
// derived class can reimplement it in a more optimized way.
|
||||
typename Dest::PlainObject res(rows(), cols());
|
||||
evalTo(res);
|
||||
dst += res;
|
||||
}
|
||||
|
||||
/** \internal Don't use it, but do the equivalent: \code dst -= *this; \endcode */
|
||||
template <typename Dest>
|
||||
EIGEN_DEVICE_FUNC inline void subTo(Dest& dst) const {
|
||||
// This is the default implementation,
|
||||
// derived class can reimplement it in a more optimized way.
|
||||
typename Dest::PlainObject res(rows(), cols());
|
||||
evalTo(res);
|
||||
dst -= res;
|
||||
}
|
||||
|
||||
/** \internal Don't use it, but do the equivalent: \code dst.applyOnTheRight(*this); \endcode */
|
||||
template <typename Dest>
|
||||
EIGEN_DEVICE_FUNC inline void applyThisOnTheRight(Dest& dst) const {
|
||||
// This is the default implementation,
|
||||
// derived class can reimplement it in a more optimized way.
|
||||
dst = dst * this->derived();
|
||||
}
|
||||
|
||||
/** \internal Don't use it, but do the equivalent: \code dst.applyOnTheLeft(*this); \endcode */
|
||||
template <typename Dest>
|
||||
EIGEN_DEVICE_FUNC inline void applyThisOnTheLeft(Dest& dst) const {
|
||||
// This is the default implementation,
|
||||
// derived class can reimplement it in a more optimized way.
|
||||
dst = this->derived() * dst;
|
||||
}
|
||||
|
||||
template <typename Device>
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE DeviceWrapper<Derived, Device> device(Device& device);
|
||||
template <typename Device>
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE DeviceWrapper<const Derived, Device> device(Device& device) const;
|
||||
};
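// Editor's note: an illustrative sketch (not part of the original header) of why EigenBase is useful as the most
// general parameter type: it accepts dense expressions as well as special matrices such as DiagonalMatrix.
// The function name is hypothetical, and <iostream> plus <Eigen/Dense> are assumed to be included.
//
//   template <typename Derived>
//   void printSize(const Eigen::EigenBase<Derived>& x) {
//     std::cout << x.rows() << " x " << x.cols() << " (" << x.size() << " coefficients)\n";
//   }
//
//   Eigen::Matrix3f m;                  // MatrixBase-derived: accepted
//   Eigen::DiagonalMatrix<float, 3> d;  // not a MatrixBase, but still an EigenBase: accepted
//   printSize(m);
//   printSize(d);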
|
||||
|
||||
/***************************************************************************
|
||||
* Implementation of matrix base methods
|
||||
***************************************************************************/
|
||||
|
||||
/** \brief Copies the generic expression \a other into *this.
 *
 * \details The expression must provide a (templated) evalTo(Derived& dst) const
 * function which does the actual job. In practice, this allows users to write
 * their own special matrix classes without having to modify MatrixBase.
 *
 * \returns a reference to *this.
 */
|
||||
template <typename Derived>
|
||||
template <typename OtherDerived>
|
||||
EIGEN_DEVICE_FUNC Derived& DenseBase<Derived>::operator=(const EigenBase<OtherDerived>& other) {
|
||||
call_assignment(derived(), other.derived());
|
||||
return derived();
|
||||
}
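// Editor's note: a minimal sketch of the evalTo() protocol described above, added for illustration; it is not
// part of the original header. The class is hypothetical, and a complete implementation would also need an
// internal::traits<> specialization for the new type; that boilerplate is only hinted at here.
//
//   struct MyOnes;  // a toy "special matrix" whose coefficients are all ones
//   // namespace Eigen { namespace internal { template <> struct traits<MyOnes> { /* Scalar, StorageKind, ... */ }; } }
//
//   struct MyOnes : Eigen::EigenBase<MyOnes> {
//     Eigen::Index n;
//     Eigen::Index rows() const { return n; }
//     Eigen::Index cols() const { return n; }
//     template <typename Dest>
//     void evalTo(Dest& dst) const { dst.setOnes(n, n); }  // does the actual job
//   };
//
//   // With the traits in place:
//   //   MyOnes ones; ones.n = 4;
//   //   Eigen::MatrixXd m = ones;   // invokes ones.evalTo(m)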
|
||||
|
||||
template <typename Derived>
|
||||
template <typename OtherDerived>
|
||||
EIGEN_DEVICE_FUNC Derived& DenseBase<Derived>::operator+=(const EigenBase<OtherDerived>& other) {
|
||||
call_assignment(derived(), other.derived(), internal::add_assign_op<Scalar, typename OtherDerived::Scalar>());
|
||||
return derived();
|
||||
}
|
||||
|
||||
template <typename Derived>
|
||||
template <typename OtherDerived>
|
||||
EIGEN_DEVICE_FUNC Derived& DenseBase<Derived>::operator-=(const EigenBase<OtherDerived>& other) {
|
||||
call_assignment(derived(), other.derived(), internal::sub_assign_op<Scalar, typename OtherDerived::Scalar>());
|
||||
return derived();
|
||||
}
|
||||
|
||||
} // end namespace Eigen
|
||||
|
||||
#endif // EIGEN_EIGENBASE_H
|
||||
140
2025.09.22_cpp_with_eigen_package/Eigen/src/Core/Fill.h
Normal file
@@ -0,0 +1,140 @@
|
||||
// This file is part of Eigen, a lightweight C++ template library
|
||||
// for linear algebra.
|
||||
//
|
||||
// Copyright (C) 2024 Charles Schlosser <cs.schlosser@gmail.com>
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla
|
||||
// Public License v. 2.0. If a copy of the MPL was not distributed
|
||||
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
#ifndef EIGEN_FILL_H
|
||||
#define EIGEN_FILL_H
|
||||
|
||||
// IWYU pragma: private
|
||||
#include "./InternalHeaderCheck.h"
|
||||
|
||||
namespace Eigen {
|
||||
|
||||
namespace internal {
|
||||
|
||||
template <typename Xpr>
|
||||
struct eigen_fill_helper : std::false_type {};
|
||||
|
||||
template <typename Scalar, int Rows, int Cols, int Options, int MaxRows, int MaxCols>
|
||||
struct eigen_fill_helper<Matrix<Scalar, Rows, Cols, Options, MaxRows, MaxCols>> : std::true_type {};
|
||||
|
||||
template <typename Scalar, int Rows, int Cols, int Options, int MaxRows, int MaxCols>
|
||||
struct eigen_fill_helper<Array<Scalar, Rows, Cols, Options, MaxRows, MaxCols>> : std::true_type {};
|
||||
|
||||
template <typename Xpr, int BlockRows, int BlockCols>
|
||||
struct eigen_fill_helper<Block<Xpr, BlockRows, BlockCols, /*InnerPanel*/ true>> : eigen_fill_helper<Xpr> {};
|
||||
|
||||
template <typename Xpr, int BlockRows, int BlockCols>
|
||||
struct eigen_fill_helper<Block<Xpr, BlockRows, BlockCols, /*InnerPanel*/ false>>
|
||||
: std::integral_constant<bool, eigen_fill_helper<Xpr>::value &&
|
||||
(Xpr::IsRowMajor ? (BlockRows == 1) : (BlockCols == 1))> {};
|
||||
|
||||
template <typename Xpr, int Options>
|
||||
struct eigen_fill_helper<Map<Xpr, Options, Stride<0, 0>>> : eigen_fill_helper<Xpr> {};
|
||||
|
||||
template <typename Xpr, int Options, int OuterStride_>
|
||||
struct eigen_fill_helper<Map<Xpr, Options, Stride<OuterStride_, 0>>>
|
||||
: std::integral_constant<bool, eigen_fill_helper<Xpr>::value &&
|
||||
enum_eq_not_dynamic(OuterStride_, Xpr::InnerSizeAtCompileTime)> {};
|
||||
|
||||
template <typename Xpr, int Options, int OuterStride_>
|
||||
struct eigen_fill_helper<Map<Xpr, Options, Stride<OuterStride_, 1>>>
|
||||
: eigen_fill_helper<Map<Xpr, Options, Stride<OuterStride_, 0>>> {};
|
||||
|
||||
template <typename Xpr, int Options, int InnerStride_>
|
||||
struct eigen_fill_helper<Map<Xpr, Options, InnerStride<InnerStride_>>>
|
||||
: eigen_fill_helper<Map<Xpr, Options, Stride<0, InnerStride_>>> {};
|
||||
|
||||
template <typename Xpr, int Options, int OuterStride_>
|
||||
struct eigen_fill_helper<Map<Xpr, Options, OuterStride<OuterStride_>>>
|
||||
: eigen_fill_helper<Map<Xpr, Options, Stride<OuterStride_, 0>>> {};
|
||||
|
||||
template <typename Xpr>
|
||||
struct eigen_fill_impl<Xpr, /*use_fill*/ false> {
|
||||
using Scalar = typename Xpr::Scalar;
|
||||
using Func = scalar_constant_op<Scalar>;
|
||||
using PlainObject = typename Xpr::PlainObject;
|
||||
using Constant = typename PlainObject::ConstantReturnType;
|
||||
static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr void run(Xpr& dst, const Scalar& val) {
|
||||
const Constant src(dst.rows(), dst.cols(), val);
|
||||
run(dst, src);
|
||||
}
|
||||
template <typename SrcXpr>
|
||||
static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr void run(Xpr& dst, const SrcXpr& src) {
|
||||
call_dense_assignment_loop(dst, src, assign_op<Scalar, Scalar>());
|
||||
}
|
||||
};
|
||||
|
||||
#if EIGEN_COMP_MSVC || defined(EIGEN_GPU_COMPILE_PHASE)
|
||||
template <typename Xpr>
|
||||
struct eigen_fill_impl<Xpr, /*use_fill*/ true> : eigen_fill_impl<Xpr, /*use_fill*/ false> {};
|
||||
#else
|
||||
template <typename Xpr>
|
||||
struct eigen_fill_impl<Xpr, /*use_fill*/ true> {
|
||||
using Scalar = typename Xpr::Scalar;
|
||||
static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void run(Xpr& dst, const Scalar& val) {
|
||||
const Scalar val_copy = val;
|
||||
using std::fill_n;
|
||||
fill_n(dst.data(), dst.size(), val_copy);
|
||||
}
|
||||
template <typename SrcXpr>
|
||||
static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void run(Xpr& dst, const SrcXpr& src) {
|
||||
resize_if_allowed(dst, src, assign_op<Scalar, Scalar>());
|
||||
const Scalar& val = src.functor()();
|
||||
run(dst, val);
|
||||
}
|
||||
};
|
||||
#endif
|
||||
|
||||
template <typename Xpr>
|
||||
struct eigen_memset_helper {
|
||||
static constexpr bool value =
|
||||
std::is_trivially_copyable<typename Xpr::Scalar>::value && eigen_fill_helper<Xpr>::value;
|
||||
};
|
||||
|
||||
template <typename Xpr>
|
||||
struct eigen_zero_impl<Xpr, /*use_memset*/ false> {
|
||||
using Scalar = typename Xpr::Scalar;
|
||||
using PlainObject = typename Xpr::PlainObject;
|
||||
using Zero = typename PlainObject::ZeroReturnType;
|
||||
static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr void run(Xpr& dst) {
|
||||
const Zero src(dst.rows(), dst.cols());
|
||||
run(dst, src);
|
||||
}
|
||||
template <typename SrcXpr>
|
||||
static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr void run(Xpr& dst, const SrcXpr& src) {
|
||||
call_dense_assignment_loop(dst, src, assign_op<Scalar, Scalar>());
|
||||
}
|
||||
};
|
||||
|
||||
template <typename Xpr>
|
||||
struct eigen_zero_impl<Xpr, /*use_memset*/ true> {
|
||||
using Scalar = typename Xpr::Scalar;
|
||||
static constexpr size_t max_bytes = (std::numeric_limits<std::ptrdiff_t>::max)();
|
||||
static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void run(Xpr& dst) {
|
||||
const size_t num_bytes = dst.size() * sizeof(Scalar);
|
||||
if (num_bytes == 0) return;
|
||||
void* dst_ptr = static_cast<void*>(dst.data());
|
||||
#ifndef EIGEN_NO_DEBUG
|
||||
if (num_bytes > max_bytes) throw_std_bad_alloc();
|
||||
eigen_assert((dst_ptr != nullptr) && "null pointer dereference error!");
|
||||
#endif
|
||||
EIGEN_USING_STD(memset);
|
||||
memset(dst_ptr, 0, num_bytes);
|
||||
}
|
||||
template <typename SrcXpr>
|
||||
static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void run(Xpr& dst, const SrcXpr& src) {
|
||||
resize_if_allowed(dst, src, assign_op<Scalar, Scalar>());
|
||||
run(dst);
|
||||
}
|
||||
};
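// Editor's note: an illustrative sketch of what the helpers above decide, added by the editor and not part of
// the original header. eigen_fill_helper / eigen_memset_helper detect expressions whose coefficients are stored
// contiguously, so that fill()/setZero() can lower to std::fill_n or memset instead of a coefficient-wise loop.
//
//   Eigen::MatrixXf a(64, 64);
//   a.setZero();                  // contiguous storage: eligible for the memset path
//   a.col(3).setConstant(1.5f);   // a column of a column-major matrix is an inner panel: still contiguous
//   a.row(3).setConstant(2.5f);   // a row of a column-major matrix is strided: falls back to the generic loop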
|
||||
|
||||
} // namespace internal
|
||||
} // namespace Eigen
|
||||
|
||||
#endif // EIGEN_FILL_H
|
||||
464
2025.09.22_cpp_with_eigen_package/Eigen/src/Core/FindCoeff.h
Normal file
@@ -0,0 +1,464 @@
|
||||
// This file is part of Eigen, a lightweight C++ template library
|
||||
// for linear algebra.
|
||||
//
|
||||
// Copyright (C) 2025 Charlie Schlosser <cs.schlosser@gmail.com>
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla
|
||||
// Public License v. 2.0. If a copy of the MPL was not distributed
|
||||
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
#ifndef EIGEN_FIND_COEFF_H
|
||||
#define EIGEN_FIND_COEFF_H
|
||||
|
||||
// IWYU pragma: private
|
||||
#include "./InternalHeaderCheck.h"
|
||||
|
||||
namespace Eigen {
|
||||
|
||||
namespace internal {
|
||||
|
||||
template <typename Scalar, int NaNPropagation, bool IsInteger = NumTraits<Scalar>::IsInteger>
|
||||
struct max_coeff_functor {
|
||||
EIGEN_DEVICE_FUNC inline bool compareCoeff(const Scalar& incumbent, const Scalar& candidate) const {
|
||||
return candidate > incumbent;
|
||||
}
|
||||
template <typename Packet>
|
||||
EIGEN_DEVICE_FUNC inline Packet comparePacket(const Packet& incumbent, const Packet& candidate) const {
|
||||
return pcmp_lt(incumbent, candidate);
|
||||
}
|
||||
template <typename Packet>
|
||||
EIGEN_DEVICE_FUNC inline Scalar predux(const Packet& a) const {
|
||||
return predux_max(a);
|
||||
}
|
||||
};
|
||||
|
||||
template <typename Scalar>
|
||||
struct max_coeff_functor<Scalar, PropagateNaN, false> {
|
||||
EIGEN_DEVICE_FUNC inline bool compareCoeff(const Scalar& incumbent, const Scalar& candidate) const {
|
||||
return (candidate > incumbent) || ((candidate != candidate) && (incumbent == incumbent));
|
||||
}
|
||||
template <typename Packet>
|
||||
EIGEN_DEVICE_FUNC inline Packet comparePacket(const Packet& incumbent, const Packet& candidate) {
|
||||
return pandnot(pcmp_lt_or_nan(incumbent, candidate), pisnan(incumbent));
|
||||
}
|
||||
template <typename Packet>
|
||||
EIGEN_DEVICE_FUNC inline Scalar predux(const Packet& a) const {
|
||||
return predux_max<PropagateNaN>(a);
|
||||
}
|
||||
};
|
||||
|
||||
template <typename Scalar>
|
||||
struct max_coeff_functor<Scalar, PropagateNumbers, false> {
|
||||
EIGEN_DEVICE_FUNC inline bool compareCoeff(const Scalar& incumbent, const Scalar& candidate) const {
|
||||
return (candidate > incumbent) || ((candidate == candidate) && (incumbent != incumbent));
|
||||
}
|
||||
template <typename Packet>
|
||||
EIGEN_DEVICE_FUNC inline Packet comparePacket(const Packet& incumbent, const Packet& candidate) const {
|
||||
return pandnot(pcmp_lt_or_nan(incumbent, candidate), pisnan(candidate));
|
||||
}
|
||||
template <typename Packet>
|
||||
EIGEN_DEVICE_FUNC inline Scalar predux(const Packet& a) const {
|
||||
return predux_max<PropagateNumbers>(a);
|
||||
}
|
||||
};
|
||||
|
||||
template <typename Scalar, int NaNPropagation, bool IsInteger = NumTraits<Scalar>::IsInteger>
|
||||
struct min_coeff_functor {
|
||||
EIGEN_DEVICE_FUNC inline bool compareCoeff(const Scalar& incumbent, const Scalar& candidate) const {
|
||||
return candidate < incumbent;
|
||||
}
|
||||
template <typename Packet>
|
||||
EIGEN_DEVICE_FUNC inline Packet comparePacket(const Packet& incumbent, const Packet& candidate) const {
|
||||
return pcmp_lt(candidate, incumbent);
|
||||
}
|
||||
template <typename Packet>
|
||||
EIGEN_DEVICE_FUNC inline Scalar predux(const Packet& a) const {
|
||||
return predux_min(a);
|
||||
}
|
||||
};
|
||||
|
||||
template <typename Scalar>
|
||||
struct min_coeff_functor<Scalar, PropagateNaN, false> {
|
||||
EIGEN_DEVICE_FUNC inline bool compareCoeff(const Scalar& incumbent, const Scalar& candidate) const {
|
||||
return (candidate < incumbent) || ((candidate != candidate) && (incumbent == incumbent));
|
||||
}
|
||||
template <typename Packet>
|
||||
EIGEN_DEVICE_FUNC inline Packet comparePacket(const Packet& incumbent, const Packet& candidate) {
|
||||
return pandnot(pcmp_lt_or_nan(candidate, incumbent), pisnan(incumbent));
|
||||
}
|
||||
template <typename Packet>
|
||||
EIGEN_DEVICE_FUNC inline Scalar predux(const Packet& a) const {
|
||||
return predux_min<PropagateNaN>(a);
|
||||
}
|
||||
};
|
||||
|
||||
template <typename Scalar>
|
||||
struct min_coeff_functor<Scalar, PropagateNumbers, false> {
|
||||
EIGEN_DEVICE_FUNC inline bool compareCoeff(const Scalar& incumbent, const Scalar& candidate) const {
|
||||
return (candidate < incumbent) || ((candidate == candidate) && (incumbent != incumbent));
|
||||
}
|
||||
template <typename Packet>
|
||||
EIGEN_DEVICE_FUNC inline Packet comparePacket(const Packet& incumbent, const Packet& candidate) const {
|
||||
return pandnot(pcmp_lt_or_nan(candidate, incumbent), pisnan(candidate));
|
||||
}
|
||||
template <typename Packet>
|
||||
EIGEN_DEVICE_FUNC inline Scalar predux(const Packet& a) const {
|
||||
return predux_min<PropagateNumbers>(a);
|
||||
}
|
||||
};
|
||||
|
||||
template <typename Scalar>
|
||||
struct min_max_traits {
|
||||
static constexpr bool PacketAccess = packet_traits<Scalar>::Vectorizable;
|
||||
};
|
||||
template <typename Scalar, int NaNPropagation>
|
||||
struct functor_traits<max_coeff_functor<Scalar, NaNPropagation>> : min_max_traits<Scalar> {};
|
||||
template <typename Scalar, int NaNPropagation>
|
||||
struct functor_traits<min_coeff_functor<Scalar, NaNPropagation>> : min_max_traits<Scalar> {};
|
||||
|
||||
template <typename Evaluator, typename Func, bool Linear, bool Vectorize>
|
||||
struct find_coeff_loop;
|
||||
template <typename Evaluator, typename Func>
|
||||
struct find_coeff_loop<Evaluator, Func, /*Linear*/ false, /*Vectorize*/ false> {
|
||||
using Scalar = typename Evaluator::Scalar;
|
||||
static EIGEN_DEVICE_FUNC inline void run(const Evaluator& eval, Func& func, Scalar& res, Index& outer, Index& inner) {
|
||||
Index outerSize = eval.outerSize();
|
||||
Index innerSize = eval.innerSize();
|
||||
|
||||
/* initialization performed in calling function */
|
||||
/* result = eval.coeff(0, 0); */
|
||||
/* outer = 0; */
|
||||
/* inner = 0; */
|
||||
|
||||
for (Index j = 0; j < outerSize; j++) {
|
||||
for (Index i = 0; i < innerSize; i++) {
|
||||
Scalar xprCoeff = eval.coeffByOuterInner(j, i);
|
||||
bool newRes = func.compareCoeff(res, xprCoeff);
|
||||
if (newRes) {
|
||||
outer = j;
|
||||
inner = i;
|
||||
res = xprCoeff;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
template <typename Evaluator, typename Func>
|
||||
struct find_coeff_loop<Evaluator, Func, /*Linear*/ true, /*Vectorize*/ false> {
|
||||
using Scalar = typename Evaluator::Scalar;
|
||||
static EIGEN_DEVICE_FUNC inline void run(const Evaluator& eval, Func& func, Scalar& res, Index& index) {
|
||||
Index size = eval.size();
|
||||
|
||||
/* initialization performed in calling function */
|
||||
/* result = eval.coeff(0); */
|
||||
/* index = 0; */
|
||||
|
||||
for (Index k = 0; k < size; k++) {
|
||||
Scalar xprCoeff = eval.coeff(k);
|
||||
bool newRes = func.compareCoeff(res, xprCoeff);
|
||||
if (newRes) {
|
||||
index = k;
|
||||
res = xprCoeff;
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
template <typename Evaluator, typename Func>
|
||||
struct find_coeff_loop<Evaluator, Func, /*Linear*/ false, /*Vectorize*/ true> {
|
||||
using ScalarImpl = find_coeff_loop<Evaluator, Func, false, false>;
|
||||
using Scalar = typename Evaluator::Scalar;
|
||||
using Packet = typename Evaluator::Packet;
|
||||
static constexpr int PacketSize = unpacket_traits<Packet>::size;
|
||||
static EIGEN_DEVICE_FUNC inline void run(const Evaluator& eval, Func& func, Scalar& result, Index& outer,
|
||||
Index& inner) {
|
||||
Index outerSize = eval.outerSize();
|
||||
Index innerSize = eval.innerSize();
|
||||
Index packetEnd = numext::round_down(innerSize, PacketSize);
|
||||
|
||||
/* initialization performed in calling function */
|
||||
/* result = eval.coeff(0, 0); */
|
||||
/* outer = 0; */
|
||||
/* inner = 0; */
|
||||
|
||||
bool checkPacket = false;
|
||||
|
||||
for (Index j = 0; j < outerSize; j++) {
|
||||
Packet resultPacket = pset1<Packet>(result);
|
||||
for (Index i = 0; i < packetEnd; i += PacketSize) {
|
||||
Packet xprPacket = eval.template packetByOuterInner<Unaligned, Packet>(j, i);
|
||||
if (predux_any(func.comparePacket(resultPacket, xprPacket))) {
|
||||
outer = j;
|
||||
inner = i;
|
||||
result = func.predux(xprPacket);
|
||||
resultPacket = pset1<Packet>(result);
|
||||
checkPacket = true;
|
||||
}
|
||||
}
|
||||
|
||||
for (Index i = packetEnd; i < innerSize; i++) {
|
||||
Scalar xprCoeff = eval.coeffByOuterInner(j, i);
|
||||
if (func.compareCoeff(result, xprCoeff)) {
|
||||
outer = j;
|
||||
inner = i;
|
||||
result = xprCoeff;
|
||||
checkPacket = false;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (checkPacket) {
|
||||
result = eval.coeffByOuterInner(outer, inner);
|
||||
Index i_end = inner + PacketSize;
|
||||
for (Index i = inner; i < i_end; i++) {
|
||||
Scalar xprCoeff = eval.coeffByOuterInner(outer, i);
|
||||
if (func.compareCoeff(result, xprCoeff)) {
|
||||
inner = i;
|
||||
result = xprCoeff;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
template <typename Evaluator, typename Func>
|
||||
struct find_coeff_loop<Evaluator, Func, /*Linear*/ true, /*Vectorize*/ true> {
|
||||
using ScalarImpl = find_coeff_loop<Evaluator, Func, true, false>;
|
||||
using Scalar = typename Evaluator::Scalar;
|
||||
using Packet = typename Evaluator::Packet;
|
||||
static constexpr int PacketSize = unpacket_traits<Packet>::size;
|
||||
static constexpr int Alignment = Evaluator::Alignment;
|
||||
|
||||
static EIGEN_DEVICE_FUNC inline void run(const Evaluator& eval, Func& func, Scalar& result, Index& index) {
|
||||
Index size = eval.size();
|
||||
Index packetEnd = numext::round_down(size, PacketSize);
|
||||
|
||||
/* initialization performed in calling function */
|
||||
/* result = eval.coeff(0); */
|
||||
/* index = 0; */
|
||||
|
||||
Packet resultPacket = pset1<Packet>(result);
|
||||
bool checkPacket = false;
|
||||
|
||||
for (Index k = 0; k < packetEnd; k += PacketSize) {
|
||||
Packet xprPacket = eval.template packet<Alignment, Packet>(k);
|
||||
if (predux_any(func.comparePacket(resultPacket, xprPacket))) {
|
||||
index = k;
|
||||
result = func.predux(xprPacket);
|
||||
resultPacket = pset1<Packet>(result);
|
||||
checkPacket = true;
|
||||
}
|
||||
}
|
||||
|
||||
for (Index k = packetEnd; k < size; k++) {
|
||||
Scalar xprCoeff = eval.coeff(k);
|
||||
if (func.compareCoeff(result, xprCoeff)) {
|
||||
index = k;
|
||||
result = xprCoeff;
|
||||
checkPacket = false;
|
||||
}
|
||||
}
|
||||
|
||||
if (checkPacket) {
|
||||
result = eval.coeff(index);
|
||||
Index k_end = index + PacketSize;
|
||||
for (Index k = index; k < k_end; k++) {
|
||||
Scalar xprCoeff = eval.coeff(k);
|
||||
if (func.compareCoeff(result, xprCoeff)) {
|
||||
index = k;
|
||||
result = xprCoeff;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
template <typename Derived>
|
||||
struct find_coeff_evaluator : public evaluator<Derived> {
|
||||
using Base = evaluator<Derived>;
|
||||
using Scalar = typename Derived::Scalar;
|
||||
using Packet = typename packet_traits<Scalar>::type;
|
||||
static constexpr int Flags = Base::Flags;
|
||||
static constexpr bool IsRowMajor = bool(Flags & RowMajorBit);
|
||||
EIGEN_DEVICE_FUNC inline find_coeff_evaluator(const Derived& xpr) : Base(xpr), m_xpr(xpr) {}
|
||||
|
||||
EIGEN_DEVICE_FUNC inline Scalar coeffByOuterInner(Index outer, Index inner) const {
|
||||
Index row = IsRowMajor ? outer : inner;
|
||||
Index col = IsRowMajor ? inner : outer;
|
||||
return Base::coeff(row, col);
|
||||
}
|
||||
template <int LoadMode, typename PacketType>
|
||||
EIGEN_DEVICE_FUNC inline PacketType packetByOuterInner(Index outer, Index inner) const {
|
||||
Index row = IsRowMajor ? outer : inner;
|
||||
Index col = IsRowMajor ? inner : outer;
|
||||
return Base::template packet<LoadMode, PacketType>(row, col);
|
||||
}
|
||||
|
||||
EIGEN_DEVICE_FUNC inline Index innerSize() const { return m_xpr.innerSize(); }
|
||||
EIGEN_DEVICE_FUNC inline Index outerSize() const { return m_xpr.outerSize(); }
|
||||
EIGEN_DEVICE_FUNC inline Index size() const { return m_xpr.size(); }
|
||||
|
||||
const Derived& m_xpr;
|
||||
};
|
||||
|
||||
template <typename Derived, typename Func>
|
||||
struct find_coeff_impl {
|
||||
using Evaluator = find_coeff_evaluator<Derived>;
|
||||
static constexpr int Flags = Evaluator::Flags;
|
||||
static constexpr int Alignment = Evaluator::Alignment;
|
||||
static constexpr bool IsRowMajor = Derived::IsRowMajor;
|
||||
static constexpr int MaxInnerSizeAtCompileTime =
|
||||
IsRowMajor ? Derived::MaxColsAtCompileTime : Derived::MaxRowsAtCompileTime;
|
||||
static constexpr int MaxSizeAtCompileTime = Derived::MaxSizeAtCompileTime;
|
||||
|
||||
using Scalar = typename Derived::Scalar;
|
||||
using Packet = typename Evaluator::Packet;
|
||||
|
||||
static constexpr int PacketSize = unpacket_traits<Packet>::size;
|
||||
static constexpr bool Linearize = bool(Flags & LinearAccessBit);
|
||||
static constexpr bool DontVectorize =
|
||||
enum_lt_not_dynamic(Linearize ? MaxSizeAtCompileTime : MaxInnerSizeAtCompileTime, PacketSize);
|
||||
static constexpr bool Vectorize =
|
||||
!DontVectorize && bool(Flags & PacketAccessBit) && functor_traits<Func>::PacketAccess;
|
||||
|
||||
using Loop = find_coeff_loop<Evaluator, Func, Linearize, Vectorize>;
|
||||
|
||||
template <bool ForwardLinearAccess = Linearize, std::enable_if_t<!ForwardLinearAccess, bool> = true>
|
||||
static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void run(const Derived& xpr, Func& func, Scalar& res, Index& outer,
|
||||
Index& inner) {
|
||||
Evaluator eval(xpr);
|
||||
Loop::run(eval, func, res, outer, inner);
|
||||
}
|
||||
template <bool ForwardLinearAccess = Linearize, std::enable_if_t<ForwardLinearAccess, bool> = true>
|
||||
static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void run(const Derived& xpr, Func& func, Scalar& res, Index& outer,
|
||||
Index& inner) {
|
||||
// where possible, use the linear loop and back-calculate the outer and inner indices
|
||||
Index index = 0;
|
||||
run(xpr, func, res, index);
|
||||
outer = index / xpr.innerSize();
|
||||
inner = index % xpr.innerSize();
|
||||
}
|
||||
static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void run(const Derived& xpr, Func& func, Scalar& res, Index& index) {
|
||||
Evaluator eval(xpr);
|
||||
Loop::run(eval, func, res, index);
|
||||
}
|
||||
};
|
||||
|
||||
template <typename Derived, typename IndexType, typename Func>
|
||||
EIGEN_DEVICE_FUNC typename internal::traits<Derived>::Scalar findCoeff(const DenseBase<Derived>& mat, Func& func,
|
||||
IndexType* rowPtr, IndexType* colPtr) {
|
||||
eigen_assert(mat.rows() > 0 && mat.cols() > 0 && "you are using an empty matrix");
|
||||
using Scalar = typename DenseBase<Derived>::Scalar;
|
||||
using FindCoeffImpl = internal::find_coeff_impl<Derived, Func>;
|
||||
Index outer = 0;
|
||||
Index inner = 0;
|
||||
Scalar res = mat.coeff(0, 0);
|
||||
FindCoeffImpl::run(mat.derived(), func, res, outer, inner);
|
||||
*rowPtr = internal::convert_index<IndexType>(Derived::IsRowMajor ? outer : inner);
|
||||
if (colPtr) *colPtr = internal::convert_index<IndexType>(Derived::IsRowMajor ? inner : outer);
|
||||
return res;
|
||||
}
|
||||
|
||||
template <typename Derived, typename IndexType, typename Func>
|
||||
EIGEN_DEVICE_FUNC typename internal::traits<Derived>::Scalar findCoeff(const DenseBase<Derived>& mat, Func& func,
|
||||
IndexType* indexPtr) {
|
||||
eigen_assert(mat.size() > 0 && "you are using an empty matrix");
|
||||
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
|
||||
using Scalar = typename DenseBase<Derived>::Scalar;
|
||||
using FindCoeffImpl = internal::find_coeff_impl<Derived, Func>;
|
||||
Index index = 0;
|
||||
Scalar res = mat.coeff(0);
|
||||
FindCoeffImpl::run(mat.derived(), func, res, index);
|
||||
*indexPtr = internal::convert_index<IndexType>(index);
|
||||
return res;
|
||||
}
|
||||
|
||||
} // namespace internal
|
||||
|
||||
/** \fn DenseBase<Derived>::minCoeff(IndexType* rowId, IndexType* colId) const
 * \returns the minimum of all coefficients of *this and puts in *row and *col its location.
 *
 * If there are multiple coefficients with the same extreme value, the location of the first instance is returned.
 *
 * In case \c *this contains NaN, NaNPropagation determines the behavior:
 *   NaNPropagation == PropagateFast : undefined
 *   NaNPropagation == PropagateNaN : result is NaN
 *   NaNPropagation == PropagateNumbers : result is the minimum of the elements that are not NaN
 * \warning the matrix must not be empty, otherwise an assertion is triggered.
 *
 * \sa DenseBase::minCoeff(Index*), DenseBase::maxCoeff(Index*,Index*), DenseBase::visit(), DenseBase::minCoeff()
 */
|
||||
template <typename Derived>
|
||||
template <int NaNPropagation, typename IndexType>
|
||||
EIGEN_DEVICE_FUNC typename internal::traits<Derived>::Scalar DenseBase<Derived>::minCoeff(IndexType* rowPtr,
|
||||
IndexType* colPtr) const {
|
||||
using Func = internal::min_coeff_functor<Scalar, NaNPropagation>;
|
||||
Func func;
|
||||
return internal::findCoeff(derived(), func, rowPtr, colPtr);
|
||||
}
|
||||
|
||||
/** \returns the minimum of all coefficients of *this and puts in *index its location.
 *
 * If there are multiple coefficients with the same extreme value, the location of the first instance is returned.
 *
 * In case \c *this contains NaN, NaNPropagation determines the behavior:
 *   NaNPropagation == PropagateFast : undefined
 *   NaNPropagation == PropagateNaN : result is NaN
 *   NaNPropagation == PropagateNumbers : result is the minimum of the elements that are not NaN
 * \warning the matrix must not be empty, otherwise an assertion is triggered.
 *
 * \sa DenseBase::minCoeff(IndexType*,IndexType*), DenseBase::maxCoeff(IndexType*,IndexType*), DenseBase::visit(),
 * DenseBase::minCoeff()
 */
|
||||
template <typename Derived>
|
||||
template <int NaNPropagation, typename IndexType>
|
||||
EIGEN_DEVICE_FUNC typename internal::traits<Derived>::Scalar DenseBase<Derived>::minCoeff(IndexType* indexPtr) const {
|
||||
using Func = internal::min_coeff_functor<Scalar, NaNPropagation>;
|
||||
Func func;
|
||||
return internal::findCoeff(derived(), func, indexPtr);
|
||||
}
|
||||
|
||||
/** \fn DenseBase<Derived>::maxCoeff(IndexType* rowId, IndexType* colId) const
 * \returns the maximum of all coefficients of *this and puts in *row and *col its location.
 *
 * If there are multiple coefficients with the same extreme value, the location of the first instance is returned.
 *
 * In case \c *this contains NaN, NaNPropagation determines the behavior:
 *   NaNPropagation == PropagateFast : undefined
 *   NaNPropagation == PropagateNaN : result is NaN
 *   NaNPropagation == PropagateNumbers : result is the maximum of the elements that are not NaN
 * \warning the matrix must not be empty, otherwise an assertion is triggered.
 *
 * \sa DenseBase::minCoeff(IndexType*,IndexType*), DenseBase::visit(), DenseBase::maxCoeff()
 */
|
||||
template <typename Derived>
|
||||
template <int NaNPropagation, typename IndexType>
|
||||
EIGEN_DEVICE_FUNC typename internal::traits<Derived>::Scalar DenseBase<Derived>::maxCoeff(IndexType* rowPtr,
|
||||
IndexType* colPtr) const {
|
||||
using Func = internal::max_coeff_functor<Scalar, NaNPropagation>;
|
||||
Func func;
|
||||
return internal::findCoeff(derived(), func, rowPtr, colPtr);
|
||||
}
|
||||
|
||||
/** \returns the maximum of all coefficients of *this and puts in *index its location.
 *
 * If there are multiple coefficients with the same extreme value, the location of the first instance is returned.
 *
 * In case \c *this contains NaN, NaNPropagation determines the behavior:
 *   NaNPropagation == PropagateFast : undefined
 *   NaNPropagation == PropagateNaN : result is NaN
 *   NaNPropagation == PropagateNumbers : result is the maximum of the elements that are not NaN
 * \warning the matrix must not be empty, otherwise an assertion is triggered.
 *
 * \sa DenseBase::maxCoeff(IndexType*,IndexType*), DenseBase::minCoeff(IndexType*,IndexType*), DenseBase::visit(),
 * DenseBase::maxCoeff()
 */
|
||||
template <typename Derived>
|
||||
template <int NaNPropagation, typename IndexType>
|
||||
EIGEN_DEVICE_FUNC typename internal::traits<Derived>::Scalar DenseBase<Derived>::maxCoeff(IndexType* indexPtr) const {
|
||||
using Func = internal::max_coeff_functor<Scalar, NaNPropagation>;
|
||||
Func func;
|
||||
return internal::findCoeff(derived(), func, indexPtr);
|
||||
}
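// Editor's note: an illustrative usage sketch for the argmin/argmax overloads documented above; it is added by
// the editor and not part of the original header. Values are made up for the example, and <limits> is assumed.
//
//   Eigen::MatrixXf m(2, 2);
//   m << 4.f, 1.f,
//        3.f, 2.f;
//   Eigen::Index row, col;
//   float mn = m.minCoeff(&row, &col);                      // mn == 1, row == 0, col == 1
//
//   Eigen::VectorXf v(3);
//   v << 5.f, std::numeric_limits<float>::quiet_NaN(), 7.f;
//   Eigen::Index i;
//   float a = v.maxCoeff<Eigen::PropagateNumbers>(&i);      // ignores the NaN: a == 7, i == 2
//   float b = v.maxCoeff<Eigen::PropagateNaN>(&i);          // b is NaN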
|
||||
|
||||
} // namespace Eigen
|
||||
|
||||
#endif // EIGEN_FIND_COEFF_H
|
||||
@@ -0,0 +1,127 @@
|
||||
// This file is part of Eigen, a lightweight C++ template library
|
||||
// for linear algebra.
|
||||
//
|
||||
// Copyright (C) 2009-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla
|
||||
// Public License v. 2.0. If a copy of the MPL was not distributed
|
||||
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
#ifndef EIGEN_FORCEALIGNEDACCESS_H
|
||||
#define EIGEN_FORCEALIGNEDACCESS_H
|
||||
|
||||
// IWYU pragma: private
|
||||
#include "./InternalHeaderCheck.h"
|
||||
|
||||
namespace Eigen {
|
||||
|
||||
/** \class ForceAlignedAccess
|
||||
* \ingroup Core_Module
|
||||
*
|
||||
* \brief Enforce aligned packet loads and stores regardless of what is requested
|
||||
*
|
||||
* \param ExpressionType the type of the object of which we are forcing aligned packet access
|
||||
*
|
||||
* This class is the return type of MatrixBase::forceAlignedAccess()
|
||||
* and most of the time this is the only way it is used.
|
||||
*
|
||||
* \sa MatrixBase::forceAlignedAccess()
|
||||
*/
|
||||
|
||||
namespace internal {
|
||||
template <typename ExpressionType>
|
||||
struct traits<ForceAlignedAccess<ExpressionType>> : public traits<ExpressionType> {};
|
||||
} // namespace internal
|
||||
|
||||
template <typename ExpressionType>
|
||||
class ForceAlignedAccess : public internal::dense_xpr_base<ForceAlignedAccess<ExpressionType>>::type {
|
||||
public:
|
||||
typedef typename internal::dense_xpr_base<ForceAlignedAccess>::type Base;
|
||||
EIGEN_DENSE_PUBLIC_INTERFACE(ForceAlignedAccess)
|
||||
|
||||
EIGEN_DEVICE_FUNC explicit inline ForceAlignedAccess(const ExpressionType& matrix) : m_expression(matrix) {}
|
||||
|
||||
EIGEN_DEVICE_FUNC constexpr Index rows() const noexcept { return m_expression.rows(); }
|
||||
EIGEN_DEVICE_FUNC constexpr Index cols() const noexcept { return m_expression.cols(); }
|
||||
EIGEN_DEVICE_FUNC constexpr Index outerStride() const noexcept { return m_expression.outerStride(); }
|
||||
EIGEN_DEVICE_FUNC constexpr Index innerStride() const noexcept { return m_expression.innerStride(); }
|
||||
|
||||
EIGEN_DEVICE_FUNC inline const CoeffReturnType coeff(Index row, Index col) const {
|
||||
return m_expression.coeff(row, col);
|
||||
}
|
||||
|
||||
EIGEN_DEVICE_FUNC inline Scalar& coeffRef(Index row, Index col) {
|
||||
return m_expression.const_cast_derived().coeffRef(row, col);
|
||||
}
|
||||
|
||||
EIGEN_DEVICE_FUNC inline const CoeffReturnType coeff(Index index) const { return m_expression.coeff(index); }
|
||||
|
||||
EIGEN_DEVICE_FUNC inline Scalar& coeffRef(Index index) { return m_expression.const_cast_derived().coeffRef(index); }
|
||||
|
||||
template <int LoadMode>
|
||||
inline const PacketScalar packet(Index row, Index col) const {
|
||||
return m_expression.template packet<Aligned>(row, col);
|
||||
}
|
||||
|
||||
template <int LoadMode>
|
||||
inline void writePacket(Index row, Index col, const PacketScalar& x) {
|
||||
m_expression.const_cast_derived().template writePacket<Aligned>(row, col, x);
|
||||
}
|
||||
|
||||
template <int LoadMode>
|
||||
inline const PacketScalar packet(Index index) const {
|
||||
return m_expression.template packet<Aligned>(index);
|
||||
}
|
||||
|
||||
template <int LoadMode>
|
||||
inline void writePacket(Index index, const PacketScalar& x) {
|
||||
m_expression.const_cast_derived().template writePacket<Aligned>(index, x);
|
||||
}
|
||||
|
||||
EIGEN_DEVICE_FUNC operator const ExpressionType&() const { return m_expression; }
|
||||
|
||||
protected:
|
||||
const ExpressionType& m_expression;
|
||||
|
||||
private:
|
||||
ForceAlignedAccess& operator=(const ForceAlignedAccess&);
|
||||
};
|
||||
|
||||
/** \returns an expression of *this with forced aligned access
 * \sa forceAlignedAccessIf(), class ForceAlignedAccess
 */
|
||||
template <typename Derived>
|
||||
inline const ForceAlignedAccess<Derived> MatrixBase<Derived>::forceAlignedAccess() const {
|
||||
return ForceAlignedAccess<Derived>(derived());
|
||||
}
|
||||
|
||||
/** \returns an expression of *this with forced aligned access
|
||||
* \sa forceAlignedAccessIf(), class ForceAlignedAccess
|
||||
*/
|
||||
template <typename Derived>
|
||||
inline ForceAlignedAccess<Derived> MatrixBase<Derived>::forceAlignedAccess() {
|
||||
return ForceAlignedAccess<Derived>(derived());
|
||||
}
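// Editor's note: an illustrative sketch of when forceAlignedAccess() is useful, added by the editor and not part
// of the original header. `buffer` is a hypothetical pointer that the caller guarantees to be 16-byte aligned.
//
//   // The Map type below is declared Unaligned, but the caller knows the buffer is aligned,
//   // so aligned packet loads/stores may be forced:
//   float* data = buffer;
//   Eigen::Map<Eigen::Matrix4f, Eigen::Unaligned> m(data);
//   Eigen::Matrix4f r = 2.0f * m.forceAlignedAccess();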
|
||||
|
||||
/** \returns an expression of *this with forced aligned access if \a Enable is true.
|
||||
* \sa forceAlignedAccess(), class ForceAlignedAccess
|
||||
*/
|
||||
template <typename Derived>
|
||||
template <bool Enable>
|
||||
inline add_const_on_value_type_t<std::conditional_t<Enable, ForceAlignedAccess<Derived>, Derived&>>
|
||||
MatrixBase<Derived>::forceAlignedAccessIf() const {
|
||||
return derived(); // FIXME This should not work but apparently is never used
|
||||
}
|
||||
|
||||
/** \returns an expression of *this with forced aligned access if \a Enable is true.
|
||||
* \sa forceAlignedAccess(), class ForceAlignedAccess
|
||||
*/
|
||||
template <typename Derived>
|
||||
template <bool Enable>
|
||||
inline std::conditional_t<Enable, ForceAlignedAccess<Derived>, Derived&> MatrixBase<Derived>::forceAlignedAccessIf() {
|
||||
return derived(); // FIXME This should not work but apparently is never used
|
||||
}
|
||||
|
||||
} // end namespace Eigen
|
||||
|
||||
#endif // EIGEN_FORCEALIGNEDACCESS_H
|
||||
132
2025.09.22_cpp_with_eigen_package/Eigen/src/Core/Fuzzy.h
Normal file
@@ -0,0 +1,132 @@
|
||||
// This file is part of Eigen, a lightweight C++ template library
|
||||
// for linear algebra.
|
||||
//
|
||||
// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
|
||||
// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla
|
||||
// Public License v. 2.0. If a copy of the MPL was not distributed
|
||||
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
#ifndef EIGEN_FUZZY_H
|
||||
#define EIGEN_FUZZY_H
|
||||
|
||||
// IWYU pragma: private
|
||||
#include "./InternalHeaderCheck.h"
|
||||
|
||||
namespace Eigen {
|
||||
|
||||
namespace internal {
|
||||
|
||||
template <typename Derived, typename OtherDerived, bool is_integer = NumTraits<typename Derived::Scalar>::IsInteger>
|
||||
struct isApprox_selector {
|
||||
EIGEN_DEVICE_FUNC static bool run(const Derived& x, const OtherDerived& y, const typename Derived::RealScalar& prec) {
|
||||
typename internal::nested_eval<Derived, 2>::type nested(x);
|
||||
typename internal::nested_eval<OtherDerived, 2>::type otherNested(y);
|
||||
return (nested.matrix() - otherNested.matrix()).cwiseAbs2().sum() <=
|
||||
prec * prec * numext::mini(nested.cwiseAbs2().sum(), otherNested.cwiseAbs2().sum());
|
||||
}
|
||||
};
|
||||
|
||||
template <typename Derived, typename OtherDerived>
|
||||
struct isApprox_selector<Derived, OtherDerived, true> {
|
||||
EIGEN_DEVICE_FUNC static bool run(const Derived& x, const OtherDerived& y, const typename Derived::RealScalar&) {
|
||||
return x.matrix() == y.matrix();
|
||||
}
|
||||
};
|
||||
|
||||
template <typename Derived, typename OtherDerived, bool is_integer = NumTraits<typename Derived::Scalar>::IsInteger>
|
||||
struct isMuchSmallerThan_object_selector {
|
||||
EIGEN_DEVICE_FUNC static bool run(const Derived& x, const OtherDerived& y, const typename Derived::RealScalar& prec) {
|
||||
return x.cwiseAbs2().sum() <= numext::abs2(prec) * y.cwiseAbs2().sum();
|
||||
}
|
||||
};
|
||||
|
||||
template <typename Derived, typename OtherDerived>
|
||||
struct isMuchSmallerThan_object_selector<Derived, OtherDerived, true> {
|
||||
EIGEN_DEVICE_FUNC static bool run(const Derived& x, const OtherDerived&, const typename Derived::RealScalar&) {
|
||||
return x.matrix() == Derived::Zero(x.rows(), x.cols()).matrix();
|
||||
}
|
||||
};
|
||||
|
||||
template <typename Derived, bool is_integer = NumTraits<typename Derived::Scalar>::IsInteger>
|
||||
struct isMuchSmallerThan_scalar_selector {
|
||||
EIGEN_DEVICE_FUNC static bool run(const Derived& x, const typename Derived::RealScalar& y,
|
||||
const typename Derived::RealScalar& prec) {
|
||||
return x.cwiseAbs2().sum() <= numext::abs2(prec * y);
|
||||
}
|
||||
};
|
||||
|
||||
template <typename Derived>
|
||||
struct isMuchSmallerThan_scalar_selector<Derived, true> {
|
||||
EIGEN_DEVICE_FUNC static bool run(const Derived& x, const typename Derived::RealScalar&,
|
||||
const typename Derived::RealScalar&) {
|
||||
return x.matrix() == Derived::Zero(x.rows(), x.cols()).matrix();
|
||||
}
|
||||
};
|
||||
|
||||
} // end namespace internal
|
||||
|
||||
/** \returns \c true if \c *this is approximately equal to \a other, within the precision
 * determined by \a prec.
 *
 * \note The fuzzy compares are done multiplicatively. Two vectors \f$ v \f$ and \f$ w \f$
 * are considered to be approximately equal within precision \f$ p \f$ if
 * \f[ \Vert v - w \Vert \leqslant p\,\min(\Vert v\Vert, \Vert w\Vert). \f]
 * For matrices, the comparison is done using the Hilbert-Schmidt norm (aka Frobenius norm,
 * i.e. the L2 norm of the coefficients).
 *
 * \note Because this comparison is multiplicative, one can't use this function
 * to check whether \c *this is approximately equal to the zero matrix or vector.
 * Indeed, \c isApprox(zero) returns false unless \c *this itself is exactly the zero matrix
 * or vector. If you want to test whether \c *this is zero, use isMuchSmallerThan(const
 * RealScalar&, RealScalar) instead.
 *
 * \sa isMuchSmallerThan(const RealScalar&, RealScalar) const
 */
|
||||
template <typename Derived>
|
||||
template <typename OtherDerived>
|
||||
EIGEN_DEVICE_FUNC bool DenseBase<Derived>::isApprox(const DenseBase<OtherDerived>& other,
|
||||
const RealScalar& prec) const {
|
||||
return internal::isApprox_selector<Derived, OtherDerived>::run(derived(), other.derived(), prec);
|
||||
}
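// Editor's note: a short illustrative sketch contrasting isApprox() with isMuchSmallerThan(), added by the
// editor and not part of the original header. Values are made up for the example.
//
//   Eigen::Vector3d v(1.0, 2.0, 3.0);
//   Eigen::Vector3d w = v + Eigen::Vector3d::Constant(1e-13);
//   bool close = v.isApprox(w);                              // true: the relative difference is tiny
//
//   Eigen::Vector3d tiny = Eigen::Vector3d::Constant(1e-14);
//   bool zeroTest = tiny.isApprox(Eigen::Vector3d::Zero());  // false: isApprox cannot be used against zero
//   bool small    = tiny.isMuchSmallerThan(v.norm());        // true: compare against a reference scale instead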
|
||||
|
||||
/** \returns \c true if the norm of \c *this is much smaller than \a other,
|
||||
* within the precision determined by \a prec.
|
||||
*
|
||||
* \note The fuzzy compares are done multiplicatively. A vector \f$ v \f$ is
|
||||
* considered to be much smaller than \f$ x \f$ within precision \f$ p \f$ if
|
||||
* \f[ \Vert v \Vert \leqslant p\,\vert x\vert. \f]
|
||||
*
|
||||
* For matrices, the comparison is done using the Hilbert-Schmidt norm. For this reason,
|
||||
* the value of the reference scalar \a other should come from the Hilbert-Schmidt norm
|
||||
* of a reference matrix of same dimensions.
|
||||
*
|
||||
* \sa isApprox(), isMuchSmallerThan(const DenseBase<OtherDerived>&, RealScalar) const
|
||||
*/
|
||||
template <typename Derived>
|
||||
EIGEN_DEVICE_FUNC bool DenseBase<Derived>::isMuchSmallerThan(const typename NumTraits<Scalar>::Real& other,
|
||||
const RealScalar& prec) const {
|
||||
return internal::isMuchSmallerThan_scalar_selector<Derived>::run(derived(), other, prec);
|
||||
}
|
||||
|
||||
/** \returns \c true if the norm of \c *this is much smaller than the norm of \a other,
|
||||
* within the precision determined by \a prec.
|
||||
*
|
||||
* \note The fuzzy compares are done multiplicatively. A vector \f$ v \f$ is
|
||||
* considered to be much smaller than a vector \f$ w \f$ within precision \f$ p \f$ if
|
||||
* \f[ \Vert v \Vert \leqslant p\,\Vert w\Vert. \f]
|
||||
* For matrices, the comparison is done using the Hilbert-Schmidt norm.
|
||||
*
|
||||
* \sa isApprox(), isMuchSmallerThan(const RealScalar&, RealScalar) const
|
||||
*/
|
||||
template <typename Derived>
|
||||
template <typename OtherDerived>
|
||||
EIGEN_DEVICE_FUNC bool DenseBase<Derived>::isMuchSmallerThan(const DenseBase<OtherDerived>& other,
|
||||
const RealScalar& prec) const {
|
||||
return internal::isMuchSmallerThan_object_selector<Derived, OtherDerived>::run(derived(), other.derived(), prec);
|
||||
}
|
||||
|
||||
} // end namespace Eigen
|
||||
|
||||
#endif // EIGEN_FUZZY_H
|
||||
@@ -0,0 +1,519 @@
|
||||
// This file is part of Eigen, a lightweight C++ template library
|
||||
// for linear algebra.
|
||||
//
|
||||
// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
|
||||
// Copyright (C) 2008-2011 Gael Guennebaud <gael.guennebaud@inria.fr>
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla
|
||||
// Public License v. 2.0. If a copy of the MPL was not distributed
|
||||
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
#ifndef EIGEN_GENERAL_PRODUCT_H
|
||||
#define EIGEN_GENERAL_PRODUCT_H
|
||||
|
||||
// IWYU pragma: private
|
||||
#include "./InternalHeaderCheck.h"
|
||||
|
||||
namespace Eigen {
|
||||
|
||||
enum { Large = 2, Small = 3 };
|
||||
|
||||
// Define the threshold value to fallback from the generic matrix-matrix product
|
||||
// implementation (heavy) to the lightweight coeff-based product one.
|
||||
// See generic_product_impl<Lhs,Rhs,DenseShape,DenseShape,GemmProduct>
|
||||
// in products/GeneralMatrixMatrix.h for more details.
|
||||
// TODO This threshold should also be used in the compile-time selector below.
|
||||
#ifndef EIGEN_GEMM_TO_COEFFBASED_THRESHOLD
|
||||
// This default value has been obtained on a Haswell architecture.
|
||||
#define EIGEN_GEMM_TO_COEFFBASED_THRESHOLD 20
|
||||
#endif
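// Editor's note: an illustrative configuration sketch for the threshold described above, added by the editor
// and not part of the original header. The value 32 is an arbitrary example, not a recommendation.
//
//   // Raise the cutoff below which matrix products fall back to the lightweight coeff-based implementation.
//   // The macro must be defined before the first inclusion of any Eigen header:
//   #define EIGEN_GEMM_TO_COEFFBASED_THRESHOLD 32
//   #include <Eigen/Dense>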
|
||||
|
||||
namespace internal {
|
||||
|
||||
template <int Rows, int Cols, int Depth>
|
||||
struct product_type_selector;
|
||||
|
||||
template <int Size, int MaxSize>
|
||||
struct product_size_category {
|
||||
enum {
|
||||
#ifndef EIGEN_GPU_COMPILE_PHASE
|
||||
is_large = MaxSize == Dynamic || Size >= EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD ||
|
||||
(Size == Dynamic && MaxSize >= EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD),
|
||||
#else
|
||||
is_large = 0,
|
||||
#endif
|
||||
value = is_large ? Large
|
||||
: Size == 1 ? 1
|
||||
: Small
|
||||
};
|
||||
};
|
||||
|
||||
template <typename Lhs, typename Rhs>
|
||||
struct product_type {
|
||||
typedef remove_all_t<Lhs> Lhs_;
|
||||
typedef remove_all_t<Rhs> Rhs_;
|
||||
enum {
|
||||
MaxRows = traits<Lhs_>::MaxRowsAtCompileTime,
|
||||
Rows = traits<Lhs_>::RowsAtCompileTime,
|
||||
MaxCols = traits<Rhs_>::MaxColsAtCompileTime,
|
||||
Cols = traits<Rhs_>::ColsAtCompileTime,
|
||||
MaxDepth = min_size_prefer_fixed(traits<Lhs_>::MaxColsAtCompileTime, traits<Rhs_>::MaxRowsAtCompileTime),
|
||||
Depth = min_size_prefer_fixed(traits<Lhs_>::ColsAtCompileTime, traits<Rhs_>::RowsAtCompileTime)
|
||||
};
|
||||
|
||||
// the splitting into different lines of code here, introducing the _select enums and the typedef below,
|
||||
// is to work around an internal compiler error with gcc 4.1 and 4.2.
|
||||
private:
|
||||
enum {
|
||||
rows_select = product_size_category<Rows, MaxRows>::value,
|
||||
cols_select = product_size_category<Cols, MaxCols>::value,
|
||||
depth_select = product_size_category<Depth, MaxDepth>::value
|
||||
};
|
||||
typedef product_type_selector<rows_select, cols_select, depth_select> selector;
|
||||
|
||||
public:
|
||||
enum { value = selector::ret, ret = selector::ret };
|
||||
#ifdef EIGEN_DEBUG_PRODUCT
|
||||
static void debug() {
|
||||
EIGEN_DEBUG_VAR(Rows);
|
||||
EIGEN_DEBUG_VAR(Cols);
|
||||
EIGEN_DEBUG_VAR(Depth);
|
||||
EIGEN_DEBUG_VAR(rows_select);
|
||||
EIGEN_DEBUG_VAR(cols_select);
|
||||
EIGEN_DEBUG_VAR(depth_select);
|
||||
EIGEN_DEBUG_VAR(value);
|
||||
}
|
||||
#endif
|
||||
};
|
||||
|
||||
/* The following allows selecting the kind of product at compile time
 * based on the three dimensions of the product.
 * This is a compile-time mapping from {1,Small,Large}^3 -> {product types} */
|
||||
// FIXME I'm not sure the current mapping is the ideal one.
|
||||
template <int M, int N>
|
||||
struct product_type_selector<M, N, 1> {
|
||||
enum { ret = OuterProduct };
|
||||
};
|
||||
template <int M>
|
||||
struct product_type_selector<M, 1, 1> {
|
||||
enum { ret = LazyCoeffBasedProductMode };
|
||||
};
|
||||
template <int N>
|
||||
struct product_type_selector<1, N, 1> {
|
||||
enum { ret = LazyCoeffBasedProductMode };
|
||||
};
|
||||
template <int Depth>
|
||||
struct product_type_selector<1, 1, Depth> {
|
||||
enum { ret = InnerProduct };
|
||||
};
|
||||
template <>
|
||||
struct product_type_selector<1, 1, 1> {
|
||||
enum { ret = InnerProduct };
|
||||
};
|
||||
template <>
|
||||
struct product_type_selector<Small, 1, Small> {
|
||||
enum { ret = CoeffBasedProductMode };
|
||||
};
|
||||
template <>
|
||||
struct product_type_selector<1, Small, Small> {
|
||||
enum { ret = CoeffBasedProductMode };
|
||||
};
|
||||
template <>
|
||||
struct product_type_selector<Small, Small, Small> {
|
||||
enum { ret = CoeffBasedProductMode };
|
||||
};
|
||||
template <>
|
||||
struct product_type_selector<Small, Small, 1> {
|
||||
enum { ret = LazyCoeffBasedProductMode };
|
||||
};
|
||||
template <>
|
||||
struct product_type_selector<Small, Large, 1> {
|
||||
enum { ret = LazyCoeffBasedProductMode };
|
||||
};
|
||||
template <>
|
||||
struct product_type_selector<Large, Small, 1> {
|
||||
enum { ret = LazyCoeffBasedProductMode };
|
||||
};
|
||||
template <>
|
||||
struct product_type_selector<1, Large, Small> {
|
||||
enum { ret = CoeffBasedProductMode };
|
||||
};
|
||||
template <>
|
||||
struct product_type_selector<1, Large, Large> {
|
||||
enum { ret = GemvProduct };
|
||||
};
|
||||
template <>
|
||||
struct product_type_selector<1, Small, Large> {
|
||||
enum { ret = CoeffBasedProductMode };
|
||||
};
|
||||
template <>
|
||||
struct product_type_selector<Large, 1, Small> {
|
||||
enum { ret = CoeffBasedProductMode };
|
||||
};
|
||||
template <>
|
||||
struct product_type_selector<Large, 1, Large> {
|
||||
enum { ret = GemvProduct };
|
||||
};
|
||||
template <>
|
||||
struct product_type_selector<Small, 1, Large> {
|
||||
enum { ret = CoeffBasedProductMode };
|
||||
};
|
||||
template <>
|
||||
struct product_type_selector<Small, Small, Large> {
|
||||
enum { ret = GemmProduct };
|
||||
};
|
||||
template <>
|
||||
struct product_type_selector<Large, Small, Large> {
|
||||
enum { ret = GemmProduct };
|
||||
};
|
||||
template <>
|
||||
struct product_type_selector<Small, Large, Large> {
|
||||
enum { ret = GemmProduct };
|
||||
};
|
||||
template <>
|
||||
struct product_type_selector<Large, Large, Large> {
|
||||
enum { ret = GemmProduct };
|
||||
};
|
||||
template <>
|
||||
struct product_type_selector<Large, Small, Small> {
|
||||
enum { ret = CoeffBasedProductMode };
|
||||
};
|
||||
template <>
|
||||
struct product_type_selector<Small, Large, Small> {
|
||||
enum { ret = CoeffBasedProductMode };
|
||||
};
|
||||
template <>
|
||||
struct product_type_selector<Large, Large, Small> {
|
||||
enum { ret = GemmProduct };
|
||||
};
|
||||
|
||||
} // end namespace internal
|
||||
|
||||
/***********************************************************************
|
||||
* Implementation of Inner Vector Vector Product
|
||||
***********************************************************************/
|
||||
|
||||
// FIXME : maybe the "inner product" could return a Scalar
|
||||
// instead of a 1x1 matrix ??
|
||||
// Pro: more natural for the user
|
||||
// Cons: this could be a problem if in a meta unrolled algorithm a matrix-matrix
|
||||
// product ends up to a row-vector times col-vector product... To tackle this use
|
||||
// case, we could have a specialization for Block<MatrixType,1,1> with: operator=(Scalar x);
|
||||
|
||||
/***********************************************************************
|
||||
* Implementation of Outer Vector Vector Product
|
||||
***********************************************************************/
|
||||
|
||||
/***********************************************************************
|
||||
* Implementation of General Matrix Vector Product
|
||||
***********************************************************************/
|
||||
|
||||
/* According to the shape/flags of the matrix we have to distinguish 3 different cases:
 * 1 - the matrix is col-major, BLAS compatible and M is large => call fast BLAS-like colmajor routine
 * 2 - the matrix is row-major, BLAS compatible and N is large => call fast BLAS-like rowmajor routine
 * 3 - all other cases are handled using a simple loop along the outer-storage direction.
 * Therefore we need a lower-level meta selector.
 * Furthermore, if the matrix is the rhs, then the product has to be transposed.
 */
|
||||
namespace internal {
|
||||
|
||||
template <int Side, int StorageOrder, bool BlasCompatible>
|
||||
struct gemv_dense_selector;
|
||||
|
||||
} // end namespace internal
|
||||
|
||||
namespace internal {
|
||||
|
||||
template <typename Scalar, int Size, int MaxSize, bool Cond>
|
||||
struct gemv_static_vector_if;
|
||||
|
||||
template <typename Scalar, int Size, int MaxSize>
|
||||
struct gemv_static_vector_if<Scalar, Size, MaxSize, false> {
|
||||
EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC constexpr Scalar* data() {
|
||||
eigen_internal_assert(false && "should never be called");
|
||||
return 0;
|
||||
}
|
||||
};
|
||||
|
||||
template <typename Scalar, int Size>
|
||||
struct gemv_static_vector_if<Scalar, Size, Dynamic, true> {
|
||||
EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC constexpr Scalar* data() { return 0; }
|
||||
};
|
||||
|
||||
template <typename Scalar, int Size, int MaxSize>
|
||||
struct gemv_static_vector_if<Scalar, Size, MaxSize, true> {
|
||||
#if EIGEN_MAX_STATIC_ALIGN_BYTES != 0
|
||||
internal::plain_array<Scalar, internal::min_size_prefer_fixed(Size, MaxSize), 0, AlignedMax> m_data;
|
||||
EIGEN_STRONG_INLINE constexpr Scalar* data() { return m_data.array; }
|
||||
#else
|
||||
// Some architectures cannot align on the stack,
|
||||
// => let's manually enforce alignment by allocating more data and return the address of the first aligned element.
|
||||
internal::plain_array<Scalar, internal::min_size_prefer_fixed(Size, MaxSize) + EIGEN_MAX_ALIGN_BYTES, 0> m_data;
|
||||
EIGEN_STRONG_INLINE constexpr Scalar* data() {
|
||||
return reinterpret_cast<Scalar*>((std::uintptr_t(m_data.array) & ~(std::size_t(EIGEN_MAX_ALIGN_BYTES - 1))) +
|
||||
EIGEN_MAX_ALIGN_BYTES);
|
||||
}
|
||||
#endif
|
||||
};
|
||||
|
||||
// The vector is on the left => transposition
|
||||
template <int StorageOrder, bool BlasCompatible>
|
||||
struct gemv_dense_selector<OnTheLeft, StorageOrder, BlasCompatible> {
|
||||
template <typename Lhs, typename Rhs, typename Dest>
|
||||
static void run(const Lhs& lhs, const Rhs& rhs, Dest& dest, const typename Dest::Scalar& alpha) {
|
||||
Transpose<Dest> destT(dest);
|
||||
enum { OtherStorageOrder = StorageOrder == RowMajor ? ColMajor : RowMajor };
|
||||
gemv_dense_selector<OnTheRight, OtherStorageOrder, BlasCompatible>::run(rhs.transpose(), lhs.transpose(), destT,
|
||||
alpha);
|
||||
}
|
||||
};
|
||||
|
||||
template <>
|
||||
struct gemv_dense_selector<OnTheRight, ColMajor, true> {
|
||||
template <typename Lhs, typename Rhs, typename Dest>
|
||||
static inline void run(const Lhs& lhs, const Rhs& rhs, Dest& dest, const typename Dest::Scalar& alpha) {
|
||||
typedef typename Lhs::Scalar LhsScalar;
|
||||
typedef typename Rhs::Scalar RhsScalar;
|
||||
typedef typename Dest::Scalar ResScalar;
|
||||
|
||||
typedef internal::blas_traits<Lhs> LhsBlasTraits;
|
||||
typedef typename LhsBlasTraits::DirectLinearAccessType ActualLhsType;
|
||||
typedef internal::blas_traits<Rhs> RhsBlasTraits;
|
||||
typedef typename RhsBlasTraits::DirectLinearAccessType ActualRhsType;
|
||||
|
||||
typedef Map<Matrix<ResScalar, Dynamic, 1>, plain_enum_min(AlignedMax, internal::packet_traits<ResScalar>::size)>
|
||||
MappedDest;
|
||||
|
||||
ActualLhsType actualLhs = LhsBlasTraits::extract(lhs);
|
||||
ActualRhsType actualRhs = RhsBlasTraits::extract(rhs);
|
||||
|
||||
ResScalar actualAlpha = combine_scalar_factors(alpha, lhs, rhs);
|
||||
|
||||
// make sure Dest is a compile-time vector type (bug 1166)
|
||||
typedef std::conditional_t<Dest::IsVectorAtCompileTime, Dest, typename Dest::ColXpr> ActualDest;
|
||||
|
||||
enum {
|
||||
// FIXME find a way to allow an inner stride on the result if packet_traits<Scalar>::size==1;
// on the other hand, it is good for the cache to pack the vector anyway...
|
||||
EvalToDestAtCompileTime = (ActualDest::InnerStrideAtCompileTime == 1),
|
||||
ComplexByReal = (NumTraits<LhsScalar>::IsComplex) && (!NumTraits<RhsScalar>::IsComplex),
|
||||
MightCannotUseDest = ((!EvalToDestAtCompileTime) || ComplexByReal) && (ActualDest::MaxSizeAtCompileTime != 0)
|
||||
};
|
||||
|
||||
typedef const_blas_data_mapper<LhsScalar, Index, ColMajor> LhsMapper;
|
||||
typedef const_blas_data_mapper<RhsScalar, Index, RowMajor> RhsMapper;
|
||||
RhsScalar compatibleAlpha = get_factor<ResScalar, RhsScalar>::run(actualAlpha);
|
||||
|
||||
if (!MightCannotUseDest) {
|
||||
// shortcut if we are sure to be able to use dest directly;
// this helps the compiler generate cleaner and more optimized code for the most common cases
|
||||
general_matrix_vector_product<Index, LhsScalar, LhsMapper, ColMajor, LhsBlasTraits::NeedToConjugate, RhsScalar,
|
||||
RhsMapper, RhsBlasTraits::NeedToConjugate>::run(actualLhs.rows(), actualLhs.cols(),
|
||||
LhsMapper(actualLhs.data(),
|
||||
actualLhs.outerStride()),
|
||||
RhsMapper(actualRhs.data(),
|
||||
actualRhs.innerStride()),
|
||||
dest.data(), 1, compatibleAlpha);
|
||||
} else {
|
||||
gemv_static_vector_if<ResScalar, ActualDest::SizeAtCompileTime, ActualDest::MaxSizeAtCompileTime,
|
||||
MightCannotUseDest>
|
||||
static_dest;
|
||||
|
||||
const bool alphaIsCompatible = (!ComplexByReal) || (numext::is_exactly_zero(numext::imag(actualAlpha)));
|
||||
const bool evalToDest = EvalToDestAtCompileTime && alphaIsCompatible;
|
||||
|
||||
ei_declare_aligned_stack_constructed_variable(ResScalar, actualDestPtr, dest.size(),
|
||||
evalToDest ? dest.data() : static_dest.data());
|
||||
|
||||
if (!evalToDest) {
|
||||
#ifdef EIGEN_DENSE_STORAGE_CTOR_PLUGIN
|
||||
constexpr int Size = Dest::SizeAtCompileTime;
|
||||
Index size = dest.size();
|
||||
EIGEN_DENSE_STORAGE_CTOR_PLUGIN
|
||||
#endif
|
||||
if (!alphaIsCompatible) {
|
||||
MappedDest(actualDestPtr, dest.size()).setZero();
|
||||
compatibleAlpha = RhsScalar(1);
|
||||
} else
|
||||
MappedDest(actualDestPtr, dest.size()) = dest;
|
||||
}
|
||||
|
||||
general_matrix_vector_product<Index, LhsScalar, LhsMapper, ColMajor, LhsBlasTraits::NeedToConjugate, RhsScalar,
|
||||
RhsMapper, RhsBlasTraits::NeedToConjugate>::run(actualLhs.rows(), actualLhs.cols(),
|
||||
LhsMapper(actualLhs.data(),
|
||||
actualLhs.outerStride()),
|
||||
RhsMapper(actualRhs.data(),
|
||||
actualRhs.innerStride()),
|
||||
actualDestPtr, 1, compatibleAlpha);
|
||||
|
||||
if (!evalToDest) {
|
||||
if (!alphaIsCompatible)
|
||||
dest.matrix() += actualAlpha * MappedDest(actualDestPtr, dest.size());
|
||||
else
|
||||
dest = MappedDest(actualDestPtr, dest.size());
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
template <>
|
||||
struct gemv_dense_selector<OnTheRight, RowMajor, true> {
|
||||
template <typename Lhs, typename Rhs, typename Dest>
|
||||
static void run(const Lhs& lhs, const Rhs& rhs, Dest& dest, const typename Dest::Scalar& alpha) {
|
||||
typedef typename Lhs::Scalar LhsScalar;
|
||||
typedef typename Rhs::Scalar RhsScalar;
|
||||
typedef typename Dest::Scalar ResScalar;
|
||||
|
||||
typedef internal::blas_traits<Lhs> LhsBlasTraits;
|
||||
typedef typename LhsBlasTraits::DirectLinearAccessType ActualLhsType;
|
||||
typedef internal::blas_traits<Rhs> RhsBlasTraits;
|
||||
typedef typename RhsBlasTraits::DirectLinearAccessType ActualRhsType;
|
||||
typedef internal::remove_all_t<ActualRhsType> ActualRhsTypeCleaned;
|
||||
|
||||
std::add_const_t<ActualLhsType> actualLhs = LhsBlasTraits::extract(lhs);
|
||||
std::add_const_t<ActualRhsType> actualRhs = RhsBlasTraits::extract(rhs);
|
||||
|
||||
ResScalar actualAlpha = combine_scalar_factors(alpha, lhs, rhs);
|
||||
|
||||
enum {
|
||||
// FIXME find a way to allow an inner stride on the result if packet_traits<Scalar>::size==1;
// on the other hand, it is good for the cache to pack the vector anyway...
|
||||
DirectlyUseRhs =
|
||||
ActualRhsTypeCleaned::InnerStrideAtCompileTime == 1 || ActualRhsTypeCleaned::MaxSizeAtCompileTime == 0
|
||||
};
|
||||
|
||||
gemv_static_vector_if<RhsScalar, ActualRhsTypeCleaned::SizeAtCompileTime,
|
||||
ActualRhsTypeCleaned::MaxSizeAtCompileTime, !DirectlyUseRhs>
|
||||
static_rhs;
|
||||
|
||||
ei_declare_aligned_stack_constructed_variable(
|
||||
RhsScalar, actualRhsPtr, actualRhs.size(),
|
||||
DirectlyUseRhs ? const_cast<RhsScalar*>(actualRhs.data()) : static_rhs.data());
|
||||
|
||||
if (!DirectlyUseRhs) {
|
||||
#ifdef EIGEN_DENSE_STORAGE_CTOR_PLUGIN
|
||||
constexpr int Size = ActualRhsTypeCleaned::SizeAtCompileTime;
|
||||
Index size = actualRhs.size();
|
||||
EIGEN_DENSE_STORAGE_CTOR_PLUGIN
|
||||
#endif
|
||||
Map<typename ActualRhsTypeCleaned::PlainObject>(actualRhsPtr, actualRhs.size()) = actualRhs;
|
||||
}
|
||||
|
||||
typedef const_blas_data_mapper<LhsScalar, Index, RowMajor> LhsMapper;
|
||||
typedef const_blas_data_mapper<RhsScalar, Index, ColMajor> RhsMapper;
|
||||
general_matrix_vector_product<Index, LhsScalar, LhsMapper, RowMajor, LhsBlasTraits::NeedToConjugate, RhsScalar,
|
||||
RhsMapper, RhsBlasTraits::NeedToConjugate>::
|
||||
run(actualLhs.rows(), actualLhs.cols(), LhsMapper(actualLhs.data(), actualLhs.outerStride()),
|
||||
RhsMapper(actualRhsPtr, 1), dest.data(),
|
||||
dest.col(0).innerStride(), // NOTE if dest is not a vector at compile-time, then dest.innerStride() might
|
||||
// be wrong. (bug 1166)
|
||||
actualAlpha);
|
||||
}
|
||||
};
|
||||
|
||||
template <>
|
||||
struct gemv_dense_selector<OnTheRight, ColMajor, false> {
|
||||
template <typename Lhs, typename Rhs, typename Dest>
|
||||
static void run(const Lhs& lhs, const Rhs& rhs, Dest& dest, const typename Dest::Scalar& alpha) {
|
||||
EIGEN_STATIC_ASSERT((!nested_eval<Lhs, 1>::Evaluate),
|
||||
EIGEN_INTERNAL_COMPILATION_ERROR_OR_YOU_MADE_A_PROGRAMMING_MISTAKE);
|
||||
// TODO if rhs is large enough it might be beneficial to make sure that dest is sequentially stored in memory,
|
||||
// otherwise use a temp
|
||||
typename nested_eval<Rhs, 1>::type actual_rhs(rhs);
|
||||
const Index size = rhs.rows();
|
||||
for (Index k = 0; k < size; ++k) dest += (alpha * actual_rhs.coeff(k)) * lhs.col(k);
|
||||
}
|
||||
};
|
||||
|
||||
template <>
|
||||
struct gemv_dense_selector<OnTheRight, RowMajor, false> {
|
||||
template <typename Lhs, typename Rhs, typename Dest>
|
||||
static void run(const Lhs& lhs, const Rhs& rhs, Dest& dest, const typename Dest::Scalar& alpha) {
|
||||
EIGEN_STATIC_ASSERT((!nested_eval<Lhs, 1>::Evaluate),
|
||||
EIGEN_INTERNAL_COMPILATION_ERROR_OR_YOU_MADE_A_PROGRAMMING_MISTAKE);
|
||||
typename nested_eval<Rhs, Lhs::RowsAtCompileTime>::type actual_rhs(rhs);
|
||||
const Index rows = dest.rows();
|
||||
for (Index i = 0; i < rows; ++i)
|
||||
dest.coeffRef(i) += alpha * (lhs.row(i).cwiseProduct(actual_rhs.transpose())).sum();
|
||||
}
|
||||
};
|
||||
|
||||
} // end namespace internal
|
||||
|
||||
/***************************************************************************
|
||||
* Implementation of matrix base methods
|
||||
***************************************************************************/
|
||||
|
||||
/** \returns the matrix product of \c *this and \a other.
|
||||
*
|
||||
* \note If instead of the matrix product you want the coefficient-wise product, see Cwise::operator*().
|
||||
*
|
||||
* \sa lazyProduct(), operator*=(const MatrixBase&), Cwise::operator*()
|
||||
*/
|
||||
template <typename Derived>
|
||||
template <typename OtherDerived>
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Product<Derived, OtherDerived> MatrixBase<Derived>::operator*(
|
||||
const MatrixBase<OtherDerived>& other) const {
|
||||
// A note regarding the function declaration: in MSVC, this function will sometimes
// not be inlined, since DenseStorage is an unwindable object for dynamic
// matrices and product types hold a member to store the result.
// Thus tagging this function with EIGEN_STRONG_INLINE does not help.
|
||||
enum {
|
||||
ProductIsValid = Derived::ColsAtCompileTime == Dynamic || OtherDerived::RowsAtCompileTime == Dynamic ||
|
||||
int(Derived::ColsAtCompileTime) == int(OtherDerived::RowsAtCompileTime),
|
||||
AreVectors = Derived::IsVectorAtCompileTime && OtherDerived::IsVectorAtCompileTime,
|
||||
SameSizes = EIGEN_PREDICATE_SAME_MATRIX_SIZE(Derived, OtherDerived)
|
||||
};
|
||||
// note to the lost user:
|
||||
// * for a dot product use: v1.dot(v2)
|
||||
// * for a coeff-wise product use: v1.cwiseProduct(v2)
|
||||
EIGEN_STATIC_ASSERT(
|
||||
ProductIsValid || !(AreVectors && SameSizes),
|
||||
INVALID_VECTOR_VECTOR_PRODUCT__IF_YOU_WANTED_A_DOT_OR_COEFF_WISE_PRODUCT_YOU_MUST_USE_THE_EXPLICIT_FUNCTIONS)
|
||||
EIGEN_STATIC_ASSERT(ProductIsValid || !(SameSizes && !AreVectors),
|
||||
INVALID_MATRIX_PRODUCT__IF_YOU_WANTED_A_COEFF_WISE_PRODUCT_YOU_MUST_USE_THE_EXPLICIT_FUNCTION)
|
||||
EIGEN_STATIC_ASSERT(ProductIsValid || SameSizes, INVALID_MATRIX_PRODUCT)
|
||||
#ifdef EIGEN_DEBUG_PRODUCT
|
||||
internal::product_type<Derived, OtherDerived>::debug();
|
||||
#endif
|
||||
|
||||
return Product<Derived, OtherDerived>(derived(), other.derived());
|
||||
}
|
||||
|
||||
/** \returns an expression of the matrix product of \c *this and \a other without implicit evaluation.
|
||||
*
|
||||
* The returned product will behave like any other expression: its coefficients will be
* computed one at a time, as requested. This might be useful in some extremely rare cases when only
* a small, non-coherent fraction of the result's coefficients has to be computed.
|
||||
*
|
||||
* \warning This version of the matrix product can be much slower. So use it only if you know
* what you are doing and have measured a true speed improvement.
|
||||
*
|
||||
* \sa operator*(const MatrixBase&)
|
||||
*/
|
||||
template <typename Derived>
|
||||
template <typename OtherDerived>
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Product<Derived, OtherDerived, LazyProduct>
|
||||
MatrixBase<Derived>::lazyProduct(const MatrixBase<OtherDerived>& other) const {
|
||||
enum {
|
||||
ProductIsValid = Derived::ColsAtCompileTime == Dynamic || OtherDerived::RowsAtCompileTime == Dynamic ||
|
||||
int(Derived::ColsAtCompileTime) == int(OtherDerived::RowsAtCompileTime),
|
||||
AreVectors = Derived::IsVectorAtCompileTime && OtherDerived::IsVectorAtCompileTime,
|
||||
SameSizes = EIGEN_PREDICATE_SAME_MATRIX_SIZE(Derived, OtherDerived)
|
||||
};
|
||||
// note to the lost user:
|
||||
// * for a dot product use: v1.dot(v2)
|
||||
// * for a coeff-wise product use: v1.cwiseProduct(v2)
|
||||
EIGEN_STATIC_ASSERT(
|
||||
ProductIsValid || !(AreVectors && SameSizes),
|
||||
INVALID_VECTOR_VECTOR_PRODUCT__IF_YOU_WANTED_A_DOT_OR_COEFF_WISE_PRODUCT_YOU_MUST_USE_THE_EXPLICIT_FUNCTIONS)
|
||||
EIGEN_STATIC_ASSERT(ProductIsValid || !(SameSizes && !AreVectors),
|
||||
INVALID_MATRIX_PRODUCT__IF_YOU_WANTED_A_COEFF_WISE_PRODUCT_YOU_MUST_USE_THE_EXPLICIT_FUNCTION)
|
||||
EIGEN_STATIC_ASSERT(ProductIsValid || SameSizes, INVALID_MATRIX_PRODUCT)
|
||||
|
||||
return Product<Derived, OtherDerived, LazyProduct>(derived(), other.derived());
|
||||
}
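// Usage sketch (illustrative, not part of Eigen): with <Eigen/Dense> included,
//   Eigen::MatrixXd A = Eigen::MatrixXd::Random(3, 3), B = Eigen::MatrixXd::Random(3, 3);
//   Eigen::MatrixXd C = A * B;             // Product<>: evaluated through the cache-friendly product kernels
//   double c01 = A.lazyProduct(B)(0, 1);   // lazy expression: only the requested coefficient is computed
//   Eigen::Vector3d u = Eigen::Vector3d::Random(), v = Eigen::Vector3d::Random();
//   double d = u.dot(v);                   // dot product (u * v would be an invalid 3x1 * 3x1 product)
//   Eigen::Vector3d w = u.cwiseProduct(v); // coefficient-wise product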
|
||||
|
||||
} // end namespace Eigen
|
||||
|
||||
#endif // EIGEN_PRODUCT_H
|
||||
1702
2025.09.22_cpp_with_eigen_package/Eigen/src/Core/GenericPacketMath.h
Normal file
File diff suppressed because it is too large
@@ -0,0 +1,230 @@
|
||||
// This file is part of Eigen, a lightweight C++ template library
|
||||
// for linear algebra.
|
||||
//
|
||||
// Copyright (C) 2010-2016 Gael Guennebaud <gael.guennebaud@inria.fr>
|
||||
// Copyright (C) 2010 Benoit Jacob <jacob.benoit.1@gmail.com>
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla
|
||||
// Public License v. 2.0. If a copy of the MPL was not distributed
|
||||
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
#ifndef EIGEN_GLOBAL_FUNCTIONS_H
|
||||
#define EIGEN_GLOBAL_FUNCTIONS_H
|
||||
|
||||
#ifdef EIGEN_PARSED_BY_DOXYGEN
|
||||
|
||||
#define EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(NAME, FUNCTOR, DOC_OP, DOC_DETAILS) \
|
||||
/** \returns an expression of the coefficient-wise DOC_OP of \a x \
|
||||
\ \
|
||||
DOC_DETAILS \
|
||||
\ \
|
||||
\sa <a href="group__CoeffwiseMathFunctions.html#cwisetable_##NAME">Math functions</a>, class CwiseUnaryOp \
|
||||
*/ \
|
||||
template <typename Derived> \
|
||||
inline const Eigen::CwiseUnaryOp<Eigen::internal::FUNCTOR<typename Derived::Scalar>, const Derived> NAME( \
|
||||
const Eigen::ArrayBase<Derived>& x);
|
||||
|
||||
#else
|
||||
|
||||
#define EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(NAME, FUNCTOR, DOC_OP, DOC_DETAILS) \
|
||||
template <typename Derived> \
|
||||
inline const Eigen::CwiseUnaryOp<Eigen::internal::FUNCTOR<typename Derived::Scalar>, const Derived>(NAME)( \
|
||||
const Eigen::ArrayBase<Derived>& x) { \
|
||||
return Eigen::CwiseUnaryOp<Eigen::internal::FUNCTOR<typename Derived::Scalar>, const Derived>(x.derived()); \
|
||||
}
|
||||
|
||||
#endif // EIGEN_PARSED_BY_DOXYGEN
|
||||
|
||||
#define EIGEN_ARRAY_DECLARE_GLOBAL_EIGEN_UNARY(NAME, FUNCTOR) \
|
||||
\
|
||||
template <typename Derived> \
|
||||
struct NAME##_retval<ArrayBase<Derived> > { \
|
||||
typedef const Eigen::CwiseUnaryOp<Eigen::internal::FUNCTOR<typename Derived::Scalar>, const Derived> type; \
|
||||
}; \
|
||||
template <typename Derived> \
|
||||
struct NAME##_impl<ArrayBase<Derived> > { \
|
||||
static inline typename NAME##_retval<ArrayBase<Derived> >::type run(const Eigen::ArrayBase<Derived>& x) { \
|
||||
return typename NAME##_retval<ArrayBase<Derived> >::type(x.derived()); \
|
||||
} \
|
||||
};
|
||||
|
||||
// IWYU pragma: private
|
||||
#include "./InternalHeaderCheck.h"
|
||||
|
||||
namespace Eigen {
|
||||
EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(real, scalar_real_op, real part,\sa ArrayBase::real)
|
||||
EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(imag, scalar_imag_op, imaginary part,\sa ArrayBase::imag)
|
||||
EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(conj, scalar_conjugate_op, complex conjugate,\sa ArrayBase::conjugate)
|
||||
EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(inverse, scalar_inverse_op, inverse,\sa ArrayBase::inverse)
|
||||
EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(sin, scalar_sin_op, sine,\sa ArrayBase::sin)
|
||||
EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(cos, scalar_cos_op, cosine,\sa ArrayBase::cos)
|
||||
EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(tan, scalar_tan_op, tangent,\sa ArrayBase::tan)
|
||||
EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(atan, scalar_atan_op, arc - tangent,\sa ArrayBase::atan)
|
||||
EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(asin, scalar_asin_op, arc - sine,\sa ArrayBase::asin)
|
||||
EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(acos, scalar_acos_op, arc - cosine,\sa ArrayBase::acos)
|
||||
EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(sinh, scalar_sinh_op, hyperbolic sine,\sa ArrayBase::sinh)
|
||||
EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(cosh, scalar_cosh_op, hyperbolic cosine,\sa ArrayBase::cosh)
|
||||
EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(tanh, scalar_tanh_op, hyperbolic tangent,\sa ArrayBase::tanh)
|
||||
EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(asinh, scalar_asinh_op, inverse hyperbolic sine,\sa ArrayBase::asinh)
|
||||
EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(acosh, scalar_acosh_op, inverse hyperbolic cosine,\sa ArrayBase::acosh)
|
||||
EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(atanh, scalar_atanh_op, inverse hyperbolic tangent,\sa ArrayBase::atanh)
|
||||
EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(logistic, scalar_logistic_op, logistic function,\sa ArrayBase::logistic)
|
||||
EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(lgamma, scalar_lgamma_op,
|
||||
natural logarithm of the gamma function,\sa ArrayBase::lgamma)
|
||||
EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(digamma, scalar_digamma_op, derivative of lgamma,\sa ArrayBase::digamma)
|
||||
EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(erf, scalar_erf_op, error function,\sa ArrayBase::erf)
|
||||
EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(erfc, scalar_erfc_op, complementary error function,\sa ArrayBase::erfc)
|
||||
EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(ndtri, scalar_ndtri_op, inverse normal distribution function,\sa ArrayBase::ndtri)
|
||||
EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(exp, scalar_exp_op, exponential,\sa ArrayBase::exp)
|
||||
EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(exp2, scalar_exp2_op, exponential,\sa ArrayBase::exp2)
|
||||
EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(expm1, scalar_expm1_op, exponential of a value minus 1,\sa ArrayBase::expm1)
|
||||
EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(log, scalar_log_op, natural logarithm,\sa Eigen::log10 DOXCOMMA ArrayBase::log)
|
||||
EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(log1p, scalar_log1p_op, natural logarithm of 1 plus the value,\sa ArrayBase::log1p)
|
||||
EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(log10, scalar_log10_op, base 10 logarithm,\sa Eigen::log DOXCOMMA ArrayBase::log10)
|
||||
EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(log2, scalar_log2_op, base 2 logarithm,\sa Eigen::log DOXCOMMA ArrayBase::log2)
|
||||
EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(abs, scalar_abs_op, absolute value,\sa ArrayBase::abs DOXCOMMA MatrixBase::cwiseAbs)
|
||||
EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(abs2, scalar_abs2_op,
|
||||
squared absolute value,\sa ArrayBase::abs2 DOXCOMMA MatrixBase::cwiseAbs2)
|
||||
EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(arg, scalar_arg_op, complex argument,\sa ArrayBase::arg DOXCOMMA MatrixBase::cwiseArg)
|
||||
EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(carg, scalar_carg_op,
|
||||
complex argument, \sa ArrayBase::carg DOXCOMMA MatrixBase::cwiseCArg)
|
||||
EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(sqrt, scalar_sqrt_op, square root,\sa ArrayBase::sqrt DOXCOMMA MatrixBase::cwiseSqrt)
|
||||
EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(cbrt, scalar_cbrt_op, cube root,\sa ArrayBase::cbrt DOXCOMMA MatrixBase::cwiseCbrt)
|
||||
EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(rsqrt, scalar_rsqrt_op, reciprocal square root,\sa ArrayBase::rsqrt)
|
||||
EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(square, scalar_square_op,
|
||||
square(power 2),\sa Eigen::abs2 DOXCOMMA Eigen::pow DOXCOMMA ArrayBase::square)
|
||||
EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(cube, scalar_cube_op, cube(power 3),\sa Eigen::pow DOXCOMMA ArrayBase::cube)
|
||||
EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(rint, scalar_rint_op,
|
||||
nearest integer,\sa Eigen::floor DOXCOMMA Eigen::ceil DOXCOMMA ArrayBase::round)
|
||||
EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(round, scalar_round_op,
|
||||
nearest integer,\sa Eigen::floor DOXCOMMA Eigen::ceil DOXCOMMA ArrayBase::round)
|
||||
EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(
|
||||
floor, scalar_floor_op, nearest integer not greater than the given value,\sa Eigen::ceil DOXCOMMA ArrayBase::floor)
|
||||
EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(
|
||||
ceil, scalar_ceil_op, nearest integer not less than the given value,\sa Eigen::floor DOXCOMMA ArrayBase::ceil)
|
||||
EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(trunc, scalar_trunc_op,
|
||||
nearest integer not greater in magnitude than the given value,\sa Eigen::trunc DOXCOMMA
|
||||
ArrayBase::trunc)
|
||||
EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(
|
||||
isnan, scalar_isnan_op, not -a - number test,\sa Eigen::isinf DOXCOMMA Eigen::isfinite DOXCOMMA ArrayBase::isnan)
|
||||
EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(
|
||||
isinf, scalar_isinf_op, infinite value test,\sa Eigen::isnan DOXCOMMA Eigen::isfinite DOXCOMMA ArrayBase::isinf)
|
||||
EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(isfinite, scalar_isfinite_op,
|
||||
finite value test,\sa Eigen::isinf DOXCOMMA Eigen::isnan DOXCOMMA ArrayBase::isfinite)
|
||||
EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(sign, scalar_sign_op, sign(or 0),\sa ArrayBase::sign)
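// Usage sketch (illustrative, not part of Eigen): each declaration above exposes a free function in
// namespace Eigen that applies the corresponding functor coefficient-wise to an array expression, e.g.
//   Eigen::ArrayXd a = Eigen::ArrayXd::LinSpaced(5, 0.0, 1.0);
//   Eigen::ArrayXd s = Eigen::sin(a);        // same result as a.sin()
//   Eigen::ArrayXd m = Eigen::abs(a - 0.5);  // same result as (a - 0.5).abs()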
|
||||
|
||||
template <typename Derived, typename ScalarExponent>
|
||||
using GlobalUnaryPowReturnType = std::enable_if_t<
|
||||
!internal::is_arithmetic<typename NumTraits<Derived>::Real>::value &&
|
||||
internal::is_arithmetic<typename NumTraits<ScalarExponent>::Real>::value,
|
||||
CwiseUnaryOp<internal::scalar_unary_pow_op<typename Derived::Scalar, ScalarExponent>, const Derived> >;
|
||||
|
||||
/** \returns an expression of the coefficient-wise power of \a x to the given constant \a exponent.
|
||||
*
|
||||
* \tparam ScalarExponent is the scalar type of \a exponent. It must be compatible with the scalar type of the given
|
||||
* expression (\c Derived::Scalar).
|
||||
*
|
||||
* \sa ArrayBase::pow()
|
||||
*
|
||||
* \relates ArrayBase
|
||||
*/
|
||||
#ifdef EIGEN_PARSED_BY_DOXYGEN
|
||||
template <typename Derived, typename ScalarExponent>
|
||||
EIGEN_DEVICE_FUNC inline const GlobalUnaryPowReturnType<Derived, ScalarExponent> pow(const Eigen::ArrayBase<Derived>& x,
|
||||
const ScalarExponent& exponent);
|
||||
#else
|
||||
template <typename Derived, typename ScalarExponent>
|
||||
EIGEN_DEVICE_FUNC inline const GlobalUnaryPowReturnType<Derived, ScalarExponent> pow(const Eigen::ArrayBase<Derived>& x,
|
||||
const ScalarExponent& exponent) {
|
||||
return GlobalUnaryPowReturnType<Derived, ScalarExponent>(
|
||||
x.derived(), internal::scalar_unary_pow_op<typename Derived::Scalar, ScalarExponent>(exponent));
|
||||
}
|
||||
#endif
|
||||
|
||||
/** \returns an expression of the coefficient-wise power of \a x to the given array of \a exponents.
|
||||
*
|
||||
* This function computes the coefficient-wise power.
|
||||
*
|
||||
* Example: \include Cwise_array_power_array.cpp
|
||||
* Output: \verbinclude Cwise_array_power_array.out
|
||||
*
|
||||
* \sa ArrayBase::pow()
|
||||
*
|
||||
* \relates ArrayBase
|
||||
*/
|
||||
template <typename Derived, typename ExponentDerived>
|
||||
inline const Eigen::CwiseBinaryOp<
|
||||
Eigen::internal::scalar_pow_op<typename Derived::Scalar, typename ExponentDerived::Scalar>, const Derived,
|
||||
const ExponentDerived>
|
||||
pow(const Eigen::ArrayBase<Derived>& x, const Eigen::ArrayBase<ExponentDerived>& exponents) {
|
||||
return Eigen::CwiseBinaryOp<
|
||||
Eigen::internal::scalar_pow_op<typename Derived::Scalar, typename ExponentDerived::Scalar>, const Derived,
|
||||
const ExponentDerived>(x.derived(), exponents.derived());
|
||||
}
|
||||
|
||||
/** \returns an expression of the coefficient-wise power of the scalar \a x to the given array of \a exponents.
|
||||
*
|
||||
* This function computes the coefficient-wise power between a scalar and an array of exponents.
|
||||
*
|
||||
* \tparam Scalar is the scalar type of \a x. It must be compatible with the scalar type of the given array expression
|
||||
* (\c Derived::Scalar).
|
||||
*
|
||||
* Example: \include Cwise_scalar_power_array.cpp
|
||||
* Output: \verbinclude Cwise_scalar_power_array.out
|
||||
*
|
||||
* \sa ArrayBase::pow()
|
||||
*
|
||||
* \relates ArrayBase
|
||||
*/
|
||||
#ifdef EIGEN_PARSED_BY_DOXYGEN
|
||||
template <typename Scalar, typename Derived>
|
||||
inline const CwiseBinaryOp<internal::scalar_pow_op<Scalar, Derived::Scalar>, Constant<Scalar>, Derived> pow(
|
||||
const Scalar& x, const Eigen::ArrayBase<Derived>& x);
|
||||
#else
|
||||
template <typename Scalar, typename Derived>
|
||||
EIGEN_DEVICE_FUNC inline const EIGEN_SCALAR_BINARYOP_EXPR_RETURN_TYPE(
|
||||
typename internal::promote_scalar_arg<typename Derived::Scalar EIGEN_COMMA Scalar EIGEN_COMMA
|
||||
EIGEN_SCALAR_BINARY_SUPPORTED(pow, Scalar,
|
||||
typename Derived::Scalar)>::type,
|
||||
Derived, pow) pow(const Scalar& x, const Eigen::ArrayBase<Derived>& exponents) {
|
||||
typedef
|
||||
typename internal::promote_scalar_arg<typename Derived::Scalar, Scalar,
|
||||
EIGEN_SCALAR_BINARY_SUPPORTED(pow, Scalar, typename Derived::Scalar)>::type
|
||||
PromotedScalar;
|
||||
return EIGEN_SCALAR_BINARYOP_EXPR_RETURN_TYPE(PromotedScalar, Derived, pow)(
|
||||
typename internal::plain_constant_type<Derived, PromotedScalar>::type(
|
||||
exponents.derived().rows(), exponents.derived().cols(), internal::scalar_constant_op<PromotedScalar>(x)),
|
||||
exponents.derived());
|
||||
}
|
||||
#endif
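// Usage sketch (illustrative, not part of Eigen) covering the three pow overloads above:
//   Eigen::ArrayXd x = Eigen::ArrayXd::LinSpaced(4, 1.0, 4.0);
//   Eigen::ArrayXd e(4);
//   e << 1, 2, 3, 4;
//   Eigen::ArrayXd y = Eigen::pow(x, 2.0);  // scalar exponent: y(i) = x(i)^2
//   Eigen::ArrayXd z = Eigen::pow(x, e);    // array exponent:  z(i) = x(i)^e(i)
//   Eigen::ArrayXd w = Eigen::pow(2.0, e);  // scalar base:     w(i) = 2^e(i)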
|
||||
|
||||
/** \returns an expression of the coefficient-wise atan2(\a x, \a y). \a x and \a y must be of the same type.
|
||||
*
|
||||
* This function computes the coefficient-wise atan2().
|
||||
*
|
||||
* \sa ArrayBase::atan2()
|
||||
*
|
||||
* \relates ArrayBase
|
||||
*/
|
||||
template <typename LhsDerived, typename RhsDerived>
|
||||
inline const std::enable_if_t<
|
||||
std::is_same<typename LhsDerived::Scalar, typename RhsDerived::Scalar>::value,
|
||||
Eigen::CwiseBinaryOp<Eigen::internal::scalar_atan2_op<typename LhsDerived::Scalar, typename RhsDerived::Scalar>,
|
||||
const LhsDerived, const RhsDerived> >
|
||||
atan2(const Eigen::ArrayBase<LhsDerived>& x, const Eigen::ArrayBase<RhsDerived>& exponents) {
|
||||
return Eigen::CwiseBinaryOp<
|
||||
Eigen::internal::scalar_atan2_op<typename LhsDerived::Scalar, typename RhsDerived::Scalar>, const LhsDerived,
|
||||
const RhsDerived>(x.derived(), exponents.derived());
|
||||
}
|
||||
|
||||
namespace internal {
|
||||
EIGEN_ARRAY_DECLARE_GLOBAL_EIGEN_UNARY(real, scalar_real_op)
|
||||
EIGEN_ARRAY_DECLARE_GLOBAL_EIGEN_UNARY(imag, scalar_imag_op)
|
||||
EIGEN_ARRAY_DECLARE_GLOBAL_EIGEN_UNARY(abs2, scalar_abs2_op)
|
||||
} // namespace internal
|
||||
} // namespace Eigen
|
||||
|
||||
// TODO: cleanly disable those functions that are not supported on Array (numext::real_ref, internal::random,
|
||||
// internal::isApprox...)
|
||||
|
||||
#endif // EIGEN_GLOBAL_FUNCTIONS_H
|
||||
233
2025.09.22_cpp_with_eigen_package/Eigen/src/Core/IO.h
Normal file
@@ -0,0 +1,233 @@
|
||||
// This file is part of Eigen, a lightweight C++ template library
|
||||
// for linear algebra.
|
||||
//
|
||||
// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
|
||||
// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla
|
||||
// Public License v. 2.0. If a copy of the MPL was not distributed
|
||||
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
#ifndef EIGEN_IO_H
|
||||
#define EIGEN_IO_H
|
||||
|
||||
// IWYU pragma: private
|
||||
#include "./InternalHeaderCheck.h"
|
||||
|
||||
namespace Eigen {
|
||||
|
||||
enum { DontAlignCols = 1 };
|
||||
enum { StreamPrecision = -1, FullPrecision = -2 };
|
||||
|
||||
namespace internal {
|
||||
template <typename Derived>
|
||||
std::ostream& print_matrix(std::ostream& s, const Derived& _m, const IOFormat& fmt);
|
||||
}
|
||||
|
||||
/** \class IOFormat
|
||||
* \ingroup Core_Module
|
||||
*
|
||||
* \brief Stores a set of parameters controlling the way matrices are printed
|
||||
*
|
||||
* List of available parameters:
|
||||
* - \b precision number of digits for floating point values, or one of the special constants \c StreamPrecision and \c
|
||||
* FullPrecision. The default is the special value \c StreamPrecision which means to use the stream's own precision
|
||||
* setting, as set for instance using \c cout.precision(3). The other special value \c FullPrecision means that the
|
||||
* number of digits will be computed to match the full precision of each floating-point type.
|
||||
* - \b flags an OR-ed combination of flags; the default value is 0, and the only currently available flag is \c
* DontAlignCols, which disables the alignment of columns and results in faster code.
|
||||
* - \b coeffSeparator string printed between two coefficients of the same row
|
||||
* - \b rowSeparator string printed between two rows
|
||||
* - \b rowPrefix string printed at the beginning of each row
|
||||
* - \b rowSuffix string printed at the end of each row
|
||||
* - \b matPrefix string printed at the beginning of the matrix
|
||||
* - \b matSuffix string printed at the end of the matrix
|
||||
* - \b fill character printed to fill the empty space in aligned columns
|
||||
*
|
||||
* Example: \include IOFormat.cpp
|
||||
* Output: \verbinclude IOFormat.out
|
||||
*
|
||||
* \sa DenseBase::format(), class WithFormat
|
||||
*/
|
||||
struct IOFormat {
|
||||
/** Default constructor, see class IOFormat for the meaning of the parameters */
|
||||
IOFormat(int _precision = StreamPrecision, int _flags = 0, const std::string& _coeffSeparator = " ",
|
||||
const std::string& _rowSeparator = "\n", const std::string& _rowPrefix = "",
|
||||
const std::string& _rowSuffix = "", const std::string& _matPrefix = "", const std::string& _matSuffix = "",
|
||||
const char _fill = ' ')
|
||||
: matPrefix(_matPrefix),
|
||||
matSuffix(_matSuffix),
|
||||
rowPrefix(_rowPrefix),
|
||||
rowSuffix(_rowSuffix),
|
||||
rowSeparator(_rowSeparator),
|
||||
rowSpacer(""),
|
||||
coeffSeparator(_coeffSeparator),
|
||||
fill(_fill),
|
||||
precision(_precision),
|
||||
flags(_flags) {
|
||||
// TODO check if rowPrefix, rowSuffix or rowSeparator contains a newline
|
||||
// don't add rowSpacer if columns are not to be aligned
|
||||
if ((flags & DontAlignCols)) return;
|
||||
int i = int(matPrefix.length()) - 1;
|
||||
while (i >= 0 && matPrefix[i] != '\n') {
|
||||
rowSpacer += ' ';
|
||||
i--;
|
||||
}
|
||||
}
|
||||
std::string matPrefix, matSuffix;
|
||||
std::string rowPrefix, rowSuffix, rowSeparator, rowSpacer;
|
||||
std::string coeffSeparator;
|
||||
char fill;
|
||||
int precision;
|
||||
int flags;
|
||||
};
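// Usage sketch (illustrative, not part of Eigen): with <Eigen/Dense> and <iostream> included,
//   Eigen::IOFormat CleanFmt(4, 0, ", ", "\n", "[", "]");                            // 4 digits, aligned columns
//   Eigen::IOFormat CsvFmt(Eigen::FullPrecision, Eigen::DontAlignCols, ", ", "\n");  // CSV-like output
//   Eigen::Matrix3d m = Eigen::Matrix3d::Random();
//   std::cout << m.format(CleanFmt) << "\n";
//   std::cout << m.format(CsvFmt) << "\n";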
|
||||
|
||||
/** \class WithFormat
|
||||
* \ingroup Core_Module
|
||||
*
|
||||
* \brief Pseudo expression providing matrix output with given format
|
||||
*
|
||||
* \tparam ExpressionType the type of the object on which IO stream operations are performed
|
||||
*
|
||||
* This class represents an expression with stream operators controlled by a given IOFormat.
|
||||
* It is the return type of DenseBase::format()
|
||||
* and most of the time this is the only way it is used.
|
||||
*
|
||||
* See class IOFormat for some examples.
|
||||
*
|
||||
* \sa DenseBase::format(), class IOFormat
|
||||
*/
|
||||
template <typename ExpressionType>
|
||||
class WithFormat {
|
||||
public:
|
||||
WithFormat(const ExpressionType& matrix, const IOFormat& format) : m_matrix(matrix), m_format(format) {}
|
||||
|
||||
friend std::ostream& operator<<(std::ostream& s, const WithFormat& wf) {
|
||||
return internal::print_matrix(s, wf.m_matrix.eval(), wf.m_format);
|
||||
}
|
||||
|
||||
protected:
|
||||
typename ExpressionType::Nested m_matrix;
|
||||
IOFormat m_format;
|
||||
};
|
||||
|
||||
namespace internal {
|
||||
|
||||
// NOTE: This helper is kept for backward compatibility with previous code specializing
|
||||
// this internal::significant_decimals_impl structure. In the future we should directly
|
||||
// call max_digits10().
|
||||
template <typename Scalar>
|
||||
struct significant_decimals_impl {
|
||||
static inline int run() { return NumTraits<Scalar>::max_digits10(); }
|
||||
};
|
||||
|
||||
/** \internal
|
||||
* print the matrix \a _m to the output stream \a s using the output format \a fmt */
|
||||
template <typename Derived>
|
||||
std::ostream& print_matrix(std::ostream& s, const Derived& _m, const IOFormat& fmt) {
|
||||
using internal::is_same;
|
||||
|
||||
if (_m.size() == 0) {
|
||||
s << fmt.matPrefix << fmt.matSuffix;
|
||||
return s;
|
||||
}
|
||||
|
||||
typename Derived::Nested m = _m;
|
||||
typedef typename Derived::Scalar Scalar;
|
||||
typedef std::conditional_t<is_same<Scalar, char>::value || is_same<Scalar, unsigned char>::value ||
|
||||
is_same<Scalar, numext::int8_t>::value || is_same<Scalar, numext::uint8_t>::value,
|
||||
int,
|
||||
std::conditional_t<is_same<Scalar, std::complex<char> >::value ||
|
||||
is_same<Scalar, std::complex<unsigned char> >::value ||
|
||||
is_same<Scalar, std::complex<numext::int8_t> >::value ||
|
||||
is_same<Scalar, std::complex<numext::uint8_t> >::value,
|
||||
std::complex<int>, const Scalar&> >
|
||||
PrintType;
|
||||
|
||||
Index width = 0;
|
||||
|
||||
std::streamsize explicit_precision;
|
||||
if (fmt.precision == StreamPrecision) {
|
||||
explicit_precision = 0;
|
||||
} else if (fmt.precision == FullPrecision) {
|
||||
if (NumTraits<Scalar>::IsInteger) {
|
||||
explicit_precision = 0;
|
||||
} else {
|
||||
explicit_precision = significant_decimals_impl<Scalar>::run();
|
||||
}
|
||||
} else {
|
||||
explicit_precision = fmt.precision;
|
||||
}
|
||||
|
||||
std::streamsize old_precision = 0;
|
||||
if (explicit_precision) old_precision = s.precision(explicit_precision);
|
||||
|
||||
bool align_cols = !(fmt.flags & DontAlignCols);
|
||||
if (align_cols) {
|
||||
// compute the largest width
|
||||
for (Index j = 0; j < m.cols(); ++j)
|
||||
for (Index i = 0; i < m.rows(); ++i) {
|
||||
std::stringstream sstr;
|
||||
sstr.copyfmt(s);
|
||||
sstr << static_cast<PrintType>(m.coeff(i, j));
|
||||
width = std::max<Index>(width, Index(sstr.str().length()));
|
||||
}
|
||||
}
|
||||
std::streamsize old_width = s.width();
|
||||
char old_fill_character = s.fill();
|
||||
s << fmt.matPrefix;
|
||||
for (Index i = 0; i < m.rows(); ++i) {
|
||||
if (i) s << fmt.rowSpacer;
|
||||
s << fmt.rowPrefix;
|
||||
if (width) {
|
||||
s.fill(fmt.fill);
|
||||
s.width(width);
|
||||
}
|
||||
s << static_cast<PrintType>(m.coeff(i, 0));
|
||||
for (Index j = 1; j < m.cols(); ++j) {
|
||||
s << fmt.coeffSeparator;
|
||||
if (width) {
|
||||
s.fill(fmt.fill);
|
||||
s.width(width);
|
||||
}
|
||||
s << static_cast<PrintType>(m.coeff(i, j));
|
||||
}
|
||||
s << fmt.rowSuffix;
|
||||
if (i < m.rows() - 1) s << fmt.rowSeparator;
|
||||
}
|
||||
s << fmt.matSuffix;
|
||||
if (explicit_precision) s.precision(old_precision);
|
||||
if (width) {
|
||||
s.fill(old_fill_character);
|
||||
s.width(old_width);
|
||||
}
|
||||
return s;
|
||||
}
|
||||
|
||||
} // end namespace internal
|
||||
|
||||
/** \relates DenseBase
|
||||
*
|
||||
* Outputs the matrix, to the given stream.
|
||||
*
|
||||
* If you wish to print the matrix with a format different than the default, use DenseBase::format().
|
||||
*
|
||||
* It is also possible to change the default format by defining EIGEN_DEFAULT_IO_FORMAT before including Eigen headers.
|
||||
* If not defined, this will automatically be defined to Eigen::IOFormat(), that is, the Eigen::IOFormat with default
|
||||
* parameters.
|
||||
*
|
||||
* \sa DenseBase::format()
|
||||
*/
|
||||
template <typename Derived>
|
||||
std::ostream& operator<<(std::ostream& s, const DenseBase<Derived>& m) {
|
||||
return internal::print_matrix(s, m.eval(), EIGEN_DEFAULT_IO_FORMAT);
|
||||
}
|
||||
|
||||
template <typename Derived>
|
||||
std::ostream& operator<<(std::ostream& s, const DiagonalBase<Derived>& m) {
|
||||
return internal::print_matrix(s, m.derived(), EIGEN_DEFAULT_IO_FORMAT);
|
||||
}
|
||||
|
||||
} // end namespace Eigen
|
||||
|
||||
#endif // EIGEN_IO_H
|
||||
321
2025.09.22_cpp_with_eigen_package/Eigen/src/Core/IndexedView.h
Normal file
@@ -0,0 +1,321 @@
|
||||
// This file is part of Eigen, a lightweight C++ template library
|
||||
// for linear algebra.
|
||||
//
|
||||
// Copyright (C) 2017 Gael Guennebaud <gael.guennebaud@inria.fr>
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla
|
||||
// Public License v. 2.0. If a copy of the MPL was not distributed
|
||||
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
#ifndef EIGEN_INDEXED_VIEW_H
|
||||
#define EIGEN_INDEXED_VIEW_H
|
||||
|
||||
// IWYU pragma: private
|
||||
#include "./InternalHeaderCheck.h"
|
||||
|
||||
namespace Eigen {
|
||||
|
||||
namespace internal {
|
||||
|
||||
template <typename XprType, typename RowIndices, typename ColIndices>
|
||||
struct traits<IndexedView<XprType, RowIndices, ColIndices>> : traits<XprType> {
|
||||
enum {
|
||||
RowsAtCompileTime = int(IndexedViewHelper<RowIndices>::SizeAtCompileTime),
|
||||
ColsAtCompileTime = int(IndexedViewHelper<ColIndices>::SizeAtCompileTime),
|
||||
MaxRowsAtCompileTime = RowsAtCompileTime,
|
||||
MaxColsAtCompileTime = ColsAtCompileTime,
|
||||
|
||||
XprTypeIsRowMajor = (int(traits<XprType>::Flags) & RowMajorBit) != 0,
|
||||
IsRowMajor = (MaxRowsAtCompileTime == 1 && MaxColsAtCompileTime != 1) ? 1
|
||||
: (MaxColsAtCompileTime == 1 && MaxRowsAtCompileTime != 1) ? 0
|
||||
: XprTypeIsRowMajor,
|
||||
|
||||
RowIncr = int(IndexedViewHelper<RowIndices>::IncrAtCompileTime),
|
||||
ColIncr = int(IndexedViewHelper<ColIndices>::IncrAtCompileTime),
|
||||
InnerIncr = IsRowMajor ? ColIncr : RowIncr,
|
||||
OuterIncr = IsRowMajor ? RowIncr : ColIncr,
|
||||
|
||||
HasSameStorageOrderAsXprType = (IsRowMajor == XprTypeIsRowMajor),
|
||||
XprInnerStride = HasSameStorageOrderAsXprType ? int(inner_stride_at_compile_time<XprType>::ret)
|
||||
: int(outer_stride_at_compile_time<XprType>::ret),
|
||||
XprOuterstride = HasSameStorageOrderAsXprType ? int(outer_stride_at_compile_time<XprType>::ret)
|
||||
: int(inner_stride_at_compile_time<XprType>::ret),
|
||||
|
||||
InnerSize = XprTypeIsRowMajor ? ColsAtCompileTime : RowsAtCompileTime,
|
||||
IsBlockAlike = InnerIncr == 1 && OuterIncr == 1,
|
||||
IsInnerPannel = HasSameStorageOrderAsXprType &&
|
||||
is_same<AllRange<InnerSize>, std::conditional_t<XprTypeIsRowMajor, ColIndices, RowIndices>>::value,
|
||||
|
||||
InnerStrideAtCompileTime =
|
||||
InnerIncr < 0 || InnerIncr == DynamicIndex || XprInnerStride == Dynamic || InnerIncr == Undefined
|
||||
? Dynamic
|
||||
: XprInnerStride * InnerIncr,
|
||||
OuterStrideAtCompileTime =
|
||||
OuterIncr < 0 || OuterIncr == DynamicIndex || XprOuterstride == Dynamic || OuterIncr == Undefined
|
||||
? Dynamic
|
||||
: XprOuterstride * OuterIncr,
|
||||
|
||||
ReturnAsScalar = is_single_range<RowIndices>::value && is_single_range<ColIndices>::value,
|
||||
ReturnAsBlock = (!ReturnAsScalar) && IsBlockAlike,
|
||||
ReturnAsIndexedView = (!ReturnAsScalar) && (!ReturnAsBlock),
|
||||
|
||||
// FIXME we deal with compile-time strides if and only if we have DirectAccessBit flag,
|
||||
// but this is too strict regarding negative strides...
|
||||
DirectAccessMask = (int(InnerIncr) != Undefined && int(OuterIncr) != Undefined && InnerIncr >= 0 && OuterIncr >= 0)
|
||||
? DirectAccessBit
|
||||
: 0,
|
||||
FlagsRowMajorBit = IsRowMajor ? RowMajorBit : 0,
|
||||
FlagsLvalueBit = is_lvalue<XprType>::value ? LvalueBit : 0,
|
||||
FlagsLinearAccessBit = (RowsAtCompileTime == 1 || ColsAtCompileTime == 1) ? LinearAccessBit : 0,
|
||||
Flags = (traits<XprType>::Flags & (HereditaryBits | DirectAccessMask)) | FlagsLvalueBit | FlagsRowMajorBit |
|
||||
FlagsLinearAccessBit
|
||||
};
|
||||
|
||||
typedef Block<XprType, RowsAtCompileTime, ColsAtCompileTime, IsInnerPannel> BlockType;
|
||||
};
|
||||
|
||||
template <typename XprType, typename RowIndices, typename ColIndices, typename StorageKind, bool DirectAccess>
|
||||
class IndexedViewImpl;
|
||||
|
||||
} // namespace internal
|
||||
|
||||
/** \class IndexedView
|
||||
* \ingroup Core_Module
|
||||
*
|
||||
* \brief Expression of a non-sequential sub-matrix defined by arbitrary sequences of row and column indices
|
||||
*
|
||||
* \tparam XprType the type of the expression in which we are taking the intersections of sub-rows and sub-columns
|
||||
* \tparam RowIndices the type of the object defining the sequence of row indices
|
||||
* \tparam ColIndices the type of the object defining the sequence of column indices
|
||||
*
|
||||
* This class represents an expression of a sub-matrix (or sub-vector) defined as the intersection
|
||||
* of sub-sets of rows and columns that are themselves defined by generic sequences of row indices \f$
|
||||
* \{r_0,r_1,..r_{m-1}\} \f$ and column indices \f$ \{c_0,c_1,..c_{n-1} \}\f$. Let \f$ A \f$ be the nested matrix, then
|
||||
* the resulting matrix \f$ B \f$ has \c m rows and \c n columns, and its entries are given by: \f$ B(i,j) = A(r_i,c_j)
|
||||
* \f$.
|
||||
*
|
||||
* The \c RowIndices and \c ColIndices types must be compatible with the following API:
|
||||
* \code
|
||||
* <integral type> operator[](Index) const;
|
||||
* Index size() const;
|
||||
* \endcode
|
||||
*
|
||||
* Typical supported types thus include:
|
||||
* - std::vector<int>
|
||||
* - std::valarray<int>
|
||||
* - std::array<int>
|
||||
* - Eigen::ArrayXi
|
||||
* - decltype(ArrayXi::LinSpaced(...))
|
||||
* - Any view/expressions of the previous types
|
||||
* - Eigen::ArithmeticSequence
|
||||
* - Eigen::internal::AllRange (helper for Eigen::placeholders::all)
|
||||
* - Eigen::internal::SingleRange (helper for single index)
|
||||
* - etc.
|
||||
*
|
||||
* In typical usages of %Eigen, this class should never be used directly. It is the return type of
|
||||
* DenseBase::operator()(const RowIndices&, const ColIndices&).
|
||||
*
|
||||
* \sa class Block
|
||||
*/
|
||||
template <typename XprType, typename RowIndices, typename ColIndices>
|
||||
class IndexedView
|
||||
: public internal::IndexedViewImpl<XprType, RowIndices, ColIndices, typename internal::traits<XprType>::StorageKind,
|
||||
(internal::traits<IndexedView<XprType, RowIndices, ColIndices>>::Flags &
|
||||
DirectAccessBit) != 0> {
|
||||
public:
|
||||
typedef typename internal::IndexedViewImpl<
|
||||
XprType, RowIndices, ColIndices, typename internal::traits<XprType>::StorageKind,
|
||||
(internal::traits<IndexedView<XprType, RowIndices, ColIndices>>::Flags & DirectAccessBit) != 0>
|
||||
Base;
|
||||
EIGEN_GENERIC_PUBLIC_INTERFACE(IndexedView)
|
||||
EIGEN_INHERIT_ASSIGNMENT_OPERATORS(IndexedView)
|
||||
|
||||
template <typename T0, typename T1>
|
||||
IndexedView(XprType& xpr, const T0& rowIndices, const T1& colIndices) : Base(xpr, rowIndices, colIndices) {}
|
||||
};
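// Usage sketch (illustrative, not part of Eigen): an IndexedView is normally obtained through
// DenseBase::operator() with collections of indices; with <Eigen/Dense> and <vector> included,
//   Eigen::MatrixXd A = Eigen::MatrixXd::Random(6, 6);
//   std::vector<int> rows{4, 1, 3};
//   Eigen::MatrixXd B = A(rows, Eigen::seq(1, 4));  // 3 x 4 copy with B(i, j) = A(rows[i], 1 + j)
//   A(rows, Eigen::placeholders::all).setZero();    // views of lvalues are writable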
|
||||
|
||||
namespace internal {
|
||||
|
||||
// Generic API dispatcher
|
||||
template <typename XprType, typename RowIndices, typename ColIndices, typename StorageKind, bool DirectAccess>
|
||||
class IndexedViewImpl : public internal::generic_xpr_base<IndexedView<XprType, RowIndices, ColIndices>>::type {
|
||||
public:
|
||||
typedef typename internal::generic_xpr_base<IndexedView<XprType, RowIndices, ColIndices>>::type Base;
|
||||
typedef typename internal::ref_selector<XprType>::non_const_type MatrixTypeNested;
|
||||
typedef internal::remove_all_t<XprType> NestedExpression;
|
||||
typedef typename XprType::Scalar Scalar;
|
||||
|
||||
EIGEN_INHERIT_ASSIGNMENT_OPERATORS(IndexedViewImpl)
|
||||
|
||||
template <typename T0, typename T1>
|
||||
IndexedViewImpl(XprType& xpr, const T0& rowIndices, const T1& colIndices)
|
||||
: m_xpr(xpr), m_rowIndices(rowIndices), m_colIndices(colIndices) {}
|
||||
|
||||
/** \returns number of rows */
|
||||
Index rows() const { return IndexedViewHelper<RowIndices>::size(m_rowIndices); }
|
||||
|
||||
/** \returns number of columns */
|
||||
Index cols() const { return IndexedViewHelper<ColIndices>::size(m_colIndices); }
|
||||
|
||||
/** \returns the nested expression */
|
||||
const internal::remove_all_t<XprType>& nestedExpression() const { return m_xpr; }
|
||||
|
||||
/** \returns the nested expression */
|
||||
std::remove_reference_t<XprType>& nestedExpression() { return m_xpr; }
|
||||
|
||||
/** \returns a const reference to the object storing/generating the row indices */
|
||||
const RowIndices& rowIndices() const { return m_rowIndices; }
|
||||
|
||||
/** \returns a const reference to the object storing/generating the column indices */
|
||||
const ColIndices& colIndices() const { return m_colIndices; }
|
||||
|
||||
constexpr Scalar& coeffRef(Index rowId, Index colId) {
|
||||
return nestedExpression().coeffRef(m_rowIndices[rowId], m_colIndices[colId]);
|
||||
}
|
||||
|
||||
constexpr const Scalar& coeffRef(Index rowId, Index colId) const {
|
||||
return nestedExpression().coeffRef(m_rowIndices[rowId], m_colIndices[colId]);
|
||||
}
|
||||
|
||||
protected:
|
||||
MatrixTypeNested m_xpr;
|
||||
RowIndices m_rowIndices;
|
||||
ColIndices m_colIndices;
|
||||
};
|
||||
|
||||
template <typename XprType, typename RowIndices, typename ColIndices, typename StorageKind>
|
||||
class IndexedViewImpl<XprType, RowIndices, ColIndices, StorageKind, true>
|
||||
: public IndexedViewImpl<XprType, RowIndices, ColIndices, StorageKind, false> {
|
||||
public:
|
||||
using Base = internal::IndexedViewImpl<XprType, RowIndices, ColIndices,
|
||||
typename internal::traits<XprType>::StorageKind, false>;
|
||||
using Derived = IndexedView<XprType, RowIndices, ColIndices>;
|
||||
|
||||
EIGEN_INHERIT_ASSIGNMENT_OPERATORS(IndexedViewImpl)
|
||||
|
||||
template <typename T0, typename T1>
|
||||
IndexedViewImpl(XprType& xpr, const T0& rowIndices, const T1& colIndices) : Base(xpr, rowIndices, colIndices) {}
|
||||
|
||||
Index rowIncrement() const {
|
||||
if (traits<Derived>::RowIncr != DynamicIndex && traits<Derived>::RowIncr != Undefined) {
|
||||
return traits<Derived>::RowIncr;
|
||||
}
|
||||
return IndexedViewHelper<RowIndices>::incr(this->rowIndices());
|
||||
}
|
||||
Index colIncrement() const {
|
||||
if (traits<Derived>::ColIncr != DynamicIndex && traits<Derived>::ColIncr != Undefined) {
|
||||
return traits<Derived>::ColIncr;
|
||||
}
|
||||
return IndexedViewHelper<ColIndices>::incr(this->colIndices());
|
||||
}
|
||||
|
||||
Index innerIncrement() const { return traits<Derived>::IsRowMajor ? colIncrement() : rowIncrement(); }
|
||||
|
||||
Index outerIncrement() const { return traits<Derived>::IsRowMajor ? rowIncrement() : colIncrement(); }
|
||||
|
||||
std::decay_t<typename XprType::Scalar>* data() {
|
||||
Index row_offset = this->rowIndices()[0] * this->nestedExpression().rowStride();
|
||||
Index col_offset = this->colIndices()[0] * this->nestedExpression().colStride();
|
||||
return this->nestedExpression().data() + row_offset + col_offset;
|
||||
}
|
||||
|
||||
const std::decay_t<typename XprType::Scalar>* data() const {
|
||||
Index row_offset = this->rowIndices()[0] * this->nestedExpression().rowStride();
|
||||
Index col_offset = this->colIndices()[0] * this->nestedExpression().colStride();
|
||||
return this->nestedExpression().data() + row_offset + col_offset;
|
||||
}
|
||||
|
||||
EIGEN_DEVICE_FUNC constexpr Index innerStride() const noexcept {
|
||||
if (traits<Derived>::InnerStrideAtCompileTime != Dynamic) {
|
||||
return traits<Derived>::InnerStrideAtCompileTime;
|
||||
}
|
||||
return innerIncrement() * this->nestedExpression().innerStride();
|
||||
}
|
||||
|
||||
EIGEN_DEVICE_FUNC constexpr Index outerStride() const noexcept {
|
||||
if (traits<Derived>::OuterStrideAtCompileTime != Dynamic) {
|
||||
return traits<Derived>::OuterStrideAtCompileTime;
|
||||
}
|
||||
return outerIncrement() * this->nestedExpression().outerStride();
|
||||
}
|
||||
};
|
||||
|
||||
template <typename ArgType, typename RowIndices, typename ColIndices>
|
||||
struct unary_evaluator<IndexedView<ArgType, RowIndices, ColIndices>, IndexBased>
|
||||
: evaluator_base<IndexedView<ArgType, RowIndices, ColIndices>> {
|
||||
typedef IndexedView<ArgType, RowIndices, ColIndices> XprType;
|
||||
|
||||
enum {
|
||||
CoeffReadCost = evaluator<ArgType>::CoeffReadCost /* TODO + cost of row/col index */,
|
||||
|
||||
FlagsLinearAccessBit =
|
||||
(traits<XprType>::RowsAtCompileTime == 1 || traits<XprType>::ColsAtCompileTime == 1) ? LinearAccessBit : 0,
|
||||
|
||||
FlagsRowMajorBit = traits<XprType>::FlagsRowMajorBit,
|
||||
|
||||
Flags = (evaluator<ArgType>::Flags & (HereditaryBits & ~RowMajorBit /*| LinearAccessBit | DirectAccessBit*/)) |
|
||||
FlagsLinearAccessBit | FlagsRowMajorBit,
|
||||
|
||||
Alignment = 0
|
||||
};
|
||||
|
||||
EIGEN_DEVICE_FUNC explicit unary_evaluator(const XprType& xpr) : m_argImpl(xpr.nestedExpression()), m_xpr(xpr) {
|
||||
EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
|
||||
}
|
||||
|
||||
typedef typename XprType::Scalar Scalar;
|
||||
typedef typename XprType::CoeffReturnType CoeffReturnType;
|
||||
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index row, Index col) const {
|
||||
eigen_assert(m_xpr.rowIndices()[row] >= 0 && m_xpr.rowIndices()[row] < m_xpr.nestedExpression().rows() &&
|
||||
m_xpr.colIndices()[col] >= 0 && m_xpr.colIndices()[col] < m_xpr.nestedExpression().cols());
|
||||
return m_argImpl.coeff(m_xpr.rowIndices()[row], m_xpr.colIndices()[col]);
|
||||
}
|
||||
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(Index row, Index col) {
|
||||
eigen_assert(m_xpr.rowIndices()[row] >= 0 && m_xpr.rowIndices()[row] < m_xpr.nestedExpression().rows() &&
|
||||
m_xpr.colIndices()[col] >= 0 && m_xpr.colIndices()[col] < m_xpr.nestedExpression().cols());
|
||||
return m_argImpl.coeffRef(m_xpr.rowIndices()[row], m_xpr.colIndices()[col]);
|
||||
}
|
||||
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(Index index) {
|
||||
EIGEN_STATIC_ASSERT_LVALUE(XprType)
|
||||
Index row = XprType::RowsAtCompileTime == 1 ? 0 : index;
|
||||
Index col = XprType::RowsAtCompileTime == 1 ? index : 0;
|
||||
eigen_assert(m_xpr.rowIndices()[row] >= 0 && m_xpr.rowIndices()[row] < m_xpr.nestedExpression().rows() &&
|
||||
m_xpr.colIndices()[col] >= 0 && m_xpr.colIndices()[col] < m_xpr.nestedExpression().cols());
|
||||
return m_argImpl.coeffRef(m_xpr.rowIndices()[row], m_xpr.colIndices()[col]);
|
||||
}
|
||||
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& coeffRef(Index index) const {
|
||||
Index row = XprType::RowsAtCompileTime == 1 ? 0 : index;
|
||||
Index col = XprType::RowsAtCompileTime == 1 ? index : 0;
|
||||
eigen_assert(m_xpr.rowIndices()[row] >= 0 && m_xpr.rowIndices()[row] < m_xpr.nestedExpression().rows() &&
|
||||
m_xpr.colIndices()[col] >= 0 && m_xpr.colIndices()[col] < m_xpr.nestedExpression().cols());
|
||||
return m_argImpl.coeffRef(m_xpr.rowIndices()[row], m_xpr.colIndices()[col]);
|
||||
}
|
||||
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const CoeffReturnType coeff(Index index) const {
|
||||
Index row = XprType::RowsAtCompileTime == 1 ? 0 : index;
|
||||
Index col = XprType::RowsAtCompileTime == 1 ? index : 0;
|
||||
eigen_assert(m_xpr.rowIndices()[row] >= 0 && m_xpr.rowIndices()[row] < m_xpr.nestedExpression().rows() &&
|
||||
m_xpr.colIndices()[col] >= 0 && m_xpr.colIndices()[col] < m_xpr.nestedExpression().cols());
|
||||
return m_argImpl.coeff(m_xpr.rowIndices()[row], m_xpr.colIndices()[col]);
|
||||
}
|
||||
|
||||
protected:
|
||||
evaluator<ArgType> m_argImpl;
|
||||
const XprType& m_xpr;
|
||||
};
|
||||
|
||||
// Catch assignments to an IndexedView.
|
||||
template <typename ArgType, typename RowIndices, typename ColIndices>
|
||||
struct evaluator_assume_aliasing<IndexedView<ArgType, RowIndices, ColIndices>> {
|
||||
static const bool value = true;
|
||||
};
|
||||
|
||||
} // end namespace internal
|
||||
|
||||
} // end namespace Eigen
|
||||
|
||||
#endif // EIGEN_INDEXED_VIEW_H
|
||||
254
2025.09.22_cpp_with_eigen_package/Eigen/src/Core/InnerProduct.h
Normal file
@@ -0,0 +1,254 @@
|
||||
// This file is part of Eigen, a lightweight C++ template library
|
||||
// for linear algebra.
|
||||
//
|
||||
// Copyright (C) 2024 Charlie Schlosser <cs.schlosser@gmail.com>
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla
|
||||
// Public License v. 2.0. If a copy of the MPL was not distributed
|
||||
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
#ifndef EIGEN_INNER_PRODUCT_EVAL_H
|
||||
#define EIGEN_INNER_PRODUCT_EVAL_H
|
||||
|
||||
// IWYU pragma: private
|
||||
#include "./InternalHeaderCheck.h"
|
||||
|
||||
namespace Eigen {
|
||||
|
||||
namespace internal {
|
||||
|
||||
// recursively searches for the largest simd type that does not exceed Size, or the smallest if no such type exists
|
||||
template <typename Scalar, int Size, typename Packet = typename packet_traits<Scalar>::type,
|
||||
bool Stop =
|
||||
(unpacket_traits<Packet>::size <= Size) || is_same<Packet, typename unpacket_traits<Packet>::half>::value>
|
||||
struct find_inner_product_packet_helper;
|
||||
|
||||
template <typename Scalar, int Size, typename Packet>
|
||||
struct find_inner_product_packet_helper<Scalar, Size, Packet, false> {
|
||||
using type = typename find_inner_product_packet_helper<Scalar, Size, typename unpacket_traits<Packet>::half>::type;
|
||||
};
|
||||
|
||||
template <typename Scalar, int Size, typename Packet>
|
||||
struct find_inner_product_packet_helper<Scalar, Size, Packet, true> {
|
||||
using type = Packet;
|
||||
};
|
||||
|
||||
template <typename Scalar, int Size>
|
||||
struct find_inner_product_packet : find_inner_product_packet_helper<Scalar, Size> {};
|
||||
|
||||
template <typename Scalar>
|
||||
struct find_inner_product_packet<Scalar, Dynamic> {
|
||||
using type = typename packet_traits<Scalar>::type;
|
||||
};
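// Illustrative example (assumption: an AVX build where packet_traits<float>::type is Packet8f and its
// half-size type is Packet4f): for Size == 4 the recursion above rejects Packet8f (8 > 4) and selects
// Packet4f, while for Size == Dynamic the full-width Packet8f is used directly.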
|
||||
|
||||
template <typename Lhs, typename Rhs>
|
||||
struct inner_product_assert {
|
||||
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Lhs)
|
||||
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Rhs)
|
||||
EIGEN_STATIC_ASSERT_SAME_VECTOR_SIZE(Lhs, Rhs)
|
||||
#ifndef EIGEN_NO_DEBUG
|
||||
static EIGEN_DEVICE_FUNC void run(const Lhs& lhs, const Rhs& rhs) {
|
||||
eigen_assert((lhs.size() == rhs.size()) && "Inner product: lhs and rhs vectors must have same size");
|
||||
}
|
||||
#else
|
||||
static EIGEN_DEVICE_FUNC void run(const Lhs&, const Rhs&) {}
|
||||
#endif
|
||||
};
|
||||
|
||||
template <typename Func, typename Lhs, typename Rhs>
|
||||
struct inner_product_evaluator {
|
||||
static constexpr int LhsFlags = evaluator<Lhs>::Flags;
|
||||
static constexpr int RhsFlags = evaluator<Rhs>::Flags;
|
||||
static constexpr int SizeAtCompileTime = size_prefer_fixed(Lhs::SizeAtCompileTime, Rhs::SizeAtCompileTime);
|
||||
static constexpr int MaxSizeAtCompileTime =
|
||||
min_size_prefer_fixed(Lhs::MaxSizeAtCompileTime, Rhs::MaxSizeAtCompileTime);
|
||||
static constexpr int LhsAlignment = evaluator<Lhs>::Alignment;
|
||||
static constexpr int RhsAlignment = evaluator<Rhs>::Alignment;
|
||||
|
||||
using Scalar = typename Func::result_type;
|
||||
using Packet = typename find_inner_product_packet<Scalar, SizeAtCompileTime>::type;
|
||||
|
||||
static constexpr bool Vectorize =
|
||||
bool(LhsFlags & RhsFlags & PacketAccessBit) && Func::PacketAccess &&
|
||||
((MaxSizeAtCompileTime == Dynamic) || (unpacket_traits<Packet>::size <= MaxSizeAtCompileTime));
|
||||
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit inner_product_evaluator(const Lhs& lhs, const Rhs& rhs,
|
||||
Func func = Func())
|
||||
: m_func(func), m_lhs(lhs), m_rhs(rhs), m_size(lhs.size()) {
|
||||
inner_product_assert<Lhs, Rhs>::run(lhs, rhs);
|
||||
}
|
||||
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index size() const { return m_size.value(); }
|
||||
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar coeff(Index index) const {
|
||||
return m_func.coeff(m_lhs.coeff(index), m_rhs.coeff(index));
|
||||
}
|
||||
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar coeff(const Scalar& value, Index index) const {
|
||||
return m_func.coeff(value, m_lhs.coeff(index), m_rhs.coeff(index));
|
||||
}
|
||||
|
||||
template <typename PacketType, int LhsMode = LhsAlignment, int RhsMode = RhsAlignment>
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketType packet(Index index) const {
|
||||
return m_func.packet(m_lhs.template packet<LhsMode, PacketType>(index),
|
||||
m_rhs.template packet<RhsMode, PacketType>(index));
|
||||
}
|
||||
|
||||
template <typename PacketType, int LhsMode = LhsAlignment, int RhsMode = RhsAlignment>
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketType packet(const PacketType& value, Index index) const {
|
||||
return m_func.packet(value, m_lhs.template packet<LhsMode, PacketType>(index),
|
||||
m_rhs.template packet<RhsMode, PacketType>(index));
|
||||
}
|
||||
|
||||
const Func m_func;
|
||||
const evaluator<Lhs> m_lhs;
|
||||
const evaluator<Rhs> m_rhs;
|
||||
const variable_if_dynamic<Index, SizeAtCompileTime> m_size;
|
||||
};
|
||||
|
||||
template <typename Evaluator, bool Vectorize = Evaluator::Vectorize>
|
||||
struct inner_product_impl;
|
||||
|
||||
// scalar loop
|
||||
template <typename Evaluator>
|
||||
struct inner_product_impl<Evaluator, false> {
|
||||
using Scalar = typename Evaluator::Scalar;
|
||||
static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar run(const Evaluator& eval) {
|
||||
const Index size = eval.size();
|
||||
if (size == 0) return Scalar(0);
|
||||
|
||||
Scalar result = eval.coeff(0);
|
||||
for (Index k = 1; k < size; k++) {
|
||||
result = eval.coeff(result, k);
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
};
|
||||
|
||||
// vector loop
|
||||
template <typename Evaluator>
|
||||
struct inner_product_impl<Evaluator, true> {
|
||||
using UnsignedIndex = std::make_unsigned_t<Index>;
|
||||
using Scalar = typename Evaluator::Scalar;
|
||||
using Packet = typename Evaluator::Packet;
|
||||
static constexpr int PacketSize = unpacket_traits<Packet>::size;
|
||||
static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar run(const Evaluator& eval) {
|
||||
const UnsignedIndex size = static_cast<UnsignedIndex>(eval.size());
|
||||
if (size < PacketSize) return inner_product_impl<Evaluator, false>::run(eval);
|
||||
|
||||
const UnsignedIndex packetEnd = numext::round_down(size, PacketSize);
|
||||
const UnsignedIndex quadEnd = numext::round_down(size, 4 * PacketSize);
|
||||
const UnsignedIndex numPackets = size / PacketSize;
|
||||
const UnsignedIndex numRemPackets = (packetEnd - quadEnd) / PacketSize;
|
||||
|
||||
Packet presult0, presult1, presult2, presult3;
|
||||
|
||||
presult0 = eval.template packet<Packet>(0 * PacketSize);
|
||||
if (numPackets >= 2) presult1 = eval.template packet<Packet>(1 * PacketSize);
|
||||
if (numPackets >= 3) presult2 = eval.template packet<Packet>(2 * PacketSize);
|
||||
if (numPackets >= 4) {
|
||||
presult3 = eval.template packet<Packet>(3 * PacketSize);
|
||||
|
||||
for (UnsignedIndex k = 4 * PacketSize; k < quadEnd; k += 4 * PacketSize) {
|
||||
presult0 = eval.packet(presult0, k + 0 * PacketSize);
|
||||
presult1 = eval.packet(presult1, k + 1 * PacketSize);
|
||||
presult2 = eval.packet(presult2, k + 2 * PacketSize);
|
||||
presult3 = eval.packet(presult3, k + 3 * PacketSize);
|
||||
}
|
||||
|
||||
if (numRemPackets >= 1) presult0 = eval.packet(presult0, quadEnd + 0 * PacketSize);
|
||||
if (numRemPackets >= 2) presult1 = eval.packet(presult1, quadEnd + 1 * PacketSize);
|
||||
if (numRemPackets == 3) presult2 = eval.packet(presult2, quadEnd + 2 * PacketSize);
|
||||
|
||||
presult2 = padd(presult2, presult3);
|
||||
}
|
||||
|
||||
if (numPackets >= 3) presult1 = padd(presult1, presult2);
|
||||
if (numPackets >= 2) presult0 = padd(presult0, presult1);
|
||||
|
||||
Scalar result = predux(presult0);
|
||||
for (UnsignedIndex k = packetEnd; k < size; k++) {
|
||||
result = eval.coeff(result, k);
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
};
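A scalar analogue of the blocked reduction above may help to read it (an illustrative sketch, not part of the diff): four independent partial accumulators are updated in the unrolled loop so consecutive updates do not form one long dependency chain, the partials are folded pairwise, and a plain loop handles the tail, mirroring the padd/predux/coeff steps.

    #include <cstddef>

    // Illustrative scalar counterpart of inner_product_impl<Evaluator, true>.
    double unrolled_sum(const double* x, std::size_t n) {
      double s0 = 0, s1 = 0, s2 = 0, s3 = 0;
      std::size_t k = 0;
      for (; k + 4 <= n; k += 4) {       // main loop, like the quadEnd loop above
        s0 += x[k + 0];
        s1 += x[k + 1];
        s2 += x[k + 2];
        s3 += x[k + 3];
      }
      double s = (s0 + s1) + (s2 + s3);  // fold the partial sums, like the padd/predux step
      for (; k < n; ++k) s += x[k];      // remainder, like the packetEnd..size tail
      return s;
    }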
|
||||
|
||||
template <typename Scalar, bool Conj>
|
||||
struct conditional_conj;
|
||||
|
||||
template <typename Scalar>
|
||||
struct conditional_conj<Scalar, true> {
|
||||
static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar coeff(const Scalar& a) { return numext::conj(a); }
|
||||
template <typename Packet>
|
||||
static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet packet(const Packet& a) {
|
||||
return pconj(a);
|
||||
}
|
||||
};
|
||||
|
||||
template <typename Scalar>
|
||||
struct conditional_conj<Scalar, false> {
|
||||
static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar coeff(const Scalar& a) { return a; }
|
||||
template <typename Packet>
|
||||
static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet packet(const Packet& a) {
|
||||
return a;
|
||||
}
|
||||
};
|
||||
|
||||
template <typename LhsScalar, typename RhsScalar, bool Conj>
|
||||
struct scalar_inner_product_op {
|
||||
using result_type = typename ScalarBinaryOpTraits<LhsScalar, RhsScalar>::ReturnType;
|
||||
using conj_helper = conditional_conj<LhsScalar, Conj>;
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE result_type coeff(const LhsScalar& a, const RhsScalar& b) const {
|
||||
return (conj_helper::coeff(a) * b);
|
||||
}
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE result_type coeff(const result_type& accum, const LhsScalar& a,
|
||||
const RhsScalar& b) const {
|
||||
return (conj_helper::coeff(a) * b) + accum;
|
||||
}
|
||||
static constexpr bool PacketAccess = false;
|
||||
};
|
||||
|
||||
template <typename Scalar, bool Conj>
|
||||
struct scalar_inner_product_op<Scalar, Scalar, Conj> {
|
||||
using result_type = Scalar;
|
||||
using conj_helper = conditional_conj<Scalar, Conj>;
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar coeff(const Scalar& a, const Scalar& b) const {
|
||||
return pmul(conj_helper::coeff(a), b);
|
||||
}
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar coeff(const Scalar& accum, const Scalar& a, const Scalar& b) const {
|
||||
return pmadd(conj_helper::coeff(a), b, accum);
|
||||
}
|
||||
template <typename Packet>
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet packet(const Packet& a, const Packet& b) const {
|
||||
return pmul(conj_helper::packet(a), b);
|
||||
}
|
||||
template <typename Packet>
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet packet(const Packet& accum, const Packet& a, const Packet& b) const {
|
||||
return pmadd(conj_helper::packet(a), b, accum);
|
||||
}
|
||||
static constexpr bool PacketAccess = packet_traits<Scalar>::HasMul && packet_traits<Scalar>::HasAdd;
|
||||
};
|
||||
|
||||
template <typename Lhs, typename Rhs, bool Conj>
|
||||
struct default_inner_product_impl {
|
||||
using LhsScalar = typename traits<Lhs>::Scalar;
|
||||
using RhsScalar = typename traits<Rhs>::Scalar;
|
||||
using Op = scalar_inner_product_op<LhsScalar, RhsScalar, Conj>;
|
||||
using Evaluator = inner_product_evaluator<Op, Lhs, Rhs>;
|
||||
using result_type = typename Evaluator::Scalar;
|
||||
static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE result_type run(const MatrixBase<Lhs>& a, const MatrixBase<Rhs>& b) {
|
||||
Evaluator eval(a.derived(), b.derived(), Op());
|
||||
return inner_product_impl<Evaluator>::run(eval);
|
||||
}
|
||||
};
|
||||
|
||||
template <typename Lhs, typename Rhs>
|
||||
struct dot_impl : default_inner_product_impl<Lhs, Rhs, true> {};
|
||||
|
||||
} // namespace internal
|
||||
} // namespace Eigen
|
||||
|
||||
#endif // EIGEN_INNER_PRODUCT_EVAL_H
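For reference, this machinery is what backs the user-facing dot product; a small stand-alone sketch (assuming only the usual Eigen/Dense entry point, with made-up values) shows the conjugation convention selected by dot_impl's Conj = true:

    #include <Eigen/Dense>
    #include <complex>
    #include <iostream>

    int main() {
      using C = std::complex<float>;
      Eigen::Vector2cf a(C(0, 1), C(1, 0));
      Eigen::Vector2cf b(C(1, 0), C(0, 1));
      std::cout << a.dot(b) << "\n";           // conj(a(0))*b(0) + conj(a(1))*b(1)
      std::cout << a.transpose() * b << "\n";  // unconjugated product, for comparison
      return 0;
    }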
|
||||
3
2025.09.22_cpp_with_eigen_package/Eigen/src/Core/InternalHeaderCheck.h
Normal file
@@ -0,0 +1,3 @@
|
||||
#ifndef EIGEN_CORE_MODULE_H
|
||||
#error "Please include Eigen/Core instead of including headers inside the src directory directly."
|
||||
#endif
|
||||
108
2025.09.22_cpp_with_eigen_package/Eigen/src/Core/Inverse.h
Normal file
@@ -0,0 +1,108 @@
|
||||
// This file is part of Eigen, a lightweight C++ template library
|
||||
// for linear algebra.
|
||||
//
|
||||
// Copyright (C) 2014-2019 Gael Guennebaud <gael.guennebaud@inria.fr>
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla
|
||||
// Public License v. 2.0. If a copy of the MPL was not distributed
|
||||
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
#ifndef EIGEN_INVERSE_H
|
||||
#define EIGEN_INVERSE_H
|
||||
|
||||
// IWYU pragma: private
|
||||
#include "./InternalHeaderCheck.h"
|
||||
|
||||
namespace Eigen {
|
||||
|
||||
template <typename XprType, typename StorageKind>
|
||||
class InverseImpl;
|
||||
|
||||
namespace internal {
|
||||
|
||||
template <typename XprType>
|
||||
struct traits<Inverse<XprType> > : traits<typename XprType::PlainObject> {
|
||||
typedef typename XprType::PlainObject PlainObject;
|
||||
typedef traits<PlainObject> BaseTraits;
|
||||
enum { Flags = BaseTraits::Flags & RowMajorBit };
|
||||
};
|
||||
|
||||
} // end namespace internal
|
||||
|
||||
/** \class Inverse
*
* \brief Expression of the inverse of another expression
*
* \tparam XprType the type of the expression we are taking the inverse of
*
* This class represents an abstract expression of A.inverse()
* and most of the time this is the only way it is used.
*
*/
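A minimal usage sketch (illustrative only; the concrete matrix is made up): the expression is produced by inverse() and evaluated when assigned to a destination.

    #include <Eigen/Dense>
    #include <iostream>

    int main() {
      Eigen::Matrix2d A;
      A << 4, 0,
           0, 2;
      Eigen::Matrix2d B = A.inverse();  // builds an Inverse<...> expression, evaluated on assignment
      std::cout << B << "\n";           // diag(0.25, 0.5)
      return 0;
    }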
|
||||
template <typename XprType>
|
||||
class Inverse : public InverseImpl<XprType, typename internal::traits<XprType>::StorageKind> {
|
||||
public:
|
||||
typedef typename XprType::StorageIndex StorageIndex;
|
||||
typedef typename XprType::Scalar Scalar;
|
||||
typedef typename internal::ref_selector<XprType>::type XprTypeNested;
|
||||
typedef internal::remove_all_t<XprTypeNested> XprTypeNestedCleaned;
|
||||
typedef typename internal::ref_selector<Inverse>::type Nested;
|
||||
typedef internal::remove_all_t<XprType> NestedExpression;
|
||||
|
||||
explicit EIGEN_DEVICE_FUNC Inverse(const XprType& xpr) : m_xpr(xpr) {}
|
||||
|
||||
EIGEN_DEVICE_FUNC constexpr Index rows() const noexcept { return m_xpr.cols(); }
|
||||
EIGEN_DEVICE_FUNC constexpr Index cols() const noexcept { return m_xpr.rows(); }
|
||||
|
||||
EIGEN_DEVICE_FUNC const XprTypeNestedCleaned& nestedExpression() const { return m_xpr; }
|
||||
|
||||
protected:
|
||||
XprTypeNested m_xpr;
|
||||
};
|
||||
|
||||
// Generic API dispatcher
|
||||
template <typename XprType, typename StorageKind>
|
||||
class InverseImpl : public internal::generic_xpr_base<Inverse<XprType> >::type {
|
||||
public:
|
||||
typedef typename internal::generic_xpr_base<Inverse<XprType> >::type Base;
|
||||
typedef typename XprType::Scalar Scalar;
|
||||
|
||||
private:
|
||||
Scalar coeff(Index row, Index col) const;
|
||||
Scalar coeff(Index i) const;
|
||||
};
|
||||
|
||||
namespace internal {
|
||||
|
||||
/** \internal
* \brief Default evaluator for Inverse expressions.
*
* This default evaluator for Inverse expressions simply evaluates the inverse into a temporary
* by a call to internal::call_assignment_no_alias.
* Therefore, inverse implementers only have to specialize Assignment<Dst, Inverse<...>, ...> for
* their own nested expression.
*
* \sa class Inverse
*/
|
||||
template <typename ArgType>
|
||||
struct unary_evaluator<Inverse<ArgType> > : public evaluator<typename Inverse<ArgType>::PlainObject> {
|
||||
typedef Inverse<ArgType> InverseType;
|
||||
typedef typename InverseType::PlainObject PlainObject;
|
||||
typedef evaluator<PlainObject> Base;
|
||||
|
||||
enum { Flags = Base::Flags | EvalBeforeNestingBit };
|
||||
|
||||
EIGEN_DEVICE_FUNC unary_evaluator(const InverseType& inv_xpr) : m_result(inv_xpr.rows(), inv_xpr.cols()) {
|
||||
internal::construct_at<Base>(this, m_result);
|
||||
internal::call_assignment_no_alias(m_result, inv_xpr);
|
||||
}
|
||||
|
||||
protected:
|
||||
PlainObject m_result;
|
||||
};
|
||||
|
||||
} // end namespace internal
|
||||
|
||||
} // end namespace Eigen
|
||||
|
||||
#endif // EIGEN_INVERSE_H
|
||||
153
2025.09.22_cpp_with_eigen_package/Eigen/src/Core/Map.h
Normal file
@@ -0,0 +1,153 @@
|
||||
// This file is part of Eigen, a lightweight C++ template library
|
||||
// for linear algebra.
|
||||
//
|
||||
// Copyright (C) 2007-2010 Benoit Jacob <jacob.benoit.1@gmail.com>
|
||||
// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla
|
||||
// Public License v. 2.0. If a copy of the MPL was not distributed
|
||||
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
#ifndef EIGEN_MAP_H
|
||||
#define EIGEN_MAP_H
|
||||
|
||||
// IWYU pragma: private
|
||||
#include "./InternalHeaderCheck.h"
|
||||
|
||||
namespace Eigen {
|
||||
|
||||
namespace internal {
|
||||
template <typename PlainObjectType, int MapOptions, typename StrideType>
|
||||
struct traits<Map<PlainObjectType, MapOptions, StrideType> > : public traits<PlainObjectType> {
|
||||
typedef traits<PlainObjectType> TraitsBase;
|
||||
enum {
|
||||
PlainObjectTypeInnerSize = ((traits<PlainObjectType>::Flags & RowMajorBit) == RowMajorBit)
|
||||
? PlainObjectType::ColsAtCompileTime
|
||||
: PlainObjectType::RowsAtCompileTime,
|
||||
|
||||
InnerStrideAtCompileTime = StrideType::InnerStrideAtCompileTime == 0
|
||||
? int(PlainObjectType::InnerStrideAtCompileTime)
|
||||
: int(StrideType::InnerStrideAtCompileTime),
|
||||
OuterStrideAtCompileTime = StrideType::OuterStrideAtCompileTime == 0
|
||||
? (InnerStrideAtCompileTime == Dynamic || PlainObjectTypeInnerSize == Dynamic
|
||||
? Dynamic
|
||||
: int(InnerStrideAtCompileTime) * int(PlainObjectTypeInnerSize))
|
||||
: int(StrideType::OuterStrideAtCompileTime),
|
||||
Alignment = int(MapOptions) & int(AlignedMask),
|
||||
Flags0 = TraitsBase::Flags & (~NestByRefBit),
|
||||
Flags = is_lvalue<PlainObjectType>::value ? int(Flags0) : (int(Flags0) & ~LvalueBit)
|
||||
};
|
||||
|
||||
private:
|
||||
enum { Options }; // Expressions don't have Options
|
||||
};
|
||||
} // namespace internal
|
||||
|
||||
/** \class Map
|
||||
* \ingroup Core_Module
|
||||
*
|
||||
* \brief A matrix or vector expression mapping an existing array of data.
|
||||
*
|
||||
* \tparam PlainObjectType the equivalent matrix type of the mapped data
|
||||
* \tparam MapOptions specifies the pointer alignment in bytes. It can be: \c #Aligned128, \c #Aligned64, \c #Aligned32,
* \c #Aligned16, \c #Aligned8 or \c #Unaligned. The default is \c #Unaligned.
* \tparam StrideType optionally specifies strides. By default, Map assumes the memory layout of an ordinary, contiguous
* array. This can be overridden by specifying strides. The type passed here must be a specialization of the Stride
* template, see examples below.
|
||||
*
|
||||
* This class represents a matrix or vector expression mapping an existing array of data.
|
||||
* It can be used to let Eigen interface without any overhead with non-Eigen data structures,
|
||||
* such as plain C arrays or structures from other libraries. By default, it assumes that the
|
||||
* data is laid out contiguously in memory. You can however override this by explicitly specifying
|
||||
* inner and outer strides.
|
||||
*
|
||||
* Here's an example of simply mapping a contiguous array as a \ref TopicStorageOrders "column-major" matrix:
|
||||
* \include Map_simple.cpp
|
||||
* Output: \verbinclude Map_simple.out
|
||||
*
|
||||
* If you need to map non-contiguous arrays, you can do so by specifying strides:
|
||||
*
|
||||
* Here's an example of mapping an array as a vector, specifying an inner stride, that is, the pointer
|
||||
* increment between two consecutive coefficients. Here, we're specifying the inner stride as a compile-time
|
||||
* fixed value.
|
||||
* \include Map_inner_stride.cpp
|
||||
* Output: \verbinclude Map_inner_stride.out
|
||||
*
|
||||
* Here's an example of mapping an array while specifying an outer stride. Here, since we're mapping
|
||||
* as a column-major matrix, 'outer stride' means the pointer increment between two consecutive columns.
|
||||
* Here, we're specifying the outer stride as a runtime parameter. Note that here \c OuterStride<> is
|
||||
* a short version of \c OuterStride<Dynamic> because the default template parameter of OuterStride
|
||||
* is \c Dynamic
|
||||
* \include Map_outer_stride.cpp
|
||||
* Output: \verbinclude Map_outer_stride.out
|
||||
*
|
||||
* For more details and for an example of specifying both an inner and an outer stride, see class Stride.
|
||||
*
|
||||
* \b Tip: to change the array of data mapped by a Map object, you can use the C++
|
||||
* placement new syntax:
|
||||
*
|
||||
* Example: \include Map_placement_new.cpp
|
||||
* Output: \verbinclude Map_placement_new.out
|
||||
*
|
||||
* This class is the return type of PlainObjectBase::Map() but can also be used directly.
|
||||
*
|
||||
* \sa PlainObjectBase::Map(), \ref TopicStorageOrders
|
||||
*/
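As a compact illustration of the points above (a sketch assuming only Eigen/Dense; the buffer and sizes are made up), the same raw array can be viewed as a matrix, viewed as a strided vector, and later re-seated with placement new:

    #include <Eigen/Dense>
    #include <iostream>
    #include <new>

    int main() {
      float data[8] = {1, 2, 3, 4, 5, 6, 7, 8};
      Eigen::Map<Eigen::MatrixXf> m(data, 2, 4);                         // 2x4 column-major view, no copy
      Eigen::Map<Eigen::VectorXf, 0, Eigen::InnerStride<2>> v(data, 4);  // every other element: 1 3 5 7
      std::cout << m << "\n" << v.transpose() << "\n";
      new (&m) Eigen::Map<Eigen::MatrixXf>(data, 4, 2);                  // re-map the same buffer as 4x2
      std::cout << m << "\n";
      return 0;
    }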
|
||||
template <typename PlainObjectType, int MapOptions, typename StrideType>
|
||||
class Map : public MapBase<Map<PlainObjectType, MapOptions, StrideType> > {
|
||||
public:
|
||||
typedef MapBase<Map> Base;
|
||||
EIGEN_DENSE_PUBLIC_INTERFACE(Map)
|
||||
|
||||
typedef typename Base::PointerType PointerType;
|
||||
typedef PointerType PointerArgType;
|
||||
EIGEN_DEVICE_FUNC inline PointerType cast_to_pointer_type(PointerArgType ptr) { return ptr; }
|
||||
|
||||
EIGEN_DEVICE_FUNC constexpr Index innerStride() const {
|
||||
return StrideType::InnerStrideAtCompileTime != 0 ? m_stride.inner() : 1;
|
||||
}
|
||||
|
||||
EIGEN_DEVICE_FUNC constexpr Index outerStride() const {
|
||||
return StrideType::OuterStrideAtCompileTime != 0 ? m_stride.outer()
|
||||
: internal::traits<Map>::OuterStrideAtCompileTime != Dynamic
|
||||
? Index(internal::traits<Map>::OuterStrideAtCompileTime)
|
||||
: IsVectorAtCompileTime ? (this->size() * innerStride())
|
||||
: int(Flags) & RowMajorBit ? (this->cols() * innerStride())
|
||||
: (this->rows() * innerStride());
|
||||
}
|
||||
|
||||
/** Constructor in the fixed-size case.
|
||||
*
|
||||
* \param dataPtr pointer to the array to map
|
||||
* \param stride optional Stride object, passing the strides.
|
||||
*/
|
||||
EIGEN_DEVICE_FUNC explicit inline Map(PointerArgType dataPtr, const StrideType& stride = StrideType())
|
||||
: Base(cast_to_pointer_type(dataPtr)), m_stride(stride) {}
|
||||
|
||||
/** Constructor in the dynamic-size vector case.
|
||||
*
|
||||
* \param dataPtr pointer to the array to map
|
||||
* \param size the size of the vector expression
|
||||
* \param stride optional Stride object, passing the strides.
|
||||
*/
|
||||
EIGEN_DEVICE_FUNC inline Map(PointerArgType dataPtr, Index size, const StrideType& stride = StrideType())
|
||||
: Base(cast_to_pointer_type(dataPtr), size), m_stride(stride) {}
|
||||
|
||||
/** Constructor in the dynamic-size matrix case.
|
||||
*
|
||||
* \param dataPtr pointer to the array to map
|
||||
* \param rows the number of rows of the matrix expression
|
||||
* \param cols the number of columns of the matrix expression
|
||||
* \param stride optional Stride object, passing the strides.
|
||||
*/
|
||||
EIGEN_DEVICE_FUNC inline Map(PointerArgType dataPtr, Index rows, Index cols, const StrideType& stride = StrideType())
|
||||
: Base(cast_to_pointer_type(dataPtr), rows, cols), m_stride(stride) {}
|
||||
|
||||
EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Map)
|
||||
|
||||
protected:
|
||||
StrideType m_stride;
|
||||
};
|
||||
|
||||
} // end namespace Eigen
|
||||
|
||||
#endif // EIGEN_MAP_H
|
||||
283
2025.09.22_cpp_with_eigen_package/Eigen/src/Core/MapBase.h
Normal file
@@ -0,0 +1,283 @@
|
||||
// This file is part of Eigen, a lightweight C++ template library
|
||||
// for linear algebra.
|
||||
//
|
||||
// Copyright (C) 2007-2010 Benoit Jacob <jacob.benoit.1@gmail.com>
|
||||
// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla
|
||||
// Public License v. 2.0. If a copy of the MPL was not distributed
|
||||
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
#ifndef EIGEN_MAPBASE_H
|
||||
#define EIGEN_MAPBASE_H
|
||||
|
||||
#define EIGEN_STATIC_ASSERT_INDEX_BASED_ACCESS(Derived) \
|
||||
EIGEN_STATIC_ASSERT((int(internal::evaluator<Derived>::Flags) & LinearAccessBit) || Derived::IsVectorAtCompileTime, \
|
||||
YOU_ARE_TRYING_TO_USE_AN_INDEX_BASED_ACCESSOR_ON_AN_EXPRESSION_THAT_DOES_NOT_SUPPORT_THAT)
|
||||
|
||||
// IWYU pragma: private
|
||||
#include "./InternalHeaderCheck.h"
|
||||
|
||||
namespace Eigen {
|
||||
|
||||
/** \ingroup Core_Module
|
||||
*
|
||||
* \brief Base class for dense Map and Block expression with direct access
|
||||
*
|
||||
* This base class provides the const low-level accessors (e.g. coeff, coeffRef) of dense
|
||||
* Map and Block objects with direct access.
|
||||
* Typical users do not have to directly deal with this class.
|
||||
*
|
||||
* This class can be extended through the plugin macro \c EIGEN_MAPBASE_PLUGIN.
|
||||
* See \link TopicCustomizing_Plugins customizing Eigen \endlink for details.
|
||||
*
|
||||
* The \c Derived class has to provide the following two methods describing the memory layout:
|
||||
* \code Index innerStride() const; \endcode
|
||||
* \code Index outerStride() const; \endcode
|
||||
*
|
||||
* \sa class Map, class Block
|
||||
*/
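To make the two layout methods concrete, a small sketch (illustrative only; the outer stride of 6 is arbitrary) prints what a strided Map reports through them:

    #include <Eigen/Dense>
    #include <iostream>

    int main() {
      double data[12] = {};
      Eigen::Map<Eigen::MatrixXd, 0, Eigen::OuterStride<>> m(data, 3, 2, Eigen::OuterStride<>(6));
      std::cout << m.innerStride() << " " << m.outerStride() << "\n";  // prints "1 6"
      return 0;
    }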
|
||||
template <typename Derived>
|
||||
class MapBase<Derived, ReadOnlyAccessors> : public internal::dense_xpr_base<Derived>::type {
|
||||
public:
|
||||
typedef typename internal::dense_xpr_base<Derived>::type Base;
|
||||
enum {
|
||||
RowsAtCompileTime = internal::traits<Derived>::RowsAtCompileTime,
|
||||
ColsAtCompileTime = internal::traits<Derived>::ColsAtCompileTime,
|
||||
InnerStrideAtCompileTime = internal::traits<Derived>::InnerStrideAtCompileTime,
|
||||
SizeAtCompileTime = Base::SizeAtCompileTime
|
||||
};
|
||||
|
||||
typedef typename internal::traits<Derived>::StorageKind StorageKind;
|
||||
typedef typename internal::traits<Derived>::Scalar Scalar;
|
||||
typedef typename internal::packet_traits<Scalar>::type PacketScalar;
|
||||
typedef typename NumTraits<Scalar>::Real RealScalar;
|
||||
typedef std::conditional_t<bool(internal::is_lvalue<Derived>::value), Scalar*, const Scalar*> PointerType;
|
||||
|
||||
using Base::derived;
|
||||
// using Base::RowsAtCompileTime;
|
||||
// using Base::ColsAtCompileTime;
|
||||
// using Base::SizeAtCompileTime;
|
||||
using Base::Flags;
|
||||
using Base::IsRowMajor;
|
||||
using Base::IsVectorAtCompileTime;
|
||||
using Base::MaxColsAtCompileTime;
|
||||
using Base::MaxRowsAtCompileTime;
|
||||
using Base::MaxSizeAtCompileTime;
|
||||
|
||||
using Base::coeff;
|
||||
using Base::coeffRef;
|
||||
using Base::cols;
|
||||
using Base::eval;
|
||||
using Base::lazyAssign;
|
||||
using Base::rows;
|
||||
using Base::size;
|
||||
|
||||
using Base::colStride;
|
||||
using Base::innerStride;
|
||||
using Base::outerStride;
|
||||
using Base::rowStride;
|
||||
|
||||
// bug 217 - compile error on ICC 11.1
|
||||
using Base::operator=;
|
||||
|
||||
typedef typename Base::CoeffReturnType CoeffReturnType;
|
||||
|
||||
/** \copydoc DenseBase::rows() */
|
||||
EIGEN_DEVICE_FUNC constexpr Index rows() const noexcept { return m_rows.value(); }
|
||||
/** \copydoc DenseBase::cols() */
|
||||
EIGEN_DEVICE_FUNC constexpr Index cols() const noexcept { return m_cols.value(); }
|
||||
|
||||
/** Returns a pointer to the first coefficient of the matrix or vector.
|
||||
*
|
||||
* \note When addressing this data, make sure to honor the strides returned by innerStride() and outerStride().
|
||||
*
|
||||
* \sa innerStride(), outerStride()
|
||||
*/
|
||||
EIGEN_DEVICE_FUNC constexpr const Scalar* data() const { return m_data; }
|
||||
|
||||
/** \copydoc PlainObjectBase::coeff(Index,Index) const */
|
||||
EIGEN_DEVICE_FUNC inline const Scalar& coeff(Index rowId, Index colId) const {
|
||||
return m_data[colId * colStride() + rowId * rowStride()];
|
||||
}
|
||||
|
||||
/** \copydoc PlainObjectBase::coeff(Index) const */
|
||||
EIGEN_DEVICE_FUNC inline const Scalar& coeff(Index index) const {
|
||||
EIGEN_STATIC_ASSERT_INDEX_BASED_ACCESS(Derived)
|
||||
return m_data[index * innerStride()];
|
||||
}
|
||||
|
||||
/** \copydoc PlainObjectBase::coeffRef(Index,Index) const */
|
||||
EIGEN_DEVICE_FUNC inline const Scalar& coeffRef(Index rowId, Index colId) const {
|
||||
return this->m_data[colId * colStride() + rowId * rowStride()];
|
||||
}
|
||||
|
||||
/** \copydoc PlainObjectBase::coeffRef(Index) const */
|
||||
EIGEN_DEVICE_FUNC inline const Scalar& coeffRef(Index index) const {
|
||||
EIGEN_STATIC_ASSERT_INDEX_BASED_ACCESS(Derived)
|
||||
return this->m_data[index * innerStride()];
|
||||
}
|
||||
|
||||
/** \internal */
|
||||
template <int LoadMode>
|
||||
inline PacketScalar packet(Index rowId, Index colId) const {
|
||||
return internal::ploadt<PacketScalar, LoadMode>(m_data + (colId * colStride() + rowId * rowStride()));
|
||||
}
|
||||
|
||||
/** \internal */
|
||||
template <int LoadMode>
|
||||
inline PacketScalar packet(Index index) const {
|
||||
EIGEN_STATIC_ASSERT_INDEX_BASED_ACCESS(Derived)
|
||||
return internal::ploadt<PacketScalar, LoadMode>(m_data + index * innerStride());
|
||||
}
|
||||
|
||||
/** \internal Constructor for fixed size matrices or vectors */
|
||||
EIGEN_DEVICE_FUNC explicit inline MapBase(PointerType dataPtr)
|
||||
: m_data(dataPtr), m_rows(RowsAtCompileTime), m_cols(ColsAtCompileTime) {
|
||||
EIGEN_STATIC_ASSERT_FIXED_SIZE(Derived)
|
||||
checkSanity<Derived>();
|
||||
}
|
||||
|
||||
/** \internal Constructor for dynamically sized vectors */
|
||||
EIGEN_DEVICE_FUNC inline MapBase(PointerType dataPtr, Index vecSize)
|
||||
: m_data(dataPtr),
|
||||
m_rows(RowsAtCompileTime == Dynamic ? vecSize : Index(RowsAtCompileTime)),
|
||||
m_cols(ColsAtCompileTime == Dynamic ? vecSize : Index(ColsAtCompileTime)) {
|
||||
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
|
||||
eigen_assert(vecSize >= 0);
|
||||
eigen_assert(dataPtr == 0 || SizeAtCompileTime == Dynamic || SizeAtCompileTime == vecSize);
|
||||
checkSanity<Derived>();
|
||||
}
|
||||
|
||||
/** \internal Constructor for dynamically sized matrices */
|
||||
EIGEN_DEVICE_FUNC inline MapBase(PointerType dataPtr, Index rows, Index cols)
|
||||
: m_data(dataPtr), m_rows(rows), m_cols(cols) {
|
||||
eigen_assert((dataPtr == 0) || (rows >= 0 && (RowsAtCompileTime == Dynamic || RowsAtCompileTime == rows) &&
|
||||
cols >= 0 && (ColsAtCompileTime == Dynamic || ColsAtCompileTime == cols)));
|
||||
checkSanity<Derived>();
|
||||
}
|
||||
|
||||
#ifdef EIGEN_MAPBASE_PLUGIN
|
||||
#include EIGEN_MAPBASE_PLUGIN
|
||||
#endif
|
||||
|
||||
protected:
|
||||
EIGEN_DEFAULT_COPY_CONSTRUCTOR(MapBase)
|
||||
EIGEN_DEFAULT_EMPTY_CONSTRUCTOR_AND_DESTRUCTOR(MapBase)
|
||||
|
||||
template <typename T>
|
||||
EIGEN_DEVICE_FUNC void checkSanity(std::enable_if_t<(internal::traits<T>::Alignment > 0), void*> = 0) const {
|
||||
// Temporary macro to allow scalars to not be properly aligned. This is while we sort out failures
|
||||
// in TensorFlow Lite that are currently relying on this UB.
|
||||
#ifndef EIGEN_ALLOW_UNALIGNED_SCALARS
|
||||
// Pointer must be aligned to the Scalar type, otherwise we get UB.
|
||||
eigen_assert((std::uintptr_t(m_data) % alignof(Scalar) == 0) && "data is not scalar-aligned");
|
||||
#endif
|
||||
#if EIGEN_MAX_ALIGN_BYTES > 0
|
||||
// innerStride() is not set yet when this function is called, so we optimistically assume the lowest plausible
|
||||
// value:
|
||||
const Index minInnerStride = InnerStrideAtCompileTime == Dynamic ? 1 : Index(InnerStrideAtCompileTime);
|
||||
EIGEN_ONLY_USED_FOR_DEBUG(minInnerStride);
|
||||
eigen_assert((((std::uintptr_t(m_data) % internal::traits<Derived>::Alignment) == 0) ||
|
||||
(cols() * rows() * minInnerStride * sizeof(Scalar)) < internal::traits<Derived>::Alignment) &&
|
||||
"data is not aligned");
|
||||
#endif
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
EIGEN_DEVICE_FUNC void checkSanity(std::enable_if_t<internal::traits<T>::Alignment == 0, void*> = 0) const {
|
||||
#ifndef EIGEN_ALLOW_UNALIGNED_SCALARS
|
||||
// Pointer must be aligned to the Scalar type, otherwise we get UB.
|
||||
eigen_assert((std::uintptr_t(m_data) % alignof(Scalar) == 0) && "data is not scalar-aligned");
|
||||
#endif
|
||||
}
|
||||
|
||||
PointerType m_data;
|
||||
const internal::variable_if_dynamic<Index, RowsAtCompileTime> m_rows;
|
||||
const internal::variable_if_dynamic<Index, ColsAtCompileTime> m_cols;
|
||||
};
|
||||
|
||||
/** \ingroup Core_Module
|
||||
*
|
||||
* \brief Base class for non-const dense Map and Block expression with direct access
|
||||
*
|
||||
* This base class provides the non-const low-level accessors (e.g. coeff and coeffRef) of
|
||||
* dense Map and Block objects with direct access.
|
||||
* It inherits MapBase<Derived, ReadOnlyAccessors> which defines the const variant for reading specific entries.
|
||||
*
|
||||
* \sa class Map, class Block
|
||||
*/
|
||||
template <typename Derived>
|
||||
class MapBase<Derived, WriteAccessors> : public MapBase<Derived, ReadOnlyAccessors> {
|
||||
typedef MapBase<Derived, ReadOnlyAccessors> ReadOnlyMapBase;
|
||||
|
||||
public:
|
||||
typedef MapBase<Derived, ReadOnlyAccessors> Base;
|
||||
|
||||
typedef typename Base::Scalar Scalar;
|
||||
typedef typename Base::PacketScalar PacketScalar;
|
||||
typedef typename Base::StorageIndex StorageIndex;
|
||||
typedef typename Base::PointerType PointerType;
|
||||
|
||||
using Base::coeff;
|
||||
using Base::coeffRef;
|
||||
using Base::cols;
|
||||
using Base::derived;
|
||||
using Base::rows;
|
||||
using Base::size;
|
||||
|
||||
using Base::colStride;
|
||||
using Base::innerStride;
|
||||
using Base::outerStride;
|
||||
using Base::rowStride;
|
||||
|
||||
typedef std::conditional_t<internal::is_lvalue<Derived>::value, Scalar, const Scalar> ScalarWithConstIfNotLvalue;
|
||||
|
||||
EIGEN_DEVICE_FUNC constexpr const Scalar* data() const { return this->m_data; }
|
||||
EIGEN_DEVICE_FUNC constexpr ScalarWithConstIfNotLvalue* data() {
|
||||
return this->m_data;
|
||||
} // no const-cast here so non-const-correct code will give a compile error
|
||||
|
||||
EIGEN_DEVICE_FUNC inline ScalarWithConstIfNotLvalue& coeffRef(Index row, Index col) {
|
||||
return this->m_data[col * colStride() + row * rowStride()];
|
||||
}
|
||||
|
||||
EIGEN_DEVICE_FUNC inline ScalarWithConstIfNotLvalue& coeffRef(Index index) {
|
||||
EIGEN_STATIC_ASSERT_INDEX_BASED_ACCESS(Derived)
|
||||
return this->m_data[index * innerStride()];
|
||||
}
|
||||
|
||||
template <int StoreMode>
|
||||
inline void writePacket(Index row, Index col, const PacketScalar& val) {
|
||||
internal::pstoret<Scalar, PacketScalar, StoreMode>(this->m_data + (col * colStride() + row * rowStride()), val);
|
||||
}
|
||||
|
||||
template <int StoreMode>
|
||||
inline void writePacket(Index index, const PacketScalar& val) {
|
||||
EIGEN_STATIC_ASSERT_INDEX_BASED_ACCESS(Derived)
|
||||
internal::pstoret<Scalar, PacketScalar, StoreMode>(this->m_data + index * innerStride(), val);
|
||||
}
|
||||
|
||||
EIGEN_DEVICE_FUNC explicit inline MapBase(PointerType dataPtr) : Base(dataPtr) {}
|
||||
EIGEN_DEVICE_FUNC inline MapBase(PointerType dataPtr, Index vecSize) : Base(dataPtr, vecSize) {}
|
||||
EIGEN_DEVICE_FUNC inline MapBase(PointerType dataPtr, Index rows, Index cols) : Base(dataPtr, rows, cols) {}
|
||||
|
||||
EIGEN_DEVICE_FUNC Derived& operator=(const MapBase& other) {
|
||||
ReadOnlyMapBase::Base::operator=(other);
|
||||
return derived();
|
||||
}
|
||||
|
||||
// In theory we could simply refer to Base::Base::operator=, but MSVC does not like Base::Base,
|
||||
// see bugs 821 and 920.
|
||||
using ReadOnlyMapBase::Base::operator=;
|
||||
|
||||
protected:
|
||||
EIGEN_DEFAULT_COPY_CONSTRUCTOR(MapBase)
|
||||
EIGEN_DEFAULT_EMPTY_CONSTRUCTOR_AND_DESTRUCTOR(MapBase)
|
||||
};
|
||||
|
||||
#undef EIGEN_STATIC_ASSERT_INDEX_BASED_ACCESS
|
||||
|
||||
} // end namespace Eigen
|
||||
|
||||
#endif // EIGEN_MAPBASE_H
|
||||
2105
2025.09.22_cpp_with_eigen_package/Eigen/src/Core/MathFunctions.h
Normal file
File diff suppressed because it is too large
263
2025.09.22_cpp_with_eigen_package/Eigen/src/Core/MathFunctionsImpl.h
Normal file
@@ -0,0 +1,263 @@
|
||||
// This file is part of Eigen, a lightweight C++ template library
|
||||
// for linear algebra.
|
||||
//
|
||||
// Copyright (C) 2014 Pedro Gonnet (pedro.gonnet@gmail.com)
|
||||
// Copyright (C) 2016 Gael Guennebaud <gael.guennebaud@inria.fr>
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla
|
||||
// Public License v. 2.0. If a copy of the MPL was not distributed
|
||||
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
#ifndef EIGEN_MATHFUNCTIONSIMPL_H
|
||||
#define EIGEN_MATHFUNCTIONSIMPL_H
|
||||
|
||||
// IWYU pragma: private
|
||||
#include "./InternalHeaderCheck.h"
|
||||
|
||||
namespace Eigen {
|
||||
|
||||
namespace internal {
|
||||
|
||||
/** \internal Fast reciprocal using Newton-Raphson's method.
|
||||
|
||||
Preconditions:
|
||||
1. The starting guess provided in approx_a_recip must have at least half
|
||||
the leading mantissa bits in the correct result, such that a single
|
||||
Newton-Raphson step is sufficient to get within 1-2 ulps of the correct
|
||||
result.
|
||||
2. If a is zero, approx_a_recip must be infinite with the same sign as a.
|
||||
3. If a is infinite, approx_a_recip must be zero with the same sign as a.
|
||||
|
||||
If the preconditions are satisfied, which they are for the _*_rcp_ps
|
||||
instructions on x86, the result has a maximum relative error of 2 ulps,
|
||||
and correctly handles reciprocals of zero, infinity, and NaN.
|
||||
*/
|
||||
template <typename Packet, int Steps>
|
||||
struct generic_reciprocal_newton_step {
|
||||
static_assert(Steps > 0, "Steps must be at least 1.");
|
||||
EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Packet run(const Packet& a, const Packet& approx_a_recip) {
|
||||
using Scalar = typename unpacket_traits<Packet>::type;
|
||||
const Packet two = pset1<Packet>(Scalar(2));
|
||||
// Refine the approximation using one Newton-Raphson step:
|
||||
// x_{i} = x_{i-1} * (2 - a * x_{i-1})
|
||||
const Packet x = generic_reciprocal_newton_step<Packet, Steps - 1>::run(a, approx_a_recip);
|
||||
const Packet tmp = pnmadd(a, x, two);
|
||||
// If tmp is NaN, it means that a is either +/-0 or +/-Inf.
|
||||
// In this case return the approximation directly.
|
||||
const Packet is_not_nan = pcmp_eq(tmp, tmp);
|
||||
return pselect(is_not_nan, pmul(x, tmp), x);
|
||||
}
|
||||
};
|
||||
|
||||
template <typename Packet>
|
||||
struct generic_reciprocal_newton_step<Packet, 0> {
|
||||
EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Packet run(const Packet& /*unused*/, const Packet& approx_rsqrt) {
|
||||
return approx_rsqrt;
|
||||
}
|
||||
};
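A scalar illustration of the refinement formula (a throw-away sketch, not part of the header): starting from a guess with roughly half the correct mantissa bits, one step of x_i = x_{i-1} * (2 - a * x_{i-1}) roughly squares the relative error.

    #include <cstdio>

    int main() {
      double a = 3.0;
      double x = 0.33;            // crude guess for 1/3
      x = x * (2.0 - a * x);      // one Newton-Raphson step
      std::printf("%.10f\n", x);  // 0.3333000000: error drops from ~3e-3 to ~3e-5
      return 0;
    }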
|
||||
|
||||
/** \internal Fast reciprocal sqrt using Newton-Raphson's method.
|
||||
|
||||
Preconditions:
|
||||
1. The starting guess provided in approx_rsqrt must have at least half
the leading mantissa bits in the correct result, such that a single
Newton-Raphson step is sufficient to get within 1-2 ulps of the correct
result.
2. If a is zero, approx_rsqrt must be infinite with the same sign as a.
3. If a is infinite, approx_rsqrt must be zero with the same sign as a.

If the preconditions are satisfied, which they are for the _*_rsqrt_ps
instructions on x86, the result has a maximum relative error of 2 ulps,
and correctly handles zero, infinity, and NaN. Positive denormals are
treated as zero.
|
||||
*/
|
||||
template <typename Packet, int Steps>
|
||||
struct generic_rsqrt_newton_step {
|
||||
static_assert(Steps > 0, "Steps must be at least 1.");
|
||||
using Scalar = typename unpacket_traits<Packet>::type;
|
||||
EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Packet run(const Packet& a, const Packet& approx_rsqrt) {
|
||||
const Scalar kMinusHalf = Scalar(-1) / Scalar(2);
|
||||
const Packet cst_minus_half = pset1<Packet>(kMinusHalf);
|
||||
const Packet cst_minus_one = pset1<Packet>(Scalar(-1));
|
||||
|
||||
Packet inv_sqrt = approx_rsqrt;
|
||||
for (int step = 0; step < Steps; ++step) {
|
||||
// Refine the approximation using one Newton-Raphson step:
|
||||
// h_n = (x * inv_sqrt) * inv_sqrt - 1 (so that h_n is nearly 0).
|
||||
// inv_sqrt = inv_sqrt - 0.5 * inv_sqrt * h_n
|
||||
Packet r2 = pmul(a, inv_sqrt);
|
||||
Packet half_r = pmul(inv_sqrt, cst_minus_half);
|
||||
Packet h_n = pmadd(r2, inv_sqrt, cst_minus_one);
|
||||
inv_sqrt = pmadd(half_r, h_n, inv_sqrt);
|
||||
}
|
||||
|
||||
// If inv_sqrt is NaN, then either:
|
||||
// 1) the input is NaN
|
||||
// 2) zero and infinity were multiplied
|
||||
// In either of these cases, return approx_rsqrt
|
||||
return pselect(pisnan(inv_sqrt), approx_rsqrt, inv_sqrt);
|
||||
}
|
||||
};
|
||||
|
||||
template <typename Packet>
|
||||
struct generic_rsqrt_newton_step<Packet, 0> {
|
||||
EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Packet run(const Packet& /*unused*/, const Packet& approx_rsqrt) {
|
||||
return approx_rsqrt;
|
||||
}
|
||||
};
|
||||
|
||||
/** \internal Fast sqrt using Newton-Raphson's method.
|
||||
|
||||
Preconditions:
|
||||
1. The starting guess for the reciprocal sqrt provided in approx_rsqrt must
|
||||
have at least half the leading mantissa bits in the correct result, such
|
||||
that a single Newton-Raphson step is sufficient to get within 1-2 ulps of
|
||||
the correct result.
|
||||
2. If a is zero, approx_rsqrt must be infinite.
|
||||
3. If a is infinite, approx_rsqrt must be zero.
|
||||
|
||||
If the preconditions are satisfied, which they are for the _*_rsqrt_ps
|
||||
instructions on x86, the result has a maximum relative error of 2 ulps,
|
||||
and correctly handles zero, infinity, and NaN. Positive denormal inputs
|
||||
are treated as zero.
|
||||
*/
|
||||
template <typename Packet, int Steps = 1>
|
||||
struct generic_sqrt_newton_step {
|
||||
static_assert(Steps > 0, "Steps must be at least 1.");
|
||||
|
||||
EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Packet run(const Packet& a, const Packet& approx_rsqrt) {
|
||||
using Scalar = typename unpacket_traits<Packet>::type;
|
||||
const Packet one_point_five = pset1<Packet>(Scalar(1.5));
|
||||
const Packet minus_half = pset1<Packet>(Scalar(-0.5));
|
||||
// If a is inf or zero, return a directly.
|
||||
const Packet inf_mask = pcmp_eq(a, pset1<Packet>(NumTraits<Scalar>::infinity()));
|
||||
const Packet return_a = por(pcmp_eq(a, pzero(a)), inf_mask);
|
||||
// Do a single step of Newton's iteration for reciprocal square root:
|
||||
// x_{n+1} = x_n * (1.5 + (-0.5 * x_n) * (a * x_n))).
|
||||
// The Newton's step is computed this way to avoid over/under-flows.
|
||||
Packet rsqrt = pmul(approx_rsqrt, pmadd(pmul(minus_half, approx_rsqrt), pmul(a, approx_rsqrt), one_point_five));
|
||||
for (int step = 1; step < Steps; ++step) {
|
||||
rsqrt = pmul(rsqrt, pmadd(pmul(minus_half, rsqrt), pmul(a, rsqrt), one_point_five));
|
||||
}
|
||||
|
||||
// Return sqrt(x) = x * rsqrt(x) for non-zero finite positive arguments.
|
||||
// Return a itself for 0 or +inf, NaN for negative arguments.
|
||||
return pselect(return_a, a, pmul(a, rsqrt));
|
||||
}
|
||||
};
|
||||
|
||||
template <typename RealScalar>
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE RealScalar positive_real_hypot(const RealScalar& x, const RealScalar& y) {
|
||||
// IEEE 754 (IEC 60559) special cases.
|
||||
if ((numext::isinf)(x) || (numext::isinf)(y)) return NumTraits<RealScalar>::infinity();
|
||||
if ((numext::isnan)(x) || (numext::isnan)(y)) return NumTraits<RealScalar>::quiet_NaN();
|
||||
|
||||
EIGEN_USING_STD(sqrt);
|
||||
RealScalar p, qp;
|
||||
p = numext::maxi(x, y);
|
||||
if (numext::is_exactly_zero(p)) return RealScalar(0);
|
||||
qp = numext::mini(y, x) / p;
|
||||
return p * sqrt(RealScalar(1) + qp * qp);
|
||||
}
|
||||
|
||||
template <typename Scalar>
|
||||
struct hypot_impl {
|
||||
typedef typename NumTraits<Scalar>::Real RealScalar;
|
||||
static EIGEN_DEVICE_FUNC inline RealScalar run(const Scalar& x, const Scalar& y) {
|
||||
EIGEN_USING_STD(abs);
|
||||
return positive_real_hypot<RealScalar>(abs(x), abs(y));
|
||||
}
|
||||
};
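The scaled form above exists to avoid overflow in the intermediate squares; a quick check (an illustrative sketch using Eigen's numext::hypot wrapper, which dispatches to hypot_impl for real scalars):

    #include <Eigen/Core>
    #include <cmath>
    #include <cstdio>

    int main() {
      double x = 3e200, y = 4e200;
      std::printf("%g\n", std::sqrt(x * x + y * y));    // inf: the squares overflow
      std::printf("%g\n", Eigen::numext::hypot(x, y));  // 5e+200: the scaled form stays finite
      return 0;
    }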
|
||||
|
||||
// Generic complex sqrt implementation that correctly handles corner cases
|
||||
// according to https://en.cppreference.com/w/cpp/numeric/complex/sqrt
|
||||
template <typename ComplexT>
|
||||
EIGEN_DEVICE_FUNC ComplexT complex_sqrt(const ComplexT& z) {
|
||||
// Computes the principal sqrt of the input.
|
||||
//
|
||||
// For the complex square root of the number x + i*y, we want to find real
// numbers u and v such that
//    (u + i*v)^2 = x + i*y  <=>
//    u^2 - v^2 + i*2*u*v = x + i*y.
|
||||
// By equating the real and imaginary parts we get:
|
||||
// u^2 - v^2 = x
|
||||
// 2*u*v = y.
|
||||
//
|
||||
// For x >= 0, this has the numerically stable solution
|
||||
// u = sqrt(0.5 * (x + sqrt(x^2 + y^2)))
|
||||
// v = y / (2 * u)
|
||||
// and for x < 0,
|
||||
// v = sign(y) * sqrt(0.5 * (-x + sqrt(x^2 + y^2)))
|
||||
// u = y / (2 * v)
|
||||
//
|
||||
// Letting w = sqrt(0.5 * (|x| + |z|)),
|
||||
// if x == 0: u = w, v = sign(y) * w
|
||||
// if x > 0: u = w, v = y / (2 * w)
|
||||
// if x < 0: u = |y| / (2 * w), v = sign(y) * w
|
||||
using T = typename NumTraits<ComplexT>::Real;
|
||||
const T x = numext::real(z);
|
||||
const T y = numext::imag(z);
|
||||
const T zero = T(0);
|
||||
const T w = numext::sqrt(T(0.5) * (numext::abs(x) + numext::hypot(x, y)));
|
||||
|
||||
return (numext::isinf)(y) ? ComplexT(NumTraits<T>::infinity(), y)
|
||||
: numext::is_exactly_zero(x) ? ComplexT(w, y < zero ? -w : w)
|
||||
: x > zero ? ComplexT(w, y / (2 * w))
|
||||
: ComplexT(numext::abs(y) / (2 * w), y < zero ? -w : w);
|
||||
}
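The branch table above can be spot-checked against the standard library, whose corner-case behaviour this routine is meant to match (see the cppreference link above); a tiny sketch for the x < 0 branch:

    #include <complex>
    #include <cstdio>

    int main() {
      std::complex<double> r = std::sqrt(std::complex<double>(-4.0, 0.0));
      std::printf("%g %g\n", r.real(), r.imag());  // 0 2: the principal square root of -4 is 2i
      return 0;
    }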
|
||||
|
||||
// Generic complex rsqrt implementation.
|
||||
template <typename ComplexT>
|
||||
EIGEN_DEVICE_FUNC ComplexT complex_rsqrt(const ComplexT& z) {
|
||||
// Computes the principal reciprocal sqrt of the input.
|
||||
//
|
||||
// For the complex reciprocal square root of the number z = x + i*y, we want to
// find real numbers u and v such that
//    (u + i*v)^2 = 1 / (x + i*y)  <=>
//    u^2 - v^2 + i*2*u*v = x/|z|^2 - i*y/|z|^2.
// By equating the real and imaginary parts we get:
//    u^2 - v^2 = x/|z|^2
//    2*u*v = -y/|z|^2.
|
||||
//
|
||||
// For x >= 0, this has the numerically stable solution
|
||||
// u = sqrt(0.5 * (x + |z|)) / |z|
|
||||
// v = -y / (2 * u * |z|)
|
||||
// and for x < 0,
|
||||
// v = -sign(y) * sqrt(0.5 * (-x + |z|)) / |z|
|
||||
// u = -y / (2 * v * |z|)
|
||||
//
|
||||
// Letting w = sqrt(0.5 * (|x| + |z|)),
|
||||
// if x == 0: u = w / |z|, v = -sign(y) * w / |z|
|
||||
// if x > 0: u = w / |z|, v = -y / (2 * w * |z|)
|
||||
// if x < 0: u = |y| / (2 * w * |z|), v = -sign(y) * w / |z|
|
||||
using T = typename NumTraits<ComplexT>::Real;
|
||||
const T x = numext::real(z);
|
||||
const T y = numext::imag(z);
|
||||
const T zero = T(0);
|
||||
|
||||
const T abs_z = numext::hypot(x, y);
|
||||
const T w = numext::sqrt(T(0.5) * (numext::abs(x) + abs_z));
|
||||
const T woz = w / abs_z;
|
||||
// Corner cases consistent with 1/sqrt(z) on gcc/clang.
|
||||
return numext::is_exactly_zero(abs_z) ? ComplexT(NumTraits<T>::infinity(), NumTraits<T>::quiet_NaN())
|
||||
: ((numext::isinf)(x) || (numext::isinf)(y)) ? ComplexT(zero, zero)
|
||||
: numext::is_exactly_zero(x) ? ComplexT(woz, y < zero ? woz : -woz)
|
||||
: x > zero ? ComplexT(woz, -y / (2 * w * abs_z))
|
||||
: ComplexT(numext::abs(y) / (2 * w * abs_z), y < zero ? woz : -woz);
|
||||
}
|
||||
|
||||
template <typename ComplexT>
|
||||
EIGEN_DEVICE_FUNC ComplexT complex_log(const ComplexT& z) {
|
||||
// Computes complex log.
|
||||
using T = typename NumTraits<ComplexT>::Real;
|
||||
T a = numext::abs(z);
|
||||
EIGEN_USING_STD(atan2);
|
||||
T b = atan2(z.imag(), z.real());
|
||||
return ComplexT(numext::log(a), b);
|
||||
}
|
||||
|
||||
} // end namespace internal
|
||||
|
||||
} // end namespace Eigen
|
||||
|
||||
#endif // EIGEN_MATHFUNCTIONSIMPL_H
|
||||
534
2025.09.22_cpp_with_eigen_package/Eigen/src/Core/Matrix.h
Normal file
@@ -0,0 +1,534 @@
|
||||
// This file is part of Eigen, a lightweight C++ template library
|
||||
// for linear algebra.
|
||||
//
|
||||
// Copyright (C) 2006-2010 Benoit Jacob <jacob.benoit.1@gmail.com>
|
||||
// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla
|
||||
// Public License v. 2.0. If a copy of the MPL was not distributed
|
||||
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
#ifndef EIGEN_MATRIX_H
|
||||
#define EIGEN_MATRIX_H
|
||||
|
||||
// IWYU pragma: private
|
||||
#include "./InternalHeaderCheck.h"
|
||||
|
||||
namespace Eigen {
|
||||
|
||||
namespace internal {
|
||||
template <typename Scalar_, int Rows_, int Cols_, int Options_, int MaxRows_, int MaxCols_>
|
||||
struct traits<Matrix<Scalar_, Rows_, Cols_, Options_, MaxRows_, MaxCols_>> {
|
||||
private:
|
||||
constexpr static int size = internal::size_at_compile_time(Rows_, Cols_);
|
||||
typedef typename find_best_packet<Scalar_, size>::type PacketScalar;
|
||||
enum {
|
||||
row_major_bit = Options_ & RowMajor ? RowMajorBit : 0,
|
||||
is_dynamic_size_storage = MaxRows_ == Dynamic || MaxCols_ == Dynamic,
|
||||
max_size = is_dynamic_size_storage ? Dynamic : MaxRows_ * MaxCols_,
|
||||
default_alignment = compute_default_alignment<Scalar_, max_size>::value,
|
||||
actual_alignment = ((Options_ & DontAlign) == 0) ? default_alignment : 0,
|
||||
required_alignment = unpacket_traits<PacketScalar>::alignment,
|
||||
packet_access_bit = (packet_traits<Scalar_>::Vectorizable &&
|
||||
(EIGEN_UNALIGNED_VECTORIZE || (int(actual_alignment) >= int(required_alignment))))
|
||||
? PacketAccessBit
|
||||
: 0
|
||||
};
|
||||
|
||||
public:
|
||||
typedef Scalar_ Scalar;
|
||||
typedef Dense StorageKind;
|
||||
typedef Eigen::Index StorageIndex;
|
||||
typedef MatrixXpr XprKind;
|
||||
enum {
|
||||
RowsAtCompileTime = Rows_,
|
||||
ColsAtCompileTime = Cols_,
|
||||
MaxRowsAtCompileTime = MaxRows_,
|
||||
MaxColsAtCompileTime = MaxCols_,
|
||||
Flags = compute_matrix_flags(Options_),
|
||||
Options = Options_,
|
||||
InnerStrideAtCompileTime = 1,
|
||||
OuterStrideAtCompileTime = (int(Options) & int(RowMajor)) ? ColsAtCompileTime : RowsAtCompileTime,
|
||||
|
||||
// FIXME, the following flag in only used to define NeedsToAlign in PlainObjectBase
|
||||
EvaluatorFlags = LinearAccessBit | DirectAccessBit | packet_access_bit | row_major_bit,
|
||||
Alignment = actual_alignment
|
||||
};
|
||||
};
|
||||
} // namespace internal
|
||||
|
||||
/** \class Matrix
|
||||
* \ingroup Core_Module
|
||||
*
|
||||
* \brief The matrix class, also used for vectors and row-vectors
|
||||
*
|
||||
* The %Matrix class is the work-horse for all \em dense (\ref dense "note") matrices and vectors within Eigen.
|
||||
* Vectors are matrices with one column, and row-vectors are matrices with one row.
|
||||
*
|
||||
* The %Matrix class encompasses \em both fixed-size and dynamic-size objects (\ref fixedsize "note").
|
||||
*
|
||||
* The first three template parameters are required:
|
||||
* \tparam Scalar_ Numeric type, e.g. float, double, int or std::complex<float>.
|
||||
* User defined scalar types are supported as well (see \ref user_defined_scalars "here").
|
||||
* \tparam Rows_ Number of rows, or \b Dynamic
|
||||
* \tparam Cols_ Number of columns, or \b Dynamic
|
||||
*
|
||||
* The remaining template parameters are optional -- in most cases you don't have to worry about them.
|
||||
* \tparam Options_ A combination of either \b #RowMajor or \b #ColMajor, and of either
|
||||
* \b #AutoAlign or \b #DontAlign.
|
||||
* The former controls \ref TopicStorageOrders "storage order", and defaults to column-major. The latter
|
||||
* controls alignment, which is required for vectorization. It defaults to aligning matrices except for fixed sizes that
* aren't a multiple of the packet size.
* \tparam MaxRows_ Maximum number of rows. Defaults to \a Rows_ (\ref maxrows "note").
* \tparam MaxCols_ Maximum number of columns. Defaults to \a Cols_ (\ref maxrows "note").
|
||||
*
|
||||
* Eigen provides a number of typedefs covering the usual cases. Here are some examples:
|
||||
*
|
||||
* \li \c Matrix2d is a 2x2 square matrix of doubles (\c Matrix<double, 2, 2>)
|
||||
* \li \c Vector4f is a vector of 4 floats (\c Matrix<float, 4, 1>)
|
||||
* \li \c RowVector3i is a row-vector of 3 ints (\c Matrix<int, 1, 3>)
|
||||
*
|
||||
* \li \c MatrixXf is a dynamic-size matrix of floats (\c Matrix<float, Dynamic, Dynamic>)
|
||||
* \li \c VectorXf is a dynamic-size vector of floats (\c Matrix<float, Dynamic, 1>)
|
||||
*
|
||||
* \li \c Matrix2Xf is a partially fixed-size (dynamic-size) matrix of floats (\c Matrix<float, 2, Dynamic>)
|
||||
* \li \c MatrixX3d is a partially dynamic-size (fixed-size) matrix of double (\c Matrix<double, Dynamic, 3>)
|
||||
*
|
||||
* See \link matrixtypedefs this page \endlink for a complete list of predefined \em %Matrix and \em Vector typedefs.
|
||||
*
|
||||
* You can access elements of vectors and matrices using normal subscripting:
|
||||
*
|
||||
* \code
|
||||
* Eigen::VectorXd v(10);
|
||||
* v[0] = 0.1;
|
||||
* v[1] = 0.2;
|
||||
* v(0) = 0.3;
|
||||
* v(1) = 0.4;
|
||||
*
|
||||
* Eigen::MatrixXi m(10, 10);
|
||||
* m(0, 1) = 1;
|
||||
* m(0, 2) = 2;
|
||||
* m(0, 3) = 3;
|
||||
* \endcode
|
||||
*
|
||||
* This class can be extended with the help of the plugin mechanism described on the page
|
||||
* \ref TopicCustomizing_Plugins by defining the preprocessor symbol \c EIGEN_MATRIX_PLUGIN.
|
||||
*
|
||||
* <i><b>Some notes:</b></i>
|
||||
*
|
||||
* <dl>
|
||||
* <dt><b>\anchor dense Dense versus sparse:</b></dt>
|
||||
* <dd>This %Matrix class handles dense, not sparse matrices and vectors. For sparse matrices and vectors, see the
|
||||
* Sparse module.
|
||||
*
|
||||
* Dense matrices and vectors are plain usual arrays of coefficients. All the coefficients are stored, in an ordinary
|
||||
* contiguous array. This is unlike Sparse matrices and vectors where the coefficients are stored as a list of nonzero
|
||||
* coefficients.</dd>
|
||||
*
|
||||
* <dt><b>\anchor fixedsize Fixed-size versus dynamic-size:</b></dt>
|
||||
* <dd>Fixed-size means that the numbers of rows and columns are known at compile-time. In this case, Eigen allocates
|
||||
* the array of coefficients as a fixed-size array, as a class member. This makes sense for very small matrices,
|
||||
* typically up to 4x4, sometimes up to 16x16. Larger matrices should be declared as dynamic-size even if one happens to
|
||||
* know their size at compile-time.
|
||||
*
|
||||
* Dynamic-size means that the numbers of rows or columns are not necessarily known at compile-time. In this case they
|
||||
* are runtime variables, and the array of coefficients is allocated dynamically on the heap.
|
||||
*
|
||||
* Note that \em dense matrices, be they Fixed-size or Dynamic-size, <em>do not</em> expand dynamically in the sense of
|
||||
* a std::map. If you want this behavior, see the Sparse module.</dd>
|
||||
*
|
||||
* <dt><b>\anchor maxrows MaxRows_ and MaxCols_:</b></dt>
|
||||
* <dd>In most cases, one just leaves these parameters to the default values.
|
||||
* These parameters mean the maximum size of rows and columns that the matrix may have. They are useful in cases
|
||||
* when the exact numbers of rows and columns are not known at compile-time, but it is known at compile-time that they
|
||||
* cannot exceed a certain value. This happens when taking dynamic-size blocks inside fixed-size matrices: in this case
|
||||
* MaxRows_ and MaxCols_ are the dimensions of the original matrix, while Rows_ and Cols_ are Dynamic.</dd>
|
||||
* </dl>
|
||||
*
|
||||
* <i><b>ABI and storage layout</b></i>
|
||||
*
|
||||
* The table below summarizes the ABI of some possible Matrix instances, which is fixed throughout the lifetime of Eigen 3.
|
||||
* <table class="manual">
|
||||
* <tr><th>Matrix type</th><th>Equivalent C structure</th></tr>
|
||||
* <tr><td>\code Matrix<T,Dynamic,Dynamic> \endcode</td><td>\code
|
||||
* struct {
|
||||
* T *data; // with (size_t(data)%EIGEN_MAX_ALIGN_BYTES)==0
|
||||
* Eigen::Index rows, cols;
|
||||
* };
|
||||
* \endcode</td></tr>
|
||||
* <tr class="alt"><td>\code
|
||||
* Matrix<T,Dynamic,1>
|
||||
* Matrix<T,1,Dynamic> \endcode</td><td>\code
|
||||
* struct {
|
||||
* T *data; // with (size_t(data)%EIGEN_MAX_ALIGN_BYTES)==0
|
||||
* Eigen::Index size;
|
||||
* };
|
||||
* \endcode</td></tr>
|
||||
* <tr><td>\code Matrix<T,Rows,Cols> \endcode</td><td>\code
|
||||
* struct {
|
||||
* T data[Rows*Cols]; // with (size_t(data)%A(Rows*Cols*sizeof(T)))==0
|
||||
* };
|
||||
* \endcode</td></tr>
|
||||
* <tr class="alt"><td>\code Matrix<T,Dynamic,Dynamic,0,MaxRows,MaxCols> \endcode</td><td>\code
|
||||
* struct {
|
||||
* T data[MaxRows*MaxCols]; // with (size_t(data)%A(MaxRows*MaxCols*sizeof(T)))==0
|
||||
* Eigen::Index rows, cols;
|
||||
* };
|
||||
* \endcode</td></tr>
|
||||
* </table>
|
||||
* Note that in this table Rows, Cols, MaxRows and MaxCols are all positive integers. A(S) is defined as the largest
* possible power of two not larger than EIGEN_MAX_STATIC_ALIGN_BYTES.
|
||||
*
|
||||
* \see MatrixBase for the majority of the API methods for matrices, \ref TopicClassHierarchy,
|
||||
* \ref TopicStorageOrders
|
||||
*/
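A short sketch tying the typedefs and the fixed/dynamic distinction together (illustrative only; the sizes are arbitrary):

    #include <Eigen/Dense>
    #include <iostream>

    int main() {
      Eigen::Matrix3d fixed = Eigen::Matrix3d::Identity();  // 3x3, coefficients stored inline
      Eigen::MatrixXd dyn(2, 5);                            // 2x5, coefficients on the heap
      dyn.setZero();
      dyn.resize(4, 3);                                     // only dynamic-size objects can be resized
      std::cout << fixed.rows() << "x" << fixed.cols() << ", "
                << dyn.rows() << "x" << dyn.cols() << "\n";
      return 0;
    }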
|
||||
|
||||
template <typename Scalar_, int Rows_, int Cols_, int Options_, int MaxRows_, int MaxCols_>
|
||||
class Matrix : public PlainObjectBase<Matrix<Scalar_, Rows_, Cols_, Options_, MaxRows_, MaxCols_>> {
|
||||
public:
|
||||
/** \brief Base class typedef.
|
||||
* \sa PlainObjectBase
|
||||
*/
|
||||
typedef PlainObjectBase<Matrix> Base;
|
||||
|
||||
enum { Options = Options_ };
|
||||
|
||||
EIGEN_DENSE_PUBLIC_INTERFACE(Matrix)
|
||||
|
||||
typedef typename Base::PlainObject PlainObject;
|
||||
|
||||
using Base::base;
|
||||
using Base::coeffRef;
|
||||
|
||||
/**
|
||||
* \brief Assigns matrices to each other.
|
||||
*
|
||||
* \note This is a special case of the templated operator=. Its purpose is
|
||||
* to prevent a default operator= from hiding the templated operator=.
|
||||
*
|
||||
* \callgraph
|
||||
*/
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Matrix& operator=(const Matrix& other) { return Base::_set(other); }
|
||||
|
||||
/** \internal
|
||||
* \brief Copies the value of the expression \a other into \c *this with automatic resizing.
|
||||
*
|
||||
* *this might be resized to match the dimensions of \a other. If *this was a null matrix (not already initialized),
|
||||
* it will be initialized.
|
||||
*
|
||||
* Note that copying a row-vector into a vector (and conversely) is allowed.
|
||||
* The resizing, if any, is then done in the appropriate way so that row-vectors
|
||||
* remain row-vectors and vectors remain vectors.
|
||||
*/
|
||||
template <typename OtherDerived>
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Matrix& operator=(const DenseBase<OtherDerived>& other) {
|
||||
return Base::_set(other);
|
||||
}
|
||||
|
||||
/**
|
||||
* \brief Copies the generic expression \a other into *this.
|
||||
* \copydetails DenseBase::operator=(const EigenBase<OtherDerived> &other)
|
||||
*/
|
||||
template <typename OtherDerived>
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Matrix& operator=(const EigenBase<OtherDerived>& other) {
|
||||
return Base::operator=(other);
|
||||
}
|
||||
|
||||
template <typename OtherDerived>
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Matrix& operator=(const ReturnByValue<OtherDerived>& func) {
|
||||
return Base::operator=(func);
|
||||
}
|
||||
|
||||
/** \brief Default constructor.
|
||||
*
|
||||
* For fixed-size matrices, does nothing.
|
||||
*
|
||||
* For dynamic-size matrices, creates an empty matrix of size 0. Does not allocate any array. Such a matrix
|
||||
* is called a null matrix. This constructor is the unique way to create null matrices: resizing
|
||||
* a matrix to 0 is not supported.
|
||||
*
|
||||
* \sa resize(Index,Index)
|
||||
*/
|
||||
#if defined(EIGEN_INITIALIZE_COEFFS)
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Matrix() { EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED }
|
||||
#else
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Matrix() = default;
|
||||
#endif
|
||||
/** \brief Move constructor */
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Matrix(Matrix&&) = default;
|
||||
/** \brief Moves the matrix into the other one.
|
||||
*
|
||||
*/
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Matrix& operator=(Matrix&& other) noexcept(
|
||||
std::is_nothrow_move_assignable<Scalar>::value) {
|
||||
Base::operator=(std::move(other));
|
||||
return *this;
|
||||
}
|
||||
|
||||
* \brief Construct a row or column vector with fixed size from an arbitrary number of coefficients.
|
||||
*
|
||||
* \only_for_vectors
|
||||
*
|
||||
* This constructor is for 1D arrays or vectors with more than 4 coefficients.
|
||||
*
|
||||
* \warning To construct a column (resp. row) vector of fixed length, the number of values passed to this
|
||||
* constructor must match the fixed number of rows (resp. columns) of \c *this.
|
||||
*
|
||||
*
|
||||
* Example: \include Matrix_variadic_ctor_cxx11.cpp
|
||||
* Output: \verbinclude Matrix_variadic_ctor_cxx11.out
|
||||
*
|
||||
* \sa Matrix(const std::initializer_list<std::initializer_list<Scalar>>&)
|
||||
*/
|
||||
template <typename... ArgTypes>
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Matrix(const Scalar& a0, const Scalar& a1, const Scalar& a2, const Scalar& a3,
|
||||
const ArgTypes&... args)
|
||||
: Base(a0, a1, a2, a3, args...) {}
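  // Usage sketch for the variadic constructor above (illustrative only, not part of the Eigen
  // sources): every coefficient is listed in one call and the count must match the fixed size.
  //   Eigen::Matrix<int, 6, 1> v6(1, 2, 3, 4, 5, 6);            // fixed-size column vector
  //   Eigen::Matrix<float, 1, 5> r5(1.f, 2.f, 3.f, 4.f, 5.f);   // fixed-size row vector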
|
||||
|
||||
/** \brief Constructs a Matrix and initializes it from the coefficients given as initializer-lists grouped by row.
|
||||
* \cpp11
|
||||
* \anchor matrix_initializer_list
|
||||
*
|
||||
* In the general case, the constructor takes a list of rows, each row being represented as a list of coefficients:
|
||||
*
|
||||
* Example: \include Matrix_initializer_list_23_cxx11.cpp
|
||||
* Output: \verbinclude Matrix_initializer_list_23_cxx11.out
|
||||
*
|
||||
* Each of the inner initializer lists must contain the exact same number of elements, otherwise an assertion is
|
||||
* triggered.
|
||||
*
|
||||
* In the case of a compile-time column vector, implicit transposition from a single row is allowed.
|
||||
* Therefore <code>VectorXd{{1,2,3,4,5}}</code> is legal and the more verbose syntax
|
||||
* <code>RowVectorXd{{1},{2},{3},{4},{5}}</code> can be avoided:
|
||||
*
|
||||
* Example: \include Matrix_initializer_list_vector_cxx11.cpp
|
||||
* Output: \verbinclude Matrix_initializer_list_vector_cxx11.out
|
||||
*
|
||||
* In the case of fixed-sized matrices, the initializer list sizes must exactly match the matrix sizes,
|
||||
* and implicit transposition is allowed for compile-time vectors only.
|
||||
*
|
||||
* \sa Matrix(const Scalar& a0, const Scalar& a1, const Scalar& a2, const Scalar& a3, const ArgTypes&... args)
|
||||
*/
|
||||
EIGEN_DEVICE_FUNC explicit constexpr EIGEN_STRONG_INLINE Matrix(
|
||||
const std::initializer_list<std::initializer_list<Scalar>>& list)
|
||||
: Base(list) {}
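  // Usage sketch for the initializer-list constructor above (illustrative only, not part of the
  // Eigen sources): rows are given as nested braced lists; a compile-time column vector may be
  // written as a single row thanks to the implicit transposition described above.
  //   Eigen::Matrix<double, 2, 3> m{{1.0, 2.0, 3.0},
  //                                 {4.0, 5.0, 6.0}};
  //   Eigen::VectorXd v{{10.0, 20.0, 30.0}};   // dynamic-size column vector of size 3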
|
||||
|
||||
#ifndef EIGEN_PARSED_BY_DOXYGEN
|
||||
|
||||
// This constructor is for both 1x1 matrices and dynamic vectors
|
||||
template <typename T>
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit Matrix(const T& x) {
|
||||
Base::template _init1<T>(x);
|
||||
}
|
||||
|
||||
template <typename T0, typename T1>
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Matrix(const T0& x, const T1& y) {
|
||||
Base::template _init2<T0, T1>(x, y);
|
||||
}
|
||||
|
||||
#else
|
||||
/** \brief Constructs a fixed-sized matrix initialized with coefficients starting at \a data */
|
||||
EIGEN_DEVICE_FUNC explicit Matrix(const Scalar* data);
|
||||
|
||||
/** \brief Constructs a vector or row-vector with given dimension. \only_for_vectors
|
||||
*
|
||||
* This is useful for dynamic-size vectors. For fixed-size vectors,
|
||||
* it is redundant to pass these parameters, so one should use the default constructor
|
||||
* Matrix() instead.
|
||||
*
|
||||
* \warning This constructor is disabled for fixed-size \c 1x1 matrices. For instance,
|
||||
* calling Matrix<double,1,1>(1) will call the initialization constructor: Matrix(const Scalar&).
|
||||
* For fixed-size \c 1x1 matrices it is therefore recommended to use the default
|
||||
* constructor Matrix() instead, especially when using one of the non standard
|
||||
* \c EIGEN_INITIALIZE_MATRICES_BY_{ZERO,\c NAN} macros (see \ref TopicPreprocessorDirectives).
|
||||
*/
|
||||
EIGEN_STRONG_INLINE explicit Matrix(Index dim);
|
||||
/** \brief Constructs an initialized 1x1 matrix with the given coefficient
|
||||
* \sa Matrix(const Scalar&, const Scalar&, const Scalar&, const Scalar&, const ArgTypes&...) */
|
||||
Matrix(const Scalar& x);
|
||||
/** \brief Constructs an uninitialized matrix with \a rows rows and \a cols columns.
|
||||
*
|
||||
* This is useful for dynamic-size matrices. For fixed-size matrices,
|
||||
* it is redundant to pass these parameters, so one should use the default constructor
|
||||
* Matrix() instead.
|
||||
*
|
||||
* \warning This constructor is disabled for fixed-size \c 1x2 and \c 2x1 vectors. For instance,
|
||||
* calling Matrix2f(2,1) will call the initialization constructor: Matrix(const Scalar& x, const Scalar& y).
|
||||
* For fixed-size \c 1x2 or \c 2x1 vectors it is therefore recommended to use the default
|
||||
* constructor Matrix() instead, especially when using one of the non standard
|
||||
* \c EIGEN_INITIALIZE_MATRICES_BY_{ZERO,\c NAN} macros (see \ref TopicPreprocessorDirectives).
|
||||
*/
|
||||
EIGEN_DEVICE_FUNC Matrix(Index rows, Index cols);
|
||||
|
||||
/** \brief Constructs an initialized 2D vector with given coefficients
|
||||
* \sa Matrix(const Scalar&, const Scalar&, const Scalar&, const Scalar&, const ArgTypes&...) */
|
||||
Matrix(const Scalar& x, const Scalar& y);
|
||||
#endif // end EIGEN_PARSED_BY_DOXYGEN
|
||||
|
||||
/** \brief Constructs an initialized 3D vector with given coefficients
|
||||
* \sa Matrix(const Scalar&, const Scalar&, const Scalar&, const Scalar&, const ArgTypes&...)
|
||||
*/
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Matrix(const Scalar& x, const Scalar& y, const Scalar& z) {
|
||||
EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(Matrix, 3)
|
||||
m_storage.data()[0] = x;
|
||||
m_storage.data()[1] = y;
|
||||
m_storage.data()[2] = z;
|
||||
}
|
||||
/** \brief Constructs an initialized 4D vector with given coefficients
|
||||
* \sa Matrix(const Scalar&, const Scalar&, const Scalar&, const Scalar&, const ArgTypes&...)
|
||||
*/
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Matrix(const Scalar& x, const Scalar& y, const Scalar& z, const Scalar& w) {
|
||||
EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(Matrix, 4)
|
||||
m_storage.data()[0] = x;
|
||||
m_storage.data()[1] = y;
|
||||
m_storage.data()[2] = z;
|
||||
m_storage.data()[3] = w;
|
||||
}
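  // Usage sketch for the small fixed-size constructors above (illustrative only, not part of the
  // Eigen sources):
  //   Eigen::Vector3f p(1.0f, 2.0f, 3.0f);     // calls the (x, y, z) constructor
  //   Eigen::Vector4d q(1.0, 0.0, 0.0, 0.0);   // calls the (x, y, z, w) constructor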
|
||||
|
||||
/** \brief Copy constructor */
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Matrix(const Matrix&) = default;
|
||||
|
||||
/** \brief Copy constructor for generic expressions.
|
||||
* \sa MatrixBase::operator=(const EigenBase<OtherDerived>&)
|
||||
*/
|
||||
template <typename OtherDerived>
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Matrix(const EigenBase<OtherDerived>& other) : Base(other.derived()) {}
|
||||
|
||||
EIGEN_DEVICE_FUNC constexpr Index innerStride() const noexcept { return 1; }
|
||||
EIGEN_DEVICE_FUNC constexpr Index outerStride() const noexcept { return this->innerSize(); }
|
||||
|
||||
/////////// Geometry module ///////////
|
||||
|
||||
template <typename OtherDerived>
|
||||
EIGEN_DEVICE_FUNC explicit Matrix(const RotationBase<OtherDerived, ColsAtCompileTime>& r);
|
||||
template <typename OtherDerived>
|
||||
EIGEN_DEVICE_FUNC Matrix& operator=(const RotationBase<OtherDerived, ColsAtCompileTime>& r);
|
||||
|
||||
// allow to extend Matrix outside Eigen
|
||||
#ifdef EIGEN_MATRIX_PLUGIN
|
||||
#include EIGEN_MATRIX_PLUGIN
|
||||
#endif
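  // Sketch of the plugin mechanism above (illustrative only; "my_matrix_addons.h" and
  // sumOfCoeffs() are hypothetical names): a user may inject extra members by defining, before
  // including any Eigen header,
  //   #define EIGEN_MATRIX_PLUGIN "my_matrix_addons.h"
  // where the plugin file contains member declarations such as
  //   Scalar sumOfCoeffs() const { return this->sum(); }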
|
||||
|
||||
protected:
|
||||
template <typename Derived, typename OtherDerived, bool IsVector>
|
||||
friend struct internal::conservative_resize_like_impl;
|
||||
|
||||
using Base::m_storage;
|
||||
};
|
||||
|
||||
/** \defgroup matrixtypedefs Global matrix typedefs
|
||||
*
|
||||
* \ingroup Core_Module
|
||||
*
|
||||
* %Eigen defines several typedef shortcuts for most common matrix and vector types.
|
||||
*
|
||||
* The general patterns are the following:
|
||||
*
|
||||
* \c MatrixSizeType where \c Size can be \c 2,\c 3,\c 4 for fixed size square matrices or \c X for dynamic size,
|
||||
* and where \c Type can be \c i for integer, \c f for float, \c d for double, \c cf for complex float, \c cd
|
||||
* for complex double.
|
||||
*
|
||||
* For example, \c Matrix3d is a fixed-size 3x3 matrix type of doubles, and \c MatrixXf is a dynamic-size matrix of
|
||||
* floats.
|
||||
*
|
||||
* There are also \c VectorSizeType and \c RowVectorSizeType which are self-explanatory. For example, \c Vector4cf is
|
||||
* a fixed-size vector of 4 complex floats.
|
||||
*
|
||||
* With \cpp11, template alias are also defined for common sizes.
|
||||
* They follow the same pattern as above except that the scalar type suffix is replaced by a
|
||||
* template parameter, i.e.:
|
||||
* - `MatrixSize<Type>` where `Size` can be \c 2,\c 3,\c 4 for fixed size square matrices or \c X for dynamic size.
|
||||
* - `MatrixXSize<Type>` and `MatrixSizeX<Type>` where `Size` can be \c 2,\c 3,\c 4 for hybrid dynamic/fixed matrices.
|
||||
* - `VectorSize<Type>` and `RowVectorSize<Type>` for column and row vectors.
|
||||
*
|
||||
* With \cpp11, you can also use fully generic column and row vector types: `Vector<Type,Size>` and
|
||||
* `RowVector<Type,Size>`.
|
||||
*
|
||||
* \sa class Matrix
|
||||
*/
|
||||
|
||||
#define EIGEN_MAKE_TYPEDEFS(Type, TypeSuffix, Size, SizeSuffix) \
|
||||
/** \ingroup matrixtypedefs */ \
|
||||
/** \brief `Size`×`Size` matrix of type `Type`. */ \
|
||||
typedef Matrix<Type, Size, Size> Matrix##SizeSuffix##TypeSuffix; \
|
||||
/** \ingroup matrixtypedefs */ \
|
||||
/** \brief `Size`×`1` vector of type `Type`. */ \
|
||||
typedef Matrix<Type, Size, 1> Vector##SizeSuffix##TypeSuffix; \
|
||||
/** \ingroup matrixtypedefs */ \
|
||||
/** \brief `1`×`Size` vector of type `Type`. */ \
|
||||
typedef Matrix<Type, 1, Size> RowVector##SizeSuffix##TypeSuffix;
|
||||
|
||||
#define EIGEN_MAKE_FIXED_TYPEDEFS(Type, TypeSuffix, Size) \
|
||||
/** \ingroup matrixtypedefs */ \
|
||||
/** \brief `Size`×`Dynamic` matrix of type `Type`. */ \
|
||||
typedef Matrix<Type, Size, Dynamic> Matrix##Size##X##TypeSuffix; \
|
||||
/** \ingroup matrixtypedefs */ \
|
||||
/** \brief `Dynamic`×`Size` matrix of type `Type`. */ \
|
||||
typedef Matrix<Type, Dynamic, Size> Matrix##X##Size##TypeSuffix;
|
||||
|
||||
#define EIGEN_MAKE_TYPEDEFS_ALL_SIZES(Type, TypeSuffix) \
|
||||
EIGEN_MAKE_TYPEDEFS(Type, TypeSuffix, 2, 2) \
|
||||
EIGEN_MAKE_TYPEDEFS(Type, TypeSuffix, 3, 3) \
|
||||
EIGEN_MAKE_TYPEDEFS(Type, TypeSuffix, 4, 4) \
|
||||
EIGEN_MAKE_TYPEDEFS(Type, TypeSuffix, Dynamic, X) \
|
||||
EIGEN_MAKE_FIXED_TYPEDEFS(Type, TypeSuffix, 2) \
|
||||
EIGEN_MAKE_FIXED_TYPEDEFS(Type, TypeSuffix, 3) \
|
||||
EIGEN_MAKE_FIXED_TYPEDEFS(Type, TypeSuffix, 4)
|
||||
|
||||
EIGEN_MAKE_TYPEDEFS_ALL_SIZES(int, i)
|
||||
EIGEN_MAKE_TYPEDEFS_ALL_SIZES(float, f)
|
||||
EIGEN_MAKE_TYPEDEFS_ALL_SIZES(double, d)
|
||||
EIGEN_MAKE_TYPEDEFS_ALL_SIZES(std::complex<float>, cf)
|
||||
EIGEN_MAKE_TYPEDEFS_ALL_SIZES(std::complex<double>, cd)
|
||||
|
||||
#undef EIGEN_MAKE_TYPEDEFS_ALL_SIZES
|
||||
#undef EIGEN_MAKE_TYPEDEFS
|
||||
#undef EIGEN_MAKE_FIXED_TYPEDEFS
|
||||
|
||||
#define EIGEN_MAKE_TYPEDEFS(Size, SizeSuffix) \
|
||||
/** \ingroup matrixtypedefs */ \
|
||||
/** \brief \cpp11 `Size`×`Size` matrix of type `Type`.*/ \
|
||||
template <typename Type> \
|
||||
using Matrix##SizeSuffix = Matrix<Type, Size, Size>; \
|
||||
/** \ingroup matrixtypedefs */ \
|
||||
/** \brief \cpp11 `Size`×`1` vector of type `Type`.*/ \
|
||||
template <typename Type> \
|
||||
using Vector##SizeSuffix = Matrix<Type, Size, 1>; \
|
||||
/** \ingroup matrixtypedefs */ \
|
||||
/** \brief \cpp11 `1`×`Size` vector of type `Type`.*/ \
|
||||
template <typename Type> \
|
||||
using RowVector##SizeSuffix = Matrix<Type, 1, Size>;
|
||||
|
||||
#define EIGEN_MAKE_FIXED_TYPEDEFS(Size) \
|
||||
/** \ingroup matrixtypedefs */ \
|
||||
/** \brief \cpp11 `Size`×`Dynamic` matrix of type `Type` */ \
|
||||
template <typename Type> \
|
||||
using Matrix##Size##X = Matrix<Type, Size, Dynamic>; \
|
||||
/** \ingroup matrixtypedefs */ \
|
||||
/** \brief \cpp11 `Dynamic`×`Size` matrix of type `Type`. */ \
|
||||
template <typename Type> \
|
||||
using Matrix##X##Size = Matrix<Type, Dynamic, Size>;
|
||||
|
||||
EIGEN_MAKE_TYPEDEFS(2, 2)
|
||||
EIGEN_MAKE_TYPEDEFS(3, 3)
|
||||
EIGEN_MAKE_TYPEDEFS(4, 4)
|
||||
EIGEN_MAKE_TYPEDEFS(Dynamic, X)
|
||||
EIGEN_MAKE_FIXED_TYPEDEFS(2)
|
||||
EIGEN_MAKE_FIXED_TYPEDEFS(3)
|
||||
EIGEN_MAKE_FIXED_TYPEDEFS(4)
|
||||
|
||||
/** \ingroup matrixtypedefs
|
||||
* \brief \cpp11 `Size`×`1` vector of type `Type`. */
|
||||
template <typename Type, int Size>
|
||||
using Vector = Matrix<Type, Size, 1>;
|
||||
|
||||
/** \ingroup matrixtypedefs
|
||||
* \brief \cpp11 `1`×`Size` vector of type `Type`. */
|
||||
template <typename Type, int Size>
|
||||
using RowVector = Matrix<Type, 1, Size>;
|
||||
|
||||
#undef EIGEN_MAKE_TYPEDEFS
|
||||
#undef EIGEN_MAKE_FIXED_TYPEDEFS
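// Examples of the typedefs and aliases generated above (illustrative only, not part of the
// Eigen sources):
//   Eigen::Matrix3d A;             // fixed 3x3 matrix of double   (EIGEN_MAKE_TYPEDEFS)
//   Eigen::VectorXf x;             // dynamic-size column vector of float
//   Eigen::Matrix2Xd B(2, 10);     // 2 rows, dynamic columns      (EIGEN_MAKE_FIXED_TYPEDEFS)
//   Eigen::Vector4<int> u;         // C++11 alias, 4x1 of int
//   Eigen::Vector<float, 7> w;     // fully generic fixed-size column vector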
|
||||
|
||||
} // end namespace Eigen
|
||||
|
||||
#endif // EIGEN_MATRIX_H
542
2025.09.22_cpp_with_eigen_package/Eigen/src/Core/MatrixBase.h
Normal file
@@ -0,0 +1,542 @@
// This file is part of Eigen, a lightweight C++ template library
|
||||
// for linear algebra.
|
||||
//
|
||||
// Copyright (C) 2006-2009 Benoit Jacob <jacob.benoit.1@gmail.com>
|
||||
// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla
|
||||
// Public License v. 2.0. If a copy of the MPL was not distributed
|
||||
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
#ifndef EIGEN_MATRIXBASE_H
|
||||
#define EIGEN_MATRIXBASE_H
|
||||
|
||||
// IWYU pragma: private
|
||||
#include "./InternalHeaderCheck.h"
|
||||
|
||||
namespace Eigen {
|
||||
|
||||
/** \class MatrixBase
|
||||
* \ingroup Core_Module
|
||||
*
|
||||
* \brief Base class for all dense matrices, vectors, and expressions
|
||||
*
|
||||
* This class is the base that is inherited by all matrix, vector, and related expression
|
||||
* types. Most of the Eigen API is contained in this class and its base classes. Other important
* classes for the Eigen API are Matrix and VectorwiseOp.
|
||||
*
|
||||
* Note that some methods are defined in other modules such as the \ref LU_Module LU module
|
||||
* for all functions related to matrix inversions.
|
||||
*
|
||||
* \tparam Derived is the derived type, e.g. a matrix type, or an expression, etc.
|
||||
*
|
||||
* When writing a function taking Eigen objects as argument, if you want your function
|
||||
* to take as argument any matrix, vector, or expression, just let it take a
|
||||
* MatrixBase argument. As an example, here is a function printFirstRow which, given
|
||||
* a matrix, vector, or expression \a x, prints the first row of \a x.
|
||||
*
|
||||
* \code
|
||||
template<typename Derived>
|
||||
void printFirstRow(const Eigen::MatrixBase<Derived>& x)
|
||||
{
|
||||
cout << x.row(0) << endl;
|
||||
}
|
||||
* \endcode
|
||||
*
|
||||
* This class can be extended with the help of the plugin mechanism described on the page
|
||||
* \ref TopicCustomizing_Plugins by defining the preprocessor symbol \c EIGEN_MATRIXBASE_PLUGIN.
|
||||
*
|
||||
* \sa \blank \ref TopicClassHierarchy
|
||||
*/
|
||||
template <typename Derived>
|
||||
class MatrixBase : public DenseBase<Derived> {
|
||||
public:
|
||||
#ifndef EIGEN_PARSED_BY_DOXYGEN
|
||||
typedef MatrixBase StorageBaseType;
|
||||
typedef typename internal::traits<Derived>::StorageKind StorageKind;
|
||||
typedef typename internal::traits<Derived>::StorageIndex StorageIndex;
|
||||
typedef typename internal::traits<Derived>::Scalar Scalar;
|
||||
typedef typename internal::packet_traits<Scalar>::type PacketScalar;
|
||||
typedef typename NumTraits<Scalar>::Real RealScalar;
|
||||
|
||||
typedef DenseBase<Derived> Base;
|
||||
using Base::ColsAtCompileTime;
|
||||
using Base::Flags;
|
||||
using Base::IsVectorAtCompileTime;
|
||||
using Base::MaxColsAtCompileTime;
|
||||
using Base::MaxRowsAtCompileTime;
|
||||
using Base::MaxSizeAtCompileTime;
|
||||
using Base::RowsAtCompileTime;
|
||||
using Base::SizeAtCompileTime;
|
||||
|
||||
using Base::coeff;
|
||||
using Base::coeffRef;
|
||||
using Base::cols;
|
||||
using Base::const_cast_derived;
|
||||
using Base::derived;
|
||||
using Base::eval;
|
||||
using Base::lazyAssign;
|
||||
using Base::rows;
|
||||
using Base::size;
|
||||
using Base::operator-;
|
||||
using Base::operator+=;
|
||||
using Base::operator-=;
|
||||
using Base::operator*=;
|
||||
using Base::operator/=;
|
||||
|
||||
typedef typename Base::CoeffReturnType CoeffReturnType;
|
||||
typedef typename Base::ConstTransposeReturnType ConstTransposeReturnType;
|
||||
typedef typename Base::RowXpr RowXpr;
|
||||
typedef typename Base::ColXpr ColXpr;
|
||||
#endif // not EIGEN_PARSED_BY_DOXYGEN
|
||||
|
||||
#ifndef EIGEN_PARSED_BY_DOXYGEN
|
||||
/** type of the equivalent square matrix */
|
||||
typedef Matrix<Scalar, internal::max_size_prefer_dynamic(RowsAtCompileTime, ColsAtCompileTime),
|
||||
internal::max_size_prefer_dynamic(RowsAtCompileTime, ColsAtCompileTime)>
|
||||
SquareMatrixType;
|
||||
#endif // not EIGEN_PARSED_BY_DOXYGEN
|
||||
|
||||
/** \returns the size of the main diagonal, which is min(rows(),cols()).
|
||||
* \sa rows(), cols(), SizeAtCompileTime. */
|
||||
EIGEN_DEVICE_FUNC inline Index diagonalSize() const { return (numext::mini)(rows(), cols()); }
|
||||
|
||||
typedef typename Base::PlainObject PlainObject;
|
||||
|
||||
#ifndef EIGEN_PARSED_BY_DOXYGEN
|
||||
/** \internal Represents a matrix with all coefficients equal to one another*/
|
||||
typedef CwiseNullaryOp<internal::scalar_constant_op<Scalar>, PlainObject> ConstantReturnType;
|
||||
/** \internal the return type of MatrixBase::adjoint() */
|
||||
typedef std::conditional_t<NumTraits<Scalar>::IsComplex,
|
||||
CwiseUnaryOp<internal::scalar_conjugate_op<Scalar>, ConstTransposeReturnType>,
|
||||
ConstTransposeReturnType>
|
||||
AdjointReturnType;
|
||||
/** \internal Return type of eigenvalues() */
|
||||
typedef Matrix<internal::make_complex_t<Scalar>, internal::traits<Derived>::ColsAtCompileTime, 1, ColMajor>
|
||||
EigenvaluesReturnType;
|
||||
/** \internal the return type of identity */
|
||||
typedef CwiseNullaryOp<internal::scalar_identity_op<Scalar>, PlainObject> IdentityReturnType;
|
||||
/** \internal the return type of unit vectors */
|
||||
typedef Block<const CwiseNullaryOp<internal::scalar_identity_op<Scalar>, SquareMatrixType>,
|
||||
internal::traits<Derived>::RowsAtCompileTime, internal::traits<Derived>::ColsAtCompileTime>
|
||||
BasisReturnType;
|
||||
#endif // not EIGEN_PARSED_BY_DOXYGEN
|
||||
|
||||
#define EIGEN_CURRENT_STORAGE_BASE_CLASS Eigen::MatrixBase
|
||||
#define EIGEN_DOC_UNARY_ADDONS(X, Y)
|
||||
#include "../plugins/CommonCwiseBinaryOps.inc"
|
||||
#include "../plugins/MatrixCwiseUnaryOps.inc"
|
||||
#include "../plugins/MatrixCwiseBinaryOps.inc"
|
||||
#ifdef EIGEN_MATRIXBASE_PLUGIN
|
||||
#include EIGEN_MATRIXBASE_PLUGIN
|
||||
#endif
|
||||
#undef EIGEN_CURRENT_STORAGE_BASE_CLASS
|
||||
#undef EIGEN_DOC_UNARY_ADDONS
|
||||
|
||||
/** Special case of the template operator=, in order to prevent the compiler
|
||||
* from generating a default operator= (issue hit with g++ 4.1)
|
||||
*/
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& operator=(const MatrixBase& other);
|
||||
|
||||
// We cannot inherit here via Base::operator= since it is causing
|
||||
// trouble with MSVC.
|
||||
|
||||
template <typename OtherDerived>
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& operator=(const DenseBase<OtherDerived>& other);
|
||||
|
||||
template <typename OtherDerived>
|
||||
EIGEN_DEVICE_FUNC Derived& operator=(const EigenBase<OtherDerived>& other);
|
||||
|
||||
template <typename OtherDerived>
|
||||
EIGEN_DEVICE_FUNC Derived& operator=(const ReturnByValue<OtherDerived>& other);
|
||||
|
||||
template <typename OtherDerived>
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& operator+=(const MatrixBase<OtherDerived>& other);
|
||||
template <typename OtherDerived>
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& operator-=(const MatrixBase<OtherDerived>& other);
|
||||
|
||||
template <typename OtherDerived>
|
||||
EIGEN_DEVICE_FUNC const Product<Derived, OtherDerived> operator*(const MatrixBase<OtherDerived>& other) const;
|
||||
|
||||
template <typename OtherDerived>
|
||||
EIGEN_DEVICE_FUNC const Product<Derived, OtherDerived, LazyProduct> lazyProduct(
|
||||
const MatrixBase<OtherDerived>& other) const;
|
||||
|
||||
template <typename OtherDerived>
|
||||
Derived& operator*=(const EigenBase<OtherDerived>& other);
|
||||
|
||||
template <typename OtherDerived>
|
||||
void applyOnTheLeft(const EigenBase<OtherDerived>& other);
|
||||
|
||||
template <typename OtherDerived>
|
||||
void applyOnTheRight(const EigenBase<OtherDerived>& other);
|
||||
|
||||
template <typename DiagonalDerived>
|
||||
EIGEN_DEVICE_FUNC const Product<Derived, DiagonalDerived, LazyProduct> operator*(
|
||||
const DiagonalBase<DiagonalDerived>& diagonal) const;
|
||||
|
||||
template <typename SkewDerived>
|
||||
EIGEN_DEVICE_FUNC const Product<Derived, SkewDerived, LazyProduct> operator*(
|
||||
const SkewSymmetricBase<SkewDerived>& skew) const;
|
||||
|
||||
template <typename OtherDerived>
|
||||
EIGEN_DEVICE_FUNC typename ScalarBinaryOpTraits<typename internal::traits<Derived>::Scalar,
|
||||
typename internal::traits<OtherDerived>::Scalar>::ReturnType
|
||||
dot(const MatrixBase<OtherDerived>& other) const;
|
||||
|
||||
EIGEN_DEVICE_FUNC RealScalar squaredNorm() const;
|
||||
EIGEN_DEVICE_FUNC RealScalar norm() const;
|
||||
RealScalar stableNorm() const;
|
||||
RealScalar blueNorm() const;
|
||||
RealScalar hypotNorm() const;
|
||||
EIGEN_DEVICE_FUNC const PlainObject normalized() const;
|
||||
EIGEN_DEVICE_FUNC const PlainObject stableNormalized() const;
|
||||
EIGEN_DEVICE_FUNC void normalize();
|
||||
EIGEN_DEVICE_FUNC void stableNormalize();
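  // Usage sketch for the dot/norm members above (illustrative only, not part of the Eigen
  // sources):
  //   Eigen::Vector3d u(1.0, 0.0, 0.0), v(0.0, 1.0, 0.0);
  //   double d = u.dot(v);            // 0
  //   double n = (u + v).norm();      // sqrt(2)
  //   Eigen::Vector3d w = (u + v).normalized();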
|
||||
|
||||
EIGEN_DEVICE_FUNC const AdjointReturnType adjoint() const;
|
||||
EIGEN_DEVICE_FUNC void adjointInPlace();
|
||||
|
||||
typedef Diagonal<Derived> DiagonalReturnType;
|
||||
EIGEN_DEVICE_FUNC DiagonalReturnType diagonal();
|
||||
|
||||
typedef Diagonal<const Derived> ConstDiagonalReturnType;
|
||||
EIGEN_DEVICE_FUNC const ConstDiagonalReturnType diagonal() const;
|
||||
|
||||
template <int Index>
|
||||
EIGEN_DEVICE_FUNC Diagonal<Derived, Index> diagonal();
|
||||
|
||||
template <int Index>
|
||||
EIGEN_DEVICE_FUNC const Diagonal<const Derived, Index> diagonal() const;
|
||||
|
||||
EIGEN_DEVICE_FUNC Diagonal<Derived, DynamicIndex> diagonal(Index index);
|
||||
EIGEN_DEVICE_FUNC const Diagonal<const Derived, DynamicIndex> diagonal(Index index) const;
|
||||
|
||||
template <unsigned int Mode>
|
||||
struct TriangularViewReturnType {
|
||||
typedef TriangularView<Derived, Mode> Type;
|
||||
};
|
||||
template <unsigned int Mode>
|
||||
struct ConstTriangularViewReturnType {
|
||||
typedef const TriangularView<const Derived, Mode> Type;
|
||||
};
|
||||
|
||||
template <unsigned int Mode>
|
||||
EIGEN_DEVICE_FUNC typename TriangularViewReturnType<Mode>::Type triangularView();
|
||||
template <unsigned int Mode>
|
||||
EIGEN_DEVICE_FUNC typename ConstTriangularViewReturnType<Mode>::Type triangularView() const;
|
||||
|
||||
template <unsigned int UpLo>
|
||||
struct SelfAdjointViewReturnType {
|
||||
typedef SelfAdjointView<Derived, UpLo> Type;
|
||||
};
|
||||
template <unsigned int UpLo>
|
||||
struct ConstSelfAdjointViewReturnType {
|
||||
typedef const SelfAdjointView<const Derived, UpLo> Type;
|
||||
};
|
||||
|
||||
template <unsigned int UpLo>
|
||||
EIGEN_DEVICE_FUNC typename SelfAdjointViewReturnType<UpLo>::Type selfadjointView();
|
||||
template <unsigned int UpLo>
|
||||
EIGEN_DEVICE_FUNC typename ConstSelfAdjointViewReturnType<UpLo>::Type selfadjointView() const;
|
||||
|
||||
const SparseView<Derived> sparseView(
|
||||
const Scalar& m_reference = Scalar(0),
|
||||
const typename NumTraits<Scalar>::Real& m_epsilon = NumTraits<Scalar>::dummy_precision()) const;
|
||||
EIGEN_DEVICE_FUNC static const IdentityReturnType Identity();
|
||||
EIGEN_DEVICE_FUNC static const IdentityReturnType Identity(Index rows, Index cols);
|
||||
EIGEN_DEVICE_FUNC static const BasisReturnType Unit(Index size, Index i);
|
||||
EIGEN_DEVICE_FUNC static const BasisReturnType Unit(Index i);
|
||||
EIGEN_DEVICE_FUNC static const BasisReturnType UnitX();
|
||||
EIGEN_DEVICE_FUNC static const BasisReturnType UnitY();
|
||||
EIGEN_DEVICE_FUNC static const BasisReturnType UnitZ();
|
||||
EIGEN_DEVICE_FUNC static const BasisReturnType UnitW();
|
||||
|
||||
EIGEN_DEVICE_FUNC const DiagonalWrapper<const Derived> asDiagonal() const;
|
||||
const PermutationWrapper<const Derived> asPermutation() const;
|
||||
EIGEN_DEVICE_FUNC const SkewSymmetricWrapper<const Derived> asSkewSymmetric() const;
|
||||
|
||||
EIGEN_DEVICE_FUNC Derived& setIdentity();
|
||||
EIGEN_DEVICE_FUNC Derived& setIdentity(Index rows, Index cols);
|
||||
EIGEN_DEVICE_FUNC Derived& setUnit(Index i);
|
||||
EIGEN_DEVICE_FUNC Derived& setUnit(Index newSize, Index i);
|
||||
|
||||
bool isIdentity(const RealScalar& prec = NumTraits<Scalar>::dummy_precision()) const;
|
||||
bool isDiagonal(const RealScalar& prec = NumTraits<Scalar>::dummy_precision()) const;
|
||||
|
||||
bool isUpperTriangular(const RealScalar& prec = NumTraits<Scalar>::dummy_precision()) const;
|
||||
bool isLowerTriangular(const RealScalar& prec = NumTraits<Scalar>::dummy_precision()) const;
|
||||
|
||||
bool isSkewSymmetric(const RealScalar& prec = NumTraits<Scalar>::dummy_precision()) const;
|
||||
|
||||
template <typename OtherDerived>
|
||||
bool isOrthogonal(const MatrixBase<OtherDerived>& other,
|
||||
const RealScalar& prec = NumTraits<Scalar>::dummy_precision()) const;
|
||||
bool isUnitary(const RealScalar& prec = NumTraits<Scalar>::dummy_precision()) const;
|
||||
|
||||
/** \returns true if each coefficients of \c *this and \a other are all exactly equal.
|
||||
* \warning When using floating point scalar values you probably should rather use a
|
||||
* fuzzy comparison such as isApprox()
|
||||
* \sa isApprox(), operator!= */
|
||||
template <typename OtherDerived>
|
||||
EIGEN_DEVICE_FUNC inline bool operator==(const MatrixBase<OtherDerived>& other) const {
|
||||
return (this->rows() == other.rows()) && (this->cols() == other.cols()) && cwiseEqual(other).all();
|
||||
}
|
||||
|
||||
/** \returns true if at least one pair of coefficients of \c *this and \a other are not exactly equal to each other.
|
||||
* \warning When using floating point scalar values you probably should rather use a
|
||||
* fuzzy comparison such as isApprox()
|
||||
* \sa isApprox(), operator== */
|
||||
template <typename OtherDerived>
|
||||
EIGEN_DEVICE_FUNC inline bool operator!=(const MatrixBase<OtherDerived>& other) const {
|
||||
return !(*this == other);
|
||||
}
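  // Note (illustrative sketch, not part of the Eigen sources): for floating-point scalars a
  // fuzzy comparison is usually preferable to the exact operators above, e.g.
  //   bool close = a.isApprox(b);   // tolerance-based
  //   bool same  = (a == b);        // exact element-wise equality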
|
||||
|
||||
NoAlias<Derived, Eigen::MatrixBase> EIGEN_DEVICE_FUNC noalias();
|
||||
|
||||
// TODO forceAlignedAccess is temporarily disabled
|
||||
// Need to find a nicer workaround.
|
||||
inline const Derived& forceAlignedAccess() const { return derived(); }
|
||||
inline Derived& forceAlignedAccess() { return derived(); }
|
||||
template <bool Enable>
|
||||
inline const Derived& forceAlignedAccessIf() const {
|
||||
return derived();
|
||||
}
|
||||
template <bool Enable>
|
||||
inline Derived& forceAlignedAccessIf() {
|
||||
return derived();
|
||||
}
|
||||
|
||||
EIGEN_DEVICE_FUNC Scalar trace() const;
|
||||
|
||||
template <int p>
|
||||
EIGEN_DEVICE_FUNC RealScalar lpNorm() const;
|
||||
|
||||
EIGEN_DEVICE_FUNC MatrixBase<Derived>& matrix() { return *this; }
|
||||
EIGEN_DEVICE_FUNC const MatrixBase<Derived>& matrix() const { return *this; }
|
||||
|
||||
/** \returns an \link Eigen::ArrayBase Array \endlink expression of this matrix
|
||||
* \sa ArrayBase::matrix() */
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE ArrayWrapper<Derived> array() { return ArrayWrapper<Derived>(derived()); }
|
||||
/** \returns a const \link Eigen::ArrayBase Array \endlink expression of this matrix
|
||||
* \sa ArrayBase::matrix() */
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const ArrayWrapper<const Derived> array() const {
|
||||
return ArrayWrapper<const Derived>(derived());
|
||||
}
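  // Sketch (illustrative only, not part of the Eigen sources): switch between matrix
  // (linear-algebra) and array (coefficient-wise) semantics without copying.
  //   Eigen::MatrixXd M = Eigen::MatrixXd::Random(3, 3);
  //   Eigen::MatrixXd P = (M.array() * M.array()).matrix();   // element-wise square, back to matrix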
|
||||
|
||||
/////////// LU module ///////////
|
||||
|
||||
template <typename PermutationIndex = DefaultPermutationIndex>
|
||||
inline const FullPivLU<PlainObject, PermutationIndex> fullPivLu() const;
|
||||
template <typename PermutationIndex = DefaultPermutationIndex>
|
||||
inline const PartialPivLU<PlainObject, PermutationIndex> partialPivLu() const;
|
||||
|
||||
template <typename PermutationIndex = DefaultPermutationIndex>
|
||||
inline const PartialPivLU<PlainObject, PermutationIndex> lu() const;
|
||||
|
||||
EIGEN_DEVICE_FUNC inline const Inverse<Derived> inverse() const;
|
||||
|
||||
template <typename ResultType>
|
||||
inline void computeInverseAndDetWithCheck(
|
||||
ResultType& inverse, typename ResultType::Scalar& determinant, bool& invertible,
|
||||
const RealScalar& absDeterminantThreshold = NumTraits<Scalar>::dummy_precision()) const;
|
||||
|
||||
template <typename ResultType>
|
||||
inline void computeInverseWithCheck(
|
||||
ResultType& inverse, bool& invertible,
|
||||
const RealScalar& absDeterminantThreshold = NumTraits<Scalar>::dummy_precision()) const;
|
||||
|
||||
EIGEN_DEVICE_FUNC Scalar determinant() const;
|
||||
|
||||
/////////// Cholesky module ///////////
|
||||
|
||||
inline const LLT<PlainObject> llt() const;
|
||||
inline const LDLT<PlainObject> ldlt() const;
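  // Usage sketch for the LU and Cholesky accessors above (illustrative only, not part of the
  // Eigen sources; llt()/ldlt() assume a selfadjoint positive (semi-)definite matrix):
  //   Eigen::MatrixXd A = Eigen::MatrixXd::Random(4, 4);
  //   Eigen::VectorXd b = Eigen::VectorXd::Random(4);
  //   Eigen::VectorXd x = A.partialPivLu().solve(b);
  //   Eigen::MatrixXd S = A * A.transpose();   // symmetric positive semi-definite
  //   Eigen::VectorXd y = S.ldlt().solve(b);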
|
||||
|
||||
/////////// QR module ///////////
|
||||
|
||||
inline const HouseholderQR<PlainObject> householderQr() const;
|
||||
template <typename PermutationIndex = DefaultPermutationIndex>
|
||||
inline const ColPivHouseholderQR<PlainObject, PermutationIndex> colPivHouseholderQr() const;
|
||||
template <typename PermutationIndex = DefaultPermutationIndex>
|
||||
inline const FullPivHouseholderQR<PlainObject, PermutationIndex> fullPivHouseholderQr() const;
|
||||
template <typename PermutationIndex = DefaultPermutationIndex>
|
||||
inline const CompleteOrthogonalDecomposition<PlainObject, PermutationIndex> completeOrthogonalDecomposition() const;
|
||||
|
||||
/////////// Eigenvalues module ///////////
|
||||
|
||||
inline EigenvaluesReturnType eigenvalues() const;
|
||||
inline RealScalar operatorNorm() const;
|
||||
|
||||
/////////// SVD module ///////////
|
||||
|
||||
template <int Options = 0>
|
||||
inline JacobiSVD<PlainObject, Options> jacobiSvd() const;
|
||||
template <int Options = 0>
|
||||
EIGEN_DEPRECATED inline JacobiSVD<PlainObject, Options> jacobiSvd(unsigned int computationOptions) const;
|
||||
|
||||
template <int Options = 0>
|
||||
inline BDCSVD<PlainObject, Options> bdcSvd() const;
|
||||
template <int Options = 0>
|
||||
EIGEN_DEPRECATED inline BDCSVD<PlainObject, Options> bdcSvd(unsigned int computationOptions) const;
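  // SVD usage sketch (illustrative only, not part of the Eigen sources): least-squares solve of
  // an over-determined system using the template-parameter form of the options shown above.
  //   Eigen::MatrixXd A = Eigen::MatrixXd::Random(10, 3);
  //   Eigen::VectorXd b = Eigen::VectorXd::Random(10);
  //   Eigen::VectorXd x = A.bdcSvd<Eigen::ComputeThinU | Eigen::ComputeThinV>().solve(b);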
|
||||
|
||||
/////////// Geometry module ///////////
|
||||
|
||||
template <typename OtherDerived>
|
||||
EIGEN_DEVICE_FUNC inline typename internal::cross_impl<Derived, OtherDerived>::return_type cross(
|
||||
const MatrixBase<OtherDerived>& other) const;
|
||||
|
||||
template <typename OtherDerived>
|
||||
EIGEN_DEVICE_FUNC inline PlainObject cross3(const MatrixBase<OtherDerived>& other) const;
|
||||
|
||||
EIGEN_DEVICE_FUNC inline PlainObject unitOrthogonal(void) const;
|
||||
|
||||
EIGEN_DEPRECATED EIGEN_DEVICE_FUNC inline Matrix<Scalar, 3, 1> eulerAngles(Index a0, Index a1, Index a2) const;
|
||||
|
||||
EIGEN_DEVICE_FUNC inline Matrix<Scalar, 3, 1> canonicalEulerAngles(Index a0, Index a1, Index a2) const;
|
||||
|
||||
// put this as separate enum value to work around possible GCC 4.3 bug (?)
|
||||
enum {
|
||||
HomogeneousReturnTypeDirection =
|
||||
ColsAtCompileTime == 1 && RowsAtCompileTime == 1
|
||||
? ((internal::traits<Derived>::Flags & RowMajorBit) == RowMajorBit ? Horizontal : Vertical)
|
||||
: ColsAtCompileTime == 1 ? Vertical
|
||||
: Horizontal
|
||||
};
|
||||
typedef Homogeneous<Derived, HomogeneousReturnTypeDirection> HomogeneousReturnType;
|
||||
EIGEN_DEVICE_FUNC inline HomogeneousReturnType homogeneous() const;
|
||||
|
||||
enum { SizeMinusOne = SizeAtCompileTime == Dynamic ? Dynamic : SizeAtCompileTime - 1 };
|
||||
typedef Block<const Derived, internal::traits<Derived>::ColsAtCompileTime == 1 ? SizeMinusOne : 1,
|
||||
internal::traits<Derived>::ColsAtCompileTime == 1 ? 1 : SizeMinusOne>
|
||||
ConstStartMinusOne;
|
||||
typedef EIGEN_EXPR_BINARYOP_SCALAR_RETURN_TYPE(ConstStartMinusOne, Scalar, quotient) HNormalizedReturnType;
|
||||
EIGEN_DEVICE_FUNC inline const HNormalizedReturnType hnormalized() const;
|
||||
|
||||
////////// Householder module ///////////
|
||||
|
||||
EIGEN_DEVICE_FUNC void makeHouseholderInPlace(Scalar& tau, RealScalar& beta);
|
||||
template <typename EssentialPart>
|
||||
EIGEN_DEVICE_FUNC void makeHouseholder(EssentialPart& essential, Scalar& tau, RealScalar& beta) const;
|
||||
template <typename EssentialPart>
|
||||
EIGEN_DEVICE_FUNC void applyHouseholderOnTheLeft(const EssentialPart& essential, const Scalar& tau,
|
||||
Scalar* workspace);
|
||||
template <typename EssentialPart>
|
||||
EIGEN_DEVICE_FUNC void applyHouseholderOnTheRight(const EssentialPart& essential, const Scalar& tau,
|
||||
Scalar* workspace);
|
||||
|
||||
///////// Jacobi module /////////
|
||||
|
||||
template <typename OtherScalar>
|
||||
EIGEN_DEVICE_FUNC void applyOnTheLeft(Index p, Index q, const JacobiRotation<OtherScalar>& j);
|
||||
template <typename OtherScalar>
|
||||
EIGEN_DEVICE_FUNC void applyOnTheRight(Index p, Index q, const JacobiRotation<OtherScalar>& j);
|
||||
|
||||
///////// SparseCore module /////////
|
||||
|
||||
template <typename OtherDerived>
|
||||
EIGEN_STRONG_INLINE const typename SparseMatrixBase<OtherDerived>::template CwiseProductDenseReturnType<Derived>::Type
|
||||
cwiseProduct(const SparseMatrixBase<OtherDerived>& other) const {
|
||||
return other.cwiseProduct(derived());
|
||||
}
|
||||
|
||||
///////// MatrixFunctions module /////////
|
||||
|
||||
typedef typename internal::stem_function<Scalar>::type StemFunction;
|
||||
#define EIGEN_MATRIX_FUNCTION(ReturnType, Name, Description) \
|
||||
/** \returns an expression of the matrix Description of \c *this. \brief This function requires the <a \
|
||||
* href="unsupported/group__MatrixFunctions__Module.html"> unsupported MatrixFunctions module</a>. To compute the \
|
||||
* coefficient-wise Description use ArrayBase::##Name . */ \
|
||||
const ReturnType<Derived> Name() const;
|
||||
#define EIGEN_MATRIX_FUNCTION_1(ReturnType, Name, Description, Argument) \
|
||||
/** \returns an expression of the matrix Description of \c *this. \brief This function requires the <a \
|
||||
* href="unsupported/group__MatrixFunctions__Module.html"> unsupported MatrixFunctions module</a>. To compute the \
|
||||
* coefficient-wise Description use ArrayBase::##Name . */ \
|
||||
const ReturnType<Derived> Name(Argument) const;
|
||||
|
||||
EIGEN_MATRIX_FUNCTION(MatrixExponentialReturnValue, exp, exponential)
|
||||
/** \brief Helper function for the <a href="unsupported/group__MatrixFunctions__Module.html"> unsupported
|
||||
* MatrixFunctions module</a>.*/
|
||||
const MatrixFunctionReturnValue<Derived> matrixFunction(StemFunction f) const;
|
||||
EIGEN_MATRIX_FUNCTION(MatrixFunctionReturnValue, cosh, hyperbolic cosine)
|
||||
EIGEN_MATRIX_FUNCTION(MatrixFunctionReturnValue, sinh, hyperbolic sine)
|
||||
EIGEN_MATRIX_FUNCTION(MatrixFunctionReturnValue, atanh, inverse hyperbolic tangent)
|
||||
EIGEN_MATRIX_FUNCTION(MatrixFunctionReturnValue, acosh, inverse hyperbolic cosine)
|
||||
EIGEN_MATRIX_FUNCTION(MatrixFunctionReturnValue, asinh, inverse hyperbolic sine)
|
||||
EIGEN_MATRIX_FUNCTION(MatrixFunctionReturnValue, cos, cosine)
|
||||
EIGEN_MATRIX_FUNCTION(MatrixFunctionReturnValue, sin, sine)
|
||||
EIGEN_MATRIX_FUNCTION(MatrixSquareRootReturnValue, sqrt, square root)
|
||||
EIGEN_MATRIX_FUNCTION(MatrixLogarithmReturnValue, log, logarithm)
|
||||
EIGEN_MATRIX_FUNCTION_1(MatrixPowerReturnValue, pow, power to \c p, const RealScalar& p)
|
||||
EIGEN_MATRIX_FUNCTION_1(MatrixComplexPowerReturnValue, pow, power to \c p, const internal::make_complex_t<Scalar>& p)
|
||||
|
||||
protected:
|
||||
EIGEN_DEFAULT_COPY_CONSTRUCTOR(MatrixBase)
|
||||
EIGEN_DEFAULT_EMPTY_CONSTRUCTOR_AND_DESTRUCTOR(MatrixBase)
|
||||
|
||||
private:
|
||||
EIGEN_DEVICE_FUNC explicit MatrixBase(int);
|
||||
EIGEN_DEVICE_FUNC MatrixBase(int, int);
|
||||
template <typename OtherDerived>
|
||||
EIGEN_DEVICE_FUNC explicit MatrixBase(const MatrixBase<OtherDerived>&);
|
||||
|
||||
protected:
|
||||
// mixing arrays and matrices is not legal
|
||||
template <typename OtherDerived>
|
||||
Derived& operator+=(const ArrayBase<OtherDerived>&) {
|
||||
EIGEN_STATIC_ASSERT(std::ptrdiff_t(sizeof(typename OtherDerived::Scalar)) == -1,
|
||||
YOU_CANNOT_MIX_ARRAYS_AND_MATRICES);
|
||||
return *this;
|
||||
}
|
||||
// mixing arrays and matrices is not legal
|
||||
template <typename OtherDerived>
|
||||
Derived& operator-=(const ArrayBase<OtherDerived>&) {
|
||||
EIGEN_STATIC_ASSERT(std::ptrdiff_t(sizeof(typename OtherDerived::Scalar)) == -1,
|
||||
YOU_CANNOT_MIX_ARRAYS_AND_MATRICES);
|
||||
return *this;
|
||||
}
|
||||
};
|
||||
|
||||
/***************************************************************************
|
||||
* Implementation of matrix base methods
|
||||
***************************************************************************/
|
||||
|
||||
/** replaces \c *this by \c *this * \a other.
|
||||
*
|
||||
* \returns a reference to \c *this
|
||||
*
|
||||
* Example: \include MatrixBase_applyOnTheRight.cpp
|
||||
* Output: \verbinclude MatrixBase_applyOnTheRight.out
|
||||
*/
|
||||
template <typename Derived>
|
||||
template <typename OtherDerived>
|
||||
inline Derived& MatrixBase<Derived>::operator*=(const EigenBase<OtherDerived>& other) {
|
||||
other.derived().applyThisOnTheRight(derived());
|
||||
return derived();
|
||||
}
|
||||
|
||||
/** replaces \c *this by \c *this * \a other. It is equivalent to MatrixBase::operator*=().
|
||||
*
|
||||
* Example: \include MatrixBase_applyOnTheRight.cpp
|
||||
* Output: \verbinclude MatrixBase_applyOnTheRight.out
|
||||
*/
|
||||
template <typename Derived>
|
||||
template <typename OtherDerived>
|
||||
inline void MatrixBase<Derived>::applyOnTheRight(const EigenBase<OtherDerived>& other) {
|
||||
other.derived().applyThisOnTheRight(derived());
|
||||
}
|
||||
|
||||
/** replaces \c *this by \a other * \c *this.
|
||||
*
|
||||
* Example: \include MatrixBase_applyOnTheLeft.cpp
|
||||
* Output: \verbinclude MatrixBase_applyOnTheLeft.out
|
||||
*/
|
||||
template <typename Derived>
|
||||
template <typename OtherDerived>
|
||||
inline void MatrixBase<Derived>::applyOnTheLeft(const EigenBase<OtherDerived>& other) {
|
||||
other.derived().applyThisOnTheLeft(derived());
|
||||
}
|
||||
|
||||
} // end namespace Eigen
|
||||
|
||||
#endif // EIGEN_MATRIXBASE_H
@@ -0,0 +1,91 @@
// This file is part of Eigen, a lightweight C++ template library
|
||||
// for linear algebra.
|
||||
//
|
||||
// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
|
||||
// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla
|
||||
// Public License v. 2.0. If a copy of the MPL was not distributed
|
||||
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
#ifndef EIGEN_NESTBYVALUE_H
|
||||
#define EIGEN_NESTBYVALUE_H
|
||||
|
||||
// IWYU pragma: private
|
||||
#include "./InternalHeaderCheck.h"
|
||||
|
||||
namespace Eigen {
|
||||
|
||||
namespace internal {
|
||||
template <typename ExpressionType>
|
||||
struct traits<NestByValue<ExpressionType> > : public traits<ExpressionType> {
|
||||
enum { Flags = traits<ExpressionType>::Flags & ~NestByRefBit };
|
||||
};
|
||||
} // namespace internal
|
||||
|
||||
/** \class NestByValue
|
||||
* \ingroup Core_Module
|
||||
*
|
||||
* \brief Expression which must be nested by value
|
||||
*
|
||||
* \tparam ExpressionType the type of the object of which we are requiring nesting-by-value
|
||||
*
|
||||
* This class is the return type of MatrixBase::nestByValue()
|
||||
* and most of the time this is the only way it is used.
|
||||
*
|
||||
* \sa MatrixBase::nestByValue()
|
||||
*/
|
||||
template <typename ExpressionType>
|
||||
class NestByValue : public internal::dense_xpr_base<NestByValue<ExpressionType> >::type {
|
||||
public:
|
||||
typedef typename internal::dense_xpr_base<NestByValue>::type Base;
|
||||
static constexpr bool HasDirectAccess = internal::has_direct_access<ExpressionType>::ret;
|
||||
|
||||
EIGEN_DENSE_PUBLIC_INTERFACE(NestByValue)
|
||||
|
||||
EIGEN_DEVICE_FUNC explicit inline NestByValue(const ExpressionType& matrix) : m_expression(matrix) {}
|
||||
|
||||
EIGEN_DEVICE_FUNC constexpr Index rows() const noexcept { return m_expression.rows(); }
|
||||
EIGEN_DEVICE_FUNC constexpr Index cols() const noexcept { return m_expression.cols(); }
|
||||
|
||||
EIGEN_DEVICE_FUNC operator const ExpressionType&() const { return m_expression; }
|
||||
|
||||
EIGEN_DEVICE_FUNC const ExpressionType& nestedExpression() const { return m_expression; }
|
||||
|
||||
EIGEN_DEVICE_FUNC typename std::enable_if<HasDirectAccess, const Scalar*>::type data() const {
|
||||
return m_expression.data();
|
||||
}
|
||||
|
||||
EIGEN_DEVICE_FUNC typename std::enable_if<HasDirectAccess, Index>::type innerStride() const {
|
||||
return m_expression.innerStride();
|
||||
}
|
||||
|
||||
EIGEN_DEVICE_FUNC typename std::enable_if<HasDirectAccess, Index>::type outerStride() const {
|
||||
return m_expression.outerStride();
|
||||
}
|
||||
|
||||
protected:
|
||||
const ExpressionType m_expression;
|
||||
};
|
||||
|
||||
/** \returns an expression of the temporary version of *this.
|
||||
*/
|
||||
template <typename Derived>
|
||||
EIGEN_DEVICE_FUNC inline const NestByValue<Derived> DenseBase<Derived>::nestByValue() const {
|
||||
return NestByValue<Derived>(derived());
|
||||
}
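// Usage sketch (illustrative only, not part of the Eigen sources): the wrapper stores its own
// copy of the wrapped expression instead of a reference to it.
//   Eigen::MatrixXd a(2, 2), b(2, 2);
//   a.setRandom(); b.setRandom();
//   auto wrapped = (a + b).nestByValue();   // holds a copy of the a+b expression
//   Eigen::MatrixXd c = wrapped;            // evaluates as usual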
|
||||
|
||||
namespace internal {
|
||||
|
||||
// Evaluator of Solve -> eval into a temporary
|
||||
template <typename ArgType>
|
||||
struct evaluator<NestByValue<ArgType> > : public evaluator<ArgType> {
|
||||
typedef evaluator<ArgType> Base;
|
||||
|
||||
EIGEN_DEVICE_FUNC explicit evaluator(const NestByValue<ArgType>& xpr) : Base(xpr.nestedExpression()) {}
|
||||
};
|
||||
} // namespace internal
|
||||
|
||||
} // end namespace Eigen
|
||||
|
||||
#endif // EIGEN_NESTBYVALUE_H
102
2025.09.22_cpp_with_eigen_package/Eigen/src/Core/NoAlias.h
Normal file
@@ -0,0 +1,102 @@
// This file is part of Eigen, a lightweight C++ template library
|
||||
// for linear algebra.
|
||||
//
|
||||
// Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla
|
||||
// Public License v. 2.0. If a copy of the MPL was not distributed
|
||||
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
#ifndef EIGEN_NOALIAS_H
|
||||
#define EIGEN_NOALIAS_H
|
||||
|
||||
// IWYU pragma: private
|
||||
#include "./InternalHeaderCheck.h"
|
||||
|
||||
namespace Eigen {
|
||||
|
||||
/** \class NoAlias
|
||||
* \ingroup Core_Module
|
||||
*
|
||||
* \brief Pseudo expression providing an operator = assuming no aliasing
|
||||
*
|
||||
* \tparam ExpressionType the type of the object on which to do the lazy assignment
|
||||
*
|
||||
* This class represents an expression with special assignment operators
|
||||
* assuming no aliasing between the target expression and the source expression.
|
||||
* More precisely, it allows bypassing the EvalBeforeAssignBit flag of the source expression.
|
||||
* It is the return type of MatrixBase::noalias()
|
||||
* and most of the time this is the only way it is used.
|
||||
*
|
||||
* \sa MatrixBase::noalias()
|
||||
*/
|
||||
template <typename ExpressionType, template <typename> class StorageBase>
|
||||
class NoAlias {
|
||||
public:
|
||||
typedef typename ExpressionType::Scalar Scalar;
|
||||
|
||||
EIGEN_DEVICE_FUNC explicit NoAlias(ExpressionType& expression) : m_expression(expression) {}
|
||||
|
||||
template <typename OtherDerived>
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE ExpressionType& operator=(const StorageBase<OtherDerived>& other) {
|
||||
call_assignment_no_alias(m_expression, other.derived(),
|
||||
internal::assign_op<Scalar, typename OtherDerived::Scalar>());
|
||||
return m_expression;
|
||||
}
|
||||
|
||||
template <typename OtherDerived>
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE ExpressionType& operator+=(const StorageBase<OtherDerived>& other) {
|
||||
call_assignment_no_alias(m_expression, other.derived(),
|
||||
internal::add_assign_op<Scalar, typename OtherDerived::Scalar>());
|
||||
return m_expression;
|
||||
}
|
||||
|
||||
template <typename OtherDerived>
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE ExpressionType& operator-=(const StorageBase<OtherDerived>& other) {
|
||||
call_assignment_no_alias(m_expression, other.derived(),
|
||||
internal::sub_assign_op<Scalar, typename OtherDerived::Scalar>());
|
||||
return m_expression;
|
||||
}
|
||||
|
||||
EIGEN_DEVICE_FUNC ExpressionType& expression() const { return m_expression; }
|
||||
|
||||
protected:
|
||||
ExpressionType& m_expression;
|
||||
};
|
||||
|
||||
/** \returns a pseudo expression of \c *this with an operator= assuming
|
||||
* no aliasing between \c *this and the source expression.
|
||||
*
|
||||
* More precisely, noalias() allows bypassing the EvalBeforeAssignBit flag.
|
||||
* Currently, even though several expressions may alias, only product
|
||||
* expressions have this flag. Therefore, noalias() is only useful when
|
||||
* the source expression contains a matrix product.
|
||||
*
|
||||
* Here are some examples where noalias is useful:
|
||||
* \code
|
||||
* D.noalias() = A * B;
|
||||
* D.noalias() += A.transpose() * B;
|
||||
* D.noalias() -= 2 * A * B.adjoint();
|
||||
* \endcode
|
||||
*
|
||||
* On the other hand the following example will lead to a \b wrong result:
|
||||
* \code
|
||||
* A.noalias() = A * B;
|
||||
* \endcode
|
||||
* because the result matrix A is also an operand of the matrix product. Therefore,
|
||||
* there is no alternative to evaluating A * B in a temporary, which is the default
|
||||
* behavior when you write:
|
||||
* \code
|
||||
* A = A * B;
|
||||
* \endcode
|
||||
*
|
||||
* \sa class NoAlias
|
||||
*/
|
||||
template <typename Derived>
|
||||
NoAlias<Derived, MatrixBase> EIGEN_DEVICE_FUNC MatrixBase<Derived>::noalias() {
|
||||
return NoAlias<Derived, Eigen::MatrixBase>(derived());
|
||||
}
|
||||
|
||||
} // end namespace Eigen
|
||||
|
||||
#endif // EIGEN_NOALIAS_H
335
2025.09.22_cpp_with_eigen_package/Eigen/src/Core/NumTraits.h
Normal file
@@ -0,0 +1,335 @@
// This file is part of Eigen, a lightweight C++ template library
|
||||
// for linear algebra.
|
||||
//
|
||||
// Copyright (C) 2006-2010 Benoit Jacob <jacob.benoit.1@gmail.com>
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla
|
||||
// Public License v. 2.0. If a copy of the MPL was not distributed
|
||||
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
#ifndef EIGEN_NUMTRAITS_H
|
||||
#define EIGEN_NUMTRAITS_H
|
||||
|
||||
// IWYU pragma: private
|
||||
#include "./InternalHeaderCheck.h"
|
||||
|
||||
namespace Eigen {
|
||||
|
||||
namespace internal {
|
||||
|
||||
// default implementation of digits(), based on numeric_limits if specialized,
|
||||
// 0 for integer types, and log2(epsilon()) otherwise.
|
||||
template <typename T, bool use_numeric_limits = std::numeric_limits<T>::is_specialized,
|
||||
bool is_integer = NumTraits<T>::IsInteger>
|
||||
struct default_digits_impl {
|
||||
EIGEN_DEVICE_FUNC constexpr static int run() { return std::numeric_limits<T>::digits; }
|
||||
};
|
||||
|
||||
template <typename T>
|
||||
struct default_digits_impl<T, false, false> // Floating point
|
||||
{
|
||||
EIGEN_DEVICE_FUNC constexpr static int run() {
|
||||
using std::ceil;
|
||||
using std::log2;
|
||||
typedef typename NumTraits<T>::Real Real;
|
||||
return int(ceil(-log2(NumTraits<Real>::epsilon())));
|
||||
}
|
||||
};
|
||||
|
||||
template <typename T>
|
||||
struct default_digits_impl<T, false, true> // Integer
|
||||
{
|
||||
EIGEN_DEVICE_FUNC constexpr static int run() { return 0; }
|
||||
};
|
||||
|
||||
// default implementation of digits10(), based on numeric_limits if specialized,
|
||||
// 0 for integer types, and floor((digits()-1)*log10(2)) otherwise.
|
||||
template <typename T, bool use_numeric_limits = std::numeric_limits<T>::is_specialized,
|
||||
bool is_integer = NumTraits<T>::IsInteger>
|
||||
struct default_digits10_impl {
|
||||
EIGEN_DEVICE_FUNC constexpr static int run() { return std::numeric_limits<T>::digits10; }
|
||||
};
|
||||
|
||||
template <typename T>
|
||||
struct default_digits10_impl<T, false, false> // Floating point
|
||||
{
|
||||
EIGEN_DEVICE_FUNC constexpr static int run() {
|
||||
using std::floor;
|
||||
using std::log10;
|
||||
typedef typename NumTraits<T>::Real Real;
|
||||
return int(floor((internal::default_digits_impl<Real>::run() - 1) * log10(2)));
|
||||
}
|
||||
};
|
||||
|
||||
template <typename T>
|
||||
struct default_digits10_impl<T, false, true> // Integer
|
||||
{
|
||||
EIGEN_DEVICE_FUNC constexpr static int run() { return 0; }
|
||||
};
|
||||
|
||||
// default implementation of max_digits10(), based on numeric_limits if specialized,
|
||||
// 0 for integer types, and log10(2) * digits() + 1 otherwise.
|
||||
template <typename T, bool use_numeric_limits = std::numeric_limits<T>::is_specialized,
|
||||
bool is_integer = NumTraits<T>::IsInteger>
|
||||
struct default_max_digits10_impl {
|
||||
EIGEN_DEVICE_FUNC constexpr static int run() { return std::numeric_limits<T>::max_digits10; }
|
||||
};
|
||||
|
||||
template <typename T>
|
||||
struct default_max_digits10_impl<T, false, false> // Floating point
|
||||
{
|
||||
EIGEN_DEVICE_FUNC constexpr static int run() {
|
||||
using std::ceil;
|
||||
using std::log10;
|
||||
typedef typename NumTraits<T>::Real Real;
|
||||
return int(ceil(internal::default_digits_impl<Real>::run() * log10(2) + 1));
|
||||
}
|
||||
};
|
||||
|
||||
template <typename T>
|
||||
struct default_max_digits10_impl<T, false, true> // Integer
|
||||
{
|
||||
EIGEN_DEVICE_FUNC constexpr static int run() { return 0; }
|
||||
};
|
||||
|
||||
} // end namespace internal
|
||||
|
||||
namespace numext {
|
||||
|
||||
/** \internal bit-wise cast without changing the underlying bit representation. */
|
||||
#if defined(__cpp_lib_bit_cast) && __cpp_lib_bit_cast >= 201806L
|
||||
template <typename Tgt, typename Src>
|
||||
EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC constexpr Tgt bit_cast(const Src& src) {
|
||||
return std::bit_cast<Tgt>(src);
|
||||
}
|
||||
#elif EIGEN_HAS_BUILTIN(__builtin_bit_cast)
|
||||
template <typename Tgt, typename Src>
|
||||
EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC constexpr Tgt bit_cast(const Src& src) {
|
||||
EIGEN_STATIC_ASSERT(std::is_trivially_copyable<Src>::value, THIS_TYPE_IS_NOT_SUPPORTED)
|
||||
EIGEN_STATIC_ASSERT(std::is_trivially_copyable<Tgt>::value, THIS_TYPE_IS_NOT_SUPPORTED)
|
||||
EIGEN_STATIC_ASSERT(sizeof(Src) == sizeof(Tgt), THIS_TYPE_IS_NOT_SUPPORTED)
|
||||
return __builtin_bit_cast(Tgt, src);
|
||||
}
|
||||
#else
|
||||
template <typename Tgt, typename Src>
|
||||
EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Tgt bit_cast(const Src& src) {
|
||||
// The behaviour of memcpy is not specified for non-trivially copyable types
|
||||
EIGEN_STATIC_ASSERT(std::is_trivially_copyable<Src>::value, THIS_TYPE_IS_NOT_SUPPORTED)
|
||||
EIGEN_STATIC_ASSERT(std::is_trivially_copyable<Tgt>::value && std::is_default_constructible<Tgt>::value,
|
||||
THIS_TYPE_IS_NOT_SUPPORTED)
|
||||
EIGEN_STATIC_ASSERT(sizeof(Src) == sizeof(Tgt), THIS_TYPE_IS_NOT_SUPPORTED)
|
||||
|
||||
Tgt tgt;
|
||||
// Load src into registers first. This allows the memcpy to be elided by CUDA.
|
||||
const Src staged = src;
|
||||
EIGEN_USING_STD(memcpy)
|
||||
memcpy(static_cast<void*>(&tgt), static_cast<const void*>(&staged), sizeof(Tgt));
|
||||
return tgt;
|
||||
}
|
||||
#endif
|
||||
} // namespace numext
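// Illustrative use of numext::bit_cast (a sketch, not part of the Eigen sources):
//   #include <cstdint>
//   std::uint32_t bits = Eigen::numext::bit_cast<std::uint32_t>(1.0f);   // 0x3f800000
//   float back         = Eigen::numext::bit_cast<float>(bits);           // exactly 1.0f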
|
||||
|
||||
// clang-format off
|
||||
/** \class NumTraits
|
||||
* \ingroup Core_Module
|
||||
*
|
||||
* \brief Holds information about the various numeric (i.e. scalar) types allowed by Eigen.
|
||||
*
|
||||
* \tparam T the numeric type at hand
|
||||
*
|
||||
* This class stores enums, typedefs and static methods giving information about a numeric type.
|
||||
*
|
||||
* The provided data consists of:
|
||||
* \li A typedef \c Real, giving the "real part" type of \a T. If \a T is already real,
|
||||
* then \c Real is just a typedef to \a T. If \a T is `std::complex<U>` then \c Real
|
||||
* is a typedef to \a U.
|
||||
* \li A typedef \c NonInteger, giving the type that should be used for operations producing non-integral values,
|
||||
* such as quotients, square roots, etc. If \a T is a floating-point type, then this typedef just gives
|
||||
* \a T again. Note however that many Eigen functions such as internal::sqrt simply refuse to
|
||||
* take integers. Outside of a few cases, Eigen doesn't do automatic type promotion. Thus, this typedef is
|
||||
* only intended as a helper for code that needs to explicitly promote types.
|
||||
* \li A typedef \c Literal giving the type to use for numeric literals such as "2" or "0.5". For instance, for
|
||||
* `std::complex<U>`, Literal is defined as \a U. Of course, this type must be fully compatible with \a T. In doubt,
|
||||
* just use \a T here.
|
||||
* \li A typedef \c Nested giving the type to use to nest a value inside of the expression tree. If you don't know what
|
||||
* this means, just use \a T here.
|
||||
* \li An enum value \c IsComplex. It is equal to 1 if \a T is a \c std::complex type, and to 0 otherwise.
|
||||
* \li An enum value \c IsInteger. It is equal to \c 1 if \a T is an integer type such as \c int, and to \c 0 otherwise.
|
||||
* \li Enum values \c ReadCost, \c AddCost and \c MulCost representing a rough estimate of the number of CPU cycles needed by
|
||||
* move / add / mul instructions respectively, assuming the data is already stored in CPU registers. Stay vague here.
|
||||
* No need to do architecture-specific stuff. If you don't know what this means, just use \c Eigen::HugeCost.
|
||||
* \li An enum value \c IsSigned. It is equal to \c 1 if \a T is a signed type and to 0 if \a T is unsigned.
|
||||
* \li An enum value \c RequireInitialization. It is equal to \c 1 if the constructor of the numeric type \a T must be
|
||||
* called, and to 0 if it is safe not to call it. Default is 0 if \a T is an arithmetic type, and 1 otherwise.
|
||||
* \li An epsilon() function which, unlike <a href="http://en.cppreference.com/w/cpp/types/numeric_limits/epsilon">
|
||||
* `std::numeric_limits::epsilon()`</a>, returns a \c Real instead of a \a T.
|
||||
* \li A dummy_precision() function returning a weak epsilon value. It is mainly used as a default value by the fuzzy
|
||||
* comparison operators.
|
||||
* \li highest() and lowest() functions returning the highest and lowest possible values respectively.
|
||||
* \li digits() function returning the number of radix digits (non-sign digits for integers, mantissa for floating-point).
|
||||
* This is the analogue of <a href="http://en.cppreference.com/w/cpp/types/numeric_limits/digits">
|
||||
* `std::numeric_limits<T>::digits`</a> which is used as the default implementation if specialized.
|
||||
* \li digits10() function returning the number of decimal digits that can be represented without change. This is the
|
||||
* analogue of <a href="http://en.cppreference.com/w/cpp/types/numeric_limits/digits10">
|
||||
* `std::numeric_limits<T>::digits10`</a> which is used as the default implementation if specialized.
|
||||
* \li max_digits10() function returning the number of decimal digits required to uniquely represent all distinct values
|
||||
* of the type. This is the analogue of <a
|
||||
* href="http://en.cppreference.com/w/cpp/types/numeric_limits/max_digits10">`std::numeric_limits<T>::max_digits10`</a>
|
||||
* which is used as the default implementation if specialized.
|
||||
* \li min_exponent() and max_exponent() functions returning the highest and lowest possible values, respectively,
|
||||
* such that the radix raised to the power exponent-1 is a normalized floating-point number. These are equivalent
|
||||
* to <a href="http://en.cppreference.com/w/cpp/types/numeric_limits/min_exponent">
|
||||
* `std::numeric_limits<T>::min_exponent`</a>/<a
|
||||
* href="http://en.cppreference.com/w/cpp/types/numeric_limits/max_exponent">`std::numeric_limits<T>::max_exponent`</a>.
|
||||
* \li infinity() function returning a representation of positive infinity, if available.
|
||||
* \li quiet_NaN() function returning a non-signaling "not-a-number", if available.
|
||||
*/
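/* A minimal sketch of such a specialization for a hypothetical user-defined scalar type
 * MyScalar (the type name, the cost constants and the precision values below are
 * illustrative assumptions, not part of Eigen):
 * \code
 * namespace Eigen {
 * template <>
 * struct NumTraits<MyScalar> : GenericNumTraits<MyScalar> {
 *   typedef MyScalar Real;
 *   typedef MyScalar NonInteger;
 *   typedef MyScalar Nested;
 *   typedef MyScalar Literal;
 *   enum {
 *     IsComplex = 0,
 *     IsInteger = 0,
 *     IsSigned = 1,
 *     RequireInitialization = 1,
 *     ReadCost = 1,
 *     AddCost = 3,
 *     MulCost = 3
 *   };
 *   static inline Real epsilon() { return Real(1e-12); }
 *   static inline Real dummy_precision() { return Real(1e-10); }
 *   static inline int digits10() { return 15; }
 * };
 * }  // namespace Eigen
 * \endcode
 */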
// clang-format on
template <typename T>
struct GenericNumTraits {
  enum {
    IsInteger = std::numeric_limits<T>::is_integer,
    IsSigned = std::numeric_limits<T>::is_signed,
    IsComplex = 0,
    RequireInitialization = internal::is_arithmetic<T>::value ? 0 : 1,
    ReadCost = 1,
    AddCost = 1,
    MulCost = 1
  };

  typedef T Real;
  typedef std::conditional_t<IsInteger, std::conditional_t<sizeof(T) <= 2, float, double>, T> NonInteger;
  typedef T Nested;
  typedef T Literal;

  EIGEN_DEVICE_FUNC constexpr static Real epsilon() { return numext::numeric_limits<T>::epsilon(); }

  EIGEN_DEVICE_FUNC constexpr static int digits10() { return internal::default_digits10_impl<T>::run(); }

  EIGEN_DEVICE_FUNC constexpr static int max_digits10() { return internal::default_max_digits10_impl<T>::run(); }

  EIGEN_DEVICE_FUNC constexpr static int digits() { return internal::default_digits_impl<T>::run(); }

  EIGEN_DEVICE_FUNC constexpr static int min_exponent() { return numext::numeric_limits<T>::min_exponent; }

  EIGEN_DEVICE_FUNC constexpr static int max_exponent() { return numext::numeric_limits<T>::max_exponent; }

  EIGEN_DEVICE_FUNC constexpr static Real dummy_precision() {
    // make sure to override this for floating-point types
    return Real(0);
  }

  EIGEN_DEVICE_FUNC constexpr static T highest() { return (numext::numeric_limits<T>::max)(); }

  EIGEN_DEVICE_FUNC constexpr static T lowest() { return (numext::numeric_limits<T>::lowest)(); }

  EIGEN_DEVICE_FUNC constexpr static T infinity() { return numext::numeric_limits<T>::infinity(); }

  EIGEN_DEVICE_FUNC constexpr static T quiet_NaN() { return numext::numeric_limits<T>::quiet_NaN(); }
};

template <typename T>
struct NumTraits : GenericNumTraits<T> {};

template <>
struct NumTraits<float> : GenericNumTraits<float> {
  EIGEN_DEVICE_FUNC constexpr static float dummy_precision() { return 1e-5f; }
};

template <>
struct NumTraits<double> : GenericNumTraits<double> {
  EIGEN_DEVICE_FUNC constexpr static double dummy_precision() { return 1e-12; }
};

// GPU devices treat `long double` as `double`.
#ifndef EIGEN_GPU_COMPILE_PHASE
template <>
struct NumTraits<long double> : GenericNumTraits<long double> {
  EIGEN_DEVICE_FUNC constexpr static long double dummy_precision() { return static_cast<long double>(1e-15l); }

#if defined(EIGEN_ARCH_PPC) && (__LDBL_MANT_DIG__ == 106)
  // PowerPC double double causes issues with some values
  EIGEN_DEVICE_FUNC constexpr static long double epsilon() {
    // 2^(-(__LDBL_MANT_DIG__)+1)
    return static_cast<long double>(2.4651903288156618919116517665087e-32l);
  }
#endif
};
#endif

template <typename Real_>
struct NumTraits<std::complex<Real_> > : GenericNumTraits<std::complex<Real_> > {
  typedef Real_ Real;
  typedef typename NumTraits<Real_>::Literal Literal;
  enum {
    IsComplex = 1,
    IsSigned = NumTraits<Real_>::IsSigned,
    RequireInitialization = NumTraits<Real_>::RequireInitialization,
    ReadCost = 2 * NumTraits<Real_>::ReadCost,
    AddCost = 2 * NumTraits<Real>::AddCost,
    MulCost = 4 * NumTraits<Real>::MulCost + 2 * NumTraits<Real>::AddCost
  };

  EIGEN_DEVICE_FUNC constexpr static Real epsilon() { return NumTraits<Real>::epsilon(); }
  EIGEN_DEVICE_FUNC constexpr static Real dummy_precision() { return NumTraits<Real>::dummy_precision(); }
  EIGEN_DEVICE_FUNC constexpr static int digits10() { return NumTraits<Real>::digits10(); }
  EIGEN_DEVICE_FUNC constexpr static int max_digits10() { return NumTraits<Real>::max_digits10(); }
};

template <typename Scalar, int Rows, int Cols, int Options, int MaxRows, int MaxCols>
struct NumTraits<Array<Scalar, Rows, Cols, Options, MaxRows, MaxCols> > {
  typedef Array<Scalar, Rows, Cols, Options, MaxRows, MaxCols> ArrayType;
  typedef typename NumTraits<Scalar>::Real RealScalar;
  typedef Array<RealScalar, Rows, Cols, Options, MaxRows, MaxCols> Real;
  typedef typename NumTraits<Scalar>::NonInteger NonIntegerScalar;
  typedef Array<NonIntegerScalar, Rows, Cols, Options, MaxRows, MaxCols> NonInteger;
  typedef ArrayType& Nested;
  typedef typename NumTraits<Scalar>::Literal Literal;

  enum {
    IsComplex = NumTraits<Scalar>::IsComplex,
    IsInteger = NumTraits<Scalar>::IsInteger,
    IsSigned = NumTraits<Scalar>::IsSigned,
    RequireInitialization = 1,
    ReadCost = ArrayType::SizeAtCompileTime == Dynamic
                   ? HugeCost
                   : ArrayType::SizeAtCompileTime * int(NumTraits<Scalar>::ReadCost),
    AddCost = ArrayType::SizeAtCompileTime == Dynamic ? HugeCost
                                                      : ArrayType::SizeAtCompileTime * int(NumTraits<Scalar>::AddCost),
    MulCost = ArrayType::SizeAtCompileTime == Dynamic ? HugeCost
                                                      : ArrayType::SizeAtCompileTime * int(NumTraits<Scalar>::MulCost)
  };

  EIGEN_DEVICE_FUNC constexpr static RealScalar epsilon() { return NumTraits<RealScalar>::epsilon(); }
  EIGEN_DEVICE_FUNC constexpr static RealScalar dummy_precision() { return NumTraits<RealScalar>::dummy_precision(); }

  constexpr static int digits10() { return NumTraits<Scalar>::digits10(); }
  constexpr static int max_digits10() { return NumTraits<Scalar>::max_digits10(); }
};

template <>
struct NumTraits<std::string> : GenericNumTraits<std::string> {
  enum { RequireInitialization = 1, ReadCost = HugeCost, AddCost = HugeCost, MulCost = HugeCost };

  constexpr static int digits10() { return 0; }
  constexpr static int max_digits10() { return 0; }

 private:
  static inline std::string epsilon();
  static inline std::string dummy_precision();
  static inline std::string lowest();
  static inline std::string highest();
  static inline std::string infinity();
  static inline std::string quiet_NaN();
};

// Empty specialization for void to allow template specialization based on NumTraits<T>::Real with T==void and SFINAE.
template <>
struct NumTraits<void> {};

template <>
struct NumTraits<bool> : GenericNumTraits<bool> {};

} // end namespace Eigen

#endif // EIGEN_NUMTRAITS_H
@@ -0,0 +1,253 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2011-2018 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_PARTIALREDUX_H
#define EIGEN_PARTIALREDUX_H

// IWYU pragma: private
#include "./InternalHeaderCheck.h"

namespace Eigen {

namespace internal {

/***************************************************************************
 *
 * This file provides evaluators for partial reductions.
 * There are two modes:
 *
 * - scalar path: simply calls the respective function on the column or row.
 *   -> nothing special here, all the tricky work is handled by the return
 *      types of VectorwiseOp's members. They embed the functor calling the
 *      respective DenseBase's member function.
 *
 * - vectorized path: implements packet-wise reductions followed by
 *   some (optional) processing of the outcome, e.g., division by n for mean.
 *
 * For the vectorized path, let's observe that the packet-size and outer-unrolling
 *   are both decided by the assignment logic. So all we have to do is to decide
 *   on the inner unrolling.
 *
 * For the unrolling, we can reuse "internal::redux_vec_unroller" from Redux.h,
 *   but we need to be careful to specify the correct increment.
 *
 ***************************************************************************/
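/* A minimal sketch of the kind of expression these evaluators back (dynamic-size double
 * matrix; the variable names are illustrative only):
 * \code
 * Eigen::MatrixXd m = Eigen::MatrixXd::Random(100, 8);
 * Eigen::RowVectorXd colSums = m.colwise().sum();  // one partial reduction per column
 * Eigen::VectorXd rowMeans = m.rowwise().mean();   // one partial reduction per row
 * \endcode
 * For a column-major m, the row-wise case is the one eligible for the vectorized path above:
 * each packet gathers PacketSize consecutive rows of one column, so PacketSize row results
 * are accumulated at once.
 */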
|
||||
|
||||
/* logic deciding a strategy for unrolling of vectorized paths */
|
||||
template <typename Func, typename Evaluator>
|
||||
struct packetwise_redux_traits {
|
||||
enum {
|
||||
OuterSize = int(Evaluator::IsRowMajor) ? Evaluator::RowsAtCompileTime : Evaluator::ColsAtCompileTime,
|
||||
Cost = OuterSize == Dynamic ? HugeCost
|
||||
: OuterSize * Evaluator::CoeffReadCost + (OuterSize - 1) * functor_traits<Func>::Cost,
|
||||
Unrolling = Cost <= EIGEN_UNROLLING_LIMIT ? CompleteUnrolling : NoUnrolling
|
||||
};
|
||||
};
|
||||
|
||||
/* Value to be returned when size==0; by default let's return 0 */
template <typename PacketType, typename Func>
|
||||
EIGEN_DEVICE_FUNC PacketType packetwise_redux_empty_value(const Func&) {
|
||||
const typename unpacket_traits<PacketType>::type zero(0);
|
||||
return pset1<PacketType>(zero);
|
||||
}
|
||||
|
||||
/* For products the default is 1 */
|
||||
template <typename PacketType, typename Scalar>
|
||||
EIGEN_DEVICE_FUNC PacketType packetwise_redux_empty_value(const scalar_product_op<Scalar, Scalar>&) {
|
||||
return pset1<PacketType>(Scalar(1));
|
||||
}
|
||||
|
||||
/* Perform the actual reduction */
|
||||
template <typename Func, typename Evaluator, int Unrolling = packetwise_redux_traits<Func, Evaluator>::Unrolling>
|
||||
struct packetwise_redux_impl;
|
||||
|
||||
/* Perform the actual reduction with unrolling */
|
||||
template <typename Func, typename Evaluator>
|
||||
struct packetwise_redux_impl<Func, Evaluator, CompleteUnrolling> {
|
||||
typedef redux_novec_unroller<Func, Evaluator, 0, Evaluator::SizeAtCompileTime> Base;
|
||||
typedef typename Evaluator::Scalar Scalar;
|
||||
|
||||
template <typename PacketType>
|
||||
EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE PacketType run(const Evaluator& eval, const Func& func, Index /*size*/) {
|
||||
return redux_vec_unroller<Func, Evaluator, 0,
|
||||
packetwise_redux_traits<Func, Evaluator>::OuterSize>::template run<PacketType>(eval,
|
||||
func);
|
||||
}
|
||||
};
|
||||
|
||||
/* Add a specialization of redux_vec_unroller for size==0 at compile time.
 * This specialization is not required for general reductions, which is
 * why it is defined here.
 */
|
||||
template <typename Func, typename Evaluator, Index Start>
|
||||
struct redux_vec_unroller<Func, Evaluator, Start, 0> {
|
||||
template <typename PacketType>
|
||||
EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE PacketType run(const Evaluator&, const Func& f) {
|
||||
return packetwise_redux_empty_value<PacketType>(f);
|
||||
}
|
||||
};
|
||||
|
||||
/* Perform the actual reduction for dynamic sizes */
|
||||
template <typename Func, typename Evaluator>
|
||||
struct packetwise_redux_impl<Func, Evaluator, NoUnrolling> {
|
||||
typedef typename Evaluator::Scalar Scalar;
|
||||
typedef typename redux_traits<Func, Evaluator>::PacketType PacketScalar;
|
||||
|
||||
template <typename PacketType>
|
||||
EIGEN_DEVICE_FUNC static PacketType run(const Evaluator& eval, const Func& func, Index size) {
|
||||
if (size == 0) return packetwise_redux_empty_value<PacketType>(func);
|
||||
|
||||
const Index size4 = 1 + numext::round_down(size - 1, 4);
|
||||
PacketType p = eval.template packetByOuterInner<Unaligned, PacketType>(0, 0);
|
||||
// This loop is optimized for instruction pipelining:
|
||||
// - each iteration generates two independent instructions
|
||||
// - thanks to branch prediction and out-of-order execution we have independent instructions across loops
|
||||
for (Index i = 1; i < size4; i += 4)
|
||||
p = func.packetOp(
|
||||
p, func.packetOp(func.packetOp(eval.template packetByOuterInner<Unaligned, PacketType>(i + 0, 0),
|
||||
eval.template packetByOuterInner<Unaligned, PacketType>(i + 1, 0)),
|
||||
func.packetOp(eval.template packetByOuterInner<Unaligned, PacketType>(i + 2, 0),
|
||||
eval.template packetByOuterInner<Unaligned, PacketType>(i + 3, 0))));
|
||||
for (Index i = size4; i < size; ++i)
|
||||
p = func.packetOp(p, eval.template packetByOuterInner<Unaligned, PacketType>(i, 0));
|
||||
return p;
|
||||
}
|
||||
};
|
||||
|
||||
template <typename Func, typename Evaluator>
|
||||
struct packetwise_segment_redux_impl {
|
||||
typedef typename Evaluator::Scalar Scalar;
|
||||
typedef typename redux_traits<Func, Evaluator>::PacketType PacketScalar;
|
||||
|
||||
template <typename PacketType>
|
||||
EIGEN_DEVICE_FUNC static PacketType run(const Evaluator& eval, const Func& func, Index size, Index begin,
|
||||
Index count) {
|
||||
if (size == 0) return packetwise_redux_empty_value<PacketType>(func);
|
||||
|
||||
PacketType p = eval.template packetSegmentByOuterInner<Unaligned, PacketType>(0, 0, begin, count);
|
||||
for (Index i = 1; i < size; ++i)
|
||||
p = func.packetOp(p, eval.template packetSegmentByOuterInner<Unaligned, PacketType>(i, 0, begin, count));
|
||||
return p;
|
||||
}
|
||||
};
|
||||
|
||||
template <typename ArgType, typename MemberOp, int Direction>
|
||||
struct evaluator<PartialReduxExpr<ArgType, MemberOp, Direction> >
|
||||
: evaluator_base<PartialReduxExpr<ArgType, MemberOp, Direction> > {
|
||||
typedef PartialReduxExpr<ArgType, MemberOp, Direction> XprType;
|
||||
typedef typename internal::nested_eval<ArgType, 1>::type ArgTypeNested;
|
||||
typedef add_const_on_value_type_t<ArgTypeNested> ConstArgTypeNested;
|
||||
typedef internal::remove_all_t<ArgTypeNested> ArgTypeNestedCleaned;
|
||||
typedef typename ArgType::Scalar InputScalar;
|
||||
typedef typename XprType::Scalar Scalar;
|
||||
enum {
|
||||
TraversalSize = Direction == int(Vertical) ? int(ArgType::RowsAtCompileTime) : int(ArgType::ColsAtCompileTime)
|
||||
};
|
||||
typedef typename MemberOp::template Cost<int(TraversalSize)> CostOpType;
|
||||
enum {
|
||||
CoeffReadCost = TraversalSize == Dynamic ? HugeCost
|
||||
: TraversalSize == 0
|
||||
? 1
|
||||
: int(TraversalSize) * int(evaluator<ArgType>::CoeffReadCost) + int(CostOpType::value),
|
||||
|
||||
ArgFlags_ = evaluator<ArgType>::Flags,
|
||||
|
||||
Vectorizable_ = bool(int(ArgFlags_) & PacketAccessBit) && bool(MemberOp::Vectorizable) &&
|
||||
(Direction == int(Vertical) ? bool(ArgFlags_ & RowMajorBit) : (ArgFlags_ & RowMajorBit) == 0) &&
|
||||
(TraversalSize != 0),
|
||||
|
||||
Flags = (traits<XprType>::Flags & RowMajorBit) | (evaluator<ArgType>::Flags & (HereditaryBits & (~RowMajorBit))) |
|
||||
(Vectorizable_ ? PacketAccessBit : 0) | LinearAccessBit,
|
||||
|
||||
Alignment = 0 // FIXME this will need to be improved once PartialReduxExpr is vectorized
|
||||
};
|
||||
|
||||
EIGEN_DEVICE_FUNC explicit evaluator(const XprType xpr) : m_arg(xpr.nestedExpression()), m_functor(xpr.functor()) {
|
||||
EIGEN_INTERNAL_CHECK_COST_VALUE(TraversalSize == Dynamic ? HugeCost
|
||||
: (TraversalSize == 0 ? 1 : int(CostOpType::value)));
|
||||
EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
|
||||
}
|
||||
|
||||
typedef typename XprType::CoeffReturnType CoeffReturnType;
|
||||
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar coeff(Index i, Index j) const {
|
||||
return coeff(Direction == Vertical ? j : i);
|
||||
}
|
||||
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar coeff(Index index) const {
|
||||
return m_functor(m_arg.template subVector<DirectionType(Direction)>(index));
|
||||
}
|
||||
|
||||
template <int LoadMode, typename PacketType>
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketType packet(Index i, Index j) const {
|
||||
return packet<LoadMode, PacketType>(Direction == Vertical ? j : i);
|
||||
}
|
||||
|
||||
template <int LoadMode, typename PacketType>
|
||||
EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC PacketType packet(Index idx) const {
|
||||
static constexpr int PacketSize = internal::unpacket_traits<PacketType>::size;
|
||||
static constexpr int PanelRows = Direction == Vertical ? ArgType::RowsAtCompileTime : PacketSize;
|
||||
static constexpr int PanelCols = Direction == Vertical ? PacketSize : ArgType::ColsAtCompileTime;
|
||||
using PanelType = Block<const ArgTypeNestedCleaned, PanelRows, PanelCols, true /* InnerPanel */>;
|
||||
using PanelEvaluator = typename internal::redux_evaluator<PanelType>;
|
||||
using BinaryOp = typename MemberOp::BinaryOp;
|
||||
using Impl = internal::packetwise_redux_impl<BinaryOp, PanelEvaluator>;
|
||||
|
||||
    // FIXME
    // See bug 1612: if PacketSize==1 (i.e. complex<double> with 128-bit registers) then the storage order of the
    // panel gets reversed and methods like packetByOuterInner no longer make sense in this context. So let's just
    // bypass "vectorization" in this case:
if (PacketSize == 1) return internal::pset1<PacketType>(coeff(idx));
|
||||
|
||||
Index startRow = Direction == Vertical ? 0 : idx;
|
||||
Index startCol = Direction == Vertical ? idx : 0;
|
||||
Index numRows = Direction == Vertical ? m_arg.rows() : PacketSize;
|
||||
Index numCols = Direction == Vertical ? PacketSize : m_arg.cols();
|
||||
|
||||
PanelType panel(m_arg, startRow, startCol, numRows, numCols);
|
||||
PanelEvaluator panel_eval(panel);
|
||||
PacketType p = Impl::template run<PacketType>(panel_eval, m_functor.binaryFunc(), m_arg.outerSize());
|
||||
return p;
|
||||
}
|
||||
|
||||
template <int LoadMode, typename PacketType>
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketType packetSegment(Index i, Index j, Index begin, Index count) const {
|
||||
return packetSegment<LoadMode, PacketType>(Direction == Vertical ? j : i, begin, count);
|
||||
}
|
||||
|
||||
template <int LoadMode, typename PacketType>
|
||||
EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC PacketType packetSegment(Index idx, Index begin, Index count) const {
|
||||
static constexpr int PanelRows = Direction == Vertical ? ArgType::RowsAtCompileTime : Dynamic;
|
||||
static constexpr int PanelCols = Direction == Vertical ? Dynamic : ArgType::ColsAtCompileTime;
|
||||
using PanelType = Block<const ArgTypeNestedCleaned, PanelRows, PanelCols, true /* InnerPanel */>;
|
||||
using PanelEvaluator = typename internal::redux_evaluator<PanelType>;
|
||||
using BinaryOp = typename MemberOp::BinaryOp;
|
||||
using Impl = internal::packetwise_segment_redux_impl<BinaryOp, PanelEvaluator>;
|
||||
|
||||
Index startRow = Direction == Vertical ? 0 : idx;
|
||||
Index startCol = Direction == Vertical ? idx : 0;
|
||||
Index numRows = Direction == Vertical ? m_arg.rows() : begin + count;
|
||||
Index numCols = Direction == Vertical ? begin + count : m_arg.cols();
|
||||
|
||||
PanelType panel(m_arg, startRow, startCol, numRows, numCols);
|
||||
PanelEvaluator panel_eval(panel);
|
||||
PacketType p = Impl::template run<PacketType>(panel_eval, m_functor.binaryFunc(), m_arg.outerSize(), begin, count);
|
||||
return p;
|
||||
}
|
||||
|
||||
protected:
|
||||
ConstArgTypeNested m_arg;
|
||||
const MemberOp m_functor;
|
||||
};
|
||||
|
||||
} // end namespace internal
|
||||
|
||||
} // end namespace Eigen
|
||||
|
||||
#endif // EIGEN_PARTIALREDUX_H
|
||||
@@ -0,0 +1,552 @@
|
||||
// This file is part of Eigen, a lightweight C++ template library
|
||||
// for linear algebra.
|
||||
//
|
||||
// Copyright (C) 2009 Benoit Jacob <jacob.benoit.1@gmail.com>
|
||||
// Copyright (C) 2009-2015 Gael Guennebaud <gael.guennebaud@inria.fr>
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla
|
||||
// Public License v. 2.0. If a copy of the MPL was not distributed
|
||||
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
#ifndef EIGEN_PERMUTATIONMATRIX_H
|
||||
#define EIGEN_PERMUTATIONMATRIX_H
|
||||
|
||||
// IWYU pragma: private
|
||||
#include "./InternalHeaderCheck.h"
|
||||
|
||||
namespace Eigen {
|
||||
|
||||
namespace internal {
|
||||
|
||||
enum PermPermProduct_t { PermPermProduct };
|
||||
|
||||
} // end namespace internal
|
||||
|
||||
/** \class PermutationBase
|
||||
* \ingroup Core_Module
|
||||
*
|
||||
* \brief Base class for permutations
|
||||
*
|
||||
* \tparam Derived the derived class
|
||||
*
|
||||
* This class is the base class for all expressions representing a permutation matrix,
|
||||
* internally stored as a vector of integers.
|
||||
* The convention followed here is that if \f$ \sigma \f$ is a permutation, the corresponding permutation matrix
|
||||
* \f$ P_\sigma \f$ is such that if \f$ (e_1,\ldots,e_p) \f$ is the canonical basis, we have:
|
||||
* \f[ P_\sigma(e_i) = e_{\sigma(i)}. \f]
|
||||
* This convention ensures that for any two permutations \f$ \sigma, \tau \f$, we have:
|
||||
* \f[ P_{\sigma\circ\tau} = P_\sigma P_\tau. \f]
|
||||
*
|
||||
* Permutation matrices are square and invertible.
|
||||
*
|
||||
 * Notice that in addition to the member functions and operators listed here, there are also non-member
 * operator* overloads to multiply any kind of permutation object with any kind of matrix expression (MatrixBase)
 * on either side.
|
||||
*
|
||||
* \sa class PermutationMatrix, class PermutationWrapper
|
||||
*/
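/* A minimal usage sketch of the convention above (dynamic-size double matrices; the
 * variable names are illustrative only):
 * \code
 * Eigen::PermutationMatrix<Eigen::Dynamic, Eigen::Dynamic> P(3);
 * P.indices() << 1, 2, 0;               // the permutation sends 0->1, 1->2, 2->0
 * Eigen::MatrixXd A = Eigen::MatrixXd::Random(3, 3);
 * Eigen::MatrixXd PA = P * A;           // permutes the rows of A
 * Eigen::MatrixXd AP = A * P;           // permutes the columns of A
 * Eigen::MatrixXd B = P.inverse() * PA; // recovers A
 * \endcode
 */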
|
||||
template <typename Derived>
|
||||
class PermutationBase : public EigenBase<Derived> {
|
||||
typedef internal::traits<Derived> Traits;
|
||||
typedef EigenBase<Derived> Base;
|
||||
|
||||
public:
|
||||
#ifndef EIGEN_PARSED_BY_DOXYGEN
|
||||
typedef typename Traits::IndicesType IndicesType;
|
||||
enum {
|
||||
Flags = Traits::Flags,
|
||||
RowsAtCompileTime = Traits::RowsAtCompileTime,
|
||||
ColsAtCompileTime = Traits::ColsAtCompileTime,
|
||||
MaxRowsAtCompileTime = Traits::MaxRowsAtCompileTime,
|
||||
MaxColsAtCompileTime = Traits::MaxColsAtCompileTime
|
||||
};
|
||||
typedef typename Traits::StorageIndex StorageIndex;
|
||||
typedef Matrix<StorageIndex, RowsAtCompileTime, ColsAtCompileTime, 0, MaxRowsAtCompileTime, MaxColsAtCompileTime>
|
||||
DenseMatrixType;
|
||||
typedef PermutationMatrix<IndicesType::SizeAtCompileTime, IndicesType::MaxSizeAtCompileTime, StorageIndex>
|
||||
PlainPermutationType;
|
||||
typedef PlainPermutationType PlainObject;
|
||||
using Base::derived;
|
||||
typedef Inverse<Derived> InverseReturnType;
|
||||
typedef void Scalar;
|
||||
#endif
|
||||
|
||||
/** Copies the other permutation into *this */
|
||||
template <typename OtherDerived>
|
||||
Derived& operator=(const PermutationBase<OtherDerived>& other) {
|
||||
indices() = other.indices();
|
||||
return derived();
|
||||
}
|
||||
|
||||
/** Assignment from the Transpositions \a tr */
|
||||
template <typename OtherDerived>
|
||||
Derived& operator=(const TranspositionsBase<OtherDerived>& tr) {
|
||||
setIdentity(tr.size());
|
||||
for (Index k = size() - 1; k >= 0; --k) applyTranspositionOnTheRight(k, tr.coeff(k));
|
||||
return derived();
|
||||
}
|
||||
|
||||
/** \returns the number of rows */
|
||||
inline EIGEN_DEVICE_FUNC Index rows() const { return Index(indices().size()); }
|
||||
|
||||
/** \returns the number of columns */
|
||||
inline EIGEN_DEVICE_FUNC Index cols() const { return Index(indices().size()); }
|
||||
|
||||
/** \returns the size of a side of the respective square matrix, i.e., the number of indices */
|
||||
inline EIGEN_DEVICE_FUNC Index size() const { return Index(indices().size()); }
|
||||
|
||||
#ifndef EIGEN_PARSED_BY_DOXYGEN
|
||||
template <typename DenseDerived>
|
||||
void evalTo(MatrixBase<DenseDerived>& other) const {
|
||||
other.setZero();
|
||||
for (Index i = 0; i < rows(); ++i) other.coeffRef(indices().coeff(i), i) = typename DenseDerived::Scalar(1);
|
||||
}
|
||||
#endif
|
||||
|
||||
/** \returns a Matrix object initialized from this permutation matrix. Notice that it
|
||||
* is inefficient to return this Matrix object by value. For efficiency, favor using
|
||||
* the Matrix constructor taking EigenBase objects.
|
||||
*/
|
||||
DenseMatrixType toDenseMatrix() const { return derived(); }
|
||||
|
||||
/** const version of indices(). */
|
||||
const IndicesType& indices() const { return derived().indices(); }
|
||||
/** \returns a reference to the stored array representing the permutation. */
|
||||
IndicesType& indices() { return derived().indices(); }
|
||||
|
||||
/** Resizes to given size.
|
||||
*/
|
||||
inline void resize(Index newSize) { indices().resize(newSize); }
|
||||
|
||||
/** Sets *this to be the identity permutation matrix */
|
||||
void setIdentity() {
|
||||
StorageIndex n = StorageIndex(size());
|
||||
for (StorageIndex i = 0; i < n; ++i) indices().coeffRef(i) = i;
|
||||
}
|
||||
|
||||
/** Sets *this to be the identity permutation matrix of given size.
|
||||
*/
|
||||
void setIdentity(Index newSize) {
|
||||
resize(newSize);
|
||||
setIdentity();
|
||||
}
|
||||
|
||||
/** Multiplies *this by the transposition \f$(ij)\f$ on the left.
|
||||
*
|
||||
* \returns a reference to *this.
|
||||
*
|
||||
* \warning This is much slower than applyTranspositionOnTheRight(Index,Index):
|
||||
* this has linear complexity and requires a lot of branching.
|
||||
*
|
||||
* \sa applyTranspositionOnTheRight(Index,Index)
|
||||
*/
|
||||
Derived& applyTranspositionOnTheLeft(Index i, Index j) {
|
||||
eigen_assert(i >= 0 && j >= 0 && i < size() && j < size());
|
||||
for (Index k = 0; k < size(); ++k) {
|
||||
if (indices().coeff(k) == i)
|
||||
indices().coeffRef(k) = StorageIndex(j);
|
||||
else if (indices().coeff(k) == j)
|
||||
indices().coeffRef(k) = StorageIndex(i);
|
||||
}
|
||||
return derived();
|
||||
}
|
||||
|
||||
/** Multiplies *this by the transposition \f$(ij)\f$ on the right.
|
||||
*
|
||||
* \returns a reference to *this.
|
||||
*
|
||||
   * This is a fast operation; it only consists of swapping two indices.
|
||||
*
|
||||
* \sa applyTranspositionOnTheLeft(Index,Index)
|
||||
*/
|
||||
Derived& applyTranspositionOnTheRight(Index i, Index j) {
|
||||
eigen_assert(i >= 0 && j >= 0 && i < size() && j < size());
|
||||
std::swap(indices().coeffRef(i), indices().coeffRef(j));
|
||||
return derived();
|
||||
}
|
||||
|
||||
/** \returns the inverse permutation matrix.
|
||||
*
|
||||
* \note \blank \note_try_to_help_rvo
|
||||
*/
|
||||
inline InverseReturnType inverse() const { return InverseReturnType(derived()); }
|
||||
/** \returns the transpose permutation matrix.
|
||||
*
|
||||
* \note \blank \note_try_to_help_rvo
|
||||
*/
|
||||
inline InverseReturnType transpose() const { return InverseReturnType(derived()); }
|
||||
|
||||
/**** multiplication helpers to hopefully get RVO ****/
|
||||
|
||||
#ifndef EIGEN_PARSED_BY_DOXYGEN
|
||||
protected:
|
||||
template <typename OtherDerived>
|
||||
void assignTranspose(const PermutationBase<OtherDerived>& other) {
|
||||
for (Index i = 0; i < rows(); ++i) indices().coeffRef(other.indices().coeff(i)) = i;
|
||||
}
|
||||
template <typename Lhs, typename Rhs>
|
||||
void assignProduct(const Lhs& lhs, const Rhs& rhs) {
|
||||
eigen_assert(lhs.cols() == rhs.rows());
|
||||
for (Index i = 0; i < rows(); ++i) indices().coeffRef(i) = lhs.indices().coeff(rhs.indices().coeff(i));
|
||||
}
|
||||
#endif
|
||||
|
||||
public:
|
||||
/** \returns the product permutation matrix.
|
||||
*
|
||||
* \note \blank \note_try_to_help_rvo
|
||||
*/
|
||||
template <typename Other>
|
||||
inline PlainPermutationType operator*(const PermutationBase<Other>& other) const {
|
||||
return PlainPermutationType(internal::PermPermProduct, derived(), other.derived());
|
||||
}
|
||||
|
||||
/** \returns the product of a permutation with another inverse permutation.
|
||||
*
|
||||
* \note \blank \note_try_to_help_rvo
|
||||
*/
|
||||
template <typename Other>
|
||||
inline PlainPermutationType operator*(const InverseImpl<Other, PermutationStorage>& other) const {
|
||||
return PlainPermutationType(internal::PermPermProduct, *this, other.eval());
|
||||
}
|
||||
|
||||
/** \returns the product of an inverse permutation with another permutation.
|
||||
*
|
||||
* \note \blank \note_try_to_help_rvo
|
||||
*/
|
||||
template <typename Other>
|
||||
friend inline PlainPermutationType operator*(const InverseImpl<Other, PermutationStorage>& other,
|
||||
const PermutationBase& perm) {
|
||||
return PlainPermutationType(internal::PermPermProduct, other.eval(), perm);
|
||||
}
|
||||
|
||||
  /** \returns the determinant of the permutation matrix, which is either 1 or -1 depending on the parity of the
   * permutation.
   *
   * This function is an O(\c n) procedure allocating a buffer of \c n booleans.
   */
|
||||
Index determinant() const {
|
||||
Index res = 1;
|
||||
Index n = size();
|
||||
Matrix<bool, RowsAtCompileTime, 1, 0, MaxRowsAtCompileTime> mask(n);
|
||||
mask.fill(false);
|
||||
Index r = 0;
|
||||
while (r < n) {
|
||||
// search for the next seed
|
||||
while (r < n && mask[r]) r++;
|
||||
if (r >= n) break;
|
||||
// we got one, let's follow it until we are back to the seed
|
||||
Index k0 = r++;
|
||||
mask.coeffRef(k0) = true;
|
||||
for (Index k = indices().coeff(k0); k != k0; k = indices().coeff(k)) {
|
||||
mask.coeffRef(k) = true;
|
||||
res = -res;
|
||||
}
|
||||
}
|
||||
return res;
|
||||
}
|
||||
|
||||
protected:
|
||||
};
|
||||
|
||||
namespace internal {
|
||||
template <int SizeAtCompileTime, int MaxSizeAtCompileTime, typename StorageIndex_>
|
||||
struct traits<PermutationMatrix<SizeAtCompileTime, MaxSizeAtCompileTime, StorageIndex_> >
|
||||
: traits<
|
||||
Matrix<StorageIndex_, SizeAtCompileTime, SizeAtCompileTime, 0, MaxSizeAtCompileTime, MaxSizeAtCompileTime> > {
|
||||
typedef PermutationStorage StorageKind;
|
||||
typedef Matrix<StorageIndex_, SizeAtCompileTime, 1, 0, MaxSizeAtCompileTime, 1> IndicesType;
|
||||
typedef StorageIndex_ StorageIndex;
|
||||
typedef void Scalar;
|
||||
};
|
||||
} // namespace internal
|
||||
|
||||
/** \class PermutationMatrix
|
||||
* \ingroup Core_Module
|
||||
*
|
||||
* \brief Permutation matrix
|
||||
*
|
||||
* \tparam SizeAtCompileTime the number of rows/cols, or Dynamic
|
||||
 * \tparam MaxSizeAtCompileTime the maximum number of rows/cols, or Dynamic. This optional parameter defaults to
 *                              SizeAtCompileTime. Most of the time, you should not have to specify it.
 * \tparam StorageIndex_ the integer type of the indices
|
||||
*
|
||||
* This class represents a permutation matrix, internally stored as a vector of integers.
|
||||
*
|
||||
* \sa class PermutationBase, class PermutationWrapper, class DiagonalMatrix
|
||||
*/
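/* A minimal construction sketch (the index values are illustrative only):
 * \code
 * Eigen::Vector3i idx;
 * idx << 2, 0, 1;
 * Eigen::PermutationMatrix<3> P(idx);    // built from an index vector
 * P.applyTranspositionOnTheRight(0, 1);  // swap two entries of the index array
 * Eigen::Index d = P.determinant();      // +1 or -1, depending on parity
 * \endcode
 */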
|
||||
template <int SizeAtCompileTime, int MaxSizeAtCompileTime, typename StorageIndex_>
|
||||
class PermutationMatrix
|
||||
: public PermutationBase<PermutationMatrix<SizeAtCompileTime, MaxSizeAtCompileTime, StorageIndex_> > {
|
||||
typedef PermutationBase<PermutationMatrix> Base;
|
||||
typedef internal::traits<PermutationMatrix> Traits;
|
||||
|
||||
public:
|
||||
typedef const PermutationMatrix& Nested;
|
||||
|
||||
#ifndef EIGEN_PARSED_BY_DOXYGEN
|
||||
typedef typename Traits::IndicesType IndicesType;
|
||||
typedef typename Traits::StorageIndex StorageIndex;
|
||||
#endif
|
||||
|
||||
inline PermutationMatrix() {}
|
||||
|
||||
/** Constructs an uninitialized permutation matrix of given size.
|
||||
*/
|
||||
explicit inline PermutationMatrix(Index size) : m_indices(size) {
|
||||
eigen_internal_assert(size <= NumTraits<StorageIndex>::highest());
|
||||
}
|
||||
|
||||
/** Copy constructor. */
|
||||
template <typename OtherDerived>
|
||||
inline PermutationMatrix(const PermutationBase<OtherDerived>& other) : m_indices(other.indices()) {}
|
||||
|
||||
  /** Generic constructor from an expression of the indices. The indices
   * array has the meaning that the permutation sends each integer i to indices[i].
   *
   * \warning It is your responsibility to check that the indices array that you pass actually
   * describes a permutation, i.e., each value between 0 and n-1 occurs exactly once, where n is the
   * array's size.
   */
|
||||
template <typename Other>
|
||||
explicit inline PermutationMatrix(const MatrixBase<Other>& indices) : m_indices(indices) {}
|
||||
|
||||
/** Convert the Transpositions \a tr to a permutation matrix */
|
||||
template <typename Other>
|
||||
explicit PermutationMatrix(const TranspositionsBase<Other>& tr) : m_indices(tr.size()) {
|
||||
*this = tr;
|
||||
}
|
||||
|
||||
/** Copies the other permutation into *this */
|
||||
template <typename Other>
|
||||
PermutationMatrix& operator=(const PermutationBase<Other>& other) {
|
||||
m_indices = other.indices();
|
||||
return *this;
|
||||
}
|
||||
|
||||
/** Assignment from the Transpositions \a tr */
|
||||
template <typename Other>
|
||||
PermutationMatrix& operator=(const TranspositionsBase<Other>& tr) {
|
||||
return Base::operator=(tr.derived());
|
||||
}
|
||||
|
||||
/** const version of indices(). */
|
||||
const IndicesType& indices() const { return m_indices; }
|
||||
/** \returns a reference to the stored array representing the permutation. */
|
||||
IndicesType& indices() { return m_indices; }
|
||||
|
||||
/**** multiplication helpers to hopefully get RVO ****/
|
||||
|
||||
#ifndef EIGEN_PARSED_BY_DOXYGEN
|
||||
template <typename Other>
|
||||
PermutationMatrix(const InverseImpl<Other, PermutationStorage>& other)
|
||||
: m_indices(other.derived().nestedExpression().size()) {
|
||||
eigen_internal_assert(m_indices.size() <= NumTraits<StorageIndex>::highest());
|
||||
StorageIndex end = StorageIndex(m_indices.size());
|
||||
for (StorageIndex i = 0; i < end; ++i)
|
||||
m_indices.coeffRef(other.derived().nestedExpression().indices().coeff(i)) = i;
|
||||
}
|
||||
template <typename Lhs, typename Rhs>
|
||||
PermutationMatrix(internal::PermPermProduct_t, const Lhs& lhs, const Rhs& rhs) : m_indices(lhs.indices().size()) {
|
||||
Base::assignProduct(lhs, rhs);
|
||||
}
|
||||
#endif
|
||||
|
||||
protected:
|
||||
IndicesType m_indices;
|
||||
};
|
||||
|
||||
namespace internal {
|
||||
template <int SizeAtCompileTime, int MaxSizeAtCompileTime, typename StorageIndex_, int PacketAccess_>
|
||||
struct traits<Map<PermutationMatrix<SizeAtCompileTime, MaxSizeAtCompileTime, StorageIndex_>, PacketAccess_> >
|
||||
: traits<
|
||||
Matrix<StorageIndex_, SizeAtCompileTime, SizeAtCompileTime, 0, MaxSizeAtCompileTime, MaxSizeAtCompileTime> > {
|
||||
typedef PermutationStorage StorageKind;
|
||||
typedef Map<const Matrix<StorageIndex_, SizeAtCompileTime, 1, 0, MaxSizeAtCompileTime, 1>, PacketAccess_> IndicesType;
|
||||
typedef StorageIndex_ StorageIndex;
|
||||
typedef void Scalar;
|
||||
};
|
||||
} // namespace internal
|
||||
|
||||
template <int SizeAtCompileTime, int MaxSizeAtCompileTime, typename StorageIndex_, int PacketAccess_>
|
||||
class Map<PermutationMatrix<SizeAtCompileTime, MaxSizeAtCompileTime, StorageIndex_>, PacketAccess_>
|
||||
: public PermutationBase<
|
||||
Map<PermutationMatrix<SizeAtCompileTime, MaxSizeAtCompileTime, StorageIndex_>, PacketAccess_> > {
|
||||
typedef PermutationBase<Map> Base;
|
||||
typedef internal::traits<Map> Traits;
|
||||
|
||||
public:
|
||||
#ifndef EIGEN_PARSED_BY_DOXYGEN
|
||||
typedef typename Traits::IndicesType IndicesType;
|
||||
typedef typename IndicesType::Scalar StorageIndex;
|
||||
#endif
|
||||
|
||||
inline Map(const StorageIndex* indicesPtr) : m_indices(indicesPtr) {}
|
||||
|
||||
inline Map(const StorageIndex* indicesPtr, Index size) : m_indices(indicesPtr, size) {}
|
||||
|
||||
/** Copies the other permutation into *this */
|
||||
template <typename Other>
|
||||
Map& operator=(const PermutationBase<Other>& other) {
|
||||
return Base::operator=(other.derived());
|
||||
}
|
||||
|
||||
/** Assignment from the Transpositions \a tr */
|
||||
template <typename Other>
|
||||
Map& operator=(const TranspositionsBase<Other>& tr) {
|
||||
return Base::operator=(tr.derived());
|
||||
}
|
||||
|
||||
#ifndef EIGEN_PARSED_BY_DOXYGEN
|
||||
/** This is a special case of the templated operator=. Its purpose is to
|
||||
* prevent a default operator= from hiding the templated operator=.
|
||||
*/
|
||||
Map& operator=(const Map& other) {
|
||||
m_indices = other.m_indices;
|
||||
return *this;
|
||||
}
|
||||
#endif
|
||||
|
||||
/** const version of indices(). */
|
||||
const IndicesType& indices() const { return m_indices; }
|
||||
/** \returns a reference to the stored array representing the permutation. */
|
||||
IndicesType& indices() { return m_indices; }
|
||||
|
||||
protected:
|
||||
IndicesType m_indices;
|
||||
};
|
||||
|
||||
template <typename IndicesType_>
|
||||
class TranspositionsWrapper;
|
||||
namespace internal {
|
||||
template <typename IndicesType_>
|
||||
struct traits<PermutationWrapper<IndicesType_> > {
|
||||
typedef PermutationStorage StorageKind;
|
||||
typedef void Scalar;
|
||||
typedef typename IndicesType_::Scalar StorageIndex;
|
||||
typedef IndicesType_ IndicesType;
|
||||
enum {
|
||||
RowsAtCompileTime = IndicesType_::SizeAtCompileTime,
|
||||
ColsAtCompileTime = IndicesType_::SizeAtCompileTime,
|
||||
MaxRowsAtCompileTime = IndicesType::MaxSizeAtCompileTime,
|
||||
MaxColsAtCompileTime = IndicesType::MaxSizeAtCompileTime,
|
||||
Flags = 0
|
||||
};
|
||||
};
|
||||
} // namespace internal
|
||||
|
||||
/** \class PermutationWrapper
|
||||
* \ingroup Core_Module
|
||||
*
|
||||
* \brief Class to view a vector of integers as a permutation matrix
|
||||
*
|
||||
 * \tparam IndicesType_ the type of the vector of integers (can be any compatible expression)
 *
 * This class allows viewing any vector expression of integers as a permutation matrix.
|
||||
*
|
||||
* \sa class PermutationBase, class PermutationMatrix
|
||||
*/
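/* A minimal sketch of viewing an existing index vector as a permutation (the variable
 * names are illustrative only):
 * \code
 * Eigen::VectorXi idx(3);
 * idx << 2, 0, 1;
 * Eigen::MatrixXd A = Eigen::MatrixXd::Random(3, 3);
 * Eigen::MatrixXd B = idx.asPermutation() * A;  // rows of A reordered, idx is merely referenced
 * \endcode
 */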
|
||||
template <typename IndicesType_>
|
||||
class PermutationWrapper : public PermutationBase<PermutationWrapper<IndicesType_> > {
|
||||
typedef PermutationBase<PermutationWrapper> Base;
|
||||
typedef internal::traits<PermutationWrapper> Traits;
|
||||
|
||||
public:
|
||||
#ifndef EIGEN_PARSED_BY_DOXYGEN
|
||||
typedef typename Traits::IndicesType IndicesType;
|
||||
#endif
|
||||
|
||||
inline PermutationWrapper(const IndicesType& indices) : m_indices(indices) {}
|
||||
|
||||
/** const version of indices(). */
|
||||
const internal::remove_all_t<typename IndicesType::Nested>& indices() const { return m_indices; }
|
||||
|
||||
protected:
|
||||
typename IndicesType::Nested m_indices;
|
||||
};
|
||||
|
||||
/** \returns the matrix with the permutation applied to the columns.
|
||||
*/
|
||||
template <typename MatrixDerived, typename PermutationDerived>
|
||||
EIGEN_DEVICE_FUNC const Product<MatrixDerived, PermutationDerived, DefaultProduct> operator*(
|
||||
const MatrixBase<MatrixDerived>& matrix, const PermutationBase<PermutationDerived>& permutation) {
|
||||
return Product<MatrixDerived, PermutationDerived, DefaultProduct>(matrix.derived(), permutation.derived());
|
||||
}
|
||||
|
||||
/** \returns the matrix with the permutation applied to the rows.
|
||||
*/
|
||||
template <typename PermutationDerived, typename MatrixDerived>
|
||||
EIGEN_DEVICE_FUNC const Product<PermutationDerived, MatrixDerived, DefaultProduct> operator*(
|
||||
const PermutationBase<PermutationDerived>& permutation, const MatrixBase<MatrixDerived>& matrix) {
|
||||
return Product<PermutationDerived, MatrixDerived, DefaultProduct>(permutation.derived(), matrix.derived());
|
||||
}
|
||||
|
||||
template <typename PermutationType>
|
||||
class InverseImpl<PermutationType, PermutationStorage> : public EigenBase<Inverse<PermutationType> > {
|
||||
typedef typename PermutationType::PlainPermutationType PlainPermutationType;
|
||||
typedef internal::traits<PermutationType> PermTraits;
|
||||
|
||||
protected:
|
||||
InverseImpl() {}
|
||||
|
||||
public:
|
||||
typedef Inverse<PermutationType> InverseType;
|
||||
using EigenBase<Inverse<PermutationType> >::derived;
|
||||
|
||||
#ifndef EIGEN_PARSED_BY_DOXYGEN
|
||||
typedef typename PermutationType::DenseMatrixType DenseMatrixType;
|
||||
enum {
|
||||
RowsAtCompileTime = PermTraits::RowsAtCompileTime,
|
||||
ColsAtCompileTime = PermTraits::ColsAtCompileTime,
|
||||
MaxRowsAtCompileTime = PermTraits::MaxRowsAtCompileTime,
|
||||
MaxColsAtCompileTime = PermTraits::MaxColsAtCompileTime
|
||||
};
|
||||
#endif
|
||||
|
||||
#ifndef EIGEN_PARSED_BY_DOXYGEN
|
||||
template <typename DenseDerived>
|
||||
void evalTo(MatrixBase<DenseDerived>& other) const {
|
||||
other.setZero();
|
||||
for (Index i = 0; i < derived().rows(); ++i)
|
||||
other.coeffRef(i, derived().nestedExpression().indices().coeff(i)) = typename DenseDerived::Scalar(1);
|
||||
}
|
||||
#endif
|
||||
|
||||
/** \return the equivalent permutation matrix */
|
||||
PlainPermutationType eval() const { return derived(); }
|
||||
|
||||
DenseMatrixType toDenseMatrix() const { return derived(); }
|
||||
|
||||
/** \returns the matrix with the inverse permutation applied to the columns.
|
||||
*/
|
||||
template <typename OtherDerived>
|
||||
friend const Product<OtherDerived, InverseType, DefaultProduct> operator*(const MatrixBase<OtherDerived>& matrix,
|
||||
const InverseType& trPerm) {
|
||||
return Product<OtherDerived, InverseType, DefaultProduct>(matrix.derived(), trPerm.derived());
|
||||
}
|
||||
|
||||
/** \returns the matrix with the inverse permutation applied to the rows.
|
||||
*/
|
||||
template <typename OtherDerived>
|
||||
const Product<InverseType, OtherDerived, DefaultProduct> operator*(const MatrixBase<OtherDerived>& matrix) const {
|
||||
return Product<InverseType, OtherDerived, DefaultProduct>(derived(), matrix.derived());
|
||||
}
|
||||
};
|
||||
|
||||
template <typename Derived>
|
||||
const PermutationWrapper<const Derived> MatrixBase<Derived>::asPermutation() const {
|
||||
return derived();
|
||||
}
|
||||
|
||||
namespace internal {
|
||||
|
||||
template <>
|
||||
struct AssignmentKind<DenseShape, PermutationShape> {
|
||||
typedef EigenBase2EigenBase Kind;
|
||||
};
|
||||
|
||||
} // end namespace internal
|
||||
|
||||
} // end namespace Eigen
|
||||
|
||||
#endif // EIGEN_PERMUTATIONMATRIX_H
|
||||
1014
2025.09.22_cpp_with_eigen_package/Eigen/src/Core/PlainObjectBase.h
Normal file
1014
2025.09.22_cpp_with_eigen_package/Eigen/src/Core/PlainObjectBase.h
Normal file
File diff suppressed because it is too large
307
2025.09.22_cpp_with_eigen_package/Eigen/src/Core/Product.h
Normal file
307
2025.09.22_cpp_with_eigen_package/Eigen/src/Core/Product.h
Normal file
@@ -0,0 +1,307 @@
|
||||
// This file is part of Eigen, a lightweight C++ template library
|
||||
// for linear algebra.
|
||||
//
|
||||
// Copyright (C) 2008-2011 Gael Guennebaud <gael.guennebaud@inria.fr>
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla
|
||||
// Public License v. 2.0. If a copy of the MPL was not distributed
|
||||
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
#ifndef EIGEN_PRODUCT_H
|
||||
#define EIGEN_PRODUCT_H
|
||||
|
||||
// IWYU pragma: private
|
||||
#include "./InternalHeaderCheck.h"
|
||||
|
||||
namespace Eigen {
|
||||
|
||||
template <typename Lhs, typename Rhs, int Option, typename StorageKind>
|
||||
class ProductImpl;
|
||||
|
||||
namespace internal {
|
||||
|
||||
template <typename Lhs, typename Rhs, int Option>
|
||||
struct traits<Product<Lhs, Rhs, Option>> {
|
||||
typedef remove_all_t<Lhs> LhsCleaned;
|
||||
typedef remove_all_t<Rhs> RhsCleaned;
|
||||
typedef traits<LhsCleaned> LhsTraits;
|
||||
typedef traits<RhsCleaned> RhsTraits;
|
||||
|
||||
typedef MatrixXpr XprKind;
|
||||
|
||||
typedef typename ScalarBinaryOpTraits<typename traits<LhsCleaned>::Scalar,
|
||||
typename traits<RhsCleaned>::Scalar>::ReturnType Scalar;
|
||||
typedef typename product_promote_storage_type<typename LhsTraits::StorageKind, typename RhsTraits::StorageKind,
|
||||
internal::product_type<Lhs, Rhs>::ret>::ret StorageKind;
|
||||
typedef typename promote_index_type<typename LhsTraits::StorageIndex, typename RhsTraits::StorageIndex>::type
|
||||
StorageIndex;
|
||||
|
||||
enum {
|
||||
RowsAtCompileTime = LhsTraits::RowsAtCompileTime,
|
||||
ColsAtCompileTime = RhsTraits::ColsAtCompileTime,
|
||||
MaxRowsAtCompileTime = LhsTraits::MaxRowsAtCompileTime,
|
||||
MaxColsAtCompileTime = RhsTraits::MaxColsAtCompileTime,
|
||||
|
||||
// FIXME: only needed by GeneralMatrixMatrixTriangular
|
||||
InnerSize = min_size_prefer_fixed(LhsTraits::ColsAtCompileTime, RhsTraits::RowsAtCompileTime),
|
||||
|
||||
// The storage order is somewhat arbitrary here. The correct one will be determined through the evaluator.
|
||||
Flags = (MaxRowsAtCompileTime == 1 && MaxColsAtCompileTime != 1) ? RowMajorBit
|
||||
: (MaxColsAtCompileTime == 1 && MaxRowsAtCompileTime != 1) ? 0
|
||||
: (((LhsTraits::Flags & NoPreferredStorageOrderBit) && (RhsTraits::Flags & RowMajorBit)) ||
|
||||
((RhsTraits::Flags & NoPreferredStorageOrderBit) && (LhsTraits::Flags & RowMajorBit)))
|
||||
? RowMajorBit
|
||||
: NoPreferredStorageOrderBit
|
||||
};
|
||||
};
|
||||
|
||||
struct TransposeProductEnum {
|
||||
// convenience enumerations to specialize transposed products
|
||||
enum : int {
|
||||
Default = 0x00,
|
||||
Matrix = 0x01,
|
||||
Permutation = 0x02,
|
||||
MatrixMatrix = (Matrix << 8) | Matrix,
|
||||
MatrixPermutation = (Matrix << 8) | Permutation,
|
||||
PermutationMatrix = (Permutation << 8) | Matrix
|
||||
};
|
||||
};
|
||||
template <typename Xpr>
|
||||
struct TransposeKind {
|
||||
static constexpr int Kind = is_matrix_base_xpr<Xpr>::value ? TransposeProductEnum::Matrix
|
||||
: is_permutation_base_xpr<Xpr>::value ? TransposeProductEnum::Permutation
|
||||
: TransposeProductEnum::Default;
|
||||
};
|
||||
|
||||
template <typename Lhs, typename Rhs>
|
||||
struct TransposeProductKind {
|
||||
static constexpr int Kind = (TransposeKind<Lhs>::Kind << 8) | TransposeKind<Rhs>::Kind;
|
||||
};
|
||||
|
||||
template <typename Lhs, typename Rhs, int Option, int Kind = TransposeProductKind<Lhs, Rhs>::Kind>
|
||||
struct product_transpose_helper {
|
||||
// by default, don't optimize the transposed product
|
||||
using Derived = Product<Lhs, Rhs, Option>;
|
||||
using Scalar = typename Derived::Scalar;
|
||||
using TransposeType = Transpose<const Derived>;
|
||||
using ConjugateTransposeType = CwiseUnaryOp<scalar_conjugate_op<Scalar>, TransposeType>;
|
||||
using AdjointType = std::conditional_t<NumTraits<Scalar>::IsComplex, ConjugateTransposeType, TransposeType>;
|
||||
|
||||
// return (lhs * rhs)^T
|
||||
static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TransposeType run_transpose(const Derived& derived) {
|
||||
return TransposeType(derived);
|
||||
}
|
||||
// return (lhs * rhs)^H
|
||||
static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE AdjointType run_adjoint(const Derived& derived) {
|
||||
return AdjointType(TransposeType(derived));
|
||||
}
|
||||
};
|
||||
|
||||
template <typename Lhs, typename Rhs, int Option>
|
||||
struct product_transpose_helper<Lhs, Rhs, Option, TransposeProductEnum::MatrixMatrix> {
|
||||
// expand the transposed matrix-matrix product
|
||||
using Derived = Product<Lhs, Rhs, Option>;
|
||||
|
||||
using LhsScalar = typename traits<Lhs>::Scalar;
|
||||
using LhsTransposeType = typename DenseBase<Lhs>::ConstTransposeReturnType;
|
||||
using LhsConjugateTransposeType = CwiseUnaryOp<scalar_conjugate_op<LhsScalar>, LhsTransposeType>;
|
||||
using LhsAdjointType =
|
||||
std::conditional_t<NumTraits<LhsScalar>::IsComplex, LhsConjugateTransposeType, LhsTransposeType>;
|
||||
|
||||
using RhsScalar = typename traits<Rhs>::Scalar;
|
||||
using RhsTransposeType = typename DenseBase<Rhs>::ConstTransposeReturnType;
|
||||
using RhsConjugateTransposeType = CwiseUnaryOp<scalar_conjugate_op<RhsScalar>, RhsTransposeType>;
|
||||
using RhsAdjointType =
|
||||
std::conditional_t<NumTraits<RhsScalar>::IsComplex, RhsConjugateTransposeType, RhsTransposeType>;
|
||||
|
||||
using TransposeType = Product<RhsTransposeType, LhsTransposeType, Option>;
|
||||
using AdjointType = Product<RhsAdjointType, LhsAdjointType, Option>;
|
||||
|
||||
// return rhs^T * lhs^T
|
||||
static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TransposeType run_transpose(const Derived& derived) {
|
||||
return TransposeType(RhsTransposeType(derived.rhs()), LhsTransposeType(derived.lhs()));
|
||||
}
|
||||
// return rhs^H * lhs^H
|
||||
static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE AdjointType run_adjoint(const Derived& derived) {
|
||||
return AdjointType(RhsAdjointType(RhsTransposeType(derived.rhs())),
|
||||
LhsAdjointType(LhsTransposeType(derived.lhs())));
|
||||
}
|
||||
};
|
||||
template <typename Lhs, typename Rhs, int Option>
|
||||
struct product_transpose_helper<Lhs, Rhs, Option, TransposeProductEnum::PermutationMatrix> {
|
||||
// expand the transposed permutation-matrix product
|
||||
using Derived = Product<Lhs, Rhs, Option>;
|
||||
|
||||
using LhsInverseType = typename PermutationBase<Lhs>::InverseReturnType;
|
||||
|
||||
using RhsScalar = typename traits<Rhs>::Scalar;
|
||||
using RhsTransposeType = typename DenseBase<Rhs>::ConstTransposeReturnType;
|
||||
using RhsConjugateTransposeType = CwiseUnaryOp<scalar_conjugate_op<RhsScalar>, RhsTransposeType>;
|
||||
using RhsAdjointType =
|
||||
std::conditional_t<NumTraits<RhsScalar>::IsComplex, RhsConjugateTransposeType, RhsTransposeType>;
|
||||
|
||||
using TransposeType = Product<RhsTransposeType, LhsInverseType, Option>;
|
||||
using AdjointType = Product<RhsAdjointType, LhsInverseType, Option>;
|
||||
|
||||
// return rhs^T * lhs^-1
|
||||
static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TransposeType run_transpose(const Derived& derived) {
|
||||
return TransposeType(RhsTransposeType(derived.rhs()), LhsInverseType(derived.lhs()));
|
||||
}
|
||||
// return rhs^H * lhs^-1
|
||||
static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE AdjointType run_adjoint(const Derived& derived) {
|
||||
return AdjointType(RhsAdjointType(RhsTransposeType(derived.rhs())), LhsInverseType(derived.lhs()));
|
||||
}
|
||||
};
|
||||
template <typename Lhs, typename Rhs, int Option>
|
||||
struct product_transpose_helper<Lhs, Rhs, Option, TransposeProductEnum::MatrixPermutation> {
|
||||
// expand the transposed matrix-permutation product
|
||||
using Derived = Product<Lhs, Rhs, Option>;
|
||||
|
||||
using LhsScalar = typename traits<Lhs>::Scalar;
|
||||
using LhsTransposeType = typename DenseBase<Lhs>::ConstTransposeReturnType;
|
||||
using LhsConjugateTransposeType = CwiseUnaryOp<scalar_conjugate_op<LhsScalar>, LhsTransposeType>;
|
||||
using LhsAdjointType =
|
||||
std::conditional_t<NumTraits<LhsScalar>::IsComplex, LhsConjugateTransposeType, LhsTransposeType>;
|
||||
|
||||
using RhsInverseType = typename PermutationBase<Rhs>::InverseReturnType;
|
||||
|
||||
using TransposeType = Product<RhsInverseType, LhsTransposeType, Option>;
|
||||
using AdjointType = Product<RhsInverseType, LhsAdjointType, Option>;
|
||||
|
||||
// return rhs^-1 * lhs^T
|
||||
static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TransposeType run_transpose(const Derived& derived) {
|
||||
return TransposeType(RhsInverseType(derived.rhs()), LhsTransposeType(derived.lhs()));
|
||||
}
|
||||
// return rhs^-1 * lhs^H
|
||||
static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE AdjointType run_adjoint(const Derived& derived) {
|
||||
return AdjointType(RhsInverseType(derived.rhs()), LhsAdjointType(LhsTransposeType(derived.lhs())));
|
||||
}
|
||||
};
|
||||
|
||||
} // end namespace internal
|
||||
|
||||
/** \class Product
|
||||
* \ingroup Core_Module
|
||||
*
|
||||
* \brief Expression of the product of two arbitrary matrices or vectors
|
||||
*
|
||||
* \tparam Lhs_ the type of the left-hand side expression
|
||||
* \tparam Rhs_ the type of the right-hand side expression
|
||||
*
|
||||
* This class represents an expression of the product of two arbitrary matrices.
|
||||
*
|
||||
* The other template parameters are:
|
||||
* \tparam Option can be DefaultProduct, AliasFreeProduct, or LazyProduct
|
||||
*
|
||||
*/
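/* A minimal usage sketch (dynamic-size double matrices; the variable names are
 * illustrative only):
 * \code
 * Eigen::MatrixXd A = Eigen::MatrixXd::Random(3, 4);
 * Eigen::MatrixXd B = Eigen::MatrixXd::Random(4, 2);
 * Eigen::MatrixXd C = A * B;                 // Product<...> expression, evaluated on assignment
 * Eigen::MatrixXd Ct = (A * B).transpose();  // rewritten as B.transpose() * A.transpose()
 * double c00 = A.row(0) * B.col(0);          // inner product converts to a scalar
 * \endcode
 */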
|
||||
template <typename Lhs_, typename Rhs_, int Option>
|
||||
class Product
|
||||
: public ProductImpl<Lhs_, Rhs_, Option,
|
||||
typename internal::product_promote_storage_type<
|
||||
typename internal::traits<Lhs_>::StorageKind, typename internal::traits<Rhs_>::StorageKind,
|
||||
internal::product_type<Lhs_, Rhs_>::ret>::ret> {
|
||||
public:
|
||||
typedef Lhs_ Lhs;
|
||||
typedef Rhs_ Rhs;
|
||||
|
||||
typedef
|
||||
typename ProductImpl<Lhs, Rhs, Option,
|
||||
typename internal::product_promote_storage_type<
|
||||
typename internal::traits<Lhs>::StorageKind, typename internal::traits<Rhs>::StorageKind,
|
||||
internal::product_type<Lhs, Rhs>::ret>::ret>::Base Base;
|
||||
EIGEN_GENERIC_PUBLIC_INTERFACE(Product)
|
||||
|
||||
typedef typename internal::ref_selector<Lhs>::type LhsNested;
|
||||
typedef typename internal::ref_selector<Rhs>::type RhsNested;
|
||||
typedef internal::remove_all_t<LhsNested> LhsNestedCleaned;
|
||||
typedef internal::remove_all_t<RhsNested> RhsNestedCleaned;
|
||||
|
||||
using TransposeReturnType = typename internal::product_transpose_helper<Lhs, Rhs, Option>::TransposeType;
|
||||
using AdjointReturnType = typename internal::product_transpose_helper<Lhs, Rhs, Option>::AdjointType;
|
||||
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Product(const Lhs& lhs, const Rhs& rhs) : m_lhs(lhs), m_rhs(rhs) {
|
||||
eigen_assert(lhs.cols() == rhs.rows() && "invalid matrix product" &&
|
||||
"if you wanted a coeff-wise or a dot product use the respective explicit functions");
|
||||
}
|
||||
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Index rows() const noexcept { return m_lhs.rows(); }
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Index cols() const noexcept { return m_rhs.cols(); }
|
||||
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const LhsNestedCleaned& lhs() const { return m_lhs; }
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const RhsNestedCleaned& rhs() const { return m_rhs; }
|
||||
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TransposeReturnType transpose() const {
|
||||
return internal::product_transpose_helper<Lhs, Rhs, Option>::run_transpose(*this);
|
||||
}
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE AdjointReturnType adjoint() const {
|
||||
return internal::product_transpose_helper<Lhs, Rhs, Option>::run_adjoint(*this);
|
||||
}
|
||||
|
||||
protected:
|
||||
LhsNested m_lhs;
|
||||
RhsNested m_rhs;
|
||||
};
|
||||
|
||||
namespace internal {
|
||||
|
||||
template <typename Lhs, typename Rhs, int Option, int ProductTag = internal::product_type<Lhs, Rhs>::ret>
|
||||
class dense_product_base : public internal::dense_xpr_base<Product<Lhs, Rhs, Option>>::type {};
|
||||
|
||||
/** Conversion to scalar for inner-products */
|
||||
template <typename Lhs, typename Rhs, int Option>
|
||||
class dense_product_base<Lhs, Rhs, Option, InnerProduct>
|
||||
: public internal::dense_xpr_base<Product<Lhs, Rhs, Option>>::type {
|
||||
typedef Product<Lhs, Rhs, Option> ProductXpr;
|
||||
typedef typename internal::dense_xpr_base<ProductXpr>::type Base;
|
||||
|
||||
public:
|
||||
using Base::derived;
|
||||
typedef typename Base::Scalar Scalar;
|
||||
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE operator const Scalar() const {
|
||||
return internal::evaluator<ProductXpr>(derived()).coeff(0, 0);
|
||||
}
|
||||
};
|
||||
|
||||
} // namespace internal
|
||||
|
||||
// Generic API dispatcher
|
||||
template <typename Lhs, typename Rhs, int Option, typename StorageKind>
|
||||
class ProductImpl : public internal::generic_xpr_base<Product<Lhs, Rhs, Option>, MatrixXpr, StorageKind>::type {
|
||||
public:
|
||||
typedef typename internal::generic_xpr_base<Product<Lhs, Rhs, Option>, MatrixXpr, StorageKind>::type Base;
|
||||
};
|
||||
|
||||
template <typename Lhs, typename Rhs, int Option>
|
||||
class ProductImpl<Lhs, Rhs, Option, Dense> : public internal::dense_product_base<Lhs, Rhs, Option> {
|
||||
typedef Product<Lhs, Rhs, Option> Derived;
|
||||
|
||||
public:
|
||||
typedef typename internal::dense_product_base<Lhs, Rhs, Option> Base;
|
||||
EIGEN_DENSE_PUBLIC_INTERFACE(Derived)
|
||||
protected:
|
||||
enum {
|
||||
IsOneByOne = (RowsAtCompileTime == 1 || RowsAtCompileTime == Dynamic) &&
|
||||
(ColsAtCompileTime == 1 || ColsAtCompileTime == Dynamic),
|
||||
EnableCoeff = IsOneByOne || Option == LazyProduct
|
||||
};
|
||||
|
||||
public:
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar coeff(Index row, Index col) const {
|
||||
EIGEN_STATIC_ASSERT(EnableCoeff, THIS_METHOD_IS_ONLY_FOR_INNER_OR_LAZY_PRODUCTS);
|
||||
eigen_assert((Option == LazyProduct) || (this->rows() == 1 && this->cols() == 1));
|
||||
|
||||
return internal::evaluator<Derived>(derived()).coeff(row, col);
|
||||
}
|
||||
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar coeff(Index i) const {
|
||||
EIGEN_STATIC_ASSERT(EnableCoeff, THIS_METHOD_IS_ONLY_FOR_INNER_OR_LAZY_PRODUCTS);
|
||||
eigen_assert((Option == LazyProduct) || (this->rows() == 1 && this->cols() == 1));
|
||||
|
||||
return internal::evaluator<Derived>(derived()).coeff(i);
|
||||
}
|
||||
};
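/* Illustrative usage sketch (not part of this header): coefficient access on a product expression
   is only enabled for lazy products or 1x1 results, as the static assertion above enforces.
   Assumes the public MatrixBase::lazyProduct() API.

     Eigen::Matrix3d A = Eigen::Matrix3d::Random(), B = Eigen::Matrix3d::Random();
     double c01 = A.lazyProduct(B).coeff(0, 1);  // computes only the (0,1) coefficient
     // (A * B).coeff(0, 1) would trigger THIS_METHOD_IS_ONLY_FOR_INNER_OR_LAZY_PRODUCTS
*/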
|
||||
|
||||
} // end namespace Eigen
|
||||
|
||||
#endif // EIGEN_PRODUCT_H
1271
2025.09.22_cpp_with_eigen_package/Eigen/src/Core/ProductEvaluators.h
Normal file
File diff suppressed because it is too large
207
2025.09.22_cpp_with_eigen_package/Eigen/src/Core/Random.h
Normal file
@@ -0,0 +1,207 @@
|
||||
// This file is part of Eigen, a lightweight C++ template library
|
||||
// for linear algebra.
|
||||
//
|
||||
// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla
|
||||
// Public License v. 2.0. If a copy of the MPL was not distributed
|
||||
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
#ifndef EIGEN_RANDOM_H
|
||||
#define EIGEN_RANDOM_H
|
||||
|
||||
// IWYU pragma: private
|
||||
#include "./InternalHeaderCheck.h"
|
||||
|
||||
namespace Eigen {
|
||||
|
||||
namespace internal {
|
||||
|
||||
template <typename Scalar>
|
||||
struct scalar_random_op {
|
||||
inline const Scalar operator()() const { return random<Scalar>(); }
|
||||
};
|
||||
|
||||
template <typename Scalar>
|
||||
struct functor_traits<scalar_random_op<Scalar> > {
|
||||
enum { Cost = 5 * NumTraits<Scalar>::MulCost, PacketAccess = false, IsRepeatable = false };
|
||||
};
|
||||
|
||||
} // end namespace internal
|
||||
|
||||
/** \returns a random matrix expression
|
||||
*
|
||||
* Numbers are uniformly spread through their whole definition range for integer types,
|
||||
* and in the [-1:1] range for floating point scalar types.
|
||||
*
|
||||
* The parameters \a rows and \a cols are the number of rows and of columns of
|
||||
* the returned matrix. Must be compatible with this MatrixBase type.
|
||||
*
|
||||
* \not_reentrant
|
||||
*
|
||||
* This variant is meant to be used for dynamic-size matrix types. For fixed-size types,
|
||||
* it is redundant to pass \a rows and \a cols as arguments, so Random() should be used
|
||||
* instead.
|
||||
*
|
||||
*
|
||||
* Example: \include MatrixBase_random_int_int.cpp
|
||||
* Output: \verbinclude MatrixBase_random_int_int.out
|
||||
*
|
||||
* This expression has the "evaluate before nesting" flag so that it will be evaluated into
|
||||
* a temporary matrix whenever it is nested in a larger expression. This prevents unexpected
|
||||
* behavior with expressions involving random matrices.
|
||||
*
|
||||
* See DenseBase::NullaryExpr(Index, const CustomNullaryOp&) for an example using C++11 random generators.
|
||||
*
|
||||
* \sa DenseBase::setRandom(), DenseBase::Random(Index), DenseBase::Random()
|
||||
*/
|
||||
template <typename Derived>
|
||||
inline const typename DenseBase<Derived>::RandomReturnType DenseBase<Derived>::Random(Index rows, Index cols) {
|
||||
return NullaryExpr(rows, cols, internal::scalar_random_op<Scalar>());
|
||||
}
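/* Illustrative usage sketch (not part of this header; assumes <Eigen/Dense> is included):

     Eigen::MatrixXd m = Eigen::MatrixXd::Random(2, 3);  // dynamic size; entries uniform in [-1, 1]
     Eigen::MatrixXi n = Eigen::MatrixXi::Random(2, 3);  // integer entries spread over the whole range of int
*/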
|
||||
|
||||
/** \returns a random vector expression
|
||||
*
|
||||
* Numbers are uniformly spread through their whole definition range for integer types,
|
||||
* and in the [-1:1] range for floating point scalar types.
|
||||
*
|
||||
* The parameter \a size is the size of the returned vector.
|
||||
* Must be compatible with this MatrixBase type.
|
||||
*
|
||||
* \only_for_vectors
|
||||
* \not_reentrant
|
||||
*
|
||||
* This variant is meant to be used for dynamic-size vector types. For fixed-size types,
|
||||
* it is redundant to pass \a size as argument, so Random() should be used
|
||||
* instead.
|
||||
*
|
||||
* Example: \include MatrixBase_random_int.cpp
|
||||
* Output: \verbinclude MatrixBase_random_int.out
|
||||
*
|
||||
* This expression has the "evaluate before nesting" flag so that it will be evaluated into
|
||||
* a temporary vector whenever it is nested in a larger expression. This prevents unexpected
|
||||
* behavior with expressions involving random matrices.
|
||||
*
|
||||
* \sa DenseBase::setRandom(), DenseBase::Random(Index,Index), DenseBase::Random()
|
||||
*/
|
||||
template <typename Derived>
|
||||
inline const typename DenseBase<Derived>::RandomReturnType DenseBase<Derived>::Random(Index size) {
|
||||
return NullaryExpr(size, internal::scalar_random_op<Scalar>());
|
||||
}
|
||||
|
||||
/** \returns a fixed-size random matrix or vector expression
|
||||
*
|
||||
* Numbers are uniformly spread through their whole definition range for integer types,
|
||||
* and in the [-1:1] range for floating point scalar types.
|
||||
*
|
||||
* This variant is only for fixed-size MatrixBase types. For dynamic-size types, you
|
||||
* need to use the variants taking size arguments.
|
||||
*
|
||||
* Example: \include MatrixBase_random.cpp
|
||||
* Output: \verbinclude MatrixBase_random.out
|
||||
*
|
||||
* This expression has the "evaluate before nesting" flag so that it will be evaluated into
|
||||
* a temporary matrix whenever it is nested in a larger expression. This prevents unexpected
|
||||
* behavior with expressions involving random matrices.
|
||||
*
|
||||
* \not_reentrant
|
||||
*
|
||||
* \sa DenseBase::setRandom(), DenseBase::Random(Index,Index), DenseBase::Random(Index)
|
||||
*/
|
||||
template <typename Derived>
|
||||
inline const typename DenseBase<Derived>::RandomReturnType DenseBase<Derived>::Random() {
|
||||
return NullaryExpr(RowsAtCompileTime, ColsAtCompileTime, internal::scalar_random_op<Scalar>());
|
||||
}
|
||||
|
||||
/** Sets all coefficients in this expression to random values.
|
||||
*
|
||||
* Numbers are uniformly spread through their whole definition range for integer types,
|
||||
* and in the [-1:1] range for floating point scalar types.
|
||||
*
|
||||
* \not_reentrant
|
||||
*
|
||||
* Example: \include MatrixBase_setRandom.cpp
|
||||
* Output: \verbinclude MatrixBase_setRandom.out
|
||||
*
|
||||
* \sa class CwiseNullaryOp, setRandom(Index), setRandom(Index,Index)
|
||||
*/
|
||||
template <typename Derived>
|
||||
EIGEN_DEVICE_FUNC inline Derived& DenseBase<Derived>::setRandom() {
|
||||
return *this = Random(rows(), cols());
|
||||
}
|
||||
|
||||
/** Resizes to the given \a newSize, and sets all coefficients in this expression to random values.
|
||||
*
|
||||
* Numbers are uniformly spread through their whole definition range for integer types,
|
||||
* and in the [-1:1] range for floating point scalar types.
|
||||
*
|
||||
* \only_for_vectors
|
||||
* \not_reentrant
|
||||
*
|
||||
* Example: \include Matrix_setRandom_int.cpp
|
||||
* Output: \verbinclude Matrix_setRandom_int.out
|
||||
*
|
||||
* \sa DenseBase::setRandom(), setRandom(Index,Index), class CwiseNullaryOp, DenseBase::Random()
|
||||
*/
|
||||
template <typename Derived>
|
||||
EIGEN_STRONG_INLINE Derived& PlainObjectBase<Derived>::setRandom(Index newSize) {
|
||||
resize(newSize);
|
||||
return setRandom();
|
||||
}
|
||||
|
||||
/** Resizes to the given size, and sets all coefficients in this expression to random values.
|
||||
*
|
||||
* Numbers are uniformly spread through their whole definition range for integer types,
|
||||
* and in the [-1:1] range for floating point scalar types.
|
||||
*
|
||||
* \not_reentrant
|
||||
*
|
||||
* \param rows the new number of rows
|
||||
* \param cols the new number of columns
|
||||
*
|
||||
* Example: \include Matrix_setRandom_int_int.cpp
|
||||
* Output: \verbinclude Matrix_setRandom_int_int.out
|
||||
*
|
||||
* \sa DenseBase::setRandom(), setRandom(Index), class CwiseNullaryOp, DenseBase::Random()
|
||||
*/
|
||||
template <typename Derived>
|
||||
EIGEN_STRONG_INLINE Derived& PlainObjectBase<Derived>::setRandom(Index rows, Index cols) {
|
||||
resize(rows, cols);
|
||||
return setRandom();
|
||||
}
|
||||
|
||||
/** Resizes to the given size, changing only the number of columns, and sets all
|
||||
* coefficients in this expression to random values. For the parameter of type
|
||||
* NoChange_t, just pass the special value \c NoChange.
|
||||
*
|
||||
* Numbers are uniformly spread through their whole definition range for integer types,
|
||||
* and in the [-1:1] range for floating point scalar types.
|
||||
*
|
||||
* \not_reentrant
|
||||
*
|
||||
* \sa DenseBase::setRandom(), setRandom(Index), setRandom(Index, NoChange_t), class CwiseNullaryOp, DenseBase::Random()
|
||||
*/
|
||||
template <typename Derived>
|
||||
EIGEN_STRONG_INLINE Derived& PlainObjectBase<Derived>::setRandom(NoChange_t, Index cols) {
|
||||
return setRandom(rows(), cols);
|
||||
}
|
||||
|
||||
/** Resizes to the given size, changing only the number of rows, and sets all
|
||||
* coefficients in this expression to random values. For the parameter of type
|
||||
* NoChange_t, just pass the special value \c NoChange.
|
||||
*
|
||||
* Numbers are uniformly spread through their whole definition range for integer types,
|
||||
* and in the [-1:1] range for floating point scalar types.
|
||||
*
|
||||
* \not_reentrant
|
||||
*
|
||||
* \sa DenseBase::setRandom(), setRandom(Index), setRandom(NoChange_t, Index), class CwiseNullaryOp, DenseBase::Random()
|
||||
*/
|
||||
template <typename Derived>
|
||||
EIGEN_STRONG_INLINE Derived& PlainObjectBase<Derived>::setRandom(Index rows, NoChange_t) {
|
||||
return setRandom(rows, cols());
|
||||
}
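/* Illustrative usage sketch (not part of this header) covering the resizing setRandom() variants
   defined above:

     Eigen::MatrixXd m(2, 2);
     m.setRandom(4, 5);                // resize to 4x5, then fill with random values
     m.setRandom(Eigen::NoChange, 7);  // keep the 4 rows, resize to 7 columns, refill
     m.setRandom(6, Eigen::NoChange);  // resize to 6 rows, keep the 7 columns, refill
*/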
|
||||
|
||||
} // end namespace Eigen
|
||||
|
||||
#endif // EIGEN_RANDOM_H
262
2025.09.22_cpp_with_eigen_package/Eigen/src/Core/RandomImpl.h
Normal file
@@ -0,0 +1,262 @@
|
||||
// This file is part of Eigen, a lightweight C++ template library
|
||||
// for linear algebra.
|
||||
//
|
||||
// Copyright (C) 2024 Charles Schlosser <cs.schlosser@gmail.com>
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla
|
||||
// Public License v. 2.0. If a copy of the MPL was not distributed
|
||||
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
#ifndef EIGEN_RANDOM_IMPL_H
|
||||
#define EIGEN_RANDOM_IMPL_H
|
||||
|
||||
// IWYU pragma: private
|
||||
#include "./InternalHeaderCheck.h"
|
||||
|
||||
namespace Eigen {
|
||||
|
||||
namespace internal {
|
||||
|
||||
/****************************************************************************
|
||||
* Implementation of random *
|
||||
****************************************************************************/
|
||||
|
||||
template <typename Scalar, bool IsComplex, bool IsInteger>
|
||||
struct random_default_impl {};
|
||||
|
||||
template <typename Scalar>
|
||||
struct random_impl : random_default_impl<Scalar, NumTraits<Scalar>::IsComplex, NumTraits<Scalar>::IsInteger> {};
|
||||
|
||||
template <typename Scalar>
|
||||
struct random_retval {
|
||||
typedef Scalar type;
|
||||
};
|
||||
|
||||
template <typename Scalar>
|
||||
inline EIGEN_MATHFUNC_RETVAL(random, Scalar) random(const Scalar& x, const Scalar& y) {
|
||||
return EIGEN_MATHFUNC_IMPL(random, Scalar)::run(x, y);
|
||||
}
|
||||
|
||||
template <typename Scalar>
|
||||
inline EIGEN_MATHFUNC_RETVAL(random, Scalar) random() {
|
||||
return EIGEN_MATHFUNC_IMPL(random, Scalar)::run();
|
||||
}
|
||||
|
||||
// TODO: replace or provide alternatives to this, e.g. std::random_device
|
||||
struct eigen_random_device {
|
||||
using ReturnType = int;
|
||||
static constexpr int Entropy = meta_floor_log2<(unsigned int)(RAND_MAX) + 1>::value;
|
||||
static constexpr ReturnType Highest = RAND_MAX;
|
||||
static EIGEN_DEVICE_FUNC inline ReturnType run() { return std::rand(); }
|
||||
};
|
||||
|
||||
// Fill a built-in unsigned integer with numRandomBits beginning with the least significant bit
|
||||
template <typename Scalar>
|
||||
struct random_bits_impl {
|
||||
EIGEN_STATIC_ASSERT(std::is_unsigned<Scalar>::value, SCALAR MUST BE A BUILT - IN UNSIGNED INTEGER)
|
||||
using RandomDevice = eigen_random_device;
|
||||
using RandomReturnType = typename RandomDevice::ReturnType;
|
||||
static constexpr int kEntropy = RandomDevice::Entropy;
|
||||
static constexpr int kTotalBits = sizeof(Scalar) * CHAR_BIT;
|
||||
// return a Scalar filled with numRandomBits beginning from the least significant bit
|
||||
static EIGEN_DEVICE_FUNC inline Scalar run(int numRandomBits) {
|
||||
eigen_assert((numRandomBits >= 0) && (numRandomBits <= kTotalBits));
|
||||
const Scalar mask = Scalar(-1) >> ((kTotalBits - numRandomBits) & (kTotalBits - 1));
|
||||
Scalar randomBits = 0;
|
||||
for (int shift = 0; shift < numRandomBits; shift += kEntropy) {
|
||||
RandomReturnType r = RandomDevice::run();
|
||||
randomBits |= static_cast<Scalar>(r) << shift;
|
||||
}
|
||||
// clear the excess bits
|
||||
randomBits &= mask;
|
||||
return randomBits;
|
||||
}
|
||||
};
|
||||
|
||||
template <typename BitsType>
|
||||
EIGEN_DEVICE_FUNC inline BitsType getRandomBits(int numRandomBits) {
|
||||
return random_bits_impl<BitsType>::run(numRandomBits);
|
||||
}
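/* Illustrative standalone sketch (not part of this header) of the same bit-filling idea:
   accumulate draws from an entropy-limited source (std::rand(), which guarantees at least
   15 random bits) until numRandomBits bits are collected, then mask off the excess.

     #include <cstdint>
     #include <cstdlib>

     inline std::uint64_t fill_random_bits(int numRandomBits) {
       constexpr int kEntropy = 15;    // bits guaranteed by std::rand() (RAND_MAX >= 32767)
       constexpr int kTotalBits = 64;
       const std::uint64_t mask =
           std::uint64_t(-1) >> ((kTotalBits - numRandomBits) & (kTotalBits - 1));
       std::uint64_t bits = 0;
       for (int shift = 0; shift < numRandomBits; shift += kEntropy)
         bits |= static_cast<std::uint64_t>(std::rand() & 0x7FFF) << shift;
       return bits & mask;  // clear the excess high bits
     }
*/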
|
||||
|
||||
// random implementation for a built-in floating point type
|
||||
template <typename Scalar, bool BuiltIn = std::is_floating_point<Scalar>::value>
|
||||
struct random_float_impl {
|
||||
using BitsType = typename numext::get_integer_by_size<sizeof(Scalar)>::unsigned_type;
|
||||
static constexpr EIGEN_DEVICE_FUNC inline int mantissaBits() {
|
||||
const int digits = NumTraits<Scalar>::digits();
|
||||
return digits - 1;
|
||||
}
|
||||
static EIGEN_DEVICE_FUNC inline Scalar run(int numRandomBits) {
|
||||
eigen_assert(numRandomBits >= 0 && numRandomBits <= mantissaBits());
|
||||
BitsType randomBits = getRandomBits<BitsType>(numRandomBits);
|
||||
// if fewer than MantissaBits is requested, shift them to the left
|
||||
randomBits <<= (mantissaBits() - numRandomBits);
|
||||
// randomBits is in the half-open interval [2,4)
|
||||
randomBits |= numext::bit_cast<BitsType>(Scalar(2));
|
||||
// result is in the half-open interval [-1,1)
|
||||
Scalar result = numext::bit_cast<Scalar>(randomBits) - Scalar(3);
|
||||
return result;
|
||||
}
|
||||
};
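/* Illustrative standalone sketch (not part of this header) of the bit trick used above, written
   out for float (23 mantissa bits): placing random mantissa bits under the exponent of 2.0f
   yields a value in [2, 4), and subtracting 3 shifts it into [-1, 1).

     #include <cstdint>
     #include <cstring>
     #include <random>

     inline float uniform_minus_one_to_one() {
       static std::mt19937 gen{std::random_device{}()};
       std::uint32_t mantissa = static_cast<std::uint32_t>(gen()) & 0x7FFFFFu;  // 23 random bits
       float two = 2.0f;
       std::uint32_t bits;
       std::memcpy(&bits, &two, sizeof(bits));  // bit pattern of 2.0f
       bits |= mantissa;                        // now encodes a value in [2, 4)
       float result;
       std::memcpy(&result, &bits, sizeof(result));
       return result - 3.0f;                    // shift to [-1, 1)
     }
*/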
|
||||
// random implementation for a custom floating point type
|
||||
// uses double as the implementation with a mantissa with a size equal to either the target scalar's mantissa or that of
|
||||
// double, whichever is smaller
|
||||
template <typename Scalar>
|
||||
struct random_float_impl<Scalar, false> {
|
||||
static EIGEN_DEVICE_FUNC inline int mantissaBits() {
|
||||
const int digits = NumTraits<Scalar>::digits();
|
||||
constexpr int kDoubleDigits = NumTraits<double>::digits();
|
||||
return numext::mini(digits, kDoubleDigits) - 1;
|
||||
}
|
||||
static EIGEN_DEVICE_FUNC inline Scalar run(int numRandomBits) {
|
||||
eigen_assert(numRandomBits >= 0 && numRandomBits <= mantissaBits());
|
||||
Scalar result = static_cast<Scalar>(random_float_impl<double>::run(numRandomBits));
|
||||
return result;
|
||||
}
|
||||
};
|
||||
|
||||
#if !EIGEN_COMP_NVCC
|
||||
// random implementation for long double
|
||||
// this specialization is not compatible with double-double scalars
|
||||
template <bool Specialize = (sizeof(long double) == 2 * sizeof(uint64_t)) &&
|
||||
((std::numeric_limits<long double>::digits != (2 * std::numeric_limits<double>::digits)))>
|
||||
struct random_longdouble_impl {
|
||||
static constexpr int Size = sizeof(long double);
|
||||
static constexpr EIGEN_DEVICE_FUNC int mantissaBits() { return NumTraits<long double>::digits() - 1; }
|
||||
static EIGEN_DEVICE_FUNC inline long double run(int numRandomBits) {
|
||||
eigen_assert(numRandomBits >= 0 && numRandomBits <= mantissaBits());
|
||||
EIGEN_USING_STD(memcpy);
|
||||
int numLowBits = numext::mini(numRandomBits, 64);
|
||||
int numHighBits = numext::maxi(numRandomBits - 64, 0);
|
||||
uint64_t randomBits[2];
|
||||
long double result = 2.0L;
|
||||
memcpy(&randomBits, &result, Size);
|
||||
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
|
||||
randomBits[0] |= getRandomBits<uint64_t>(numLowBits);
|
||||
randomBits[1] |= getRandomBits<uint64_t>(numHighBits);
|
||||
#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
|
||||
randomBits[0] |= getRandomBits<uint64_t>(numHighBits);
|
||||
randomBits[1] |= getRandomBits<uint64_t>(numLowBits);
|
||||
#else
|
||||
#error Unexpected or undefined __BYTE_ORDER__
|
||||
#endif
|
||||
memcpy(&result, &randomBits, Size);
|
||||
result -= 3.0L;
|
||||
return result;
|
||||
}
|
||||
};
|
||||
template <>
|
||||
struct random_longdouble_impl<false> {
|
||||
static constexpr EIGEN_DEVICE_FUNC int mantissaBits() { return NumTraits<double>::digits() - 1; }
|
||||
static EIGEN_DEVICE_FUNC inline long double run(int numRandomBits) {
|
||||
return static_cast<long double>(random_float_impl<double>::run(numRandomBits));
|
||||
}
|
||||
};
|
||||
template <>
|
||||
struct random_float_impl<long double> : random_longdouble_impl<> {};
|
||||
#endif
|
||||
|
||||
template <typename Scalar>
|
||||
struct random_default_impl<Scalar, false, false> {
|
||||
using Impl = random_float_impl<Scalar>;
|
||||
static EIGEN_DEVICE_FUNC inline Scalar run(const Scalar& x, const Scalar& y, int numRandomBits) {
|
||||
Scalar half_x = Scalar(0.5) * x;
|
||||
Scalar half_y = Scalar(0.5) * y;
|
||||
Scalar result = (half_x + half_y) + (half_y - half_x) * run(numRandomBits);
|
||||
// result is in the half-open interval [x, y) -- provided that x < y
|
||||
return result;
|
||||
}
|
||||
static EIGEN_DEVICE_FUNC inline Scalar run(const Scalar& x, const Scalar& y) {
|
||||
return run(x, y, Impl::mantissaBits());
|
||||
}
|
||||
static EIGEN_DEVICE_FUNC inline Scalar run(int numRandomBits) { return Impl::run(numRandomBits); }
|
||||
static EIGEN_DEVICE_FUNC inline Scalar run() { return run(Impl::mantissaBits()); }
|
||||
};
|
||||
|
||||
template <typename Scalar, bool IsSigned = NumTraits<Scalar>::IsSigned, bool BuiltIn = std::is_integral<Scalar>::value>
|
||||
struct random_int_impl;
|
||||
|
||||
// random implementation for a built-in unsigned integer type
|
||||
template <typename Scalar>
|
||||
struct random_int_impl<Scalar, false, true> {
|
||||
static constexpr int kTotalBits = sizeof(Scalar) * CHAR_BIT;
|
||||
static EIGEN_DEVICE_FUNC inline Scalar run(const Scalar& x, const Scalar& y) {
|
||||
if (y <= x) return x;
|
||||
Scalar range = y - x;
|
||||
// handle edge case where [x,y] spans the entire range of Scalar
|
||||
if (range == NumTraits<Scalar>::highest()) return run();
|
||||
Scalar count = range + 1;
|
||||
// calculate the number of random bits needed to fill range
|
||||
int numRandomBits = log2_ceil(count);
|
||||
Scalar randomBits;
|
||||
do {
|
||||
randomBits = getRandomBits<Scalar>(numRandomBits);
|
||||
// if the random draw is outside [0, range), try again (rejection sampling)
|
||||
// in the worst-case scenario, the probability of rejection is: 1/2 - 1/2^numRandomBits < 50%
|
||||
} while (randomBits >= count);
|
||||
Scalar result = x + randomBits;
|
||||
return result;
|
||||
}
|
||||
static EIGEN_DEVICE_FUNC inline Scalar run() { return getRandomBits<Scalar>(kTotalBits); }
|
||||
};
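/* Illustrative standalone sketch (not part of this header) of the rejection-sampling scheme above:
   draw ceil(log2(count)) random bits and retry while the draw falls outside [0, count). The
   draw_bits argument is a hypothetical stand-in for getRandomBits<>().

     #include <cstdint>

     inline std::uint32_t uniform_in_range(std::uint32_t x, std::uint32_t y,
                                           std::uint32_t (*draw_bits)(int)) {
       if (y <= x) return x;
       const std::uint32_t count = y - x + 1;  // assumes [x, y] does not span the whole type
       int numBits = 0;
       while ((std::uint64_t(1) << numBits) < count) ++numBits;  // ceil(log2(count))
       std::uint32_t r;
       do {
         r = draw_bits(numBits);  // uniform in [0, 2^numBits)
       } while (r >= count);      // reject out-of-range draws; worst case fewer than half are rejected
       return x + r;
     }
*/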
|
||||
|
||||
// random implementation for a built-in signed integer type
|
||||
template <typename Scalar>
|
||||
struct random_int_impl<Scalar, true, true> {
|
||||
static constexpr int kTotalBits = sizeof(Scalar) * CHAR_BIT;
|
||||
using BitsType = typename make_unsigned<Scalar>::type;
|
||||
static EIGEN_DEVICE_FUNC inline Scalar run(const Scalar& x, const Scalar& y) {
|
||||
if (y <= x) return x;
|
||||
// Avoid overflow by representing `range` as an unsigned type
|
||||
BitsType range = static_cast<BitsType>(y) - static_cast<BitsType>(x);
|
||||
BitsType randomBits = random_int_impl<BitsType>::run(0, range);
|
||||
// Avoid overflow in the case where `x` is negative and there is a large range so
|
||||
// `randomBits` would also be negative if cast to `Scalar` first.
|
||||
Scalar result = static_cast<Scalar>(static_cast<BitsType>(x) + randomBits);
|
||||
return result;
|
||||
}
|
||||
static EIGEN_DEVICE_FUNC inline Scalar run() { return static_cast<Scalar>(getRandomBits<BitsType>(kTotalBits)); }
|
||||
};
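/* Worked example (illustrative) of the overflow-avoiding casts above: for int8_t with x = -100
   and y = 100, y - x = 200 overflows the signed type, but computed in uint8_t it is exactly 200.
   A draw of 150 then gives uint8_t(-100) + 150 = 156 + 150 = 306 mod 256 = 50, and casting back
   to int8_t yields 50, which is indeed x + 150. */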
|
||||
|
||||
// todo: custom integers
|
||||
template <typename Scalar, bool IsSigned>
|
||||
struct random_int_impl<Scalar, IsSigned, false> {
|
||||
static EIGEN_DEVICE_FUNC inline Scalar run(const Scalar&, const Scalar&) { return run(); }
|
||||
static EIGEN_DEVICE_FUNC inline Scalar run() {
|
||||
eigen_assert(std::false_type::value && "RANDOM FOR CUSTOM INTEGERS NOT YET SUPPORTED");
|
||||
return Scalar(0);
|
||||
}
|
||||
};
|
||||
|
||||
template <typename Scalar>
|
||||
struct random_default_impl<Scalar, false, true> : random_int_impl<Scalar> {};
|
||||
|
||||
template <>
|
||||
struct random_impl<bool> {
|
||||
static EIGEN_DEVICE_FUNC inline bool run(const bool& x, const bool& y) {
|
||||
if (y <= x) return x;
|
||||
return run();
|
||||
}
|
||||
static EIGEN_DEVICE_FUNC inline bool run() { return getRandomBits<unsigned>(1) ? true : false; }
|
||||
};
|
||||
|
||||
template <typename Scalar>
|
||||
struct random_default_impl<Scalar, true, false> {
|
||||
typedef typename NumTraits<Scalar>::Real RealScalar;
|
||||
using Impl = random_impl<RealScalar>;
|
||||
static EIGEN_DEVICE_FUNC inline Scalar run(const Scalar& x, const Scalar& y, int numRandomBits) {
|
||||
return Scalar(Impl::run(x.real(), y.real(), numRandomBits), Impl::run(x.imag(), y.imag(), numRandomBits));
|
||||
}
|
||||
static EIGEN_DEVICE_FUNC inline Scalar run(const Scalar& x, const Scalar& y) {
|
||||
return Scalar(Impl::run(x.real(), y.real()), Impl::run(x.imag(), y.imag()));
|
||||
}
|
||||
static EIGEN_DEVICE_FUNC inline Scalar run(int numRandomBits) {
|
||||
return Scalar(Impl::run(numRandomBits), Impl::run(numRandomBits));
|
||||
}
|
||||
static EIGEN_DEVICE_FUNC inline Scalar run() { return Scalar(Impl::run(), Impl::run()); }
|
||||
};
|
||||
|
||||
} // namespace internal
|
||||
} // namespace Eigen
|
||||
|
||||
#endif // EIGEN_RANDOM_IMPL_H
250
2025.09.22_cpp_with_eigen_package/Eigen/src/Core/RealView.h
Normal file
@@ -0,0 +1,250 @@
|
||||
// This file is part of Eigen, a lightweight C++ template library
|
||||
// for linear algebra.
|
||||
//
|
||||
// Copyright (C) 2025 Charlie Schlosser <cs.schlosser@gmail.com>
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla
|
||||
// Public License v. 2.0. If a copy of the MPL was not distributed
|
||||
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
#ifndef EIGEN_REALVIEW_H
|
||||
#define EIGEN_REALVIEW_H
|
||||
|
||||
// IWYU pragma: private
|
||||
#include "./InternalHeaderCheck.h"
|
||||
|
||||
namespace Eigen {
|
||||
|
||||
namespace internal {
|
||||
|
||||
// Vectorized assignment to RealView requires array-oriented access to the real and imaginary components.
|
||||
// From https://en.cppreference.com/w/cpp/numeric/complex.html:
|
||||
// For any pointer to an element of an array of std::complex<T> named p and any valid array index i,
|
||||
// reinterpret_cast<T*>(p)[2 * i] is the real part of the complex number p[i], and
|
||||
// reinterpret_cast<T*>(p)[2 * i + 1] is the imaginary part of the complex number p[i].
|
||||
|
||||
template <typename ComplexScalar>
|
||||
struct complex_array_access : std::false_type {};
|
||||
template <>
|
||||
struct complex_array_access<std::complex<float>> : std::true_type {};
|
||||
template <>
|
||||
struct complex_array_access<std::complex<double>> : std::true_type {};
|
||||
template <>
|
||||
struct complex_array_access<std::complex<long double>> : std::true_type {};
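/* Illustrative standalone sketch (not part of this header) of the array-access guarantee quoted
   above:

     #include <cassert>
     #include <complex>

     inline void complex_array_access_demo() {
       std::complex<float> z[2] = {{1.0f, 2.0f}, {3.0f, 4.0f}};
       const float* r = reinterpret_cast<const float*>(z);
       assert(r[0] == 1.0f && r[1] == 2.0f);  // real and imaginary parts of z[0]
       assert(r[2] == 3.0f && r[3] == 4.0f);  // real and imaginary parts of z[1]
     }
*/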
|
||||
|
||||
template <typename Xpr>
|
||||
struct traits<RealView<Xpr>> : public traits<Xpr> {
|
||||
template <typename T>
|
||||
static constexpr int double_size(T size, bool times_two) {
|
||||
int size_as_int = int(size);
|
||||
if (size_as_int == Dynamic) return Dynamic;
|
||||
return times_two ? (2 * size_as_int) : size_as_int;
|
||||
}
|
||||
using Base = traits<Xpr>;
|
||||
using ComplexScalar = typename Base::Scalar;
|
||||
using Scalar = typename NumTraits<ComplexScalar>::Real;
|
||||
static constexpr int ActualDirectAccessBit = complex_array_access<ComplexScalar>::value ? DirectAccessBit : 0;
|
||||
static constexpr int ActualPacketAccessBit = packet_traits<Scalar>::Vectorizable ? PacketAccessBit : 0;
|
||||
static constexpr int FlagMask =
|
||||
ActualDirectAccessBit | ActualPacketAccessBit | HereditaryBits | LinearAccessBit | LvalueBit;
|
||||
static constexpr int BaseFlags = int(evaluator<Xpr>::Flags) | int(Base::Flags);
|
||||
static constexpr int Flags = BaseFlags & FlagMask;
|
||||
static constexpr bool IsRowMajor = Flags & RowMajorBit;
|
||||
static constexpr int RowsAtCompileTime = double_size(Base::RowsAtCompileTime, !IsRowMajor);
|
||||
static constexpr int ColsAtCompileTime = double_size(Base::ColsAtCompileTime, IsRowMajor);
|
||||
static constexpr int SizeAtCompileTime = size_at_compile_time(RowsAtCompileTime, ColsAtCompileTime);
|
||||
static constexpr int MaxRowsAtCompileTime = double_size(Base::MaxRowsAtCompileTime, !IsRowMajor);
|
||||
static constexpr int MaxColsAtCompileTime = double_size(Base::MaxColsAtCompileTime, IsRowMajor);
|
||||
static constexpr int MaxSizeAtCompileTime = size_at_compile_time(MaxRowsAtCompileTime, MaxColsAtCompileTime);
|
||||
static constexpr int OuterStrideAtCompileTime = double_size(outer_stride_at_compile_time<Xpr>::ret, true);
|
||||
static constexpr int InnerStrideAtCompileTime = inner_stride_at_compile_time<Xpr>::ret;
|
||||
};
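/* Worked example (illustrative): for a column-major 3x4 MatrixXcf, the inner (row) dimension is
   the one that is doubled, so its RealView is a 6x4 real-valued expression; for a row-major
   complex matrix the column count doubles instead. Measured in real scalars, the outer stride
   doubles while the inner stride is kept as-is. */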
|
||||
|
||||
template <typename Xpr>
|
||||
struct evaluator<RealView<Xpr>> : private evaluator<Xpr> {
|
||||
using BaseEvaluator = evaluator<Xpr>;
|
||||
using XprType = RealView<Xpr>;
|
||||
using ExpressionTraits = traits<XprType>;
|
||||
using ComplexScalar = typename ExpressionTraits::ComplexScalar;
|
||||
using ComplexCoeffReturnType = typename BaseEvaluator::CoeffReturnType;
|
||||
using Scalar = typename ExpressionTraits::Scalar;
|
||||
|
||||
static constexpr bool IsRowMajor = ExpressionTraits::IsRowMajor;
|
||||
static constexpr int Flags = ExpressionTraits::Flags;
|
||||
static constexpr int CoeffReadCost = BaseEvaluator::CoeffReadCost;
|
||||
static constexpr int Alignment = BaseEvaluator::Alignment;
|
||||
|
||||
EIGEN_DEVICE_FUNC explicit evaluator(XprType realView) : BaseEvaluator(realView.m_xpr) {}
|
||||
|
||||
template <bool Enable = std::is_reference<ComplexCoeffReturnType>::value, typename = std::enable_if_t<!Enable>>
|
||||
constexpr EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar coeff(Index row, Index col) const {
|
||||
ComplexCoeffReturnType cscalar = BaseEvaluator::coeff(IsRowMajor ? row : row / 2, IsRowMajor ? col / 2 : col);
|
||||
Index p = (IsRowMajor ? col : row) & 1;
|
||||
return p ? numext::imag(cscalar) : numext::real(cscalar);
|
||||
}
|
||||
|
||||
template <bool Enable = std::is_reference<ComplexCoeffReturnType>::value, typename = std::enable_if_t<Enable>>
|
||||
constexpr EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& coeff(Index row, Index col) const {
|
||||
ComplexCoeffReturnType cscalar = BaseEvaluator::coeff(IsRowMajor ? row : row / 2, IsRowMajor ? col / 2 : col);
|
||||
Index p = (IsRowMajor ? col : row) & 1;
|
||||
return reinterpret_cast<const Scalar(&)[2]>(cscalar)[p];
|
||||
}
|
||||
|
||||
constexpr EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(Index row, Index col) {
|
||||
ComplexScalar& cscalar = BaseEvaluator::coeffRef(IsRowMajor ? row : row / 2, IsRowMajor ? col / 2 : col);
|
||||
Index p = (IsRowMajor ? col : row) & 1;
|
||||
return reinterpret_cast<Scalar(&)[2]>(cscalar)[p];
|
||||
}
|
||||
|
||||
template <bool Enable = std::is_reference<ComplexCoeffReturnType>::value, typename = std::enable_if_t<!Enable>>
|
||||
constexpr EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar coeff(Index index) const {
|
||||
ComplexCoeffReturnType cscalar = BaseEvaluator::coeff(index / 2);
|
||||
Index p = index & 1;
|
||||
return p ? numext::imag(cscalar) : numext::real(cscalar);
|
||||
}
|
||||
|
||||
template <bool Enable = std::is_reference<ComplexCoeffReturnType>::value, typename = std::enable_if_t<Enable>>
|
||||
constexpr EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& coeff(Index index) const {
|
||||
ComplexCoeffReturnType cscalar = BaseEvaluator::coeff(index / 2);
|
||||
Index p = index & 1;
|
||||
return reinterpret_cast<const Scalar(&)[2]>(cscalar)[p];
|
||||
}
|
||||
|
||||
constexpr EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(Index index) {
|
||||
ComplexScalar& cscalar = BaseEvaluator::coeffRef(index / 2);
|
||||
Index p = index & 1;
|
||||
return reinterpret_cast<Scalar(&)[2]>(cscalar)[p];
|
||||
}
|
||||
|
||||
template <int LoadMode, typename PacketType>
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketType packet(Index row, Index col) const {
|
||||
constexpr int RealPacketSize = unpacket_traits<PacketType>::size;
|
||||
using ComplexPacket = typename find_packet_by_size<ComplexScalar, RealPacketSize / 2>::type;
|
||||
EIGEN_STATIC_ASSERT((find_packet_by_size<ComplexScalar, RealPacketSize / 2>::value),
|
||||
MISSING COMPATIBLE COMPLEX PACKET TYPE)
|
||||
eigen_assert(((IsRowMajor ? col : row) % 2 == 0) && "the inner index must be even");
|
||||
|
||||
Index crow = IsRowMajor ? row : row / 2;
|
||||
Index ccol = IsRowMajor ? col / 2 : col;
|
||||
ComplexPacket cpacket = BaseEvaluator::template packet<LoadMode, ComplexPacket>(crow, ccol);
|
||||
return preinterpret<PacketType, ComplexPacket>(cpacket);
|
||||
}
|
||||
|
||||
template <int LoadMode, typename PacketType>
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketType packet(Index index) const {
|
||||
constexpr int RealPacketSize = unpacket_traits<PacketType>::size;
|
||||
using ComplexPacket = typename find_packet_by_size<ComplexScalar, RealPacketSize / 2>::type;
|
||||
EIGEN_STATIC_ASSERT((find_packet_by_size<ComplexScalar, RealPacketSize / 2>::value),
|
||||
MISSING COMPATIBLE COMPLEX PACKET TYPE)
|
||||
eigen_assert((index % 2 == 0) && "the index must be even");
|
||||
|
||||
Index cindex = index / 2;
|
||||
ComplexPacket cpacket = BaseEvaluator::template packet<LoadMode, ComplexPacket>(cindex);
|
||||
return preinterpret<PacketType, ComplexPacket>(cpacket);
|
||||
}
|
||||
|
||||
template <int LoadMode, typename PacketType>
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketType packetSegment(Index row, Index col, Index begin, Index count) const {
|
||||
constexpr int RealPacketSize = unpacket_traits<PacketType>::size;
|
||||
using ComplexPacket = typename find_packet_by_size<ComplexScalar, RealPacketSize / 2>::type;
|
||||
EIGEN_STATIC_ASSERT((find_packet_by_size<ComplexScalar, RealPacketSize / 2>::value),
|
||||
MISSING COMPATIBLE COMPLEX PACKET TYPE)
|
||||
eigen_assert(((IsRowMajor ? col : row) % 2 == 0) && "the inner index must be even");
|
||||
eigen_assert((begin % 2 == 0) && (count % 2 == 0) && "begin and count must be even");
|
||||
|
||||
Index crow = IsRowMajor ? row : row / 2;
|
||||
Index ccol = IsRowMajor ? col / 2 : col;
|
||||
Index cbegin = begin / 2;
|
||||
Index ccount = count / 2;
|
||||
ComplexPacket cpacket = BaseEvaluator::template packetSegment<LoadMode, ComplexPacket>(crow, ccol, cbegin, ccount);
|
||||
return preinterpret<PacketType, ComplexPacket>(cpacket);
|
||||
}
|
||||
|
||||
template <int LoadMode, typename PacketType>
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketType packetSegment(Index index, Index begin, Index count) const {
|
||||
constexpr int RealPacketSize = unpacket_traits<PacketType>::size;
|
||||
using ComplexPacket = typename find_packet_by_size<ComplexScalar, RealPacketSize / 2>::type;
|
||||
EIGEN_STATIC_ASSERT((find_packet_by_size<ComplexScalar, RealPacketSize / 2>::value),
|
||||
MISSING COMPATIBLE COMPLEX PACKET TYPE)
|
||||
eigen_assert((index % 2 == 0) && "the index must be even");
|
||||
eigen_assert((begin % 2 == 0) && (count % 2 == 0) && "begin and count must be even");
|
||||
|
||||
Index cindex = index / 2;
|
||||
Index cbegin = begin / 2;
|
||||
Index ccount = count / 2;
|
||||
ComplexPacket cpacket = BaseEvaluator::template packetSegment<LoadMode, ComplexPacket>(cindex, cbegin, ccount);
|
||||
return preinterpret<PacketType, ComplexPacket>(cpacket);
|
||||
}
|
||||
};
|
||||
|
||||
} // namespace internal
|
||||
|
||||
template <typename Xpr>
|
||||
class RealView : public internal::dense_xpr_base<RealView<Xpr>>::type {
|
||||
using ExpressionTraits = internal::traits<RealView>;
|
||||
EIGEN_STATIC_ASSERT(NumTraits<typename Xpr::Scalar>::IsComplex, SCALAR MUST BE COMPLEX)
|
||||
public:
|
||||
using Scalar = typename ExpressionTraits::Scalar;
|
||||
using Nested = RealView;
|
||||
|
||||
EIGEN_DEVICE_FUNC explicit RealView(Xpr& xpr) : m_xpr(xpr) {}
|
||||
EIGEN_DEVICE_FUNC constexpr Index rows() const noexcept { return Xpr::IsRowMajor ? m_xpr.rows() : 2 * m_xpr.rows(); }
|
||||
EIGEN_DEVICE_FUNC constexpr Index cols() const noexcept { return Xpr::IsRowMajor ? 2 * m_xpr.cols() : m_xpr.cols(); }
|
||||
EIGEN_DEVICE_FUNC constexpr Index size() const noexcept { return 2 * m_xpr.size(); }
|
||||
EIGEN_DEVICE_FUNC constexpr Index innerStride() const noexcept { return m_xpr.innerStride(); }
|
||||
EIGEN_DEVICE_FUNC constexpr Index outerStride() const noexcept { return 2 * m_xpr.outerStride(); }
|
||||
EIGEN_DEVICE_FUNC void resize(Index rows, Index cols) {
|
||||
m_xpr.resize(Xpr::IsRowMajor ? rows : rows / 2, Xpr::IsRowMajor ? cols / 2 : cols);
|
||||
}
|
||||
EIGEN_DEVICE_FUNC void resize(Index size) { m_xpr.resize(size / 2); }
|
||||
EIGEN_DEVICE_FUNC Scalar* data() { return reinterpret_cast<Scalar*>(m_xpr.data()); }
|
||||
EIGEN_DEVICE_FUNC const Scalar* data() const { return reinterpret_cast<const Scalar*>(m_xpr.data()); }
|
||||
|
||||
EIGEN_DEVICE_FUNC RealView(const RealView&) = default;
|
||||
|
||||
EIGEN_DEVICE_FUNC RealView& operator=(const RealView& other);
|
||||
|
||||
template <typename OtherDerived>
|
||||
EIGEN_DEVICE_FUNC RealView& operator=(const RealView<OtherDerived>& other);
|
||||
|
||||
template <typename OtherDerived>
|
||||
EIGEN_DEVICE_FUNC RealView& operator=(const DenseBase<OtherDerived>& other);
|
||||
|
||||
protected:
|
||||
friend struct internal::evaluator<RealView<Xpr>>;
|
||||
Xpr& m_xpr;
|
||||
};
|
||||
|
||||
template <typename Xpr>
|
||||
EIGEN_DEVICE_FUNC RealView<Xpr>& RealView<Xpr>::operator=(const RealView& other) {
|
||||
internal::call_assignment(*this, other);
|
||||
return *this;
|
||||
}
|
||||
|
||||
template <typename Xpr>
|
||||
template <typename OtherDerived>
|
||||
EIGEN_DEVICE_FUNC RealView<Xpr>& RealView<Xpr>::operator=(const RealView<OtherDerived>& other) {
|
||||
internal::call_assignment(*this, other);
|
||||
return *this;
|
||||
}
|
||||
|
||||
template <typename Xpr>
|
||||
template <typename OtherDerived>
|
||||
EIGEN_DEVICE_FUNC RealView<Xpr>& RealView<Xpr>::operator=(const DenseBase<OtherDerived>& other) {
|
||||
internal::call_assignment(*this, other.derived());
|
||||
return *this;
|
||||
}
|
||||
|
||||
template <typename Derived>
|
||||
EIGEN_DEVICE_FUNC typename DenseBase<Derived>::RealViewReturnType DenseBase<Derived>::realView() {
|
||||
return RealViewReturnType(derived());
|
||||
}
|
||||
|
||||
template <typename Derived>
|
||||
EIGEN_DEVICE_FUNC typename DenseBase<Derived>::ConstRealViewReturnType DenseBase<Derived>::realView() const {
|
||||
return ConstRealViewReturnType(derived());
|
||||
}
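/* Illustrative usage sketch (not part of this header; assumes assignment from a real-valued
   expression is supported, as the operator= overloads above suggest):

     Eigen::VectorXcd v(2);
     Eigen::Vector4d r(1.0, 2.0, 3.0, 4.0);
     v.realView() = r;              // interleaved assignment: v becomes (1+2i, 3+4i)
     double im1 = v.realView()(3);  // 4.0, the imaginary part of v(1)
*/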
|
||||
|
||||
} // namespace Eigen
|
||||
|
||||
#endif // EIGEN_REALVIEW_H
535
2025.09.22_cpp_with_eigen_package/Eigen/src/Core/Redux.h
Normal file
@@ -0,0 +1,535 @@
|
||||
// This file is part of Eigen, a lightweight C++ template library
|
||||
// for linear algebra.
|
||||
//
|
||||
// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
|
||||
// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla
|
||||
// Public License v. 2.0. If a copy of the MPL was not distributed
|
||||
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
#ifndef EIGEN_REDUX_H
|
||||
#define EIGEN_REDUX_H
|
||||
|
||||
// IWYU pragma: private
|
||||
#include "./InternalHeaderCheck.h"
|
||||
|
||||
namespace Eigen {
|
||||
|
||||
namespace internal {
|
||||
|
||||
// TODO
|
||||
// * implement other kind of vectorization
|
||||
// * factorize code
|
||||
|
||||
/***************************************************************************
|
||||
* Part 1 : the logic deciding a strategy for vectorization and unrolling
|
||||
***************************************************************************/
|
||||
|
||||
template <typename Func, typename Evaluator>
|
||||
struct redux_traits {
|
||||
public:
|
||||
typedef typename find_best_packet<typename Evaluator::Scalar, Evaluator::SizeAtCompileTime>::type PacketType;
|
||||
enum {
|
||||
PacketSize = unpacket_traits<PacketType>::size,
|
||||
InnerMaxSize = int(Evaluator::IsRowMajor) ? Evaluator::MaxColsAtCompileTime : Evaluator::MaxRowsAtCompileTime,
|
||||
OuterMaxSize = int(Evaluator::IsRowMajor) ? Evaluator::MaxRowsAtCompileTime : Evaluator::MaxColsAtCompileTime,
|
||||
SliceVectorizedWork = int(InnerMaxSize) == Dynamic ? Dynamic
|
||||
: int(OuterMaxSize) == Dynamic ? (int(InnerMaxSize) >= int(PacketSize) ? Dynamic : 0)
|
||||
: (int(InnerMaxSize) / int(PacketSize)) * int(OuterMaxSize)
|
||||
};
|
||||
|
||||
enum {
|
||||
MayLinearize = (int(Evaluator::Flags) & LinearAccessBit),
|
||||
MightVectorize = (int(Evaluator::Flags) & ActualPacketAccessBit) && (functor_traits<Func>::PacketAccess),
|
||||
MayLinearVectorize = bool(MightVectorize) && bool(MayLinearize),
|
||||
MaySliceVectorize = bool(MightVectorize) && (int(SliceVectorizedWork) == Dynamic || int(SliceVectorizedWork) >= 3)
|
||||
};
|
||||
|
||||
public:
|
||||
enum {
|
||||
Traversal = int(MayLinearVectorize) ? int(LinearVectorizedTraversal)
|
||||
: int(MaySliceVectorize) ? int(SliceVectorizedTraversal)
|
||||
: int(MayLinearize) ? int(LinearTraversal)
|
||||
: int(DefaultTraversal)
|
||||
};
|
||||
|
||||
public:
|
||||
enum {
|
||||
Cost = Evaluator::SizeAtCompileTime == Dynamic
|
||||
? HugeCost
|
||||
: int(Evaluator::SizeAtCompileTime) * int(Evaluator::CoeffReadCost) +
|
||||
(Evaluator::SizeAtCompileTime - 1) * functor_traits<Func>::Cost,
|
||||
UnrollingLimit = EIGEN_UNROLLING_LIMIT * (int(Traversal) == int(DefaultTraversal) ? 1 : int(PacketSize))
|
||||
};
|
||||
|
||||
public:
|
||||
enum { Unrolling = Cost <= UnrollingLimit ? CompleteUnrolling : NoUnrolling };
|
||||
|
||||
#ifdef EIGEN_DEBUG_ASSIGN
|
||||
static void debug() {
|
||||
std::cerr << "Xpr: " << typeid(typename Evaluator::XprType).name() << std::endl;
|
||||
std::cerr.setf(std::ios::hex, std::ios::basefield);
|
||||
EIGEN_DEBUG_VAR(Evaluator::Flags)
|
||||
std::cerr.unsetf(std::ios::hex);
|
||||
EIGEN_DEBUG_VAR(InnerMaxSize)
|
||||
EIGEN_DEBUG_VAR(OuterMaxSize)
|
||||
EIGEN_DEBUG_VAR(SliceVectorizedWork)
|
||||
EIGEN_DEBUG_VAR(PacketSize)
|
||||
EIGEN_DEBUG_VAR(MightVectorize)
|
||||
EIGEN_DEBUG_VAR(MayLinearVectorize)
|
||||
EIGEN_DEBUG_VAR(MaySliceVectorize)
|
||||
std::cerr << "Traversal"
|
||||
<< " = " << Traversal << " (" << demangle_traversal(Traversal) << ")" << std::endl;
|
||||
EIGEN_DEBUG_VAR(UnrollingLimit)
|
||||
std::cerr << "Unrolling"
|
||||
<< " = " << Unrolling << " (" << demangle_unrolling(Unrolling) << ")" << std::endl;
|
||||
std::cerr << std::endl;
|
||||
}
|
||||
#endif
|
||||
};
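/* Worked example (illustrative): a dynamic-size MatrixXf reduced with scalar_sum_op is linearly
   addressable and packet-accessible, so Traversal resolves to LinearVectorizedTraversal, while
   its Dynamic size makes Cost equal to HugeCost and hence Unrolling = NoUnrolling. A Vector4f
   sum, by contrast, has a small fixed Cost and gets CompleteUnrolling. */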
|
||||
|
||||
/***************************************************************************
|
||||
* Part 2 : unrollers
|
||||
***************************************************************************/
|
||||
|
||||
/*** no vectorization ***/
|
||||
|
||||
template <typename Func, typename Evaluator, Index Start, Index Length>
|
||||
struct redux_novec_unroller {
|
||||
static constexpr Index HalfLength = Length / 2;
|
||||
|
||||
typedef typename Evaluator::Scalar Scalar;
|
||||
|
||||
EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Scalar run(const Evaluator& eval, const Func& func) {
|
||||
return func(redux_novec_unroller<Func, Evaluator, Start, HalfLength>::run(eval, func),
|
||||
redux_novec_unroller<Func, Evaluator, Start + HalfLength, Length - HalfLength>::run(eval, func));
|
||||
}
|
||||
};
|
||||
|
||||
template <typename Func, typename Evaluator, Index Start>
|
||||
struct redux_novec_unroller<Func, Evaluator, Start, 1> {
|
||||
static constexpr Index outer = Start / Evaluator::InnerSizeAtCompileTime;
|
||||
static constexpr Index inner = Start % Evaluator::InnerSizeAtCompileTime;
|
||||
|
||||
typedef typename Evaluator::Scalar Scalar;
|
||||
|
||||
EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Scalar run(const Evaluator& eval, const Func&) {
|
||||
return eval.coeffByOuterInner(outer, inner);
|
||||
}
|
||||
};
|
||||
|
||||
// This is actually dead code and will never be called. It is required
|
||||
// to prevent false warnings regarding failed inlining though
|
||||
// for 0 length run() will never be called at all.
|
||||
template <typename Func, typename Evaluator, Index Start>
|
||||
struct redux_novec_unroller<Func, Evaluator, Start, 0> {
|
||||
typedef typename Evaluator::Scalar Scalar;
|
||||
EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Scalar run(const Evaluator&, const Func&) { return Scalar(); }
|
||||
};
|
||||
|
||||
template <typename Func, typename Evaluator, Index Start, Index Length>
|
||||
struct redux_novec_linear_unroller {
|
||||
static constexpr Index HalfLength = Length / 2;
|
||||
|
||||
typedef typename Evaluator::Scalar Scalar;
|
||||
|
||||
EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Scalar run(const Evaluator& eval, const Func& func) {
|
||||
return func(redux_novec_linear_unroller<Func, Evaluator, Start, HalfLength>::run(eval, func),
|
||||
redux_novec_linear_unroller<Func, Evaluator, Start + HalfLength, Length - HalfLength>::run(eval, func));
|
||||
}
|
||||
};
|
||||
|
||||
template <typename Func, typename Evaluator, Index Start>
|
||||
struct redux_novec_linear_unroller<Func, Evaluator, Start, 1> {
|
||||
typedef typename Evaluator::Scalar Scalar;
|
||||
|
||||
EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Scalar run(const Evaluator& eval, const Func&) {
|
||||
return eval.coeff(Start);
|
||||
}
|
||||
};
|
||||
|
||||
// This is actually dead code and will never be called. It is required
|
||||
// to prevent false warnings regarding failed inlining though
|
||||
// for 0 length run() will never be called at all.
|
||||
template <typename Func, typename Evaluator, Index Start>
|
||||
struct redux_novec_linear_unroller<Func, Evaluator, Start, 0> {
|
||||
typedef typename Evaluator::Scalar Scalar;
|
||||
EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Scalar run(const Evaluator&, const Func&) { return Scalar(); }
|
||||
};
|
||||
|
||||
/*** vectorization ***/
|
||||
|
||||
template <typename Func, typename Evaluator, Index Start, Index Length>
|
||||
struct redux_vec_unroller {
|
||||
template <typename PacketType>
|
||||
EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE PacketType run(const Evaluator& eval, const Func& func) {
|
||||
constexpr Index HalfLength = Length / 2;
|
||||
|
||||
return func.packetOp(
|
||||
redux_vec_unroller<Func, Evaluator, Start, HalfLength>::template run<PacketType>(eval, func),
|
||||
redux_vec_unroller<Func, Evaluator, Start + HalfLength, Length - HalfLength>::template run<PacketType>(eval,
|
||||
func));
|
||||
}
|
||||
};
|
||||
|
||||
template <typename Func, typename Evaluator, Index Start>
|
||||
struct redux_vec_unroller<Func, Evaluator, Start, 1> {
|
||||
template <typename PacketType>
|
||||
EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE PacketType run(const Evaluator& eval, const Func&) {
|
||||
constexpr Index PacketSize = unpacket_traits<PacketType>::size;
|
||||
constexpr Index index = Start * PacketSize;
|
||||
constexpr Index outer = index / int(Evaluator::InnerSizeAtCompileTime);
|
||||
constexpr Index inner = index % int(Evaluator::InnerSizeAtCompileTime);
|
||||
constexpr int alignment = Evaluator::Alignment;
|
||||
|
||||
return eval.template packetByOuterInner<alignment, PacketType>(outer, inner);
|
||||
}
|
||||
};
|
||||
|
||||
template <typename Func, typename Evaluator, Index Start, Index Length>
|
||||
struct redux_vec_linear_unroller {
|
||||
template <typename PacketType>
|
||||
EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE PacketType run(const Evaluator& eval, const Func& func) {
|
||||
constexpr Index HalfLength = Length / 2;
|
||||
|
||||
return func.packetOp(
|
||||
redux_vec_linear_unroller<Func, Evaluator, Start, HalfLength>::template run<PacketType>(eval, func),
|
||||
redux_vec_linear_unroller<Func, Evaluator, Start + HalfLength, Length - HalfLength>::template run<PacketType>(
|
||||
eval, func));
|
||||
}
|
||||
};
|
||||
|
||||
template <typename Func, typename Evaluator, Index Start>
|
||||
struct redux_vec_linear_unroller<Func, Evaluator, Start, 1> {
|
||||
template <typename PacketType>
|
||||
EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE PacketType run(const Evaluator& eval, const Func&) {
|
||||
constexpr Index PacketSize = unpacket_traits<PacketType>::size;
|
||||
constexpr Index index = (Start * PacketSize);
|
||||
constexpr int alignment = Evaluator::Alignment;
|
||||
return eval.template packet<alignment, PacketType>(index);
|
||||
}
|
||||
};
|
||||
|
||||
/***************************************************************************
|
||||
* Part 3 : implementation of all cases
|
||||
***************************************************************************/
|
||||
|
||||
template <typename Func, typename Evaluator, int Traversal = redux_traits<Func, Evaluator>::Traversal,
|
||||
int Unrolling = redux_traits<Func, Evaluator>::Unrolling>
|
||||
struct redux_impl;
|
||||
|
||||
template <typename Func, typename Evaluator>
|
||||
struct redux_impl<Func, Evaluator, DefaultTraversal, NoUnrolling> {
|
||||
typedef typename Evaluator::Scalar Scalar;
|
||||
|
||||
template <typename XprType>
|
||||
EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Scalar run(const Evaluator& eval, const Func& func, const XprType& xpr) {
|
||||
eigen_assert(xpr.rows() > 0 && xpr.cols() > 0 && "you are using an empty matrix");
|
||||
Scalar res = eval.coeffByOuterInner(0, 0);
|
||||
for (Index i = 1; i < xpr.innerSize(); ++i) res = func(res, eval.coeffByOuterInner(0, i));
|
||||
for (Index i = 1; i < xpr.outerSize(); ++i)
|
||||
for (Index j = 0; j < xpr.innerSize(); ++j) res = func(res, eval.coeffByOuterInner(i, j));
|
||||
return res;
|
||||
}
|
||||
};
|
||||
|
||||
template <typename Func, typename Evaluator>
|
||||
struct redux_impl<Func, Evaluator, LinearTraversal, NoUnrolling> {
|
||||
typedef typename Evaluator::Scalar Scalar;
|
||||
|
||||
template <typename XprType>
|
||||
EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Scalar run(const Evaluator& eval, const Func& func, const XprType& xpr) {
|
||||
eigen_assert(xpr.size() > 0 && "you are using an empty matrix");
|
||||
Scalar res = eval.coeff(0);
|
||||
for (Index k = 1; k < xpr.size(); ++k) res = func(res, eval.coeff(k));
|
||||
return res;
|
||||
}
|
||||
};
|
||||
|
||||
template <typename Func, typename Evaluator>
|
||||
struct redux_impl<Func, Evaluator, DefaultTraversal, CompleteUnrolling>
|
||||
: redux_novec_unroller<Func, Evaluator, 0, Evaluator::SizeAtCompileTime> {
|
||||
typedef redux_novec_unroller<Func, Evaluator, 0, Evaluator::SizeAtCompileTime> Base;
|
||||
typedef typename Evaluator::Scalar Scalar;
|
||||
template <typename XprType>
|
||||
EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Scalar run(const Evaluator& eval, const Func& func,
|
||||
const XprType& /*xpr*/) {
|
||||
return Base::run(eval, func);
|
||||
}
|
||||
};
|
||||
|
||||
template <typename Func, typename Evaluator>
|
||||
struct redux_impl<Func, Evaluator, LinearTraversal, CompleteUnrolling>
|
||||
: redux_novec_linear_unroller<Func, Evaluator, 0, Evaluator::SizeAtCompileTime> {
|
||||
typedef redux_novec_linear_unroller<Func, Evaluator, 0, Evaluator::SizeAtCompileTime> Base;
|
||||
typedef typename Evaluator::Scalar Scalar;
|
||||
template <typename XprType>
|
||||
EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Scalar run(const Evaluator& eval, const Func& func,
|
||||
const XprType& /*xpr*/) {
|
||||
return Base::run(eval, func);
|
||||
}
|
||||
};
|
||||
|
||||
template <typename Func, typename Evaluator>
|
||||
struct redux_impl<Func, Evaluator, LinearVectorizedTraversal, NoUnrolling> {
|
||||
typedef typename Evaluator::Scalar Scalar;
|
||||
typedef typename redux_traits<Func, Evaluator>::PacketType PacketScalar;
|
||||
|
||||
template <typename XprType>
|
||||
static Scalar run(const Evaluator& eval, const Func& func, const XprType& xpr) {
|
||||
const Index size = xpr.size();
|
||||
|
||||
constexpr Index packetSize = redux_traits<Func, Evaluator>::PacketSize;
|
||||
constexpr int packetAlignment = unpacket_traits<PacketScalar>::alignment;
|
||||
constexpr int alignment0 =
|
||||
(bool(Evaluator::Flags & DirectAccessBit) && bool(packet_traits<Scalar>::AlignedOnScalar))
|
||||
? int(packetAlignment)
|
||||
: int(Unaligned);
|
||||
constexpr int alignment = plain_enum_max(alignment0, Evaluator::Alignment);
|
||||
const Index alignedStart = internal::first_default_aligned(xpr);
|
||||
const Index alignedSize2 = ((size - alignedStart) / (2 * packetSize)) * (2 * packetSize);
|
||||
const Index alignedSize = ((size - alignedStart) / (packetSize)) * (packetSize);
|
||||
const Index alignedEnd2 = alignedStart + alignedSize2;
|
||||
const Index alignedEnd = alignedStart + alignedSize;
|
||||
Scalar res;
|
||||
if (alignedSize) {
|
||||
PacketScalar packet_res0 = eval.template packet<alignment, PacketScalar>(alignedStart);
|
||||
if (alignedSize > packetSize) // we have at least two packets to partly unroll the loop
|
||||
{
|
||||
PacketScalar packet_res1 = eval.template packet<alignment, PacketScalar>(alignedStart + packetSize);
|
||||
for (Index index = alignedStart + 2 * packetSize; index < alignedEnd2; index += 2 * packetSize) {
|
||||
packet_res0 = func.packetOp(packet_res0, eval.template packet<alignment, PacketScalar>(index));
|
||||
packet_res1 = func.packetOp(packet_res1, eval.template packet<alignment, PacketScalar>(index + packetSize));
|
||||
}
|
||||
|
||||
packet_res0 = func.packetOp(packet_res0, packet_res1);
|
||||
if (alignedEnd > alignedEnd2)
|
||||
packet_res0 = func.packetOp(packet_res0, eval.template packet<alignment, PacketScalar>(alignedEnd2));
|
||||
}
|
||||
res = func.predux(packet_res0);
|
||||
|
||||
for (Index index = 0; index < alignedStart; ++index) res = func(res, eval.coeff(index));
|
||||
|
||||
for (Index index = alignedEnd; index < size; ++index) res = func(res, eval.coeff(index));
|
||||
} else // too small to vectorize anything.
|
||||
// since this is dynamic-size hence inefficient anyway for such small sizes, don't try to optimize.
|
||||
{
|
||||
res = eval.coeff(0);
|
||||
for (Index index = 1; index < size; ++index) res = func(res, eval.coeff(index));
|
||||
}
|
||||
|
||||
return res;
|
||||
}
|
||||
};
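/* Illustrative standalone sketch (not part of this header) of the reduction structure above,
   written with plain scalars instead of packets: a main loop that keeps two independent
   accumulators (so the two dependency chains can overlap), a combine step analogous to
   func.predux(), and a scalar epilogue for the tail.

     #include <cstddef>

     inline float sum_two_accumulators(const float* data, std::size_t n) {
       float acc0 = 0.0f, acc1 = 0.0f;
       std::size_t i = 0;
       for (; i + 2 <= n; i += 2) {  // main loop: two independent partial sums
         acc0 += data[i];
         acc1 += data[i + 1];
       }
       float res = acc0 + acc1;            // combine the partial sums
       for (; i < n; ++i) res += data[i];  // epilogue for the remaining elements
       return res;
     }
*/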
|
||||
|
||||
// NOTE: for SliceVectorizedTraversal we simply bypass unrolling
|
||||
template <typename Func, typename Evaluator, int Unrolling>
|
||||
struct redux_impl<Func, Evaluator, SliceVectorizedTraversal, Unrolling> {
|
||||
typedef typename Evaluator::Scalar Scalar;
|
||||
typedef typename redux_traits<Func, Evaluator>::PacketType PacketType;
|
||||
|
||||
template <typename XprType>
|
||||
EIGEN_DEVICE_FUNC static Scalar run(const Evaluator& eval, const Func& func, const XprType& xpr) {
|
||||
eigen_assert(xpr.rows() > 0 && xpr.cols() > 0 && "you are using an empty matrix");
|
||||
constexpr Index packetSize = redux_traits<Func, Evaluator>::PacketSize;
|
||||
const Index innerSize = xpr.innerSize();
|
||||
const Index outerSize = xpr.outerSize();
|
||||
const Index packetedInnerSize = ((innerSize) / packetSize) * packetSize;
|
||||
Scalar res;
|
||||
if (packetedInnerSize) {
|
||||
PacketType packet_res = eval.template packet<Unaligned, PacketType>(0, 0);
|
||||
for (Index j = 0; j < outerSize; ++j)
|
||||
for (Index i = (j == 0 ? packetSize : 0); i < packetedInnerSize; i += Index(packetSize))
|
||||
packet_res = func.packetOp(packet_res, eval.template packetByOuterInner<Unaligned, PacketType>(j, i));
|
||||
|
||||
res = func.predux(packet_res);
|
||||
for (Index j = 0; j < outerSize; ++j)
|
||||
for (Index i = packetedInnerSize; i < innerSize; ++i) res = func(res, eval.coeffByOuterInner(j, i));
|
||||
} else // too small to vectorize anything.
|
||||
// since this is dynamic-size hence inefficient anyway for such small sizes, don't try to optimize.
|
||||
{
|
||||
res = redux_impl<Func, Evaluator, DefaultTraversal, NoUnrolling>::run(eval, func, xpr);
|
||||
}
|
||||
|
||||
return res;
|
||||
}
|
||||
};
|
||||
|
||||
template <typename Func, typename Evaluator>
|
||||
struct redux_impl<Func, Evaluator, LinearVectorizedTraversal, CompleteUnrolling> {
|
||||
typedef typename Evaluator::Scalar Scalar;
|
||||
|
||||
typedef typename redux_traits<Func, Evaluator>::PacketType PacketType;
|
||||
static constexpr Index PacketSize = redux_traits<Func, Evaluator>::PacketSize;
|
||||
static constexpr Index Size = Evaluator::SizeAtCompileTime;
|
||||
static constexpr Index VectorizedSize = (int(Size) / int(PacketSize)) * int(PacketSize);
|
||||
|
||||
template <typename XprType>
|
||||
EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Scalar run(const Evaluator& eval, const Func& func, const XprType& xpr) {
|
||||
EIGEN_ONLY_USED_FOR_DEBUG(xpr)
|
||||
eigen_assert(xpr.rows() > 0 && xpr.cols() > 0 && "you are using an empty matrix");
|
||||
if (VectorizedSize > 0) {
|
||||
Scalar res = func.predux(
|
||||
redux_vec_linear_unroller<Func, Evaluator, 0, Size / PacketSize>::template run<PacketType>(eval, func));
|
||||
if (VectorizedSize != Size)
|
||||
res = func(
|
||||
res, redux_novec_linear_unroller<Func, Evaluator, VectorizedSize, Size - VectorizedSize>::run(eval, func));
|
||||
return res;
|
||||
} else {
|
||||
return redux_novec_linear_unroller<Func, Evaluator, 0, Size>::run(eval, func);
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
// evaluator adaptor
template <typename XprType_>
class redux_evaluator : public internal::evaluator<XprType_> {
  typedef internal::evaluator<XprType_> Base;

 public:
  typedef XprType_ XprType;
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit redux_evaluator(const XprType& xpr) : Base(xpr) {}

  typedef typename XprType::Scalar Scalar;
  typedef typename XprType::CoeffReturnType CoeffReturnType;
  typedef typename XprType::PacketScalar PacketScalar;

  enum {
    MaxRowsAtCompileTime = XprType::MaxRowsAtCompileTime,
    MaxColsAtCompileTime = XprType::MaxColsAtCompileTime,
    // TODO we should not remove DirectAccessBit and rather find an elegant way to query the alignment offset at runtime
    // from the evaluator
    Flags = Base::Flags & ~DirectAccessBit,
    IsRowMajor = XprType::IsRowMajor,
    SizeAtCompileTime = XprType::SizeAtCompileTime,
    InnerSizeAtCompileTime = XprType::InnerSizeAtCompileTime
  };

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeffByOuterInner(Index outer, Index inner) const {
    return Base::coeff(IsRowMajor ? outer : inner, IsRowMajor ? inner : outer);
  }

  template <int LoadMode, typename PacketType>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketType packetByOuterInner(Index outer, Index inner) const {
    return Base::template packet<LoadMode, PacketType>(IsRowMajor ? outer : inner, IsRowMajor ? inner : outer);
  }

  template <int LoadMode, typename PacketType>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketType packetSegmentByOuterInner(Index outer, Index inner, Index begin,
                                                                             Index count) const {
    return Base::template packetSegment<LoadMode, PacketType>(IsRowMajor ? outer : inner, IsRowMajor ? inner : outer,
                                                              begin, count);
  }
};

} // end namespace internal

/***************************************************************************
 * Part 4 : public API
 ***************************************************************************/

/** \returns the result of a full redux operation on the whole matrix or vector using \a func
 *
 * The template parameter \a Func is the type of the functor \a func, which must be
 * an associative binary operator. Both C++98 and C++11 functor styles are handled.
 *
 * \warning the matrix must not be empty, otherwise an assertion is triggered.
 *
 * \sa DenseBase::sum(), DenseBase::minCoeff(), DenseBase::maxCoeff(), MatrixBase::colwise(), MatrixBase::rowwise()
 */
template <typename Derived>
template <typename Func>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename internal::traits<Derived>::Scalar DenseBase<Derived>::redux(
    const Func& func) const {
  eigen_assert(this->rows() > 0 && this->cols() > 0 && "you are using an empty matrix");

  typedef typename internal::redux_evaluator<Derived> ThisEvaluator;
  ThisEvaluator thisEval(derived());

  // The initial expression is passed to the reducer as an additional argument instead of
  // passing it as a member of redux_evaluator to help
  return internal::redux_impl<Func, ThisEvaluator>::run(thisEval, func, derived());
}

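// A minimal usage sketch of redux() (illustrative, not upstream documentation). Assuming a plain
// binary functor without packet support, Eigen selects the scalar reduction path and folds the
// functor over all coefficients:
//
//   #include <Eigen/Dense>
//   #include <functional>
//   Eigen::VectorXf v = Eigen::VectorXf::LinSpaced(4, 1.f, 4.f);  // 1, 2, 3, 4
//   float s = v.redux(std::plus<float>());                        // 10.f, same result as v.sum()
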
/** \returns the minimum of all coefficients of \c *this.
 * In case \c *this contains NaN, NaNPropagation determines the behavior:
 *   NaNPropagation == PropagateFast : undefined
 *   NaNPropagation == PropagateNaN : result is NaN
 *   NaNPropagation == PropagateNumbers : result is minimum of elements that are not NaN
 * \warning the matrix must not be empty, otherwise an assertion is triggered.
 */
template <typename Derived>
template <int NaNPropagation>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename internal::traits<Derived>::Scalar DenseBase<Derived>::minCoeff() const {
  return derived().redux(Eigen::internal::scalar_min_op<Scalar, Scalar, NaNPropagation>());
}

/** \returns the maximum of all coefficients of \c *this.
 * In case \c *this contains NaN, NaNPropagation determines the behavior:
 *   NaNPropagation == PropagateFast : undefined
 *   NaNPropagation == PropagateNaN : result is NaN
 *   NaNPropagation == PropagateNumbers : result is maximum of elements that are not NaN
 * \warning the matrix must not be empty, otherwise an assertion is triggered.
 */
template <typename Derived>
template <int NaNPropagation>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename internal::traits<Derived>::Scalar DenseBase<Derived>::maxCoeff() const {
  return derived().redux(Eigen::internal::scalar_max_op<Scalar, Scalar, NaNPropagation>());
}

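// A short sketch of the NaNPropagation template argument described above (illustrative only):
//
//   #include <Eigen/Dense>
//   #include <limits>
//   Eigen::Vector3f v(1.f, std::numeric_limits<float>::quiet_NaN(), 3.f);
//   float a = v.minCoeff<Eigen::PropagateNumbers>();  // 1.f : NaN coefficients are ignored
//   float b = v.minCoeff<Eigen::PropagateNaN>();      // NaN : any NaN poisons the result
//   // The plain v.minCoeff() call uses PropagateFast, whose result is undefined if NaN is present.
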
/** \returns the sum of all coefficients of \c *this
 *
 * If \c *this is empty, then the value 0 is returned.
 *
 * \sa trace(), prod(), mean()
 */
template <typename Derived>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename internal::traits<Derived>::Scalar DenseBase<Derived>::sum() const {
  if (SizeAtCompileTime == 0 || (SizeAtCompileTime == Dynamic && size() == 0)) return Scalar(0);
  return derived().redux(Eigen::internal::scalar_sum_op<Scalar, Scalar>());
}

/** \returns the mean of all coefficients of \c *this
 *
 * \sa trace(), prod(), sum()
 */
template <typename Derived>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename internal::traits<Derived>::Scalar DenseBase<Derived>::mean() const {
#ifdef __INTEL_COMPILER
#pragma warning push
#pragma warning(disable : 2259)
#endif
  return Scalar(derived().redux(Eigen::internal::scalar_sum_op<Scalar, Scalar>())) / Scalar(this->size());
#ifdef __INTEL_COMPILER
#pragma warning pop
#endif
}

/** \returns the product of all coefficients of \c *this
 *
 * If \c *this is empty, then the value 1 is returned.
 *
 * Example: \include MatrixBase_prod.cpp
 * Output: \verbinclude MatrixBase_prod.out
 *
 * \sa sum(), mean(), trace()
 */
template <typename Derived>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename internal::traits<Derived>::Scalar DenseBase<Derived>::prod() const {
  if (SizeAtCompileTime == 0 || (SizeAtCompileTime == Dynamic && size() == 0)) return Scalar(1);
  return derived().redux(Eigen::internal::scalar_product_op<Scalar>());
}

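// A short sketch of the empty-matrix conventions implemented above (illustrative only): sum()
// of an empty matrix returns 0 and prod() returns 1 (the neutral elements of + and *), while
// mean() divides the sum by size() and therefore only makes sense for a non-empty matrix:
//
//   #include <Eigen/Dense>
//   Eigen::MatrixXd e(0, 3);            // empty: zero rows
//   double s = e.sum();                 // 0.0
//   double p = e.prod();                // 1.0
//   Eigen::Vector3d v(1.0, 2.0, 3.0);
//   double m = v.mean();                // 2.0 == v.sum() / v.size()
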
/** \returns the trace of \c *this, i.e. the sum of the coefficients on the main diagonal.
 *
 * \c *this can be any matrix, not necessarily square.
 *
 * \sa diagonal(), sum()
 */
template <typename Derived>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename internal::traits<Derived>::Scalar MatrixBase<Derived>::trace() const {
  return derived().diagonal().sum();
}

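// A short sketch of trace() on a non-square matrix (illustrative only): the trace is simply
// diagonal().sum(), and the diagonal of an m-by-n matrix has min(m, n) coefficients:
//
//   #include <Eigen/Dense>
//   Eigen::MatrixXd A(2, 3);
//   A << 1, 2, 3,
//        4, 5, 6;
//   double t = A.trace();  // 1 + 5 == 6
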
} // end namespace Eigen

#endif // EIGEN_REDUX_H
383
2025.09.22_cpp_with_eigen_package/Eigen/src/Core/Ref.h
Normal file
@@ -0,0 +1,383 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2012 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_REF_H
#define EIGEN_REF_H

// IWYU pragma: private
#include "./InternalHeaderCheck.h"

namespace Eigen {

namespace internal {

template <typename PlainObjectType_, int Options_, typename StrideType_>
struct traits<Ref<PlainObjectType_, Options_, StrideType_> >
    : public traits<Map<PlainObjectType_, Options_, StrideType_> > {
  typedef PlainObjectType_ PlainObjectType;
  typedef StrideType_ StrideType;
  enum {
    Options = Options_,
    Flags = traits<Map<PlainObjectType_, Options_, StrideType_> >::Flags | NestByRefBit,
    Alignment = traits<Map<PlainObjectType_, Options_, StrideType_> >::Alignment,
    InnerStrideAtCompileTime = traits<Map<PlainObjectType_, Options_, StrideType_> >::InnerStrideAtCompileTime,
    OuterStrideAtCompileTime = traits<Map<PlainObjectType_, Options_, StrideType_> >::OuterStrideAtCompileTime
  };

  template <typename Derived>
  struct match {
    enum {
      IsVectorAtCompileTime = PlainObjectType::IsVectorAtCompileTime || Derived::IsVectorAtCompileTime,
      HasDirectAccess = internal::has_direct_access<Derived>::ret,
      StorageOrderMatch =
          IsVectorAtCompileTime || ((PlainObjectType::Flags & RowMajorBit) == (Derived::Flags & RowMajorBit)),
      InnerStrideMatch = int(InnerStrideAtCompileTime) == int(Dynamic) ||
                         int(InnerStrideAtCompileTime) == int(Derived::InnerStrideAtCompileTime) ||
                         (int(InnerStrideAtCompileTime) == 0 && int(Derived::InnerStrideAtCompileTime) == 1),
      OuterStrideMatch = IsVectorAtCompileTime || int(OuterStrideAtCompileTime) == int(Dynamic) ||
                         int(OuterStrideAtCompileTime) == int(Derived::OuterStrideAtCompileTime),
      // NOTE, this indirection of evaluator<Derived>::Alignment is needed
      // to work around a very strange bug in MSVC related to the instantiation
      // of has_*ary_operator in evaluator<CwiseNullaryOp>.
      // This line is surprisingly very sensitive. For instance, simply adding parenthesis
      // as "DerivedAlignment = (int(evaluator<Derived>::Alignment))," will make MSVC fail...
      DerivedAlignment = int(evaluator<Derived>::Alignment),
      AlignmentMatch = (int(traits<PlainObjectType>::Alignment) == int(Unaligned)) ||
                       (DerivedAlignment >= int(Alignment)),  // FIXME the first condition is not very clear, it should
                                                              // be replaced by the required alignment
      ScalarTypeMatch = internal::is_same<typename PlainObjectType::Scalar, typename Derived::Scalar>::value,
      MatchAtCompileTime = HasDirectAccess && StorageOrderMatch && InnerStrideMatch && OuterStrideMatch &&
                           AlignmentMatch && ScalarTypeMatch
    };
    typedef std::conditional_t<MatchAtCompileTime, internal::true_type, internal::false_type> type;
  };
};

template <typename Derived>
struct traits<RefBase<Derived> > : public traits<Derived> {};

} // namespace internal

template <typename Derived>
class RefBase : public MapBase<Derived> {
  typedef typename internal::traits<Derived>::PlainObjectType PlainObjectType;
  typedef typename internal::traits<Derived>::StrideType StrideType;

 public:
  typedef MapBase<Derived> Base;
  EIGEN_DENSE_PUBLIC_INTERFACE(RefBase)

  EIGEN_DEVICE_FUNC constexpr Index innerStride() const {
    return StrideType::InnerStrideAtCompileTime != 0 ? m_stride.inner() : 1;
  }

  EIGEN_DEVICE_FUNC constexpr Index outerStride() const {
    return StrideType::OuterStrideAtCompileTime != 0 ? m_stride.outer()
           : IsVectorAtCompileTime ? this->size()
           : int(Flags) & RowMajorBit ? this->cols()
                                      : this->rows();
  }

  EIGEN_DEVICE_FUNC RefBase()
      : Base(0, RowsAtCompileTime == Dynamic ? 0 : RowsAtCompileTime,
             ColsAtCompileTime == Dynamic ? 0 : ColsAtCompileTime),
        // Stride<> does not allow default ctor for Dynamic strides, so let's initialize it with dummy values:
        m_stride(StrideType::OuterStrideAtCompileTime == Dynamic ? 0 : StrideType::OuterStrideAtCompileTime,
                 StrideType::InnerStrideAtCompileTime == Dynamic ? 0 : StrideType::InnerStrideAtCompileTime) {}

  EIGEN_INHERIT_ASSIGNMENT_OPERATORS(RefBase)

 protected:
  typedef Stride<StrideType::OuterStrideAtCompileTime, StrideType::InnerStrideAtCompileTime> StrideBase;

  // Resolves inner stride if default 0.
  static EIGEN_DEVICE_FUNC constexpr Index resolveInnerStride(Index inner) { return inner == 0 ? 1 : inner; }

  // Resolves outer stride if default 0.
  static EIGEN_DEVICE_FUNC constexpr Index resolveOuterStride(Index inner, Index outer, Index rows, Index cols,
                                                              bool isVectorAtCompileTime, bool isRowMajor) {
    return outer == 0 ? isVectorAtCompileTime ? inner * rows * cols : isRowMajor ? inner * cols : inner * rows : outer;
  }

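  // A worked example of the two helpers above (illustrative only): a default (0) stride resolves
  // to the natural stride of the mapped object. For a 4x5 column-major matrix with unit inner stride:
  //
  //   resolveInnerStride(0) == 1                    // coefficients inside a column are contiguous
  //   resolveOuterStride(/*inner=*/1, /*outer=*/0,
  //                      /*rows=*/4, /*cols=*/5,
  //                      /*isVectorAtCompileTime=*/false,
  //                      /*isRowMajor=*/false) == 4 // consecutive columns are `rows` coefficients apart
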
  // Returns true if construction is valid, false if there is a stride mismatch,
  // and fails if there is a size mismatch.
  template <typename Expression>
  EIGEN_DEVICE_FUNC bool construct(Expression& expr) {
    // Check matrix sizes. If this is a compile-time vector, we do allow
    // implicitly transposing.
    EIGEN_STATIC_ASSERT(EIGEN_PREDICATE_SAME_MATRIX_SIZE(PlainObjectType, Expression)
                            // If it is a vector, the transpose sizes might match.
                            || (PlainObjectType::IsVectorAtCompileTime &&
                                ((int(PlainObjectType::RowsAtCompileTime) == Eigen::Dynamic ||
                                  int(Expression::ColsAtCompileTime) == Eigen::Dynamic ||
                                  int(PlainObjectType::RowsAtCompileTime) == int(Expression::ColsAtCompileTime)) &&
                                 (int(PlainObjectType::ColsAtCompileTime) == Eigen::Dynamic ||
                                  int(Expression::RowsAtCompileTime) == Eigen::Dynamic ||
                                  int(PlainObjectType::ColsAtCompileTime) == int(Expression::RowsAtCompileTime)))),
                        YOU_MIXED_MATRICES_OF_DIFFERENT_SIZES)

    // Determine runtime rows and columns.
    Index rows = expr.rows();
    Index cols = expr.cols();
    if (PlainObjectType::RowsAtCompileTime == 1) {
      eigen_assert(expr.rows() == 1 || expr.cols() == 1);
      rows = 1;
      cols = expr.size();
    } else if (PlainObjectType::ColsAtCompileTime == 1) {
      eigen_assert(expr.rows() == 1 || expr.cols() == 1);
      rows = expr.size();
      cols = 1;
    }
    // Verify that the sizes are valid.
    eigen_assert((PlainObjectType::RowsAtCompileTime == Dynamic) || (PlainObjectType::RowsAtCompileTime == rows));
    eigen_assert((PlainObjectType::ColsAtCompileTime == Dynamic) || (PlainObjectType::ColsAtCompileTime == cols));

    // If this is a vector, we might be transposing, which means that stride should swap.
    const bool transpose = PlainObjectType::IsVectorAtCompileTime && (rows != expr.rows());
    // If the storage format differs, we also need to swap the stride.
    const bool row_major = ((PlainObjectType::Flags) & RowMajorBit) != 0;
    const bool expr_row_major = (Expression::Flags & RowMajorBit) != 0;
    const bool storage_differs = (row_major != expr_row_major);

    const bool swap_stride = (transpose != storage_differs);

    // Determine expr's actual strides, resolving any defaults if zero.
    const Index expr_inner_actual = resolveInnerStride(expr.innerStride());
    const Index expr_outer_actual = resolveOuterStride(expr_inner_actual, expr.outerStride(), expr.rows(), expr.cols(),
                                                       Expression::IsVectorAtCompileTime != 0, expr_row_major);

    // If this is a column-major row vector or row-major column vector, the inner-stride
    // is arbitrary, so set it to either the compile-time inner stride or 1.
    const bool row_vector = (rows == 1);
    const bool col_vector = (cols == 1);
    const Index inner_stride =
        ((!row_major && row_vector) || (row_major && col_vector))
            ? (StrideType::InnerStrideAtCompileTime > 0 ? Index(StrideType::InnerStrideAtCompileTime) : 1)
        : swap_stride ? expr_outer_actual
                      : expr_inner_actual;

    // If this is a column-major column vector or row-major row vector, the outer-stride
    // is arbitrary, so set it to either the compile-time outer stride or vector size.
    const Index outer_stride =
        ((!row_major && col_vector) || (row_major && row_vector))
            ? (StrideType::OuterStrideAtCompileTime > 0 ? Index(StrideType::OuterStrideAtCompileTime)
                                                        : rows * cols * inner_stride)
        : swap_stride ? expr_inner_actual
                      : expr_outer_actual;

    // Check if given inner/outer strides are compatible with compile-time strides.
    const bool inner_valid = (StrideType::InnerStrideAtCompileTime == Dynamic) ||
                             (resolveInnerStride(Index(StrideType::InnerStrideAtCompileTime)) == inner_stride);
    if (!inner_valid) {
      return false;
    }

    const bool outer_valid =
        (StrideType::OuterStrideAtCompileTime == Dynamic) ||
        (resolveOuterStride(inner_stride, Index(StrideType::OuterStrideAtCompileTime), rows, cols,
                            PlainObjectType::IsVectorAtCompileTime != 0, row_major) == outer_stride);
    if (!outer_valid) {
      return false;
    }

    internal::construct_at<Base>(this, expr.data(), rows, cols);
    internal::construct_at(&m_stride, (StrideType::OuterStrideAtCompileTime == 0) ? 0 : outer_stride,
                           (StrideType::InnerStrideAtCompileTime == 0) ? 0 : inner_stride);
    return true;
  }

  StrideBase m_stride;
};

/** \class Ref
 * \ingroup Core_Module
 *
 * \brief A matrix or vector expression mapping an existing expression
 *
 * \tparam PlainObjectType the equivalent matrix type of the mapped data
 * \tparam Options specifies the pointer alignment in bytes. It can be: \c #Aligned128, \c #Aligned64, \c #Aligned32,
 *         \c #Aligned16, \c #Aligned8 or \c #Unaligned. The default is \c #Unaligned.
 * \tparam StrideType optionally specifies strides. By default, Ref implies a contiguous storage along the inner
 *         dimension (inner stride==1), but accepts a variable outer stride (leading dimension). This can be overridden
 *         by specifying strides. The type passed here must be a specialization of the Stride template, see examples
 *         below.
 *
 * This class provides a way to write non-template functions taking Eigen objects as parameters while limiting the
 * number of copies. A Ref<> object can represent either a const expression or an l-value:
 * \code
 * // in-out argument:
 * void foo1(Ref<VectorXf> x);
 *
 * // read-only const argument:
 * void foo2(const Ref<const VectorXf>& x);
 * \endcode
 *
 * In the in-out case, the input argument must satisfy the constraints of the actual Ref<> type, otherwise a compilation
 * issue will be triggered. By default, a Ref<VectorXf> can reference any dense vector expression of float having a
 * contiguous memory layout. Likewise, a Ref<MatrixXf> can reference any column-major dense matrix expression of float
 * whose columns' elements are contiguously stored with the possibility to have a constant space in-between each column,
 * i.e. the inner stride must be equal to 1, but the outer stride (or leading dimension) can be greater than the number
 * of rows.
 *
 * In the const case, if the input expression does not match the above requirement, then it is evaluated into a
 * temporary before being passed to the function. Here are some examples:
 * \code
 * MatrixXf A;
 * VectorXf a;
 * foo1(a.head());             // OK
 * foo1(A.col());              // OK
 * foo1(A.row());              // Compilation error because here innerstride!=1
 * foo2(A.row());              // Compilation error because A.row() is a 1xN object while foo2 is expecting a Nx1 object
 * foo2(A.row().transpose());  // The row is copied into a contiguous temporary
 * foo2(2*a);                  // The expression is evaluated into a temporary
 * foo2(A.col().segment(2,4)); // No temporary
 * \endcode
 *
 * The range of inputs that can be referenced without temporary can be enlarged using the last two template parameters.
 * Here is an example accepting an innerstride!=1:
 * \code
 * // in-out argument:
 * void foo3(Ref<VectorXf,0,InnerStride<> > x);
 * foo3(A.row()); // OK
 * \endcode
 * The downside here is that the function foo3 might be significantly slower than foo1 because it won't be able to
 * exploit vectorization, and will involve more expensive address computations even if the input is contiguously stored
 * in memory. To overcome this issue, one can provide overloads that internally call a common template function, e.g.:
 * \code
 * // in the .h:
 * void foo(const Ref<MatrixXf>& A);
 * void foo(const Ref<MatrixXf,0,Stride<> >& A);
 *
 * // in the .cpp:
 * template<typename TypeOfA> void foo_impl(const TypeOfA& A) {
 *   ... // crazy code goes here
 * }
 * void foo(const Ref<MatrixXf>& A) { foo_impl(A); }
 * void foo(const Ref<MatrixXf,0,Stride<> >& A) { foo_impl(A); }
 * \endcode
 *
 * See also the following StackOverflow question for further reference:
 * - <a href="http://stackoverflow.com/questions/21132538/correct-usage-of-the-eigenref-class">Correct usage of the
 *   Eigen::Ref<> class</a>
 *
 * \sa PlainObjectBase::Map(), \ref TopicStorageOrders
 */
template <typename PlainObjectType, int Options, typename StrideType>
class Ref : public RefBase<Ref<PlainObjectType, Options, StrideType> > {
 private:
  typedef internal::traits<Ref> Traits;
  template <typename Derived>
  EIGEN_DEVICE_FUNC inline Ref(
      const PlainObjectBase<Derived>& expr,
      std::enable_if_t<bool(Traits::template match<Derived>::MatchAtCompileTime), Derived>* = 0);

 public:
  typedef RefBase<Ref> Base;
  EIGEN_DENSE_PUBLIC_INTERFACE(Ref)

#ifndef EIGEN_PARSED_BY_DOXYGEN
  template <typename Derived>
  EIGEN_DEVICE_FUNC inline Ref(
      PlainObjectBase<Derived>& expr,
      std::enable_if_t<bool(Traits::template match<Derived>::MatchAtCompileTime), Derived>* = 0) {
    EIGEN_STATIC_ASSERT(bool(Traits::template match<Derived>::MatchAtCompileTime), STORAGE_LAYOUT_DOES_NOT_MATCH);
    // Construction must pass since we will not create temporary storage in the non-const case.
    const bool success = Base::construct(expr.derived());
    EIGEN_UNUSED_VARIABLE(success)
    eigen_assert(success);
  }
  template <typename Derived>
  EIGEN_DEVICE_FUNC inline Ref(
      const DenseBase<Derived>& expr,
      std::enable_if_t<bool(Traits::template match<Derived>::MatchAtCompileTime), Derived>* = 0)
#else
  /** Implicit constructor from any dense expression */
  template <typename Derived>
  inline Ref(DenseBase<Derived>& expr)
#endif
  {
    EIGEN_STATIC_ASSERT(bool(internal::is_lvalue<Derived>::value), THIS_EXPRESSION_IS_NOT_A_LVALUE__IT_IS_READ_ONLY);
    EIGEN_STATIC_ASSERT(bool(Traits::template match<Derived>::MatchAtCompileTime), STORAGE_LAYOUT_DOES_NOT_MATCH);
    EIGEN_STATIC_ASSERT(!Derived::IsPlainObjectBase, THIS_EXPRESSION_IS_NOT_A_LVALUE__IT_IS_READ_ONLY);
    // Construction must pass since we will not create temporary storage in the non-const case.
    const bool success = Base::construct(expr.const_cast_derived());
    EIGEN_UNUSED_VARIABLE(success)
    eigen_assert(success);
  }

  EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Ref)
};

// this is the const ref version
template <typename TPlainObjectType, int Options, typename StrideType>
class Ref<const TPlainObjectType, Options, StrideType>
    : public RefBase<Ref<const TPlainObjectType, Options, StrideType> > {
  typedef internal::traits<Ref> Traits;

  static constexpr bool may_map_m_object_successfully =
      (static_cast<int>(StrideType::InnerStrideAtCompileTime) == 0 ||
       static_cast<int>(StrideType::InnerStrideAtCompileTime) == 1 ||
       static_cast<int>(StrideType::InnerStrideAtCompileTime) == Dynamic) &&
      (TPlainObjectType::IsVectorAtCompileTime || static_cast<int>(StrideType::OuterStrideAtCompileTime) == 0 ||
       static_cast<int>(StrideType::OuterStrideAtCompileTime) == Dynamic ||
       static_cast<int>(StrideType::OuterStrideAtCompileTime) ==
           static_cast<int>(TPlainObjectType::InnerSizeAtCompileTime) ||
       static_cast<int>(TPlainObjectType::InnerSizeAtCompileTime) == Dynamic);

 public:
  typedef RefBase<Ref> Base;
  EIGEN_DENSE_PUBLIC_INTERFACE(Ref)

  template <typename Derived>
  EIGEN_DEVICE_FUNC inline Ref(const DenseBase<Derived>& expr,
                               std::enable_if_t<bool(Traits::template match<Derived>::ScalarTypeMatch), Derived>* = 0) {
    // std::cout << match_helper<Derived>::HasDirectAccess << "," << match_helper<Derived>::OuterStrideMatch << ","
    //           << match_helper<Derived>::InnerStrideMatch << "\n";
    // std::cout << int(StrideType::OuterStrideAtCompileTime) << " - " << int(Derived::OuterStrideAtCompileTime) << "\n";
    // std::cout << int(StrideType::InnerStrideAtCompileTime) << " - " << int(Derived::InnerStrideAtCompileTime) << "\n";
    EIGEN_STATIC_ASSERT(Traits::template match<Derived>::type::value || may_map_m_object_successfully,
                        STORAGE_LAYOUT_DOES_NOT_MATCH);
    construct(expr.derived(), typename Traits::template match<Derived>::type());
  }

  EIGEN_DEVICE_FUNC inline Ref(const Ref& other) : Base(other) {
    // copy constructor shall not copy the m_object, to avoid unnecessary malloc and copy
  }

  EIGEN_DEVICE_FUNC inline Ref(Ref&& other) {
    if (other.data() == other.m_object.data()) {
      m_object = std::move(other.m_object);
      Base::construct(m_object);
    } else
      Base::construct(other);
  }

  template <typename OtherRef>
  EIGEN_DEVICE_FUNC inline Ref(const RefBase<OtherRef>& other) {
    EIGEN_STATIC_ASSERT(Traits::template match<OtherRef>::type::value || may_map_m_object_successfully,
                        STORAGE_LAYOUT_DOES_NOT_MATCH);
    construct(other.derived(), typename Traits::template match<OtherRef>::type());
  }

 protected:
  template <typename Expression>
  EIGEN_DEVICE_FUNC void construct(const Expression& expr, internal::true_type) {
    // Check if we can use the underlying expr's storage directly, otherwise call the copy version.
    if (!Base::construct(expr)) {
      construct(expr, internal::false_type());
    }
  }

  template <typename Expression>
  EIGEN_DEVICE_FUNC void construct(const Expression& expr, internal::false_type) {
    internal::call_assignment_no_alias(m_object, expr, internal::assign_op<Scalar, Scalar>());
    const bool success = Base::construct(m_object);
    EIGEN_ONLY_USED_FOR_DEBUG(success)
    eigen_assert(success);
  }

 protected:
  TPlainObjectType m_object;
};

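// A usage sketch of the const specialization above (illustrative, not upstream documentation):
// when the argument already matches the required layout it is mapped directly; otherwise the
// expression is evaluated into the internal m_object and the Ref maps that copy instead.
//
//   #include <Eigen/Dense>
//   float first(const Eigen::Ref<const Eigen::VectorXf>& x) { return x(0); }
//
//   Eigen::MatrixXf A = Eigen::MatrixXf::Random(4, 4);
//   float a = first(A.col(1));              // maps the column directly: contiguous, no copy
//   float b = first(2.f * A.col(1));        // the expression is evaluated into a temporary copy
//   float c = first(A.row(1).transpose());  // inner stride != 1, so a contiguous copy is made first
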
} // end namespace Eigen

#endif // EIGEN_REF_H
130
2025.09.22_cpp_with_eigen_package/Eigen/src/Core/Replicate.h
Normal file
@@ -0,0 +1,130 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2009-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_REPLICATE_H
#define EIGEN_REPLICATE_H

// IWYU pragma: private
#include "./InternalHeaderCheck.h"

namespace Eigen {

namespace internal {
template <typename MatrixType, int RowFactor, int ColFactor>
struct traits<Replicate<MatrixType, RowFactor, ColFactor> > : traits<MatrixType> {
  typedef typename MatrixType::Scalar Scalar;
  typedef typename traits<MatrixType>::StorageKind StorageKind;
  typedef typename traits<MatrixType>::XprKind XprKind;
  typedef typename ref_selector<MatrixType>::type MatrixTypeNested;
  typedef std::remove_reference_t<MatrixTypeNested> MatrixTypeNested_;
  enum {
    RowsAtCompileTime = RowFactor == Dynamic || int(MatrixType::RowsAtCompileTime) == Dynamic
                            ? Dynamic
                            : RowFactor * MatrixType::RowsAtCompileTime,
    ColsAtCompileTime = ColFactor == Dynamic || int(MatrixType::ColsAtCompileTime) == Dynamic
                            ? Dynamic
                            : ColFactor * MatrixType::ColsAtCompileTime,
    // FIXME we don't propagate the max sizes !!!
    MaxRowsAtCompileTime = RowsAtCompileTime,
    MaxColsAtCompileTime = ColsAtCompileTime,
    IsRowMajor = MaxRowsAtCompileTime == 1 && MaxColsAtCompileTime != 1   ? 1
                 : MaxColsAtCompileTime == 1 && MaxRowsAtCompileTime != 1 ? 0
                 : (MatrixType::Flags & RowMajorBit)                      ? 1
                                                                          : 0,

    // FIXME enable DirectAccess with negative strides?
    Flags = IsRowMajor ? RowMajorBit : 0
  };
};
} // namespace internal

/**
 * \class Replicate
 * \ingroup Core_Module
 *
 * \brief Expression of the multiple replication of a matrix or vector
 *
 * \tparam MatrixType the type of the object we are replicating
 * \tparam RowFactor number of repetitions at compile time along the vertical direction, can be Dynamic.
 * \tparam ColFactor number of repetitions at compile time along the horizontal direction, can be Dynamic.
 *
 * This class represents an expression of the multiple replication of a matrix or vector.
 * It is the return type of DenseBase::replicate() and most of the time
 * this is the only way it is used.
 *
 * \sa DenseBase::replicate()
 */
template <typename MatrixType, int RowFactor, int ColFactor>
class Replicate : public internal::dense_xpr_base<Replicate<MatrixType, RowFactor, ColFactor> >::type {
  typedef typename internal::traits<Replicate>::MatrixTypeNested MatrixTypeNested;
  typedef typename internal::traits<Replicate>::MatrixTypeNested_ MatrixTypeNested_;

 public:
  typedef typename internal::dense_xpr_base<Replicate>::type Base;
  EIGEN_DENSE_PUBLIC_INTERFACE(Replicate)
  typedef internal::remove_all_t<MatrixType> NestedExpression;

  template <typename OriginalMatrixType>
  EIGEN_DEVICE_FUNC inline explicit Replicate(const OriginalMatrixType& matrix)
      : m_matrix(matrix), m_rowFactor(RowFactor), m_colFactor(ColFactor) {
    EIGEN_STATIC_ASSERT((internal::is_same<std::remove_const_t<MatrixType>, OriginalMatrixType>::value),
                        THE_MATRIX_OR_EXPRESSION_THAT_YOU_PASSED_DOES_NOT_HAVE_THE_EXPECTED_TYPE)
    eigen_assert(RowFactor != Dynamic && ColFactor != Dynamic);
  }

  template <typename OriginalMatrixType>
  EIGEN_DEVICE_FUNC inline Replicate(const OriginalMatrixType& matrix, Index rowFactor, Index colFactor)
      : m_matrix(matrix), m_rowFactor(rowFactor), m_colFactor(colFactor) {
    EIGEN_STATIC_ASSERT((internal::is_same<std::remove_const_t<MatrixType>, OriginalMatrixType>::value),
                        THE_MATRIX_OR_EXPRESSION_THAT_YOU_PASSED_DOES_NOT_HAVE_THE_EXPECTED_TYPE)
  }

  EIGEN_DEVICE_FUNC constexpr Index rows() const { return m_matrix.rows() * m_rowFactor.value(); }
  EIGEN_DEVICE_FUNC constexpr Index cols() const { return m_matrix.cols() * m_colFactor.value(); }

  EIGEN_DEVICE_FUNC const MatrixTypeNested_& nestedExpression() const { return m_matrix; }

 protected:
  MatrixTypeNested m_matrix;
  const internal::variable_if_dynamic<Index, RowFactor> m_rowFactor;
  const internal::variable_if_dynamic<Index, ColFactor> m_colFactor;
};

/**
 * \return an expression of the replication of \c *this
 *
 * Example: \include MatrixBase_replicate.cpp
 * Output: \verbinclude MatrixBase_replicate.out
 *
 * \sa VectorwiseOp::replicate(), DenseBase::replicate(Index,Index), class Replicate
 */
template <typename Derived>
template <int RowFactor, int ColFactor>
EIGEN_DEVICE_FUNC const Replicate<Derived, RowFactor, ColFactor> DenseBase<Derived>::replicate() const {
  return Replicate<Derived, RowFactor, ColFactor>(derived());
}

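// A short usage sketch (illustrative only): the compile-time factors tile the whole matrix, so the
// result is RowFactor*rows() by ColFactor*cols():
//
//   #include <Eigen/Dense>
//   Eigen::RowVector2i v(1, 2);
//   Eigen::MatrixXi t = v.replicate<2, 3>();
//   // t is 2x6:
//   //   1 2 1 2 1 2
//   //   1 2 1 2 1 2
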
/**
 * \return an expression of the replication of each column (or row) of \c *this
 *
 * Example: \include DirectionWise_replicate_int.cpp
 * Output: \verbinclude DirectionWise_replicate_int.out
 *
 * \sa VectorwiseOp::replicate(), DenseBase::replicate(), class Replicate
 */
template <typename ExpressionType, int Direction>
EIGEN_DEVICE_FUNC const typename VectorwiseOp<ExpressionType, Direction>::ReplicateReturnType
VectorwiseOp<ExpressionType, Direction>::replicate(Index factor) const {
  return typename VectorwiseOp<ExpressionType, Direction>::ReplicateReturnType(
      _expression(), Direction == Vertical ? factor : 1, Direction == Horizontal ? factor : 1);
}

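// A short usage sketch (illustrative only): through colwise()/rowwise(), each column (or row) is
// replicated `factor` times along its own direction:
//
//   #include <Eigen/Dense>
//   Eigen::MatrixXi m(1, 3);
//   m << 7, 8, 9;
//   Eigen::MatrixXi r = m.colwise().replicate(2);
//   // r is 2x3: every column is repeated twice vertically
//   //   7 8 9
//   //   7 8 9
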
} // end namespace Eigen

#endif // EIGEN_REPLICATE_H
Some files were not shown because too many files have changed in this diff.