Merge remote-tracking branch 'origin2/master'
diff --git a/Eigen/Core b/Eigen/Core
index 5f46dde..94fd6ec 100644
--- a/Eigen/Core
+++ b/Eigen/Core
@@ -277,6 +277,15 @@
 #include "src/Core/arch/SVE/PacketMath.h"
 #include "src/Core/arch/SVE/TypeCasting.h"
 #include "src/Core/arch/SVE/MathFunctions.h"
+#elif defined EIGEN_VECTORIZE_RVV10
+#include "src/Core/arch/RVV10/PacketMath.h"
+#include "src/Core/arch/RVV10/PacketMath4.h"
+#include "src/Core/arch/RVV10/PacketMath2.h"
+#include "src/Core/arch/RVV10/TypeCasting.h"
+#include "src/Core/arch/RVV10/MathFunctions.h"
+#if defined EIGEN_VECTORIZE_RVV10FP16
+#include "src/Core/arch/RVV10/PacketMathFP16.h"
+#endif
 #elif defined EIGEN_VECTORIZE_ZVECTOR
 #include "src/Core/arch/ZVector/PacketMath.h"
 #include "src/Core/arch/ZVector/MathFunctions.h"
diff --git a/Eigen/src/Core/arch/RVV10/MathFunctions.h b/Eigen/src/Core/arch/RVV10/MathFunctions.h
new file mode 100644
index 0000000..10a70c4
--- /dev/null
+++ b/Eigen/src/Core/arch/RVV10/MathFunctions.h
@@ -0,0 +1,30 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2024 Kseniya Zaytseva <kseniya.zaytseva@syntacore.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_MATH_FUNCTIONS_RVV10_H
+#define EIGEN_MATH_FUNCTIONS_RVV10_H
+
+// IWYU pragma: private
+#include "../../InternalHeaderCheck.h"
+
+namespace Eigen {
+namespace internal {
+
+EIGEN_INSTANTIATE_GENERIC_MATH_FUNCS_FLOAT(Packet1Xf)
+EIGEN_INSTANTIATE_GENERIC_MATH_FUNCS_FLOAT(Packet2Xf)
+EIGEN_INSTANTIATE_GENERIC_MATH_FUNCS_FLOAT(Packet4Xf)
+
+EIGEN_INSTANTIATE_GENERIC_MATH_FUNCS_DOUBLE(Packet1Xd)
+EIGEN_INSTANTIATE_GENERIC_MATH_FUNCS_DOUBLE(Packet2Xd)
+EIGEN_INSTANTIATE_GENERIC_MATH_FUNCS_DOUBLE(Packet4Xd)
+
+}  // end namespace internal
+}  // end namespace Eigen
+
+#endif  // EIGEN_MATH_FUNCTIONS_RVV10_H
diff --git a/Eigen/src/Core/arch/RVV10/PacketMath.h b/Eigen/src/Core/arch/RVV10/PacketMath.h
new file mode 100644
index 0000000..54db626
--- /dev/null
+++ b/Eigen/src/Core/arch/RVV10/PacketMath.h
@@ -0,0 +1,2395 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2024 Kseniya Zaytseva <kseniya.zaytseva@syntacore.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_PACKET_MATH_RVV10_H
+#define EIGEN_PACKET_MATH_RVV10_H
+
+// IWYU pragma: private
+#include "../../InternalHeaderCheck.h"
+
+namespace Eigen {
+namespace internal {
+#ifndef EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD
+#define EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD 8
+#endif
+
+#ifndef EIGEN_HAS_SINGLE_INSTRUCTION_MADD
+#define EIGEN_HAS_SINGLE_INSTRUCTION_MADD
+#endif
+
+#define EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS 32
+
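+// Number of scalars per packet: register-group bits (VectorLength * VectorLMul)
+// divided by the scalar width in bits, e.g. VL = 128, LMUL = 2 and float give
+// 128 * 2 / 32 = 8 lanes.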
+template <typename Scalar, std::size_t VectorLength, std::size_t VectorLMul>
+struct rvv_packet_size_selector {
+  enum { size = VectorLength * VectorLMul / (sizeof(Scalar) * CHAR_BIT) };
+};
+
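+// Alignment assumed for a packet, scaling with the register-group size: a
+// 1024-bit group maps to Aligned128, 512-bit to Aligned64, 256-bit to
+// Aligned32, anything smaller to Aligned16.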
+template <std::size_t VectorLength, std::size_t VectorLMul>
+struct rvv_packet_alignment_selector {
+  enum {
+    alignment =
+        (VectorLength * VectorLMul) >= 1024
+            ? Aligned128
+            : ((VectorLength * VectorLMul) >= 512 ? Aligned64
+                                                  : ((VectorLength * VectorLMul) >= 256 ? Aligned32 : Aligned16))
+  };
+};
+
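+// RVV mask registers: vboolN_t holds one mask bit per element, where N is the
+// ratio SEW / LMUL (e.g. 32-bit elements at LMUL = 1 use vbool32_t, at
+// LMUL = 2 vbool16_t).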
+typedef vbool64_t PacketMask64;
+typedef vbool32_t PacketMask32;
+typedef vbool16_t PacketMask16;
+typedef vbool8_t PacketMask8;
+typedef vbool4_t PacketMask4;
+
+/********************************* int32 **************************************/
+typedef eigen_packet_wrapper<vint32m1_t __attribute__((riscv_rvv_vector_bits(EIGEN_RISCV64_RVV_VL))), 0> Packet1Xi;
+typedef eigen_packet_wrapper<vuint32m1_t __attribute__((riscv_rvv_vector_bits(EIGEN_RISCV64_RVV_VL))), 1> Packet1Xu;
+
+typedef eigen_packet_wrapper<vint32m2_t __attribute__((riscv_rvv_vector_bits(EIGEN_RISCV64_RVV_VL * 2))), 2>
+    Packet2Xi;
+typedef eigen_packet_wrapper<vuint32m2_t __attribute__((riscv_rvv_vector_bits(EIGEN_RISCV64_RVV_VL * 2))), 3>
+    Packet2Xu;
+
+typedef eigen_packet_wrapper<vint32m4_t __attribute__((riscv_rvv_vector_bits(EIGEN_RISCV64_RVV_VL * 4))), 4>
+    Packet4Xi;
+typedef eigen_packet_wrapper<vuint32m4_t __attribute__((riscv_rvv_vector_bits(EIGEN_RISCV64_RVV_VL * 4))), 5>
+    Packet4Xu;
+
+#if EIGEN_RISCV64_DEFAULT_LMUL == 1
+typedef Packet1Xi PacketXi;
+typedef Packet1Xu PacketXu;
+
+template <>
+struct packet_traits<numext::int32_t> : default_packet_traits {
+  typedef Packet1Xi type;
+  typedef Packet1Xi half;  // Half not implemented yet
+  enum {
+    Vectorizable = 1,
+    AlignedOnScalar = 1,
+    size = rvv_packet_size_selector<numext::int32_t, EIGEN_RISCV64_RVV_VL, 1>::size,
+
+    HasAdd = 1,
+    HasSub = 1,
+    HasShift = 1,
+    HasMul = 1,
+    HasNegate = 1,
+    HasAbs = 1,
+    HasArg = 0,
+    HasAbs2 = 1,
+    HasMin = 1,
+    HasMax = 1,
+    HasConj = 1,
+    HasSetLinear = 0,
+    HasBlend = 0,
+    HasReduxp = 0
+  };
+};
+
+#elif EIGEN_RISCV64_DEFAULT_LMUL == 2
+typedef Packet2Xi PacketXi;
+typedef Packet2Xu PacketXu;
+
+template <>
+struct packet_traits<numext::int32_t> : default_packet_traits {
+  typedef Packet2Xi type;
+  typedef Packet1Xi half;
+  enum {
+    Vectorizable = 1,
+    AlignedOnScalar = 1,
+    size = rvv_packet_size_selector<numext::int32_t, EIGEN_RISCV64_RVV_VL, 2>::size,
+
+    HasAdd = 1,
+    HasSub = 1,
+    HasShift = 1,
+    HasMul = 1,
+    HasNegate = 1,
+    HasAbs = 1,
+    HasArg = 0,
+    HasAbs2 = 1,
+    HasMin = 1,
+    HasMax = 1,
+    HasConj = 1,
+    HasSetLinear = 0,
+    HasBlend = 0,
+    HasReduxp = 0
+  };
+};
+
+#elif EIGEN_RISCV64_DEFAULT_LMUL == 4
+typedef Packet4Xi PacketXi;
+typedef Packet4Xu PacketXu;
+
+template <>
+struct packet_traits<numext::int32_t> : default_packet_traits {
+  typedef Packet4Xi type;
+  typedef Packet2Xi half;
+  enum {
+    Vectorizable = 1,
+    AlignedOnScalar = 1,
+    size = rvv_packet_size_selector<numext::int32_t, EIGEN_RISCV64_RVV_VL, 4>::size,
+
+    HasAdd = 1,
+    HasSub = 1,
+    HasShift = 1,
+    HasMul = 1,
+    HasNegate = 1,
+    HasAbs = 1,
+    HasArg = 0,
+    HasAbs2 = 1,
+    HasMin = 1,
+    HasMax = 1,
+    HasConj = 1,
+    HasSetLinear = 0,
+    HasBlend = 0,
+    HasReduxp = 0
+  };
+};
+#endif
+
+template <>
+struct unpacket_traits<Packet1Xi> {
+  typedef numext::int32_t type;
+  typedef Packet1Xi half;  // Half not yet implemented
+  typedef numext::uint8_t mask_t;
+  enum {
+    size = rvv_packet_size_selector<numext::int32_t, EIGEN_RISCV64_RVV_VL, 1>::size,
+    alignment = rvv_packet_alignment_selector<EIGEN_RISCV64_RVV_VL, 1>::alignment,
+    vectorizable = true,
+    masked_load_available = false,
+    masked_store_available = false
+  };
+};
+
+template <>
+struct unpacket_traits<Packet2Xi> {
+  typedef numext::int32_t type;
+  typedef Packet1Xi half;
+  typedef numext::uint8_t mask_t;
+  enum {
+    size = rvv_packet_size_selector<numext::int32_t, EIGEN_RISCV64_RVV_VL, 2>::size,
+    alignment = rvv_packet_alignment_selector<EIGEN_RISCV64_RVV_VL, 2>::alignment,
+    vectorizable = true,
+    masked_load_available = false,
+    masked_store_available = false
+  };
+};
+
+template <>
+struct unpacket_traits<Packet4Xi> {
+  typedef numext::int32_t type;
+  typedef Packet2Xi half;
+  typedef numext::uint8_t mask_t;
+  enum {
+    size = rvv_packet_size_selector<numext::int32_t, EIGEN_RISCV64_RVV_VL, 4>::size,
+    alignment = rvv_packet_alignment_selector<EIGEN_RISCV64_RVV_VL, 4>::alignment,
+    vectorizable = true,
+    masked_load_available = false,
+    masked_store_available = false
+  };
+};
+
+template <>
+EIGEN_STRONG_INLINE void prefetch<numext::int32_t>(const numext::int32_t* addr) {
+#if EIGEN_HAS_BUILTIN(__builtin_prefetch) || EIGEN_COMP_GNUC
+  __builtin_prefetch(addr);
+#endif
+}
+
+/********************************* Packet1Xi ************************************/
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xi pset1<Packet1Xi>(const numext::int32_t& from) {
+  return __riscv_vmv_v_x_i32m1(from, unpacket_traits<Packet1Xi>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xi plset<Packet1Xi>(const numext::int32_t& a) {
+  Packet1Xi idx = __riscv_vreinterpret_v_u32m1_i32m1(__riscv_vid_v_u32m1(unpacket_traits<Packet1Xi>::size));
+  return __riscv_vadd_vx_i32m1(idx, a, unpacket_traits<Packet1Xi>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xi pzero<Packet1Xi>(const Packet1Xi& /*a*/) {
+  return __riscv_vmv_v_x_i32m1(0, unpacket_traits<Packet1Xi>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xi padd<Packet1Xi>(const Packet1Xi& a, const Packet1Xi& b) {
+  return __riscv_vadd_vv_i32m1(a, b, unpacket_traits<Packet1Xi>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xi psub<Packet1Xi>(const Packet1Xi& a, const Packet1Xi& b) {
+  return __riscv_vsub(a, b, unpacket_traits<Packet1Xi>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xi pnegate(const Packet1Xi& a) {
+  return __riscv_vneg(a, unpacket_traits<Packet1Xi>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xi pconj(const Packet1Xi& a) {
+  return a;
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xi pmul<Packet1Xi>(const Packet1Xi& a, const Packet1Xi& b) {
+  return __riscv_vmul(a, b, unpacket_traits<Packet1Xi>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xi pdiv<Packet1Xi>(const Packet1Xi& a, const Packet1Xi& b) {
+  return __riscv_vdiv(a, b, unpacket_traits<Packet1Xi>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xi pmadd(const Packet1Xi& a, const Packet1Xi& b, const Packet1Xi& c) {
+  return __riscv_vmadd(a, b, c, unpacket_traits<Packet1Xi>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xi pmsub(const Packet1Xi& a, const Packet1Xi& b, const Packet1Xi& c) {
+  return __riscv_vmadd(a, b, pnegate(c), unpacket_traits<Packet1Xi>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xi pnmadd(const Packet1Xi& a, const Packet1Xi& b, const Packet1Xi& c) {
+  return __riscv_vnmsub_vv_i32m1(a, b, c, unpacket_traits<Packet1Xi>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xi pnmsub(const Packet1Xi& a, const Packet1Xi& b, const Packet1Xi& c) {
+  return __riscv_vnmsub_vv_i32m1(a, b, pnegate(c), unpacket_traits<Packet1Xi>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xi pmin<Packet1Xi>(const Packet1Xi& a, const Packet1Xi& b) {
+  return __riscv_vmin(a, b, unpacket_traits<Packet1Xi>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xi pmax<Packet1Xi>(const Packet1Xi& a, const Packet1Xi& b) {
+  return __riscv_vmax(a, b, unpacket_traits<Packet1Xi>::size);
+}
+
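+// Comparisons return a packet with all bits set in lanes where the predicate
+// holds and zero elsewhere, matching Eigen's bitmask convention.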
+template <>
+EIGEN_STRONG_INLINE Packet1Xi pcmp_le<Packet1Xi>(const Packet1Xi& a, const Packet1Xi& b) {
+  PacketMask32 mask = __riscv_vmsle_vv_i32m1_b32(a, b, unpacket_traits<Packet1Xi>::size);
+  return __riscv_vmerge_vxm_i32m1(pzero(a), 0xffffffff, mask, unpacket_traits<Packet1Xi>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xi pcmp_lt<Packet1Xi>(const Packet1Xi& a, const Packet1Xi& b) {
+  PacketMask32 mask = __riscv_vmslt_vv_i32m1_b32(a, b, unpacket_traits<Packet1Xi>::size);
+  return __riscv_vmerge_vxm_i32m1(pzero(a), 0xffffffff, mask, unpacket_traits<Packet1Xi>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xi pcmp_eq<Packet1Xi>(const Packet1Xi& a, const Packet1Xi& b) {
+  PacketMask32 mask = __riscv_vmseq_vv_i32m1_b32(a, b, unpacket_traits<Packet1Xi>::size);
+  return __riscv_vmerge_vxm_i32m1(pzero(a), 0xffffffff, mask, unpacket_traits<Packet1Xi>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xi ptrue<Packet1Xi>(const Packet1Xi& /*a*/) {
+  return __riscv_vmv_v_x_i32m1(0xffffffffu, unpacket_traits<Packet1Xi>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xi pand<Packet1Xi>(const Packet1Xi& a, const Packet1Xi& b) {
+  return __riscv_vand_vv_i32m1(a, b, unpacket_traits<Packet1Xi>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xi por<Packet1Xi>(const Packet1Xi& a, const Packet1Xi& b) {
+  return __riscv_vor_vv_i32m1(a, b, unpacket_traits<Packet1Xi>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xi pxor<Packet1Xi>(const Packet1Xi& a, const Packet1Xi& b) {
+  return __riscv_vxor_vv_i32m1(a, b, unpacket_traits<Packet1Xi>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xi pandnot<Packet1Xi>(const Packet1Xi& a, const Packet1Xi& b) {
+  return __riscv_vand_vv_i32m1(a, __riscv_vnot_v_i32m1(b, unpacket_traits<Packet1Xi>::size),
+                               unpacket_traits<Packet1Xi>::size);
+}
+
+template <int N>
+EIGEN_STRONG_INLINE Packet1Xi parithmetic_shift_right(Packet1Xi a) {
+  return __riscv_vsra_vx_i32m1(a, N, unpacket_traits<Packet1Xi>::size);
+}
+
+template <int N>
+EIGEN_STRONG_INLINE Packet1Xi plogical_shift_right(Packet1Xi a) {
+  return __riscv_vreinterpret_i32m1(
+      __riscv_vsrl_vx_u32m1(__riscv_vreinterpret_u32m1(a), N, unpacket_traits<Packet1Xi>::size));
+}
+
+template <int N>
+EIGEN_STRONG_INLINE Packet1Xi plogical_shift_left(Packet1Xi a) {
+  return __riscv_vsll_vx_i32m1(a, N, unpacket_traits<Packet1Xi>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xi pload<Packet1Xi>(const numext::int32_t* from) {
+  EIGEN_DEBUG_ALIGNED_LOAD return __riscv_vle32_v_i32m1(from, unpacket_traits<Packet1Xi>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xi ploadu<Packet1Xi>(const numext::int32_t* from) {
+  EIGEN_DEBUG_UNALIGNED_LOAD return __riscv_vle32_v_i32m1(from, unpacket_traits<Packet1Xi>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xi ploaddup<Packet1Xi>(const numext::int32_t* from) {
+  Packet1Xu idx = __riscv_vid_v_u32m1(unpacket_traits<Packet1Xi>::size);
+  idx = __riscv_vsll_vx_u32m1(__riscv_vand_vx_u32m1(idx, 0xfffffffeu, unpacket_traits<Packet1Xi>::size), 1,
+                              unpacket_traits<Packet1Xi>::size);
+  // idx = 0 0 sizeof(int32_t) sizeof(int32_t) 2*sizeof(int32_t) 2*sizeof(int32_t) ...
+  return __riscv_vloxei32_v_i32m1(from, idx, unpacket_traits<Packet1Xi>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xi ploadquad<Packet1Xi>(const numext::int32_t* from) {
+  Packet1Xu idx = __riscv_vid_v_u32m1(unpacket_traits<Packet1Xi>::size);
+  idx = __riscv_vand_vx_u32m1(idx, 0xfffffffcu, unpacket_traits<Packet1Xi>::size);
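+  // idx (byte offsets) = 0 0 0 0 sizeof(int32_t) sizeof(int32_t) sizeof(int32_t) sizeof(int32_t) ...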
+  return __riscv_vloxei32_v_i32m1(from, idx, unpacket_traits<Packet1Xi>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE void pstore<numext::int32_t>(numext::int32_t* to, const Packet1Xi& from) {
+  EIGEN_DEBUG_ALIGNED_STORE __riscv_vse32_v_i32m1(to, from, unpacket_traits<Packet1Xi>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE void pstoreu<numext::int32_t>(numext::int32_t* to, const Packet1Xi& from) {
+  EIGEN_DEBUG_UNALIGNED_STORE __riscv_vse32_v_i32m1(to, from, unpacket_traits<Packet1Xi>::size);
+}
+
+template <>
+EIGEN_DEVICE_FUNC inline Packet1Xi pgather<numext::int32_t, Packet1Xi>(const numext::int32_t* from, Index stride) {
+  return __riscv_vlse32_v_i32m1(from, stride * sizeof(numext::int32_t), unpacket_traits<Packet1Xi>::size);
+}
+
+template <>
+EIGEN_DEVICE_FUNC inline void pscatter<numext::int32_t, Packet1Xi>(numext::int32_t* to, const Packet1Xi& from,
+                                                                  Index stride) {
+  __riscv_vsse32(to, stride * sizeof(numext::int32_t), from, unpacket_traits<Packet1Xi>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE numext::int32_t pfirst<Packet1Xi>(const Packet1Xi& a) {
+  return __riscv_vmv_x_s_i32m1_i32(a);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xi preverse(const Packet1Xi& a) {
+  Packet1Xu idx = __riscv_vrsub_vx_u32m1(__riscv_vid_v_u32m1(unpacket_traits<Packet1Xi>::size),
+                                        unpacket_traits<Packet1Xi>::size - 1, unpacket_traits<Packet1Xi>::size);
+  return __riscv_vrgather_vv_i32m1(a, idx, unpacket_traits<Packet1Xi>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xi pabs(const Packet1Xi& a) {
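+  // Branchless two's-complement abs: mask = a >> 31 is all-ones for negative
+  // lanes and zero otherwise, and (a ^ mask) - mask negates exactly the
+  // negative lanes.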
+  Packet1Xi mask = __riscv_vsra_vx_i32m1(a, 31, unpacket_traits<Packet1Xi>::size);
+  return __riscv_vsub_vv_i32m1(__riscv_vxor_vv_i32m1(a, mask, unpacket_traits<Packet1Xi>::size), mask,
+                               unpacket_traits<Packet1Xi>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE numext::int32_t predux<Packet1Xi>(const Packet1Xi& a) {
+  return __riscv_vmv_x(__riscv_vredsum_vs_i32m1_i32m1(a, __riscv_vmv_v_x_i32m1(0, unpacket_traits<Packet1Xi>::size),
+                                                      unpacket_traits<Packet1Xi>::size));
+}
+
+template <>
+EIGEN_STRONG_INLINE numext::int32_t predux_mul<Packet1Xi>(const Packet1Xi& a) {
+  // Multiply the vector by its reverse
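+  // (pairing element i with element size-1-i), then repeatedly fold the upper
+  // half into the lower half; each slidedown halves the live lanes until the
+  // full product reaches lane 0.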
+  Packet1Xi prod = __riscv_vmul_vv_i32m1(preverse(a), a, unpacket_traits<Packet1Xi>::size);
+  Packet1Xi half_prod;
+
+  if (EIGEN_RISCV64_RVV_VL >= 1024) {
+    half_prod = __riscv_vslidedown_vx_i32m1(prod, 8, unpacket_traits<Packet1Xi>::size);
+    prod = __riscv_vmul_vv_i32m1(prod, half_prod, unpacket_traits<Packet1Xi>::size);
+  }
+  if (EIGEN_RISCV64_RVV_VL >= 512) {
+    half_prod = __riscv_vslidedown_vx_i32m1(prod, 4, unpacket_traits<Packet1Xi>::size);
+    prod = __riscv_vmul_vv_i32m1(prod, half_prod, unpacket_traits<Packet1Xi>::size);
+  }
+  if (EIGEN_RISCV64_RVV_VL >= 256) {
+    half_prod = __riscv_vslidedown_vx_i32m1(prod, 2, unpacket_traits<Packet1Xi>::size);
+    prod = __riscv_vmul_vv_i32m1(prod, half_prod, unpacket_traits<Packet1Xi>::size);
+  }
+  // Last reduction
+  half_prod = __riscv_vslidedown_vx_i32m1(prod, 1, unpacket_traits<Packet1Xi>::size);
+  prod = __riscv_vmul_vv_i32m1(prod, half_prod, unpacket_traits<Packet1Xi>::size);
+
+  // The reduced product now sits in the first element.
+  return pfirst(prod);
+}
+
+template <>
+EIGEN_STRONG_INLINE numext::int32_t predux_min<Packet1Xi>(const Packet1Xi& a) {
+  return __riscv_vmv_x(__riscv_vredmin_vs_i32m1_i32m1(
+      a, __riscv_vmv_v_x_i32m1((std::numeric_limits<numext::int32_t>::max)(), unpacket_traits<Packet1Xi>::size),
+      unpacket_traits<Packet1Xi>::size));
+}
+
+template <>
+EIGEN_STRONG_INLINE numext::int32_t predux_max<Packet1Xi>(const Packet1Xi& a) {
+  return __riscv_vmv_x(__riscv_vredmax_vs_i32m1_i32m1(
+      a, __riscv_vmv_v_x_i32m1((std::numeric_limits<numext::int32_t>::min)(), unpacket_traits<Packet1Xi>::size),
+      unpacket_traits<Packet1Xi>::size));
+}
+
+template <int N>
+EIGEN_DEVICE_FUNC inline void ptranspose(PacketBlock<Packet1Xi, N>& kernel) {
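+  // Transpose through a scratch buffer: the strided stores interleave the
+  // packets element-wise (packet i becomes column i of an N-column row-major
+  // matrix), and the contiguous loads read the transposed result back.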
+  numext::int32_t buffer[unpacket_traits<Packet1Xi>::size * N] = {0};
+  int i = 0;
+
+  for (i = 0; i < N; i++) {
+    __riscv_vsse32(&buffer[i], N * sizeof(numext::int32_t), kernel.packet[i], unpacket_traits<Packet1Xi>::size);
+  }
+  for (i = 0; i < N; i++) {
+    kernel.packet[i] =
+        __riscv_vle32_v_i32m1(&buffer[i * unpacket_traits<Packet1Xi>::size], unpacket_traits<Packet1Xi>::size);
+  }
+}
+
+/********************************* float32 ************************************/
+
+typedef eigen_packet_wrapper<vfloat32m1_t __attribute__((riscv_rvv_vector_bits(EIGEN_RISCV64_RVV_VL))), 6> Packet1Xf;
+typedef eigen_packet_wrapper<vfloat32m2_t __attribute__((riscv_rvv_vector_bits(EIGEN_RISCV64_RVV_VL * 2))), 7>
+    Packet2Xf;
+typedef eigen_packet_wrapper<vfloat32m4_t __attribute__((riscv_rvv_vector_bits(EIGEN_RISCV64_RVV_VL * 4))), 8>
+    Packet4Xf;
+
+#if EIGEN_RISCV64_DEFAULT_LMUL == 1
+typedef Packet1Xf PacketXf;
+
+template <>
+struct packet_traits<float> : default_packet_traits {
+  typedef Packet1Xf type;
+  typedef Packet1Xf half;
+
+  enum {
+    Vectorizable = 1,
+    AlignedOnScalar = 1,
+    size = rvv_packet_size_selector<float, EIGEN_RISCV64_RVV_VL, 1>::size,
+
+    HasAdd = 1,
+    HasSub = 1,
+    HasShift = 1,
+    HasMul = 1,
+    HasNegate = 1,
+    HasAbs = 1,
+    HasArg = 0,
+    HasAbs2 = 1,
+    HasMin = 1,
+    HasMax = 1,
+    HasConj = 1,
+    HasSetLinear = 0,
+    HasBlend = 0,
+    HasReduxp = 0,
+
+    HasCmp = 1,
+    HasDiv = 1,
+    HasRound = 1,
+
+    HasSin = EIGEN_FAST_MATH,
+    HasCos = EIGEN_FAST_MATH,
+    HasLog = 1,
+    HasExp = 1,
+    HasSqrt = 1,
+    HasTanh = EIGEN_FAST_MATH,
+    HasErf = EIGEN_FAST_MATH
+  };
+};
+
+#elif EIGEN_RISCV64_DEFAULT_LMUL == 2
+typedef Packet2Xf PacketXf;
+
+template <>
+struct packet_traits<float> : default_packet_traits {
+  typedef Packet2Xf type;
+  typedef Packet1Xf half;
+
+  enum {
+    Vectorizable = 1,
+    AlignedOnScalar = 1,
+    size = rvv_packet_size_selector<float, EIGEN_RISCV64_RVV_VL, 2>::size,
+
+    HasAdd = 1,
+    HasSub = 1,
+    HasShift = 1,
+    HasMul = 1,
+    HasNegate = 1,
+    HasAbs = 1,
+    HasArg = 0,
+    HasAbs2 = 1,
+    HasMin = 1,
+    HasMax = 1,
+    HasConj = 1,
+    HasSetLinear = 0,
+    HasBlend = 0,
+    HasReduxp = 0,
+
+    HasCmp = 1,
+    HasDiv = 1,
+    HasRound = 1,
+
+    HasSin = EIGEN_FAST_MATH,
+    HasCos = EIGEN_FAST_MATH,
+    HasLog = 1,
+    HasExp = 1,
+    HasSqrt = 1,
+    HasTanh = EIGEN_FAST_MATH,
+    HasErf = EIGEN_FAST_MATH
+  };
+};
+
+#elif EIGEN_RISCV64_DEFAULT_LMUL == 4
+typedef Packet4Xf PacketXf;
+
+template <>
+struct packet_traits<float> : default_packet_traits {
+  typedef Packet4Xf type;
+  typedef Packet2Xf half;
+
+  enum {
+    Vectorizable = 1,
+    AlignedOnScalar = 1,
+    size = rvv_packet_size_selector<float, EIGEN_RISCV64_RVV_VL, 4>::size,
+
+    HasAdd = 1,
+    HasSub = 1,
+    HasShift = 1,
+    HasMul = 1,
+    HasNegate = 1,
+    HasAbs = 1,
+    HasArg = 0,
+    HasAbs2 = 1,
+    HasMin = 1,
+    HasMax = 1,
+    HasConj = 1,
+    HasSetLinear = 0,
+    HasBlend = 0,
+    HasReduxp = 0,
+
+    HasCmp = 1,
+    HasDiv = 1,
+    HasRound = 1,
+
+    HasSin = EIGEN_FAST_MATH,
+    HasCos = EIGEN_FAST_MATH,
+    HasLog = 1,
+    HasExp = 1,
+    HasSqrt = 1,
+    HasTanh = EIGEN_FAST_MATH,
+    HasErf = EIGEN_FAST_MATH
+  };
+};
+#endif
+
+template <>
+struct unpacket_traits<Packet1Xf> {
+  typedef float type;
+  typedef Packet1Xf half;  // Half not yet implemented
+  typedef Packet1Xi integer_packet;
+  typedef numext::uint8_t mask_t;
+  typedef PacketMask32 packet_mask;
+
+  enum {
+    size = rvv_packet_size_selector<float, EIGEN_RISCV64_RVV_VL, 1>::size,
+    alignment = rvv_packet_alignment_selector<EIGEN_RISCV64_RVV_VL, 1>::alignment,
+    vectorizable = true,
+    masked_load_available = false,
+    masked_store_available = false
+  };
+};
+
+template <>
+struct unpacket_traits<Packet2Xf> {
+  typedef float type;
+  typedef Packet1Xf half;
+  typedef Packet2Xi integer_packet;
+  typedef numext::uint8_t mask_t;
+  typedef PacketMask16 packet_mask;
+
+  enum {
+    size = rvv_packet_size_selector<float, EIGEN_RISCV64_RVV_VL, 2>::size,
+    alignment = rvv_packet_alignment_selector<EIGEN_RISCV64_RVV_VL, 2>::alignment,
+    vectorizable = true,
+    masked_load_available = false,
+    masked_store_available = false
+  };
+};
+
+template <>
+struct unpacket_traits<Packet4Xf> {
+  typedef float type;
+  typedef Packet2Xf half;
+  typedef Packet4Xi integer_packet;
+  typedef numext::uint8_t mask_t;
+  typedef PacketMask8 packet_mask;
+
+  enum {
+    size = rvv_packet_size_selector<float, EIGEN_RISCV64_RVV_VL, 4>::size,
+    alignment = rvv_packet_alignment_selector<EIGEN_RISCV64_RVV_VL, 4>::alignment,
+    vectorizable = true,
+    masked_load_available = false,
+    masked_store_available = false
+  };
+};
+
+/********************************* Packet1Xf ************************************/
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xf ptrue<Packet1Xf>(const Packet1Xf& /*a*/) {
+  return __riscv_vreinterpret_f32m1(__riscv_vmv_v_x_u32m1(0xffffffffu, unpacket_traits<Packet1Xf>::size));
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xf pzero<Packet1Xf>(const Packet1Xf& /*a*/) {
+  return __riscv_vfmv_v_f_f32m1(0.0f, unpacket_traits<Packet1Xf>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xf pabs(const Packet1Xf& a) {
+  return __riscv_vfabs_v_f32m1(a, unpacket_traits<Packet1Xf>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xf pset1<Packet1Xf>(const float& from) {
+  return __riscv_vfmv_v_f_f32m1(from, unpacket_traits<Packet1Xf>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xf pset1frombits<Packet1Xf>(numext::uint32_t from) {
+  return __riscv_vreinterpret_f32m1(__riscv_vmv_v_x_u32m1(from, unpacket_traits<Packet1Xf>::size));
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xf plset<Packet1Xf>(const float& a) {
+  Packet1Xf idx = __riscv_vfcvt_f_x_v_f32m1(
+      __riscv_vreinterpret_v_u32m1_i32m1(__riscv_vid_v_u32m1(unpacket_traits<Packet1Xi>::size)),
+      unpacket_traits<Packet1Xf>::size);
+  return __riscv_vfadd_vf_f32m1(idx, a, unpacket_traits<Packet1Xf>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xf padd<Packet1Xf>(const Packet1Xf& a, const Packet1Xf& b) {
+  return __riscv_vfadd_vv_f32m1(a, b, unpacket_traits<Packet1Xf>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xf psub<Packet1Xf>(const Packet1Xf& a, const Packet1Xf& b) {
+  return __riscv_vfsub_vv_f32m1(a, b, unpacket_traits<Packet1Xf>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xf pnegate(const Packet1Xf& a) {
+  return __riscv_vfneg_v_f32m1(a, unpacket_traits<Packet1Xf>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xf pconj(const Packet1Xf& a) {
+  return a;
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xf pmul<Packet1Xf>(const Packet1Xf& a, const Packet1Xf& b) {
+  return __riscv_vfmul_vv_f32m1(a, b, unpacket_traits<Packet1Xf>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xf pdiv<Packet1Xf>(const Packet1Xf& a, const Packet1Xf& b) {
+  return __riscv_vfdiv_vv_f32m1(a, b, unpacket_traits<Packet1Xf>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xf pmadd(const Packet1Xf& a, const Packet1Xf& b, const Packet1Xf& c) {
+  return __riscv_vfmadd_vv_f32m1(a, b, c, unpacket_traits<Packet1Xf>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xf pmsub(const Packet1Xf& a, const Packet1Xf& b, const Packet1Xf& c) {
+  return __riscv_vfmsub_vv_f32m1(a, b, c, unpacket_traits<Packet1Xf>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xf pnmadd(const Packet1Xf& a, const Packet1Xf& b, const Packet1Xf& c) {
+  return __riscv_vfnmsub_vv_f32m1(a, b, c, unpacket_traits<Packet1Xf>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xf pnmsub(const Packet1Xf& a, const Packet1Xf& b, const Packet1Xf& c) {
+  return __riscv_vfnmadd_vv_f32m1(a, b, c, unpacket_traits<Packet1Xf>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xf pmin<Packet1Xf>(const Packet1Xf& a, const Packet1Xf& b) {
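+  // RVV vfmin returns the non-NaN operand when only one input is NaN, so to
+  // propagate NaN we start from a NaN vector and compute the min only where
+  // both inputs are ordered (equal to themselves).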
+  Packet1Xf nans = __riscv_vfmv_v_f_f32m1((std::numeric_limits<float>::quiet_NaN)(), unpacket_traits<Packet1Xf>::size);
+  PacketMask32 mask = __riscv_vmfeq_vv_f32m1_b32(a, a, unpacket_traits<Packet1Xf>::size);
+  PacketMask32 mask2 = __riscv_vmfeq_vv_f32m1_b32(b, b, unpacket_traits<Packet1Xf>::size);
+  mask = __riscv_vmand_mm_b32(mask, mask2, unpacket_traits<Packet1Xf>::size);
+
+  return __riscv_vfmin_vv_f32m1_tumu(mask, nans, a, b, unpacket_traits<Packet1Xf>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xf pmin<PropagateNaN, Packet1Xf>(const Packet1Xf& a, const Packet1Xf& b) {
+  return pmin<Packet1Xf>(a, b);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xf pmin<PropagateNumbers, Packet1Xf>(const Packet1Xf& a, const Packet1Xf& b) {
+  return __riscv_vfmin_vv_f32m1(a, b, unpacket_traits<Packet1Xf>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xf pmax<Packet1Xf>(const Packet1Xf& a, const Packet1Xf& b) {
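+  // Same NaN propagation as pmin above: lanes where either input is NaN
+  // produce a quiet NaN instead of vfmax's non-NaN operand.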
+  Packet1Xf nans = __riscv_vfmv_v_f_f32m1((std::numeric_limits<float>::quiet_NaN)(), unpacket_traits<Packet1Xf>::size);
+  PacketMask32 mask = __riscv_vmfeq_vv_f32m1_b32(a, a, unpacket_traits<Packet1Xf>::size);
+  PacketMask32 mask2 = __riscv_vmfeq_vv_f32m1_b32(b, b, unpacket_traits<Packet1Xf>::size);
+  mask = __riscv_vmand_mm_b32(mask, mask2, unpacket_traits<Packet1Xf>::size);
+
+  return __riscv_vfmax_vv_f32m1_tumu(mask, nans, a, b, unpacket_traits<Packet1Xf>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xf pmax<PropagateNaN, Packet1Xf>(const Packet1Xf& a, const Packet1Xf& b) {
+  return pmax<Packet1Xf>(a, b);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xf pmax<PropagateNumbers, Packet1Xf>(const Packet1Xf& a, const Packet1Xf& b) {
+  return __riscv_vfmax_vv_f32m1(a, b, unpacket_traits<Packet1Xf>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xf pcmp_le<Packet1Xf>(const Packet1Xf& a, const Packet1Xf& b) {
+  PacketMask32 mask = __riscv_vmfle_vv_f32m1_b32(a, b, unpacket_traits<Packet1Xf>::size);
+  return __riscv_vmerge_vvm_f32m1(pzero<Packet1Xf>(a), ptrue<Packet1Xf>(a), mask, unpacket_traits<Packet1Xf>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xf pcmp_lt<Packet1Xf>(const Packet1Xf& a, const Packet1Xf& b) {
+  PacketMask32 mask = __riscv_vmflt_vv_f32m1_b32(a, b, unpacket_traits<Packet1Xf>::size);
+  return __riscv_vmerge_vvm_f32m1(pzero<Packet1Xf>(a), ptrue<Packet1Xf>(a), mask, unpacket_traits<Packet1Xf>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xf pcmp_eq<Packet1Xf>(const Packet1Xf& a, const Packet1Xf& b) {
+  PacketMask32 mask = __riscv_vmfeq_vv_f32m1_b32(a, b, unpacket_traits<Packet1Xf>::size);
+  return __riscv_vmerge_vvm_f32m1(pzero<Packet1Xf>(a), ptrue<Packet1Xf>(a), mask, unpacket_traits<Packet1Xf>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xf pcmp_lt_or_nan<Packet1Xf>(const Packet1Xf& a, const Packet1Xf& b) {
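+  // vmfge is an ordered compare, so it is false when either operand is NaN;
+  // those lanes keep the all-ones value, and only a >= b lanes are zeroed.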
+  PacketMask32 mask = __riscv_vmfge_vv_f32m1_b32(a, b, unpacket_traits<Packet1Xf>::size);
+  return __riscv_vfmerge_vfm_f32m1(ptrue<Packet1Xf>(a), 0.0f, mask, unpacket_traits<Packet1Xf>::size);
+}
+
+// Bitwise logical operations are not defined for float vectors, so reinterpret
+// to unsigned integers, operate, and reinterpret back.
+template <>
+EIGEN_STRONG_INLINE Packet1Xf pand<Packet1Xf>(const Packet1Xf& a, const Packet1Xf& b) {
+  return __riscv_vreinterpret_v_u32m1_f32m1(__riscv_vand_vv_u32m1(
+      __riscv_vreinterpret_v_f32m1_u32m1(a), __riscv_vreinterpret_v_f32m1_u32m1(b), unpacket_traits<Packet1Xf>::size));
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xf por<Packet1Xf>(const Packet1Xf& a, const Packet1Xf& b) {
+  return __riscv_vreinterpret_v_u32m1_f32m1(__riscv_vor_vv_u32m1(
+      __riscv_vreinterpret_v_f32m1_u32m1(a), __riscv_vreinterpret_v_f32m1_u32m1(b), unpacket_traits<Packet1Xf>::size));
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xf pxor<Packet1Xf>(const Packet1Xf& a, const Packet1Xf& b) {
+  return __riscv_vreinterpret_v_u32m1_f32m1(__riscv_vxor_vv_u32m1(
+      __riscv_vreinterpret_v_f32m1_u32m1(a), __riscv_vreinterpret_v_f32m1_u32m1(b), unpacket_traits<Packet1Xf>::size));
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xf pandnot<Packet1Xf>(const Packet1Xf& a, const Packet1Xf& b) {
+  return __riscv_vreinterpret_v_u32m1_f32m1(__riscv_vand_vv_u32m1(
+      __riscv_vreinterpret_v_f32m1_u32m1(a),
+      __riscv_vnot_v_u32m1(__riscv_vreinterpret_v_f32m1_u32m1(b), unpacket_traits<Packet1Xf>::size),
+      unpacket_traits<Packet1Xf>::size));
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xf pload<Packet1Xf>(const float* from) {
+  EIGEN_DEBUG_ALIGNED_LOAD return __riscv_vle32_v_f32m1(from, unpacket_traits<Packet1Xf>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xf ploadu<Packet1Xf>(const float* from) {
+  EIGEN_DEBUG_UNALIGNED_LOAD return __riscv_vle32_v_f32m1(from, unpacket_traits<Packet1Xf>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xf ploaddup<Packet1Xf>(const float* from) {
+  Packet1Xu idx = __riscv_vid_v_u32m1(unpacket_traits<Packet1Xf>::size);
+  idx = __riscv_vsll_vx_u32m1(__riscv_vand_vx_u32m1(idx, 0xfffffffeu, unpacket_traits<Packet1Xf>::size), 1,
+                              unpacket_traits<Packet1Xf>::size);
+  return __riscv_vloxei32_v_f32m1(from, idx, unpacket_traits<Packet1Xf>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xf ploadquad<Packet1Xf>(const float* from) {
+  Packet1Xu idx = __riscv_vid_v_u32m1(unpacket_traits<Packet1Xf>::size);
+  idx = __riscv_vand_vx_u32m1(idx, 0xfffffffcu, unpacket_traits<Packet1Xf>::size);
+  return __riscv_vloxei32_v_f32m1(from, idx, unpacket_traits<Packet1Xf>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE void pstore<float>(float* to, const Packet1Xf& from) {
+  EIGEN_DEBUG_ALIGNED_STORE __riscv_vse32_v_f32m1(to, from, unpacket_traits<Packet1Xf>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE void pstoreu<float>(float* to, const Packet1Xf& from) {
+  EIGEN_DEBUG_UNALIGNED_STORE __riscv_vse32_v_f32m1(to, from, unpacket_traits<Packet1Xf>::size);
+}
+
+template <>
+EIGEN_DEVICE_FUNC inline Packet1Xf pgather<float, Packet1Xf>(const float* from, Index stride) {
+  return __riscv_vlse32_v_f32m1(from, stride * sizeof(float), unpacket_traits<Packet1Xf>::size);
+}
+
+template <>
+EIGEN_DEVICE_FUNC inline void pscatter<float, Packet1Xf>(float* to, const Packet1Xf& from, Index stride) {
+  __riscv_vsse32(to, stride * sizeof(float), from, unpacket_traits<Packet1Xf>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE float pfirst<Packet1Xf>(const Packet1Xf& a) {
+  return __riscv_vfmv_f_s_f32m1_f32(a);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xf psqrt(const Packet1Xf& a) {
+  return __riscv_vfsqrt_v_f32m1(a, unpacket_traits<Packet1Xf>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xf print<Packet1Xf>(const Packet1Xf& a) {
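+  // Round to nearest integer via an int32 round-trip. The converted value is
+  // only used where |a| < 2^23: larger magnitudes are already integral, and
+  // NaN lanes fall through as a + a (quieted). vfsgnj restores the sign so
+  // that values rounding to zero keep their sign bit.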
+  const Packet1Xf limit = pset1<Packet1Xf>(static_cast<float>(1 << 23));
+  const Packet1Xf abs_a = pabs(a);
+
+  PacketMask32 mask = __riscv_vmfne_vv_f32m1_b32(a, a, unpacket_traits<Packet1Xf>::size);
+  const Packet1Xf x = __riscv_vfadd_vv_f32m1_tumu(mask, a, a, a, unpacket_traits<Packet1Xf>::size);
+  const Packet1Xf new_x = __riscv_vfcvt_f_x_v_f32m1(__riscv_vfcvt_x_f_v_i32m1(a, unpacket_traits<Packet1Xf>::size),
+                                                   unpacket_traits<Packet1Xf>::size);
+
+  mask = __riscv_vmflt_vv_f32m1_b32(abs_a, limit, unpacket_traits<Packet1Xf>::size);
+  Packet1Xf signed_x = __riscv_vfsgnj_vv_f32m1(new_x, x, unpacket_traits<Packet1Xf>::size);
+  return __riscv_vmerge_vvm_f32m1(x, signed_x, mask, unpacket_traits<Packet1Xf>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xf pfloor<Packet1Xf>(const Packet1Xf& a) {
+  Packet1Xf tmp = print<Packet1Xf>(a);
+  // If the rounded value exceeds a, subtract one to get the floor.
+  PacketMask32 mask = __riscv_vmflt_vv_f32m1_b32(a, tmp, unpacket_traits<Packet1Xf>::size);
+  return __riscv_vfsub_vf_f32m1_tumu(mask, tmp, tmp, 1.0f, unpacket_traits<Packet1Xf>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xf preverse(const Packet1Xf& a) {
+  Packet1Xu idx = __riscv_vrsub_vx_u32m1(__riscv_vid_v_u32m1(unpacket_traits<Packet1Xf>::size),
+                                        unpacket_traits<Packet1Xf>::size - 1, unpacket_traits<Packet1Xf>::size);
+  return __riscv_vrgather_vv_f32m1(a, idx, unpacket_traits<Packet1Xf>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xf pfrexp<Packet1Xf>(const Packet1Xf& a, Packet1Xf& exponent) {
+  return pfrexp_generic(a, exponent);
+}
+
+template <>
+EIGEN_STRONG_INLINE float predux<Packet1Xf>(const Packet1Xf& a) {
+  return __riscv_vfmv_f(__riscv_vfredusum_vs_f32m1_f32m1(
+      a, __riscv_vfmv_v_f_f32m1(0.0, unpacket_traits<Packet1Xf>::size), unpacket_traits<Packet1Xf>::size));
+}
+
+template <>
+EIGEN_STRONG_INLINE float predux_mul<Packet1Xf>(const Packet1Xf& a) {
+  // Multiply the vector by its reverse
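+  // and fold halves exactly as in the integer predux_mul above.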
+  Packet1Xf prod = __riscv_vfmul_vv_f32m1(preverse(a), a, unpacket_traits<Packet1Xf>::size);
+  Packet1Xf half_prod;
+
+  if (EIGEN_RISCV64_RVV_VL >= 1024) {
+    half_prod = __riscv_vslidedown_vx_f32m1(prod, 8, unpacket_traits<Packet1Xf>::size);
+    prod = __riscv_vfmul_vv_f32m1(prod, half_prod, unpacket_traits<Packet1Xf>::size);
+  }
+  if (EIGEN_RISCV64_RVV_VL >= 512) {
+    half_prod = __riscv_vslidedown_vx_f32m1(prod, 4, unpacket_traits<Packet1Xf>::size);
+    prod = __riscv_vfmul_vv_f32m1(prod, half_prod, unpacket_traits<Packet1Xf>::size);
+  }
+  if (EIGEN_RISCV64_RVV_VL >= 256) {
+    half_prod = __riscv_vslidedown_vx_f32m1(prod, 2, unpacket_traits<Packet1Xf>::size);
+    prod = __riscv_vfmul_vv_f32m1(prod, half_prod, unpacket_traits<Packet1Xf>::size);
+  }
+  // Last reduction
+  half_prod = __riscv_vslidedown_vx_f32m1(prod, 1, unpacket_traits<Packet1Xf>::size);
+  prod = __riscv_vfmul_vv_f32m1(prod, half_prod, unpacket_traits<Packet1Xf>::size);
+
+  // The reduced product now sits in the first element.
+  return pfirst(prod);
+}
+
+template <>
+EIGEN_STRONG_INLINE float predux_min<Packet1Xf>(const Packet1Xf& a) {
+  float red = __riscv_vfmv_f(__riscv_vfredmin_vs_f32m1_f32m1(
+      a, __riscv_vfmv_v_f_f32m1((std::numeric_limits<float>::quiet_NaN)(), unpacket_traits<Packet1Xf>::size),
+      unpacket_traits<Packet1Xf>::size));
+  return (std::min)(red, (std::numeric_limits<float>::max)());
+}
+
+template <>
+EIGEN_STRONG_INLINE float predux_max<Packet1Xf>(const Packet1Xf& a) {
+  float red = __riscv_vfmv_f(__riscv_vfredmax_vs_f32m1_f32m1(
+      a, __riscv_vfmv_v_f_f32m1((std::numeric_limits<float>::quiet_NaN)(), unpacket_traits<Packet1Xf>::size),
+      unpacket_traits<Packet1Xf>::size));
+  return (std::max)(red, -(std::numeric_limits<float>::max)());
+}
+
+template <int N>
+EIGEN_DEVICE_FUNC inline void ptranspose(PacketBlock<Packet1Xf, N>& kernel) {
+  float buffer[unpacket_traits<Packet1Xf>::size * N];
+  int i = 0;
+
+  for (i = 0; i < N; i++) {
+    __riscv_vsse32(&buffer[i], N * sizeof(float), kernel.packet[i], unpacket_traits<Packet1Xf>::size);
+  }
+
+  for (i = 0; i < N; i++) {
+    kernel.packet[i] =
+        __riscv_vle32_v_f32m1(&buffer[i * unpacket_traits<Packet1Xf>::size], unpacket_traits<Packet1Xf>::size);
+  }
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xf pldexp<Packet1Xf>(const Packet1Xf& a, const Packet1Xf& exponent) {
+  return pldexp_generic(a, exponent);
+}
+
+template <>
+EIGEN_STRONG_INLINE PacketMask32 por(const PacketMask32& a, const PacketMask32& b) {
+  return __riscv_vmor_mm_b32(a, b, unpacket_traits<Packet1Xf>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE PacketMask32 pand(const PacketMask32& a, const PacketMask32& b) {
+  return __riscv_vmand_mm_b32(a, b, unpacket_traits<Packet1Xf>::size);
+}
+
+EIGEN_STRONG_INLINE PacketMask32 pcmp_eq_mask(const Packet1Xf& a, const Packet1Xf& b) {
+  return __riscv_vmfeq_vv_f32m1_b32(a, b, unpacket_traits<Packet1Xf>::size);
+}
+
+EIGEN_STRONG_INLINE PacketMask32 pcmp_lt_mask(const Packet1Xf& a, const Packet1Xf& b) {
+  return __riscv_vmflt_vv_f32m1_b32(a, b, unpacket_traits<Packet1Xf>::size);
+}
+
+EIGEN_STRONG_INLINE Packet1Xf pselect(const PacketMask32& mask, const Packet1Xf& a, const Packet1Xf& b) {
+  return __riscv_vmerge_vvm_f32m1(b, a, mask, unpacket_traits<Packet1Xf>::size);
+}
+
+/********************************* int64 **************************************/
+
+typedef eigen_packet_wrapper<vint64m1_t __attribute__((riscv_rvv_vector_bits(EIGEN_RISCV64_RVV_VL))), 9> Packet1Xl;
+typedef eigen_packet_wrapper<vuint64m1_t __attribute__((riscv_rvv_vector_bits(EIGEN_RISCV64_RVV_VL))), 10> Packet1Xul;
+
+typedef eigen_packet_wrapper<vint64m2_t __attribute__((riscv_rvv_vector_bits(EIGEN_RISCV64_RVV_VL * 2))), 11>
+    Packet2Xl;
+typedef eigen_packet_wrapper<vuint64m2_t __attribute__((riscv_rvv_vector_bits(EIGEN_RISCV64_RVV_VL * 2))), 12>
+    Packet2Xul;
+
+typedef eigen_packet_wrapper<vint64m4_t __attribute__((riscv_rvv_vector_bits(EIGEN_RISCV64_RVV_VL * 4))), 13>
+    Packet4Xl;
+typedef eigen_packet_wrapper<vuint64m4_t __attribute__((riscv_rvv_vector_bits(EIGEN_RISCV64_RVV_VL * 4))), 14>
+    Packet4Xul;
+
+#if EIGEN_RISCV64_DEFAULT_LMUL == 1
+typedef Packet1Xl PacketXl;
+typedef Packet1Xul PacketXul;
+
+template <>
+struct packet_traits<numext::int64_t> : default_packet_traits {
+  typedef Packet1Xl type;
+  typedef Packet1Xl half;  // Half not implemented yet
+  enum {
+    Vectorizable = 1,
+    AlignedOnScalar = 1,
+    size = rvv_packet_size_selector<numext::int64_t, EIGEN_RISCV64_RVV_VL, 1>::size,
+
+    HasAdd = 1,
+    HasSub = 1,
+    HasShift = 1,
+    HasMul = 1,
+    HasNegate = 1,
+    HasAbs = 1,
+    HasArg = 0,
+    HasAbs2 = 1,
+    HasMin = 1,
+    HasMax = 1,
+    HasConj = 1,
+    HasSetLinear = 0,
+    HasBlend = 0,
+    HasReduxp = 0
+  };
+};
+
+#elif EIGEN_RISCV64_DEFAULT_LMUL == 2
+typedef Packet2Xl PacketXl;
+typedef Packet2Xul PacketXul;
+
+template <>
+struct packet_traits<numext::int64_t> : default_packet_traits {
+  typedef Packet2Xl type;
+  typedef Packet1Xl half;
+  enum {
+    Vectorizable = 1,
+    AlignedOnScalar = 1,
+    size = rvv_packet_size_selector<numext::int64_t, EIGEN_RISCV64_RVV_VL, 2>::size,
+
+    HasAdd = 1,
+    HasSub = 1,
+    HasShift = 1,
+    HasMul = 1,
+    HasNegate = 1,
+    HasAbs = 1,
+    HasArg = 0,
+    HasAbs2 = 1,
+    HasMin = 1,
+    HasMax = 1,
+    HasConj = 1,
+    HasSetLinear = 0,
+    HasBlend = 0,
+    HasReduxp = 0
+  };
+};
+
+#elif EIGEN_RISCV64_DEFAULT_LMUL == 4
+typedef Packet4Xl PacketXl;
+typedef Packet4Xul PacketXul;
+
+template <>
+struct packet_traits<numext::int64_t> : default_packet_traits {
+  typedef Packet4Xl type;
+  typedef Packet2Xl half;
+  enum {
+    Vectorizable = 1,
+    AlignedOnScalar = 1,
+    size = rvv_packet_size_selector<numext::int64_t, EIGEN_RISCV64_RVV_VL, 4>::size,
+
+    HasAdd = 1,
+    HasSub = 1,
+    HasShift = 1,
+    HasMul = 1,
+    HasNegate = 1,
+    HasAbs = 1,
+    HasArg = 0,
+    HasAbs2 = 1,
+    HasMin = 1,
+    HasMax = 1,
+    HasConj = 1,
+    HasSetLinear = 0,
+    HasBlend = 0,
+    HasReduxp = 0
+  };
+};
+#endif
+
+template <>
+struct unpacket_traits<Packet1Xl> {
+  typedef numext::int64_t type;
+  typedef Packet1Xl half;  // Half not yet implemented
+  typedef numext::uint8_t mask_t;
+  enum {
+    size = rvv_packet_size_selector<numext::int64_t, EIGEN_RISCV64_RVV_VL, 1>::size,
+    alignment = rvv_packet_alignment_selector<EIGEN_RISCV64_RVV_VL, 1>::alignment,
+    vectorizable = true,
+    masked_load_available = false,
+    masked_store_available = false
+  };
+};
+
+template <>
+struct unpacket_traits<Packet2Xl> {
+  typedef numext::int64_t type;
+  typedef Packet1Xl half;
+  typedef numext::uint8_t mask_t;
+  enum {
+    size = rvv_packet_size_selector<numext::int64_t, EIGEN_RISCV64_RVV_VL, 2>::size,
+    alignment = rvv_packet_alignment_selector<EIGEN_RISCV64_RVV_VL, 2>::alignment,
+    vectorizable = true,
+    masked_load_available = false,
+    masked_store_available = false
+  };
+};
+
+template <>
+struct unpacket_traits<Packet4Xl> {
+  typedef numext::int64_t type;
+  typedef Packet2Xl half;
+  typedef numext::uint8_t mask_t;
+  enum {
+    size = rvv_packet_size_selector<numext::int64_t, EIGEN_RISCV64_RVV_VL, 4>::size,
+    alignment = rvv_packet_alignment_selector<EIGEN_RISCV64_RVV_VL, 4>::alignment,
+    vectorizable = true,
+    masked_load_available = false,
+    masked_store_available = false
+  };
+};
+
+template <>
+EIGEN_STRONG_INLINE void prefetch<numext::int64_t>(const numext::int64_t* addr) {
+#if EIGEN_HAS_BUILTIN(__builtin_prefetch) || EIGEN_COMP_GNUC
+  __builtin_prefetch(addr);
+#endif
+}
+
+/********************************* Packet1Xl ************************************/
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xl pset1<Packet1Xl>(const numext::int64_t& from) {
+  return __riscv_vmv_v_x_i64m1(from, unpacket_traits<Packet1Xl>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xl plset<Packet1Xl>(const numext::int64_t& a) {
+  Packet1Xl idx = __riscv_vreinterpret_v_u64m1_i64m1(__riscv_vid_v_u64m1(unpacket_traits<Packet1Xl>::size));
+  return __riscv_vadd_vx_i64m1(idx, a, unpacket_traits<Packet1Xl>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xl pzero<Packet1Xl>(const Packet1Xl& /*a*/) {
+  return __riscv_vmv_v_x_i64m1(0, unpacket_traits<Packet1Xl>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xl padd<Packet1Xl>(const Packet1Xl& a, const Packet1Xl& b) {
+  return __riscv_vadd_vv_i64m1(a, b, unpacket_traits<Packet1Xl>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xl psub<Packet1Xl>(const Packet1Xl& a, const Packet1Xl& b) {
+  return __riscv_vsub(a, b, unpacket_traits<Packet1Xl>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xl pnegate(const Packet1Xl& a) {
+  return __riscv_vneg(a, unpacket_traits<Packet1Xl>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xl pconj(const Packet1Xl& a) {
+  return a;
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xl pmul<Packet1Xl>(const Packet1Xl& a, const Packet1Xl& b) {
+  return __riscv_vmul(a, b, unpacket_traits<Packet1Xl>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xl pdiv<Packet1Xl>(const Packet1Xl& a, const Packet1Xl& b) {
+  return __riscv_vdiv(a, b, unpacket_traits<Packet1Xl>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xl pmadd(const Packet1Xl& a, const Packet1Xl& b, const Packet1Xl& c) {
+  return __riscv_vmadd(a, b, c, unpacket_traits<Packet1Xl>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xl pmsub(const Packet1Xl& a, const Packet1Xl& b, const Packet1Xl& c) {
+  return __riscv_vmadd(a, b, pnegate(c), unpacket_traits<Packet1Xl>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xl pnmadd(const Packet1Xl& a, const Packet1Xl& b, const Packet1Xl& c) {
+  return __riscv_vnmsub_vv_i64m1(a, b, c, unpacket_traits<Packet1Xl>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xl pnmsub(const Packet1Xl& a, const Packet1Xl& b, const Packet1Xl& c) {
+  return __riscv_vnmsub_vv_i64m1(a, b, pnegate(c), unpacket_traits<Packet1Xl>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xl pmin<Packet1Xl>(const Packet1Xl& a, const Packet1Xl& b) {
+  return __riscv_vmin(a, b, unpacket_traits<Packet1Xl>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xl pmax<Packet1Xl>(const Packet1Xl& a, const Packet1Xl& b) {
+  return __riscv_vmax(a, b, unpacket_traits<Packet1Xl>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xl pcmp_le<Packet1Xl>(const Packet1Xl& a, const Packet1Xl& b) {
+  PacketMask64 mask = __riscv_vmsle_vv_i64m1_b64(a, b, unpacket_traits<Packet1Xl>::size);
+  return __riscv_vmerge_vxm_i64m1(pzero(a), 0xffffffffffffffff, mask, unpacket_traits<Packet1Xl>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xl pcmp_lt<Packet1Xl>(const Packet1Xl& a, const Packet1Xl& b) {
+  PacketMask64 mask = __riscv_vmslt_vv_i64m1_b64(a, b, unpacket_traits<Packet1Xl>::size);
+  return __riscv_vmerge_vxm_i64m1(pzero(a), 0xffffffffffffffff, mask, unpacket_traits<Packet1Xl>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xl pcmp_eq<Packet1Xl>(const Packet1Xl& a, const Packet1Xl& b) {
+  PacketMask64 mask = __riscv_vmseq_vv_i64m1_b64(a, b, unpacket_traits<Packet1Xl>::size);
+  return __riscv_vmerge_vxm_i64m1(pzero(a), 0xffffffffffffffff, mask, unpacket_traits<Packet1Xl>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xl ptrue<Packet1Xl>(const Packet1Xl& /*a*/) {
+  return __riscv_vmv_v_x_i64m1(0xffffffffffffffffu, unpacket_traits<Packet1Xl>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xl pand<Packet1Xl>(const Packet1Xl& a, const Packet1Xl& b) {
+  return __riscv_vand_vv_i64m1(a, b, unpacket_traits<Packet1Xl>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xl por<Packet1Xl>(const Packet1Xl& a, const Packet1Xl& b) {
+  return __riscv_vor_vv_i64m1(a, b, unpacket_traits<Packet1Xl>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xl pxor<Packet1Xl>(const Packet1Xl& a, const Packet1Xl& b) {
+  return __riscv_vxor_vv_i64m1(a, b, unpacket_traits<Packet1Xl>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xl pandnot<Packet1Xl>(const Packet1Xl& a, const Packet1Xl& b) {
+  return __riscv_vand_vv_i64m1(a, __riscv_vnot_v_i64m1(b, unpacket_traits<Packet1Xl>::size),
+                               unpacket_traits<Packet1Xl>::size);
+}
+
+template <int N>
+EIGEN_STRONG_INLINE Packet1Xl parithmetic_shift_right(Packet1Xl a) {
+  return __riscv_vsra_vx_i64m1(a, N, unpacket_traits<Packet1Xl>::size);
+}
+
+template <int N>
+EIGEN_STRONG_INLINE Packet1Xl plogical_shift_right(Packet1Xl a) {
+  return __riscv_vreinterpret_i64m1(
+      __riscv_vsrl_vx_u64m1(__riscv_vreinterpret_u64m1(a), N, unpacket_traits<Packet1Xl>::size));
+}
+
+template <int N>
+EIGEN_STRONG_INLINE Packet1Xl plogical_shift_left(Packet1Xl a) {
+  return __riscv_vsll_vx_i64m1(a, N, unpacket_traits<Packet1Xl>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xl pload<Packet1Xl>(const numext::int64_t* from) {
+  EIGEN_DEBUG_ALIGNED_LOAD return __riscv_vle64_v_i64m1(from, unpacket_traits<Packet1Xl>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xl ploadu<Packet1Xl>(const numext::int64_t* from) {
+  EIGEN_DEBUG_UNALIGNED_LOAD return __riscv_vle64_v_i64m1(from, unpacket_traits<Packet1Xl>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xl ploaddup<Packet1Xl>(const numext::int64_t* from) {
+  Packet1Xul idx = __riscv_vid_v_u64m1(unpacket_traits<Packet1Xl>::size);
+  idx = __riscv_vsll_vx_u64m1(__riscv_vand_vx_u64m1(idx, 0xfffffffffffffffeu, unpacket_traits<Packet1Xl>::size), 2,
+                              unpacket_traits<Packet1Xl>::size);
+  // idx = 0 0 sizeof(int64_t) sizeof(int64_t) 2*sizeof(int64_t) 2*sizeof(int64_t) ...
+  return __riscv_vloxei64_v_i64m1(from, idx, unpacket_traits<Packet1Xl>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xl ploadquad<Packet1Xl>(const numext::int64_t* from) {
+  Packet1Xul idx = __riscv_vid_v_u64m1(unpacket_traits<Packet1Xl>::size);
+  idx = __riscv_vsll_vx_u64m1(__riscv_vand_vx_u64m1(idx, 0xfffffffffffffffcu, unpacket_traits<Packet1Xl>::size), 1,
+                              unpacket_traits<Packet1Xl>::size);
+  // idx (byte offsets) = 0 0 0 0 sizeof(int64_t) sizeof(int64_t) sizeof(int64_t) sizeof(int64_t) ...
+  return __riscv_vloxei64_v_i64m1(from, idx, unpacket_traits<Packet1Xl>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE void pstore<numext::int64_t>(numext::int64_t* to, const Packet1Xl& from) {
+  EIGEN_DEBUG_ALIGNED_STORE __riscv_vse64_v_i64m1(to, from, unpacket_traits<Packet1Xl>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE void pstoreu<numext::int64_t>(numext::int64_t* to, const Packet1Xl& from) {
+  EIGEN_DEBUG_UNALIGNED_STORE __riscv_vse64_v_i64m1(to, from, unpacket_traits<Packet1Xl>::size);
+}
+
+template <>
+EIGEN_DEVICE_FUNC inline Packet1Xl pgather<numext::int64_t, Packet1Xl>(const numext::int64_t* from, Index stride) {
+  return __riscv_vlse64_v_i64m1(from, stride * sizeof(numext::int64_t), unpacket_traits<Packet1Xl>::size);
+}
+
+template <>
+EIGEN_DEVICE_FUNC inline void pscatter<numext::int64_t, Packet1Xl>(numext::int64_t* to, const Packet1Xl& from,
+                                                                  Index stride) {
+  __riscv_vsse64(to, stride * sizeof(numext::int64_t), from, unpacket_traits<Packet1Xl>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE numext::int64_t pfirst<Packet1Xl>(const Packet1Xl& a) {
+  return __riscv_vmv_x_s_i64m1_i64(a);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xl preverse(const Packet1Xl& a) {
+  Packet1Xul idx = __riscv_vrsub_vx_u64m1(__riscv_vid_v_u64m1(unpacket_traits<Packet1Xl>::size),
+                                         unpacket_traits<Packet1Xl>::size - 1, unpacket_traits<Packet1Xl>::size);
+  return __riscv_vrgather_vv_i64m1(a, idx, unpacket_traits<Packet1Xl>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xl pabs(const Packet1Xl& a) {
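+  // Branchless abs as in the 32-bit version, with sign mask a >> 63.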
+  Packet1Xl mask = __riscv_vsra_vx_i64m1(a, 63, unpacket_traits<Packet1Xl>::size);
+  return __riscv_vsub_vv_i64m1(__riscv_vxor_vv_i64m1(a, mask, unpacket_traits<Packet1Xl>::size), mask,
+                               unpacket_traits<Packet1Xl>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE numext::int64_t predux<Packet1Xl>(const Packet1Xl& a) {
+  return __riscv_vmv_x(__riscv_vredsum_vs_i64m1_i64m1(a, __riscv_vmv_v_x_i64m1(0, unpacket_traits<Packet1Xl>::size),
+                                                      unpacket_traits<Packet1Xl>::size));
+}
+
+template <>
+EIGEN_STRONG_INLINE numext::int64_t predux_mul<Packet1Xl>(const Packet1Xl& a) {
+  // Multiply the vector by its reverse
+  Packet1Xl prod = __riscv_vmul_vv_i64m1(preverse(a), a, unpacket_traits<Packet1Xl>::size);
+  Packet1Xl half_prod;
+
+  if (EIGEN_RISCV64_RVV_VL >= 1024) {
+    half_prod = __riscv_vslidedown_vx_i64m1(prod, 4, unpacket_traits<Packet1Xl>::size);
+    prod = __riscv_vmul_vv_i64m1(prod, half_prod, unpacket_traits<Packet1Xl>::size);
+  }
+  if (EIGEN_RISCV64_RVV_VL >= 512) {
+    half_prod = __riscv_vslidedown_vx_i64m1(prod, 2, unpacket_traits<Packet1Xl>::size);
+    prod = __riscv_vmul_vv_i64m1(prod, half_prod, unpacket_traits<Packet1Xl>::size);
+  }
+  if (EIGEN_RISCV64_RVV_VL >= 256) {
+    half_prod = __riscv_vslidedown_vx_i64m1(prod, 1, unpacket_traits<Packet1Xl>::size);
+    prod = __riscv_vmul_vv_i64m1(prod, half_prod, unpacket_traits<Packet1Xl>::size);
+  }
+
+  // The reduced product now sits in the first element.
+  return pfirst(prod);
+}
+
+template <>
+EIGEN_STRONG_INLINE numext::int64_t predux_min<Packet1Xl>(const Packet1Xl& a) {
+  return __riscv_vmv_x(__riscv_vredmin_vs_i64m1_i64m1(
+      a, __riscv_vmv_v_x_i64m1((std::numeric_limits<numext::int64_t>::max)(), unpacket_traits<Packet1Xl>::size),
+      unpacket_traits<Packet1Xl>::size));
+}
+
+template <>
+EIGEN_STRONG_INLINE numext::int64_t predux_max<Packet1Xl>(const Packet1Xl& a) {
+  return __riscv_vmv_x(__riscv_vredmax_vs_i64m1_i64m1(
+      a, __riscv_vmv_v_x_i64m1((std::numeric_limits<numext::int64_t>::min)(), unpacket_traits<Packet1Xl>::size),
+      unpacket_traits<Packet1Xl>::size));
+}
+
+template <int N>
+EIGEN_DEVICE_FUNC inline void ptranspose(PacketBlock<Packet1Xl, N>& kernel) {
+  numext::int64_t buffer[unpacket_traits<Packet1Xl>::size * N] = {0};
+  int i = 0;
+
+  for (i = 0; i < N; i++) {
+    __riscv_vsse64(&buffer[i], N * sizeof(numext::int64_t), kernel.packet[i], unpacket_traits<Packet1Xl>::size);
+  }
+  for (i = 0; i < N; i++) {
+    kernel.packet[i] =
+        __riscv_vle64_v_i64m1(&buffer[i * unpacket_traits<Packet1Xl>::size], unpacket_traits<Packet1Xl>::size);
+  }
+}
+
+/********************************* double ************************************/
+
+typedef eigen_packet_wrapper<vfloat64m1_t __attribute__((riscv_rvv_vector_bits(EIGEN_RISCV64_RVV_VL))), 15> Packet1Xd;
+typedef eigen_packet_wrapper<vfloat64m2_t __attribute__((riscv_rvv_vector_bits(EIGEN_RISCV64_RVV_VL * 2))), 16>
+    Packet2Xd;
+typedef eigen_packet_wrapper<vfloat64m4_t __attribute__((riscv_rvv_vector_bits(EIGEN_RISCV64_RVV_VL * 4))), 17>
+    Packet4Xd;
+
+#if EIGEN_RISCV64_DEFAULT_LMUL == 1
+typedef Packet1Xd PacketXd;
+
+template <>
+struct packet_traits<double> : default_packet_traits {
+  typedef Packet1Xd type;
+  typedef Packet1Xd half;
+
+  enum {
+    Vectorizable = 1,
+    AlignedOnScalar = 1,
+    size = rvv_packet_size_selector<double, EIGEN_RISCV64_RVV_VL, 1>::size,
+
+    HasAdd = 1,
+    HasSub = 1,
+    HasShift = 1,
+    HasMul = 1,
+    HasNegate = 1,
+    HasAbs = 1,
+    HasArg = 0,
+    HasAbs2 = 1,
+    HasMin = 1,
+    HasMax = 1,
+    HasConj = 1,
+    HasSetLinear = 0,
+    HasBlend = 0,
+    HasReduxp = 0,
+
+    HasCmp = 1,
+    HasDiv = 1,
+    HasRound = 1,
+
+    HasLog = 1,
+    HasExp = 1,
+    HasSqrt = 1
+  };
+};
+
+#elif EIGEN_RISCV64_DEFAULT_LMUL == 2
+typedef Packet2Xd PacketXd;
+
+template <>
+struct packet_traits<double> : default_packet_traits {
+  typedef Packet2Xd type;
+  typedef Packet1Xd half;
+
+  enum {
+    Vectorizable = 1,
+    AlignedOnScalar = 1,
+    size = rvv_packet_size_selector<double, EIGEN_RISCV64_RVV_VL, 2>::size,
+
+    HasAdd = 1,
+    HasSub = 1,
+    HasShift = 1,
+    HasMul = 1,
+    HasNegate = 1,
+    HasAbs = 1,
+    HasArg = 0,
+    HasAbs2 = 1,
+    HasMin = 1,
+    HasMax = 1,
+    HasConj = 1,
+    HasSetLinear = 0,
+    HasBlend = 0,
+    HasReduxp = 0,
+
+    HasCmp = 1,
+    HasDiv = 1,
+    HasRound = 1,
+
+    HasLog = 1,
+    HasExp = 1,
+    HasSqrt = 1
+  };
+};
+
+#elif EIGEN_RISCV64_DEFAULT_LMUL == 4
+typedef Packet4Xd PacketXd;
+
+template <>
+struct packet_traits<double> : default_packet_traits {
+  typedef Packet4Xd type;
+  typedef Packet2Xd half;
+
+  enum {
+    Vectorizable = 1,
+    AlignedOnScalar = 1,
+    size = rvv_packet_size_selector<double, EIGEN_RISCV64_RVV_VL, 4>::size,
+
+    HasAdd = 1,
+    HasSub = 1,
+    HasShift = 1,
+    HasMul = 1,
+    HasNegate = 1,
+    HasAbs = 1,
+    HasArg = 0,
+    HasAbs2 = 1,
+    HasMin = 1,
+    HasMax = 1,
+    HasConj = 1,
+    HasSetLinear = 0,
+    HasBlend = 0,
+    HasReduxp = 0,
+
+    HasCmp = 1,
+    HasDiv = 1,
+    HasRound = 1,
+
+    HasLog = 1,
+    HasExp = 1,
+    HasSqrt = 1
+  };
+};
+#endif
+
+template <>
+struct unpacket_traits<Packet1Xd> {
+  typedef double type;
+  typedef Packet1Xd half;  // Half not yet implemented
+  typedef Packet1Xl integer_packet;
+  typedef numext::uint8_t mask_t;
+  typedef PacketMask64 packet_mask;
+
+  enum {
+    size = rvv_packet_size_selector<double, EIGEN_RISCV64_RVV_VL, 1>::size,
+    alignment = rvv_packet_alignment_selector<EIGEN_RISCV64_RVV_VL, 1>::alignment,
+    vectorizable = true,
+    masked_load_available = false,
+    masked_store_available = false
+  };
+};
+
+template <>
+struct unpacket_traits<Packet2Xd> {
+  typedef double type;
+  typedef Packet1Xd half;
+  typedef Packet2Xl integer_packet;
+  typedef numext::uint8_t mask_t;
+  typedef PacketMask32 packet_mask;
+
+  enum {
+    size = rvv_packet_size_selector<double, EIGEN_RISCV64_RVV_VL, 2>::size,
+    alignment = rvv_packet_alignment_selector<EIGEN_RISCV64_RVV_VL, 2>::alignment,
+    vectorizable = true,
+    masked_load_available = false,
+    masked_store_available = false
+  };
+};
+
+template <>
+struct unpacket_traits<Packet4Xd> {
+  typedef double type;
+  typedef Packet2Xd half;
+  typedef Packet4Xl integer_packet;
+  typedef numext::uint8_t mask_t;
+  typedef PacketMask16 packet_mask;
+
+  enum {
+    size = rvv_packet_size_selector<double, EIGEN_RISCV64_RVV_VL, 4>::size,
+    alignment = rvv_packet_alignment_selector<EIGEN_RISCV64_RVV_VL, 4>::alignment,
+    vectorizable = true,
+    masked_load_available = false,
+    masked_store_available = false
+  };
+};
+
+/********************************* Packet1Xd ************************************/
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xd ptrue<Packet1Xd>(const Packet1Xd& /*a*/) {
+  return __riscv_vreinterpret_f64m1(__riscv_vmv_v_x_u64m1(0xffffffffffffffffu, unpacket_traits<Packet1Xd>::size));
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xd pzero<Packet1Xd>(const Packet1Xd& /*a*/) {
+  return __riscv_vfmv_v_f_f64m1(0.0, unpacket_traits<Packet1Xd>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xd pabs(const Packet1Xd& a) {
+  return __riscv_vfabs_v_f64m1(a, unpacket_traits<Packet1Xd>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xd pset1<Packet1Xd>(const double& from) {
+  return __riscv_vfmv_v_f_f64m1(from, unpacket_traits<Packet1Xd>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xd pset1frombits<Packet1Xd>(numext::uint64_t from) {
+  return __riscv_vreinterpret_f64m1(__riscv_vmv_v_x_u64m1(from, unpacket_traits<Packet1Xd>::size));
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xd plset<Packet1Xd>(const double& a) {
+  Packet1Xd idx = __riscv_vfcvt_f_x_v_f64m1(
+      __riscv_vreinterpret_v_u64m1_i64m1(__riscv_vid_v_u64m1(unpacket_traits<Packet1Xl>::size)),
+      unpacket_traits<Packet1Xd>::size);
+  return __riscv_vfadd_vf_f64m1(idx, a, unpacket_traits<Packet1Xd>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xd padd<Packet1Xd>(const Packet1Xd& a, const Packet1Xd& b) {
+  return __riscv_vfadd_vv_f64m1(a, b, unpacket_traits<Packet1Xd>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xd psub<Packet1Xd>(const Packet1Xd& a, const Packet1Xd& b) {
+  return __riscv_vfsub_vv_f64m1(a, b, unpacket_traits<Packet1Xd>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xd pnegate(const Packet1Xd& a) {
+  return __riscv_vfneg_v_f64m1(a, unpacket_traits<Packet1Xd>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xd pconj(const Packet1Xd& a) {
+  return a;
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xd pmul<Packet1Xd>(const Packet1Xd& a, const Packet1Xd& b) {
+  return __riscv_vfmul_vv_f64m1(a, b, unpacket_traits<Packet1Xd>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xd pdiv<Packet1Xd>(const Packet1Xd& a, const Packet1Xd& b) {
+  return __riscv_vfdiv_vv_f64m1(a, b, unpacket_traits<Packet1Xd>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xd pmadd(const Packet1Xd& a, const Packet1Xd& b, const Packet1Xd& c) {
+  return __riscv_vfmadd_vv_f64m1(a, b, c, unpacket_traits<Packet1Xd>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xd pmsub(const Packet1Xd& a, const Packet1Xd& b, const Packet1Xd& c) {
+  return __riscv_vfmsub_vv_f64m1(a, b, c, unpacket_traits<Packet1Xd>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xd pnmadd(const Packet1Xd& a, const Packet1Xd& b, const Packet1Xd& c) {
+  return __riscv_vfnmsub_vv_f64m1(a, b, c, unpacket_traits<Packet1Xd>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xd pnmsub(const Packet1Xd& a, const Packet1Xd& b, const Packet1Xd& c) {
+  return __riscv_vfnmadd_vv_f64m1(a, b, c, unpacket_traits<Packet1Xd>::size);
+}
+
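+// pmin/pmax propagate NaN: lanes where either operand is NaN are excluded from
+// the mask, so the tail-undisturbed/mask-undisturbed (tumu) vfmin/vfmax keeps
+// the quiet-NaN fill value in those lanes.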
+template <>
+EIGEN_STRONG_INLINE Packet1Xd pmin<Packet1Xd>(const Packet1Xd& a, const Packet1Xd& b) {
+  Packet1Xd nans = __riscv_vfmv_v_f_f64m1((std::numeric_limits<double>::quiet_NaN)(), unpacket_traits<Packet1Xd>::size);
+  PacketMask64 mask = __riscv_vmfeq_vv_f64m1_b64(a, a, unpacket_traits<Packet1Xd>::size);
+  PacketMask64 mask2 = __riscv_vmfeq_vv_f64m1_b64(b, b, unpacket_traits<Packet1Xd>::size);
+  mask = __riscv_vmand_mm_b64(mask, mask2, unpacket_traits<Packet1Xd>::size);
+
+  return __riscv_vfmin_vv_f64m1_tumu(mask, nans, a, b, unpacket_traits<Packet1Xd>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xd pmin<PropagateNaN, Packet1Xd>(const Packet1Xd& a, const Packet1Xd& b) {
+  return pmin<Packet1Xd>(a, b);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xd pmin<PropagateNumbers, Packet1Xd>(const Packet1Xd& a, const Packet1Xd& b) {
+  return __riscv_vfmin_vv_f64m1(a, b, unpacket_traits<Packet1Xd>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xd pmax<Packet1Xd>(const Packet1Xd& a, const Packet1Xd& b) {
+  Packet1Xd nans = __riscv_vfmv_v_f_f64m1((std::numeric_limits<double>::quiet_NaN)(), unpacket_traits<Packet1Xd>::size);
+  PacketMask64 mask = __riscv_vmfeq_vv_f64m1_b64(a, a, unpacket_traits<Packet1Xd>::size);
+  PacketMask64 mask2 = __riscv_vmfeq_vv_f64m1_b64(b, b, unpacket_traits<Packet1Xd>::size);
+  mask = __riscv_vmand_mm_b64(mask, mask2, unpacket_traits<Packet1Xd>::size);
+
+  return __riscv_vfmax_vv_f64m1_tumu(mask, nans, a, b, unpacket_traits<Packet1Xd>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xd pmax<PropagateNaN, Packet1Xd>(const Packet1Xd& a, const Packet1Xd& b) {
+  return pmax<Packet1Xd>(a, b);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xd pmax<PropagateNumbers, Packet1Xd>(const Packet1Xd& a, const Packet1Xd& b) {
+  return __riscv_vfmax_vv_f64m1(a, b, unpacket_traits<Packet1Xd>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xd pcmp_le<Packet1Xd>(const Packet1Xd& a, const Packet1Xd& b) {
+  PacketMask64 mask = __riscv_vmfle_vv_f64m1_b64(a, b, unpacket_traits<Packet1Xd>::size);
+  return __riscv_vmerge_vvm_f64m1(pzero<Packet1Xd>(a), ptrue<Packet1Xd>(a), mask, unpacket_traits<Packet1Xd>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xd pcmp_lt<Packet1Xd>(const Packet1Xd& a, const Packet1Xd& b) {
+  PacketMask64 mask = __riscv_vmflt_vv_f64m1_b64(a, b, unpacket_traits<Packet1Xd>::size);
+  return __riscv_vmerge_vvm_f64m1(pzero<Packet1Xd>(a), ptrue<Packet1Xd>(a), mask, unpacket_traits<Packet1Xd>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xd pcmp_eq<Packet1Xd>(const Packet1Xd& a, const Packet1Xd& b) {
+  PacketMask64 mask = __riscv_vmfeq_vv_f64m1_b64(a, b, unpacket_traits<Packet1Xd>::size);
+  return __riscv_vmerge_vvm_f64m1(pzero<Packet1Xd>(a), ptrue<Packet1Xd>(a), mask, unpacket_traits<Packet1Xd>::size);
+}
+
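+// All-ones where !(a >= b), i.e. where a < b or either operand is NaN.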
+template <>
+EIGEN_STRONG_INLINE Packet1Xd pcmp_lt_or_nan<Packet1Xd>(const Packet1Xd& a, const Packet1Xd& b) {
+  PacketMask64 mask = __riscv_vmfge_vv_f64m1_b64(a, b, unpacket_traits<Packet1Xd>::size);
+  return __riscv_vfmerge_vfm_f64m1(ptrue<Packet1Xd>(a), 0.0, mask, unpacket_traits<Packet1Xd>::size);
+}
+
+// Logical operations are not supported for double, so reinterpret-cast to unsigned integers and back.
+template <>
+EIGEN_STRONG_INLINE Packet1Xd pand<Packet1Xd>(const Packet1Xd& a, const Packet1Xd& b) {
+  return __riscv_vreinterpret_v_u64m1_f64m1(__riscv_vand_vv_u64m1(
+      __riscv_vreinterpret_v_f64m1_u64m1(a), __riscv_vreinterpret_v_f64m1_u64m1(b), unpacket_traits<Packet1Xd>::size));
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xd por<Packet1Xd>(const Packet1Xd& a, const Packet1Xd& b) {
+  return __riscv_vreinterpret_v_u64m1_f64m1(__riscv_vor_vv_u64m1(
+      __riscv_vreinterpret_v_f64m1_u64m1(a), __riscv_vreinterpret_v_f64m1_u64m1(b), unpacket_traits<Packet1Xd>::size));
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xd pxor<Packet1Xd>(const Packet1Xd& a, const Packet1Xd& b) {
+  return __riscv_vreinterpret_v_u64m1_f64m1(__riscv_vxor_vv_u64m1(
+      __riscv_vreinterpret_v_f64m1_u64m1(a), __riscv_vreinterpret_v_f64m1_u64m1(b), unpacket_traits<Packet1Xd>::size));
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xd pandnot<Packet1Xd>(const Packet1Xd& a, const Packet1Xd& b) {
+  return __riscv_vreinterpret_v_u64m1_f64m1(__riscv_vand_vv_u64m1(
+      __riscv_vreinterpret_v_f64m1_u64m1(a),
+      __riscv_vnot_v_u64m1(__riscv_vreinterpret_v_f64m1_u64m1(b), unpacket_traits<Packet1Xd>::size),
+      unpacket_traits<Packet1Xd>::size));
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xd pload<Packet1Xd>(const double* from) {
+  EIGEN_DEBUG_ALIGNED_LOAD return __riscv_vle64_v_f64m1(from, unpacket_traits<Packet1Xd>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xd ploadu<Packet1Xd>(const double* from) {
+  EIGEN_DEBUG_UNALIGNED_LOAD return __riscv_vle64_v_f64m1(from, unpacket_traits<Packet1Xd>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xd ploaddup<Packet1Xd>(const double* from) {
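+  // Build byte offsets 0 0 8 8 16 16 ... so that each source double is loaded twice.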
+  Packet1Xul idx = __riscv_vid_v_u64m1(unpacket_traits<Packet1Xd>::size);
+  idx = __riscv_vsll_vx_u64m1(__riscv_vand_vx_u64m1(idx, 0xfffffffffffffffeu, unpacket_traits<Packet1Xd>::size), 2,
+                              unpacket_traits<Packet1Xd>::size);
+  return __riscv_vloxei64_v_f64m1(from, idx, unpacket_traits<Packet1Xd>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xd ploadquad<Packet1Xd>(const double* from) {
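+  // Build byte offsets 0 0 0 0 8 8 8 8 ... so that each source double is loaded four times.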
+  Packet1Xul idx = __riscv_vid_v_u64m1(unpacket_traits<Packet1Xd>::size);
+  idx = __riscv_vsll_vx_u64m1(__riscv_vand_vx_u64m1(idx, 0xfffffffffffffffcu, unpacket_traits<Packet1Xd>::size), 1,
+                              unpacket_traits<Packet1Xd>::size);
+  return __riscv_vloxei64_v_f64m1(from, idx, unpacket_traits<Packet1Xd>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE void pstore<double>(double* to, const Packet1Xd& from) {
+  EIGEN_DEBUG_ALIGNED_STORE __riscv_vse64_v_f64m1(to, from, unpacket_traits<Packet1Xd>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE void pstoreu<double>(double* to, const Packet1Xd& from) {
+  EIGEN_DEBUG_UNALIGNED_STORE __riscv_vse64_v_f64m1(to, from, unpacket_traits<Packet1Xd>::size);
+}
+
+template <>
+EIGEN_DEVICE_FUNC inline Packet1Xd pgather<double, Packet1Xd>(const double* from, Index stride) {
+  return __riscv_vlse64_v_f64m1(from, stride * sizeof(double), unpacket_traits<Packet1Xd>::size);
+}
+
+template <>
+EIGEN_DEVICE_FUNC inline void pscatter<double, Packet1Xd>(double* to, const Packet1Xd& from, Index stride) {
+  __riscv_vsse64(to, stride * sizeof(double), from, unpacket_traits<Packet1Xd>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE double pfirst<Packet1Xd>(const Packet1Xd& a) {
+  return __riscv_vfmv_f_s_f64m1_f64(a);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xd psqrt(const Packet1Xd& a) {
+  return __riscv_vfsqrt_v_f64m1(a, unpacket_traits<Packet1Xd>::size);
+}
+
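+// Round to integral using the active rounding mode. NaNs are quieted via a + a,
+// and values with |a| >= 2^52 are already integral, so they bypass the int64
+// round trip; the sign is restored afterwards to preserve signed zeros.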
+template <>
+EIGEN_STRONG_INLINE Packet1Xd print<Packet1Xd>(const Packet1Xd& a) {
+  const Packet1Xd limit = pset1<Packet1Xd>(static_cast<double>(1ull << 52));
+  const Packet1Xd abs_a = pabs(a);
+
+  PacketMask64 mask = __riscv_vmfne_vv_f64m1_b64(a, a, unpacket_traits<Packet1Xd>::size);
+  const Packet1Xd x = __riscv_vfadd_vv_f64m1_tumu(mask, a, a, a, unpacket_traits<Packet1Xd>::size);
+  const Packet1Xd new_x = __riscv_vfcvt_f_x_v_f64m1(__riscv_vfcvt_x_f_v_i64m1(a, unpacket_traits<Packet1Xd>::size),
+                                                   unpacket_traits<Packet1Xd>::size);
+
+  mask = __riscv_vmflt_vv_f64m1_b64(abs_a, limit, unpacket_traits<Packet1Xd>::size);
+  Packet1Xd signed_x = __riscv_vfsgnj_vv_f64m1(new_x, x, unpacket_traits<Packet1Xd>::size);
+  return __riscv_vmerge_vvm_f64m1(x, signed_x, mask, unpacket_traits<Packet1Xd>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xd pfloor<Packet1Xd>(const Packet1Xd& a) {
+  Packet1Xd tmp = print<Packet1Xd>(a);
+  // Where rounding went up (tmp > a), subtract one to get the floor.
+  PacketMask64 mask = __riscv_vmflt_vv_f64m1_b64(a, tmp, unpacket_traits<Packet1Xd>::size);
+  return __riscv_vfsub_vf_f64m1_tumu(mask, tmp, tmp, 1.0, unpacket_traits<Packet1Xd>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xd preverse(const Packet1Xd& a) {
+  Packet1Xul idx = __riscv_vrsub_vx_u64m1(__riscv_vid_v_u64m1(unpacket_traits<Packet1Xd>::size),
+                                         unpacket_traits<Packet1Xd>::size - 1, unpacket_traits<Packet1Xd>::size);
+  return __riscv_vrgather_vv_f64m1(a, idx, unpacket_traits<Packet1Xd>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xd pfrexp<Packet1Xd>(const Packet1Xd& a, Packet1Xd& exponent) {
+  return pfrexp_generic(a, exponent);
+}
+
+template <>
+EIGEN_STRONG_INLINE double predux<Packet1Xd>(const Packet1Xd& a) {
+  return __riscv_vfmv_f(__riscv_vfredusum_vs_f64m1_f64m1(
+      a, __riscv_vfmv_v_f_f64m1(0.0, unpacket_traits<Packet1Xd>::size), unpacket_traits<Packet1Xd>::size));
+}
+
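+// Product reduction as a binary tree: fold the vector with its reverse, then
+// keep halving with slide-downs until lane 0 holds the product of all lanes.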
+template <>
+EIGEN_STRONG_INLINE double predux_mul<Packet1Xd>(const Packet1Xd& a) {
+  // Multiply the vector by its reverse
+  Packet1Xd prod = __riscv_vfmul_vv_f64m1(preverse(a), a, unpacket_traits<Packet1Xd>::size);
+  Packet1Xd half_prod;
+
+  if (EIGEN_RISCV64_RVV_VL >= 1024) {
+    half_prod = __riscv_vslidedown_vx_f64m1(prod, 4, unpacket_traits<Packet1Xd>::size);
+    prod = __riscv_vfmul_vv_f64m1(prod, half_prod, unpacket_traits<Packet1Xd>::size);
+  }
+  if (EIGEN_RISCV64_RVV_VL >= 512) {
+    half_prod = __riscv_vslidedown_vx_f64m1(prod, 2, unpacket_traits<Packet1Xd>::size);
+    prod = __riscv_vfmul_vv_f64m1(prod, half_prod, unpacket_traits<Packet1Xd>::size);
+  }
+  if (EIGEN_RISCV64_RVV_VL >= 256) {
+    half_prod = __riscv_vslidedown_vx_f64m1(prod, 1, unpacket_traits<Packet1Xd>::size);
+    prod = __riscv_vfmul_vv_f64m1(prod, half_prod, unpacket_traits<Packet1Xd>::size);
+  }
+
+  // The reduction is done to the first element.
+  return pfirst(prod);
+}
+
+template <>
+EIGEN_STRONG_INLINE double predux_min<Packet1Xd>(const Packet1Xd& a) {
+  return (std::min)(
+      __riscv_vfmv_f(__riscv_vfredmin_vs_f64m1_f64m1(
+          a, __riscv_vfmv_v_f_f64m1((std::numeric_limits<double>::quiet_NaN)(), unpacket_traits<Packet1Xd>::size),
+          unpacket_traits<Packet1Xd>::size)),
+      (std::numeric_limits<double>::max)());
+}
+
+template <>
+EIGEN_STRONG_INLINE double predux_max<Packet1Xd>(const Packet1Xd& a) {
+  return (std::max)(
+      __riscv_vfmv_f(__riscv_vfredmax_vs_f64m1_f64m1(
+          a, __riscv_vfmv_v_f_f64m1((std::numeric_limits<double>::quiet_NaN)(), unpacket_traits<Packet1Xd>::size),
+          unpacket_traits<Packet1Xd>::size)),
+      -(std::numeric_limits<double>::max)());
+}
+
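+// Transpose through a scratch buffer: strided stores write each packet as a
+// column, then contiguous loads read the transposed rows back.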
+template <int N>
+EIGEN_DEVICE_FUNC inline void ptranspose(PacketBlock<Packet1Xd, N>& kernel) {
+  double buffer[unpacket_traits<Packet1Xd>::size * N];
+  int i = 0;
+
+  for (i = 0; i < N; i++) {
+    __riscv_vsse64(&buffer[i], N * sizeof(double), kernel.packet[i], unpacket_traits<Packet1Xd>::size);
+  }
+
+  for (i = 0; i < N; i++) {
+    kernel.packet[i] =
+        __riscv_vle64_v_f64m1(&buffer[i * unpacket_traits<Packet1Xd>::size], unpacket_traits<Packet1Xd>::size);
+  }
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xd pldexp<Packet1Xd>(const Packet1Xd& a, const Packet1Xd& exponent) {
+  return pldexp_generic(a, exponent);
+}
+
+template <>
+EIGEN_STRONG_INLINE PacketMask64 por(const PacketMask64& a, const PacketMask64& b) {
+  return __riscv_vmor_mm_b64(a, b, unpacket_traits<Packet1Xd>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE PacketMask64 pandnot(const PacketMask64& a, const PacketMask64& b) {
+  return __riscv_vmandn_mm_b64(a, b, unpacket_traits<Packet1Xd>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE PacketMask64 pand(const PacketMask64& a, const PacketMask64& b) {
+  return __riscv_vmand_mm_b64(a, b, unpacket_traits<Packet1Xd>::size);
+}
+
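+// Comparison and select helpers that operate directly on RVV mask registers.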
+EIGEN_STRONG_INLINE PacketMask64 pcmp_eq_mask(const Packet1Xd& a, const Packet1Xd& b) {
+  return __riscv_vmfeq_vv_f64m1_b64(a, b, unpacket_traits<Packet1Xd>::size);
+}
+
+EIGEN_STRONG_INLINE PacketMask64 pcmp_lt_mask(const Packet1Xd& a, const Packet1Xd& b) {
+  return __riscv_vmflt_vv_f64m1_b64(a, b, unpacket_traits<Packet1Xd>::size);
+}
+
+EIGEN_STRONG_INLINE Packet1Xd pselect(const PacketMask64& mask, const Packet1Xd& a, const Packet1Xd& b) {
+  return __riscv_vmerge_vvm_f64m1(b, a, mask, unpacket_traits<Packet1Xd>::size);
+}
+
+/********************************* short **************************************/
+
+typedef eigen_packet_wrapper<vint16m1_t __attribute__((riscv_rvv_vector_bits(EIGEN_RISCV64_RVV_VL))), 18> Packet1Xs;
+typedef eigen_packet_wrapper<vuint16m1_t __attribute__((riscv_rvv_vector_bits(EIGEN_RISCV64_RVV_VL))), 19> Packet1Xsu;
+
+typedef eigen_packet_wrapper<vint16m2_t __attribute__((riscv_rvv_vector_bits(EIGEN_RISCV64_RVV_VL * 2))), 20>
+    Packet2Xs;
+typedef eigen_packet_wrapper<vuint16m2_t __attribute__((riscv_rvv_vector_bits(EIGEN_RISCV64_RVV_VL * 2))), 21>
+    Packet2Xsu;
+
+typedef eigen_packet_wrapper<vint16m4_t __attribute__((riscv_rvv_vector_bits(EIGEN_RISCV64_RVV_VL * 4))), 22>
+    Packet4Xs;
+typedef eigen_packet_wrapper<vuint16m4_t __attribute__((riscv_rvv_vector_bits(EIGEN_RISCV64_RVV_VL * 4))), 23>
+    Packet4Xsu;
+
+#if EIGEN_RISCV64_DEFAULT_LMUL == 1
+typedef Packet1Xs PacketXs;
+typedef Packet1Xsu PacketXsu;
+
+template <>
+struct packet_traits<numext::int16_t> : default_packet_traits {
+  typedef Packet1Xs type;
+  typedef Packet1Xs half;  // Half not implemented yet
+  enum {
+    Vectorizable = 1,
+    AlignedOnScalar = 1,
+    size = rvv_packet_size_selector<numext::int16_t, EIGEN_RISCV64_RVV_VL, 1>::size,
+
+    HasAdd = 1,
+    HasSub = 1,
+    HasShift = 1,
+    HasMul = 1,
+    HasNegate = 1,
+    HasAbs = 1,
+    HasArg = 0,
+    HasAbs2 = 1,
+    HasMin = 1,
+    HasMax = 1,
+    HasConj = 1,
+    HasSetLinear = 0,
+    HasBlend = 0,
+    HasReduxp = 0
+  };
+};
+
+#elif EIGEN_RISCV64_DEFAULT_LMUL == 2
+typedef Packet2Xs PacketXs;
+typedef Packet2Xsu PacketXsu;
+
+template <>
+struct packet_traits<numext::int16_t> : default_packet_traits {
+  typedef Packet2Xs type;
+  typedef Packet1Xs half;
+  enum {
+    Vectorizable = 1,
+    AlignedOnScalar = 1,
+    size = rvv_packet_size_selector<numext::int16_t, EIGEN_RISCV64_RVV_VL, 2>::size,
+
+    HasAdd = 1,
+    HasSub = 1,
+    HasShift = 1,
+    HasMul = 1,
+    HasNegate = 1,
+    HasAbs = 1,
+    HasArg = 0,
+    HasAbs2 = 1,
+    HasMin = 1,
+    HasMax = 1,
+    HasConj = 1,
+    HasSetLinear = 0,
+    HasBlend = 0,
+    HasReduxp = 0
+  };
+};
+
+#elif EIGEN_RISCV64_DEFAULT_LMUL == 4
+typedef Packet4Xs PacketXs;
+typedef Packet4Xsu PacketXsu;
+
+template <>
+struct packet_traits<numext::int16_t> : default_packet_traits {
+  typedef Packet4Xs type;
+  typedef Packet2Xs half;
+  enum {
+    Vectorizable = 1,
+    AlignedOnScalar = 1,
+    size = rvv_packet_size_selector<numext::int16_t, EIGEN_RISCV64_RVV_VL, 4>::size,
+
+    HasAdd = 1,
+    HasSub = 1,
+    HasShift = 1,
+    HasMul = 1,
+    HasNegate = 1,
+    HasAbs = 1,
+    HasArg = 0,
+    HasAbs2 = 1,
+    HasMin = 1,
+    HasMax = 1,
+    HasConj = 1,
+    HasSetLinear = 0,
+    HasBlend = 0,
+    HasReduxp = 0
+  };
+};
+#endif
+
+template <>
+struct unpacket_traits<Packet1Xs> {
+  typedef numext::int16_t type;
+  typedef Packet1Xs half;  // Half not yet implemented
+  typedef numext::uint8_t mask_t;
+  enum {
+    size = rvv_packet_size_selector<numext::int16_t, EIGEN_RISCV64_RVV_VL, 1>::size,
+    alignment = rvv_packet_alignment_selector<EIGEN_RISCV64_RVV_VL, 1>::alignment,
+    vectorizable = true,
+    masked_load_available = false,
+    masked_store_available = false
+  };
+};
+
+template <>
+struct unpacket_traits<Packet2Xs> {
+  typedef numext::int16_t type;
+  typedef Packet1Xs half;
+  typedef numext::uint8_t mask_t;
+  enum {
+    size = rvv_packet_size_selector<numext::int16_t, EIGEN_RISCV64_RVV_VL, 2>::size,
+    alignment = rvv_packet_alignment_selector<EIGEN_RISCV64_RVV_VL, 2>::alignment,
+    vectorizable = true,
+    masked_load_available = false,
+    masked_store_available = false
+  };
+};
+
+template <>
+struct unpacket_traits<Packet4Xs> {
+  typedef numext::int16_t type;
+  typedef Packet2Xs half;
+  typedef numext::uint8_t mask_t;
+  enum {
+    size = rvv_packet_size_selector<numext::int16_t, EIGEN_RISCV64_RVV_VL, 4>::size,
+    alignment = rvv_packet_alignment_selector<EIGEN_RISCV64_RVV_VL, 4>::alignment,
+    vectorizable = true,
+    masked_load_available = false,
+    masked_store_available = false
+  };
+};
+
+template <>
+EIGEN_STRONG_INLINE void prefetch<numext::int16_t>(const numext::int16_t* addr) {
+#if EIGEN_HAS_BUILTIN(__builtin_prefetch) || EIGEN_COMP_GNUC
+  __builtin_prefetch(addr);
+#endif
+}
+
+/********************************* Packet1Xs ************************************/
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xs pset1<Packet1Xs>(const numext::int16_t& from) {
+  return __riscv_vmv_v_x_i16m1(from, unpacket_traits<Packet1Xs>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xs plset<Packet1Xs>(const numext::int16_t& a) {
+  Packet1Xs idx = __riscv_vreinterpret_v_u16m1_i16m1(__riscv_vid_v_u16m1(unpacket_traits<Packet1Xs>::size));
+  return __riscv_vadd_vx_i16m1(idx, a, unpacket_traits<Packet1Xs>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xs pzero<Packet1Xs>(const Packet1Xs& /*a*/) {
+  return __riscv_vmv_v_x_i16m1(0, unpacket_traits<Packet1Xs>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xs padd<Packet1Xs>(const Packet1Xs& a, const Packet1Xs& b) {
+  return __riscv_vadd_vv_i16m1(a, b, unpacket_traits<Packet1Xs>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xs psub<Packet1Xs>(const Packet1Xs& a, const Packet1Xs& b) {
+  return __riscv_vsub(a, b, unpacket_traits<Packet1Xs>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xs pnegate(const Packet1Xs& a) {
+  return __riscv_vneg(a, unpacket_traits<Packet1Xs>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xs pconj(const Packet1Xs& a) {
+  return a;
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xs pmul<Packet1Xs>(const Packet1Xs& a, const Packet1Xs& b) {
+  return __riscv_vmul(a, b, unpacket_traits<Packet1Xs>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xs pdiv<Packet1Xs>(const Packet1Xs& a, const Packet1Xs& b) {
+  return __riscv_vdiv(a, b, unpacket_traits<Packet1Xs>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xs pmadd(const Packet1Xs& a, const Packet1Xs& b, const Packet1Xs& c) {
+  return __riscv_vmadd(a, b, c, unpacket_traits<Packet1Xs>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xs pmsub(const Packet1Xs& a, const Packet1Xs& b, const Packet1Xs& c) {
+  return __riscv_vmadd(a, b, pnegate(c), unpacket_traits<Packet1Xs>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xs pnmadd(const Packet1Xs& a, const Packet1Xs& b, const Packet1Xs& c) {
+  return __riscv_vnmsub_vv_i16m1(a, b, c, unpacket_traits<Packet1Xs>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xs pnmsub(const Packet1Xs& a, const Packet1Xs& b, const Packet1Xs& c) {
+  return __riscv_vnmsub_vv_i16m1(a, b, pnegate(c), unpacket_traits<Packet1Xs>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xs pmin<Packet1Xs>(const Packet1Xs& a, const Packet1Xs& b) {
+  return __riscv_vmin(a, b, unpacket_traits<Packet1Xs>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xs pmax<Packet1Xs>(const Packet1Xs& a, const Packet1Xs& b) {
+  return __riscv_vmax(a, b, unpacket_traits<Packet1Xs>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xs pcmp_le<Packet1Xs>(const Packet1Xs& a, const Packet1Xs& b) {
+  PacketMask16 mask = __riscv_vmsle_vv_i16m1_b16(a, b, unpacket_traits<Packet1Xs>::size);
+  return __riscv_vmerge_vxm_i16m1(pzero(a), static_cast<short>(0xffff), mask, unpacket_traits<Packet1Xs>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xs pcmp_lt<Packet1Xs>(const Packet1Xs& a, const Packet1Xs& b) {
+  PacketMask16 mask = __riscv_vmslt_vv_i16m1_b16(a, b, unpacket_traits<Packet1Xs>::size);
+  return __riscv_vmerge_vxm_i16m1(pzero(a), static_cast<short>(0xffff), mask, unpacket_traits<Packet1Xs>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xs pcmp_eq<Packet1Xs>(const Packet1Xs& a, const Packet1Xs& b) {
+  PacketMask16 mask = __riscv_vmseq_vv_i16m1_b16(a, b, unpacket_traits<Packet1Xs>::size);
+  return __riscv_vmerge_vxm_i16m1(pzero(a), static_cast<short>(0xffff), mask, unpacket_traits<Packet1Xs>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xs ptrue<Packet1Xs>(const Packet1Xs& /*a*/) {
+  return __riscv_vmv_v_x_i16m1(static_cast<unsigned short>(0xffffu), unpacket_traits<Packet1Xs>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xs pand<Packet1Xs>(const Packet1Xs& a, const Packet1Xs& b) {
+  return __riscv_vand_vv_i16m1(a, b, unpacket_traits<Packet1Xs>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xs por<Packet1Xs>(const Packet1Xs& a, const Packet1Xs& b) {
+  return __riscv_vor_vv_i16m1(a, b, unpacket_traits<Packet1Xs>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xs pxor<Packet1Xs>(const Packet1Xs& a, const Packet1Xs& b) {
+  return __riscv_vxor_vv_i16m1(a, b, unpacket_traits<Packet1Xs>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xs pandnot<Packet1Xs>(const Packet1Xs& a, const Packet1Xs& b) {
+  return __riscv_vand_vv_i16m1(a, __riscv_vnot_v_i16m1(b, unpacket_traits<Packet1Xs>::size),
+                               unpacket_traits<Packet1Xs>::size);
+}
+
+template <int N>
+EIGEN_STRONG_INLINE Packet1Xs parithmetic_shift_right(Packet1Xs a) {
+  return __riscv_vsra_vx_i16m1(a, N, unpacket_traits<Packet1Xs>::size);
+}
+
+template <int N>
+EIGEN_STRONG_INLINE Packet1Xs plogical_shift_right(Packet1Xs a) {
+  return __riscv_vreinterpret_i16m1(
+      __riscv_vsrl_vx_u16m1(__riscv_vreinterpret_u16m1(a), N, unpacket_traits<Packet1Xs>::size));
+}
+
+template <int N>
+EIGEN_STRONG_INLINE Packet1Xs plogical_shift_left(Packet1Xs a) {
+  return __riscv_vsll_vx_i16m1(a, N, unpacket_traits<Packet1Xs>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xs pload<Packet1Xs>(const numext::int16_t* from) {
+  EIGEN_DEBUG_ALIGNED_LOAD return __riscv_vle16_v_i16m1(from, unpacket_traits<Packet1Xs>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xs ploadu<Packet1Xs>(const numext::int16_t* from) {
+  EIGEN_DEBUG_UNALIGNED_LOAD return __riscv_vle16_v_i16m1(from, unpacket_traits<Packet1Xs>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xs ploaddup<Packet1Xs>(const numext::int16_t* from) {
+  Packet1Xsu idx = __riscv_vid_v_u16m1(unpacket_traits<Packet1Xs>::size);
+  idx = __riscv_vand_vx_u16m1(idx, 0xfffeu, unpacket_traits<Packet1Xs>::size);
+  // idx = 0 0 sizeof(int16_t) sizeof(int16_t) 2*sizeof(int16_t) 2*sizeof(int16_t) ...
+  return __riscv_vloxei16_v_i16m1(from, idx, unpacket_traits<Packet1Xs>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xs ploadquad<Packet1Xs>(const numext::int16_t* from) {
+  Packet1Xsu idx = __riscv_vid_v_u16m1(unpacket_traits<Packet1Xs>::size);
+  idx = __riscv_vsrl_vx_u16m1(__riscv_vand_vx_u16m1(idx, 0xfffcu, unpacket_traits<Packet1Xs>::size), 1,
+                              unpacket_traits<Packet1Xs>::size);
+  return __riscv_vloxei16_v_i16m1(from, idx, unpacket_traits<Packet1Xs>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE void pstore<numext::int16_t>(numext::int16_t* to, const Packet1Xs& from) {
+  EIGEN_DEBUG_ALIGNED_STORE __riscv_vse16_v_i16m1(to, from, unpacket_traits<Packet1Xs>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE void pstoreu<numext::int16_t>(numext::int16_t* to, const Packet1Xs& from) {
+  EIGEN_DEBUG_UNALIGNED_STORE __riscv_vse16_v_i16m1(to, from, unpacket_traits<Packet1Xs>::size);
+}
+
+template <>
+EIGEN_DEVICE_FUNC inline Packet1Xs pgather<numext::int16_t, Packet1Xs>(const numext::int16_t* from, Index stride) {
+  return __riscv_vlse16_v_i16m1(from, stride * sizeof(numext::int16_t), unpacket_traits<Packet1Xs>::size);
+}
+
+template <>
+EIGEN_DEVICE_FUNC inline void pscatter<numext::int16_t, Packet1Xs>(numext::int16_t* to, const Packet1Xs& from,
+                                                                  Index stride) {
+  __riscv_vsse16(to, stride * sizeof(numext::int16_t), from, unpacket_traits<Packet1Xs>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE numext::int16_t pfirst<Packet1Xs>(const Packet1Xs& a) {
+  return __riscv_vmv_x_s_i16m1_i16(a);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xs preverse(const Packet1Xs& a) {
+  Packet1Xsu idx = __riscv_vrsub_vx_u16m1(__riscv_vid_v_u16m1(unpacket_traits<Packet1Xs>::size),
+                                         unpacket_traits<Packet1Xs>::size - 1, unpacket_traits<Packet1Xs>::size);
+  return __riscv_vrgather_vv_i16m1(a, idx, unpacket_traits<Packet1Xs>::size);
+}
+
+template <>
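+// Branch-free |a|: the arithmetic shift yields 0 for non-negative lanes and -1
+// for negative ones, so (a ^ mask) - mask conditionally negates in two's complement.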
+EIGEN_STRONG_INLINE Packet1Xs pabs(const Packet1Xs& a) {
+  Packet1Xs mask = __riscv_vsra_vx_i16m1(a, 15, unpacket_traits<Packet1Xs>::size);
+  return __riscv_vsub_vv_i16m1(__riscv_vxor_vv_i16m1(a, mask, unpacket_traits<Packet1Xs>::size), mask,
+                               unpacket_traits<Packet1Xs>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE numext::int16_t predux<Packet1Xs>(const Packet1Xs& a) {
+  return __riscv_vmv_x(__riscv_vredsum_vs_i16m1_i16m1(a, __riscv_vmv_v_x_i16m1(0, unpacket_traits<Packet1Xs>::size),
+                                                      unpacket_traits<Packet1Xs>::size));
+}
+
+template <>
+EIGEN_STRONG_INLINE numext::int16_t predux_mul<Packet1Xs>(const Packet1Xs& a) {
+  // Multiply the vector by its reverse
+  Packet1Xs prod = __riscv_vmul_vv_i16m1(preverse(a), a, unpacket_traits<Packet1Xs>::size);
+  Packet1Xs half_prod;
+
+  if (EIGEN_RISCV64_RVV_VL >= 1024) {
+    half_prod = __riscv_vslidedown_vx_i16m1(prod, 16, unpacket_traits<Packet1Xs>::size);
+    prod = __riscv_vmul_vv_i16m1(prod, half_prod, unpacket_traits<Packet1Xs>::size);
+  }
+  if (EIGEN_RISCV64_RVV_VL >= 512) {
+    half_prod = __riscv_vslidedown_vx_i16m1(prod, 8, unpacket_traits<Packet1Xs>::size);
+    prod = __riscv_vmul_vv_i16m1(prod, half_prod, unpacket_traits<Packet1Xs>::size);
+  }
+  if (EIGEN_RISCV64_RVV_VL >= 256) {
+    half_prod = __riscv_vslidedown_vx_i16m1(prod, 4, unpacket_traits<Packet1Xs>::size);
+    prod = __riscv_vmul_vv_i16m1(prod, half_prod, unpacket_traits<Packet1Xs>::size);
+  }
+  // Last reduction
+  half_prod = __riscv_vslidedown_vx_i16m1(prod, 2, unpacket_traits<Packet1Xs>::size);
+  prod = __riscv_vmul_vv_i16m1(prod, half_prod, unpacket_traits<Packet1Xs>::size);
+
+  half_prod = __riscv_vslidedown_vx_i16m1(prod, 1, unpacket_traits<Packet1Xs>::size);
+  prod = __riscv_vmul_vv_i16m1(prod, half_prod, unpacket_traits<Packet1Xs>::size);
+
+  // The reduction is done to the first element.
+  return pfirst(prod);
+}
+
+template <>
+EIGEN_STRONG_INLINE numext::int16_t predux_min<Packet1Xs>(const Packet1Xs& a) {
+  return __riscv_vmv_x(__riscv_vredmin_vs_i16m1_i16m1(
+      a, __riscv_vmv_v_x_i16m1((std::numeric_limits<numext::int16_t>::max)(), unpacket_traits<Packet1Xs>::size),
+      unpacket_traits<Packet1Xs>::size));
+}
+
+template <>
+EIGEN_STRONG_INLINE numext::int16_t predux_max<Packet1Xs>(const Packet1Xs& a) {
+  return __riscv_vmv_x(__riscv_vredmax_vs_i16m1_i16m1(
+      a, __riscv_vmv_v_x_i16m1((std::numeric_limits<numext::int16_t>::min)(), unpacket_traits<Packet1Xs>::size),
+      unpacket_traits<Packet1Xs>::size));
+}
+
+template <int N>
+EIGEN_DEVICE_FUNC inline void ptranspose(PacketBlock<Packet1Xs, N>& kernel) {
+  numext::int16_t buffer[unpacket_traits<Packet1Xs>::size * N] = {0};
+  int i = 0;
+
+  for (i = 0; i < N; i++) {
+    __riscv_vsse16(&buffer[i], N * sizeof(numext::int16_t), kernel.packet[i], unpacket_traits<Packet1Xs>::size);
+  }
+  for (i = 0; i < N; i++) {
+    kernel.packet[i] =
+        __riscv_vle16_v_i16m1(&buffer[i * unpacket_traits<Packet1Xs>::size], unpacket_traits<Packet1Xs>::size);
+  }
+}
+
+}  // namespace internal
+}  // namespace Eigen
+
+#endif  // EIGEN_PACKET_MATH_RVV10_H
diff --git a/Eigen/src/Core/arch/RVV10/PacketMath2.h b/Eigen/src/Core/arch/RVV10/PacketMath2.h
new file mode 100644
index 0000000..1fda511
--- /dev/null
+++ b/Eigen/src/Core/arch/RVV10/PacketMath2.h
@@ -0,0 +1,1506 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2024 Kseniya Zaytseva <kseniya.zaytseva@syntacore.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_PACKET2_MATH_RVV10_H
+#define EIGEN_PACKET2_MATH_RVV10_H
+
+// IWYU pragma: private
+#include "../../InternalHeaderCheck.h"
+
+namespace Eigen {
+namespace internal {
+
+/********************************* Packet2Xi ************************************/
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xi pset1<Packet2Xi>(const numext::int32_t& from) {
+  return __riscv_vmv_v_x_i32m2(from, unpacket_traits<Packet2Xi>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xi plset<Packet2Xi>(const numext::int32_t& a) {
+  Packet2Xi idx = __riscv_vreinterpret_v_u32m2_i32m2(__riscv_vid_v_u32m2(unpacket_traits<Packet2Xi>::size));
+  return __riscv_vadd_vx_i32m2(idx, a, unpacket_traits<Packet2Xi>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xi pzero<Packet2Xi>(const Packet2Xi& /*a*/) {
+  return __riscv_vmv_v_x_i32m2(0, unpacket_traits<Packet2Xi>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xi padd<Packet2Xi>(const Packet2Xi& a, const Packet2Xi& b) {
+  return __riscv_vadd_vv_i32m2(a, b, unpacket_traits<Packet2Xi>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xi psub<Packet2Xi>(const Packet2Xi& a, const Packet2Xi& b) {
+  return __riscv_vsub(a, b, unpacket_traits<Packet2Xi>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xi pnegate(const Packet2Xi& a) {
+  return __riscv_vneg(a, unpacket_traits<Packet2Xi>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xi pconj(const Packet2Xi& a) {
+  return a;
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xi pmul<Packet2Xi>(const Packet2Xi& a, const Packet2Xi& b) {
+  return __riscv_vmul(a, b, unpacket_traits<Packet2Xi>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xi pdiv<Packet2Xi>(const Packet2Xi& a, const Packet2Xi& b) {
+  return __riscv_vdiv(a, b, unpacket_traits<Packet2Xi>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xi pmadd(const Packet2Xi& a, const Packet2Xi& b, const Packet2Xi& c) {
+  return __riscv_vmadd(a, b, c, unpacket_traits<Packet2Xi>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xi pmsub(const Packet2Xi& a, const Packet2Xi& b, const Packet2Xi& c) {
+  return __riscv_vmadd(a, b, pnegate(c), unpacket_traits<Packet2Xi>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xi pnmadd(const Packet2Xi& a, const Packet2Xi& b, const Packet2Xi& c) {
+  return __riscv_vnmsub_vv_i32m2(a, b, c, unpacket_traits<Packet2Xi>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xi pnmsub(const Packet2Xi& a, const Packet2Xi& b, const Packet2Xi& c) {
+  return __riscv_vnmsub_vv_i32m2(a, b, pnegate(c), unpacket_traits<Packet2Xi>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xi pmin<Packet2Xi>(const Packet2Xi& a, const Packet2Xi& b) {
+  return __riscv_vmin(a, b, unpacket_traits<Packet2Xi>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xi pmax<Packet2Xi>(const Packet2Xi& a, const Packet2Xi& b) {
+  return __riscv_vmax(a, b, unpacket_traits<Packet2Xi>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xi pcmp_le<Packet2Xi>(const Packet2Xi& a, const Packet2Xi& b) {
+  PacketMask16 mask = __riscv_vmsle_vv_i32m2_b16(a, b, unpacket_traits<Packet2Xi>::size);
+  return __riscv_vmerge_vxm_i32m2(pzero(a), 0xffffffff, mask, unpacket_traits<Packet2Xi>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xi pcmp_lt<Packet2Xi>(const Packet2Xi& a, const Packet2Xi& b) {
+  PacketMask16 mask = __riscv_vmslt_vv_i32m2_b16(a, b, unpacket_traits<Packet2Xi>::size);
+  return __riscv_vmerge_vxm_i32m2(pzero(a), 0xffffffff, mask, unpacket_traits<Packet2Xi>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xi pcmp_eq<Packet2Xi>(const Packet2Xi& a, const Packet2Xi& b) {
+  PacketMask16 mask = __riscv_vmseq_vv_i32m2_b16(a, b, unpacket_traits<Packet2Xi>::size);
+  return __riscv_vmerge_vxm_i32m2(pzero(a), 0xffffffff, mask, unpacket_traits<Packet2Xi>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xi ptrue<Packet2Xi>(const Packet2Xi& /*a*/) {
+  return __riscv_vmv_v_x_i32m2(0xffffffffu, unpacket_traits<Packet2Xi>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xi pand<Packet2Xi>(const Packet2Xi& a, const Packet2Xi& b) {
+  return __riscv_vand_vv_i32m2(a, b, unpacket_traits<Packet2Xi>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xi por<Packet2Xi>(const Packet2Xi& a, const Packet2Xi& b) {
+  return __riscv_vor_vv_i32m2(a, b, unpacket_traits<Packet2Xi>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xi pxor<Packet2Xi>(const Packet2Xi& a, const Packet2Xi& b) {
+  return __riscv_vxor_vv_i32m2(a, b, unpacket_traits<Packet2Xi>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xi pandnot<Packet2Xi>(const Packet2Xi& a, const Packet2Xi& b) {
+  return __riscv_vand_vv_i32m2(a, __riscv_vnot_v_i32m2(b, unpacket_traits<Packet2Xi>::size),
+                               unpacket_traits<Packet2Xi>::size);
+}
+
+template <int N>
+EIGEN_STRONG_INLINE Packet2Xi parithmetic_shift_right(Packet2Xi a) {
+  return __riscv_vsra_vx_i32m2(a, N, unpacket_traits<Packet2Xi>::size);
+}
+
+template <int N>
+EIGEN_STRONG_INLINE Packet2Xi plogical_shift_right(Packet2Xi a) {
+  return __riscv_vreinterpret_i32m2(
+      __riscv_vsrl_vx_u32m2(__riscv_vreinterpret_u32m2(a), N, unpacket_traits<Packet2Xi>::size));
+}
+
+template <int N>
+EIGEN_STRONG_INLINE Packet2Xi plogical_shift_left(Packet2Xi a) {
+  return __riscv_vsll_vx_i32m2(a, N, unpacket_traits<Packet2Xi>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xi pload<Packet2Xi>(const numext::int32_t* from) {
+  EIGEN_DEBUG_ALIGNED_LOAD return __riscv_vle32_v_i32m2(from, unpacket_traits<Packet2Xi>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xi ploadu<Packet2Xi>(const numext::int32_t* from) {
+  EIGEN_DEBUG_UNALIGNED_LOAD return __riscv_vle32_v_i32m2(from, unpacket_traits<Packet2Xi>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xi ploaddup<Packet2Xi>(const numext::int32_t* from) {
+  Packet2Xu idx = __riscv_vid_v_u32m2(unpacket_traits<Packet2Xi>::size);
+  idx = __riscv_vsll_vx_u32m2(__riscv_vand_vx_u32m2(idx, 0xfffffffeu, unpacket_traits<Packet2Xi>::size), 1,
+                              unpacket_traits<Packet2Xi>::size);
+  // idx = 0 0 sizeof(int32_t) sizeof(int32_t) 2*sizeof(int32_t) 2*sizeof(int32_t) ...
+  return __riscv_vloxei32_v_i32m2(from, idx, unpacket_traits<Packet2Xi>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xi ploadquad<Packet2Xi>(const numext::int32_t* from) {
+  Packet2Xu idx = __riscv_vid_v_u32m2(unpacket_traits<Packet2Xi>::size);
+  idx = __riscv_vand_vx_u32m2(idx, 0xfffffffcu, unpacket_traits<Packet2Xi>::size);
+  return __riscv_vloxei32_v_i32m2(from, idx, unpacket_traits<Packet2Xi>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE void pstore<numext::int32_t>(numext::int32_t* to, const Packet2Xi& from) {
+  EIGEN_DEBUG_ALIGNED_STORE __riscv_vse32_v_i32m2(to, from, unpacket_traits<Packet2Xi>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE void pstoreu<numext::int32_t>(numext::int32_t* to, const Packet2Xi& from) {
+  EIGEN_DEBUG_UNALIGNED_STORE __riscv_vse32_v_i32m2(to, from, unpacket_traits<Packet2Xi>::size);
+}
+
+template <>
+EIGEN_DEVICE_FUNC inline Packet2Xi pgather<numext::int32_t, Packet2Xi>(const numext::int32_t* from,
+                                                                             Index stride) {
+  return __riscv_vlse32_v_i32m2(from, stride * sizeof(numext::int32_t), unpacket_traits<Packet2Xi>::size);
+}
+
+template <>
+EIGEN_DEVICE_FUNC inline void pscatter<numext::int32_t, Packet2Xi>(numext::int32_t* to, const Packet2Xi& from,
+                                                                      Index stride) {
+  __riscv_vsse32(to, stride * sizeof(numext::int32_t), from, unpacket_traits<Packet2Xi>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE numext::int32_t pfirst<Packet2Xi>(const Packet2Xi& a) {
+  return __riscv_vmv_x_s_i32m2_i32(a);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xi preverse(const Packet2Xi& a) {
+  Packet2Xu idx =
+      __riscv_vrsub_vx_u32m2(__riscv_vid_v_u32m2(unpacket_traits<Packet2Xi>::size),
+                             unpacket_traits<Packet2Xi>::size - 1, unpacket_traits<Packet2Xi>::size);
+  return __riscv_vrgather_vv_i32m2(a, idx, unpacket_traits<Packet2Xi>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xi pabs(const Packet2Xi& a) {
+  Packet2Xi mask = __riscv_vsra_vx_i32m2(a, 31, unpacket_traits<Packet2Xi>::size);
+  return __riscv_vsub_vv_i32m2(__riscv_vxor_vv_i32m2(a, mask, unpacket_traits<Packet2Xi>::size), mask,
+                               unpacket_traits<Packet2Xi>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE numext::int32_t predux<Packet2Xi>(const Packet2Xi& a) {
+  return __riscv_vmv_x(__riscv_vredsum_vs_i32m2_i32m1(
+      a, __riscv_vmv_v_x_i32m1(0, unpacket_traits<Packet2Xi>::size / 2), unpacket_traits<Packet2Xi>::size));
+}
+
+template <>
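+// Split the m2 register into its two m1 halves, multiply them elementwise, and
+// reduce the resulting m1 product.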
+EIGEN_STRONG_INLINE numext::int32_t predux_mul<Packet2Xi>(const Packet2Xi& a) {
+  return predux_mul<Packet1Xi>(__riscv_vmul_vv_i32m1(__riscv_vget_v_i32m2_i32m1(a, 0), __riscv_vget_v_i32m2_i32m1(a, 1),
+                                                    unpacket_traits<Packet1Xi>::size));
+}
+
+template <>
+EIGEN_STRONG_INLINE numext::int32_t predux_min<Packet2Xi>(const Packet2Xi& a) {
+  return __riscv_vmv_x(__riscv_vredmin_vs_i32m2_i32m1(
+      a, __riscv_vmv_v_x_i32m1((std::numeric_limits<numext::int32_t>::max)(), unpacket_traits<Packet2Xi>::size / 2),
+      unpacket_traits<Packet2Xi>::size));
+}
+
+template <>
+EIGEN_STRONG_INLINE numext::int32_t predux_max<Packet2Xi>(const Packet2Xi& a) {
+  return __riscv_vmv_x(__riscv_vredmax_vs_i32m2_i32m1(
+      a, __riscv_vmv_v_x_i32m1((std::numeric_limits<numext::int32_t>::min)(), unpacket_traits<Packet2Xi>::size / 2),
+      unpacket_traits<Packet2Xi>::size));
+}
+
+template <int N>
+EIGEN_DEVICE_FUNC inline void ptranspose(PacketBlock<Packet2Xi, N>& kernel) {
+  numext::int32_t buffer[unpacket_traits<Packet2Xi>::size * N] = {0};
+  int i = 0;
+
+  for (i = 0; i < N; i++) {
+    __riscv_vsse32(&buffer[i], N * sizeof(numext::int32_t), kernel.packet[i], unpacket_traits<Packet2Xi>::size);
+  }
+  for (i = 0; i < N; i++) {
+    kernel.packet[i] =
+        __riscv_vle32_v_i32m2(&buffer[i * unpacket_traits<Packet2Xi>::size], unpacket_traits<Packet2Xi>::size);
+  }
+}
+
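+// Halve the packet width by adding the low and high halves of the wider
+// register; the enable_if requires the element count to be a multiple of 8.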
+template <typename Packet = Packet4Xi>
+EIGEN_STRONG_INLINE
+typename std::enable_if<std::is_same<Packet, Packet4Xi>::value && (unpacket_traits<Packet4Xi>::size % 8) == 0,
+                        Packet2Xi>::type
+predux_half_dowto4(const Packet4Xi& a) {
+  return __riscv_vadd_vv_i32m2(__riscv_vget_v_i32m4_i32m2(a, 0), __riscv_vget_v_i32m4_i32m2(a, 1),
+                               unpacket_traits<Packet2Xi>::size);
+}
+
+template <typename Packet = Packet2Xi>
+EIGEN_STRONG_INLINE
+typename std::enable_if<std::is_same<Packet, Packet2Xi>::value && (unpacket_traits<Packet2Xi>::size % 8) == 0,
+                        Packet1Xi>::type
+predux_half_dowto4(const Packet2Xi& a) {
+  return __riscv_vadd_vv_i32m1(__riscv_vget_v_i32m2_i32m1(a, 0), __riscv_vget_v_i32m2_i32m1(a, 1),
+                               unpacket_traits<Packet1Xi>::size);
+}
+
+/********************************* Packet2Xf ************************************/
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xf ptrue<Packet2Xf>(const Packet2Xf& /*a*/) {
+  return __riscv_vreinterpret_f32m2(__riscv_vmv_v_x_u32m2(0xffffffffu, unpacket_traits<Packet2Xf>::size));
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xf pzero<Packet2Xf>(const Packet2Xf& /*a*/) {
+  return __riscv_vfmv_v_f_f32m2(0.0f, unpacket_traits<Packet2Xf>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xf pabs(const Packet2Xf& a) {
+  return __riscv_vfabs_v_f32m2(a, unpacket_traits<Packet2Xf>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xf pset1<Packet2Xf>(const float& from) {
+  return __riscv_vfmv_v_f_f32m2(from, unpacket_traits<Packet2Xf>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xf pset1frombits<Packet2Xf>(numext::uint32_t from) {
+  return __riscv_vreinterpret_f32m2(__riscv_vmv_v_x_u32m2(from, unpacket_traits<Packet2Xf>::size));
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xf plset<Packet2Xf>(const float& a) {
+  Packet2Xf idx = __riscv_vfcvt_f_x_v_f32m2(
+      __riscv_vreinterpret_v_u32m2_i32m2(__riscv_vid_v_u32m2(unpacket_traits<Packet2Xf>::size)),
+      unpacket_traits<Packet2Xf>::size);
+  return __riscv_vfadd_vf_f32m2(idx, a, unpacket_traits<Packet2Xf>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xf padd<Packet2Xf>(const Packet2Xf& a, const Packet2Xf& b) {
+  return __riscv_vfadd_vv_f32m2(a, b, unpacket_traits<Packet2Xf>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xf psub<Packet2Xf>(const Packet2Xf& a, const Packet2Xf& b) {
+  return __riscv_vfsub_vv_f32m2(a, b, unpacket_traits<Packet2Xf>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xf pnegate(const Packet2Xf& a) {
+  return __riscv_vfneg_v_f32m2(a, unpacket_traits<Packet2Xf>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xf pconj(const Packet2Xf& a) {
+  return a;
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xf pmul<Packet2Xf>(const Packet2Xf& a, const Packet2Xf& b) {
+  return __riscv_vfmul_vv_f32m2(a, b, unpacket_traits<Packet2Xf>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xf pdiv<Packet2Xf>(const Packet2Xf& a, const Packet2Xf& b) {
+  return __riscv_vfdiv_vv_f32m2(a, b, unpacket_traits<Packet2Xf>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xf pmadd(const Packet2Xf& a, const Packet2Xf& b, const Packet2Xf& c) {
+  return __riscv_vfmadd_vv_f32m2(a, b, c, unpacket_traits<Packet2Xf>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xf pmsub(const Packet2Xf& a, const Packet2Xf& b, const Packet2Xf& c) {
+  return __riscv_vfmsub_vv_f32m2(a, b, c, unpacket_traits<Packet2Xf>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xf pnmadd(const Packet2Xf& a, const Packet2Xf& b, const Packet2Xf& c) {
+  return __riscv_vfnmsub_vv_f32m2(a, b, c, unpacket_traits<Packet2Xf>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xf pnmsub(const Packet2Xf& a, const Packet2Xf& b, const Packet2Xf& c) {
+  return __riscv_vfnmadd_vv_f32m2(a, b, c, unpacket_traits<Packet2Xf>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xf pmin<Packet2Xf>(const Packet2Xf& a, const Packet2Xf& b) {
+  Packet2Xf nans =
+      __riscv_vfmv_v_f_f32m2((std::numeric_limits<float>::quiet_NaN)(), unpacket_traits<Packet2Xf>::size);
+  PacketMask16 mask = __riscv_vmfeq_vv_f32m2_b16(a, a, unpacket_traits<Packet2Xf>::size);
+  PacketMask16 mask2 = __riscv_vmfeq_vv_f32m2_b16(b, b, unpacket_traits<Packet2Xf>::size);
+  mask = __riscv_vmand_mm_b16(mask, mask2, unpacket_traits<Packet2Xf>::size);
+
+  return __riscv_vfmin_vv_f32m2_tumu(mask, nans, a, b, unpacket_traits<Packet2Xf>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xf pmin<PropagateNaN, Packet2Xf>(const Packet2Xf& a, const Packet2Xf& b) {
+  return pmin<Packet2Xf>(a, b);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xf pmin<PropagateNumbers, Packet2Xf>(const Packet2Xf& a, const Packet2Xf& b) {
+  return __riscv_vfmin_vv_f32m2(a, b, unpacket_traits<Packet2Xf>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xf pmax<Packet2Xf>(const Packet2Xf& a, const Packet2Xf& b) {
+  Packet2Xf nans =
+      __riscv_vfmv_v_f_f32m2((std::numeric_limits<float>::quiet_NaN)(), unpacket_traits<Packet2Xf>::size);
+  PacketMask16 mask = __riscv_vmfeq_vv_f32m2_b16(a, a, unpacket_traits<Packet2Xf>::size);
+  PacketMask16 mask2 = __riscv_vmfeq_vv_f32m2_b16(b, b, unpacket_traits<Packet2Xf>::size);
+  mask = __riscv_vmand_mm_b16(mask, mask2, unpacket_traits<Packet2Xf>::size);
+
+  return __riscv_vfmax_vv_f32m2_tumu(mask, nans, a, b, unpacket_traits<Packet2Xf>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xf pmax<PropagateNaN, Packet2Xf>(const Packet2Xf& a, const Packet2Xf& b) {
+  return pmax<Packet2Xf>(a, b);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xf pmax<PropagateNumbers, Packet2Xf>(const Packet2Xf& a, const Packet2Xf& b) {
+  return __riscv_vfmax_vv_f32m2(a, b, unpacket_traits<Packet2Xf>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xf pcmp_le<Packet2Xf>(const Packet2Xf& a, const Packet2Xf& b) {
+  PacketMask16 mask = __riscv_vmfle_vv_f32m2_b16(a, b, unpacket_traits<Packet2Xf>::size);
+  return __riscv_vmerge_vvm_f32m2(pzero<Packet2Xf>(a), ptrue<Packet2Xf>(a), mask,
+                                  unpacket_traits<Packet2Xf>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xf pcmp_lt<Packet2Xf>(const Packet2Xf& a, const Packet2Xf& b) {
+  PacketMask16 mask = __riscv_vmflt_vv_f32m2_b16(a, b, unpacket_traits<Packet2Xf>::size);
+  return __riscv_vmerge_vvm_f32m2(pzero<Packet2Xf>(a), ptrue<Packet2Xf>(a), mask,
+                                  unpacket_traits<Packet2Xf>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xf pcmp_eq<Packet2Xf>(const Packet2Xf& a, const Packet2Xf& b) {
+  PacketMask16 mask = __riscv_vmfeq_vv_f32m2_b16(a, b, unpacket_traits<Packet2Xf>::size);
+  return __riscv_vmerge_vvm_f32m2(pzero<Packet2Xf>(a), ptrue<Packet2Xf>(a), mask,
+                                  unpacket_traits<Packet2Xf>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xf pcmp_lt_or_nan<Packet2Xf>(const Packet2Xf& a, const Packet2Xf& b) {
+  PacketMask16 mask = __riscv_vmfge_vv_f32m2_b16(a, b, unpacket_traits<Packet2Xf>::size);
+  return __riscv_vfmerge_vfm_f32m2(ptrue<Packet2Xf>(a), 0.0f, mask, unpacket_traits<Packet2Xf>::size);
+}
+
+// Logical operations are not supported for float, so reinterpret-cast to unsigned integers and back.
+template <>
+EIGEN_STRONG_INLINE Packet2Xf pand<Packet2Xf>(const Packet2Xf& a, const Packet2Xf& b) {
+  return __riscv_vreinterpret_v_u32m2_f32m2(__riscv_vand_vv_u32m2(__riscv_vreinterpret_v_f32m2_u32m2(a),
+                                                                  __riscv_vreinterpret_v_f32m2_u32m2(b),
+                                                                  unpacket_traits<Packet2Xf>::size));
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xf por<Packet2Xf>(const Packet2Xf& a, const Packet2Xf& b) {
+  return __riscv_vreinterpret_v_u32m2_f32m2(__riscv_vor_vv_u32m2(__riscv_vreinterpret_v_f32m2_u32m2(a),
+                                                                 __riscv_vreinterpret_v_f32m2_u32m2(b),
+                                                                 unpacket_traits<Packet2Xf>::size));
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xf pxor<Packet2Xf>(const Packet2Xf& a, const Packet2Xf& b) {
+  return __riscv_vreinterpret_v_u32m2_f32m2(__riscv_vxor_vv_u32m2(__riscv_vreinterpret_v_f32m2_u32m2(a),
+                                                                  __riscv_vreinterpret_v_f32m2_u32m2(b),
+                                                                  unpacket_traits<Packet2Xf>::size));
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xf pandnot<Packet2Xf>(const Packet2Xf& a, const Packet2Xf& b) {
+  return __riscv_vreinterpret_v_u32m2_f32m2(__riscv_vand_vv_u32m2(
+      __riscv_vreinterpret_v_f32m2_u32m2(a),
+      __riscv_vnot_v_u32m2(__riscv_vreinterpret_v_f32m2_u32m2(b), unpacket_traits<Packet2Xf>::size),
+      unpacket_traits<Packet2Xf>::size));
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xf pload<Packet2Xf>(const float* from) {
+  EIGEN_DEBUG_ALIGNED_LOAD return __riscv_vle32_v_f32m2(from, unpacket_traits<Packet2Xf>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xf ploadu<Packet2Xf>(const float* from) {
+  EIGEN_DEBUG_UNALIGNED_LOAD return __riscv_vle32_v_f32m2(from, unpacket_traits<Packet2Xf>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xf ploaddup<Packet2Xf>(const float* from) {
+  Packet2Xu idx = __riscv_vid_v_u32m2(unpacket_traits<Packet2Xf>::size);
+  idx = __riscv_vsll_vx_u32m2(__riscv_vand_vx_u32m2(idx, 0xfffffffeu, unpacket_traits<Packet2Xf>::size), 1,
+                              unpacket_traits<Packet2Xf>::size);
+  return __riscv_vloxei32_v_f32m2(from, idx, unpacket_traits<Packet2Xf>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xf ploadquad<Packet2Xf>(const float* from) {
+  Packet2Xu idx = __riscv_vid_v_u32m2(unpacket_traits<Packet2Xf>::size);
+  idx = __riscv_vand_vx_u32m2(idx, 0xfffffffcu, unpacket_traits<Packet2Xf>::size);
+  return __riscv_vloxei32_v_f32m2(from, idx, unpacket_traits<Packet2Xf>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE void pstore<float>(float* to, const Packet2Xf& from) {
+  EIGEN_DEBUG_ALIGNED_STORE __riscv_vse32_v_f32m2(to, from, unpacket_traits<Packet2Xf>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE void pstoreu<float>(float* to, const Packet2Xf& from) {
+  EIGEN_DEBUG_UNALIGNED_STORE __riscv_vse32_v_f32m2(to, from, unpacket_traits<Packet2Xf>::size);
+}
+
+template <>
+EIGEN_DEVICE_FUNC inline Packet2Xf pgather<float, Packet2Xf>(const float* from, Index stride) {
+  return __riscv_vlse32_v_f32m2(from, stride * sizeof(float), unpacket_traits<Packet2Xf>::size);
+}
+
+template <>
+EIGEN_DEVICE_FUNC inline void pscatter<float, Packet2Xf>(float* to, const Packet2Xf& from, Index stride) {
+  __riscv_vsse32(to, stride * sizeof(float), from, unpacket_traits<Packet2Xf>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE float pfirst<Packet2Xf>(const Packet2Xf& a) {
+  return __riscv_vfmv_f_s_f32m2_f32(a);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xf psqrt(const Packet2Xf& a) {
+  return __riscv_vfsqrt_v_f32m2(a, unpacket_traits<Packet2Xf>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xf print<Packet2Xf>(const Packet2Xf& a) {
+  const Packet2Xf limit = pset1<Packet2Xf>(static_cast<float>(1 << 23));
+  const Packet2Xf abs_a = pabs(a);
+
+  PacketMask16 mask = __riscv_vmfne_vv_f32m2_b16(a, a, unpacket_traits<Packet2Xf>::size);
+  const Packet2Xf x = __riscv_vfadd_vv_f32m2_tumu(mask, a, a, a, unpacket_traits<Packet2Xf>::size);
+  const Packet2Xf new_x = __riscv_vfcvt_f_x_v_f32m2(
+      __riscv_vfcvt_x_f_v_i32m2(a, unpacket_traits<Packet2Xf>::size), unpacket_traits<Packet2Xf>::size);
+
+  mask = __riscv_vmflt_vv_f32m2_b16(abs_a, limit, unpacket_traits<Packet2Xf>::size);
+  Packet2Xf signed_x = __riscv_vfsgnj_vv_f32m2(new_x, x, unpacket_traits<Packet2Xf>::size);
+  return __riscv_vmerge_vvm_f32m2(x, signed_x, mask, unpacket_traits<Packet2Xf>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xf pfloor<Packet2Xf>(const Packet2Xf& a) {
+  Packet2Xf tmp = print<Packet2Xf>(a);
+  // Where rounding went up (tmp > a), subtract one to get the floor.
+  PacketMask16 mask = __riscv_vmflt_vv_f32m2_b16(a, tmp, unpacket_traits<Packet2Xf>::size);
+  return __riscv_vfsub_vf_f32m2_tumu(mask, tmp, tmp, 1.0f, unpacket_traits<Packet2Xf>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xf preverse(const Packet2Xf& a) {
+  Packet2Xu idx =
+      __riscv_vrsub_vx_u32m2(__riscv_vid_v_u32m2(unpacket_traits<Packet2Xf>::size),
+                             unpacket_traits<Packet2Xf>::size - 1, unpacket_traits<Packet2Xf>::size);
+  return __riscv_vrgather_vv_f32m2(a, idx, unpacket_traits<Packet2Xf>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xf pfrexp<Packet2Xf>(const Packet2Xf& a, Packet2Xf& exponent) {
+  return pfrexp_generic(a, exponent);
+}
+
+template <>
+EIGEN_STRONG_INLINE float predux<Packet2Xf>(const Packet2Xf& a) {
+  return __riscv_vfmv_f(__riscv_vfredusum_vs_f32m2_f32m1(
+      a, __riscv_vfmv_v_f_f32m1(0.0, unpacket_traits<Packet2Xf>::size / 2), unpacket_traits<Packet2Xf>::size));
+}
+
+template <>
+EIGEN_STRONG_INLINE float predux_mul<Packet2Xf>(const Packet2Xf& a) {
+  return predux_mul<Packet1Xf>(__riscv_vfmul_vv_f32m1(__riscv_vget_v_f32m2_f32m1(a, 0), __riscv_vget_v_f32m2_f32m1(a, 1),
+                                                     unpacket_traits<Packet1Xf>::size));
+}
+
+template <>
+EIGEN_STRONG_INLINE float predux_min<Packet2Xf>(const Packet2Xf& a) {
+  return (std::min)(__riscv_vfmv_f(__riscv_vfredmin_vs_f32m2_f32m1(
+                        a,
+                        __riscv_vfmv_v_f_f32m1((std::numeric_limits<float>::quiet_NaN)(),
+                                               unpacket_traits<Packet2Xf>::size / 2),
+                        unpacket_traits<Packet2Xf>::size)),
+                    (std::numeric_limits<float>::max)());
+}
+
+template <>
+EIGEN_STRONG_INLINE float predux_max<Packet2Xf>(const Packet2Xf& a) {
+  return (std::max)(__riscv_vfmv_f(__riscv_vfredmax_vs_f32m2_f32m1(
+                        a,
+                        __riscv_vfmv_v_f_f32m1((std::numeric_limits<float>::quiet_NaN)(),
+                                               unpacket_traits<Packet2Xf>::size / 2),
+                        unpacket_traits<Packet2Xf>::size)),
+                    -(std::numeric_limits<float>::max)());
+}
+
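+// Transpose through a stack buffer: each packet is written with an element
+// stride of N (strided store), then the rows are reloaded contiguously.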
+template <int N>
+EIGEN_DEVICE_FUNC inline void ptranspose(PacketBlock<Packet2Xf, N>& kernel) {
+  float buffer[unpacket_traits<Packet2Xf>::size * N];
+  int i = 0;
+
+  for (i = 0; i < N; i++) {
+    __riscv_vsse32(&buffer[i], N * sizeof(float), kernel.packet[i], unpacket_traits<Packet2Xf>::size);
+  }
+
+  for (i = 0; i < N; i++) {
+    kernel.packet[i] =
+        __riscv_vle32_v_f32m2(&buffer[i * unpacket_traits<Packet2Xf>::size], unpacket_traits<Packet2Xf>::size);
+  }
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xf pldexp<Packet2Xf>(const Packet2Xf& a, const Packet2Xf& exponent) {
+  return pldexp_generic(a, exponent);
+}
+
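+// Halve the packet width by adding the low and high halves of the
+// register group.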
+template <typename Packet = Packet4Xf>
+EIGEN_STRONG_INLINE
+typename std::enable_if<std::is_same<Packet, Packet4Xf>::value && (unpacket_traits<Packet4Xf>::size % 8) == 0,
+                        Packet2Xf>::type
+predux_half_dowto4(const Packet4Xf& a) {
+  return __riscv_vfadd_vv_f32m2(__riscv_vget_v_f32m4_f32m2(a, 0), __riscv_vget_v_f32m4_f32m2(a, 1),
+                                unpacket_traits<Packet2Xf>::size);
+}
+
+template <typename Packet = Packet2Xf>
+EIGEN_STRONG_INLINE
+typename std::enable_if<std::is_same<Packet, Packet2Xf>::value && (unpacket_traits<Packet2Xf>::size % 8) == 0,
+                        Packet1Xf>::type
+predux_half_dowto4(const Packet2Xf& a) {
+  return __riscv_vfadd_vv_f32m1(__riscv_vget_v_f32m2_f32m1(a, 0), __riscv_vget_v_f32m2_f32m1(a, 1),
+                                unpacket_traits<Packet1Xf>::size);
+}
+
+/********************************* Packet2Xl ************************************/
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xl pset1<Packet2Xl>(const numext::int64_t& from) {
+  return __riscv_vmv_v_x_i64m2(from, unpacket_traits<Packet2Xl>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xl plset<Packet2Xl>(const numext::int64_t& a) {
+  Packet2Xl idx = __riscv_vreinterpret_v_u64m2_i64m2(__riscv_vid_v_u64m2(unpacket_traits<Packet2Xl>::size));
+  return __riscv_vadd_vx_i64m2(idx, a, unpacket_traits<Packet2Xl>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xl pzero<Packet2Xl>(const Packet2Xl& /*a*/) {
+  return __riscv_vmv_v_x_i64m2(0, unpacket_traits<Packet2Xl>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xl padd<Packet2Xl>(const Packet2Xl& a, const Packet2Xl& b) {
+  return __riscv_vadd_vv_i64m2(a, b, unpacket_traits<Packet2Xl>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xl psub<Packet2Xl>(const Packet2Xl& a, const Packet2Xl& b) {
+  return __riscv_vsub(a, b, unpacket_traits<Packet2Xl>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xl pnegate(const Packet2Xl& a) {
+  return __riscv_vneg(a, unpacket_traits<Packet2Xl>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xl pconj(const Packet2Xl& a) {
+  return a;
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xl pmul<Packet2Xl>(const Packet2Xl& a, const Packet2Xl& b) {
+  return __riscv_vmul(a, b, unpacket_traits<Packet2Xl>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xl pdiv<Packet2Xl>(const Packet2Xl& a, const Packet2Xl& b) {
+  return __riscv_vdiv(a, b, unpacket_traits<Packet2Xl>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xl pmadd(const Packet2Xl& a, const Packet2Xl& b, const Packet2Xl& c) {
+  return __riscv_vmadd(a, b, c, unpacket_traits<Packet2Xl>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xl pmsub(const Packet2Xl& a, const Packet2Xl& b, const Packet2Xl& c) {
+  return __riscv_vmadd(a, b, pnegate(c), unpacket_traits<Packet2Xl>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xl pnmadd(const Packet2Xl& a, const Packet2Xl& b, const Packet2Xl& c) {
+  return __riscv_vnmsub_vv_i64m2(a, b, c, unpacket_traits<Packet2Xl>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xl pnmsub(const Packet2Xl& a, const Packet2Xl& b, const Packet2Xl& c) {
+  return __riscv_vnmsub_vv_i64m2(a, b, pnegate(c), unpacket_traits<Packet2Xl>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xl pmin<Packet2Xl>(const Packet2Xl& a, const Packet2Xl& b) {
+  return __riscv_vmin(a, b, unpacket_traits<Packet2Xl>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xl pmax<Packet2Xl>(const Packet2Xl& a, const Packet2Xl& b) {
+  return __riscv_vmax(a, b, unpacket_traits<Packet2Xl>::size);
+}
+
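+// Integer comparisons materialize the mask as an all-ones/all-zeros packet
+// by merging -1 over zero under the comparison mask.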
+template <>
+EIGEN_STRONG_INLINE Packet2Xl pcmp_le<Packet2Xl>(const Packet2Xl& a, const Packet2Xl& b) {
+  PacketMask32 mask = __riscv_vmsle_vv_i64m2_b32(a, b, unpacket_traits<Packet2Xl>::size);
+  return __riscv_vmerge_vxm_i64m2(pzero(a), 0xffffffffffffffff, mask, unpacket_traits<Packet2Xl>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xl pcmp_lt<Packet2Xl>(const Packet2Xl& a, const Packet2Xl& b) {
+  PacketMask32 mask = __riscv_vmslt_vv_i64m2_b32(a, b, unpacket_traits<Packet2Xl>::size);
+  return __riscv_vmerge_vxm_i64m2(pzero(a), 0xffffffffffffffff, mask, unpacket_traits<Packet2Xl>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xl pcmp_eq<Packet2Xl>(const Packet2Xl& a, const Packet2Xl& b) {
+  PacketMask32 mask = __riscv_vmseq_vv_i64m2_b32(a, b, unpacket_traits<Packet2Xl>::size);
+  return __riscv_vmerge_vxm_i64m2(pzero(a), 0xffffffffffffffff, mask, unpacket_traits<Packet2Xl>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xl ptrue<Packet2Xl>(const Packet2Xl& /*a*/) {
+  return __riscv_vmv_v_x_i64m2(0xffffffffffffffffu, unpacket_traits<Packet2Xl>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xl pand<Packet2Xl>(const Packet2Xl& a, const Packet2Xl& b) {
+  return __riscv_vand_vv_i64m2(a, b, unpacket_traits<Packet2Xl>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xl por<Packet2Xl>(const Packet2Xl& a, const Packet2Xl& b) {
+  return __riscv_vor_vv_i64m2(a, b, unpacket_traits<Packet2Xl>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xl pxor<Packet2Xl>(const Packet2Xl& a, const Packet2Xl& b) {
+  return __riscv_vxor_vv_i64m2(a, b, unpacket_traits<Packet2Xl>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xl pandnot<Packet2Xl>(const Packet2Xl& a, const Packet2Xl& b) {
+  return __riscv_vand_vv_i64m2(a, __riscv_vnot_v_i64m2(b, unpacket_traits<Packet2Xl>::size),
+                               unpacket_traits<Packet2Xl>::size);
+}
+
+template <int N>
+EIGEN_STRONG_INLINE Packet2Xl parithmetic_shift_right(Packet2Xl a) {
+  return __riscv_vsra_vx_i64m2(a, N, unpacket_traits<Packet2Xl>::size);
+}
+
+template <int N>
+EIGEN_STRONG_INLINE Packet2Xl plogical_shift_right(Packet2Xl a) {
+  return __riscv_vreinterpret_i64m2(
+      __riscv_vsrl_vx_u64m2(__riscv_vreinterpret_u64m2(a), N, unpacket_traits<Packet2Xl>::size));
+}
+
+template <int N>
+EIGEN_STRONG_INLINE Packet2Xl plogical_shift_left(Packet2Xl a) {
+  return __riscv_vsll_vx_i64m2(a, N, unpacket_traits<Packet2Xl>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xl pload<Packet2Xl>(const numext::int64_t* from) {
+  EIGEN_DEBUG_ALIGNED_LOAD return __riscv_vle64_v_i64m2(from, unpacket_traits<Packet2Xl>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xl ploadu<Packet2Xl>(const numext::int64_t* from) {
+  EIGEN_DEBUG_UNALIGNED_LOAD return __riscv_vle64_v_i64m2(from, unpacket_traits<Packet2Xl>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xl ploaddup<Packet2Xl>(const numext::int64_t* from) {
+  Packet2Xul idx = __riscv_vid_v_u64m2(unpacket_traits<Packet2Xl>::size);
+  idx = __riscv_vsll_vx_u64m2(__riscv_vand_vx_u64m2(idx, 0xfffffffffffffffeu, unpacket_traits<Packet2Xl>::size), 2,
+                              unpacket_traits<Packet2Xl>::size);
+  // idx = 0 0 sizeof(int64_t) sizeof(int64_t) 2*sizeof(int64_t) 2*sizeof(int64_t) ...
+  return __riscv_vloxei64_v_i64m2(from, idx, unpacket_traits<Packet2Xl>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xl ploadquad<Packet2Xl>(const numext::int64_t* from) {
+  Packet2Xul idx = __riscv_vid_v_u64m2(unpacket_traits<Packet2Xl>::size);
+  idx = __riscv_vsll_vx_u64m2(__riscv_vand_vx_u64m2(idx, 0xfffffffffffffffcu, unpacket_traits<Packet2Xl>::size), 1,
+                              unpacket_traits<Packet2Xl>::size);
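+  // idx = 0 0 0 0 sizeof(int64_t) sizeof(int64_t) sizeof(int64_t) sizeof(int64_t) ...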
+  return __riscv_vloxei64_v_i64m2(from, idx, unpacket_traits<Packet2Xl>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE void pstore<numext::int64_t>(numext::int64_t* to, const Packet2Xl& from) {
+  EIGEN_DEBUG_ALIGNED_STORE __riscv_vse64_v_i64m2(to, from, unpacket_traits<Packet2Xl>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE void pstoreu<numext::int64_t>(numext::int64_t* to, const Packet2Xl& from) {
+  EIGEN_DEBUG_UNALIGNED_STORE __riscv_vse64_v_i64m2(to, from, unpacket_traits<Packet2Xl>::size);
+}
+
+template <>
+EIGEN_DEVICE_FUNC inline Packet2Xl pgather<numext::int64_t, Packet2Xl>(const numext::int64_t* from,
+                                                                             Index stride) {
+  return __riscv_vlse64_v_i64m2(from, stride * sizeof(numext::int64_t), unpacket_traits<Packet2Xl>::size);
+}
+
+template <>
+EIGEN_DEVICE_FUNC inline void pscatter<numext::int64_t, Packet2Xl>(numext::int64_t* to, const Packet2Xl& from,
+                                                                      Index stride) {
+  __riscv_vsse64(to, stride * sizeof(numext::int64_t), from, unpacket_traits<Packet2Xl>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE numext::int64_t pfirst<Packet2Xl>(const Packet2Xl& a) {
+  return __riscv_vmv_x_s_i64m2_i64(a);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xl preverse(const Packet2Xl& a) {
+  Packet2Xul idx =
+      __riscv_vrsub_vx_u64m2(__riscv_vid_v_u64m2(unpacket_traits<Packet2Xl>::size),
+                             unpacket_traits<Packet2Xl>::size - 1, unpacket_traits<Packet2Xl>::size);
+  return __riscv_vrgather_vv_i64m2(a, idx, unpacket_traits<Packet2Xl>::size);
+}
+
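+// Branchless abs: mask = a >> 63 is all-ones for negative lanes, and
+// (a ^ mask) - mask negates exactly those lanes.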
+template <>
+EIGEN_STRONG_INLINE Packet2Xl pabs(const Packet2Xl& a) {
+  Packet2Xl mask = __riscv_vsra_vx_i64m2(a, 63, unpacket_traits<Packet2Xl>::size);
+  return __riscv_vsub_vv_i64m2(__riscv_vxor_vv_i64m2(a, mask, unpacket_traits<Packet2Xl>::size), mask,
+                               unpacket_traits<Packet2Xl>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE numext::int64_t predux<Packet2Xl>(const Packet2Xl& a) {
+  return __riscv_vmv_x(__riscv_vredsum_vs_i64m2_i64m1(
+      a, __riscv_vmv_v_x_i64m1(0, unpacket_traits<Packet2Xl>::size / 2), unpacket_traits<Packet2Xl>::size));
+}
+
+template <>
+EIGEN_STRONG_INLINE numext::int64_t predux_mul<Packet2Xl>(const Packet2Xl& a) {
+  return predux_mul<Packet1Xl>(__riscv_vmul_vv_i64m1(__riscv_vget_v_i64m2_i64m1(a, 0), __riscv_vget_v_i64m2_i64m1(a, 1),
+                                                    unpacket_traits<Packet1Xl>::size));
+}
+
+template <>
+EIGEN_STRONG_INLINE numext::int64_t predux_min<Packet2Xl>(const Packet2Xl& a) {
+  return __riscv_vmv_x(__riscv_vredmin_vs_i64m2_i64m1(
+      a, __riscv_vmv_v_x_i64m1((std::numeric_limits<numext::int64_t>::max)(), unpacket_traits<Packet2Xl>::size / 2),
+      unpacket_traits<Packet2Xl>::size));
+}
+
+template <>
+EIGEN_STRONG_INLINE numext::int64_t predux_max<Packet2Xl>(const Packet2Xl& a) {
+  return __riscv_vmv_x(__riscv_vredmax_vs_i64m2_i64m1(
+      a, __riscv_vmv_v_x_i64m1((std::numeric_limits<numext::int64_t>::min)(), unpacket_traits<Packet2Xl>::size / 2),
+      unpacket_traits<Packet2Xl>::size));
+}
+
+template <int N>
+EIGEN_DEVICE_FUNC inline void ptranspose(PacketBlock<Packet2Xl, N>& kernel) {
+  numext::int64_t buffer[unpacket_traits<Packet2Xl>::size * N] = {0};
+  int i = 0;
+
+  for (i = 0; i < N; i++) {
+    __riscv_vsse64(&buffer[i], N * sizeof(numext::int64_t), kernel.packet[i], unpacket_traits<Packet2Xl>::size);
+  }
+  for (i = 0; i < N; i++) {
+    kernel.packet[i] =
+        __riscv_vle64_v_i64m2(&buffer[i * unpacket_traits<Packet2Xl>::size], unpacket_traits<Packet2Xl>::size);
+  }
+}
+
+template <typename Packet = Packet4Xl>
+EIGEN_STRONG_INLINE
+typename std::enable_if<std::is_same<Packet, Packet4Xl>::value && (unpacket_traits<Packet4Xl>::size % 8) == 0,
+                        Packet2Xl>::type
+predux_half_dowto4(const Packet4Xl& a) {
+  return __riscv_vadd_vv_i64m2(__riscv_vget_v_i64m4_i64m2(a, 0), __riscv_vget_v_i64m4_i64m2(a, 1),
+                               unpacket_traits<Packet2Xl>::size);
+}
+
+template <typename Packet = Packet2Xl>
+EIGEN_STRONG_INLINE
+typename std::enable_if<std::is_same<Packet, Packet2Xl>::value && (unpacket_traits<Packet2Xl>::size % 8) == 0,
+                        Packet1Xl>::type
+predux_half_dowto4(const Packet2Xl& a) {
+  return __riscv_vadd_vv_i64m1(__riscv_vget_v_i64m2_i64m1(a, 0), __riscv_vget_v_i64m2_i64m1(a, 1),
+                               unpacket_traits<Packet1Xl>::size);
+}
+
+/********************************* Packet2Xd ************************************/
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xd ptrue<Packet2Xd>(const Packet2Xd& /*a*/) {
+  return __riscv_vreinterpret_f64m2(__riscv_vmv_v_x_u64m2(0xffffffffffffffffu, unpacket_traits<Packet2Xd>::size));
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xd pzero<Packet2Xd>(const Packet2Xd& /*a*/) {
+  return __riscv_vfmv_v_f_f64m2(0.0, unpacket_traits<Packet2Xd>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xd pabs(const Packet2Xd& a) {
+  return __riscv_vfabs_v_f64m2(a, unpacket_traits<Packet2Xd>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xd pset1<Packet2Xd>(const double& from) {
+  return __riscv_vfmv_v_f_f64m2(from, unpacket_traits<Packet2Xd>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xd pset1frombits<Packet2Xd>(numext::uint64_t from) {
+  return __riscv_vreinterpret_f64m2(__riscv_vmv_v_x_u64m2(from, unpacket_traits<Packet2Xd>::size));
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xd plset<Packet2Xd>(const double& a) {
+  Packet2Xd idx = __riscv_vfcvt_f_x_v_f64m2(
+      __riscv_vreinterpret_v_u64m2_i64m2(__riscv_vid_v_u64m2(unpacket_traits<Packet2Xd>::size)),
+      unpacket_traits<Packet2Xd>::size);
+  return __riscv_vfadd_vf_f64m2(idx, a, unpacket_traits<Packet2Xd>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xd padd<Packet2Xd>(const Packet2Xd& a, const Packet2Xd& b) {
+  return __riscv_vfadd_vv_f64m2(a, b, unpacket_traits<Packet2Xd>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xd psub<Packet2Xd>(const Packet2Xd& a, const Packet2Xd& b) {
+  return __riscv_vfsub_vv_f64m2(a, b, unpacket_traits<Packet2Xd>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xd pnegate(const Packet2Xd& a) {
+  return __riscv_vfneg_v_f64m2(a, unpacket_traits<Packet2Xd>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xd pconj(const Packet2Xd& a) {
+  return a;
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xd pmul<Packet2Xd>(const Packet2Xd& a, const Packet2Xd& b) {
+  return __riscv_vfmul_vv_f64m2(a, b, unpacket_traits<Packet2Xd>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xd pdiv<Packet2Xd>(const Packet2Xd& a, const Packet2Xd& b) {
+  return __riscv_vfdiv_vv_f64m2(a, b, unpacket_traits<Packet2Xd>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xd pmadd(const Packet2Xd& a, const Packet2Xd& b, const Packet2Xd& c) {
+  return __riscv_vfmadd_vv_f64m2(a, b, c, unpacket_traits<Packet2Xd>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xd pmsub(const Packet2Xd& a, const Packet2Xd& b, const Packet2Xd& c) {
+  return __riscv_vfmsub_vv_f64m2(a, b, c, unpacket_traits<Packet2Xd>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xd pnmadd(const Packet2Xd& a, const Packet2Xd& b, const Packet2Xd& c) {
+  return __riscv_vfnmsub_vv_f64m2(a, b, c, unpacket_traits<Packet2Xd>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xd pnmsub(const Packet2Xd& a, const Packet2Xd& b, const Packet2Xd& c) {
+  return __riscv_vfnmadd_vv_f64m2(a, b, c, unpacket_traits<Packet2Xd>::size);
+}
+
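+// NaN-propagating min: lanes where a or b is NaN are masked off and keep the
+// NaN destination (tumu policy); all other lanes take vfmin(a, b).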
+template <>
+EIGEN_STRONG_INLINE Packet2Xd pmin<Packet2Xd>(const Packet2Xd& a, const Packet2Xd& b) {
+  Packet2Xd nans =
+      __riscv_vfmv_v_f_f64m2((std::numeric_limits<double>::quiet_NaN)(), unpacket_traits<Packet2Xd>::size);
+  PacketMask32 mask = __riscv_vmfeq_vv_f64m2_b32(a, a, unpacket_traits<Packet2Xd>::size);
+  PacketMask32 mask2 = __riscv_vmfeq_vv_f64m2_b32(b, b, unpacket_traits<Packet2Xd>::size);
+  mask = __riscv_vmand_mm_b32(mask, mask2, unpacket_traits<Packet2Xd>::size);
+
+  return __riscv_vfmin_vv_f64m2_tumu(mask, nans, a, b, unpacket_traits<Packet2Xd>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xd pmin<PropagateNaN, Packet2Xd>(const Packet2Xd& a, const Packet2Xd& b) {
+  return pmin<Packet2Xd>(a, b);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xd pmin<PropagateNumbers, Packet2Xd>(const Packet2Xd& a, const Packet2Xd& b) {
+  return __riscv_vfmin_vv_f64m2(a, b, unpacket_traits<Packet2Xd>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xd pmax<Packet2Xd>(const Packet2Xd& a, const Packet2Xd& b) {
+  Packet2Xd nans =
+      __riscv_vfmv_v_f_f64m2((std::numeric_limits<double>::quiet_NaN)(), unpacket_traits<Packet2Xd>::size);
+  PacketMask32 mask = __riscv_vmfeq_vv_f64m2_b32(a, a, unpacket_traits<Packet2Xd>::size);
+  PacketMask32 mask2 = __riscv_vmfeq_vv_f64m2_b32(b, b, unpacket_traits<Packet2Xd>::size);
+  mask = __riscv_vmand_mm_b32(mask, mask2, unpacket_traits<Packet2Xd>::size);
+
+  return __riscv_vfmax_vv_f64m2_tumu(mask, nans, a, b, unpacket_traits<Packet2Xd>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xd pmax<PropagateNaN, Packet2Xd>(const Packet2Xd& a, const Packet2Xd& b) {
+  return pmax<Packet2Xd>(a, b);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xd pmax<PropagateNumbers, Packet2Xd>(const Packet2Xd& a, const Packet2Xd& b) {
+  return __riscv_vfmax_vv_f64m2(a, b, unpacket_traits<Packet2Xd>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xd pcmp_le<Packet2Xd>(const Packet2Xd& a, const Packet2Xd& b) {
+  PacketMask32 mask = __riscv_vmfle_vv_f64m2_b32(a, b, unpacket_traits<Packet2Xd>::size);
+  return __riscv_vmerge_vvm_f64m2(pzero<Packet2Xd>(a), ptrue<Packet2Xd>(a), mask,
+                                  unpacket_traits<Packet2Xd>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xd pcmp_lt<Packet2Xd>(const Packet2Xd& a, const Packet2Xd& b) {
+  PacketMask32 mask = __riscv_vmflt_vv_f64m2_b32(a, b, unpacket_traits<Packet2Xd>::size);
+  return __riscv_vmerge_vvm_f64m2(pzero<Packet2Xd>(a), ptrue<Packet2Xd>(a), mask,
+                                  unpacket_traits<Packet2Xd>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xd pcmp_eq<Packet2Xd>(const Packet2Xd& a, const Packet2Xd& b) {
+  PacketMask32 mask = __riscv_vmfeq_vv_f64m2_b32(a, b, unpacket_traits<Packet2Xd>::size);
+  return __riscv_vmerge_vvm_f64m2(pzero<Packet2Xd>(a), ptrue<Packet2Xd>(a), mask,
+                                  unpacket_traits<Packet2Xd>::size);
+}
+
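+// vmfge is false when either operand is NaN, so merging 0.0 where a >= b
+// leaves all-ones exactly for (a < b) || isnan(a) || isnan(b).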
+template <>
+EIGEN_STRONG_INLINE Packet2Xd pcmp_lt_or_nan<Packet2Xd>(const Packet2Xd& a, const Packet2Xd& b) {
+  PacketMask32 mask = __riscv_vmfge_vv_f64m2_b32(a, b, unpacket_traits<Packet2Xd>::size);
+  return __riscv_vfmerge_vfm_f64m2(ptrue<Packet2Xd>(a), 0.0, mask, unpacket_traits<Packet2Xd>::size);
+}
+
+// Bitwise logical operations are not defined for double, so reinterpret to
+// unsigned integers, operate, and reinterpret back.
+template <>
+EIGEN_STRONG_INLINE Packet2Xd pand<Packet2Xd>(const Packet2Xd& a, const Packet2Xd& b) {
+  return __riscv_vreinterpret_v_u64m2_f64m2(__riscv_vand_vv_u64m2(__riscv_vreinterpret_v_f64m2_u64m2(a),
+                                                                  __riscv_vreinterpret_v_f64m2_u64m2(b),
+                                                                  unpacket_traits<Packet2Xd>::size));
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xd por<Packet2Xd>(const Packet2Xd& a, const Packet2Xd& b) {
+  return __riscv_vreinterpret_v_u64m2_f64m2(__riscv_vor_vv_u64m2(__riscv_vreinterpret_v_f64m2_u64m2(a),
+                                                                 __riscv_vreinterpret_v_f64m2_u64m2(b),
+                                                                 unpacket_traits<Packet2Xd>::size));
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xd pxor<Packet2Xd>(const Packet2Xd& a, const Packet2Xd& b) {
+  return __riscv_vreinterpret_v_u64m2_f64m2(__riscv_vxor_vv_u64m2(__riscv_vreinterpret_v_f64m2_u64m2(a),
+                                                                  __riscv_vreinterpret_v_f64m2_u64m2(b),
+                                                                  unpacket_traits<Packet2Xd>::size));
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xd pandnot<Packet2Xd>(const Packet2Xd& a, const Packet2Xd& b) {
+  return __riscv_vreinterpret_v_u64m2_f64m2(__riscv_vand_vv_u64m2(
+      __riscv_vreinterpret_v_f64m2_u64m2(a),
+      __riscv_vnot_v_u64m2(__riscv_vreinterpret_v_f64m2_u64m2(b), unpacket_traits<Packet2Xd>::size),
+      unpacket_traits<Packet2Xd>::size));
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xd pload<Packet2Xd>(const double* from) {
+  EIGEN_DEBUG_ALIGNED_LOAD return __riscv_vle64_v_f64m2(from, unpacket_traits<Packet2Xd>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xd ploadu<Packet2Xd>(const double* from) {
+  EIGEN_DEBUG_UNALIGNED_LOAD return __riscv_vle64_v_f64m2(from, unpacket_traits<Packet2Xd>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xd ploaddup<Packet2Xd>(const double* from) {
+  Packet2Xul idx = __riscv_vid_v_u64m2(unpacket_traits<Packet2Xd>::size);
+  idx = __riscv_vsll_vx_u64m2(__riscv_vand_vx_u64m2(idx, 0xfffffffffffffffeu, unpacket_traits<Packet2Xd>::size), 2,
+                              unpacket_traits<Packet2Xd>::size);
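+  // idx = 0 0 sizeof(double) sizeof(double) 2*sizeof(double) 2*sizeof(double) ...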
+  return __riscv_vloxei64_v_f64m2(from, idx, unpacket_traits<Packet2Xd>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xd ploadquad<Packet2Xd>(const double* from) {
+  Packet2Xul idx = __riscv_vid_v_u64m2(unpacket_traits<Packet2Xd>::size);
+  idx = __riscv_vsll_vx_u64m2(__riscv_vand_vx_u64m2(idx, 0xfffffffffffffffcu, unpacket_traits<Packet2Xd>::size), 1,
+                              unpacket_traits<Packet2Xd>::size);
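+  // idx = 0 0 0 0 sizeof(double) sizeof(double) sizeof(double) sizeof(double) ...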
+  return __riscv_vloxei64_v_f64m2(from, idx, unpacket_traits<Packet2Xd>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE void pstore<double>(double* to, const Packet2Xd& from) {
+  EIGEN_DEBUG_ALIGNED_STORE __riscv_vse64_v_f64m2(to, from, unpacket_traits<Packet2Xd>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE void pstoreu<double>(double* to, const Packet2Xd& from) {
+  EIGEN_DEBUG_UNALIGNED_STORE __riscv_vse64_v_f64m2(to, from, unpacket_traits<Packet2Xd>::size);
+}
+
+template <>
+EIGEN_DEVICE_FUNC inline Packet2Xd pgather<double, Packet2Xd>(const double* from, Index stride) {
+  return __riscv_vlse64_v_f64m2(from, stride * sizeof(double), unpacket_traits<Packet2Xd>::size);
+}
+
+template <>
+EIGEN_DEVICE_FUNC inline void pscatter<double, Packet2Xd>(double* to, const Packet2Xd& from, Index stride) {
+  __riscv_vsse64(to, stride * sizeof(double), from, unpacket_traits<Packet2Xd>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE double pfirst<Packet2Xd>(const Packet2Xd& a) {
+  return __riscv_vfmv_f_s_f64m2_f64(a);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xd psqrt(const Packet2Xd& a) {
+  return __riscv_vfsqrt_v_f64m2(a, unpacket_traits<Packet2Xd>::size);
+}
+
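+// Same rounding scheme as print<Packet2Xf>, with 2^52 as the threshold above
+// which doubles are already integral.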
+template <>
+EIGEN_STRONG_INLINE Packet2Xd print<Packet2Xd>(const Packet2Xd& a) {
+  const Packet2Xd limit = pset1<Packet2Xd>(static_cast<double>(1ull << 52));
+  const Packet2Xd abs_a = pabs(a);
+
+  PacketMask32 mask = __riscv_vmfne_vv_f64m2_b32(a, a, unpacket_traits<Packet2Xd>::size);
+  const Packet2Xd x = __riscv_vfadd_vv_f64m2_tumu(mask, a, a, a, unpacket_traits<Packet2Xd>::size);
+  const Packet2Xd new_x = __riscv_vfcvt_f_x_v_f64m2(
+      __riscv_vfcvt_x_f_v_i64m2(a, unpacket_traits<Packet2Xd>::size), unpacket_traits<Packet2Xd>::size);
+
+  mask = __riscv_vmflt_vv_f64m2_b32(abs_a, limit, unpacket_traits<Packet2Xd>::size);
+  Packet2Xd signed_x = __riscv_vfsgnj_vv_f64m2(new_x, x, unpacket_traits<Packet2Xd>::size);
+  return __riscv_vmerge_vvm_f64m2(x, signed_x, mask, unpacket_traits<Packet2Xd>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xd pfloor<Packet2Xd>(const Packet2Xd& a) {
+  Packet2Xd tmp = print<Packet2Xd>(a);
+  // If the rounded value is greater than a, subtract one.
+  PacketMask32 mask = __riscv_vmflt_vv_f64m2_b32(a, tmp, unpacket_traits<Packet2Xd>::size);
+  return __riscv_vfsub_vf_f64m2_tumu(mask, tmp, tmp, 1.0, unpacket_traits<Packet2Xd>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xd preverse(const Packet2Xd& a) {
+  Packet2Xul idx =
+      __riscv_vrsub_vx_u64m2(__riscv_vid_v_u64m2(unpacket_traits<Packet2Xd>::size),
+                             unpacket_traits<Packet2Xd>::size - 1, unpacket_traits<Packet2Xd>::size);
+  return __riscv_vrgather_vv_f64m2(a, idx, unpacket_traits<Packet2Xd>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xd pfrexp<Packet2Xd>(const Packet2Xd& a, Packet2Xd& exponent) {
+  return pfrexp_generic(a, exponent);
+}
+
+template <>
+EIGEN_STRONG_INLINE double predux<Packet2Xd>(const Packet2Xd& a) {
+  return __riscv_vfmv_f(__riscv_vfredusum_vs_f64m2_f64m1(
+      a, __riscv_vfmv_v_f_f64m1(0.0, unpacket_traits<Packet2Xd>::size / 2), unpacket_traits<Packet2Xd>::size));
+}
+
+template <>
+EIGEN_STRONG_INLINE double predux_mul<Packet2Xd>(const Packet2Xd& a) {
+  return predux_mul<Packet1Xd>(__riscv_vfmul_vv_f64m1(__riscv_vget_v_f64m2_f64m1(a, 0), __riscv_vget_v_f64m2_f64m1(a, 1),
+                                                     unpacket_traits<Packet1Xd>::size));
+}
+
+template <>
+EIGEN_STRONG_INLINE double predux_min<Packet2Xd>(const Packet2Xd& a) {
+  return (std::min)(__riscv_vfmv_f(__riscv_vfredmin_vs_f64m2_f64m1(
+                        a,
+                        __riscv_vfmv_v_f_f64m1((std::numeric_limits<double>::quiet_NaN)(),
+                                               unpacket_traits<Packet2Xd>::size / 2),
+                        unpacket_traits<Packet2Xd>::size)),
+                    (std::numeric_limits<double>::max)());
+}
+
+template <>
+EIGEN_STRONG_INLINE double predux_max<Packet2Xd>(const Packet2Xd& a) {
+  return (std::max)(__riscv_vfmv_f(__riscv_vfredmax_vs_f64m2_f64m1(
+                        a,
+                        __riscv_vfmv_v_f_f64m1((std::numeric_limits<double>::quiet_NaN)(),
+                                               unpacket_traits<Packet2Xd>::size / 2),
+                        unpacket_traits<Packet2Xd>::size)),
+                    -(std::numeric_limits<double>::max)());
+}
+
+template <int N>
+EIGEN_DEVICE_FUNC inline void ptranspose(PacketBlock<Packet2Xd, N>& kernel) {
+  double buffer[unpacket_traits<Packet2Xd>::size * N];
+  int i = 0;
+
+  for (i = 0; i < N; i++) {
+    __riscv_vsse64(&buffer[i], N * sizeof(double), kernel.packet[i], unpacket_traits<Packet2Xd>::size);
+  }
+
+  for (i = 0; i < N; i++) {
+    kernel.packet[i] =
+        __riscv_vle64_v_f64m2(&buffer[i * unpacket_traits<Packet2Xd>::size], unpacket_traits<Packet2Xd>::size);
+  }
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xd pldexp<Packet2Xd>(const Packet2Xd& a, const Packet2Xd& exponent) {
+  return pldexp_generic(a, exponent);
+}
+
+template <typename Packet = Packet4Xd>
+EIGEN_STRONG_INLINE
+typename std::enable_if<std::is_same<Packet, Packet4Xd>::value && (unpacket_traits<Packet4Xd>::size % 8) == 0,
+                        Packet2Xd>::type
+predux_half_dowto4(const Packet4Xd& a) {
+  return __riscv_vfadd_vv_f64m2(__riscv_vget_v_f64m4_f64m2(a, 0), __riscv_vget_v_f64m4_f64m2(a, 1),
+                                unpacket_traits<Packet2Xd>::size);
+}
+
+template <typename Packet = Packet2Xd>
+EIGEN_STRONG_INLINE
+typename std::enable_if<std::is_same<Packet, Packet2Xd>::value && (unpacket_traits<Packet2Xd>::size % 8) == 0,
+                        Packet1Xd>::type
+predux_half_dowto4(const Packet2Xd& a) {
+  return __riscv_vfadd_vv_f64m1(__riscv_vget_v_f64m2_f64m1(a, 0), __riscv_vget_v_f64m2_f64m1(a, 1),
+                                unpacket_traits<Packet1Xd>::size);
+}
+
+/********************************* Packet2Xs ************************************/
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xs pset1<Packet2Xs>(const numext::int16_t& from) {
+  return __riscv_vmv_v_x_i16m2(from, unpacket_traits<Packet2Xs>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xs plset<Packet2Xs>(const numext::int16_t& a) {
+  Packet2Xs idx = __riscv_vreinterpret_v_u16m2_i16m2(__riscv_vid_v_u16m2(unpacket_traits<Packet2Xs>::size));
+  return __riscv_vadd_vx_i16m2(idx, a, unpacket_traits<Packet2Xs>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xs pzero<Packet2Xs>(const Packet2Xs& /*a*/) {
+  return __riscv_vmv_v_x_i16m2(0, unpacket_traits<Packet2Xs>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xs padd<Packet2Xs>(const Packet2Xs& a, const Packet2Xs& b) {
+  return __riscv_vadd_vv_i16m2(a, b, unpacket_traits<Packet2Xs>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xs psub<Packet2Xs>(const Packet2Xs& a, const Packet2Xs& b) {
+  return __riscv_vsub(a, b, unpacket_traits<Packet2Xs>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xs pnegate(const Packet2Xs& a) {
+  return __riscv_vneg(a, unpacket_traits<Packet2Xs>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xs pconj(const Packet2Xs& a) {
+  return a;
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xs pmul<Packet2Xs>(const Packet2Xs& a, const Packet2Xs& b) {
+  return __riscv_vmul(a, b, unpacket_traits<Packet2Xs>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xs pdiv<Packet2Xs>(const Packet2Xs& a, const Packet2Xs& b) {
+  return __riscv_vdiv(a, b, unpacket_traits<Packet2Xs>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xs pmadd(const Packet2Xs& a, const Packet2Xs& b, const Packet2Xs& c) {
+  return __riscv_vmadd(a, b, c, unpacket_traits<Packet2Xs>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xs pmsub(const Packet2Xs& a, const Packet2Xs& b, const Packet2Xs& c) {
+  return __riscv_vmadd(a, b, pnegate(c), unpacket_traits<Packet2Xs>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xs pnmadd(const Packet2Xs& a, const Packet2Xs& b, const Packet2Xs& c) {
+  return __riscv_vnmsub_vv_i16m2(a, b, c, unpacket_traits<Packet2Xs>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xs pnmsub(const Packet2Xs& a, const Packet2Xs& b, const Packet2Xs& c) {
+  return __riscv_vnmsub_vv_i16m2(a, b, pnegate(c), unpacket_traits<Packet2Xs>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xs pmin<Packet2Xs>(const Packet2Xs& a, const Packet2Xs& b) {
+  return __riscv_vmin(a, b, unpacket_traits<Packet2Xs>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xs pmax<Packet2Xs>(const Packet2Xs& a, const Packet2Xs& b) {
+  return __riscv_vmax(a, b, unpacket_traits<Packet2Xs>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xs pcmp_le<Packet2Xs>(const Packet2Xs& a, const Packet2Xs& b) {
+  PacketMask8 mask = __riscv_vmsle_vv_i16m2_b8(a, b, unpacket_traits<Packet2Xs>::size);
+  return __riscv_vmerge_vxm_i16m2(pzero(a), static_cast<numext::int16_t>(0xffff), mask, unpacket_traits<Packet2Xs>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xs pcmp_lt<Packet2Xs>(const Packet2Xs& a, const Packet2Xs& b) {
+  PacketMask8 mask = __riscv_vmslt_vv_i16m2_b8(a, b, unpacket_traits<Packet2Xs>::size);
+  return __riscv_vmerge_vxm_i16m2(pzero(a), static_cast<numext::int16_t>(0xffff), mask, unpacket_traits<Packet2Xs>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xs pcmp_eq<Packet2Xs>(const Packet2Xs& a, const Packet2Xs& b) {
+  PacketMask8 mask = __riscv_vmseq_vv_i16m2_b8(a, b, unpacket_traits<Packet2Xs>::size);
+  return __riscv_vmerge_vxm_i16m2(pzero(a), static_cast<numext::int16_t>(0xffff), mask, unpacket_traits<Packet2Xs>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xs ptrue<Packet2Xs>(const Packet2Xs& /*a*/) {
+  return __riscv_vmv_v_x_i16m2(static_cast<numext::int16_t>(0xffffu), unpacket_traits<Packet2Xs>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xs pand<Packet2Xs>(const Packet2Xs& a, const Packet2Xs& b) {
+  return __riscv_vand_vv_i16m2(a, b, unpacket_traits<Packet2Xs>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xs por<Packet2Xs>(const Packet2Xs& a, const Packet2Xs& b) {
+  return __riscv_vor_vv_i16m2(a, b, unpacket_traits<Packet2Xs>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xs pxor<Packet2Xs>(const Packet2Xs& a, const Packet2Xs& b) {
+  return __riscv_vxor_vv_i16m2(a, b, unpacket_traits<Packet2Xs>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xs pandnot<Packet2Xs>(const Packet2Xs& a, const Packet2Xs& b) {
+  return __riscv_vand_vv_i16m2(a, __riscv_vnot_v_i16m2(b, unpacket_traits<Packet2Xs>::size),
+                               unpacket_traits<Packet2Xs>::size);
+}
+
+template <int N>
+EIGEN_STRONG_INLINE Packet2Xs parithmetic_shift_right(Packet2Xs a) {
+  return __riscv_vsra_vx_i16m2(a, N, unpacket_traits<Packet2Xs>::size);
+}
+
+template <int N>
+EIGEN_STRONG_INLINE Packet2Xs plogical_shift_right(Packet2Xs a) {
+  return __riscv_vreinterpret_i16m2(
+      __riscv_vsrl_vx_u16m2(__riscv_vreinterpret_u16m2(a), N, unpacket_traits<Packet2Xs>::size));
+}
+
+template <int N>
+EIGEN_STRONG_INLINE Packet2Xs plogical_shift_left(Packet2Xs a) {
+  return __riscv_vsll_vx_i16m2(a, N, unpacket_traits<Packet2Xs>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xs pload<Packet2Xs>(const numext::int16_t* from) {
+  EIGEN_DEBUG_ALIGNED_LOAD return __riscv_vle16_v_i16m2(from, unpacket_traits<Packet2Xs>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xs ploadu<Packet2Xs>(const numext::int16_t* from) {
+  EIGEN_DEBUG_UNALIGNED_LOAD return __riscv_vle16_v_i16m2(from, unpacket_traits<Packet2Xs>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xs ploaddup<Packet2Xs>(const numext::int16_t* from) {
+  Packet2Xsu idx = __riscv_vid_v_u16m2(unpacket_traits<Packet2Xs>::size);
+  idx = __riscv_vand_vx_u16m2(idx, 0xfffeu, unpacket_traits<Packet2Xs>::size);
+  // idx = 0 0 sizeof(int16_t) sizeof(int16_t) 2*sizeof(int16_t) 2*sizeof(int16_t) ...
+  return __riscv_vloxei16_v_i16m2(from, idx, unpacket_traits<Packet2Xs>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xs ploadquad<Packet2Xs>(const numext::int16_t* from) {
+  Packet2Xsu idx = __riscv_vid_v_u16m2(unpacket_traits<Packet2Xs>::size);
+  idx = __riscv_vsrl_vx_u16m2(__riscv_vand_vx_u16m2(idx, 0xfffcu, unpacket_traits<Packet2Xs>::size), 1,
+                              unpacket_traits<Packet2Xs>::size);
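+  // idx = 0 0 0 0 sizeof(int16_t) sizeof(int16_t) sizeof(int16_t) sizeof(int16_t) ...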
+  return __riscv_vloxei16_v_i16m2(from, idx, unpacket_traits<Packet2Xs>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE void pstore<numext::int16_t>(numext::int16_t* to, const Packet2Xs& from) {
+  EIGEN_DEBUG_ALIGNED_STORE __riscv_vse16_v_i16m2(to, from, unpacket_traits<Packet2Xs>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE void pstoreu<numext::int16_t>(numext::int16_t* to, const Packet2Xs& from) {
+  EIGEN_DEBUG_UNALIGNED_STORE __riscv_vse16_v_i16m2(to, from, unpacket_traits<Packet2Xs>::size);
+}
+
+template <>
+EIGEN_DEVICE_FUNC inline Packet2Xs pgather<numext::int16_t, Packet2Xs>(const numext::int16_t* from,
+                                                                             Index stride) {
+  return __riscv_vlse16_v_i16m2(from, stride * sizeof(numext::int16_t), unpacket_traits<Packet2Xs>::size);
+}
+
+template <>
+EIGEN_DEVICE_FUNC inline void pscatter<numext::int16_t, Packet2Xs>(numext::int16_t* to, const Packet2Xs& from,
+                                                                      Index stride) {
+  __riscv_vsse16(to, stride * sizeof(numext::int16_t), from, unpacket_traits<Packet2Xs>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE numext::int16_t pfirst<Packet2Xs>(const Packet2Xs& a) {
+  return __riscv_vmv_x_s_i16m2_i16(a);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xs preverse(const Packet2Xs& a) {
+  Packet2Xsu idx =
+      __riscv_vrsub_vx_u16m2(__riscv_vid_v_u16m2(unpacket_traits<Packet2Xs>::size),
+                             unpacket_traits<Packet2Xs>::size - 1, unpacket_traits<Packet2Xs>::size);
+  return __riscv_vrgather_vv_i16m2(a, idx, unpacket_traits<Packet2Xs>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xs pabs(const Packet2Xs& a) {
+  Packet2Xs mask = __riscv_vsra_vx_i16m2(a, 15, unpacket_traits<Packet2Xs>::size);
+  return __riscv_vsub_vv_i16m2(__riscv_vxor_vv_i16m2(a, mask, unpacket_traits<Packet2Xs>::size), mask,
+                               unpacket_traits<Packet2Xs>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE numext::int16_t predux<Packet2Xs>(const Packet2Xs& a) {
+  return __riscv_vmv_x(__riscv_vredsum_vs_i16m2_i16m1(
+      a, __riscv_vmv_v_x_i16m1(0, unpacket_traits<Packet2Xs>::size / 2), unpacket_traits<Packet2Xs>::size));
+}
+
+template <>
+EIGEN_STRONG_INLINE numext::int16_t predux_mul<Packet2Xs>(const Packet2Xs& a) {
+  return predux_mul<Packet1Xs>(__riscv_vmul_vv_i16m1(__riscv_vget_v_i16m2_i16m1(a, 0), __riscv_vget_v_i16m2_i16m1(a, 1),
+                                                    unpacket_traits<Packet1Xs>::size));
+}
+
+template <>
+EIGEN_STRONG_INLINE numext::int16_t predux_min<Packet2Xs>(const Packet2Xs& a) {
+  return __riscv_vmv_x(__riscv_vredmin_vs_i16m2_i16m1(
+      a, __riscv_vmv_v_x_i16m1((std::numeric_limits<numext::int16_t>::max)(), unpacket_traits<Packet2Xs>::size / 2),
+      unpacket_traits<Packet2Xs>::size));
+}
+
+template <>
+EIGEN_STRONG_INLINE numext::int16_t predux_max<Packet2Xs>(const Packet2Xs& a) {
+  return __riscv_vmv_x(__riscv_vredmax_vs_i16m2_i16m1(
+      a, __riscv_vmv_v_x_i16m1((std::numeric_limits<numext::int16_t>::min)(), unpacket_traits<Packet2Xs>::size / 2),
+      unpacket_traits<Packet2Xs>::size));
+}
+
+template <int N>
+EIGEN_DEVICE_FUNC inline void ptranspose(PacketBlock<Packet2Xs, N>& kernel) {
+  numext::int16_t buffer[unpacket_traits<Packet2Xs>::size * N] = {0};
+  int i = 0;
+
+  for (i = 0; i < N; i++) {
+    __riscv_vsse16(&buffer[i], N * sizeof(numext::int16_t), kernel.packet[i], unpacket_traits<Packet2Xs>::size);
+  }
+  for (i = 0; i < N; i++) {
+    kernel.packet[i] =
+        __riscv_vle16_v_i16m2(&buffer[i * unpacket_traits<Packet2Xs>::size], unpacket_traits<Packet2Xs>::size);
+  }
+}
+
+template <typename Packet = Packet4Xs>
+EIGEN_STRONG_INLINE
+typename std::enable_if<std::is_same<Packet, Packet4Xs>::value && (unpacket_traits<Packet4Xs>::size % 8) == 0,
+                        Packet2Xs>::type
+predux_half_dowto4(const Packet4Xs& a) {
+  return __riscv_vadd_vv_i16m2(__riscv_vget_v_i16m4_i16m2(a, 0), __riscv_vget_v_i16m4_i16m2(a, 1),
+                               unpacket_traits<Packet2Xs>::size);
+}
+
+template <typename Packet = Packet2Xs>
+EIGEN_STRONG_INLINE
+typename std::enable_if<std::is_same<Packet, Packet2Xs>::value && (unpacket_traits<Packet2Xs>::size % 8) == 0,
+                        Packet1Xs>::type
+predux_half_dowto4(const Packet2Xs& a) {
+  return __riscv_vadd_vv_i16m1(__riscv_vget_v_i16m2_i16m1(a, 0), __riscv_vget_v_i16m2_i16m1(a, 1),
+                               unpacket_traits<Packet1Xs>::size);
+}
+
+}  // namespace internal
+}  // namespace Eigen
+
+#endif  // EIGEN_PACKET_MATH_RVV10_H
diff --git a/Eigen/src/Core/arch/RVV10/PacketMath4.h b/Eigen/src/Core/arch/RVV10/PacketMath4.h
new file mode 100644
index 0000000..30f5ca3
--- /dev/null
+++ b/Eigen/src/Core/arch/RVV10/PacketMath4.h
@@ -0,0 +1,1431 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2024 Kseniya Zaytseva <kseniya.zaytseva@syntacore.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_PACKET4_MATH_RVV10_H
+#define EIGEN_PACKET4_MATH_RVV10_H
+
+// IWYU pragma: private
+#include "../../InternalHeaderCheck.h"
+
+namespace Eigen {
+namespace internal {
+
+/********************************* Packet4Xi ************************************/
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xi pset1<Packet4Xi>(const numext::int32_t& from) {
+  return __riscv_vmv_v_x_i32m4(from, unpacket_traits<Packet4Xi>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xi plset<Packet4Xi>(const numext::int32_t& a) {
+  Packet4Xi idx = __riscv_vreinterpret_v_u32m4_i32m4(__riscv_vid_v_u32m4(unpacket_traits<Packet4Xi>::size));
+  return __riscv_vadd_vx_i32m4(idx, a, unpacket_traits<Packet4Xi>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xi pzero<Packet4Xi>(const Packet4Xi& /*a*/) {
+  return __riscv_vmv_v_x_i32m4(0, unpacket_traits<Packet4Xi>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xi padd<Packet4Xi>(const Packet4Xi& a, const Packet4Xi& b) {
+  return __riscv_vadd_vv_i32m4(a, b, unpacket_traits<Packet4Xi>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xi psub<Packet4Xi>(const Packet4Xi& a, const Packet4Xi& b) {
+  return __riscv_vsub(a, b, unpacket_traits<Packet4Xi>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xi pnegate(const Packet4Xi& a) {
+  return __riscv_vneg(a, unpacket_traits<Packet4Xi>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xi pconj(const Packet4Xi& a) {
+  return a;
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xi pmul<Packet4Xi>(const Packet4Xi& a, const Packet4Xi& b) {
+  return __riscv_vmul(a, b, unpacket_traits<Packet4Xi>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xi pdiv<Packet4Xi>(const Packet4Xi& a, const Packet4Xi& b) {
+  return __riscv_vdiv(a, b, unpacket_traits<Packet4Xi>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xi pmadd(const Packet4Xi& a, const Packet4Xi& b, const Packet4Xi& c) {
+  return __riscv_vmadd(a, b, c, unpacket_traits<Packet4Xi>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xi pmsub(const Packet4Xi& a, const Packet4Xi& b, const Packet4Xi& c) {
+  return __riscv_vmadd(a, b, pnegate(c), unpacket_traits<Packet4Xi>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xi pnmadd(const Packet4Xi& a, const Packet4Xi& b, const Packet4Xi& c) {
+  return __riscv_vnmsub_vv_i32m4(a, b, c, unpacket_traits<Packet4Xi>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xi pnmsub(const Packet4Xi& a, const Packet4Xi& b, const Packet4Xi& c) {
+  return __riscv_vnmsub_vv_i32m4(a, b, pnegate(c), unpacket_traits<Packet4Xi>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xi pmin<Packet4Xi>(const Packet4Xi& a, const Packet4Xi& b) {
+  return __riscv_vmin(a, b, unpacket_traits<Packet4Xi>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xi pmax<Packet4Xi>(const Packet4Xi& a, const Packet4Xi& b) {
+  return __riscv_vmax(a, b, unpacket_traits<Packet4Xi>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xi pcmp_le<Packet4Xi>(const Packet4Xi& a, const Packet4Xi& b) {
+  PacketMask8 mask = __riscv_vmsle_vv_i32m4_b8(a, b, unpacket_traits<Packet4Xi>::size);
+  return __riscv_vmerge_vxm_i32m4(pzero(a), 0xffffffff, mask, unpacket_traits<Packet4Xi>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xi pcmp_lt<Packet4Xi>(const Packet4Xi& a, const Packet4Xi& b) {
+  PacketMask8 mask = __riscv_vmslt_vv_i32m4_b8(a, b, unpacket_traits<Packet4Xi>::size);
+  return __riscv_vmerge_vxm_i32m4(pzero(a), 0xffffffff, mask, unpacket_traits<Packet4Xi>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xi pcmp_eq<Packet4Xi>(const Packet4Xi& a, const Packet4Xi& b) {
+  PacketMask8 mask = __riscv_vmseq_vv_i32m4_b8(a, b, unpacket_traits<Packet4Xi>::size);
+  return __riscv_vmerge_vxm_i32m4(pzero(a), 0xffffffff, mask, unpacket_traits<Packet4Xi>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xi ptrue<Packet4Xi>(const Packet4Xi& /*a*/) {
+  return __riscv_vmv_v_x_i32m4(0xffffffffu, unpacket_traits<Packet4Xi>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xi pand<Packet4Xi>(const Packet4Xi& a, const Packet4Xi& b) {
+  return __riscv_vand_vv_i32m4(a, b, unpacket_traits<Packet4Xi>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xi por<Packet4Xi>(const Packet4Xi& a, const Packet4Xi& b) {
+  return __riscv_vor_vv_i32m4(a, b, unpacket_traits<Packet4Xi>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xi pxor<Packet4Xi>(const Packet4Xi& a, const Packet4Xi& b) {
+  return __riscv_vxor_vv_i32m4(a, b, unpacket_traits<Packet4Xi>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xi pandnot<Packet4Xi>(const Packet4Xi& a, const Packet4Xi& b) {
+  return __riscv_vand_vv_i32m4(a, __riscv_vnot_v_i32m4(b, unpacket_traits<Packet4Xi>::size),
+                               unpacket_traits<Packet4Xi>::size);
+}
+
+template <int N>
+EIGEN_STRONG_INLINE Packet4Xi parithmetic_shift_right(Packet4Xi a) {
+  return __riscv_vsra_vx_i32m4(a, N, unpacket_traits<Packet4Xi>::size);
+}
+
+template <int N>
+EIGEN_STRONG_INLINE Packet4Xi plogical_shift_right(Packet4Xi a) {
+  return __riscv_vreinterpret_i32m4(
+      __riscv_vsrl_vx_u32m4(__riscv_vreinterpret_u32m4(a), N, unpacket_traits<Packet4Xi>::size));
+}
+
+template <int N>
+EIGEN_STRONG_INLINE Packet4Xi plogical_shift_left(Packet4Xi a) {
+  return __riscv_vsll_vx_i32m4(a, N, unpacket_traits<Packet4Xi>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xi pload<Packet4Xi>(const numext::int32_t* from) {
+  EIGEN_DEBUG_ALIGNED_LOAD return __riscv_vle32_v_i32m4(from, unpacket_traits<Packet4Xi>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xi ploadu<Packet4Xi>(const numext::int32_t* from) {
+  EIGEN_DEBUG_UNALIGNED_LOAD return __riscv_vle32_v_i32m4(from, unpacket_traits<Packet4Xi>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xi ploaddup<Packet4Xi>(const numext::int32_t* from) {
+  Packet4Xu idx = __riscv_vid_v_u32m4(unpacket_traits<Packet4Xi>::size);
+  idx = __riscv_vsll_vx_u32m4(__riscv_vand_vx_u32m4(idx, 0xfffffffeu, unpacket_traits<Packet4Xi>::size), 1,
+                              unpacket_traits<Packet4Xi>::size);
+  // idx = 0 0 sizeof(int32_t) sizeof(int32_t) 2*sizeof(int32_t) 2*sizeof(int32_t) ...
+  return __riscv_vloxei32_v_i32m4(from, idx, unpacket_traits<Packet4Xi>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xi ploadquad<Packet4Xi>(const numext::int32_t* from) {
+  Packet4Xu idx = __riscv_vid_v_u32m4(unpacket_traits<Packet4Xi>::size);
+  idx = __riscv_vand_vx_u32m4(idx, 0xfffffffcu, unpacket_traits<Packet4Xi>::size);
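+  // idx = 0 0 0 0 sizeof(int32_t) sizeof(int32_t) sizeof(int32_t) sizeof(int32_t) ...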
+  return __riscv_vloxei32_v_i32m4(from, idx, unpacket_traits<Packet4Xi>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE void pstore<numext::int32_t>(numext::int32_t* to, const Packet4Xi& from) {
+  EIGEN_DEBUG_ALIGNED_STORE __riscv_vse32_v_i32m4(to, from, unpacket_traits<Packet4Xi>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE void pstoreu<numext::int32_t>(numext::int32_t* to, const Packet4Xi& from) {
+  EIGEN_DEBUG_UNALIGNED_STORE __riscv_vse32_v_i32m4(to, from, unpacket_traits<Packet4Xi>::size);
+}
+
+template <>
+EIGEN_DEVICE_FUNC inline Packet4Xi pgather<numext::int32_t, Packet4Xi>(const numext::int32_t* from,
+                                                                             Index stride) {
+  return __riscv_vlse32_v_i32m4(from, stride * sizeof(numext::int32_t), unpacket_traits<Packet4Xi>::size);
+}
+
+template <>
+EIGEN_DEVICE_FUNC inline void pscatter<numext::int32_t, Packet4Xi>(numext::int32_t* to, const Packet4Xi& from,
+                                                                      Index stride) {
+  __riscv_vsse32(to, stride * sizeof(numext::int32_t), from, unpacket_traits<Packet4Xi>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE numext::int32_t pfirst<Packet4Xi>(const Packet4Xi& a) {
+  return __riscv_vmv_x_s_i32m4_i32(a);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xi preverse(const Packet4Xi& a) {
+  Packet4Xu idx =
+      __riscv_vrsub_vx_u32m4(__riscv_vid_v_u32m4(unpacket_traits<Packet4Xi>::size),
+                             unpacket_traits<Packet4Xi>::size - 1, unpacket_traits<Packet4Xi>::size);
+  return __riscv_vrgather_vv_i32m4(a, idx, unpacket_traits<Packet4Xi>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xi pabs(const Packet4Xi& a) {
+  Packet4Xi mask = __riscv_vsra_vx_i32m4(a, 31, unpacket_traits<Packet4Xi>::size);
+  return __riscv_vsub_vv_i32m4(__riscv_vxor_vv_i32m4(a, mask, unpacket_traits<Packet4Xi>::size), mask,
+                               unpacket_traits<Packet4Xi>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE numext::int32_t predux<Packet4Xi>(const Packet4Xi& a) {
+  return __riscv_vmv_x(__riscv_vredsum_vs_i32m4_i32m1(
+      a, __riscv_vmv_v_x_i32m1(0, unpacket_traits<Packet4Xi>::size / 4), unpacket_traits<Packet4Xi>::size));
+}
+
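+// Multiply the four m1 parts of the m4 register group pairwise, then reduce
+// the m1 product.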
+template <>
+EIGEN_STRONG_INLINE numext::int32_t predux_mul<Packet4Xi>(const Packet4Xi& a) {
+  Packet1Xi half1 = __riscv_vmul_vv_i32m1(__riscv_vget_v_i32m4_i32m1(a, 0), __riscv_vget_v_i32m4_i32m1(a, 1),
+                                         unpacket_traits<Packet1Xi>::size);
+  Packet1Xi half2 = __riscv_vmul_vv_i32m1(__riscv_vget_v_i32m4_i32m1(a, 2), __riscv_vget_v_i32m4_i32m1(a, 3),
+                                         unpacket_traits<Packet1Xi>::size);
+  return predux_mul<Packet1Xi>(__riscv_vmul_vv_i32m1(half1, half2, unpacket_traits<Packet1Xi>::size));
+}
+
+template <>
+EIGEN_STRONG_INLINE numext::int32_t predux_min<Packet4Xi>(const Packet4Xi& a) {
+  return __riscv_vmv_x(__riscv_vredmin_vs_i32m4_i32m1(
+      a, __riscv_vmv_v_x_i32m1((std::numeric_limits<numext::int32_t>::max)(), unpacket_traits<Packet4Xi>::size / 4),
+      unpacket_traits<Packet4Xi>::size));
+}
+
+template <>
+EIGEN_STRONG_INLINE numext::int32_t predux_max<Packet4Xi>(const Packet4Xi& a) {
+  return __riscv_vmv_x(__riscv_vredmax_vs_i32m4_i32m1(
+      a, __riscv_vmv_v_x_i32m1((std::numeric_limits<numext::int32_t>::min)(), unpacket_traits<Packet4Xi>::size / 4),
+      unpacket_traits<Packet4Xi>::size));
+}
+
+template <int N>
+EIGEN_DEVICE_FUNC inline void ptranspose(PacketBlock<Packet4Xi, N>& kernel) {
+  numext::int32_t buffer[unpacket_traits<Packet4Xi>::size * N] = {0};
+  int i = 0;
+
+  for (i = 0; i < N; i++) {
+    __riscv_vsse32(&buffer[i], N * sizeof(numext::int32_t), kernel.packet[i], unpacket_traits<Packet4Xi>::size);
+  }
+  for (i = 0; i < N; i++) {
+    kernel.packet[i] =
+        __riscv_vle32_v_i32m4(&buffer[i * unpacket_traits<Packet4Xi>::size], unpacket_traits<Packet4Xi>::size);
+  }
+}
+
+/********************************* Packet4Xf ************************************/
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xf ptrue<Packet4Xf>(const Packet4Xf& /*a*/) {
+  return __riscv_vreinterpret_f32m4(__riscv_vmv_v_x_u32m4(0xffffffffu, unpacket_traits<Packet4Xf>::size));
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xf pzero<Packet4Xf>(const Packet4Xf& /*a*/) {
+  return __riscv_vfmv_v_f_f32m4(0.0f, unpacket_traits<Packet4Xf>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xf pabs(const Packet4Xf& a) {
+  return __riscv_vfabs_v_f32m4(a, unpacket_traits<Packet4Xf>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xf pset1<Packet4Xf>(const float& from) {
+  return __riscv_vfmv_v_f_f32m4(from, unpacket_traits<Packet4Xf>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xf pset1frombits<Packet4Xf>(numext::uint32_t from) {
+  return __riscv_vreinterpret_f32m4(__riscv_vmv_v_x_u32m4(from, unpacket_traits<Packet4Xf>::size));
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xf plset<Packet4Xf>(const float& a) {
+  Packet4Xf idx = __riscv_vfcvt_f_x_v_f32m4(
+      __riscv_vreinterpret_v_u32m4_i32m4(__riscv_vid_v_u32m4(unpacket_traits<Packet4Xf>::size)),
+      unpacket_traits<Packet4Xf>::size);
+  return __riscv_vfadd_vf_f32m4(idx, a, unpacket_traits<Packet4Xf>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xf padd<Packet4Xf>(const Packet4Xf& a, const Packet4Xf& b) {
+  return __riscv_vfadd_vv_f32m4(a, b, unpacket_traits<Packet4Xf>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xf psub<Packet4Xf>(const Packet4Xf& a, const Packet4Xf& b) {
+  return __riscv_vfsub_vv_f32m4(a, b, unpacket_traits<Packet4Xf>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xf pnegate(const Packet4Xf& a) {
+  return __riscv_vfneg_v_f32m4(a, unpacket_traits<Packet4Xf>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xf pconj(const Packet4Xf& a) {
+  return a;
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xf pmul<Packet4Xf>(const Packet4Xf& a, const Packet4Xf& b) {
+  return __riscv_vfmul_vv_f32m4(a, b, unpacket_traits<Packet4Xf>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xf pdiv<Packet4Xf>(const Packet4Xf& a, const Packet4Xf& b) {
+  return __riscv_vfdiv_vv_f32m4(a, b, unpacket_traits<Packet4Xf>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xf pmadd(const Packet4Xf& a, const Packet4Xf& b, const Packet4Xf& c) {
+  return __riscv_vfmadd_vv_f32m4(a, b, c, unpacket_traits<Packet4Xf>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xf pmsub(const Packet4Xf& a, const Packet4Xf& b, const Packet4Xf& c) {
+  return __riscv_vfmsub_vv_f32m4(a, b, c, unpacket_traits<Packet4Xf>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xf pnmadd(const Packet4Xf& a, const Packet4Xf& b, const Packet4Xf& c) {
+  return __riscv_vfnmsub_vv_f32m4(a, b, c, unpacket_traits<Packet4Xf>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xf pnmsub(const Packet4Xf& a, const Packet4Xf& b, const Packet4Xf& c) {
+  return __riscv_vfnmadd_vv_f32m4(a, b, c, unpacket_traits<Packet4Xf>::size);
+}
+
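+// NaN-propagating min: lanes where a or b is NaN keep the NaN destination
+// (tumu policy); all other lanes take vfmin(a, b).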
+template <>
+EIGEN_STRONG_INLINE Packet4Xf pmin<Packet4Xf>(const Packet4Xf& a, const Packet4Xf& b) {
+  Packet4Xf nans =
+      __riscv_vfmv_v_f_f32m4((std::numeric_limits<float>::quiet_NaN)(), unpacket_traits<Packet4Xf>::size);
+  PacketMask8 mask = __riscv_vmfeq_vv_f32m4_b8(a, a, unpacket_traits<Packet4Xf>::size);
+  PacketMask8 mask2 = __riscv_vmfeq_vv_f32m4_b8(b, b, unpacket_traits<Packet4Xf>::size);
+  mask = __riscv_vmand_mm_b8(mask, mask2, unpacket_traits<Packet4Xf>::size);
+
+  return __riscv_vfmin_vv_f32m4_tumu(mask, nans, a, b, unpacket_traits<Packet4Xf>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xf pmin<PropagateNaN, Packet4Xf>(const Packet4Xf& a, const Packet4Xf& b) {
+  return pmin<Packet4Xf>(a, b);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xf pmin<PropagateNumbers, Packet4Xf>(const Packet4Xf& a, const Packet4Xf& b) {
+  return __riscv_vfmin_vv_f32m4(a, b, unpacket_traits<Packet4Xf>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xf pmax<Packet4Xf>(const Packet4Xf& a, const Packet4Xf& b) {
+  Packet4Xf nans =
+      __riscv_vfmv_v_f_f32m4((std::numeric_limits<float>::quiet_NaN)(), unpacket_traits<Packet4Xf>::size);
+  PacketMask8 mask = __riscv_vmfeq_vv_f32m4_b8(a, a, unpacket_traits<Packet4Xf>::size);
+  PacketMask8 mask2 = __riscv_vmfeq_vv_f32m4_b8(b, b, unpacket_traits<Packet4Xf>::size);
+  mask = __riscv_vmand_mm_b8(mask, mask2, unpacket_traits<Packet4Xf>::size);
+
+  return __riscv_vfmax_vv_f32m4_tumu(mask, nans, a, b, unpacket_traits<Packet4Xf>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xf pmax<PropagateNaN, Packet4Xf>(const Packet4Xf& a, const Packet4Xf& b) {
+  return pmax<Packet4Xf>(a, b);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xf pmax<PropagateNumbers, Packet4Xf>(const Packet4Xf& a, const Packet4Xf& b) {
+  return __riscv_vfmax_vv_f32m4(a, b, unpacket_traits<Packet4Xf>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xf pcmp_le<Packet4Xf>(const Packet4Xf& a, const Packet4Xf& b) {
+  PacketMask8 mask = __riscv_vmfle_vv_f32m4_b8(a, b, unpacket_traits<Packet4Xf>::size);
+  return __riscv_vmerge_vvm_f32m4(pzero<Packet4Xf>(a), ptrue<Packet4Xf>(a), mask,
+                                  unpacket_traits<Packet4Xf>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xf pcmp_lt<Packet4Xf>(const Packet4Xf& a, const Packet4Xf& b) {
+  PacketMask8 mask = __riscv_vmflt_vv_f32m4_b8(a, b, unpacket_traits<Packet4Xf>::size);
+  return __riscv_vmerge_vvm_f32m4(pzero<Packet4Xf>(a), ptrue<Packet4Xf>(a), mask,
+                                  unpacket_traits<Packet4Xf>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xf pcmp_eq<Packet4Xf>(const Packet4Xf& a, const Packet4Xf& b) {
+  PacketMask8 mask = __riscv_vmfeq_vv_f32m4_b8(a, b, unpacket_traits<Packet4Xf>::size);
+  return __riscv_vmerge_vvm_f32m4(pzero<Packet4Xf>(a), ptrue<Packet4Xf>(a), mask,
+                                  unpacket_traits<Packet4Xf>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xf pcmp_lt_or_nan<Packet4Xf>(const Packet4Xf& a, const Packet4Xf& b) {
+  PacketMask8 mask = __riscv_vmfge_vv_f32m4_b8(a, b, unpacket_traits<Packet4Xf>::size);
+  return __riscv_vfmerge_vfm_f32m4(ptrue<Packet4Xf>(a), 0.0f, mask, unpacket_traits<Packet4Xf>::size);
+}
+
+// Bitwise logical operations are not defined for float, so reinterpret to
+// unsigned integers, operate, and reinterpret back.
+template <>
+EIGEN_STRONG_INLINE Packet4Xf pand<Packet4Xf>(const Packet4Xf& a, const Packet4Xf& b) {
+  return __riscv_vreinterpret_v_u32m4_f32m4(__riscv_vand_vv_u32m4(__riscv_vreinterpret_v_f32m4_u32m4(a),
+                                                                  __riscv_vreinterpret_v_f32m4_u32m4(b),
+                                                                  unpacket_traits<Packet4Xf>::size));
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xf por<Packet4Xf>(const Packet4Xf& a, const Packet4Xf& b) {
+  return __riscv_vreinterpret_v_u32m4_f32m4(__riscv_vor_vv_u32m4(__riscv_vreinterpret_v_f32m4_u32m4(a),
+                                                                 __riscv_vreinterpret_v_f32m4_u32m4(b),
+                                                                 unpacket_traits<Packet4Xf>::size));
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xf pxor<Packet4Xf>(const Packet4Xf& a, const Packet4Xf& b) {
+  return __riscv_vreinterpret_v_u32m4_f32m4(__riscv_vxor_vv_u32m4(__riscv_vreinterpret_v_f32m4_u32m4(a),
+                                                                  __riscv_vreinterpret_v_f32m4_u32m4(b),
+                                                                  unpacket_traits<Packet4Xf>::size));
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xf pandnot<Packet4Xf>(const Packet4Xf& a, const Packet4Xf& b) {
+  return __riscv_vreinterpret_v_u32m4_f32m4(__riscv_vand_vv_u32m4(
+      __riscv_vreinterpret_v_f32m4_u32m4(a),
+      __riscv_vnot_v_u32m4(__riscv_vreinterpret_v_f32m4_u32m4(b), unpacket_traits<Packet4Xf>::size),
+      unpacket_traits<Packet4Xf>::size));
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xf pload<Packet4Xf>(const float* from) {
+  EIGEN_DEBUG_ALIGNED_LOAD return __riscv_vle32_v_f32m4(from, unpacket_traits<Packet4Xf>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xf ploadu<Packet4Xf>(const float* from) {
+  EIGEN_DEBUG_UNALIGNED_LOAD return __riscv_vle32_v_f32m4(from, unpacket_traits<Packet4Xf>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xf ploaddup<Packet4Xf>(const float* from) {
+  Packet4Xu idx = __riscv_vid_v_u32m4(unpacket_traits<Packet4Xf>::size);
+  idx = __riscv_vsll_vx_u32m4(__riscv_vand_vx_u32m4(idx, 0xfffffffeu, unpacket_traits<Packet4Xf>::size), 1,
+                              unpacket_traits<Packet4Xf>::size);
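+  // idx = 0 0 sizeof(float) sizeof(float) 2*sizeof(float) 2*sizeof(float) ...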
+  return __riscv_vloxei32_v_f32m4(from, idx, unpacket_traits<Packet4Xf>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xf ploadquad<Packet4Xf>(const float* from) {
+  Packet4Xu idx = __riscv_vid_v_u32m4(unpacket_traits<Packet4Xf>::size);
+  idx = __riscv_vand_vx_u32m4(idx, 0xfffffffcu, unpacket_traits<Packet4Xf>::size);
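+  // idx = 0 0 0 0 sizeof(float) sizeof(float) sizeof(float) sizeof(float) ...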
+  return __riscv_vloxei32_v_f32m4(from, idx, unpacket_traits<Packet4Xf>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE void pstore<float>(float* to, const Packet4Xf& from) {
+  EIGEN_DEBUG_ALIGNED_STORE __riscv_vse32_v_f32m4(to, from, unpacket_traits<Packet4Xf>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE void pstoreu<float>(float* to, const Packet4Xf& from) {
+  EIGEN_DEBUG_UNALIGNED_STORE __riscv_vse32_v_f32m4(to, from, unpacket_traits<Packet4Xf>::size);
+}
+
+template <>
+EIGEN_DEVICE_FUNC inline Packet4Xf pgather<float, Packet4Xf>(const float* from, Index stride) {
+  return __riscv_vlse32_v_f32m4(from, stride * sizeof(float), unpacket_traits<Packet4Xf>::size);
+}
+
+template <>
+EIGEN_DEVICE_FUNC inline void pscatter<float, Packet4Xf>(float* to, const Packet4Xf& from, Index stride) {
+  __riscv_vsse32(to, stride * sizeof(float), from, unpacket_traits<Packet4Xf>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE float pfirst<Packet4Xf>(const Packet4Xf& a) {
+  return __riscv_vfmv_f_s_f32m4_f32(a);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xf psqrt(const Packet4Xf& a) {
+  return __riscv_vfsqrt_v_f32m4(a, unpacket_traits<Packet4Xf>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xf print<Packet4Xf>(const Packet4Xf& a) {
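+  // Round to nearest integer by converting to int32 and back. NaN lanes are
+  // quieted first via a + a; lanes with |a| >= 2^23 are already integral (and
+  // could overflow the conversion), so they keep the quieted input; vfsgnj
+  // restores the sign of values that round to zero (e.g. -0.4 -> -0.0).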
+  const Packet4Xf limit = pset1<Packet4Xf>(static_cast<float>(1 << 23));
+  const Packet4Xf abs_a = pabs(a);
+
+  PacketMask8 mask = __riscv_vmfne_vv_f32m4_b8(a, a, unpacket_traits<Packet4Xf>::size);
+  const Packet4Xf x = __riscv_vfadd_vv_f32m4_tumu(mask, a, a, a, unpacket_traits<Packet4Xf>::size);
+  const Packet4Xf new_x = __riscv_vfcvt_f_x_v_f32m4(
+      __riscv_vfcvt_x_f_v_i32m4(a, unpacket_traits<Packet4Xf>::size), unpacket_traits<Packet4Xf>::size);
+
+  mask = __riscv_vmflt_vv_f32m4_b8(abs_a, limit, unpacket_traits<Packet4Xf>::size);
+  Packet4Xf signed_x = __riscv_vfsgnj_vv_f32m4(new_x, x, unpacket_traits<Packet4Xf>::size);
+  return __riscv_vmerge_vvm_f32m4(x, signed_x, mask, unpacket_traits<Packet4Xf>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xf pfloor<Packet4Xf>(const Packet4Xf& a) {
+  Packet4Xf tmp = print<Packet4Xf>(a);
+  // print rounds to nearest; if the result exceeds a, subtract one to get the floor.
+  PacketMask8 mask = __riscv_vmflt_vv_f32m4_b8(a, tmp, unpacket_traits<Packet4Xf>::size);
+  return __riscv_vfsub_vf_f32m4_tumu(mask, tmp, tmp, 1.0f, unpacket_traits<Packet4Xf>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xf preverse(const Packet4Xf& a) {
+  Packet4Xu idx =
+      __riscv_vrsub_vx_u32m4(__riscv_vid_v_u32m4(unpacket_traits<Packet4Xf>::size),
+                             unpacket_traits<Packet4Xf>::size - 1, unpacket_traits<Packet4Xf>::size);
+  return __riscv_vrgather_vv_f32m4(a, idx, unpacket_traits<Packet4Xf>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xf pfrexp<Packet4Xf>(const Packet4Xf& a, Packet4Xf& exponent) {
+  return pfrexp_generic(a, exponent);
+}
+
+template <>
+EIGEN_STRONG_INLINE float predux<Packet4Xf>(const Packet4Xf& a) {
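+  // The m1 reduction destination holds size / 4 elements, since the source packet is m4.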
+  return __riscv_vfmv_f(__riscv_vfredusum_vs_f32m4_f32m1(
+      a, __riscv_vfmv_v_f_f32m1(0.0, unpacket_traits<Packet4Xf>::size / 4), unpacket_traits<Packet4Xf>::size));
+}
+
+template <>
+EIGEN_STRONG_INLINE float predux_mul<Packet4Xf>(const Packet4Xf& a) {
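+  // Split the m4 register into four m1 parts, multiply them together, then reduce at m1.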
+  Packet1Xf half1 = __riscv_vfmul_vv_f32m1(__riscv_vget_v_f32m4_f32m1(a, 0), __riscv_vget_v_f32m4_f32m1(a, 1),
+                                          unpacket_traits<Packet1Xf>::size);
+  Packet1Xf half2 = __riscv_vfmul_vv_f32m1(__riscv_vget_v_f32m4_f32m1(a, 2), __riscv_vget_v_f32m4_f32m1(a, 3),
+                                          unpacket_traits<Packet1Xf>::size);
+  return predux_mul<Packet1Xf>(__riscv_vfmul_vv_f32m1(half1, half2, unpacket_traits<Packet1Xf>::size));
+}
+
+template <>
+EIGEN_STRONG_INLINE float predux_min<Packet4Xf>(const Packet4Xf& a) {
+  return (std::min)(__riscv_vfmv_f(__riscv_vfredmin_vs_f32m4_f32m1(
+                        a,
+                        __riscv_vfmv_v_f_f32m1((std::numeric_limits<float>::quiet_NaN)(),
+                                               unpacket_traits<Packet4Xf>::size / 4),
+                        unpacket_traits<Packet4Xf>::size)),
+                    (std::numeric_limits<float>::max)());
+}
+
+template <>
+EIGEN_STRONG_INLINE float predux_max<Packet4Xf>(const Packet4Xf& a) {
+  return (std::max)(__riscv_vfmv_f(__riscv_vfredmax_vs_f32m4_f32m1(
+                        a,
+                        __riscv_vfmv_v_f_f32m1((std::numeric_limits<float>::quiet_NaN)(),
+                                               unpacket_traits<Packet4Xf>::size / 4),
+                        unpacket_traits<Packet4Xf>::size)),
+                    -(std::numeric_limits<float>::max)());
+}
+
+template <int N>
+EIGEN_DEVICE_FUNC inline void ptranspose(PacketBlock<Packet4Xf, N>& kernel) {
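+  // Transpose through memory: strided stores interleave the packets into a
+  // row-major buffer, then contiguous loads read back the transposed rows.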
+  float buffer[unpacket_traits<Packet4Xf>::size * N];
+  int i = 0;
+
+  for (i = 0; i < N; i++) {
+    __riscv_vsse32(&buffer[i], N * sizeof(float), kernel.packet[i], unpacket_traits<Packet4Xf>::size);
+  }
+
+  for (i = 0; i < N; i++) {
+    kernel.packet[i] =
+        __riscv_vle32_v_f32m4(&buffer[i * unpacket_traits<Packet4Xf>::size], unpacket_traits<Packet4Xf>::size);
+  }
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xf pldexp<Packet4Xf>(const Packet4Xf& a, const Packet4Xf& exponent) {
+  return pldexp_generic(a, exponent);
+}
+
+/********************************* Packet4Xl ************************************/
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xl pset1<Packet4Xl>(const numext::int64_t& from) {
+  return __riscv_vmv_v_x_i64m4(from, unpacket_traits<Packet4Xl>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xl plset<Packet4Xl>(const numext::int64_t& a) {
+  Packet4Xl idx = __riscv_vreinterpret_v_u64m4_i64m4(__riscv_vid_v_u64m4(unpacket_traits<Packet4Xl>::size));
+  return __riscv_vadd_vx_i64m4(idx, a, unpacket_traits<Packet4Xl>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xl pzero<Packet4Xl>(const Packet4Xl& /*a*/) {
+  return __riscv_vmv_v_x_i64m4(0, unpacket_traits<Packet4Xl>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xl padd<Packet4Xl>(const Packet4Xl& a, const Packet4Xl& b) {
+  return __riscv_vadd_vv_i64m4(a, b, unpacket_traits<Packet4Xl>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xl psub<Packet4Xl>(const Packet4Xl& a, const Packet4Xl& b) {
+  return __riscv_vsub(a, b, unpacket_traits<Packet4Xl>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xl pnegate(const Packet4Xl& a) {
+  return __riscv_vneg(a, unpacket_traits<Packet4Xl>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xl pconj(const Packet4Xl& a) {
+  return a;
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xl pmul<Packet4Xl>(const Packet4Xl& a, const Packet4Xl& b) {
+  return __riscv_vmul(a, b, unpacket_traits<Packet4Xl>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xl pdiv<Packet4Xl>(const Packet4Xl& a, const Packet4Xl& b) {
+  return __riscv_vdiv(a, b, unpacket_traits<Packet4Xl>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xl pmadd(const Packet4Xl& a, const Packet4Xl& b, const Packet4Xl& c) {
+  return __riscv_vmadd(a, b, c, unpacket_traits<Packet4Xl>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xl pmsub(const Packet4Xl& a, const Packet4Xl& b, const Packet4Xl& c) {
+  return __riscv_vmadd(a, b, pnegate(c), unpacket_traits<Packet4Xl>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xl pnmadd(const Packet4Xl& a, const Packet4Xl& b, const Packet4Xl& c) {
+  return __riscv_vnmsub_vv_i64m4(a, b, c, unpacket_traits<Packet4Xl>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xl pnmsub(const Packet4Xl& a, const Packet4Xl& b, const Packet4Xl& c) {
+  return __riscv_vnmsub_vv_i64m4(a, b, pnegate(c), unpacket_traits<Packet4Xl>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xl pmin<Packet4Xl>(const Packet4Xl& a, const Packet4Xl& b) {
+  return __riscv_vmin(a, b, unpacket_traits<Packet4Xl>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xl pmax<Packet4Xl>(const Packet4Xl& a, const Packet4Xl& b) {
+  return __riscv_vmax(a, b, unpacket_traits<Packet4Xl>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xl pcmp_le<Packet4Xl>(const Packet4Xl& a, const Packet4Xl& b) {
+  PacketMask16 mask = __riscv_vmsle_vv_i64m4_b16(a, b, unpacket_traits<Packet4Xl>::size);
+  return __riscv_vmerge_vxm_i64m4(pzero(a), 0xffffffffffffffff, mask, unpacket_traits<Packet4Xl>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xl pcmp_lt<Packet4Xl>(const Packet4Xl& a, const Packet4Xl& b) {
+  PacketMask16 mask = __riscv_vmslt_vv_i64m4_b16(a, b, unpacket_traits<Packet4Xl>::size);
+  return __riscv_vmerge_vxm_i64m4(pzero(a), 0xffffffffffffffff, mask, unpacket_traits<Packet4Xl>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xl pcmp_eq<Packet4Xl>(const Packet4Xl& a, const Packet4Xl& b) {
+  PacketMask16 mask = __riscv_vmseq_vv_i64m4_b16(a, b, unpacket_traits<Packet4Xl>::size);
+  return __riscv_vmerge_vxm_i64m4(pzero(a), 0xffffffffffffffff, mask, unpacket_traits<Packet4Xl>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xl ptrue<Packet4Xl>(const Packet4Xl& /*a*/) {
+  return __riscv_vmv_v_x_i64m4(0xffffffffffffffffu, unpacket_traits<Packet4Xl>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xl pand<Packet4Xl>(const Packet4Xl& a, const Packet4Xl& b) {
+  return __riscv_vand_vv_i64m4(a, b, unpacket_traits<Packet4Xl>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xl por<Packet4Xl>(const Packet4Xl& a, const Packet4Xl& b) {
+  return __riscv_vor_vv_i64m4(a, b, unpacket_traits<Packet4Xl>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xl pxor<Packet4Xl>(const Packet4Xl& a, const Packet4Xl& b) {
+  return __riscv_vxor_vv_i64m4(a, b, unpacket_traits<Packet4Xl>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xl pandnot<Packet4Xl>(const Packet4Xl& a, const Packet4Xl& b) {
+  return __riscv_vand_vv_i64m4(a, __riscv_vnot_v_i64m4(b, unpacket_traits<Packet4Xl>::size),
+                               unpacket_traits<Packet4Xl>::size);
+}
+
+template <int N>
+EIGEN_STRONG_INLINE Packet4Xl parithmetic_shift_right(Packet4Xl a) {
+  return __riscv_vsra_vx_i64m4(a, N, unpacket_traits<Packet4Xl>::size);
+}
+
+template <int N>
+EIGEN_STRONG_INLINE Packet4Xl plogical_shift_right(Packet4Xl a) {
+  return __riscv_vreinterpret_i64m4(
+      __riscv_vsrl_vx_u64m4(__riscv_vreinterpret_u64m4(a), N, unpacket_traits<Packet4Xl>::size));
+}
+
+template <int N>
+EIGEN_STRONG_INLINE Packet4Xl plogical_shift_left(Packet4Xl a) {
+  return __riscv_vsll_vx_i64m4(a, N, unpacket_traits<Packet4Xl>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xl pload<Packet4Xl>(const numext::int64_t* from) {
+  EIGEN_DEBUG_ALIGNED_LOAD return __riscv_vle64_v_i64m4(from, unpacket_traits<Packet4Xl>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xl ploadu<Packet4Xl>(const numext::int64_t* from) {
+  EIGEN_DEBUG_UNALIGNED_LOAD return __riscv_vle64_v_i64m4(from, unpacket_traits<Packet4Xl>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xl ploaddup<Packet4Xl>(const numext::int64_t* from) {
+  Packet4Xul idx = __riscv_vid_v_u64m4(unpacket_traits<Packet4Xl>::size);
+  idx = __riscv_vsll_vx_u64m4(__riscv_vand_vx_u64m4(idx, 0xfffffffffffffffeu, unpacket_traits<Packet4Xl>::size), 2,
+                              unpacket_traits<Packet4Xl>::size);
+  // idx = 0 0 sizeof(int64_t) sizeof(int64_t) 2*sizeof(int64_t) 2*sizeof(int64_t) ...
+  return __riscv_vloxei64_v_i64m4(from, idx, unpacket_traits<Packet4Xl>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xl ploadquad<Packet4Xl>(const numext::int64_t* from) {
+  Packet4Xul idx = __riscv_vid_v_u64m4(unpacket_traits<Packet4Xl>::size);
+  idx = __riscv_vsll_vx_u64m4(__riscv_vand_vx_u64m4(idx, 0xfffffffffffffffcu, unpacket_traits<Packet4Xl>::size), 1,
+                              unpacket_traits<Packet4Xl>::size);
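+  // idx = 0 0 0 0 sizeof(int64_t) sizeof(int64_t) sizeof(int64_t) sizeof(int64_t) ...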
+  return __riscv_vloxei64_v_i64m4(from, idx, unpacket_traits<Packet4Xl>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE void pstore<numext::int64_t>(numext::int64_t* to, const Packet4Xl& from) {
+  EIGEN_DEBUG_ALIGNED_STORE __riscv_vse64_v_i64m4(to, from, unpacket_traits<Packet4Xl>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE void pstoreu<numext::int64_t>(numext::int64_t* to, const Packet4Xl& from) {
+  EIGEN_DEBUG_UNALIGNED_STORE __riscv_vse64_v_i64m4(to, from, unpacket_traits<Packet4Xl>::size);
+}
+
+template <>
+EIGEN_DEVICE_FUNC inline Packet4Xl pgather<numext::int64_t, Packet4Xl>(const numext::int64_t* from,
+                                                                             Index stride) {
+  return __riscv_vlse64_v_i64m4(from, stride * sizeof(numext::int64_t), unpacket_traits<Packet4Xl>::size);
+}
+
+template <>
+EIGEN_DEVICE_FUNC inline void pscatter<numext::int64_t, Packet4Xl>(numext::int64_t* to, const Packet4Xl& from,
+                                                                      Index stride) {
+  __riscv_vsse64(to, stride * sizeof(numext::int64_t), from, unpacket_traits<Packet4Xl>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE numext::int64_t pfirst<Packet4Xl>(const Packet4Xl& a) {
+  return __riscv_vmv_x_s_i64m4_i64(a);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xl preverse(const Packet4Xl& a) {
+  Packet4Xul idx =
+      __riscv_vrsub_vx_u64m4(__riscv_vid_v_u64m4(unpacket_traits<Packet4Xl>::size),
+                             unpacket_traits<Packet4Xl>::size - 1, unpacket_traits<Packet4Xl>::size);
+  return __riscv_vrgather_vv_i64m4(a, idx, unpacket_traits<Packet4Xl>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xl pabs(const Packet4Xl& a) {
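+  // Branchless abs: mask = a >> 63 is all ones for negative lanes; (a ^ mask) - mask negates them.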
+  Packet4Xl mask = __riscv_vsra_vx_i64m4(a, 63, unpacket_traits<Packet4Xl>::size);
+  return __riscv_vsub_vv_i64m4(__riscv_vxor_vv_i64m4(a, mask, unpacket_traits<Packet4Xl>::size), mask,
+                               unpacket_traits<Packet4Xl>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE numext::int64_t predux<Packet4Xl>(const Packet4Xl& a) {
+  return __riscv_vmv_x(__riscv_vredsum_vs_i64m4_i64m1(
+      a, __riscv_vmv_v_x_i64m1(0, unpacket_traits<Packet4Xl>::size / 4), unpacket_traits<Packet4Xl>::size));
+}
+
+template <>
+EIGEN_STRONG_INLINE numext::int64_t predux_mul<Packet4Xl>(const Packet4Xl& a) {
+  Packet1Xl half1 = __riscv_vmul_vv_i64m1(__riscv_vget_v_i64m4_i64m1(a, 0), __riscv_vget_v_i64m4_i64m1(a, 1),
+                                         unpacket_traits<Packet1Xl>::size);
+  Packet1Xl half2 = __riscv_vmul_vv_i64m1(__riscv_vget_v_i64m4_i64m1(a, 2), __riscv_vget_v_i64m4_i64m1(a, 3),
+                                         unpacket_traits<Packet1Xl>::size);
+  return predux_mul<Packet1Xl>(__riscv_vmul_vv_i64m1(half1, half2, unpacket_traits<Packet1Xl>::size));
+}
+
+template <>
+EIGEN_STRONG_INLINE numext::int64_t predux_min<Packet4Xl>(const Packet4Xl& a) {
+  return __riscv_vmv_x(__riscv_vredmin_vs_i64m4_i64m1(
+      a, __riscv_vmv_v_x_i64m1((std::numeric_limits<numext::int64_t>::max)(), unpacket_traits<Packet4Xl>::size / 4),
+      unpacket_traits<Packet4Xl>::size));
+}
+
+template <>
+EIGEN_STRONG_INLINE numext::int64_t predux_max<Packet4Xl>(const Packet4Xl& a) {
+  return __riscv_vmv_x(__riscv_vredmax_vs_i64m4_i64m1(
+      a, __riscv_vmv_v_x_i64m1((std::numeric_limits<numext::int64_t>::min)(), unpacket_traits<Packet4Xl>::size / 4),
+      unpacket_traits<Packet4Xl>::size));
+}
+
+template <int N>
+EIGEN_DEVICE_FUNC inline void ptranspose(PacketBlock<Packet4Xl, N>& kernel) {
+  numext::int64_t buffer[unpacket_traits<Packet4Xl>::size * N] = {0};
+  int i = 0;
+
+  for (i = 0; i < N; i++) {
+    __riscv_vsse64(&buffer[i], N * sizeof(numext::int64_t), kernel.packet[i], unpacket_traits<Packet4Xl>::size);
+  }
+  for (i = 0; i < N; i++) {
+    kernel.packet[i] =
+        __riscv_vle64_v_i64m4(&buffer[i * unpacket_traits<Packet4Xl>::size], unpacket_traits<Packet4Xl>::size);
+  }
+}
+
+/********************************* Packet4Xd ************************************/
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xd ptrue<Packet4Xd>(const Packet4Xd& /*a*/) {
+  return __riscv_vreinterpret_f64m4(__riscv_vmv_v_x_u64m4(0xffffffffffffffffu, unpacket_traits<Packet4Xd>::size));
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xd pzero<Packet4Xd>(const Packet4Xd& /*a*/) {
+  return __riscv_vfmv_v_f_f64m4(0.0, unpacket_traits<Packet4Xd>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xd pabs(const Packet4Xd& a) {
+  return __riscv_vfabs_v_f64m4(a, unpacket_traits<Packet4Xd>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xd pset1<Packet4Xd>(const double& from) {
+  return __riscv_vfmv_v_f_f64m4(from, unpacket_traits<Packet4Xd>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xd pset1frombits<Packet4Xd>(numext::uint64_t from) {
+  return __riscv_vreinterpret_f64m4(__riscv_vmv_v_x_u64m4(from, unpacket_traits<Packet4Xd>::size));
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xd plset<Packet4Xd>(const double& a) {
+  Packet4Xd idx = __riscv_vfcvt_f_x_v_f64m4(
+      __riscv_vreinterpret_v_u64m4_i64m4(__riscv_vid_v_u64m4(unpacket_traits<Packet4Xd>::size)),
+      unpacket_traits<Packet4Xd>::size);
+  return __riscv_vfadd_vf_f64m4(idx, a, unpacket_traits<Packet4Xd>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xd padd<Packet4Xd>(const Packet4Xd& a, const Packet4Xd& b) {
+  return __riscv_vfadd_vv_f64m4(a, b, unpacket_traits<Packet4Xd>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xd psub<Packet4Xd>(const Packet4Xd& a, const Packet4Xd& b) {
+  return __riscv_vfsub_vv_f64m4(a, b, unpacket_traits<Packet4Xd>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xd pnegate(const Packet4Xd& a) {
+  return __riscv_vfneg_v_f64m4(a, unpacket_traits<Packet4Xd>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xd pconj(const Packet4Xd& a) {
+  return a;
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xd pmul<Packet4Xd>(const Packet4Xd& a, const Packet4Xd& b) {
+  return __riscv_vfmul_vv_f64m4(a, b, unpacket_traits<Packet4Xd>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xd pdiv<Packet4Xd>(const Packet4Xd& a, const Packet4Xd& b) {
+  return __riscv_vfdiv_vv_f64m4(a, b, unpacket_traits<Packet4Xd>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xd pmadd(const Packet4Xd& a, const Packet4Xd& b, const Packet4Xd& c) {
+  return __riscv_vfmadd_vv_f64m4(a, b, c, unpacket_traits<Packet4Xd>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xd pmsub(const Packet4Xd& a, const Packet4Xd& b, const Packet4Xd& c) {
+  return __riscv_vfmsub_vv_f64m4(a, b, c, unpacket_traits<Packet4Xd>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xd pnmadd(const Packet4Xd& a, const Packet4Xd& b, const Packet4Xd& c) {
+  return __riscv_vfnmsub_vv_f64m4(a, b, c, unpacket_traits<Packet4Xd>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xd pnmsub(const Packet4Xd& a, const Packet4Xd& b, const Packet4Xd& c) {
+  return __riscv_vfnmadd_vv_f64m4(a, b, c, unpacket_traits<Packet4Xd>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xd pmin<Packet4Xd>(const Packet4Xd& a, const Packet4Xd& b) {
+  Packet4Xd nans =
+      __riscv_vfmv_v_f_f64m4((std::numeric_limits<double>::quiet_NaN)(), unpacket_traits<Packet4Xd>::size);
+  PacketMask16 mask = __riscv_vmfeq_vv_f64m4_b16(a, a, unpacket_traits<Packet4Xd>::size);
+  PacketMask16 mask2 = __riscv_vmfeq_vv_f64m4_b16(b, b, unpacket_traits<Packet4Xd>::size);
+  mask = __riscv_vmand_mm_b16(mask, mask2, unpacket_traits<Packet4Xd>::size);
+
+  return __riscv_vfmin_vv_f64m4_tumu(mask, nans, a, b, unpacket_traits<Packet4Xd>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xd pmin<PropagateNaN, Packet4Xd>(const Packet4Xd& a, const Packet4Xd& b) {
+  return pmin<Packet4Xd>(a, b);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xd pmin<PropagateNumbers, Packet4Xd>(const Packet4Xd& a, const Packet4Xd& b) {
+  return __riscv_vfmin_vv_f64m4(a, b, unpacket_traits<Packet4Xd>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xd pmax<Packet4Xd>(const Packet4Xd& a, const Packet4Xd& b) {
+  Packet4Xd nans =
+      __riscv_vfmv_v_f_f64m4((std::numeric_limits<double>::quiet_NaN)(), unpacket_traits<Packet4Xd>::size);
+  PacketMask16 mask = __riscv_vmfeq_vv_f64m4_b16(a, a, unpacket_traits<Packet4Xd>::size);
+  PacketMask16 mask2 = __riscv_vmfeq_vv_f64m4_b16(b, b, unpacket_traits<Packet4Xd>::size);
+  mask = __riscv_vmand_mm_b16(mask, mask2, unpacket_traits<Packet4Xd>::size);
+
+  return __riscv_vfmax_vv_f64m4_tumu(mask, nans, a, b, unpacket_traits<Packet4Xd>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xd pmax<PropagateNaN, Packet4Xd>(const Packet4Xd& a, const Packet4Xd& b) {
+  return pmax<Packet4Xd>(a, b);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xd pmax<PropagateNumbers, Packet4Xd>(const Packet4Xd& a, const Packet4Xd& b) {
+  return __riscv_vfmax_vv_f64m4(a, b, unpacket_traits<Packet4Xd>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xd pcmp_le<Packet4Xd>(const Packet4Xd& a, const Packet4Xd& b) {
+  PacketMask16 mask = __riscv_vmfle_vv_f64m4_b16(a, b, unpacket_traits<Packet4Xd>::size);
+  return __riscv_vmerge_vvm_f64m4(pzero<Packet4Xd>(a), ptrue<Packet4Xd>(a), mask,
+                                  unpacket_traits<Packet4Xd>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xd pcmp_lt<Packet4Xd>(const Packet4Xd& a, const Packet4Xd& b) {
+  PacketMask16 mask = __riscv_vmflt_vv_f64m4_b16(a, b, unpacket_traits<Packet4Xd>::size);
+  return __riscv_vmerge_vvm_f64m4(pzero<Packet4Xd>(a), ptrue<Packet4Xd>(a), mask,
+                                  unpacket_traits<Packet4Xd>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xd pcmp_eq<Packet4Xd>(const Packet4Xd& a, const Packet4Xd& b) {
+  PacketMask16 mask = __riscv_vmfeq_vv_f64m4_b16(a, b, unpacket_traits<Packet4Xd>::size);
+  return __riscv_vmerge_vvm_f64m4(pzero<Packet4Xd>(a), ptrue<Packet4Xd>(a), mask,
+                                  unpacket_traits<Packet4Xd>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xd pcmp_lt_or_nan<Packet4Xd>(const Packet4Xd& a, const Packet4Xd& b) {
+  PacketMask16 mask = __riscv_vmfge_vv_f64m4_b16(a, b, unpacket_traits<Packet4Xd>::size);
+  return __riscv_vfmerge_vfm_f64m4(ptrue<Packet4Xd>(a), 0.0, mask, unpacket_traits<Packet4Xd>::size);
+}
+
+// Bitwise logical operations are not defined for double, so reinterpret to unsigned integers, operate, and cast back
+template <>
+EIGEN_STRONG_INLINE Packet4Xd pand<Packet4Xd>(const Packet4Xd& a, const Packet4Xd& b) {
+  return __riscv_vreinterpret_v_u64m4_f64m4(__riscv_vand_vv_u64m4(__riscv_vreinterpret_v_f64m4_u64m4(a),
+                                                                  __riscv_vreinterpret_v_f64m4_u64m4(b),
+                                                                  unpacket_traits<Packet4Xd>::size));
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xd por<Packet4Xd>(const Packet4Xd& a, const Packet4Xd& b) {
+  return __riscv_vreinterpret_v_u64m4_f64m4(__riscv_vor_vv_u64m4(__riscv_vreinterpret_v_f64m4_u64m4(a),
+                                                                 __riscv_vreinterpret_v_f64m4_u64m4(b),
+                                                                 unpacket_traits<Packet4Xd>::size));
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xd pxor<Packet4Xd>(const Packet4Xd& a, const Packet4Xd& b) {
+  return __riscv_vreinterpret_v_u64m4_f64m4(__riscv_vxor_vv_u64m4(__riscv_vreinterpret_v_f64m4_u64m4(a),
+                                                                  __riscv_vreinterpret_v_f64m4_u64m4(b),
+                                                                  unpacket_traits<Packet4Xd>::size));
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xd pandnot<Packet4Xd>(const Packet4Xd& a, const Packet4Xd& b) {
+  return __riscv_vreinterpret_v_u64m4_f64m4(__riscv_vand_vv_u64m4(
+      __riscv_vreinterpret_v_f64m4_u64m4(a),
+      __riscv_vnot_v_u64m4(__riscv_vreinterpret_v_f64m4_u64m4(b), unpacket_traits<Packet4Xd>::size),
+      unpacket_traits<Packet4Xd>::size));
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xd pload<Packet4Xd>(const double* from) {
+  EIGEN_DEBUG_ALIGNED_LOAD return __riscv_vle64_v_f64m4(from, unpacket_traits<Packet4Xd>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xd ploadu<Packet4Xd>(const double* from) {
+  EIGEN_DEBUG_UNALIGNED_LOAD return __riscv_vle64_v_f64m4(from, unpacket_traits<Packet4Xd>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xd ploaddup<Packet4Xd>(const double* from) {
+  Packet4Xul idx = __riscv_vid_v_u64m4(unpacket_traits<Packet4Xd>::size);
+  idx = __riscv_vsll_vx_u64m4(__riscv_vand_vx_u64m4(idx, 0xfffffffffffffffeu, unpacket_traits<Packet4Xd>::size), 2,
+                              unpacket_traits<Packet4Xd>::size);
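+  // idx = 0 0 sizeof(double) sizeof(double) 2*sizeof(double) 2*sizeof(double) ...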
+  return __riscv_vloxei64_v_f64m4(from, idx, unpacket_traits<Packet4Xd>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xd ploadquad<Packet4Xd>(const double* from) {
+  Packet4Xul idx = __riscv_vid_v_u64m4(unpacket_traits<Packet4Xd>::size);
+  idx = __riscv_vsll_vx_u64m4(__riscv_vand_vx_u64m4(idx, 0xfffffffffffffffcu, unpacket_traits<Packet4Xd>::size), 1,
+                              unpacket_traits<Packet4Xd>::size);
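+  // idx = 0 0 0 0 sizeof(double) sizeof(double) sizeof(double) sizeof(double) ...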
+  return __riscv_vloxei64_v_f64m4(from, idx, unpacket_traits<Packet4Xd>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE void pstore<double>(double* to, const Packet4Xd& from) {
+  EIGEN_DEBUG_ALIGNED_STORE __riscv_vse64_v_f64m4(to, from, unpacket_traits<Packet4Xd>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE void pstoreu<double>(double* to, const Packet4Xd& from) {
+  EIGEN_DEBUG_UNALIGNED_STORE __riscv_vse64_v_f64m4(to, from, unpacket_traits<Packet4Xd>::size);
+}
+
+template <>
+EIGEN_DEVICE_FUNC inline Packet4Xd pgather<double, Packet4Xd>(const double* from, Index stride) {
+  return __riscv_vlse64_v_f64m4(from, stride * sizeof(double), unpacket_traits<Packet4Xd>::size);
+}
+
+template <>
+EIGEN_DEVICE_FUNC inline void pscatter<double, Packet4Xd>(double* to, const Packet4Xd& from, Index stride) {
+  __riscv_vsse64(to, stride * sizeof(double), from, unpacket_traits<Packet4Xd>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE double pfirst<Packet4Xd>(const Packet4Xd& a) {
+  return __riscv_vfmv_f_s_f64m4_f64(a);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xd psqrt(const Packet4Xd& a) {
+  return __riscv_vfsqrt_v_f64m4(a, unpacket_traits<Packet4Xd>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xd print<Packet4Xd>(const Packet4Xd& a) {
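+  // Same scheme as print<Packet4Xf>: round via int64 conversion, with the double integral threshold 2^52.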
+  const Packet4Xd limit = pset1<Packet4Xd>(static_cast<double>(1ull << 52));
+  const Packet4Xd abs_a = pabs(a);
+
+  PacketMask16 mask = __riscv_vmfne_vv_f64m4_b16(a, a, unpacket_traits<Packet4Xd>::size);
+  const Packet4Xd x = __riscv_vfadd_vv_f64m4_tumu(mask, a, a, a, unpacket_traits<Packet4Xd>::size);
+  const Packet4Xd new_x = __riscv_vfcvt_f_x_v_f64m4(
+      __riscv_vfcvt_x_f_v_i64m4(a, unpacket_traits<Packet4Xd>::size), unpacket_traits<Packet4Xd>::size);
+
+  mask = __riscv_vmflt_vv_f64m4_b16(abs_a, limit, unpacket_traits<Packet4Xd>::size);
+  Packet4Xd signed_x = __riscv_vfsgnj_vv_f64m4(new_x, x, unpacket_traits<Packet4Xd>::size);
+  return __riscv_vmerge_vvm_f64m4(x, signed_x, mask, unpacket_traits<Packet4Xd>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xd pfloor<Packet4Xd>(const Packet4Xd& a) {
+  Packet4Xd tmp = print<Packet4Xd>(a);
+  // print rounds to nearest; if the result exceeds a, subtract one to get the floor.
+  PacketMask16 mask = __riscv_vmflt_vv_f64m4_b16(a, tmp, unpacket_traits<Packet4Xd>::size);
+  return __riscv_vfsub_vf_f64m4_tumu(mask, tmp, tmp, 1.0, unpacket_traits<Packet4Xd>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xd preverse(const Packet4Xd& a) {
+  Packet4Xul idx =
+      __riscv_vrsub_vx_u64m4(__riscv_vid_v_u64m4(unpacket_traits<Packet4Xd>::size),
+                             unpacket_traits<Packet4Xd>::size - 1, unpacket_traits<Packet4Xd>::size);
+  return __riscv_vrgather_vv_f64m4(a, idx, unpacket_traits<Packet4Xd>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xd pfrexp<Packet4Xd>(const Packet4Xd& a, Packet4Xd& exponent) {
+  return pfrexp_generic(a, exponent);
+}
+
+template <>
+EIGEN_STRONG_INLINE double predux<Packet4Xd>(const Packet4Xd& a) {
+  return __riscv_vfmv_f(__riscv_vfredusum_vs_f64m4_f64m1(
+      a, __riscv_vfmv_v_f_f64m1(0.0, unpacket_traits<Packet4Xd>::size / 4), unpacket_traits<Packet4Xd>::size));
+}
+
+template <>
+EIGEN_STRONG_INLINE double predux_mul<Packet4Xd>(const Packet4Xd& a) {
+  Packet1Xd half1 = __riscv_vfmul_vv_f64m1(__riscv_vget_v_f64m4_f64m1(a, 0), __riscv_vget_v_f64m4_f64m1(a, 1),
+                                          unpacket_traits<Packet1Xd>::size);
+  Packet1Xd half2 = __riscv_vfmul_vv_f64m1(__riscv_vget_v_f64m4_f64m1(a, 2), __riscv_vget_v_f64m4_f64m1(a, 3),
+                                          unpacket_traits<Packet1Xd>::size);
+  return predux_mul<Packet1Xd>(__riscv_vfmul_vv_f64m1(half1, half2, unpacket_traits<Packet1Xd>::size));
+}
+
+template <>
+EIGEN_STRONG_INLINE double predux_min<Packet4Xd>(const Packet4Xd& a) {
+  return (std::min)(__riscv_vfmv_f(__riscv_vfredmin_vs_f64m4_f64m1(
+                        a,
+                        __riscv_vfmv_v_f_f64m1((std::numeric_limits<double>::quiet_NaN)(),
+                                               unpacket_traits<Packet4Xd>::size / 4),
+                        unpacket_traits<Packet4Xd>::size)),
+                    (std::numeric_limits<double>::max)());
+}
+
+template <>
+EIGEN_STRONG_INLINE double predux_max<Packet4Xd>(const Packet4Xd& a) {
+  return (std::max)(__riscv_vfmv_f(__riscv_vfredmax_vs_f64m4_f64m1(
+                        a,
+                        __riscv_vfmv_v_f_f64m1((std::numeric_limits<double>::quiet_NaN)(),
+                                               unpacket_traits<Packet4Xd>::size / 4),
+                        unpacket_traits<Packet4Xd>::size)),
+                    -(std::numeric_limits<double>::max)());
+}
+
+template <int N>
+EIGEN_DEVICE_FUNC inline void ptranspose(PacketBlock<Packet4Xd, N>& kernel) {
+  double buffer[unpacket_traits<Packet4Xd>::size * N];
+  int i = 0;
+
+  for (i = 0; i < N; i++) {
+    __riscv_vsse64(&buffer[i], N * sizeof(double), kernel.packet[i], unpacket_traits<Packet4Xd>::size);
+  }
+
+  for (i = 0; i < N; i++) {
+    kernel.packet[i] =
+        __riscv_vle64_v_f64m4(&buffer[i * unpacket_traits<Packet4Xd>::size], unpacket_traits<Packet4Xd>::size);
+  }
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xd pldexp<Packet4Xd>(const Packet4Xd& a, const Packet4Xd& exponent) {
+  return pldexp_generic(a, exponent);
+}
+
+/********************************* Packet4Xs ************************************/
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xs pset1<Packet4Xs>(const numext::int16_t& from) {
+  return __riscv_vmv_v_x_i16m4(from, unpacket_traits<Packet4Xs>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xs plset<Packet4Xs>(const numext::int16_t& a) {
+  Packet4Xs idx = __riscv_vreinterpret_v_u16m4_i16m4(__riscv_vid_v_u16m4(unpacket_traits<Packet4Xs>::size));
+  return __riscv_vadd_vx_i16m4(idx, a, unpacket_traits<Packet4Xs>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xs pzero<Packet4Xs>(const Packet4Xs& /*a*/) {
+  return __riscv_vmv_v_x_i16m4(0, unpacket_traits<Packet4Xs>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xs padd<Packet4Xs>(const Packet4Xs& a, const Packet4Xs& b) {
+  return __riscv_vadd_vv_i16m4(a, b, unpacket_traits<Packet4Xs>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xs psub<Packet4Xs>(const Packet4Xs& a, const Packet4Xs& b) {
+  return __riscv_vsub(a, b, unpacket_traits<Packet4Xs>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xs pnegate(const Packet4Xs& a) {
+  return __riscv_vneg(a, unpacket_traits<Packet4Xs>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xs pconj(const Packet4Xs& a) {
+  return a;
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xs pmul<Packet4Xs>(const Packet4Xs& a, const Packet4Xs& b) {
+  return __riscv_vmul(a, b, unpacket_traits<Packet4Xs>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xs pdiv<Packet4Xs>(const Packet4Xs& a, const Packet4Xs& b) {
+  return __riscv_vdiv(a, b, unpacket_traits<Packet4Xs>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xs pmadd(const Packet4Xs& a, const Packet4Xs& b, const Packet4Xs& c) {
+  return __riscv_vmadd(a, b, c, unpacket_traits<Packet4Xs>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xs pmsub(const Packet4Xs& a, const Packet4Xs& b, const Packet4Xs& c) {
+  return __riscv_vmadd(a, b, pnegate(c), unpacket_traits<Packet4Xs>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xs pnmadd(const Packet4Xs& a, const Packet4Xs& b, const Packet4Xs& c) {
+  return __riscv_vnmsub_vv_i16m4(a, b, c, unpacket_traits<Packet4Xs>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xs pnmsub(const Packet4Xs& a, const Packet4Xs& b, const Packet4Xs& c) {
+  return __riscv_vnmsub_vv_i16m4(a, b, pnegate(c), unpacket_traits<Packet4Xs>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xs pmin<Packet4Xs>(const Packet4Xs& a, const Packet4Xs& b) {
+  return __riscv_vmin(a, b, unpacket_traits<Packet4Xs>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xs pmax<Packet4Xs>(const Packet4Xs& a, const Packet4Xs& b) {
+  return __riscv_vmax(a, b, unpacket_traits<Packet4Xs>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xs pcmp_le<Packet4Xs>(const Packet4Xs& a, const Packet4Xs& b) {
+  PacketMask4 mask = __riscv_vmsle_vv_i16m4_b4(a, b, unpacket_traits<Packet4Xs>::size);
+  return __riscv_vmerge_vxm_i16m4(pzero(a), static_cast<short>(0xffff), mask, unpacket_traits<Packet4Xs>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xs pcmp_lt<Packet4Xs>(const Packet4Xs& a, const Packet4Xs& b) {
+  PacketMask4 mask = __riscv_vmslt_vv_i16m4_b4(a, b, unpacket_traits<Packet4Xs>::size);
+  return __riscv_vmerge_vxm_i16m4(pzero(a), static_cast<short>(0xffff), mask, unpacket_traits<Packet4Xs>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xs pcmp_eq<Packet4Xs>(const Packet4Xs& a, const Packet4Xs& b) {
+  PacketMask4 mask = __riscv_vmseq_vv_i16m4_b4(a, b, unpacket_traits<Packet4Xs>::size);
+  return __riscv_vmerge_vxm_i16m4(pzero(a), static_cast<short>(0xffff), mask, unpacket_traits<Packet4Xs>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xs ptrue<Packet4Xs>(const Packet4Xs& /*a*/) {
+  return __riscv_vmv_v_x_i16m4(static_cast<unsigned short>(0xffffu), unpacket_traits<Packet4Xs>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xs pand<Packet4Xs>(const Packet4Xs& a, const Packet4Xs& b) {
+  return __riscv_vand_vv_i16m4(a, b, unpacket_traits<Packet4Xs>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xs por<Packet4Xs>(const Packet4Xs& a, const Packet4Xs& b) {
+  return __riscv_vor_vv_i16m4(a, b, unpacket_traits<Packet4Xs>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xs pxor<Packet4Xs>(const Packet4Xs& a, const Packet4Xs& b) {
+  return __riscv_vxor_vv_i16m4(a, b, unpacket_traits<Packet4Xs>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xs pandnot<Packet4Xs>(const Packet4Xs& a, const Packet4Xs& b) {
+  return __riscv_vand_vv_i16m4(a, __riscv_vnot_v_i16m4(b, unpacket_traits<Packet4Xs>::size),
+                               unpacket_traits<Packet4Xs>::size);
+}
+
+template <int N>
+EIGEN_STRONG_INLINE Packet4Xs parithmetic_shift_right(Packet4Xs a) {
+  return __riscv_vsra_vx_i16m4(a, N, unpacket_traits<Packet4Xs>::size);
+}
+
+template <int N>
+EIGEN_STRONG_INLINE Packet4Xs plogical_shift_right(Packet4Xs a) {
+  return __riscv_vreinterpret_i16m4(
+      __riscv_vsrl_vx_u16m4(__riscv_vreinterpret_u16m4(a), N, unpacket_traits<Packet4Xs>::size));
+}
+
+template <int N>
+EIGEN_STRONG_INLINE Packet4Xs plogical_shift_left(Packet4Xs a) {
+  return __riscv_vsll_vx_i16m4(a, N, unpacket_traits<Packet4Xs>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xs pload<Packet4Xs>(const numext::int16_t* from) {
+  EIGEN_DEBUG_ALIGNED_LOAD return __riscv_vle16_v_i16m4(from, unpacket_traits<Packet4Xs>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xs ploadu<Packet4Xs>(const numext::int16_t* from) {
+  EIGEN_DEBUG_UNALIGNED_LOAD return __riscv_vle16_v_i16m4(from, unpacket_traits<Packet4Xs>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xs ploaddup<Packet4Xs>(const numext::int16_t* from) {
+  Packet4Xsu idx = __riscv_vid_v_u16m4(unpacket_traits<Packet4Xs>::size);
+  idx = __riscv_vand_vx_u16m4(idx, 0xfffeu, unpacket_traits<Packet4Xs>::size);
+  // idx = 0 0 sizeof(int16_t) sizeof(int16_t) 2*sizeof(int16_t) 2*sizeof(int16_t) ...
+  return __riscv_vloxei16_v_i16m4(from, idx, unpacket_traits<Packet4Xs>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xs ploadquad<Packet4Xs>(const numext::int16_t* from) {
+  Packet4Xsu idx = __riscv_vid_v_u16m4(unpacket_traits<Packet4Xs>::size);
+  idx = __riscv_vsrl_vx_u16m4(__riscv_vand_vx_u16m4(idx, 0xfffcu, unpacket_traits<Packet4Xs>::size), 1,
+                              unpacket_traits<Packet4Xs>::size);
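+  // idx = 0 0 0 0 sizeof(int16_t) sizeof(int16_t) sizeof(int16_t) sizeof(int16_t) ...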
+  return __riscv_vloxei16_v_i16m4(from, idx, unpacket_traits<Packet4Xs>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE void pstore<numext::int16_t>(numext::int16_t* to, const Packet4Xs& from) {
+  EIGEN_DEBUG_ALIGNED_STORE __riscv_vse16_v_i16m4(to, from, unpacket_traits<Packet4Xs>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE void pstoreu<numext::int16_t>(numext::int16_t* to, const Packet4Xs& from) {
+  EIGEN_DEBUG_UNALIGNED_STORE __riscv_vse16_v_i16m4(to, from, unpacket_traits<Packet4Xs>::size);
+}
+
+template <>
+EIGEN_DEVICE_FUNC inline Packet4Xs pgather<numext::int16_t, Packet4Xs>(const numext::int16_t* from,
+                                                                             Index stride) {
+  return __riscv_vlse16_v_i16m4(from, stride * sizeof(numext::int16_t), unpacket_traits<Packet4Xs>::size);
+}
+
+template <>
+EIGEN_DEVICE_FUNC inline void pscatter<numext::int16_t, Packet4Xs>(numext::int16_t* to, const Packet4Xs& from,
+                                                                      Index stride) {
+  __riscv_vsse16(to, stride * sizeof(numext::int16_t), from, unpacket_traits<Packet4Xs>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE numext::int16_t pfirst<Packet4Xs>(const Packet4Xs& a) {
+  return __riscv_vmv_x_s_i16m4_i16(a);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xs preverse(const Packet4Xs& a) {
+  Packet4Xsu idx =
+      __riscv_vrsub_vx_u16m4(__riscv_vid_v_u16m4(unpacket_traits<Packet4Xs>::size),
+                             unpacket_traits<Packet4Xs>::size - 1, unpacket_traits<Packet4Xs>::size);
+  return __riscv_vrgather_vv_i16m4(a, idx, unpacket_traits<Packet4Xs>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xs pabs(const Packet4Xs& a) {
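+  // Branchless abs: mask = a >> 15 is all ones for negative lanes; (a ^ mask) - mask negates them.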
+  Packet4Xs mask = __riscv_vsra_vx_i16m4(a, 15, unpacket_traits<Packet4Xs>::size);
+  return __riscv_vsub_vv_i16m4(__riscv_vxor_vv_i16m4(a, mask, unpacket_traits<Packet4Xs>::size), mask,
+                               unpacket_traits<Packet4Xs>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE numext::int16_t predux<Packet4Xs>(const Packet4Xs& a) {
+  return __riscv_vmv_x(__riscv_vredsum_vs_i16m4_i16m1(
+      a, __riscv_vmv_v_x_i16m1(0, unpacket_traits<Packet4Xs>::size / 4), unpacket_traits<Packet4Xs>::size));
+}
+
+template <>
+EIGEN_STRONG_INLINE numext::int16_t predux_mul<Packet4Xs>(const Packet4Xs& a) {
+  Packet1Xs half1 = __riscv_vmul_vv_i16m1(__riscv_vget_v_i16m4_i16m1(a, 0), __riscv_vget_v_i16m4_i16m1(a, 1),
+                                         unpacket_traits<Packet1Xs>::size);
+  Packet1Xs half2 = __riscv_vmul_vv_i16m1(__riscv_vget_v_i16m4_i16m1(a, 2), __riscv_vget_v_i16m4_i16m1(a, 3),
+                                         unpacket_traits<Packet1Xs>::size);
+  return predux_mul<Packet1Xs>(__riscv_vmul_vv_i16m1(half1, half2, unpacket_traits<Packet1Xs>::size));
+}
+
+template <>
+EIGEN_STRONG_INLINE numext::int16_t predux_min<Packet4Xs>(const Packet4Xs& a) {
+  return __riscv_vmv_x(__riscv_vredmin_vs_i16m4_i16m1(
+      a, __riscv_vmv_v_x_i16m1((std::numeric_limits<numext::int16_t>::max)(), unpacket_traits<Packet4Xs>::size / 4),
+      unpacket_traits<Packet4Xs>::size));
+}
+
+template <>
+EIGEN_STRONG_INLINE numext::int16_t predux_max<Packet4Xs>(const Packet4Xs& a) {
+  return __riscv_vmv_x(__riscv_vredmax_vs_i16m4_i16m1(
+      a, __riscv_vmv_v_x_i16m1((std::numeric_limits<numext::int16_t>::min)(), unpacket_traits<Packet4Xs>::size / 4),
+      unpacket_traits<Packet4Xs>::size));
+}
+
+template <int N>
+EIGEN_DEVICE_FUNC inline void ptranspose(PacketBlock<Packet4Xs, N>& kernel) {
+  numext::int16_t buffer[unpacket_traits<Packet4Xs>::size * N] = {0};
+  int i = 0;
+
+  for (i = 0; i < N; i++) {
+    __riscv_vsse16(&buffer[i], N * sizeof(numext::int16_t), kernel.packet[i], unpacket_traits<Packet4Xs>::size);
+  }
+  for (i = 0; i < N; i++) {
+    kernel.packet[i] =
+        __riscv_vle16_v_i16m4(&buffer[i * unpacket_traits<Packet4Xs>::size], unpacket_traits<Packet4Xs>::size);
+  }
+}
+
+}  // namespace internal
+}  // namespace Eigen
+
+#endif  // EIGEN_PACKET4_MATH_RVV10_H
diff --git a/Eigen/src/Core/arch/RVV10/PacketMathFP16.h b/Eigen/src/Core/arch/RVV10/PacketMathFP16.h
new file mode 100644
index 0000000..fbda191
--- /dev/null
+++ b/Eigen/src/Core/arch/RVV10/PacketMathFP16.h
@@ -0,0 +1,922 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2025 Kseniya Zaytseva <kseniya.zaytseva@syntacore.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_PACKET_MATH_FP16_RVV10_H
+#define EIGEN_PACKET_MATH_FP16_RVV10_H
+
+// IWYU pragma: private
+#include "../../InternalHeaderCheck.h"
+
+namespace Eigen {
+namespace internal {
+
+typedef vfloat16m1_t Packet1Xh __attribute__((riscv_rvv_vector_bits(EIGEN_RISCV64_RVV_VL)));
+typedef vfloat16m2_t Packet2Xh __attribute__((riscv_rvv_vector_bits(EIGEN_RISCV64_RVV_VL * 2)));
+
+#if EIGEN_RISCV64_DEFAULT_LMUL == 1
+typedef Packet1Xh PacketXh;
+
+template <>
+struct packet_traits<Eigen::half> : default_packet_traits {
+  typedef Packet1Xh type;
+  typedef Packet1Xh half;
+
+  enum {
+    Vectorizable = 1,
+    AlignedOnScalar = 1,
+    size = rvv_packet_size_selector<Eigen::half, EIGEN_RISCV64_RVV_VL, 1>::size,
+
+    HasAdd = 1,
+    HasSub = 1,
+    HasShift = 1,
+    HasMul = 1,
+    HasNegate = 1,
+    HasAbs = 1,
+    HasArg = 0,
+    HasAbs2 = 1,
+    HasMin = 1,
+    HasMax = 1,
+    HasConj = 1,
+    HasSetLinear = 0,
+    HasBlend = 0,
+    HasReduxp = 0,
+
+    HasCmp = 1,
+    HasDiv = 1,
+    HasRound = 1,
+
+    HasSin = EIGEN_FAST_MATH,
+    HasCos = EIGEN_FAST_MATH,
+    HasLog = 0,
+    HasExp = 0,
+    HasSqrt = 1,
+    HasTanh = EIGEN_FAST_MATH,
+    HasErf = 0
+  };
+};
+
+#else
+typedef Packet2Xh PacketXh;
+
+template <>
+struct packet_traits<Eigen::half> : default_packet_traits {
+  typedef Packet2Xh type;
+  typedef Packet1Xh half;
+
+  enum {
+    Vectorizable = 1,
+    AlignedOnScalar = 1,
+    size = rvv_packet_size_selector<Eigen::half, EIGEN_RISCV64_RVV_VL, 2>::size,
+
+    HasAdd = 1,
+    HasSub = 1,
+    HasShift = 1,
+    HasMul = 1,
+    HasNegate = 1,
+    HasAbs = 1,
+    HasArg = 0,
+    HasAbs2 = 1,
+    HasMin = 1,
+    HasMax = 1,
+    HasConj = 1,
+    HasSetLinear = 0,
+    HasBlend = 0,
+    HasReduxp = 0,
+
+    HasCmp = 1,
+    HasDiv = 1,
+    HasRound = 1,
+
+    HasSin = EIGEN_FAST_MATH,
+    HasCos = EIGEN_FAST_MATH,
+    HasLog = 0,
+    HasExp = 0,
+    HasSqrt = 1,
+    HasTanh = EIGEN_FAST_MATH,
+    HasErf = 0
+  };
+};
+#endif
+
+template <>
+struct unpacket_traits<Packet1Xh> {
+  typedef Eigen::half type;
+  typedef Packet1Xh half;  // Half not yet implemented
+  typedef Packet1Xs integer_packet;
+  typedef numext::uint8_t mask_t;
+
+  enum {
+    size = rvv_packet_size_selector<Eigen::half, EIGEN_RISCV64_RVV_VL, 1>::size,
+    alignment = rvv_packet_alignment_selector<EIGEN_RISCV64_RVV_VL, 1>::alignment,
+    vectorizable = true,
+    masked_load_available = false,
+    masked_store_available = false
+  };
+};
+
+template <>
+struct unpacket_traits<Packet2Xh> {
+  typedef Eigen::half type;
+  typedef Packet1Xh half;
+  typedef Packet2Xs integer_packet;
+  typedef numext::uint8_t mask_t;
+
+  enum {
+    size = rvv_packet_size_selector<Eigen::half, EIGEN_RISCV64_RVV_VL, 2>::size,
+    alignment = rvv_packet_alignment_selector<EIGEN_RISCV64_RVV_VL, 2>::alignment,
+    vectorizable = true,
+    masked_load_available = false,
+    masked_store_available = false
+  };
+};
+
+/********************************* PacketXh ************************************/
+
+template <>
+EIGEN_STRONG_INLINE PacketXh ptrue<PacketXh>(const PacketXh& /*a*/) {
+  return __riscv_vreinterpret_f16m1(__riscv_vmv_v_x_u16m1(0xffffu, unpacket_traits<PacketXh>::size));
+}
+
+template <>
+EIGEN_STRONG_INLINE PacketXh pzero<PacketXh>(const PacketXh& /*a*/) {
+  return __riscv_vfmv_v_f_f16m1(static_cast<Eigen::half>(0.0), unpacket_traits<PacketXh>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE PacketXh pabs(const PacketXh& a) {
+  return __riscv_vfabs_v_f16m1(a, unpacket_traits<PacketXh>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE PacketXh pset1<PacketXh>(const Eigen::half& from) {
+  return __riscv_vfmv_v_f_f16m1(static_cast<_Float16>(from), unpacket_traits<PacketXh>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE PacketXh pset1frombits<PacketXh>(numext::uint16_t from) {
+  return __riscv_vreinterpret_f16m1(__riscv_vmv_v_x_u16m1(from, unpacket_traits<PacketXh>::size));
+}
+
+template <>
+EIGEN_STRONG_INLINE PacketXh plset<PacketXh>(const Eigen::half& a) {
+  PacketXh idx = __riscv_vfcvt_f_x_v_f16m1(
+      __riscv_vreinterpret_v_u16m1_i16m1(__riscv_vid_v_u16m1(unpacket_traits<PacketXh>::size)),
+      unpacket_traits<PacketXh>::size);
+  return __riscv_vfadd_vf_f16m1(idx, static_cast<_Float16>(a), unpacket_traits<PacketXh>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE PacketXh padd<PacketXh>(const PacketXh& a, const PacketXh& b) {
+  return __riscv_vfadd_vv_f16m1(a, b, unpacket_traits<PacketXh>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE PacketXh psub<PacketXh>(const PacketXh& a, const PacketXh& b) {
+  return __riscv_vfsub_vv_f16m1(a, b, unpacket_traits<PacketXh>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE PacketXh pnegate(const PacketXh& a) {
+  return __riscv_vfneg_v_f16m1(a, unpacket_traits<PacketXh>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE PacketXh pconj(const PacketXh& a) {
+  return a;
+}
+
+template <>
+EIGEN_STRONG_INLINE PacketXh pmul<PacketXh>(const PacketXh& a, const PacketXh& b) {
+  return __riscv_vfmul_vv_f16m1(a, b, unpacket_traits<PacketXh>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE PacketXh pdiv<PacketXh>(const PacketXh& a, const PacketXh& b) {
+  return __riscv_vfdiv_vv_f16m1(a, b, unpacket_traits<PacketXh>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE PacketXh pmadd(const PacketXh& a, const PacketXh& b, const PacketXh& c) {
+  return __riscv_vfmadd_vv_f16m1(a, b, c, unpacket_traits<PacketXh>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE PacketXh pmsub(const PacketXh& a, const PacketXh& b, const PacketXh& c) {
+  return __riscv_vfmsub_vv_f16m1(a, b, c, unpacket_traits<PacketXh>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE PacketXh pnmadd(const PacketXh& a, const PacketXh& b, const PacketXh& c) {
+  return __riscv_vfnmsub_vv_f16m1(a, b, c, unpacket_traits<PacketXh>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE PacketXh pnmsub(const PacketXh& a, const PacketXh& b, const PacketXh& c) {
+  return __riscv_vfnmadd_vv_f16m1(a, b, c, unpacket_traits<PacketXh>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE PacketXh pmin<PacketXh>(const PacketXh& a, const PacketXh& b) {
+  PacketXh nans =
+      __riscv_vfmv_v_f_f16m1((std::numeric_limits<Eigen::half>::quiet_NaN)(), unpacket_traits<PacketXh>::size);
+  PacketMask16 mask = __riscv_vmfeq_vv_f16m1_b16(a, a, unpacket_traits<PacketXh>::size);
+  PacketMask16 mask2 = __riscv_vmfeq_vv_f16m1_b16(b, b, unpacket_traits<PacketXh>::size);
+  mask = __riscv_vmand_mm_b16(mask, mask2, unpacket_traits<PacketXh>::size);
+
+  return __riscv_vfmin_vv_f16m1_tumu(mask, nans, a, b, unpacket_traits<PacketXh>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE PacketXh pmin<PropagateNaN, PacketXh>(const PacketXh& a, const PacketXh& b) {
+  return pmin<PacketXh>(a, b);
+}
+
+template <>
+EIGEN_STRONG_INLINE PacketXh pmin<PropagateNumbers, PacketXh>(const PacketXh& a, const PacketXh& b) {
+  return __riscv_vfmin_vv_f16m1(a, b, unpacket_traits<PacketXh>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE PacketXh pmax<PacketXh>(const PacketXh& a, const PacketXh& b) {
+  PacketXh nans =
+      __riscv_vfmv_v_f_f16m1((std::numeric_limits<Eigen::half>::quiet_NaN)(), unpacket_traits<PacketXh>::size);
+  PacketMask16 mask = __riscv_vmfeq_vv_f16m1_b16(a, a, unpacket_traits<PacketXh>::size);
+  PacketMask16 mask2 = __riscv_vmfeq_vv_f16m1_b16(b, b, unpacket_traits<PacketXh>::size);
+  mask = __riscv_vmand_mm_b16(mask, mask2, unpacket_traits<PacketXh>::size);
+
+  return __riscv_vfmax_vv_f16m1_tumu(mask, nans, a, b, unpacket_traits<PacketXh>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE PacketXh pmax<PropagateNaN, PacketXh>(const PacketXh& a, const PacketXh& b) {
+  return pmax<PacketXh>(a, b);
+}
+
+template <>
+EIGEN_STRONG_INLINE PacketXh pmax<PropagateNumbers, PacketXh>(const PacketXh& a, const PacketXh& b) {
+  return __riscv_vfmax_vv_f16m1(a, b, unpacket_traits<PacketXh>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE PacketXh pcmp_le<PacketXh>(const PacketXh& a, const PacketXh& b) {
+  PacketMask16 mask = __riscv_vmfle_vv_f16m1_b16(a, b, unpacket_traits<PacketXh>::size);
+  return __riscv_vmerge_vvm_f16m1(pzero<PacketXh>(a), ptrue<PacketXh>(a), mask, unpacket_traits<PacketXh>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE PacketXh pcmp_lt<PacketXh>(const PacketXh& a, const PacketXh& b) {
+  PacketMask16 mask = __riscv_vmflt_vv_f16m1_b16(a, b, unpacket_traits<PacketXh>::size);
+  return __riscv_vmerge_vvm_f16m1(pzero<PacketXh>(a), ptrue<PacketXh>(a), mask, unpacket_traits<PacketXh>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE PacketXh pcmp_eq<PacketXh>(const PacketXh& a, const PacketXh& b) {
+  PacketMask16 mask = __riscv_vmfeq_vv_f16m1_b16(a, b, unpacket_traits<PacketXh>::size);
+  return __riscv_vmerge_vvm_f16m1(pzero<PacketXh>(a), ptrue<PacketXh>(a), mask, unpacket_traits<PacketXh>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE PacketXh pcmp_lt_or_nan<PacketXh>(const PacketXh& a, const PacketXh& b) {
+  PacketMask16 mask = __riscv_vmfge_vv_f16m1_b16(a, b, unpacket_traits<PacketXh>::size);
+  return __riscv_vfmerge_vfm_f16m1(ptrue<PacketXh>(a), static_cast<Eigen::half>(0.0), mask,
+                                   unpacket_traits<PacketXh>::size);
+}
+
+// Bitwise logical operations are not defined for half, so reinterpret to unsigned integers, operate, and cast back
+template <>
+EIGEN_STRONG_INLINE PacketXh pand<PacketXh>(const PacketXh& a, const PacketXh& b) {
+  return __riscv_vreinterpret_v_u16m1_f16m1(__riscv_vand_vv_u16m1(
+      __riscv_vreinterpret_v_f16m1_u16m1(a), __riscv_vreinterpret_v_f16m1_u16m1(b), unpacket_traits<PacketXh>::size));
+}
+
+template <>
+EIGEN_STRONG_INLINE PacketXh por<PacketXh>(const PacketXh& a, const PacketXh& b) {
+  return __riscv_vreinterpret_v_u16m1_f16m1(__riscv_vor_vv_u16m1(
+      __riscv_vreinterpret_v_f16m1_u16m1(a), __riscv_vreinterpret_v_f16m1_u16m1(b), unpacket_traits<PacketXh>::size));
+}
+
+template <>
+EIGEN_STRONG_INLINE PacketXh pxor<PacketXh>(const PacketXh& a, const PacketXh& b) {
+  return __riscv_vreinterpret_v_u16m1_f16m1(__riscv_vxor_vv_u16m1(
+      __riscv_vreinterpret_v_f16m1_u16m1(a), __riscv_vreinterpret_v_f16m1_u16m1(b), unpacket_traits<PacketXh>::size));
+}
+
+template <>
+EIGEN_STRONG_INLINE PacketXh pandnot<PacketXh>(const PacketXh& a, const PacketXh& b) {
+  return __riscv_vreinterpret_v_u16m1_f16m1(__riscv_vand_vv_u16m1(
+      __riscv_vreinterpret_v_f16m1_u16m1(a),
+      __riscv_vnot_v_u16m1(__riscv_vreinterpret_v_f16m1_u16m1(b), unpacket_traits<PacketXh>::size),
+      unpacket_traits<PacketXh>::size));
+}
+
+template <>
+EIGEN_STRONG_INLINE PacketXh pload<PacketXh>(const Eigen::half* from) {
+  EIGEN_DEBUG_ALIGNED_LOAD return __riscv_vle16_v_f16m1(reinterpret_cast<const _Float16*>(from),
+                                                        unpacket_traits<PacketXh>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE PacketXh ploadu<PacketXh>(const Eigen::half* from) {
+  EIGEN_DEBUG_UNALIGNED_LOAD return __riscv_vle16_v_f16m1(reinterpret_cast<const _Float16*>(from),
+                                                          unpacket_traits<PacketXh>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE PacketXh ploaddup<PacketXh>(const Eigen::half* from) {
+  PacketXsu idx = __riscv_vid_v_u16m1(unpacket_traits<PacketXh>::size);
+  idx = __riscv_vand_vx_u16m1(idx, 0xfffeu, unpacket_traits<PacketXh>::size);
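+  // idx = 0 0 sizeof(Eigen::half) sizeof(Eigen::half) 2*sizeof(Eigen::half) 2*sizeof(Eigen::half) ...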
+  return __riscv_vloxei16_v_f16m1(reinterpret_cast<const _Float16*>(from), idx, unpacket_traits<PacketXh>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE PacketXh ploadquad<PacketXh>(const Eigen::half* from) {
+  PacketXsu idx = __riscv_vid_v_u16m1(unpacket_traits<PacketXh>::size);
+  idx = __riscv_vsrl_vx_u16m1(__riscv_vand_vx_u16m1(idx, 0xfffcu, unpacket_traits<PacketXh>::size), 1,
+                              unpacket_traits<PacketXh>::size);
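+  // idx = 0 0 0 0 sizeof(Eigen::half) sizeof(Eigen::half) sizeof(Eigen::half) sizeof(Eigen::half) ...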
+  return __riscv_vloxei16_v_f16m1(reinterpret_cast<const _Float16*>(from), idx, unpacket_traits<PacketXh>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE void pstore<Eigen::half>(Eigen::half* to, const PacketXh& from) {
+  EIGEN_DEBUG_ALIGNED_STORE __riscv_vse16_v_f16m1(reinterpret_cast<_Float16*>(to), from,
+                                                  unpacket_traits<PacketXh>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE void pstoreu<Eigen::half>(Eigen::half* to, const PacketXh& from) {
+  EIGEN_DEBUG_UNALIGNED_STORE __riscv_vse16_v_f16m1(reinterpret_cast<_Float16*>(to), from,
+                                                    unpacket_traits<PacketXh>::size);
+}
+
+template <>
+EIGEN_DEVICE_FUNC inline PacketXh pgather<Eigen::half, PacketXh>(const Eigen::half* from, Index stride) {
+  return __riscv_vlse16_v_f16m1(reinterpret_cast<const _Float16*>(from), stride * sizeof(Eigen::half),
+                                unpacket_traits<PacketXh>::size);
+}
+
+template <>
+EIGEN_DEVICE_FUNC inline void pscatter<Eigen::half, PacketXh>(Eigen::half* to, const PacketXh& from, Index stride) {
+  __riscv_vsse16(reinterpret_cast<_Float16*>(to), stride * sizeof(Eigen::half), from, unpacket_traits<PacketXh>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Eigen::half pfirst<PacketXh>(const PacketXh& a) {
+  return static_cast<Eigen::half>(__riscv_vfmv_f_s_f16m1_f16(a));
+}
+
+template <>
+EIGEN_STRONG_INLINE PacketXh psqrt(const PacketXh& a) {
+  return __riscv_vfsqrt_v_f16m1(a, unpacket_traits<PacketXh>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE PacketXh print<PacketXh>(const PacketXh& a) {
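+  // Round to nearest integer by converting to int16 and back; lanes with
+  // |a| >= 2^10 are already integral and pass through, NaNs are quieted via a + a.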
+  const PacketXh limit = pset1<PacketXh>(static_cast<Eigen::half>(1 << 10));
+  const PacketXh abs_a = pabs(a);
+
+  PacketMask16 mask = __riscv_vmfne_vv_f16m1_b16(a, a, unpacket_traits<PacketXh>::size);
+  const PacketXh x = __riscv_vfadd_vv_f16m1_tumu(mask, a, a, a, unpacket_traits<PacketXh>::size);
+  const PacketXh new_x = __riscv_vfcvt_f_x_v_f16m1(__riscv_vfcvt_x_f_v_i16m1(a, unpacket_traits<PacketXh>::size),
+                                                   unpacket_traits<PacketXh>::size);
+
+  mask = __riscv_vmflt_vv_f16m1_b16(abs_a, limit, unpacket_traits<PacketXh>::size);
+  PacketXh signed_x = __riscv_vfsgnj_vv_f16m1(new_x, x, unpacket_traits<PacketXh>::size);
+  return __riscv_vmerge_vvm_f16m1(x, signed_x, mask, unpacket_traits<PacketXh>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE PacketXh pfloor<PacketXh>(const PacketXh& a) {
+  PacketXh tmp = print<PacketXh>(a);
+  // If greater, subtract one.
+  PacketMask16 mask = __riscv_vmflt_vv_f16m1_b16(a, tmp, unpacket_traits<PacketXh>::size);
+  return __riscv_vfsub_vf_f16m1_tum(mask, tmp, tmp, static_cast<Eigen::half>(1.0), unpacket_traits<PacketXh>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE PacketXh preverse(const PacketXh& a) {
+  PacketXsu idx = __riscv_vrsub_vx_u16m1(__riscv_vid_v_u16m1(unpacket_traits<PacketXh>::size),
+                                         unpacket_traits<PacketXh>::size - 1, unpacket_traits<PacketXh>::size);
+  return __riscv_vrgather_vv_f16m1(a, idx, unpacket_traits<PacketXh>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Eigen::half predux<PacketXh>(const PacketXh& a) {
+  return static_cast<Eigen::half>(__riscv_vfmv_f(__riscv_vfredusum_vs_f16m1_f16m1(
+      a, __riscv_vfmv_v_f_f16m1(static_cast<Eigen::half>(0.0), unpacket_traits<PacketXh>::size),
+      unpacket_traits<PacketXh>::size)));
+}
+
+template <>
+EIGEN_STRONG_INLINE Eigen::half predux_mul<PacketXh>(const PacketXh& a) {
+  // Multiply the vector by its reverse
+  PacketXh prod = __riscv_vfmul_vv_f16m1(preverse(a), a, unpacket_traits<PacketXh>::size);
+  PacketXh half_prod;
+
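+  // After the reverse-multiply the full product is spread over the low
+  // lanes; fold the upper half onto the lower half until lane 0 holds it.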
+  if (EIGEN_RISCV64_RVV_VL >= 1024) {
+    half_prod = __riscv_vslidedown_vx_f16m1(prod, 16, unpacket_traits<PacketXh>::size);
+    prod = __riscv_vfmul_vv_f16m1(prod, half_prod, unpacket_traits<PacketXh>::size);
+  }
+  if (EIGEN_RISCV64_RVV_VL >= 512) {
+    half_prod = __riscv_vslidedown_vx_f16m1(prod, 8, unpacket_traits<PacketXh>::size);
+    prod = __riscv_vfmul_vv_f16m1(prod, half_prod, unpacket_traits<PacketXh>::size);
+  }
+  if (EIGEN_RISCV64_RVV_VL >= 256) {
+    half_prod = __riscv_vslidedown_vx_f16m1(prod, 4, unpacket_traits<PacketXh>::size);
+    prod = __riscv_vfmul_vv_f16m1(prod, half_prod, unpacket_traits<PacketXh>::size);
+  }
+  // Last reduction
+  half_prod = __riscv_vslidedown_vx_f16m1(prod, 2, unpacket_traits<PacketXh>::size);
+  prod = __riscv_vfmul_vv_f16m1(prod, half_prod, unpacket_traits<PacketXh>::size);
+
+  half_prod = __riscv_vslidedown_vx_f16m1(prod, 1, unpacket_traits<PacketXh>::size);
+  prod = __riscv_vfmul_vv_f16m1(prod, half_prod, unpacket_traits<PacketXh>::size);
+
+  // The reduction is done to the first element.
+  return pfirst(prod);
+}
+
+template <>
+EIGEN_STRONG_INLINE Eigen::half predux_min<PacketXh>(const PacketXh& a) {
+  return static_cast<Eigen::half>(__riscv_vfmv_f(__riscv_vfredmin_vs_f16m1_f16m1(
+      a, __riscv_vfmv_v_f_f16m1((std::numeric_limits<Eigen::half>::max)(), unpacket_traits<PacketXh>::size),
+      unpacket_traits<PacketXh>::size)));
+}
+
+template <>
+EIGEN_STRONG_INLINE Eigen::half predux_max<PacketXh>(const PacketXh& a) {
+  return static_cast<Eigen::half>(__riscv_vfmv_f(__riscv_vfredmax_vs_f16m1_f16m1(
+      a, __riscv_vfmv_v_f_f16m1(-(std::numeric_limits<Eigen::half>::max)(), unpacket_traits<PacketXh>::size),
+      unpacket_traits<PacketXh>::size)));
+}
+
+template <int N>
+EIGEN_DEVICE_FUNC inline void ptranspose(PacketBlock<PacketXh, N>& kernel) {
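+  // Transpose through memory: scatter each packet with stride N so that
+  // buffer holds the transposed layout, then reload it contiguously.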
+  Eigen::half buffer[unpacket_traits<PacketXh>::size * N];
+  int i = 0;
+
+  for (i = 0; i < N; i++) {
+    __riscv_vsse16(reinterpret_cast<_Float16*>(&buffer[i]), N * sizeof(Eigen::half), kernel.packet[i],
+                   unpacket_traits<PacketXh>::size);
+  }
+
+  for (i = 0; i < N; i++) {
+    kernel.packet[i] = __riscv_vle16_v_f16m1(reinterpret_cast<_Float16*>(&buffer[i * unpacket_traits<PacketXh>::size]),
+                                             unpacket_traits<PacketXh>::size);
+  }
+}
+
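+// Widening f16 -> f32 and narrowing f32 -> f16 conversions; these back the
+// F16_PACKET_FUNCTION wrappers below.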
+EIGEN_STRONG_INLINE Packet2Xf half2float(const PacketXh& a) {
+  return __riscv_vfwcvt_f_f_v_f32m2(a, unpacket_traits<Packet2Xf>::size);
+}
+
+EIGEN_STRONG_INLINE PacketXh float2half(const Packet2Xf& a) {
+  return __riscv_vfncvt_f_f_w_f16m1(a, unpacket_traits<PacketXh>::size);
+}
+
+/********************************* Packet2Xh ************************************/
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xh ptrue<Packet2Xh>(const Packet2Xh& /*a*/) {
+  return __riscv_vreinterpret_f16m2(__riscv_vmv_v_x_u16m2(0xffffu, unpacket_traits<Packet2Xh>::size));
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xh pzero<Packet2Xh>(const Packet2Xh& /*a*/) {
+  return __riscv_vfmv_v_f_f16m2(static_cast<Eigen::half>(0.0), unpacket_traits<Packet2Xh>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xh pabs(const Packet2Xh& a) {
+  return __riscv_vfabs_v_f16m2(a, unpacket_traits<Packet2Xh>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xh pset1<Packet2Xh>(const Eigen::half& from) {
+  return __riscv_vfmv_v_f_f16m2(static_cast<_Float16>(from), unpacket_traits<Packet2Xh>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xh pset1frombits<Packet2Xh>(numext::uint16_t from) {
+  return __riscv_vreinterpret_f16m2(__riscv_vmv_v_x_u16m2(from, unpacket_traits<Packet2Xh>::size));
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xh plset<Packet2Xh>(const Eigen::half& a) {
+  Packet2Xh idx = __riscv_vfcvt_f_x_v_f16m2(
+      __riscv_vreinterpret_v_u16m2_i16m2(__riscv_vid_v_u16m2(unpacket_traits<Packet2Xh>::size)),
+      unpacket_traits<Packet2Xh>::size);
+  return __riscv_vfadd_vf_f16m2(idx, a, unpacket_traits<Packet2Xh>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xh padd<Packet2Xh>(const Packet2Xh& a, const Packet2Xh& b) {
+  return __riscv_vfadd_vv_f16m2(a, b, unpacket_traits<Packet2Xh>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xh psub<Packet2Xh>(const Packet2Xh& a, const Packet2Xh& b) {
+  return __riscv_vfsub_vv_f16m2(a, b, unpacket_traits<Packet2Xh>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xh pnegate(const Packet2Xh& a) {
+  return __riscv_vfneg_v_f16m2(a, unpacket_traits<Packet2Xh>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xh pconj(const Packet2Xh& a) {
+  return a;
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xh pmul<Packet2Xh>(const Packet2Xh& a, const Packet2Xh& b) {
+  return __riscv_vfmul_vv_f16m2(a, b, unpacket_traits<Packet2Xh>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xh pdiv<Packet2Xh>(const Packet2Xh& a, const Packet2Xh& b) {
+  return __riscv_vfdiv_vv_f16m2(a, b, unpacket_traits<Packet2Xh>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xh pmadd(const Packet2Xh& a, const Packet2Xh& b, const Packet2Xh& c) {
+  return __riscv_vfmadd_vv_f16m2(a, b, c, unpacket_traits<Packet2Xh>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xh pmsub(const Packet2Xh& a, const Packet2Xh& b, const Packet2Xh& c) {
+  return __riscv_vfmsub_vv_f16m2(a, b, c, unpacket_traits<Packet2Xh>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xh pnmadd(const Packet2Xh& a, const Packet2Xh& b, const Packet2Xh& c) {
+  return __riscv_vfnmsub_vv_f16m2(a, b, c, unpacket_traits<Packet2Xh>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xh pnmsub(const Packet2Xh& a, const Packet2Xh& b, const Packet2Xh& c) {
+  return __riscv_vfnmadd_vv_f16m2(a, b, c, unpacket_traits<Packet2Xh>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xh pmin<Packet2Xh>(const Packet2Xh& a, const Packet2Xh& b) {
+  Packet2Xh nans =
+      __riscv_vfmv_v_f_f16m2((std::numeric_limits<Eigen::half>::quiet_NaN)(), unpacket_traits<Packet2Xh>::size);
+  PacketMask8 mask = __riscv_vmfeq_vv_f16m2_b8(a, a, unpacket_traits<Packet2Xh>::size);
+  PacketMask8 mask2 = __riscv_vmfeq_vv_f16m2_b8(b, b, unpacket_traits<Packet2Xh>::size);
+  mask = __riscv_vmand_mm_b8(mask, mask2, unpacket_traits<Packet2Xh>::size);
+
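+  // mask is set only where both inputs are ordered; with the tum policy the
+  // masked-off lanes keep the quiet-NaN destination.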
+  return __riscv_vfmin_vv_f16m2_tum(mask, nans, a, b, unpacket_traits<Packet2Xh>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xh pmin<PropagateNaN, Packet2Xh>(const Packet2Xh& a, const Packet2Xh& b) {
+  return pmin<Packet2Xh>(a, b);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xh pmin<PropagateNumbers, Packet2Xh>(const Packet2Xh& a, const Packet2Xh& b) {
+  return __riscv_vfmin_vv_f16m2(a, b, unpacket_traits<Packet2Xh>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xh pmax<Packet2Xh>(const Packet2Xh& a, const Packet2Xh& b) {
+  Packet2Xh nans =
+      __riscv_vfmv_v_f_f16m2((std::numeric_limits<Eigen::half>::quiet_NaN)(), unpacket_traits<Packet2Xh>::size);
+  PacketMask8 mask = __riscv_vmfeq_vv_f16m2_b8(a, a, unpacket_traits<Packet2Xh>::size);
+  PacketMask8 mask2 = __riscv_vmfeq_vv_f16m2_b8(b, b, unpacket_traits<Packet2Xh>::size);
+  mask = __riscv_vmand_mm_b8(mask, mask2, unpacket_traits<Packet2Xh>::size);
+
+  return __riscv_vfmax_vv_f16m2_tum(mask, nans, a, b, unpacket_traits<Packet2Xh>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xh pmax<PropagateNaN, Packet2Xh>(const Packet2Xh& a, const Packet2Xh& b) {
+  return pmax<Packet2Xh>(a, b);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xh pmax<PropagateNumbers, Packet2Xh>(const Packet2Xh& a, const Packet2Xh& b) {
+  return __riscv_vfmax_vv_f16m2(a, b, unpacket_traits<Packet2Xh>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xh pcmp_le<Packet2Xh>(const Packet2Xh& a, const Packet2Xh& b) {
+  PacketMask8 mask = __riscv_vmfle_vv_f16m2_b8(a, b, unpacket_traits<Packet2Xh>::size);
+  return __riscv_vmerge_vvm_f16m2(pzero<Packet2Xh>(a), ptrue<Packet2Xh>(a), mask,
+                                  unpacket_traits<Packet2Xh>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xh pcmp_lt<Packet2Xh>(const Packet2Xh& a, const Packet2Xh& b) {
+  PacketMask8 mask = __riscv_vmflt_vv_f16m2_b8(a, b, unpacket_traits<Packet2Xh>::size);
+  return __riscv_vmerge_vvm_f16m2(pzero<Packet2Xh>(a), ptrue<Packet2Xh>(a), mask,
+                                  unpacket_traits<Packet2Xh>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xh pcmp_eq<Packet2Xh>(const Packet2Xh& a, const Packet2Xh& b) {
+  PacketMask8 mask = __riscv_vmfeq_vv_f16m2_b8(a, b, unpacket_traits<Packet2Xh>::size);
+  return __riscv_vmerge_vvm_f16m2(pzero<Packet2Xh>(a), ptrue<Packet2Xh>(a), mask,
+                                  unpacket_traits<Packet2Xh>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xh pcmp_lt_or_nan<Packet2Xh>(const Packet2Xh& a, const Packet2Xh& b) {
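+  // Test the negation: lanes where a >= b (ordered) become zero, so lanes
+  // that are less-than or unordered (NaN) stay all-ones.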
+  PacketMask8 mask = __riscv_vmfge_vv_f16m2_b8(a, b, unpacket_traits<Packet2Xh>::size);
+  return __riscv_vfmerge_vfm_f16m2(ptrue<Packet2Xh>(a), static_cast<Eigen::half>(0.0), mask,
+                                   unpacket_traits<Packet2Xh>::size);
+}
+
+// Logical operations are not supported for half, so reinterpret to u16, operate there, and reinterpret back.
+template <>
+EIGEN_STRONG_INLINE Packet2Xh pand<Packet2Xh>(const Packet2Xh& a, const Packet2Xh& b) {
+  return __riscv_vreinterpret_v_u16m2_f16m2(__riscv_vand_vv_u16m2(__riscv_vreinterpret_v_f16m2_u16m2(a),
+                                                                  __riscv_vreinterpret_v_f16m2_u16m2(b),
+                                                                  unpacket_traits<Packet2Xh>::size));
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xh por<Packet2Xh>(const Packet2Xh& a, const Packet2Xh& b) {
+  return __riscv_vreinterpret_v_u16m2_f16m2(__riscv_vor_vv_u16m2(__riscv_vreinterpret_v_f16m2_u16m2(a),
+                                                                 __riscv_vreinterpret_v_f16m2_u16m2(b),
+                                                                 unpacket_traits<Packet2Xh>::size));
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xh pxor<Packet2Xh>(const Packet2Xh& a, const Packet2Xh& b) {
+  return __riscv_vreinterpret_v_u16m2_f16m2(__riscv_vxor_vv_u16m2(__riscv_vreinterpret_v_f16m2_u16m2(a),
+                                                                  __riscv_vreinterpret_v_f16m2_u16m2(b),
+                                                                  unpacket_traits<Packet2Xh>::size));
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xh pandnot<Packet2Xh>(const Packet2Xh& a, const Packet2Xh& b) {
+  return __riscv_vreinterpret_v_u16m2_f16m2(__riscv_vand_vv_u16m2(
+      __riscv_vreinterpret_v_f16m2_u16m2(a),
+      __riscv_vnot_v_u16m2(__riscv_vreinterpret_v_f16m2_u16m2(b), unpacket_traits<Packet2Xh>::size),
+      unpacket_traits<Packet2Xh>::size));
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xh pload<Packet2Xh>(const Eigen::half* from) {
+  EIGEN_DEBUG_ALIGNED_LOAD return __riscv_vle16_v_f16m2(reinterpret_cast<const _Float16*>(from),
+                                                        unpacket_traits<Packet2Xh>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xh ploadu<Packet2Xh>(const Eigen::half* from) {
+  EIGEN_DEBUG_UNALIGNED_LOAD return __riscv_vle16_v_f16m2(reinterpret_cast<const _Float16*>(from),
+                                                          unpacket_traits<Packet2Xh>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xh ploaddup<Packet2Xh>(const Eigen::half* from) {
+  Packet2Xsu idx = __riscv_vid_v_u16m2(unpacket_traits<Packet2Xh>::size);
+  idx = __riscv_vand_vx_u16m2(idx, 0xfffeu, unpacket_traits<Packet2Xh>::size);
+  return __riscv_vloxei16_v_f16m2(reinterpret_cast<const _Float16*>(from), idx, unpacket_traits<Packet2Xh>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xh ploadquad<Packet2Xh>(const Eigen::half* from) {
+  Packet2Xsu idx = __riscv_vid_v_u16m2(unpacket_traits<Packet2Xh>::size);
+  idx = __riscv_vsrl_vx_u16m2(__riscv_vand_vx_u16m2(idx, 0xfffcu, unpacket_traits<Packet2Xh>::size), 1,
+                              unpacket_traits<Packet2Xh>::size);
+  return __riscv_vloxei16_v_f16m2(reinterpret_cast<const _Float16*>(from), idx, unpacket_traits<Packet2Xh>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE void pstore<Eigen::half>(Eigen::half* to, const Packet2Xh& from) {
+  EIGEN_DEBUG_ALIGNED_STORE __riscv_vse16_v_f16m2(reinterpret_cast<_Float16*>(to), from,
+                                                  unpacket_traits<Packet2Xh>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE void pstoreu<Eigen::half>(Eigen::half* to, const Packet2Xh& from) {
+  EIGEN_DEBUG_UNALIGNED_STORE __riscv_vse16_v_f16m2(reinterpret_cast<_Float16*>(to), from,
+                                                    unpacket_traits<Packet2Xh>::size);
+}
+
+template <>
+EIGEN_DEVICE_FUNC inline Packet2Xh pgather<Eigen::half, Packet2Xh>(const Eigen::half* from, Index stride) {
+  return __riscv_vlse16_v_f16m2(reinterpret_cast<const _Float16*>(from), stride * sizeof(Eigen::half),
+                                unpacket_traits<Packet2Xh>::size);
+}
+
+template <>
+EIGEN_DEVICE_FUNC inline void pscatter<Eigen::half, Packet2Xh>(Eigen::half* to, const Packet2Xh& from,
+                                                                  Index stride) {
+  __riscv_vsse16(reinterpret_cast<_Float16*>(to), stride * sizeof(Eigen::half), from,
+                 unpacket_traits<Packet2Xh>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Eigen::half pfirst<Packet2Xh>(const Packet2Xh& a) {
+  return static_cast<Eigen::half>(__riscv_vfmv_f_s_f16m2_f16(a));
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xh psqrt(const Packet2Xh& a) {
+  return __riscv_vfsqrt_v_f16m2(a, unpacket_traits<Packet2Xh>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xh print<Packet2Xh>(const Packet2Xh& a) {
+  const Packet2Xh limit = pset1<Packet2Xh>(static_cast<Eigen::half>(1 << 10));
+  const Packet2Xh abs_a = pabs(a);
+
+  PacketMask8 mask = __riscv_vmfne_vv_f16m2_b8(a, a, unpacket_traits<Packet2Xh>::size);
+  const Packet2Xh x = __riscv_vfadd_vv_f16m2_tum(mask, a, a, a, unpacket_traits<Packet2Xh>::size);
+  const Packet2Xh new_x = __riscv_vfcvt_f_x_v_f16m2(
+      __riscv_vfcvt_x_f_v_i16m2(a, unpacket_traits<Packet2Xh>::size), unpacket_traits<Packet2Xh>::size);
+
+  mask = __riscv_vmflt_vv_f16m2_b8(abs_a, limit, unpacket_traits<Packet2Xh>::size);
+  Packet2Xh signed_x = __riscv_vfsgnj_vv_f16m2(new_x, x, unpacket_traits<Packet2Xh>::size);
+  return __riscv_vmerge_vvm_f16m2(x, signed_x, mask, unpacket_traits<Packet2Xh>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xh pfloor<Packet2Xh>(const Packet2Xh& a) {
+  Packet2Xh tmp = print<Packet2Xh>(a);
+  // If greater, subtract one.
+  PacketMask8 mask = __riscv_vmflt_vv_f16m2_b8(a, tmp, unpacket_traits<Packet2Xh>::size);
+  return __riscv_vfsub_vf_f16m2_tum(mask, tmp, tmp, static_cast<Eigen::half>(1.0), unpacket_traits<Packet2Xh>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xh preverse(const Packet2Xh& a) {
+  Packet2Xsu idx =
+      __riscv_vrsub_vx_u16m2(__riscv_vid_v_u16m2(unpacket_traits<Packet2Xh>::size),
+                             unpacket_traits<Packet2Xh>::size - 1, unpacket_traits<Packet2Xh>::size);
+  return __riscv_vrgather_vv_f16m2(a, idx, unpacket_traits<Packet2Xh>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Eigen::half predux<Packet2Xh>(const Packet2Xh& a) {
+  return static_cast<Eigen::half>(__riscv_vfmv_f(__riscv_vfredusum_vs_f16m2_f16m1(
+      a, __riscv_vfmv_v_f_f16m1(static_cast<Eigen::half>(0.0), unpacket_traits<Packet2Xh>::size / 4),
+      unpacket_traits<Packet2Xh>::size)));
+}
+
+template <>
+EIGEN_STRONG_INLINE Eigen::half predux_mul<Packet2Xh>(const Packet2Xh& a) {
+  return predux_mul<PacketXh>(__riscv_vfmul_vv_f16m1(__riscv_vget_v_f16m2_f16m1(a, 0), __riscv_vget_v_f16m2_f16m1(a, 1),
+                                                     unpacket_traits<PacketXh>::size));
+}
+
+template <>
+EIGEN_STRONG_INLINE Eigen::half predux_min<Packet2Xh>(const Packet2Xh& a) {
+  return static_cast<Eigen::half>(__riscv_vfmv_f(__riscv_vfredmin_vs_f16m2_f16m1(
+      a, __riscv_vfmv_v_f_f16m1((std::numeric_limits<Eigen::half>::max)(), unpacket_traits<Packet2Xh>::size / 4),
+      unpacket_traits<Packet2Xh>::size)));
+}
+
+template <>
+EIGEN_STRONG_INLINE Eigen::half predux_max<Packet2Xh>(const Packet2Xh& a) {
+  return static_cast<Eigen::half>(__riscv_vfmv_f(__riscv_vfredmax_vs_f16m2_f16m1(
+      a, __riscv_vfmv_v_f_f16m1(-(std::numeric_limits<Eigen::half>::max)(), unpacket_traits<Packet2Xh>::size / 4),
+      unpacket_traits<Packet2Xh>::size)));
+}
+
+template <int N>
+EIGEN_DEVICE_FUNC inline void ptranspose(PacketBlock<Packet2Xh, N>& kernel) {
+  Eigen::half buffer[unpacket_traits<Packet2Xh>::size * N];
+  int i = 0;
+
+  for (i = 0; i < N; i++) {
+    __riscv_vsse16(reinterpret_cast<_Float16*>(&buffer[i]), N * sizeof(Eigen::half), kernel.packet[i],
+                   unpacket_traits<Packet2Xh>::size);
+  }
+
+  for (i = 0; i < N; i++) {
+    kernel.packet[i] =
+        __riscv_vle16_v_f16m2(reinterpret_cast<_Float16*>(&buffer[i * unpacket_traits<Packet2Xh>::size]),
+                              unpacket_traits<Packet2Xh>::size);
+  }
+}
+
+EIGEN_STRONG_INLINE Packet4Xf half2float(const Packet2Xh& a) {
+  return __riscv_vfwcvt_f_f_v_f32m4(a, unpacket_traits<Packet4Xf>::size);
+}
+
+EIGEN_STRONG_INLINE Packet2Xh float2half(const Packet4Xf& a) {
+  return __riscv_vfncvt_f_f_w_f16m2(a, unpacket_traits<Packet2Xh>::size);
+}
+
+template <typename Packet = Packet2Xh>
+EIGEN_STRONG_INLINE
+typename std::enable_if<std::is_same<Packet, Packet2Xh>::value && (unpacket_traits<Packet2Xh>::size % 8) == 0,
+                        PacketXh>::type
+predux_half_dowto4(const Packet2Xh& a) {
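+  // Reduce the m2 packet to m1 by adding its low and high m1 halves.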
+  return __riscv_vfadd_vv_f16m1(__riscv_vget_v_f16m2_f16m1(a, 0), __riscv_vget_v_f16m2_f16m1(a, 1),
+                                unpacket_traits<PacketXh>::size);
+}
+
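+// F16_PACKET_FUNCTION (see arch/Default/Half.h) implements each half-precision
+// math function by widening to the float packet, running the f32 kernel, and
+// narrowing back through half2float/float2half.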
+F16_PACKET_FUNCTION(Packet2Xf, PacketXh, pcos)
+F16_PACKET_FUNCTION(Packet2Xf, PacketXh, pexp)
+F16_PACKET_FUNCTION(Packet2Xf, PacketXh, pexpm1)
+F16_PACKET_FUNCTION(Packet2Xf, PacketXh, plog)
+F16_PACKET_FUNCTION(Packet2Xf, PacketXh, plog1p)
+F16_PACKET_FUNCTION(Packet2Xf, PacketXh, plog2)
+F16_PACKET_FUNCTION(Packet2Xf, PacketXh, preciprocal)
+F16_PACKET_FUNCTION(Packet2Xf, PacketXh, prsqrt)
+F16_PACKET_FUNCTION(Packet2Xf, PacketXh, psin)
+F16_PACKET_FUNCTION(Packet2Xf, PacketXh, ptanh)
+
+F16_PACKET_FUNCTION(Packet4Xf, Packet2Xh, pcos)
+F16_PACKET_FUNCTION(Packet4Xf, Packet2Xh, pexp)
+F16_PACKET_FUNCTION(Packet4Xf, Packet2Xh, pexpm1)
+F16_PACKET_FUNCTION(Packet4Xf, Packet2Xh, plog)
+F16_PACKET_FUNCTION(Packet4Xf, Packet2Xh, plog1p)
+F16_PACKET_FUNCTION(Packet4Xf, Packet2Xh, plog2)
+F16_PACKET_FUNCTION(Packet4Xf, Packet2Xh, preciprocal)
+F16_PACKET_FUNCTION(Packet4Xf, Packet2Xh, prsqrt)
+F16_PACKET_FUNCTION(Packet4Xf, Packet2Xh, psin)
+F16_PACKET_FUNCTION(Packet4Xf, Packet2Xh, ptanh)
+
+/********************************* casting ************************************/
+
+template <>
+struct type_casting_traits<_Float16, numext::int16_t> {
+  enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 1 };
+};
+
+template <>
+struct type_casting_traits<numext::int16_t, _Float16> {
+  enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 1 };
+};
+
+template <>
+EIGEN_STRONG_INLINE PacketXh pcast<PacketXs, PacketXh>(const PacketXs& a) {
+  return __riscv_vfcvt_f_x_v_f16m1(a, unpacket_traits<PacketXs>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE PacketXs pcast<PacketXh, PacketXs>(const PacketXh& a) {
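+  // The _rtz_ variant rounds toward zero, matching C++'s truncating
+  // float-to-integer conversion.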
+  return __riscv_vfcvt_rtz_x_f_v_i16m1(a, unpacket_traits<PacketXh>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE PacketXh preinterpret<PacketXh, PacketXs>(const PacketXs& a) {
+  return __riscv_vreinterpret_v_i16m1_f16m1(a);
+}
+
+template <>
+EIGEN_STRONG_INLINE PacketXs preinterpret<PacketXs, PacketXh>(const PacketXh& a) {
+  return __riscv_vreinterpret_v_f16m1_i16m1(a);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xh pcast<Packet2Xs, Packet2Xh>(const Packet2Xs& a) {
+  return __riscv_vfcvt_f_x_v_f16m2(a, unpacket_traits<Packet2Xs>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xs pcast<Packet2Xh, Packet2Xs>(const Packet2Xh& a) {
+  return __riscv_vfcvt_rtz_x_f_v_i16m2(a, unpacket_traits<Packet2Xh>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xh preinterpret<Packet2Xh, Packet2Xs>(const Packet2Xs& a) {
+  return __riscv_vreinterpret_v_i16m2_f16m2(a);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xs preinterpret<Packet2Xs, Packet2Xh>(const Packet2Xh& a) {
+  return __riscv_vreinterpret_v_f16m2_i16m2(a);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xs pcast<PacketXh, Packet4Xs>(const PacketXh& a, const PacketXh& b, const PacketXh& c,
+                                                               const PacketXh& d) {
+  return __riscv_vcreate_v_i16m1_i16m4(__riscv_vfcvt_rtz_x_f_v_i16m1(a, unpacket_traits<PacketXh>::size),
+                                       __riscv_vfcvt_rtz_x_f_v_i16m1(b, unpacket_traits<PacketXh>::size),
+                                       __riscv_vfcvt_rtz_x_f_v_i16m1(c, unpacket_traits<PacketXh>::size),
+                                       __riscv_vfcvt_rtz_x_f_v_i16m1(d, unpacket_traits<PacketXh>::size));
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xh pcast<PacketXs, Packet2Xh>(const PacketXs& a, const PacketXs& b) {
+  return __riscv_vcreate_v_f16m1_f16m2(__riscv_vfcvt_f_x_v_f16m1(a, unpacket_traits<PacketXs>::size),
+                                       __riscv_vfcvt_f_x_v_f16m1(b, unpacket_traits<PacketXs>::size));
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xh pcast<PacketXh, Packet2Xh>(const PacketXh& a, const PacketXh& b) {
+  return __riscv_vcreate_v_f16m1_f16m2(a, b);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xs pcast<PacketXh, Packet2Xs>(const PacketXh& a, const PacketXh& b) {
+  return __riscv_vcreate_v_i16m1_i16m2(__riscv_vfcvt_rtz_x_f_v_i16m1(a, unpacket_traits<PacketXh>::size),
+                                       __riscv_vfcvt_rtz_x_f_v_i16m1(b, unpacket_traits<PacketXh>::size));
+}
+
+}  // namespace internal
+}  // namespace Eigen
+
+#endif  // EIGEN_PACKET_MATH_FP16_RVV10_H
diff --git a/Eigen/src/Core/arch/RVV10/TypeCasting.h b/Eigen/src/Core/arch/RVV10/TypeCasting.h
new file mode 100644
index 0000000..2b0d3db
--- /dev/null
+++ b/Eigen/src/Core/arch/RVV10/TypeCasting.h
@@ -0,0 +1,284 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2024 Kseniya Zaytseva <kseniya.zaytseva@syntacore.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_TYPE_CASTING_RVV10_H
+#define EIGEN_TYPE_CASTING_RVV10_H
+
+// IWYU pragma: private
+#include "../../InternalHeaderCheck.h"
+
+namespace Eigen {
+namespace internal {
+
+/********************************* 32 bits ************************************/
+
+template <>
+struct type_casting_traits<float, numext::int32_t> {
+  enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 1 };
+};
+
+template <>
+struct type_casting_traits<numext::int32_t, float> {
+  enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 1 };
+};
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xf pcast<Packet1Xi, Packet1Xf>(const Packet1Xi& a) {
+  return __riscv_vfcvt_f_x_v_f32m1(a, unpacket_traits<Packet1Xi>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xi pcast<Packet1Xf, Packet1Xi>(const Packet1Xf& a) {
+  return __riscv_vfcvt_rtz_x_f_v_i32m1(a, unpacket_traits<Packet1Xf>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xf preinterpret<Packet1Xf, Packet1Xi>(const Packet1Xi& a) {
+  return __riscv_vreinterpret_v_i32m1_f32m1(a);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xi preinterpret<Packet1Xi, Packet1Xf>(const Packet1Xf& a) {
+  return __riscv_vreinterpret_v_f32m1_i32m1(a);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xf pcast<Packet4Xi, Packet4Xf>(const Packet4Xi& a) {
+  return __riscv_vfcvt_f_x_v_f32m4(a, unpacket_traits<Packet4Xi>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xi pcast<Packet4Xf, Packet4Xi>(const Packet4Xf& a) {
+  return __riscv_vfcvt_rtz_x_f_v_i32m4(a, unpacket_traits<Packet4Xf>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xf preinterpret<Packet4Xf, Packet4Xi>(const Packet4Xi& a) {
+  return __riscv_vreinterpret_v_i32m4_f32m4(a);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xi preinterpret<Packet4Xi, Packet4Xf>(const Packet4Xf& a) {
+  return __riscv_vreinterpret_v_f32m4_i32m4(a);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xf pcast<Packet2Xi, Packet2Xf>(const Packet2Xi& a) {
+  return __riscv_vfcvt_f_x_v_f32m2(a, unpacket_traits<Packet2Xi>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xi pcast<Packet2Xf, Packet2Xi>(const Packet2Xf& a) {
+  return __riscv_vfcvt_rtz_x_f_v_i32m2(a, unpacket_traits<Packet2Xf>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xf preinterpret<Packet2Xf, Packet2Xi>(const Packet2Xi& a) {
+  return __riscv_vreinterpret_v_i32m2_f32m2(a);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xi preinterpret<Packet2Xi, Packet2Xf>(const Packet2Xf& a) {
+  return __riscv_vreinterpret_v_f32m2_i32m2(a);
+}
+
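+// Multi-argument pcasts assemble several m1 inputs into a wider LMUL register
+// group via vcreate, converting the element type where needed.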
+template <>
+EIGEN_STRONG_INLINE Packet4Xi pcast<Packet1Xi, Packet4Xi>(const Packet1Xi& a, const Packet1Xi& b, const Packet1Xi& c,
+                                                               const Packet1Xi& d) {
+  return __riscv_vcreate_v_i32m1_i32m4(a, b, c, d);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xf pcast<Packet1Xi, Packet4Xf>(const Packet1Xi& a, const Packet1Xi& b, const Packet1Xi& c,
+                                                               const Packet1Xi& d) {
+  return __riscv_vcreate_v_f32m1_f32m4(__riscv_vfcvt_f_x_v_f32m1(a, unpacket_traits<Packet1Xi>::size),
+                                       __riscv_vfcvt_f_x_v_f32m1(b, unpacket_traits<Packet1Xi>::size),
+                                       __riscv_vfcvt_f_x_v_f32m1(c, unpacket_traits<Packet1Xi>::size),
+                                       __riscv_vfcvt_f_x_v_f32m1(d, unpacket_traits<Packet1Xi>::size));
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xf pcast<Packet1Xf, Packet4Xf>(const Packet1Xf& a, const Packet1Xf& b, const Packet1Xf& c,
+                                                               const Packet1Xf& d) {
+  return __riscv_vcreate_v_f32m1_f32m4(a, b, c, d);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xi pcast<Packet1Xf, Packet4Xi>(const Packet1Xf& a, const Packet1Xf& b, const Packet1Xf& c,
+                                                               const Packet1Xf& d) {
+  return __riscv_vcreate_v_i32m1_i32m4(__riscv_vfcvt_rtz_x_f_v_i32m1(a, unpacket_traits<Packet1Xf>::size),
+                                       __riscv_vfcvt_rtz_x_f_v_i32m1(b, unpacket_traits<Packet1Xf>::size),
+                                       __riscv_vfcvt_rtz_x_f_v_i32m1(c, unpacket_traits<Packet1Xf>::size),
+                                       __riscv_vfcvt_rtz_x_f_v_i32m1(d, unpacket_traits<Packet1Xf>::size));
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xi pcast<Packet1Xi, Packet2Xi>(const Packet1Xi& a, const Packet1Xi& b) {
+  return __riscv_vcreate_v_i32m1_i32m2(a, b);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xf pcast<Packet1Xi, Packet2Xf>(const Packet1Xi& a, const Packet1Xi& b) {
+  return __riscv_vcreate_v_f32m1_f32m2(__riscv_vfcvt_f_x_v_f32m1(a, unpacket_traits<Packet1Xi>::size),
+                                       __riscv_vfcvt_f_x_v_f32m1(b, unpacket_traits<Packet1Xi>::size));
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xf pcast<Packet1Xf, Packet2Xf>(const Packet1Xf& a, const Packet1Xf& b) {
+  return __riscv_vcreate_v_f32m1_f32m2(a, b);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xi pcast<Packet1Xf, Packet2Xi>(const Packet1Xf& a, const Packet1Xf& b) {
+  return __riscv_vcreate_v_i32m1_i32m2(__riscv_vfcvt_rtz_x_f_v_i32m1(a, unpacket_traits<Packet1Xf>::size),
+                                       __riscv_vfcvt_rtz_x_f_v_i32m1(b, unpacket_traits<Packet1Xf>::size));
+}
+
+/********************************* 64 bits ************************************/
+
+template <>
+struct type_casting_traits<double, numext::int64_t> {
+  enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 1 };
+};
+
+template <>
+struct type_casting_traits<numext::int64_t, double> {
+  enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 1 };
+};
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xd pcast<Packet1Xl, Packet1Xd>(const Packet1Xl& a) {
+  return __riscv_vfcvt_f_x_v_f64m1(a, unpacket_traits<Packet1Xl>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xl pcast<Packet1Xd, Packet1Xl>(const Packet1Xd& a) {
+  return __riscv_vfcvt_rtz_x_f_v_i64m1(a, unpacket_traits<Packet1Xd>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xd preinterpret<Packet1Xd, Packet1Xl>(const Packet1Xl& a) {
+  return __riscv_vreinterpret_v_i64m1_f64m1(a);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet1Xl preinterpret<Packet1Xl, Packet1Xd>(const Packet1Xd& a) {
+  return __riscv_vreinterpret_v_f64m1_i64m1(a);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xd pcast<Packet4Xl, Packet4Xd>(const Packet4Xl& a) {
+  return __riscv_vfcvt_f_x_v_f64m4(a, unpacket_traits<Packet4Xl>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xl pcast<Packet4Xd, Packet4Xl>(const Packet4Xd& a) {
+  return __riscv_vfcvt_rtz_x_f_v_i64m4(a, unpacket_traits<Packet4Xd>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xd preinterpret<Packet4Xd, Packet4Xl>(const Packet4Xl& a) {
+  return __riscv_vreinterpret_v_i64m4_f64m4(a);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xl preinterpret<Packet4Xl, Packet4Xd>(const Packet4Xd& a) {
+  return __riscv_vreinterpret_v_f64m4_i64m4(a);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xd pcast<Packet2Xl, Packet2Xd>(const Packet2Xl& a) {
+  return __riscv_vfcvt_f_x_v_f64m2(a, unpacket_traits<Packet2Xl>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xl pcast<Packet2Xd, Packet2Xl>(const Packet2Xd& a) {
+  return __riscv_vfcvt_rtz_x_f_v_i64m2(a, unpacket_traits<Packet2Xd>::size);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xd preinterpret<Packet2Xd, Packet2Xl>(const Packet2Xl& a) {
+  return __riscv_vreinterpret_v_i64m2_f64m2(a);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xl preinterpret<Packet2Xl, Packet2Xd>(const Packet2Xd& a) {
+  return __riscv_vreinterpret_v_f64m2_i64m2(a);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xl pcast<Packet1Xl, Packet4Xl>(const Packet1Xl& a, const Packet1Xl& b, const Packet1Xl& c,
+                                                               const Packet1Xl& d) {
+  return __riscv_vcreate_v_i64m1_i64m4(a, b, c, d);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xd pcast<Packet1Xl, Packet4Xd>(const Packet1Xl& a, const Packet1Xl& b, const Packet1Xl& c,
+                                                               const Packet1Xl& d) {
+  return __riscv_vcreate_v_f64m1_f64m4(__riscv_vfcvt_f_x_v_f64m1(a, unpacket_traits<Packet1Xl>::size),
+                                       __riscv_vfcvt_f_x_v_f64m1(b, unpacket_traits<Packet1Xl>::size),
+                                       __riscv_vfcvt_f_x_v_f64m1(c, unpacket_traits<Packet1Xl>::size),
+                                       __riscv_vfcvt_f_x_v_f64m1(d, unpacket_traits<Packet1Xl>::size));
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xd pcast<Packet1Xd, Packet4Xd>(const Packet1Xd& a, const Packet1Xd& b, const Packet1Xd& c,
+                                                               const Packet1Xd& d) {
+  return __riscv_vcreate_v_f64m1_f64m4(a, b, c, d);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xl pcast<Packet1Xd, Packet4Xl>(const Packet1Xd& a, const Packet1Xd& b, const Packet1Xd& c,
+                                                               const Packet1Xd& d) {
+  return __riscv_vcreate_v_i64m1_i64m4(__riscv_vfcvt_rtz_x_f_v_i64m1(a, unpacket_traits<Packet1Xd>::size),
+                                       __riscv_vfcvt_rtz_x_f_v_i64m1(b, unpacket_traits<Packet1Xd>::size),
+                                       __riscv_vfcvt_rtz_x_f_v_i64m1(c, unpacket_traits<Packet1Xd>::size),
+                                       __riscv_vfcvt_rtz_x_f_v_i64m1(d, unpacket_traits<Packet1Xd>::size));
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xl pcast<Packet1Xl, Packet2Xl>(const Packet1Xl& a, const Packet1Xl& b) {
+  return __riscv_vcreate_v_i64m1_i64m2(a, b);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xd pcast<Packet1Xl, Packet2Xd>(const Packet1Xl& a, const Packet1Xl& b) {
+  return __riscv_vcreate_v_f64m1_f64m2(__riscv_vfcvt_f_x_v_f64m1(a, unpacket_traits<Packet1Xl>::size),
+                                       __riscv_vfcvt_f_x_v_f64m1(b, unpacket_traits<Packet1Xl>::size));
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xd pcast<Packet1Xd, Packet2Xd>(const Packet1Xd& a, const Packet1Xd& b) {
+  return __riscv_vcreate_v_f64m1_f64m2(a, b);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xl pcast<Packet1Xd, Packet2Xl>(const Packet1Xd& a, const Packet1Xd& b) {
+  return __riscv_vcreate_v_i64m1_i64m2(__riscv_vfcvt_rtz_x_f_v_i64m1(a, unpacket_traits<Packet1Xd>::size),
+                                       __riscv_vfcvt_rtz_x_f_v_i64m1(b, unpacket_traits<Packet1Xd>::size));
+}
+
+/********************************* 16 bits ************************************/
+
+template <>
+EIGEN_STRONG_INLINE Packet2Xs pcast<Packet1Xs, Packet2Xs>(const Packet1Xs& a, const Packet1Xs& b) {
+  return __riscv_vcreate_v_i16m1_i16m2(a, b);
+}
+
+template <>
+EIGEN_STRONG_INLINE Packet4Xs pcast<Packet1Xs, Packet4Xs>(const Packet1Xs& a, const Packet1Xs& b, const Packet1Xs& c,
+                                                               const Packet1Xs& d) {
+  return __riscv_vcreate_v_i16m1_i16m4(a, b, c, d);
+}
+
+}  // namespace internal
+}  // namespace Eigen
+
+#endif  // EIGEN_TYPE_CASTING_RVV10_H
diff --git a/Eigen/src/Core/util/ConfigureVectorization.h b/Eigen/src/Core/util/ConfigureVectorization.h
index 26d2bca..80ad82e 100644
--- a/Eigen/src/Core/util/ConfigureVectorization.h
+++ b/Eigen/src/Core/util/ConfigureVectorization.h
@@ -80,6 +80,8 @@
 #define EIGEN_IDEAL_MAX_ALIGN_BYTES 32
 #elif defined __HVX__ && (__HVX_LENGTH__ == 128)
 #define EIGEN_IDEAL_MAX_ALIGN_BYTES 128
+#elif defined(EIGEN_RISCV64_USE_RVV10)
+#define EIGEN_IDEAL_MAX_ALIGN_BYTES 64
 #else
 #define EIGEN_IDEAL_MAX_ALIGN_BYTES 16
 #endif
@@ -116,7 +118,7 @@
 // Only static alignment is really problematic (relies on nonstandard compiler extensions),
 // try to keep heap alignment even when we have to disable static alignment.
 #if EIGEN_COMP_GNUC && !(EIGEN_ARCH_i386_OR_x86_64 || EIGEN_ARCH_ARM_OR_ARM64 || EIGEN_ARCH_PPC || EIGEN_ARCH_IA64 || \
-                         EIGEN_ARCH_MIPS || EIGEN_ARCH_LOONGARCH64)
+                         EIGEN_ARCH_MIPS || EIGEN_ARCH_LOONGARCH64 || EIGEN_ARCH_RISCV)
 #define EIGEN_GCC_AND_ARCH_DOESNT_WANT_STACK_ALIGNMENT 1
 #else
 #define EIGEN_GCC_AND_ARCH_DOESNT_WANT_STACK_ALIGNMENT 0
@@ -418,14 +420,55 @@
 #define EIGEN_VECTORIZE_SVE
 #include <arm_sve.h>
 
-// Since we depend on knowing SVE vector lengths at compile-time, we need
-// to ensure a fixed lengths is set
+// Since we depend on knowing SVE vector length at compile-time, we need
+// to ensure a fixed length is set
 #if defined __ARM_FEATURE_SVE_BITS
 #define EIGEN_ARM64_SVE_VL __ARM_FEATURE_SVE_BITS
 #else
 #error "Eigen requires a fixed SVE lector length but EIGEN_ARM64_SVE_VL is not set."
 #endif
 
+#elif EIGEN_ARCH_RISCV
+
+#if defined(__riscv_zfh)
+#define EIGEN_HAS_BUILTIN_FLOAT16
+#endif
+
+// We currently require RVV to be enabled explicitly via EIGEN_RISCV64_USE_RVV10 and
+// will not select the backend automatically.
+#if (defined EIGEN_RISCV64_USE_RVV10)
+
+#define EIGEN_VECTORIZE
+#define EIGEN_VECTORIZE_RVV10
+#include <riscv_vector.h>
+
+// Since we depend on knowing RVV vector length at compile-time, we need
+// to ensure a fixed length is set
+#if defined(__riscv_v_fixed_vlen)
+#define EIGEN_RISCV64_RVV_VL __riscv_v_fixed_vlen
+#if __riscv_v_fixed_vlen >= 256
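+// For VLEN >= 256 the packet alignment exceeds 16 bytes; disable static
+// (stack) alignment and keep heap alignment only.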
+#undef EIGEN_GCC_AND_ARCH_DOESNT_WANT_STACK_ALIGNMENT
+#define EIGEN_GCC_AND_ARCH_DOESNT_WANT_STACK_ALIGNMENT 1
+#endif
+#else
+#error "Eigen requires a fixed RVV vector length but -mrvv-vector-bits=zvl is not set."
+#endif
+
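+// Fixed-length RVV register groups can be large, so raise the stack
+// allocation limit to 192 KiB.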
+#undef EIGEN_STACK_ALLOCATION_LIMIT
+#define EIGEN_STACK_ALLOCATION_LIMIT 196608
+
+#if defined(__riscv_zvfh) && defined(__riscv_zfh)
+#define EIGEN_VECTORIZE_RVV10FP16
+#elif defined(__riscv_zvfh)
+#if defined(__GNUC__) || defined(__clang__)
+#warning "The Eigen::Half vectorization requires Zfh and Zvfh extensions."
+#elif defined(_MSC_VER)
+#pragma message("The Eigen::Half vectorization requires Zfh and Zvfh extensions.")
+#endif
+#endif
+
+#endif  // EIGEN_RISCV64_USE_RVV10
+
 #elif (defined __s390x__ && defined __VEC__)
 
 #define EIGEN_VECTORIZE
@@ -510,6 +553,13 @@
 #include <hip/hip_bfloat16.h>
 #endif
 
+#if defined(__riscv)
+// Defines the default LMUL for RISC-V
+#ifndef EIGEN_RISCV64_DEFAULT_LMUL
+#define EIGEN_RISCV64_DEFAULT_LMUL 1
+#endif
+#endif
+
 /** \brief Namespace containing all symbols from the %Eigen library. */
 // IWYU pragma: private
 #include "../InternalHeaderCheck.h"
diff --git a/Eigen/src/Core/util/Constants.h b/Eigen/src/Core/util/Constants.h
index fcc2db8..8aba62b 100644
--- a/Eigen/src/Core/util/Constants.h
+++ b/Eigen/src/Core/util/Constants.h
@@ -475,6 +475,7 @@
   SVE = 0x6,
   HVX = 0x7,
   LSX = 0x8,
+  RVV10 = 0x9,
 #if defined EIGEN_VECTORIZE_SSE
   Target = SSE
 #elif defined EIGEN_VECTORIZE_ALTIVEC
@@ -491,6 +492,8 @@
   Target = HVX
 #elif defined EIGEN_VECTORIZE_LSX
   Target = LSX
+#elif defined EIGEN_VECTORIZE_RVV10
+  Target = RVV10
 #else
   Target = Generic
 #endif
diff --git a/Eigen/src/Core/util/Macros.h b/Eigen/src/Core/util/Macros.h
index e644211..9cd8250 100644
--- a/Eigen/src/Core/util/Macros.h
+++ b/Eigen/src/Core/util/Macros.h
@@ -420,6 +420,13 @@
 #define EIGEN_ARCH_PPC 0
 #endif
 
+/// \internal EIGEN_ARCH_RISCV set to 1 if the architecture is RISC-V.
+#if defined(__riscv)
+#define EIGEN_ARCH_RISCV 1
+#else
+#define EIGEN_ARCH_RISCV 0
+#endif
+
 //------------------------------------------------------------------------------------------
 // Operating system identification, EIGEN_OS_*
 //------------------------------------------------------------------------------------------
@@ -1023,7 +1030,7 @@
 #define EIGEN_UNUSED_VARIABLE(var) Eigen::internal::ignore_unused_variable(var);
 
 #if !defined(EIGEN_ASM_COMMENT)
-#if EIGEN_COMP_GNUC && (EIGEN_ARCH_i386_OR_x86_64 || EIGEN_ARCH_ARM_OR_ARM64)
+#if EIGEN_COMP_GNUC && (EIGEN_ARCH_i386_OR_x86_64 || EIGEN_ARCH_ARM_OR_ARM64 || EIGEN_ARCH_RISCV)
 #define EIGEN_ASM_COMMENT(X) __asm__("#" X)
 #else
 #define EIGEN_ASM_COMMENT(X)
diff --git a/Eigen/src/Jacobi/Jacobi.h b/Eigen/src/Jacobi/Jacobi.h
index 09bffa4..d97477b 100644
--- a/Eigen/src/Jacobi/Jacobi.h
+++ b/Eigen/src/Jacobi/Jacobi.h
@@ -305,7 +305,7 @@
     typedef typename packet_traits<OtherScalar>::type OtherPacket;
 
     constexpr int RequiredAlignment =
-        (std::max)(unpacket_traits<Packet>::alignment, unpacket_traits<OtherPacket>::alignment);
+        (std::max<int>)(unpacket_traits<Packet>::alignment, unpacket_traits<OtherPacket>::alignment);
     constexpr Index PacketSize = packet_traits<Scalar>::size;
 
     /*** dynamic-size vectorized paths ***/