Compare commits

...

1 Commits

Author SHA1 Message Date
1598dcb62a fix 2025-06-18 10:04:50 +08:00
Some checks failed: build / linux (clang, ubuntu-latest) (push), build / linux (gcc, ubuntu-latest) (push) and build / macos (clang, macos-latest) (push) have been cancelled
2 changed files with 83 additions and 77 deletions

View File

@@ -114,17 +114,17 @@ static ALWAYS_INLINE __m128i _mm_madd_epi16(__m128i u, __m128i v) {
#define _mm_and_si128( _u_,_v_) (__m128i)vandq_u32( (uint32x4_t)(_u_), (uint32x4_t)(_v_))
#define _mm_xor_si128( _u_,_v_) (__m128i)veorq_u32( (uint32x4_t)(_u_), (uint32x4_t)(_v_))
//---------------------------------------------- Shift ----------------------------------------------------------------------------
#define mm_slli_epi8( _v_,_c_) (__m128i)((_c_)<1?(_v_):(uint32x4_t)((_c_)> 7?vdupq_n_u8( 0):vshlq_n_u8( (uint8x16_t)(_v_), (_c_)))) // parameter c MUST be a constant / vshlq_n_u8: __constrange(0-(N-1))
#define mm_slli_epi16( _v_,_c_) (__m128i)((_c_)<1?(_v_):(uint32x4_t)((_c_)>15?vdupq_n_u16(0):vshlq_n_u16((uint16x8_t)(_v_), (_c_))))
#define mm_slli_epi32( _v_,_c_) (__m128i)((_c_)<1?(_v_):(uint32x4_t)((_c_)>31?vdupq_n_u32(0):vshlq_n_u32((uint32x4_t)(_v_), (_c_))))
#define mm_slli_epi64( _v_,_c_) (__m128i)((_c_)<1?(_v_):(uint32x4_t)((_c_)>63?vdupq_n_u64(0):vshlq_n_u64((uint64x2_t)(_v_), (_c_))))
#define _mm_slli_si128( _v_,_c_) (__m128i)((_c_)<1?(_v_):(uint32x4_t)((_c_)>15?vdupq_n_u8( 0):vextq_u8(vdupq_n_u8(0), (uint8x16_t)(_v_), 16-(_c_) )) ) // vextq_u8: __constrange(0-15)
#define mm_slli_epi8(_u_, _c_) (__m128i)((_c_)<1?(_u_):(__m128i)((_c_)>7?__lsx_vreplgr2vr_b(0):__lsx_vslli_b((__m128i)(_u_), (_c_))))
#define mm_slli_epi16(_u_, _c_) (__m128i)((_c_)<1?(_u_):(__m128i)((_c_)>15?__lsx_vreplgr2vr_h(0):__lsx_vslli_h((__m128i)(_u_), (_c_))))
#define mm_slli_epi32(_u_, _c_) (__m128i)((_c_)<1?(_u_):(__m128i)((_c_)>31?__lsx_vreplgr2vr_w(0):__lsx_vslli_w((__m128i)(_u_), (_c_))))
#define mm_slli_epi64(_u_, _c_) (__m128i)((_c_)<1?(_u_):(__m128i)((_c_)>63?__lsx_vreplgr2vr_d(0):__lsx_vslli_d((__m128i)(_u_), (_c_))))
#define _mm_slli_si128(_v_, _c_) (__m128i)((_c_)<1?(_v_):(__m128i)((_c_)>15?__lsx_vreplgr2vr_b(0):__lsx_vbsll_v(_v_, _c_)))
#define mm_srli_epi8( _v_,_c_) (__m128i)((_c_)<1?(_v_):(uint32x4_t)((_c_)> 7?vdupq_n_u8( 0):vshrq_n_u8( (uint8x16_t)(_v_), (_c_)))) // vshrq_n: __constrange(1-N)
#define mm_srli_epi16( _v_,_c_) (__m128i)((_c_)<1?(_v_):(uint32x4_t)((_c_)>15?vdupq_n_u16(0):vshrq_n_u16((uint16x8_t)(_v_), (_c_))))
#define mm_srli_epi32( _v_,_c_) (__m128i)((_c_)<1?(_v_):(uint32x4_t)((_c_)>31?vdupq_n_u32(0):vshrq_n_u32((uint32x4_t)(_v_), (_c_))))
#define mm_srli_epi64( _v_,_c_) (__m128i)((_c_)<1?(_v_):(uint32x4_t)((_c_)>63?vdupq_n_u64(0):vshlq_n_u64((uint64x2_t)(_v_), (_c_))))
#define _mm_srli_si128( _v_,_c_) (__m128i)((_c_)<1?(_v_):(uint32x4_t)((_c_)>15?vdupq_n_u8(0):vextq_u8((uint8x16_t)(_v_), vdupq_n_u8(0), (_c_) )) ) // vextq_u8: __constrange(0-15)
#define mm_srli_epi8(_u_, _c_) (__m128i)((_c_)<1?(_u_):(__m128i)((_c_)>7?__lsx_vreplgr2vr_b(0):__lsx_vsrli_b((__m128i)(_u_), (_c_))))
#define mm_srli_epi16(_u_, _c_) (__m128i)((_c_)<1?(_u_):(__m128i)((_c_)>15?__lsx_vreplgr2vr_h(0):__lsx_vsrli_h((__m128i)(_u_), (_c_))))
#define mm_srli_epi32(_u_, _c_) (__m128i)((_c_)<1?(_u_):(__m128i)((_c_)>31?__lsx_vreplgr2vr_w(0):__lsx_vsrli_w((__m128i)(_u_), (_c_))))
#define mm_srli_epi64(_u_, _c_) (__m128i)((_c_)<1?(_u_):(__m128i)((_c_)>63?__lsx_vreplgr2vr_d(0):__lsx_vsrli_d((__m128i)(_u_), (_c_))))
#define _mm_srli_si128(_v_, _c_) (__m128i)((_c_)<1?(_v_):(__m128i)((_c_)>15?__lsx_vreplgr2vr_b(0):__lsx_vbsrl_v(_v_, _c_)))
#define mm_srai_epi8( _v_,_c_) (__m128i)((_c_)<1?(_v_):(uint32x4_t)vshrq_n_s8( (int8x16_t)(_v_), (_c_))) // c <= 8 (vshrq_n:1-N)
#define mm_srai_epi16( _v_,_c_) (__m128i)((_c_)<1?(_v_):(uint32x4_t)vshrq_n_s16((int16x8_t)(_v_), (_c_))) // c <= 16
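The guards in the shift macros above exist because vshlq_n_*/vshrq_n_* and __lsx_vslli_*/__lsx_vsrli_* only accept an in-range compile-time constant, while the SSE intrinsics also allow a count of 0 or a count at or above the lane width; with a constant count the compiler folds the select away. A per-lane scalar model of the intended behaviour, as a sketch only (not part of the header, slli_epi32_lane_ref is an illustrative name):

#include <stdint.h>
// Intended SSE-style result for one 32-bit lane of a shift-left-by-immediate.
static inline uint32_t slli_epi32_lane_ref(uint32_t x, int c) {
    if (c < 1)  return x;  // count 0: input returned unchanged (the (_c_)<1 branch)
    if (c > 31) return 0;  // count >= lane width: lane becomes zero (the vdupq_n/vreplgr2vr branch)
    return x << c;         // in-range count: plain shift (the vshlq_n/__lsx_vslli branch)
}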
@@ -335,43 +335,31 @@ static ALWAYS_INLINE __m128i _mm_set_epi64x( uint64_t u1, uint64_t u0) { uint
#define _mm_set1_epi8( _u8_ ) (__m128i)__lsx_vreplgr2vr_b(_u8_)
#define _mm_set1_epi16( _u16_) (__m128i)__lsx_vreplgr2vr_h(_u16_)
#define _mm_set1_epi32( _u32_) __lsx_vreplgr2vr_w(_u32_)
#define _mm_set1_epi32( _u32_) (__m128i)__lsx_vreplgr2vr_w(_u32_)
#define _mm_set1_epi64x(_u64_) (__m128i)__lsx_vreplgr2vr_d(_u64_)
#define _mm_setzero_si128() __lsx_vreplgr2vr_w( 0 )
#define _mm_cvtss_f32(_v_) __lsx_vpickve2gr_s((__m128)(_v_), 0)
#define _mm_cvtss_f32(_v_) __lsx_vpickve2gr_w((__m128)(_v_), 0)
#define _mm_setzero_ps() (__m128)__lsx_vldi(0)
#define _mm_set1_ps(_f32_) (__m128)__lsx_vreplfr2vr_s(_f32_)
#define _mm_set1_ps(_f32_) (__m128)__lsx_vreplgr2vr_w(_f32_)
//---------------------------------------------- Arithmetic -----------------------------------------------------------------------
#define _mm_add_epi8( _u_,_v_) (__m128i)__lsx_vadd_b((__m128i)(_u_), (__m128i)(_v_))
#define _mm_add_epi16( _u_,_v_) (__m128i)__lsx_vadd_h((__m128i)(_u_), (__m128i)(_v_))
#define _mm_add_epi32( _u_,_v_) __lsx_vadd_w( _u_, _v_ )
#define _mm_add_epi32( _u_,_v_) (__m128i)__lsx_vadd_w((__m128i)(_u_), (__m128i)(_v_))
#define _mm_add_epi64( _u_,_v_) (__m128i)__lsx_vadd_d((__m128i)(_u_), (__m128i)(_v_))
#define _mm_sub_epi8( _u_,_v_) (__m128i)__lsx_vsub_b((__m128i)(_u_), (__m128i)(_v_))
#define _mm_sub_epi16( _u_,_v_) (__m128i)__lsx_vsub_h((__m128i)(_u_), (__m128i)(_v_))
#define _mm_sub_epi32( _u_,_v_) (__m128i)__lsx_vsub_w((__m128i)(_u_), (__m128i)(_v_))
#define _mm_sub_epi64( _u_,_v_) (__m128i)__lsx_vsub_d((__m128i)(_u_), (__m128i)(_v_))
#define _mm_subs_epu8( _u_,_v_) (__m128i)__lsx_vsub_bu((__m128i)(_u_), (__m128i)(_v_))
#define _mm_subs_epu8( _u_,_v_) (__m128i)__lsx_vssub_bu((__m128i)(_u_), (__m128i)(_v_))
#define _mm_mullo_epi16(_u_, _v_) (__m128i)__lsx_vmulwev_h_h((__m128i)(_u_), (__m128i)(_v_))
#define _mm_mullo_epi32(_u_,_v_) (__m128i)__lsx_vmulwev_w_w((__m128i)(_u_), (__m128i)(_v_))
#define mm_mullo_epu32(_u_,_v_) (__m128i)__lsx_vmulwev_w_wu((__m128i)(_u_), (__m128i)(_v_))
#define _mm_mullo_epi16(_u_, _v_) (__m128i)__lsx_vmul_h((__m128i)(_u_), (__m128i)(_v_))
#define _mm_mullo_epi32(_u_,_v_) (__m128i)__lsx_vmul_w((__m128i)(_u_), (__m128i)(_v_))
#define mm_mullo_epu32(_u_,_v_) (__m128i)__lsx_vmul_w((__m128i)(_u_), (__m128i)(_v_))
//#define _mm_mulhi_epi16s(_u_,_v_) (__m128i)__lsx_vmulwh_h_h((__m128i)(_u_), (__m128i)(_v_))
static ALWAYS_INLINE __m128i _mm_mulhi_epi16(__m128i u, __m128i v) {
__m128i evens = __lsx_vmulwev_h_w(u, v); // a[0]*b[0], a[2]*b[2], ...
__m128i odds = __lsx_vmulwod_h_w(u, v); // a[1]*b[1], a[3]*b[3], ...
// 2. shift right by 16 to extract the high 16 bits
evens = __lsx_vsrai_w(evens, 16);
odds = __lsx_vsrai_w(odds, 16);
// 3. repack into 16-bit results
__m128i res = __lsx_vpickev_h(odds, evens); // combine the even/odd results
return res;
}
#define _mm_mulhi_epi16s(_u_,_v_) (__m128i)__lsx_vmuh_h((__m128i)(_u_), (__m128i)(_v_))
#define _mm_mulhi_epi16(_u_,_v_) (__m128i)__lsx_vmuh_h((__m128i)(_u_), (__m128i)(_v_))
#define _mm_mul_epu32(_u_, _v_) (__m128i)__lsx_vmulwev_d_wu((__m128i)(_u_), (__m128i)(_v_))
#define _mm_adds_epu16(_u_, _v_) (__m128i)__lsx_vsadd_hu((__m128i)(_u_), (__m128i)(_v_))
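For reference, the __lsx_vmuh_h form above computes, per 16-bit lane, the high half of the full signed 32-bit product, which is what _mm_mulhi_epi16 specifies and what the removed widen/shift/pack sequence computed. A scalar per-lane model for spot-checking (a sketch, not part of the header; mulhi_epi16_lane_ref is an illustrative name):

#include <stdint.h>
// High 16 bits of the signed 16x16 -> 32-bit product, as _mm_mulhi_epi16 defines it.
static inline int16_t mulhi_epi16_lane_ref(int16_t a, int16_t b) {
    return (int16_t)(((int32_t)a * (int32_t)b) >> 16);
}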
@@ -452,34 +440,52 @@ static ALWAYS_INLINE __m128i _mm_madd_epi16(__m128i u, __m128i v) {
#define _mm_cmpgt_epu16(_u_, _v_) (__m128i)__lsx_vslt_hu((__m128i)(_v_), (__m128i)(_u_))
#define mm_cmpgt_epu32(_u_, _v_) (__m128i)__lsx_vslt_wu((__m128i)(_v_), (__m128i)(_u_))
//---------------------------------------------- Load -----------------------------------------------------------------------------
#define _mm_loadl_epi64(_u64p_) (__m128i)__lsx_vldrepl_d(_u64p_, 0) // load and broadcast to the low 64 bits
#define _mm_loadl_epi64(_u64p_) (__m128i)__lsx_vinsgr2vr_d(__lsx_vreplgr2vr_d(0),*(const uint64_t*)(_u64p_), 0) // load 64 bits into the low half, upper half zeroed
#define mm_loadu_epi64p(_u64p_, _u_) (__m128i)__lsx_vinsgr2vr_d((__m128i)(_u_), *(const uint64_t*)(_u64p_), 0)
#define _mm_loadu_si128(_ip_) (__m128i)__lsx_vldx((const __m128i*)(_ip_), 0)
#define _mm_load_si128(_ip_) (__m128i)__lsx_vld((const __m128i*)(_ip_), 0)
#define _mm_loadu_si128(_ip_) (__m128i)__lsx_vld((const int32_t*)(_ip_), 0)
#define _mm_load_si128(_ip_) (__m128i)__lsx_vld((const int32_t*)(_ip_), 0)
#define _mm_load_ps(_ip_) (__m128)__lsx_vld((const float*)(_ip_), 0)
#define _mm_loadu_ps(_ip_) (__m128)__lsx_vldx((const float*)(_ip_), 0)
#define _mm_load1_ps(_ip_) (__m128)__lsx_vreplfr2vr_s(*(const float*)(_ip_))
#define _mm_loadl_pi(_u_, _ip_) (__m128)__lsx_vpickve2gr_w((__m128)(_u_), 1); (__m128)__lsx_vinsgr2vr_w((__m128)(_u_), *(const float*)(_ip_), 0)
#define _mm_loadh_pi(_u_, _ip_) (__m128)__lsx_vpickve2gr_w((__m128)(_u_), 0); (__m128)__lsx_vinsgr2vr_w((__m128)(_u_), *(const float*)(_ip_), 1)
#define _mm_loadu_ps(_ip_) (__m128)__lsx_vld((const float*)(_ip_), 0)
#define _mm_load1_ps(_ip_) (__m128)__lsx_vreplgr2vr_w(*(const float*)(_ip_))
#define _mm_loadl_pi(_u_, _ip_) (__m128)__lsx_vinsgr2vr_d((__m128i)(_u_), *(const uint64_t*)(_ip_), 0) // load 64 bits (two floats) into the low half, keep the high half
#define _mm_loadh_pi(_u_, _ip_) (__m128)__lsx_vinsgr2vr_d((__m128i)(_u_), *(const uint64_t*)(_ip_), 1) // load 64 bits (two floats) into the high half, keep the low half
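_mm_loadl_pi and _mm_loadh_pi copy two packed floats (64 bits) from memory into the low or high half of the destination and leave the other half intact. A scalar model of that contract over a plain float[4], as a sketch only (loadl_pi_ref/loadh_pi_ref are illustrative names, not part of the header):

#include <string.h>
// _mm_loadl_pi: dst lanes 0..1 <- mem[0..1], lanes 2..3 preserved.
static inline void loadl_pi_ref(float dst[4], const float mem[2]) {
    memcpy(dst, mem, 2 * sizeof(float));
}
// _mm_loadh_pi: dst lanes 2..3 <- mem[0..1], lanes 0..1 preserved.
static inline void loadh_pi_ref(float dst[4], const float mem[2]) {
    memcpy(dst + 2, mem, 2 * sizeof(float));
}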
//---------------------------------------------- Store ----------------------------------------------------------------------------
#define _mm_storel_epi64(_ip_, _u_) __lsx_vstelm_d((__m128i)(_u_), (uint64_t*)(_ip_), 0, 0)
#define _mm_storeu_si128(_ip_, _u_) __lsx_vstx((__m128i)(_u_), (__m128i*)(_ip_), 0)
#define _mm_storeu_si128(_ip_, _u_) __lsx_vstx((__m128i)(_u_), (int32_t*)(_ip_), 0)
#define _mm_store_ps(_ip_, _u_) __lsx_vst((float32x4_t)(_u_), (float*)(_ip_), 0)
#define _mm_storeu_ps(_ip_, _u_) __lsx_vstx((float32x4_t)(_u_), (float*)(_ip_), 0)
#define _mm_store_ss(_ip_, _u_) __lsx_vstelm_w((__m128)(_u_), (float*)(_ip_), 0, 0)
#define _mm_store_ps(_ip_, _u_) __lsx_vst((v4f32)(_u_), (float*)(_ip_), 0)
#define _mm_storeu_ps(_ip_, _u_) __lsx_vstx((v4f32)(_u_), (float*)(_ip_), 0)
#define _mm_store_ss(_ip_, _u_) __lsx_vstelm_w((__m128i)(_u_), (float*)(_ip_), 0, 0)
//---------------------------------------------- Convert --------------------------------------------------------------------------
#define mm_cvtsi64_si128p(_u64p_, _u_) (__m128i)__lsx_vinsgr2vr_d((__m128i)(_u_), *(const uint64_t*)(_u64p_), 0)
#define mm_cvtsi64_si128p(_u64p_,_v_) mm_loadu_epi64p(_u64p_,_v_)
#define _mm_cvtsi64_si128(_u_) (__m128i)__lsx_vreplgr2vr_d(_u_)
//---------------------------------------------- Reverse bits/bytes ---------------------------------------------------------------
#define mm_rbit_epi8(_v_) (__m128i)__lsx_vbitrev_b((__m128i)(_v_), (__m128i)(_v_))
static ALWAYS_INLINE __m128i mm_rbit_epi8(__m128i _v_) {
uint64_t low_src = __lsx_vpickve2gr_du(_v_, 0);
uint64_t low = 0;
asm volatile(
"bitrev.8b %[out], %[in]\n\t"
:[out]"+r"(low)
:[in]"r"(low_src)
);
uint64_t high_src = __lsx_vpickve2gr_du(_v_, 1);
uint64_t high = 0;
asm volatile(
"bitrev.8b %[out], %[in]\n\t"
:[out]"+r"(high)
:[in]"r"(high_src)
);
return __lsx_vinsgr2vr_d(__lsx_vinsgr2vr_d(__lsx_vreplgr2vr_w(0),low,0),high,1);
}
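mm_rbit_epi8 reverses the bit order inside every byte by running each 64-bit half of the vector through the scalar bitrev.8b instruction. A portable per-byte reference for cross-checking the inline-asm path (a sketch, not part of the header; rbit8_ref is an illustrative name):

#include <stdint.h>
// Reverse the bit order within one byte, e.g. 0x01 -> 0x80, 0x0F -> 0xF0.
static inline uint8_t rbit8_ref(uint8_t b) {
    b = (uint8_t)(((b & 0xF0) >> 4) | ((b & 0x0F) << 4)); // swap nibbles
    b = (uint8_t)(((b & 0xCC) >> 2) | ((b & 0x33) << 2)); // swap bit pairs
    b = (uint8_t)(((b & 0xAA) >> 1) | ((b & 0x55) << 1)); // swap adjacent bits
    return b;
}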
#define mm_rev_epi16(_v_) (__m128i)__lsx_vshuf_b((__m128i)(_v_), (__m128i)(_v_), (__m128i){0x0100, 0x0302, 0x0504, 0x0706, 0x0908, 0x0B0A, 0x0D0C, 0x0F0E})
#define mm_rev_epi32(_v_) (__m128i)__lsx_vshuf_b((__m128i)(_v_), (__m128i)(_v_), (__m128i){0x03020100, 0x07060504, 0x0B0A0908, 0x0F0E0D0C})
#define mm_rev_epi64(_v_) (__m128i)__lsx_vshuf_b((__m128i)(_v_), (__m128i)(_v_), (__m128i){0x0706050403020100, 0x0F0E0D0C0B0A0908})
//--------------------------------------------- Insert/extract --------------------------------------------------------------------
#define mm_extract_epi32x(_u_, _u32_, _id_) (*(uint32_t*)&(_u32_) = __lsx_vpickve2gr_wu((__m128i)(_u_), (_id_))
#define _mm_extract_epi64x(_u_, _u64_, _id_) (*(uint64_t*)&(_u64_) = __lsx_vpickve2gr_du((__m128i)(_u_), (_id_))
#define mm_extract_epi32x(_u_, _u32_, _id_) __lsx_vstelm_w((__m128i)(_u_), (uint32_t*)&(_u32_), 0, (_id_))
#define _mm_extract_epi64x(_u_, _u64_, _id_) __lsx_vstelm_d((__m128i)(_u_), (uint64_t*)&(_u64_), 0, (_id_))
#define _mm_extract_epi8(_u_, _id_) __lsx_vpickve2gr_b((__m128i)(_u_), (_id_))
#define _mm_extract_epi16(_u_, _id_) __lsx_vpickve2gr_h((__m128i)(_u_), (_id_))
@@ -490,48 +496,45 @@ static ALWAYS_INLINE __m128i _mm_madd_epi16(__m128i u, __m128i v) {
#define _mm_insert_epu32p(_u_, _u32p_, _id_) (__m128i)__lsx_vinsgr2vr_w((__m128i)(_u_), *(const uint32_t*)(_u32p_), (_id_))
#define mm_insert_epi32p(_u_, _u32p_, _id_) (__m128i)__lsx_vinsgr2vr_w((__m128i)(_u_), *(const int32_t*)(_u32p_), (_id_))
#define _mm_cvtsi32_si128(_x_) (__m128i)__lsx_vinsgr2vr_w(__lsx_vldi(0), (_x_), 0)
#define _mm_cvtsi32_si128(_x_) (__m128i)__lsx_vinsgr2vr_w(__lsx_vreplgr2vr_w(0), (_x_), 0)
#define _mm_blendv_epi8(_u_, _v_, _m_) (__m128i)__lsx_vbitsel_v((__m128i)(_u_), (__m128i)(_v_), (__m128i)(_m_))
//---------------------------------------------- Miscellaneous --------------------------------------------------------------------
#define _mm_alignr_epi8(_u_, _v_, _m_) (__m128i)__lsx_vshuf_b((__m128i)(_v_), (__m128i)(_u_), (__m128i){_m_,_m_+1,_m_+2,_m_+3,_m_+4,_m_+5,_m_+6,_m_+7, _m_+8,_m_+9,_m_+10,_m_+11,_m_+12,_m_+13,_m_+14,_m_+15})
#define _mm_packs_epi16(_u_, _v_) (__m128i)__lsx_vpickev_b(__lsx_vssrlrni_b_h((__m128i)(_v_), (__m128i)(_u_), 0), __lsx_vssrlrni_b_h((__m128i)(_v_), (__m128i)(_u_), 0))
#define _mm_packs_epi32(_u_, _v_) (__m128i)__lsx_vpickev_h(__lsx_vssrlrni_h_w((__m128i)(_v_), (__m128i)(_u_), 0), __lsx_vssrlrni_h_w((__m128i)(_v_), (__m128i)(_u_), 0))
#define _mm_packs_epi16(_u_, _v_) (__m128i)__lsx_vilvl_d(__lsx_vssrlrni_b_h((__m128i)(_u_), (__m128i)(_v_), 0), __lsx_vssrlrni_b_h((__m128i)(_v_), (__m128i)(_u_), 0))
#define _mm_packs_epi32(_u_, _v_) (__m128i)__lsx_vilvl_d(__lsx_vssrlrni_h_w((__m128i)(_u_), (__m128i)(_v_), 0), __lsx_vssrlrni_h_w((__m128i)(_v_), (__m128i)(_u_), 0))
#define _mm_packs_epu16(_u_, _v_) (__m128i)__lsx_vilvl_b((__m128i)(_v_), (__m128i)(_u_))
#define _mm_packus_epi16(_u_, _v_) (__m128i)__lsx_vpickev_b(__lsx_vssrlni_bu_h((__m128i)(_v_), (__m128i)(_u_), 0), __lsx_vssrlni_bu_h((__m128i)(_v_), (__m128i)(_u_), 0))
/* static ALWAYS_INLINE uint16_t _mm_movemask_epi8(__m128i v) { */
/* const __m128i zero = __lsx_vldi(0); */
/* const __m128i mask = __lsx_vldi(0x0102040810204080); */
/* __m128i signs = __lsx_vsrli_b(v, 7); // extract the sign bit into bit 0 */
/* __m128i masked = __lsx_vand_v(signs, mask); // apply the bit weights */
/* __m128i sum = __lsx_vhaddw_wu_hu(__lsx_vhaddw_hu_bu(masked, zero), zero); */
/* return __lsx_vpickve2gr_hu(sum, 0) & 0xFFFF; */
/* } */
#define _mm_packs_epu16(_u_, _v_) (__m128i)__lsx_vilvl_d((__m128i)(_v_), (__m128i)(_u_))
#define _mm_packus_epi16(_u_, _v_) (__m128i)__lsx_vilvl_d(__lsx_vssrlni_bu_h((__m128i)(_u_), (__m128i)(_v_), 0), __lsx_vssrlni_bu_h((__m128i)(_v_), (__m128i)(_u_), 0))
static ALWAYS_INLINE uint16_t _mm_movemask_epi8(__m128i v) {
// Step 1: extract the top bit (sign bit) of every byte
__m128i signs = __lsx_vsrli_b(v, 7); // logical right shift by 7: sign bit becomes bit 0
__m128i signs = __lsx_vsrai_b(v, 7); // arithmetic right shift by 7: keep the sign bit (each byte becomes 0x00 or 0xFF)
// Step 2: build the bit-weight mask (LSB-first: 0x01, 0x02, 0x04, ...)
const __m128i mask = __lsx_vld((void*)(uint64_t[]){0x0102040810204080}, 0);
static const uint8_t mask_data[16] = {
0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, // low 8 bytes
0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80 // high 8 bytes
};
__m128i mask = __lsx_vld((const void*)mask_data, 0); // load the mask from memory
// Step 3: apply the bit mask
__m128i masked = __lsx_vand_v(signs, mask);
// Step 4: horizontal pairwise adds (8-bit → 16-bit → 32-bit → 64-bit)
__m128i sum16 = __lsx_vhaddw_hu_bu(masked, __lsx_vldi(0));
__m128i sum32 = __lsx_vhaddw_wu_hu(sum16, __lsx_vldi(0));
__m128i sum16 = __lsx_vhaddw_hu_bu(masked, masked);
__m128i sum32 = __lsx_vhaddw_wu_hu(sum16, sum16);
__m128i sum64 = __lsx_vhaddw_du_wu(sum32, sum32);
// Step 5: extract the 16-bit result
return __lsx_vpickve2gr_hu(sum32, 0) & 0xFFFF;
return (uint16_t)__lsx_vpickve2gr_bu(sum64, 0) | (((uint16_t)__lsx_vpickve2gr_bu(sum64, 8)) << 8);
}
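_mm_movemask_epi8 packs the sign bit of each of the 16 bytes into bit positions 0..15 of the result, bit i coming from byte i. A scalar reference over a plain byte array, useful for validating the LSX sequence above (a sketch; movemask_epi8_ref is an illustrative name, not part of the header):

#include <stdint.h>
// Bit i of the result is bit 7 (the sign bit) of byte i, for i = 0..15.
static inline uint16_t movemask_epi8_ref(const uint8_t b[16]) {
    uint16_t m = 0;
    for (int i = 0; i < 16; i++)
        m |= (uint16_t)(((b[i] >> 7) & 1u) << i);
    return m;
}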
//-------- Neon movemask ------ All lanes must be 0 or -1 (=0xff, 0xffff or 0xffffffff)
static ALWAYS_INLINE uint8_t mm_movemask_epi8s(__m128i sv) {
const __m128i mask = __lsx_vldi(0x0102040810204080);
static const uint64_t mask_data[2] = {0x0102040810204080ULL, 0x0102040810204080ULL};
const __m128i mask = __lsx_vld((const void*)mask_data, 0);
__m128i tmp = __lsx_vand_v(sv, mask);
tmp = __lsx_vhaddw_hu_bu(tmp, __lsx_vldi(0));
tmp = __lsx_vhaddw_wu_hu(tmp, __lsx_vldi(0));
@@ -539,7 +542,8 @@ static ALWAYS_INLINE uint8_t mm_movemask_epi8s(__m128i sv) {
}
static ALWAYS_INLINE uint16_t mm_movemask_epu16(__m128i v) {
const __m128i mask = __lsx_vldi(0x0102040810204080);
static const uint64_t mask_data[2] = {0x0102040810204080ULL, 0x0102040810204080ULL};
const __m128i mask = __lsx_vld((const void*)mask_data, 0);
__m128i tmp = __lsx_vand_v(v, mask);
tmp = __lsx_vhaddw_wu_hu(tmp, __lsx_vldi(0));
return (uint16_t)__lsx_vpickve2gr_d(__lsx_vhaddw_du_wu(tmp, __lsx_vldi(0)), 0);
@@ -547,18 +551,20 @@ static ALWAYS_INLINE uint16_t mm_movemask_epu16(__m128i v) {
static ALWAYS_INLINE uint32_t mm_movemask_epu32(__m128i v) {
// 1. load the bit-weight mask constants (0x00000001, 0x00000002, 0x00000004, 0x00000008)
const __m128i mask = __lsx_vldi(0x0000000100000002);
__lsx_vinsgr2vr_d(mask, 0x0000000400000008, 1); // set the high 64 bits of the mask
static const uint32_t mask_data[4] = {
0x00000001, 0x00000002, 0x00000004, 0x00000008
};
__m128i mask = __lsx_vld((const void*)mask_data, 0); // load the 4 x 32-bit mask
// 2. apply the bit mask
__m128i masked = __lsx_vand_v(v, mask);
// 3. horizontal adds
__m128i sum2 = __lsx_vhaddw_du_wu(masked, __lsx_vldi(0)); // 4x32 -> 2x64
__m128i sum1 = __lsx_vhaddw_qu_du(sum2, __lsx_vldi(0)); // 2x64 -> 1x128
__m128i sum1 = __lsx_vhaddw_du_wu(masked, masked); // 4x32 -> 2x64
__m128i sum2 = __lsx_vhaddw_qu_du(sum1, sum1); // 2x64 -> 1x128
// 4. extract the result
return (uint32_t)__lsx_vpickve2gr_d(sum1, 0);
return (uint32_t)__lsx_vpickve2gr_b(sum2, 0);
}
static ALWAYS_INLINE uint64_t mm_movemask_epu64(__m128i v) {
@@ -577,10 +583,10 @@ static ALWAYS_INLINE uint64_t mm_movemask_epu64(__m128i v) {
#define mm_shuffle_nnnn_epi32(_v_, _m_) (__m128i)__lsx_vreplvei_w((__m128i)(_v_), (_m_))
#ifdef USE_MACROS
#define mm_shuffle_2031_epi32(_u_) ({__m128i rev = __lsx_vshuf4i_w(v, 0x1B); __lsx_vshuf4i_w(rev, 0xD8);})
#define mm_shuffle_3120_epi32(_u_) __lsx_vshuf4i_w(v, 0xD8)
#define mm_shuffle_2031_epi32(_u_) (__m128i)__lsx_vshuf4i_w((_u_), 0x8D)
#define mm_shuffle_3120_epi32(_u_) (__m128i)__lsx_vshuf4i_w((_u_), 0xD8)
#else
static ALWAYS_INLINE __m128i mm_shuffle_2031_epi32(__m128i v) {__m128i rev = __lsx_vshuf4i_w(v, 0x1B); return __lsx_vshuf4i_w(rev, 0xD8);}
static ALWAYS_INLINE __m128i mm_shuffle_2031_epi32(__m128i v) {return __lsx_vshuf4i_w(v, 0x8D);}
static ALWAYS_INLINE __m128i mm_shuffle_3120_epi32(__m128i v) {return __lsx_vshuf4i_w(v, 0xD8);}
#endif
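Reading the 2031/3120 names in the usual high-to-low lane order, mm_shuffle_2031_epi32 should yield lanes {1, 3, 0, 2} and mm_shuffle_3120_epi32 lanes {0, 2, 1, 3} when counting from the lowest word, which is what the vshuf4i_w immediates 0x8D and 0xD8 select. A small dump helper to verify that reading (a sketch; assumes <lsxintrin.h> and the macros above are in scope, and that __m128i is the usual 2 x 64-bit vector type):

#include <stdio.h>
#include <lsxintrin.h>
// Print the four 32-bit lanes of a vector, lowest word first.
static void dump_lanes(const char *name, __m128i w) {
    printf("%s: %d %d %d %d\n", name,
           __lsx_vpickve2gr_w(w, 0), __lsx_vpickve2gr_w(w, 1),
           __lsx_vpickve2gr_w(w, 2), __lsx_vpickve2gr_w(w, 3));
}
// Example:
//   __m128i in = (__m128i){0x0000000100000000LL, 0x0000000300000002LL}; // lanes {0,1,2,3}
//   dump_lanes("2031", mm_shuffle_2031_epi32(in)); // expect "2031: 1 3 0 2"
//   dump_lanes("3120", mm_shuffle_3120_epi32(in)); // expect "3120: 0 2 1 3"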

View File

@@ -87,7 +87,7 @@ ifeq ($(ARCH),loongarch64)
endif
CFLAGS+=$(DEBUG) $(OPT)
#CFLAGS+=-Wno-macro-redefined -Wno-incompatible-pointer-types -Wno-tautological-constant-out-of-range-compare -Wno-discarded-qualifiers
CFLAGS+=-w -Wall -pedantic -Wno-macro-redefined -Wno-incompatible-pointer-types
CFLAGS+=-w -Wall -pedantic -Wno-macro-redefined -Wno-incompatible-pointer-types -save-temps
CXXFLAGS+=-w
#-Wall -Wincompatible-pointer-types
ifeq ($(OS),$(filter $(OS),Linux GNU/kFreeBSD GNU OpenBSD FreeBSD DragonFly NetBSD MSYS_NT Haiku))