diff --git a/ext/beplugc_.c b/ext/beplugc_.c
index d5a4cc6..e11939b 100644
--- a/ext/beplugc_.c
+++ b/ext/beplugc_.c
@@ -79,7 +79,7 @@
 #endif
 //encode(void *encoded, size_t encoded_buffer_length, const integer *source, size_t source_integers)
 #if C_QMX
-    case P_QMX: { JASS::compress_integer_qmx_improved qmx; unsigned r=qmx.encode(out+4, outsize, (uint32_t *)in, (size_t)n); ctou32(out)=r; return out+4+r; }
+    case P_QMX: { JASS::compress_integer_qmx_improved qmx; unsigned r=qmx.encode(out+4, outsize, (uint32_t *)in, n); ctou32(out)=r; return out+4+r; }
 #endif
 
 #if C_SIMDCOMP
diff --git a/ext/beplugcs_.c b/ext/beplugcs_.c
index cd89e71..577101e 100644
--- a/ext/beplugcs_.c
+++ b/ext/beplugcs_.c
@@ -30,7 +30,7 @@
 #endif
 
 #if C_QMX
-    case P_QMX: { bitdienc32( in+1, --n, pa, in[0], mdelta); vbxput32(out, in[0]); JASS::compress_integer_qmx_improved qmx; unsigned r=qmx.encode(out+4, outsize, (uint32_t *)pa, (uint64_t)n); return out+4+r; }
+    case P_QMX: { bitdienc32( in+1, --n, pa, in[0], mdelta); vbxput32(out, in[0]); JASS::compress_integer_qmx_improved qmx; if(n) { unsigned r=qmx.encode(out+4, outsize, (uint32_t *)pa, n); ctou32(out) = r; return out+4+r; } return out; }
 #endif
 
 #if C_SIMDCOMP
diff --git a/ext/beplugds_.c b/ext/beplugds_.c
index 39d96b9..2550b15 100644
--- a/ext/beplugds_.c
+++ b/ext/beplugds_.c
@@ -28,7 +28,7 @@
     bitdidec32(out, n, -mdelta, mdelta); break;*/
 #endif
 #if C_QMX
-    case P_QMX: { vbxget32(in, x); *out = x; unsigned l = *(unsigned *)in; JASS::compress_integer_qmx_improved qmx; qmx.decode(out+1, n-1, in+4, ctou32(in)); bitdidec32(out+1, n-1, x, mdelta); return in+4+ctou32(in);}
+    case P_QMX: { vbxget32(in, x); *out = x; if(n>1) { unsigned l = *(unsigned *)in; JASS::compress_integer_qmx_improved qmx; qmx.decode(out+1, n-1, in+4, l); bitdidec32(out+1, n-1, x, mdelta); return in+=4+l; } return in; }
 #endif
 
 #if C_SIMDCOMP
diff --git a/ext/beplugr_.h b/ext/beplugr_.h
index 4681787..a75a970 100644
--- a/ext/beplugr_.h
+++ b/ext/beplugr_.h
@@ -14,7 +14,7 @@
     { P_STREAMVBYTE,  "StreamVbyte",     C_STREAMVBYTE, 0, 0,"","Variable byte SIMD" },
 
     { FP_FASTPFOR,    "FP_FastPFor",     C_FASTPFOR, 0, 0,"","PFor scalar (inefficient for small blocks)" },
-    { FP_SIMDFASTPFOR,"FP_SimdFastPFor", C_FASTPFOR, 0, 0,"","PFor SIMD (inefficient for small blocks)" },
+    { FP_SIMDFASTPFOR,"FP_SimdFastPFor", C_FASTPFOR, 0, 0,"","PFor SIMD (inefficient for small blocks)" },
     { FP_OPTPFOR,     "FP_OptPFor",      C_FASTPFOR, 0, 0,"","OptPFor scalar" },
     { FP_SIMDOPTPFOR, "FP_SIMDOptPFor",  C_FASTPFOR, 0, 0,"","OptPFor SIMD" },
     { FP_VBYTE,       "FP_VByte",        C_FASTPFOR, 0, 0,"","Variable byte" },
@@ -23,7 +23,7 @@
     { SC_SIMDPACK128, "SC_SIMDPack128",  C_SIMDCOMP, BLK_V128,0,"","Bit packing (SSE4.1)"},
     { SC_SIMDPACK256, "SC_SIMDPack256",  C_SIMDCOMP, BLK_V256,0,"","Bit packing (SSE4.1)"},
 
-    { SC_FOR,         "SC_For",          C_SIMDCOMP, BLK_V128,0,"","For (SSE4.1)"},
+    { SC_FOR,         "SC_For",          C_SIMDCOMP, BLK_V128,0,"","For (SSE4.1)"},
     { SC_FORDA,       "SC_ForDA",        C_SIMDCOMP, BLK_V128,0,"","For direct access (SSE4.1)"},
 
   //{ CL_FASTPFORD1,  "CL.SIMDPFORD1",   C_SIMDCOMP, "", 0,BLK_V128},
@@ -40,9 +40,9 @@
     { LI_HORPACK,     "LI_HorPack",      C_LITTLEPACK, 0, 0,"","Bit packing (sse4.1 horizontal)" },
     { LI_BMIPACK,     "LI_BMIPack256",   C_LITTLEPACK, 0, 0,"","Bit packing (avx2)" },
 
-    { P_QMX,          "qmx",             C_QMX, 0, 0,"","QMX SIMD (inefficient for small blocks)" },
+    { P_QMX,          "qmx",             C_QMX, 0, 0,"","QMX SIMD (inefficient for small blocks)" },
   //-------- lz77 + [delta] + transpose/shuffle ---------------
-    { P_LZT,          "LzTurbo",         C_LZTURBO, BLK_SIZE, 0, "20,21,22,32" },
+    { P_LZT,          "LzTurbo",         C_LZTURBO, BLK_SIZE, 0, "20,21,22,32" },
     { P_VSHUF,        "VSimpleANS",      C_LZTURBO, BLK_SIZE, 0, "20,21,22,32" },
 
     { LZ4_,           "lz4",             C_LZ4, BLK_SIZE, 0, "", "lz4" },
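
For reference, a minimal standalone sketch (not part of the patch) of the guarded QMX block encode/decode round-trip these hunks implement. It assumes the JASS interface given in the //encode comment in beplugc_.c, i.e. encode(void *dst, size_t dst_len, const uint32_t *src, size_t n) returning the payload byte count, and decode(uint32_t *dst, size_t n, const void *src, size_t src_len). The header path, buffer sizing and the memcpy-based 4-byte length header are illustrative assumptions standing in for the plugin's ctou32/vbxput32 macros.

// Hypothetical sketch: zero-length guard around JASS QMX encode/decode,
// mirroring the if(n) / if(n>1) checks added above.
#include <cstdint>
#include <cstring>
#include "compress_integer_qmx_improved.h"   // assumed JASS header location

// Encode n integers; write a 4-byte payload-length header followed by the QMX payload.
static unsigned char *qmx_put(const uint32_t *in, size_t n, unsigned char *out, size_t outsize) {
  JASS::compress_integer_qmx_improved qmx;
  if (!n) return out;                              // guard: never call QMX on an empty block
  uint32_t r = (uint32_t)qmx.encode(out + 4, outsize - 4, in, n);
  memcpy(out, &r, 4);                              // length header (ctou32(out) = r in the patch)
  return out + 4 + r;
}

// Decode n integers written by qmx_put; return the position after the consumed bytes.
static const unsigned char *qmx_get(const unsigned char *in, size_t n, uint32_t *out) {
  JASS::compress_integer_qmx_improved qmx;
  if (!n) return in;                               // symmetric guard on the decode side
  uint32_t r;
  memcpy(&r, in, 4);                               // read the payload length
  qmx.decode(out, n, in + 4, r);
  return in + 4 + r;
}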