TurboRLE: decode

This commit is contained in:
x
2019-12-21 14:06:33 +01:00
parent 2fb3f27263
commit dd18fa7f5d

354
trled.c
View File

@ -24,7 +24,7 @@
TurboRLE - "Most efficient and fastest Run Length Encoding"
**/
#ifndef USIZE
#include <string.h>
#include <string.h>
#ifdef __AVX2__
#include <immintrin.h>
#elif defined(__AVX__)
@ -53,213 +53,213 @@
#endif
#include "trle.h"
#include "trle_.h"
#include "trle_.h"
//------------------------------------- RLE 8 with Escape char ------------------------------------------------------------------
//#define MEMSAFE
#define SRLE8 32
#define SRLE8 32
#define USIZE 8
#include "trled.c"
#if SRLE8
#define rmemset8(_op_, _c_, _i_) while(_i_--) *_op_++ = _c_
// Decode an escape-coded RLE8 stream: literal bytes are copied verbatim; the
// escape byte 'e' introduces a variable-length run count (vlget32) followed,
// for long runs, by the byte to repeat.
//   in     : compressed payload (escape byte already consumed by the caller)
//   out    : destination buffer, receives exactly 'outlen' bytes
//   e      : escape byte chosen by the encoder
// Returns the number of input bytes consumed.
// NOTE(review): diff-extraction duplicates removed; likely(r) >= 3 regrouped to
// likely(r >= 3) so the branch hint covers the comparison, not just r.
unsigned _srled8(const unsigned char *__restrict in, unsigned char *__restrict out, unsigned outlen, unsigned char e) {
  const uint8_t *ip = (const uint8_t *)in;          // const-correct: input is never written
  uint8_t *op = out, c;
  uint32_t r;
  #ifdef __AVX2__
  __m256i ev = _mm256_set1_epi8(e);                 // escape byte broadcast for SIMD compare
  #elif defined(__SSE__)
  __m128i ev = _mm_set1_epi8(e);
  #endif
  if(outlen >= SRLE8)
    while(op < out+(outlen-SRLE8)) {                // fast path: SRLE8 bytes of slack for unguarded stores
      #if defined(__AVX2__) || defined(__SSE__) //|| defined(__ARM_NEON)
      uint32_t mask;
      #ifdef __AVX2__
      __m256i v = _mm256_loadu_si256((const __m256i*)ip); _mm256_storeu_si256((__m256i *)op, v); mask = _mm256_movemask_epi8(_mm256_cmpeq_epi8(v, ev)); if(mask) goto a; op += 32; ip += 32;
      #elif defined(__SSE__)
      __m128i v = _mm_loadu_si128((const __m128i*)ip); _mm_storeu_si128((__m128i *)op, v); mask = _mm_movemask_epi8(_mm_cmpeq_epi8(v, ev)); if(mask) goto a; ip += 16; op += 16;
      #if SRLE8 >= 32
      v = _mm_loadu_si128((const __m128i*)ip); _mm_storeu_si128((__m128i *)op, v); mask = _mm_movemask_epi8(_mm_cmpeq_epi8(v, ev)); if(mask) goto a; ip += 16; op += 16;
      #endif
      #endif
      PREFETCH(ip+512, 0);
      continue;
      a: r = ctz32(mask); ip += r+1; PREFETCH(ip+512, 0); // r literals already stored; skip the escape byte
      op += r;
      #else
      if(likely((c = *ip++) != e)) { *op++ = c; continue; } // scalar fallback: copy literal
      #endif
      vlget32(ip, r);                               // variable-length run count
      if(likely(r >= 3)) {                          // long run: the byte to repeat follows
        uint8_t rc = *ip++;
        r = r-3+4;                                  // codes 0..2 are reserved; real length starts at 4
        rmemset(op, rc, r);
      } else { r++;                                 // short run of the escape byte itself
        rmemset8(op, e, r);
      }
    }
  while(op < out+outlen)                            // safe tail: byte-at-a-time, bounded memset
    if(likely((c = *ip++) != e)) {
      *op++ = c;
    } else {
      vlget32(ip, r);
      if(likely(r >= 3)) {
        c = *ip++;
        r = r-3+4;
        rmemset8(op, c, r);
      } else { r++;
        rmemset8(op, e, r);
      }
    }
  return ip - in;
}
// Decode an escape-coded RLE8 stream with an implicit run byte 'ix': the low
// bit of the run code selects between an explicit repeat byte in the stream
// (bit set) and the implicit byte stored once in the block header (bit clear).
//   in, out, outlen, e : as for _srled8
//   ix                 : implicit (most frequent) run byte from the header
// Returns the number of input bytes consumed.
// NOTE(review): diff-extraction duplicates removed; likely(r) >= 3 regrouped.
static inline unsigned _srled8x(const unsigned char *__restrict in, unsigned char *__restrict out, unsigned outlen, unsigned char e, unsigned char ix) {
  const uint8_t *ip = (const uint8_t *)in;          // const-correct: input is never written
  uint8_t *op = out, c;
  uint32_t r;
  #ifdef __AVX2__
  __m256i ev = _mm256_set1_epi8(e);                 // escape byte broadcast for SIMD compare
  #elif defined(__SSE__)
  __m128i ev = _mm_set1_epi8(e);
  #elif defined(__ARM_NEON)
  uint8x8_t ev = vdup_n_u8(e);
  #endif
  if(outlen >= SRLE8)
    while(op < out+(outlen-SRLE8)) {                // fast path: SRLE8 bytes of slack for unguarded stores
      #if defined(__AVX2__) || defined(__SSE__) //|| defined(__ARM_NEON)
      uint32_t mask;
      #ifdef __AVX2__
      __m256i v = _mm256_loadu_si256((const __m256i*)ip); _mm256_storeu_si256((__m256i *)op, v); mask = _mm256_movemask_epi8(_mm256_cmpeq_epi8(v, ev)); if(mask) goto a; op += 32; ip += 32;
      #elif defined(__SSE__)
      __m128i v = _mm_loadu_si128((const __m128i*)ip); _mm_storeu_si128((__m128i *)op, v); mask = _mm_movemask_epi8(_mm_cmpeq_epi8(v, ev)); if(mask) goto a; ip += 16; op += 16;
      #if SRLE8 >= 32
      v = _mm_loadu_si128((const __m128i*)ip); _mm_storeu_si128((__m128i *)op, v); mask = _mm_movemask_epi8(_mm_cmpeq_epi8(v, ev)); if(mask) goto a; ip += 16; op += 16;
      #endif
      #endif
      PREFETCH(ip+512, 0);
      continue;
      a: r = ctz32(mask); ip += r+1; PREFETCH(ip+512, 0); // r literals already stored; skip the escape byte
      op += r;
      #else
      if(likely((c = *ip++) != e)) { *op++ = c; continue; } // scalar fallback: copy literal
      #endif
      vlget32(ip, r);
      int f = r&1; r >>= 1;                         // low bit: 1 = explicit repeat byte follows
      if(likely(r >= 3)) {
        uint8_t y = ip[0], rc = f?y:ix; ip += f;    // consume the byte only when explicit
        r = r-3+4;
        rmemset(op, rc, r);
      } else { r++; rmemset8(op, e, r);             // short run of the escape byte itself
      }
    }
  while(op < out+outlen)                            // safe tail: byte-at-a-time
    if(likely((c = *ip++) != e)) {
      *op++ = c;
    } else {
      vlget32(ip, r);
      int f = r&1; r >>= 1;
      if(likely(r >= 3)) {
        uint8_t rc = f?ip[0]:ix; ip += f;
        r = r-3+4;
        rmemset(op, rc, r);
      } else { r++;
        rmemset8(op, e, r);
      }
    }
  return ip - in;
}
#endif
// Decode a single-escape srle block: in[0] is the escape byte, the rest is the
// escape-coded payload.  Returns input bytes consumed (excluding the header byte).
unsigned _srled(const unsigned char *__restrict in, unsigned char *__restrict out, unsigned outlen) {
  return _srled8(in+1, out, outlen, *in);
}
// Top-level srle decoder; the two trivial framings are handled inline:
//   inlen == outlen : stored block, plain copy (no compression was possible)
//   inlen == 1      : whole output is one repeated byte
//   otherwise       : in[0] = escape byte, in[1] = implicit run byte, payload follows
// Returns inlen (bytes of input consumed).  Unused local 'l' removed.
unsigned srled(const unsigned char *__restrict in, unsigned inlen, unsigned char *__restrict out, unsigned outlen) {
  if(inlen == outlen)
    memcpy(out, in, outlen);                        // no compression
  else if(inlen == 1)
    memset(out, in[0], outlen);                     // only 1 char
  else
    _srled8x(in+2, out, outlen, in[0], in[1]);      //AS((ip-in) == inlen,"FatalI l>inlen %d ", (ip-in) - inlen);
  return inlen;
}
//------------------------------------- TurboRLE ------------------------------------------
// Decode a TurboRLE block.  Header layout: first byte c = number of escape
// bitmap bytes (0 => fall back to single-escape srle), then c bitmap bytes
// marking which byte values act as escapes, then 'ix' the implicit run byte.
// rmap[] maps each byte value to its escape rank (0 = literal).
// Returns the number of input bytes consumed.
// NOTE(review): diff-extraction duplicates removed (including a duplicated
// __WORDSIZE==64 region that shadowed the 32-bit #else path); ip made const.
unsigned _trled(const unsigned char *__restrict in, unsigned char *__restrict out, unsigned outlen) {
  uint8_t rmap[256] = {0}, *op = out;
  const uint8_t *ip = (const uint8_t *)in;          // const-correct: input is never written
  unsigned m = 0, i, c;
  if(!outlen)
    return 0;
  if(!(c = *ip++))                                  // no escape bitmap: single-escape fallback
    return _srled8(ip+1, out, outlen, *ip)+2;
  for(i = 0; i != c; i++) {                         // expand the bitmap into the rank map
    uint8_t *pb = &rmap[i<<3]; unsigned u = ip[i], v;
    v = (u >> 0) & 1; m += v; pb[0] = v?m:0;
    v = (u >> 1) & 1; m += v; pb[1] = v?m:0;
    v = (u >> 2) & 1; m += v; pb[2] = v?m:0;
    v = (u >> 3) & 1; m += v; pb[3] = v?m:0;
    v = (u >> 4) & 1; m += v; pb[4] = v?m:0;
    v = (u >> 5) & 1; m += v; pb[5] = v?m:0;
    v = (u >> 6) & 1; m += v; pb[6] = v?m:0;
    v = (u >> 7) & 1; m += v; pb[7] = v?m:0;
  }
  ip += c;
  for(i = c*8; i != 256; i++) rmap[i] = ++m;        // values past the bitmap are all escapes
  m--;
  unsigned char ix = *ip++;                         // implicit run byte, stored once
  if(outlen >= 32)
    while(op < out+(outlen-32)) {                   // fast path: word-wide copy + escape scan
#if __WORDSIZE == 64
      uint64_t z = (uint64_t)rmap[ip[7]]<<56 | (uint64_t)rmap[ip[6]] << 48 | (uint64_t)rmap[ip[5]] << 40 | (uint64_t)rmap[ip[4]] << 32 | (uint32_t)rmap[ip[3]] << 24 | (uint32_t)rmap[ip[2]] << 16| (uint32_t)rmap[ip[1]] << 8| rmap[ip[0]];
      ctou64(op) = ctou64(ip); if(z) goto a; ip += 8; op += 8;
      continue;
      a: z = ctz64(z)>>3;                           // byte index of first escape in the word
#else
      uint32_t z = (uint32_t)rmap[ip[3]] << 24 | (uint32_t)rmap[ip[2]] << 16| (uint32_t)rmap[ip[1]] << 8| rmap[ip[0]];
      ctou32(op) = ctou32(ip); if(z) goto a; ip += 4; op += 4;
      continue;
      a: z = ctz32(z)>>3;
#endif
      ip += z; op += z;                             // literals before the escape are already copied
      c = rmap[*ip++];
      vlzget(ip, i, m, c-1);                        // run length, range coded by escape rank
      if(i&1) c = ix; else c = *ip++;               // odd code => implicit byte, even => explicit
      i = (i>>1) + TMIN;
      rmemset(op,c,i); PREFETCH(ip+512, 0);
    }
  while(op < out+outlen) {                          // safe tail: byte-at-a-time, bounded memset
    if(likely(!(c = rmap[*ip]))) *op++ = *ip++;
    else {
      ip++;
      vlzget(ip, i, m, c-1);
      if(i&1) c = ix; else c = *ip++;
      i = (i>>1) + TMIN;
      rmemset8(op,c,i);
    }
  }
  return ip - in;
}
// Top-level TurboRLE decoder; trivial framings handled inline:
//   inlen == outlen : stored block, plain copy
//   inlen == 1      : whole output is one repeated byte
//   otherwise       : full TurboRLE block, decoded by _trled
// Returns inlen (bytes of input consumed).  Unused local 'l' removed.
unsigned trled(const unsigned char *__restrict in, unsigned inlen, unsigned char *__restrict out, unsigned outlen) {
  if(inlen == outlen)
    memcpy(out, in, outlen);
  else if(inlen == 1)
    memset(out, in[0], outlen);
  else
    _trled(in, out, outlen);                        //AS(l == inlen,"FatalI l>inlen %d ", l - inlen);
  return inlen;
}
@ -268,7 +268,7 @@ unsigned trled(const unsigned char *__restrict in, unsigned inlen, unsigned char
#undef rmemset
#undef SRLE8
//------------------------------------- RLE 16, 32, 64 --------------------------------------------------
//------------------------------------- RLE 16, 32, 64 --------------------------------------------------
#define USIZE 16
#include "trled.c"
#undef rmemset
@ -285,7 +285,7 @@ unsigned trled(const unsigned char *__restrict in, unsigned inlen, unsigned char
#include "trled.c"
#undef rmemset
#undef USIZE
#else // ---------------------------- include 16, 32, 64----------------------------------------------
#ifdef MEMSAFE
#define rmemset(_op_, _c_, _i_) while(_i_--) *_op_++ = _c_
@ -313,99 +313,99 @@ unsigned trled(const unsigned char *__restrict in, unsigned inlen, unsigned char
TEMPLATE2(ctou, USIZE)(_up) = _c_; _up += USIZE/8;\
} while(_up < (uint8_t *)_op_);\
} while(0)
#endif
#endif
#define uint_t TEMPLATE3(uint, USIZE, _t)
#define ctout(_x_) *(uint_t *)(_x_)
#if !SRLE8
// Generic escape-coded RLE decoder for USIZE-bit elements (16/32/64, via
// template re-inclusion of this file).  Literal elements are copied verbatim;
// an element equal to the escape value 'e' introduces a vlget32 run count, and
// long runs carry the element to repeat.  A partial trailing element (outlen
// not a multiple of sizeof(uint_t)) is copied byte-wise at the end.
// Returns the number of input bytes consumed.
// NOTE(review): diff-extraction duplicates removed; likely(r) >= 3 regrouped
// to likely(r >= 3) so the hint covers the comparison.
unsigned TEMPLATE2(_srled, USIZE)(const unsigned char *__restrict in, unsigned char *__restrict cout, unsigned outlen, uint_t e) {
  uint_t *out = (uint_t *)cout, *op = out, c;
  const unsigned char *ip = in;
  #ifdef __AVX2__
  #define _mm256_set1_epi64 _mm256_set1_epi64x
  __m256i ev = TEMPLATE2(_mm256_set1_epi, USIZE)(e);   // escape element broadcast
  #elif (defined(__SSE__) /*|| defined(__ARM_NEON)*/)
  #define _mm_set1_epi64 _mm_set1_epi64x
  __m128i ev = TEMPLATE2(_mm_set1_epi, USIZE)(e);
  #endif
  if(outlen >= sizeof(uint_t)*8)
    while(op < out+outlen/sizeof(uint_t)-sizeof(uint_t)*8) { int r;
      #if __AVX2__ != 0 && USIZE != 64
      uint32_t mask;
      __m256i v = _mm256_loadu_si256((const __m256i*)ip); _mm256_storeu_si256((__m256i *)op, v); mask = _mm256_movemask_epi8(TEMPLATE2(_mm256_cmpeq_epi,USIZE)(v, ev)); if(mask) goto a; ip += 32; op += 256/USIZE;
      PREFETCH(ip+512, 0);
      continue;
      a: r = ctz32(mask)/(USIZE/8);                    // elements copied before the escape
      op += r;
      ip += (r+1)*sizeof(uint_t); PREFETCH(ip+512, 0); // also skip the escape element
      #elif (__SSE__ != 0 /*|| __ARM_NEON != 0*/) && USIZE != 64
      uint32_t mask;
      __m128i v = _mm_loadu_si128((const __m128i*)ip); _mm_storeu_si128((__m128i *)op, v); mask = _mm_movemask_epi8(TEMPLATE2(_mm_cmpeq_epi,USIZE)(v, ev)); if(mask) goto a; ip += 16; op += 128/USIZE;
      v = _mm_loadu_si128((const __m128i*)ip); _mm_storeu_si128((__m128i *)op, v); mask = _mm_movemask_epi8(TEMPLATE2(_mm_cmpeq_epi,USIZE)(v, ev)); if(mask) goto a; ip += 16; op += 128/USIZE;
      PREFETCH(ip+512, 0);
      continue;
      a: r = ctz32(mask)/(USIZE/8);
      op += r;
      ip += (r+1)*sizeof(uint_t); PREFETCH(ip+512, 0);
      #else
      if(((c = ctout(ip)) == e)) goto a; ip += sizeof(uint_t); *op++ = c;  // scalar path, 8x unrolled
      if(((c = ctout(ip)) == e)) goto a; ip += sizeof(uint_t); *op++ = c;
      if(((c = ctout(ip)) == e)) goto a; ip += sizeof(uint_t); *op++ = c;
      if(((c = ctout(ip)) == e)) goto a; ip += sizeof(uint_t); *op++ = c;
      if(((c = ctout(ip)) == e)) goto a; ip += sizeof(uint_t); *op++ = c;
      if(((c = ctout(ip)) == e)) goto a; ip += sizeof(uint_t); *op++ = c;
      if(((c = ctout(ip)) == e)) goto a; ip += sizeof(uint_t); *op++ = c;
      if(((c = ctout(ip)) == e)) goto a; ip += sizeof(uint_t); *op++ = c; PREFETCH(ip +512, 0);
      continue;
      a: ip += sizeof(uint_t); PREFETCH(ip +512, 0);   // skip the escape element
      #endif
      vlget32(ip, r);                                  // variable-length run count
      if(likely(r >= 3)) {                             // long run: the element to repeat follows
        c = ctout(ip); ip += sizeof(uint_t);
        r = r-3+4;                                     // codes 0..2 reserved; real length starts at 4
        rmemset(op, c, r);
      } else { r++;                                    // short run of the escape element
        rmemset(op, e, r);
      }
    }
  while(op < out+outlen/sizeof(uint_t)) {              // safe tail: element at a time
    c = ctout(ip); ip += sizeof(uint_t);
    if(likely(c != e)) {
      *op++ = c;
    } else {
      int r;
      vlget32(ip, r);
      if(likely(r >= 3)) {
        c = ctout(ip); ip += sizeof(uint_t);
        r = r-3+4;
        rmemset(op, c, r);
      } else {
        r++;
        rmemset(op, e, r);
      }
    }
  }
  { unsigned char *p = (unsigned char *)op;            // copy any partial trailing element
    while(p < cout+outlen) *p++ = *ip++;
  }
  return ip - in;
}
#endif
// Top-level decoder for USIZE-bit element srle; trivial framings handled inline:
//   inlen == outlen : stored block, plain copy
//   inlen == 1      : whole output is one repeated byte
//   otherwise       : escape-coded stream decoded by _srledUSIZE (its return
//                     value, the bytes consumed, is propagated).
unsigned TEMPLATE2(srled, USIZE)(const unsigned char *__restrict in, unsigned inlen, unsigned char *__restrict out, unsigned outlen, uint_t e) {
  if(inlen == outlen)
    memcpy(out, in, outlen);
  else if(inlen == 1)
    memset(out, in[0], outlen);
  else
    return TEMPLATE2(_srled, USIZE)(in, out, outlen, e);
  return inlen;
}