In the question Optimizing Array Compaction, the top answer states:
SSE/AVX registers with the latest instruction sets allow a better approach. We can use the result of PMOVMSKB directly, transforming it into the control register for something like PSHUFB.
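For reference, here is a rough sketch of what that idea could look like for 32-bit elements. This is not the quoted answer's code: it assumes SSSE3, the names init_tables and compact_SSSE3 are mine, the 16-entry shuffle table is built at runtime (init_tables must be called once first), and the full-width store needs the output buffer to have three ints of slack past the compacted result.
#include <stdint.h>
#include <immintrin.h>

static uint8_t g_shuf[16][16];  // byte-shuffle control per 4-bit "nonzero lanes" mask
static uint8_t g_cnt[16];       // number of kept elements per mask

static void init_tables(void) {
    for(int mask=0; mask<16; mask++) {
        int out = 0;
        for(int lane=0; lane<4; lane++) {
            if(mask & (1<<lane)) {                     // lane is nonzero: keep it
                for(int b=0; b<4; b++) g_shuf[mask][4*out+b] = (uint8_t)(4*lane+b);
                out++;
            }
        }
        for(int b=4*out; b<16; b++) g_shuf[mask][b] = 0x80;  // high bit set: PSHUFB writes zero
        g_cnt[mask] = (uint8_t)out;
    }
}

int compact_SSSE3(int *x, int *y, int n) {
    int i = 0, cnt = 0;
    for(; i+4<=n; i+=4) {
        __m128i v    = _mm_loadu_si128((__m128i*)&x[i]);
        __m128i zero = _mm_cmpeq_epi32(v, _mm_setzero_si128());
        int keep = (~_mm_movemask_ps(_mm_castsi128_ps(zero))) & 0xF;  // bit k set: lane k != 0
        __m128i ctl  = _mm_loadu_si128((__m128i*)g_shuf[keep]);
        // note: this always stores 16 bytes, so y needs a few ints of slack past the result
        _mm_storeu_si128((__m128i*)&y[cnt], _mm_shuffle_epi8(v, ctl));
        cnt += g_cnt[keep];
    }
    for(; i<n; i++) if(x[i]) y[cnt++] = x[i];   // scalar tail
    return cnt;
}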
In any case, the first thing to do is find a fast scalar function. Here is a version which does not use a branch.
static inline int compact(int *x, int *y, const int n) {
    int cnt = 0;
    for(int i=0; i<n; i++) {
        int cut = x[i]!=0;   // 1 if the element is kept, 0 if it is dropped
        y[cnt] = cut*x[i];   // always store; a dropped zero is overwritten later or lies past the returned count
        cnt += cut;          // advance the output index only for kept elements
    }
    return cnt;
}
The best result with SIMD probably depends on the distribution of zeros. The following code should work well when the distribution is either sparse or dense, for example long runs of zeros and long runs of non-zeros. If the distribution is more even I don't know whether this code will have any benefit, but it will give the correct result anyway.
Here is an AVX2 version I tested.
int compact_AVX2(int *x, int *y, int n) {
    int i = 0, cnt = 0;
    for(i=0; i<n-8; i+=8) {
        __m256i x8  = _mm256_loadu_si256((__m256i*)&x[i]);
        __m256i cmp = _mm256_cmpeq_epi32(x8, _mm256_setzero_si256());
        int mask = _mm256_movemask_epi8(cmp);
        if(mask == -1) continue;             // all eight elements are zero: skip the block
        if(mask) {                           // some zeros: fall back to the scalar loop
            cnt += compact(&x[i], &y[cnt], 8);
        }
        else {                               // no zeros: copy the whole vector
            _mm256_storeu_si256((__m256i*)&y[cnt], x8);
            cnt += 8;
        }
    }
    cnt += compact(&x[i], &y[cnt], n-i);     // scalar cleanup for the remaining tail
    return cnt;
}
Here is the SSE2 version I tested.
int compact_SSE2(int *x, int *y, int n) {
    int i = 0, cnt = 0;
    for(i=0; i<n-4; i+=4) {
        __m128i x4  = _mm_loadu_si128((__m128i*)&x[i]);
        __m128i cmp = _mm_cmpeq_epi32(x4, _mm_setzero_si128());
        int mask = _mm_movemask_epi8(cmp);
        if(mask == 0xffff) continue;         // all four elements are zero: skip the block
        if(mask) {                           // some zeros: fall back to the scalar loop
            cnt += compact(&x[i], &y[cnt], 4);
        }
        else {                               // no zeros: copy the whole vector
            _mm_storeu_si128((__m128i*)&y[cnt], x4);
            cnt += 4;
        }
    }
    cnt += compact(&x[i], &y[cnt], n-i);     // scalar cleanup for the remaining tail
    return cnt;
}
Here is a full test.
#include <stdio.h>
#include <stdlib.h>
#if defined (__GNUC__) && ! defined (__INTEL_COMPILER)
#include <x86intrin.h>
#else
#include <immintrin.h>
#endif
#define N 50
static inline int compact(int *x, int *y, const int n) {
    int cnt = 0;
    for(int i=0; i<n; i++) {
        int cut = x[i]!=0;   // 1 if the element is kept, 0 if it is dropped
        y[cnt] = cut*x[i];   // always store; a dropped zero is overwritten later or lies past the returned count
        cnt += cut;          // advance the output index only for kept elements
    }
    return cnt;
}
int compact_SSE2(int *x, int *y, int n) {
    int i = 0, cnt = 0;
    for(i=0; i<n-4; i+=4) {
        __m128i x4  = _mm_loadu_si128((__m128i*)&x[i]);
        __m128i cmp = _mm_cmpeq_epi32(x4, _mm_setzero_si128());
        int mask = _mm_movemask_epi8(cmp);
        if(mask == 0xffff) continue;         // all four elements are zero: skip the block
        if(mask) {                           // some zeros: fall back to the scalar loop
            cnt += compact(&x[i], &y[cnt], 4);
        }
        else {                               // no zeros: copy the whole vector
            _mm_storeu_si128((__m128i*)&y[cnt], x4);
            cnt += 4;
        }
    }
    cnt += compact(&x[i], &y[cnt], n-i);     // scalar cleanup for the remaining tail
    return cnt;
}
int compact_AVX2(int *x, int *y, int n) {
    int i = 0, cnt = 0;
    for(i=0; i<n-8; i+=8) {
        __m256i x8  = _mm256_loadu_si256((__m256i*)&x[i]);
        __m256i cmp = _mm256_cmpeq_epi32(x8, _mm256_setzero_si256());
        int mask = _mm256_movemask_epi8(cmp);
        if(mask == -1) continue;             // all eight elements are zero: skip the block
        if(mask) {                           // some zeros: fall back to the scalar loop
            cnt += compact(&x[i], &y[cnt], 8);
        }
        else {                               // no zeros: copy the whole vector
            _mm256_storeu_si256((__m256i*)&y[cnt], x8);
            cnt += 8;
        }
    }
    cnt += compact(&x[i], &y[cnt], n-i);     // scalar cleanup for the remaining tail
    return cnt;
}
int main() {
    int x[N], y[N];
    for(int i=0; i<N; i++) x[i] = rand()%10;   // values 0-9, so roughly one in ten is zero
    //int cnt = compact_SSE2(x,y,N);
    int cnt = compact_AVX2(x,y,N);
    for(int i=0; i<N; i++) printf("%d ", x[i]);
    printf("\n");
    for(int i=0; i<cnt; i++) printf("%d ", y[i]);
    printf("\n");
}
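With GCC the AVX2 version needs the -mavx2 flag to compile (for example gcc -O3 -mavx2 test.c); the SSE2 version should build with no extra architecture flag on x86-64.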