- _mm_storeu_si128 ((__m128i *) (d0 + 0 * 16), r0);
- _mm_storeu_si128 ((__m128i *) (d0 + 1 * 16), r1);
- _mm_storeu_si128 ((__m128i *) (d0 + 2 * 16), r2);
- _mm_storeu_si128 ((__m128i *) (d0 + 3 * 16), r3);
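+ /* advance the lane indices past the first chunk, then blend and
+    store the second 32 bytes the same way */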
+ mask += add;
+ d1 = u8x32_blend (d1, s1, u8x32_is_greater (lv, mask));
+ u8x32_store_unaligned (d1, dst + 32);
+
+#elif defined (CLIB_HAVE_VEC128)
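+ /* 128-bit vector path: the same masked copy, done in four
+    16-byte chunks */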
+ u8x16 s0, s1, s2, s3, d0, d1, d2, d3;
+ u8x16 mask = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 };
+ u8x16 lv = u8x16_splat (len);
+ u8x16 add = u8x16_splat (16);
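+ /* lv broadcasts the copy length; mask holds each lane's byte offset */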
+
+ s0 = u8x16_load_unaligned (src);
+ s1 = u8x16_load_unaligned (src + 16);
+ s2 = u8x16_load_unaligned (src + 32);
+ s3 = u8x16_load_unaligned (src + 48);
+ d0 = u8x16_load_unaligned (dst);
+ d1 = u8x16_load_unaligned (dst + 16);
+ d2 = u8x16_load_unaligned (dst + 32);
+ d3 = u8x16_load_unaligned (dst + 48);
+
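+ /* each lane of u8x16_is_greater (lv, mask) is all ones when len > lane
+    index, so the blend copies src bytes below len and keeps dst above */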
+ d0 = u8x16_blend (d0, s0, u8x16_is_greater (lv, mask));
+ u8x16_store_unaligned (d0, dst);
+
+ /* stop before the stores would write past max_len */
+ if (max_len <= 16)
+   return;