#ifndef included_vector_altivec_h
#define included_vector_altivec_h
/* Splats. */

/* Template: per-type splat (scalar -> all lanes) and splat_word
   (broadcast one lane to all lanes) helpers.
   NOTE(review): this template is defined but never expanded before
   the #undef below; the concrete macros that follow are used instead. */
#define _(t,n,ti,fi,tr,fr) \
  always_inline t##x##n t##x##n##_splat (t v) \
  { return (t##x##n) __builtin_altivec_##fi ((ti) v); } \
 \
  always_inline t##x##n t##x##n##_splat_word (t##x##n x, int word_index) \
  { return (t##x##n) __builtin_altivec_##fr ((tr) x, word_index); }

/* Splat i into every lane (vspltish: halfword lanes, vspltisw: word lanes;
   NOTE(review): these builtins take a 5-bit signed immediate — confirm callers
   only pass constants in range). */
#define u16x8_splat(i) ((u16x8) __builtin_altivec_vspltish (i))
#define i16x8_splat(i) ((i16x8) __builtin_altivec_vspltish (i))
#define u32x4_splat(i) ((u32x4) __builtin_altivec_vspltisw (i))
#define i32x4_splat(i) ((i32x4) __builtin_altivec_vspltisw (i))

/* Broadcast lane i of vector x into every lane of the result. */
#define u16x8_splat_word(x,i) ((u16x8) __builtin_altivec_vsplth ((i16x8) (x), (i)))
#define i16x8_splat_word(x,i) ((i16x8) __builtin_altivec_vsplth ((i16x8) (x), (i)))
#define u32x4_splat_word(x,i) ((u32x4) __builtin_altivec_vspltw ((i32x4) (x), (i)))
#define i32x4_splat_word(x,i) ((i32x4) __builtin_altivec_vspltw ((i32x4) (x), (i)))

#undef _
/* 128 bit shifts. */
#define _(t,ti,lr,f) \
always_inline t t##_##lr (t x, t y) \
_(i16x8, i16, left, vslo)
_(u16x8, u16, right, vsro) _(i16x8, i16, right, vsro)
#undef _
- always_inline
- u32
- u32x4_get0 (u32x4 x)
-{
- u32x4_union_t y;
- y.as_u32x4 = x;
- return y.as_u32[3];
-}
/* Interleave. */
#define _(t,it,lh,f) \
return u16x8_is_equal (x, zero);
}
-always_inline u32x4
-u32x4_is_zero (u32x4 x)
-{
- u32x4 zero = { 0 };
- return u32x4_is_equal (x, zero);
-}
-
-always_inline u32
-u32x4_zero_byte_mask (u32x4 x)
-{
- u32x4 cmp = u32x4_is_zero (x);
- u32x4 tmp = { 0x000f, 0x00f0, 0x0f00, 0xf000, };
- cmp &= tmp;
- cmp |= u32x4_word_shift_right (cmp, 2);
- cmp |= u32x4_word_shift_right (cmp, 1);
- return u32x4_get0 (cmp);
-}
-
#endif /* included_vector_altivec_h */
/*