/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*
  Copyright (c) 2009 Eliot Dresselhaus

  Permission is hereby granted, free of charge, to any person obtaining
  a copy of this software and associated documentation files (the
  "Software"), to deal in the Software without restriction, including
  without limitation the rights to use, copy, modify, merge, publish,
  distribute, sublicense, and/or sell copies of the Software, and to
  permit persons to whom the Software is furnished to do so, subject to
  the following conditions:

  The above copyright notice and this permission notice shall be
  included in all copies or substantial portions of the Software.

  THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
  LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
  OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
  WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#ifndef included_vector_altivec_h
#define included_vector_altivec_h
/* Splats. */
#define _(t,n,ti,fi,tr,fr)                                               \
  always_inline t##x##n t##x##n##_splat (t v)                            \
  { return (t##x##n) __builtin_altivec_##fi ((ti) v); }                  \
                                                                         \
  always_inline t##x##n t##x##n##_splat_word (t##x##n x, int word_index) \
  { return (t##x##n) __builtin_altivec_##fr ((tr) x, word_index); }
#define u16x8_splat(i) ((u16x8) __builtin_altivec_vspltish (i))
#define i16x8_splat(i) ((i16x8) __builtin_altivec_vspltish (i))
#define u32x4_splat(i) ((u32x4) __builtin_altivec_vspltisw (i))
#define i32x4_splat(i) ((i32x4) __builtin_altivec_vspltisw (i))

#define u16x8_splat_word(x,i) ((u16x8) __builtin_altivec_vsplth ((i16x8) (x), (i)))
#define i16x8_splat_word(x,i) ((i16x8) __builtin_altivec_vsplth ((i16x8) (x), (i)))
#define u32x4_splat_word(x,i) ((u32x4) __builtin_altivec_vspltw ((i32x4) (x), (i)))
#define i32x4_splat_word(x,i) ((i32x4) __builtin_altivec_vspltw ((i32x4) (x), (i)))
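/* Usage sketch (illustrative, not from the original header): the
   vspltis* builtins encode a 5-bit signed immediate, so the *_splat
   macros only work for compile-time constants in [-16, 15]:

     u32x4 a = u32x4_splat (3);          // => {3, 3, 3, 3}
     u32x4 b = u32x4_splat_word (a, 0);  // broadcast element 0 of a
*/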
#undef _

/* 128 bit shifts. */
#define _(t,ti,lr,f)                                          \
  always_inline t t##_##lr (t x, t y)                         \
  { return (t) __builtin_altivec_##f ((ti) x, (ti) y); }      \
                                                              \
  always_inline t t##_i##lr (t x, int i)                      \
  {                                                           \
    t j = {i, i, i, i, i, i, i, i};                           \
    return t##_##lr (x, j);                                   \
  }
_ (u16x8, i16x8, shift_left, vslh);
_ (u32x4, i32x4, shift_left, vslw);
_ (u16x8, i16x8, shift_right, vsrh);
_ (u32x4, i32x4, shift_right, vsrw);
_ (i16x8, i16x8, shift_right, vsrah);
_ (i32x4, i32x4, shift_right, vsraw);
_ (u16x8, i16x8, rotate_left, vrlh);
_ (i16x8, i16x8, rotate_left, vrlh);
_ (u32x4, i32x4, rotate_left, vrlw);
_ (i32x4, i32x4, rotate_left, vrlw);
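/* Usage sketch (illustrative): the vector forms shift each element by
   the count held in the corresponding element of y; the i-suffixed
   forms broadcast a scalar count first:

     u32x4 v = u32x4_splat (1);
     v = u32x4_ishift_left (v, 4);  // each element: 1 << 4 = 16
*/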
#undef _

/* 128 bit word shifts. */
#define _(t,it,lr,f)                                          \
  always_inline t t##_word_shift_##lr (t x, int n_words)      \
  {                                                           \
    i32x4 n_bits = {0, 0, 0, n_words * BITS (it)};            \
    return (t) __builtin_altivec_##f ((i32x4) x, n_bits);     \
  }
_ (u32x4, u32, left, vslo)
_ (i32x4, i32, left, vslo)
_ (u32x4, u32, right, vsro)
_ (i32x4, i32, right, vsro)
_ (u16x8, u16, left, vslo)
_ (i16x8, i16, left, vslo)
_ (u16x8, u16, right, vsro)
_ (i16x8, i16, right, vsro)
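/* These shift the whole 128-bit register rather than each element:
   vslo/vsro shift by octets and read the shift amount from the
   low-order byte of the second operand, which is why n_bits places
   n_words * BITS (it) in the last word.  Illustrative, assuming a
   big-endian AltiVec target:

     u32x4 x = {1, 2, 3, 4};
     u32x4 y = u32x4_word_shift_left (x, 1);  // => {2, 3, 4, 0}
*/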
#undef _

/* Interleaves. */
#define _(t,it,lh,f)                                          \
  always_inline t t##_interleave_##lh (t x, t y)              \
  { return (t) __builtin_altivec_##f ((it) x, (it) y); }
_ (u32x4, i32x4, lo, vmrglw)
_ (i32x4, i32x4, lo, vmrglw)
_ (u16x8, i16x8, lo, vmrglh)
_ (i16x8, i16x8, lo, vmrglh)
_ (u32x4, i32x4, hi, vmrghw)
_ (i32x4, i32x4, hi, vmrghw)
_ (u16x8, i16x8, hi, vmrghh)
_ (i16x8, i16x8, hi, vmrghh)
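/* Illustrative semantics on a big-endian AltiVec target: vmrgh* merges
   the low-indexed halves of both inputs, vmrgl* the high-indexed halves:

     u32x4 x = {0, 1, 2, 3};
     u32x4 y = {4, 5, 6, 7};
     u32x4 hi = u32x4_interleave_hi (x, y);  // => {0, 4, 1, 5}
     u32x4 lo = u32x4_interleave_lo (x, y);  // => {2, 6, 3, 7}
*/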
#undef _

/* Unaligned loads/stores. */
#define _(t)                                            \
  always_inline void t##_store_unaligned (t x, t * a)   \
  { clib_mem_unaligned (a, t) = x; }                    \
  always_inline t t##_load_unaligned (t * a)            \
  { return clib_mem_unaligned (a, t); }

_ (u16x8)
_ (u32x4)
_ (i16x8)
_ (i32x4)

#undef _
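/* Usage sketch (illustrative): these wrap clib_mem_unaligned so that
   vectors can be moved to and from addresses that are not 16-byte
   aligned, e.g. fields in the middle of a packet buffer:

     static u32x4 accumulate (void * p, u32x4 acc)
     { return acc + u32x4_load_unaligned ((u32x4 *) p); }
*/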
#define _signed_binop(n,m,f,g)                                          \
  /* Unsigned. */                                                       \
  always_inline u##n##x##m                                              \
  u##n##x##m##_##f (u##n##x##m x, u##n##x##m y)                         \
  { return (u##n##x##m) __builtin_altivec_##g ((i##n##x##m) x, (i##n##x##m) y); } \
                                                                        \
  /* Signed. */                                                         \
  always_inline i##n##x##m                                              \
  i##n##x##m##_##f (i##n##x##m x, i##n##x##m y)                         \
  { return (i##n##x##m) __builtin_altivec_##g ((i##n##x##m) x, (i##n##x##m) y); }
/* Compare operations. */
_signed_binop (16, 8, is_equal, vcmpequh)
_signed_binop (32, 4, is_equal, vcmpequw)

#undef _signed_binop
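/* Usage sketch (illustrative): vcmpequ* sets all bits of a result
   element when the corresponding input elements are equal, so the
   result doubles as a select mask:

     u32x4 mask = u32x4_is_equal (a, b);  // element j = ~0 iff a[j] == b[j]
*/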
always_inline u16x8 u16x8_is_zero (u16x8 x)
{
  u16x8 zero = {0};
  return u16x8_is_equal (x, zero);
}
always_inline u32x4 u32x4_is_zero (u32x4 x)
{
  u32x4 zero = {0};
  return u32x4_is_equal (x, zero);
}
always_inline u32 u32x4_zero_byte_mask (u32x4 x)
{
  u32x4 cmp = u32x4_is_zero (x);
  u32x4 tmp = { 0x000f, 0x00f0, 0x0f00, 0xf000, };

  /* Keep a distinct nibble of the all-ones compare result per word,
     then fold the words together so one word holds the OR of all. */
  cmp &= tmp;
  cmp |= u32x4_word_shift_right (cmp, 2);
  cmp |= u32x4_word_shift_right (cmp, 1);
  return u32x4_get0 (cmp);
}
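/* Because tmp assigns each word its own nibble, the returned u32 has
   nibble j equal to 0xf exactly when word j of x is zero; e.g. (sketch)
   x = {0, 1, 0, 1} yields 0x000f | 0x0f00 = 0x0f0f. */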
#endif /* included_vector_altivec_h */