1 From: Gowrishankar Muthukrishnan <gowrishankar.m@linux.vnet.ibm.com>
2 Date: Thu, 8 Sep 2016 22:18:03 +0530
3 Subject: [PATCH 1/7] lpm: add AltiVec for ppc64
5 This patch adds the ppc64le (AltiVec) port of the LPM library in DPDK.
7 Signed-off-by: Gowrishankar Muthukrishnan <gowrishankar.m@linux.vnet.ibm.com>
8 Acked-by: Chao Zhu <chaozhu@linux.vnet.ibm.com>
10 Origin: Upstream, commit:d2cc7959342b5183ab88aed44ea011d660a91021
11 Author: Gowrishankar Muthukrishnan <gowrishankar.m@linux.vnet.ibm.com>
12 Last-Update: 2016-09-21
14 app/test/test_xmmt_ops.h | 16 +++
15 config/defconfig_ppc_64-power8-linuxapp-gcc | 1 -
16 .../common/include/arch/ppc_64/rte_vect.h | 60 ++++++++
17 lib/librte_lpm/Makefile | 2 +
18 lib/librte_lpm/rte_lpm.h | 2 +
19 lib/librte_lpm/rte_lpm_altivec.h | 154 +++++++++++++++++++++
20 6 files changed, 234 insertions(+), 1 deletion(-)
21 create mode 100644 lib/librte_eal/common/include/arch/ppc_64/rte_vect.h
22 create mode 100644 lib/librte_lpm/rte_lpm_altivec.h
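
The function this port enables on ppc64 is rte_lpm_lookupx4(), whose AltiVec
implementation is added in rte_lpm_altivec.h below. What follows is a minimal
caller sketch and is not part of the patch: lookup_four(), its arguments and
default_hop are illustrative, and the (xmm_t){...} literal is the ppc64 vector
form introduced here.

    #include <rte_lpm.h>
    #include <rte_vect.h>

    /* Illustrative helper: look up four IPv4 addresses (host byte order)
     * in one call, falling back to default_hop on a miss. */
    static void
    lookup_four(const struct rte_lpm *lpm, const uint32_t ip[4],
                uint32_t default_hop)
    {
        uint32_t hop[4];
        /* lane 0 carries ip[0]; its result comes back in hop[0] */
        xmm_t ipx4 = (xmm_t){(int)ip[0], (int)ip[1], (int)ip[2], (int)ip[3]};

        rte_lpm_lookupx4(lpm, ipx4, hop, default_hop);
        /* hop[i] now holds the next hop for ip[i], or default_hop */
    }
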
24 --- a/app/test/test_xmmt_ops.h
25 +++ b/app/test/test_xmmt_ops.h
27 /* sets the 4 signed 32-bit integer values and returns the xmm_t variable */
28 #define vect_set_epi32(i3, i2, i1, i0) _mm_set_epi32(i3, i2, i1, i0)
30 +#elif defined(RTE_ARCH_PPC_64)
32 +/* vect_* abstraction implementation using ALTIVEC */
34 +/* loads the xmm_t value from address p (vec_ld assumes a 16-byte aligned address) */
35 +#define vect_loadu_sil128(p) vec_ld(0, p)
37 +/* sets the 4 signed 32-bit integer values and returns the xmm_t variable */
38 +static inline xmm_t __attribute__((always_inline))
39 +vect_set_epi32(int i3, int i2, int i1, int i0)
41 + xmm_t data = (xmm_t){i0, i1, i2, i3};
48 #endif /* _TEST_XMMT_OPS_H_ */
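
Note that the new vect_set_epi32() keeps the argument convention of SSE's
_mm_set_epi32(): the last argument lands in lane 0, which is also the lane
that produces hop[0] in rte_lpm_lookupx4(). A small illustrative check,
assuming it is built inside app/test where test_xmmt_ops.h lives
(check_lane_order() and its values are made up):

    #include <assert.h>
    #include <rte_vect.h>        /* rte_xmm_t, xmm_t */
    #include "test_xmmt_ops.h"   /* vect_set_epi32() */

    static void
    check_lane_order(void)
    {
        rte_xmm_t v;

        v.x = vect_set_epi32(4, 3, 2, 1);  /* (xmm_t){1, 2, 3, 4} on ppc64 */
        assert(v.u32[0] == 1);             /* last argument is lane 0 */
        assert(v.u32[3] == 4);             /* first argument is lane 3 */
    }
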
49 --- a/config/defconfig_ppc_64-power8-linuxapp-gcc
50 +++ b/config/defconfig_ppc_64-power8-linuxapp-gcc
52 CONFIG_RTE_LIBRTE_FM10K_PMD=n
54 # The following libraries are not available on Power, so they are turned off.
55 -CONFIG_RTE_LIBRTE_LPM=n
56 CONFIG_RTE_LIBRTE_ACL=n
57 CONFIG_RTE_LIBRTE_SCHED=n
58 CONFIG_RTE_LIBRTE_PORT=n
60 +++ b/lib/librte_eal/common/include/arch/ppc_64/rte_vect.h
65 + * Copyright (C) IBM Corporation 2016.
67 + * Redistribution and use in source and binary forms, with or without
68 + * modification, are permitted provided that the following conditions
71 + * * Redistributions of source code must retain the above copyright
72 + * notice, this list of conditions and the following disclaimer.
73 + * * Redistributions in binary form must reproduce the above copyright
74 + * notice, this list of conditions and the following disclaimer in
75 + * the documentation and/or other materials provided with the
77 + * * Neither the name of IBM Corporation nor the names of its
78 + * contributors may be used to endorse or promote products derived
79 + * from this software without specific prior written permission.
81 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
82 + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
83 + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
84 + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
85 + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
86 + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
87 + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
88 + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
89 + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
90 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
91 + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
94 +#ifndef _RTE_VECT_PPC_64_H_
95 +#define _RTE_VECT_PPC_64_H_
103 +typedef vector signed int xmm_t;
105 +#define XMM_SIZE (sizeof(xmm_t))
106 +#define XMM_MASK (XMM_SIZE - 1)
108 +typedef union rte_xmm {
110 + uint8_t u8[XMM_SIZE / sizeof(uint8_t)];
111 + uint16_t u16[XMM_SIZE / sizeof(uint16_t)];
112 + uint32_t u32[XMM_SIZE / sizeof(uint32_t)];
113 + uint64_t u64[XMM_SIZE / sizeof(uint64_t)];
114 + double pd[XMM_SIZE / sizeof(double)];
115 +} __attribute__((aligned(16))) rte_xmm_t;
121 +#endif /* _RTE_VECT_PPC_64_H_ */
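
The rte_xmm_t union above is what lets vector results be picked apart lane by
lane in scalar code, which is exactly what rte_lpm_lookupx4() does with i8
further down. A minimal sketch of that pattern (lane_u32() is an illustrative
helper, not part of the patch):

    #include <stdint.h>
    #include <rte_vect.h>

    /* Return 32-bit lane i (0..3) of an AltiVec vector via rte_xmm_t. */
    static inline uint32_t
    lane_u32(xmm_t v, unsigned int i)
    {
        rte_xmm_t tmp;

        tmp.x = v;           /* write the value through the vector view */
        return tmp.u32[i];   /* read it back through the scalar view */
    }
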
122 --- a/lib/librte_lpm/Makefile
123 +++ b/lib/librte_lpm/Makefile
125 SYMLINK-$(CONFIG_RTE_LIBRTE_LPM)-include += rte_lpm_neon.h
126 else ifeq ($(CONFIG_RTE_ARCH_X86),y)
127 SYMLINK-$(CONFIG_RTE_LIBRTE_LPM)-include += rte_lpm_sse.h
128 +else ifeq ($(CONFIG_RTE_ARCH_PPC_64),y)
129 +SYMLINK-$(CONFIG_RTE_LIBRTE_LPM)-include += rte_lpm_altivec.h
133 --- a/lib/librte_lpm/rte_lpm.h
134 +++ b/lib/librte_lpm/rte_lpm.h
137 #if defined(RTE_ARCH_ARM) || defined(RTE_ARCH_ARM64)
138 #include "rte_lpm_neon.h"
139 +#elif defined(RTE_ARCH_PPC_64)
140 +#include "rte_lpm_altivec.h"
142 #include "rte_lpm_sse.h"
145 +++ b/lib/librte_lpm/rte_lpm_altivec.h
150 + * Copyright (C) IBM Corporation 2016.
152 + * Redistribution and use in source and binary forms, with or without
153 + * modification, are permitted provided that the following conditions
156 + * * Redistributions of source code must retain the above copyright
157 + * notice, this list of conditions and the following disclaimer.
158 + * * Redistributions in binary form must reproduce the above copyright
159 + * notice, this list of conditions and the following disclaimer in
160 + * the documentation and/or other materials provided with the
162 + * * Neither the name of IBM Corporation nor the names of its
163 + * contributors may be used to endorse or promote products derived
164 + * from this software without specific prior written permission.
166 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
167 + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
168 + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
169 + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
170 + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
171 + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
172 + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
173 + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
174 + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
175 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
176 + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
179 +#ifndef _RTE_LPM_ALTIVEC_H_
180 +#define _RTE_LPM_ALTIVEC_H_
182 +#include <rte_branch_prediction.h>
183 +#include <rte_byteorder.h>
184 +#include <rte_common.h>
185 +#include <rte_vect.h>
192 +rte_lpm_lookupx4(const struct rte_lpm *lpm, xmm_t ip, uint32_t hop[4],
195 + vector signed int i24;
198 + uint64_t idx, pt, pt2;
199 + const uint32_t *ptbl;
201 + const uint32_t mask = UINT8_MAX;
202 + const vector signed int mask8 = (xmm_t){mask, mask, mask, mask};
205 + * RTE_LPM_VALID_EXT_ENTRY_BITMASK for 2 LPM entries
206 + * as one 64-bit value (0x0300000003000000).
208 + const uint64_t mask_xv =
209 + ((uint64_t)RTE_LPM_VALID_EXT_ENTRY_BITMASK |
210 + (uint64_t)RTE_LPM_VALID_EXT_ENTRY_BITMASK << 32);
213 + * RTE_LPM_LOOKUP_SUCCESS for 2 LPM entries
214 + * as one 64-bit value (0x0100000001000000).
216 + const uint64_t mask_v =
217 + ((uint64_t)RTE_LPM_LOOKUP_SUCCESS |
218 + (uint64_t)RTE_LPM_LOOKUP_SUCCESS << 32);
220 + /* get 4 indexes for tbl24[]. */
221 + i24 = vec_sr((xmm_t) ip,
222 + (vector unsigned int){CHAR_BIT, CHAR_BIT, CHAR_BIT, CHAR_BIT});
224 + /* extract values from tbl24[] */
225 + idx = (uint32_t)i24[0];
226 + idx = idx < (1<<24) ? idx : (1<<24)-1;
227 + ptbl = (const uint32_t *)&lpm->tbl24[idx];
230 + idx = (uint32_t) i24[1];
231 + idx = idx < (1<<24) ? idx : (1<<24)-1;
232 + ptbl = (const uint32_t *)&lpm->tbl24[idx];
235 + idx = (uint32_t) i24[2];
236 + idx = idx < (1<<24) ? idx : (1<<24)-1;
237 + ptbl = (const uint32_t *)&lpm->tbl24[idx];
240 + idx = (uint32_t) i24[3];
241 + idx = idx < (1<<24) ? idx : (1<<24)-1;
242 + ptbl = (const uint32_t *)&lpm->tbl24[idx];
245 + /* get 4 indexes for tbl8[]. */
246 + i8.x = vec_and(ip, mask8);
248 + pt = (uint64_t)tbl[0] |
249 + (uint64_t)tbl[1] << 32;
250 + pt2 = (uint64_t)tbl[2] |
251 + (uint64_t)tbl[3] << 32;
253 + /* search successfully finished for all 4 IP addresses. */
254 + if (likely((pt & mask_xv) == mask_v) &&
255 + likely((pt2 & mask_xv) == mask_v)) {
256 + *(uint64_t *)hop = pt & RTE_LPM_MASKX4_RES;
257 + *(uint64_t *)(hop + 2) = pt2 & RTE_LPM_MASKX4_RES;
261 + if (unlikely((pt & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
262 + RTE_LPM_VALID_EXT_ENTRY_BITMASK)) {
263 + i8.u32[0] = i8.u32[0] +
264 + (uint8_t)tbl[0] * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
265 + ptbl = (const uint32_t *)&lpm->tbl8[i8.u32[0]];
268 + if (unlikely((pt >> 32 & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
269 + RTE_LPM_VALID_EXT_ENTRY_BITMASK)) {
270 + i8.u32[1] = i8.u32[1] +
271 + (uint8_t)tbl[1] * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
272 + ptbl = (const uint32_t *)&lpm->tbl8[i8.u32[1]];
275 + if (unlikely((pt2 & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
276 + RTE_LPM_VALID_EXT_ENTRY_BITMASK)) {
277 + i8.u32[2] = i8.u32[2] +
278 + (uint8_t)tbl[2] * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
279 + ptbl = (const uint32_t *)&lpm->tbl8[i8.u32[2]];
282 + if (unlikely((pt2 >> 32 & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
283 + RTE_LPM_VALID_EXT_ENTRY_BITMASK)) {
284 + i8.u32[3] = i8.u32[3] +
285 + (uint8_t)tbl[3] * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
286 + ptbl = (const uint32_t *)&lpm->tbl8[i8.u32[3]];
290 + hop[0] = (tbl[0] & RTE_LPM_LOOKUP_SUCCESS) ? tbl[0] & 0x00FFFFFF : defv;
291 + hop[1] = (tbl[1] & RTE_LPM_LOOKUP_SUCCESS) ? tbl[1] & 0x00FFFFFF : defv;
292 + hop[2] = (tbl[2] & RTE_LPM_LOOKUP_SUCCESS) ? tbl[2] & 0x00FFFFFF : defv;
293 + hop[3] = (tbl[3] & RTE_LPM_LOOKUP_SUCCESS) ? tbl[3] & 0x00FFFFFF : defv;
300 +#endif /* _RTE_LPM_ALTIVEC_H_ */
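
As a closing note on the fast path above: two 32-bit tbl24 entries are packed
into one 64-bit word (pt, pt2) so that validity and the absence of a tbl8
extension can be tested for both entries with a single compare. A small
self-contained program illustrating that arithmetic; the constants mirror
RTE_LPM_VALID_EXT_ENTRY_BITMASK and RTE_LPM_LOOKUP_SUCCESS from rte_lpm.h,
and the packed example entries are made up:

    #include <assert.h>
    #include <stdint.h>

    #define VALID_EXT 0x03000000u   /* RTE_LPM_VALID_EXT_ENTRY_BITMASK */
    #define SUCCESS   0x01000000u   /* RTE_LPM_LOOKUP_SUCCESS */

    int main(void)
    {
        const uint64_t mask_xv = (uint64_t)VALID_EXT | (uint64_t)VALID_EXT << 32;
        const uint64_t mask_v  = (uint64_t)SUCCESS   | (uint64_t)SUCCESS   << 32;

        /* Two packed tbl24 entries: valid, not extended, next hops 5 and 7. */
        const uint64_t pt = (uint64_t)0x01000005u | (uint64_t)0x01000007u << 32;

        assert(mask_xv == 0x0300000003000000ULL);  /* value quoted in the patch */
        assert(mask_v  == 0x0100000001000000ULL);
        /* Both lookups hit tbl24 directly, so the fast path is taken. */
        assert((pt & mask_xv) == mask_v);
        return 0;
    }
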