2 * Copyright (c) 2016 Intel Corporation.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
7 * http://www.apache.org/licenses/LICENSE-2.0
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
19 #include <rte_random.h>
21 #include "dpdk_legacy.h"
24 * IPv6 destination lookup callback.
/* NOTE(review): this excerpt is elided - original lines are missing between
 * the visible statements, so comments describe only what is shown here. */
27 lpm6_dst_lookup(void *data, const struct in6_addr *addr,
/* data is presumably the owning struct netbe_lcore - confirm at call site */
32 struct netbe_lcore *lc;
/* raw 16-byte IPv6 address taken as an integer for the byte-pointer cast */
37 p = (uintptr_t)addr->s6_addr;
/* longest-prefix match; on success idx selects a precomputed destination */
39 rc = rte_lpm6_lookup(lc->lpm6, (uint8_t *)p, &idx);
/* copy the cached tle_dest up to and including its L2+L3 header template */
42 rte_memcpy(res, dst, dst->l2_len + dst->l3_len +
43 offsetof(struct tle_dest, hdr));
/* Lazily create the per-lcore TLE context: LPM route tables, the IP
 * fragmentation table and the tle_ctx itself. Runs only when lc->ctx is
 * still NULL. Error handling and several statements are elided in this
 * excerpt. */
49 create_context(struct netbe_lcore *lc, const struct tle_ctx_param *ctx_prm)
53 struct tle_ctx_param cprm;
55 if (lc->ctx == NULL) {
/* NUMA socket of this lcore - presumably used for allocations (elided) */
56 sid = rte_lcore_to_socket_id(lc->id);
/* build lc->lpm4 / lc->lpm6 routing tables for this lcore */
58 rc = lcore_lpm_init(lc);
/* NOTE(review): cprm is read below; it is presumably copied from *ctx_prm
 * on an elided line - otherwise this would read uninitialized memory. */
64 cprm.proto = lc->proto;
/* route-lookup callbacks the TLE library will invoke, keyed by this lcore */
65 cprm.lookup4 = lpm4_dst_lookup;
66 cprm.lookup4_data = lc;
67 cprm.lookup6 = lpm6_dst_lookup;
68 cprm.lookup6_data = lc;
/* no user-supplied secret key: generate a random one */
69 if (cprm.secret_key.u64[0] == 0 &&
70 cprm.secret_key.u64[1] == 0) {
71 cprm.secret_key.u64[0] = rte_rand();
72 cprm.secret_key.u64[1] = rte_rand();
/* fragment-table timeout in TSC cycles, rounded up to a whole millisecond */
75 frag_cycles = (rte_get_tsc_hz() + MS_PER_S - 1) /
78 lc->ftbl = rte_ip_frag_table_create(cprm.max_streams,
79 FRAG_TBL_BUCKET_ENTRIES, cprm.max_streams,
82 RTE_LOG(NOTICE, USER1, "%s(lcore=%u): frag_tbl=%p;\n",
83 __func__, lc->id, lc->ftbl);
85 lc->ctx = tle_ctx_create(&cprm);
87 RTE_LOG(NOTICE, USER1, "%s(lcore=%u): proto=%s, ctx=%p;\n",
88 __func__, lc->id, proto_name[lc->proto], lc->ctx);
/* either allocation failing makes the whole context unusable */
90 if (lc->ctx == NULL || lc->ftbl == NULL)
98 * BE lcore setup routine.
/* Creates the lcore context (if needed) and registers one port/queue pair
 * as a TLE device on it, with per-address-family port blocklists. On any
 * failure every per-lcore resource is torn down. Excerpt is elided. */
101 lcore_init(struct netbe_lcore *lc, const struct tle_ctx_param *ctx_prm,
102 const uint32_t prtqid, const uint16_t *bl_ports, uint32_t nb_bl_ports)
105 struct tle_dev_param dprm;
107 rc = create_context(lc, ctx_prm);
109 if (rc == 0 && lc->ctx != NULL) {
/* describe the device: offloads, local v4/v6 addresses, blocked ports */
110 memset(&dprm, 0, sizeof(dprm));
111 dprm.rx_offload = lc->prtq[prtqid].port.rx_offload;
112 dprm.tx_offload = lc->prtq[prtqid].port.tx_offload;
113 dprm.local_addr4.s_addr = lc->prtq[prtqid].port.ipv4;
114 memcpy(&dprm.local_addr6, &lc->prtq[prtqid].port.ipv6,
115 sizeof(lc->prtq[prtqid].port.ipv6));
/* same blocklist is applied to both IPv4 and IPv6 */
116 dprm.bl4.nb_port = nb_bl_ports;
117 dprm.bl4.port = bl_ports;
118 dprm.bl6.nb_port = nb_bl_ports;
119 dprm.bl6.port = bl_ports;
121 lc->prtq[prtqid].dev = tle_add_dev(lc->ctx, &dprm);
123 RTE_LOG(NOTICE, USER1,
124 "%s(lcore=%u, port=%u, qid=%u), dev: %p\n",
125 __func__, lc->id, lc->prtq[prtqid].port.id,
126 lc->prtq[prtqid].rxqid, lc->prtq[prtqid].dev);
128 if (lc->prtq[prtqid].dev == NULL)
/* failure path: release everything created for this lcore so far */
133 "%s(lcore=%u) failed with error code: %d\n",
134 __func__, lc->id, rc);
135 tle_ctx_destroy(lc->ctx);
136 rte_ip_frag_table_destroy(lc->ftbl);
137 rte_lpm_free(lc->lpm4);
138 rte_lpm6_free(lc->lpm6);
139 rte_free(lc->prtq[prtqid].port.lcore_id);
140 lc->prtq[prtqid].port.nb_lcore = 0;
/* Build the list of L4 ports this queue must NOT own: every port in
 * 0..UINT16_MAX is mapped to a queue by hashing modulo the power-of-2
 * aligned queue count; ports mapping to other queues are presumably
 * appended to bl_ports on elided lines. Returns the count - confirm. */
151 create_blocklist(const struct netbe_port *beprt, uint16_t *bl_ports,
154 uint32_t i, j, qid, align_nb_q;
/* round queue count up to a power of two so the % below is well spread */
156 align_nb_q = rte_align32pow2(beprt->nb_lcore);
157 for (i = 0, j = 0; i < (UINT16_MAX + 1); i++) {
158 qid = (i % align_nb_q) % beprt->nb_lcore;
/* For every BE lcore and each of its port/queue pairs: compute the port
 * blocklist for that queue and initialize the lcore/device via lcore_init.
 * Excerpt is elided (error handling and cleanup lines missing). */
167 netbe_lcore_init(struct netbe_cfg *cfg, const struct tle_ctx_param *ctx_prm)
170 uint32_t i, j, nb_bl_ports = 0, sz;
171 struct netbe_lcore *lc;
/* scratch buffer reused across iterations; static, so never freed */
172 static uint16_t *bl_ports;
174 /* Create the context and attached queue for each lcore. */
/* NOTE(review): sz holds UINT16_MAX entries but the port space scanned by
 * create_blocklist is UINT16_MAX + 1 - possible off-by-one; confirm the
 * maximum number of blocked ports can never reach 65536. */
176 sz = sizeof(uint16_t) * UINT16_MAX;
177 bl_ports = rte_zmalloc(NULL, sz, RTE_CACHE_LINE_SIZE);
178 for (i = 0; i < cfg->cpu_num; i++) {
180 for (j = 0; j < lc->prtq_num; j++) {
181 memset((uint8_t *)bl_ports, 0, sz);
182 /* create list of blocked ports based on q */
183 nb_bl_ports = create_blocklist(&lc->prtq[j].port,
184 bl_ports, lc->prtq[j].rxqid);
185 RTE_LOG(NOTICE, USER1,
186 "lc=%u, q=%u, nb_bl_ports=%u\n",
187 lc->id, lc->prtq[j].rxqid, nb_bl_ports);
189 rc = lcore_init(lc, ctx_prm, j, bl_ports, nb_bl_ports);
192 "%s: failed with error code: %d\n",
/* qsort comparator: orders netfe_stream_prm entries by owning FE lcore.
 * (Assignments of p1/p2 from s1/s2 are on elided lines.)
 * NOTE(review): subtraction comparators misorder if lcore were large/
 * unsigned-wrapping; safe only while lcore ids stay small - confirm. */
205 netfe_lcore_cmp(const void *s1, const void *s2)
207 const struct netfe_stream_prm *p1, *p2;
211 return p1->lcore - p2->lcore;
/* Pick the BE lcore that should serve an IPv6 stream, trying in order:
 * single-BE shortcut, explicit belc hint, match on local address/port,
 * then LPM route lookup on the remote address. Excerpt is elided. */
215 netbe_find6(const struct in6_addr *laddr, uint16_t lport,
216 const struct in6_addr *raddr, uint32_t belc)
220 struct netbe_lcore *bc;
222 /* we have exactly one BE, use it for all traffic */
223 if (becfg.cpu_num == 1)
226 /* search by provided be_lcore */
227 if (belc != LCORE_ID_ANY) {
228 for (i = 0; i != becfg.cpu_num; i++) {
233 RTE_LOG(NOTICE, USER1, "%s: no stream with belcore=%u\n",
238 /* search by local address */
239 if (memcmp(laddr, &in6addr_any, sizeof(*laddr)) != 0) {
240 for (i = 0; i != becfg.cpu_num; i++) {
242 /* search by queue for the local port */
243 for (j = 0; j != bc->prtq_num; j++) {
244 if (memcmp(laddr, &bc->prtq[j].port.ipv6,
245 sizeof(*laddr)) == 0) {
250 if (verify_queue_for_port(bc->prtq + j,
258 /* search by remote address */
/* NOTE(review): suspected inverted test - the parallel local-address
 * search above guards with `!= 0`, yet here the LPM lookup runs only when
 * raddr IS in6addr_any. Verify against the IPv4 variant (netbe_find4). */
259 if (memcmp(raddr, &in6addr_any, sizeof(*raddr)) == 0) {
260 for (i = 0; i != becfg.cpu_num; i++) {
262 if (rte_lpm6_lookup(bc->lpm6,
263 (uint8_t *)(uintptr_t)raddr->s6_addr,
269 /* search by queue for the local port */
270 for (j = 0; j != bc->prtq_num; j++)
271 if (verify_queue_for_port(bc->prtq + j,
/* Address-family dispatch wrapper: routes the lookup to netbe_find4 or
 * netbe_find6 based on the local sockaddr family. The fallthrough for
 * other families (and closing brace) is elided from this excerpt. */
282 netbe_find(const struct sockaddr_storage *la,
283 const struct sockaddr_storage *ra,
286 const struct sockaddr_in *l4, *r4;
287 const struct sockaddr_in6 *l6, *r6;
289 if (la->ss_family == AF_INET) {
290 l4 = (const struct sockaddr_in *)la;
291 r4 = (const struct sockaddr_in *)ra;
/* ports are stored in network byte order in sockaddr - convert to host */
292 return netbe_find4(&l4->sin_addr, ntohs(l4->sin_port),
293 &r4->sin_addr, belc);
294 } else if (la->ss_family == AF_INET6) {
295 l6 = (const struct sockaddr_in6 *)la;
296 r6 = (const struct sockaddr_in6 *)ra;
297 return netbe_find6(&l6->sin6_addr, ntohs(l6->sin6_port),
298 &r6->sin6_addr, belc);
/* Resolve which BE serves one FE stream-parameter block and (presumably,
 * on elided lines) record it in sp; logs and fails if no BE matches.
 * "flll" looks like a typo for "fill" but is the public name - keep it. */
304 netfe_sprm_flll_be(struct netfe_sprm *sp, uint32_t line, uint32_t belc)
308 bidx = netbe_find(&sp->local_addr, &sp->remote_addr, belc);
311 RTE_LOG(ERR, USER1, "%s(line=%u): no BE for that stream\n",
319 /* start front-end processing. */
/* Assign every configured FE stream to a BE, then group the streams by FE
 * lcore (qsort + linear scan) and publish each group's slice into the
 * per-lcore prm[] table. Validates that each FE lcore is enabled and not
 * already running. Excerpt is elided (error returns, braces missing). */
321 netfe_lcore_fill(struct lcore_prm prm[RTE_MAX_LCORE],
322 struct netfe_lcore_prm *lprm)
325 uint32_t i, j, lc, ln;
326 struct netfe_stream_prm *s;
328 /* determine on what BE each stream should be open. */
329 for (i = 0; i != lprm->nb_streams; i++) {
330 s = lprm->stream + i;
/* resolve BE for both halves of the stream; either failing aborts */
333 if (netfe_sprm_flll_be(&s->sprm, ln, belc) != 0 ||
335 netfe_sprm_flll_be(&s->fprm, ln, belc) != 0))
339 /* group all fe parameters by lcore. */
341 qsort(lprm->stream, lprm->nb_streams, sizeof(lprm->stream[0]),
/* outer loop steps one lcore-group at a time: j ends up one past group */
344 for (i = 0; i != lprm->nb_streams; i = j) {
346 lc = lprm->stream[i].lcore;
347 ln = lprm->stream[i].line;
349 if (rte_lcore_is_enabled(lc) == 0) {
351 "%s(line=%u): lcore %u is not enabled\n",
/* an lcore already executing (other than master) cannot take FE work */
356 if (rte_get_master_lcore() != lc &&
357 rte_eal_get_lcore_state(lc) == RUNNING) {
359 "%s(line=%u): lcore %u already in use\n",
/* advance j to the end of the run of streams sharing this lcore */
364 for (j = i + 1; j != lprm->nb_streams &&
365 lc == lprm->stream[j].lcore;
/* hand this lcore its contiguous slice of the sorted stream array */
369 prm[lc].fe.max_streams = lprm->max_streams;
370 prm[lc].fe.nb_streams = j - i;
371 prm[lc].fe.stream = lprm->stream + i;
377 #endif /* LCORE_H_ */