/*
 * Copyright (c) 2016  Intel Corporation.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <string.h>

#include <rte_malloc.h>
#include <rte_errno.h>
#include <rte_ethdev.h>

#include "stream.h"
#include "misc.h"

#include <halfsiphash.h>

#define	LPORT_START	0x8000
#define	LPORT_END	MAX_PORT_NUM

#define	LPORT_START_BLK	PORT_BLK(LPORT_START)
#define	LPORT_END_BLK	PORT_BLK(LPORT_END)

const struct in6_addr tle_ipv6_any = IN6ADDR_ANY_INIT;
const struct in6_addr tle_ipv6_none = {
	{ .__u6_addr32 = {
		UINT32_MAX, UINT32_MAX, UINT32_MAX, UINT32_MAX
	} },
};

struct stream_ops tle_stream_ops[TLE_PROTO_NUM] = {};
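
/*
 * Sanity check for device parameters: at least one local address
 * (IPv4 or IPv6) must be provided, and a non-empty blocked-port list
 * must come with a valid port array.
 */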
static int
check_dev_prm(const struct tle_dev_param *dev_prm)
{
	/* no valid IPv4/IPv6 addresses provided. */
	if (dev_prm->local_addr4.s_addr == INADDR_ANY &&
			memcmp(&dev_prm->local_addr6, &tle_ipv6_any,
			sizeof(tle_ipv6_any)) == 0)
		return -EINVAL;

	if (dev_prm->bl4.nb_port > UINT16_MAX ||
			(dev_prm->bl4.nb_port != 0 &&
			dev_prm->bl4.port == NULL))
		return -EINVAL;

	if (dev_prm->bl6.nb_port > UINT16_MAX ||
			(dev_prm->bl6.nb_port != 0 &&
			dev_prm->bl6.port == NULL))
		return -EINVAL;

	return 0;
}
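
/*
 * Sanity check for context parameters: protocol and hash algorithm
 * must be within the supported range.
 */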
static int
check_ctx_prm(const struct tle_ctx_param *prm)
{
	if (prm->proto >= TLE_PROTO_NUM)
		return -EINVAL;
	if (prm->hash_alg >= TLE_HASH_NUM)
		return -EINVAL;
	return 0;
}
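
/*
 * Create a new L4 context: allocate it on the requested NUMA socket,
 * let the protocol-specific backend initialize its streams, and set up
 * the local-port bitmaps and (optionally) the siphash secret key.
 */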
struct tle_ctx *
tle_ctx_create(const struct tle_ctx_param *ctx_prm)
{
	struct tle_ctx *ctx;
	size_t sz;
	uint32_t i;
	int32_t rc;

	if (ctx_prm == NULL || check_ctx_prm(ctx_prm) != 0) {
		rte_errno = EINVAL;
		return NULL;
	}

	sz = sizeof(*ctx);
	ctx = rte_zmalloc_socket(NULL, sz, RTE_CACHE_LINE_SIZE,
		ctx_prm->socket_id);
	if (ctx == NULL) {
		UDP_LOG(ERR, "allocation of %zu bytes for new ctx "
			"on socket %d failed\n",
			sz, ctx_prm->socket_id);
		return NULL;
	}

	ctx->prm = *ctx_prm;

	rc = tle_stream_ops[ctx_prm->proto].init_streams(ctx);
	if (rc != 0) {
		UDP_LOG(ERR, "init_streams(ctx=%p, proto=%u) failed "
			"with error code: %d;\n",
			ctx, ctx_prm->proto, rc);
		tle_ctx_destroy(ctx);
		rte_errno = -rc;
		return NULL;
	}

	for (i = 0; i != RTE_DIM(ctx->use); i++)
		tle_pbm_init(ctx->use + i, LPORT_START_BLK);

	ctx->streams.nb_free = ctx->prm.max_streams;

	/* Initialization of siphash state is done here to speed up the
	 * fastpath processing.
	 */
	if (ctx->prm.hash_alg == TLE_SIPHASH)
		siphash_initialization(&ctx->prm.secret_key,
			&ctx->prm.secret_key);

	return ctx;
}
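
/*
 * Destroy a context: detach all its devices, let the protocol backend
 * release its streams, then free the context itself.
 */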
void
tle_ctx_destroy(struct tle_ctx *ctx)
{
	uint32_t i;

	if (ctx == NULL) {
		rte_errno = EINVAL;
		return;
	}

	for (i = 0; i != RTE_DIM(ctx->dev); i++)
		tle_del_dev(ctx->dev + i);

	tle_stream_ops[ctx->prm.proto].fini_streams(ctx);
	rte_free(ctx);
}

void
tle_ctx_invalidate(struct tle_ctx *ctx)
{
	RTE_SET_USED(ctx);
}
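
/*
 * Mark every port from the blocked-port list as used in the given bitmap.
 */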
static void
fill_pbm(struct tle_pbm *pbm, const struct tle_bl_port *blp)
{
	uint32_t i;

	for (i = 0; i != blp->nb_port; i++)
		tle_pbm_set(pbm, blp->port[i]);
}
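
/*
 * Allocate the per-protocol (IPv4 or IPv6) part of a device on the given
 * NUMA socket and pre-mark its blocked ports as in use.
 */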
static int
init_dev_proto(struct tle_dev *dev, uint32_t idx, int32_t socket_id,
	const struct tle_bl_port *blp)
{
	size_t sz;

	sz = sizeof(*dev->dp[idx]);
	dev->dp[idx] = rte_zmalloc_socket(NULL, sz, RTE_CACHE_LINE_SIZE,
		socket_id);
	if (dev->dp[idx] == NULL) {
		UDP_LOG(ERR, "allocation of %zu bytes on "
			"socket %d for %u-th device failed\n",
			sz, socket_id, idx);
		return ENOMEM;
	}

	tle_pbm_init(&dev->dp[idx]->use, LPORT_START_BLK);
	fill_pbm(&dev->dp[idx]->use, blp);
	return 0;
}
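
/*
 * Find a free slot in the context's device table.
 */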
static struct tle_dev *
find_free_dev(struct tle_ctx *ctx)
{
	uint32_t i;

	if (ctx->nb_dev < RTE_DIM(ctx->dev)) {
		for (i = 0; i != RTE_DIM(ctx->dev); i++) {
			if (ctx->dev[i].ctx != ctx)
				return ctx->dev + i;
		}
	}

	rte_errno = ENODEV;
	return NULL;
}
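
/*
 * Register a device with the context: allocate its per-protocol data,
 * merge its blocked ports into the context-wide bitmaps and derive the
 * RX/TX checksum offload flags from the device capabilities.
 */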
struct tle_dev *
tle_add_dev(struct tle_ctx *ctx, const struct tle_dev_param *dev_prm)
{
	int32_t rc;
	struct tle_dev *dev;

	if (ctx == NULL || dev_prm == NULL || check_dev_prm(dev_prm) != 0) {
		rte_errno = EINVAL;
		return NULL;
	}

	dev = find_free_dev(ctx);
	if (dev == NULL)
		return NULL;
	rc = 0;

	/* device can handle IPv4 traffic */
	if (dev_prm->local_addr4.s_addr != INADDR_ANY) {
		rc = init_dev_proto(dev, TLE_V4, ctx->prm.socket_id,
			&dev_prm->bl4);
		if (rc == 0)
			fill_pbm(&ctx->use[TLE_V4], &dev_prm->bl4);
	}

	/* device can handle IPv6 traffic */
	if (rc == 0 && memcmp(&dev_prm->local_addr6, &tle_ipv6_any,
			sizeof(tle_ipv6_any)) != 0) {
		rc = init_dev_proto(dev, TLE_V6, ctx->prm.socket_id,
			&dev_prm->bl6);
		if (rc == 0)
			fill_pbm(&ctx->use[TLE_V6], &dev_prm->bl6);
	}

	if (rc != 0) {
		/* cleanup and return an error. */
		rte_free(dev->dp[TLE_V4]);
		rte_free(dev->dp[TLE_V6]);
		rte_errno = rc;
		return NULL;
	}

	if (dev_prm->local_addr4.s_addr != INADDR_ANY &&
			(dev_prm->rx_offload & DEV_RX_OFFLOAD_IPV4_CKSUM) == 0)
		dev->rx.ol_flags[TLE_V4] |= PKT_RX_IP_CKSUM_BAD;

	if (((dev_prm->rx_offload & DEV_RX_OFFLOAD_UDP_CKSUM) == 0 &&
			ctx->prm.proto == TLE_PROTO_UDP) ||
			((dev_prm->rx_offload &
			DEV_RX_OFFLOAD_TCP_CKSUM) == 0 &&
			ctx->prm.proto == TLE_PROTO_TCP)) {
		dev->rx.ol_flags[TLE_V4] |= PKT_RX_L4_CKSUM_BAD;
		dev->rx.ol_flags[TLE_V6] |= PKT_RX_L4_CKSUM_BAD;
	}

	tle_dring_reset(&dev->tx.dr);

	if ((dev_prm->tx_offload & DEV_TX_OFFLOAD_UDP_CKSUM) != 0 &&
			ctx->prm.proto == TLE_PROTO_UDP) {
		dev->tx.ol_flags[TLE_V4] |= PKT_TX_IPV4 | PKT_TX_UDP_CKSUM;
		dev->tx.ol_flags[TLE_V6] |= PKT_TX_IPV6 | PKT_TX_UDP_CKSUM;
	} else if ((dev_prm->tx_offload & DEV_TX_OFFLOAD_TCP_CKSUM) != 0 &&
			ctx->prm.proto == TLE_PROTO_TCP) {
		dev->tx.ol_flags[TLE_V4] |= PKT_TX_IPV4 | PKT_TX_TCP_CKSUM;
		dev->tx.ol_flags[TLE_V6] |= PKT_TX_IPV6 | PKT_TX_TCP_CKSUM;
	}

	if ((dev_prm->tx_offload & DEV_TX_OFFLOAD_IPV4_CKSUM) != 0)
		dev->tx.ol_flags[TLE_V4] |= PKT_TX_IPV4 | PKT_TX_IP_CKSUM;

	dev->prm = *dev_prm;
	dev->ctx = ctx;
	ctx->nb_dev++;
	return dev;
}
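
/*
 * Drain a device TX ring: free all queued mbufs and return the drained
 * drbs back to their owning streams.
 */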
static void
empty_dring(struct tle_dring *dr, uint32_t proto)
{
	uint32_t i, k, n;
	struct tle_stream *s;
	struct rte_mbuf *pkt[MAX_PKT_BURST];
	struct tle_drb *drb[MAX_PKT_BURST];

	do {
		k = RTE_DIM(drb);
		n = tle_dring_sc_dequeue(dr, (const void **)(uintptr_t)pkt,
			RTE_DIM(pkt), drb, &k);

		for (i = 0; i != n; i++)
			rte_pktmbuf_free(pkt[i]);

		for (i = 0; i != k; i++) {
			s = drb[i]->udata;
			tle_stream_ops[proto].free_drbs(s, drb + i, 1);
		}
	} while (n != 0);
}
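
/*
 * Detach a device from its context: drain its TX ring, release the
 * per-protocol data and clear the device slot.
 */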
int
tle_del_dev(struct tle_dev *dev)
{
	uint32_t p;
	struct tle_ctx *ctx;

	if (dev == NULL || dev->ctx == NULL)
		return -EINVAL;

	ctx = dev->ctx;
	p = dev - ctx->dev;

	if (p >= RTE_DIM(ctx->dev) ||
			(dev->dp[TLE_V4] == NULL &&
			dev->dp[TLE_V6] == NULL))
		return -EINVAL;

	/* empty TX queues. */
	empty_dring(&dev->tx.dr, ctx->prm.proto);

	rte_free(dev->dp[TLE_V4]);
	rte_free(dev->dp[TLE_V6]);
	memset(dev, 0, sizeof(*dev));
	ctx->nb_dev--;
	return 0;
}
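
/*
 * Look up the device that owns the given local IPv4/IPv6 address.
 */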
static struct tle_dev *
find_ipv4_dev(struct tle_ctx *ctx, const struct in_addr *addr)
{
	uint32_t i;

	for (i = 0; i != RTE_DIM(ctx->dev); i++) {
		if (ctx->dev[i].prm.local_addr4.s_addr == addr->s_addr &&
				ctx->dev[i].dp[TLE_V4] != NULL)
			return ctx->dev + i;
	}

	return NULL;
}

static struct tle_dev *
find_ipv6_dev(struct tle_ctx *ctx, const struct in6_addr *addr)
{
	uint32_t i;

	for (i = 0; i != RTE_DIM(ctx->dev); i++) {
		if (memcmp(&ctx->dev[i].prm.local_addr6, addr,
				sizeof(*addr)) == 0 &&
				ctx->dev[i].dp[TLE_V6] != NULL)
			return ctx->dev + i;
	}

	return NULL;
}
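
/*
 * Bind a stream to a device and a local port: pick (or verify) the port
 * in the relevant port bitmap and register the stream with the matching
 * device(s).
 */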
static int
stream_fill_dev(struct tle_ctx *ctx, struct tle_stream *s,
	const struct sockaddr *addr)
{
	struct tle_dev *dev;
	struct tle_pbm *pbm;
	const struct sockaddr_in *lin4;
	const struct sockaddr_in6 *lin6;
	uint32_t i, p, sp, t;

	if (addr->sa_family == AF_INET) {
		lin4 = (const struct sockaddr_in *)addr;
		t = TLE_V4;
		p = lin4->sin_port;
	} else if (addr->sa_family == AF_INET6) {
		lin6 = (const struct sockaddr_in6 *)addr;
		t = TLE_V6;
		p = lin6->sin6_port;
	} else
		return EINVAL;

	p = ntohs(p);

	/* if local address is not wildcard, find device it belongs to. */
	if (t == TLE_V4 && lin4->sin_addr.s_addr != INADDR_ANY) {
		dev = find_ipv4_dev(ctx, &lin4->sin_addr);
		if (dev == NULL)
			return ENODEV;
	} else if (t == TLE_V6 && memcmp(&tle_ipv6_any, &lin6->sin6_addr,
			sizeof(tle_ipv6_any)) != 0) {
		dev = find_ipv6_dev(ctx, &lin6->sin6_addr);
		if (dev == NULL)
			return ENODEV;
	} else
		dev = NULL;

	if (dev != NULL)
		pbm = &dev->dp[t]->use;
	else
		pbm = &ctx->use[t];

	/* try to acquire local port number. */
	if (p == 0) {
		p = tle_pbm_find_range(pbm, pbm->blk, LPORT_END_BLK);
		if (p == 0 && pbm->blk > LPORT_START_BLK)
			p = tle_pbm_find_range(pbm, LPORT_START_BLK, pbm->blk);
	} else if (tle_pbm_check(pbm, p) != 0)
		return EEXIST;

	if (p == 0)
		return ENFILE;

	/* fill socket's dst port and type */
	sp = htons(p);
	s->port.dst = sp;
	s->type = t;

	/* mark port as in-use */
	tle_pbm_set(&ctx->use[t], p);
	if (dev != NULL) {
		tle_pbm_set(pbm, p);
		dev->dp[t]->streams[sp] = s;
	} else {
		for (i = 0; i != RTE_DIM(ctx->dev); i++) {
			if (ctx->dev[i].dp[t] != NULL) {
				tle_pbm_set(&ctx->dev[i].dp[t]->use, p);
				ctx->dev[i].dp[t]->streams[sp] = s;
			}
		}
	}

	return 0;
}
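
/*
 * Undo stream_fill_dev(): release the local port and remove the stream
 * from the device(s) it was registered with.
 */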
static int
stream_clear_dev(struct tle_ctx *ctx, const struct tle_stream *s)
{
	struct tle_dev *dev;
	uint32_t i, p, sp, t;

	t = s->type;
	sp = s->port.dst;
	p = ntohs(sp);

	/* if local address is not wildcard, find device it belongs to. */
	if (t == TLE_V4 && s->ipv4.addr.dst != INADDR_ANY) {
		dev = find_ipv4_dev(ctx,
			(const struct in_addr *)&s->ipv4.addr.dst);
		if (dev == NULL)
			return ENODEV;
	} else if (t == TLE_V6 && memcmp(&tle_ipv6_any, &s->ipv6.addr.dst,
			sizeof(tle_ipv6_any)) != 0) {
		dev = find_ipv6_dev(ctx,
			(const struct in6_addr *)&s->ipv6.addr.dst);
		if (dev == NULL)
			return ENODEV;
	} else
		dev = NULL;

	tle_pbm_clear(&ctx->use[t], p);
	if (dev != NULL) {
		if (dev->dp[t]->streams[sp] == s) {
			tle_pbm_clear(&dev->dp[t]->use, p);
			dev->dp[t]->streams[sp] = NULL;
		}
	} else {
		for (i = 0; i != RTE_DIM(ctx->dev); i++) {
			if (ctx->dev[i].dp[t] != NULL &&
					ctx->dev[i].dp[t]->streams[sp] == s) {
				tle_pbm_clear(&ctx->dev[i].dp[t]->use, p);
				ctx->dev[i].dp[t]->streams[sp] = NULL;
			}
		}
	}

	return 0;
}
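
/*
 * Fill an IPv4 address/mask pair: a wildcard address gets a zero mask,
 * any other address an exact-match mask.
 */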
static void
fill_ipv4_am(const struct sockaddr_in *in, uint32_t *addr, uint32_t *mask)
{
	*addr = in->sin_addr.s_addr;
	*mask = (*addr == INADDR_ANY) ? INADDR_ANY : INADDR_NONE;
}
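
/*
 * Fill an IPv6 address/mask pair: a wildcard address gets the all-zero
 * mask, any other address the all-ones mask.
 */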
static void
fill_ipv6_am(const struct sockaddr_in6 *in, rte_xmm_t *addr, rte_xmm_t *mask)
{
	const struct in6_addr *pm;

	memcpy(addr, &in->sin6_addr, sizeof(*addr));
	if (memcmp(&tle_ipv6_any, addr, sizeof(*addr)) == 0)
		pm = &tle_ipv6_any;
	else
		pm = &tle_ipv6_none;

	memcpy(mask, pm, sizeof(*mask));
}
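
/*
 * Fill the stream's address/port match fields from the local and remote
 * sockaddrs and register the stream within the context (under dev_lock).
 */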
int
stream_fill_ctx(struct tle_ctx *ctx, struct tle_stream *s,
	const struct sockaddr *laddr, const struct sockaddr *raddr)
{
	const struct sockaddr_in *rin;
	int32_t rc;

	/* setup ports and port mask fields (except dst port). */
	rin = (const struct sockaddr_in *)raddr;
	s->port.src = rin->sin_port;
	s->pmsk.src = (s->port.src == 0) ? 0 : UINT16_MAX;
	s->pmsk.dst = UINT16_MAX;

	/* setup src and dst addresses. */
	if (laddr->sa_family == AF_INET) {
		fill_ipv4_am((const struct sockaddr_in *)laddr,
			&s->ipv4.addr.dst, &s->ipv4.mask.dst);
		fill_ipv4_am((const struct sockaddr_in *)raddr,
			&s->ipv4.addr.src, &s->ipv4.mask.src);
	} else if (laddr->sa_family == AF_INET6) {
		fill_ipv6_am((const struct sockaddr_in6 *)laddr,
			&s->ipv6.addr.dst, &s->ipv6.mask.dst);
		fill_ipv6_am((const struct sockaddr_in6 *)raddr,
			&s->ipv6.addr.src, &s->ipv6.mask.src);
	}

	rte_spinlock_lock(&ctx->dev_lock);
	rc = stream_fill_dev(ctx, s, laddr);
	rte_spinlock_unlock(&ctx->dev_lock);

	return rc;
}

/* free stream's destination port */
int
stream_clear_ctx(struct tle_ctx *ctx, struct tle_stream *s)
{
	int32_t rc;

	rte_spinlock_lock(&ctx->dev_lock);
	rc = stream_clear_dev(ctx, s);
	rte_spinlock_unlock(&ctx->dev_lock);

	return rc;
}