* limitations under the License.
*/
-#ifndef MAIN_DPDK_LEGACY_H_
-#define MAIN_DPDK_LEGACY_H_
+#ifndef DPDK_LEGACY_H_
+#define DPDK_LEGACY_H_
-#include "dpdk_version.h"
+#include <rte_version.h>
+
+#if RTE_VERSION_NUM(17, 5, 0, 0) <= RTE_VERSION
+#ifndef DPDK_VERSION_GE_1705
+#define DPDK_VERSION_GE_1705
+#endif
+#endif
/*
- * UDP IPv4 destination lookup callback.
+ * IPv6 destination lookup callback.
*/
static int
-lpm4_dst_lookup(void *data, const struct in_addr *addr,
+lpm6_dst_lookup(void *data, const struct in6_addr *addr,
struct tle_dest *res)
{
int32_t rc;
-#ifdef DPDK_VERSION_GE_1604
+#ifdef DPDK_VERSION_GE_1705
uint32_t idx;
#else
uint8_t idx;
#endif
struct netbe_lcore *lc;
struct tle_dest *dst;
+ uintptr_t p;
lc = data;
+ p = (uintptr_t)addr->s6_addr;
- rc = rte_lpm_lookup(lc->lpm4, rte_be_to_cpu_32(addr->s_addr), &idx);
+ rc = rte_lpm6_lookup(lc->lpm6, (uint8_t *)p, &idx);
if (rc == 0) {
- dst = &lc->dst4[idx];
+ dst = &lc->dst6[idx];
rte_memcpy(res, dst, dst->l2_len + dst->l3_len +
offsetof(struct tle_dest, hdr));
}
}
static int
-lcore_lpm_init(struct netbe_lcore *lc)
-{
- int32_t sid;
- char str[RTE_LPM_NAMESIZE];
-#ifdef DPDK_VERSION_GE_1604
- const struct rte_lpm_config lpm4_cfg = {
- .max_rules = MAX_RULES,
- .number_tbl8s = MAX_TBL8,
- };
-#endif
- const struct rte_lpm6_config lpm6_cfg = {
- .max_rules = MAX_RULES,
- .number_tbl8s = MAX_TBL8,
- };
-
- sid = rte_lcore_to_socket_id(lc->id);
-
- snprintf(str, sizeof(str), "LPM4%u\n", lc->id);
-#ifdef DPDK_VERSION_GE_1604
- lc->lpm4 = rte_lpm_create(str, sid, &lpm4_cfg);
-#else
- lc->lpm4 = rte_lpm_create(str, sid, MAX_RULES, 0);
-#endif
- RTE_LOG(NOTICE, USER1, "%s(lcore=%u): lpm4=%p;\n",
- __func__, lc->id, lc->lpm4);
- if (lc->lpm4 == NULL)
- return -ENOMEM;
-
- snprintf(str, sizeof(str), "LPM6%u\n", lc->id);
- lc->lpm6 = rte_lpm6_create(str, sid, &lpm6_cfg);
- RTE_LOG(NOTICE, USER1, "%s(lcore=%u): lpm6=%p;\n",
- __func__, lc->id, lc->lpm6);
- if (lc->lpm6 == NULL)
- return -ENOMEM;
-
- return 0;
-}
-
-/*
- * Helper functions, finds BE by given local and remote addresses.
- */
-static int
-netbe_find4(const struct in_addr *laddr, const uint16_t lport,
- const struct in_addr *raddr, const uint32_t belc)
+netbe_find6(const struct in6_addr *laddr, uint16_t lport,
+ const struct in6_addr *raddr, uint32_t belc)
{
uint32_t i, j;
-#ifdef DPDK_VERSION_GE_1604
+#ifdef DPDK_VERSION_GE_1705
uint32_t idx;
#else
uint8_t idx;
if (belc == bc->id)
return i;
}
- RTE_LOG(NOTICE, USER1, "%s: no stream with be_lcore=%u\n",
+		RTE_LOG(NOTICE, USER1, "%s: no stream with be_lcore=%u\n",
__func__, belc);
return -ENOENT;
}
/* search by local address */
- if (laddr->s_addr != INADDR_ANY) {
+ if (memcmp(laddr, &in6addr_any, sizeof(*laddr)) != 0) {
for (i = 0; i != becfg.cpu_num; i++) {
bc = becfg.cpu + i;
/* search by queue for the local port */
for (j = 0; j != bc->prtq_num; j++) {
- if (laddr->s_addr == bc->prtq[j].port.ipv4) {
+ if (memcmp(laddr, &bc->prtq[j].port.ipv6,
+ sizeof(*laddr)) == 0) {
if (lport == 0)
return i;
}
/* search by remote address */
- if (raddr->s_addr != INADDR_ANY) {
+	if (memcmp(raddr, &in6addr_any, sizeof(*raddr)) != 0) {
for (i = 0; i != becfg.cpu_num; i++) {
bc = becfg.cpu + i;
- if (rte_lpm_lookup(bc->lpm4,
- rte_be_to_cpu_32(raddr->s_addr),
+ if (rte_lpm6_lookup(bc->lpm6,
+ (uint8_t *)(uintptr_t)raddr->s6_addr,
&idx) == 0) {
if (lport == 0)
return -ENOENT;
}
-#endif /* MAIN_DPDK_LEGACY_H_ */
+#endif /* DPDK_LEGACY_H_ */
+++ /dev/null
-/*
- * Copyright (c) 2016 Intel Corporation.
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at:
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef DPDK_VERSION_H_
-#define DPDK_VERSION_H_
-
-#include <rte_version.h>
-
-#ifdef RTE_VER_MAJOR
-#if RTE_VER_MAJOR >= 16 && RTE_VER_MINOR >= 4
-#define DPDK_VERSION_GE_1604
-#endif
-#elif defined(RTE_VER_YEAR)
-#if RTE_VERSION_NUM(16, 4, 0, 0) <= RTE_VERSION
-#define DPDK_VERSION_GE_1604
-#endif
-#else
-#error "RTE_VER_MAJOR and RTE_VER_YEAR are undefined!"
-#endif
-
-#endif /* DPDK_VERSION_H_ */
#include "dpdk_legacy.h"
/*
- * IPv6 destination lookup callback.
+ * IPv4 destination lookup callback.
*/
static int
-lpm6_dst_lookup(void *data, const struct in6_addr *addr,
+lpm4_dst_lookup(void *data, const struct in_addr *addr,
struct tle_dest *res)
{
int32_t rc;
- uint8_t idx;
+ uint32_t idx;
struct netbe_lcore *lc;
struct tle_dest *dst;
- uintptr_t p;
lc = data;
- p = (uintptr_t)addr->s6_addr;
- rc = rte_lpm6_lookup(lc->lpm6, (uint8_t *)p, &idx);
+ rc = rte_lpm_lookup(lc->lpm4, rte_be_to_cpu_32(addr->s_addr), &idx);
if (rc == 0) {
- dst = &lc->dst6[idx];
+ dst = &lc->dst4[idx];
rte_memcpy(res, dst, dst->l2_len + dst->l3_len +
offsetof(struct tle_dest, hdr));
}
return rc;
}
+static int
+lcore_lpm_init(struct netbe_lcore *lc)
+{
+ int32_t sid;
+ char str[RTE_LPM_NAMESIZE];
+ const struct rte_lpm_config lpm4_cfg = {
+ .max_rules = MAX_RULES,
+ .number_tbl8s = MAX_TBL8,
+ };
+ const struct rte_lpm6_config lpm6_cfg = {
+ .max_rules = MAX_RULES,
+ .number_tbl8s = MAX_TBL8,
+ };
+
+ sid = rte_lcore_to_socket_id(lc->id);
+
+ snprintf(str, sizeof(str), "LPM4%u\n", lc->id);
+ lc->lpm4 = rte_lpm_create(str, sid, &lpm4_cfg);
+ RTE_LOG(NOTICE, USER1, "%s(lcore=%u): lpm4=%p;\n",
+ __func__, lc->id, lc->lpm4);
+ if (lc->lpm4 == NULL)
+ return -ENOMEM;
+
+ snprintf(str, sizeof(str), "LPM6%u\n", lc->id);
+ lc->lpm6 = rte_lpm6_create(str, sid, &lpm6_cfg);
+ RTE_LOG(NOTICE, USER1, "%s(lcore=%u): lpm6=%p;\n",
+ __func__, lc->id, lc->lpm6);
+ if (lc->lpm6 == NULL)
+ return -ENOMEM;
+
+ return 0;
+}
+
+/*
+ * Helper functions, finds BE by given local and remote addresses.
+ */
+static int
+netbe_find4(const struct in_addr *laddr, const uint16_t lport,
+ const struct in_addr *raddr, const uint32_t belc)
+{
+ uint32_t i, j;
+ uint32_t idx;
+ struct netbe_lcore *bc;
+
+ /* we have exactly one BE, use it for all traffic */
+ if (becfg.cpu_num == 1)
+ return 0;
+
+ /* search by provided be_lcore */
+ if (belc != LCORE_ID_ANY) {
+ for (i = 0; i != becfg.cpu_num; i++) {
+ bc = becfg.cpu + i;
+ if (belc == bc->id)
+ return i;
+ }
+ RTE_LOG(NOTICE, USER1, "%s: no stream with be_lcore=%u\n",
+ __func__, belc);
+ return -ENOENT;
+ }
+
+ /* search by local address */
+ if (laddr->s_addr != INADDR_ANY) {
+ for (i = 0; i != becfg.cpu_num; i++) {
+ bc = becfg.cpu + i;
+ /* search by queue for the local port */
+ for (j = 0; j != bc->prtq_num; j++) {
+ if (laddr->s_addr == bc->prtq[j].port.ipv4) {
+
+ if (lport == 0)
+ return i;
+
+ if (verify_queue_for_port(bc->prtq + j,
+ lport) != 0)
+ return i;
+ }
+ }
+ }
+ }
+
+ /* search by remote address */
+ if (raddr->s_addr != INADDR_ANY) {
+ for (i = 0; i != becfg.cpu_num; i++) {
+ bc = becfg.cpu + i;
+ if (rte_lpm_lookup(bc->lpm4,
+ rte_be_to_cpu_32(raddr->s_addr),
+ &idx) == 0) {
+
+ if (lport == 0)
+ return i;
+
+ /* search by queue for the local port */
+ for (j = 0; j != bc->prtq_num; j++)
+ if (verify_queue_for_port(bc->prtq + j,
+ lport) != 0)
+ return i;
+ }
+ }
+ }
+
+ return -ENOENT;
+}
+
static int
create_context(struct netbe_lcore *lc, const struct tle_ctx_param *ctx_prm)
{
return p1->lcore - p2->lcore;
}
-static int
-netbe_find6(const struct in6_addr *laddr, uint16_t lport,
- const struct in6_addr *raddr, uint32_t belc)
-{
- uint32_t i, j;
- uint8_t idx;
- struct netbe_lcore *bc;
-
- /* we have exactly one BE, use it for all traffic */
- if (becfg.cpu_num == 1)
- return 0;
-
- /* search by provided be_lcore */
- if (belc != LCORE_ID_ANY) {
- for (i = 0; i != becfg.cpu_num; i++) {
- bc = becfg.cpu + i;
- if (belc == bc->id)
- return i;
- }
- RTE_LOG(NOTICE, USER1, "%s: no stream with belcore=%u\n",
- __func__, belc);
- return -ENOENT;
- }
-
- /* search by local address */
- if (memcmp(laddr, &in6addr_any, sizeof(*laddr)) != 0) {
- for (i = 0; i != becfg.cpu_num; i++) {
- bc = becfg.cpu + i;
- /* search by queue for the local port */
- for (j = 0; j != bc->prtq_num; j++) {
- if (memcmp(laddr, &bc->prtq[j].port.ipv6,
- sizeof(*laddr)) == 0) {
-
- if (lport == 0)
- return i;
-
- if (verify_queue_for_port(bc->prtq + j,
- lport) != 0)
- return i;
- }
- }
- }
- }
-
- /* search by remote address */
- if (memcmp(raddr, &in6addr_any, sizeof(*raddr)) == 0) {
- for (i = 0; i != becfg.cpu_num; i++) {
- bc = becfg.cpu + i;
- if (rte_lpm6_lookup(bc->lpm6,
- (uint8_t *)(uintptr_t)raddr->s6_addr,
- &idx) == 0) {
-
- if (lport == 0)
- return i;
-
- /* search by queue for the local port */
- for (j = 0; j != bc->prtq_num; j++)
- if (verify_queue_for_port(bc->prtq + j,
- lport) != 0)
- return i;
- }
- }
- }
-
- return -ENOENT;
-}
-
static int
netbe_find(const struct sockaddr_storage *la,
const struct sockaddr_storage *ra,
#include "netbe.h"
+struct ptype2cb {
+ uint32_t mask;
+ const char *name;
+ rte_rx_callback_fn fn;
+};
+
+enum {
+ ETHER_PTYPE = 0x1,
+ IPV4_PTYPE = 0x2,
+ IPV4_EXT_PTYPE = 0x4,
+ IPV6_PTYPE = 0x8,
+ IPV6_EXT_PTYPE = 0x10,
+ TCP_PTYPE = 0x20,
+ UDP_PTYPE = 0x40,
+};
+
static inline uint64_t
_mbuf_tx_offload(uint64_t il2, uint64_t il3, uint64_t il4, uint64_t tso,
uint64_t ol3, uint64_t ol2)
return compress_pkt_list(pkt, nb_pkts, x);
}
-#include "pkt_dpdk_legacy.h"
+static uint32_t
+get_ptypes(const struct netbe_port *uprt)
+{
+ uint32_t smask;
+ int32_t i, rc;
+ const uint32_t pmask = RTE_PTYPE_L2_MASK | RTE_PTYPE_L3_MASK |
+ RTE_PTYPE_L4_MASK;
+
+ smask = 0;
+ rc = rte_eth_dev_get_supported_ptypes(uprt->id, pmask, NULL, 0);
+ if (rc < 0) {
+ RTE_LOG(ERR, USER1,
+ "%s(port=%u) failed to get supported ptypes;\n",
+ __func__, uprt->id);
+ return smask;
+ }
+
+ uint32_t ptype[rc];
+ rc = rte_eth_dev_get_supported_ptypes(uprt->id, pmask, ptype, rc);
+
+ for (i = 0; i != rc; i++) {
+ switch (ptype[i]) {
+ case RTE_PTYPE_L2_ETHER:
+ smask |= ETHER_PTYPE;
+ break;
+ case RTE_PTYPE_L3_IPV4:
+ case RTE_PTYPE_L3_IPV4_EXT_UNKNOWN:
+ smask |= IPV4_PTYPE;
+ break;
+ case RTE_PTYPE_L3_IPV4_EXT:
+ smask |= IPV4_EXT_PTYPE;
+ break;
+ case RTE_PTYPE_L3_IPV6:
+ case RTE_PTYPE_L3_IPV6_EXT_UNKNOWN:
+ smask |= IPV6_PTYPE;
+ break;
+ case RTE_PTYPE_L3_IPV6_EXT:
+ smask |= IPV6_EXT_PTYPE;
+ break;
+ case RTE_PTYPE_L4_TCP:
+ smask |= TCP_PTYPE;
+ break;
+ case RTE_PTYPE_L4_UDP:
+ smask |= UDP_PTYPE;
+ break;
+ }
+ }
+
+ return smask;
+}
+
+int
+setup_rx_cb(const struct netbe_port *uprt, struct netbe_lcore *lc,
+ uint16_t qid, uint32_t arp)
+{
+ int32_t rc;
+ uint32_t i, n, smask;
+ void *cb;
+ const struct ptype2cb *ptype2cb;
+
+ static const struct ptype2cb tcp_ptype2cb[] = {
+ {
+ .mask = ETHER_PTYPE | IPV4_PTYPE | IPV4_EXT_PTYPE |
+ IPV6_PTYPE | IPV6_EXT_PTYPE | TCP_PTYPE,
+ .name = "HW l2/l3x/l4-tcp ptype",
+ .fn = type0_tcp_rx_callback,
+ },
+ {
+ .mask = ETHER_PTYPE | IPV4_PTYPE | IPV6_PTYPE |
+ TCP_PTYPE,
+ .name = "HW l2/l3/l4-tcp ptype",
+ .fn = type1_tcp_rx_callback,
+ },
+ {
+ .mask = 0,
+ .name = "tcp no HW ptype",
+ .fn = typen_tcp_rx_callback,
+ },
+ };
+
+ static const struct ptype2cb tcp_arp_ptype2cb[] = {
+ {
+ .mask = 0,
+ .name = "tcp with arp no HW ptype",
+ .fn = typen_tcp_arp_rx_callback,
+ },
+ };
+
+ static const struct ptype2cb udp_ptype2cb[] = {
+ {
+ .mask = ETHER_PTYPE | IPV4_PTYPE | IPV4_EXT_PTYPE |
+ IPV6_PTYPE | IPV6_EXT_PTYPE | UDP_PTYPE,
+ .name = "HW l2/l3x/l4-udp ptype",
+ .fn = type0_udp_rx_callback,
+ },
+ {
+ .mask = ETHER_PTYPE | IPV4_PTYPE | IPV6_PTYPE |
+ UDP_PTYPE,
+ .name = "HW l2/l3/l4-udp ptype",
+ .fn = type1_udp_rx_callback,
+ },
+ {
+ .mask = 0,
+ .name = "udp no HW ptype",
+ .fn = typen_udp_rx_callback,
+ },
+ };
+
+ smask = get_ptypes(uprt);
+
+ if (lc->proto == TLE_PROTO_TCP) {
+ if (arp != 0) {
+ ptype2cb = tcp_arp_ptype2cb;
+ n = RTE_DIM(tcp_arp_ptype2cb);
+ } else {
+ ptype2cb = tcp_ptype2cb;
+ n = RTE_DIM(tcp_ptype2cb);
+ }
+ } else if (lc->proto == TLE_PROTO_UDP) {
+ ptype2cb = udp_ptype2cb;
+ n = RTE_DIM(udp_ptype2cb);
+ } else {
+ RTE_LOG(ERR, USER1,
+ "%s(lc=%u) unsupported proto: %u\n",
+ __func__, lc->id, lc->proto);
+ return -EINVAL;
+ }
+
+ for (i = 0; i != n; i++) {
+ if ((smask & ptype2cb[i].mask) == ptype2cb[i].mask) {
+ cb = rte_eth_add_rx_callback(uprt->id, qid,
+ ptype2cb[i].fn, lc);
+ rc = -rte_errno;
+ RTE_LOG(ERR, USER1,
+ "%s(port=%u), setup RX callback \"%s\" "
+ "returns %p;\n",
+ __func__, uprt->id, ptype2cb[i].name, cb);
+ return ((cb == NULL) ? rc : 0);
+ }
+ }
+
+ /* no proper callback found. */
+ RTE_LOG(ERR, USER1,
+ "%s(port=%u) failed to find an appropriate callback;\n",
+ __func__, uprt->id);
+ return -ENOENT;
+}
+
+++ /dev/null
-/*
- * Copyright (c) 2016 Intel Corporation.
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at:
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef PKT_DPDK_LEGACY_H_
-#define PKT_DPDK_LEGACY_H_
-
-#include "dpdk_version.h"
-
-struct ptype2cb {
- uint32_t mask;
- const char *name;
- rte_rx_callback_fn fn;
-};
-
-enum {
- ETHER_PTYPE = 0x1,
- IPV4_PTYPE = 0x2,
- IPV4_EXT_PTYPE = 0x4,
- IPV6_PTYPE = 0x8,
- IPV6_EXT_PTYPE = 0x10,
- TCP_PTYPE = 0x20,
- UDP_PTYPE = 0x40,
-};
-
-#ifdef DPDK_VERSION_GE_1604
-
-static uint32_t
-get_ptypes(const struct netbe_port *uprt)
-{
- uint32_t smask;
- int32_t i, rc;
- const uint32_t pmask = RTE_PTYPE_L2_MASK | RTE_PTYPE_L3_MASK |
- RTE_PTYPE_L4_MASK;
-
- smask = 0;
- rc = rte_eth_dev_get_supported_ptypes(uprt->id, pmask, NULL, 0);
- if (rc < 0) {
- RTE_LOG(ERR, USER1,
- "%s(port=%u) failed to get supported ptypes;\n",
- __func__, uprt->id);
- return smask;
- }
-
- uint32_t ptype[rc];
- rc = rte_eth_dev_get_supported_ptypes(uprt->id, pmask, ptype, rc);
-
- for (i = 0; i != rc; i++) {
- switch (ptype[i]) {
- case RTE_PTYPE_L2_ETHER:
- smask |= ETHER_PTYPE;
- break;
- case RTE_PTYPE_L3_IPV4:
- case RTE_PTYPE_L3_IPV4_EXT_UNKNOWN:
- smask |= IPV4_PTYPE;
- break;
- case RTE_PTYPE_L3_IPV4_EXT:
- smask |= IPV4_EXT_PTYPE;
- break;
- case RTE_PTYPE_L3_IPV6:
- case RTE_PTYPE_L3_IPV6_EXT_UNKNOWN:
- smask |= IPV6_PTYPE;
- break;
- case RTE_PTYPE_L3_IPV6_EXT:
- smask |= IPV6_EXT_PTYPE;
- break;
- case RTE_PTYPE_L4_TCP:
- smask |= TCP_PTYPE;
- break;
- case RTE_PTYPE_L4_UDP:
- smask |= UDP_PTYPE;
- break;
- }
- }
-
- return smask;
-}
-
-#else
-
-static uint32_t
-get_ptypes(__rte_unused const struct netbe_port *uprt)
-{
- return 0;
-}
-
-#endif /* DPDK_VERSION_GE_1604 */
-
-int
-setup_rx_cb(const struct netbe_port *uprt, struct netbe_lcore *lc,
- uint16_t qid, uint32_t arp)
-{
- int32_t rc;
- uint32_t i, n, smask;
- void *cb;
- const struct ptype2cb *ptype2cb;
-
- static const struct ptype2cb tcp_ptype2cb[] = {
- {
- .mask = ETHER_PTYPE | IPV4_PTYPE | IPV4_EXT_PTYPE |
- IPV6_PTYPE | IPV6_EXT_PTYPE | TCP_PTYPE,
- .name = "HW l2/l3x/l4-tcp ptype",
- .fn = type0_tcp_rx_callback,
- },
- {
- .mask = ETHER_PTYPE | IPV4_PTYPE | IPV6_PTYPE |
- TCP_PTYPE,
- .name = "HW l2/l3/l4-tcp ptype",
- .fn = type1_tcp_rx_callback,
- },
- {
- .mask = 0,
- .name = "tcp no HW ptype",
- .fn = typen_tcp_rx_callback,
- },
- };
-
- static const struct ptype2cb tcp_arp_ptype2cb[] = {
- {
- .mask = 0,
- .name = "tcp with arp no HW ptype",
- .fn = typen_tcp_arp_rx_callback,
- },
- };
-
- static const struct ptype2cb udp_ptype2cb[] = {
- {
- .mask = ETHER_PTYPE | IPV4_PTYPE | IPV4_EXT_PTYPE |
- IPV6_PTYPE | IPV6_EXT_PTYPE | UDP_PTYPE,
- .name = "HW l2/l3x/l4-udp ptype",
- .fn = type0_udp_rx_callback,
- },
- {
- .mask = ETHER_PTYPE | IPV4_PTYPE | IPV6_PTYPE |
- UDP_PTYPE,
- .name = "HW l2/l3/l4-udp ptype",
- .fn = type1_udp_rx_callback,
- },
- {
- .mask = 0,
- .name = "udp no HW ptype",
- .fn = typen_udp_rx_callback,
- },
- };
-
- smask = get_ptypes(uprt);
-
- if (lc->proto == TLE_PROTO_TCP) {
- if (arp != 0) {
- ptype2cb = tcp_arp_ptype2cb;
- n = RTE_DIM(tcp_arp_ptype2cb);
- } else {
- ptype2cb = tcp_ptype2cb;
- n = RTE_DIM(tcp_ptype2cb);
- }
- } else if (lc->proto == TLE_PROTO_UDP) {
- ptype2cb = udp_ptype2cb;
- n = RTE_DIM(udp_ptype2cb);
- } else {
- RTE_LOG(ERR, USER1,
- "%s(lc=%u) unsupported proto: %u\n",
- __func__, lc->id, lc->proto);
- return -EINVAL;
- }
-
- for (i = 0; i != n; i++) {
- if ((smask & ptype2cb[i].mask) == ptype2cb[i].mask) {
- cb = rte_eth_add_rx_callback(uprt->id, qid,
- ptype2cb[i].fn, lc);
- rc = -rte_errno;
- RTE_LOG(ERR, USER1,
- "%s(port=%u), setup RX callback \"%s\" "
- "returns %p;\n",
- __func__, uprt->id, ptype2cb[i].name, cb);
- return ((cb == NULL) ? rc : 0);
- }
- }
-
- /* no proper callback found. */
- RTE_LOG(ERR, USER1,
- "%s(port=%u) failed to find an appropriate callback;\n",
- __func__, uprt->id);
- return -ENOENT;
-}
-
-#endif /* PKT_DPDK_LEGACY_H_ */
include $(RTE_SDK)/mk/rte.vars.mk
+DIRS-y += libtle_misc
DIRS-y += libtle_dring
DIRS-y += libtle_timer
DIRS-y += libtle_l4p
SYMLINK-y-include += tle_udp.h
# this lib dependencies
+DEPDIRS-y += lib/libtle_misc
DEPDIRS-y += lib/libtle_dring
DEPDIRS-y += lib/libtle_timer
#ifndef _MISC_H_
#define _MISC_H_
+#include <tle_dpdk_wrapper.h>
+
#ifdef __cplusplus
extern "C" {
#endif
struct rte_mbuf *mb[MAX_PKT_BURST];
do {
- n = rte_ring_dequeue_burst(r, (void **)mb, RTE_DIM(mb));
+ n = _rte_ring_dequeue_burst(r, (void **)mb, RTE_DIM(mb));
for (i = 0; i != n; i++)
rte_pktmbuf_free(mb[i]);
} while (n != 0);
/* peer doesn't support WSCALE option, wnd size is limited to 64K */
if (scale == TCP_WSCALE_NONE) {
- wnd = s->rx.q->prod.mask << TCP_WSCALE_DEFAULT;
+ wnd = _rte_ring_get_mask(s->rx.q) << TCP_WSCALE_DEFAULT;
return RTE_MIN(wnd, (uint32_t)UINT16_MAX);
} else
- return s->rx.q->prod.mask << scale;
+ return _rte_ring_get_mask(s->rx.q) << scale;
}
/* empty stream's receive queue */
struct stbl_entry *se[MAX_PKT_BURST];
do {
- n = rte_ring_dequeue_burst(s->rx.q, (void **)se, RTE_DIM(se));
+ n = _rte_ring_dequeue_burst(s->rx.q, (void **)se, RTE_DIM(se));
for (i = 0; i != n; i++) {
mb = stbl_get_pkt(se[i]);
get_pkt_info(mb, &pi, &si);
num = db->nb_elem;
sl->raw = db->sl.raw;
- n = rte_ring_enqueue_burst(r, (void * const *)db->obj, num);
+ n = _rte_ring_enqueue_burst(r, (void * const *)db->obj, num);
sl->len -= tcp_mbuf_seq_free(db->obj + n, num - n);
return num - n;
{
uint32_t i, n;
- n = rte_ring_enqueue_burst(s->rx.q, (void * const *)mb, num);
+ n = _rte_ring_enqueue_burst(s->rx.q, (void * const *)mb, num);
/* error: can'queue some packets into receive buffer. */
for (i = n; i != num; i++)
stream_drb_free(struct tle_tcp_stream *s, struct tle_drb *drbs[],
uint32_t nb_drb)
{
- rte_ring_enqueue_burst(s->tx.drb.r, (void **)drbs, nb_drb);
+ _rte_ring_enqueue_burst(s->tx.drb.r, (void **)drbs, nb_drb);
}
static inline uint32_t
stream_drb_alloc(struct tle_tcp_stream *s, struct tle_drb *drbs[],
uint32_t nb_drb)
{
- return rte_ring_dequeue_burst(s->tx.drb.r, (void **)drbs, nb_drb);
+ return _rte_ring_dequeue_burst(s->tx.drb.r, (void **)drbs, nb_drb);
}
static inline void
if (accept_prep_stream(s, st, cs, &so, tms, pi, si) == 0) {
/* put new stream in the accept queue */
- if (rte_ring_enqueue_burst(s->rx.q,
+ if (_rte_ring_enqueue_burst(s->rx.q,
(void * const *)&ts, 1) == 1) {
*csp = cs;
return 0;
struct tle_tcp_stream *s;
s = TCP_STREAM(ts);
- n = rte_ring_mc_dequeue_burst(s->rx.q, (void **)rs, num);
+ n = _rte_ring_mc_dequeue_burst(s->rx.q, (void **)rs, num);
if (n == 0)
return 0;
struct tle_tcp_stream *s;
s = TCP_STREAM(ts);
- n = rte_ring_mc_dequeue_burst(s->rx.q, (void **)pkt, num);
+ n = _rte_ring_mc_dequeue_burst(s->rx.q, (void **)pkt, num);
if (n == 0)
return 0;
if (i == num) {
/* queue packets for further transmission. */
- rc = rte_ring_mp_enqueue_bulk(s->tx.q, (void **)segs, num);
+ rc = _rte_ring_mp_enqueue_bulk(s->tx.q, (void **)segs, num);
if (rc != 0)
free_segments(segs, num);
}
if (i != k) {
/* queue packets for further transmission. */
- n = rte_ring_mp_enqueue_burst(s->tx.q, (void **)pkt + k,
- (i - k));
+ n = _rte_ring_mp_enqueue_burst(s->tx.q,
+ (void **)pkt + k, (i - k));
k += n;
/*
#include "tcp_ofo.h"
#include "tcp_txq.h"
-
static void
unuse_stream(struct tle_tcp_stream *s)
{
char name[RTE_RING_NAMESIZE];
n = rte_align32pow2(n);
- sz = sizeof(*r) + n * sizeof(r->ring[0]);
+ sz = rte_ring_get_memsize(n);
r = rte_zmalloc_socket(NULL, sz, RTE_CACHE_LINE_SIZE, socket);
if (r == NULL) {
n = rte_align32pow2(k);
/* size of the drbs ring */
- rsz = sizeof(*s->tx.drb.r) + n * sizeof(s->tx.drb.r->ring[0]);
+ rsz = rte_ring_get_memsize(n);
rsz = RTE_ALIGN_CEIL(rsz, RTE_CACHE_LINE_SIZE);
/* size of the drb. */
struct tle_tcp_stream *us;
us = (struct tle_tcp_stream *)s;
- rte_ring_enqueue_burst(us->tx.drb.r, (void **)drb, nb_drb);
+ _rte_ring_enqueue_burst(us->tx.drb.r, (void **)drb, nb_drb);
}
static struct tle_timer_wheel *
struct rte_ring *r;
r = s->tx.q;
- sz = r->prod.size;
- mask = r->prod.mask;
+ sz = _rte_ring_get_size(r);
+ mask = _rte_ring_get_mask(r);
head = r->cons.head & mask;
tail = r->prod.tail & mask;
cnt = (tail >= head) ? tail - head : sz - head;
*num = cnt;
- return (struct rte_mbuf **)(r->ring + head);
+ return (struct rte_mbuf **)(_rte_ring_get_data(r) + head);
}
static inline struct rte_mbuf **
struct rte_ring *r;
r = s->tx.q;
- sz = r->prod.size;
- mask = r->prod.mask;
+ sz = _rte_ring_get_size(r);
+ mask = _rte_ring_get_mask(r);
head = r->prod.tail & mask;
tail = r->cons.tail & mask;
cnt = (head >= tail) ? head - tail : sz - tail;
*num = cnt;
- return (struct rte_mbuf **)(r->ring + tail);
+ return (struct rte_mbuf **)(_rte_ring_get_data(r) + tail);
}
static inline void
struct rte_ring *r;
r = s->tx.q;
- return (r->prod.tail - r->cons.head) & r->prod.mask;
+ return (r->prod.tail - r->cons.head) & _rte_ring_get_mask(r);
}
static inline void
if (rte_atomic32_add_return(&s->tx.arm, 1) == 1) {
r = CTX_TCP_TSQ(ctx);
- n = rte_ring_enqueue_burst(r, (void * const *)&s, 1);
+ n = _rte_ring_enqueue_burst(r, (void * const *)&s, 1);
RTE_VERIFY(n == 1);
}
}
struct rte_ring *r;
r = CTX_TCP_TSQ(ctx);
- return rte_ring_dequeue_burst(r, (void **)s, num);
+ return _rte_ring_dequeue_burst(r, (void **)s, num);
}
#ifdef __cplusplus
{
uint32_t i, k, r;
- r = rte_ring_enqueue_burst(s->rx.q, mb, num);
+ r = _rte_ring_enqueue_burst(s->rx.q, mb, num);
/* if RX queue was empty invoke user RX notification callback. */
if (s->rx.cb.func != NULL && r != 0 && rte_ring_count(s->rx.q) == r)
uint32_t n;
n = rte_ring_count(s->tx.drb.r);
- rte_ring_enqueue_burst(s->tx.drb.r, (void **)drb, nb_drb);
+ _rte_ring_enqueue_burst(s->tx.drb.r, (void **)drb, nb_drb);
/* If stream is still open, then mark it as avaialble for writing. */
if (rwl_try_acquire(&s->tx.use) > 0) {
struct tle_udp_stream *s;
s = UDP_STREAM(us);
- n = rte_ring_mc_dequeue_burst(s->rx.q, (void **)pkt, num);
+ n = _rte_ring_mc_dequeue_burst(s->rx.q, (void **)pkt, num);
if (n == 0)
return 0;
stream_drb_free(struct tle_udp_stream *s, struct tle_drb *drbs[],
uint32_t nb_drb)
{
- rte_ring_enqueue_burst(s->tx.drb.r, (void **)drbs, nb_drb);
+ _rte_ring_enqueue_burst(s->tx.drb.r, (void **)drbs, nb_drb);
}
static inline uint32_t
stream_drb_alloc(struct tle_udp_stream *s, struct tle_drb *drbs[],
uint32_t nb_drb)
{
- return rte_ring_dequeue_burst(s->tx.drb.r, (void **)drbs, nb_drb);
+ return _rte_ring_dequeue_burst(s->tx.drb.r, (void **)drbs, nb_drb);
}
/* enqueue up to num packets to the destination device queue. */
n = RTE_MAX(ctx->prm.max_stream_rbufs, 1U);
n = rte_align32pow2(n);
- sz = sizeof(*s->rx.q) + n * sizeof(s->rx.q->ring[0]);
+ sz = rte_ring_get_memsize(n);
s->rx.q = rte_zmalloc_socket(NULL, sz, RTE_CACHE_LINE_SIZE,
ctx->prm.socket_id);
n = rte_align32pow2(k);
/* size of the drbs ring */
- rsz = sizeof(*s->tx.drb.r) + n * sizeof(s->tx.drb.r->ring[0]);
+ rsz = rte_ring_get_memsize(n);
rsz = RTE_ALIGN_CEIL(rsz, RTE_CACHE_LINE_SIZE);
/* size of the drb. */
struct tle_udp_stream *us;
us = (struct tle_udp_stream *)s;
- rte_ring_enqueue_burst(us->tx.drb.r, (void **)drb, nb_drb);
+ _rte_ring_enqueue_burst(us->tx.drb.r, (void **)drb, nb_drb);
}
static int
--- /dev/null
+# Copyright (c) 2017 Intel Corporation.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+ifeq ($(RTE_SDK),)
+$(error "Please define RTE_SDK environment variable")
+endif
+
+# Default target, can be overwritten by command line or environment
+RTE_TARGET ?= x86_64-native-linuxapp-gcc
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# library name
+LIB = libtle_misc.a
+
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS) -I$(SRCDIR)
+
+EXPORT_MAP := tle_misc_version.map
+
+LIBABIVER := 1
+
+SYMLINK-y-include += tle_dpdk_wrapper.h
+
+include $(TLDK_ROOT)/mk/tle.lib.mk
--- /dev/null
+/*
+ * Copyright (c) 2017 Intel Corporation.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef TLE_DPDK_WRAPPER_H_
+#define TLE_DPDK_WRAPPER_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <rte_version.h>
+
+#if RTE_VERSION >= RTE_VERSION_NUM(17, 5, 0, 0)
+
+static inline uint32_t
+_rte_ring_mp_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
+ uint32_t n)
+{
+ uint32_t rc;
+
+ rc = rte_ring_mp_enqueue_bulk(r, (void * const *)obj_table, n, NULL);
+ if (rc == n)
+ return 0;
+ else
+ return -ENOSPC;
+}
+
+static inline uint32_t
+_rte_ring_mp_enqueue_burst(struct rte_ring *r, void * const *obj_table,
+ uint32_t n)
+{
+ return rte_ring_mp_enqueue_burst(r, (void * const *)obj_table, n, NULL);
+}
+
+static inline uint32_t
+_rte_ring_mc_dequeue_burst(struct rte_ring *r, void **obj_table, uint32_t n)
+{
+ return rte_ring_mc_dequeue_burst(r, (void **)obj_table, n, NULL);
+}
+
+static inline uint32_t
+_rte_ring_enqueue_burst(struct rte_ring *r, void * const *obj_table, uint32_t n)
+{
+ return rte_ring_enqueue_burst(r, (void * const *)obj_table, n, NULL);
+}
+
+static inline uint32_t
+_rte_ring_dequeue_burst(struct rte_ring *r, void **obj_table, uint32_t n)
+{
+ return rte_ring_dequeue_burst(r, (void **)obj_table, n, NULL);
+}
+
+static inline uint32_t
+_rte_ring_get_size(struct rte_ring *r)
+{
+ return r->size;
+}
+
+static inline uint32_t
+_rte_ring_get_mask(struct rte_ring *r)
+{
+ return r->mask;
+}
+
+static inline void **
+_rte_ring_get_data(struct rte_ring *r)
+{
+ return (void **)(&r[1]);
+}
+
+#else
+
+static inline uint32_t
+_rte_ring_mp_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
+ uint32_t n)
+{
+ return rte_ring_mp_enqueue_bulk(r, (void * const *)obj_table, n);
+}
+
+static inline uint32_t
+_rte_ring_mp_enqueue_burst(struct rte_ring *r, void * const *obj_table,
+ uint32_t n)
+{
+ return rte_ring_mp_enqueue_burst(r, (void * const *)obj_table, n);
+}
+
+static inline uint32_t
+_rte_ring_mc_dequeue_burst(struct rte_ring *r, void **obj_table, uint32_t n)
+{
+ return rte_ring_mc_dequeue_burst(r, (void **)obj_table, n);
+}
+
+static inline uint32_t
+_rte_ring_enqueue_burst(struct rte_ring *r, void * const *obj_table, uint32_t n)
+{
+ return rte_ring_enqueue_burst(r, (void * const *)obj_table, n);
+}
+
+static inline uint32_t
+_rte_ring_dequeue_burst(struct rte_ring *r, void **obj_table, uint32_t n)
+{
+ return rte_ring_dequeue_burst(r, (void **)obj_table, n);
+}
+
+static inline uint32_t
+_rte_ring_get_size(struct rte_ring *r)
+{
+ return r->prod.size;
+}
+
+static inline uint32_t
+_rte_ring_get_mask(struct rte_ring *r)
+{
+ return r->prod.mask;
+}
+
+static inline void **
+_rte_ring_get_data(struct rte_ring *r)
+{
+ return (void **)r->ring;
+}
+
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* TLE_DPDK_WRAPPER_H_ */
include $(RTE_SDK)/mk/internal/rte.install-pre.mk
include $(RTE_SDK)/mk/internal/rte.clean-pre.mk
include $(RTE_SDK)/mk/internal/rte.build-pre.mk
-include $(RTE_SDK)/mk/internal/rte.depdirs-pre.mk
include $(TLDK_ROOT)/mk/tle.cpp-pre.mk
#
include $(RTE_SDK)/mk/internal/rte.install-post.mk
include $(RTE_SDK)/mk/internal/rte.clean-post.mk
include $(RTE_SDK)/mk/internal/rte.build-post.mk
-include $(RTE_SDK)/mk/internal/rte.depdirs-post.mk
.PHONY: FORCE
FORCE:
/* allocate and initialise rte_ring. */
n = rte_align32pow2(num);
- sz = sizeof(*r) + n * sizeof(r->ring[0]);
+ sz = rte_ring_get_memsize(n);
r = calloc(1, sz);
if (r == NULL) {
/* allocate and initialise rte_ring. */
n = rte_align32pow2(num);
- sz = sizeof(*r) + n * sizeof(r->ring[0]);
+ sz = rte_ring_get_memsize(n);
r = (struct rte_ring *)calloc(1, sz);
if (r == NULL) {