Bump DPDK version to 20.05 and adjust TLDK source.
Signed-off-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
Change-Id: Id2ce864ad20b3b347f1ac05cd67c15384e454c52
port_conf.rxmode.offloads |= pcf->rx_offload & RX_CSUM_OFFLOAD;
}
- port_conf.rxmode.max_rx_pkt_len = pcf->mtu + ETHER_CRC_LEN;
- if (port_conf.rxmode.max_rx_pkt_len > ETHER_MAX_LEN)
+ port_conf.rxmode.max_rx_pkt_len = pcf->mtu + RTE_ETHER_CRC_LEN;
+ if (port_conf.rxmode.max_rx_pkt_len > RTE_ETHER_MAX_LEN)
port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
port_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
port_conf.rx_adv_conf.rss_conf.rss_hf = ETH_RSS_IP | ETH_RSS_TCP;
if (rte_get_master_lcore() != lid &&
rte_eal_get_lcore_state(lid) == RUNNING) {
- RTE_LOG(ERR, USER1, "lcore %u already running %p\n",
- lid, lcore_config[lid].f);
+ RTE_LOG(ERR, USER1, "lcore %u already in use\n", lid);
return -EINVAL;
}
const struct tldk_port_conf *pcf, const struct tldk_dest_conf *dest,
uint16_t l3_type, struct rte_mempool *mp)
{
- struct ether_hdr *eth;
- struct ipv4_hdr *ip4h;
- struct ipv6_hdr *ip6h;
+ struct rte_ether_hdr *eth;
+ struct rte_ipv4_hdr *ip4h;
+ struct rte_ipv6_hdr *ip6h;
dst->dev = td->dev;
dst->head_mp = mp;
dst->mtu = RTE_MIN(dest->mtu, pcf->mtu);
dst->l2_len = sizeof(*eth);
- eth = (struct ether_hdr *)dst->hdr;
+ eth = (struct rte_ether_hdr *)dst->hdr;
- ether_addr_copy(&pcf->mac, &eth->s_addr);
- ether_addr_copy(&dest->mac, &eth->d_addr);
+ rte_ether_addr_copy(&pcf->mac, &eth->s_addr);
+ rte_ether_addr_copy(&dest->mac, &eth->d_addr);
eth->ether_type = rte_cpu_to_be_16(l3_type);
- if (l3_type == ETHER_TYPE_IPv4) {
+ if (l3_type == RTE_ETHER_TYPE_IPV4) {
dst->l3_len = sizeof(*ip4h);
- ip4h = (struct ipv4_hdr *)(eth + 1);
+ ip4h = (struct rte_ipv4_hdr *)(eth + 1);
ip4h->version_ihl = 4 << 4 |
- sizeof(*ip4h) / IPV4_IHL_MULTIPLIER;
+ sizeof(*ip4h) / RTE_IPV4_IHL_MULTIPLIER;
ip4h->time_to_live = 64;
ip4h->next_proto_id = IPPROTO_TCP;
- } else if (l3_type == ETHER_TYPE_IPv6) {
+ } else if (l3_type == RTE_ETHER_TYPE_IPV6) {
dst->l3_len = sizeof(*ip6h);
- ip6h = (struct ipv6_hdr *)(eth + 1);
+ ip6h = (struct rte_ipv6_hdr *)(eth + 1);
ip6h->vtc_flow = 6 << 4;
ip6h->proto = IPPROTO_TCP;
ip6h->hop_limits = 64;
n = tcx->dst4_num;
dp = tcx->dst4 + n;
m = RTE_DIM(tcx->dst4);
- l3_type = ETHER_TYPE_IPv4;
+ l3_type = RTE_ETHER_TYPE_IPV4;
} else {
n = tcx->dst6_num;
dp = tcx->dst6 + n;
m = RTE_DIM(tcx->dst6);
- l3_type = ETHER_TYPE_IPv6;
+ l3_type = RTE_ETHER_TYPE_IPV6;
}
if (n + dnum >= m) {
}
static inline int
-is_ipv4_frag(const struct ipv4_hdr *iph)
+is_ipv4_frag(const struct rte_ipv4_hdr *iph)
{
- const uint16_t mask = rte_cpu_to_be_16(~IPV4_HDR_DF_FLAG);
+ const uint16_t mask = rte_cpu_to_be_16(~RTE_IPV4_HDR_DF_FLAG);
return ((mask & iph->fragment_offset) != 0);
}
static inline uint32_t
get_tcp_header_size(struct rte_mbuf *m, uint32_t l2_len, uint32_t l3_len)
{
- const struct tcp_hdr *tcp;
+ const struct rte_tcp_hdr *tcp;
- tcp = rte_pktmbuf_mtod_offset(m, struct tcp_hdr *, l2_len + l3_len);
+ tcp = rte_pktmbuf_mtod_offset(m, struct rte_tcp_hdr *, l2_len + l3_len);
return (tcp->data_off >> 4) * 4;
}
adjust_ipv4_pktlen(struct rte_mbuf *m, uint32_t l2_len)
{
uint32_t plen, trim;
- const struct ipv4_hdr *iph;
+ const struct rte_ipv4_hdr *iph;
- iph = rte_pktmbuf_mtod_offset(m, const struct ipv4_hdr *, l2_len);
+ iph = rte_pktmbuf_mtod_offset(m, const struct rte_ipv4_hdr *, l2_len);
plen = rte_be_to_cpu_16(iph->total_length) + l2_len;
if (plen < m->pkt_len) {
trim = m->pkt_len - plen;
adjust_ipv6_pktlen(struct rte_mbuf *m, uint32_t l2_len)
{
uint32_t plen, trim;
- const struct ipv6_hdr *iph;
+ const struct rte_ipv6_hdr *iph;
- iph = rte_pktmbuf_mtod_offset(m, const struct ipv6_hdr *, l2_len);
+ iph = rte_pktmbuf_mtod_offset(m, const struct rte_ipv6_hdr *, l2_len);
plen = rte_be_to_cpu_16(iph->payload_len) + sizeof(*iph) + l2_len;
if (plen < m->pkt_len) {
trim = m->pkt_len - plen;
tcp_stat_update(struct tldk_ctx *lc, const struct rte_mbuf *m,
uint32_t l2_len, uint32_t l3_len)
{
- const struct tcp_hdr *th;
+ const struct rte_tcp_hdr *th;
- th = rte_pktmbuf_mtod_offset(m, struct tcp_hdr *, l2_len + l3_len);
+ th = rte_pktmbuf_mtod_offset(m, struct rte_tcp_hdr *, l2_len + l3_len);
lc->tcp_stat.flags[th->tcp_flags]++;
}
static inline uint32_t
get_ipv4_hdr_len(struct rte_mbuf *m, uint32_t l2, uint32_t proto, uint32_t frag)
{
- const struct ipv4_hdr *iph;
+ const struct rte_ipv4_hdr *iph;
int32_t dlen, len;
dlen = rte_pktmbuf_data_len(m);
dlen -= l2;
- iph = rte_pktmbuf_mtod_offset(m, const struct ipv4_hdr *, l2);
- len = (iph->version_ihl & IPV4_HDR_IHL_MASK) * IPV4_IHL_MULTIPLIER;
+ iph = rte_pktmbuf_mtod_offset(m, const struct rte_ipv4_hdr *, l2);
+ len = (iph->version_ihl & RTE_IPV4_HDR_IHL_MASK) *
+ RTE_IPV4_IHL_MULTIPLIER;
if (frag != 0 && is_ipv4_frag(iph)) {
m->packet_type &= ~RTE_PTYPE_L4_MASK;
const struct ip6_ext *ipx;
int32_t dlen, len, ofs;
- len = sizeof(struct ipv6_hdr);
+ len = sizeof(struct rte_ipv6_hdr);
dlen = rte_pktmbuf_data_len(m);
dlen -= l2;
static inline uint32_t
get_ipv6_hdr_len(struct rte_mbuf *m, uint32_t l2, uint32_t fproto)
{
- const struct ipv6_hdr *iph;
+ const struct rte_ipv6_hdr *iph;
- iph = rte_pktmbuf_mtod_offset(m, const struct ipv6_hdr *,
- sizeof(struct ether_hdr));
+ iph = rte_pktmbuf_mtod_offset(m, const struct rte_ipv6_hdr *,
+ sizeof(struct rte_ether_hdr));
if (iph->proto == fproto)
- return sizeof(struct ipv6_hdr);
+ return sizeof(struct rte_ipv6_hdr);
else if (ipv6x_hdr(iph->proto) != 0)
return get_ipv6x_hdr_len(m, l2, iph->proto, fproto);
{
uint32_t dlen, l2_len, l3_len, l4_len;
uint16_t etp;
- const struct ether_hdr *eth;
+ const struct rte_ether_hdr *eth;
dlen = rte_pktmbuf_data_len(m);
/* check that first segment is at least 54B long. */
- if (dlen < sizeof(struct ether_hdr) + sizeof(struct ipv4_hdr) +
- sizeof(struct tcp_hdr)) {
+ if (dlen < sizeof(struct rte_ether_hdr) + sizeof(struct rte_ipv4_hdr) +
+ sizeof(struct rte_tcp_hdr)) {
m->packet_type = RTE_PTYPE_UNKNOWN;
return;
}
l2_len = sizeof(*eth);
- eth = rte_pktmbuf_mtod(m, const struct ether_hdr *);
+ eth = rte_pktmbuf_mtod(m, const struct rte_ether_hdr *);
etp = eth->ether_type;
- if (etp == rte_be_to_cpu_16(ETHER_TYPE_VLAN))
- l2_len += sizeof(struct vlan_hdr);
+ if (etp == rte_be_to_cpu_16(RTE_ETHER_TYPE_VLAN))
+ l2_len += sizeof(struct rte_vlan_hdr);
- if (etp == rte_be_to_cpu_16(ETHER_TYPE_IPv4)) {
+ if (etp == rte_be_to_cpu_16(RTE_ETHER_TYPE_IPV4)) {
m->packet_type = RTE_PTYPE_L4_TCP |
RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
RTE_PTYPE_L2_ETHER;
l4_len = get_tcp_header_size(m, l2_len, l3_len);
fill_pkt_hdr_len(m, l2_len, l3_len, l4_len);
adjust_ipv4_pktlen(m, l2_len);
- } else if (etp == rte_be_to_cpu_16(ETHER_TYPE_IPv6) &&
- dlen >= l2_len + sizeof(struct ipv6_hdr) +
- sizeof(struct tcp_hdr)) {
+ } else if (etp == rte_be_to_cpu_16(RTE_ETHER_TYPE_IPV6) &&
+ dlen >= l2_len + sizeof(struct rte_ipv6_hdr) +
+ sizeof(struct rte_tcp_hdr)) {
m->packet_type = RTE_PTYPE_L4_TCP |
RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
RTE_PTYPE_L2_ETHER;
{
uint32_t j, tp;
uint32_t l4_len, l3_len, l2_len;
- const struct ether_hdr *eth;
+ const struct rte_ether_hdr *eth;
l2_len = sizeof(*eth);
case (RTE_PTYPE_L4_TCP | RTE_PTYPE_L3_IPV4 |
RTE_PTYPE_L2_ETHER):
l4_len = get_tcp_header_size(pkt[j], l2_len,
- sizeof(struct ipv4_hdr));
+ sizeof(struct rte_ipv4_hdr));
fill_pkt_hdr_len(pkt[j], l2_len,
- sizeof(struct ipv4_hdr), l4_len);
+ sizeof(struct rte_ipv4_hdr), l4_len);
adjust_ipv4_pktlen(pkt[j], l2_len);
break;
case (RTE_PTYPE_L4_TCP | RTE_PTYPE_L3_IPV6 |
RTE_PTYPE_L2_ETHER):
l4_len = get_tcp_header_size(pkt[j], l2_len,
- sizeof(struct ipv6_hdr));
+ sizeof(struct rte_ipv6_hdr));
fill_pkt_hdr_len(pkt[j], l2_len,
- sizeof(struct ipv6_hdr), l4_len);
+ sizeof(struct rte_ipv6_hdr), l4_len);
adjust_ipv6_pktlen(pkt[j], l2_len);
break;
case (RTE_PTYPE_L4_TCP | RTE_PTYPE_L3_IPV4_EXT |
uint32_t j, tp;
struct tldk_ctx *tcx;
uint32_t l4_len, l3_len, l2_len;
- const struct ether_hdr *eth;
+ const struct rte_ether_hdr *eth;
tcx = user_param;
l2_len = sizeof(*eth);
uint64_t tx_offload;
uint32_t ipv4;
struct in6_addr ipv6;
- struct ether_addr mac;
+ struct rte_ether_addr mac;
};
struct tldk_dev_conf {
struct in_addr ipv4;
struct in6_addr ipv6;
};
- struct ether_addr mac;
+ struct rte_ether_addr mac;
};
#define TLDK_MAX_DEST 0x10
struct in6_addr addr6;
};
} in;
- struct ether_addr mac;
+ struct rte_ether_addr mac;
rte_cpuset_t cpuset;
};
union parse_val pvl[RTE_DIM(kh)];
memset(pvl, 0, sizeof(pvl));
- pvl[1].u64 = ETHER_MAX_LEN - ETHER_CRC_LEN;
+ pvl[1].u64 = RTE_ETHER_MAX_LEN - RTE_ETHER_CRC_LEN;
if (cf->args->nelts % 2 != 0)
return NGX_CONF_ERROR;
union parse_val pvl[RTE_DIM(kh)];
memset(pvl, 0, sizeof(pvl));
- pvl[1].u64 = ETHER_MAX_LEN - ETHER_CRC_LEN;
+ pvl[1].u64 = RTE_ETHER_MAX_LEN - RTE_ETHER_CRC_LEN;
if (cf->args->nelts % 2 != 1 || cf->args->nelts == 1)
return NGX_CONF_ERROR;
# Scripts require non-POSIX parts of bash
SHELL := /bin/bash
-DPDK_VERSION ?= v18.11
+DPDK_VERSION ?= v20.05
DPDK_BUILD_DIR ?= $(CURDIR)/_build
DPDK_INSTALL_DIR ?= $(DPDK_BUILD_DIR)/dpdk/$(RTE_TARGET)
DPDK_PKTMBUF_HEADROOM ?= 128
const struct netbe_dest *bdp, uint16_t l3_type, int32_t sid,
uint8_t proto_id)
{
- struct ether_hdr *eth;
- struct ipv4_hdr *ip4h;
- struct ipv6_hdr *ip6h;
+ struct rte_ether_hdr *eth;
+ struct rte_ipv4_hdr *ip4h;
+ struct rte_ipv6_hdr *ip6h;
dst->dev = bed->dev;
dst->head_mp = frag_mpool[sid + 1];
dst->mtu = RTE_MIN(bdp->mtu, bed->port.mtu);
dst->l2_len = sizeof(*eth);
- eth = (struct ether_hdr *)dst->hdr;
+ eth = (struct rte_ether_hdr *)dst->hdr;
- ether_addr_copy(&bed->port.mac, &eth->s_addr);
- ether_addr_copy(&bdp->mac, &eth->d_addr);
+ rte_ether_addr_copy(&bed->port.mac, &eth->s_addr);
+ rte_ether_addr_copy(&bdp->mac, &eth->d_addr);
eth->ether_type = rte_cpu_to_be_16(l3_type);
- if (l3_type == ETHER_TYPE_IPv4) {
+ if (l3_type == RTE_ETHER_TYPE_IPV4) {
dst->l3_len = sizeof(*ip4h);
- ip4h = (struct ipv4_hdr *)(eth + 1);
+ ip4h = (struct rte_ipv4_hdr *)(eth + 1);
ip4h->version_ihl = 4 << 4 |
- sizeof(*ip4h) / IPV4_IHL_MULTIPLIER;
+ sizeof(*ip4h) / RTE_IPV4_IHL_MULTIPLIER;
ip4h->time_to_live = 64;
ip4h->next_proto_id = proto_id;
- } else if (l3_type == ETHER_TYPE_IPv6) {
+ } else if (l3_type == RTE_ETHER_TYPE_IPV6) {
dst->l3_len = sizeof(*ip6h);
- ip6h = (struct ipv6_hdr *)(eth + 1);
+ ip6h = (struct rte_ipv6_hdr *)(eth + 1);
ip6h->vtc_flow = 6 << 4;
ip6h->proto = proto_id;
ip6h->hop_limits = 64;
n = lc->dst4_num;
dp = lc->dst4 + n;
m = RTE_DIM(lc->dst4);
- l3_type = ETHER_TYPE_IPv4;
+ l3_type = RTE_ETHER_TYPE_IPV4;
} else {
n = lc->dst6_num;
dp = lc->dst6 + n;
m = RTE_DIM(lc->dst6);
- l3_type = ETHER_TYPE_IPv6;
+ l3_type = RTE_ETHER_TYPE_IPV6;
}
if (n + dnum >= m) {
static inline void
fill_arp_reply(struct netbe_dev *dev, struct rte_mbuf *m)
{
- struct ether_hdr *eth;
- struct arp_hdr *ahdr;
- struct arp_ipv4 *adata;
+ struct rte_ether_hdr *eth;
+ struct rte_arp_hdr *ahdr;
+ struct rte_arp_ipv4 *adata;
uint32_t tip;
/* set up the ethernet data */
- eth = rte_pktmbuf_mtod(m, struct ether_hdr *);
+ eth = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
eth->d_addr = eth->s_addr;
eth->s_addr = dev->port.mac;
/* set up the arp data */
- ahdr = rte_pktmbuf_mtod_offset(m, struct arp_hdr *, m->l2_len);
+ ahdr = rte_pktmbuf_mtod_offset(m, struct rte_arp_hdr *, m->l2_len);
adata = &ahdr->arp_data;
- ahdr->arp_op = rte_cpu_to_be_16(ARP_OP_REPLY);
+ ahdr->arp_opcode = rte_cpu_to_be_16(RTE_ARP_OP_REPLY);
tip = adata->arp_tip;
adata->arp_tip = adata->arp_sip;
uint64_t tx_offload;
uint32_t ipv4;
struct in6_addr ipv6;
- struct ether_addr mac;
+ struct rte_ether_addr mac;
uint32_t hash_key_size;
uint8_t hash_key[RSS_HASH_KEY_LENGTH];
};
struct in_addr ipv4;
struct in6_addr ipv6;
};
- struct ether_addr mac;
+ struct rte_ether_addr mac;
};
struct netbe_dest_prm {
union parse_val val[RTE_DIM(hndl)];
memset(val, 0, sizeof(val));
- val[2].u64 = ETHER_MAX_LEN - ETHER_CRC_LEN;
+ val[2].u64 = RTE_ETHER_MAX_LEN - RTE_ETHER_CRC_LEN;
rc = parse_kvargs(arg, keys_man, RTE_DIM(keys_man),
keys_opt, RTE_DIM(keys_opt), hndl, val);
RTE_LOG(ERR, USER1, "%s(line=%u) invalid masklen=%u",
__func__, dst->line, dst->prfx);
return -EINVAL;
- } else if (dst->mtu > ETHER_MAX_JUMBO_FRAME_LEN - ETHER_CRC_LEN) {
+ } else if (dst->mtu >
+ RTE_ETHER_MAX_JUMBO_FRAME_LEN - RTE_ETHER_CRC_LEN) {
RTE_LOG(ERR, USER1, "%s(line=%u) invalid mtu=%u",
__func__, dst->line, dst->mtu);
return -EINVAL;
/* set default values. */
memset(val, 0, sizeof(val));
- val[4].u64 = ETHER_MAX_JUMBO_FRAME_LEN - ETHER_CRC_LEN;
+ val[4].u64 = RTE_ETHER_MAX_JUMBO_FRAME_LEN - RTE_ETHER_CRC_LEN;
rc = parse_kvargs(arg, keys_man, RTE_DIM(keys_man),
keys_opt, RTE_DIM(keys_opt), hndl, val);
struct in6_addr addr6;
};
} in;
- struct ether_addr mac;
+ struct rte_ether_addr mac;
rte_cpuset_t cpuset;
};
}
static inline int
-is_ipv4_frag(const struct ipv4_hdr *iph)
+is_ipv4_frag(const struct rte_ipv4_hdr *iph)
{
- const uint16_t mask = rte_cpu_to_be_16(~IPV4_HDR_DF_FLAG);
+ const uint16_t mask = rte_cpu_to_be_16(~RTE_IPV4_HDR_DF_FLAG);
return ((mask & iph->fragment_offset) != 0);
}
static inline uint32_t
get_tcp_header_size(struct rte_mbuf *m, uint32_t l2_len, uint32_t l3_len)
{
- const struct tcp_hdr *tcp;
+ const struct rte_tcp_hdr *tcp;
- tcp = rte_pktmbuf_mtod_offset(m, struct tcp_hdr *, l2_len + l3_len);
+ tcp = rte_pktmbuf_mtod_offset(m, struct rte_tcp_hdr *, l2_len + l3_len);
return (tcp->data_off >> 4) * 4;
}
adjust_ipv4_pktlen(struct rte_mbuf *m, uint32_t l2_len)
{
uint32_t plen, trim;
- const struct ipv4_hdr *iph;
+ const struct rte_ipv4_hdr *iph;
- iph = rte_pktmbuf_mtod_offset(m, const struct ipv4_hdr *, l2_len);
+ iph = rte_pktmbuf_mtod_offset(m, const struct rte_ipv4_hdr *, l2_len);
plen = rte_be_to_cpu_16(iph->total_length) + l2_len;
if (plen < m->pkt_len) {
trim = m->pkt_len - plen;
adjust_ipv6_pktlen(struct rte_mbuf *m, uint32_t l2_len)
{
uint32_t plen, trim;
- const struct ipv6_hdr *iph;
+ const struct rte_ipv6_hdr *iph;
- iph = rte_pktmbuf_mtod_offset(m, const struct ipv6_hdr *, l2_len);
+ iph = rte_pktmbuf_mtod_offset(m, const struct rte_ipv6_hdr *, l2_len);
plen = rte_be_to_cpu_16(iph->payload_len) + sizeof(*iph) + l2_len;
if (plen < m->pkt_len) {
trim = m->pkt_len - plen;
tcp_stat_update(struct netbe_lcore *lc, const struct rte_mbuf *m,
uint32_t l2_len, uint32_t l3_len)
{
- const struct tcp_hdr *th;
+ const struct rte_tcp_hdr *th;
- th = rte_pktmbuf_mtod_offset(m, struct tcp_hdr *, l2_len + l3_len);
+ th = rte_pktmbuf_mtod_offset(m, struct rte_tcp_hdr *, l2_len + l3_len);
lc->tcp_stat.flags[th->tcp_flags]++;
}
static inline uint32_t
get_ipv4_hdr_len(struct rte_mbuf *m, uint32_t l2, uint32_t proto, uint32_t frag)
{
- const struct ipv4_hdr *iph;
+ const struct rte_ipv4_hdr *iph;
int32_t dlen, len;
dlen = rte_pktmbuf_data_len(m);
dlen -= l2;
- iph = rte_pktmbuf_mtod_offset(m, const struct ipv4_hdr *, l2);
- len = (iph->version_ihl & IPV4_HDR_IHL_MASK) * IPV4_IHL_MULTIPLIER;
+ iph = rte_pktmbuf_mtod_offset(m, const struct rte_ipv4_hdr *, l2);
+ len = (iph->version_ihl & RTE_IPV4_HDR_IHL_MASK) *
+ RTE_IPV4_IHL_MULTIPLIER;
if (frag != 0 && is_ipv4_frag(iph)) {
m->packet_type &= ~RTE_PTYPE_L4_MASK;
const struct ip6_ext *ipx;
int32_t dlen, len, ofs;
- len = sizeof(struct ipv6_hdr);
+ len = sizeof(struct rte_ipv6_hdr);
dlen = rte_pktmbuf_data_len(m);
dlen -= l2;
static inline uint32_t
get_ipv6_hdr_len(struct rte_mbuf *m, uint32_t l2, uint32_t fproto)
{
- const struct ipv6_hdr *iph;
+ const struct rte_ipv6_hdr *iph;
- iph = rte_pktmbuf_mtod_offset(m, const struct ipv6_hdr *,
- sizeof(struct ether_hdr));
+ iph = rte_pktmbuf_mtod_offset(m, const struct rte_ipv6_hdr *,
+ sizeof(struct rte_ether_hdr));
if (iph->proto == fproto)
- return sizeof(struct ipv6_hdr);
+ return sizeof(struct rte_ipv6_hdr);
else if (ipv6x_hdr(iph->proto) != 0)
return get_ipv6x_hdr_len(m, l2, iph->proto, fproto);
handle_arp(struct rte_mbuf *m, struct netbe_lcore *lc, dpdk_port_t port,
uint32_t l2len)
{
- const struct arp_hdr *ahdr;
+ const struct rte_arp_hdr *ahdr;
struct pkt_buf *abuf;
- ahdr = rte_pktmbuf_mtod_offset(m, const struct arp_hdr *, l2len);
+ ahdr = rte_pktmbuf_mtod_offset(m, const struct rte_arp_hdr *, l2len);
- if (ahdr->arp_hrd != rte_be_to_cpu_16(ARP_HRD_ETHER) ||
- ahdr->arp_pro != rte_be_to_cpu_16(ETHER_TYPE_IPv4) ||
- ahdr->arp_op != rte_be_to_cpu_16(ARP_OP_REQUEST)) {
+ if (ahdr->arp_hardware != rte_be_to_cpu_16(RTE_ARP_HRD_ETHER) ||
+ ahdr->arp_protocol != rte_be_to_cpu_16(RTE_ETHER_TYPE_IPV4) ||
+ ahdr->arp_opcode != rte_be_to_cpu_16(RTE_ARP_OP_REQUEST)) {
m->packet_type = RTE_PTYPE_UNKNOWN;
return m;
{
uint32_t dlen, l2_len, l3_len, l4_len;
uint16_t etp;
- const struct ether_hdr *eth;
+ const struct rte_ether_hdr *eth;
dlen = rte_pktmbuf_data_len(m);
/* check that first segment is at least 54B long. */
- if (dlen < sizeof(struct ether_hdr) + sizeof(struct ipv4_hdr) +
- sizeof(struct tcp_hdr)) {
+ if (dlen < sizeof(struct rte_ether_hdr) + sizeof(struct rte_ipv4_hdr) +
+ sizeof(struct rte_tcp_hdr)) {
m->packet_type = RTE_PTYPE_UNKNOWN;
return m;
}
l2_len = sizeof(*eth);
- eth = rte_pktmbuf_mtod(m, const struct ether_hdr *);
+ eth = rte_pktmbuf_mtod(m, const struct rte_ether_hdr *);
etp = eth->ether_type;
- if (etp == rte_be_to_cpu_16(ETHER_TYPE_VLAN))
- l2_len += sizeof(struct vlan_hdr);
+ if (etp == rte_be_to_cpu_16(RTE_ETHER_TYPE_VLAN))
+ l2_len += sizeof(struct rte_vlan_hdr);
- if (etp == rte_be_to_cpu_16(ETHER_TYPE_ARP))
+ if (etp == rte_be_to_cpu_16(RTE_ETHER_TYPE_ARP))
return handle_arp(m, lc, port, l2_len);
- if (etp == rte_be_to_cpu_16(ETHER_TYPE_IPv4)) {
+ if (etp == rte_be_to_cpu_16(RTE_ETHER_TYPE_IPV4)) {
m->packet_type = RTE_PTYPE_L4_TCP |
RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
RTE_PTYPE_L2_ETHER;
l4_len = get_tcp_header_size(m, l2_len, l3_len);
fill_pkt_hdr_len(m, l2_len, l3_len, l4_len);
adjust_ipv4_pktlen(m, l2_len);
- } else if (etp == rte_be_to_cpu_16(ETHER_TYPE_IPv6) &&
- dlen >= l2_len + sizeof(struct ipv6_hdr) +
- sizeof(struct tcp_hdr)) {
+ } else if (etp == rte_be_to_cpu_16(RTE_ETHER_TYPE_IPV6) &&
+ dlen >= l2_len + sizeof(struct rte_ipv6_hdr) +
+ sizeof(struct rte_tcp_hdr)) {
m->packet_type = RTE_PTYPE_L4_TCP |
RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
RTE_PTYPE_L2_ETHER;
{
uint32_t dlen, l2_len, l3_len, l4_len;
uint16_t etp;
- const struct ether_hdr *eth;
+ const struct rte_ether_hdr *eth;
dlen = rte_pktmbuf_data_len(m);
/* check that first segment is at least 54B long. */
- if (dlen < sizeof(struct ether_hdr) + sizeof(struct ipv4_hdr) +
- sizeof(struct tcp_hdr)) {
+ if (dlen < sizeof(struct rte_ether_hdr) + sizeof(struct rte_ipv4_hdr) +
+ sizeof(struct rte_tcp_hdr)) {
m->packet_type = RTE_PTYPE_UNKNOWN;
return;
}
l2_len = sizeof(*eth);
- eth = rte_pktmbuf_mtod(m, const struct ether_hdr *);
+ eth = rte_pktmbuf_mtod(m, const struct rte_ether_hdr *);
etp = eth->ether_type;
- if (etp == rte_be_to_cpu_16(ETHER_TYPE_VLAN))
- l2_len += sizeof(struct vlan_hdr);
+ if (etp == rte_be_to_cpu_16(RTE_ETHER_TYPE_VLAN))
+ l2_len += sizeof(struct rte_vlan_hdr);
- if (etp == rte_be_to_cpu_16(ETHER_TYPE_IPv4)) {
+ if (etp == rte_be_to_cpu_16(RTE_ETHER_TYPE_IPV4)) {
m->packet_type = RTE_PTYPE_L4_TCP |
RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
RTE_PTYPE_L2_ETHER;
l4_len = get_tcp_header_size(m, l2_len, l3_len);
fill_pkt_hdr_len(m, l2_len, l3_len, l4_len);
adjust_ipv4_pktlen(m, l2_len);
- } else if (etp == rte_be_to_cpu_16(ETHER_TYPE_IPv6) &&
- dlen >= l2_len + sizeof(struct ipv6_hdr) +
- sizeof(struct tcp_hdr)) {
+ } else if (etp == rte_be_to_cpu_16(RTE_ETHER_TYPE_IPV6) &&
+ dlen >= l2_len + sizeof(struct rte_ipv6_hdr) +
+ sizeof(struct rte_tcp_hdr)) {
m->packet_type = RTE_PTYPE_L4_TCP |
RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
RTE_PTYPE_L2_ETHER;
{
uint32_t dlen, l2_len;
uint16_t etp;
- const struct ether_hdr *eth;
+ const struct rte_ether_hdr *eth;
dlen = rte_pktmbuf_data_len(m);
/* check that first segment is at least 42B long. */
- if (dlen < sizeof(struct ether_hdr) + sizeof(struct ipv4_hdr) +
- sizeof(struct udp_hdr)) {
+ if (dlen < sizeof(struct rte_ether_hdr) + sizeof(struct rte_ipv4_hdr) +
+ sizeof(struct rte_udp_hdr)) {
m->packet_type = RTE_PTYPE_UNKNOWN;
return;
}
l2_len = sizeof(*eth);
- eth = rte_pktmbuf_mtod(m, const struct ether_hdr *);
+ eth = rte_pktmbuf_mtod(m, const struct rte_ether_hdr *);
etp = eth->ether_type;
- if (etp == rte_be_to_cpu_16(ETHER_TYPE_VLAN))
- l2_len += sizeof(struct vlan_hdr);
+ if (etp == rte_be_to_cpu_16(RTE_ETHER_TYPE_VLAN))
+ l2_len += sizeof(struct rte_vlan_hdr);
- if (etp == rte_be_to_cpu_16(ETHER_TYPE_IPv4)) {
+ if (etp == rte_be_to_cpu_16(RTE_ETHER_TYPE_IPV4)) {
m->packet_type = RTE_PTYPE_L4_UDP |
RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
RTE_PTYPE_L2_ETHER;
fill_ipv4_hdr_len(m, l2_len, IPPROTO_UDP, 1,
- sizeof(struct udp_hdr));
- } else if (etp == rte_be_to_cpu_16(ETHER_TYPE_IPv6) &&
- dlen >= l2_len + sizeof(struct ipv6_hdr) +
- sizeof(struct udp_hdr)) {
+ sizeof(struct rte_udp_hdr));
+ } else if (etp == rte_be_to_cpu_16(RTE_ETHER_TYPE_IPV6) &&
+ dlen >= l2_len + sizeof(struct rte_ipv6_hdr) +
+ sizeof(struct rte_udp_hdr)) {
m->packet_type = RTE_PTYPE_L4_UDP |
RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
RTE_PTYPE_L2_ETHER;
fill_ipv6_hdr_len(m, l2_len, IPPROTO_UDP,
- sizeof(struct udp_hdr));
+ sizeof(struct rte_udp_hdr));
} else
m->packet_type = RTE_PTYPE_UNKNOWN;
}
static inline void
fix_reassembled(struct rte_mbuf *m, int32_t hwcsum, uint32_t proto)
{
- struct ipv4_hdr *iph;
+ struct rte_ipv4_hdr *iph;
/* update packet type. */
m->packet_type &= ~RTE_PTYPE_L4_MASK;
/* recalculate ipv4 cksum after reassemble. */
else if (hwcsum == 0 && RTE_ETH_IS_IPV4_HDR(m->packet_type)) {
- iph = rte_pktmbuf_mtod_offset(m, struct ipv4_hdr *, m->l2_len);
+ iph = rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *,
+ m->l2_len);
iph->hdr_checksum = ipv4x_cksum(iph, m->l3_len);
}
}
if (RTE_ETH_IS_IPV4_HDR(m->packet_type)) {
- struct ipv4_hdr *iph;
+ struct rte_ipv4_hdr *iph;
- iph = rte_pktmbuf_mtod_offset(m, struct ipv4_hdr *, m->l2_len);
+ iph = rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *,
+ m->l2_len);
/* process this fragment. */
m = rte_ipv4_frag_reassemble_packet(tbl, dr, m, tms, iph);
} else if (RTE_ETH_IS_IPV6_HDR(m->packet_type)) {
- struct ipv6_hdr *iph;
+ struct rte_ipv6_hdr *iph;
struct ipv6_extension_fragment *fhdr;
- iph = rte_pktmbuf_mtod_offset(m, struct ipv6_hdr *, m->l2_len);
+ iph = rte_pktmbuf_mtod_offset(m, struct rte_ipv6_hdr *,
+ m->l2_len);
/*
* we store fragment header offset in tso_segsz before
uint32_t j, tp;
struct netbe_lcore *lc;
uint32_t l4_len, l3_len, l2_len;
- const struct ether_hdr *eth;
+ const struct rte_ether_hdr *eth;
lc = user_param;
l2_len = sizeof(*eth);
case (RTE_PTYPE_L4_TCP | RTE_PTYPE_L3_IPV4 |
RTE_PTYPE_L2_ETHER):
l4_len = get_tcp_header_size(pkt[j], l2_len,
- sizeof(struct ipv4_hdr));
+ sizeof(struct rte_ipv4_hdr));
fill_pkt_hdr_len(pkt[j], l2_len,
- sizeof(struct ipv4_hdr), l4_len);
+ sizeof(struct rte_ipv4_hdr), l4_len);
adjust_ipv4_pktlen(pkt[j], l2_len);
break;
case (RTE_PTYPE_L4_TCP | RTE_PTYPE_L3_IPV6 |
RTE_PTYPE_L2_ETHER):
l4_len = get_tcp_header_size(pkt[j], l2_len,
- sizeof(struct ipv6_hdr));
+ sizeof(struct rte_ipv6_hdr));
fill_pkt_hdr_len(pkt[j], l2_len,
- sizeof(struct ipv6_hdr), l4_len);
+ sizeof(struct rte_ipv6_hdr), l4_len);
adjust_ipv6_pktlen(pkt[j], l2_len);
break;
case (RTE_PTYPE_L4_TCP | RTE_PTYPE_L3_IPV4_EXT |
uint64_t cts;
struct netbe_lcore *lc;
uint32_t l2_len;
- const struct ether_hdr *eth;
+ const struct rte_ether_hdr *eth;
lc = user_param;
cts = 0;
case (RTE_PTYPE_L4_UDP | RTE_PTYPE_L3_IPV4 |
RTE_PTYPE_L2_ETHER):
fill_pkt_hdr_len(pkt[j], l2_len,
- sizeof(struct ipv4_hdr),
- sizeof(struct udp_hdr));
+ sizeof(struct rte_ipv4_hdr),
+ sizeof(struct rte_udp_hdr));
adjust_ipv4_pktlen(pkt[j], l2_len);
break;
case (RTE_PTYPE_L4_UDP | RTE_PTYPE_L3_IPV6 |
RTE_PTYPE_L2_ETHER):
fill_pkt_hdr_len(pkt[j], l2_len,
- sizeof(struct ipv6_hdr),
- sizeof(struct udp_hdr));
+ sizeof(struct rte_ipv6_hdr),
+ sizeof(struct rte_udp_hdr));
adjust_ipv6_pktlen(pkt[j], l2_len);
break;
case (RTE_PTYPE_L4_UDP | RTE_PTYPE_L3_IPV4_EXT |
RTE_PTYPE_L2_ETHER):
fill_ipv4_hdr_len(pkt[j], l2_len,
- UINT32_MAX, 0, sizeof(struct udp_hdr));
+ UINT32_MAX, 0, sizeof(struct rte_udp_hdr));
break;
case (RTE_PTYPE_L4_UDP | RTE_PTYPE_L3_IPV6_EXT |
RTE_PTYPE_L2_ETHER):
fill_ipv6_hdr_len(pkt[j], l2_len,
- IPPROTO_UDP, sizeof(struct udp_hdr));
+ IPPROTO_UDP, sizeof(struct rte_udp_hdr));
break;
/* possibly fragmented udp packets. */
case (RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L2_ETHER):
case (RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L2_ETHER):
fill_ipv4_hdr_len(pkt[j], l2_len,
- IPPROTO_UDP, 1, sizeof(struct udp_hdr));
+ IPPROTO_UDP, 1, sizeof(struct rte_udp_hdr));
break;
case (RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L2_ETHER):
case (RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L2_ETHER):
fill_ipv6_hdr_len(pkt[j], l2_len,
- IPPROTO_UDP, sizeof(struct udp_hdr));
+ IPPROTO_UDP, sizeof(struct rte_udp_hdr));
break;
default:
/* treat packet types as invalid. */
uint32_t j, tp;
struct netbe_lcore *lc;
uint32_t l4_len, l3_len, l2_len;
- const struct ether_hdr *eth;
+ const struct rte_ether_hdr *eth;
lc = user_param;
l2_len = sizeof(*eth);
uint64_t cts;
struct netbe_lcore *lc;
uint32_t l2_len;
- const struct ether_hdr *eth;
+ const struct rte_ether_hdr *eth;
lc = user_param;
cts = 0;
case (RTE_PTYPE_L4_UDP | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
RTE_PTYPE_L2_ETHER):
fill_ipv4_hdr_len(pkt[j], l2_len,
- UINT32_MAX, 0, sizeof(struct udp_hdr));
+ UINT32_MAX, 0, sizeof(struct rte_udp_hdr));
break;
case (RTE_PTYPE_L4_UDP | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
RTE_PTYPE_L2_ETHER):
fill_ipv6_hdr_len(pkt[j], l2_len,
- IPPROTO_UDP, sizeof(struct udp_hdr));
+ IPPROTO_UDP, sizeof(struct rte_udp_hdr));
break;
case (RTE_PTYPE_L4_FRAG | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
RTE_PTYPE_L2_ETHER):
fill_ipv4_hdr_len(pkt[j], l2_len,
- IPPROTO_UDP, 0, sizeof(struct udp_hdr));
+ IPPROTO_UDP, 0, sizeof(struct rte_udp_hdr));
break;
case (RTE_PTYPE_L4_FRAG | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
RTE_PTYPE_L2_ETHER):
fill_ipv6_hdr_len(pkt[j], l2_len,
- IPPROTO_UDP, sizeof(struct udp_hdr));
+ IPPROTO_UDP, sizeof(struct rte_udp_hdr));
break;
default:
/* treat packet types as invalid. */
__func__, uprt->id);
port_conf.rxmode.offloads |= uprt->rx_offload & RX_CSUM_OFFLOAD;
}
- port_conf.rxmode.max_rx_pkt_len = uprt->mtu + ETHER_CRC_LEN;
- if (port_conf.rxmode.max_rx_pkt_len > ETHER_MAX_LEN)
+ port_conf.rxmode.max_rx_pkt_len = uprt->mtu + RTE_ETHER_CRC_LEN;
+ if (port_conf.rxmode.max_rx_pkt_len > RTE_ETHER_MAX_LEN)
port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
rc = update_rss_conf(uprt, &dev_info, &port_conf, proto);
return -EINVAL;
}
if (rte_eal_get_lcore_state(lc) == RUNNING) {
- RTE_LOG(ERR, USER1, "lcore %u already running %p\n",
- lc, lcore_config[lc].f);
+ RTE_LOG(ERR, USER1, "lcore %u already in use\n", lc);
return -EINVAL;
}
return 0;
netfe_pkt_addr(const struct rte_mbuf *m, struct sockaddr_storage *ps,
uint16_t family)
{
- const struct ipv4_hdr *ip4h;
- const struct ipv6_hdr *ip6h;
- const struct udp_hdr *udph;
+ const struct rte_ipv4_hdr *ip4h;
+ const struct rte_ipv6_hdr *ip6h;
+ const struct rte_udp_hdr *udph;
struct sockaddr_in *in4;
struct sockaddr_in6 *in6;
NETFE_PKT_DUMP(m);
- udph = rte_pktmbuf_mtod_offset(m, struct udp_hdr *, -m->l4_len);
+ udph = rte_pktmbuf_mtod_offset(m, struct rte_udp_hdr *, -m->l4_len);
if (family == AF_INET) {
in4 = (struct sockaddr_in *)ps;
- ip4h = rte_pktmbuf_mtod_offset(m, struct ipv4_hdr *,
+ ip4h = rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *,
-(m->l4_len + m->l3_len));
in4->sin_port = udph->src_port;
in4->sin_addr.s_addr = ip4h->src_addr;
} else {
in6 = (struct sockaddr_in6 *)ps;
- ip6h = rte_pktmbuf_mtod_offset(m, struct ipv6_hdr *,
+ ip6h = rte_pktmbuf_mtod_offset(m, struct rte_ipv6_hdr *,
-(m->l4_len + m->l3_len));
in6->sin6_port = udph->src_port;
rte_memcpy(&in6->sin6_addr, ip6h->src_addr,
* The non-complemented checksum to set in the L4 header.
*/
static inline uint16_t
-_ipv4x_phdr_cksum(const struct ipv4_hdr *ipv4_hdr, size_t ipv4h_len,
+_ipv4x_phdr_cksum(const struct rte_ipv4_hdr *ipv4_hdr, size_t ipv4h_len,
uint64_t ol_flags)
{
uint32_t s0, s1;
*/
static inline int
_ipv4_udptcp_mbuf_cksum(const struct rte_mbuf *mb, uint16_t l4_ofs,
- const struct ipv4_hdr *ipv4_hdr)
+ const struct rte_ipv4_hdr *ipv4_hdr)
{
uint32_t cksum;
*/
static inline int
_ipv6_udptcp_mbuf_cksum(const struct rte_mbuf *mb, uint16_t l4_ofs,
- const struct ipv6_hdr *ipv6_hdr)
+ const struct rte_ipv6_hdr *ipv6_hdr)
{
uint32_t cksum;
check_pkt_csum(const struct rte_mbuf *m, uint64_t ol_flags, uint32_t type,
uint32_t proto)
{
- const struct ipv4_hdr *l3h4;
- const struct ipv6_hdr *l3h6;
- const struct udp_hdr *l4h;
+ const struct rte_ipv4_hdr *l3h4;
+ const struct rte_ipv6_hdr *l3h6;
+ const struct rte_udp_hdr *l4h;
uint64_t fl3, fl4;
uint16_t csum;
int32_t ret;
return 1;
/* case 2: either ip or l4 or both cksum is unknown */
- l3h4 = rte_pktmbuf_mtod_offset(m, const struct ipv4_hdr *, m->l2_len);
- l3h6 = rte_pktmbuf_mtod_offset(m, const struct ipv6_hdr *, m->l2_len);
+ l3h4 = rte_pktmbuf_mtod_offset(m, const struct rte_ipv4_hdr *,
+ m->l2_len);
+ l3h6 = rte_pktmbuf_mtod_offset(m, const struct rte_ipv6_hdr *,
+ m->l2_len);
ret = 0;
if (fl3 == PKT_RX_IP_CKSUM_UNKNOWN && l3h4->hdr_checksum != 0) {
* for IPv6 valid UDP cksum is mandatory.
*/
if (type == TLE_V4) {
- l4h = (const struct udp_hdr *)((uintptr_t)l3h4 +
+ l4h = (const struct rte_udp_hdr *)((uintptr_t)l3h4 +
m->l3_len);
csum = (proto == IPPROTO_UDP && l4h->dgram_cksum == 0) ?
UINT16_MAX : _ipv4_udptcp_mbuf_cksum(m,
dst->ol_flags = dev->tx.ol_flags[s->type];
if (s->type == TLE_V4) {
- struct ipv4_hdr *l3h;
- l3h = (struct ipv4_hdr *)(dst->hdr + dst->l2_len);
+ struct rte_ipv4_hdr *l3h;
+ l3h = (struct rte_ipv4_hdr *)(dst->hdr + dst->l2_len);
l3h->src_addr = dev->prm.local_addr4.s_addr;
l3h->dst_addr = d4->s_addr;
} else {
- struct ipv6_hdr *l3h;
- l3h = (struct ipv6_hdr *)(dst->hdr + dst->l2_len);
+ struct rte_ipv6_hdr *l3h;
+ l3h = (struct rte_ipv6_hdr *)(dst->hdr + dst->l2_len);
rte_memcpy(l3h->src_addr, &dev->prm.local_addr6,
sizeof(l3h->src_addr));
rte_memcpy(l3h->dst_addr, d6, sizeof(l3h->dst_addr));
#define TCP_WSCALE_DEFAULT 7
#define TCP_WSCALE_NONE 0
-#define TCP_TX_HDR_MAX (sizeof(struct tcp_hdr) + TCP_TX_OPT_LEN_MAX)
+#define TCP_TX_HDR_MAX (sizeof(struct rte_tcp_hdr) + TCP_TX_OPT_LEN_MAX)
/* max header size for normal data+ack packet */
-#define TCP_TX_HDR_DACK (sizeof(struct tcp_hdr) + TCP_TX_OPT_LEN_TMS)
+#define TCP_TX_HDR_DACK (sizeof(struct rte_tcp_hdr) + TCP_TX_OPT_LEN_TMS)
#define TCP4_MIN_MSS 536
/* default MTU, no TCP options. */
#define TCP4_NOP_MSS \
- (ETHER_MTU - sizeof(struct ipv4_hdr) - sizeof(struct tcp_hdr))
+ (RTE_ETHER_MTU - sizeof(struct rte_ipv4_hdr) - \
+ sizeof(struct rte_tcp_hdr))
#define TCP6_NOP_MSS \
- (ETHER_MTU - sizeof(struct ipv6_hdr) - sizeof(struct tcp_hdr))
+ (RTE_ETHER_MTU - sizeof(struct rte_ipv6_hdr) - \
+ sizeof(struct rte_tcp_hdr))
/* default MTU, TCP options present */
#define TCP4_OP_MSS (TCP4_NOP_MSS - TCP_TX_OPT_LEN_MAX)
}
static inline void
-get_seg_info(const struct tcp_hdr *th, union seg_info *si)
+get_seg_info(const struct rte_tcp_hdr *th, union seg_info *si)
{
__m128i v;
const __m128i bswap_mask =
get_pkt_info(const struct rte_mbuf *m, union pkt_info *pi, union seg_info *si)
{
uint32_t len, type;
- const struct tcp_hdr *tcph;
+ const struct rte_tcp_hdr *tcph;
const union l4_ports *prt;
const union ipv4_addrs *pa4;
if (type == TLE_V4) {
pa4 = rte_pktmbuf_mtod_offset(m, const union ipv4_addrs *,
- len + offsetof(struct ipv4_hdr, src_addr));
+ len + offsetof(struct rte_ipv4_hdr, src_addr));
pi->addr4.raw = pa4->raw;
} else if (type == TLE_V6) {
pi->addr6 = rte_pktmbuf_mtod_offset(m, const union ipv6_addrs *,
- len + offsetof(struct ipv6_hdr, src_addr));
+ len + offsetof(struct rte_ipv6_hdr, src_addr));
}
len += m->l3_len;
- tcph = rte_pktmbuf_mtod_offset(m, const struct tcp_hdr *, len);
+ tcph = rte_pktmbuf_mtod_offset(m, const struct rte_tcp_hdr *, len);
prt = (const union l4_ports *)
- ((uintptr_t)tcph + offsetof(struct tcp_hdr, src_port));
+ ((uintptr_t)tcph + offsetof(struct rte_tcp_hdr, src_port));
pi->tf.flags = tcph->tcp_flags;
pi->tf.type = type;
pi->csf = m->ol_flags & (PKT_RX_IP_CKSUM_MASK | PKT_RX_L4_CKSUM_MASK);
}
static inline void
-fill_tcph(struct tcp_hdr *l4h, const struct tcb *tcb, union l4_ports port,
+fill_tcph(struct rte_tcp_hdr *l4h, const struct tcb *tcb, union l4_ports port,
uint32_t seq, uint8_t hlen, uint8_t flags)
{
uint16_t wnd;
uint32_t pid, uint32_t swcsm)
{
uint32_t l4, len, plen;
- struct tcp_hdr *l4h;
+ struct rte_tcp_hdr *l4h;
char *l2h;
len = dst->l2_len + dst->l3_len;
rte_memcpy(l2h, dst->hdr, len);
/* setup TCP header & options */
- l4h = (struct tcp_hdr *)(l2h + len);
+ l4h = (struct rte_tcp_hdr *)(l2h + len);
fill_tcph(l4h, &s->tcb, port, seq, l4, flags);
/* setup mbuf TX offload related fields. */
/* update proto specific fields. */
if (s->s.type == TLE_V4) {
- struct ipv4_hdr *l3h;
- l3h = (struct ipv4_hdr *)(l2h + dst->l2_len);
+ struct rte_ipv4_hdr *l3h;
+ l3h = (struct rte_ipv4_hdr *)(l2h + dst->l2_len);
l3h->packet_id = rte_cpu_to_be_16(pid);
l3h->total_length = rte_cpu_to_be_16(plen + dst->l3_len + l4);
if ((ol_flags & PKT_TX_IP_CKSUM) == 0 && swcsm != 0)
l3h->hdr_checksum = _ipv4x_cksum(l3h, m->l3_len);
} else {
- struct ipv6_hdr *l3h;
- l3h = (struct ipv6_hdr *)(l2h + dst->l2_len);
+ struct rte_ipv6_hdr *l3h;
+ l3h = (struct rte_ipv6_hdr *)(l2h + dst->l2_len);
l3h->payload_len = rte_cpu_to_be_16(plen + l4);
if ((ol_flags & PKT_TX_TCP_CKSUM) != 0)
l4h->cksum = rte_ipv6_phdr_cksum(l3h, ol_flags);
tcp_update_mbuf(struct rte_mbuf *m, uint32_t type, const struct tcb *tcb,
uint32_t seq, uint32_t pid)
{
- struct tcp_hdr *l4h;
+ struct rte_tcp_hdr *l4h;
uint32_t len;
len = m->l2_len + m->l3_len;
- l4h = rte_pktmbuf_mtod_offset(m, struct tcp_hdr *, len);
+ l4h = rte_pktmbuf_mtod_offset(m, struct rte_tcp_hdr *, len);
l4h->sent_seq = rte_cpu_to_be_32(seq);
l4h->recv_ack = rte_cpu_to_be_32(tcb->rcv.nxt);
fill_tms_opts(l4h + 1, tcb->snd.ts, tcb->rcv.ts);
if (type == TLE_V4) {
- struct ipv4_hdr *l3h;
- l3h = rte_pktmbuf_mtod_offset(m, struct ipv4_hdr *, m->l2_len);
+ struct rte_ipv4_hdr *l3h;
+ l3h = rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *,
+ m->l2_len);
l3h->hdr_checksum = 0;
l3h->packet_id = rte_cpu_to_be_16(pid);
if ((m->ol_flags & PKT_TX_IP_CKSUM) == 0)
l4h->cksum = 0;
if (type == TLE_V4) {
- struct ipv4_hdr *l3h;
- l3h = rte_pktmbuf_mtod_offset(m, struct ipv4_hdr *,
+ struct rte_ipv4_hdr *l3h;
+ l3h = rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *,
m->l2_len);
l4h->cksum = _ipv4_udptcp_mbuf_cksum(m, len, l3h);
} else {
- struct ipv6_hdr *l3h;
- l3h = rte_pktmbuf_mtod_offset(m, struct ipv6_hdr *,
+ struct rte_ipv6_hdr *l3h;
+ l3h = rte_pktmbuf_mtod_offset(m, struct rte_ipv6_hdr *,
m->l2_len);
l4h->cksum = _ipv6_udptcp_mbuf_cksum(m, len, l3h);
}
struct tle_dev *dev;
const void *da;
struct tle_dest dst;
- const struct tcp_hdr *th;
+ const struct rte_tcp_hdr *th;
type = s->s.type;
if (rc < 0)
return rc;
- th = rte_pktmbuf_mtod_offset(m, const struct tcp_hdr *,
+ th = rte_pktmbuf_mtod_offset(m, const struct rte_tcp_hdr *,
m->l2_len + m->l3_len);
get_syn_opts(&s->tcb.so, (uintptr_t)(th + 1), m->l4_len - sizeof(*th));
{
union tsopt ts;
uintptr_t opt;
- const struct tcp_hdr *th;
+ const struct rte_tcp_hdr *th;
if (tcb->so.ts.val != 0) {
opt = rte_pktmbuf_mtod_offset(mb, uintptr_t,
{
int32_t rc;
uint32_t len;
- const struct tcp_hdr *th;
+ const struct rte_tcp_hdr *th;
/* check that ACK, etc fields are what we expected. */
rc = sync_check_ack(pi, si->seq, si->ack - 1, ts,
si->mss = rc;
- th = rte_pktmbuf_mtod_offset(mb, const struct tcp_hdr *,
+ th = rte_pktmbuf_mtod_offset(mb, const struct rte_tcp_hdr *,
mb->l2_len + mb->l3_len);
len = mb->l4_len - sizeof(*th);
to[0] = get_tms_opts((uintptr_t)(th + 1), len);
struct resp_info *rsp)
{
struct syn_opts so;
- struct tcp_hdr *th;
+ struct rte_tcp_hdr *th;
if (state != TCP_ST_SYN_SENT)
return -EINVAL;
return 0;
}
- th = rte_pktmbuf_mtod_offset(mb, struct tcp_hdr *,
+ th = rte_pktmbuf_mtod_offset(mb, struct rte_tcp_hdr *,
mb->l2_len + mb->l3_len);
get_syn_opts(&so, (uintptr_t)(th + 1), mb->l4_len - sizeof(*th));
len = m->l2_len;
if (ret.src == TLE_V4) {
pa4 = rte_pktmbuf_mtod_offset(m, union ipv4_addrs *,
- len + offsetof(struct ipv4_hdr, src_addr));
+ len + offsetof(struct rte_ipv4_hdr, src_addr));
addr4->raw = pa4->raw;
} else if (ret.src == TLE_V6) {
*addr6 = rte_pktmbuf_mtod_offset(m, union ipv6_addrs *,
- len + offsetof(struct ipv6_hdr, src_addr));
+ len + offsetof(struct rte_ipv6_hdr, src_addr));
}
len += m->l3_len;
up = rte_pktmbuf_mtod_offset(m, union l4_ports *,
- len + offsetof(struct udp_hdr, src_port));
+ len + offsetof(struct rte_udp_hdr, src_port));
ports->raw = up->raw;
ret.dst = ports->dst;
return ret;
/* update proto specific fields. */
if (type == TLE_V4) {
- struct ipv4_hdr *l3h;
- l3h = (struct ipv4_hdr *)(l2h + dst->l2_len);
+ struct rte_ipv4_hdr *l3h;
+ l3h = (struct rte_ipv4_hdr *)(l2h + dst->l2_len);
l3h->packet_id = rte_cpu_to_be_16(pid);
l3h->total_length = rte_cpu_to_be_16(plen + dst->l3_len +
sizeof(*l4h));
if ((ol_flags & PKT_TX_IP_CKSUM) == 0)
l3h->hdr_checksum = _ipv4x_cksum(l3h, m->l3_len);
} else {
- struct ipv6_hdr *l3h;
- l3h = (struct ipv6_hdr *)(l2h + dst->l2_len);
+ struct rte_ipv6_hdr *l3h;
+ l3h = (struct rte_ipv6_hdr *)(l2h + dst->l2_len);
l3h->payload_len = rte_cpu_to_be_16(plen + sizeof(*l4h));
if ((ol_flags & PKT_TX_UDP_CKSUM) != 0)
l4h->cksum = rte_ipv6_phdr_cksum(l3h, ol_flags);
static inline void
frag_fixup(const struct rte_mbuf *ms, struct rte_mbuf *mf, uint32_t type)
{
- struct ipv4_hdr *l3h;
+ struct rte_ipv4_hdr *l3h;
mf->ol_flags = ms->ol_flags;
mf->tx_offload = ms->tx_offload;
if (type == TLE_V4 && (ms->ol_flags & PKT_TX_IP_CKSUM) == 0) {
- l3h = rte_pktmbuf_mtod(mf, struct ipv4_hdr *);
+ l3h = rte_pktmbuf_mtod(mf, struct rte_ipv4_hdr *);
l3h->hdr_checksum = _ipv4x_cksum(l3h, mf->l3_len);
}
}
socket_id = rte_eth_dev_socket_id(port);
memset(&port_conf, 0, sizeof(struct rte_eth_conf));
- port_conf.rxmode.max_rx_pkt_len = ETHER_MAX_LEN;
+ port_conf.rxmode.max_rx_pkt_len = RTE_ETHER_MAX_LEN;
/* Configure the Ethernet device. */
retval = rte_eth_dev_configure(port, rx_rings, tx_rings, &port_conf);
}
int
-is_ipv4_frag(const struct ipv4_hdr *iph)
+is_ipv4_frag(const struct rte_ipv4_hdr *iph)
{
- const uint16_t mask = rte_cpu_to_be_16(~IPV4_HDR_DF_FLAG);
+ const uint16_t mask = rte_cpu_to_be_16(~RTE_IPV4_HDR_DF_FLAG);
return ((mask & iph->fragment_offset) != 0);
}
fill_ipv4_hdr_len(struct rte_mbuf *m, uint32_t l2, uint32_t proto,
uint32_t frag)
{
- const struct ipv4_hdr *iph;
+ const struct rte_ipv4_hdr *iph;
int32_t dlen, len;
dlen = rte_pktmbuf_data_len(m);
- dlen -= l2 + sizeof(struct udp_hdr);
+ dlen -= l2 + sizeof(struct rte_udp_hdr);
- iph = rte_pktmbuf_mtod_offset(m, const struct ipv4_hdr *, l2);
- len = (iph->version_ihl & IPV4_HDR_IHL_MASK) * IPV4_IHL_MULTIPLIER;
+ iph = rte_pktmbuf_mtod_offset(m, const struct rte_ipv4_hdr *, l2);
+ len = (iph->version_ihl & RTE_IPV4_HDR_IHL_MASK) * RTE_IPV4_IHL_MULTIPLIER;
if (frag != 0 && is_ipv4_frag(iph)) {
m->packet_type &= ~RTE_PTYPE_L4_MASK;
if (len > dlen || (proto <= IPPROTO_MAX && iph->next_proto_id != proto))
m->packet_type = RTE_PTYPE_UNKNOWN;
else
- fill_pkt_hdr_len(m, l2, len, sizeof(struct udp_hdr));
+ fill_pkt_hdr_len(m, l2, len, sizeof(struct rte_udp_hdr));
}
int
const struct ip6_ext *ipx;
int32_t dlen, len, ofs;
- len = sizeof(struct ipv6_hdr);
+ len = sizeof(struct rte_ipv6_hdr);
dlen = rte_pktmbuf_data_len(m);
- dlen -= l2 + sizeof(struct udp_hdr);
+ dlen -= l2 + sizeof(struct rte_udp_hdr);
ofs = l2 + len;
ipx = rte_pktmbuf_mtod_offset(m, const struct ip6_ext *, ofs);
if ((ofs == 0 && nproto != fproto) || len > dlen)
m->packet_type = RTE_PTYPE_UNKNOWN;
else
- fill_pkt_hdr_len(m, l2, len, sizeof(struct udp_hdr));
+ fill_pkt_hdr_len(m, l2, len, sizeof(struct rte_udp_hdr));
}
void
fill_ipv6_hdr_len(struct rte_mbuf *m, uint32_t l2, uint32_t fproto)
{
- const struct ipv6_hdr *iph;
+ const struct rte_ipv6_hdr *iph;
- iph = rte_pktmbuf_mtod_offset(m, const struct ipv6_hdr *,
- sizeof(struct ether_hdr));
+ iph = rte_pktmbuf_mtod_offset(m, const struct rte_ipv6_hdr *,
+ sizeof(struct rte_ether_hdr));
if (iph->proto == fproto)
- fill_pkt_hdr_len(m, l2, sizeof(struct ipv6_hdr),
- sizeof(struct udp_hdr));
+ fill_pkt_hdr_len(m, l2, sizeof(struct rte_ipv6_hdr),
+ sizeof(struct rte_udp_hdr));
else if (ipv6x_hdr(iph->proto) != 0)
fill_ipv6x_hdr_len(m, l2, iph->proto, fproto);
}
{
uint32_t dlen, l2;
uint16_t etp;
- const struct ether_hdr *eth;
+ const struct rte_ether_hdr *eth;
dlen = rte_pktmbuf_data_len(m);
/* check that first segment is at least 42B long. */
- if (dlen < sizeof(struct ether_hdr) + sizeof(struct ipv4_hdr) +
- sizeof(struct udp_hdr)) {
+ if (dlen < sizeof(struct rte_ether_hdr) + sizeof(struct rte_ipv4_hdr) +
+ sizeof(struct rte_udp_hdr)) {
m->packet_type = RTE_PTYPE_UNKNOWN;
return;
}
l2 = sizeof(*eth);
- eth = rte_pktmbuf_mtod(m, const struct ether_hdr *);
+ eth = rte_pktmbuf_mtod(m, const struct rte_ether_hdr *);
etp = eth->ether_type;
- if (etp == rte_be_to_cpu_16(ETHER_TYPE_VLAN))
- l2 += sizeof(struct vlan_hdr);
+ if (etp == rte_be_to_cpu_16(RTE_ETHER_TYPE_VLAN))
+ l2 += sizeof(struct rte_vlan_hdr);
- if (etp == rte_be_to_cpu_16(ETHER_TYPE_IPv4)) {
+ if (etp == rte_be_to_cpu_16(RTE_ETHER_TYPE_IPV4)) {
m->packet_type = RTE_PTYPE_L4_UDP |
RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
RTE_PTYPE_L2_ETHER;
fill_ipv4_hdr_len(m, l2, IPPROTO_UDP, 1);
- } else if (etp == rte_be_to_cpu_16(ETHER_TYPE_IPv6) &&
- dlen >= l2 + sizeof(struct ipv6_hdr) +
- sizeof(struct udp_hdr)) {
+ } else if (etp == rte_be_to_cpu_16(RTE_ETHER_TYPE_IPV6) &&
+ dlen >= l2 + sizeof(struct rte_ipv6_hdr) +
+ sizeof(struct rte_udp_hdr)) {
m->packet_type = RTE_PTYPE_L4_UDP |
RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
RTE_PTYPE_L2_ETHER;
fill_pkt_hdr_len(struct rte_mbuf *m, uint32_t l2, uint32_t l3, uint32_t l4);
int
-is_ipv4_frag(const struct ipv4_hdr *iph);
+is_ipv4_frag(const struct rte_ipv4_hdr *iph);
void
fill_ipv4_hdr_len(struct rte_mbuf *m, uint32_t l2, uint32_t proto,
lookup4_function(void *opaque, const struct in_addr *addr, struct tle_dest *res)
{
struct in_addr route;
- struct ether_hdr *eth;
- struct ipv4_hdr *ip4h;
+ struct rte_ether_hdr *eth;
+ struct rte_ipv4_hdr *ip4h;
auto routes = static_cast<map<string, tle_dev *> *>(opaque);
/* Check all routes added in map for a match with dest *addr */
res->l2_len = sizeof(*eth);
res->l3_len = sizeof(*ip4h);
res->head_mp = mbuf_pool;
- eth = (struct ether_hdr *)res->hdr;
- eth->ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
- ip4h = (struct ipv4_hdr *)(eth + 1);
+ eth = (struct rte_ether_hdr *)res->hdr;
+ eth->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
+ ip4h = (struct rte_ipv4_hdr *)(eth + 1);
ip4h->version_ihl = (4 << 4) |
- (sizeof(*ip4h) / IPV4_IHL_MULTIPLIER);
+ (sizeof(*ip4h) / RTE_IPV4_IHL_MULTIPLIER);
ip4h->time_to_live = 64;
ip4h->next_proto_id = IPPROTO_UDP;
ip4h->fragment_offset = 0;
lookup6_function(void *opaque, const struct in6_addr *addr,
struct tle_dest *res)
{
- struct ether_hdr *eth;
- struct ipv6_hdr *ip6h;
+ struct rte_ether_hdr *eth;
+ struct rte_ipv6_hdr *ip6h;
struct in6_addr route;
auto routes = static_cast<map<string, tle_dev *> *>(opaque);
res->l2_len = sizeof(*eth);
res->l3_len = sizeof(*ip6h);
res->head_mp = mbuf_pool;
- eth = (struct ether_hdr *)res->hdr;
- eth->ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
- ip6h = (struct ipv6_hdr *)(eth + 1);
+ eth = (struct rte_ether_hdr *)res->hdr;
+ eth->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
+ ip6h = (struct rte_ipv6_hdr *)(eth + 1);
ip6h->vtc_flow = 6 << 4;
ip6h->proto = IPPROTO_UDP;
ip6h->hop_limits = 64;