X-Git-Url: https://gerrit.fd.io/r/gitweb?a=blobdiff_plain;f=drivers%2Fnet%2Fixgbe%2Fixgbe_flow.c;h=1adf1b803c6458fa7b2ef3c87a82aa8925eec809;hb=refs%2Ftags%2Fupstream%2F18.08;hp=dcbfb38b3050809f9f7e0008115e40c6a50ecd04;hpb=ca33590b6af032bff57d9cc70455660466a654b2;p=deb_dpdk.git

diff --git a/drivers/net/ixgbe/ixgbe_flow.c b/drivers/net/ixgbe/ixgbe_flow.c
index dcbfb38b..1adf1b80 100644
--- a/drivers/net/ixgbe/ixgbe_flow.c
+++ b/drivers/net/ixgbe/ixgbe_flow.c
@@ -264,8 +264,8 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
 	}
 	/* Skip Ethernet */
 	if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
-		eth_spec = (const struct rte_flow_item_eth *)item->spec;
-		eth_mask = (const struct rte_flow_item_eth *)item->mask;
+		eth_spec = item->spec;
+		eth_mask = item->mask;
 		/*Not supported last point for range*/
 		if (item->last) {
 			rte_flow_error_set(error,
@@ -298,8 +298,8 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
 	}
 
 	if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
-		vlan_spec = (const struct rte_flow_item_vlan *)item->spec;
-		vlan_mask = (const struct rte_flow_item_vlan *)item->mask;
+		vlan_spec = item->spec;
+		vlan_mask = item->mask;
 		/*Not supported last point for range*/
 		if (item->last) {
 			rte_flow_error_set(error,
@@ -346,7 +346,7 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
 		return -rte_errno;
 	}
 
-	ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask;
+	ipv4_mask = item->mask;
 	/**
 	 * Only support src & dst addresses, protocol,
 	 * others should be masked.
@@ -368,7 +368,7 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
 	filter->src_ip_mask = ipv4_mask->hdr.src_addr;
 	filter->proto_mask = ipv4_mask->hdr.next_proto_id;
 
-	ipv4_spec = (const struct rte_flow_item_ipv4 *)item->spec;
+	ipv4_spec = item->spec;
 	filter->dst_ip = ipv4_spec->hdr.dst_addr;
 	filter->src_ip = ipv4_spec->hdr.src_addr;
 	filter->proto = ipv4_spec->hdr.next_proto_id;
@@ -413,7 +413,7 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
 	}
 
 	if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
-		tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
+		tcp_mask = item->mask;
 
 		/**
 		 * Only support src & dst ports, tcp flags,
@@ -447,12 +447,12 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
 			return -rte_errno;
 		}
 
-		tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
+		tcp_spec = item->spec;
 		filter->dst_port = tcp_spec->hdr.dst_port;
 		filter->src_port = tcp_spec->hdr.src_port;
 		filter->tcp_flags = tcp_spec->hdr.tcp_flags;
 	} else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
-		udp_mask = (const struct rte_flow_item_udp *)item->mask;
+		udp_mask = item->mask;
 
 		/**
 		 * Only support src & dst ports,
@@ -471,11 +471,11 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
 		filter->dst_port_mask = udp_mask->hdr.dst_port;
 		filter->src_port_mask = udp_mask->hdr.src_port;
 
-		udp_spec = (const struct rte_flow_item_udp *)item->spec;
+		udp_spec = item->spec;
 		filter->dst_port = udp_spec->hdr.dst_port;
 		filter->src_port = udp_spec->hdr.src_port;
 	} else if (item->type == RTE_FLOW_ITEM_TYPE_SCTP) {
-		sctp_mask = (const struct rte_flow_item_sctp *)item->mask;
+		sctp_mask = item->mask;
 
 		/**
 		 * Only support src & dst ports,
@@ -494,7 +494,7 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
 		filter->dst_port_mask = sctp_mask->hdr.dst_port;
 		filter->src_port_mask = sctp_mask->hdr.src_port;
 
-		sctp_spec = (const struct rte_flow_item_sctp *)item->spec;
+		sctp_spec = item->spec;
 		filter->dst_port = sctp_spec->hdr.dst_port;
 		filter->src_port = sctp_spec->hdr.src_port;
 	} else {
@@ -557,6 +557,15 @@ action:
 		return -rte_errno;
 	}
 
+	/* not supported */
+	if (attr->transfer) {
+		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
+				   attr, "No support for transfer.");
+		return -rte_errno;
+	}
+
 	if (attr->priority > 0xFFFF) {
 		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
 		rte_flow_error_set(error, EINVAL,
@@ -699,8 +708,8 @@ cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
 		return -rte_errno;
 	}
 
-	eth_spec = (const struct rte_flow_item_eth *)item->spec;
-	eth_mask = (const struct rte_flow_item_eth *)item->mask;
+	eth_spec = item->spec;
+	eth_mask = item->mask;
 
 	/* Mask bits of source MAC address must be full of 0.
 	 * Mask bits of destination MAC address must be full
@@ -786,6 +795,14 @@ cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
 		return -rte_errno;
 	}
 
+	/* Not supported */
+	if (attr->transfer) {
+		rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
+				attr, "No support for transfer.");
+		return -rte_errno;
+	}
+
 	/* Not supported */
 	if (attr->priority) {
 		rte_flow_error_set(error, EINVAL,
@@ -1000,8 +1017,8 @@ cons_parse_syn_filter(const struct rte_flow_attr *attr,
 		return -rte_errno;
 	}
 
-	tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
-	tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
+	tcp_spec = item->spec;
+	tcp_mask = item->mask;
 	if (!(tcp_spec->hdr.tcp_flags & TCP_SYN_FLAG) ||
 	    tcp_mask->hdr.src_port ||
 	    tcp_mask->hdr.dst_port ||
@@ -1078,6 +1095,15 @@ cons_parse_syn_filter(const struct rte_flow_attr *attr,
 		return -rte_errno;
 	}
 
+	/* not supported */
+	if (attr->transfer) {
+		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
+		rte_flow_error_set(error, EINVAL,
+			RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
+			attr, "No support for transfer.");
+		return -rte_errno;
+	}
+
 	/* Support 2 priorities, the lowest or highest. */
 	if (!attr->priority) {
 		filter->hig_pri = 0;
@@ -1198,8 +1224,8 @@ cons_parse_l2_tn_filter(struct rte_eth_dev *dev,
 		return -rte_errno;
 	}
 
-	e_tag_spec = (const struct rte_flow_item_e_tag *)item->spec;
-	e_tag_mask = (const struct rte_flow_item_e_tag *)item->mask;
+	e_tag_spec = item->spec;
+	e_tag_mask = item->mask;
 	/* Only care about GRP and E cid base.
 	 */
 	if (e_tag_mask->epcp_edei_in_ecid_b ||
@@ -1249,6 +1275,15 @@ cons_parse_l2_tn_filter(struct rte_eth_dev *dev,
 		return -rte_errno;
 	}
 
+	/* not supported */
+	if (attr->transfer) {
+		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
+		rte_flow_error_set(error, EINVAL,
+			RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
+			attr, "No support for transfer.");
+		return -rte_errno;
+	}
+
 	/* not supported */
 	if (attr->priority) {
 		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
@@ -1353,6 +1388,15 @@ ixgbe_parse_fdir_act_attr(const struct rte_flow_attr *attr,
 		return -rte_errno;
 	}
 
+	/* not supported */
+	if (attr->transfer) {
+		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+		rte_flow_error_set(error, EINVAL,
+			RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
+			attr, "No support for transfer.");
+		return -rte_errno;
+	}
+
 	/* not supported */
 	if (attr->priority) {
 		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
@@ -1447,12 +1491,9 @@ static inline uint8_t signature_match(const struct rte_flow_item pattern[])
 			break;
 
 		if (item->type == RTE_FLOW_ITEM_TYPE_FUZZY) {
-			spec =
-			  (const struct rte_flow_item_fuzzy *)item->spec;
-			last =
-			  (const struct rte_flow_item_fuzzy *)item->last;
-			mask =
-			  (const struct rte_flow_item_fuzzy *)item->mask;
+			spec = item->spec;
+			last = item->last;
+			mask = item->mask;
 
 			if (!spec || !mask)
 				return 0;
@@ -1632,7 +1673,7 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
 
 		if (item->spec) {
 			rule->b_spec = TRUE;
-			eth_spec = (const struct rte_flow_item_eth *)item->spec;
+			eth_spec = item->spec;
 
 			/* Get the dst MAC. */
 			for (j = 0; j < ETHER_ADDR_LEN; j++) {
@@ -1645,7 +1686,7 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
 
 		if (item->mask) {
 			rule->b_mask = TRUE;
-			eth_mask = (const struct rte_flow_item_eth *)item->mask;
+			eth_mask = item->mask;
 			/* Ether type should be masked.
 			 */
 			if (eth_mask->type ||
@@ -1698,7 +1739,8 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
 				return -rte_errno;
 			}
 		} else {
-			if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
+			if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
+			    item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
 				memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
 				rte_flow_error_set(error, EINVAL,
 					RTE_FLOW_ERROR_TYPE_ITEM,
@@ -1725,8 +1767,8 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
 			return -rte_errno;
 		}
 
-		vlan_spec = (const struct rte_flow_item_vlan *)item->spec;
-		vlan_mask = (const struct rte_flow_item_vlan *)item->mask;
+		vlan_spec = item->spec;
+		vlan_mask = item->mask;
 
 		rule->ixgbe_fdir.formatted.vlan_id = vlan_spec->tci;
 
@@ -1772,8 +1814,7 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
 			return -rte_errno;
 		}
 		rule->b_mask = TRUE;
-		ipv4_mask =
-			(const struct rte_flow_item_ipv4 *)item->mask;
+		ipv4_mask = item->mask;
 		if (ipv4_mask->hdr.version_ihl ||
 		    ipv4_mask->hdr.type_of_service ||
 		    ipv4_mask->hdr.total_length ||
@@ -1793,8 +1834,7 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
 
 		if (item->spec) {
 			rule->b_spec = TRUE;
-			ipv4_spec =
-				(const struct rte_flow_item_ipv4 *)item->spec;
+			ipv4_spec = item->spec;
 			rule->ixgbe_fdir.formatted.dst_ip[0] =
 				ipv4_spec->hdr.dst_addr;
 			rule->ixgbe_fdir.formatted.src_ip[0] =
@@ -1844,8 +1884,7 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
 		}
 
 		rule->b_mask = TRUE;
-		ipv6_mask =
-			(const struct rte_flow_item_ipv6 *)item->mask;
+		ipv6_mask = item->mask;
 		if (ipv6_mask->hdr.vtc_flow ||
 		    ipv6_mask->hdr.payload_len ||
 		    ipv6_mask->hdr.proto ||
@@ -1885,8 +1924,7 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
 
 		if (item->spec) {
 			rule->b_spec = TRUE;
-			ipv6_spec =
-				(const struct rte_flow_item_ipv6 *)item->spec;
+			ipv6_spec = item->spec;
 			rte_memcpy(rule->ixgbe_fdir.formatted.src_ip,
 				   ipv6_spec->hdr.src_addr, 16);
 			rte_memcpy(rule->ixgbe_fdir.formatted.dst_ip,
@@ -1938,7 +1976,7 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
 			return -rte_errno;
 		}
 		rule->b_mask = TRUE;
-		tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
+		tcp_mask = item->mask;
 		if (tcp_mask->hdr.sent_seq ||
 		    tcp_mask->hdr.recv_ack ||
 		    tcp_mask->hdr.data_off ||
@@ -1957,7 +1995,7 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
 
 		if (item->spec) {
 			rule->b_spec = TRUE;
-			tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
+			tcp_spec = item->spec;
 			rule->ixgbe_fdir.formatted.src_port =
 				tcp_spec->hdr.src_port;
 			rule->ixgbe_fdir.formatted.dst_port =
@@ -2003,7 +2041,7 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
 			return -rte_errno;
 		}
 		rule->b_mask = TRUE;
-		udp_mask = (const struct rte_flow_item_udp *)item->mask;
+		udp_mask = item->mask;
 		if (udp_mask->hdr.dgram_len ||
 		    udp_mask->hdr.dgram_cksum) {
 			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
@@ -2017,7 +2055,7 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
 
 		if (item->spec) {
 			rule->b_spec = TRUE;
-			udp_spec = (const struct rte_flow_item_udp *)item->spec;
+			udp_spec = item->spec;
 			rule->ixgbe_fdir.formatted.src_port =
 				udp_spec->hdr.src_port;
 			rule->ixgbe_fdir.formatted.dst_port =
@@ -2068,8 +2106,7 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
 			return -rte_errno;
 		}
 		rule->b_mask = TRUE;
-		sctp_mask =
-			(const struct rte_flow_item_sctp *)item->mask;
+		sctp_mask = item->mask;
 		if (sctp_mask->hdr.tag ||
 		    sctp_mask->hdr.cksum) {
 			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
@@ -2083,8 +2120,7 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
 
 		if (item->spec) {
 			rule->b_spec = TRUE;
-			sctp_spec =
-				(const struct rte_flow_item_sctp *)item->spec;
+			sctp_spec = item->spec;
 			rule->ixgbe_fdir.formatted.src_port =
 				sctp_spec->hdr.src_port;
 			rule->ixgbe_fdir.formatted.dst_port =
@@ -2092,8 +2128,7 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
 		}
 	/* others even sctp port is not supported */
 	} else {
-		sctp_mask =
-			(const struct rte_flow_item_sctp *)item->mask;
+		sctp_mask = item->mask;
 		if (sctp_mask &&
 			(sctp_mask->hdr.src_port ||
 			 sctp_mask->hdr.dst_port ||
@@ -2136,7 +2171,7 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
 			return -rte_errno;
 		}
 
-		raw_mask = (const struct rte_flow_item_raw *)item->mask;
+		raw_mask = item->mask;
 
 		/* check mask */
 		if (raw_mask->relative != 0x1 ||
@@ -2152,7 +2187,7 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
 			return -rte_errno;
 		}
 
-		raw_spec = (const struct rte_flow_item_raw *)item->spec;
+		raw_spec = item->spec;
 
 		/* check spec */
 		if (raw_spec->relative != 0 ||
@@ -2403,7 +2438,7 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
 	/* Get the VxLAN info */
 	if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN) {
 		rule->ixgbe_fdir.formatted.tunnel_type =
-			RTE_FDIR_TUNNEL_TYPE_VXLAN;
+			IXGBE_FDIR_VXLAN_TUNNEL_TYPE;
 
 		/* Only care about VNI, others should be masked. */
 		if (!item->mask) {
@@ -2425,8 +2460,7 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
 		/* Tunnel type is always meaningful. */
 		rule->mask.tunnel_type_mask = 1;
 
-		vxlan_mask =
-			(const struct rte_flow_item_vxlan *)item->mask;
+		vxlan_mask = item->mask;
 		if (vxlan_mask->flags) {
 			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
 			rte_flow_error_set(error, EINVAL,
@@ -2452,20 +2486,17 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
 
 		if (item->spec) {
 			rule->b_spec = TRUE;
-			vxlan_spec = (const struct rte_flow_item_vxlan *)
-					item->spec;
+			vxlan_spec = item->spec;
 			rte_memcpy(((uint8_t *)
-				&rule->ixgbe_fdir.formatted.tni_vni + 1),
+				&rule->ixgbe_fdir.formatted.tni_vni),
 				vxlan_spec->vni, RTE_DIM(vxlan_spec->vni));
-			rule->ixgbe_fdir.formatted.tni_vni = rte_be_to_cpu_32(
-				rule->ixgbe_fdir.formatted.tni_vni);
 		}
 	}
 
 	/* Get the NVGRE info */
 	if (item->type == RTE_FLOW_ITEM_TYPE_NVGRE) {
 		rule->ixgbe_fdir.formatted.tunnel_type =
-			RTE_FDIR_TUNNEL_TYPE_NVGRE;
+			IXGBE_FDIR_NVGRE_TUNNEL_TYPE;
 
 		/**
 		 * Only care about flags0, flags1, protocol and TNI,
@@ -2490,8 +2521,7 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
 		/* Tunnel type is always meaningful. */
 		rule->mask.tunnel_type_mask = 1;
 
-		nvgre_mask =
-			(const struct rte_flow_item_nvgre *)item->mask;
+		nvgre_mask = item->mask;
 		if (nvgre_mask->flow_id) {
 			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
 			rte_flow_error_set(error, EINVAL,
@@ -2534,8 +2564,7 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
 
 		if (item->spec) {
 			rule->b_spec = TRUE;
-			nvgre_spec =
-				(const struct rte_flow_item_nvgre *)item->spec;
+			nvgre_spec = item->spec;
 			if (nvgre_spec->c_k_s_rsvd0_ver !=
 			    rte_cpu_to_be_16(0x2000) &&
 				nvgre_mask->c_k_s_rsvd0_ver) {
@@ -2557,7 +2586,6 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
 			/* tni is a 24-bits bit field */
 			rte_memcpy(&rule->ixgbe_fdir.formatted.tni_vni,
 			    nvgre_spec->tni, RTE_DIM(nvgre_spec->tni));
-			rule->ixgbe_fdir.formatted.tni_vni <<= 8;
 		}
 	}
 
@@ -2591,7 +2619,7 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
 		return -rte_errno;
 	}
 	rule->b_mask = TRUE;
-	eth_mask = (const struct rte_flow_item_eth *)item->mask;
+	eth_mask = item->mask;
 	/* Ether type should be masked.
 	 */
 	if (eth_mask->type) {
@@ -2632,7 +2660,7 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
 
 	if (item->spec) {
 		rule->b_spec = TRUE;
-		eth_spec = (const struct rte_flow_item_eth *)item->spec;
+		eth_spec = item->spec;
 
 		/* Get the dst MAC. */
 		for (j = 0; j < ETHER_ADDR_LEN; j++) {
@@ -2671,8 +2699,8 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
 			return -rte_errno;
 		}
 
-		vlan_spec = (const struct rte_flow_item_vlan *)item->spec;
-		vlan_mask = (const struct rte_flow_item_vlan *)item->mask;
+		vlan_spec = item->spec;
+		vlan_mask = item->mask;
 
 		rule->ixgbe_fdir.formatted.vlan_id = vlan_spec->tci;
 
@@ -2775,7 +2803,7 @@ ixgbe_parse_rss_filter(struct rte_eth_dev *dev,
 
 	rss = (const struct rte_flow_action_rss *)act->conf;
 
-	if (!rss || !rss->num) {
+	if (!rss || !rss->queue_num) {
 		rte_flow_error_set(error, EINVAL,
 				RTE_FLOW_ERROR_TYPE_ACTION,
 				act,
@@ -2783,7 +2811,7 @@ ixgbe_parse_rss_filter(struct rte_eth_dev *dev,
 		return -rte_errno;
 	}
 
-	for (n = 0; n < rss->num; n++) {
+	for (n = 0; n < rss->queue_num; n++) {
 		if (rss->queue[n] >= dev->data->nb_rx_queues) {
 			rte_flow_error_set(error, EINVAL,
 				RTE_FLOW_ERROR_TYPE_ACTION,
@@ -2792,14 +2820,27 @@ ixgbe_parse_rss_filter(struct rte_eth_dev *dev,
 			return -rte_errno;
 		}
 	}
-	if (rss->rss_conf)
-		rss_conf->rss_conf = *rss->rss_conf;
-	else
-		rss_conf->rss_conf.rss_hf = IXGBE_RSS_OFFLOAD_ALL;
-
-	for (n = 0; n < rss->num; ++n)
-		rss_conf->queue[n] = rss->queue[n];
-	rss_conf->num = rss->num;
+
+	if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT)
+		return rte_flow_error_set
+			(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
+			 "non-default RSS hash functions are not supported");
+	if (rss->level)
+		return rte_flow_error_set
+			(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
+			 "a nonzero RSS encapsulation level is not supported");
+	if (rss->key_len && rss->key_len != RTE_DIM(rss_conf->key))
+		return rte_flow_error_set
+			(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
+			 "RSS hash key must be exactly 40 bytes");
+	if (rss->queue_num > RTE_DIM(rss_conf->queue))
+		return rte_flow_error_set
+			(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
+			 "too many queues for RSS context");
+	if (ixgbe_rss_conf_init(rss_conf, rss))
+		return rte_flow_error_set
+			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, act,
+			 "RSS context initialization failure");
 
 	/* check if the next not void item is END */
 	act = next_no_void_action(actions, act);
@@ -2830,6 +2871,15 @@ ixgbe_parse_rss_filter(struct rte_eth_dev *dev,
 		return -rte_errno;
 	}
 
+	/* not supported */
+	if (attr->transfer) {
+		memset(rss_conf, 0, sizeof(struct ixgbe_rte_flow_rss_conf));
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
+				   attr, "No support for transfer.");
+		return -rte_errno;
+	}
+
 	if (attr->priority > 0xFFFF) {
 		memset(rss_conf, 0, sizeof(struct ixgbe_rte_flow_rss_conf));
 		rte_flow_error_set(error, EINVAL,
@@ -2848,7 +2898,7 @@ ixgbe_clear_rss_filter(struct rte_eth_dev *dev)
 	struct ixgbe_filter_info *filter_info =
 		IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
 
-	if (filter_info->rss_info.num)
+	if (filter_info->rss_info.conf.queue_num)
 		ixgbe_config_rss_filter(dev, &filter_info->rss_info, FALSE);
 }
 
@@ -3167,9 +3217,8 @@ ixgbe_flow_create(struct rte_eth_dev *dev,
 				PMD_DRV_LOG(ERR, "failed to allocate memory");
 				goto out;
 			}
-			rte_memcpy(&rss_filter_ptr->filter_info,
-				&rss_conf,
-				sizeof(struct ixgbe_rte_flow_rss_conf));
+			ixgbe_rss_conf_init(&rss_filter_ptr->filter_info,
+					    &rss_conf.conf);
 			TAILQ_INSERT_TAIL(&filter_rss_list,
 				rss_filter_ptr, entries);
 			flow->rule = rss_filter_ptr;