X-Git-Url: https://gerrit.fd.io/r/gitweb?a=blobdiff_plain;f=drivers%2Fnet%2Fixgbe%2Fixgbe_flow.c;h=e60ecce77ce25b70846417b99fdf99ccd1032236;hb=6e7cbd63706f3435b9d9a2057a37db1da01db9a7;hp=9aeb71e41a7a2670156ed510d3767da0b542517c;hpb=bf7567fd2a5b0b28ab724046143c24561d38d015;p=deb_dpdk.git

diff --git a/drivers/net/ixgbe/ixgbe_flow.c b/drivers/net/ixgbe/ixgbe_flow.c
index 9aeb71e4..e60ecce7 100644
--- a/drivers/net/ixgbe/ixgbe_flow.c
+++ b/drivers/net/ixgbe/ixgbe_flow.c
@@ -51,12 +51,10 @@
 #include 
 #include 
 #include 
-#include 
 #include 
 #include 
 #include 
 #include 
-#include 
 #include 
 #include 
 #include 
@@ -78,23 +76,85 @@
 #define IXGBE_MIN_N_TUPLE_PRIO 1
 #define IXGBE_MAX_N_TUPLE_PRIO 7
 
-#define NEXT_ITEM_OF_PATTERN(item, pattern, index)\
-	do {\
-		item = pattern + index;\
-		while (item->type == RTE_FLOW_ITEM_TYPE_VOID) {\
-			index++;\
-			item = pattern + index;\
-		}\
-	} while (0)
-
-#define NEXT_ITEM_OF_ACTION(act, actions, index)\
-	do {\
-		act = actions + index;\
-		while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {\
-			index++;\
-			act = actions + index;\
-		}\
-	} while (0)
+#define IXGBE_MAX_FLX_SOURCE_OFF 62
+
+/* ntuple filter list structure */
+struct ixgbe_ntuple_filter_ele {
+	TAILQ_ENTRY(ixgbe_ntuple_filter_ele) entries;
+	struct rte_eth_ntuple_filter filter_info;
+};
+/* ethertype filter list structure */
+struct ixgbe_ethertype_filter_ele {
+	TAILQ_ENTRY(ixgbe_ethertype_filter_ele) entries;
+	struct rte_eth_ethertype_filter filter_info;
+};
+/* syn filter list structure */
+struct ixgbe_eth_syn_filter_ele {
+	TAILQ_ENTRY(ixgbe_eth_syn_filter_ele) entries;
+	struct rte_eth_syn_filter filter_info;
+};
+/* fdir filter list structure */
+struct ixgbe_fdir_rule_ele {
+	TAILQ_ENTRY(ixgbe_fdir_rule_ele) entries;
+	struct ixgbe_fdir_rule filter_info;
+};
+/* l2_tunnel filter list structure */
+struct ixgbe_eth_l2_tunnel_conf_ele {
+	TAILQ_ENTRY(ixgbe_eth_l2_tunnel_conf_ele) entries;
+	struct rte_eth_l2_tunnel_conf filter_info;
+};
+/* ixgbe_flow memory list structure */
+struct ixgbe_flow_mem {
+	TAILQ_ENTRY(ixgbe_flow_mem) entries;
+	struct rte_flow *flow;
+};
+
+TAILQ_HEAD(ixgbe_ntuple_filter_list, ixgbe_ntuple_filter_ele);
+TAILQ_HEAD(ixgbe_ethertype_filter_list, ixgbe_ethertype_filter_ele);
+TAILQ_HEAD(ixgbe_syn_filter_list, ixgbe_eth_syn_filter_ele);
+TAILQ_HEAD(ixgbe_fdir_rule_filter_list, ixgbe_fdir_rule_ele);
+TAILQ_HEAD(ixgbe_l2_tunnel_filter_list, ixgbe_eth_l2_tunnel_conf_ele);
+TAILQ_HEAD(ixgbe_flow_mem_list, ixgbe_flow_mem);
+
+static struct ixgbe_ntuple_filter_list filter_ntuple_list;
+static struct ixgbe_ethertype_filter_list filter_ethertype_list;
+static struct ixgbe_syn_filter_list filter_syn_list;
+static struct ixgbe_fdir_rule_filter_list filter_fdir_list;
+static struct ixgbe_l2_tunnel_filter_list filter_l2_tunnel_list;
+static struct ixgbe_flow_mem_list ixgbe_flow_list;
+
+/**
+ * An endless loop cannot occur given the assumptions below:
+ * 1. there is at least one non-void item (END);
+ * 2. cur is before END.
+ */
+static inline
+const struct rte_flow_item *next_no_void_pattern(
+		const struct rte_flow_item pattern[],
+		const struct rte_flow_item *cur)
+{
+	const struct rte_flow_item *next =
+		cur ? cur + 1 : &pattern[0];
+	while (1) {
+		if (next->type != RTE_FLOW_ITEM_TYPE_VOID)
+			return next;
+		next++;
+	}
+}
+
+static inline
+const struct rte_flow_action *next_no_void_action(
+		const struct rte_flow_action actions[],
+		const struct rte_flow_action *cur)
+{
+	const struct rte_flow_action *next =
+		cur ? cur + 1 : &actions[0];
+	while (1) {
+		if (next->type != RTE_FLOW_ACTION_TYPE_VOID)
+			return next;
+		next++;
+	}
+}
 
 /**
  * Please be aware there's an assumption for all the parsers.
@@ -126,6 +186,9 @@
  * END
  * other members in mask and spec should set to 0x00.
  * item->last should be NULL.
+ *
+ * Special case for flow action type RTE_FLOW_ACTION_TYPE_SECURITY.
+ *
  */
 static int
 cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
@@ -144,7 +207,6 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
 	const struct rte_flow_item_udp *udp_mask;
 	const struct rte_flow_item_sctp *sctp_spec;
 	const struct rte_flow_item_sctp *sctp_mask;
-	uint32_t index;
 
 	if (!pattern) {
 		rte_flow_error_set(error,
@@ -166,11 +228,45 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
 		return -rte_errno;
 	}
 
-	/* parse pattern */
-	index = 0;
+#ifdef RTE_LIBRTE_SECURITY
+	/**
+	 * Special case for flow action type RTE_FLOW_ACTION_TYPE_SECURITY
+	 */
+	act = next_no_void_action(actions, NULL);
+	if (act->type == RTE_FLOW_ACTION_TYPE_SECURITY) {
+		const void *conf = act->conf;
+		/* check if the next not void item is END */
+		act = next_no_void_action(actions, act);
+		if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+			memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ACTION,
+				act, "Not supported action.");
+			return -rte_errno;
+		}
+
+		/* get the IP pattern */
+		item = next_no_void_pattern(pattern, NULL);
+		while (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
+				item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
+			if (item->last ||
+					item->type == RTE_FLOW_ITEM_TYPE_END) {
+				rte_flow_error_set(error, EINVAL,
+					RTE_FLOW_ERROR_TYPE_ITEM,
+					item, "IP pattern missing.");
+				return -rte_errno;
+			}
+			item = next_no_void_pattern(pattern, item);
+		}
+
+		filter->proto = IPPROTO_ESP;
+		return ixgbe_crypto_add_ingress_sa_from_flow(conf, item->spec,
+				item->type == RTE_FLOW_ITEM_TYPE_IPV6);
+	}
+#endif
 
 	/* the first not void item can be MAC or IPv4 */
-	NEXT_ITEM_OF_PATTERN(item, pattern, index);
+	item = next_no_void_pattern(pattern, NULL);
 	if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
 	    item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
@@ -198,8 +294,7 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
 			return -rte_errno;
 		}
 		/* check if the next not void item is IPv4 */
-		index++;
-		NEXT_ITEM_OF_PATTERN(item, pattern, index);
+		item = next_no_void_pattern(pattern, item);
 		if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
 			rte_flow_error_set(error, EINVAL,
 				RTE_FLOW_ERROR_TYPE_ITEM,
@@ -252,11 +347,11 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
 	filter->proto = ipv4_spec->hdr.next_proto_id;
 
 	/* check if the next not void item is TCP or UDP */
-	index++;
-	NEXT_ITEM_OF_PATTERN(item, pattern, index);
+	item = next_no_void_pattern(pattern, item);
 	if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
 	    item->type != RTE_FLOW_ITEM_TYPE_UDP &&
-	    item->type != RTE_FLOW_ITEM_TYPE_SCTP) {
+	    item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
+	    item->type != RTE_FLOW_ITEM_TYPE_END) {
 		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
 		rte_flow_error_set(error, EINVAL,
 			RTE_FLOW_ERROR_TYPE_ITEM,
@@ -265,7 +360,8 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
 	}
 
 	/* get the TCP/UDP info */
-	if (!item->spec || !item->mask) {
+	if ((item->type != RTE_FLOW_ITEM_TYPE_END) &&
+		(!item->spec || !item->mask)) {
 		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
 		rte_flow_error_set(error, EINVAL,
 			RTE_FLOW_ERROR_TYPE_ITEM,
@@ -345,7 +441,7 @@ cons_parse_ntuple_filter(const struct rte_flow_attr
*attr, udp_spec = (const struct rte_flow_item_udp *)item->spec; filter->dst_port = udp_spec->hdr.dst_port; filter->src_port = udp_spec->hdr.src_port; - } else { + } else if (item->type == RTE_FLOW_ITEM_TYPE_SCTP) { sctp_mask = (const struct rte_flow_item_sctp *)item->mask; /** @@ -368,11 +464,12 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr, sctp_spec = (const struct rte_flow_item_sctp *)item->spec; filter->dst_port = sctp_spec->hdr.dst_port; filter->src_port = sctp_spec->hdr.src_port; + } else { + goto action; } /* check if the next not void item is END */ - index++; - NEXT_ITEM_OF_PATTERN(item, pattern, index); + item = next_no_void_pattern(pattern, item); if (item->type != RTE_FLOW_ITEM_TYPE_END) { memset(filter, 0, sizeof(struct rte_eth_ntuple_filter)); rte_flow_error_set(error, EINVAL, @@ -381,14 +478,13 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr, return -rte_errno; } - /* parse action */ - index = 0; +action: /** * n-tuple only supports forwarding, * check if the first not void action is QUEUE. */ - NEXT_ITEM_OF_ACTION(act, actions, index); + act = next_no_void_action(actions, NULL); if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) { memset(filter, 0, sizeof(struct rte_eth_ntuple_filter)); rte_flow_error_set(error, EINVAL, @@ -400,8 +496,7 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr, ((const struct rte_flow_action_queue *)act->conf)->index; /* check if the next not void item is END */ - index++; - NEXT_ITEM_OF_ACTION(act, actions, index); + act = next_no_void_action(actions, act); if (act->type != RTE_FLOW_ACTION_TYPE_END) { memset(filter, 0, sizeof(struct rte_eth_ntuple_filter)); rte_flow_error_set(error, EINVAL, @@ -463,6 +558,12 @@ ixgbe_parse_ntuple_filter(struct rte_eth_dev *dev, if (ret) return ret; +#ifdef RTE_LIBRTE_SECURITY + /* ESP flow not really a flow*/ + if (filter->proto == IPPROTO_ESP) + return 0; +#endif + /* Ixgbe doesn't support tcp flags. */ if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) { memset(filter, 0, sizeof(struct rte_eth_ntuple_filter)); @@ -482,9 +583,7 @@ ixgbe_parse_ntuple_filter(struct rte_eth_dev *dev, return -rte_errno; } - if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM || - filter->priority > IXGBE_5TUPLE_MAX_PRI || - filter->priority < IXGBE_5TUPLE_MIN_PRI) + if (filter->queue >= dev->data->nb_rx_queues) return -rte_errno; /* fixed value for ixgbe */ @@ -520,7 +619,6 @@ cons_parse_ethertype_filter(const struct rte_flow_attr *attr, const struct rte_flow_item_eth *eth_spec; const struct rte_flow_item_eth *eth_mask; const struct rte_flow_action_queue *act_q; - uint32_t index; if (!pattern) { rte_flow_error_set(error, EINVAL, @@ -543,15 +641,8 @@ cons_parse_ethertype_filter(const struct rte_flow_attr *attr, return -rte_errno; } - /* Parse pattern */ - index = 0; - + item = next_no_void_pattern(pattern, NULL); /* The first non-void item should be MAC. */ - item = pattern + index; - while (item->type == RTE_FLOW_ITEM_TYPE_VOID) { - index++; - item = pattern + index; - } if (item->type != RTE_FLOW_ITEM_TYPE_ETH) { rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, @@ -610,12 +701,7 @@ cons_parse_ethertype_filter(const struct rte_flow_attr *attr, filter->ether_type = rte_be_to_cpu_16(eth_spec->type); /* Check if the next non-void item is END. 
*/ - index++; - item = pattern + index; - while (item->type == RTE_FLOW_ITEM_TYPE_VOID) { - index++; - item = pattern + index; - } + item = next_no_void_pattern(pattern, item); if (item->type != RTE_FLOW_ITEM_TYPE_END) { rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, @@ -625,13 +711,7 @@ cons_parse_ethertype_filter(const struct rte_flow_attr *attr, /* Parse action */ - index = 0; - /* Check if the first non-void action is QUEUE or DROP. */ - act = actions + index; - while (act->type == RTE_FLOW_ACTION_TYPE_VOID) { - index++; - act = actions + index; - } + act = next_no_void_action(actions, NULL); if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE && act->type != RTE_FLOW_ACTION_TYPE_DROP) { rte_flow_error_set(error, EINVAL, @@ -648,12 +728,7 @@ cons_parse_ethertype_filter(const struct rte_flow_attr *attr, } /* Check if the next non-void item is END */ - index++; - act = actions + index; - while (act->type == RTE_FLOW_ACTION_TYPE_VOID) { - index++; - act = actions + index; - } + act = next_no_void_action(actions, act); if (act->type != RTE_FLOW_ACTION_TYPE_END) { rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, @@ -725,7 +800,7 @@ ixgbe_parse_ethertype_filter(struct rte_eth_dev *dev, return -rte_errno; } - if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM) { + if (filter->queue >= dev->data->nb_rx_queues) { memset(filter, 0, sizeof(struct rte_eth_ethertype_filter)); rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, @@ -793,7 +868,6 @@ cons_parse_syn_filter(const struct rte_flow_attr *attr, const struct rte_flow_item_tcp *tcp_spec; const struct rte_flow_item_tcp *tcp_mask; const struct rte_flow_action_queue *act_q; - uint32_t index; if (!pattern) { rte_flow_error_set(error, EINVAL, @@ -816,11 +890,9 @@ cons_parse_syn_filter(const struct rte_flow_attr *attr, return -rte_errno; } - /* parse pattern */ - index = 0; /* the first not void item should be MAC or IPv4 or IPv6 or TCP */ - NEXT_ITEM_OF_PATTERN(item, pattern, index); + item = next_no_void_pattern(pattern, NULL); if (item->type != RTE_FLOW_ITEM_TYPE_ETH && item->type != RTE_FLOW_ITEM_TYPE_IPV4 && item->type != RTE_FLOW_ITEM_TYPE_IPV6 && @@ -849,8 +921,7 @@ cons_parse_syn_filter(const struct rte_flow_attr *attr, } /* check if the next not void item is IPv4 or IPv6 */ - index++; - NEXT_ITEM_OF_PATTERN(item, pattern, index); + item = next_no_void_pattern(pattern, item); if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 && item->type != RTE_FLOW_ITEM_TYPE_IPV6) { rte_flow_error_set(error, EINVAL, @@ -872,8 +943,7 @@ cons_parse_syn_filter(const struct rte_flow_attr *attr, } /* check if the next not void item is TCP */ - index++; - NEXT_ITEM_OF_PATTERN(item, pattern, index); + item = next_no_void_pattern(pattern, item); if (item->type != RTE_FLOW_ITEM_TYPE_TCP) { rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, @@ -917,8 +987,7 @@ cons_parse_syn_filter(const struct rte_flow_attr *attr, } /* check if the next not void item is END */ - index++; - NEXT_ITEM_OF_PATTERN(item, pattern, index); + item = next_no_void_pattern(pattern, item); if (item->type != RTE_FLOW_ITEM_TYPE_END) { memset(filter, 0, sizeof(struct rte_eth_syn_filter)); rte_flow_error_set(error, EINVAL, @@ -927,11 +996,8 @@ cons_parse_syn_filter(const struct rte_flow_attr *attr, return -rte_errno; } - /* parse action */ - index = 0; - /* check if the first not void action is QUEUE. 
*/ - NEXT_ITEM_OF_ACTION(act, actions, index); + act = next_no_void_action(actions, NULL); if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) { memset(filter, 0, sizeof(struct rte_eth_syn_filter)); rte_flow_error_set(error, EINVAL, @@ -951,8 +1017,7 @@ cons_parse_syn_filter(const struct rte_flow_attr *attr, } /* check if the next not void item is END */ - index++; - NEXT_ITEM_OF_ACTION(act, actions, index); + act = next_no_void_action(actions, act); if (act->type != RTE_FLOW_ACTION_TYPE_END) { memset(filter, 0, sizeof(struct rte_eth_syn_filter)); rte_flow_error_set(error, EINVAL, @@ -1012,6 +1077,9 @@ ixgbe_parse_syn_filter(struct rte_eth_dev *dev, ret = cons_parse_syn_filter(attr, pattern, actions, filter, error); + if (filter->queue >= dev->data->nb_rx_queues) + return -rte_errno; + if (ret) return ret; @@ -1026,7 +1094,7 @@ ixgbe_parse_syn_filter(struct rte_eth_dev *dev, * The first not void item can be E_TAG. * The next not void item must be END. * action: - * The first not void action should be QUEUE. + * The first not void action should be VF or PF. * The next not void action should be END. * pattern example: * ITEM Spec Mask @@ -1037,7 +1105,8 @@ ixgbe_parse_syn_filter(struct rte_eth_dev *dev, * item->last should be NULL. */ static int -cons_parse_l2_tn_filter(const struct rte_flow_attr *attr, +cons_parse_l2_tn_filter(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, const struct rte_flow_item pattern[], const struct rte_flow_action actions[], struct rte_eth_l2_tunnel_conf *filter, @@ -1047,8 +1116,8 @@ cons_parse_l2_tn_filter(const struct rte_flow_attr *attr, const struct rte_flow_item_e_tag *e_tag_spec; const struct rte_flow_item_e_tag *e_tag_mask; const struct rte_flow_action *act; - const struct rte_flow_action_queue *act_q; - uint32_t index; + const struct rte_flow_action_vf *act_vf; + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); if (!pattern) { rte_flow_error_set(error, EINVAL, @@ -1070,11 +1139,9 @@ cons_parse_l2_tn_filter(const struct rte_flow_attr *attr, NULL, "NULL attribute."); return -rte_errno; } - /* parse pattern */ - index = 0; /* The first not void item should be e-tag. */ - NEXT_ITEM_OF_PATTERN(item, pattern, index); + item = next_no_void_pattern(pattern, NULL); if (item->type != RTE_FLOW_ITEM_TYPE_E_TAG) { memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf)); rte_flow_error_set(error, EINVAL, @@ -1121,8 +1188,7 @@ cons_parse_l2_tn_filter(const struct rte_flow_attr *attr, filter->tunnel_id = rte_be_to_cpu_16(e_tag_spec->rsvd_grp_ecid_b); /* check if the next not void item is END */ - index++; - NEXT_ITEM_OF_PATTERN(item, pattern, index); + item = next_no_void_pattern(pattern, item); if (item->type != RTE_FLOW_ITEM_TYPE_END) { memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf)); rte_flow_error_set(error, EINVAL, @@ -1159,12 +1225,10 @@ cons_parse_l2_tn_filter(const struct rte_flow_attr *attr, return -rte_errno; } - /* parse action */ - index = 0; - - /* check if the first not void action is QUEUE. */ - NEXT_ITEM_OF_ACTION(act, actions, index); - if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) { + /* check if the first not void action is VF or PF. 
*/ + act = next_no_void_action(actions, NULL); + if (act->type != RTE_FLOW_ACTION_TYPE_VF && + act->type != RTE_FLOW_ACTION_TYPE_PF) { memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf)); rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, @@ -1172,12 +1236,15 @@ cons_parse_l2_tn_filter(const struct rte_flow_attr *attr, return -rte_errno; } - act_q = (const struct rte_flow_action_queue *)act->conf; - filter->pool = act_q->index; + if (act->type == RTE_FLOW_ACTION_TYPE_VF) { + act_vf = (const struct rte_flow_action_vf *)act->conf; + filter->pool = act_vf->id; + } else { + filter->pool = pci_dev->max_vfs; + } /* check if the next not void item is END */ - index++; - NEXT_ITEM_OF_ACTION(act, actions, index); + act = next_no_void_action(actions, act); if (act->type != RTE_FLOW_ACTION_TYPE_END) { memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf)); rte_flow_error_set(error, EINVAL, @@ -1199,8 +1266,10 @@ ixgbe_parse_l2_tn_filter(struct rte_eth_dev *dev, { int ret = 0; struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); + uint16_t vf_num; - ret = cons_parse_l2_tn_filter(attr, pattern, + ret = cons_parse_l2_tn_filter(dev, attr, pattern, actions, l2_tn_filter, error); if (hw->mac.type != ixgbe_mac_X550 && @@ -1213,6 +1282,11 @@ ixgbe_parse_l2_tn_filter(struct rte_eth_dev *dev, return -rte_errno; } + vf_num = pci_dev->max_vfs; + + if (l2_tn_filter->pool > vf_num) + return -rte_errno; + return ret; } @@ -1226,7 +1300,6 @@ ixgbe_parse_fdir_act_attr(const struct rte_flow_attr *attr, const struct rte_flow_action *act; const struct rte_flow_action_queue *act_q; const struct rte_flow_action_mark *mark; - uint32_t index; /* parse attr */ /* must be input direction */ @@ -1256,11 +1329,8 @@ ixgbe_parse_fdir_act_attr(const struct rte_flow_attr *attr, return -rte_errno; } - /* parse action */ - index = 0; - /* check if the first not void action is QUEUE or DROP. */ - NEXT_ITEM_OF_ACTION(act, actions, index); + act = next_no_void_action(actions, NULL); if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE && act->type != RTE_FLOW_ACTION_TYPE_DROP) { memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); @@ -1274,12 +1344,19 @@ ixgbe_parse_fdir_act_attr(const struct rte_flow_attr *attr, act_q = (const struct rte_flow_action_queue *)act->conf; rule->queue = act_q->index; } else { /* drop */ + /* signature mode does not support drop action. 
*/ + if (rule->mode == RTE_FDIR_MODE_SIGNATURE) { + memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + act, "Not supported action."); + return -rte_errno; + } rule->fdirflags = IXGBE_FDIRCMD_DROP; } /* check if the next not void item is MARK */ - index++; - NEXT_ITEM_OF_ACTION(act, actions, index); + act = next_no_void_action(actions, act); if ((act->type != RTE_FLOW_ACTION_TYPE_MARK) && (act->type != RTE_FLOW_ACTION_TYPE_END)) { memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); @@ -1294,8 +1371,7 @@ ixgbe_parse_fdir_act_attr(const struct rte_flow_attr *attr, if (act->type == RTE_FLOW_ACTION_TYPE_MARK) { mark = (const struct rte_flow_action_mark *)act->conf; rule->soft_id = mark->id; - index++; - NEXT_ITEM_OF_ACTION(act, actions, index); + act = next_no_void_action(actions, act); } /* check if the next not void item is END */ @@ -1310,14 +1386,78 @@ ixgbe_parse_fdir_act_attr(const struct rte_flow_attr *attr, return 0; } +/* search next no void pattern and skip fuzzy */ +static inline +const struct rte_flow_item *next_no_fuzzy_pattern( + const struct rte_flow_item pattern[], + const struct rte_flow_item *cur) +{ + const struct rte_flow_item *next = + next_no_void_pattern(pattern, cur); + while (1) { + if (next->type != RTE_FLOW_ITEM_TYPE_FUZZY) + return next; + next = next_no_void_pattern(pattern, next); + } +} + +static inline uint8_t signature_match(const struct rte_flow_item pattern[]) +{ + const struct rte_flow_item_fuzzy *spec, *last, *mask; + const struct rte_flow_item *item; + uint32_t sh, lh, mh; + int i = 0; + + while (1) { + item = pattern + i; + if (item->type == RTE_FLOW_ITEM_TYPE_END) + break; + + if (item->type == RTE_FLOW_ITEM_TYPE_FUZZY) { + spec = + (const struct rte_flow_item_fuzzy *)item->spec; + last = + (const struct rte_flow_item_fuzzy *)item->last; + mask = + (const struct rte_flow_item_fuzzy *)item->mask; + + if (!spec || !mask) + return 0; + + sh = spec->thresh; + + if (!last) + lh = sh; + else + lh = last->thresh; + + mh = mask->thresh; + sh = sh & mh; + lh = lh & mh; + + if (!sh || sh > lh) + return 0; + + return 1; + } + + i++; + } + + return 0; +} + /** * Parse the rule to see if it is a IP or MAC VLAN flow director rule. * And get the flow director filter info BTW. * UDP/TCP/SCTP PATTERN: - * The first not void item can be ETH or IPV4. - * The second not void item must be IPV4 if the first one is ETH. - * The third not void item must be UDP or TCP or SCTP. + * The first not void item can be ETH or IPV4 or IPV6 + * The second not void item must be IPV4 or IPV6 if the first one is ETH. + * The next not void item could be UDP or TCP or SCTP (optional) + * The next not void item could be RAW (for flexbyte, optional) * The next not void item must be END. + * A Fuzzy Match pattern can appear at any place before END. + * Fuzzy Match is optional for IPV4 but is required for IPV6 * MAC VLAN PATTERN: * The first not void item must be ETH. * The second not void item must be MAC VLAN. @@ -1334,6 +1474,14 @@ ixgbe_parse_fdir_act_attr(const struct rte_flow_attr *attr, * dst_addr 192.167.3.50 0xFFFFFFFF * UDP/TCP/SCTP src_port 80 0xFFFF * dst_port 80 0xFFFF + * FLEX relative 0 0x1 + * search 0 0x1 + * reserved 0 0 + * offset 12 0xFFFFFFFF + * limit 0 0xFFFF + * length 2 0xFFFF + * pattern[0] 0x86 0xFF + * pattern[1] 0xDD 0xFF * END * MAC VLAN pattern example: * ITEM Spec Mask @@ -1346,7 +1494,8 @@ ixgbe_parse_fdir_act_attr(const struct rte_flow_attr *attr, * Item->last should be NULL. 
*/ static int -ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr, +ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, const struct rte_flow_item pattern[], const struct rte_flow_action actions[], struct ixgbe_fdir_rule *rule, @@ -1357,6 +1506,8 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr, const struct rte_flow_item_eth *eth_mask; const struct rte_flow_item_ipv4 *ipv4_spec; const struct rte_flow_item_ipv4 *ipv4_mask; + const struct rte_flow_item_ipv6 *ipv6_spec; + const struct rte_flow_item_ipv6 *ipv6_mask; const struct rte_flow_item_tcp *tcp_spec; const struct rte_flow_item_tcp *tcp_mask; const struct rte_flow_item_udp *udp_spec; @@ -1365,8 +1516,11 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr, const struct rte_flow_item_sctp *sctp_mask; const struct rte_flow_item_vlan *vlan_spec; const struct rte_flow_item_vlan *vlan_mask; + const struct rte_flow_item_raw *raw_mask; + const struct rte_flow_item_raw *raw_spec; + uint8_t j; - uint32_t index, j; + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); if (!pattern) { rte_flow_error_set(error, EINVAL, @@ -1396,17 +1550,16 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr, memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask)); rule->mask.vlan_tci_mask = 0; - - /* parse pattern */ - index = 0; + rule->mask.flex_bytes_mask = 0; /** * The first not void item should be * MAC or IPv4 or TCP or UDP or SCTP. */ - NEXT_ITEM_OF_PATTERN(item, pattern, index); + item = next_no_fuzzy_pattern(pattern, NULL); if (item->type != RTE_FLOW_ITEM_TYPE_ETH && item->type != RTE_FLOW_ITEM_TYPE_IPV4 && + item->type != RTE_FLOW_ITEM_TYPE_IPV6 && item->type != RTE_FLOW_ITEM_TYPE_TCP && item->type != RTE_FLOW_ITEM_TYPE_UDP && item->type != RTE_FLOW_ITEM_TYPE_SCTP) { @@ -1417,7 +1570,10 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr, return -rte_errno; } - rule->mode = RTE_FDIR_MODE_PERFECT; + if (signature_match(pattern)) + rule->mode = RTE_FDIR_MODE_SIGNATURE; + else + rule->mode = RTE_FDIR_MODE_PERFECT; /*Not supported last point for range*/ if (item->last) { @@ -1454,14 +1610,13 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr, if (item->mask) { - /* If ethernet has meaning, it means MAC VLAN mode. */ - rule->mode = RTE_FDIR_MODE_PERFECT_MAC_VLAN; rule->b_mask = TRUE; eth_mask = (const struct rte_flow_item_eth *)item->mask; /* Ether type should be masked. */ - if (eth_mask->type) { + if (eth_mask->type || + rule->mode == RTE_FDIR_MODE_SIGNATURE) { memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, @@ -1469,6 +1624,9 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr, return -rte_errno; } + /* If ethernet has meaning, it means MAC VLAN mode. */ + rule->mode = RTE_FDIR_MODE_PERFECT_MAC_VLAN; + /** * src MAC address must be masked, * and don't support dst MAC address mask. @@ -1497,8 +1655,7 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr, * Check if the next not void item is vlan or ipv4. * IPv6 is not supported. 
*/ - index++; - NEXT_ITEM_OF_PATTERN(item, pattern, index); + item = next_no_fuzzy_pattern(pattern, item); if (rule->mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) { if (item->type != RTE_FLOW_ITEM_TYPE_VLAN) { memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); @@ -1508,7 +1665,8 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr, return -rte_errno; } } else { - if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) { + if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 && + item->type != RTE_FLOW_ITEM_TYPE_VLAN) { memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, @@ -1544,18 +1702,9 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr, rule->mask.vlan_tci_mask &= rte_cpu_to_be_16(0xEFFF); /* More than one tags are not supported. */ - /** - * Check if the next not void item is not vlan. - */ - index++; - NEXT_ITEM_OF_PATTERN(item, pattern, index); - if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) { - memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); - rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ITEM, - item, "Not supported by fdir filter"); - return -rte_errno; - } else if (item->type != RTE_FLOW_ITEM_TYPE_END) { + /* Next not void item must be END */ + item = next_no_fuzzy_pattern(pattern, item); + if (item->type != RTE_FLOW_ITEM_TYPE_END) { memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, @@ -1564,7 +1713,7 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr, } } - /* Get the IP info. */ + /* Get the IPV4 info. */ if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) { /** * Set the flow type even if there's no content @@ -1624,12 +1773,104 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr, * Check if the next not void item is * TCP or UDP or SCTP or END. */ - index++; - NEXT_ITEM_OF_PATTERN(item, pattern, index); + item = next_no_fuzzy_pattern(pattern, item); if (item->type != RTE_FLOW_ITEM_TYPE_TCP && item->type != RTE_FLOW_ITEM_TYPE_UDP && item->type != RTE_FLOW_ITEM_TYPE_SCTP && - item->type != RTE_FLOW_ITEM_TYPE_END) { + item->type != RTE_FLOW_ITEM_TYPE_END && + item->type != RTE_FLOW_ITEM_TYPE_RAW) { + memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by fdir filter"); + return -rte_errno; + } + } + + /* Get the IPV6 info. */ + if (item->type == RTE_FLOW_ITEM_TYPE_IPV6) { + /** + * Set the flow type even if there's no content + * as we must have a flow type. + */ + rule->ixgbe_fdir.formatted.flow_type = + IXGBE_ATR_FLOW_TYPE_IPV6; + + /** + * 1. must signature match + * 2. not support last + * 3. 
mask must not null + */ + if (rule->mode != RTE_FDIR_MODE_SIGNATURE || + item->last || + !item->mask) { + memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + item, "Not supported last point for range"); + return -rte_errno; + } + + rule->b_mask = TRUE; + ipv6_mask = + (const struct rte_flow_item_ipv6 *)item->mask; + if (ipv6_mask->hdr.vtc_flow || + ipv6_mask->hdr.payload_len || + ipv6_mask->hdr.proto || + ipv6_mask->hdr.hop_limits) { + memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by fdir filter"); + return -rte_errno; + } + + /* check src addr mask */ + for (j = 0; j < 16; j++) { + if (ipv6_mask->hdr.src_addr[j] == UINT8_MAX) { + rule->mask.src_ipv6_mask |= 1 << j; + } else if (ipv6_mask->hdr.src_addr[j] != 0) { + memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by fdir filter"); + return -rte_errno; + } + } + + /* check dst addr mask */ + for (j = 0; j < 16; j++) { + if (ipv6_mask->hdr.dst_addr[j] == UINT8_MAX) { + rule->mask.dst_ipv6_mask |= 1 << j; + } else if (ipv6_mask->hdr.dst_addr[j] != 0) { + memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by fdir filter"); + return -rte_errno; + } + } + + if (item->spec) { + rule->b_spec = TRUE; + ipv6_spec = + (const struct rte_flow_item_ipv6 *)item->spec; + rte_memcpy(rule->ixgbe_fdir.formatted.src_ip, + ipv6_spec->hdr.src_addr, 16); + rte_memcpy(rule->ixgbe_fdir.formatted.dst_ip, + ipv6_spec->hdr.dst_addr, 16); + } + + /** + * Check if the next not void item is + * TCP or UDP or SCTP or END. + */ + item = next_no_fuzzy_pattern(pattern, item); + if (item->type != RTE_FLOW_ITEM_TYPE_TCP && + item->type != RTE_FLOW_ITEM_TYPE_UDP && + item->type != RTE_FLOW_ITEM_TYPE_SCTP && + item->type != RTE_FLOW_ITEM_TYPE_END && + item->type != RTE_FLOW_ITEM_TYPE_RAW) { memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, @@ -1644,8 +1885,8 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr, * Set the flow type even if there's no content * as we must have a flow type. */ - rule->ixgbe_fdir.formatted.flow_type = - IXGBE_ATR_FLOW_TYPE_TCPV4; + rule->ixgbe_fdir.formatted.flow_type |= + IXGBE_ATR_L4TYPE_TCP; /*Not supported last point for range*/ if (item->last) { rte_flow_error_set(error, EINVAL, @@ -1690,6 +1931,17 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr, rule->ixgbe_fdir.formatted.dst_port = tcp_spec->hdr.dst_port; } + + item = next_no_fuzzy_pattern(pattern, item); + if (item->type != RTE_FLOW_ITEM_TYPE_RAW && + item->type != RTE_FLOW_ITEM_TYPE_END) { + memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by fdir filter"); + return -rte_errno; + } + } /* Get the UDP info */ @@ -1698,8 +1950,8 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr, * Set the flow type even if there's no content * as we must have a flow type. 
 */
-		rule->ixgbe_fdir.formatted.flow_type =
-			IXGBE_ATR_FLOW_TYPE_UDPV4;
+		rule->ixgbe_fdir.formatted.flow_type |=
+			IXGBE_ATR_L4TYPE_UDP;
 		/*Not supported last point for range*/
 		if (item->last) {
 			rte_flow_error_set(error, EINVAL,
@@ -1739,6 +1991,17 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
 			rule->ixgbe_fdir.formatted.dst_port =
 				udp_spec->hdr.dst_port;
 		}
+
+		item = next_no_fuzzy_pattern(pattern, item);
+		if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
+		    item->type != RTE_FLOW_ITEM_TYPE_END) {
+			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				item, "Not supported by fdir filter");
+			return -rte_errno;
+		}
+
 	}
 
 	/* Get the SCTP info */
@@ -1747,8 +2010,8 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
 		 * Set the flow type even if there's no content
 		 * as we must have a flow type.
 		 */
-		rule->ixgbe_fdir.formatted.flow_type =
-			IXGBE_ATR_FLOW_TYPE_SCTPV4;
+		rule->ixgbe_fdir.formatted.flow_type |=
+			IXGBE_ATR_L4TYPE_SCTP;
 		/*Not supported last point for range*/
 		if (item->last) {
 			rte_flow_error_set(error, EINVAL,
@@ -1756,46 +2019,147 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
 			item, "Not supported last point for range");
 			return -rte_errno;
 		}
-		/**
-		 * Only care about src & dst ports,
-		 * others should be masked.
-		 */
-		if (!item->mask) {
+
+		/* only the x550 family supports the SCTP port */
+		if (hw->mac.type == ixgbe_mac_X550 ||
+		    hw->mac.type == ixgbe_mac_X550EM_x ||
+		    hw->mac.type == ixgbe_mac_X550EM_a) {
+			/**
+			 * Only care about src & dst ports,
+			 * others should be masked.
+			 */
+			if (!item->mask) {
+				memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+				rte_flow_error_set(error, EINVAL,
+					RTE_FLOW_ERROR_TYPE_ITEM,
+					item, "Not supported by fdir filter");
+				return -rte_errno;
+			}
+			rule->b_mask = TRUE;
+			sctp_mask =
+				(const struct rte_flow_item_sctp *)item->mask;
+			if (sctp_mask->hdr.tag ||
+				sctp_mask->hdr.cksum) {
+				memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+				rte_flow_error_set(error, EINVAL,
+					RTE_FLOW_ERROR_TYPE_ITEM,
+					item, "Not supported by fdir filter");
+				return -rte_errno;
+			}
+			rule->mask.src_port_mask = sctp_mask->hdr.src_port;
+			rule->mask.dst_port_mask = sctp_mask->hdr.dst_port;
+
+			if (item->spec) {
+				rule->b_spec = TRUE;
+				sctp_spec =
+				(const struct rte_flow_item_sctp *)item->spec;
+				rule->ixgbe_fdir.formatted.src_port =
+						sctp_spec->hdr.src_port;
+				rule->ixgbe_fdir.formatted.dst_port =
+						sctp_spec->hdr.dst_port;
+			}
+		/* other MAC types do not support the SCTP port at all */
+		} else {
+			sctp_mask =
+				(const struct rte_flow_item_sctp *)item->mask;
+			if (sctp_mask &&
+				(sctp_mask->hdr.src_port ||
+				 sctp_mask->hdr.dst_port ||
+				 sctp_mask->hdr.tag ||
+				 sctp_mask->hdr.cksum)) {
+				memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+				rte_flow_error_set(error, EINVAL,
+					RTE_FLOW_ERROR_TYPE_ITEM,
+					item, "Not supported by fdir filter");
+				return -rte_errno;
+			}
+		}
+
+		item = next_no_fuzzy_pattern(pattern, item);
+		if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
+			item->type != RTE_FLOW_ITEM_TYPE_END) {
 			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
 			rte_flow_error_set(error, EINVAL,
 				RTE_FLOW_ERROR_TYPE_ITEM,
 				item, "Not supported by fdir filter");
 			return -rte_errno;
 		}
-		rule->b_mask = TRUE;
-		sctp_mask =
-			(const struct rte_flow_item_sctp *)item->mask;
-		if (sctp_mask->hdr.tag ||
-			sctp_mask->hdr.cksum) {
+	}
+
+	/* Get the flex byte info */
+	if (item->type == RTE_FLOW_ITEM_TYPE_RAW) {
+		/* Not supported last point for range*/
+		if (item->last) {
+
rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + item, "Not supported last point for range"); + return -rte_errno; + } + /* mask should not be null */ + if (!item->mask || !item->spec) { memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item, "Not supported by fdir filter"); return -rte_errno; } - rule->mask.src_port_mask = sctp_mask->hdr.src_port; - rule->mask.dst_port_mask = sctp_mask->hdr.dst_port; - if (item->spec) { - rule->b_spec = TRUE; - sctp_spec = - (const struct rte_flow_item_sctp *)item->spec; - rule->ixgbe_fdir.formatted.src_port = - sctp_spec->hdr.src_port; - rule->ixgbe_fdir.formatted.dst_port = - sctp_spec->hdr.dst_port; + raw_mask = (const struct rte_flow_item_raw *)item->mask; + + /* check mask */ + if (raw_mask->relative != 0x1 || + raw_mask->search != 0x1 || + raw_mask->reserved != 0x0 || + (uint32_t)raw_mask->offset != 0xffffffff || + raw_mask->limit != 0xffff || + raw_mask->length != 0xffff) { + memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by fdir filter"); + return -rte_errno; } + + raw_spec = (const struct rte_flow_item_raw *)item->spec; + + /* check spec */ + if (raw_spec->relative != 0 || + raw_spec->search != 0 || + raw_spec->reserved != 0 || + raw_spec->offset > IXGBE_MAX_FLX_SOURCE_OFF || + raw_spec->offset % 2 || + raw_spec->limit != 0 || + raw_spec->length != 2 || + /* pattern can't be 0xffff */ + (raw_spec->pattern[0] == 0xff && + raw_spec->pattern[1] == 0xff)) { + memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by fdir filter"); + return -rte_errno; + } + + /* check pattern mask */ + if (raw_mask->pattern[0] != 0xff || + raw_mask->pattern[1] != 0xff) { + memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by fdir filter"); + return -rte_errno; + } + + rule->mask.flex_bytes_mask = 0xffff; + rule->ixgbe_fdir.formatted.flex_bytes = + (((uint16_t)raw_spec->pattern[1]) << 8) | + raw_spec->pattern[0]; + rule->flex_bytes_offset = raw_spec->offset; } if (item->type != RTE_FLOW_ITEM_TYPE_END) { /* check if the next not void item is END */ - index++; - NEXT_ITEM_OF_PATTERN(item, pattern, index); + item = next_no_fuzzy_pattern(pattern, item); if (item->type != RTE_FLOW_ITEM_TYPE_END) { memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); rte_flow_error_set(error, EINVAL, @@ -1863,7 +2227,7 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr, const struct rte_flow_item_eth *eth_mask; const struct rte_flow_item_vlan *vlan_spec; const struct rte_flow_item_vlan *vlan_mask; - uint32_t index, j; + uint32_t j; if (!pattern) { rte_flow_error_set(error, EINVAL, @@ -1894,14 +2258,11 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr, memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask)); rule->mask.vlan_tci_mask = 0; - /* parse pattern */ - index = 0; - /** * The first not void item should be * MAC or IPv4 or IPv6 or UDP or VxLAN. 
*/ - NEXT_ITEM_OF_PATTERN(item, pattern, index); + item = next_no_void_pattern(pattern, NULL); if (item->type != RTE_FLOW_ITEM_TYPE_ETH && item->type != RTE_FLOW_ITEM_TYPE_IPV4 && item->type != RTE_FLOW_ITEM_TYPE_IPV6 && @@ -1927,7 +2288,7 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr, item, "Not supported by fdir filter"); return -rte_errno; } - /*Not supported last point for range*/ + /* Not supported last point for range*/ if (item->last) { rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, @@ -1936,8 +2297,7 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr, } /* Check if the next not void item is IPv4 or IPv6. */ - index++; - NEXT_ITEM_OF_PATTERN(item, pattern, index); + item = next_no_void_pattern(pattern, item); if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 && item->type != RTE_FLOW_ITEM_TYPE_IPV6) { memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); @@ -1968,8 +2328,7 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr, } /* Check if the next not void item is UDP or NVGRE. */ - index++; - NEXT_ITEM_OF_PATTERN(item, pattern, index); + item = next_no_void_pattern(pattern, item); if (item->type != RTE_FLOW_ITEM_TYPE_UDP && item->type != RTE_FLOW_ITEM_TYPE_NVGRE) { memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); @@ -1999,8 +2358,7 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr, } /* Check if the next not void item is VxLAN. */ - index++; - NEXT_ITEM_OF_PATTERN(item, pattern, index); + item = next_no_void_pattern(pattern, item); if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) { memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); rte_flow_error_set(error, EINVAL, @@ -2013,7 +2371,7 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr, /* Get the VxLAN info */ if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN) { rule->ixgbe_fdir.formatted.tunnel_type = - RTE_FDIR_TUNNEL_TYPE_VXLAN; + IXGBE_FDIR_VXLAN_TUNNEL_TYPE; /* Only care about VNI, others should be masked. */ if (!item->mask) { @@ -2065,17 +2423,15 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr, vxlan_spec = (const struct rte_flow_item_vxlan *) item->spec; rte_memcpy(((uint8_t *) - &rule->ixgbe_fdir.formatted.tni_vni + 1), + &rule->ixgbe_fdir.formatted.tni_vni), vxlan_spec->vni, RTE_DIM(vxlan_spec->vni)); - rule->ixgbe_fdir.formatted.tni_vni = rte_be_to_cpu_32( - rule->ixgbe_fdir.formatted.tni_vni); } } /* Get the NVGRE info */ if (item->type == RTE_FLOW_ITEM_TYPE_NVGRE) { rule->ixgbe_fdir.formatted.tunnel_type = - RTE_FDIR_TUNNEL_TYPE_NVGRE; + IXGBE_FDIR_NVGRE_TUNNEL_TYPE; /** * Only care about flags0, flags1, protocol and TNI, @@ -2109,8 +2465,7 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr, item, "Not supported by fdir filter"); return -rte_errno; } - if (nvgre_mask->c_k_s_rsvd0_ver != - rte_cpu_to_be_16(0x3000) || + if (nvgre_mask->protocol && nvgre_mask->protocol != 0xFFFF) { memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); rte_flow_error_set(error, EINVAL, @@ -2118,6 +2473,15 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr, item, "Not supported by fdir filter"); return -rte_errno; } + if (nvgre_mask->c_k_s_rsvd0_ver && + nvgre_mask->c_k_s_rsvd0_ver != + rte_cpu_to_be_16(0xFFFF)) { + memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by fdir filter"); + return -rte_errno; + } /* TNI must be totally masked or not. 
*/ if (nvgre_mask->tni[0] && ((nvgre_mask->tni[0] != 0xFF) || @@ -2139,7 +2503,15 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr, nvgre_spec = (const struct rte_flow_item_nvgre *)item->spec; if (nvgre_spec->c_k_s_rsvd0_ver != - rte_cpu_to_be_16(0x2000) || + rte_cpu_to_be_16(0x2000) && + nvgre_mask->c_k_s_rsvd0_ver) { + memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by fdir filter"); + return -rte_errno; + } + if (nvgre_mask->protocol && nvgre_spec->protocol != rte_cpu_to_be_16(NVGRE_PROTOCOL)) { memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); @@ -2151,13 +2523,11 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr, /* tni is a 24-bits bit field */ rte_memcpy(&rule->ixgbe_fdir.formatted.tni_vni, nvgre_spec->tni, RTE_DIM(nvgre_spec->tni)); - rule->ixgbe_fdir.formatted.tni_vni <<= 8; } } /* check if the next not void item is MAC */ - index++; - NEXT_ITEM_OF_PATTERN(item, pattern, index); + item = next_no_void_pattern(pattern, item); if (item->type != RTE_FLOW_ITEM_TYPE_ETH) { memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); rte_flow_error_set(error, EINVAL, @@ -2240,8 +2610,7 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr, * Check if the next not void item is vlan or ipv4. * IPv6 is not supported. */ - index++; - NEXT_ITEM_OF_PATTERN(item, pattern, index); + item = next_no_void_pattern(pattern, item); if ((item->type != RTE_FLOW_ITEM_TYPE_VLAN) && (item->type != RTE_FLOW_ITEM_TYPE_IPV4)) { memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); @@ -2277,8 +2646,7 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr, /* More than one tags are not supported. */ /* check if the next not void item is END */ - index++; - NEXT_ITEM_OF_PATTERN(item, pattern, index); + item = next_no_void_pattern(pattern, item); if (item->type != RTE_FLOW_ITEM_TYPE_END) { memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); @@ -2316,7 +2684,7 @@ ixgbe_parse_fdir_filter(struct rte_eth_dev *dev, hw->mac.type != ixgbe_mac_X550EM_a) return -ENOTSUP; - ret = ixgbe_parse_fdir_filter_normal(attr, pattern, + ret = ixgbe_parse_fdir_filter_normal(dev, attr, pattern, actions, rule, error); if (!ret) @@ -2325,13 +2693,38 @@ ixgbe_parse_fdir_filter(struct rte_eth_dev *dev, ret = ixgbe_parse_fdir_filter_tunnel(attr, pattern, actions, rule, error); + if (ret) + return ret; + step_next: + + if (hw->mac.type == ixgbe_mac_82599EB && + rule->fdirflags == IXGBE_FDIRCMD_DROP && + (rule->ixgbe_fdir.formatted.src_port != 0 || + rule->ixgbe_fdir.formatted.dst_port != 0)) + return -ENOTSUP; + if (fdir_mode == RTE_FDIR_MODE_NONE || fdir_mode != rule->mode) return -ENOTSUP; + + if (rule->queue >= dev->data->nb_rx_queues) + return -ENOTSUP; + return ret; } +void +ixgbe_filterlist_init(void) +{ + TAILQ_INIT(&filter_ntuple_list); + TAILQ_INIT(&filter_ethertype_list); + TAILQ_INIT(&filter_syn_list); + TAILQ_INIT(&filter_fdir_list); + TAILQ_INIT(&filter_l2_tunnel_list); + TAILQ_INIT(&ixgbe_flow_list); +} + void ixgbe_filterlist_flush(void) { @@ -2414,6 +2807,7 @@ ixgbe_flow_create(struct rte_eth_dev *dev, struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr; struct ixgbe_fdir_rule_ele *fdir_rule_ptr; struct ixgbe_flow_mem *ixgbe_flow_mem_ptr; + uint8_t first_mask = FALSE; flow = rte_zmalloc("ixgbe_rte_flow", sizeof(struct rte_flow), 0); if (!flow) { @@ -2434,12 +2828,23 @@ ixgbe_flow_create(struct rte_eth_dev *dev, memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter)); ret = 
ixgbe_parse_ntuple_filter(dev, attr, pattern, actions, &ntuple_filter, error); + +#ifdef RTE_LIBRTE_SECURITY + /* ESP flow not really a flow*/ + if (ntuple_filter.proto == IPPROTO_ESP) + return flow; +#endif + if (!ret) { ret = ixgbe_add_del_ntuple_filter(dev, &ntuple_filter, TRUE); if (!ret) { ntuple_filter_ptr = rte_zmalloc("ixgbe_ntuple_filter", sizeof(struct ixgbe_ntuple_filter_ele), 0); - (void)rte_memcpy(&ntuple_filter_ptr->filter_info, + if (!ntuple_filter_ptr) { + PMD_DRV_LOG(ERR, "failed to allocate memory"); + goto out; + } + rte_memcpy(&ntuple_filter_ptr->filter_info, &ntuple_filter, sizeof(struct rte_eth_ntuple_filter)); TAILQ_INSERT_TAIL(&filter_ntuple_list, @@ -2461,7 +2866,11 @@ ixgbe_flow_create(struct rte_eth_dev *dev, ethertype_filter_ptr = rte_zmalloc( "ixgbe_ethertype_filter", sizeof(struct ixgbe_ethertype_filter_ele), 0); - (void)rte_memcpy(ðertype_filter_ptr->filter_info, + if (!ethertype_filter_ptr) { + PMD_DRV_LOG(ERR, "failed to allocate memory"); + goto out; + } + rte_memcpy(ðertype_filter_ptr->filter_info, ðertype_filter, sizeof(struct rte_eth_ethertype_filter)); TAILQ_INSERT_TAIL(&filter_ethertype_list, @@ -2481,7 +2890,11 @@ ixgbe_flow_create(struct rte_eth_dev *dev, if (!ret) { syn_filter_ptr = rte_zmalloc("ixgbe_syn_filter", sizeof(struct ixgbe_eth_syn_filter_ele), 0); - (void)rte_memcpy(&syn_filter_ptr->filter_info, + if (!syn_filter_ptr) { + PMD_DRV_LOG(ERR, "failed to allocate memory"); + goto out; + } + rte_memcpy(&syn_filter_ptr->filter_info, &syn_filter, sizeof(struct rte_eth_syn_filter)); TAILQ_INSERT_TAIL(&filter_syn_list, @@ -2505,11 +2918,19 @@ ixgbe_flow_create(struct rte_eth_dev *dev, rte_memcpy(&fdir_info->mask, &fdir_rule.mask, sizeof(struct ixgbe_hw_fdir_mask)); + fdir_info->flex_bytes_offset = + fdir_rule.flex_bytes_offset; + + if (fdir_rule.mask.flex_bytes_mask) + ixgbe_fdir_set_flexbytes_offset(dev, + fdir_rule.flex_bytes_offset); + ret = ixgbe_fdir_set_input_mask(dev); if (ret) goto out; fdir_info->mask_added = TRUE; + first_mask = TRUE; } else { /** * Only support one global mask, @@ -2520,6 +2941,10 @@ ixgbe_flow_create(struct rte_eth_dev *dev, sizeof(struct ixgbe_hw_fdir_mask)); if (ret) goto out; + + if (fdir_info->flex_bytes_offset != + fdir_rule.flex_bytes_offset) + goto out; } } @@ -2529,7 +2954,11 @@ ixgbe_flow_create(struct rte_eth_dev *dev, if (!ret) { fdir_rule_ptr = rte_zmalloc("ixgbe_fdir_filter", sizeof(struct ixgbe_fdir_rule_ele), 0); - (void)rte_memcpy(&fdir_rule_ptr->filter_info, + if (!fdir_rule_ptr) { + PMD_DRV_LOG(ERR, "failed to allocate memory"); + goto out; + } + rte_memcpy(&fdir_rule_ptr->filter_info, &fdir_rule, sizeof(struct ixgbe_fdir_rule)); TAILQ_INSERT_TAIL(&filter_fdir_list, @@ -2540,8 +2969,15 @@ ixgbe_flow_create(struct rte_eth_dev *dev, return flow; } - if (ret) + if (ret) { + /** + * clean the mask_added flag if fail to + * program + **/ + if (first_mask) + fdir_info->mask_added = FALSE; goto out; + } } goto out; @@ -2555,7 +2991,11 @@ ixgbe_flow_create(struct rte_eth_dev *dev, if (!ret) { l2_tn_filter_ptr = rte_zmalloc("ixgbe_l2_tn_filter", sizeof(struct ixgbe_eth_l2_tunnel_conf_ele), 0); - (void)rte_memcpy(&l2_tn_filter_ptr->filter_info, + if (!l2_tn_filter_ptr) { + PMD_DRV_LOG(ERR, "failed to allocate memory"); + goto out; + } + rte_memcpy(&l2_tn_filter_ptr->filter_info, &l2_tn_filter, sizeof(struct rte_eth_l2_tunnel_conf)); TAILQ_INSERT_TAIL(&filter_l2_tunnel_list, @@ -2583,7 +3023,7 @@ out: * the HW. Because there can be no enough room for the rule. 
*/ static int -ixgbe_flow_validate(__rte_unused struct rte_eth_dev *dev, +ixgbe_flow_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, const struct rte_flow_item pattern[], const struct rte_flow_action actions[], @@ -2654,7 +3094,7 @@ ixgbe_flow_destroy(struct rte_eth_dev *dev, case RTE_ETH_FILTER_NTUPLE: ntuple_filter_ptr = (struct ixgbe_ntuple_filter_ele *) pmd_flow->rule; - (void)rte_memcpy(&ntuple_filter, + rte_memcpy(&ntuple_filter, &ntuple_filter_ptr->filter_info, sizeof(struct rte_eth_ntuple_filter)); ret = ixgbe_add_del_ntuple_filter(dev, &ntuple_filter, FALSE); @@ -2667,7 +3107,7 @@ ixgbe_flow_destroy(struct rte_eth_dev *dev, case RTE_ETH_FILTER_ETHERTYPE: ethertype_filter_ptr = (struct ixgbe_ethertype_filter_ele *) pmd_flow->rule; - (void)rte_memcpy(ðertype_filter, + rte_memcpy(ðertype_filter, ðertype_filter_ptr->filter_info, sizeof(struct rte_eth_ethertype_filter)); ret = ixgbe_add_del_ethertype_filter(dev, @@ -2681,7 +3121,7 @@ ixgbe_flow_destroy(struct rte_eth_dev *dev, case RTE_ETH_FILTER_SYN: syn_filter_ptr = (struct ixgbe_eth_syn_filter_ele *) pmd_flow->rule; - (void)rte_memcpy(&syn_filter, + rte_memcpy(&syn_filter, &syn_filter_ptr->filter_info, sizeof(struct rte_eth_syn_filter)); ret = ixgbe_syn_filter_set(dev, &syn_filter, FALSE); @@ -2693,7 +3133,7 @@ ixgbe_flow_destroy(struct rte_eth_dev *dev, break; case RTE_ETH_FILTER_FDIR: fdir_rule_ptr = (struct ixgbe_fdir_rule_ele *)pmd_flow->rule; - (void)rte_memcpy(&fdir_rule, + rte_memcpy(&fdir_rule, &fdir_rule_ptr->filter_info, sizeof(struct ixgbe_fdir_rule)); ret = ixgbe_fdir_filter_program(dev, &fdir_rule, TRUE, FALSE); @@ -2708,7 +3148,7 @@ ixgbe_flow_destroy(struct rte_eth_dev *dev, case RTE_ETH_FILTER_L2_TUNNEL: l2_tn_filter_ptr = (struct ixgbe_eth_l2_tunnel_conf_ele *) pmd_flow->rule; - (void)rte_memcpy(&l2_tn_filter, &l2_tn_filter_ptr->filter_info, + rte_memcpy(&l2_tn_filter, &l2_tn_filter_ptr->filter_info, sizeof(struct rte_eth_l2_tunnel_conf)); ret = ixgbe_dev_l2_tunnel_filter_del(dev, &l2_tn_filter); if (!ret) { @@ -2774,9 +3214,8 @@ ixgbe_flow_flush(struct rte_eth_dev *dev, } const struct rte_flow_ops ixgbe_flow_ops = { - ixgbe_flow_validate, - ixgbe_flow_create, - ixgbe_flow_destroy, - ixgbe_flow_flush, - NULL, + .validate = ixgbe_flow_validate, + .create = ixgbe_flow_create, + .destroy = ixgbe_flow_destroy, + .flush = ixgbe_flow_flush, };
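

The cursor-style helpers introduced at the top of this patch replace the old index-based NEXT_ITEM_OF_PATTERN/NEXT_ITEM_OF_ACTION macros. A minimal standalone sketch of how they are consumed (assuming DPDK's rte_flow.h is available; the helper is re-declared here under a demo_ name and is logically equivalent to the patch's while (1) form): pass cur == NULL to fetch the first non-void item, then pass the previous item back in to advance. Termination relies on the same assumption the patch documents, namely that every pattern ends with an END item.

#include <stdio.h>
#include <rte_flow.h>

static const struct rte_flow_item *
demo_next_no_void_pattern(const struct rte_flow_item pattern[],
			  const struct rte_flow_item *cur)
{
	/* NULL cursor means "start of pattern"; otherwise step past cur. */
	const struct rte_flow_item *next = cur ? cur + 1 : &pattern[0];

	while (next->type == RTE_FLOW_ITEM_TYPE_VOID)
		next++;
	return next;
}

int
main(void)
{
	/* ETH / VOID / IPV4 / TCP / END: the VOID item is skipped. */
	const struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_VOID },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_TCP },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	const struct rte_flow_item *item = NULL;

	do {
		item = demo_next_no_void_pattern(pattern, item);
		printf("visited item type %d\n", (int)item->type);
	} while (item->type != RTE_FLOW_ITEM_TYPE_END);

	return 0;
}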
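Signature-mode flow director matching is requested through the FUZZY item that the new signature_match() scans for. A hedged sketch of the opt-in item, assuming the rte_flow_item_fuzzy layout of this DPDK generation; the threshold value 1 is an arbitrary non-zero example. With item->last left NULL, signature_match() treats last->thresh as equal to spec->thresh, so the sh <= lh check passes and the function returns 1.

#include <stdint.h>
#include <rte_flow.h>

static const struct rte_flow_item_fuzzy fuzzy_spec = { .thresh = 1 };
static const struct rte_flow_item_fuzzy fuzzy_mask = { .thresh = UINT32_MAX };

/* Placed anywhere before END in the pattern, this makes signature_match()
 * return 1: spec->thresh & mask->thresh is non-zero. */
static const struct rte_flow_item fuzzy_item = {
	.type = RTE_FLOW_ITEM_TYPE_FUZZY,
	.spec = &fuzzy_spec,
	.mask = &fuzzy_mask,
};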
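The flexbyte checks added to ixgbe_parse_fdir_filter_normal() constrain the RAW item tightly: the spec must have relative == search == limit == 0, an even offset no larger than IXGBE_MAX_FLX_SOURCE_OFF (62) and length == 2, while the mask must have all of those fields fully set and both pattern bytes 0xff (and the spec pattern must not itself be 0xffff). A hedged sketch of building such an item, assuming the rte_flow_item_raw of this era ends in a flexible pattern[] array (later DPDK releases changed it to a pointer), hence the runtime allocation; make_raw and make_flex_item are illustrative helpers, not driver API, and the caller owns the allocations.

#include <stdlib.h>
#include <string.h>
#include <rte_flow.h>

static struct rte_flow_item_raw *
make_raw(uint32_t relative, uint32_t search, int32_t offset,
	 uint16_t limit, uint16_t length, const uint8_t bytes[2])
{
	/* calloc zeroes the reserved bits; +2 for the flexible pattern[]. */
	struct rte_flow_item_raw *raw = calloc(1, sizeof(*raw) + 2);

	if (raw == NULL)
		return NULL;
	raw->relative = relative;
	raw->search = search;
	raw->offset = offset;
	raw->limit = limit;
	raw->length = length;
	memcpy(raw->pattern, bytes, 2);
	return raw;
}

/* Match bytes 0x86 0xDD at even offset 12, mirroring the FLEX example in
 * the pattern comment block of the patch. */
static const uint8_t flex_bytes[2] = { 0x86, 0xdd };
static const uint8_t flex_full_mask[2] = { 0xff, 0xff };

static struct rte_flow_item
make_flex_item(void)
{
	struct rte_flow_item item = {
		.type = RTE_FLOW_ITEM_TYPE_RAW,
		.spec = make_raw(0, 0, 12, 0, 2, flex_bytes),
		/* (uint32_t)-1 == 0xffffffff, as the offset mask check wants */
		.mask = make_raw(1, 1, -1, 0xffff, 0xffff, flex_full_mask),
	};
	return item;
}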
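All of the per-type filter lists declared at the top of the patch follow the same sys/queue.h idiom, and ixgbe_flow_create()/ixgbe_filterlist_flush() do the same insert/drain bookkeeping for each of them (the extra ixgbe_flow_mem list exists so that flush can also free the rte_flow allocations). A generic sketch of that pattern with a hypothetical demo_filter_ele element type standing in for the real filter structures:

#include <stdlib.h>
#include <sys/queue.h>

struct demo_filter_ele {
	TAILQ_ENTRY(demo_filter_ele) entries;
	int filter_info;	/* stands in for the real filter struct */
};

TAILQ_HEAD(demo_filter_list, demo_filter_ele);
static struct demo_filter_list demo_filters;

/* mirrors ixgbe_filterlist_init() */
static void
demo_filterlist_init(void)
{
	TAILQ_INIT(&demo_filters);
}

/* mirrors the create-path bookkeeping: allocate, fill, append */
static int
demo_filter_add(int info)
{
	struct demo_filter_ele *ele = calloc(1, sizeof(*ele));

	if (ele == NULL)
		return -1;
	ele->filter_info = info;
	TAILQ_INSERT_TAIL(&demo_filters, ele, entries);
	return 0;
}

/* mirrors ixgbe_filterlist_flush(): unlink and free every element */
static void
demo_filterlist_flush(void)
{
	struct demo_filter_ele *ele;

	while ((ele = TAILQ_FIRST(&demo_filters)) != NULL) {
		TAILQ_REMOVE(&demo_filters, ele, entries);
		free(ele);
	}
}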