/*
 * Copyright (c) 2017 QLogic Corporation.
 * See LICENSE.qede_pmd for copyright and licensing details.
 */
#include <sys/queue.h>

#include <rte_errno.h>

#include "qede_ethdev.h"
/* Default IPv4 header values for the flowdir probe packet:
 * version 4, header length 5 x 32-bit words.
 */
#define IP_VERSION (0x40)
#define IP_HDRLEN (0x5)
#define QEDE_FDIR_IP_DEFAULT_VERSION_IHL (IP_VERSION | IP_HDRLEN)
/* TCP data offset: 5 x 32-bit words in the upper nibble */
#define QEDE_FDIR_TCP_DEFAULT_DATAOFF (0x50)
#define QEDE_FDIR_IPV4_DEF_TTL (64)
/* IPv6 version/traffic-class/flow-label word: version 6, rest zero */
#define QEDE_FDIR_IPV6_DEFAULT_VTC_FLOW (0x60000000)

/* Sum of length of header types of L2, L3, L4.
 * L2 : ether_hdr + vlan_hdr + vxlan_hdr
 * L3 : ipv6_hdr
 * L4 : tcp_hdr
 */
#define QEDE_MAX_FDIR_PKT_LEN (86)

#define IPV6_ADDR_LEN (16)

/* Only non-fragmented IPv4/IPv6 TCP and UDP flow types are supported */
#define QEDE_VALID_FLOW(flow_type) \
	((flow_type) == RTE_ETH_FLOW_NONFRAG_IPV4_TCP || \
	 (flow_type) == RTE_ETH_FLOW_NONFRAG_IPV4_UDP || \
	 (flow_type) == RTE_ETH_FLOW_NONFRAG_IPV6_TCP || \
	 (flow_type) == RTE_ETH_FLOW_NONFRAG_IPV6_UDP)
/* Note: Flowdir support is only partial.
 * For ex: drop_queue, FDIR masks, flex_conf are not supported.
 * Parameters like pballoc/status fields are irrelevant here.
 */
44 int qede_check_fdir_support(struct rte_eth_dev *eth_dev)
46 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
47 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
48 struct rte_fdir_conf *fdir = ð_dev->data->dev_conf.fdir_conf;
50 /* check FDIR modes */
52 case RTE_FDIR_MODE_NONE:
53 qdev->fdir_info.arfs.arfs_enable = false;
54 DP_INFO(edev, "flowdir is disabled\n");
56 case RTE_FDIR_MODE_PERFECT:
57 if (ECORE_IS_CMT(edev)) {
58 DP_ERR(edev, "flowdir is not supported in 100G mode\n");
59 qdev->fdir_info.arfs.arfs_enable = false;
62 qdev->fdir_info.arfs.arfs_enable = true;
63 DP_INFO(edev, "flowdir is enabled\n");
65 case RTE_FDIR_MODE_PERFECT_TUNNEL:
66 case RTE_FDIR_MODE_SIGNATURE:
67 case RTE_FDIR_MODE_PERFECT_MAC_VLAN:
68 DP_ERR(edev, "Unsupported flowdir mode %d\n", fdir->mode);
75 void qede_fdir_dealloc_resc(struct rte_eth_dev *eth_dev)
77 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
78 struct qede_fdir_entry *tmp = NULL;
80 SLIST_FOREACH(tmp, &qdev->fdir_info.fdir_list_head, list) {
83 rte_memzone_free(tmp->mz);
84 SLIST_REMOVE(&qdev->fdir_info.fdir_list_head, tmp,
85 qede_fdir_entry, list);
92 qede_config_cmn_fdir_filter(struct rte_eth_dev *eth_dev,
93 struct rte_eth_fdir_filter *fdir_filter,
96 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
97 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
98 char mz_name[RTE_MEMZONE_NAMESIZE] = {0};
99 struct qede_fdir_entry *tmp = NULL;
100 struct qede_fdir_entry *fdir = NULL;
101 const struct rte_memzone *mz;
102 struct ecore_hwfn *p_hwfn;
103 enum _ecore_status_t rc;
108 if (qdev->fdir_info.filter_count == QEDE_RFS_MAX_FLTR - 1) {
109 DP_ERR(edev, "Reached max flowdir filter limit\n");
112 fdir = rte_malloc(NULL, sizeof(struct qede_fdir_entry),
113 RTE_CACHE_LINE_SIZE);
115 DP_ERR(edev, "Did not allocate memory for fdir\n");
119 /* soft_id could have been used as memzone string, but soft_id is
120 * not currently used so it has no significance.
122 snprintf(mz_name, sizeof(mz_name) - 1, "%lx",
123 (unsigned long)rte_get_timer_cycles());
124 mz = rte_memzone_reserve_aligned(mz_name, QEDE_MAX_FDIR_PKT_LEN,
125 SOCKET_ID_ANY, 0, RTE_CACHE_LINE_SIZE);
127 DP_ERR(edev, "Failed to allocate memzone for fdir, err = %s\n",
128 rte_strerror(rte_errno));
134 memset(pkt, 0, QEDE_MAX_FDIR_PKT_LEN);
135 pkt_len = qede_fdir_construct_pkt(eth_dev, fdir_filter, pkt,
136 &qdev->fdir_info.arfs);
141 DP_INFO(edev, "pkt_len = %u memzone = %s\n", pkt_len, mz_name);
143 SLIST_FOREACH(tmp, &qdev->fdir_info.fdir_list_head, list) {
144 if (memcmp(tmp->mz->addr, pkt, pkt_len) == 0) {
145 DP_INFO(edev, "flowdir filter exist\n");
151 SLIST_FOREACH(tmp, &qdev->fdir_info.fdir_list_head, list) {
152 if (memcmp(tmp->mz->addr, pkt, pkt_len) == 0)
156 DP_ERR(edev, "flowdir filter does not exist\n");
161 p_hwfn = ECORE_LEADING_HWFN(edev);
163 if (!qdev->fdir_info.arfs.arfs_enable) {
165 eth_dev->data->dev_conf.fdir_conf.mode =
166 RTE_FDIR_MODE_PERFECT;
167 qdev->fdir_info.arfs.arfs_enable = true;
168 DP_INFO(edev, "Force enable flowdir in perfect mode\n");
170 /* Enable ARFS searcher with updated flow_types */
171 ecore_arfs_mode_configure(p_hwfn, p_hwfn->p_arfs_ptt,
172 &qdev->fdir_info.arfs);
174 /* configure filter with ECORE_SPQ_MODE_EBLOCK */
175 rc = ecore_configure_rfs_ntuple_filter(p_hwfn, NULL,
176 (dma_addr_t)mz->iova,
178 fdir_filter->action.rx_queue,
180 if (rc == ECORE_SUCCESS) {
182 fdir->rx_queue = fdir_filter->action.rx_queue;
183 fdir->pkt_len = pkt_len;
185 SLIST_INSERT_HEAD(&qdev->fdir_info.fdir_list_head,
187 qdev->fdir_info.filter_count++;
188 DP_INFO(edev, "flowdir filter added, count = %d\n",
189 qdev->fdir_info.filter_count);
191 rte_memzone_free(tmp->mz);
192 SLIST_REMOVE(&qdev->fdir_info.fdir_list_head, tmp,
193 qede_fdir_entry, list);
194 rte_free(tmp); /* the node deleted */
195 rte_memzone_free(mz); /* temp node allocated */
196 qdev->fdir_info.filter_count--;
197 DP_INFO(edev, "Fdir filter deleted, count = %d\n",
198 qdev->fdir_info.filter_count);
201 DP_ERR(edev, "flowdir filter failed, rc=%d filter_count=%d\n",
202 rc, qdev->fdir_info.filter_count);
205 /* Disable ARFS searcher if there are no more filters */
206 if (qdev->fdir_info.filter_count == 0) {
207 memset(&qdev->fdir_info.arfs, 0,
208 sizeof(struct ecore_arfs_config_params));
209 DP_INFO(edev, "Disabling flowdir\n");
210 qdev->fdir_info.arfs.arfs_enable = false;
211 ecore_arfs_mode_configure(p_hwfn, p_hwfn->p_arfs_ptt,
212 &qdev->fdir_info.arfs);
217 rte_memzone_free(mz);
225 qede_fdir_filter_add(struct rte_eth_dev *eth_dev,
226 struct rte_eth_fdir_filter *fdir,
229 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
230 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
232 if (!QEDE_VALID_FLOW(fdir->input.flow_type)) {
233 DP_ERR(edev, "invalid flow_type input\n");
237 if (fdir->action.rx_queue >= QEDE_RSS_COUNT(qdev)) {
238 DP_ERR(edev, "invalid queue number %u\n",
239 fdir->action.rx_queue);
243 if (fdir->input.flow_ext.is_vf) {
244 DP_ERR(edev, "flowdir is not supported over VF\n");
248 return qede_config_cmn_fdir_filter(eth_dev, fdir, add);
251 /* Fills the L3/L4 headers and returns the actual length of flowdir packet */
253 qede_fdir_construct_pkt(struct rte_eth_dev *eth_dev,
254 struct rte_eth_fdir_filter *fdir,
256 struct ecore_arfs_config_params *params)
259 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
260 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
261 uint16_t *ether_type;
263 struct rte_eth_fdir_input *input;
264 static uint8_t vlan_frame[] = {0x81, 0, 0, 0};
266 struct ipv6_hdr *ip6;
270 static const uint8_t next_proto[] = {
271 [RTE_ETH_FLOW_NONFRAG_IPV4_TCP] = IPPROTO_TCP,
272 [RTE_ETH_FLOW_NONFRAG_IPV4_UDP] = IPPROTO_UDP,
273 [RTE_ETH_FLOW_NONFRAG_IPV6_TCP] = IPPROTO_TCP,
274 [RTE_ETH_FLOW_NONFRAG_IPV6_UDP] = IPPROTO_UDP,
276 raw_pkt = (uint8_t *)buff;
277 input = &fdir->input;
278 DP_INFO(edev, "flow_type %d\n", input->flow_type);
280 len = 2 * sizeof(struct ether_addr);
281 raw_pkt += 2 * sizeof(struct ether_addr);
282 if (input->flow_ext.vlan_tci) {
283 DP_INFO(edev, "adding VLAN header\n");
284 rte_memcpy(raw_pkt, vlan_frame, sizeof(vlan_frame));
285 rte_memcpy(raw_pkt + sizeof(uint16_t),
286 &input->flow_ext.vlan_tci,
288 raw_pkt += sizeof(vlan_frame);
289 len += sizeof(vlan_frame);
291 ether_type = (uint16_t *)raw_pkt;
292 raw_pkt += sizeof(uint16_t);
293 len += sizeof(uint16_t);
295 switch (input->flow_type) {
296 case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
297 case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
298 /* fill the common ip header */
299 ip = (struct ipv4_hdr *)raw_pkt;
300 *ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
301 ip->version_ihl = QEDE_FDIR_IP_DEFAULT_VERSION_IHL;
302 ip->total_length = sizeof(struct ipv4_hdr);
303 ip->next_proto_id = input->flow.ip4_flow.proto ?
304 input->flow.ip4_flow.proto :
305 next_proto[input->flow_type];
306 ip->time_to_live = input->flow.ip4_flow.ttl ?
307 input->flow.ip4_flow.ttl :
308 QEDE_FDIR_IPV4_DEF_TTL;
309 ip->type_of_service = input->flow.ip4_flow.tos;
310 ip->dst_addr = input->flow.ip4_flow.dst_ip;
311 ip->src_addr = input->flow.ip4_flow.src_ip;
312 len += sizeof(struct ipv4_hdr);
315 raw_pkt = (uint8_t *)buff;
317 if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_UDP) {
318 udp = (struct udp_hdr *)(raw_pkt + len);
319 udp->dst_port = input->flow.udp4_flow.dst_port;
320 udp->src_port = input->flow.udp4_flow.src_port;
321 udp->dgram_len = sizeof(struct udp_hdr);
322 len += sizeof(struct udp_hdr);
323 /* adjust ip total_length */
324 ip->total_length += sizeof(struct udp_hdr);
327 tcp = (struct tcp_hdr *)(raw_pkt + len);
328 tcp->src_port = input->flow.tcp4_flow.src_port;
329 tcp->dst_port = input->flow.tcp4_flow.dst_port;
330 tcp->data_off = QEDE_FDIR_TCP_DEFAULT_DATAOFF;
331 len += sizeof(struct tcp_hdr);
332 /* adjust ip total_length */
333 ip->total_length += sizeof(struct tcp_hdr);
337 case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
338 case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
339 ip6 = (struct ipv6_hdr *)raw_pkt;
340 *ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
341 ip6->proto = input->flow.ipv6_flow.proto ?
342 input->flow.ipv6_flow.proto :
343 next_proto[input->flow_type];
345 rte_cpu_to_be_32(QEDE_FDIR_IPV6_DEFAULT_VTC_FLOW);
346 rte_memcpy(&ip6->src_addr, &input->flow.ipv6_flow.src_ip,
348 rte_memcpy(&ip6->dst_addr, &input->flow.ipv6_flow.dst_ip,
350 len += sizeof(struct ipv6_hdr);
353 raw_pkt = (uint8_t *)buff;
355 if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_UDP) {
356 udp = (struct udp_hdr *)(raw_pkt + len);
357 udp->src_port = input->flow.udp6_flow.src_port;
358 udp->dst_port = input->flow.udp6_flow.dst_port;
359 len += sizeof(struct udp_hdr);
362 tcp = (struct tcp_hdr *)(raw_pkt + len);
363 tcp->src_port = input->flow.tcp4_flow.src_port;
364 tcp->dst_port = input->flow.tcp4_flow.dst_port;
365 tcp->data_off = QEDE_FDIR_TCP_DEFAULT_DATAOFF;
366 len += sizeof(struct tcp_hdr);
371 DP_ERR(edev, "Unsupported flow_type %u\n",
380 qede_fdir_filter_conf(struct rte_eth_dev *eth_dev,
381 enum rte_filter_op filter_op,
384 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
385 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
386 struct rte_eth_fdir_filter *fdir;
389 fdir = (struct rte_eth_fdir_filter *)arg;
391 case RTE_ETH_FILTER_NOP:
392 /* Typically used to query flowdir support */
393 if (ECORE_IS_CMT(edev)) {
394 DP_ERR(edev, "flowdir is not supported in 100G mode\n");
397 return 0; /* means supported */
398 case RTE_ETH_FILTER_ADD:
399 ret = qede_fdir_filter_add(eth_dev, fdir, 1);
401 case RTE_ETH_FILTER_DELETE:
402 ret = qede_fdir_filter_add(eth_dev, fdir, 0);
404 case RTE_ETH_FILTER_FLUSH:
405 case RTE_ETH_FILTER_UPDATE:
406 case RTE_ETH_FILTER_INFO:
410 DP_ERR(edev, "unknown operation %u", filter_op);
417 int qede_ntuple_filter_conf(struct rte_eth_dev *eth_dev,
418 enum rte_filter_op filter_op,
421 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
422 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
423 struct rte_eth_ntuple_filter *ntuple;
424 struct rte_eth_fdir_filter fdir_entry;
425 struct rte_eth_tcpv4_flow *tcpv4_flow;
426 struct rte_eth_udpv4_flow *udpv4_flow;
430 case RTE_ETH_FILTER_NOP:
431 /* Typically used to query fdir support */
432 if (ECORE_IS_CMT(edev)) {
433 DP_ERR(edev, "flowdir is not supported in 100G mode\n");
436 return 0; /* means supported */
437 case RTE_ETH_FILTER_ADD:
440 case RTE_ETH_FILTER_DELETE:
442 case RTE_ETH_FILTER_INFO:
443 case RTE_ETH_FILTER_GET:
444 case RTE_ETH_FILTER_UPDATE:
445 case RTE_ETH_FILTER_FLUSH:
446 case RTE_ETH_FILTER_SET:
447 case RTE_ETH_FILTER_STATS:
448 case RTE_ETH_FILTER_OP_MAX:
449 DP_ERR(edev, "Unsupported filter_op %d\n", filter_op);
452 ntuple = (struct rte_eth_ntuple_filter *)arg;
453 /* Internally convert ntuple to fdir entry */
454 memset(&fdir_entry, 0, sizeof(fdir_entry));
455 if (ntuple->proto == IPPROTO_TCP) {
456 fdir_entry.input.flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_TCP;
457 tcpv4_flow = &fdir_entry.input.flow.tcp4_flow;
458 tcpv4_flow->ip.src_ip = ntuple->src_ip;
459 tcpv4_flow->ip.dst_ip = ntuple->dst_ip;
460 tcpv4_flow->ip.proto = IPPROTO_TCP;
461 tcpv4_flow->src_port = ntuple->src_port;
462 tcpv4_flow->dst_port = ntuple->dst_port;
464 fdir_entry.input.flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_UDP;
465 udpv4_flow = &fdir_entry.input.flow.udp4_flow;
466 udpv4_flow->ip.src_ip = ntuple->src_ip;
467 udpv4_flow->ip.dst_ip = ntuple->dst_ip;
468 udpv4_flow->ip.proto = IPPROTO_TCP;
469 udpv4_flow->src_port = ntuple->src_port;
470 udpv4_flow->dst_port = ntuple->dst_port;
473 fdir_entry.action.rx_queue = ntuple->queue;
475 return qede_config_cmn_fdir_filter(eth_dev, &fdir_entry, add);