/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2017 Cavium Inc.
 * All rights reserved.
 * www.cavium.com
 */

#include <rte_udp.h>
#include <rte_tcp.h>
#include <rte_sctp.h>
#include <rte_errno.h>

#include "qede_ethdev.h"

#define IP_VERSION                              (0x40)
#define IP_HDRLEN                               (0x5)
#define QEDE_FDIR_IP_DEFAULT_VERSION_IHL        (IP_VERSION | IP_HDRLEN)
#define QEDE_FDIR_TCP_DEFAULT_DATAOFF           (0x50)
#define QEDE_FDIR_IPV4_DEF_TTL                  (64)
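/* 0x45 in version_ihl encodes IPv4 with a 5-word (20-byte) header; 0x50 in the
 * TCP data offset field likewise encodes a 5-word (20-byte) header, no options.
 */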

/* Sum of length of header types of L2, L3, L4.
 * L2 : ether_hdr + vlan_hdr + vxlan_hdr
 * L3 : ipv6_hdr
 * L4 : tcp_hdr
 */
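/* 14 (ether) + 4 (vlan) + 8 (vxlan) + 40 (ipv6) + 20 (tcp) = 86 bytes */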
#define QEDE_MAX_FDIR_PKT_LEN                   (86)

#ifndef IPV6_ADDR_LEN
#define IPV6_ADDR_LEN                           (16)
#endif

#define QEDE_VALID_FLOW(flow_type) \
        ((flow_type) == RTE_ETH_FLOW_NONFRAG_IPV4_TCP   || \
        (flow_type) == RTE_ETH_FLOW_NONFRAG_IPV4_UDP    || \
        (flow_type) == RTE_ETH_FLOW_NONFRAG_IPV6_TCP    || \
        (flow_type) == RTE_ETH_FLOW_NONFRAG_IPV6_UDP)

/* Note: Flowdir support is only partial.
 * For example: drop_queue, FDIR masks and flex_conf are not supported.
 * Parameters like pballoc/status fields are irrelevant here.
 */
int qede_check_fdir_support(struct rte_eth_dev *eth_dev)
{
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        struct rte_fdir_conf *fdir = &eth_dev->data->dev_conf.fdir_conf;

        /* check FDIR modes */
        switch (fdir->mode) {
        case RTE_FDIR_MODE_NONE:
                qdev->fdir_info.arfs.arfs_enable = false;
                DP_INFO(edev, "flowdir is disabled\n");
        break;
        case RTE_FDIR_MODE_PERFECT:
                if (ECORE_IS_CMT(edev)) {
                        DP_ERR(edev, "flowdir is not supported in 100G mode\n");
                        qdev->fdir_info.arfs.arfs_enable = false;
                        return -ENOTSUP;
                }
                qdev->fdir_info.arfs.arfs_enable = true;
                DP_INFO(edev, "flowdir is enabled\n");
        break;
        case RTE_FDIR_MODE_PERFECT_TUNNEL:
        case RTE_FDIR_MODE_SIGNATURE:
        case RTE_FDIR_MODE_PERFECT_MAC_VLAN:
                DP_ERR(edev, "Unsupported flowdir mode %d\n", fdir->mode);
                return -ENOTSUP;
        }

        return 0;
}

void qede_fdir_dealloc_resc(struct rte_eth_dev *eth_dev)
{
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct qede_fdir_entry *tmp = NULL;

        /* Pop entries one at a time; SLIST_FOREACH must not be used here
         * because it would dereference a node after it has been freed.
         */
        while (!SLIST_EMPTY(&qdev->fdir_info.fdir_list_head)) {
                tmp = SLIST_FIRST(&qdev->fdir_info.fdir_list_head);
                SLIST_REMOVE_HEAD(&qdev->fdir_info.fdir_list_head, list);
                if (tmp->mz)
                        rte_memzone_free(tmp->mz);
                rte_free(tmp);
        }
}

static int
qede_config_cmn_fdir_filter(struct rte_eth_dev *eth_dev,
                            struct rte_eth_fdir_filter *fdir_filter,
                            bool add)
{
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        char mz_name[RTE_MEMZONE_NAMESIZE] = {0};
        struct qede_fdir_entry *tmp = NULL;
        struct qede_fdir_entry *fdir = NULL;
        const struct rte_memzone *mz;
        struct ecore_hwfn *p_hwfn;
        enum _ecore_status_t rc;
        uint16_t pkt_len;
        void *pkt;

        if (add) {
                if (qdev->fdir_info.filter_count == QEDE_RFS_MAX_FLTR - 1) {
                        DP_ERR(edev, "Reached max flowdir filter limit\n");
                        return -EINVAL;
                }
                fdir = rte_malloc(NULL, sizeof(struct qede_fdir_entry),
                                  RTE_CACHE_LINE_SIZE);
                if (!fdir) {
                        DP_ERR(edev, "Did not allocate memory for fdir\n");
                        return -ENOMEM;
                }
        }
        /* soft_id could have been used as memzone string, but soft_id is
         * not currently used so it has no significance.
         */
        snprintf(mz_name, sizeof(mz_name) - 1, "%lx",
                 (unsigned long)rte_get_timer_cycles());
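        /* Reserve a DMA-able memzone to hold the template packet; its IOVA is
         * handed to the firmware in ecore_configure_rfs_ntuple_filter() below.
         */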
        mz = rte_memzone_reserve_aligned(mz_name, QEDE_MAX_FDIR_PKT_LEN,
                                         SOCKET_ID_ANY, 0, RTE_CACHE_LINE_SIZE);
        if (!mz) {
                DP_ERR(edev, "Failed to allocate memzone for fdir, err = %s\n",
                       rte_strerror(rte_errno));
                rc = -rte_errno;
                goto err1;
        }

        pkt = mz->addr;
        memset(pkt, 0, QEDE_MAX_FDIR_PKT_LEN);
        pkt_len = qede_fdir_construct_pkt(eth_dev, fdir_filter, pkt,
                                          &qdev->fdir_info.arfs);
        if (pkt_len == 0) {
                rc = -EINVAL;
                goto err2;
        }
        DP_INFO(edev, "pkt_len = %u memzone = %s\n", pkt_len, mz_name);
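        /* The constructed packet doubles as the lookup key: an add is skipped
         * if an identical template already exists, and a delete must match an
         * existing entry byte for byte.
         */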
        if (add) {
                SLIST_FOREACH(tmp, &qdev->fdir_info.fdir_list_head, list) {
                        if (memcmp(tmp->mz->addr, pkt, pkt_len) == 0) {
                                DP_INFO(edev, "flowdir filter exists\n");
                                rc = 0;
                                goto err2;
                        }
                }
        } else {
                SLIST_FOREACH(tmp, &qdev->fdir_info.fdir_list_head, list) {
                        if (memcmp(tmp->mz->addr, pkt, pkt_len) == 0)
                                break;
                }
                if (!tmp) {
                        DP_ERR(edev, "flowdir filter does not exist\n");
                        rc = -ENOENT;
                        goto err2;
                }
        }
        p_hwfn = ECORE_LEADING_HWFN(edev);
        if (add) {
                if (!qdev->fdir_info.arfs.arfs_enable) {
                        /* Force update */
                        eth_dev->data->dev_conf.fdir_conf.mode =
                                                RTE_FDIR_MODE_PERFECT;
                        qdev->fdir_info.arfs.arfs_enable = true;
                        DP_INFO(edev, "Force enable flowdir in perfect mode\n");
                }
                /* Enable ARFS searcher with updated flow_types */
                ecore_arfs_mode_configure(p_hwfn, p_hwfn->p_arfs_ptt,
                                          &qdev->fdir_info.arfs);
        }
        /* configure filter with ECORE_SPQ_MODE_EBLOCK */
        rc = ecore_configure_rfs_ntuple_filter(p_hwfn, NULL,
                                               (dma_addr_t)mz->iova,
                                               pkt_len,
                                               fdir_filter->action.rx_queue,
                                               0, add);
        if (rc == ECORE_SUCCESS) {
                if (add) {
                        fdir->rx_queue = fdir_filter->action.rx_queue;
                        fdir->pkt_len = pkt_len;
                        fdir->mz = mz;
                        SLIST_INSERT_HEAD(&qdev->fdir_info.fdir_list_head,
                                          fdir, list);
                        qdev->fdir_info.filter_count++;
                        DP_INFO(edev, "flowdir filter added, count = %d\n",
                                qdev->fdir_info.filter_count);
                } else {
                        rte_memzone_free(tmp->mz);
                        SLIST_REMOVE(&qdev->fdir_info.fdir_list_head, tmp,
                                     qede_fdir_entry, list);
                        rte_free(tmp); /* free the deleted filter node */
                        rte_memzone_free(mz); /* free the temporary lookup memzone */
                        qdev->fdir_info.filter_count--;
                        DP_INFO(edev, "Fdir filter deleted, count = %d\n",
                                qdev->fdir_info.filter_count);
                }
        } else {
                DP_ERR(edev, "flowdir filter failed, rc=%d filter_count=%d\n",
                       rc, qdev->fdir_info.filter_count);
        }

        /* Disable ARFS searcher if there are no more filters */
        if (qdev->fdir_info.filter_count == 0) {
                memset(&qdev->fdir_info.arfs, 0,
                       sizeof(struct ecore_arfs_config_params));
                DP_INFO(edev, "Disabling flowdir\n");
                qdev->fdir_info.arfs.arfs_enable = false;
                ecore_arfs_mode_configure(p_hwfn, p_hwfn->p_arfs_ptt,
                                          &qdev->fdir_info.arfs);
        }
        return 0;

err2:
        rte_memzone_free(mz);
err1:
        if (add)
                rte_free(fdir);
        return rc;
}

static int
qede_fdir_filter_add(struct rte_eth_dev *eth_dev,
                     struct rte_eth_fdir_filter *fdir,
                     bool add)
{
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);

        if (!QEDE_VALID_FLOW(fdir->input.flow_type)) {
                DP_ERR(edev, "invalid flow_type input\n");
                return -EINVAL;
        }

        if (fdir->action.rx_queue >= QEDE_RSS_COUNT(qdev)) {
                DP_ERR(edev, "invalid queue number %u\n",
                       fdir->action.rx_queue);
                return -EINVAL;
        }

        if (fdir->input.flow_ext.is_vf) {
                DP_ERR(edev, "flowdir is not supported over VF\n");
                return -EINVAL;
        }

        return qede_config_cmn_fdir_filter(eth_dev, fdir, add);
}

/* Builds the flowdir template packet (EtherType/VLAN plus L3/L4 headers) and
 * returns its actual length.
 */
uint16_t
qede_fdir_construct_pkt(struct rte_eth_dev *eth_dev,
                        struct rte_eth_fdir_filter *fdir,
                        void *buff,
                        struct ecore_arfs_config_params *params)
{
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        uint16_t *ether_type;
        uint8_t *raw_pkt;
        struct rte_eth_fdir_input *input;
        static uint8_t vlan_frame[] = {0x81, 0, 0, 0};
        struct ipv4_hdr *ip;
        struct ipv6_hdr *ip6;
        struct udp_hdr *udp;
        struct tcp_hdr *tcp;
        uint16_t len;
        static const uint8_t next_proto[] = {
                [RTE_ETH_FLOW_NONFRAG_IPV4_TCP] = IPPROTO_TCP,
                [RTE_ETH_FLOW_NONFRAG_IPV4_UDP] = IPPROTO_UDP,
                [RTE_ETH_FLOW_NONFRAG_IPV6_TCP] = IPPROTO_TCP,
                [RTE_ETH_FLOW_NONFRAG_IPV6_UDP] = IPPROTO_UDP,
        };
        raw_pkt = (uint8_t *)buff;
        input = &fdir->input;
        DP_INFO(edev, "flow_type %d\n", input->flow_type);

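        /* Leave room for the (zeroed) destination and source MAC addresses */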
        len = 2 * sizeof(struct ether_addr);
        raw_pkt += 2 * sizeof(struct ether_addr);
        if (input->flow_ext.vlan_tci) {
                DP_INFO(edev, "adding VLAN header\n");
                rte_memcpy(raw_pkt, vlan_frame, sizeof(vlan_frame));
                rte_memcpy(raw_pkt + sizeof(uint16_t),
                           &input->flow_ext.vlan_tci,
                           sizeof(uint16_t));
                raw_pkt += sizeof(vlan_frame);
                len += sizeof(vlan_frame);
        }
        ether_type = (uint16_t *)raw_pkt;
        raw_pkt += sizeof(uint16_t);
        len += sizeof(uint16_t);

        switch (input->flow_type) {
        case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
        case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
                /* fill the common ip header */
                ip = (struct ipv4_hdr *)raw_pkt;
                *ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
                ip->version_ihl = QEDE_FDIR_IP_DEFAULT_VERSION_IHL;
                ip->total_length = sizeof(struct ipv4_hdr);
                ip->next_proto_id = input->flow.ip4_flow.proto ?
                                    input->flow.ip4_flow.proto :
                                    next_proto[input->flow_type];
                ip->time_to_live = input->flow.ip4_flow.ttl ?
                                   input->flow.ip4_flow.ttl :
                                   QEDE_FDIR_IPV4_DEF_TTL;
                ip->type_of_service = input->flow.ip4_flow.tos;
                ip->dst_addr = input->flow.ip4_flow.dst_ip;
                ip->src_addr = input->flow.ip4_flow.src_ip;
                len += sizeof(struct ipv4_hdr);
                params->ipv4 = true;

                raw_pkt = (uint8_t *)buff;
                /* UDP */
                if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_UDP) {
                        udp = (struct udp_hdr *)(raw_pkt + len);
                        udp->dst_port = input->flow.udp4_flow.dst_port;
                        udp->src_port = input->flow.udp4_flow.src_port;
                        udp->dgram_len = sizeof(struct udp_hdr);
                        len += sizeof(struct udp_hdr);
                        /* adjust ip total_length */
                        ip->total_length += sizeof(struct udp_hdr);
                        params->udp = true;
                } else { /* TCP */
                        tcp = (struct tcp_hdr *)(raw_pkt + len);
                        tcp->src_port = input->flow.tcp4_flow.src_port;
                        tcp->dst_port = input->flow.tcp4_flow.dst_port;
                        tcp->data_off = QEDE_FDIR_TCP_DEFAULT_DATAOFF;
                        len += sizeof(struct tcp_hdr);
                        /* adjust ip total_length */
                        ip->total_length += sizeof(struct tcp_hdr);
                        params->tcp = true;
                }
                break;
        case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
        case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
                ip6 = (struct ipv6_hdr *)raw_pkt;
                *ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
                ip6->proto = input->flow.ipv6_flow.proto ?
                                        input->flow.ipv6_flow.proto :
                                        next_proto[input->flow_type];
                rte_memcpy(&ip6->src_addr, &input->flow.ipv6_flow.src_ip,
                           IPV6_ADDR_LEN);
                rte_memcpy(&ip6->dst_addr, &input->flow.ipv6_flow.dst_ip,
                           IPV6_ADDR_LEN);
                len += sizeof(struct ipv6_hdr);

                raw_pkt = (uint8_t *)buff;
                /* UDP */
                if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_UDP) {
                        udp = (struct udp_hdr *)(raw_pkt + len);
                        udp->src_port = input->flow.udp6_flow.src_port;
                        udp->dst_port = input->flow.udp6_flow.dst_port;
                        len += sizeof(struct udp_hdr);
                        params->udp = true;
                } else { /* TCP */
                        tcp = (struct tcp_hdr *)(raw_pkt + len);
                        tcp->src_port = input->flow.tcp6_flow.src_port;
                        tcp->dst_port = input->flow.tcp6_flow.dst_port;
                        tcp->data_off = QEDE_FDIR_TCP_DEFAULT_DATAOFF;
                        len += sizeof(struct tcp_hdr);
                        params->tcp = true;
                }
                break;
        default:
                DP_ERR(edev, "Unsupported flow_type %u\n",
                       input->flow_type);
                return 0;
        }

        return len;
}

int
qede_fdir_filter_conf(struct rte_eth_dev *eth_dev,
                      enum rte_filter_op filter_op,
                      void *arg)
{
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        struct rte_eth_fdir_filter *fdir;
        int ret;

        fdir = (struct rte_eth_fdir_filter *)arg;
        switch (filter_op) {
        case RTE_ETH_FILTER_NOP:
                /* Typically used to query flowdir support */
                if (ECORE_IS_CMT(edev)) {
                        DP_ERR(edev, "flowdir is not supported in 100G mode\n");
                        return -ENOTSUP;
                }
                return 0; /* means supported */
        case RTE_ETH_FILTER_ADD:
                ret = qede_fdir_filter_add(eth_dev, fdir, 1);
        break;
        case RTE_ETH_FILTER_DELETE:
                ret = qede_fdir_filter_add(eth_dev, fdir, 0);
        break;
        case RTE_ETH_FILTER_FLUSH:
        case RTE_ETH_FILTER_UPDATE:
        case RTE_ETH_FILTER_INFO:
                return -ENOTSUP;
        break;
        default:
                DP_ERR(edev, "unknown operation %u\n", filter_op);
                ret = -EINVAL;
        }

        return ret;
}

int qede_ntuple_filter_conf(struct rte_eth_dev *eth_dev,
                            enum rte_filter_op filter_op,
                            void *arg)
{
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        struct rte_eth_ntuple_filter *ntuple;
        struct rte_eth_fdir_filter fdir_entry;
        struct rte_eth_tcpv4_flow *tcpv4_flow;
        struct rte_eth_udpv4_flow *udpv4_flow;
        bool add = false;

        switch (filter_op) {
        case RTE_ETH_FILTER_NOP:
                /* Typically used to query fdir support */
                if (ECORE_IS_CMT(edev)) {
                        DP_ERR(edev, "flowdir is not supported in 100G mode\n");
                        return -ENOTSUP;
                }
                return 0; /* means supported */
        case RTE_ETH_FILTER_ADD:
                add = true;
        break;
        case RTE_ETH_FILTER_DELETE:
        break;
        case RTE_ETH_FILTER_INFO:
        case RTE_ETH_FILTER_GET:
        case RTE_ETH_FILTER_UPDATE:
        case RTE_ETH_FILTER_FLUSH:
        case RTE_ETH_FILTER_SET:
        case RTE_ETH_FILTER_STATS:
        case RTE_ETH_FILTER_OP_MAX:
                DP_ERR(edev, "Unsupported filter_op %d\n", filter_op);
                return -ENOTSUP;
        }
        ntuple = (struct rte_eth_ntuple_filter *)arg;
        /* Internally convert ntuple to fdir entry */
        memset(&fdir_entry, 0, sizeof(fdir_entry));
        if (ntuple->proto == IPPROTO_TCP) {
                fdir_entry.input.flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_TCP;
                tcpv4_flow = &fdir_entry.input.flow.tcp4_flow;
                tcpv4_flow->ip.src_ip = ntuple->src_ip;
                tcpv4_flow->ip.dst_ip = ntuple->dst_ip;
                tcpv4_flow->ip.proto = IPPROTO_TCP;
                tcpv4_flow->src_port = ntuple->src_port;
                tcpv4_flow->dst_port = ntuple->dst_port;
        } else {
                fdir_entry.input.flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_UDP;
                udpv4_flow = &fdir_entry.input.flow.udp4_flow;
                udpv4_flow->ip.src_ip = ntuple->src_ip;
                udpv4_flow->ip.dst_ip = ntuple->dst_ip;
                udpv4_flow->ip.proto = IPPROTO_UDP;
                udpv4_flow->src_port = ntuple->src_port;
                udpv4_flow->dst_port = ntuple->dst_port;
        }

        fdir_entry.action.rx_queue = ntuple->queue;

        return qede_config_cmn_fdir_filter(eth_dev, &fdir_entry, add);
}
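
/* Illustrative usage (not part of the driver): an application typically reaches
 * qede_fdir_filter_conf() through the legacy filter-control API, roughly as in
 * the sketch below. The variables port_id, dst_ip_be and dst_port_be are
 * placeholders; addresses and ports are expected in network byte order since
 * they are copied verbatim into the template packet above.
 *
 *   struct rte_eth_fdir_filter f;
 *
 *   memset(&f, 0, sizeof(f));
 *   f.input.flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_UDP;
 *   f.input.flow.udp4_flow.ip.dst_ip = dst_ip_be;
 *   f.input.flow.udp4_flow.dst_port = dst_port_be;
 *   f.action.rx_queue = 1;
 *   rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR,
 *                           RTE_ETH_FILTER_ADD, &f);
 */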