/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2008-2017 Cisco Systems, Inc.  All rights reserved.
 * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
 */

#include <rte_ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_hash.h>
#include <rte_byteorder.h>
#include <rte_ip.h>
#include <rte_tcp.h>
#include <rte_udp.h>
#include <rte_sctp.h>
#include <rte_eth_ctrl.h>

#include "enic_compat.h"
#include "enic.h"
#include "wq_enet_desc.h"
#include "rq_enet_desc.h"
#include "cq_enet_desc.h"
#include "vnic_enet.h"
#include "vnic_dev.h"
#include "vnic_wq.h"
#include "vnic_rq.h"
#include "vnic_cq.h"
#include "vnic_intr.h"
#include "vnic_nic.h"

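/* Pick the hash for the software classifier table: hardware CRC32 on
 * x86, Jenkins hash elsewhere. This affects only the host-side lookup
 * table, not what the NIC matches on.
 */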
#ifdef RTE_ARCH_X86
#include <rte_hash_crc.h>
#define DEFAULT_HASH_FUNC       rte_hash_crc
#else
#include <rte_jhash.h>
#define DEFAULT_HASH_FUNC       rte_jhash
#endif

#define ENICPMD_CLSF_HASH_ENTRIES       ENICPMD_FDIR_MAX

static void copy_fltr_v1(struct filter_v2 *fltr,
		const struct rte_eth_fdir_input *input,
		const struct rte_eth_fdir_masks *masks);
static void copy_fltr_v2(struct filter_v2 *fltr,
		const struct rte_eth_fdir_input *input,
		const struct rte_eth_fdir_masks *masks);

void enic_fdir_stats_get(struct enic *enic, struct rte_eth_fdir_stats *stats)
{
	*stats = enic->fdir.stats;
}

void enic_fdir_info_get(struct enic *enic, struct rte_eth_fdir_info *info)
{
	info->mode = (enum rte_fdir_mode)enic->fdir.modes;
	info->flow_types_mask[0] = enic->fdir.types_mask;
}

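/* Record the filter modes and flow types this adapter supports. Basic
 * VICs match IPv4 TCP/UDP 5-tuples only; adapters with advanced filter
 * support also match SCTP, other-IPv4, and the IPv6 flow types.
 */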
void enic_fdir_info(struct enic *enic)
{
	enic->fdir.modes = (u32)RTE_FDIR_MODE_PERFECT;
	enic->fdir.types_mask = 1 << RTE_ETH_FLOW_NONFRAG_IPV4_UDP |
				1 << RTE_ETH_FLOW_NONFRAG_IPV4_TCP;
	if (enic->adv_filters) {
		enic->fdir.types_mask |= 1 << RTE_ETH_FLOW_NONFRAG_IPV4_OTHER |
					 1 << RTE_ETH_FLOW_NONFRAG_IPV4_SCTP |
					 1 << RTE_ETH_FLOW_NONFRAG_IPV6_UDP |
					 1 << RTE_ETH_FLOW_NONFRAG_IPV6_TCP |
					 1 << RTE_ETH_FLOW_NONFRAG_IPV6_SCTP |
					 1 << RTE_ETH_FLOW_NONFRAG_IPV6_OTHER;
		enic->fdir.copy_fltr_fn = copy_fltr_v2;
	} else {
		enic->fdir.copy_fltr_fn = copy_fltr_v1;
	}
}

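/* Enable matching on one header layer of a generic filter: set the
 * given flag in both mask_flags and val_flags, and copy the caller's
 * mask/value pair into that layer's match arrays.
 */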
static void
enic_set_layer(struct filter_generic_1 *gp, unsigned int flag,
	       enum filter_generic_1_layer layer, void *mask, void *val,
	       unsigned int len)
{
	gp->mask_flags |= flag;
	gp->val_flags |= gp->mask_flags;
	memcpy(gp->layer[layer].mask, mask, len);
	memcpy(gp->layer[layer].val, val, len);
}

/* Copy Flow Director filter to a VIC ipv4 filter (for Cisco VICs
 * without advanced filter support).
 */
static void
copy_fltr_v1(struct filter_v2 *fltr, const struct rte_eth_fdir_input *input,
	     __rte_unused const struct rte_eth_fdir_masks *masks)
{
	fltr->type = FILTER_IPV4_5TUPLE;
	fltr->u.ipv4.src_addr = rte_be_to_cpu_32(
		input->flow.ip4_flow.src_ip);
	fltr->u.ipv4.dst_addr = rte_be_to_cpu_32(
		input->flow.ip4_flow.dst_ip);
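	/* tcp4_flow and udp4_flow overlay the same port layout inside
	 * the input union, so reading via udp4_flow covers both flow
	 * types here.
	 */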
	fltr->u.ipv4.src_port = rte_be_to_cpu_16(
		input->flow.udp4_flow.src_port);
	fltr->u.ipv4.dst_port = rte_be_to_cpu_16(
		input->flow.udp4_flow.dst_port);

	if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_TCP)
		fltr->u.ipv4.protocol = PROTO_TCP;
	else
		fltr->u.ipv4.protocol = PROTO_UDP;

	fltr->u.ipv4.flags = FILTER_FIELDS_IPV4_5TUPLE;
}

/* Copy Flow Director filter to a VIC generic filter (requires advanced
 * filter support).
 */
static void
copy_fltr_v2(struct filter_v2 *fltr, const struct rte_eth_fdir_input *input,
	     const struct rte_eth_fdir_masks *masks)
{
	struct filter_generic_1 *gp = &fltr->u.generic_1;

	fltr->type = FILTER_DPDK_1;
	memset(gp, 0, sizeof(*gp));

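	/* Only fields the caller filled in are matched; zero-valued
	 * fields are left unmasked, i.e. wildcarded.
	 */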
	if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_UDP) {
		struct udp_hdr udp_mask, udp_val;
		memset(&udp_mask, 0, sizeof(udp_mask));
		memset(&udp_val, 0, sizeof(udp_val));

		if (input->flow.udp4_flow.src_port) {
			udp_mask.src_port = masks->src_port_mask;
			udp_val.src_port = input->flow.udp4_flow.src_port;
		}
		if (input->flow.udp4_flow.dst_port) {
			udp_mask.dst_port = masks->dst_port_mask;
			udp_val.dst_port = input->flow.udp4_flow.dst_port;
		}

		enic_set_layer(gp, FILTER_GENERIC_1_UDP, FILTER_GENERIC_1_L4,
			       &udp_mask, &udp_val, sizeof(struct udp_hdr));
	} else if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_TCP) {
		struct tcp_hdr tcp_mask, tcp_val;
		memset(&tcp_mask, 0, sizeof(tcp_mask));
		memset(&tcp_val, 0, sizeof(tcp_val));

		if (input->flow.tcp4_flow.src_port) {
			tcp_mask.src_port = masks->src_port_mask;
			tcp_val.src_port = input->flow.tcp4_flow.src_port;
		}
		if (input->flow.tcp4_flow.dst_port) {
			tcp_mask.dst_port = masks->dst_port_mask;
			tcp_val.dst_port = input->flow.tcp4_flow.dst_port;
		}

		enic_set_layer(gp, FILTER_GENERIC_1_TCP, FILTER_GENERIC_1_L4,
			       &tcp_mask, &tcp_val, sizeof(struct tcp_hdr));
	} else if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_SCTP) {
		struct sctp_hdr sctp_mask, sctp_val;
		memset(&sctp_mask, 0, sizeof(sctp_mask));
		memset(&sctp_val, 0, sizeof(sctp_val));

		if (input->flow.sctp4_flow.src_port) {
			sctp_mask.src_port = masks->src_port_mask;
			sctp_val.src_port = input->flow.sctp4_flow.src_port;
		}
		if (input->flow.sctp4_flow.dst_port) {
			sctp_mask.dst_port = masks->dst_port_mask;
			sctp_val.dst_port = input->flow.sctp4_flow.dst_port;
		}
		if (input->flow.sctp4_flow.verify_tag) {
			sctp_mask.tag = 0xffffffff;
			sctp_val.tag = input->flow.sctp4_flow.verify_tag;
		}

		/*
		 * Unlike UDP/TCP (FILTER_GENERIC_1_{UDP,TCP}), the firmware
		 * has no "packet is SCTP" flag. Use flag=0 (generic L4) and
		 * manually set proto_id=sctp below.
		 */
		enic_set_layer(gp, 0, FILTER_GENERIC_1_L4, &sctp_mask,
			       &sctp_val, sizeof(struct sctp_hdr));
	}

	if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_UDP ||
	    input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_TCP ||
	    input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_SCTP ||
	    input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_OTHER) {
		struct ipv4_hdr ip4_mask, ip4_val;
		memset(&ip4_mask, 0, sizeof(struct ipv4_hdr));
		memset(&ip4_val, 0, sizeof(struct ipv4_hdr));

		if (input->flow.ip4_flow.tos) {
			ip4_mask.type_of_service = masks->ipv4_mask.tos;
			ip4_val.type_of_service = input->flow.ip4_flow.tos;
		}
		if (input->flow.ip4_flow.ttl) {
			ip4_mask.time_to_live = masks->ipv4_mask.ttl;
			ip4_val.time_to_live = input->flow.ip4_flow.ttl;
		}
		if (input->flow.ip4_flow.proto) {
			ip4_mask.next_proto_id = masks->ipv4_mask.proto;
			ip4_val.next_proto_id = input->flow.ip4_flow.proto;
		} else if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_SCTP) {
			/* Explicitly match the SCTP protocol number */
			ip4_mask.next_proto_id = 0xff;
			ip4_val.next_proto_id = IPPROTO_SCTP;
		}
		if (input->flow.ip4_flow.src_ip) {
			ip4_mask.src_addr = masks->ipv4_mask.src_ip;
			ip4_val.src_addr = input->flow.ip4_flow.src_ip;
		}
		if (input->flow.ip4_flow.dst_ip) {
			ip4_mask.dst_addr = masks->ipv4_mask.dst_ip;
			ip4_val.dst_addr = input->flow.ip4_flow.dst_ip;
		}

		enic_set_layer(gp, FILTER_GENERIC_1_IPV4, FILTER_GENERIC_1_L3,
			       &ip4_mask, &ip4_val, sizeof(struct ipv4_hdr));
	}

	if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_UDP) {
		struct udp_hdr udp_mask, udp_val;
		memset(&udp_mask, 0, sizeof(udp_mask));
		memset(&udp_val, 0, sizeof(udp_val));

		if (input->flow.udp6_flow.src_port) {
			udp_mask.src_port = masks->src_port_mask;
			udp_val.src_port = input->flow.udp6_flow.src_port;
		}
		if (input->flow.udp6_flow.dst_port) {
			udp_mask.dst_port = masks->dst_port_mask;
			udp_val.dst_port = input->flow.udp6_flow.dst_port;
		}
		enic_set_layer(gp, FILTER_GENERIC_1_UDP, FILTER_GENERIC_1_L4,
			       &udp_mask, &udp_val, sizeof(struct udp_hdr));
	} else if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_TCP) {
		struct tcp_hdr tcp_mask, tcp_val;
		memset(&tcp_mask, 0, sizeof(tcp_mask));
		memset(&tcp_val, 0, sizeof(tcp_val));

		if (input->flow.tcp6_flow.src_port) {
			tcp_mask.src_port = masks->src_port_mask;
			tcp_val.src_port = input->flow.tcp6_flow.src_port;
		}
		if (input->flow.tcp6_flow.dst_port) {
			tcp_mask.dst_port = masks->dst_port_mask;
			tcp_val.dst_port = input->flow.tcp6_flow.dst_port;
		}
		enic_set_layer(gp, FILTER_GENERIC_1_TCP, FILTER_GENERIC_1_L4,
			       &tcp_mask, &tcp_val, sizeof(struct tcp_hdr));
	} else if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_SCTP) {
		struct sctp_hdr sctp_mask, sctp_val;
		memset(&sctp_mask, 0, sizeof(sctp_mask));
		memset(&sctp_val, 0, sizeof(sctp_val));

		if (input->flow.sctp6_flow.src_port) {
			sctp_mask.src_port = masks->src_port_mask;
			sctp_val.src_port = input->flow.sctp6_flow.src_port;
		}
		if (input->flow.sctp6_flow.dst_port) {
			sctp_mask.dst_port = masks->dst_port_mask;
			sctp_val.dst_port = input->flow.sctp6_flow.dst_port;
		}
		if (input->flow.sctp6_flow.verify_tag) {
			sctp_mask.tag = 0xffffffff;
			sctp_val.tag = input->flow.sctp6_flow.verify_tag;
		}

		enic_set_layer(gp, 0, FILTER_GENERIC_1_L4, &sctp_mask,
			       &sctp_val, sizeof(struct sctp_hdr));
	}

	if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_UDP ||
	    input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_TCP ||
	    input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_SCTP ||
	    input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_OTHER) {
		struct ipv6_hdr ipv6_mask, ipv6_val;
		memset(&ipv6_mask, 0, sizeof(struct ipv6_hdr));
		memset(&ipv6_val, 0, sizeof(struct ipv6_hdr));

		if (input->flow.ipv6_flow.proto) {
			ipv6_mask.proto = masks->ipv6_mask.proto;
			ipv6_val.proto = input->flow.ipv6_flow.proto;
		} else if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_SCTP) {
			/* See comments for IPv4 SCTP above. */
			ipv6_mask.proto = 0xff;
			ipv6_val.proto = IPPROTO_SCTP;
		}
		memcpy(ipv6_mask.src_addr, masks->ipv6_mask.src_ip,
		       sizeof(ipv6_mask.src_addr));
		memcpy(ipv6_val.src_addr, input->flow.ipv6_flow.src_ip,
		       sizeof(ipv6_val.src_addr));
		memcpy(ipv6_mask.dst_addr, masks->ipv6_mask.dst_ip,
		       sizeof(ipv6_mask.dst_addr));
		memcpy(ipv6_val.dst_addr, input->flow.ipv6_flow.dst_ip,
		       sizeof(ipv6_val.dst_addr));
		if (input->flow.ipv6_flow.tc) {
			ipv6_mask.vtc_flow = masks->ipv6_mask.tc << 12;
			ipv6_val.vtc_flow = input->flow.ipv6_flow.tc << 12;
		}
		if (input->flow.ipv6_flow.hop_limits) {
			ipv6_mask.hop_limits = masks->ipv6_mask.hop_limits;
			ipv6_val.hop_limits = input->flow.ipv6_flow.hop_limits;
		}

		enic_set_layer(gp, FILTER_GENERIC_1_IPV6, FILTER_GENERIC_1_L3,
			       &ipv6_mask, &ipv6_val, sizeof(struct ipv6_hdr));
	}
}

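/* Delete a Flow Director filter: remove the key from the software
 * hash table and, if it was present, ask the firmware to drop the
 * matching classifier entry.
 */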
int enic_fdir_del_fltr(struct enic *enic, struct rte_eth_fdir_filter *params)
{
	int32_t pos;
	struct enic_fdir_node *key;
	/* See if the key is in the table */
	pos = rte_hash_del_key(enic->fdir.hash, params);
	switch (pos) {
	case -EINVAL:
	case -ENOENT:
		enic->fdir.stats.f_remove++;
		return -EINVAL;
	default:
		/* The entry is present in the table */
		key = enic->fdir.nodes[pos];

		/* Delete the filter */
		vnic_dev_classifier(enic->vdev, CLSF_DEL,
			&key->fltr_id, NULL, NULL);
		rte_free(key);
		enic->fdir.nodes[pos] = NULL;
		enic->fdir.stats.free++;
		enic->fdir.stats.remove++;
		break;
	}
	return 0;
}

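/* Add (or update) a Flow Director filter. Requests the VIC cannot
 * honor (VLAN or flexbytes matches, unsupported flow types, drop
 * actions) are rejected; otherwise the NIC classifier is programmed
 * and the entry is mirrored in the software hash table.
 */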
int enic_fdir_add_fltr(struct enic *enic, struct rte_eth_fdir_filter *params)
{
	struct enic_fdir_node *key;
	struct filter_v2 fltr;
	int32_t pos;
	u8 do_free = 0;
	u16 old_fltr_id = 0;
	u32 flowtype_supported;
	u16 flex_bytes;
	u16 queue;
	struct filter_action_v2 action;

	memset(&fltr, 0, sizeof(fltr));
	memset(&action, 0, sizeof(action));
	flowtype_supported = enic->fdir.types_mask
			     & (1 << params->input.flow_type);

	flex_bytes = ((params->input.flow_ext.flexbytes[1] << 8 & 0xFF00) |
		(params->input.flow_ext.flexbytes[0] & 0xFF));

	if (!enic->fdir.hash ||
		(params->input.flow_ext.vlan_tci & 0xFFF) ||
		!flowtype_supported || flex_bytes ||
		params->action.behavior /* drop */) {
		enic->fdir.stats.f_add++;
		return -ENOTSUP;
	}

	/* Get the enicpmd RQ from the DPDK Rx queue */
	queue = enic_rte_rq_idx_to_sop_idx(params->action.rx_queue);

	if (!enic->rq[queue].in_use)
		return -EINVAL;

	/* See if the key is already there in the table */
	pos = rte_hash_del_key(enic->fdir.hash, params);
	switch (pos) {
	case -EINVAL:
		enic->fdir.stats.f_add++;
		return -EINVAL;
	case -ENOENT:
		/* Add a new classifier entry */
		if (!enic->fdir.stats.free) {
			enic->fdir.stats.f_add++;
			return -ENOSPC;
		}
		key = rte_zmalloc("enic_fdir_node",
				  sizeof(struct enic_fdir_node), 0);
		if (!key) {
			enic->fdir.stats.f_add++;
			return -ENOMEM;
		}
		break;
	default:
		/* The entry is already present in the table.
		 * Check if there is a change in queue
		 */
		key = enic->fdir.nodes[pos];
		enic->fdir.nodes[pos] = NULL;
		if (unlikely(key->rq_index == queue)) {
			/* Nothing to be done */
			enic->fdir.stats.f_add++;
			pos = rte_hash_add_key(enic->fdir.hash, params);
			if (pos < 0) {
				dev_err(enic, "Add hash key failed\n");
				return pos;
			}
			enic->fdir.nodes[pos] = key;
			dev_warning(enic,
				"FDIR rule is already present\n");
			return 0;
		}

		if (likely(enic->fdir.stats.free)) {
			/* Add the filter and then delete the old one.
			 * This is to avoid packets from going into the
			 * default queue during the window between
			 * delete and add
			 */
			do_free = 1;
			old_fltr_id = key->fltr_id;
		} else {
			/* No free slots in the classifier.
			 * Delete the filter and add the modified one later
			 */
			vnic_dev_classifier(enic->vdev, CLSF_DEL,
				&key->fltr_id, NULL, NULL);
			enic->fdir.stats.free++;
		}

		break;
	}

	key->filter = *params;
	key->rq_index = queue;

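	/* copy_fltr_fn is copy_fltr_v1 or copy_fltr_v2, chosen in
	 * enic_fdir_info() based on advanced filter support.
	 */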
	enic->fdir.copy_fltr_fn(&fltr, &params->input,
				&enic->rte_dev->data->dev_conf.fdir_conf.mask);
	action.type = FILTER_ACTION_RQ_STEERING;
	action.rq_idx = queue;

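	/* On CLSF_ADD the new entry's id is returned through the queue
	 * argument; it then serves as the filter id.
	 */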
	if (!vnic_dev_classifier(enic->vdev, CLSF_ADD, &queue, &fltr,
	    &action)) {
		key->fltr_id = queue;
	} else {
		dev_err(enic, "Add classifier entry failed\n");
		enic->fdir.stats.f_add++;
		rte_free(key);
		return -1;
	}

	if (do_free)
		vnic_dev_classifier(enic->vdev, CLSF_DEL, &old_fltr_id, NULL,
				    NULL);
	else {
		enic->fdir.stats.free--;
		enic->fdir.stats.add++;
	}

	pos = rte_hash_add_key(enic->fdir.hash, params);
	if (pos < 0) {
		enic->fdir.stats.f_add++;
		dev_err(enic, "Add hash key failed\n");
		return pos;
	}

	enic->fdir.nodes[pos] = key;
	return 0;
}

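/* Tear down the classifier: delete every outstanding filter from the
 * NIC, free the per-filter nodes, and release the hash table.
 */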
void enic_clsf_destroy(struct enic *enic)
{
	u32 index;
	struct enic_fdir_node *key;
	/* delete classifier entries */
	for (index = 0; index < ENICPMD_FDIR_MAX; index++) {
		key = enic->fdir.nodes[index];
		if (key) {
			vnic_dev_classifier(enic->vdev, CLSF_DEL,
				&key->fltr_id, NULL, NULL);
			rte_free(key);
			enic->fdir.nodes[index] = NULL;
		}
	}

	if (enic->fdir.hash) {
		rte_hash_free(enic->fdir.hash);
		enic->fdir.hash = NULL;
	}
}

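/* Create the software hash table that shadows the NIC classifier.
 * Returns nonzero if the table could not be created.
 */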
int enic_clsf_init(struct enic *enic)
{
	char clsf_name[RTE_HASH_NAMESIZE];
	struct rte_hash_parameters hash_params = {
		.name = clsf_name,
		.entries = ENICPMD_CLSF_HASH_ENTRIES,
		.key_len = sizeof(struct rte_eth_fdir_filter),
		.hash_func = DEFAULT_HASH_FUNC,
		.hash_func_init_val = 0,
		.socket_id = SOCKET_ID_ANY,
	};
	snprintf(clsf_name, RTE_HASH_NAMESIZE, "enic_clsf_%s", enic->bdf_name);
	enic->fdir.hash = rte_hash_create(&hash_params);
	memset(&enic->fdir.stats, 0, sizeof(enic->fdir.stats));
	enic->fdir.stats.free = ENICPMD_FDIR_MAX;
	return NULL == enic->fdir.hash;
}
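
/*
 * Typical call sequence (a sketch; the real call sites live elsewhere
 * in the PMD, e.g. device init/teardown and the fdir filter_ctrl path):
 *
 *	if (enic_clsf_init(enic))
 *		return -ENOMEM;			// hash table creation failed
 *	...
 *	enic_fdir_add_fltr(enic, &params);	// per add request
 *	enic_fdir_del_fltr(enic, &params);	// per delete request
 *	...
 *	enic_clsf_destroy(enic);		// at device close
 */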