/* New upstream version 18.02
 * [deb_dpdk.git] / drivers / net / enic / enic_clsf.c
 */
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright 2008-2017 Cisco Systems, Inc.  All rights reserved.
3  * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
4  */
5
6 #include <libgen.h>
7
8 #include <rte_ethdev_driver.h>
9 #include <rte_malloc.h>
10 #include <rte_hash.h>
11 #include <rte_byteorder.h>
12 #include <rte_ip.h>
13 #include <rte_tcp.h>
14 #include <rte_udp.h>
15 #include <rte_sctp.h>
16 #include <rte_eth_ctrl.h>
17
18 #include "enic_compat.h"
19 #include "enic.h"
20 #include "wq_enet_desc.h"
21 #include "rq_enet_desc.h"
22 #include "cq_enet_desc.h"
23 #include "vnic_enet.h"
24 #include "vnic_dev.h"
25 #include "vnic_wq.h"
26 #include "vnic_rq.h"
27 #include "vnic_cq.h"
28 #include "vnic_intr.h"
29 #include "vnic_nic.h"
30
31 #ifdef RTE_ARCH_X86
32 #include <rte_hash_crc.h>
33 #define DEFAULT_HASH_FUNC       rte_hash_crc
34 #else
35 #include <rte_jhash.h>
36 #define DEFAULT_HASH_FUNC       rte_jhash
37 #endif
38
39 #define ENICPMD_CLSF_HASH_ENTRIES       ENICPMD_FDIR_MAX
40
41 void enic_fdir_stats_get(struct enic *enic, struct rte_eth_fdir_stats *stats)
42 {
43         *stats = enic->fdir.stats;
44 }
45
46 void enic_fdir_info_get(struct enic *enic, struct rte_eth_fdir_info *info)
47 {
48         info->mode = (enum rte_fdir_mode)enic->fdir.modes;
49         info->flow_types_mask[0] = enic->fdir.types_mask;
50 }
51
52 void enic_fdir_info(struct enic *enic)
53 {
54         enic->fdir.modes = (u32)RTE_FDIR_MODE_PERFECT;
55         enic->fdir.types_mask  = 1 << RTE_ETH_FLOW_NONFRAG_IPV4_UDP |
56                                  1 << RTE_ETH_FLOW_NONFRAG_IPV4_TCP;
57         if (enic->adv_filters) {
58                 enic->fdir.types_mask |= 1 << RTE_ETH_FLOW_NONFRAG_IPV4_OTHER |
59                                          1 << RTE_ETH_FLOW_NONFRAG_IPV4_SCTP |
60                                          1 << RTE_ETH_FLOW_NONFRAG_IPV6_UDP |
61                                          1 << RTE_ETH_FLOW_NONFRAG_IPV6_TCP |
62                                          1 << RTE_ETH_FLOW_NONFRAG_IPV6_SCTP |
63                                          1 << RTE_ETH_FLOW_NONFRAG_IPV6_OTHER;
64                 enic->fdir.copy_fltr_fn = copy_fltr_v2;
65         } else {
66                 enic->fdir.copy_fltr_fn = copy_fltr_v1;
67         }
68 }
69
/* OR one protocol-layer match into a VIC generic filter: record the
 * protocol flag and copy the header mask/value pair into the given
 * layer slot (L3 or L4).
 */
static void
enic_set_layer(struct filter_generic_1 *gp, unsigned int flag,
               enum filter_generic_1_layer layer, void *mask, void *val,
               unsigned int len)
{
        gp->mask_flags |= flag;
        /* NOTE(review): ORs the whole accumulated mask_flags into val_flags
         * rather than just 'flag'. Net effect is the same in this file,
         * since both fields start from zero (gp is memset in copy_fltr_v2)
         * and only ever accumulate the same flags.
         */
        gp->val_flags |= gp->mask_flags;
        memcpy(gp->layer[layer].mask, mask, len);
        memcpy(gp->layer[layer].val, val, len);
}
80
81 /* Copy Flow Director filter to a VIC ipv4 filter (for Cisco VICs
82  * without advanced filter support.
83  */
84 void
85 copy_fltr_v1(struct filter_v2 *fltr, struct rte_eth_fdir_input *input,
86              __rte_unused struct rte_eth_fdir_masks *masks)
87 {
88         fltr->type = FILTER_IPV4_5TUPLE;
89         fltr->u.ipv4.src_addr = rte_be_to_cpu_32(
90                 input->flow.ip4_flow.src_ip);
91         fltr->u.ipv4.dst_addr = rte_be_to_cpu_32(
92                 input->flow.ip4_flow.dst_ip);
93         fltr->u.ipv4.src_port = rte_be_to_cpu_16(
94                 input->flow.udp4_flow.src_port);
95         fltr->u.ipv4.dst_port = rte_be_to_cpu_16(
96                 input->flow.udp4_flow.dst_port);
97
98         if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_TCP)
99                 fltr->u.ipv4.protocol = PROTO_TCP;
100         else
101                 fltr->u.ipv4.protocol = PROTO_UDP;
102
103         fltr->u.ipv4.flags = FILTER_FIELDS_IPV4_5TUPLE;
104 }
105
106 /* Copy Flow Director filter to a VIC generic filter (requires advanced
107  * filter support.
108  */
109 void
110 copy_fltr_v2(struct filter_v2 *fltr, struct rte_eth_fdir_input *input,
111              struct rte_eth_fdir_masks *masks)
112 {
113         struct filter_generic_1 *gp = &fltr->u.generic_1;
114         int i;
115
116         fltr->type = FILTER_DPDK_1;
117         memset(gp, 0, sizeof(*gp));
118
119         if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_UDP) {
120                 struct udp_hdr udp_mask, udp_val;
121                 memset(&udp_mask, 0, sizeof(udp_mask));
122                 memset(&udp_val, 0, sizeof(udp_val));
123
124                 if (input->flow.udp4_flow.src_port) {
125                         udp_mask.src_port = masks->src_port_mask;
126                         udp_val.src_port = input->flow.udp4_flow.src_port;
127                 }
128                 if (input->flow.udp4_flow.dst_port) {
129                         udp_mask.dst_port = masks->dst_port_mask;
130                         udp_val.dst_port = input->flow.udp4_flow.dst_port;
131                 }
132
133                 enic_set_layer(gp, FILTER_GENERIC_1_UDP, FILTER_GENERIC_1_L4,
134                                &udp_mask, &udp_val, sizeof(struct udp_hdr));
135         } else if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_TCP) {
136                 struct tcp_hdr tcp_mask, tcp_val;
137                 memset(&tcp_mask, 0, sizeof(tcp_mask));
138                 memset(&tcp_val, 0, sizeof(tcp_val));
139
140                 if (input->flow.tcp4_flow.src_port) {
141                         tcp_mask.src_port = masks->src_port_mask;
142                         tcp_val.src_port = input->flow.tcp4_flow.src_port;
143                 }
144                 if (input->flow.tcp4_flow.dst_port) {
145                         tcp_mask.dst_port = masks->dst_port_mask;
146                         tcp_val.dst_port = input->flow.tcp4_flow.dst_port;
147                 }
148
149                 enic_set_layer(gp, FILTER_GENERIC_1_TCP, FILTER_GENERIC_1_L4,
150                                &tcp_mask, &tcp_val, sizeof(struct tcp_hdr));
151         } else if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_SCTP) {
152                 struct sctp_hdr sctp_mask, sctp_val;
153                 memset(&sctp_mask, 0, sizeof(sctp_mask));
154                 memset(&sctp_val, 0, sizeof(sctp_val));
155
156                 if (input->flow.sctp4_flow.src_port) {
157                         sctp_mask.src_port = masks->src_port_mask;
158                         sctp_val.src_port = input->flow.sctp4_flow.src_port;
159                 }
160                 if (input->flow.sctp4_flow.dst_port) {
161                         sctp_mask.dst_port = masks->dst_port_mask;
162                         sctp_val.dst_port = input->flow.sctp4_flow.dst_port;
163                 }
164                 if (input->flow.sctp4_flow.verify_tag) {
165                         sctp_mask.tag = 0xffffffff;
166                         sctp_val.tag = input->flow.sctp4_flow.verify_tag;
167                 }
168
169                 /* v4 proto should be 132, override ip4_flow.proto */
170                 input->flow.ip4_flow.proto = 132;
171
172                 enic_set_layer(gp, 0, FILTER_GENERIC_1_L4, &sctp_mask,
173                                &sctp_val, sizeof(struct sctp_hdr));
174         }
175
176         if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_UDP ||
177             input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_TCP ||
178             input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_SCTP ||
179             input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_OTHER) {
180                 struct ipv4_hdr ip4_mask, ip4_val;
181                 memset(&ip4_mask, 0, sizeof(struct ipv4_hdr));
182                 memset(&ip4_val, 0, sizeof(struct ipv4_hdr));
183
184                 if (input->flow.ip4_flow.tos) {
185                         ip4_mask.type_of_service = masks->ipv4_mask.tos;
186                         ip4_val.type_of_service = input->flow.ip4_flow.tos;
187                 }
188                 if (input->flow.ip4_flow.ttl) {
189                         ip4_mask.time_to_live = masks->ipv4_mask.ttl;
190                         ip4_val.time_to_live = input->flow.ip4_flow.ttl;
191                 }
192                 if (input->flow.ip4_flow.proto) {
193                         ip4_mask.next_proto_id = masks->ipv4_mask.proto;
194                         ip4_val.next_proto_id = input->flow.ip4_flow.proto;
195                 }
196                 if (input->flow.ip4_flow.src_ip) {
197                         ip4_mask.src_addr =  masks->ipv4_mask.src_ip;
198                         ip4_val.src_addr = input->flow.ip4_flow.src_ip;
199                 }
200                 if (input->flow.ip4_flow.dst_ip) {
201                         ip4_mask.dst_addr =  masks->ipv4_mask.dst_ip;
202                         ip4_val.dst_addr = input->flow.ip4_flow.dst_ip;
203                 }
204
205                 enic_set_layer(gp, FILTER_GENERIC_1_IPV4, FILTER_GENERIC_1_L3,
206                                &ip4_mask, &ip4_val, sizeof(struct ipv4_hdr));
207         }
208
209         if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_UDP) {
210                 struct udp_hdr udp_mask, udp_val;
211                 memset(&udp_mask, 0, sizeof(udp_mask));
212                 memset(&udp_val, 0, sizeof(udp_val));
213
214                 if (input->flow.udp6_flow.src_port) {
215                         udp_mask.src_port = masks->src_port_mask;
216                         udp_val.src_port = input->flow.udp6_flow.src_port;
217                 }
218                 if (input->flow.udp6_flow.dst_port) {
219                         udp_mask.dst_port = masks->dst_port_mask;
220                         udp_val.dst_port = input->flow.udp6_flow.dst_port;
221                 }
222                 enic_set_layer(gp, FILTER_GENERIC_1_UDP, FILTER_GENERIC_1_L4,
223                                &udp_mask, &udp_val, sizeof(struct udp_hdr));
224         } else if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_TCP) {
225                 struct tcp_hdr tcp_mask, tcp_val;
226                 memset(&tcp_mask, 0, sizeof(tcp_mask));
227                 memset(&tcp_val, 0, sizeof(tcp_val));
228
229                 if (input->flow.tcp6_flow.src_port) {
230                         tcp_mask.src_port = masks->src_port_mask;
231                         tcp_val.src_port = input->flow.tcp6_flow.src_port;
232                 }
233                 if (input->flow.tcp6_flow.dst_port) {
234                         tcp_mask.dst_port = masks->dst_port_mask;
235                         tcp_val.dst_port = input->flow.tcp6_flow.dst_port;
236                 }
237                 enic_set_layer(gp, FILTER_GENERIC_1_TCP, FILTER_GENERIC_1_L4,
238                                &tcp_mask, &tcp_val, sizeof(struct tcp_hdr));
239         } else if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_SCTP) {
240                 struct sctp_hdr sctp_mask, sctp_val;
241                 memset(&sctp_mask, 0, sizeof(sctp_mask));
242                 memset(&sctp_val, 0, sizeof(sctp_val));
243
244                 if (input->flow.sctp6_flow.src_port) {
245                         sctp_mask.src_port = masks->src_port_mask;
246                         sctp_val.src_port = input->flow.sctp6_flow.src_port;
247                 }
248                 if (input->flow.sctp6_flow.dst_port) {
249                         sctp_mask.dst_port = masks->dst_port_mask;
250                         sctp_val.dst_port = input->flow.sctp6_flow.dst_port;
251                 }
252                 if (input->flow.sctp6_flow.verify_tag) {
253                         sctp_mask.tag = 0xffffffff;
254                         sctp_val.tag = input->flow.sctp6_flow.verify_tag;
255                 }
256
257                 /* v4 proto should be 132, override ipv6_flow.proto */
258                 input->flow.ipv6_flow.proto = 132;
259
260                 enic_set_layer(gp, 0, FILTER_GENERIC_1_L4, &sctp_mask,
261                                &sctp_val, sizeof(struct sctp_hdr));
262         }
263
264         if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_UDP ||
265             input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_TCP ||
266             input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_SCTP ||
267             input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_OTHER) {
268                 struct ipv6_hdr ipv6_mask, ipv6_val;
269                 memset(&ipv6_mask, 0, sizeof(struct ipv6_hdr));
270                 memset(&ipv6_val, 0, sizeof(struct ipv6_hdr));
271
272                 if (input->flow.ipv6_flow.proto) {
273                         ipv6_mask.proto = masks->ipv6_mask.proto;
274                         ipv6_val.proto = input->flow.ipv6_flow.proto;
275                 }
276                 for (i = 0; i < 4; i++) {
277                         *(uint32_t *)&ipv6_mask.src_addr[i * 4] =
278                                         masks->ipv6_mask.src_ip[i];
279                         *(uint32_t *)&ipv6_val.src_addr[i * 4] =
280                                         input->flow.ipv6_flow.src_ip[i];
281                 }
282                 for (i = 0; i < 4; i++) {
283                         *(uint32_t *)&ipv6_mask.dst_addr[i * 4] =
284                                         masks->ipv6_mask.src_ip[i];
285                         *(uint32_t *)&ipv6_val.dst_addr[i * 4] =
286                                         input->flow.ipv6_flow.dst_ip[i];
287                 }
288                 if (input->flow.ipv6_flow.tc) {
289                         ipv6_mask.vtc_flow = masks->ipv6_mask.tc << 12;
290                         ipv6_val.vtc_flow = input->flow.ipv6_flow.tc << 12;
291                 }
292                 if (input->flow.ipv6_flow.hop_limits) {
293                         ipv6_mask.hop_limits = masks->ipv6_mask.hop_limits;
294                         ipv6_val.hop_limits = input->flow.ipv6_flow.hop_limits;
295                 }
296
297                 enic_set_layer(gp, FILTER_GENERIC_1_IPV6, FILTER_GENERIC_1_L3,
298                                &ipv6_mask, &ipv6_val, sizeof(struct ipv6_hdr));
299         }
300 }
301
302 int enic_fdir_del_fltr(struct enic *enic, struct rte_eth_fdir_filter *params)
303 {
304         int32_t pos;
305         struct enic_fdir_node *key;
306         /* See if the key is in the table */
307         pos = rte_hash_del_key(enic->fdir.hash, params);
308         switch (pos) {
309         case -EINVAL:
310         case -ENOENT:
311                 enic->fdir.stats.f_remove++;
312                 return -EINVAL;
313         default:
314                 /* The entry is present in the table */
315                 key = enic->fdir.nodes[pos];
316
317                 /* Delete the filter */
318                 vnic_dev_classifier(enic->vdev, CLSF_DEL,
319                         &key->fltr_id, NULL, NULL);
320                 rte_free(key);
321                 enic->fdir.nodes[pos] = NULL;
322                 enic->fdir.stats.free++;
323                 enic->fdir.stats.remove++;
324                 break;
325         }
326         return 0;
327 }
328
/* Add or update a flow-director filter steering a flow to an Rx queue.
 * Returns 0 on success, a negative errno-style value (or -1 on classifier
 * failure) otherwise. Failed attempts are counted in fdir.stats.f_add.
 */
int enic_fdir_add_fltr(struct enic *enic, struct rte_eth_fdir_filter *params)
{
        struct enic_fdir_node *key;
        struct filter_v2 fltr;
        int32_t pos;
        u8 do_free = 0;
        u16 old_fltr_id = 0;
        u32 flowtype_supported;
        u16 flex_bytes;
        u16 queue;
        struct filter_action_v2 action;

        memset(&fltr, 0, sizeof(fltr));
        memset(&action, 0, sizeof(action));
        /* Is this flow type in the supported set chosen by enic_fdir_info()? */
        flowtype_supported = enic->fdir.types_mask
                             & (1 << params->input.flow_type);

        flex_bytes = ((params->input.flow_ext.flexbytes[1] << 8 & 0xFF00) |
                (params->input.flow_ext.flexbytes[0] & 0xFF));

        /* Reject what the VIC classifier cannot express: no hash table,
         * VLAN matching, unsupported flow type, flexible payload bytes,
         * or a drop action.
         */
        if (!enic->fdir.hash ||
                (params->input.flow_ext.vlan_tci & 0xFFF) ||
                !flowtype_supported || flex_bytes ||
                params->action.behavior /* drop */) {
                enic->fdir.stats.f_add++;
                return -ENOTSUP;
        }

        /* Get the enicpmd RQ from the DPDK Rx queue */
        queue = enic_rte_rq_idx_to_sop_idx(params->action.rx_queue);

        if (!enic->rq[queue].in_use)
                return -EINVAL;

        /* See if the key is already there in the table */
        pos = rte_hash_del_key(enic->fdir.hash, params);
        switch (pos) {
        case -EINVAL:
                enic->fdir.stats.f_add++;
                return -EINVAL;
        case -ENOENT:
                /* Add a new classifier entry */
                if (!enic->fdir.stats.free) {
                        enic->fdir.stats.f_add++;
                        return -ENOSPC;
                }
                key = rte_zmalloc("enic_fdir_node",
                                  sizeof(struct enic_fdir_node), 0);
                if (!key) {
                        enic->fdir.stats.f_add++;
                        return -ENOMEM;
                }
                break;
        default:
                /* The entry is already present in the table.
                 * Check if there is a change in queue
                 */
                key = enic->fdir.nodes[pos];
                enic->fdir.nodes[pos] = NULL;
                if (unlikely(key->rq_index == queue)) {
                        /* Nothing to be done */
                        enic->fdir.stats.f_add++;
                        /* Re-insert the key that was deleted above */
                        pos = rte_hash_add_key(enic->fdir.hash, params);
                        if (pos < 0) {
                                dev_err(enic, "Add hash key failed\n");
                                return pos;
                        }
                        enic->fdir.nodes[pos] = key;
                        dev_warning(enic,
                                "FDIR rule is already present\n");
                        return 0;
                }

                if (likely(enic->fdir.stats.free)) {
                        /* Add the filter and then delete the old one.
                         * This is to avoid packets from going into the
                         * default queue during the window between
                         * delete and add
                         */
                        do_free = 1;
                        old_fltr_id = key->fltr_id;
                } else {
                        /* No free slots in the classifier.
                         * Delete the filter and add the modified one later
                         */
                        vnic_dev_classifier(enic->vdev, CLSF_DEL,
                                &key->fltr_id, NULL, NULL);
                        enic->fdir.stats.free++;
                }

                break;
        }

        key->filter = *params;
        key->rq_index = queue;

        /* Translate the DPDK filter spec into a VIC filter using the
         * callback selected in enic_fdir_info() (v1 or v2).
         */
        enic->fdir.copy_fltr_fn(&fltr, &params->input,
                                &enic->rte_dev->data->dev_conf.fdir_conf.mask);
        action.type = FILTER_ACTION_RQ_STEERING;
        action.rq_idx = queue;

        /* NOTE(review): 'queue' appears to be in/out for CLSF_ADD — on
         * success its value is used as the new filter id below. Confirm
         * against vnic_dev_classifier().
         */
        if (!vnic_dev_classifier(enic->vdev, CLSF_ADD, &queue, &fltr,
            &action)) {
                key->fltr_id = queue;
        } else {
                dev_err(enic, "Add classifier entry failed\n");
                enic->fdir.stats.f_add++;
                rte_free(key);
                return -1;
        }

        /* New filter is live; now remove the one it replaced (if any) */
        if (do_free)
                vnic_dev_classifier(enic->vdev, CLSF_DEL, &old_fltr_id, NULL,
                                    NULL);
        else{
                enic->fdir.stats.free--;
                enic->fdir.stats.add++;
        }

        pos = rte_hash_add_key(enic->fdir.hash, params);
        if (pos < 0) {
                /* NOTE(review): on this path the hardware filter added above
                 * is not rolled back and 'key' is not freed — verify whether
                 * a CLSF_DEL + rte_free is needed here.
                 */
                enic->fdir.stats.f_add++;
                dev_err(enic, "Add hash key failed\n");
                return pos;
        }

        enic->fdir.nodes[pos] = key;
        return 0;
}
458
459 void enic_clsf_destroy(struct enic *enic)
460 {
461         u32 index;
462         struct enic_fdir_node *key;
463         /* delete classifier entries */
464         for (index = 0; index < ENICPMD_FDIR_MAX; index++) {
465                 key = enic->fdir.nodes[index];
466                 if (key) {
467                         vnic_dev_classifier(enic->vdev, CLSF_DEL,
468                                 &key->fltr_id, NULL, NULL);
469                         rte_free(key);
470                         enic->fdir.nodes[index] = NULL;
471                 }
472         }
473
474         if (enic->fdir.hash) {
475                 rte_hash_free(enic->fdir.hash);
476                 enic->fdir.hash = NULL;
477         }
478 }
479
480 int enic_clsf_init(struct enic *enic)
481 {
482         char clsf_name[RTE_HASH_NAMESIZE];
483         struct rte_hash_parameters hash_params = {
484                 .name = clsf_name,
485                 .entries = ENICPMD_CLSF_HASH_ENTRIES,
486                 .key_len = sizeof(struct rte_eth_fdir_filter),
487                 .hash_func = DEFAULT_HASH_FUNC,
488                 .hash_func_init_val = 0,
489                 .socket_id = SOCKET_ID_ANY,
490         };
491         snprintf(clsf_name, RTE_HASH_NAMESIZE, "enic_clsf_%s", enic->bdf_name);
492         enic->fdir.hash = rte_hash_create(&hash_params);
493         memset(&enic->fdir.stats, 0, sizeof(enic->fdir.stats));
494         enic->fdir.stats.free = ENICPMD_FDIR_MAX;
495         return NULL == enic->fdir.hash;
496 }