/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <stdlib.h>
#include <netinet/in.h>

#include <rte_mbuf.h>
#include <rte_malloc.h>
#include <rte_ethdev.h>
#include <rte_tcp.h>
#include <rte_udp.h>
#include <rte_ip.h>
#include <rte_ip_frag.h>
#include <rte_devargs.h>
#include <rte_kvargs.h>
#include <rte_vdev.h>
#include <rte_alarm.h>
#include <rte_cycles.h>

#include "rte_eth_bond.h"
#include "rte_eth_bond_private.h"
#include "rte_eth_bond_8023ad_private.h"

#define REORDER_PERIOD_MS 10

#define HASH_L4_PORTS(h) ((h)->src_port ^ (h)->dst_port)

/* Table for statistics in mode 5 TLB */
static uint64_t tlb_last_obytets[RTE_MAX_ETHPORTS];

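/*
 * Return the size of any VLAN header(s) following the Ethernet header,
 * updating *proto to the encapsulated EtherType. At most two stacked
 * (QinQ) tags are parsed.
 */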
static inline size_t
get_vlan_offset(struct ether_hdr *eth_hdr, uint16_t *proto)
{
        size_t vlan_offset = 0;

        if (rte_cpu_to_be_16(ETHER_TYPE_VLAN) == *proto) {
                struct vlan_hdr *vlan_hdr = (struct vlan_hdr *)(eth_hdr + 1);

                vlan_offset = sizeof(struct vlan_hdr);
                *proto = vlan_hdr->eth_proto;

                if (rte_cpu_to_be_16(ETHER_TYPE_VLAN) == *proto) {
                        vlan_hdr = vlan_hdr + 1;
                        *proto = vlan_hdr->eth_proto;
                        vlan_offset += sizeof(struct vlan_hdr);
                }
        }
        return vlan_offset;
}

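/*
 * Default Rx burst (used by the round-robin, balance and broadcast modes):
 * poll each active slave in turn until either nb_pkts packets have been
 * gathered or every slave has been polled once.
 */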
static uint16_t
bond_ethdev_rx_burst(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
        struct bond_dev_private *internals;

        uint16_t num_rx_slave = 0;
        uint16_t num_rx_total = 0;

        int i;

        /* Cast to structure containing the bonded device's port id and queue id */
        struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)queue;

        internals = bd_rx_q->dev_private;

        for (i = 0; i < internals->active_slave_count && nb_pkts; i++) {
                /* Offset into *bufs increases as packets are received
                 * from other slaves */
                num_rx_slave = rte_eth_rx_burst(internals->active_slaves[i],
                                bd_rx_q->queue_id, bufs + num_rx_total, nb_pkts);
                if (num_rx_slave) {
                        num_rx_total += num_rx_slave;
                        nb_pkts -= num_rx_slave;
                }
        }

        return num_rx_total;
}

static uint16_t
bond_ethdev_rx_burst_active_backup(void *queue, struct rte_mbuf **bufs,
                uint16_t nb_pkts)
{
        struct bond_dev_private *internals;

        /* Cast to structure containing the bonded device's port id and queue id */
        struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)queue;

        internals = bd_rx_q->dev_private;

        /* In active-backup mode only the current primary slave is polled */
        return rte_eth_rx_burst(internals->current_primary_port,
                        bd_rx_q->queue_id, bufs, nb_pkts);
}

/* Returns 1 for untagged LACPDU or marker PDU frames, 0 otherwise */
static inline uint8_t
is_lacp_packets(uint16_t ethertype, uint8_t subtype, uint16_t vlan_tci)
{
        const uint16_t ether_type_slow_be = rte_be_to_cpu_16(ETHER_TYPE_SLOW);

        return !vlan_tci && (ethertype == ether_type_slow_be &&
                (subtype == SLOW_SUBTYPE_MARKER || subtype == SLOW_SUBTYPE_LACP));
}

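/*
 * Rx burst for 802.3ad (mode 4). Besides gathering packets from all active
 * slaves, this filters the burst: slow protocol frames (LACPDUs and marker
 * PDUs) are handed to the mode 4 state machine, and data packets are dropped
 * when the slave is not in COLLECTING state or, without promiscuous mode,
 * when the destination MAC matches neither the bond's address nor a
 * multicast address.
 */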
static uint16_t
bond_ethdev_rx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
                uint16_t nb_pkts)
{
        /* Cast to structure containing the bonded device's port id and queue id */
        struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)queue;
        struct bond_dev_private *internals = bd_rx_q->dev_private;
        struct ether_addr bond_mac;

        struct ether_hdr *hdr;

        const uint16_t ether_type_slow_be = rte_be_to_cpu_16(ETHER_TYPE_SLOW);
        uint16_t num_rx_total = 0;      /* Total number of received packets */
        uint8_t slaves[RTE_MAX_ETHPORTS];
        uint8_t slave_count;

        uint8_t collecting;  /* current slave collecting status */
        const uint8_t promisc = internals->promiscuous_en;
        uint8_t i, j, k;
        uint8_t subtype;

        rte_eth_macaddr_get(internals->port_id, &bond_mac);
        /* Copy slave list to protect against slave up/down changes during rx
         * bursting */
        slave_count = internals->active_slave_count;
        memcpy(slaves, internals->active_slaves,
                        sizeof(internals->active_slaves[0]) * slave_count);

        for (i = 0; i < slave_count && num_rx_total < nb_pkts; i++) {
                j = num_rx_total;
                collecting = ACTOR_STATE(&mode_8023ad_ports[slaves[i]], COLLECTING);

                /* Read packets from this slave */
                num_rx_total += rte_eth_rx_burst(slaves[i], bd_rx_q->queue_id,
                                &bufs[num_rx_total], nb_pkts - num_rx_total);

                for (k = j; k < 2 && k < num_rx_total; k++)
                        rte_prefetch0(rte_pktmbuf_mtod(bufs[k], void *));

                /* Handle slow protocol packets. */
                while (j < num_rx_total) {
                        if (j + 3 < num_rx_total)
                                rte_prefetch0(rte_pktmbuf_mtod(bufs[j + 3], void *));

                        hdr = rte_pktmbuf_mtod(bufs[j], struct ether_hdr *);
                        subtype = ((struct slow_protocol_frame *)hdr)->slow_protocol.subtype;

                        /* Remove packet from array if it is a slow packet, if the
                         * slave is not in collecting state, or if the bonding
                         * interface is not in promiscuous mode and the destination
                         * address does not match. */
                        if (unlikely(is_lacp_packets(hdr->ether_type, subtype, bufs[j]->vlan_tci) ||
                                !collecting || (!promisc &&
                                        !is_multicast_ether_addr(&hdr->d_addr) &&
                                        !is_same_ether_addr(&bond_mac, &hdr->d_addr)))) {

                                if (hdr->ether_type == ether_type_slow_be) {
                                        bond_mode_8023ad_handle_slow_pkt(internals, slaves[i],
                                                bufs[j]);
                                } else
                                        rte_pktmbuf_free(bufs[j]);

                                /* Packet is managed by mode 4 or dropped, shift the array */
                                num_rx_total--;
                                if (j < num_rx_total) {
                                        memmove(&bufs[j], &bufs[j + 1], sizeof(bufs[0]) *
                                                (num_rx_total - j));
                                }
                        } else
                                j++;
                }
        }

        return num_rx_total;
}

#if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
uint32_t burstnumberRX;
uint32_t burstnumberTX;

#ifdef RTE_LIBRTE_BOND_DEBUG_ALB

static void
arp_op_name(uint16_t arp_op, char *buf)
{
        switch (arp_op) {
        case ARP_OP_REQUEST:
                snprintf(buf, sizeof("ARP Request"), "%s", "ARP Request");
                return;
        case ARP_OP_REPLY:
                snprintf(buf, sizeof("ARP Reply"), "%s", "ARP Reply");
                return;
        case ARP_OP_REVREQUEST:
                snprintf(buf, sizeof("Reverse ARP Request"), "%s",
                                "Reverse ARP Request");
                return;
        case ARP_OP_REVREPLY:
                snprintf(buf, sizeof("Reverse ARP Reply"), "%s",
                                "Reverse ARP Reply");
                return;
        case ARP_OP_INVREQUEST:
                snprintf(buf, sizeof("Peer Identify Request"), "%s",
                                "Peer Identify Request");
                return;
        case ARP_OP_INVREPLY:
                snprintf(buf, sizeof("Peer Identify Reply"), "%s",
                                "Peer Identify Reply");
                return;
        default:
                break;
        }
        snprintf(buf, sizeof("Unknown"), "%s", "Unknown");
        return;
}
#endif
#define MaxIPv4String   16
static void
ipv4_addr_to_dot(uint32_t be_ipv4_addr, char *buf, uint8_t buf_size)
{
        uint32_t ipv4_addr;

        ipv4_addr = rte_be_to_cpu_32(be_ipv4_addr);
        snprintf(buf, buf_size, "%d.%d.%d.%d", (ipv4_addr >> 24) & 0xFF,
                (ipv4_addr >> 16) & 0xFF, (ipv4_addr >> 8) & 0xFF,
                ipv4_addr & 0xFF);
}

#define MAX_CLIENTS_NUMBER      128
uint8_t active_clients;
struct client_stats_t {
        uint8_t port;
        uint32_t ipv4_addr;
        uint32_t ipv4_rx_packets;
        uint32_t ipv4_tx_packets;
};
struct client_stats_t client_stats[MAX_CLIENTS_NUMBER];

static void
update_client_stats(uint32_t addr, uint8_t port, uint32_t *TXorRXindicator)
{
        int i = 0;

        for (; i < MAX_CLIENTS_NUMBER; i++)     {
                if ((client_stats[i].ipv4_addr == addr) && (client_stats[i].port == port))      {
                        /* Just update the RX/TX packet count for this client */
                        if (TXorRXindicator == &burstnumberRX)
                                client_stats[i].ipv4_rx_packets++;
                        else
                                client_stats[i].ipv4_tx_packets++;
                        return;
                }
        }
        /* We have a new client. If the table is full, drop the entry rather
         * than write past the end of the array. */
        if (active_clients == MAX_CLIENTS_NUMBER)
                return;

        /* Insert it into the table and increment its stats */
        if (TXorRXindicator == &burstnumberRX)
                client_stats[active_clients].ipv4_rx_packets++;
        else
                client_stats[active_clients].ipv4_tx_packets++;
        client_stats[active_clients].ipv4_addr = addr;
        client_stats[active_clients].port = port;
        active_clients++;
}

#ifdef RTE_LIBRTE_BOND_DEBUG_ALB
#define MODE6_DEBUG(info, src_ip, dst_ip, eth_h, arp_op, port, burstnumber)     \
                RTE_LOG(DEBUG, PMD, \
                "%s " \
                "port:%d " \
                "SrcMAC:%02X:%02X:%02X:%02X:%02X:%02X " \
                "SrcIP:%s " \
                "DstMAC:%02X:%02X:%02X:%02X:%02X:%02X " \
                "DstIP:%s " \
                "%s " \
                "%d\n", \
                info, \
                port, \
                eth_h->s_addr.addr_bytes[0], \
                eth_h->s_addr.addr_bytes[1], \
                eth_h->s_addr.addr_bytes[2], \
                eth_h->s_addr.addr_bytes[3], \
                eth_h->s_addr.addr_bytes[4], \
                eth_h->s_addr.addr_bytes[5], \
                src_ip, \
                eth_h->d_addr.addr_bytes[0], \
                eth_h->d_addr.addr_bytes[1], \
                eth_h->d_addr.addr_bytes[2], \
                eth_h->d_addr.addr_bytes[3], \
                eth_h->d_addr.addr_bytes[4], \
                eth_h->d_addr.addr_bytes[5], \
                dst_ip, \
                arp_op, \
                ++burstnumber)
#endif

static void
mode6_debug(const char __attribute__((unused)) *info, struct ether_hdr *eth_h,
                uint8_t port, uint32_t __attribute__((unused)) *burstnumber)
{
        struct ipv4_hdr *ipv4_h;
#ifdef RTE_LIBRTE_BOND_DEBUG_ALB
        struct arp_hdr *arp_h;
        char dst_ip[16];
        char ArpOp[24];
        char buf[16];
#endif
        char src_ip[16];

        uint16_t ether_type = eth_h->ether_type;
        uint16_t offset = get_vlan_offset(eth_h, &ether_type);

#ifdef RTE_LIBRTE_BOND_DEBUG_ALB
        snprintf(buf, 16, "%s", info);
#endif

        if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv4)) {
                ipv4_h = (struct ipv4_hdr *)((char *)(eth_h + 1) + offset);
                ipv4_addr_to_dot(ipv4_h->src_addr, src_ip, MaxIPv4String);
#ifdef RTE_LIBRTE_BOND_DEBUG_ALB
                ipv4_addr_to_dot(ipv4_h->dst_addr, dst_ip, MaxIPv4String);
                MODE6_DEBUG(buf, src_ip, dst_ip, eth_h, "", port, *burstnumber);
#endif
                update_client_stats(ipv4_h->src_addr, port, burstnumber);
        }
#ifdef RTE_LIBRTE_BOND_DEBUG_ALB
        else if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_ARP)) {
                arp_h = (struct arp_hdr *)((char *)(eth_h + 1) + offset);
                ipv4_addr_to_dot(arp_h->arp_data.arp_sip, src_ip, MaxIPv4String);
                ipv4_addr_to_dot(arp_h->arp_data.arp_tip, dst_ip, MaxIPv4String);
                arp_op_name(rte_be_to_cpu_16(arp_h->arp_op), ArpOp);
                MODE6_DEBUG(buf, src_ip, dst_ip, eth_h, ArpOp, port, *burstnumber);
        }
#endif
}
#endif

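/*
 * Rx burst for adaptive load balancing (mode 6): receive via the default
 * burst function, then pass any ARP frames to the ALB logic so the client
 * table can be updated.
 */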
static uint16_t
bond_ethdev_rx_burst_alb(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
        struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)queue;
        struct bond_dev_private *internals = bd_rx_q->dev_private;
        struct ether_hdr *eth_h;
        uint16_t ether_type, offset;
        uint16_t nb_recv_pkts;
        int i;

        nb_recv_pkts = bond_ethdev_rx_burst(queue, bufs, nb_pkts);

        for (i = 0; i < nb_recv_pkts; i++) {
                eth_h = rte_pktmbuf_mtod(bufs[i], struct ether_hdr *);
                ether_type = eth_h->ether_type;
                offset = get_vlan_offset(eth_h, &ether_type);

                if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_ARP)) {
#if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
                        mode6_debug("RX ARP:", eth_h, bufs[i]->port, &burstnumberRX);
#endif
                        bond_mode_alb_arp_recv(eth_h, offset, internals);
                }
#if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
                else if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv4))
                        mode6_debug("RX IPv4:", eth_h, bufs[i]->port, &burstnumberRX);
#endif
        }

        return nb_recv_pkts;
}

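/*
 * Tx burst for round-robin mode: packets are dealt one at a time across the
 * active slaves, starting from where the previous burst left off. If a slave
 * cannot accept its share, the unsent mbufs are moved to the tail of bufs so
 * the caller can retry them.
 */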
static uint16_t
bond_ethdev_tx_burst_round_robin(void *queue, struct rte_mbuf **bufs,
                uint16_t nb_pkts)
{
        struct bond_dev_private *internals;
        struct bond_tx_queue *bd_tx_q;

        struct rte_mbuf *slave_bufs[RTE_MAX_ETHPORTS][nb_pkts];
        uint16_t slave_nb_pkts[RTE_MAX_ETHPORTS] = { 0 };

        uint8_t num_of_slaves;
        uint8_t slaves[RTE_MAX_ETHPORTS];

        uint16_t num_tx_total = 0, num_tx_slave;

        /* Note: static, so the rotation point is shared by all queues */
        static int slave_idx = 0;
        int i, cslave_idx = 0, tx_fail_total = 0;

        bd_tx_q = (struct bond_tx_queue *)queue;
        internals = bd_tx_q->dev_private;

        /* Copy slave list to protect against slave up/down changes during tx
         * bursting */
        num_of_slaves = internals->active_slave_count;
        memcpy(slaves, internals->active_slaves,
                        sizeof(internals->active_slaves[0]) * num_of_slaves);

        if (num_of_slaves < 1)
                return num_tx_total;

        /* Populate the per-slave mbuf arrays with the packets to send on each slave */
        for (i = 0; i < nb_pkts; i++) {
                cslave_idx = (slave_idx + i) % num_of_slaves;
                slave_bufs[cslave_idx][(slave_nb_pkts[cslave_idx])++] = bufs[i];
        }

        /* increment current slave index so the next call to tx burst starts on the
         * next slave */
        slave_idx = ++cslave_idx;

        /* Send packet burst on each slave device */
        for (i = 0; i < num_of_slaves; i++) {
                if (slave_nb_pkts[i] > 0) {
                        num_tx_slave = rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id,
                                        slave_bufs[i], slave_nb_pkts[i]);

                        /* if tx burst fails move packets to end of bufs */
                        if (unlikely(num_tx_slave < slave_nb_pkts[i])) {
                                int tx_fail_slave = slave_nb_pkts[i] - num_tx_slave;

                                tx_fail_total += tx_fail_slave;

                                memcpy(&bufs[nb_pkts - tx_fail_total],
                                                &slave_bufs[i][num_tx_slave],
                                                tx_fail_slave * sizeof(bufs[0]));
                        }
                        num_tx_total += num_tx_slave;
                }
        }

        return num_tx_total;
}

static uint16_t
bond_ethdev_tx_burst_active_backup(void *queue,
                struct rte_mbuf **bufs, uint16_t nb_pkts)
{
        struct bond_dev_private *internals;
        struct bond_tx_queue *bd_tx_q;

        bd_tx_q = (struct bond_tx_queue *)queue;
        internals = bd_tx_q->dev_private;

        if (internals->active_slave_count < 1)
                return 0;

        return rte_eth_tx_burst(internals->current_primary_port, bd_tx_q->queue_id,
                        bufs, nb_pkts);
}

static inline uint16_t
ether_hash(struct ether_hdr *eth_hdr)
{
        unaligned_uint16_t *word_src_addr =
                (unaligned_uint16_t *)eth_hdr->s_addr.addr_bytes;
        unaligned_uint16_t *word_dst_addr =
                (unaligned_uint16_t *)eth_hdr->d_addr.addr_bytes;

        return (word_src_addr[0] ^ word_dst_addr[0]) ^
                        (word_src_addr[1] ^ word_dst_addr[1]) ^
                        (word_src_addr[2] ^ word_dst_addr[2]);
}

static inline uint32_t
ipv4_hash(struct ipv4_hdr *ipv4_hdr)
{
        return ipv4_hdr->src_addr ^ ipv4_hdr->dst_addr;
}

static inline uint32_t
ipv6_hash(struct ipv6_hdr *ipv6_hdr)
{
        unaligned_uint32_t *word_src_addr =
                (unaligned_uint32_t *)&(ipv6_hdr->src_addr[0]);
        unaligned_uint32_t *word_dst_addr =
                (unaligned_uint32_t *)&(ipv6_hdr->dst_addr[0]);

        return (word_src_addr[0] ^ word_dst_addr[0]) ^
                        (word_src_addr[1] ^ word_dst_addr[1]) ^
                        (word_src_addr[2] ^ word_dst_addr[2]) ^
                        (word_src_addr[3] ^ word_dst_addr[3]);
}

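/*
 * Transmit hash policies for the balance and 802.3ad modes. Each variant
 * folds progressively more header fields into the hash: xmit_l2_hash uses
 * only the Ethernet addresses, xmit_l23_hash additionally XORs in the IPv4
 * or IPv6 addresses, and xmit_l34_hash combines the IP addresses with the
 * TCP/UDP ports (skipping the ports for fragmented IPv4 packets, which
 * carry no L4 header). The result is reduced modulo slave_count to select
 * the output slave.
 */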
uint16_t
xmit_l2_hash(const struct rte_mbuf *buf, uint8_t slave_count)
{
        struct ether_hdr *eth_hdr = rte_pktmbuf_mtod(buf, struct ether_hdr *);

        uint32_t hash = ether_hash(eth_hdr);

        hash ^= hash >> 8;
        return hash % slave_count;
}

uint16_t
xmit_l23_hash(const struct rte_mbuf *buf, uint8_t slave_count)
{
        struct ether_hdr *eth_hdr = rte_pktmbuf_mtod(buf, struct ether_hdr *);
        uint16_t proto = eth_hdr->ether_type;
        size_t vlan_offset = get_vlan_offset(eth_hdr, &proto);
        uint32_t hash, l3hash = 0;

        hash = ether_hash(eth_hdr);

        if (rte_cpu_to_be_16(ETHER_TYPE_IPv4) == proto) {
                struct ipv4_hdr *ipv4_hdr = (struct ipv4_hdr *)
                                ((char *)(eth_hdr + 1) + vlan_offset);
                l3hash = ipv4_hash(ipv4_hdr);

        } else if (rte_cpu_to_be_16(ETHER_TYPE_IPv6) == proto) {
                struct ipv6_hdr *ipv6_hdr = (struct ipv6_hdr *)
                                ((char *)(eth_hdr + 1) + vlan_offset);
                l3hash = ipv6_hash(ipv6_hdr);
        }

        hash = hash ^ l3hash;
        hash ^= hash >> 16;
        hash ^= hash >> 8;

        return hash % slave_count;
}

uint16_t
xmit_l34_hash(const struct rte_mbuf *buf, uint8_t slave_count)
{
        struct ether_hdr *eth_hdr = rte_pktmbuf_mtod(buf, struct ether_hdr *);
        uint16_t proto = eth_hdr->ether_type;
        size_t vlan_offset = get_vlan_offset(eth_hdr, &proto);

        struct udp_hdr *udp_hdr = NULL;
        struct tcp_hdr *tcp_hdr = NULL;
        uint32_t hash, l3hash = 0, l4hash = 0;

        if (rte_cpu_to_be_16(ETHER_TYPE_IPv4) == proto) {
                struct ipv4_hdr *ipv4_hdr = (struct ipv4_hdr *)
                                ((char *)(eth_hdr + 1) + vlan_offset);
                size_t ip_hdr_offset;

                l3hash = ipv4_hash(ipv4_hdr);

                /* there is no L4 header in fragmented packet */
                if (likely(rte_ipv4_frag_pkt_is_fragmented(ipv4_hdr) == 0)) {
                        ip_hdr_offset = (ipv4_hdr->version_ihl & IPV4_HDR_IHL_MASK) *
                                        IPV4_IHL_MULTIPLIER;

                        if (ipv4_hdr->next_proto_id == IPPROTO_TCP) {
                                tcp_hdr = (struct tcp_hdr *)((char *)ipv4_hdr +
                                                ip_hdr_offset);
                                l4hash = HASH_L4_PORTS(tcp_hdr);
                        } else if (ipv4_hdr->next_proto_id == IPPROTO_UDP) {
                                udp_hdr = (struct udp_hdr *)((char *)ipv4_hdr +
                                                ip_hdr_offset);
                                l4hash = HASH_L4_PORTS(udp_hdr);
                        }
                }
        } else if (rte_cpu_to_be_16(ETHER_TYPE_IPv6) == proto) {
                struct ipv6_hdr *ipv6_hdr = (struct ipv6_hdr *)
                                ((char *)(eth_hdr + 1) + vlan_offset);
                l3hash = ipv6_hash(ipv6_hdr);

                if (ipv6_hdr->proto == IPPROTO_TCP) {
                        tcp_hdr = (struct tcp_hdr *)(ipv6_hdr + 1);
                        l4hash = HASH_L4_PORTS(tcp_hdr);
                } else if (ipv6_hdr->proto == IPPROTO_UDP) {
                        udp_hdr = (struct udp_hdr *)(ipv6_hdr + 1);
                        l4hash = HASH_L4_PORTS(udp_hdr);
                }
        }

        hash = l3hash ^ l4hash;
        hash ^= hash >> 16;
        hash ^= hash >> 8;

        return hash % slave_count;
}

struct bwg_slave {
        uint64_t bwg_left_int;
        uint64_t bwg_left_remainder;
        uint8_t slave;
};

void
bond_tlb_activate_slave(struct bond_dev_private *internals) {
        int i;

        for (i = 0; i < internals->active_slave_count; i++) {
                tlb_last_obytets[internals->active_slaves[i]] = 0;
        }
}

/* Sort slaves by descending bandwidth left (integer part, then remainder) */
static int
bandwidth_cmp(const void *a, const void *b)
{
        const struct bwg_slave *bwg_a = a;
        const struct bwg_slave *bwg_b = b;
        int64_t diff = (int64_t)bwg_b->bwg_left_int - (int64_t)bwg_a->bwg_left_int;
        int64_t diff2 = (int64_t)bwg_b->bwg_left_remainder -
                        (int64_t)bwg_a->bwg_left_remainder;
        if (diff > 0)
                return 1;
        else if (diff < 0)
                return -1;
        else if (diff2 > 0)
                return 1;
        else if (diff2 < 0)
                return -1;
        else
                return 0;
}

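/*
 * Estimate how much of a slave's link bandwidth is still unused, given the
 * bytes it transmitted since the last measurement. The result is stored as a
 * quotient/remainder pair in bwg_slave so slaves can be ordered precisely by
 * bandwidth_cmp(); a slave reporting zero link speed is left unmodified.
 */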
static void
bandwidth_left(uint8_t port_id, uint64_t load, uint8_t update_idx,
                struct bwg_slave *bwg_slave)
{
        struct rte_eth_link link_status;

        rte_eth_link_get_nowait(port_id, &link_status);
        uint64_t link_bwg = link_status.link_speed * 1000000ULL / 8;
        if (link_bwg == 0)
                return;
        link_bwg = link_bwg * (update_idx+1) * REORDER_PERIOD_MS;
        bwg_slave->bwg_left_int = (link_bwg - 1000*load) / link_bwg;
        bwg_slave->bwg_left_remainder = (link_bwg - 1000*load) % link_bwg;
}

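/*
 * Periodic alarm callback for TLB (mode 5): sample per-slave Tx byte
 * counters, compute each slave's remaining bandwidth, and rewrite
 * tlb_slaves_order so transmission favours the least loaded slaves. The
 * callback re-arms itself every REORDER_PERIOD_MS milliseconds.
 */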
static void
bond_ethdev_update_tlb_slave_cb(void *arg)
{
        struct bond_dev_private *internals = arg;
        struct rte_eth_stats slave_stats;
        struct bwg_slave bwg_array[RTE_MAX_ETHPORTS];
        uint8_t slave_count;
        uint64_t tx_bytes;

        uint8_t update_stats = 0;
        uint8_t i, slave_id;

        internals->slave_update_idx++;

        if (internals->slave_update_idx >= REORDER_PERIOD_MS)
                update_stats = 1;

        for (i = 0; i < internals->active_slave_count; i++) {
                slave_id = internals->active_slaves[i];
                rte_eth_stats_get(slave_id, &slave_stats);
                tx_bytes = slave_stats.obytes - tlb_last_obytets[slave_id];
                bandwidth_left(slave_id, tx_bytes,
                                internals->slave_update_idx, &bwg_array[i]);
                bwg_array[i].slave = slave_id;

                if (update_stats) {
                        tlb_last_obytets[slave_id] = slave_stats.obytes;
                }
        }

        if (update_stats == 1)
                internals->slave_update_idx = 0;

        slave_count = i;
        qsort(bwg_array, slave_count, sizeof(bwg_array[0]), bandwidth_cmp);
        for (i = 0; i < slave_count; i++)
                internals->tlb_slaves_order[i] = bwg_array[i].slave;

        rte_eal_alarm_set(REORDER_PERIOD_MS * 1000, bond_ethdev_update_tlb_slave_cb,
                        (struct bond_dev_private *)internals);
}

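/*
 * Tx burst for TLB (mode 5): walk the slaves in tlb_slaves_order (least
 * loaded first) and hand each one the remainder of the burst. Packets whose
 * source MAC is the bond's (primary) address are rewritten to the MAC of
 * the slave actually transmitting them, keeping the source address
 * consistent with the egress port.
 */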
static uint16_t
bond_ethdev_tx_burst_tlb(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
        struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)queue;
        struct bond_dev_private *internals = bd_tx_q->dev_private;

        struct rte_eth_dev *primary_port =
                        &rte_eth_devices[internals->primary_port];
        uint16_t num_tx_total = 0;
        uint8_t i, j;

        uint8_t num_of_slaves = internals->active_slave_count;
        uint8_t slaves[RTE_MAX_ETHPORTS];

        struct ether_hdr *ether_hdr;
        struct ether_addr primary_slave_addr;
        struct ether_addr active_slave_addr;

        if (num_of_slaves < 1)
                return num_tx_total;

        memcpy(slaves, internals->tlb_slaves_order,
                                sizeof(internals->tlb_slaves_order[0]) * num_of_slaves);

        ether_addr_copy(primary_port->data->mac_addrs, &primary_slave_addr);

        if (nb_pkts > 3) {
                for (i = 0; i < 3; i++)
                        rte_prefetch0(rte_pktmbuf_mtod(bufs[i], void*));
        }

        for (i = 0; i < num_of_slaves; i++) {
                rte_eth_macaddr_get(slaves[i], &active_slave_addr);
                for (j = num_tx_total; j < nb_pkts; j++) {
                        if (j + 3 < nb_pkts)
                                rte_prefetch0(rte_pktmbuf_mtod(bufs[j+3], void*));

                        ether_hdr = rte_pktmbuf_mtod(bufs[j], struct ether_hdr *);
                        if (is_same_ether_addr(&ether_hdr->s_addr, &primary_slave_addr))
                                ether_addr_copy(&active_slave_addr, &ether_hdr->s_addr);
#if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
                        mode6_debug("TX IPv4:", ether_hdr, slaves[i], &burstnumberTX);
#endif
                }

                num_tx_total += rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id,
                                bufs + num_tx_total, nb_pkts - num_tx_total);

                if (num_tx_total == nb_pkts)
                        break;
        }

        return num_tx_total;
}

void
bond_tlb_disable(struct bond_dev_private *internals)
{
        rte_eal_alarm_cancel(bond_ethdev_update_tlb_slave_cb, internals);
}

void
bond_tlb_enable(struct bond_dev_private *internals)
{
        bond_ethdev_update_tlb_slave_cb(internals);
}

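/*
 * Tx burst for ALB (mode 6): ARP packets are steered individually by the
 * ALB client table (with the source MAC rewritten to the chosen slave), any
 * pending ARP update packets are generated and sent, and all non-ARP
 * traffic falls back to the TLB transmit policy. Unsent packets are
 * compacted to the tail of bufs; update packets that fail to send are freed
 * here since the caller never sees them.
 */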
static uint16_t
bond_ethdev_tx_burst_alb(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
        struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)queue;
        struct bond_dev_private *internals = bd_tx_q->dev_private;

        struct ether_hdr *eth_h;
        uint16_t ether_type, offset;

        struct client_data *client_info;

        /*
         * We create transmit buffers for every slave and one additional to send
         * through tlb. In the worst case every packet will be sent on one port.
         */
        struct rte_mbuf *slave_bufs[RTE_MAX_ETHPORTS + 1][nb_pkts];
        uint16_t slave_bufs_pkts[RTE_MAX_ETHPORTS + 1] = { 0 };

        /*
         * We create separate transmit buffers for update packets as they won't
         * be counted in num_tx_total.
         */
        struct rte_mbuf *update_bufs[RTE_MAX_ETHPORTS][ALB_HASH_TABLE_SIZE];
        uint16_t update_bufs_pkts[RTE_MAX_ETHPORTS] = { 0 };

        struct rte_mbuf *upd_pkt;
        size_t pkt_size;

        uint16_t num_send, num_not_send = 0;
        uint16_t num_tx_total = 0;
        uint8_t slave_idx;

        int i, j;

        /* Search tx buffer for ARP packets and forward them to alb */
        for (i = 0; i < nb_pkts; i++) {
                eth_h = rte_pktmbuf_mtod(bufs[i], struct ether_hdr *);
                ether_type = eth_h->ether_type;
                offset = get_vlan_offset(eth_h, &ether_type);

                if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_ARP)) {
                        slave_idx = bond_mode_alb_arp_xmit(eth_h, offset, internals);

                        /* Change src mac in eth header */
                        rte_eth_macaddr_get(slave_idx, &eth_h->s_addr);

                        /* Add packet to slave tx buffer */
                        slave_bufs[slave_idx][slave_bufs_pkts[slave_idx]] = bufs[i];
                        slave_bufs_pkts[slave_idx]++;
                } else {
                        /* If packet is not ARP, send it with TLB policy */
                        slave_bufs[RTE_MAX_ETHPORTS][slave_bufs_pkts[RTE_MAX_ETHPORTS]] =
                                        bufs[i];
                        slave_bufs_pkts[RTE_MAX_ETHPORTS]++;
                }
        }

        /* Update connected client ARP tables */
        if (internals->mode6.ntt) {
                for (i = 0; i < ALB_HASH_TABLE_SIZE; i++) {
                        client_info = &internals->mode6.client_table[i];

                        if (client_info->in_use) {
                                /* Allocate new packet to send ARP update on current slave */
                                upd_pkt = rte_pktmbuf_alloc(internals->mode6.mempool);
                                if (upd_pkt == NULL) {
                                        RTE_LOG(ERR, PMD, "Failed to allocate ARP packet from pool\n");
                                        continue;
                                }
                                pkt_size = sizeof(struct ether_hdr) + sizeof(struct arp_hdr)
                                                + client_info->vlan_count * sizeof(struct vlan_hdr);
                                upd_pkt->data_len = pkt_size;
                                upd_pkt->pkt_len = pkt_size;

                                slave_idx = bond_mode_alb_arp_upd(client_info, upd_pkt,
                                                internals);

                                /* Add packet to update tx buffer */
                                update_bufs[slave_idx][update_bufs_pkts[slave_idx]] = upd_pkt;
                                update_bufs_pkts[slave_idx]++;
                        }
                }
                internals->mode6.ntt = 0;
        }

        /* Send ARP packets on proper slaves */
        for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
                if (slave_bufs_pkts[i] > 0) {
                        num_send = rte_eth_tx_burst(i, bd_tx_q->queue_id,
                                        slave_bufs[i], slave_bufs_pkts[i]);
                        for (j = 0; j < slave_bufs_pkts[i] - num_send; j++) {
                                bufs[nb_pkts - 1 - num_not_send - j] =
                                                slave_bufs[i][nb_pkts - 1 - j];
                        }

                        num_tx_total += num_send;
                        num_not_send += slave_bufs_pkts[i] - num_send;

#if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
                        /* Print TX stats including update packets */
                        for (j = 0; j < slave_bufs_pkts[i]; j++) {
                                eth_h = rte_pktmbuf_mtod(slave_bufs[i][j], struct ether_hdr *);
                                mode6_debug("TX ARP:", eth_h, i, &burstnumberTX);
                        }
#endif
                }
        }

        /* Send update packets on proper slaves */
        for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
                if (update_bufs_pkts[i] > 0) {
                        num_send = rte_eth_tx_burst(i, bd_tx_q->queue_id, update_bufs[i],
                                        update_bufs_pkts[i]);
                        for (j = num_send; j < update_bufs_pkts[i]; j++) {
                                rte_pktmbuf_free(update_bufs[i][j]);
                        }
#if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
                        for (j = 0; j < update_bufs_pkts[i]; j++) {
                                eth_h = rte_pktmbuf_mtod(update_bufs[i][j], struct ether_hdr *);
                                mode6_debug("TX ARPupd:", eth_h, i, &burstnumberTX);
                        }
#endif
                }
        }

        /* Send non-ARP packets using tlb policy */
        if (slave_bufs_pkts[RTE_MAX_ETHPORTS] > 0) {
                num_send = bond_ethdev_tx_burst_tlb(queue,
                                slave_bufs[RTE_MAX_ETHPORTS],
                                slave_bufs_pkts[RTE_MAX_ETHPORTS]);

                for (j = 0; j < slave_bufs_pkts[RTE_MAX_ETHPORTS]; j++) {
                        bufs[nb_pkts - 1 - num_not_send - j] =
                                        slave_bufs[RTE_MAX_ETHPORTS][nb_pkts - 1 - j];
                }

                num_tx_total += num_send;
                num_not_send += slave_bufs_pkts[RTE_MAX_ETHPORTS] - num_send;
        }

        return num_tx_total;
}

static uint16_t
bond_ethdev_tx_burst_balance(void *queue, struct rte_mbuf **bufs,
                uint16_t nb_pkts)
{
        struct bond_dev_private *internals;
        struct bond_tx_queue *bd_tx_q;

        uint8_t num_of_slaves;
        uint8_t slaves[RTE_MAX_ETHPORTS];

        uint16_t num_tx_total = 0, num_tx_slave = 0, tx_fail_total = 0;

        int i, op_slave_id;

        struct rte_mbuf *slave_bufs[RTE_MAX_ETHPORTS][nb_pkts];
        uint16_t slave_nb_pkts[RTE_MAX_ETHPORTS] = { 0 };

        bd_tx_q = (struct bond_tx_queue *)queue;
        internals = bd_tx_q->dev_private;

        /* Copy slave list to protect against slave up/down changes during tx
         * bursting */
        num_of_slaves = internals->active_slave_count;
        memcpy(slaves, internals->active_slaves,
                        sizeof(internals->active_slaves[0]) * num_of_slaves);

        if (num_of_slaves < 1)
                return num_tx_total;

        /* Populate the per-slave mbuf arrays with the packets to send on each slave */
        for (i = 0; i < nb_pkts; i++) {
                /* Select output slave using hash based on xmit policy */
                op_slave_id = internals->xmit_hash(bufs[i], num_of_slaves);

                /* Populate slave mbuf arrays with mbufs for that slave */
                slave_bufs[op_slave_id][slave_nb_pkts[op_slave_id]++] = bufs[i];
        }

        /* Send packet burst on each slave device */
        for (i = 0; i < num_of_slaves; i++) {
                if (slave_nb_pkts[i] > 0) {
                        num_tx_slave = rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id,
                                        slave_bufs[i], slave_nb_pkts[i]);

                        /* if tx burst fails move packets to end of bufs */
                        if (unlikely(num_tx_slave < slave_nb_pkts[i])) {
                                int slave_tx_fail_count = slave_nb_pkts[i] - num_tx_slave;

                                tx_fail_total += slave_tx_fail_count;
                                memcpy(&bufs[nb_pkts - tx_fail_total],
                                                &slave_bufs[i][num_tx_slave],
                                                slave_tx_fail_count * sizeof(bufs[0]));
                        }

                        num_tx_total += num_tx_slave;
                }
        }

        return num_tx_total;
}

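/*
 * Tx burst for 802.3ad (mode 4): slow protocol packets queued by the mode 4
 * state machine are dequeued from each slave's tx_ring and placed at the
 * front of that slave's buffer, then data packets are hashed across the
 * slaves currently in DISTRIBUTING state. Slow packets that fail to send
 * are freed here and never reported in the return value; unsent data
 * packets are moved to the tail of bufs.
 */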
static uint16_t
bond_ethdev_tx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
                uint16_t nb_pkts)
{
        struct bond_dev_private *internals;
        struct bond_tx_queue *bd_tx_q;

        uint8_t num_of_slaves;
        uint8_t slaves[RTE_MAX_ETHPORTS];
        /* positions in slaves, not IDs */
        uint8_t distributing_offsets[RTE_MAX_ETHPORTS];
        uint8_t distributing_count;

        uint16_t num_tx_slave, num_tx_total = 0, num_tx_fail_total = 0;
        uint16_t i, j, op_slave_idx;
        const uint16_t buffs_size = nb_pkts + BOND_MODE_8023AX_SLAVE_TX_PKTS + 1;

        /* Allocate room for the additional slow protocol packets used in 802.3ad mode */
        struct rte_mbuf *slave_bufs[RTE_MAX_ETHPORTS][buffs_size];
        void *slow_pkts[BOND_MODE_8023AX_SLAVE_TX_PKTS] = { NULL };

        /* Total amount of packets in slave_bufs */
        uint16_t slave_nb_pkts[RTE_MAX_ETHPORTS] = { 0 };
        /* Slow packets placed in each slave */
        uint8_t slave_slow_nb_pkts[RTE_MAX_ETHPORTS] = { 0 };

        bd_tx_q = (struct bond_tx_queue *)queue;
        internals = bd_tx_q->dev_private;

        /* Copy slave list to protect against slave up/down changes during tx
         * bursting */
        num_of_slaves = internals->active_slave_count;
        if (num_of_slaves < 1)
                return num_tx_total;

        memcpy(slaves, internals->active_slaves, sizeof(slaves[0]) * num_of_slaves);

        distributing_count = 0;
        for (i = 0; i < num_of_slaves; i++) {
                struct port *port = &mode_8023ad_ports[slaves[i]];

                slave_slow_nb_pkts[i] = rte_ring_dequeue_burst(port->tx_ring,
                                slow_pkts, BOND_MODE_8023AX_SLAVE_TX_PKTS);
                slave_nb_pkts[i] = slave_slow_nb_pkts[i];

                for (j = 0; j < slave_slow_nb_pkts[i]; j++)
                        slave_bufs[i][j] = slow_pkts[j];

                if (ACTOR_STATE(port, DISTRIBUTING))
                        distributing_offsets[distributing_count++] = i;
        }

        if (likely(distributing_count > 0)) {
                /* Populate the per-slave mbuf arrays with the packets to send */
                for (i = 0; i < nb_pkts; i++) {
                        /* Select output slave using hash based on xmit policy */
                        op_slave_idx = internals->xmit_hash(bufs[i], distributing_count);

                        /* Populate slave mbuf arrays with mbufs for that slave. Use only
                         * slaves that are currently distributing. */
                        uint8_t slave_offset = distributing_offsets[op_slave_idx];
                        slave_bufs[slave_offset][slave_nb_pkts[slave_offset]] = bufs[i];
                        slave_nb_pkts[slave_offset]++;
                }
        }

        /* Send packet burst on each slave device */
        for (i = 0; i < num_of_slaves; i++) {
                if (slave_nb_pkts[i] == 0)
                        continue;

                num_tx_slave = rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id,
                                slave_bufs[i], slave_nb_pkts[i]);

                /* If tx burst fails drop slow packets */
                for ( ; num_tx_slave < slave_slow_nb_pkts[i]; num_tx_slave++)
                        rte_pktmbuf_free(slave_bufs[i][num_tx_slave]);

                num_tx_total += num_tx_slave - slave_slow_nb_pkts[i];
                num_tx_fail_total += slave_nb_pkts[i] - num_tx_slave;

                /* If tx burst fails move packets to end of bufs */
                if (unlikely(num_tx_slave < slave_nb_pkts[i])) {
                        uint16_t j = nb_pkts - num_tx_fail_total;
                        for ( ; num_tx_slave < slave_nb_pkts[i]; j++, num_tx_slave++)
                                bufs[j] = slave_bufs[i][num_tx_slave];
                }
        }

        return num_tx_total;
}

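/*
 * Tx burst for broadcast mode: every packet is transmitted on every active
 * slave, so each mbuf's reference count is bumped by num_of_slaves - 1
 * before sending. The return value reflects the most successful slave;
 * extra references held for less successful slaves are released here.
 */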
static uint16_t
bond_ethdev_tx_burst_broadcast(void *queue, struct rte_mbuf **bufs,
                uint16_t nb_pkts)
{
        struct bond_dev_private *internals;
        struct bond_tx_queue *bd_tx_q;

        uint8_t tx_failed_flag = 0, num_of_slaves;
        uint8_t slaves[RTE_MAX_ETHPORTS];

        uint16_t max_nb_of_tx_pkts = 0;

        int slave_tx_total[RTE_MAX_ETHPORTS];
        int i, most_successful_tx_slave = -1;

        bd_tx_q = (struct bond_tx_queue *)queue;
        internals = bd_tx_q->dev_private;

        /* Copy slave list to protect against slave up/down changes during tx
         * bursting */
        num_of_slaves = internals->active_slave_count;
        memcpy(slaves, internals->active_slaves,
                        sizeof(internals->active_slaves[0]) * num_of_slaves);

        if (num_of_slaves < 1)
                return 0;

        /* Increment reference count on mbufs */
        for (i = 0; i < nb_pkts; i++)
                rte_mbuf_refcnt_update(bufs[i], num_of_slaves - 1);

        /* Transmit burst on each active slave */
        for (i = 0; i < num_of_slaves; i++) {
                slave_tx_total[i] = rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id,
                                        bufs, nb_pkts);

                if (unlikely(slave_tx_total[i] < nb_pkts))
                        tx_failed_flag = 1;

                /* record the value and slave index for the slave which transmits the
                 * maximum number of packets */
                if (slave_tx_total[i] > max_nb_of_tx_pkts) {
                        max_nb_of_tx_pkts = slave_tx_total[i];
                        most_successful_tx_slave = i;
                }
        }

        /* if slaves fail to transmit packets from burst, the calling application
         * is not expected to know about multiple references to packets so we must
         * handle failures of all packets except those of the most successful slave
         */
        if (unlikely(tx_failed_flag))
                for (i = 0; i < num_of_slaves; i++)
                        if (i != most_successful_tx_slave)
                                while (slave_tx_total[i] < nb_pkts)
                                        rte_pktmbuf_free(bufs[slave_tx_total[i]++]);

        return max_nb_of_tx_pkts;
}

void
link_properties_set(struct rte_eth_dev *bonded_eth_dev,
                struct rte_eth_link *slave_dev_link)
{
        struct rte_eth_link *bonded_dev_link = &bonded_eth_dev->data->dev_link;
        struct bond_dev_private *internals = bonded_eth_dev->data->dev_private;

        if (slave_dev_link->link_status &&
                bonded_eth_dev->data->dev_started) {
                bonded_dev_link->link_duplex = slave_dev_link->link_duplex;
                bonded_dev_link->link_speed = slave_dev_link->link_speed;

                internals->link_props_set = 1;
        }
}

void
link_properties_reset(struct rte_eth_dev *bonded_eth_dev)
{
        struct bond_dev_private *internals = bonded_eth_dev->data->dev_private;

        memset(&(bonded_eth_dev->data->dev_link), 0,
                        sizeof(bonded_eth_dev->data->dev_link));

        internals->link_props_set = 0;
}

int
link_properties_valid(struct rte_eth_link *bonded_dev_link,
                struct rte_eth_link *slave_dev_link)
{
        if (bonded_dev_link->link_duplex != slave_dev_link->link_duplex ||
                bonded_dev_link->link_speed != slave_dev_link->link_speed)
                return -1;

        return 0;
}

int
mac_address_get(struct rte_eth_dev *eth_dev, struct ether_addr *dst_mac_addr)
{
        struct ether_addr *mac_addr;

        if (eth_dev == NULL) {
                RTE_LOG(ERR, PMD, "%s: NULL pointer eth_dev specified\n", __func__);
                return -1;
        }

        if (dst_mac_addr == NULL) {
                RTE_LOG(ERR, PMD, "%s: NULL pointer MAC specified\n", __func__);
                return -1;
        }

        mac_addr = eth_dev->data->mac_addrs;

        ether_addr_copy(mac_addr, dst_mac_addr);
        return 0;
}

int
mac_address_set(struct rte_eth_dev *eth_dev, struct ether_addr *new_mac_addr)
{
        struct ether_addr *mac_addr;

        if (eth_dev == NULL) {
                RTE_BOND_LOG(ERR, "NULL pointer eth_dev specified");
                return -1;
        }

        if (new_mac_addr == NULL) {
                RTE_BOND_LOG(ERR, "NULL pointer MAC specified");
                return -1;
        }

        mac_addr = eth_dev->data->mac_addrs;

        /* If new MAC is different to current MAC then update */
        if (memcmp(mac_addr, new_mac_addr, sizeof(*mac_addr)) != 0)
                memcpy(mac_addr, new_mac_addr, sizeof(*mac_addr));

        return 0;
}

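/*
 * Propagate MAC addresses to the slaves according to the bonding mode: in
 * round-robin, balance and broadcast modes every slave takes the bond's
 * MAC; in 802.3ad the mode 4 logic handles it; in the remaining modes only
 * the current primary takes the bond's MAC while the other slaves are
 * restored to their original (persisted) addresses.
 */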
int
mac_address_slaves_update(struct rte_eth_dev *bonded_eth_dev)
{
        struct bond_dev_private *internals = bonded_eth_dev->data->dev_private;
        int i;

        /* Update slave devices MAC addresses */
        if (internals->slave_count < 1)
                return -1;

        switch (internals->mode) {
        case BONDING_MODE_ROUND_ROBIN:
        case BONDING_MODE_BALANCE:
        case BONDING_MODE_BROADCAST:
                for (i = 0; i < internals->slave_count; i++) {
                        if (rte_eth_dev_default_mac_addr_set(
                                        internals->slaves[i].port_id,
                                        bonded_eth_dev->data->mac_addrs)) {
                                RTE_BOND_LOG(ERR, "Failed to update port Id %d MAC address",
                                                internals->slaves[i].port_id);
                                return -1;
                        }
                }
                break;
        case BONDING_MODE_8023AD:
                bond_mode_8023ad_mac_address_update(bonded_eth_dev);
                break;
        case BONDING_MODE_ACTIVE_BACKUP:
        case BONDING_MODE_TLB:
        case BONDING_MODE_ALB:
        default:
                for (i = 0; i < internals->slave_count; i++) {
                        if (internals->slaves[i].port_id ==
                                        internals->current_primary_port) {
                                if (rte_eth_dev_default_mac_addr_set(
                                                internals->current_primary_port,
                                                bonded_eth_dev->data->mac_addrs)) {
                                        RTE_BOND_LOG(ERR, "Failed to update port Id %d MAC address",
                                                        internals->current_primary_port);
                                        return -1;
                                }
                        } else {
                                if (rte_eth_dev_default_mac_addr_set(
                                                internals->slaves[i].port_id,
                                                &internals->slaves[i].persisted_mac_addr)) {
                                        RTE_BOND_LOG(ERR, "Failed to update port Id %d MAC address",
                                                        internals->slaves[i].port_id);
                                        return -1;
                                }
                        }
                }
        }

        return 0;
}

int
bond_ethdev_mode_set(struct rte_eth_dev *eth_dev, int mode)
{
        struct bond_dev_private *internals;

        internals = eth_dev->data->dev_private;

        switch (mode) {
        case BONDING_MODE_ROUND_ROBIN:
                eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_round_robin;
                eth_dev->rx_pkt_burst = bond_ethdev_rx_burst;
                break;
        case BONDING_MODE_ACTIVE_BACKUP:
                eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_active_backup;
                eth_dev->rx_pkt_burst = bond_ethdev_rx_burst_active_backup;
                break;
        case BONDING_MODE_BALANCE:
                eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_balance;
                eth_dev->rx_pkt_burst = bond_ethdev_rx_burst;
                break;
        case BONDING_MODE_BROADCAST:
                eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_broadcast;
                eth_dev->rx_pkt_burst = bond_ethdev_rx_burst;
                break;
        case BONDING_MODE_8023AD:
                if (bond_mode_8023ad_enable(eth_dev) != 0)
                        return -1;

                eth_dev->rx_pkt_burst = bond_ethdev_rx_burst_8023ad;
                eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_8023ad;
                RTE_LOG(WARNING, PMD,
                                "Using mode 4, it is necessary to do TX burst and RX burst "
                                "at least every 100ms.\n");
                break;
        case BONDING_MODE_TLB:
                eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_tlb;
                eth_dev->rx_pkt_burst = bond_ethdev_rx_burst_active_backup;
                break;
        case BONDING_MODE_ALB:
                if (bond_mode_alb_enable(eth_dev) != 0)
                        return -1;

                eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_alb;
                eth_dev->rx_pkt_burst = bond_ethdev_rx_burst_alb;
                break;
        default:
                return -1;
        }

        internals->mode = mode;

        return 0;
}

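/*
 * (Re)configure a slave so it mirrors the bonded device: stop it, enable
 * LSC interrupts if supported, inherit the bond's RSS and VLAN filter
 * configuration, set up any Rx/Tx queues the bond has that the slave does
 * not yet have, and start it again. Returns 0 on success, non-zero on
 * failure.
 */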
1315 int
1316 slave_configure(struct rte_eth_dev *bonded_eth_dev,
1317                 struct rte_eth_dev *slave_eth_dev)
1318 {
1319         struct bond_rx_queue *bd_rx_q;
1320         struct bond_tx_queue *bd_tx_q;
1321
1322         uint16_t old_nb_tx_queues = slave_eth_dev->data->nb_tx_queues;
1323         uint16_t old_nb_rx_queues = slave_eth_dev->data->nb_rx_queues;
1324         int errval;
1325         uint16_t q_id;
1326
1327         /* Stop slave */
1328         rte_eth_dev_stop(slave_eth_dev->data->port_id);
1329
1330         /* Enable interrupts on slave device if supported */
1331         if (slave_eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
1332                 slave_eth_dev->data->dev_conf.intr_conf.lsc = 1;
1333
1334         /* If RSS is enabled for bonding, try to enable it for slaves  */
1335         if (bonded_eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
1336                 if (bonded_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len
1337                                 != 0) {
1338                         slave_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len =
1339                                         bonded_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len;
1340                         slave_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key =
1341                                         bonded_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key;
1342                 } else {
1343                         slave_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1344                 }
1345
1346                 slave_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf =
1347                                 bonded_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
1348                 slave_eth_dev->data->dev_conf.rxmode.mq_mode =
1349                                 bonded_eth_dev->data->dev_conf.rxmode.mq_mode;
1350         }
1351
1352         slave_eth_dev->data->dev_conf.rxmode.hw_vlan_filter =
1353                         bonded_eth_dev->data->dev_conf.rxmode.hw_vlan_filter;
1354
1355         /* Configure device */
1356         errval = rte_eth_dev_configure(slave_eth_dev->data->port_id,
1357                         bonded_eth_dev->data->nb_rx_queues,
1358                         bonded_eth_dev->data->nb_tx_queues,
1359                         &(slave_eth_dev->data->dev_conf));
1360         if (errval != 0) {
1361                 RTE_BOND_LOG(ERR, "Cannot configure slave device: port %u, err (%d)",
1362                                 slave_eth_dev->data->port_id, errval);
1363                 return errval;
1364         }
1365
1366         /* Setup Rx queues: reuse the slave's existing queues and only set up
1367          * those added beyond its previous queue count */
1368         for (q_id = old_nb_rx_queues;
1369              q_id < bonded_eth_dev->data->nb_rx_queues; q_id++) {
1370                 bd_rx_q = (struct bond_rx_queue *)bonded_eth_dev->data->rx_queues[q_id];
1371
1372                 errval = rte_eth_rx_queue_setup(slave_eth_dev->data->port_id, q_id,
1373                                 bd_rx_q->nb_rx_desc,
1374                                 rte_eth_dev_socket_id(slave_eth_dev->data->port_id),
1375                                 &(bd_rx_q->rx_conf), bd_rx_q->mb_pool);
1376                 if (errval != 0) {
1377                         RTE_BOND_LOG(ERR,
1378                                         "rte_eth_rx_queue_setup: port=%d queue_id %d, err (%d)",
1379                                         slave_eth_dev->data->port_id, q_id, errval);
1380                         return errval;
1381                 }
1382         }
1383
1384         /* Setup Tx queues: reuse the slave's existing queues and only set up
1385          * those added beyond its previous queue count */
1386         for (q_id = old_nb_tx_queues;
1387              q_id < bonded_eth_dev->data->nb_tx_queues; q_id++) {
1388                 bd_tx_q = (struct bond_tx_queue *)bonded_eth_dev->data->tx_queues[q_id];
1389
1390                 errval = rte_eth_tx_queue_setup(slave_eth_dev->data->port_id, q_id,
1391                                 bd_tx_q->nb_tx_desc,
1392                                 rte_eth_dev_socket_id(slave_eth_dev->data->port_id),
1393                                 &bd_tx_q->tx_conf);
1394                 if (errval != 0) {
1395                         RTE_BOND_LOG(ERR,
1396                                         "rte_eth_tx_queue_setup: port=%d queue_id %d, err (%d)",
1397                                         slave_eth_dev->data->port_id, q_id, errval);
1398                         return errval;
1399                 }
1400         }
1401
1402         /* Start device */
1403         errval = rte_eth_dev_start(slave_eth_dev->data->port_id);
1404         if (errval != 0) {
1405                 RTE_BOND_LOG(ERR, "rte_eth_dev_start: port=%u, err (%d)",
1406                                 slave_eth_dev->data->port_id, errval);
1407                 return -1;
1408         }
1409
1410         /* If RSS is enabled for bonding, synchronize RETA */
1411         if (bonded_eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS) {
1412                 int i;
1413                 struct bond_dev_private *internals;
1414
1415                 internals = bonded_eth_dev->data->dev_private;
1416
1417                 for (i = 0; i < internals->slave_count; i++) {
1418                         if (internals->slaves[i].port_id == slave_eth_dev->data->port_id) {
1419                                 errval = rte_eth_dev_rss_reta_update(
1420                                                 slave_eth_dev->data->port_id,
1421                                                 &internals->reta_conf[0],
1422                                                 internals->slaves[i].reta_size);
1423                                 if (errval != 0) {
1424                                         RTE_LOG(WARNING, PMD,
1425                                                         "rte_eth_dev_rss_reta_update on slave port %d failed (err %d)."
1426                                                         " RSS configuration for bonding may be inconsistent.\n",
1427                                                         slave_eth_dev->data->port_id, errval);
1428                                 }
1429                                 break;
1430                         }
1431                 }
1432         }
1433
1434         /* If the LSC interrupt flag is set, check the slave's initial link status */
1435         if (slave_eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC) {
1436                 slave_eth_dev->dev_ops->link_update(slave_eth_dev, 0);
1437                 bond_ethdev_lsc_event_callback(slave_eth_dev->data->port_id,
1438                         RTE_ETH_EVENT_INTR_LSC, &bonded_eth_dev->data->port_id);
1439         }
1440
1441         return 0;
1442 }
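
/*
 * A sketch of the call path into slave_configure(): the application adds a
 * slave and starts the bond (the port ids are illustrative assumptions).
 *
 *     if (rte_eth_bond_slave_add(bond_port, slave_port) != 0)
 *             rte_exit(EXIT_FAILURE, "Failed to add slave %u\n", slave_port);
 *
 *     (bond_ethdev_start() below calls slave_configure() for each slave)
 *     if (rte_eth_dev_start(bond_port) != 0)
 *             rte_exit(EXIT_FAILURE, "Failed to start bonded device\n");
 */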
1443
1444 void
1445 slave_remove(struct bond_dev_private *internals,
1446                 struct rte_eth_dev *slave_eth_dev)
1447 {
1448         uint8_t i;
1449
1450         for (i = 0; i < internals->slave_count; i++)
1451                 if (internals->slaves[i].port_id ==
1452                                 slave_eth_dev->data->port_id)
1453                         break;
1454
1455         if (i < (internals->slave_count - 1))
1456                 memmove(&internals->slaves[i], &internals->slaves[i + 1],
1457                                 sizeof(internals->slaves[0]) *
1458                                 (internals->slave_count - i - 1));
1459
1460         internals->slave_count--;
1461 }
1462
1463 static void
1464 bond_ethdev_slave_link_status_change_monitor(void *cb_arg);
1465
1466 void
1467 slave_add(struct bond_dev_private *internals,
1468                 struct rte_eth_dev *slave_eth_dev)
1469 {
1470         struct bond_slave_details *slave_details =
1471                         &internals->slaves[internals->slave_count];
1472
1473         slave_details->port_id = slave_eth_dev->data->port_id;
1474         slave_details->last_link_status = 0;
1475
1476         /* Mark slave devices that don't support interrupts so we can
1477          * compensate when we start the bond
1478          */
1479         if (!(slave_eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)) {
1480                 slave_details->link_status_poll_enabled = 1;
1481         }
1482
1483         slave_details->link_status_wait_to_complete = 0;
1484         /* persist slave's original MAC so it can be restored on removal */
1485         memcpy(&(slave_details->persisted_mac_addr), slave_eth_dev->data->mac_addrs,
1486                         sizeof(struct ether_addr));
1487 }
1488
1489 void
1490 bond_ethdev_primary_set(struct bond_dev_private *internals,
1491                 uint8_t slave_port_id)
1492 {
1493         int i;
1494
1495         if (internals->active_slave_count < 1)
1496                 internals->current_primary_port = slave_port_id;
1497         else
1498                 /* Search bonded device slave ports for new proposed primary port */
1499                 for (i = 0; i < internals->active_slave_count; i++) {
1500                         if (internals->active_slaves[i] == slave_port_id)
1501                                 internals->current_primary_port = slave_port_id;
1502                 }
1503 }
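
/*
 * Sketch of the public entry point that leads here (the slave port id is an
 * illustrative assumption; it must already be a slave of the bond):
 *
 *     if (rte_eth_bond_primary_set(bond_port, slave_port) != 0)
 *             RTE_LOG(ERR, PMD, "Failed to set primary slave\n");
 */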
1504
1505 static void
1506 bond_ethdev_promiscuous_enable(struct rte_eth_dev *eth_dev);
1507
1508 static int
1509 bond_ethdev_start(struct rte_eth_dev *eth_dev)
1510 {
1511         struct bond_dev_private *internals;
1512         int i;
1513
1514         /* slave eth dev will be started by bonded device */
1515         if (check_for_bonded_ethdev(eth_dev)) {
1516                 RTE_BOND_LOG(ERR, "User tried to explicitly start a slave eth_dev (%d)",
1517                                 eth_dev->data->port_id);
1518                 return -1;
1519         }
1520
1521         eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
1522         eth_dev->data->dev_started = 1;
1523
1524         internals = eth_dev->data->dev_private;
1525
1526         if (internals->slave_count == 0) {
1527                 RTE_BOND_LOG(ERR, "Cannot start port since there are no slave devices");
1528                 goto out_err;
1529         }
1530
1531         if (internals->user_defined_mac == 0) {
1532                 struct ether_addr *new_mac_addr = NULL;
1533
1534                 for (i = 0; i < internals->slave_count; i++)
1535                         if (internals->slaves[i].port_id == internals->primary_port)
1536                                 new_mac_addr = &internals->slaves[i].persisted_mac_addr;
1537
1538                 if (new_mac_addr == NULL)
1539                         goto out_err;
1540
1541                 if (mac_address_set(eth_dev, new_mac_addr) != 0) {
1542                         RTE_BOND_LOG(ERR, "bonded port (%d) failed to update MAC address",
1543                                         eth_dev->data->port_id);
1544                         goto out_err;
1545                 }
1546         }
1547
1548         /* Update all slave devices' MACs */
1549         if (mac_address_slaves_update(eth_dev) != 0)
1550                 goto out_err;
1551
1552         /* If bonded device is configured in promiscuous mode then re-apply config */
1553         if (internals->promiscuous_en)
1554                 bond_ethdev_promiscuous_enable(eth_dev);
1555
1556         /* Reconfigure each slave device when starting the bonded device */
1557         for (i = 0; i < internals->slave_count; i++) {
1558                 if (slave_configure(eth_dev,
1559                                 &(rte_eth_devices[internals->slaves[i].port_id])) != 0) {
1560                         RTE_BOND_LOG(ERR,
1561                                         "bonded port (%d) failed to reconfigure slave device (%d)",
1562                                         eth_dev->data->port_id, internals->slaves[i].port_id);
1563                         goto out_err;
1564                 }
1565                 /* We will need to poll for link status if any slave doesn't
1566                  * support interrupts
1567                  */
1568                 if (internals->slaves[i].link_status_poll_enabled)
1569                         internals->link_status_polling_enabled = 1;
1570         }
1571
1572         /* start polling if needed */
1573         if (internals->link_status_polling_enabled) {
1574                 rte_eal_alarm_set(
1575                         internals->link_status_polling_interval_ms * 1000,
1576                         bond_ethdev_slave_link_status_change_monitor,
1577                         (void *)&rte_eth_devices[internals->port_id]);
1578         }
1579
1580         if (internals->user_defined_primary_port)
1581                 bond_ethdev_primary_set(internals, internals->primary_port);
1582
1583         if (internals->mode == BONDING_MODE_8023AD)
1584                 bond_mode_8023ad_start(eth_dev);
1585
1586         if (internals->mode == BONDING_MODE_TLB ||
1587                         internals->mode == BONDING_MODE_ALB)
1588                 bond_tlb_enable(internals);
1589
1590         return 0;
1591
1592 out_err:
1593         eth_dev->data->dev_started = 0;
1594         return -1;
1595 }
1596
1597 static void
1598 bond_ethdev_free_queues(struct rte_eth_dev *dev)
1599 {
1600         uint8_t i;
1601
1602         if (dev->data->rx_queues != NULL) {
1603                 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1604                         rte_free(dev->data->rx_queues[i]);
1605                         dev->data->rx_queues[i] = NULL;
1606                 }
1607                 dev->data->nb_rx_queues = 0;
1608         }
1609
1610         if (dev->data->tx_queues != NULL) {
1611                 for (i = 0; i < dev->data->nb_tx_queues; i++) {
1612                         rte_free(dev->data->tx_queues[i]);
1613                         dev->data->tx_queues[i] = NULL;
1614                 }
1615                 dev->data->nb_tx_queues = 0;
1616         }
1617 }
1618
1619 void
1620 bond_ethdev_stop(struct rte_eth_dev *eth_dev)
1621 {
1622         struct bond_dev_private *internals = eth_dev->data->dev_private;
1623         uint8_t i;
1624
1625         if (internals->mode == BONDING_MODE_8023AD) {
1626                 struct port *port;
1627                 void *pkt = NULL;
1628
1629                 bond_mode_8023ad_stop(eth_dev);
1630
1631                 /* Discard all messages to/from mode 4 state machines */
1632                 for (i = 0; i < internals->active_slave_count; i++) {
1633                         port = &mode_8023ad_ports[internals->active_slaves[i]];
1634
1635                         RTE_ASSERT(port->rx_ring != NULL);
1636                         while (rte_ring_dequeue(port->rx_ring, &pkt) != -ENOENT)
1637                                 rte_pktmbuf_free(pkt);
1638
1639                         RTE_ASSERT(port->tx_ring != NULL);
1640                         while (rte_ring_dequeue(port->tx_ring, &pkt) != -ENOENT)
1641                                 rte_pktmbuf_free(pkt);
1642                 }
1643         }
1644
1645         if (internals->mode == BONDING_MODE_TLB ||
1646                         internals->mode == BONDING_MODE_ALB) {
1647                 bond_tlb_disable(internals);
1648                 for (i = 0; i < internals->active_slave_count; i++)
1649                         tlb_last_obytets[internals->active_slaves[i]] = 0;
1650         }
1651
1652         internals->active_slave_count = 0;
1653         internals->link_status_polling_enabled = 0;
1654         for (i = 0; i < internals->slave_count; i++)
1655                 internals->slaves[i].last_link_status = 0;
1656
1657         eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
1658         eth_dev->data->dev_started = 0;
1659 }
1660
1661 void
1662 bond_ethdev_close(struct rte_eth_dev *dev)
1663 {
1664         struct bond_dev_private *internals = dev->data->dev_private;
1665
1666         bond_ethdev_free_queues(dev);
1667         rte_bitmap_reset(internals->vlan_filter_bmp);
1668 }
1669
1670 /* forward declaration */
1671 static int bond_ethdev_configure(struct rte_eth_dev *dev);
1672
1673 static void
1674 bond_ethdev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
1675 {
1676         struct bond_dev_private *internals = dev->data->dev_private;
1677         uint16_t max_nb_rx_queues = UINT16_MAX;
1678         uint16_t max_nb_tx_queues = UINT16_MAX;
1679
1680         dev_info->max_mac_addrs = 1;
1681
1682         dev_info->max_rx_pktlen = internals->candidate_max_rx_pktlen
1683                                   ? internals->candidate_max_rx_pktlen
1684                                   : ETHER_MAX_JUMBO_FRAME_LEN;
1685
1686         if (internals->slave_count > 0) {
1687                 /* Max number of tx/rx queues that the bonded device can
1688                  * support is the minimum across all slaves, as every slave
1689                  * must be capable of supporting the same number of tx/rx
1690                  * queues.
1691                  */
1692                 struct rte_eth_dev_info slave_info;
1693                 uint8_t idx;
1694
1695                 for (idx = 0; idx < internals->slave_count; idx++) {
1696                         rte_eth_dev_info_get(internals->slaves[idx].port_id,
1697                                         &slave_info);
1698
1699                         if (slave_info.max_rx_queues < max_nb_rx_queues)
1700                                 max_nb_rx_queues = slave_info.max_rx_queues;
1701
1702                         if (slave_info.max_tx_queues < max_nb_tx_queues)
1703                                 max_nb_tx_queues = slave_info.max_tx_queues;
1704                 }
1705         }
1706
1707         dev_info->max_rx_queues = max_nb_rx_queues;
1708         dev_info->max_tx_queues = max_nb_tx_queues;
1709
1710         dev_info->min_rx_bufsize = 0;
1711         dev_info->pci_dev = NULL;
1712
1713         dev_info->rx_offload_capa = internals->rx_offload_capa;
1714         dev_info->tx_offload_capa = internals->tx_offload_capa;
1715         dev_info->flow_type_rss_offloads = internals->flow_type_rss_offloads;
1716
1717         dev_info->reta_size = internals->reta_size;
1718 }
1719
1720 static int
1721 bond_ethdev_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
1722 {
1723         int res;
1724         uint8_t i;
1725         struct bond_dev_private *internals = dev->data->dev_private;
1726
1727         /* don't do this while a slave is being added */
1728         rte_spinlock_lock(&internals->lock);
1729
1730         if (on)
1731                 rte_bitmap_set(internals->vlan_filter_bmp, vlan_id);
1732         else
1733                 rte_bitmap_clear(internals->vlan_filter_bmp, vlan_id);
1734
1735         for (i = 0; i < internals->slave_count; i++) {
1736                 uint8_t port_id = internals->slaves[i].port_id;
1737
1738                 res = rte_eth_dev_vlan_filter(port_id, vlan_id, on);
1739                 if (res == ENOTSUP)
1740                         RTE_LOG(WARNING, PMD,
1741                                 "Setting VLAN filter on slave port %u not supported.\n",
1742                                 port_id);
1743         }
1744
1745         rte_spinlock_unlock(&internals->lock);
1746         return 0;
1747 }
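
/*
 * Sketch: this callback is reached through the generic ethdev API, e.g.
 * enabling filtering of VLAN 100 on the bond (the VLAN id is an assumption;
 * rxmode.hw_vlan_filter must be enabled on the bonded device):
 *
 *     if (rte_eth_dev_vlan_filter(bond_port, 100, 1) != 0)
 *             RTE_LOG(WARNING, PMD, "Failed to set VLAN filter\n");
 */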
1748
1749 static int
1750 bond_ethdev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
1751                 uint16_t nb_rx_desc, unsigned int socket_id __rte_unused,
1752                 const struct rte_eth_rxconf *rx_conf, struct rte_mempool *mb_pool)
1753 {
1754         struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)
1755                         rte_zmalloc_socket(NULL, sizeof(struct bond_rx_queue),
1756                                         0, dev->data->numa_node);
1757         if (bd_rx_q == NULL)
1758                 return -1;
1759
1760         bd_rx_q->queue_id = rx_queue_id;
1761         bd_rx_q->dev_private = dev->data->dev_private;
1762
1763         bd_rx_q->nb_rx_desc = nb_rx_desc;
1764
1765         memcpy(&(bd_rx_q->rx_conf), rx_conf, sizeof(struct rte_eth_rxconf));
1766         bd_rx_q->mb_pool = mb_pool;
1767
1768         dev->data->rx_queues[rx_queue_id] = bd_rx_q;
1769
1770         return 0;
1771 }
1772
1773 static int
1774 bond_ethdev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
1775                 uint16_t nb_tx_desc, unsigned int socket_id __rte_unused,
1776                 const struct rte_eth_txconf *tx_conf)
1777 {
1778         struct bond_tx_queue *bd_tx_q  = (struct bond_tx_queue *)
1779                         rte_zmalloc_socket(NULL, sizeof(struct bond_tx_queue),
1780                                         0, dev->data->numa_node);
1781
1782         if (bd_tx_q == NULL)
1783                 return -1;
1784
1785         bd_tx_q->queue_id = tx_queue_id;
1786         bd_tx_q->dev_private = dev->data->dev_private;
1787
1788         bd_tx_q->nb_tx_desc = nb_tx_desc;
1789         memcpy(&(bd_tx_q->tx_conf), tx_conf, sizeof(bd_tx_q->tx_conf));
1790
1791         dev->data->tx_queues[tx_queue_id] = bd_tx_q;
1792
1793         return 0;
1794 }
1795
1796 static void
1797 bond_ethdev_rx_queue_release(void *queue)
1798 {
1799         if (queue == NULL)
1800                 return;
1801
1802         rte_free(queue);
1803 }
1804
1805 static void
1806 bond_ethdev_tx_queue_release(void *queue)
1807 {
1808         if (queue == NULL)
1809                 return;
1810
1811         rte_free(queue);
1812 }
1813
1814 static void
1815 bond_ethdev_slave_link_status_change_monitor(void *cb_arg)
1816 {
1817         struct rte_eth_dev *bonded_ethdev, *slave_ethdev;
1818         struct bond_dev_private *internals;
1819
1820         /* polling_slave_found defaults to true so that polling is not
1821          * stopped if we fail to take the lock below */
1822         int i, polling_slave_found = 1;
1823
1824         if (cb_arg == NULL)
1825                 return;
1826
1827         bonded_ethdev = (struct rte_eth_dev *)cb_arg;
1828         internals = (struct bond_dev_private *)bonded_ethdev->data->dev_private;
1829
1830         if (!bonded_ethdev->data->dev_started ||
1831                 !internals->link_status_polling_enabled)
1832                 return;
1833
1834         /* If device is currently being configured then don't check slaves' link
1835          * status, wait until next period */
1836         if (rte_spinlock_trylock(&internals->lock)) {
1837                 if (internals->slave_count > 0)
1838                         polling_slave_found = 0;
1839
1840                 for (i = 0; i < internals->slave_count; i++) {
1841                         if (!internals->slaves[i].link_status_poll_enabled)
1842                                 continue;
1843
1844                         slave_ethdev = &rte_eth_devices[internals->slaves[i].port_id];
1845                         polling_slave_found = 1;
1846
1847                         /* Update slave link status */
1848                         (*slave_ethdev->dev_ops->link_update)(slave_ethdev,
1849                                         internals->slaves[i].link_status_wait_to_complete);
1850
1851                         /* if link status has changed since last checked then call lsc
1852                          * event callback */
1853                         if (slave_ethdev->data->dev_link.link_status !=
1854                                         internals->slaves[i].last_link_status) {
1855                                 internals->slaves[i].last_link_status =
1856                                                 slave_ethdev->data->dev_link.link_status;
1857
1858                                 bond_ethdev_lsc_event_callback(internals->slaves[i].port_id,
1859                                                 RTE_ETH_EVENT_INTR_LSC,
1860                                                 &bonded_ethdev->data->port_id);
1861                         }
1862                 }
1863                 rte_spinlock_unlock(&internals->lock);
1864         }
1865
1866         if (polling_slave_found)
1867                 /* Set alarm to continue monitoring link status of slave ethdevs */
1868                 rte_eal_alarm_set(internals->link_status_polling_interval_ms * 1000,
1869                                 bond_ethdev_slave_link_status_change_monitor, cb_arg);
1870 }
1871
1872 static int
1873 bond_ethdev_link_update(struct rte_eth_dev *bonded_eth_dev,
1874                 int wait_to_complete)
1875 {
1876         struct bond_dev_private *internals = bonded_eth_dev->data->dev_private;
1877
1878         if (!bonded_eth_dev->data->dev_started ||
1879                 internals->active_slave_count == 0) {
1880                 bonded_eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
1881                 return 0;
1882         } else {
1883                 struct rte_eth_dev *slave_eth_dev;
1884                 int i, link_up = 0;
1885
1886                 for (i = 0; i < internals->active_slave_count; i++) {
1887                         slave_eth_dev = &rte_eth_devices[internals->active_slaves[i]];
1888
1889                         (*slave_eth_dev->dev_ops->link_update)(slave_eth_dev,
1890                                         wait_to_complete);
1891                         if (slave_eth_dev->data->dev_link.link_status == ETH_LINK_UP) {
1892                                 link_up = 1;
1893                                 break;
1894                         }
1895                 }
1896
1897                 bonded_eth_dev->data->dev_link.link_status = link_up;
1898         }
1899
1900         return 0;
1901 }
1902
1903 static void
1904 bond_ethdev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
1905 {
1906         struct bond_dev_private *internals = dev->data->dev_private;
1907         struct rte_eth_stats slave_stats;
1908         int i, j;
1909
1910         for (i = 0; i < internals->slave_count; i++) {
1911                 rte_eth_stats_get(internals->slaves[i].port_id, &slave_stats);
1912
1913                 stats->ipackets += slave_stats.ipackets;
1914                 stats->opackets += slave_stats.opackets;
1915                 stats->ibytes += slave_stats.ibytes;
1916                 stats->obytes += slave_stats.obytes;
1917                 stats->imissed += slave_stats.imissed;
1918                 stats->ierrors += slave_stats.ierrors;
1919                 stats->oerrors += slave_stats.oerrors;
1920                 stats->rx_nombuf += slave_stats.rx_nombuf;
1921
1922                 for (j = 0; j < RTE_ETHDEV_QUEUE_STAT_CNTRS; j++) {
1923                         stats->q_ipackets[j] += slave_stats.q_ipackets[j];
1924                         stats->q_opackets[j] += slave_stats.q_opackets[j];
1925                         stats->q_ibytes[j] += slave_stats.q_ibytes[j];
1926                         stats->q_obytes[j] += slave_stats.q_obytes[j];
1927                         stats->q_errors[j] += slave_stats.q_errors[j];
1928                 }
1929
1930         }
1931 }
1932
1933 static void
1934 bond_ethdev_stats_reset(struct rte_eth_dev *dev)
1935 {
1936         struct bond_dev_private *internals = dev->data->dev_private;
1937         int i;
1938
1939         for (i = 0; i < internals->slave_count; i++)
1940                 rte_eth_stats_reset(internals->slaves[i].port_id);
1941 }
1942
1943 static void
1944 bond_ethdev_promiscuous_enable(struct rte_eth_dev *eth_dev)
1945 {
1946         struct bond_dev_private *internals = eth_dev->data->dev_private;
1947         int i;
1948
1949         internals->promiscuous_en = 1;
1950
1951         switch (internals->mode) {
1952         /* Promiscuous mode is propagated to all slaves */
1953         case BONDING_MODE_ROUND_ROBIN:
1954         case BONDING_MODE_BALANCE:
1955         case BONDING_MODE_BROADCAST:
1956                 for (i = 0; i < internals->slave_count; i++)
1957                         rte_eth_promiscuous_enable(internals->slaves[i].port_id);
1958                 break;
1959         /* In mode 4 promiscuous mode is managed when a slave is added/removed */
1960         case BONDING_MODE_8023AD:
1961                 break;
1962         /* Promiscuous mode is propagated only to primary slave */
1963         case BONDING_MODE_ACTIVE_BACKUP:
1964         case BONDING_MODE_TLB:
1965         case BONDING_MODE_ALB:
1966         default:
1967                 rte_eth_promiscuous_enable(internals->current_primary_port);
1968         }
1969 }
1970
1971 static void
1972 bond_ethdev_promiscuous_disable(struct rte_eth_dev *dev)
1973 {
1974         struct bond_dev_private *internals = dev->data->dev_private;
1975         int i;
1976
1977         internals->promiscuous_en = 0;
1978
1979         switch (internals->mode) {
1980         /* Promiscuous mode is propagated to all slaves */
1981         case BONDING_MODE_ROUND_ROBIN:
1982         case BONDING_MODE_BALANCE:
1983         case BONDING_MODE_BROADCAST:
1984                 for (i = 0; i < internals->slave_count; i++)
1985                         rte_eth_promiscuous_disable(internals->slaves[i].port_id);
1986                 break;
1987         /* In mode 4 promiscuous mode is managed when a slave is added/removed */
1988         case BONDING_MODE_8023AD:
1989                 break;
1990         /* Promiscuous mode is propagated only to primary slave */
1991         case BONDING_MODE_ACTIVE_BACKUP:
1992         case BONDING_MODE_TLB:
1993         case BONDING_MODE_ALB:
1994         default:
1995                 rte_eth_promiscuous_disable(internals->current_primary_port);
1996         }
1997 }
1998
1999 static void
2000 bond_ethdev_delayed_lsc_propagation(void *arg)
2001 {
2002         if (arg == NULL)
2003                 return;
2004
2005         _rte_eth_dev_callback_process((struct rte_eth_dev *)arg,
2006                         RTE_ETH_EVENT_INTR_LSC, NULL);
2007 }
2008
2009 void
2010 bond_ethdev_lsc_event_callback(uint8_t port_id, enum rte_eth_event_type type,
2011                 void *param)
2012 {
2013         struct rte_eth_dev *bonded_eth_dev, *slave_eth_dev;
2014         struct bond_dev_private *internals;
2015         struct rte_eth_link link;
2016
2017         int i, valid_slave = 0;
2018         uint8_t active_pos;
2019         uint8_t lsc_flag = 0;
2020
2021         if (type != RTE_ETH_EVENT_INTR_LSC || param == NULL)
2022                 return;
2023
2024         bonded_eth_dev = &rte_eth_devices[*(uint8_t *)param];
2025         slave_eth_dev = &rte_eth_devices[port_id];
2026
2027         if (check_for_bonded_ethdev(bonded_eth_dev))
2028                 return;
2029
2030         internals = bonded_eth_dev->data->dev_private;
2031
2032         /* If the device isn't started don't handle interrupts */
2033         if (!bonded_eth_dev->data->dev_started)
2034                 return;
2035
2036         /* verify that port_id is a valid slave of bonded port */
2037         for (i = 0; i < internals->slave_count; i++) {
2038                 if (internals->slaves[i].port_id == port_id) {
2039                         valid_slave = 1;
2040                         break;
2041                 }
2042         }
2043
2044         if (!valid_slave)
2045                 return;
2046
2047         /* Search for port in active port list */
2048         active_pos = find_slave_by_id(internals->active_slaves,
2049                         internals->active_slave_count, port_id);
2050
2051         rte_eth_link_get_nowait(port_id, &link);
2052         if (link.link_status) {
2053                 if (active_pos < internals->active_slave_count)
2054                         return;
2055
2056                 /* if no active slave ports then set this port to be primary port */
2057                 if (internals->active_slave_count < 1) {
2058                         /* If first active slave, then change link status */
2059                         bonded_eth_dev->data->dev_link.link_status = ETH_LINK_UP;
2060                         internals->current_primary_port = port_id;
2061                         lsc_flag = 1;
2062
2063                         mac_address_slaves_update(bonded_eth_dev);
2064
2065                         /* Inherit eth dev link properties from first active slave */
2066                         link_properties_set(bonded_eth_dev,
2067                                         &(slave_eth_dev->data->dev_link));
2068                 } else {
2069                         if (link_properties_valid(
2070                                 &bonded_eth_dev->data->dev_link, &link) != 0) {
2071                                 slave_eth_dev->data->dev_flags &=
2072                                         (~RTE_ETH_DEV_BONDED_SLAVE);
2073                                 RTE_LOG(ERR, PMD,
2074                                         "port %u invalid speed/duplex\n",
2075                                         port_id);
2076                                 return;
2077                         }
2078                 }
2079
2080                 activate_slave(bonded_eth_dev, port_id);
2081
2082                 /* If user has defined the primary port then default to using it */
2083                 if (internals->user_defined_primary_port &&
2084                                 internals->primary_port == port_id)
2085                         bond_ethdev_primary_set(internals, port_id);
2086         } else {
2087                 if (active_pos == internals->active_slave_count)
2088                         return;
2089
2090                 /* Remove from active slave list */
2091                 deactivate_slave(bonded_eth_dev, port_id);
2092
2093                 /* No active slaves, change link status to down and reset other
2094                  * link properties */
2095                 if (internals->active_slave_count < 1) {
2096                         lsc_flag = 1;
2097                         bonded_eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
2098
2099                         link_properties_reset(bonded_eth_dev);
2100                 }
2101
2102                 /* Update primary id: take the first active slave from the list, or
2103                  * fall back to the configured primary port if none are active */
2104                 if (port_id == internals->current_primary_port) {
2105                         if (internals->active_slave_count > 0)
2106                                 bond_ethdev_primary_set(internals,
2107                                                 internals->active_slaves[0]);
2108                         else
2109                                 internals->current_primary_port = internals->primary_port;
2110                 }
2111         }
2112
2113         if (lsc_flag) {
2114                 /* Cancel any possible outstanding interrupts if delays are enabled */
2115                 if (internals->link_up_delay_ms > 0 ||
2116                         internals->link_down_delay_ms > 0)
2117                         rte_eal_alarm_cancel(bond_ethdev_delayed_lsc_propagation,
2118                                         bonded_eth_dev);
2119
2120                 if (bonded_eth_dev->data->dev_link.link_status) {
2121                         if (internals->link_up_delay_ms > 0)
2122                                 rte_eal_alarm_set(internals->link_up_delay_ms * 1000,
2123                                                 bond_ethdev_delayed_lsc_propagation,
2124                                                 (void *)bonded_eth_dev);
2125                         else
2126                                 _rte_eth_dev_callback_process(bonded_eth_dev,
2127                                                 RTE_ETH_EVENT_INTR_LSC, NULL);
2128
2129                 } else {
2130                         if (internals->link_down_delay_ms > 0)
2131                                 rte_eal_alarm_set(internals->link_down_delay_ms * 1000,
2132                                                 bond_ethdev_delayed_lsc_propagation,
2133                                                 (void *)bonded_eth_dev);
2134                         else
2135                                 _rte_eth_dev_callback_process(bonded_eth_dev,
2136                                                 RTE_ETH_EVENT_INTR_LSC, NULL);
2137                 }
2138         }
2139 }
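
/*
 * Sketch: applications observe the aggregated link state by registering for
 * the LSC event on the bonded port itself (the callback name is illustrative):
 *
 *     static void
 *     lsc_cb(uint8_t port_id, enum rte_eth_event_type type, void *param)
 *     {
 *             struct rte_eth_link link;
 *
 *             RTE_SET_USED(type);
 *             RTE_SET_USED(param);
 *             rte_eth_link_get_nowait(port_id, &link);
 *             RTE_LOG(INFO, PMD, "bond port %u link %s\n", port_id,
 *                             link.link_status ? "up" : "down");
 *     }
 *
 *     rte_eth_dev_callback_register(bond_port, RTE_ETH_EVENT_INTR_LSC,
 *                     lsc_cb, NULL);
 */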
2140
2141 static int
2142 bond_ethdev_rss_reta_update(struct rte_eth_dev *dev,
2143                 struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
2144 {
2145         unsigned i, j;
2146         int result = 0;
2147         int slave_reta_size;
2148         unsigned reta_count;
2149         struct bond_dev_private *internals = dev->data->dev_private;
2150
2151         if (reta_size != internals->reta_size)
2152                 return -EINVAL;
2153
2154          /* Copy RETA table */
2155         reta_count = reta_size / RTE_RETA_GROUP_SIZE;
2156
2157         for (i = 0; i < reta_count; i++) {
2158                 internals->reta_conf[i].mask = reta_conf[i].mask;
2159                 for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
2160                         if ((reta_conf[i].mask >> j) & 0x01)
2161                                 internals->reta_conf[i].reta[j] = reta_conf[i].reta[j];
2162         }
2163
2164         /* Fill rest of array */
2165         for (; i < RTE_DIM(internals->reta_conf); i += reta_count)
2166                 memcpy(&internals->reta_conf[i], &internals->reta_conf[0],
2167                                 sizeof(internals->reta_conf[0]) * reta_count);
2168
2169         /* Propagate RETA over slaves */
2170         for (i = 0; i < internals->slave_count; i++) {
2171                 slave_reta_size = internals->slaves[i].reta_size;
2172                 result = rte_eth_dev_rss_reta_update(internals->slaves[i].port_id,
2173                                 &internals->reta_conf[0], slave_reta_size);
2174                 if (result < 0)
2175                         return result;
2176         }
2177
2178         return 0;
2179 }
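
/*
 * Sketch of updating the bond's RETA from an application (the table size and
 * queue count are illustrative assumptions; reta_size must match what
 * dev_infos_get reports for the bonded device):
 *
 *     struct rte_eth_rss_reta_entry64 reta_conf[128 / RTE_RETA_GROUP_SIZE];
 *     unsigned i, j;
 *
 *     for (i = 0; i < RTE_DIM(reta_conf); i++) {
 *             reta_conf[i].mask = ~0ULL;
 *             for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
 *                     reta_conf[i].reta[j] = j % nb_rx_queues;
 *     }
 *     rte_eth_dev_rss_reta_update(bond_port, reta_conf, 128);
 */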
2180
2181 static int
2182 bond_ethdev_rss_reta_query(struct rte_eth_dev *dev,
2183                 struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
2184 {
2185         int i, j;
2186         struct bond_dev_private *internals = dev->data->dev_private;
2187
2188         if (reta_size != internals->reta_size)
2189                 return -EINVAL;
2190
2191          /* Copy RETA table */
2192         for (i = 0; i < reta_size / RTE_RETA_GROUP_SIZE; i++)
2193                 for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
2194                         if ((reta_conf[i].mask >> j) & 0x01)
2195                                 reta_conf[i].reta[j] = internals->reta_conf[i].reta[j];
2196
2197         return 0;
2198 }
2199
2200 static int
2201 bond_ethdev_rss_hash_update(struct rte_eth_dev *dev,
2202                 struct rte_eth_rss_conf *rss_conf)
2203 {
2204         int i, result = 0;
2205         struct bond_dev_private *internals = dev->data->dev_private;
2206         struct rte_eth_rss_conf bond_rss_conf;
2207
2208         memcpy(&bond_rss_conf, rss_conf, sizeof(struct rte_eth_rss_conf));
2209
2210         bond_rss_conf.rss_hf &= internals->flow_type_rss_offloads;
2211
2212         if (bond_rss_conf.rss_hf != 0)
2213                 dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf = bond_rss_conf.rss_hf;
2214
2215         if (bond_rss_conf.rss_key && bond_rss_conf.rss_key_len <
2216                         sizeof(internals->rss_key)) {
2217                 if (bond_rss_conf.rss_key_len == 0)
2218                         bond_rss_conf.rss_key_len = 40;
2219                 internals->rss_key_len = bond_rss_conf.rss_key_len;
2220                 memcpy(internals->rss_key, bond_rss_conf.rss_key,
2221                                 internals->rss_key_len);
2222         }
2223
2224         for (i = 0; i < internals->slave_count; i++) {
2225                 result = rte_eth_dev_rss_hash_update(internals->slaves[i].port_id,
2226                                 &bond_rss_conf);
2227                 if (result < 0)
2228                         return result;
2229         }
2230
2231         return 0;
2232 }
2233
2234 static int
2235 bond_ethdev_rss_hash_conf_get(struct rte_eth_dev *dev,
2236                 struct rte_eth_rss_conf *rss_conf)
2237 {
2238         struct bond_dev_private *internals = dev->data->dev_private;
2239
2240         rss_conf->rss_hf = dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
2241         rss_conf->rss_key_len = internals->rss_key_len;
2242         if (rss_conf->rss_key)
2243                 memcpy(rss_conf->rss_key, internals->rss_key, internals->rss_key_len);
2244
2245         return 0;
2246 }
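
/*
 * Sketch of driving the two RSS hash callbacks above through the generic
 * ethdev API (the hash-function mask is an illustrative assumption):
 *
 *     struct rte_eth_rss_conf conf = {
 *             .rss_key = NULL,     (keep the currently programmed key)
 *             .rss_hf = ETH_RSS_IP | ETH_RSS_UDP,
 *     };
 *
 *     if (rte_eth_dev_rss_hash_update(bond_port, &conf) != 0)
 *             RTE_LOG(WARNING, PMD, "RSS hash update failed\n");
 */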
2247
2248 const struct eth_dev_ops default_dev_ops = {
2249         .dev_start            = bond_ethdev_start,
2250         .dev_stop             = bond_ethdev_stop,
2251         .dev_close            = bond_ethdev_close,
2252         .dev_configure        = bond_ethdev_configure,
2253         .dev_infos_get        = bond_ethdev_info,
2254         .vlan_filter_set      = bond_ethdev_vlan_filter_set,
2255         .rx_queue_setup       = bond_ethdev_rx_queue_setup,
2256         .tx_queue_setup       = bond_ethdev_tx_queue_setup,
2257         .rx_queue_release     = bond_ethdev_rx_queue_release,
2258         .tx_queue_release     = bond_ethdev_tx_queue_release,
2259         .link_update          = bond_ethdev_link_update,
2260         .stats_get            = bond_ethdev_stats_get,
2261         .stats_reset          = bond_ethdev_stats_reset,
2262         .promiscuous_enable   = bond_ethdev_promiscuous_enable,
2263         .promiscuous_disable  = bond_ethdev_promiscuous_disable,
2264         .reta_update          = bond_ethdev_rss_reta_update,
2265         .reta_query           = bond_ethdev_rss_reta_query,
2266         .rss_hash_update      = bond_ethdev_rss_hash_update,
2267         .rss_hash_conf_get    = bond_ethdev_rss_hash_conf_get
2268 };
2269
2270 static int
2271 bond_probe(const char *name, const char *params)
2272 {
2273         struct bond_dev_private *internals;
2274         struct rte_kvargs *kvlist;
2275         uint8_t bonding_mode, socket_id;
2276         int  arg_count, port_id;
2277
2278         RTE_LOG(INFO, EAL, "Initializing pmd_bond for %s\n", name);
2279
2280         kvlist = rte_kvargs_parse(params, pmd_bond_init_valid_arguments);
2281         if (kvlist == NULL)
2282                 return -1;
2283
2284         /* Parse link bonding mode */
2285         if (rte_kvargs_count(kvlist, PMD_BOND_MODE_KVARG) == 1) {
2286                 if (rte_kvargs_process(kvlist, PMD_BOND_MODE_KVARG,
2287                                 &bond_ethdev_parse_slave_mode_kvarg,
2288                                 &bonding_mode) != 0) {
2289                         RTE_LOG(ERR, EAL, "Invalid mode for bonded device %s\n",
2290                                         name);
2291                         goto parse_error;
2292                 }
2293         } else {
2294                 RTE_LOG(ERR, EAL, "Mode must be specified exactly once for bonded "
2295                                 "device %s\n", name);
2296                 goto parse_error;
2297         }
2298
2299         /* Parse socket id to create bonding device on */
2300         arg_count = rte_kvargs_count(kvlist, PMD_BOND_SOCKET_ID_KVARG);
2301         if (arg_count == 1) {
2302                 if (rte_kvargs_process(kvlist, PMD_BOND_SOCKET_ID_KVARG,
2303                                 &bond_ethdev_parse_socket_id_kvarg, &socket_id)
2304                                 != 0) {
2305                         RTE_LOG(ERR, EAL, "Invalid socket id specified for "
2306                                         "bonded device %s\n", name);
2307                         goto parse_error;
2308                 }
2309         } else if (arg_count > 1) {
2310                 RTE_LOG(ERR, EAL, "Socket id can be specified only once for "
2311                                 "bonded device %s\n", name);
2312                 goto parse_error;
2313         } else {
2314                 socket_id = rte_socket_id();
2315         }
2316
2317         /* Create link bonding eth device */
2318         port_id = rte_eth_bond_create(name, bonding_mode, socket_id);
2319         if (port_id < 0) {
2320                 RTE_LOG(ERR, EAL, "Failed to create bonded device %s in mode %u on "
2321                                 "socket %u.\n", name, bonding_mode, socket_id);
2322                 goto parse_error;
2323         }
2324         internals = rte_eth_devices[port_id].data->dev_private;
2325         internals->kvlist = kvlist;
2326
2327         RTE_LOG(INFO, EAL, "Created bonded device %s on port %d in mode %u on "
2328                         "socket %u.\n", name, port_id, bonding_mode, socket_id);
2329         return 0;
2330
2331 parse_error:
2332         rte_kvargs_free(kvlist);
2333
2334         return -1;
2335 }
2336
2337 static int
2338 bond_remove(const char *name)
2339 {
2340         int  ret;
2341
2342         if (name == NULL)
2343                 return -EINVAL;
2344
2345         RTE_LOG(INFO, EAL, "Uninitializing pmd_bond for %s\n", name);
2346
2347         /* free link bonding eth device */
2348         ret = rte_eth_bond_free(name);
2349         if (ret < 0)
2350                 RTE_LOG(ERR, EAL, "Failed to free %s\n", name);
2351
2352         return ret;
2353 }
2354
2355 /* This resolves the slave port ids, which can only be done after all the
2356  * other pdevs and vdevs have been allocated */
2357 static int
2358 bond_ethdev_configure(struct rte_eth_dev *dev)
2359 {
2360         char *name = dev->data->name;
2361         struct bond_dev_private *internals = dev->data->dev_private;
2362         struct rte_kvargs *kvlist = internals->kvlist;
2363         int arg_count;
2364         uint8_t port_id = dev - rte_eth_devices;
2365
2366         static const uint8_t default_rss_key[40] = {
2367                 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2, 0x41, 0x67, 0x25, 0x3D,
2368                 0x43, 0xA3, 0x8F, 0xB0, 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
2369                 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C, 0x6A, 0x42, 0xB7, 0x3B,
2370                 0xBE, 0xAC, 0x01, 0xFA
2371         };
2372
2373         unsigned i, j;
2374
2375         /* If RSS is enabled, fill table and key with default values */
2376         if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS) {
2377                 dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key = internals->rss_key;
2378                 dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len = 0;
2379                 memcpy(internals->rss_key, default_rss_key, 40);
2380
2381                 for (i = 0; i < RTE_DIM(internals->reta_conf); i++) {
2382                         internals->reta_conf[i].mask = ~0LL;
2383                         for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
2384                                 internals->reta_conf[i].reta[j] = j % dev->data->nb_rx_queues;
2385                 }
2386         }
2387
2388         /* set the max_rx_pktlen */
2389         internals->max_rx_pktlen = internals->candidate_max_rx_pktlen;
2390
2391         /*
2392          * if no kvlist, it means that this bonded device has been created
2393          * through the bonding API.
2394          */
2395         if (!kvlist)
2396                 return 0;
2397
2398         /* Parse MAC address for bonded device */
2399         arg_count = rte_kvargs_count(kvlist, PMD_BOND_MAC_ADDR_KVARG);
2400         if (arg_count == 1) {
2401                 struct ether_addr bond_mac;
2402
2403                 if (rte_kvargs_process(kvlist, PMD_BOND_MAC_ADDR_KVARG,
2404                                 &bond_ethdev_parse_bond_mac_addr_kvarg, &bond_mac) < 0) {
2405                         RTE_LOG(INFO, EAL, "Invalid mac address for bonded device %s\n",
2406                                         name);
2407                         return -1;
2408                 }
2409
2410                 /* Set MAC address */
2411                 if (rte_eth_bond_mac_address_set(port_id, &bond_mac) != 0) {
2412                         RTE_LOG(ERR, EAL,
2413                                         "Failed to set mac address on bonded device %s\n",
2414                                         name);
2415                         return -1;
2416                 }
2417         } else if (arg_count > 1) {
2418                 RTE_LOG(ERR, EAL,
2419                                 "MAC address can be specified only once for bonded device %s\n",
2420                                 name);
2421                 return -1;
2422         }
2423
2424         /* Parse/set balance mode transmit policy */
2425         arg_count = rte_kvargs_count(kvlist, PMD_BOND_XMIT_POLICY_KVARG);
2426         if (arg_count == 1) {
2427                 uint8_t xmit_policy;
2428
2429                 if (rte_kvargs_process(kvlist, PMD_BOND_XMIT_POLICY_KVARG,
2430                                 &bond_ethdev_parse_balance_xmit_policy_kvarg, &xmit_policy) !=
2431                                                 0) {
2432                         RTE_LOG(INFO, EAL,
2433                                         "Invalid xmit policy specified for bonded device %s\n",
2434                                         name);
2435                         return -1;
2436                 }
2437
2438                 /* Set balance mode transmit policy */
2439                 if (rte_eth_bond_xmit_policy_set(port_id, xmit_policy) != 0) {
2440                         RTE_LOG(ERR, EAL,
2441                                         "Failed to set balance xmit policy on bonded device %s\n",
2442                                         name);
2443                         return -1;
2444                 }
2445         } else if (arg_count > 1) {
2446                 RTE_LOG(ERR, EAL,
2447                                 "Transmit policy can be specified only once for bonded device"
2448                                 " %s\n", name);
2449                 return -1;
2450         }
2451
2452         /* Parse/add slave ports to bonded device */
2453         if (rte_kvargs_count(kvlist, PMD_BOND_SLAVE_PORT_KVARG) > 0) {
2454                 struct bond_ethdev_slave_ports slave_ports;
2455                 unsigned i;
2456
2457                 memset(&slave_ports, 0, sizeof(slave_ports));
2458
2459                 if (rte_kvargs_process(kvlist, PMD_BOND_SLAVE_PORT_KVARG,
2460                                 &bond_ethdev_parse_slave_port_kvarg, &slave_ports) != 0) {
2461                         RTE_LOG(ERR, EAL,
2462                                         "Failed to parse slave ports for bonded device %s\n",
2463                                         name);
2464                         return -1;
2465                 }
2466
2467                 for (i = 0; i < slave_ports.slave_count; i++) {
2468                         if (rte_eth_bond_slave_add(port_id, slave_ports.slaves[i]) != 0) {
2469                                 RTE_LOG(ERR, EAL,
2470                                                 "Failed to add port %d as slave to bonded device %s\n",
2471                                                 slave_ports.slaves[i], name);
2472                         }
2473                 }
2474
2475         } else {
2476                 RTE_LOG(INFO, EAL, "No slaves specified for bonded device %s\n", name);
2477                 return -1;
2478         }
2479
2480         /* Parse/set primary slave port id*/
2481         arg_count = rte_kvargs_count(kvlist, PMD_BOND_PRIMARY_SLAVE_KVARG);
2482         if (arg_count == 1) {
2483                 uint8_t primary_slave_port_id;
2484
2485                 if (rte_kvargs_process(kvlist,
2486                                 PMD_BOND_PRIMARY_SLAVE_KVARG,
2487                                 &bond_ethdev_parse_primary_slave_port_id_kvarg,
2488                                 &primary_slave_port_id) < 0) {
2489                         RTE_LOG(INFO, EAL,
2490                                         "Invalid primary slave port id specified for bonded device"
2491                                         " %s\n", name);
2492                         return -1;
2493                 }
2494
2495                 /* Set primary slave port id */
2496                 if (rte_eth_bond_primary_set(port_id, (uint8_t)primary_slave_port_id)
2497                                 != 0) {
2498                         RTE_LOG(ERR, EAL,
2499                                         "Failed to set primary slave port %d on bonded device %s\n",
2500                                         primary_slave_port_id, name);
2501                         return -1;
2502                 }
2503         } else if (arg_count > 1) {
2504                 RTE_LOG(INFO, EAL,
2505                                 "Primary slave can be specified only once for bonded device"
2506                                 " %s\n", name);
2507                 return -1;
2508         }
2509
2510         /* Parse link status monitor polling interval */
2511         arg_count = rte_kvargs_count(kvlist, PMD_BOND_LSC_POLL_PERIOD_KVARG);
2512         if (arg_count == 1) {
2513                 uint32_t lsc_poll_interval_ms;
2514
2515                 if (rte_kvargs_process(kvlist,
2516                                 PMD_BOND_LSC_POLL_PERIOD_KVARG,
2517                                 &bond_ethdev_parse_time_ms_kvarg,
2518                                 &lsc_poll_interval_ms) < 0) {
2519                         RTE_LOG(INFO, EAL,
2520                                         "Invalid lsc polling interval value specified for bonded"
2521                                         " device %s\n", name);
2522                         return -1;
2523                 }
2524
2525                 if (rte_eth_bond_link_monitoring_set(port_id, lsc_poll_interval_ms)
2526                                 != 0) {
2527                         RTE_LOG(ERR, EAL,
2528                                         "Failed to set lsc monitor polling interval (%u ms) on"
2529                                         " bonded device %s\n", lsc_poll_interval_ms, name);
2530                         return -1;
2531                 }
2532         } else if (arg_count > 1) {
2533                 RTE_LOG(INFO, EAL,
2534                                 "LSC polling interval can be specified only once for bonded"
2535                                 " device %s\n", name);
2536                 return -1;
2537         }
2538
2539         /* Parse link up interrupt propagation delay */
2540         arg_count = rte_kvargs_count(kvlist, PMD_BOND_LINK_UP_PROP_DELAY_KVARG);
2541         if (arg_count == 1) {
2542                 uint32_t link_up_delay_ms;
2543
2544                 if (rte_kvargs_process(kvlist,
2545                                 PMD_BOND_LINK_UP_PROP_DELAY_KVARG,
2546                                 &bond_ethdev_parse_time_ms_kvarg,
2547                                 &link_up_delay_ms) < 0) {
2548                         RTE_LOG(INFO, EAL,
2549                                         "Invalid link up propagation delay value specified for"
2550                                         " bonded device %s\n", name);
2551                         return -1;
2552                 }
2553
2554                 /* Set link up propagation delay */
2555                 if (rte_eth_bond_link_up_prop_delay_set(port_id, link_up_delay_ms)
2556                                 != 0) {
2557                         RTE_LOG(ERR, EAL,
2558                                         "Failed to set link up propagation delay (%u ms) on bonded"
2559                                         " device %s\n", link_up_delay_ms, name);
2560                         return -1;
2561                 }
2562         } else if (arg_count > 1) {
2563                 RTE_LOG(INFO, EAL,
2564                                 "Link up propagation delay can be specified only once for"
2565                                 " bonded device %s\n", name);
2566                 return -1;
2567         }
2568
2569         /* Parse link down interrupt propagation delay */
2570         arg_count = rte_kvargs_count(kvlist, PMD_BOND_LINK_DOWN_PROP_DELAY_KVARG);
2571         if (arg_count == 1) {
2572                 uint32_t link_down_delay_ms;
2573
2574                 if (rte_kvargs_process(kvlist,
2575                                 PMD_BOND_LINK_DOWN_PROP_DELAY_KVARG,
2576                                 &bond_ethdev_parse_time_ms_kvarg,
2577                                 &link_down_delay_ms) < 0) {
2578                         RTE_LOG(INFO, EAL,
2579                                         "Invalid link down propagation delay value specified for"
2580                                         " bonded device %s\n", name);
2581                         return -1;
2582                 }
2583
2584                 /* Set link down propagation delay */
2585                 if (rte_eth_bond_link_down_prop_delay_set(port_id, link_down_delay_ms)
2586                                 != 0) {
2587                         RTE_LOG(ERR, EAL,
2588                                         "Failed to set link down propagation delay (%u ms) on"
2589                                         " bonded device %s\n", link_down_delay_ms, name);
2590                         return -1;
2591                 }
2592         } else if (arg_count > 1) {
2593                 RTE_LOG(INFO, EAL,
2594                                 "Link down propagation delay can be specified only once for"
2595                                 " bonded device %s\n", name);
2596                 return -1;
2597         }
2598
2599         return 0;
2600 }
2601
2602 static struct rte_vdev_driver bond_drv = {
2603         .probe = bond_probe,
2604         .remove = bond_remove,
2605 };
2606
2607 RTE_PMD_REGISTER_VDEV(net_bonding, bond_drv);
2608 RTE_PMD_REGISTER_ALIAS(net_bonding, eth_bond);
2609
2610 RTE_PMD_REGISTER_PARAM_STRING(net_bonding,
2611         "slave=<ifc> "
2612         "primary=<ifc> "
2613         "mode=[0-6] "
2614         "xmit_policy=[l2 | l23 | l34] "
2615         "socket_id=<int> "
2616         "mac=<mac addr> "
2617         "lsc_poll_period_ms=<int> "
2618         "up_delay=<int> "
2619         "down_delay=<int>");
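
/*
 * Example devargs matching the parameter string above, as passed on the EAL
 * command line (the PCI addresses and parameter values are placeholders):
 *
 *     --vdev 'net_bonding0,mode=2,slave=0000:04:00.0,slave=0000:04:00.1,xmit_policy=l34,lsc_poll_period_ms=100'
 */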