/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <stdlib.h>
#include <netinet/in.h>

#include <rte_mbuf.h>
#include <rte_malloc.h>
#include <rte_ethdev.h>
#include <rte_tcp.h>
#include <rte_udp.h>
#include <rte_ip.h>
#include <rte_ip_frag.h>
#include <rte_devargs.h>
#include <rte_kvargs.h>
#include <rte_alarm.h>
#include <rte_cycles.h>

#include "rte_eth_bond.h"
#include "rte_eth_bond_private.h"
#include "rte_eth_bond_8023ad_private.h"
#define REORDER_PERIOD_MS 10

#define HASH_L4_PORTS(h) ((h)->src_port ^ (h)->dst_port)

/* Table for statistics in mode 5 TLB */
static uint64_t tlb_last_obytets[RTE_MAX_ETHPORTS];
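
/*
 * Skip past up to two stacked VLAN tags (QinQ) in an Ethernet header.
 * Returns the byte offset of the encapsulated payload relative to the end
 * of the Ethernet header and updates *proto to the inner EtherType.
 */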
static inline size_t
get_vlan_offset(struct ether_hdr *eth_hdr, uint16_t *proto)
{
	size_t vlan_offset = 0;

	if (rte_cpu_to_be_16(ETHER_TYPE_VLAN) == *proto) {
		struct vlan_hdr *vlan_hdr = (struct vlan_hdr *)(eth_hdr + 1);

		vlan_offset = sizeof(struct vlan_hdr);
		*proto = vlan_hdr->eth_proto;

		if (rte_cpu_to_be_16(ETHER_TYPE_VLAN) == *proto) {
			vlan_hdr = vlan_hdr + 1;
			*proto = vlan_hdr->eth_proto;
			vlan_offset += sizeof(struct vlan_hdr);
		}
	}
	return vlan_offset;
}
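
/*
 * Basic burst RX used by the round-robin, balance and broadcast modes:
 * polls each active slave queue in turn until nb_pkts packets have been
 * gathered or every slave has been serviced once.
 */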
static uint16_t
bond_ethdev_rx_burst(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	struct bond_dev_private *internals;

	uint16_t num_rx_slave = 0;
	uint16_t num_rx_total = 0;

	int i;

	/* Cast to structure containing the bonded device's port id and queue id */
	struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)queue;

	internals = bd_rx_q->dev_private;

	for (i = 0; i < internals->active_slave_count && nb_pkts; i++) {
		/* Offset of pointer to *bufs increases as packets are received
		 * from other slaves */
		num_rx_slave = rte_eth_rx_burst(internals->active_slaves[i],
				bd_rx_q->queue_id, bufs + num_rx_total, nb_pkts);
		num_rx_total += num_rx_slave;
		nb_pkts -= num_rx_slave;
	}

	return num_rx_total;
}
static uint16_t
bond_ethdev_rx_burst_active_backup(void *queue, struct rte_mbuf **bufs,
		uint16_t nb_pkts)
{
	struct bond_dev_private *internals;

	/* Cast to structure containing the bonded device's port id and queue id */
	struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)queue;

	internals = bd_rx_q->dev_private;

	return rte_eth_rx_burst(internals->current_primary_port,
			bd_rx_q->queue_id, bufs, nb_pkts);
}
static inline uint8_t
is_lacp_packets(uint16_t ethertype, uint8_t subtype, uint16_t vlan_tci)
{
	const uint16_t ether_type_slow_be = rte_be_to_cpu_16(ETHER_TYPE_SLOW);

	return !vlan_tci && (ethertype == ether_type_slow_be &&
		(subtype == SLOW_SUBTYPE_MARKER || subtype == SLOW_SUBTYPE_LACP));
}
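
/*
 * Burst RX for mode 4 (802.3ad). Besides gathering packets from all active
 * slaves, this filters out LACP/marker frames (which are handed to the mode
 * 4 state machines) and drops unicast frames that arrive on a slave that is
 * not collecting, or that are not addressed to the bond when it is not in
 * promiscuous mode.
 */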
static uint16_t
bond_ethdev_rx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
		uint16_t nb_pkts)
{
	/* Cast to structure containing the bonded device's port id and queue id */
	struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)queue;
	struct bond_dev_private *internals = bd_rx_q->dev_private;
	struct ether_addr bond_mac;

	struct ether_hdr *hdr;

	const uint16_t ether_type_slow_be = rte_be_to_cpu_16(ETHER_TYPE_SLOW);
	uint16_t num_rx_total = 0;	/* Total number of received packets */
	uint8_t slaves[RTE_MAX_ETHPORTS];
	uint8_t slave_count;

	uint8_t collecting;  /* current slave collecting status */
	const uint8_t promisc = internals->promiscuous_en;
	uint8_t i, j, k;
	uint8_t subtype;

	rte_eth_macaddr_get(internals->port_id, &bond_mac);
	/* Copy slave list to protect against slave up/down changes during tx
	 * bursting */
	slave_count = internals->active_slave_count;
	memcpy(slaves, internals->active_slaves,
			sizeof(internals->active_slaves[0]) * slave_count);

	for (i = 0; i < slave_count && num_rx_total < nb_pkts; i++) {
		j = num_rx_total;
		collecting = ACTOR_STATE(&mode_8023ad_ports[slaves[i]], COLLECTING);

		/* Read packets from this slave */
		num_rx_total += rte_eth_rx_burst(slaves[i], bd_rx_q->queue_id,
				&bufs[num_rx_total], nb_pkts - num_rx_total);

		for (k = j; k < 2 && k < num_rx_total; k++)
			rte_prefetch0(rte_pktmbuf_mtod(bufs[k], void *));

		/* Handle slow protocol packets. */
		while (j < num_rx_total) {
			if (j + 3 < num_rx_total)
				rte_prefetch0(rte_pktmbuf_mtod(bufs[j + 3], void *));

			hdr = rte_pktmbuf_mtod(bufs[j], struct ether_hdr *);
			subtype = ((struct slow_protocol_frame *)hdr)->slow_protocol.subtype;

			/* Remove packet from array if it is a slow packet, or the slave
			 * is not in collecting state, or the bonding interface is not in
			 * promiscuous mode and the packet address does not match. */
			if (unlikely(is_lacp_packets(hdr->ether_type, subtype, bufs[j]->vlan_tci) ||
				!collecting || (!promisc &&
					!is_multicast_ether_addr(&hdr->d_addr) &&
					!is_same_ether_addr(&bond_mac, &hdr->d_addr)))) {

				if (hdr->ether_type == ether_type_slow_be) {
					bond_mode_8023ad_handle_slow_pkt(internals, slaves[i],
						bufs[j]);
				} else
					rte_pktmbuf_free(bufs[j]);

				/* Packet is managed by mode 4 or dropped, shift the array */
				num_rx_total--;
				if (j < num_rx_total) {
					memmove(&bufs[j], &bufs[j + 1], sizeof(bufs[0]) *
						(num_rx_total - j));
				}
			} else
				j++;
		}
	}

	return num_rx_total;
}
#if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
uint32_t burstnumberRX;
uint32_t burstnumberTX;
#endif
#ifdef RTE_LIBRTE_BOND_DEBUG_ALB
static void
arp_op_name(uint16_t arp_op, char *buf)
{
	switch (arp_op) {
	case ARP_OP_REQUEST:
		snprintf(buf, sizeof("ARP Request"), "%s", "ARP Request");
		return;
	case ARP_OP_REPLY:
		snprintf(buf, sizeof("ARP Reply"), "%s", "ARP Reply");
		return;
	case ARP_OP_REVREQUEST:
		snprintf(buf, sizeof("Reverse ARP Request"), "%s",
				"Reverse ARP Request");
		return;
	case ARP_OP_REVREPLY:
		snprintf(buf, sizeof("Reverse ARP Reply"), "%s",
				"Reverse ARP Reply");
		return;
	case ARP_OP_INVREQUEST:
		snprintf(buf, sizeof("Peer Identify Request"), "%s",
				"Peer Identify Request");
		return;
	case ARP_OP_INVREPLY:
		snprintf(buf, sizeof("Peer Identify Reply"), "%s",
				"Peer Identify Reply");
		return;
	default:
		break;
	}

	snprintf(buf, sizeof("Unknown"), "%s", "Unknown");
}
#endif
#define MaxIPv4String	16
static void
ipv4_addr_to_dot(uint32_t be_ipv4_addr, char *buf, uint8_t buf_size)
{
	uint32_t ipv4_addr;

	ipv4_addr = rte_be_to_cpu_32(be_ipv4_addr);
	snprintf(buf, buf_size, "%d.%d.%d.%d", (ipv4_addr >> 24) & 0xFF,
		(ipv4_addr >> 16) & 0xFF, (ipv4_addr >> 8) & 0xFF,
		ipv4_addr & 0xFF);
}
#define MAX_CLIENTS_NUMBER	128
uint8_t active_clients;
struct client_stats_t {
	uint8_t port;
	uint32_t ipv4_addr;
	uint32_t ipv4_rx_packets;
	uint32_t ipv4_tx_packets;
};
struct client_stats_t client_stats[MAX_CLIENTS_NUMBER];
static void
update_client_stats(uint32_t addr, uint8_t port, uint32_t *TXorRXindicator)
{
	int i = 0;

	for (; i < MAX_CLIENTS_NUMBER; i++) {
		if ((client_stats[i].ipv4_addr == addr) && (client_stats[i].port == port)) {
			/* Just update RX packets number for this client */
			if (TXorRXindicator == &burstnumberRX)
				client_stats[i].ipv4_rx_packets++;
			else
				client_stats[i].ipv4_tx_packets++;
			return;
		}
	}
	/* We have a new client. Insert it into the table and update the stats. */
	if (TXorRXindicator == &burstnumberRX)
		client_stats[active_clients].ipv4_rx_packets++;
	else
		client_stats[active_clients].ipv4_tx_packets++;
	client_stats[active_clients].ipv4_addr = addr;
	client_stats[active_clients].port = port;
	active_clients++;
}
#ifdef RTE_LIBRTE_BOND_DEBUG_ALB
#define MODE6_DEBUG(info, src_ip, dst_ip, eth_h, arp_op, port, burstnumber) \
	RTE_LOG(DEBUG, PMD, \
		"%s " \
		"port:%d " \
		"SrcMAC:%02X:%02X:%02X:%02X:%02X:%02X " \
		"SrcIP:%s " \
		"DstMAC:%02X:%02X:%02X:%02X:%02X:%02X " \
		"DstIP:%s " \
		"%s " \
		"%d\n", \
		info, \
		port, \
		eth_h->s_addr.addr_bytes[0], \
		eth_h->s_addr.addr_bytes[1], \
		eth_h->s_addr.addr_bytes[2], \
		eth_h->s_addr.addr_bytes[3], \
		eth_h->s_addr.addr_bytes[4], \
		eth_h->s_addr.addr_bytes[5], \
		src_ip, \
		eth_h->d_addr.addr_bytes[0], \
		eth_h->d_addr.addr_bytes[1], \
		eth_h->d_addr.addr_bytes[2], \
		eth_h->d_addr.addr_bytes[3], \
		eth_h->d_addr.addr_bytes[4], \
		eth_h->d_addr.addr_bytes[5], \
		dst_ip, \
		arp_op, \
		++burstnumber)
#endif
static void
mode6_debug(const char __attribute__((unused)) *info, struct ether_hdr *eth_h,
		uint8_t port, uint32_t __attribute__((unused)) *burstnumber)
{
	struct ipv4_hdr *ipv4_h;
#ifdef RTE_LIBRTE_BOND_DEBUG_ALB
	struct arp_hdr *arp_h;
	char dst_ip[16];
	char ArpOp[24];
	char buf[16];
#endif
	char src_ip[16];

	uint16_t ether_type = eth_h->ether_type;
	uint16_t offset = get_vlan_offset(eth_h, &ether_type);

#ifdef RTE_LIBRTE_BOND_DEBUG_ALB
	snprintf(buf, 16, "%s", info);
#endif

	if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv4)) {
		ipv4_h = (struct ipv4_hdr *)((char *)(eth_h + 1) + offset);
		ipv4_addr_to_dot(ipv4_h->src_addr, src_ip, MaxIPv4String);
#ifdef RTE_LIBRTE_BOND_DEBUG_ALB
		ipv4_addr_to_dot(ipv4_h->dst_addr, dst_ip, MaxIPv4String);
		MODE6_DEBUG(buf, src_ip, dst_ip, eth_h, "", port, *burstnumber);
#endif
		update_client_stats(ipv4_h->src_addr, port, burstnumber);
	}
#ifdef RTE_LIBRTE_BOND_DEBUG_ALB
	else if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_ARP)) {
		arp_h = (struct arp_hdr *)((char *)(eth_h + 1) + offset);
		ipv4_addr_to_dot(arp_h->arp_data.arp_sip, src_ip, MaxIPv4String);
		ipv4_addr_to_dot(arp_h->arp_data.arp_tip, dst_ip, MaxIPv4String);
		arp_op_name(rte_be_to_cpu_16(arp_h->arp_op), ArpOp);
		MODE6_DEBUG(buf, src_ip, dst_ip, eth_h, ArpOp, port, *burstnumber);
	}
#endif
}
static uint16_t
bond_ethdev_rx_burst_alb(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)queue;
	struct bond_dev_private *internals = bd_tx_q->dev_private;
	struct ether_hdr *eth_h;
	uint16_t ether_type, offset;
	uint16_t nb_recv_pkts;
	int i;

	nb_recv_pkts = bond_ethdev_rx_burst(queue, bufs, nb_pkts);

	for (i = 0; i < nb_recv_pkts; i++) {
		eth_h = rte_pktmbuf_mtod(bufs[i], struct ether_hdr *);
		ether_type = eth_h->ether_type;
		offset = get_vlan_offset(eth_h, &ether_type);

		if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_ARP)) {
#if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
			mode6_debug("RX ARP:", eth_h, bufs[i]->port, &burstnumberRX);
#endif
			bond_mode_alb_arp_recv(eth_h, offset, internals);
		}
#if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
		else if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv4))
			mode6_debug("RX IPv4:", eth_h, bufs[i]->port, &burstnumberRX);
#endif
	}

	return nb_recv_pkts;
}
static uint16_t
bond_ethdev_tx_burst_round_robin(void *queue, struct rte_mbuf **bufs,
		uint16_t nb_pkts)
{
	struct bond_dev_private *internals;
	struct bond_tx_queue *bd_tx_q;

	struct rte_mbuf *slave_bufs[RTE_MAX_ETHPORTS][nb_pkts];
	uint16_t slave_nb_pkts[RTE_MAX_ETHPORTS] = { 0 };

	uint8_t num_of_slaves;
	uint8_t slaves[RTE_MAX_ETHPORTS];

	uint16_t num_tx_total = 0, num_tx_slave;

	static int slave_idx = 0;
	int i, cslave_idx = 0, tx_fail_total = 0;

	bd_tx_q = (struct bond_tx_queue *)queue;
	internals = bd_tx_q->dev_private;

	/* Copy slave list to protect against slave up/down changes during tx
	 * bursting */
	num_of_slaves = internals->active_slave_count;
	memcpy(slaves, internals->active_slaves,
			sizeof(internals->active_slaves[0]) * num_of_slaves);

	if (num_of_slaves < 1)
		return num_tx_total;

	/* Populate slave mbuf arrays with the packets to be sent on each slave */
	for (i = 0; i < nb_pkts; i++) {
		cslave_idx = (slave_idx + i) % num_of_slaves;
		slave_bufs[cslave_idx][(slave_nb_pkts[cslave_idx])++] = bufs[i];
	}

	/* increment current slave index so the next call to tx burst starts on the
	 * next slave */
	slave_idx = ++cslave_idx;

	/* Send packet burst on each slave device */
	for (i = 0; i < num_of_slaves; i++) {
		if (slave_nb_pkts[i] > 0) {
			num_tx_slave = rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id,
					slave_bufs[i], slave_nb_pkts[i]);

			/* if tx burst fails move packets to end of bufs */
			if (unlikely(num_tx_slave < slave_nb_pkts[i])) {
				int tx_fail_slave = slave_nb_pkts[i] - num_tx_slave;

				tx_fail_total += tx_fail_slave;

				memcpy(&bufs[nb_pkts - tx_fail_total],
						&slave_bufs[i][num_tx_slave],
						tx_fail_slave * sizeof(bufs[0]));
			}
			num_tx_total += num_tx_slave;
		}
	}

	return num_tx_total;
}
static uint16_t
bond_ethdev_tx_burst_active_backup(void *queue,
		struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	struct bond_dev_private *internals;
	struct bond_tx_queue *bd_tx_q;

	bd_tx_q = (struct bond_tx_queue *)queue;
	internals = bd_tx_q->dev_private;

	if (internals->active_slave_count < 1)
		return 0;

	return rte_eth_tx_burst(internals->current_primary_port, bd_tx_q->queue_id,
			bufs, nb_pkts);
}
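
/*
 * Hash helpers used by the balance (mode 2) and 802.3ad (mode 4) transmit
 * policies. Each xmit_l*_hash() flavour below folds progressively more
 * header fields (L2, L2+L3, L3+L4) into a slave index in [0, slave_count).
 */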
static inline uint16_t
ether_hash(struct ether_hdr *eth_hdr)
{
	unaligned_uint16_t *word_src_addr =
		(unaligned_uint16_t *)eth_hdr->s_addr.addr_bytes;
	unaligned_uint16_t *word_dst_addr =
		(unaligned_uint16_t *)eth_hdr->d_addr.addr_bytes;

	return (word_src_addr[0] ^ word_dst_addr[0]) ^
			(word_src_addr[1] ^ word_dst_addr[1]) ^
			(word_src_addr[2] ^ word_dst_addr[2]);
}

static inline uint32_t
ipv4_hash(struct ipv4_hdr *ipv4_hdr)
{
	return ipv4_hdr->src_addr ^ ipv4_hdr->dst_addr;
}

static inline uint32_t
ipv6_hash(struct ipv6_hdr *ipv6_hdr)
{
	unaligned_uint32_t *word_src_addr =
		(unaligned_uint32_t *)&(ipv6_hdr->src_addr[0]);
	unaligned_uint32_t *word_dst_addr =
		(unaligned_uint32_t *)&(ipv6_hdr->dst_addr[0]);

	return (word_src_addr[0] ^ word_dst_addr[0]) ^
			(word_src_addr[1] ^ word_dst_addr[1]) ^
			(word_src_addr[2] ^ word_dst_addr[2]) ^
			(word_src_addr[3] ^ word_dst_addr[3]);
}
static uint16_t
xmit_l2_hash(const struct rte_mbuf *buf, uint8_t slave_count)
{
	struct ether_hdr *eth_hdr = rte_pktmbuf_mtod(buf, struct ether_hdr *);

	uint32_t hash = ether_hash(eth_hdr);

	return (hash ^= hash >> 8) % slave_count;
}

static uint16_t
xmit_l23_hash(const struct rte_mbuf *buf, uint8_t slave_count)
{
	struct ether_hdr *eth_hdr = rte_pktmbuf_mtod(buf, struct ether_hdr *);
	uint16_t proto = eth_hdr->ether_type;
	size_t vlan_offset = get_vlan_offset(eth_hdr, &proto);
	uint32_t hash, l3hash = 0;

	hash = ether_hash(eth_hdr);

	if (rte_cpu_to_be_16(ETHER_TYPE_IPv4) == proto) {
		struct ipv4_hdr *ipv4_hdr = (struct ipv4_hdr *)
				((char *)(eth_hdr + 1) + vlan_offset);
		l3hash = ipv4_hash(ipv4_hdr);

	} else if (rte_cpu_to_be_16(ETHER_TYPE_IPv6) == proto) {
		struct ipv6_hdr *ipv6_hdr = (struct ipv6_hdr *)
				((char *)(eth_hdr + 1) + vlan_offset);
		l3hash = ipv6_hash(ipv6_hdr);
	}

	hash = hash ^ l3hash;
	hash ^= hash >> 16;
	hash ^= hash >> 8;

	return hash % slave_count;
}
static uint16_t
xmit_l34_hash(const struct rte_mbuf *buf, uint8_t slave_count)
{
	struct ether_hdr *eth_hdr = rte_pktmbuf_mtod(buf, struct ether_hdr *);
	uint16_t proto = eth_hdr->ether_type;
	size_t vlan_offset = get_vlan_offset(eth_hdr, &proto);

	struct udp_hdr *udp_hdr = NULL;
	struct tcp_hdr *tcp_hdr = NULL;
	uint32_t hash, l3hash = 0, l4hash = 0;

	if (rte_cpu_to_be_16(ETHER_TYPE_IPv4) == proto) {
		struct ipv4_hdr *ipv4_hdr = (struct ipv4_hdr *)
				((char *)(eth_hdr + 1) + vlan_offset);
		size_t ip_hdr_offset;

		l3hash = ipv4_hash(ipv4_hdr);

		/* there is no L4 header in fragmented packet */
		if (likely(rte_ipv4_frag_pkt_is_fragmented(ipv4_hdr) == 0)) {
			ip_hdr_offset = (ipv4_hdr->version_ihl & IPV4_HDR_IHL_MASK) *
					IPV4_IHL_MULTIPLIER;

			if (ipv4_hdr->next_proto_id == IPPROTO_TCP) {
				tcp_hdr = (struct tcp_hdr *)((char *)ipv4_hdr +
						ip_hdr_offset);
				l4hash = HASH_L4_PORTS(tcp_hdr);
			} else if (ipv4_hdr->next_proto_id == IPPROTO_UDP) {
				udp_hdr = (struct udp_hdr *)((char *)ipv4_hdr +
						ip_hdr_offset);
				l4hash = HASH_L4_PORTS(udp_hdr);
			}
		}
	} else if (rte_cpu_to_be_16(ETHER_TYPE_IPv6) == proto) {
		struct ipv6_hdr *ipv6_hdr = (struct ipv6_hdr *)
				((char *)(eth_hdr + 1) + vlan_offset);
		l3hash = ipv6_hash(ipv6_hdr);

		if (ipv6_hdr->proto == IPPROTO_TCP) {
			tcp_hdr = (struct tcp_hdr *)(ipv6_hdr + 1);
			l4hash = HASH_L4_PORTS(tcp_hdr);
		} else if (ipv6_hdr->proto == IPPROTO_UDP) {
			udp_hdr = (struct udp_hdr *)(ipv6_hdr + 1);
			l4hash = HASH_L4_PORTS(udp_hdr);
		}
	}

	hash = l3hash ^ l4hash;
	hash ^= hash >> 16;
	hash ^= hash >> 8;

	return hash % slave_count;
}
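
/*
 * TLB (mode 5) bookkeeping: bwg_slave records how much of a slave's link
 * bandwidth is still unused, split into an integer part and a remainder so
 * that slaves can be ordered precisely by bandwidth left.
 */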
struct bwg_slave {
	uint64_t bwg_left_int;
	uint64_t bwg_left_remainder;
	uint8_t slave;
};

void
bond_tlb_activate_slave(struct bond_dev_private *internals) {
	int i;

	for (i = 0; i < internals->active_slave_count; i++) {
		tlb_last_obytets[internals->active_slaves[i]] = 0;
	}
}
static int
bandwidth_cmp(const void *a, const void *b)
{
	const struct bwg_slave *bwg_a = a;
	const struct bwg_slave *bwg_b = b;
	int64_t diff = (int64_t)bwg_b->bwg_left_int - (int64_t)bwg_a->bwg_left_int;
	int64_t diff2 = (int64_t)bwg_b->bwg_left_remainder -
			(int64_t)bwg_a->bwg_left_remainder;
	if (diff > 0)
		return 1;
	else if (diff < 0)
		return -1;
	else if (diff2 > 0)
		return 1;
	else if (diff2 < 0)
		return -1;
	else
		return 0;
}
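
/*
 * Estimate the bandwidth still available on a slave. link_bwg starts as the
 * link capacity in bytes per second, scaled up by the (update_idx + 1)
 * elapsed polling periods of REORDER_PERIOD_MS each; the load already
 * transmitted (multiplied by 1000 to match the ms scaling) is subtracted
 * and the result is expressed as a quotient/remainder of the capacity.
 */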
static void
bandwidth_left(uint8_t port_id, uint64_t load, uint8_t update_idx,
		struct bwg_slave *bwg_slave)
{
	struct rte_eth_link link_status;

	rte_eth_link_get_nowait(port_id, &link_status);
	uint64_t link_bwg = link_status.link_speed * 1000000ULL / 8;
	if (link_bwg == 0)
		return;
	link_bwg = link_bwg * (update_idx+1) * REORDER_PERIOD_MS;
	bwg_slave->bwg_left_int = (link_bwg - 1000*load) / link_bwg;
	bwg_slave->bwg_left_remainder = (link_bwg - 1000*load) % link_bwg;
}
static void
bond_ethdev_update_tlb_slave_cb(void *arg)
{
	struct bond_dev_private *internals = arg;
	struct rte_eth_stats slave_stats;
	struct bwg_slave bwg_array[RTE_MAX_ETHPORTS];
	uint8_t slave_count;
	uint64_t tx_bytes;

	uint8_t update_stats = 0;
	uint8_t i, slave_id;

	internals->slave_update_idx++;

	if (internals->slave_update_idx >= REORDER_PERIOD_MS)
		update_stats = 1;

	for (i = 0; i < internals->active_slave_count; i++) {
		slave_id = internals->active_slaves[i];
		rte_eth_stats_get(slave_id, &slave_stats);
		tx_bytes = slave_stats.obytes - tlb_last_obytets[slave_id];
		bandwidth_left(slave_id, tx_bytes,
				internals->slave_update_idx, &bwg_array[i]);
		bwg_array[i].slave = slave_id;

		if (update_stats) {
			tlb_last_obytets[slave_id] = slave_stats.obytes;
		}
	}

	if (update_stats == 1)
		internals->slave_update_idx = 0;

	slave_count = i;
	qsort(bwg_array, slave_count, sizeof(bwg_array[0]), bandwidth_cmp);
	for (i = 0; i < slave_count; i++)
		internals->tlb_slaves_order[i] = bwg_array[i].slave;

	rte_eal_alarm_set(REORDER_PERIOD_MS * 1000, bond_ethdev_update_tlb_slave_cb,
			(struct bond_dev_private *)internals);
}
static uint16_t
bond_ethdev_tx_burst_tlb(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)queue;
	struct bond_dev_private *internals = bd_tx_q->dev_private;

	struct rte_eth_dev *primary_port =
			&rte_eth_devices[internals->primary_port];
	uint16_t num_tx_total = 0;
	uint8_t i, j;

	uint8_t num_of_slaves = internals->active_slave_count;
	uint8_t slaves[RTE_MAX_ETHPORTS];

	struct ether_hdr *ether_hdr;
	struct ether_addr primary_slave_addr;
	struct ether_addr active_slave_addr;

	if (num_of_slaves < 1)
		return num_tx_total;

	memcpy(slaves, internals->tlb_slaves_order,
			sizeof(internals->tlb_slaves_order[0]) * num_of_slaves);

	ether_addr_copy(primary_port->data->mac_addrs, &primary_slave_addr);

	if (nb_pkts > 3) {
		for (i = 0; i < 3; i++)
			rte_prefetch0(rte_pktmbuf_mtod(bufs[i], void*));
	}

	for (i = 0; i < num_of_slaves; i++) {
		rte_eth_macaddr_get(slaves[i], &active_slave_addr);
		for (j = num_tx_total; j < nb_pkts; j++) {
			if (j + 3 < nb_pkts)
				rte_prefetch0(rte_pktmbuf_mtod(bufs[j+3], void*));

			ether_hdr = rte_pktmbuf_mtod(bufs[j], struct ether_hdr *);
			if (is_same_ether_addr(&ether_hdr->s_addr, &primary_slave_addr))
				ether_addr_copy(&active_slave_addr, &ether_hdr->s_addr);
#if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
			mode6_debug("TX IPv4:", ether_hdr, slaves[i], &burstnumberTX);
#endif
		}

		num_tx_total += rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id,
				bufs + num_tx_total, nb_pkts - num_tx_total);

		if (num_tx_total == nb_pkts)
			break;
	}

	return num_tx_total;
}
void
bond_tlb_disable(struct bond_dev_private *internals)
{
	rte_eal_alarm_cancel(bond_ethdev_update_tlb_slave_cb, internals);
}

void
bond_tlb_enable(struct bond_dev_private *internals)
{
	bond_ethdev_update_tlb_slave_cb(internals);
}
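
/*
 * Burst TX for mode 6 (adaptive load balancing): ARP packets are steered by
 * the mode 6 client table, pending ARP update packets are generated for
 * known clients when the table changes (ntt), and all other traffic falls
 * back to the TLB transmit policy.
 */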
static uint16_t
bond_ethdev_tx_burst_alb(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)queue;
	struct bond_dev_private *internals = bd_tx_q->dev_private;

	struct ether_hdr *eth_h;
	uint16_t ether_type, offset;

	struct client_data *client_info;

	/*
	 * We create transmit buffers for every slave and one additional to send
	 * through tlb. In worst case every packet will be sent on one port.
	 */
	struct rte_mbuf *slave_bufs[RTE_MAX_ETHPORTS + 1][nb_pkts];
	uint16_t slave_bufs_pkts[RTE_MAX_ETHPORTS + 1] = { 0 };

	/*
	 * We create separate transmit buffers for update packets as they won't
	 * be counted in num_tx_total.
	 */
	struct rte_mbuf *update_bufs[RTE_MAX_ETHPORTS][ALB_HASH_TABLE_SIZE];
	uint16_t update_bufs_pkts[RTE_MAX_ETHPORTS] = { 0 };

	struct rte_mbuf *upd_pkt;
	size_t pkt_size;

	uint16_t num_send, num_not_send = 0;
	uint16_t num_tx_total = 0;
	uint8_t slave_idx;

	int i, j;

	/* Search tx buffer for ARP packets and forward them to alb */
	for (i = 0; i < nb_pkts; i++) {
		eth_h = rte_pktmbuf_mtod(bufs[i], struct ether_hdr *);
		ether_type = eth_h->ether_type;
		offset = get_vlan_offset(eth_h, &ether_type);

		if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_ARP)) {
			slave_idx = bond_mode_alb_arp_xmit(eth_h, offset, internals);

			/* Change src mac in eth header */
			rte_eth_macaddr_get(slave_idx, &eth_h->s_addr);

			/* Add packet to slave tx buffer */
			slave_bufs[slave_idx][slave_bufs_pkts[slave_idx]] = bufs[i];
			slave_bufs_pkts[slave_idx]++;
		} else {
			/* If packet is not ARP, send it with TLB policy */
			slave_bufs[RTE_MAX_ETHPORTS][slave_bufs_pkts[RTE_MAX_ETHPORTS]] =
					bufs[i];
			slave_bufs_pkts[RTE_MAX_ETHPORTS]++;
		}
	}

	/* Update connected client ARP tables */
	if (internals->mode6.ntt) {
		for (i = 0; i < ALB_HASH_TABLE_SIZE; i++) {
			client_info = &internals->mode6.client_table[i];

			if (client_info->in_use) {
				/* Allocate new packet to send ARP update on current slave */
				upd_pkt = rte_pktmbuf_alloc(internals->mode6.mempool);
				if (upd_pkt == NULL) {
					RTE_LOG(ERR, PMD, "Failed to allocate ARP packet from pool\n");
					continue;
				}
				pkt_size = sizeof(struct ether_hdr) + sizeof(struct arp_hdr)
						+ client_info->vlan_count * sizeof(struct vlan_hdr);
				upd_pkt->data_len = pkt_size;
				upd_pkt->pkt_len = pkt_size;

				slave_idx = bond_mode_alb_arp_upd(client_info, upd_pkt,
						internals);

				/* Add packet to update tx buffer */
				update_bufs[slave_idx][update_bufs_pkts[slave_idx]] = upd_pkt;
				update_bufs_pkts[slave_idx]++;
			}
		}
		internals->mode6.ntt = 0;
	}

	/* Send ARP packets on proper slaves */
	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		if (slave_bufs_pkts[i] > 0) {
			num_send = rte_eth_tx_burst(i, bd_tx_q->queue_id,
					slave_bufs[i], slave_bufs_pkts[i]);
			for (j = 0; j < slave_bufs_pkts[i] - num_send; j++) {
				bufs[nb_pkts - 1 - num_not_send - j] =
						slave_bufs[i][nb_pkts - 1 - j];
			}

			num_tx_total += num_send;
			num_not_send += slave_bufs_pkts[i] - num_send;

#if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
	/* Print TX stats including update packets */
			for (j = 0; j < slave_bufs_pkts[i]; j++) {
				eth_h = rte_pktmbuf_mtod(slave_bufs[i][j], struct ether_hdr *);
				mode6_debug("TX ARP:", eth_h, i, &burstnumberTX);
			}
#endif
		}
	}

	/* Send update packets on proper slaves */
	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		if (update_bufs_pkts[i] > 0) {
			num_send = rte_eth_tx_burst(i, bd_tx_q->queue_id, update_bufs[i],
					update_bufs_pkts[i]);
			for (j = num_send; j < update_bufs_pkts[i]; j++) {
				rte_pktmbuf_free(update_bufs[i][j]);
			}
#if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
			for (j = 0; j < update_bufs_pkts[i]; j++) {
				eth_h = rte_pktmbuf_mtod(update_bufs[i][j], struct ether_hdr *);
				mode6_debug("TX ARPupd:", eth_h, i, &burstnumberTX);
			}
#endif
		}
	}

	/* Send non-ARP packets using tlb policy */
	if (slave_bufs_pkts[RTE_MAX_ETHPORTS] > 0) {
		num_send = bond_ethdev_tx_burst_tlb(queue,
				slave_bufs[RTE_MAX_ETHPORTS],
				slave_bufs_pkts[RTE_MAX_ETHPORTS]);

		for (j = 0; j < slave_bufs_pkts[RTE_MAX_ETHPORTS]; j++) {
			bufs[nb_pkts - 1 - num_not_send - j] =
					slave_bufs[RTE_MAX_ETHPORTS][nb_pkts - 1 - j];
		}

		num_tx_total += num_send;
		num_not_send += slave_bufs_pkts[RTE_MAX_ETHPORTS] - num_send;
	}

	return num_tx_total;
}
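
/*
 * Burst TX for mode 2 (balance): each packet is assigned to a slave by the
 * configured transmit hash policy (l2, l23 or l34), then one burst is sent
 * per slave. Untransmitted packets are moved to the tail of bufs so the
 * caller can retry them.
 */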
static uint16_t
bond_ethdev_tx_burst_balance(void *queue, struct rte_mbuf **bufs,
		uint16_t nb_pkts)
{
	struct bond_dev_private *internals;
	struct bond_tx_queue *bd_tx_q;

	uint8_t num_of_slaves;
	uint8_t slaves[RTE_MAX_ETHPORTS];

	uint16_t num_tx_total = 0, num_tx_slave = 0, tx_fail_total = 0;

	int i, op_slave_id;

	struct rte_mbuf *slave_bufs[RTE_MAX_ETHPORTS][nb_pkts];
	uint16_t slave_nb_pkts[RTE_MAX_ETHPORTS] = { 0 };

	bd_tx_q = (struct bond_tx_queue *)queue;
	internals = bd_tx_q->dev_private;

	/* Copy slave list to protect against slave up/down changes during tx
	 * bursting */
	num_of_slaves = internals->active_slave_count;
	memcpy(slaves, internals->active_slaves,
			sizeof(internals->active_slaves[0]) * num_of_slaves);

	if (num_of_slaves < 1)
		return num_tx_total;

	/* Populate slave mbuf arrays with the packets to be sent on each slave */
	for (i = 0; i < nb_pkts; i++) {
		/* Select output slave using hash based on xmit policy */
		op_slave_id = internals->xmit_hash(bufs[i], num_of_slaves);

		/* Populate slave mbuf arrays with mbufs for that slave */
		slave_bufs[op_slave_id][slave_nb_pkts[op_slave_id]++] = bufs[i];
	}

	/* Send packet burst on each slave device */
	for (i = 0; i < num_of_slaves; i++) {
		if (slave_nb_pkts[i] > 0) {
			num_tx_slave = rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id,
					slave_bufs[i], slave_nb_pkts[i]);

			/* if tx burst fails move packets to end of bufs */
			if (unlikely(num_tx_slave < slave_nb_pkts[i])) {
				int slave_tx_fail_count = slave_nb_pkts[i] - num_tx_slave;

				tx_fail_total += slave_tx_fail_count;
				memcpy(&bufs[nb_pkts - tx_fail_total],
						&slave_bufs[i][num_tx_slave],
						slave_tx_fail_count * sizeof(bufs[0]));
			}

			num_tx_total += num_tx_slave;
		}
	}

	return num_tx_total;
}
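
/*
 * Burst TX for mode 4 (802.3ad): LACP/marker frames queued by the mode 4
 * state machines are drained from each slave's tx_ring and sent first; data
 * packets are hashed only across slaves that are currently distributing.
 * Slow packets are not counted in the return value and are freed if their
 * transmission fails.
 */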
static uint16_t
bond_ethdev_tx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
		uint16_t nb_pkts)
{
	struct bond_dev_private *internals;
	struct bond_tx_queue *bd_tx_q;

	uint8_t num_of_slaves;
	uint8_t slaves[RTE_MAX_ETHPORTS];
	/* positions in slaves, not ID */
	uint8_t distributing_offsets[RTE_MAX_ETHPORTS];
	uint8_t distributing_count;

	uint16_t num_tx_slave, num_tx_total = 0, num_tx_fail_total = 0;
	uint16_t i, j, op_slave_idx;
	const uint16_t buffs_size = nb_pkts + BOND_MODE_8023AX_SLAVE_TX_PKTS + 1;

	/* Allocate additional packets in case 8023AD mode. */
	struct rte_mbuf *slave_bufs[RTE_MAX_ETHPORTS][buffs_size];
	void *slow_pkts[BOND_MODE_8023AX_SLAVE_TX_PKTS] = { NULL };

	/* Total amount of packets in slave_bufs */
	uint16_t slave_nb_pkts[RTE_MAX_ETHPORTS] = { 0 };
	/* Slow packets placed in each slave */
	uint8_t slave_slow_nb_pkts[RTE_MAX_ETHPORTS] = { 0 };

	bd_tx_q = (struct bond_tx_queue *)queue;
	internals = bd_tx_q->dev_private;

	/* Copy slave list to protect against slave up/down changes during tx
	 * bursting */
	num_of_slaves = internals->active_slave_count;
	if (num_of_slaves < 1)
		return num_tx_total;

	memcpy(slaves, internals->active_slaves, sizeof(slaves[0]) * num_of_slaves);

	distributing_count = 0;
	for (i = 0; i < num_of_slaves; i++) {
		struct port *port = &mode_8023ad_ports[slaves[i]];

		slave_slow_nb_pkts[i] = rte_ring_dequeue_burst(port->tx_ring,
				slow_pkts, BOND_MODE_8023AX_SLAVE_TX_PKTS);
		slave_nb_pkts[i] = slave_slow_nb_pkts[i];

		for (j = 0; j < slave_slow_nb_pkts[i]; j++)
			slave_bufs[i][j] = slow_pkts[j];

		if (ACTOR_STATE(port, DISTRIBUTING))
			distributing_offsets[distributing_count++] = i;
	}

	if (likely(distributing_count > 0)) {
		/* Populate slave mbuf arrays with the packets to be sent on them */
		for (i = 0; i < nb_pkts; i++) {
			/* Select output slave using hash based on xmit policy */
			op_slave_idx = internals->xmit_hash(bufs[i], distributing_count);

			/* Populate slave mbuf arrays with mbufs for that slave. Use only
			 * slaves that are currently distributing. */
			uint8_t slave_offset = distributing_offsets[op_slave_idx];
			slave_bufs[slave_offset][slave_nb_pkts[slave_offset]] = bufs[i];
			slave_nb_pkts[slave_offset]++;
		}
	}

	/* Send packet burst on each slave device */
	for (i = 0; i < num_of_slaves; i++) {
		if (slave_nb_pkts[i] == 0)
			continue;

		num_tx_slave = rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id,
				slave_bufs[i], slave_nb_pkts[i]);

		/* If tx burst fails drop slow packets */
		for ( ; num_tx_slave < slave_slow_nb_pkts[i]; num_tx_slave++)
			rte_pktmbuf_free(slave_bufs[i][num_tx_slave]);

		num_tx_total += num_tx_slave - slave_slow_nb_pkts[i];
		num_tx_fail_total += slave_nb_pkts[i] - num_tx_slave;

		/* If tx burst fails move packets to end of bufs */
		if (unlikely(num_tx_slave < slave_nb_pkts[i])) {
			uint16_t j = nb_pkts - num_tx_fail_total;
			for ( ; num_tx_slave < slave_nb_pkts[i]; j++, num_tx_slave++)
				bufs[j] = slave_bufs[i][num_tx_slave];
		}
	}

	return num_tx_total;
}
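
/*
 * Burst TX for mode 3 (broadcast): every packet is transmitted on every
 * active slave, so each mbuf's reference count is bumped by the number of
 * extra transmissions. On partial failure, only the most successful slave's
 * count is reported and the surplus references are freed here.
 */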
static uint16_t
bond_ethdev_tx_burst_broadcast(void *queue, struct rte_mbuf **bufs,
		uint16_t nb_pkts)
{
	struct bond_dev_private *internals;
	struct bond_tx_queue *bd_tx_q;

	uint8_t tx_failed_flag = 0, num_of_slaves;
	uint8_t slaves[RTE_MAX_ETHPORTS];

	uint16_t max_nb_of_tx_pkts = 0;

	int slave_tx_total[RTE_MAX_ETHPORTS];
	int i, most_successful_tx_slave = -1;

	bd_tx_q = (struct bond_tx_queue *)queue;
	internals = bd_tx_q->dev_private;

	/* Copy slave list to protect against slave up/down changes during tx
	 * bursting */
	num_of_slaves = internals->active_slave_count;
	memcpy(slaves, internals->active_slaves,
			sizeof(internals->active_slaves[0]) * num_of_slaves);

	if (num_of_slaves < 1)
		return 0;

	/* Increment reference count on mbufs */
	for (i = 0; i < nb_pkts; i++)
		rte_mbuf_refcnt_update(bufs[i], num_of_slaves - 1);

	/* Transmit burst on each active slave */
	for (i = 0; i < num_of_slaves; i++) {
		slave_tx_total[i] = rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id,
				bufs, nb_pkts);

		if (unlikely(slave_tx_total[i] < nb_pkts))
			tx_failed_flag = 1;

		/* record the value and slave index for the slave which transmits the
		 * maximum number of packets */
		if (slave_tx_total[i] > max_nb_of_tx_pkts) {
			max_nb_of_tx_pkts = slave_tx_total[i];
			most_successful_tx_slave = i;
		}
	}

	/* if slaves fail to transmit packets from burst, the calling application
	 * is not expected to know about multiple references to packets so we must
	 * handle failures of all packets except those of the most successful slave
	 */
	if (unlikely(tx_failed_flag))
		for (i = 0; i < num_of_slaves; i++)
			if (i != most_successful_tx_slave)
				while (slave_tx_total[i] < nb_pkts)
					rte_pktmbuf_free(bufs[slave_tx_total[i]++]);

	return max_nb_of_tx_pkts;
}
void
link_properties_set(struct rte_eth_dev *bonded_eth_dev,
		struct rte_eth_link *slave_dev_link)
{
	struct rte_eth_link *bonded_dev_link = &bonded_eth_dev->data->dev_link;
	struct bond_dev_private *internals = bonded_eth_dev->data->dev_private;

	if (slave_dev_link->link_status &&
		bonded_eth_dev->data->dev_started) {
		bonded_dev_link->link_duplex = slave_dev_link->link_duplex;
		bonded_dev_link->link_speed = slave_dev_link->link_speed;

		internals->link_props_set = 1;
	}
}

void
link_properties_reset(struct rte_eth_dev *bonded_eth_dev)
{
	struct bond_dev_private *internals = bonded_eth_dev->data->dev_private;

	memset(&(bonded_eth_dev->data->dev_link), 0,
			sizeof(bonded_eth_dev->data->dev_link));

	internals->link_props_set = 0;
}

int
link_properties_valid(struct rte_eth_link *bonded_dev_link,
		struct rte_eth_link *slave_dev_link)
{
	if (bonded_dev_link->link_duplex != slave_dev_link->link_duplex ||
		bonded_dev_link->link_speed != slave_dev_link->link_speed)
		return -1;

	return 0;
}
int
mac_address_get(struct rte_eth_dev *eth_dev, struct ether_addr *dst_mac_addr)
{
	struct ether_addr *mac_addr;

	if (eth_dev == NULL) {
		RTE_LOG(ERR, PMD, "%s: NULL pointer eth_dev specified\n", __func__);
		return -1;
	}

	if (dst_mac_addr == NULL) {
		RTE_LOG(ERR, PMD, "%s: NULL pointer MAC specified\n", __func__);
		return -1;
	}

	mac_addr = eth_dev->data->mac_addrs;

	ether_addr_copy(mac_addr, dst_mac_addr);
	return 0;
}

int
mac_address_set(struct rte_eth_dev *eth_dev, struct ether_addr *new_mac_addr)
{
	struct ether_addr *mac_addr;

	if (eth_dev == NULL) {
		RTE_BOND_LOG(ERR, "NULL pointer eth_dev specified");
		return -1;
	}

	if (new_mac_addr == NULL) {
		RTE_BOND_LOG(ERR, "NULL pointer MAC specified");
		return -1;
	}

	mac_addr = eth_dev->data->mac_addrs;

	/* If new MAC is different to current MAC then update */
	if (memcmp(mac_addr, new_mac_addr, sizeof(*mac_addr)) != 0)
		memcpy(mac_addr, new_mac_addr, sizeof(*mac_addr));

	return 0;
}
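
/*
 * Push the bonded device's MAC down to the slaves. In the MAC-sharing modes
 * every slave gets the bond's address; in active-backup/TLB/ALB only the
 * primary uses it while the other slaves keep their persisted addresses;
 * mode 4 delegates to the 802.3ad-specific update.
 */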
int
mac_address_slaves_update(struct rte_eth_dev *bonded_eth_dev)
{
	struct bond_dev_private *internals = bonded_eth_dev->data->dev_private;
	int i;

	/* Update slave devices MAC addresses */
	if (internals->slave_count < 1)
		return -1;

	switch (internals->mode) {
	case BONDING_MODE_ROUND_ROBIN:
	case BONDING_MODE_BALANCE:
	case BONDING_MODE_BROADCAST:
		for (i = 0; i < internals->slave_count; i++) {
			if (rte_eth_dev_default_mac_addr_set(
					internals->slaves[i].port_id,
					bonded_eth_dev->data->mac_addrs)) {
				RTE_BOND_LOG(ERR, "Failed to update port Id %d MAC address",
						internals->slaves[i].port_id);
				return -1;
			}
		}
		break;
	case BONDING_MODE_8023AD:
		bond_mode_8023ad_mac_address_update(bonded_eth_dev);
		break;
	case BONDING_MODE_ACTIVE_BACKUP:
	case BONDING_MODE_TLB:
	case BONDING_MODE_ALB:
	default:
		for (i = 0; i < internals->slave_count; i++) {
			if (internals->slaves[i].port_id ==
					internals->current_primary_port) {
				if (rte_eth_dev_default_mac_addr_set(
						internals->primary_port,
						bonded_eth_dev->data->mac_addrs)) {
					RTE_BOND_LOG(ERR, "Failed to update port Id %d MAC address",
							internals->current_primary_port);
					return -1;
				}
			} else {
				if (rte_eth_dev_default_mac_addr_set(
						internals->slaves[i].port_id,
						&internals->slaves[i].persisted_mac_addr)) {
					RTE_BOND_LOG(ERR, "Failed to update port Id %d MAC address",
							internals->slaves[i].port_id);
					return -1;
				}
			}
		}
	}

	return 0;
}
int
bond_ethdev_mode_set(struct rte_eth_dev *eth_dev, int mode)
{
	struct bond_dev_private *internals;

	internals = eth_dev->data->dev_private;

	switch (mode) {
	case BONDING_MODE_ROUND_ROBIN:
		eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_round_robin;
		eth_dev->rx_pkt_burst = bond_ethdev_rx_burst;
		break;
	case BONDING_MODE_ACTIVE_BACKUP:
		eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_active_backup;
		eth_dev->rx_pkt_burst = bond_ethdev_rx_burst_active_backup;
		break;
	case BONDING_MODE_BALANCE:
		eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_balance;
		eth_dev->rx_pkt_burst = bond_ethdev_rx_burst;
		break;
	case BONDING_MODE_BROADCAST:
		eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_broadcast;
		eth_dev->rx_pkt_burst = bond_ethdev_rx_burst;
		break;
	case BONDING_MODE_8023AD:
		if (bond_mode_8023ad_enable(eth_dev) != 0)
			return -1;

		eth_dev->rx_pkt_burst = bond_ethdev_rx_burst_8023ad;
		eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_8023ad;
		RTE_LOG(WARNING, PMD,
				"Using mode 4, it is necessary to do TX burst and RX burst "
				"at least every 100ms.\n");
		break;
	case BONDING_MODE_TLB:
		eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_tlb;
		eth_dev->rx_pkt_burst = bond_ethdev_rx_burst_active_backup;
		break;
	case BONDING_MODE_ALB:
		if (bond_mode_alb_enable(eth_dev) != 0)
			return -1;

		eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_alb;
		eth_dev->rx_pkt_burst = bond_ethdev_rx_burst_alb;
		break;
	default:
		return -1;
	}

	internals->mode = mode;

	return 0;
}
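
/*
 * (Re)configure a slave to mirror the bonded device: propagate RSS and VLAN
 * filter settings, create any rx/tx queues the slave is missing, start it,
 * synchronize the RETA table, and kick an initial link-status check when
 * LSC interrupts are available.
 */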
int
slave_configure(struct rte_eth_dev *bonded_eth_dev,
		struct rte_eth_dev *slave_eth_dev)
{
	struct bond_rx_queue *bd_rx_q;
	struct bond_tx_queue *bd_tx_q;

	uint16_t old_nb_tx_queues = slave_eth_dev->data->nb_tx_queues;
	uint16_t old_nb_rx_queues = slave_eth_dev->data->nb_rx_queues;

	int errval;
	uint16_t q_id;

	/* Stop slave */
	rte_eth_dev_stop(slave_eth_dev->data->port_id);

	/* Enable interrupts on slave device if supported */
	if (slave_eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
		slave_eth_dev->data->dev_conf.intr_conf.lsc = 1;

	/* If RSS is enabled for bonding, try to enable it for slaves */
	if (bonded_eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
		if (bonded_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len
				!= 0) {
			slave_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len =
					bonded_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len;
			slave_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key =
					bonded_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key;
		} else {
			slave_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
		}

		slave_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf =
				bonded_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
		slave_eth_dev->data->dev_conf.rxmode.mq_mode =
				bonded_eth_dev->data->dev_conf.rxmode.mq_mode;
	}

	slave_eth_dev->data->dev_conf.rxmode.hw_vlan_filter =
			bonded_eth_dev->data->dev_conf.rxmode.hw_vlan_filter;

	/* Configure device */
	errval = rte_eth_dev_configure(slave_eth_dev->data->port_id,
			bonded_eth_dev->data->nb_rx_queues,
			bonded_eth_dev->data->nb_tx_queues,
			&(slave_eth_dev->data->dev_conf));
	if (errval != 0) {
		RTE_BOND_LOG(ERR, "Cannot configure slave device: port %u, err (%d)",
				slave_eth_dev->data->port_id, errval);
		return errval;
	}

	/* Setup Rx Queues */
	/* Use existing queues, if any */
	for (q_id = old_nb_rx_queues;
	     q_id < bonded_eth_dev->data->nb_rx_queues; q_id++) {
		bd_rx_q = (struct bond_rx_queue *)bonded_eth_dev->data->rx_queues[q_id];

		errval = rte_eth_rx_queue_setup(slave_eth_dev->data->port_id, q_id,
				bd_rx_q->nb_rx_desc,
				rte_eth_dev_socket_id(slave_eth_dev->data->port_id),
				&(bd_rx_q->rx_conf), bd_rx_q->mb_pool);
		if (errval != 0) {
			RTE_BOND_LOG(ERR,
					"rte_eth_rx_queue_setup: port=%d queue_id %d, err (%d)",
					slave_eth_dev->data->port_id, q_id, errval);
			return errval;
		}
	}

	/* Setup Tx Queues */
	/* Use existing queues, if any */
	for (q_id = old_nb_tx_queues;
	     q_id < bonded_eth_dev->data->nb_tx_queues; q_id++) {
		bd_tx_q = (struct bond_tx_queue *)bonded_eth_dev->data->tx_queues[q_id];

		errval = rte_eth_tx_queue_setup(slave_eth_dev->data->port_id, q_id,
				bd_tx_q->nb_tx_desc,
				rte_eth_dev_socket_id(slave_eth_dev->data->port_id),
				&bd_tx_q->tx_conf);
		if (errval != 0) {
			RTE_BOND_LOG(ERR,
					"rte_eth_tx_queue_setup: port=%d queue_id %d, err (%d)",
					slave_eth_dev->data->port_id, q_id, errval);
			return errval;
		}
	}

	/* Start device */
	errval = rte_eth_dev_start(slave_eth_dev->data->port_id);
	if (errval != 0) {
		RTE_BOND_LOG(ERR, "rte_eth_dev_start: port=%u, err (%d)",
				slave_eth_dev->data->port_id, errval);
		return -1;
	}

	/* If RSS is enabled for bonding, synchronize RETA */
	if (bonded_eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS) {
		int i;
		struct bond_dev_private *internals;

		internals = bonded_eth_dev->data->dev_private;

		for (i = 0; i < internals->slave_count; i++) {
			if (internals->slaves[i].port_id == slave_eth_dev->data->port_id) {
				errval = rte_eth_dev_rss_reta_update(
						slave_eth_dev->data->port_id,
						&internals->reta_conf[0],
						internals->slaves[i].reta_size);
				if (errval != 0) {
					RTE_LOG(WARNING, PMD,
							"rte_eth_dev_rss_reta_update on slave port %d fails (err %d)."
							" RSS Configuration for bonding may be inconsistent.\n",
							slave_eth_dev->data->port_id, errval);
				}
				break;
			}
		}
	}

	/* If lsc interrupt is set, check initial slave's link status */
	if (slave_eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC) {
		slave_eth_dev->dev_ops->link_update(slave_eth_dev, 0);
		bond_ethdev_lsc_event_callback(slave_eth_dev->data->port_id,
			RTE_ETH_EVENT_INTR_LSC, &bonded_eth_dev->data->port_id);
	}

	return 0;
}
void
slave_remove(struct bond_dev_private *internals,
		struct rte_eth_dev *slave_eth_dev)
{
	uint8_t i;

	for (i = 0; i < internals->slave_count; i++)
		if (internals->slaves[i].port_id ==
				slave_eth_dev->data->port_id)
			break;

	if (i < (internals->slave_count - 1))
		memmove(&internals->slaves[i], &internals->slaves[i + 1],
				sizeof(internals->slaves[0]) *
				(internals->slave_count - i - 1));

	internals->slave_count--;
}

static void
bond_ethdev_slave_link_status_change_monitor(void *cb_arg);
void
slave_add(struct bond_dev_private *internals,
		struct rte_eth_dev *slave_eth_dev)
{
	struct bond_slave_details *slave_details =
			&internals->slaves[internals->slave_count];

	slave_details->port_id = slave_eth_dev->data->port_id;
	slave_details->last_link_status = 0;

	/* Mark slave devices that don't support interrupts so we can
	 * compensate when we start the bond
	 */
	if (!(slave_eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)) {
		slave_details->link_status_poll_enabled = 1;
	}

	slave_details->link_status_wait_to_complete = 0;
	/* clean tlb_last_obytes when adding port for bonding device */
	memcpy(&(slave_details->persisted_mac_addr), slave_eth_dev->data->mac_addrs,
			sizeof(struct ether_addr));
}
void
bond_ethdev_primary_set(struct bond_dev_private *internals,
		uint8_t slave_port_id)
{
	int i;

	if (internals->active_slave_count < 1)
		internals->current_primary_port = slave_port_id;
	else
		/* Search bonded device slave ports for new proposed primary port */
		for (i = 0; i < internals->active_slave_count; i++) {
			if (internals->active_slaves[i] == slave_port_id)
				internals->current_primary_port = slave_port_id;
		}
}

static void
bond_ethdev_promiscuous_enable(struct rte_eth_dev *eth_dev);
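
/*
 * dev_ops start handler for the bonded device: assigns the bond MAC (from
 * the primary slave unless the user set one), reconfigures and starts every
 * slave, arms link-status polling for slaves without LSC interrupts, and
 * starts the mode-specific machinery (802.3ad state machines, TLB timer).
 */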
static int
bond_ethdev_start(struct rte_eth_dev *eth_dev)
{
	struct bond_dev_private *internals;
	int i;

	/* slave eth dev will be started by bonded device */
	if (check_for_bonded_ethdev(eth_dev)) {
		RTE_BOND_LOG(ERR, "User tried to explicitly start a slave eth_dev (%d)",
				eth_dev->data->port_id);
		return -1;
	}

	eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
	eth_dev->data->dev_started = 1;

	internals = eth_dev->data->dev_private;

	if (internals->slave_count == 0) {
		RTE_BOND_LOG(ERR, "Cannot start port since there are no slave devices");
		goto out_err;
	}

	if (internals->user_defined_mac == 0) {
		struct ether_addr *new_mac_addr = NULL;

		for (i = 0; i < internals->slave_count; i++)
			if (internals->slaves[i].port_id == internals->primary_port)
				new_mac_addr = &internals->slaves[i].persisted_mac_addr;

		if (new_mac_addr == NULL)
			goto out_err;

		if (mac_address_set(eth_dev, new_mac_addr) != 0) {
			RTE_BOND_LOG(ERR, "bonded port (%d) failed to update MAC address",
					eth_dev->data->port_id);
			goto out_err;
		}
	}

	/* Update all slave devices MACs */
	if (mac_address_slaves_update(eth_dev) != 0)
		goto out_err;

	/* If bonded device is configured in promiscuous mode then re-apply config */
	if (internals->promiscuous_en)
		bond_ethdev_promiscuous_enable(eth_dev);

	/* Reconfigure each slave device if starting bonded device */
	for (i = 0; i < internals->slave_count; i++) {
		if (slave_configure(eth_dev,
				&(rte_eth_devices[internals->slaves[i].port_id])) != 0) {
			RTE_BOND_LOG(ERR,
					"bonded port (%d) failed to reconfigure slave device (%d)",
					eth_dev->data->port_id, internals->slaves[i].port_id);
			goto out_err;
		}
		/* We will need to poll for link status if any slave doesn't
		 * support interrupts
		 */
		if (internals->slaves[i].link_status_poll_enabled)
			internals->link_status_polling_enabled = 1;
	}
	/* start polling if needed */
	if (internals->link_status_polling_enabled) {
		rte_eal_alarm_set(
			internals->link_status_polling_interval_ms * 1000,
			bond_ethdev_slave_link_status_change_monitor,
			(void *)&rte_eth_devices[internals->port_id]);
	}

	if (internals->user_defined_primary_port)
		bond_ethdev_primary_set(internals, internals->primary_port);

	if (internals->mode == BONDING_MODE_8023AD)
		bond_mode_8023ad_start(eth_dev);

	if (internals->mode == BONDING_MODE_TLB ||
			internals->mode == BONDING_MODE_ALB)
		bond_tlb_enable(internals);

	return 0;

out_err:
	eth_dev->data->dev_started = 0;
	return -1;
}
static void
bond_ethdev_free_queues(struct rte_eth_dev *dev)
{
	int i;

	if (dev->data->rx_queues != NULL) {
		for (i = 0; i < dev->data->nb_rx_queues; i++) {
			rte_free(dev->data->rx_queues[i]);
			dev->data->rx_queues[i] = NULL;
		}
		dev->data->nb_rx_queues = 0;
	}

	if (dev->data->tx_queues != NULL) {
		for (i = 0; i < dev->data->nb_tx_queues; i++) {
			rte_free(dev->data->tx_queues[i]);
			dev->data->tx_queues[i] = NULL;
		}
		dev->data->nb_tx_queues = 0;
	}
}
void
bond_ethdev_stop(struct rte_eth_dev *eth_dev)
{
	struct bond_dev_private *internals = eth_dev->data->dev_private;
	uint8_t i;

	if (internals->mode == BONDING_MODE_8023AD) {
		struct port *port;
		void *pkt = NULL;

		bond_mode_8023ad_stop(eth_dev);

		/* Discard all messages to/from mode 4 state machines */
		for (i = 0; i < internals->active_slave_count; i++) {
			port = &mode_8023ad_ports[internals->active_slaves[i]];

			RTE_ASSERT(port->rx_ring != NULL);
			while (rte_ring_dequeue(port->rx_ring, &pkt) != -ENOENT)
				rte_pktmbuf_free(pkt);

			RTE_ASSERT(port->tx_ring != NULL);
			while (rte_ring_dequeue(port->tx_ring, &pkt) != -ENOENT)
				rte_pktmbuf_free(pkt);
		}
	}

	if (internals->mode == BONDING_MODE_TLB ||
			internals->mode == BONDING_MODE_ALB) {
		bond_tlb_disable(internals);
		for (i = 0; i < internals->active_slave_count; i++)
			tlb_last_obytets[internals->active_slaves[i]] = 0;
	}

	internals->active_slave_count = 0;
	internals->link_status_polling_enabled = 0;
	for (i = 0; i < internals->slave_count; i++)
		internals->slaves[i].last_link_status = 0;

	eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
	eth_dev->data->dev_started = 0;
}
void
bond_ethdev_close(struct rte_eth_dev *dev)
{
	struct bond_dev_private *internals = dev->data->dev_private;

	bond_ethdev_free_queues(dev);
	rte_bitmap_reset(internals->vlan_filter_bmp);
}

/* forward declaration */
static int bond_ethdev_configure(struct rte_eth_dev *dev);
static void
bond_ethdev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct bond_dev_private *internals = dev->data->dev_private;
	uint16_t max_nb_rx_queues = UINT16_MAX;
	uint16_t max_nb_tx_queues = UINT16_MAX;

	dev_info->max_mac_addrs = 1;

	dev_info->max_rx_pktlen = internals->candidate_max_rx_pktlen
			? internals->candidate_max_rx_pktlen
			: ETHER_MAX_JUMBO_FRAME_LEN;

	if (internals->slave_count > 0) {
		/* Max number of tx/rx queues that the bonded device can
		 * support is the minimum values of the bonded slaves, as
		 * all slaves must be capable of supporting the same number
		 * of tx/rx queues.
		 */
		struct rte_eth_dev_info slave_info;
		uint8_t idx;

		for (idx = 0; idx < internals->slave_count; idx++) {
			rte_eth_dev_info_get(internals->slaves[idx].port_id,
					&slave_info);

			if (slave_info.max_rx_queues < max_nb_rx_queues)
				max_nb_rx_queues = slave_info.max_rx_queues;

			if (slave_info.max_tx_queues < max_nb_tx_queues)
				max_nb_tx_queues = slave_info.max_tx_queues;
		}
	}

	dev_info->max_rx_queues = max_nb_rx_queues;
	dev_info->max_tx_queues = max_nb_tx_queues;

	dev_info->min_rx_bufsize = 0;
	dev_info->pci_dev = NULL;

	dev_info->rx_offload_capa = internals->rx_offload_capa;
	dev_info->tx_offload_capa = internals->tx_offload_capa;
	dev_info->flow_type_rss_offloads = internals->flow_type_rss_offloads;

	dev_info->reta_size = internals->reta_size;
}
static int
bond_ethdev_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	int res;
	uint8_t i;
	struct bond_dev_private *internals = dev->data->dev_private;

	/* don't do this while a slave is being added */
	rte_spinlock_lock(&internals->lock);

	if (on)
		rte_bitmap_set(internals->vlan_filter_bmp, vlan_id);
	else
		rte_bitmap_clear(internals->vlan_filter_bmp, vlan_id);

	for (i = 0; i < internals->slave_count; i++) {
		uint8_t port_id = internals->slaves[i].port_id;

		res = rte_eth_dev_vlan_filter(port_id, vlan_id, on);
		if (res == ENOTSUP)
			RTE_LOG(WARNING, PMD,
				"Setting VLAN filter on slave port %u not supported.\n",
				port_id);
	}

	rte_spinlock_unlock(&internals->lock);
	return 0;
}
static int
bond_ethdev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
		uint16_t nb_rx_desc, unsigned int socket_id __rte_unused,
		const struct rte_eth_rxconf *rx_conf, struct rte_mempool *mb_pool)
{
	struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)
			rte_zmalloc_socket(NULL, sizeof(struct bond_rx_queue),
					0, dev->data->numa_node);
	if (bd_rx_q == NULL)
		return -1;

	bd_rx_q->queue_id = rx_queue_id;
	bd_rx_q->dev_private = dev->data->dev_private;

	bd_rx_q->nb_rx_desc = nb_rx_desc;

	memcpy(&(bd_rx_q->rx_conf), rx_conf, sizeof(struct rte_eth_rxconf));
	bd_rx_q->mb_pool = mb_pool;

	dev->data->rx_queues[rx_queue_id] = bd_rx_q;

	return 0;
}

static int
bond_ethdev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
		uint16_t nb_tx_desc, unsigned int socket_id __rte_unused,
		const struct rte_eth_txconf *tx_conf)
{
	struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)
			rte_zmalloc_socket(NULL, sizeof(struct bond_tx_queue),
					0, dev->data->numa_node);

	if (bd_tx_q == NULL)
		return -1;

	bd_tx_q->queue_id = tx_queue_id;
	bd_tx_q->dev_private = dev->data->dev_private;

	bd_tx_q->nb_tx_desc = nb_tx_desc;
	memcpy(&(bd_tx_q->tx_conf), tx_conf, sizeof(bd_tx_q->tx_conf));

	dev->data->tx_queues[tx_queue_id] = bd_tx_q;

	return 0;
}
static void
bond_ethdev_rx_queue_release(void *queue)
{
	if (queue == NULL)
		return;

	rte_free(queue);
}

static void
bond_ethdev_tx_queue_release(void *queue)
{
	if (queue == NULL)
		return;

	rte_free(queue);
}
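
/*
 * Alarm callback that periodically polls link status on slaves whose PMDs
 * do not support LSC interrupts, and injects synthetic LSC events into
 * bond_ethdev_lsc_event_callback() when a status change is observed.
 */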
static void
bond_ethdev_slave_link_status_change_monitor(void *cb_arg)
{
	struct rte_eth_dev *bonded_ethdev, *slave_ethdev;
	struct bond_dev_private *internals;

	/* Default value for polling slave found is true as we don't want to
	 * disable the polling thread if we cannot get the lock */
	int i, polling_slave_found = 1;

	if (cb_arg == NULL)
		return;

	bonded_ethdev = (struct rte_eth_dev *)cb_arg;
	internals = (struct bond_dev_private *)bonded_ethdev->data->dev_private;

	if (!bonded_ethdev->data->dev_started ||
		!internals->link_status_polling_enabled)
		return;

	/* If device is currently being configured then don't check slaves link
	 * status, wait until next period */
	if (rte_spinlock_trylock(&internals->lock)) {
		if (internals->slave_count > 0)
			polling_slave_found = 0;

		for (i = 0; i < internals->slave_count; i++) {
			if (!internals->slaves[i].link_status_poll_enabled)
				continue;

			slave_ethdev = &rte_eth_devices[internals->slaves[i].port_id];
			polling_slave_found = 1;

			/* Update slave link status */
			(*slave_ethdev->dev_ops->link_update)(slave_ethdev,
					internals->slaves[i].link_status_wait_to_complete);

			/* if link status has changed since last checked then call lsc
			 * event callback */
			if (slave_ethdev->data->dev_link.link_status !=
					internals->slaves[i].last_link_status) {
				internals->slaves[i].last_link_status =
						slave_ethdev->data->dev_link.link_status;

				bond_ethdev_lsc_event_callback(internals->slaves[i].port_id,
						RTE_ETH_EVENT_INTR_LSC,
						&bonded_ethdev->data->port_id);
			}
		}
		rte_spinlock_unlock(&internals->lock);
	}

	if (polling_slave_found)
		/* Set alarm to continue monitoring link status of slave ethdev's */
		rte_eal_alarm_set(internals->link_status_polling_interval_ms * 1000,
				bond_ethdev_slave_link_status_change_monitor, cb_arg);
}
static int
bond_ethdev_link_update(struct rte_eth_dev *bonded_eth_dev,
		int wait_to_complete)
{
	struct bond_dev_private *internals = bonded_eth_dev->data->dev_private;

	if (!bonded_eth_dev->data->dev_started ||
		internals->active_slave_count == 0) {
		bonded_eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
		return 0;
	} else {
		struct rte_eth_dev *slave_eth_dev;
		int i, link_up = 0;

		for (i = 0; i < internals->active_slave_count; i++) {
			slave_eth_dev = &rte_eth_devices[internals->active_slaves[i]];

			(*slave_eth_dev->dev_ops->link_update)(slave_eth_dev,
					wait_to_complete);
			if (slave_eth_dev->data->dev_link.link_status == ETH_LINK_UP) {
				link_up = 1;
				break;
			}
		}

		bonded_eth_dev->data->dev_link.link_status = link_up;
	}

	return 0;
}
static void
bond_ethdev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	struct bond_dev_private *internals = dev->data->dev_private;
	struct rte_eth_stats slave_stats;
	int i, j;

	for (i = 0; i < internals->slave_count; i++) {
		rte_eth_stats_get(internals->slaves[i].port_id, &slave_stats);

		stats->ipackets += slave_stats.ipackets;
		stats->opackets += slave_stats.opackets;
		stats->ibytes += slave_stats.ibytes;
		stats->obytes += slave_stats.obytes;
		stats->imissed += slave_stats.imissed;
		stats->ierrors += slave_stats.ierrors;
		stats->oerrors += slave_stats.oerrors;
		stats->rx_nombuf += slave_stats.rx_nombuf;

		for (j = 0; j < RTE_ETHDEV_QUEUE_STAT_CNTRS; j++) {
			stats->q_ipackets[j] += slave_stats.q_ipackets[j];
			stats->q_opackets[j] += slave_stats.q_opackets[j];
			stats->q_ibytes[j] += slave_stats.q_ibytes[j];
			stats->q_obytes[j] += slave_stats.q_obytes[j];
			stats->q_errors[j] += slave_stats.q_errors[j];
		}
	}
}

static void
bond_ethdev_stats_reset(struct rte_eth_dev *dev)
{
	struct bond_dev_private *internals = dev->data->dev_private;
	int i;

	for (i = 0; i < internals->slave_count; i++)
		rte_eth_stats_reset(internals->slaves[i].port_id);
}
static void
bond_ethdev_promiscuous_enable(struct rte_eth_dev *eth_dev)
{
	struct bond_dev_private *internals = eth_dev->data->dev_private;
	int i;

	internals->promiscuous_en = 1;

	switch (internals->mode) {
	/* Promiscuous mode is propagated to all slaves */
	case BONDING_MODE_ROUND_ROBIN:
	case BONDING_MODE_BALANCE:
	case BONDING_MODE_BROADCAST:
		for (i = 0; i < internals->slave_count; i++)
			rte_eth_promiscuous_enable(internals->slaves[i].port_id);
		break;
	/* In mode 4, promiscuous mode is managed when a slave is added/removed */
	case BONDING_MODE_8023AD:
		break;
	/* Promiscuous mode is propagated only to the primary slave */
	case BONDING_MODE_ACTIVE_BACKUP:
	case BONDING_MODE_TLB:
	case BONDING_MODE_ALB:
	default:
		rte_eth_promiscuous_enable(internals->current_primary_port);
	}
}

static void
bond_ethdev_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct bond_dev_private *internals = dev->data->dev_private;
	int i;

	internals->promiscuous_en = 0;

	switch (internals->mode) {
	/* Promiscuous mode is propagated to all slaves */
	case BONDING_MODE_ROUND_ROBIN:
	case BONDING_MODE_BALANCE:
	case BONDING_MODE_BROADCAST:
		for (i = 0; i < internals->slave_count; i++)
			rte_eth_promiscuous_disable(internals->slaves[i].port_id);
		break;
	/* In mode 4, promiscuous mode is managed when a slave is added/removed */
	case BONDING_MODE_8023AD:
		break;
	/* Promiscuous mode is propagated only to the primary slave */
	case BONDING_MODE_ACTIVE_BACKUP:
	case BONDING_MODE_TLB:
	case BONDING_MODE_ALB:
	default:
		rte_eth_promiscuous_disable(internals->current_primary_port);
	}
}
static void
bond_ethdev_delayed_lsc_propagation(void *arg)
{
	if (arg == NULL)
		return;

	_rte_eth_dev_callback_process((struct rte_eth_dev *)arg,
			RTE_ETH_EVENT_INTR_LSC, NULL);
}
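
/*
 * LSC event handler for slave ports: on link up, activate the slave (the
 * first active slave also sets the bond's link properties and may become
 * primary); on link down, deactivate it and pick a new primary. Propagation
 * of the bond's own LSC event can be delayed by link_up/down_delay_ms.
 */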
static void
bond_ethdev_lsc_event_callback(uint8_t port_id, enum rte_eth_event_type type,
		void *param)
{
	struct rte_eth_dev *bonded_eth_dev, *slave_eth_dev;
	struct bond_dev_private *internals;
	struct rte_eth_link link;

	int i, valid_slave = 0;
	uint8_t active_pos;
	uint8_t lsc_flag = 0;

	if (type != RTE_ETH_EVENT_INTR_LSC || param == NULL)
		return;

	bonded_eth_dev = &rte_eth_devices[*(uint8_t *)param];
	slave_eth_dev = &rte_eth_devices[port_id];

	if (check_for_bonded_ethdev(bonded_eth_dev))
		return;

	internals = bonded_eth_dev->data->dev_private;

	/* If the device isn't started don't handle interrupts */
	if (!bonded_eth_dev->data->dev_started)
		return;

	/* verify that port_id is a valid slave of bonded port */
	for (i = 0; i < internals->slave_count; i++) {
		if (internals->slaves[i].port_id == port_id) {
			valid_slave = 1;
			break;
		}
	}

	if (!valid_slave)
		return;

	/* Search for port in active port list */
	active_pos = find_slave_by_id(internals->active_slaves,
			internals->active_slave_count, port_id);

	rte_eth_link_get_nowait(port_id, &link);
	if (link.link_status) {
		if (active_pos < internals->active_slave_count)
			return;

		/* if no active slave ports then set this port to be primary port */
		if (internals->active_slave_count < 1) {
			/* If first active slave, then change link status */
			bonded_eth_dev->data->dev_link.link_status = ETH_LINK_UP;
			internals->current_primary_port = port_id;
			lsc_flag = 1;

			mac_address_slaves_update(bonded_eth_dev);

			/* Inherit eth dev link properties from first active slave */
			link_properties_set(bonded_eth_dev,
					&(slave_eth_dev->data->dev_link));
		} else {
			if (link_properties_valid(
					&bonded_eth_dev->data->dev_link, &link) != 0) {
				slave_eth_dev->data->dev_flags &=
						(~RTE_ETH_DEV_BONDED_SLAVE);
				RTE_LOG(ERR, PMD,
						"port %u invalid speed/duplex\n",
						port_id);
				return;
			}
		}

		activate_slave(bonded_eth_dev, port_id);

		/* If user has defined the primary port then default to using it */
		if (internals->user_defined_primary_port &&
				internals->primary_port == port_id)
			bond_ethdev_primary_set(internals, port_id);
	} else {
		if (active_pos == internals->active_slave_count)
			return;

		/* Remove from active slave list */
		deactivate_slave(bonded_eth_dev, port_id);

		/* No active slaves, change link status to down and reset other
		 * link properties */
		if (internals->active_slave_count < 1) {
			lsc_flag = 1;
			bonded_eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;

			link_properties_reset(bonded_eth_dev);
		}

		/* Update primary id, take first active slave from list or if none
		 * available set to -1 */
		if (port_id == internals->current_primary_port) {
			if (internals->active_slave_count > 0)
				bond_ethdev_primary_set(internals,
						internals->active_slaves[0]);
			else
				internals->current_primary_port = internals->primary_port;
		}
	}

	if (lsc_flag) {
		/* Cancel any possible outstanding interrupts if delays are enabled */
		if (internals->link_up_delay_ms > 0 ||
				internals->link_down_delay_ms > 0)
			rte_eal_alarm_cancel(bond_ethdev_delayed_lsc_propagation,
					bonded_eth_dev);

		if (bonded_eth_dev->data->dev_link.link_status) {
			if (internals->link_up_delay_ms > 0)
				rte_eal_alarm_set(internals->link_up_delay_ms * 1000,
						bond_ethdev_delayed_lsc_propagation,
						(void *)bonded_eth_dev);
			else
				_rte_eth_dev_callback_process(bonded_eth_dev,
						RTE_ETH_EVENT_INTR_LSC, NULL);
		} else {
			if (internals->link_down_delay_ms > 0)
				rte_eal_alarm_set(internals->link_down_delay_ms * 1000,
						bond_ethdev_delayed_lsc_propagation,
						(void *)bonded_eth_dev);
			else
				_rte_eth_dev_callback_process(bonded_eth_dev,
						RTE_ETH_EVENT_INTR_LSC, NULL);
		}
	}
}
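/*
 * Editorial illustration (not compiled into the driver): an application can
 * observe the bonded device's aggregated link state by registering for the
 * same LSC event that the callback above (re)raises on the bonded port.
 * Names prefixed "example_" and "bonded_port_id" are placeholders.
 */
#if 0
static void
example_bond_lsc_cb(uint8_t port_id, enum rte_eth_event_type type,
		void *param)
{
	struct rte_eth_link link;

	RTE_SET_USED(type);
	RTE_SET_USED(param);

	/* Re-read the aggregated link state of the bonded port */
	rte_eth_link_get_nowait(port_id, &link);
	printf("bonded port %u link %s\n", port_id,
			link.link_status ? "up" : "down");
}

static void
example_register_bond_lsc(uint8_t bonded_port_id)
{
	rte_eth_dev_callback_register(bonded_port_id, RTE_ETH_EVENT_INTR_LSC,
			example_bond_lsc_cb, NULL);
}
#endif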
static int
bond_ethdev_rss_reta_update(struct rte_eth_dev *dev,
		struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
{
	unsigned i, j;
	int result = 0;
	int slave_reta_size;
	unsigned reta_count;
	struct bond_dev_private *internals = dev->data->dev_private;

	if (reta_size != internals->reta_size)
		return -EINVAL;

	/* Copy RETA table */
	reta_count = reta_size / RTE_RETA_GROUP_SIZE;

	for (i = 0; i < reta_count; i++) {
		internals->reta_conf[i].mask = reta_conf[i].mask;
		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
			if ((reta_conf[i].mask >> j) & 0x01)
				internals->reta_conf[i].reta[j] = reta_conf[i].reta[j];
	}

	/* Fill rest of array */
	for (; i < RTE_DIM(internals->reta_conf); i += reta_count)
		memcpy(&internals->reta_conf[i], &internals->reta_conf[0],
				sizeof(internals->reta_conf[0]) * reta_count);

	/* Propagate RETA over slaves */
	for (i = 0; i < internals->slave_count; i++) {
		slave_reta_size = internals->slaves[i].reta_size;
		result = rte_eth_dev_rss_reta_update(internals->slaves[i].port_id,
				&internals->reta_conf[0], slave_reta_size);
		if (result < 0)
			return result;
	}

	return 0;
}
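/*
 * Editorial illustration (not compiled into the driver): updating the
 * bonded port's RETA from an application. The handler above mirrors the
 * entries into its shadow table and replays them on every slave at that
 * slave's own RETA size. "bonded_port_id" and the even queue spread are
 * placeholder assumptions for the sketch.
 */
#if 0
static int
example_spread_reta(uint8_t bonded_port_id, uint16_t reta_size,
		uint16_t nb_rx_queues)
{
	struct rte_eth_rss_reta_entry64 reta_conf[reta_size / RTE_RETA_GROUP_SIZE];
	unsigned i, j;

	for (i = 0; i < reta_size / RTE_RETA_GROUP_SIZE; i++) {
		reta_conf[i].mask = ~0ULL;	/* update every entry in the group */
		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
			reta_conf[i].reta[j] =
					(i * RTE_RETA_GROUP_SIZE + j) % nb_rx_queues;
	}

	return rte_eth_dev_rss_reta_update(bonded_port_id, reta_conf, reta_size);
}
#endif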
static int
bond_ethdev_rss_reta_query(struct rte_eth_dev *dev,
		struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
{
	int i, j;
	struct bond_dev_private *internals = dev->data->dev_private;

	if (reta_size != internals->reta_size)
		return -EINVAL;

	/* Copy RETA table */
	for (i = 0; i < reta_size / RTE_RETA_GROUP_SIZE; i++)
		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
			if ((reta_conf[i].mask >> j) & 0x01)
				reta_conf[i].reta[j] = internals->reta_conf[i].reta[j];

	return 0;
}
static int
bond_ethdev_rss_hash_update(struct rte_eth_dev *dev,
		struct rte_eth_rss_conf *rss_conf)
{
	int i, result = 0;
	struct bond_dev_private *internals = dev->data->dev_private;
	struct rte_eth_rss_conf bond_rss_conf;

	memcpy(&bond_rss_conf, rss_conf, sizeof(struct rte_eth_rss_conf));

	bond_rss_conf.rss_hf &= internals->flow_type_rss_offloads;

	if (bond_rss_conf.rss_hf != 0)
		dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf = bond_rss_conf.rss_hf;

	if (bond_rss_conf.rss_key && bond_rss_conf.rss_key_len <
			sizeof(internals->rss_key)) {
		if (bond_rss_conf.rss_key_len == 0)
			bond_rss_conf.rss_key_len = 40;
		internals->rss_key_len = bond_rss_conf.rss_key_len;
		memcpy(internals->rss_key, bond_rss_conf.rss_key,
				internals->rss_key_len);
	}

	for (i = 0; i < internals->slave_count; i++) {
		result = rte_eth_dev_rss_hash_update(internals->slaves[i].port_id,
				&bond_rss_conf);
		if (result < 0)
			return result;
	}

	return 0;
}
static int
bond_ethdev_rss_hash_conf_get(struct rte_eth_dev *dev,
		struct rte_eth_rss_conf *rss_conf)
{
	struct bond_dev_private *internals = dev->data->dev_private;

	rss_conf->rss_hf = dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
	rss_conf->rss_key_len = internals->rss_key_len;
	if (rss_conf->rss_key)
		memcpy(rss_conf->rss_key, internals->rss_key, internals->rss_key_len);

	return 0;
}
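/*
 * Editorial illustration (not compiled into the driver): narrowing the
 * bonded port's RSS hash functions at runtime. rss_hf bits outside
 * flow_type_rss_offloads are silently masked off by the update handler
 * above; the 40-byte key buffer mirrors the driver's default key length.
 * "bonded_port_id" is a placeholder.
 */
#if 0
static int
example_set_rss(uint8_t bonded_port_id)
{
	uint8_t key[40];
	struct rte_eth_rss_conf conf = {
		.rss_key = NULL,	/* keep the current key */
		.rss_key_len = 0,
		.rss_hf = ETH_RSS_IP | ETH_RSS_TCP,
	};

	if (rte_eth_dev_rss_hash_update(bonded_port_id, &conf) != 0)
		return -1;

	/* Read the configuration back, including the key in use */
	conf.rss_key = key;
	conf.rss_key_len = sizeof(key);
	return rte_eth_dev_rss_hash_conf_get(bonded_port_id, &conf);
}
#endif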
const struct eth_dev_ops default_dev_ops = {
	.dev_start = bond_ethdev_start,
	.dev_stop = bond_ethdev_stop,
	.dev_close = bond_ethdev_close,
	.dev_configure = bond_ethdev_configure,
	.dev_infos_get = bond_ethdev_info,
	.vlan_filter_set = bond_ethdev_vlan_filter_set,
	.rx_queue_setup = bond_ethdev_rx_queue_setup,
	.tx_queue_setup = bond_ethdev_tx_queue_setup,
	.rx_queue_release = bond_ethdev_rx_queue_release,
	.tx_queue_release = bond_ethdev_tx_queue_release,
	.link_update = bond_ethdev_link_update,
	.stats_get = bond_ethdev_stats_get,
	.stats_reset = bond_ethdev_stats_reset,
	.promiscuous_enable = bond_ethdev_promiscuous_enable,
	.promiscuous_disable = bond_ethdev_promiscuous_disable,
	.reta_update = bond_ethdev_rss_reta_update,
	.reta_query = bond_ethdev_rss_reta_query,
	.rss_hash_update = bond_ethdev_rss_hash_update,
	.rss_hash_conf_get = bond_ethdev_rss_hash_conf_get
};
static int
bond_probe(const char *name, const char *params)
{
	struct bond_dev_private *internals;
	struct rte_kvargs *kvlist;
	uint8_t bonding_mode, socket_id;
	int arg_count, port_id;

	RTE_LOG(INFO, EAL, "Initializing pmd_bond for %s\n", name);

	kvlist = rte_kvargs_parse(params, pmd_bond_init_valid_arguments);
	if (kvlist == NULL)
		return -1;

	/* Parse link bonding mode */
	if (rte_kvargs_count(kvlist, PMD_BOND_MODE_KVARG) == 1) {
		if (rte_kvargs_process(kvlist, PMD_BOND_MODE_KVARG,
				&bond_ethdev_parse_slave_mode_kvarg,
				&bonding_mode) != 0) {
			RTE_LOG(ERR, EAL, "Invalid mode for bonded device %s\n",
					name);
			goto parse_error;
		}
	} else {
		RTE_LOG(ERR, EAL, "Mode must be specified only once for bonded "
				"device %s\n", name);
		goto parse_error;
	}

	/* Parse socket id to create bonding device on */
	arg_count = rte_kvargs_count(kvlist, PMD_BOND_SOCKET_ID_KVARG);
	if (arg_count == 1) {
		if (rte_kvargs_process(kvlist, PMD_BOND_SOCKET_ID_KVARG,
				&bond_ethdev_parse_socket_id_kvarg, &socket_id)
				!= 0) {
			RTE_LOG(ERR, EAL, "Invalid socket Id specified for "
					"bonded device %s\n", name);
			goto parse_error;
		}
	} else if (arg_count > 1) {
		RTE_LOG(ERR, EAL, "Socket Id can be specified only once for "
				"bonded device %s\n", name);
		goto parse_error;
	} else {
		socket_id = rte_socket_id();
	}

	/* Create link bonding eth device */
	port_id = rte_eth_bond_create(name, bonding_mode, socket_id);
	if (port_id < 0) {
		RTE_LOG(ERR, EAL, "Failed to create bonded device %s in mode %u on "
				"socket %u.\n", name, bonding_mode, socket_id);
		goto parse_error;
	}
	internals = rte_eth_devices[port_id].data->dev_private;
	internals->kvlist = kvlist;

	RTE_LOG(INFO, EAL, "Created bonded device %s on port %d in mode %u on "
			"socket %u.\n", name, port_id, bonding_mode, socket_id);
	return 0;

parse_error:
	rte_kvargs_free(kvlist);
	return -1;
}
static int
bond_remove(const char *name)
{
	int ret;

	if (name == NULL)
		return -EINVAL;

	RTE_LOG(INFO, EAL, "Uninitializing pmd_bond for %s\n", name);

	/* free link bonding eth device */
	ret = rte_eth_bond_free(name);
	if (ret < 0)
		RTE_LOG(ERR, EAL, "Failed to free %s\n", name);

	return ret;
}
/* This function resolves the slave port ids once all the other pdevs and
 * vdevs have been allocated */
static int
bond_ethdev_configure(struct rte_eth_dev *dev)
{
	char *name = dev->data->name;
	struct bond_dev_private *internals = dev->data->dev_private;
	struct rte_kvargs *kvlist = internals->kvlist;
	int arg_count;
	uint8_t port_id = dev - rte_eth_devices;

	static const uint8_t default_rss_key[40] = {
		0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2, 0x41, 0x67, 0x25, 0x3D,
		0x43, 0xA3, 0x8F, 0xB0, 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
		0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C, 0x6A, 0x42, 0xB7, 0x3B,
		0xBE, 0xAC, 0x01, 0xFA
	};

	unsigned i, j;

	/* If RSS is enabled, fill table and key with default values */
	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS) {
		dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key = internals->rss_key;
		dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len = 0;
		memcpy(internals->rss_key, default_rss_key, 40);

		for (i = 0; i < RTE_DIM(internals->reta_conf); i++) {
			internals->reta_conf[i].mask = ~0LL;
			for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
				internals->reta_conf[i].reta[j] = j % dev->data->nb_rx_queues;
		}
	}
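	/*
	 * Worked example of the default spread above (an illustration, not
	 * code): with nb_rx_queues == 4 every RETA group is initialised to
	 * 0, 1, 2, 3, 0, 1, 2, 3, ... so flows hash evenly across the RX
	 * queues until an application overrides the table via reta_update.
	 */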
	/* set the max_rx_pktlen */
	internals->max_rx_pktlen = internals->candidate_max_rx_pktlen;

	/*
	 * if no kvlist, it means that this bonded device has been created
	 * through the bonding api.
	 */
	if (!kvlist)
		return 0;

	/* Parse MAC address for bonded device */
	arg_count = rte_kvargs_count(kvlist, PMD_BOND_MAC_ADDR_KVARG);
	if (arg_count == 1) {
		struct ether_addr bond_mac;

		if (rte_kvargs_process(kvlist, PMD_BOND_MAC_ADDR_KVARG,
				&bond_ethdev_parse_bond_mac_addr_kvarg, &bond_mac) < 0) {
			RTE_LOG(ERR, EAL, "Invalid mac address for bonded device %s\n",
					name);
			return -1;
		}

		/* Set MAC address */
		if (rte_eth_bond_mac_address_set(port_id, &bond_mac) != 0) {
			RTE_LOG(ERR, EAL,
					"Failed to set mac address on bonded device %s\n",
					name);
			return -1;
		}
	} else if (arg_count > 1) {
		RTE_LOG(ERR, EAL,
				"MAC address can be specified only once for bonded device %s\n",
				name);
		return -1;
	}

	/* Parse/set balance mode transmit policy */
	arg_count = rte_kvargs_count(kvlist, PMD_BOND_XMIT_POLICY_KVARG);
	if (arg_count == 1) {
		uint8_t xmit_policy;

		if (rte_kvargs_process(kvlist, PMD_BOND_XMIT_POLICY_KVARG,
				&bond_ethdev_parse_balance_xmit_policy_kvarg,
				&xmit_policy) != 0) {
			RTE_LOG(ERR, EAL,
					"Invalid xmit policy specified for bonded device %s\n",
					name);
			return -1;
		}

		/* Set balance mode transmit policy */
		if (rte_eth_bond_xmit_policy_set(port_id, xmit_policy) != 0) {
			RTE_LOG(ERR, EAL,
					"Failed to set balance xmit policy on bonded device %s\n",
					name);
			return -1;
		}
	} else if (arg_count > 1) {
		RTE_LOG(ERR, EAL,
				"Transmit policy can be specified only once for bonded device"
				" %s\n", name);
		return -1;
	}

	/* Parse/add slave ports to bonded device */
	if (rte_kvargs_count(kvlist, PMD_BOND_SLAVE_PORT_KVARG) > 0) {
		struct bond_ethdev_slave_ports slave_ports;

		memset(&slave_ports, 0, sizeof(slave_ports));

		if (rte_kvargs_process(kvlist, PMD_BOND_SLAVE_PORT_KVARG,
				&bond_ethdev_parse_slave_port_kvarg, &slave_ports) != 0) {
			RTE_LOG(ERR, EAL,
					"Failed to parse slave ports for bonded device %s\n",
					name);
			return -1;
		}

		for (i = 0; i < slave_ports.slave_count; i++) {
			if (rte_eth_bond_slave_add(port_id, slave_ports.slaves[i]) != 0) {
				RTE_LOG(ERR, EAL,
						"Failed to add port %d as slave to bonded device %s\n",
						slave_ports.slaves[i], name);
			}
		}
	} else {
		RTE_LOG(INFO, EAL, "No slaves specified for bonded device %s\n", name);
		return -1;
	}

	/* Parse/set primary slave port id */
	arg_count = rte_kvargs_count(kvlist, PMD_BOND_PRIMARY_SLAVE_KVARG);
	if (arg_count == 1) {
		uint8_t primary_slave_port_id;

		if (rte_kvargs_process(kvlist,
				PMD_BOND_PRIMARY_SLAVE_KVARG,
				&bond_ethdev_parse_primary_slave_port_id_kvarg,
				&primary_slave_port_id) < 0) {
			RTE_LOG(ERR, EAL,
					"Invalid primary slave port id specified for bonded device"
					" %s\n", name);
			return -1;
		}

		/* Set primary slave port id */
		if (rte_eth_bond_primary_set(port_id, (uint8_t)primary_slave_port_id)
				!= 0) {
			RTE_LOG(ERR, EAL,
					"Failed to set primary slave port %d on bonded device %s\n",
					primary_slave_port_id, name);
			return -1;
		}
	} else if (arg_count > 1) {
		RTE_LOG(ERR, EAL,
				"Primary slave can be specified only once for bonded device"
				" %s\n", name);
		return -1;
	}

	/* Parse link status monitor polling interval */
	arg_count = rte_kvargs_count(kvlist, PMD_BOND_LSC_POLL_PERIOD_KVARG);
	if (arg_count == 1) {
		uint32_t lsc_poll_interval_ms;

		if (rte_kvargs_process(kvlist,
				PMD_BOND_LSC_POLL_PERIOD_KVARG,
				&bond_ethdev_parse_time_ms_kvarg,
				&lsc_poll_interval_ms) < 0) {
			RTE_LOG(ERR, EAL,
					"Invalid lsc polling interval value specified for bonded"
					" device %s\n", name);
			return -1;
		}

		if (rte_eth_bond_link_monitoring_set(port_id, lsc_poll_interval_ms)
				!= 0) {
			RTE_LOG(ERR, EAL,
					"Failed to set lsc monitor polling interval (%u ms) on"
					" bonded device %s\n", lsc_poll_interval_ms, name);
			return -1;
		}
	} else if (arg_count > 1) {
		RTE_LOG(ERR, EAL,
				"LSC polling interval can be specified only once for bonded"
				" device %s\n", name);
		return -1;
	}

	/* Parse link up interrupt propagation delay */
	arg_count = rte_kvargs_count(kvlist, PMD_BOND_LINK_UP_PROP_DELAY_KVARG);
	if (arg_count == 1) {
		uint32_t link_up_delay_ms;

		if (rte_kvargs_process(kvlist,
				PMD_BOND_LINK_UP_PROP_DELAY_KVARG,
				&bond_ethdev_parse_time_ms_kvarg,
				&link_up_delay_ms) < 0) {
			RTE_LOG(ERR, EAL,
					"Invalid link up propagation delay value specified for"
					" bonded device %s\n", name);
			return -1;
		}

		/* Set link up propagation delay */
		if (rte_eth_bond_link_up_prop_delay_set(port_id, link_up_delay_ms)
				!= 0) {
			RTE_LOG(ERR, EAL,
					"Failed to set link up propagation delay (%u ms) on bonded"
					" device %s\n", link_up_delay_ms, name);
			return -1;
		}
	} else if (arg_count > 1) {
		RTE_LOG(ERR, EAL,
				"Link up propagation delay can be specified only once for"
				" bonded device %s\n", name);
		return -1;
	}

	/* Parse link down interrupt propagation delay */
	arg_count = rte_kvargs_count(kvlist, PMD_BOND_LINK_DOWN_PROP_DELAY_KVARG);
	if (arg_count == 1) {
		uint32_t link_down_delay_ms;

		if (rte_kvargs_process(kvlist,
				PMD_BOND_LINK_DOWN_PROP_DELAY_KVARG,
				&bond_ethdev_parse_time_ms_kvarg,
				&link_down_delay_ms) < 0) {
			RTE_LOG(ERR, EAL,
					"Invalid link down propagation delay value specified for"
					" bonded device %s\n", name);
			return -1;
		}

		/* Set link down propagation delay */
		if (rte_eth_bond_link_down_prop_delay_set(port_id, link_down_delay_ms)
				!= 0) {
			RTE_LOG(ERR, EAL,
					"Failed to set link down propagation delay (%u ms) on"
					" bonded device %s\n", link_down_delay_ms, name);
			return -1;
		}
	} else if (arg_count > 1) {
		RTE_LOG(ERR, EAL,
				"Link down propagation delay can be specified only once for"
				" bonded device %s\n", name);
		return -1;
	}

	return 0;
}
static struct rte_vdev_driver bond_drv = {
	.probe = bond_probe,
	.remove = bond_remove,
};

RTE_PMD_REGISTER_VDEV(net_bonding, bond_drv);
RTE_PMD_REGISTER_ALIAS(net_bonding, eth_bond);

RTE_PMD_REGISTER_PARAM_STRING(net_bonding,
	"slave=<ifc> "
	"primary=<ifc> "
	"mode=[0-6] "
	"xmit_policy=[l2 | l23 | l34] "
	"socket_id=<int> "
	"mac=<mac addr> "
	"lsc_poll_period_ms=<int> "
	"up_delay=<int> "
	"down_delay=<int>");