New upstream version 17.08
[deb_dpdk.git] / drivers / net / bonding / rte_eth_bond_8023ad.c
index 7b863d6..20b5a89 100644 (file)
@@ -44,7 +44,6 @@
 #include "rte_eth_bond_private.h"
 
 static void bond_mode_8023ad_ext_periodic_cb(void *arg);
-
 #ifdef RTE_LIBRTE_BOND_DEBUG_8023AD
 #define MODE4_DEBUG(fmt, ...) RTE_LOG(DEBUG, PMD, "%6u [Port %u: %s] " fmt, \
                        bond_dbg_get_time_diff_ms(), slave_id, \
@@ -435,7 +434,7 @@ periodic_machine(struct bond_dev_private *internals, uint8_t slave_id)
                         * In other case (was fast and now it is slow) just switch
                         * timeout to slow without forcing send of LACP (because standard
                         * say so)*/
-                       if (!is_partner_fast)
+                       if (is_partner_fast)
                                SM_FLAG_SET(port, NTT);
                } else
                        return; /* Nothing changed */
@@ -632,21 +631,53 @@ tx_machine(struct bond_dev_private *internals, uint8_t slave_id)
        lacpdu->tlv_type_terminator = TLV_TYPE_TERMINATOR_INFORMATION;
        lacpdu->terminator_length = 0;
 
-       if (rte_ring_enqueue(port->tx_ring, lacp_pkt) == -ENOBUFS) {
-               /* If TX ring full, drop packet and free message. Retransmission
-                * will happen in next function call. */
-               rte_pktmbuf_free(lacp_pkt);
-               set_warning_flags(port, WRN_TX_QUEUE_FULL);
-               return;
+       MODE4_DEBUG("Sending LACP frame\n");
+       BOND_PRINT_LACP(lacpdu);
+
+       if (internals->mode4.dedicated_queues.enabled == 0) {
+               int retval = rte_ring_enqueue(port->tx_ring, lacp_pkt);
+               if (retval != 0) {
+                       /* If TX ring full, drop packet and free message.
+                          Retransmission will happen in next function call. */
+                       rte_pktmbuf_free(lacp_pkt);
+                       set_warning_flags(port, WRN_TX_QUEUE_FULL);
+                       return;
+               }
+       } else {
+               uint16_t pkts_sent = rte_eth_tx_burst(slave_id,
+                               internals->mode4.dedicated_queues.tx_qid,
+                               &lacp_pkt, 1);
+               if (pkts_sent != 1) {
+                       rte_pktmbuf_free(lacp_pkt);
+                       set_warning_flags(port, WRN_TX_QUEUE_FULL);
+                       return;
+               }
        }
 
-       MODE4_DEBUG("sending LACP frame\n");
-       BOND_PRINT_LACP(lacpdu);
 
        timer_set(&port->tx_machine_timer, internals->mode4.tx_period_timeout);
        SM_FLAG_CLR(port, NTT);
 }
 
/*
 * Return the index of the largest element among the first n entries of a.
 * Ties are resolved in favor of the lowest index.
 *
 * Returns UINT8_MAX as an "invalid index" sentinel when n <= 0. (The
 * original wrote `return -1;` from a uint8_t-returning function, which
 * yields 255 only via implicit modular conversion — same value, but easy
 * to misread and flagged by -Wconversion.)
 */
static uint8_t
max_index(uint64_t *a, int n)
{
	int i, max_i = 0;
	uint64_t max;

	if (n <= 0)
		return UINT8_MAX;

	max = a[0];
	for (i = 1; i < n; ++i) {
		if (a[i] > max) {
			max = a[i];
			max_i = i;
		}
	}

	return max_i;
}
+
 /**
  * Function assigns port to aggregator.
  *
@@ -657,8 +688,13 @@ static void
 selection_logic(struct bond_dev_private *internals, uint8_t slave_id)
 {
        struct port *agg, *port;
-       uint8_t slaves_count, new_agg_id, i;
+       uint8_t slaves_count, new_agg_id, i, j = 0;
        uint8_t *slaves;
+       uint64_t agg_bandwidth[8] = {0};
+       uint64_t agg_count[8] = {0};
+       uint8_t default_slave = 0;
+       uint8_t mode_count_id, mode_band_id;
+       struct rte_eth_link link_info;
 
        slaves = internals->active_slaves;
        slaves_count = internals->active_slave_count;
@@ -671,6 +707,10 @@ selection_logic(struct bond_dev_private *internals, uint8_t slave_id)
                if (agg->aggregator_port_id != slaves[i])
                        continue;
 
+               agg_count[agg->aggregator_port_id] += 1;
+               rte_eth_link_get_nowait(slaves[i], &link_info);
+               agg_bandwidth[agg->aggregator_port_id] += link_info.link_speed;
+
                /* Actors system ID is not checked since all slave device have the same
                 * ID (MAC address). */
                if ((agg->actor.key == port->actor.key &&
@@ -681,15 +721,36 @@ selection_logic(struct bond_dev_private *internals, uint8_t slave_id)
                        (agg->actor.key &
                                rte_cpu_to_be_16(BOND_LINK_FULL_DUPLEX_KEY)) != 0) {
 
-                       break;
+                       if (j == 0)
+                               default_slave = i;
+                       j++;
                }
        }
 
-       /* By default, port uses it self as agregator */
-       if (i == slaves_count)
-               new_agg_id = slave_id;
-       else
-               new_agg_id = slaves[i];
+       switch (internals->mode4.agg_selection) {
+       case AGG_COUNT:
+               mode_count_id = max_index(
+                               (uint64_t *)agg_count, slaves_count);
+               new_agg_id = mode_count_id;
+               break;
+       case AGG_BANDWIDTH:
+               mode_band_id = max_index(
+                               (uint64_t *)agg_bandwidth, slaves_count);
+               new_agg_id = mode_band_id;
+               break;
+       case AGG_STABLE:
+               if (default_slave == slaves_count)
+                       new_agg_id = slave_id;
+               else
+                       new_agg_id = slaves[default_slave];
+               break;
+       default:
+               if (default_slave == slaves_count)
+                       new_agg_id = slave_id;
+               else
+                       new_agg_id = slaves[default_slave];
+               break;
+       }
 
        if (new_agg_id != port->aggregator_port_id) {
                port->aggregator_port_id = new_agg_id;
@@ -740,6 +801,22 @@ link_speed_key(uint16_t speed) {
        return key_speed;
 }
 
+static void
+rx_machine_update(struct bond_dev_private *internals, uint8_t slave_id,
+               struct rte_mbuf *lacp_pkt) {
+       struct lacpdu_header *lacp;
+
+       if (lacp_pkt != NULL) {
+               lacp = rte_pktmbuf_mtod(lacp_pkt, struct lacpdu_header *);
+               RTE_ASSERT(lacp->lacpdu.subtype == SLOW_SUBTYPE_LACP);
+
+               /* This is LACP frame so pass it to rx_machine */
+               rx_machine(internals, slave_id, &lacp->lacpdu);
+               rte_pktmbuf_free(lacp_pkt);
+       } else
+               rx_machine(internals, slave_id, NULL);
+}
+
 static void
 bond_mode_8023ad_periodic_cb(void *arg)
 {
@@ -748,8 +825,8 @@ bond_mode_8023ad_periodic_cb(void *arg)
        struct port *port;
        struct rte_eth_link link_info;
        struct ether_addr slave_addr;
+       struct rte_mbuf *lacp_pkt = NULL;
 
-       void *pkt = NULL;
        uint8_t i, slave_id;
 
 
@@ -758,7 +835,7 @@ bond_mode_8023ad_periodic_cb(void *arg)
                uint16_t key;
 
                slave_id = internals->active_slaves[i];
-               rte_eth_link_get(slave_id, &link_info);
+               rte_eth_link_get_nowait(slave_id, &link_info);
                rte_eth_macaddr_get(slave_id, &slave_addr);
 
                if (link_info.link_status != 0) {
@@ -809,20 +886,28 @@ bond_mode_8023ad_periodic_cb(void *arg)
 
                SM_FLAG_SET(port, LACP_ENABLED);
 
-               /* Find LACP packet to this port. Do not check subtype, it is done in
-                * function that queued packet */
-               if (rte_ring_dequeue(port->rx_ring, &pkt) == 0) {
-                       struct rte_mbuf *lacp_pkt = pkt;
-                       struct lacpdu_header *lacp;
+               if (internals->mode4.dedicated_queues.enabled == 0) {
+                       /* Find LACP packet to this port. Do not check subtype,
+                        * it is done in function that queued packet
+                        */
+                       int retval = rte_ring_dequeue(port->rx_ring,
+                                       (void **)&lacp_pkt);
 
-                       lacp = rte_pktmbuf_mtod(lacp_pkt, struct lacpdu_header *);
-                       RTE_ASSERT(lacp->lacpdu.subtype == SLOW_SUBTYPE_LACP);
+                       if (retval != 0)
+                               lacp_pkt = NULL;
 
-                       /* This is LACP frame so pass it to rx_machine */
-                       rx_machine(internals, slave_id, &lacp->lacpdu);
-                       rte_pktmbuf_free(lacp_pkt);
-               } else
-                       rx_machine(internals, slave_id, NULL);
+                       rx_machine_update(internals, slave_id, lacp_pkt);
+               } else {
+                       uint16_t rx_count = rte_eth_rx_burst(slave_id,
+                                       internals->mode4.dedicated_queues.rx_qid,
+                                       &lacp_pkt, 1);
+
+                       if (rx_count == 1)
+                               bond_mode_8023ad_handle_slow_pkt(internals,
+                                               slave_id, lacp_pkt);
+                       else
+                               rx_machine_update(internals, slave_id, NULL);
+               }
 
                periodic_machine(internals, slave_id);
                mux_machine(internals, slave_id);
@@ -872,7 +957,7 @@ bond_mode_8023ad_activate_slave(struct rte_eth_dev *bond_dev, uint8_t slave_id)
 
        /* default states */
        port->actor_state = STATE_AGGREGATION | STATE_LACP_ACTIVE | STATE_DEFAULTED;
-       port->partner_state = STATE_LACP_ACTIVE;
+       port->partner_state = STATE_LACP_ACTIVE | STATE_AGGREGATION;
        port->sm_flags = SM_FLAGS_BEGIN;
 
        /* use this port as agregator */
@@ -886,7 +971,10 @@ bond_mode_8023ad_activate_slave(struct rte_eth_dev *bond_dev, uint8_t slave_id)
 
        RTE_ASSERT(port->rx_ring == NULL);
        RTE_ASSERT(port->tx_ring == NULL);
-       socket_id = rte_eth_devices[slave_id].data->numa_node;
+
+       socket_id = rte_eth_dev_socket_id(slave_id);
+       if (socket_id == (int)LCORE_ID_ANY)
+               socket_id = rte_socket_id();
 
        element_size = sizeof(struct slow_protocol_frame) +
                                RTE_PKTMBUF_HEADROOM;
@@ -905,7 +993,7 @@ bond_mode_8023ad_activate_slave(struct rte_eth_dev *bond_dev, uint8_t slave_id)
                        32 : RTE_MEMPOOL_CACHE_MAX_SIZE,
                0, element_size, socket_id);
 
-       /* Any memory allocation failure in initalization is critical because
+       /* Any memory allocation failure in initialization is critical because
         * resources can't be free, so reinitialization is impossible. */
        if (port->mbuf_pool == NULL) {
                rte_panic("Slave %u: Failed to create memory pool '%s': %s\n",
@@ -1036,6 +1124,18 @@ bond_mode_8023ad_conf_get_v1607(struct rte_eth_dev *dev,
        conf->slowrx_cb = mode4->slowrx_cb;
 }
 
+static void
+bond_mode_8023ad_conf_get_v1708(struct rte_eth_dev *dev,
+               struct rte_eth_bond_8023ad_conf *conf)
+{
+       struct bond_dev_private *internals = dev->data->dev_private;
+       struct mode8023ad_private *mode4 = &internals->mode4;
+
+       bond_mode_8023ad_conf_get(dev, conf);
+       conf->slowrx_cb = mode4->slowrx_cb;
+       conf->agg_selection = mode4->agg_selection;
+}
+
 static void
 bond_mode_8023ad_conf_get_default(struct rte_eth_bond_8023ad_conf *conf)
 {
@@ -1048,6 +1148,7 @@ bond_mode_8023ad_conf_get_default(struct rte_eth_bond_8023ad_conf *conf)
        conf->rx_marker_period_ms = BOND_8023AD_RX_MARKER_PERIOD_MS;
        conf->update_timeout_ms = BOND_MODE_8023AX_UPDATE_TIMEOUT_MS;
        conf->slowrx_cb = NULL;
+       conf->agg_selection = AGG_STABLE;
 }
 
 static void
@@ -1064,6 +1165,10 @@ bond_mode_8023ad_conf_assign(struct mode8023ad_private *mode4,
        mode4->tx_period_timeout = conf->tx_period_ms * ms_ticks;
        mode4->rx_marker_timeout = conf->rx_marker_period_ms * ms_ticks;
        mode4->update_timeout_us = conf->update_timeout_ms * 1000;
+
+       mode4->dedicated_queues.enabled = 0;
+       mode4->dedicated_queues.rx_qid = UINT16_MAX;
+       mode4->dedicated_queues.tx_qid = UINT16_MAX;
 }
 
 static void
@@ -1100,9 +1205,31 @@ bond_mode_8023ad_setup(struct rte_eth_dev *dev,
                bond_mode_8023ad_conf_get_default(conf);
        }
 
+       bond_mode_8023ad_stop(dev);
+       bond_mode_8023ad_conf_assign(mode4, conf);
+
+
+       if (dev->data->dev_started)
+               bond_mode_8023ad_start(dev);
+}
+
+static void
+bond_mode_8023ad_setup_v1708(struct rte_eth_dev *dev,
+               struct rte_eth_bond_8023ad_conf *conf)
+{
+       struct rte_eth_bond_8023ad_conf def_conf;
+       struct bond_dev_private *internals = dev->data->dev_private;
+       struct mode8023ad_private *mode4 = &internals->mode4;
+
+       if (conf == NULL) {
+               conf = &def_conf;
+               bond_mode_8023ad_conf_get_default(conf);
+       }
+
        bond_mode_8023ad_stop(dev);
        bond_mode_8023ad_conf_assign(mode4, conf);
        mode4->slowrx_cb = conf->slowrx_cb;
+       mode4->agg_selection = AGG_STABLE;
 
        if (dev->data->dev_started)
                bond_mode_8023ad_start(dev);
@@ -1188,18 +1315,36 @@ bond_mode_8023ad_handle_slow_pkt(struct bond_dev_private *internals,
                m_hdr->marker.tlv_type_marker = MARKER_TLV_TYPE_RESP;
                rte_eth_macaddr_get(slave_id, &m_hdr->eth_hdr.s_addr);
 
-               if (unlikely(rte_ring_enqueue(port->tx_ring, pkt) == -ENOBUFS)) {
-                       /* reset timer */
-                       port->rx_marker_timer = 0;
-                       wrn = WRN_TX_QUEUE_FULL;
-                       goto free_out;
+               if (internals->mode4.dedicated_queues.enabled == 0) {
+                       int retval = rte_ring_enqueue(port->tx_ring, pkt);
+                       if (retval != 0) {
+                               /* reset timer */
+                               port->rx_marker_timer = 0;
+                               wrn = WRN_TX_QUEUE_FULL;
+                               goto free_out;
+                       }
+               } else {
+                       /* Send packet directly to the slow queue */
+                       uint16_t tx_count = rte_eth_tx_burst(slave_id,
+                                       internals->mode4.dedicated_queues.tx_qid,
+                                       &pkt, 1);
+                       if (tx_count != 1) {
+                               /* reset timer */
+                               port->rx_marker_timer = 0;
+                               wrn = WRN_TX_QUEUE_FULL;
+                               goto free_out;
+                       }
                }
        } else if (likely(subtype == SLOW_SUBTYPE_LACP)) {
-               if (unlikely(rte_ring_enqueue(port->rx_ring, pkt) == -ENOBUFS)) {
-                       /* If RX fing full free lacpdu message and drop packet */
-                       wrn = WRN_RX_QUEUE_FULL;
-                       goto free_out;
-               }
+               if (internals->mode4.dedicated_queues.enabled == 0) {
+                       int retval = rte_ring_enqueue(port->rx_ring, pkt);
+                       if (retval != 0) {
+                               /* If RX ring full, free lacpdu message and drop packet */
+                               wrn = WRN_RX_QUEUE_FULL;
+                               goto free_out;
+                       }
+               } else
+                       rx_machine_update(internals, slave_id, pkt);
        } else {
                wrn = WRN_UNKNOWN_SLOW_TYPE;
                goto free_out;
@@ -1246,10 +1391,71 @@ rte_eth_bond_8023ad_conf_get_v1607(uint8_t port_id,
        bond_mode_8023ad_conf_get_v1607(bond_dev, conf);
        return 0;
 }
-BIND_DEFAULT_SYMBOL(rte_eth_bond_8023ad_conf_get, _v1607, 16.07);
+VERSION_SYMBOL(rte_eth_bond_8023ad_conf_get, _v1607, 16.07);
+
+int
+rte_eth_bond_8023ad_conf_get_v1708(uint8_t port_id,
+               struct rte_eth_bond_8023ad_conf *conf)
+{
+       struct rte_eth_dev *bond_dev;
+
+       if (valid_bonded_port_id(port_id) != 0)
+               return -EINVAL;
+
+       if (conf == NULL)
+               return -EINVAL;
+
+       bond_dev = &rte_eth_devices[port_id];
+       bond_mode_8023ad_conf_get_v1708(bond_dev, conf);
+       return 0;
+}
 MAP_STATIC_SYMBOL(int rte_eth_bond_8023ad_conf_get(uint8_t port_id,
                struct rte_eth_bond_8023ad_conf *conf),
-               rte_eth_bond_8023ad_conf_get_v1607);
+               rte_eth_bond_8023ad_conf_get_v1708);
+BIND_DEFAULT_SYMBOL(rte_eth_bond_8023ad_conf_get, _v1708, 17.08);
+
+int
+rte_eth_bond_8023ad_agg_selection_set(uint8_t port_id,
+               enum rte_bond_8023ad_agg_selection agg_selection)
+{
+       struct rte_eth_dev *bond_dev;
+       struct bond_dev_private *internals;
+       struct mode8023ad_private *mode4;
+
+       bond_dev = &rte_eth_devices[port_id];
+       internals = bond_dev->data->dev_private;
+
+       if (valid_bonded_port_id(port_id) != 0)
+               return -EINVAL;
+       if (internals->mode != 4)
+               return -EINVAL;
+
+       mode4 = &internals->mode4;
+       if (agg_selection == AGG_COUNT || agg_selection == AGG_BANDWIDTH
+                       || agg_selection == AGG_STABLE)
+               mode4->agg_selection = agg_selection;
+       return 0;
+}
+
+int rte_eth_bond_8023ad_agg_selection_get(uint8_t port_id)
+{
+       struct rte_eth_dev *bond_dev;
+       struct bond_dev_private *internals;
+       struct mode8023ad_private *mode4;
+
+       bond_dev = &rte_eth_devices[port_id];
+       internals = bond_dev->data->dev_private;
+
+       if (valid_bonded_port_id(port_id) != 0)
+               return -EINVAL;
+       if (internals->mode != 4)
+               return -EINVAL;
+       mode4 = &internals->mode4;
+
+       return mode4->agg_selection;
+}
+
+
 
 static int
 bond_8023ad_setup_validate(uint8_t port_id,
@@ -1310,10 +1516,34 @@ rte_eth_bond_8023ad_setup_v1607(uint8_t port_id,
 
        return 0;
 }
-BIND_DEFAULT_SYMBOL(rte_eth_bond_8023ad_setup, _v1607, 16.07);
+VERSION_SYMBOL(rte_eth_bond_8023ad_setup, _v1607, 16.07);
+
+
+int
+rte_eth_bond_8023ad_setup_v1708(uint8_t port_id,
+               struct rte_eth_bond_8023ad_conf *conf)
+{
+       struct rte_eth_dev *bond_dev;
+       int err;
+
+       err = bond_8023ad_setup_validate(port_id, conf);
+       if (err != 0)
+               return err;
+
+       bond_dev = &rte_eth_devices[port_id];
+       bond_mode_8023ad_setup_v1708(bond_dev, conf);
+
+       return 0;
+}
+BIND_DEFAULT_SYMBOL(rte_eth_bond_8023ad_setup, _v1708, 17.08);
 MAP_STATIC_SYMBOL(int rte_eth_bond_8023ad_setup(uint8_t port_id,
                struct rte_eth_bond_8023ad_conf *conf),
-               rte_eth_bond_8023ad_setup_v1607);
+               rte_eth_bond_8023ad_setup_v1708);
+
+
+
+
+
 
 int
 rte_eth_bond_8023ad_slave_info(uint8_t port_id, uint8_t slave_id,
@@ -1504,3 +1734,49 @@ bond_mode_8023ad_ext_periodic_cb(void *arg)
        rte_eal_alarm_set(internals->mode4.update_timeout_us,
                        bond_mode_8023ad_ext_periodic_cb, arg);
 }
+
+int
+rte_eth_bond_8023ad_dedicated_queues_enable(uint8_t port)
+{
+       int retval = 0;
+       struct rte_eth_dev *dev = &rte_eth_devices[port];
+       struct bond_dev_private *internals = (struct bond_dev_private *)
+               dev->data->dev_private;
+
+       if (check_for_bonded_ethdev(dev) != 0)
+               return -1;
+
+       if (bond_8023ad_slow_pkt_hw_filter_supported(port) != 0)
+               return -1;
+
+       /* Device must be stopped to set up slow queue */
+       if (dev->data->dev_started)
+               return -1;
+
+       internals->mode4.dedicated_queues.enabled = 1;
+
+       bond_ethdev_mode_set(dev, internals->mode);
+       return retval;
+}
+
+int
+rte_eth_bond_8023ad_dedicated_queues_disable(uint8_t port)
+{
+       int retval = 0;
+       struct rte_eth_dev *dev = &rte_eth_devices[port];
+       struct bond_dev_private *internals = (struct bond_dev_private *)
+               dev->data->dev_private;
+
+       if (check_for_bonded_ethdev(dev) != 0)
+               return -1;
+
+       /* Device must be stopped to set up slow queue */
+       if (dev->data->dev_started)
+               return -1;
+
+       internals->mode4.dedicated_queues.enabled = 0;
+
+       bond_ethdev_mode_set(dev, internals->mode);
+
+       return retval;
+}