#include <vnet/snap/snap.h>
#include <vnet/bonding/node.h>
/* Global bond plugin state; defined only in the default translation unit so
 * multiarch (CLIB_MARCH_VARIANT) objects link against a single instance. */
+#ifndef CLIB_MARCH_VARIANT
bond_main_t bond_main;
+#endif /* CLIB_MARCH_VARIANT */
/* X-macro of bond-input error counters: each _(SYMBOL, "string") entry
 * expands to a BOND_INPUT_ERROR_<SYMBOL> enum value and its counter string.
 * This revision removes NO_SLAVE/NO_BOND (now asserted instead of counted)
 * and adds PASSIVE_IF for traffic arriving on a non-active member in
 * active-backup mode. */
#define foreach_bond_input_error \
_(NONE, "no error") \
_(IF_DOWN, "interface down") \
- _(NO_SLAVE, "no slave") \
- _(NO_BOND, "no bond interface")\
- _(PASS_THRU, "pass through")
+ _(PASSIVE_IF, "traffic received on passive interface") \
+ _(PASS_THRU, "pass through (CDP, LLDP, slow protocols)")
typedef enum
{
CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
bond_packet_trace_t *t = va_arg (*args, bond_packet_trace_t *);
- vnet_hw_interface_t *hw, *hw1;
- vnet_main_t *vnm = vnet_get_main ();
- hw = vnet_get_sup_hw_interface (vnm, t->sw_if_index);
- hw1 = vnet_get_sup_hw_interface (vnm, t->bond_sw_if_index);
- s = format (s, "src %U, dst %U, %s -> %s",
+ s = format (s, "src %U, dst %U, %U -> %U",
format_ethernet_address, t->ethernet.src_address,
format_ethernet_address, t->ethernet.dst_address,
- hw->name, hw1->name);
+ format_vnet_sw_if_index_name, vnet_get_main (),
+ t->sw_if_index,
+ format_vnet_sw_if_index_name, vnet_get_main (),
+ t->bond_sw_if_index);
return s;
}
/* Static next-node indices for bond-input.  Only the drop disposition is a
 * fixed next; all accepted traffic is steered by vnet_feature_next(). */
+typedef enum
+{
+  BOND_INPUT_NEXT_DROP,
+  BOND_INPUT_N_NEXT,
+} bond_output_next_t;
+
/* Predicate: non-zero when the frame is a CDP (Cisco Discovery Protocol)
 * packet.  NOTE(review): the body is elided in this excerpt (upstream it
 * checks for an HDLC/SNAP-encapsulated CDP protocol id) — confirm against
 * the full file. */
static_always_inline u8
packet_is_cdp (ethernet_header_t * eth)
{
}
/*
 * Rewrite the RX sw_if_index of buffer b from the member (slave) interface
 * to the bond interface and accumulate the caller's rx packet/byte counts.
 * Slow-protocols (LACP), CDP and LLDP frames are left on the member
 * interface and counted as PASS_THRU instead; both untagged and
 * VLAN/QinQ-tagged frames are classified.  Unaligned ethertype loads use
 * clib_mem_unaligned since the L2 header has no alignment guarantee.
 *
 * FIX(review): line reading "clib_mem_unaligned (ð->type, u16)" was
 * HTML-entity mojibake (&eth; -> U+00F0); restored to "&eth->type".
 */
static inline void
-bond_sw_if_index_rewrite (vlib_main_t * vm, vlib_node_runtime_t * node,
-			  slave_if_t * sif, ethernet_header_t * eth,
-			  vlib_buffer_t * b0)
+bond_sw_if_idx_rewrite (vlib_main_t * vm, vlib_node_runtime_t * node,
+			vlib_buffer_t * b, u32 bond_sw_if_index,
+			u32 * n_rx_packets, u32 * n_rx_bytes)
{
-  bond_if_t *bif;
-  u16 thread_index = vlib_get_thread_index ();
  u16 *ethertype_p, ethertype;
  ethernet_vlan_header_t *vlan;
+  ethernet_header_t *eth = (ethernet_header_t *) vlib_buffer_get_current (b);
-  if (PREDICT_TRUE (sif != 0))
+  (*n_rx_packets)++;
+  *n_rx_bytes += b->current_length;
+  ethertype = clib_mem_unaligned (&eth->type, u16);
+  if (!ethernet_frame_is_tagged (ntohs (ethertype)))
    {
-      bif = bond_get_master_by_sw_if_index (sif->group);
-      if (PREDICT_TRUE (bif != 0))
+      // Let some layer2 packets pass through.
+      if (PREDICT_TRUE ((ethertype != htons (ETHERNET_TYPE_SLOW_PROTOCOLS))
+			&& !packet_is_cdp (eth)
+			&& (ethertype != htons (ETHERNET_TYPE_802_1_LLDP))))
	{
-	  if (PREDICT_TRUE (vec_len (bif->slaves) >= 1))
-	    {
-	      if (PREDICT_TRUE (bif->admin_up == 1))
-		{
-		  if (!ethernet_frame_is_tagged (ntohs (eth->type)))
-		    {
-		      // Let some layer2 packets pass through.
-		      if (PREDICT_TRUE ((eth->type !=
-					 htons (ETHERNET_TYPE_SLOW_PROTOCOLS))
-					&& !packet_is_cdp (eth)
-					&& (eth->type !=
-					    htons
-					    (ETHERNET_TYPE_802_1_LLDP))))
-			{
-			  // Change the physical interface to
-			  // bond interface
-			  vnet_buffer (b0)->sw_if_index[VLIB_RX] =
-			    bif->sw_if_index;
-
-			  /* increase rx counters */
-			  vlib_increment_simple_counter
-			    (vnet_main.interface_main.sw_if_counters +
-			     VNET_INTERFACE_COUNTER_RX, thread_index,
-			     bif->sw_if_index, 1);
-			}
-		      else
-			{
-			  vlib_error_count (vm, node->node_index,
-					    BOND_INPUT_ERROR_PASS_THRU, 1);
-			}
-		    }
-		  else
-		    {
-		      vlan = (void *) (eth + 1);
-		      ethertype_p = &vlan->type;
-		      if (*ethertype_p == ntohs (ETHERNET_TYPE_VLAN))
-			{
-			  vlan++;
-			  ethertype_p = &vlan->type;
-			}
-		      ethertype = *ethertype_p;
-		      if (PREDICT_TRUE ((ethertype !=
-					 htons (ETHERNET_TYPE_SLOW_PROTOCOLS))
-					&& (ethertype !=
-					    htons (ETHERNET_TYPE_CDP))
-					&& (ethertype !=
-					    htons
-					    (ETHERNET_TYPE_802_1_LLDP))))
-			{
-			  // Change the physical interface to
-			  // bond interface
-			  vnet_buffer (b0)->sw_if_index[VLIB_RX] =
-			    bif->sw_if_index;
-
-			  /* increase rx counters */
-			  vlib_increment_simple_counter
-			    (vnet_main.interface_main.sw_if_counters +
-			     VNET_INTERFACE_COUNTER_RX, thread_index,
-			     bif->sw_if_index, 1);
-			}
-		      else
-			{
-			  vlib_error_count (vm, node->node_index,
-					    BOND_INPUT_ERROR_PASS_THRU, 1);
-			}
-		    }
-		}
-	      else
-		{
-		  vlib_error_count (vm, node->node_index,
-				    BOND_INPUT_ERROR_IF_DOWN, 1);
-		}
-	    }
-	  else
-	    {
-	      vlib_error_count (vm, node->node_index,
-				BOND_INPUT_ERROR_NO_SLAVE, 1);
-	    }
+	  /* Change the physical interface to bond interface */
+	  vnet_buffer (b)->sw_if_index[VLIB_RX] = bond_sw_if_index;
+	  return;
	}
-      else
+    }
+  else
+    {
+      vlan = (void *) (eth + 1);
+      ethertype_p = &vlan->type;
+      ethertype = clib_mem_unaligned (ethertype_p, u16);
+      /* Skip an outer 802.1ad/QinQ tag to reach the inner ethertype. */
+      if (ethertype == ntohs (ETHERNET_TYPE_VLAN))
+	{
+	  vlan++;
+	  ethertype_p = &vlan->type;
+	}
+      ethertype = clib_mem_unaligned (ethertype_p, u16);
+      if (PREDICT_TRUE ((ethertype != htons (ETHERNET_TYPE_SLOW_PROTOCOLS))
+			&& (ethertype != htons (ETHERNET_TYPE_CDP))
+			&& (ethertype != htons (ETHERNET_TYPE_802_1_LLDP))))
	{
-	  vlib_error_count (vm, node->node_index,
-			    BOND_INPUT_ERROR_NO_BOND, 1);
+	  /* Change the physical interface to bond interface */
+	  vnet_buffer (b)->sw_if_index[VLIB_RX] = bond_sw_if_index;
+	  return;
	}
    }
-  else
+
+  /* Control-plane frame: leave it on the member interface. */
+  vlib_error_count (vm, node->node_index, BOND_INPUT_ERROR_PASS_THRU, 1);
+  return;
+}
+
/*
 * Classify one buffer's member (slave) interface: set *next_index for the
 * buffer and *bond_sw_if_index (the interface its counters/rewrite should
 * use), plus *error when dropping.  Caches the last member seen in
 * *last_slave_sw_if_index so the pool lookups are skipped for runs of
 * packets from the same member interface.
 */
+static inline void
+bond_update_next (vlib_main_t * vm, vlib_node_runtime_t * node,
+		  u32 * last_slave_sw_if_index, u32 slave_sw_if_index,
+		  u32 * bond_sw_if_index, vlib_buffer_t * b,
+		  u32 * next_index, vlib_error_t * error)
+{
+  slave_if_t *sif;
+  bond_if_t *bif;
+
+  *next_index = BOND_INPUT_NEXT_DROP;
+  *error = 0;
+
+  /* Same member as last packet: reuse cached classification, just advance
+   * the feature arc for this buffer. */
+  if (PREDICT_TRUE (*last_slave_sw_if_index == slave_sw_if_index))
+    goto next;
+
+  *last_slave_sw_if_index = slave_sw_if_index;
+
+  sif = bond_get_slave_by_sw_if_index (slave_sw_if_index);
+  ALWAYS_ASSERT (sif);
+
+  bif = bond_get_master_by_dev_instance (sif->bif_dev_instance);
+
+  ALWAYS_ASSERT (bif);
+  ASSERT (vec_len (bif->slaves));
+
+  /* Bond admin-down: record IF_DOWN and charge the packet to the member
+   * interface.  Note there is no early return here — execution still falls
+   * through to the checks and feature-arc advance below. */
+  if (PREDICT_FALSE (bif->admin_up == 0))
+    {
+      *bond_sw_if_index = slave_sw_if_index;
+      *error = node->errors[BOND_INPUT_ERROR_IF_DOWN];
+    }
+
+  /* Active-backup: traffic on any member other than the primary
+   * (active_slaves[0]) is dropped as PASSIVE_IF. */
+  if (PREDICT_FALSE ((bif->mode == BOND_MODE_ACTIVE_BACKUP) &&
+		     vec_len (bif->active_slaves) &&
+		     (slave_sw_if_index != bif->active_slaves[0])))
+    {
+      *bond_sw_if_index = slave_sw_if_index;
+      *error = node->errors[BOND_INPUT_ERROR_PASSIVE_IF];
+      return;
+    }
+  *bond_sw_if_index = bif->sw_if_index;
+
+next:
+  /* Overwrites *next_index with the next node on this buffer's feature arc. */
+  vnet_feature_next (next_index, b);
+}
+
/*
 * Advance the feature arc on four buffers at once.  The computed next
 * indices are deliberately discarded (tmp0..tmp3): this is called only to
 * update per-buffer feature state when the cached next_index is reused by
 * the caller's fast path.
 */
+static_always_inline void
+bond_update_next_x4 (vlib_buffer_t * b0, vlib_buffer_t * b1,
+		     vlib_buffer_t * b2, vlib_buffer_t * b3)
+{
+  u32 tmp0, tmp1, tmp2, tmp3;
+
+  tmp0 = tmp1 = tmp2 = tmp3 = BOND_INPUT_NEXT_DROP;
+  vnet_feature_next (&tmp0, b0);
+  vnet_feature_next (&tmp1, b1);
+  vnet_feature_next (&tmp2, b2);
+  vnet_feature_next (&tmp3, b3);
+}
/*
 * bond-input node: for each frame, reclassify buffers received on member
 * (slave) interfaces onto their bond interface, drop passive/if-down
 * traffic, pass LACP/CDP/LLDP through on the member, then enqueue all
 * buffers in one shot with vlib_buffer_enqueue_to_next.  The quad loop
 * fast-paths runs of packets from the same member via
 * last_slave_sw_if_index caching; tracing is done in a separate pass only
 * when the node has the TRACE flag set.
 */
-static uword
-bond_input_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
-	       vlib_frame_t * frame)
+VLIB_NODE_FN (bond_input_node) (vlib_main_t * vm,
+				vlib_node_runtime_t * node,
+				vlib_frame_t * frame)
{
-  u32 bi0, bi1, bi2, bi3;
-  vlib_buffer_t *b0, *b1, *b2, *b3;
-  u32 next_index;
-  u32 *from, *to_next, n_left_from, n_left_to_next;
-  ethernet_header_t *eth, *eth1, *eth2, *eth3;
-  u32 next0, next1, next2, next3;
-  bond_packet_trace_t *t0;
-  uword n_trace = vlib_get_trace_count (vm, node);
-  u32 sw_if_index, sw_if_index1, sw_if_index2, sw_if_index3;
-  slave_if_t *sif, *sif1, *sif2, *sif3;
-  u16 thread_index = vlib_get_thread_index ();
+  u16 thread_index = vm->thread_index;
+  u32 *from, n_left;
+  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b;
+  u32 sw_if_indices[VLIB_FRAME_SIZE], *sw_if_index;
+  u16 nexts[VLIB_FRAME_SIZE], *next;
+  u32 last_slave_sw_if_index = ~0;
+  u32 bond_sw_if_index = 0;
+  vlib_error_t error = 0;
+  u32 next_index = 0;
+  u32 n_rx_bytes = 0, n_rx_packets = 0;
  /* Vector of buffer / pkt indices we're supposed to process */
  from = vlib_frame_vector_args (frame);
  /* Number of buffers / pkts */
-  n_left_from = frame->n_vectors;
+  n_left = frame->n_vectors;
-  /* Speculatively send the first buffer to the last disposition we used */
-  next_index = node->cached_next_index;
+  vlib_get_buffers (vm, from, bufs, n_left);
-  while (n_left_from > 0)
-    {
-      /* set up to enqueue to our disposition with index = next_index */
-      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+  b = bufs;
+  next = nexts;
+  sw_if_index = sw_if_indices;
-      while (n_left_from >= 12 && n_left_to_next >= 4)
+  while (n_left >= 4)
+    {
+      u32 x = 0;
+      /* Prefetch next iteration */
+      if (PREDICT_TRUE (n_left >= 16))
	{
-	  // Prefetch next iteration
-	  {
-	    vlib_buffer_t *b4, *b5, *b6, *b7;
-
-	    b4 = vlib_get_buffer (vm, from[4]);
-	    b5 = vlib_get_buffer (vm, from[5]);
-	    b6 = vlib_get_buffer (vm, from[6]);
-	    b7 = vlib_get_buffer (vm, from[7]);
-
-	    vlib_prefetch_buffer_header (b4, STORE);
-	    vlib_prefetch_buffer_header (b5, STORE);
-	    vlib_prefetch_buffer_header (b6, STORE);
-	    vlib_prefetch_buffer_header (b7, STORE);
-
-	    CLIB_PREFETCH (b4->data, CLIB_CACHE_LINE_BYTES, LOAD);
-	    CLIB_PREFETCH (b5->data, CLIB_CACHE_LINE_BYTES, LOAD);
-	    CLIB_PREFETCH (b6->data, CLIB_CACHE_LINE_BYTES, LOAD);
-	    CLIB_PREFETCH (b7->data, CLIB_CACHE_LINE_BYTES, LOAD);
-	  }
-
-	  next0 = 0;
-	  next1 = 0;
-	  next2 = 0;
-	  next3 = 0;
-
-	  bi0 = from[0];
-	  bi1 = from[1];
-	  bi2 = from[2];
-	  bi3 = from[3];
-
-	  to_next[0] = bi0;
-	  to_next[1] = bi1;
-	  to_next[2] = bi2;
-	  to_next[3] = bi3;
-
-	  from += 4;
-	  to_next += 4;
-	  n_left_from -= 4;
-	  n_left_to_next -= 4;
-
-	  b0 = vlib_get_buffer (vm, bi0);
-	  b1 = vlib_get_buffer (vm, bi1);
-	  b2 = vlib_get_buffer (vm, bi2);
-	  b3 = vlib_get_buffer (vm, bi3);
-
-	  vnet_feature_next (vnet_buffer (b0)->sw_if_index[VLIB_RX], &next0,
-			     b0);
-	  vnet_feature_next (vnet_buffer (b1)->sw_if_index[VLIB_RX], &next1,
-			     b1);
-	  vnet_feature_next (vnet_buffer (b2)->sw_if_index[VLIB_RX], &next2,
-			     b2);
-	  vnet_feature_next (vnet_buffer (b3)->sw_if_index[VLIB_RX], &next3,
-			     b3);
-
-	  eth = (ethernet_header_t *) vlib_buffer_get_current (b0);
-	  eth1 = (ethernet_header_t *) vlib_buffer_get_current (b1);
-	  eth2 = (ethernet_header_t *) vlib_buffer_get_current (b2);
-	  eth3 = (ethernet_header_t *) vlib_buffer_get_current (b3);
-
-	  sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_RX];
-	  sw_if_index1 = vnet_buffer (b1)->sw_if_index[VLIB_RX];
-	  sw_if_index2 = vnet_buffer (b2)->sw_if_index[VLIB_RX];
-	  sw_if_index3 = vnet_buffer (b3)->sw_if_index[VLIB_RX];
-
-	  // sw_if_index points to the physical interface
-	  sif = bond_get_slave_by_sw_if_index (sw_if_index);
-	  sif1 = bond_get_slave_by_sw_if_index (sw_if_index1);
-	  sif2 = bond_get_slave_by_sw_if_index (sw_if_index2);
-	  sif3 = bond_get_slave_by_sw_if_index (sw_if_index3);
-
-	  bond_sw_if_index_rewrite (vm, node, sif, eth, b0);
-	  bond_sw_if_index_rewrite (vm, node, sif1, eth1, b1);
-	  bond_sw_if_index_rewrite (vm, node, sif2, eth2, b2);
-	  bond_sw_if_index_rewrite (vm, node, sif3, eth3, b3);
-
-	  if (PREDICT_FALSE (n_trace > 0))
-	    {
-	      vlib_trace_buffer (vm, node, next0, b0, 0 /* follow_chain */ );
-	      vlib_set_trace_count (vm, node, --n_trace);
-	      t0 = vlib_add_trace (vm, node, b0, sizeof (*t0));
-	      t0->ethernet = *eth;
-	      t0->sw_if_index = sw_if_index;
-	      t0->bond_sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_RX];
-
-	      if (PREDICT_TRUE (n_trace > 0))
-		{
-		  vlib_trace_buffer (vm, node, next1, b1,
-				     0 /* follow_chain */ );
-		  vlib_set_trace_count (vm, node, --n_trace);
-		  t0 = vlib_add_trace (vm, node, b1, sizeof (*t0));
-		  t0->ethernet = *eth1;
-		  t0->sw_if_index = sw_if_index1;
-		  t0->bond_sw_if_index =
-		    vnet_buffer (b1)->sw_if_index[VLIB_RX];
-
-		  if (PREDICT_TRUE (n_trace > 0))
-		    {
-		      vlib_trace_buffer (vm, node, next1, b2,
-					 0 /* follow_chain */ );
-		      vlib_set_trace_count (vm, node, --n_trace);
-		      t0 = vlib_add_trace (vm, node, b2, sizeof (*t0));
-		      t0->ethernet = *eth2;
-		      t0->sw_if_index = sw_if_index2;
-		      t0->bond_sw_if_index =
-			vnet_buffer (b2)->sw_if_index[VLIB_RX];
-
-		      if (PREDICT_TRUE (n_trace > 0))
-			{
-			  vlib_trace_buffer (vm, node, next1, b2,
-					     0 /* follow_chain */ );
-			  vlib_set_trace_count (vm, node, --n_trace);
-			  t0 = vlib_add_trace (vm, node, b3, sizeof (*t0));
-			  t0->ethernet = *eth3;
-			  t0->sw_if_index = sw_if_index3;
-			  t0->bond_sw_if_index =
-			    vnet_buffer (b3)->sw_if_index[VLIB_RX];
-			}
-		    }
-		}
-	    }
+	  vlib_prefetch_buffer_data (b[8], LOAD);
+	  vlib_prefetch_buffer_data (b[9], LOAD);
+	  vlib_prefetch_buffer_data (b[10], LOAD);
+	  vlib_prefetch_buffer_data (b[11], LOAD);
+
+	  vlib_prefetch_buffer_header (b[12], LOAD);
+	  vlib_prefetch_buffer_header (b[13], LOAD);
+	  vlib_prefetch_buffer_header (b[14], LOAD);
+	  vlib_prefetch_buffer_header (b[15], LOAD);
+	}
-	  VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b0);
-	  VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b1);
-	  VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b2);
-	  VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b3);
+      sw_if_index[0] = vnet_buffer (b[0])->sw_if_index[VLIB_RX];
+      sw_if_index[1] = vnet_buffer (b[1])->sw_if_index[VLIB_RX];
+      sw_if_index[2] = vnet_buffer (b[2])->sw_if_index[VLIB_RX];
+      sw_if_index[3] = vnet_buffer (b[3])->sw_if_index[VLIB_RX];
-	  /* verify speculative enqueue, maybe switch current next frame */
-	  vlib_validate_buffer_enqueue_x4 (vm, node, next_index,
-					   to_next, n_left_to_next,
-					   bi0, bi1, bi2, bi3, next0, next1,
-					   next2, next3);
-	}
      /* x stays zero only if all four packets arrived on the cached member
       * interface, enabling the fast path below. */
+      x |= sw_if_index[0] ^ last_slave_sw_if_index;
+      x |= sw_if_index[1] ^ last_slave_sw_if_index;
+      x |= sw_if_index[2] ^ last_slave_sw_if_index;
+      x |= sw_if_index[3] ^ last_slave_sw_if_index;
-      while (n_left_from > 0 && n_left_to_next > 0)
+      if (PREDICT_TRUE (x == 0))
	{
-	  // Prefetch next iteration
-	  if (n_left_from > 1)
+	  /*
+	   * Optimize to call update_next only if there is a feature arc
+	   * after bond-input. Test feature count greater than 1 because
+	   * bond-input itself is a feature arc for this slave interface.
+	   */
+	  ASSERT ((vnet_buffer (b[0])->feature_arc_index ==
+		   vnet_buffer (b[1])->feature_arc_index) &&
+		  (vnet_buffer (b[0])->feature_arc_index ==
+		   vnet_buffer (b[2])->feature_arc_index) &&
+		  (vnet_buffer (b[0])->feature_arc_index ==
+		   vnet_buffer (b[3])->feature_arc_index));
+	  if (PREDICT_FALSE (vnet_get_feature_count
+			     (vnet_buffer (b[0])->feature_arc_index,
+			      last_slave_sw_if_index) > 1))
+	    bond_update_next_x4 (b[0], b[1], b[2], b[3]);
+
+	  next[0] = next[1] = next[2] = next[3] = next_index;
+	  if (next_index == BOND_INPUT_NEXT_DROP)
	    {
-	      vlib_buffer_t *p2;
-
-	      p2 = vlib_get_buffer (vm, from[1]);
-	      vlib_prefetch_buffer_header (p2, STORE);
-	      CLIB_PREFETCH (p2->data, CLIB_CACHE_LINE_BYTES, LOAD);
+	      b[0]->error = error;
+	      b[1]->error = error;
+	      b[2]->error = error;
+	      b[3]->error = error;
+	    }
+	  else
+	    {
+	      bond_sw_if_idx_rewrite (vm, node, b[0], bond_sw_if_index,
+				      &n_rx_packets, &n_rx_bytes);
+	      bond_sw_if_idx_rewrite (vm, node, b[1], bond_sw_if_index,
+				      &n_rx_packets, &n_rx_bytes);
+	      bond_sw_if_idx_rewrite (vm, node, b[2], bond_sw_if_index,
+				      &n_rx_packets, &n_rx_bytes);
+	      bond_sw_if_idx_rewrite (vm, node, b[3], bond_sw_if_index,
+				      &n_rx_packets, &n_rx_bytes);
	    }
+	}
+      else
+	{
+	  bond_update_next (vm, node, &last_slave_sw_if_index, sw_if_index[0],
+			    &bond_sw_if_index, b[0], &next_index, &error);
+	  next[0] = next_index;
+	  if (next_index == BOND_INPUT_NEXT_DROP)
+	    b[0]->error = error;
+	  else
+	    bond_sw_if_idx_rewrite (vm, node, b[0], bond_sw_if_index,
+				    &n_rx_packets, &n_rx_bytes);
+
+	  bond_update_next (vm, node, &last_slave_sw_if_index, sw_if_index[1],
+			    &bond_sw_if_index, b[1], &next_index, &error);
+	  next[1] = next_index;
+	  if (next_index == BOND_INPUT_NEXT_DROP)
+	    b[1]->error = error;
+	  else
+	    bond_sw_if_idx_rewrite (vm, node, b[1], bond_sw_if_index,
+				    &n_rx_packets, &n_rx_bytes);
+
+	  bond_update_next (vm, node, &last_slave_sw_if_index, sw_if_index[2],
+			    &bond_sw_if_index, b[2], &next_index, &error);
+	  next[2] = next_index;
+	  if (next_index == BOND_INPUT_NEXT_DROP)
+	    b[2]->error = error;
+	  else
+	    bond_sw_if_idx_rewrite (vm, node, b[2], bond_sw_if_index,
+				    &n_rx_packets, &n_rx_bytes);
+
+	  bond_update_next (vm, node, &last_slave_sw_if_index, sw_if_index[3],
+			    &bond_sw_if_index, b[3], &next_index, &error);
+	  next[3] = next_index;
+	  if (next_index == BOND_INPUT_NEXT_DROP)
+	    b[3]->error = error;
+	  else
+	    bond_sw_if_idx_rewrite (vm, node, b[3], bond_sw_if_index,
+				    &n_rx_packets, &n_rx_bytes);
+	}
-	  next0 = 0;
-	  bi0 = from[0];
-	  to_next[0] = bi0;
-	  from += 1;
-	  to_next += 1;
-	  n_left_from -= 1;
-	  n_left_to_next -= 1;
+      VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b[0]);
+      VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b[1]);
+      VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b[2]);
+      VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b[3]);
-	  b0 = vlib_get_buffer (vm, bi0);
-	  vnet_feature_next (vnet_buffer (b0)->sw_if_index[VLIB_RX], &next0,
-			     b0);
+      /* next */
+      n_left -= 4;
+      b += 4;
+      sw_if_index += 4;
+      next += 4;
+    }
-	  eth = (ethernet_header_t *) vlib_buffer_get_current (b0);
+  while (n_left)
+    {
+      sw_if_index[0] = vnet_buffer (b[0])->sw_if_index[VLIB_RX];
+      bond_update_next (vm, node, &last_slave_sw_if_index, sw_if_index[0],
+			&bond_sw_if_index, b[0], &next_index, &error);
+      next[0] = next_index;
+      if (next_index == BOND_INPUT_NEXT_DROP)
+	b[0]->error = error;
+      else
+	bond_sw_if_idx_rewrite (vm, node, b[0], bond_sw_if_index,
+				&n_rx_packets, &n_rx_bytes);
-	  sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_RX];
-	  // sw_if_index points to the physical interface
-	  sif = bond_get_slave_by_sw_if_index (sw_if_index);
-	  bond_sw_if_index_rewrite (vm, node, sif, eth, b0);
+      VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b[0]);
-	  VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b0);
+      /* next */
+      n_left -= 1;
+      b += 1;
+      sw_if_index += 1;
+      next += 1;
+    }
-	  /* verify speculative enqueue, maybe switch current next frame */
-	  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
-					   to_next, n_left_to_next,
-					   bi0, next0);
+  if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)))
+    {
+      n_left = frame->n_vectors;	/* number of packets to process */
+      b = bufs;
+      sw_if_index = sw_if_indices;
+      bond_packet_trace_t *t0;
+
+      while (n_left)
+	{
+	  if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
+	    {
+	      t0 = vlib_add_trace (vm, node, b[0], sizeof (*t0));
+	      t0->sw_if_index = sw_if_index[0];
+	      clib_memcpy_fast (&t0->ethernet, vlib_buffer_get_current (b[0]),
+				sizeof (ethernet_header_t));
+	      t0->bond_sw_if_index = vnet_buffer (b[0])->sw_if_index[VLIB_RX];
+	    }
+	  /* next */
+	  n_left--;
+	  b++;
+	  sw_if_index++;
	}
-      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }
+  /* increase rx counters */
+  /* NOTE(review): counters are charged against the last bond_sw_if_index
+   * computed above — this looks correct only when all packets of the frame
+   * map to the same bond interface; confirm against upstream intent. */
+  vlib_increment_combined_counter
+    (vnet_main.interface_main.combined_sw_if_counters +
+     VNET_INTERFACE_COUNTER_RX, thread_index, bond_sw_if_index, n_rx_packets,
+     n_rx_bytes);
+
+  vlib_buffer_enqueue_to_next (vm, node, from, nexts, frame->n_vectors);
  vlib_node_increment_counter (vm, bond_input_node.index,
			       BOND_INPUT_ERROR_NONE, frame->n_vectors);
-  vnet_device_increment_rx_packets (thread_index, frame->n_vectors);
-
  return frame->n_vectors;
}
/* *INDENT-OFF* */
/* Node registration for bond-input.  With VLIB_NODE_FN multiarch dispatch,
 * no .function pointer is set here; the drop next is the only static edge. */
VLIB_REGISTER_NODE (bond_input_node) = {
-  .function = bond_input_fn,
  .name = "bond-input",
  .vector_size = sizeof (u32),
  .format_buffer = format_ethernet_header_with_length,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = BOND_INPUT_N_ERROR,
  .error_strings = bond_input_error_strings,
-  .n_next_nodes = 0,
+  .n_next_nodes = BOND_INPUT_N_NEXT,
  .next_nodes =
  {
-    [0] = "error-drop"
+    [BOND_INPUT_NEXT_DROP] = "error-drop"
  }
};
.node_name = "bond-input",
.runs_before = VNET_FEATURES ("ethernet-input"),
};
-VLIB_NODE_FUNCTION_MULTIARCH (bond_input_node, bond_input_fn)
/* *INDENT-ON* */
static clib_error_t *
sif = bond_get_slave_by_sw_if_index (sw_if_index);
if (sif)
{
- sif->port_enabled = flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP;
+ if (sif->lacp_enabled)
+ return 0;
+
+ /* port_enabled is both admin up and hw link up */
+ sif->port_enabled = ((flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP) &&
+ vnet_sw_interface_is_link_up (vnm, sw_if_index));
if (sif->port_enabled == 0)
- {
- if (sif->lacp_enabled == 0)
- {
- bond_disable_collecting_distributing (vm, sif);
- }
- }
+ bond_disable_collecting_distributing (vm, sif);
else
- {
- if (sif->lacp_enabled == 0)
- {
- bond_enable_collecting_distributing (vm, sif);
- }
- }
+ bond_enable_collecting_distributing (vm, sif);
}
return 0;
slave_if_t *sif;
vnet_sw_interface_t *sw;
vlib_main_t *vm = bm->vlib_main;
- vnet_interface_main_t *im = &vnm->interface_main;
- sw = pool_elt_at_index (im->sw_interfaces, hw_if_index);
+ sw = vnet_get_hw_sw_interface (vnm, hw_if_index);
sif = bond_get_slave_by_sw_if_index (sw->sw_if_index);
if (sif)
{
- if (!(flags & VNET_HW_INTERFACE_FLAG_LINK_UP))
- {
- if (sif->lacp_enabled == 0)
- {
- bond_disable_collecting_distributing (vm, sif);
- }
- }
+ if (sif->lacp_enabled)
+ return 0;
+
+ /* port_enabled is both admin up and hw link up */
+ sif->port_enabled = ((flags & VNET_HW_INTERFACE_FLAG_LINK_UP) &&
+ vnet_sw_interface_is_admin_up (vnm,
+ sw->sw_if_index));
+ if (sif->port_enabled == 0)
+ bond_disable_collecting_distributing (vm, sif);
else
- {
- if (sif->lacp_enabled == 0)
- {
- bond_enable_collecting_distributing (vm, sif);
- }
- }
+ bond_enable_collecting_distributing (vm, sif);
}
return 0;