X-Git-Url: https://gerrit.fd.io/r/gitweb?p=vpp.git;a=blobdiff_plain;f=src%2Fvnet%2Fbonding%2Fdevice.c;h=d333dfcc0d1f2a25473b9344d2d670d3f2cd9412;hp=cebfd694ac624f3127945594b4e71a3b6d923453;hb=8feeaff56;hpb=a005e7f0ef1564cab8db1a148091fd43d2d5af48 diff --git a/src/vnet/bonding/device.c b/src/vnet/bonding/device.c index cebfd694ac6..d333dfcc0d1 100644 --- a/src/vnet/bonding/device.c +++ b/src/vnet/bonding/device.c @@ -22,6 +22,9 @@ #include #include #include +#include +#include +#include #define foreach_bond_tx_error \ _(NONE, "no error") \ @@ -61,6 +64,7 @@ format_bond_tx_trace (u8 * s, va_list * args) return s; } +#ifndef CLIB_MARCH_VARIANT u8 * format_bond_interface_name (u8 * s, va_list * args) { @@ -68,10 +72,41 @@ format_bond_interface_name (u8 * s, va_list * args) bond_main_t *bm = &bond_main; bond_if_t *bif = pool_elt_at_index (bm->interfaces, dev_instance); - s = format (s, "BondEthernet%lu", bif->dev_instance); + s = format (s, "BondEthernet%lu", bif->id); return s; } +#endif + +static __clib_unused clib_error_t * +bond_set_l2_mode_function (vnet_main_t * vnm, + struct vnet_hw_interface_t *bif_hw, + i32 l2_if_adjust) +{ + bond_if_t *bif; + u32 *sw_if_index; + struct vnet_hw_interface_t *sif_hw; + + bif = bond_get_master_by_sw_if_index (bif_hw->sw_if_index); + if (!bif) + return 0; + + if ((bif_hw->l2_if_count == 1) && (l2_if_adjust == 1)) + { + /* Just added first L2 interface on this port */ + vec_foreach (sw_if_index, bif->slaves) + { + sif_hw = vnet_get_sup_hw_interface (vnm, *sw_if_index); + ethernet_set_flags (vnm, sif_hw->hw_if_index, + ETHERNET_INTERFACE_FLAG_ACCEPT_ALL); + + /* ensure all packets go to ethernet-input */ + ethernet_set_rx_redirect (vnm, sif_hw, 1); + } + } + + return 0; +} static __clib_unused clib_error_t * bond_subif_add_del_function (vnet_main_t * vnm, u32 hw_if_index, @@ -96,60 +131,53 @@ bond_interface_admin_up_down (vnet_main_t * vnm, u32 hw_if_index, u32 flags) return 0; } -static inline u32 -bond_load_balance_broadcast (vlib_main_t * vm, vlib_node_runtime_t * node, - bond_if_t * bif, vlib_buffer_t * b0) +static_always_inline void +bond_tx_add_to_queue (bond_per_thread_data_t * ptd, u32 port, u32 bi) { - vnet_main_t *vnm = vnet_get_main (); + u32 idx = ptd->per_port_queue[port].n_buffers++; + ptd->per_port_queue[port].buffers[idx] = bi; +} + +static_always_inline u32 +bond_lb_broadcast (vlib_main_t * vm, vlib_node_runtime_t * node, + bond_if_t * bif, vlib_buffer_t * b0, uword n_slaves) +{ + bond_main_t *bm = &bond_main; vlib_buffer_t *c0; - int i; - u32 *to_next = 0; + int port; u32 sw_if_index; - vlib_frame_t *f; + u16 thread_index = vm->thread_index; + bond_per_thread_data_t *ptd = vec_elt_at_index (bm->per_thread_data, + thread_index); - - for (i = 1; i < vec_len (bif->active_slaves); i++) + for (port = 1; port < n_slaves; port++) { - sw_if_index = *vec_elt_at_index (bif->active_slaves, i); - f = vnet_get_frame_to_sw_interface (vnm, sw_if_index); - to_next = vlib_frame_vector_args (f); - to_next += f->n_vectors; + sw_if_index = *vec_elt_at_index (bif->active_slaves, port); c0 = vlib_buffer_copy (vm, b0); if (PREDICT_TRUE (c0 != 0)) { vnet_buffer (c0)->sw_if_index[VLIB_TX] = sw_if_index; - to_next[0] = vlib_get_buffer_index (vm, c0); - f->n_vectors++; - vnet_put_frame_to_sw_interface (vnm, sw_if_index, f); + bond_tx_add_to_queue (ptd, port, vlib_get_buffer_index (vm, c0)); } } return 0; } -static inline u32 -bond_load_balance_l2 (vlib_main_t * vm, vlib_node_runtime_t * node, - bond_if_t * bif, vlib_buffer_t * b0) +static_always_inline u32 
+bond_lb_l2 (vlib_main_t * vm, vlib_node_runtime_t * node,
+	    bond_if_t * bif, vlib_buffer_t * b0, uword n_slaves)
 {
   ethernet_header_t *eth = (ethernet_header_t *) vlib_buffer_get_current (b0);
-  u32 a = 0, b = 0, c = 0, t1, t2;
-  u16 t11, t22;
+  u64 *dst = (u64 *) & eth->dst_address[0];
+  u64 a = clib_mem_unaligned (dst, u64);
+  u32 *src = (u32 *) & eth->src_address[2];
+  u32 b = clib_mem_unaligned (src, u32);
 
-  memcpy (&t1, eth->src_address, sizeof (t1));
-  memcpy (&t11, &eth->src_address[4], sizeof (t11));
-  a = t1 ^ t11;
-
-  memcpy (&t2, eth->dst_address, sizeof (t2));
-  memcpy (&t22, &eth->dst_address[4], sizeof (t22));
-  b = t2 ^ t22;
-
-  hash_v3_mix32 (a, b, c);
-  hash_v3_finalize32 (a, b, c);
-
-  return c % vec_len (bif->active_slaves);
+  return lb_hash_hash_2_tuples (a, b);
 }
 
-static inline u16 *
+static_always_inline u16 *
 bond_locate_ethertype (ethernet_header_t * eth)
 {
   u16 *ethertype_p;
@@ -172,109 +200,109 @@ bond_locate_ethertype (ethernet_header_t * eth)
   return ethertype_p;
 }
 
-static inline u32
-bond_load_balance_l23 (vlib_main_t * vm, vlib_node_runtime_t * node,
-		       bond_if_t * bif, vlib_buffer_t * b0)
+static_always_inline u32
+bond_lb_l23 (vlib_main_t * vm, vlib_node_runtime_t * node,
+	     bond_if_t * bif, vlib_buffer_t * b0, uword n_slaves)
 {
   ethernet_header_t *eth = (ethernet_header_t *) vlib_buffer_get_current (b0);
   u8 ip_version;
   ip4_header_t *ip4;
   u16 ethertype, *ethertype_p;
+  u32 *mac1, *mac2, *mac3;
 
   ethertype_p = bond_locate_ethertype (eth);
-  ethertype = *ethertype_p;
+  ethertype = clib_mem_unaligned (ethertype_p, u16);
 
   if ((ethertype != htons (ETHERNET_TYPE_IP4)) &&
       (ethertype != htons (ETHERNET_TYPE_IP6)))
-    return (bond_load_balance_l2 (vm, node, bif, b0));
+    return (bond_lb_l2 (vm, node, bif, b0, n_slaves));
 
   ip4 = (ip4_header_t *) (ethertype_p + 1);
   ip_version = (ip4->ip_version_and_header_length >> 4);
 
   if (ip_version == 0x4)
     {
-      u16 t11, t22;
-      u32 a = 0, b = 0, c = 0, t1, t2;
-
-      memcpy (&t1, eth->src_address, sizeof (t1));
-      memcpy (&t11, &eth->src_address[4], sizeof (t11));
-      a = t1 ^ t11;
-
-      memcpy (&t2, eth->dst_address, sizeof (t2));
-      memcpy (&t22, &eth->dst_address[4], sizeof (t22));
-      b = t2 ^ t22;
-
-      c = ip4->src_address.data_u32 ^ ip4->dst_address.data_u32;
-
-      hash_v3_mix32 (a, b, c);
-      hash_v3_finalize32 (a, b, c);
-
-      return c % vec_len (bif->active_slaves);
+      u32 a, c;
+
+      mac1 = (u32 *) & eth->dst_address[0];
+      mac2 = (u32 *) & eth->dst_address[4];
+      mac3 = (u32 *) & eth->src_address[2];
+
+      a = clib_mem_unaligned (mac1, u32) ^ clib_mem_unaligned (mac2, u32) ^
+	clib_mem_unaligned (mac3, u32);
+      c =
+	lb_hash_hash_2_tuples (clib_mem_unaligned (&ip4->address_pair, u64),
+			       a);
+      return c;
    }
  else if (ip_version == 0x6)
    {
-      u64 a, b, c;
-      u64 t1 = 0, t2 = 0;
+      u64 a;
+      u32 c;
      ip6_header_t *ip6 = (ip6_header_t *) (eth + 1);
 
-      memcpy (&t1, eth->src_address, sizeof (eth->src_address));
-      memcpy (&t2, eth->dst_address, sizeof (eth->dst_address));
-      a = t1 ^ t2;
-
-      b = (ip6->src_address.as_u64[0] ^ ip6->src_address.as_u64[1]);
-      c = (ip6->dst_address.as_u64[0] ^ ip6->dst_address.as_u64[1]);
-
-      hash_mix64 (a, b, c);
-      return c % vec_len (bif->active_slaves);
+      mac1 = (u32 *) & eth->dst_address[0];
+      mac2 = (u32 *) & eth->dst_address[4];
+      mac3 = (u32 *) & eth->src_address[2];
+
+      a = clib_mem_unaligned (mac1, u32) ^ clib_mem_unaligned (mac2, u32) ^
+	clib_mem_unaligned (mac3, u32);
+      c =
+	lb_hash_hash (clib_mem_unaligned
+		      (&ip6->src_address.as_uword[0], uword),
+		      clib_mem_unaligned (&ip6->src_address.as_uword[1],
+					  uword),
+		      clib_mem_unaligned
(&ip6->dst_address.as_uword[0], + uword), + clib_mem_unaligned (&ip6->dst_address.as_uword[1], + uword), a); + return c; } - return (bond_load_balance_l2 (vm, node, bif, b0)); + return (bond_lb_l2 (vm, node, bif, b0, n_slaves)); } -static inline u32 -bond_load_balance_l34 (vlib_main_t * vm, vlib_node_runtime_t * node, - bond_if_t * bif, vlib_buffer_t * b0) +static_always_inline u32 +bond_lb_l34 (vlib_main_t * vm, vlib_node_runtime_t * node, + bond_if_t * bif, vlib_buffer_t * b0, uword n_slaves) { ethernet_header_t *eth = (ethernet_header_t *) vlib_buffer_get_current (b0); u8 ip_version; - uword is_tcp_udp = 0; + uword is_tcp_udp; ip4_header_t *ip4; u16 ethertype, *ethertype_p; ethertype_p = bond_locate_ethertype (eth); - ethertype = *ethertype_p; + ethertype = clib_mem_unaligned (ethertype_p, u16); if ((ethertype != htons (ETHERNET_TYPE_IP4)) && (ethertype != htons (ETHERNET_TYPE_IP6))) - return (bond_load_balance_l2 (vm, node, bif, b0)); + return (bond_lb_l2 (vm, node, bif, b0, n_slaves)); ip4 = (ip4_header_t *) (ethertype_p + 1); ip_version = (ip4->ip_version_and_header_length >> 4); if (ip_version == 0x4) { - u32 a = 0, b = 0, c = 0, t1, t2; + u32 a, t1, t2; tcp_header_t *tcp = (void *) (ip4 + 1); + is_tcp_udp = (ip4->protocol == IP_PROTOCOL_TCP) || (ip4->protocol == IP_PROTOCOL_UDP); - - a = ip4->src_address.data_u32 ^ ip4->dst_address.data_u32; - - t1 = is_tcp_udp ? tcp->src : 0; - t2 = is_tcp_udp ? tcp->dst : 0; - b = t1 + (t2 << 16); - - hash_v3_mix32 (a, b, c); - hash_v3_finalize32 (a, b, c); - - return c % vec_len (bif->active_slaves); + t1 = is_tcp_udp ? clib_mem_unaligned (&tcp->src, u16) : 0; + t2 = is_tcp_udp ? clib_mem_unaligned (&tcp->dst, u16) : 0; + a = t1 ^ t2; + return + lb_hash_hash_2_tuples (clib_mem_unaligned (&ip4->address_pair, u64), + a); } else if (ip_version == 0x6) { - u64 a, b, c; - u64 t1, t2; + u64 a; + u32 c, t1, t2; ip6_header_t *ip6 = (ip6_header_t *) (eth + 1); tcp_header_t *tcp = (void *) (ip6 + 1); + is_tcp_udp = 0; if (PREDICT_TRUE ((ip6->protocol == IP_PROTOCOL_TCP) || (ip6->protocol == IP_PROTOCOL_UDP))) { @@ -292,72 +320,334 @@ bond_load_balance_l34 (vlib_main_t * vm, vlib_node_runtime_t * node, tcp = (tcp_header_t *) ((u8 *) hbh + ((hbh->length + 1) << 3)); } } - a = (ip6->src_address.as_u64[0] ^ ip6->src_address.as_u64[1]); - b = (ip6->dst_address.as_u64[0] ^ ip6->dst_address.as_u64[1]); - - t1 = is_tcp_udp ? tcp->src : 0; - t2 = is_tcp_udp ? tcp->dst : 0; - c = (t2 << 16) | t1; - hash_mix64 (a, b, c); - - return c % vec_len (bif->active_slaves); + t1 = is_tcp_udp ? clib_mem_unaligned (&tcp->src, u16) : 0; + t2 = is_tcp_udp ? 
clib_mem_unaligned (&tcp->dst, u16) : 0; + a = t1 ^ t2; + c = + lb_hash_hash (clib_mem_unaligned + (&ip6->src_address.as_uword[0], uword), + clib_mem_unaligned (&ip6->src_address.as_uword[1], + uword), + clib_mem_unaligned (&ip6->dst_address.as_uword[0], + uword), + clib_mem_unaligned (&ip6->dst_address.as_uword[1], + uword), a); + return c; } - return (bond_load_balance_l2 (vm, node, bif, b0)); + return (bond_lb_l2 (vm, node, bif, b0, n_slaves)); } -static inline u32 -bond_load_balance_round_robin (vlib_main_t * vm, - vlib_node_runtime_t * node, - bond_if_t * bif, vlib_buffer_t * b0) +static_always_inline u32 +bond_lb_round_robin (vlib_main_t * vm, + vlib_node_runtime_t * node, + bond_if_t * bif, vlib_buffer_t * b0, uword n_slaves) { bif->lb_rr_last_index++; - bif->lb_rr_last_index %= vec_len (bif->active_slaves); + if (bif->lb_rr_last_index >= n_slaves) + bif->lb_rr_last_index = 0; return bif->lb_rr_last_index; } -static inline u32 -bond_load_balance_active_backup (vlib_main_t * vm, - vlib_node_runtime_t * node, - bond_if_t * bif, vlib_buffer_t * b0) +static_always_inline void +bond_tx_inline (vlib_main_t * vm, vlib_node_runtime_t * node, + bond_if_t * bif, vlib_buffer_t ** b, + u32 * h, u32 n_left, uword n_slaves, u32 lb_alg) { - /* First interface is the active, the rest is backup */ - return 0; + while (n_left >= 4) + { + // Prefetch next iteration + if (n_left >= 8) + { + vlib_buffer_t **pb = b + 4; + + vlib_prefetch_buffer_header (pb[0], LOAD); + vlib_prefetch_buffer_header (pb[1], LOAD); + vlib_prefetch_buffer_header (pb[2], LOAD); + vlib_prefetch_buffer_header (pb[3], LOAD); + + CLIB_PREFETCH (pb[0]->data, CLIB_CACHE_LINE_BYTES, LOAD); + CLIB_PREFETCH (pb[1]->data, CLIB_CACHE_LINE_BYTES, LOAD); + CLIB_PREFETCH (pb[2]->data, CLIB_CACHE_LINE_BYTES, LOAD); + CLIB_PREFETCH (pb[3]->data, CLIB_CACHE_LINE_BYTES, LOAD); + } + + VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b[0]); + VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b[1]); + VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b[2]); + VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b[3]); + + if (lb_alg == BOND_LB_L2) + { + h[0] = bond_lb_l2 (vm, node, bif, b[0], n_slaves); + h[1] = bond_lb_l2 (vm, node, bif, b[1], n_slaves); + h[2] = bond_lb_l2 (vm, node, bif, b[2], n_slaves); + h[3] = bond_lb_l2 (vm, node, bif, b[3], n_slaves); + } + else if (lb_alg == BOND_LB_L34) + { + h[0] = bond_lb_l34 (vm, node, bif, b[0], n_slaves); + h[1] = bond_lb_l34 (vm, node, bif, b[1], n_slaves); + h[2] = bond_lb_l34 (vm, node, bif, b[2], n_slaves); + h[3] = bond_lb_l34 (vm, node, bif, b[3], n_slaves); + } + else if (lb_alg == BOND_LB_L23) + { + h[0] = bond_lb_l23 (vm, node, bif, b[0], n_slaves); + h[1] = bond_lb_l23 (vm, node, bif, b[1], n_slaves); + h[2] = bond_lb_l23 (vm, node, bif, b[2], n_slaves); + h[3] = bond_lb_l23 (vm, node, bif, b[3], n_slaves); + } + else if (lb_alg == BOND_LB_RR) + { + h[0] = bond_lb_round_robin (vm, node, bif, b[0], n_slaves); + h[1] = bond_lb_round_robin (vm, node, bif, b[1], n_slaves); + h[2] = bond_lb_round_robin (vm, node, bif, b[2], n_slaves); + h[3] = bond_lb_round_robin (vm, node, bif, b[3], n_slaves); + } + else if (lb_alg == BOND_LB_BC) + { + h[0] = bond_lb_broadcast (vm, node, bif, b[0], n_slaves); + h[1] = bond_lb_broadcast (vm, node, bif, b[1], n_slaves); + h[2] = bond_lb_broadcast (vm, node, bif, b[2], n_slaves); + h[3] = bond_lb_broadcast (vm, node, bif, b[3], n_slaves); + } + else + { + ASSERT (0); + } + + n_left -= 4; + b += 4; + h += 4; + } + + while (n_left > 0) + { + VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b[0]); + + if (bif->lb == BOND_LB_L2) + h[0] = 
bond_lb_l2 (vm, node, bif, b[0], n_slaves); + else if (bif->lb == BOND_LB_L34) + h[0] = bond_lb_l34 (vm, node, bif, b[0], n_slaves); + else if (bif->lb == BOND_LB_L23) + h[0] = bond_lb_l23 (vm, node, bif, b[0], n_slaves); + else if (bif->lb == BOND_LB_RR) + h[0] = bond_lb_round_robin (vm, node, bif, b[0], n_slaves); + else if (bif->lb == BOND_LB_BC) + h[0] = bond_lb_broadcast (vm, node, bif, b[0], n_slaves); + else + { + ASSERT (0); + } + + n_left -= 1; + b += 1; + } } -static bond_load_balance_func_t bond_load_balance_table[] = { -#define _(v,f,s, p) { bond_load_balance_##p }, - foreach_bond_lb_algo -#undef _ -}; +static_always_inline void +bond_hash_to_port (u32 * h, u32 n_left, u32 n_slaves, int use_modulo_shortcut) +{ + u32 mask = n_slaves - 1; -static uword -bond_tx_fn (vlib_main_t * vm, vlib_node_runtime_t * node, - vlib_frame_t * frame) +#ifdef CLIB_HAVE_VEC256 + /* only lower 16 bits of hash due to single precision fp arithmetic */ + u32x8 mask8, sc8u, h8a, h8b; + f32x8 sc8f; + + if (use_modulo_shortcut) + { + mask8 = u32x8_splat (mask); + } + else + { + mask8 = u32x8_splat (0xffff); + sc8u = u32x8_splat (n_slaves); + sc8f = f32x8_from_u32x8 (sc8u); + } + + while (n_left > 16) + { + h8a = u32x8_load_unaligned (h) & mask8; + h8b = u32x8_load_unaligned (h + 8) & mask8; + + if (use_modulo_shortcut == 0) + { + h8a -= sc8u * u32x8_from_f32x8 (f32x8_from_u32x8 (h8a) / sc8f); + h8b -= sc8u * u32x8_from_f32x8 (f32x8_from_u32x8 (h8b) / sc8f); + } + + u32x8_store_unaligned (h8a, h); + u32x8_store_unaligned (h8b, h + 8); + n_left -= 16; + h += 16; + } +#endif + + while (n_left > 4) + { + if (use_modulo_shortcut) + { + h[0] &= mask; + h[1] &= mask; + h[2] &= mask; + h[3] &= mask; + } + else + { + h[0] %= n_slaves; + h[1] %= n_slaves; + h[2] %= n_slaves; + h[3] %= n_slaves; + } + n_left -= 4; + h += 4; + } + while (n_left) + { + if (use_modulo_shortcut) + h[0] &= mask; + else + h[0] %= n_slaves; + n_left -= 1; + h += 1; + } +} + +static_always_inline void +bond_update_sw_if_index (bond_per_thread_data_t * ptd, bond_if_t * bif, + u32 * bi, vlib_buffer_t ** b, u32 * data, u32 n_left, + int single_sw_if_index) +{ + u32 sw_if_index = data[0]; + u32 *h = data; + + while (n_left >= 4) + { + // Prefetch next iteration + if (n_left >= 8) + { + vlib_buffer_t **pb = b + 4; + vlib_prefetch_buffer_header (pb[0], LOAD); + vlib_prefetch_buffer_header (pb[1], LOAD); + vlib_prefetch_buffer_header (pb[2], LOAD); + vlib_prefetch_buffer_header (pb[3], LOAD); + } + + if (PREDICT_FALSE (single_sw_if_index)) + { + vnet_buffer (b[0])->sw_if_index[VLIB_TX] = sw_if_index; + vnet_buffer (b[1])->sw_if_index[VLIB_TX] = sw_if_index; + vnet_buffer (b[2])->sw_if_index[VLIB_TX] = sw_if_index; + vnet_buffer (b[3])->sw_if_index[VLIB_TX] = sw_if_index; + + bond_tx_add_to_queue (ptd, 0, bi[0]); + bond_tx_add_to_queue (ptd, 0, bi[1]); + bond_tx_add_to_queue (ptd, 0, bi[2]); + bond_tx_add_to_queue (ptd, 0, bi[3]); + } + else + { + u32 sw_if_index[4]; + + sw_if_index[0] = *vec_elt_at_index (bif->active_slaves, h[0]); + sw_if_index[1] = *vec_elt_at_index (bif->active_slaves, h[1]); + sw_if_index[2] = *vec_elt_at_index (bif->active_slaves, h[2]); + sw_if_index[3] = *vec_elt_at_index (bif->active_slaves, h[3]); + + vnet_buffer (b[0])->sw_if_index[VLIB_TX] = sw_if_index[0]; + vnet_buffer (b[1])->sw_if_index[VLIB_TX] = sw_if_index[1]; + vnet_buffer (b[2])->sw_if_index[VLIB_TX] = sw_if_index[2]; + vnet_buffer (b[3])->sw_if_index[VLIB_TX] = sw_if_index[3]; + + bond_tx_add_to_queue (ptd, h[0], bi[0]); + bond_tx_add_to_queue (ptd, h[1], 
bi[1]); + bond_tx_add_to_queue (ptd, h[2], bi[2]); + bond_tx_add_to_queue (ptd, h[3], bi[3]); + } + + bi += 4; + h += 4; + b += 4; + n_left -= 4; + } + while (n_left) + { + if (PREDICT_FALSE (single_sw_if_index)) + { + vnet_buffer (b[0])->sw_if_index[VLIB_TX] = sw_if_index; + bond_tx_add_to_queue (ptd, 0, bi[0]); + } + else + { + u32 sw_if_index0 = *vec_elt_at_index (bif->active_slaves, h[0]); + + vnet_buffer (b[0])->sw_if_index[VLIB_TX] = sw_if_index0; + bond_tx_add_to_queue (ptd, h[0], bi[0]); + } + + bi += 1; + h += 1; + b += 1; + n_left -= 1; + } +} + +static_always_inline void +bond_tx_trace (vlib_main_t * vm, vlib_node_runtime_t * node, bond_if_t * bif, + vlib_buffer_t ** b, u32 n_left, u32 * h) +{ + uword n_trace = vlib_get_trace_count (vm, node); + + while (n_trace > 0 && n_left > 0) + { + bond_packet_trace_t *t0; + ethernet_header_t *eth; + u32 next0 = 0; + + vlib_trace_buffer (vm, node, next0, b[0], 0 /* follow_chain */ ); + vlib_set_trace_count (vm, node, --n_trace); + t0 = vlib_add_trace (vm, node, b[0], sizeof (*t0)); + eth = (ethernet_header_t *) vlib_buffer_get_current (b[0]); + t0->ethernet = *eth; + t0->sw_if_index = vnet_buffer (b[0])->sw_if_index[VLIB_TX]; + if (!h) + { + t0->bond_sw_if_index = *vec_elt_at_index (bif->active_slaves, 0); + } + else + { + t0->bond_sw_if_index = *vec_elt_at_index (bif->active_slaves, h[0]); + h++; + } + b++; + n_left--; + } +} + +VNET_DEVICE_CLASS_TX_FN (bond_dev_class) (vlib_main_t * vm, + vlib_node_runtime_t * node, + vlib_frame_t * frame) { vnet_interface_output_runtime_t *rund = (void *) node->runtime_data; bond_main_t *bm = &bond_main; + u16 thread_index = vm->thread_index; bond_if_t *bif = pool_elt_at_index (bm->interfaces, rund->dev_instance); - u32 bi0, bi1, bi2, bi3; - vlib_buffer_t *b0, *b1, *b2, *b3; + uword n_slaves; + vlib_buffer_t *bufs[VLIB_FRAME_SIZE]; u32 *from = vlib_frame_vector_args (frame); - u32 n_left_from; - ethernet_header_t *eth; - u32 next0 = 0, next1 = 0, next2 = 0, next3 = 0; - u32 port, port1, port2, port3; - u32 sw_if_index, sw_if_index1, sw_if_index2, sw_if_index3; - bond_packet_trace_t *t0; - uword n_trace = vlib_get_trace_count (vm, node); - u16 thread_index = vlib_get_thread_index (); + u32 n_left = frame->n_vectors; + u32 hashes[VLIB_FRAME_SIZE], *h; vnet_main_t *vnm = vnet_get_main (); - u32 *to_next; - u32 sif_if_index, sif_if_index1, sif_if_index2, sif_if_index3; - vlib_frame_t *f; + bond_per_thread_data_t *ptd = vec_elt_at_index (bm->per_thread_data, + thread_index); + u32 p, sw_if_index; if (PREDICT_FALSE (bif->admin_up == 0)) { - vlib_buffer_free (vm, vlib_frame_args (frame), frame->n_vectors); + vlib_buffer_free (vm, vlib_frame_vector_args (frame), frame->n_vectors); vlib_increment_simple_counter (vnet_main.interface_main.sw_if_counters + VNET_INTERFACE_COUNTER_DROP, thread_index, bif->sw_if_index, @@ -367,278 +657,155 @@ bond_tx_fn (vlib_main_t * vm, vlib_node_runtime_t * node, return frame->n_vectors; } - clib_spinlock_lock_if_init (&bif->lockp); - if (PREDICT_FALSE (vec_len (bif->active_slaves) == 0)) + n_slaves = vec_len (bif->active_slaves); + if (PREDICT_FALSE (n_slaves == 0)) { - bi0 = from[0]; - b0 = vlib_get_buffer (vm, bi0); - vlib_increment_combined_counter - (vnet_main.interface_main.combined_sw_if_counters - + VNET_INTERFACE_COUNTER_TX, thread_index, bif->sw_if_index, - frame->n_vectors, b0->current_length); - - vlib_buffer_free (vm, vlib_frame_args (frame), frame->n_vectors); + vlib_buffer_free (vm, vlib_frame_vector_args (frame), frame->n_vectors); vlib_increment_simple_counter 
(vnet_main.interface_main.sw_if_counters + VNET_INTERFACE_COUNTER_DROP, thread_index, bif->sw_if_index, frame->n_vectors); vlib_error_count (vm, node->node_index, BOND_TX_ERROR_NO_SLAVE, frame->n_vectors); - clib_spinlock_unlock_if_init (&bif->lockp); return frame->n_vectors; } - vec_validate_aligned (bif->per_thread_info[thread_index].frame, - vec_len (bif->active_slaves), CLIB_CACHE_LINE_BYTES); + vlib_get_buffers (vm, from, bufs, n_left); + + /* active-backup mode, ship everything to first sw if index */ + if ((bif->lb == BOND_LB_AB) || PREDICT_FALSE (n_slaves == 1)) + { + sw_if_index = *vec_elt_at_index (bif->active_slaves, 0); - /* Number of buffers / pkts */ - n_left_from = frame->n_vectors; + bond_tx_trace (vm, node, bif, bufs, frame->n_vectors, 0); + bond_update_sw_if_index (ptd, bif, from, bufs, &sw_if_index, n_left, + /* single_sw_if_index */ 1); + goto done; + } - while (n_left_from > 0) + if (bif->lb == BOND_LB_BC) { - while (n_left_from >= 4) - { - // Prefetch next iteration - if (n_left_from >= 8) - { - vlib_buffer_t *p4, *p5, *p6, *p7; - - p4 = vlib_get_buffer (vm, from[4]); - p5 = vlib_get_buffer (vm, from[5]); - p6 = vlib_get_buffer (vm, from[6]); - p7 = vlib_get_buffer (vm, from[7]); - - vlib_prefetch_buffer_header (p4, STORE); - vlib_prefetch_buffer_header (p5, STORE); - vlib_prefetch_buffer_header (p6, STORE); - vlib_prefetch_buffer_header (p7, STORE); - - CLIB_PREFETCH (p4->data, CLIB_CACHE_LINE_BYTES, LOAD); - CLIB_PREFETCH (p5->data, CLIB_CACHE_LINE_BYTES, LOAD); - CLIB_PREFETCH (p6->data, CLIB_CACHE_LINE_BYTES, LOAD); - CLIB_PREFETCH (p7->data, CLIB_CACHE_LINE_BYTES, LOAD); - } + sw_if_index = *vec_elt_at_index (bif->active_slaves, 0); + + bond_tx_inline (vm, node, bif, bufs, hashes, n_left, n_slaves, + BOND_LB_BC); + bond_tx_trace (vm, node, bif, bufs, frame->n_vectors, 0); + bond_update_sw_if_index (ptd, bif, from, bufs, &sw_if_index, n_left, + /* single_sw_if_index */ 1); + goto done; + } - bi0 = from[0]; - bi1 = from[1]; - bi2 = from[2]; - bi3 = from[3]; - - b0 = vlib_get_buffer (vm, bi0); - b1 = vlib_get_buffer (vm, bi1); - b2 = vlib_get_buffer (vm, bi2); - b3 = vlib_get_buffer (vm, bi3); - - VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b0); - VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b1); - VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b2); - VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b3); - - sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_TX]; - sw_if_index1 = vnet_buffer (b1)->sw_if_index[VLIB_TX]; - sw_if_index2 = vnet_buffer (b2)->sw_if_index[VLIB_TX]; - sw_if_index3 = vnet_buffer (b3)->sw_if_index[VLIB_TX]; - - port = - (bond_load_balance_table[bif->lb]).load_balance (vm, node, bif, - b0); - port1 = - (bond_load_balance_table[bif->lb]).load_balance (vm, node, bif, - b1); - port2 = - (bond_load_balance_table[bif->lb]).load_balance (vm, node, bif, - b2); - port3 = - (bond_load_balance_table[bif->lb]).load_balance (vm, node, bif, - b3); - - sif_if_index = *vec_elt_at_index (bif->active_slaves, port); - sif_if_index1 = *vec_elt_at_index (bif->active_slaves, port1); - sif_if_index2 = *vec_elt_at_index (bif->active_slaves, port2); - sif_if_index3 = *vec_elt_at_index (bif->active_slaves, port3); - - vnet_buffer (b0)->sw_if_index[VLIB_TX] = sif_if_index; - vnet_buffer (b1)->sw_if_index[VLIB_TX] = sif_if_index1; - vnet_buffer (b2)->sw_if_index[VLIB_TX] = sif_if_index2; - vnet_buffer (b3)->sw_if_index[VLIB_TX] = sif_if_index3; - - if (bif->per_thread_info[thread_index].frame[port] == 0) - bif->per_thread_info[thread_index].frame[port] = - vnet_get_frame_to_sw_interface (vnm, sif_if_index); - - if 
(bif->per_thread_info[thread_index].frame[port1] == 0) - bif->per_thread_info[thread_index].frame[port1] = - vnet_get_frame_to_sw_interface (vnm, sif_if_index1); - - if (bif->per_thread_info[thread_index].frame[port2] == 0) - bif->per_thread_info[thread_index].frame[port2] = - vnet_get_frame_to_sw_interface (vnm, sif_if_index2); - - if (bif->per_thread_info[thread_index].frame[port3] == 0) - bif->per_thread_info[thread_index].frame[port3] = - vnet_get_frame_to_sw_interface (vnm, sif_if_index3); - - f = bif->per_thread_info[thread_index].frame[port]; - to_next = vlib_frame_vector_args (f); - to_next += f->n_vectors; - to_next[0] = vlib_get_buffer_index (vm, b0); - f->n_vectors++; + if (bif->lb == BOND_LB_L2) + bond_tx_inline (vm, node, bif, bufs, hashes, n_left, n_slaves, + BOND_LB_L2); + else if (bif->lb == BOND_LB_L34) + bond_tx_inline (vm, node, bif, bufs, hashes, n_left, n_slaves, + BOND_LB_L34); + else if (bif->lb == BOND_LB_L23) + bond_tx_inline (vm, node, bif, bufs, hashes, n_left, n_slaves, + BOND_LB_L23); + else if (bif->lb == BOND_LB_RR) + bond_tx_inline (vm, node, bif, bufs, hashes, n_left, n_slaves, + BOND_LB_RR); + else + ASSERT (0); - f = bif->per_thread_info[thread_index].frame[port1]; - to_next = vlib_frame_vector_args (f); - to_next += f->n_vectors; - to_next[0] = vlib_get_buffer_index (vm, b1); - f->n_vectors++; + /* calculate port out of hash */ + h = hashes; + if (BOND_MODULO_SHORTCUT (n_slaves)) + bond_hash_to_port (h, frame->n_vectors, n_slaves, 1); + else + bond_hash_to_port (h, frame->n_vectors, n_slaves, 0); - f = bif->per_thread_info[thread_index].frame[port2]; - to_next = vlib_frame_vector_args (f); - to_next += f->n_vectors; - to_next[0] = vlib_get_buffer_index (vm, b2); - f->n_vectors++; + bond_tx_trace (vm, node, bif, bufs, frame->n_vectors, h); - f = bif->per_thread_info[thread_index].frame[port3]; - to_next = vlib_frame_vector_args (f); - to_next += f->n_vectors; - to_next[0] = vlib_get_buffer_index (vm, b3); - f->n_vectors++; + bond_update_sw_if_index (ptd, bif, from, bufs, hashes, frame->n_vectors, + /* single_sw_if_index */ 0); - if (PREDICT_FALSE (n_trace > 0)) - { - vlib_trace_buffer (vm, node, next0, b0, 0 /* follow_chain */ ); - vlib_set_trace_count (vm, node, --n_trace); - t0 = vlib_add_trace (vm, node, b0, sizeof (*t0)); - eth = (ethernet_header_t *) vlib_buffer_get_current (b0); - t0->ethernet = *eth; - t0->sw_if_index = sw_if_index; - t0->bond_sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_TX]; - - if (PREDICT_TRUE (n_trace > 0)) - { - vlib_trace_buffer (vm, node, next1, b1, - 0 /* follow_chain */ ); - vlib_set_trace_count (vm, node, --n_trace); - t0 = vlib_add_trace (vm, node, b1, sizeof (*t0)); - eth = (ethernet_header_t *) vlib_buffer_get_current (b1); - t0->ethernet = *eth; - t0->sw_if_index = sw_if_index1; - t0->bond_sw_if_index = - vnet_buffer (b1)->sw_if_index[VLIB_TX]; - - if (PREDICT_TRUE (n_trace > 0)) - { - vlib_trace_buffer (vm, node, next2, b2, - 0 /* follow_chain */ ); - vlib_set_trace_count (vm, node, --n_trace); - t0 = vlib_add_trace (vm, node, b2, sizeof (*t0)); - eth = - (ethernet_header_t *) vlib_buffer_get_current (b2); - t0->ethernet = *eth; - t0->sw_if_index = sw_if_index2; - t0->bond_sw_if_index = - vnet_buffer (b2)->sw_if_index[VLIB_TX]; - - if (PREDICT_TRUE (n_trace > 0)) - { - vlib_trace_buffer (vm, node, next3, b3, - 0 /* follow_chain */ ); - vlib_set_trace_count (vm, node, --n_trace); - t0 = vlib_add_trace (vm, node, b3, sizeof (*t0)); - eth = - (ethernet_header_t *) - vlib_buffer_get_current (b3); - t0->ethernet = 
*eth; - t0->sw_if_index = sw_if_index3; - t0->bond_sw_if_index = - vnet_buffer (b3)->sw_if_index[VLIB_TX]; - } - } - } - } - from += 4; - n_left_from -= 4; - } +done: + for (p = 0; p < n_slaves; p++) + { + vlib_frame_t *f; + u32 *to_next; - while (n_left_from > 0) + sw_if_index = *vec_elt_at_index (bif->active_slaves, p); + if (PREDICT_TRUE (ptd->per_port_queue[p].n_buffers)) { - // Prefetch next iteration - if (n_left_from > 1) - { - vlib_buffer_t *p2; - - p2 = vlib_get_buffer (vm, from[1]); - vlib_prefetch_buffer_header (p2, STORE); - CLIB_PREFETCH (p2->data, CLIB_CACHE_LINE_BYTES, LOAD); - } + f = vnet_get_frame_to_sw_interface (vnm, sw_if_index); + f->n_vectors = ptd->per_port_queue[p].n_buffers; + to_next = vlib_frame_vector_args (f); + clib_memcpy_fast (to_next, ptd->per_port_queue[p].buffers, + f->n_vectors * sizeof (u32)); + vnet_put_frame_to_sw_interface (vnm, sw_if_index, f); + ptd->per_port_queue[p].n_buffers = 0; + } + } - bi0 = from[0]; - b0 = vlib_get_buffer (vm, bi0); + vlib_increment_simple_counter (vnet_main.interface_main.sw_if_counters + + VNET_INTERFACE_COUNTER_TX, thread_index, + bif->sw_if_index, frame->n_vectors); - VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b0); + return frame->n_vectors; +} - sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_TX]; +static walk_rc_t +bond_active_interface_switch_cb (vnet_main_t * vnm, u32 sw_if_index, + void *arg) +{ + bond_main_t *bm = &bond_main; - port = - (bond_load_balance_table[bif->lb]).load_balance (vm, node, bif, - b0); - sif_if_index = *vec_elt_at_index (bif->active_slaves, port); - vnet_buffer (b0)->sw_if_index[VLIB_TX] = sif_if_index; - if (bif->per_thread_info[thread_index].frame[port] == 0) - bif->per_thread_info[thread_index].frame[port] = - vnet_get_frame_to_sw_interface (vnm, sif_if_index); - f = bif->per_thread_info[thread_index].frame[port]; - to_next = vlib_frame_vector_args (f); - to_next += f->n_vectors; - to_next[0] = vlib_get_buffer_index (vm, b0); - f->n_vectors++; + send_ip4_garp (bm->vlib_main, sw_if_index); + send_ip6_na (bm->vlib_main, sw_if_index); - if (PREDICT_FALSE (n_trace > 0)) - { - vlib_trace_buffer (vm, node, next0, b0, 0 /* follow_chain */ ); - vlib_set_trace_count (vm, node, --n_trace); - t0 = vlib_add_trace (vm, node, b0, sizeof (*t0)); - eth = (ethernet_header_t *) vlib_buffer_get_current (b0); - t0->ethernet = *eth; - t0->sw_if_index = sw_if_index; - t0->bond_sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_TX]; - } + return (WALK_CONTINUE); +} - from += 1; - n_left_from -= 1; - } - } +static uword +bond_process (vlib_main_t * vm, vlib_node_runtime_t * rt, vlib_frame_t * f) +{ + vnet_main_t *vnm = vnet_get_main (); + uword event_type, *event_data = 0; - for (port = 0; port < vec_len (bif->active_slaves); port++) + while (1) { - f = bif->per_thread_info[thread_index].frame[port]; - if (f == 0) - continue; + u32 i; + u32 hw_if_index; - sw_if_index = *vec_elt_at_index (bif->active_slaves, port); - vnet_put_frame_to_sw_interface (vnm, sw_if_index, f); - bif->per_thread_info[thread_index].frame[port] = 0; + vlib_process_wait_for_event (vm); + event_type = vlib_process_get_events (vm, &event_data); + ASSERT (event_type == BOND_SEND_GARP_NA); + for (i = 0; i < vec_len (event_data); i++) + { + hw_if_index = event_data[i]; + /* walk hw interface to process all subinterfaces */ + vnet_hw_interface_walk_sw (vnm, hw_if_index, + bond_active_interface_switch_cb, 0); + } + vec_reset_length (event_data); } - - vlib_increment_simple_counter (vnet_main.interface_main.sw_if_counters - + VNET_INTERFACE_COUNTER_TX, 
thread_index, - bif->sw_if_index, frame->n_vectors); - - clib_spinlock_unlock_if_init (&bif->lockp); - return frame->n_vectors; + return 0; } +/* *INDENT-OFF* */ +VLIB_REGISTER_NODE (bond_process_node) = { + .function = bond_process, + .type = VLIB_NODE_TYPE_PROCESS, + .name = "bond-process", +}; +/* *INDENT-ON* */ + /* *INDENT-OFF* */ VNET_DEVICE_CLASS (bond_dev_class) = { .name = "bond", - .tx_function = bond_tx_fn, .tx_function_n_errors = BOND_TX_N_ERROR, .tx_function_error_strings = bond_tx_error_strings, .format_device_name = format_bond_interface_name, + .set_l2_mode_function = bond_set_l2_mode_function, .admin_up_down_function = bond_interface_admin_up_down, .subif_add_del_function = bond_subif_add_del_function, .format_tx_trace = format_bond_tx_trace, }; -VLIB_DEVICE_TX_FUNCTION_MULTIARCH (bond_dev_class, bond_tx_fn) /* *INDENT-ON* */ /*