#include <vnet/vnet.h>
#include <vnet/ip/icmp46_packet.h>
+#include <vnet/ethernet/packet.h>
+#include <vnet/ip/format.h>
#include <vnet/ip/ip4.h>
#include <vnet/ip/ip6.h>
#include <vnet/udp/udp_packet.h>
#include <vnet/feature/feature.h>
+#include <vnet/classify/pcap_classify.h>
+#include <vnet/hash/hash.h>
+#include <vnet/interface_output.h>
+#include <vppinfra/vector/mask_compare.h>
+#include <vppinfra/vector/compress.h>
+#include <vppinfra/vector/count_equal.h>
+#include <vppinfra/vector/array_mask.h>
typedef struct
{
u32 sw_if_index;
u32 flags;
- u16 gso_size;
- u8 gso_l4_hdr_sz;
- u8 data[128 - 3 * sizeof (u32)];
+ u8 data[128 - 2 * sizeof (u32)];
}
interface_output_trace_t;
else
{
si = vnet_get_sw_interface (vnm, t->sw_if_index);
- s =
- format (s, "%U ", format_vnet_sw_interface_name, vnm, si,
- t->flags);
- }
-#define _(bit, name, v, x) \
- if (v && (t->flags & VNET_BUFFER_F_##name)) \
- s = format (s, "%s ", v);
- foreach_vnet_buffer_flag
-#undef _
- if (t->flags & VNET_BUFFER_F_GSO)
- {
- s = format (s, "\n%Ugso_sz %d gso_l4_hdr_sz %d",
- format_white_space, indent + 2, t->gso_size,
- t->gso_l4_hdr_sz);
+ s = format (s, "%U flags 0x%08x", format_vnet_sw_interface_name, vnm,
+ si, t->flags);
}
s =
format (s, "\n%U%U", format_white_space, indent,
}
return s;
}
+#endif /* CLIB_MARCH_VARIANT */
static void
vnet_interface_output_trace (vlib_main_t * vm,
t0 = vlib_add_trace (vm, node, b0, sizeof (t0[0]));
t0->sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_TX];
t0->flags = b0->flags;
- t0->gso_size = vnet_buffer2 (b0)->gso_size;
- t0->gso_l4_hdr_sz = vnet_buffer2 (b0)->gso_l4_hdr_sz;
clib_memcpy_fast (t0->data, vlib_buffer_get_current (b0),
sizeof (t0->data));
}
t1 = vlib_add_trace (vm, node, b1, sizeof (t1[0]));
t1->sw_if_index = vnet_buffer (b1)->sw_if_index[VLIB_TX];
t1->flags = b1->flags;
- t1->gso_size = vnet_buffer2 (b1)->gso_size;
- t1->gso_l4_hdr_sz = vnet_buffer2 (b1)->gso_l4_hdr_sz;
clib_memcpy_fast (t1->data, vlib_buffer_get_current (b1),
sizeof (t1->data));
}
t0 = vlib_add_trace (vm, node, b0, sizeof (t0[0]));
t0->sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_TX];
t0->flags = b0->flags;
- t0->gso_size = vnet_buffer2 (b0)->gso_size;
- t0->gso_l4_hdr_sz = vnet_buffer2 (b0)->gso_l4_hdr_sz;
clib_memcpy_fast (t0->data, vlib_buffer_get_current (b0),
sizeof (t0->data));
}
}
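+/*
+ * Resolve checksum offload requests in software for a single buffer:
+ * GSO buffers are skipped, otherwise the inner (v4/v6) and outer
+ * checksums are computed here.
+ */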
static_always_inline void
-calc_checksums (vlib_main_t * vm, vlib_buffer_t * b)
+vnet_interface_output_handle_offload (vlib_main_t *vm, vlib_buffer_t *b)
{
- ip4_header_t *ip4;
- ip6_header_t *ip6;
- tcp_header_t *th;
- udp_header_t *uh;
+ if (b->flags & VNET_BUFFER_F_GSO)
+ return;
+ vnet_calc_checksums_inline (vm, b, b->flags & VNET_BUFFER_F_IS_IP4,
+ b->flags & VNET_BUFFER_F_IS_IP6);
+ vnet_calc_outer_checksums_inline (vm, b);
+}
- int is_ip4 = (b->flags & VNET_BUFFER_F_IS_IP4) != 0;
- int is_ip6 = (b->flags & VNET_BUFFER_F_IS_IP6) != 0;
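+/*
+ * Single inline worker specialized by processing_level:
+ *   0 - count tx packets/bytes only
+ *   1 - additionally resolve checksum offloads in software
+ *   2 - additionally handle sub-interface tx counters and feature arcs
+ *   3 - additionally collect packet pointers for multi-tx-queue hashing
+ */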
+static_always_inline uword
+vnet_interface_output_node_inline (vlib_main_t *vm, u32 sw_if_index,
+ vlib_combined_counter_main_t *ccm,
+ vlib_buffer_t **b, void **p,
+ u32 config_index, u8 arc, u32 n_left,
+ int processing_level)
+{
+ u32 n_bytes = 0;
+ u32 n_bytes0, n_bytes1, n_bytes2, n_bytes3;
+ u32 ti = vm->thread_index;
- ASSERT (!(is_ip4 && is_ip6));
+ while (n_left >= 8)
+ {
+ u32 or_flags;
- ip4 = (ip4_header_t *) (b->data + vnet_buffer (b)->l3_hdr_offset);
- ip6 = (ip6_header_t *) (b->data + vnet_buffer (b)->l3_hdr_offset);
- th = (tcp_header_t *) (b->data + vnet_buffer (b)->l4_hdr_offset);
- uh = (udp_header_t *) (b->data + vnet_buffer (b)->l4_hdr_offset);
+ /* Prefetch next iteration. */
+ vlib_prefetch_buffer_header (b[4], LOAD);
+ vlib_prefetch_buffer_header (b[5], LOAD);
+ vlib_prefetch_buffer_header (b[6], LOAD);
+ vlib_prefetch_buffer_header (b[7], LOAD);
+
+ if (processing_level >= 1)
+ or_flags = b[0]->flags | b[1]->flags | b[2]->flags | b[3]->flags;
+
+ /* Be grumpy about zero length buffers for benefit of
+ driver tx function. */
+ ASSERT (b[0]->current_length > 0);
+ ASSERT (b[1]->current_length > 0);
+ ASSERT (b[2]->current_length > 0);
+ ASSERT (b[3]->current_length > 0);
+
+ n_bytes += n_bytes0 = vlib_buffer_length_in_chain (vm, b[0]);
+ n_bytes += n_bytes1 = vlib_buffer_length_in_chain (vm, b[1]);
+ n_bytes += n_bytes2 = vlib_buffer_length_in_chain (vm, b[2]);
+ n_bytes += n_bytes3 = vlib_buffer_length_in_chain (vm, b[3]);
+
+ if (processing_level >= 3)
+ {
+ p[0] = vlib_buffer_get_current (b[0]);
+ p[1] = vlib_buffer_get_current (b[1]);
+ p[2] = vlib_buffer_get_current (b[2]);
+ p[3] = vlib_buffer_get_current (b[3]);
+ p += 4;
+ }
- if (is_ip4)
- {
- ip4 = (ip4_header_t *) (b->data + vnet_buffer (b)->l3_hdr_offset);
- if (b->flags & VNET_BUFFER_F_OFFLOAD_IP_CKSUM)
- ip4->checksum = ip4_header_checksum (ip4);
- if (b->flags & VNET_BUFFER_F_OFFLOAD_TCP_CKSUM)
+ if (processing_level >= 2)
{
- th->checksum = 0;
- th->checksum = ip4_tcp_udp_compute_checksum (vm, b, ip4);
+ u32 tx_swif0, tx_swif1, tx_swif2, tx_swif3;
+ tx_swif0 = vnet_buffer (b[0])->sw_if_index[VLIB_TX];
+ tx_swif1 = vnet_buffer (b[1])->sw_if_index[VLIB_TX];
+ tx_swif2 = vnet_buffer (b[2])->sw_if_index[VLIB_TX];
+ tx_swif3 = vnet_buffer (b[3])->sw_if_index[VLIB_TX];
+
+ /* update vlan subif tx counts, if required */
+ if (PREDICT_FALSE (tx_swif0 != sw_if_index))
+ vlib_increment_combined_counter (ccm, ti, tx_swif0, 1, n_bytes0);
+
+ if (PREDICT_FALSE (tx_swif1 != sw_if_index))
+ vlib_increment_combined_counter (ccm, ti, tx_swif1, 1, n_bytes1);
+
+ if (PREDICT_FALSE (tx_swif2 != sw_if_index))
+ vlib_increment_combined_counter (ccm, ti, tx_swif2, 1, n_bytes2);
+
+ if (PREDICT_FALSE (tx_swif3 != sw_if_index))
+ vlib_increment_combined_counter (ccm, ti, tx_swif3, 1, n_bytes3);
+
+ if (PREDICT_FALSE (config_index != ~0))
+ {
+ vnet_buffer (b[0])->feature_arc_index = arc;
+ b[0]->current_config_index = config_index;
+ vnet_buffer (b[1])->feature_arc_index = arc;
+ b[1]->current_config_index = config_index;
+ vnet_buffer (b[2])->feature_arc_index = arc;
+ b[2]->current_config_index = config_index;
+ vnet_buffer (b[3])->feature_arc_index = arc;
+ b[3]->current_config_index = config_index;
+ }
+ }
+
+ if (processing_level >= 1 && (or_flags & VNET_BUFFER_F_OFFLOAD))
+ {
+ vnet_interface_output_handle_offload (vm, b[0]);
+ vnet_interface_output_handle_offload (vm, b[1]);
+ vnet_interface_output_handle_offload (vm, b[2]);
+ vnet_interface_output_handle_offload (vm, b[3]);
}
- if (b->flags & VNET_BUFFER_F_OFFLOAD_UDP_CKSUM)
- uh->checksum = ip4_tcp_udp_compute_checksum (vm, b, ip4);
+
+ n_left -= 4;
+ b += 4;
}
- if (is_ip6)
+
+ while (n_left)
{
- int bogus;
- if (b->flags & VNET_BUFFER_F_OFFLOAD_TCP_CKSUM)
+ /* Be grumpy about zero length buffers for benefit of
+ driver tx function. */
+ ASSERT (b[0]->current_length > 0);
+
+ n_bytes += n_bytes0 = vlib_buffer_length_in_chain (vm, b[0]);
+
+ if (processing_level >= 3)
{
- th->checksum = 0;
- th->checksum =
- ip6_tcp_udp_icmp_compute_checksum (vm, b, ip6, &bogus);
+ p[0] = vlib_buffer_get_current (b[0]);
+ p += 1;
}
- if (b->flags & VNET_BUFFER_F_OFFLOAD_UDP_CKSUM)
+
+ if (processing_level >= 2)
{
- uh->checksum = 0;
- uh->checksum =
- ip6_tcp_udp_icmp_compute_checksum (vm, b, ip6, &bogus);
+ u32 tx_swif0 = vnet_buffer (b[0])->sw_if_index[VLIB_TX];
+
+ if (PREDICT_FALSE (config_index != ~0))
+ {
+ vnet_buffer (b[0])->feature_arc_index = arc;
+ b[0]->current_config_index = config_index;
+ }
+
+ if (PREDICT_FALSE (tx_swif0 != sw_if_index))
+ vlib_increment_combined_counter (ccm, ti, tx_swif0, 1, n_bytes0);
}
- }
- b->flags &= ~VNET_BUFFER_F_OFFLOAD_TCP_CKSUM;
- b->flags &= ~VNET_BUFFER_F_OFFLOAD_UDP_CKSUM;
- b->flags &= ~VNET_BUFFER_F_OFFLOAD_IP_CKSUM;
-}
+ if (processing_level >= 1)
+ vnet_interface_output_handle_offload (vm, b[0]);
-static_always_inline u16
-tso_alloc_tx_bufs (vlib_main_t * vm,
- vnet_interface_per_thread_data_t * ptd,
- vlib_buffer_t * b0, u16 l4_hdr_sz)
-{
- u32 n_bytes_b0 = vlib_buffer_length_in_chain (vm, b0);
- u16 gso_size = vnet_buffer2 (b0)->gso_size;
- u16 l234_sz = vnet_buffer (b0)->l4_hdr_offset + l4_hdr_sz;
- /* rounded-up division */
- u16 n_bufs = (n_bytes_b0 - l234_sz + (gso_size - 1)) / gso_size;
- u16 n_alloc;
-
- ASSERT (n_bufs > 0);
- vec_validate (ptd->split_buffers, n_bufs - 1);
-
- n_alloc = vlib_buffer_alloc (vm, ptd->split_buffers, n_bufs);
- if (n_alloc < n_bufs)
- {
- vlib_buffer_free (vm, ptd->split_buffers, n_alloc);
- return 0;
+ n_left -= 1;
+ b += 1;
}
- return 1;
-}
-static_always_inline void
-tso_init_buf_from_template_base (vlib_buffer_t * nb0, vlib_buffer_t * b0,
- u32 flags, u16 length)
-{
- nb0->current_data = 0;
- nb0->total_length_not_including_first_buffer = 0;
- nb0->flags = VLIB_BUFFER_TOTAL_LENGTH_VALID | flags;
- clib_memcpy_fast (&nb0->opaque, &b0->opaque, sizeof (nb0->opaque));
- clib_memcpy_fast (nb0->data, b0->data, length);
- nb0->current_length = length;
+ return n_bytes;
}
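+/*
+ * Capture tx packets when pcap tx capture is enabled. When called from
+ * interface-output (in_interface_ouput != 0), only sub-interface packets
+ * matching a per-interface filter are considered here; everything else is
+ * deferred to the interface-output-template node.
+ */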
static_always_inline void
-tso_init_buf_from_template (vlib_main_t * vm, vlib_buffer_t * nb0,
- vlib_buffer_t * b0, u16 template_data_sz,
- u16 gso_size, u8 ** p_dst_ptr, u16 * p_dst_left,
- u32 next_tcp_seq, u32 flags)
+vnet_interface_pcap_tx_trace (vlib_main_t *vm, vlib_node_runtime_t *node,
+ vlib_frame_t *frame, int in_interface_ouput)
{
- tso_init_buf_from_template_base (nb0, b0, flags, template_data_sz);
-
- *p_dst_left =
- clib_min (gso_size,
- vlib_buffer_get_default_data_size (vm) - template_data_sz);
- *p_dst_ptr = nb0->data + template_data_sz;
+ vnet_main_t *vnm = vnet_get_main ();
+ u32 n_left_from, *from;
+ u32 sw_if_index = ~0, hw_if_index = ~0;
+ vnet_pcap_t *pp = &vnm->pcap;
- tcp_header_t *tcp =
- (tcp_header_t *) (nb0->data + vnet_buffer (nb0)->l4_hdr_offset);
- tcp->seq_number = clib_host_to_net_u32 (next_tcp_seq);
-}
+ if (PREDICT_TRUE (pp->pcap_tx_enable == 0))
+ return;
-static_always_inline void
-tso_fixup_segmented_buf (vlib_buffer_t * b0, u8 tcp_flags, int is_ip6)
-{
- u16 l3_hdr_offset = vnet_buffer (b0)->l3_hdr_offset;
- u16 l4_hdr_offset = vnet_buffer (b0)->l4_hdr_offset;
- ip4_header_t *ip4 = (ip4_header_t *) (b0->data + l3_hdr_offset);
- ip6_header_t *ip6 = (ip6_header_t *) (b0->data + l3_hdr_offset);
- tcp_header_t *tcp = (tcp_header_t *) (b0->data + l4_hdr_offset);
-
- tcp->flags = tcp_flags;
-
- if (is_ip6)
- ip6->payload_length =
- clib_host_to_net_u16 (b0->current_length -
- vnet_buffer (b0)->l4_hdr_offset);
+ if (in_interface_ouput)
+ {
+ /* interface-output is called right before interface-output-template.
+ * We only want to capture packets here if there is a per-interface
+ * filter, in case it matches the sub-interface sw_if_index.
+ * If there is no per-interface filter configured, let the
+ * interface-output-template node deal with it */
+ if (pp->pcap_sw_if_index == 0)
+ return;
+ }
else
- ip4->length =
- clib_host_to_net_u16 (b0->current_length -
- vnet_buffer (b0)->l3_hdr_offset);
-}
-
-/**
- * Allocate the necessary number of ptd->split_buffers,
- * and segment the possibly chained buffer(s) from b0 into
- * there.
- *
- * Return the cumulative number of bytes sent or zero
- * if allocation failed.
- */
-
-static_always_inline u32
-tso_segment_buffer (vlib_main_t * vm, vnet_interface_per_thread_data_t * ptd,
- int do_tx_offloads, u32 sbi0, vlib_buffer_t * sb0,
- u32 n_bytes_b0)
-{
- u32 n_tx_bytes = 0;
- int is_ip4 = sb0->flags & VNET_BUFFER_F_IS_IP4;
- int is_ip6 = sb0->flags & VNET_BUFFER_F_IS_IP6;
- ASSERT (is_ip4 || is_ip6);
- ASSERT (sb0->flags & VNET_BUFFER_F_L2_HDR_OFFSET_VALID);
- ASSERT (sb0->flags & VNET_BUFFER_F_L3_HDR_OFFSET_VALID);
- ASSERT (sb0->flags & VNET_BUFFER_F_L4_HDR_OFFSET_VALID);
- u16 gso_size = vnet_buffer2 (sb0)->gso_size;
-
- int l4_hdr_sz = vnet_buffer2 (sb0)->gso_l4_hdr_sz;
- u8 save_tcp_flags = 0;
- u8 tcp_flags_no_fin_psh = 0;
- u32 next_tcp_seq = 0;
-
- tcp_header_t *tcp =
- (tcp_header_t *) (sb0->data + vnet_buffer (sb0)->l4_hdr_offset);
- next_tcp_seq = clib_net_to_host_u32 (tcp->seq_number);
- /* store original flags for last packet and reset FIN and PSH */
- save_tcp_flags = tcp->flags;
- tcp_flags_no_fin_psh = tcp->flags & ~(TCP_FLAG_FIN | TCP_FLAG_PSH);
- tcp->checksum = 0;
-
- u32 default_bflags =
- sb0->flags & ~(VNET_BUFFER_F_GSO | VLIB_BUFFER_NEXT_PRESENT);
- u16 l234_sz = vnet_buffer (sb0)->l4_hdr_offset + l4_hdr_sz;
- int first_data_size = clib_min (gso_size, sb0->current_length - l234_sz);
- next_tcp_seq += first_data_size;
-
- if (PREDICT_FALSE (!tso_alloc_tx_bufs (vm, ptd, sb0, l4_hdr_sz)))
- return 0;
-
- vlib_buffer_t *b0 = vlib_get_buffer (vm, ptd->split_buffers[0]);
- tso_init_buf_from_template_base (b0, sb0, default_bflags,
- l4_hdr_sz + first_data_size);
-
- u32 total_src_left = n_bytes_b0 - l234_sz - first_data_size;
- if (total_src_left)
{
- /* Need to copy more segments */
- u8 *src_ptr, *dst_ptr;
- u16 src_left, dst_left;
- /* current source buffer */
- vlib_buffer_t *csb0 = sb0;
- u32 csbi0 = sbi0;
- /* current dest buffer */
- vlib_buffer_t *cdb0;
- u16 dbi = 1; /* the buffer [0] is b0 */
-
- src_ptr = sb0->data + l234_sz + first_data_size;
- src_left = sb0->current_length - l234_sz - first_data_size;
- b0->current_length = l234_sz + first_data_size;
-
- tso_fixup_segmented_buf (b0, tcp_flags_no_fin_psh, is_ip6);
- if (do_tx_offloads)
- calc_checksums (vm, b0);
-
- /* grab a second buffer and prepare the loop */
- ASSERT (dbi < vec_len (ptd->split_buffers));
- cdb0 = vlib_get_buffer (vm, ptd->split_buffers[dbi++]);
- tso_init_buf_from_template (vm, cdb0, b0, l234_sz, gso_size, &dst_ptr,
- &dst_left, next_tcp_seq, default_bflags);
-
- /* an arbitrary large number to catch the runaway loops */
- int nloops = 2000;
- while (total_src_left)
- {
- if (nloops-- <= 0)
- clib_panic ("infinite loop detected");
- u16 bytes_to_copy = clib_min (src_left, dst_left);
-
- clib_memcpy_fast (dst_ptr, src_ptr, bytes_to_copy);
+ vnet_interface_output_runtime_t *rt = (void *) node->runtime_data;
+ sw_if_index = rt->sw_if_index;
+ }
- src_left -= bytes_to_copy;
- src_ptr += bytes_to_copy;
- total_src_left -= bytes_to_copy;
- dst_left -= bytes_to_copy;
- dst_ptr += bytes_to_copy;
- next_tcp_seq += bytes_to_copy;
- cdb0->current_length += bytes_to_copy;
+ n_left_from = frame->n_vectors;
+ from = vlib_frame_vector_args (frame);
- if (0 == src_left)
- {
- int has_next = (csb0->flags & VLIB_BUFFER_NEXT_PRESENT);
- u32 next_bi = csb0->next_buffer;
+ while (n_left_from > 0)
+ {
+ u32 bi0 = from[0];
+ vlib_buffer_t *b0 = vlib_get_buffer (vm, bi0);
+ from++;
+ n_left_from--;
- /* init src to the next buffer in chain */
- if (has_next)
- {
- csbi0 = next_bi;
- csb0 = vlib_get_buffer (vm, csbi0);
- src_left = csb0->current_length;
- src_ptr = csb0->data;
- }
- else
- {
- ASSERT (total_src_left == 0);
- break;
- }
- }
- if (0 == dst_left && total_src_left)
+ if (in_interface_ouput)
+ {
+ const u32 sii = vnet_buffer (b0)->sw_if_index[VLIB_TX];
+ if (PREDICT_FALSE (sii != sw_if_index))
{
- if (do_tx_offloads)
- calc_checksums (vm, cdb0);
- n_tx_bytes += cdb0->current_length;
- ASSERT (dbi < vec_len (ptd->split_buffers));
- cdb0 = vlib_get_buffer (vm, ptd->split_buffers[dbi++]);
- tso_init_buf_from_template (vm, cdb0, b0, l234_sz,
- gso_size, &dst_ptr, &dst_left,
- next_tcp_seq, default_bflags);
+ const vnet_hw_interface_t *hi =
+ vnet_get_sup_hw_interface (vnm, sii);
+ hw_if_index = hi->sw_if_index;
+ sw_if_index = sii;
}
+ if (hw_if_index == sw_if_index)
+ continue; /* defer to interface-output-template */
}
- tso_fixup_segmented_buf (cdb0, save_tcp_flags, is_ip6);
- if (do_tx_offloads)
- calc_checksums (vm, cdb0);
-
- n_tx_bytes += cdb0->current_length;
+ if (vnet_is_packet_pcaped (pp, b0, sw_if_index))
+ pcap_add_buffer (&pp->pcap_main, vm, bi0, pp->max_bytes_per_pkt);
}
- n_tx_bytes += b0->current_length;
- return n_tx_bytes;
}
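+/*
+ * Compute a hash for each packet, mask it to the lookup table size and
+ * translate it through the table into a per-packet tx queue id.
+ */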
static_always_inline void
-drop_one_buffer_and_count (vlib_main_t * vm, vnet_main_t * vnm,
- vlib_node_runtime_t * node, u32 * pbi0,
- u32 drop_error_code)
+hash_func_with_mask (void **p, u32 *hash, u32 n_packets, u32 *lookup_table,
+ u32 mask, vnet_hash_fn_t hf)
{
- u32 thread_index = vm->thread_index;
- vnet_interface_output_runtime_t *rt = (void *) node->runtime_data;
+ u32 n_left_from = n_packets;
- vlib_simple_counter_main_t *cm;
- cm =
- vec_elt_at_index (vnm->interface_main.sw_if_counters,
- VNET_INTERFACE_COUNTER_TX_ERROR);
- vlib_increment_simple_counter (cm, thread_index, rt->sw_if_index, 1);
-
- vlib_error_drop_buffers (vm, node, pbi0,
- /* buffer stride */ 1,
- /* n_buffers */ 1,
- VNET_INTERFACE_OUTPUT_NEXT_DROP,
- node->node_index, drop_error_code);
-}
+ hf (p, hash, n_packets);
-static_always_inline uword
-vnet_interface_output_node_inline_gso (vlib_main_t * vm,
- vlib_node_runtime_t * node,
- vlib_frame_t * frame,
- vnet_main_t * vnm,
- vnet_hw_interface_t * hi,
- int do_tx_offloads,
- int do_segmentation)
-{
- vnet_interface_output_runtime_t *rt = (void *) node->runtime_data;
- vnet_sw_interface_t *si;
- u32 n_left_to_tx, *from, *from_end, *to_tx;
- u32 n_bytes, n_buffers, n_packets;
- u32 n_bytes_b0, n_bytes_b1, n_bytes_b2, n_bytes_b3;
- u32 thread_index = vm->thread_index;
- vnet_interface_main_t *im = &vnm->interface_main;
- u32 next_index = VNET_INTERFACE_OUTPUT_NEXT_TX;
- u32 current_config_index = ~0;
- u8 arc = im->output_feature_arc_index;
- vnet_interface_per_thread_data_t *ptd =
- vec_elt_at_index (im->per_thread_data, thread_index);
-
- n_buffers = frame->n_vectors;
+ clib_array_mask_u32 (hash, mask, n_packets);
- if (node->flags & VLIB_NODE_FLAG_TRACE)
- vnet_interface_output_trace (vm, node, frame, n_buffers);
+ while (n_left_from >= 4)
+ {
+ hash[0] = lookup_table[hash[0]];
+ hash[1] = lookup_table[hash[1]];
+ hash[2] = lookup_table[hash[2]];
+ hash[3] = lookup_table[hash[3]];
- from = vlib_frame_vector_args (frame);
+ hash += 4;
+ n_left_from -= 4;
+ }
- if (rt->is_deleted)
- return vlib_error_drop_buffers (vm, node, from,
- /* buffer stride */ 1,
- n_buffers,
- VNET_INTERFACE_OUTPUT_NEXT_DROP,
- node->node_index,
- VNET_INTERFACE_OUTPUT_ERROR_INTERFACE_DELETED);
-
- si = vnet_get_sw_interface (vnm, rt->sw_if_index);
- hi = vnet_get_sup_hw_interface (vnm, rt->sw_if_index);
- if (!(si->flags & (VNET_SW_INTERFACE_FLAG_ADMIN_UP |
- VNET_SW_INTERFACE_FLAG_BOND_SLAVE)) ||
- !(hi->flags & VNET_HW_INTERFACE_FLAG_LINK_UP))
+ while (n_left_from > 0)
{
- vlib_simple_counter_main_t *cm;
+ hash[0] = lookup_table[hash[0]];
- cm = vec_elt_at_index (vnm->interface_main.sw_if_counters,
- VNET_INTERFACE_COUNTER_TX_ERROR);
- vlib_increment_simple_counter (cm, thread_index,
- rt->sw_if_index, n_buffers);
-
- return vlib_error_drop_buffers (vm, node, from,
- /* buffer stride */ 1,
- n_buffers,
- VNET_INTERFACE_OUTPUT_NEXT_DROP,
- node->node_index,
- VNET_INTERFACE_OUTPUT_ERROR_INTERFACE_DOWN);
+ hash += 1;
+ n_left_from -= 1;
}
+}
- from_end = from + n_buffers;
-
- /* Total byte count of all buffers. */
- n_bytes = 0;
- n_packets = 0;
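+/* copy scalar tx frame data (queue id etc.) into the frame, if supplied */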
+static_always_inline void
+store_tx_frame_scalar_data (vnet_hw_if_tx_frame_t *copy_frame,
+ vnet_hw_if_tx_frame_t *tf)
+{
+ if (copy_frame)
+ clib_memcpy_fast (tf, copy_frame, sizeof (vnet_hw_if_tx_frame_t));
+}
- /* interface-output feature arc handling */
- if (PREDICT_FALSE (vnet_have_features (arc, rt->sw_if_index)))
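+/*
+ * Enqueue buffers belonging to a single tx queue into the next frame.
+ * With a per-packet queue id array only the indices matching
+ * copy_frame->queue_id are copied; if the current frame cannot hold them
+ * all, the remainder spills into a fresh frame. Returns the number of
+ * buffers still left to enqueue.
+ */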
+static_always_inline u32
+enqueue_one_to_tx_node (vlib_main_t *vm, vlib_node_runtime_t *node, u32 *ppqi,
+ u32 *from, vnet_hw_if_tx_frame_t *copy_frame,
+ u32 n_vectors, u32 n_left, u32 next_index)
+{
+ u32 tmp[VLIB_FRAME_SIZE];
+ vlib_frame_bitmap_t mask = {};
+ vlib_frame_t *f;
+ vnet_hw_if_tx_frame_t *tf;
+ u32 *to;
+ u32 n_copy = 0, n_free = 0;
+
+ f = vlib_get_next_frame_internal (vm, node, next_index, 0);
+ tf = vlib_frame_scalar_args (f);
+
+ if (f->n_vectors > 0 &&
+ (!copy_frame || (tf->queue_id == copy_frame->queue_id)))
{
- vnet_feature_config_main_t *fcm;
- fcm = vnet_feature_get_config_main (arc);
- current_config_index = vnet_get_feature_config_index (arc,
- rt->sw_if_index);
- vnet_get_config_data (&fcm->config_main, &current_config_index,
- &next_index, 0);
+ /* append current next frame */
+ n_free = VLIB_FRAME_SIZE - f->n_vectors;
+ /*
+ * if the frame contains enough space for the worst case scenario,
+ * we can avoid the use of tmp
+ */
+ if (n_free >= n_left)
+ to = (u32 *) vlib_frame_vector_args (f) + f->n_vectors;
+ else
+ to = tmp;
}
-
- while (from < from_end)
+ else
{
- /* Get new next frame since previous incomplete frame may have less
- than VNET_FRAME_SIZE vectors in it. */
- vlib_get_new_next_frame (vm, node, next_index, to_tx, n_left_to_tx);
-
- while (from + 8 <= from_end && n_left_to_tx >= 4)
+ if (f->n_vectors > 0)
{
- u32 bi0, bi1, bi2, bi3;
- vlib_buffer_t *b0, *b1, *b2, *b3;
- u32 tx_swif0, tx_swif1, tx_swif2, tx_swif3;
- u32 or_flags;
-
- /* Prefetch next iteration. */
- vlib_prefetch_buffer_with_index (vm, from[4], LOAD);
- vlib_prefetch_buffer_with_index (vm, from[5], LOAD);
- vlib_prefetch_buffer_with_index (vm, from[6], LOAD);
- vlib_prefetch_buffer_with_index (vm, from[7], LOAD);
-
- bi0 = from[0];
- bi1 = from[1];
- bi2 = from[2];
- bi3 = from[3];
- to_tx[0] = bi0;
- to_tx[1] = bi1;
- to_tx[2] = bi2;
- to_tx[3] = bi3;
- if (!do_segmentation)
- {
- from += 4;
- to_tx += 4;
- n_left_to_tx -= 4;
- }
-
- b0 = vlib_get_buffer (vm, bi0);
- b1 = vlib_get_buffer (vm, bi1);
- b2 = vlib_get_buffer (vm, bi2);
- b3 = vlib_get_buffer (vm, bi3);
-
- if (do_segmentation)
- {
- or_flags = b0->flags | b1->flags | b2->flags | b3->flags;
-
- /* go to single loop if we need TSO segmentation */
- if (PREDICT_FALSE (or_flags & VNET_BUFFER_F_GSO))
- break;
- from += 4;
- to_tx += 4;
- n_left_to_tx -= 4;
- }
+ /* current frame doesn't fit - grab empty one */
+ f = vlib_get_next_frame_internal (vm, node, next_index, 1);
+ tf = vlib_frame_scalar_args (f);
+ }
- /* Be grumpy about zero length buffers for benefit of
- driver tx function. */
- ASSERT (b0->current_length > 0);
- ASSERT (b1->current_length > 0);
- ASSERT (b2->current_length > 0);
- ASSERT (b3->current_length > 0);
-
- n_bytes_b0 = vlib_buffer_length_in_chain (vm, b0);
- n_bytes_b1 = vlib_buffer_length_in_chain (vm, b1);
- n_bytes_b2 = vlib_buffer_length_in_chain (vm, b2);
- n_bytes_b3 = vlib_buffer_length_in_chain (vm, b3);
- tx_swif0 = vnet_buffer (b0)->sw_if_index[VLIB_TX];
- tx_swif1 = vnet_buffer (b1)->sw_if_index[VLIB_TX];
- tx_swif2 = vnet_buffer (b2)->sw_if_index[VLIB_TX];
- tx_swif3 = vnet_buffer (b3)->sw_if_index[VLIB_TX];
-
- n_bytes += n_bytes_b0 + n_bytes_b1;
- n_bytes += n_bytes_b2 + n_bytes_b3;
- n_packets += 4;
-
- if (PREDICT_FALSE (current_config_index != ~0))
- {
- vnet_buffer (b0)->feature_arc_index = arc;
- vnet_buffer (b1)->feature_arc_index = arc;
- vnet_buffer (b2)->feature_arc_index = arc;
- vnet_buffer (b3)->feature_arc_index = arc;
- b0->current_config_index = current_config_index;
- b1->current_config_index = current_config_index;
- b2->current_config_index = current_config_index;
- b3->current_config_index = current_config_index;
- }
+ /* empty frame - store scalar data */
+ store_tx_frame_scalar_data (copy_frame, tf);
+ to = vlib_frame_vector_args (f);
+ n_free = VLIB_FRAME_SIZE;
+ }
- /* update vlan subif tx counts, if required */
- if (PREDICT_FALSE (tx_swif0 != rt->sw_if_index))
- {
- vlib_increment_combined_counter (im->combined_sw_if_counters +
- VNET_INTERFACE_COUNTER_TX,
- thread_index, tx_swif0, 1,
- n_bytes_b0);
- }
+ /*
+ * a per-packet queue id array was given: compare each entry against the
+ * frame's queue_id and copy only the matching buffer indices from -> to
+ */
+ if (ppqi)
+ {
+ clib_mask_compare_u32 (copy_frame->queue_id, ppqi, mask, n_vectors);
+ n_copy = clib_compress_u32 (to, from, mask, n_vectors);
- if (PREDICT_FALSE (tx_swif1 != rt->sw_if_index))
- {
+ if (n_copy == 0)
+ return n_left;
+ }
+ else
+ {
+ /*
+ * no work required, just copy all buffer indices from -> to
+ */
+ n_copy = n_left;
+ vlib_buffer_copy_indices (to, from, n_copy);
+ }
- vlib_increment_combined_counter (im->combined_sw_if_counters +
- VNET_INTERFACE_COUNTER_TX,
- thread_index, tx_swif1, 1,
- n_bytes_b1);
- }
+ if (to != tmp)
+ {
+ /* indices already written to frame, just close it */
+ vlib_put_next_frame (vm, node, next_index, n_free - n_copy);
+ }
+ else if (n_free >= n_copy)
+ {
+ /* enough space in the existing frame */
+ to = (u32 *) vlib_frame_vector_args (f) + f->n_vectors;
+ vlib_buffer_copy_indices (to, tmp, n_copy);
+ vlib_put_next_frame (vm, node, next_index, n_free - n_copy);
+ }
+ else
+ {
+ /* full frame */
+ to = (u32 *) vlib_frame_vector_args (f) + f->n_vectors;
+ vlib_buffer_copy_indices (to, tmp, n_free);
+ vlib_put_next_frame (vm, node, next_index, 0);
+
+ /* second frame */
+ u32 n_2nd_frame = n_copy - n_free;
+ f = vlib_get_next_frame_internal (vm, node, next_index, 1);
+ tf = vlib_frame_scalar_args (f);
+ /* empty frame - store scalar data */
+ store_tx_frame_scalar_data (copy_frame, tf);
+ to = vlib_frame_vector_args (f);
+ vlib_buffer_copy_indices (to, tmp + n_free, n_2nd_frame);
+ vlib_put_next_frame (vm, node, next_index,
+ VLIB_FRAME_SIZE - n_2nd_frame);
+ }
- if (PREDICT_FALSE (tx_swif2 != rt->sw_if_index))
- {
+ return n_left - n_copy;
+}
- vlib_increment_combined_counter (im->combined_sw_if_counters +
- VNET_INTERFACE_COUNTER_TX,
- thread_index, tx_swif2, 1,
- n_bytes_b2);
- }
- if (PREDICT_FALSE (tx_swif3 != rt->sw_if_index))
- {
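+/*
+ * Hand a vector of buffers to the interface tx node: a single frame for
+ * legacy drivers (r == 0) or a single tx queue, or one frame per queue
+ * when multiple tx queues are mapped onto this thread.
+ */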
+static_always_inline void
+enqueue_to_tx_node (vlib_main_t *vm, vlib_node_runtime_t *node,
+ vnet_hw_interface_t *hi, u32 next_index,
+ vnet_hw_if_output_node_runtime_t *r, u32 *from, void **p,
+ u32 n_vectors)
+{
+ u32 n_left = n_vectors;
- vlib_increment_combined_counter (im->combined_sw_if_counters +
- VNET_INTERFACE_COUNTER_TX,
- thread_index, tx_swif3, 1,
- n_bytes_b3);
- }
+ ASSERT (n_vectors <= VLIB_FRAME_SIZE);
- if (!do_segmentation)
- or_flags = b0->flags | b1->flags | b2->flags | b3->flags;
+ /*
+ * backward compatibility for drivers not integrated with the new tx infra.
+ */
+ if (r == 0)
+ {
+ n_left = enqueue_one_to_tx_node (vm, node, NULL, from, NULL, n_vectors,
+ n_left, next_index);
+ }
+ /*
+ * only 1 tx queue of given interface is available on given thread
+ */
+ else if (r->n_queues == 1)
+ {
+ n_left = enqueue_one_to_tx_node (vm, node, NULL, from, r->frame,
+ n_vectors, n_left, next_index);
+ }
+ /*
+ * multi tx-queues use case
+ */
+ else if (r->n_queues > 1)
+ {
+ u32 qids[VLIB_FRAME_SIZE];
- if (do_tx_offloads)
- {
- if (or_flags &
- (VNET_BUFFER_F_OFFLOAD_TCP_CKSUM |
- VNET_BUFFER_F_OFFLOAD_UDP_CKSUM |
- VNET_BUFFER_F_OFFLOAD_IP_CKSUM))
- {
- calc_checksums (vm, b0);
- calc_checksums (vm, b1);
- calc_checksums (vm, b2);
- calc_checksums (vm, b3);
- }
- }
- }
+ hash_func_with_mask (p, qids, n_vectors, r->lookup_table,
+ vec_len (r->lookup_table) - 1, hi->hf);
- while (from + 1 <= from_end && n_left_to_tx >= 1)
+ for (u32 i = 0; i < r->n_queues; i++)
{
- u32 bi0;
- vlib_buffer_t *b0;
- u32 tx_swif0;
+ n_left = enqueue_one_to_tx_node (vm, node, qids, from, &r->frame[i],
+ n_vectors, n_left, next_index);
+ if (n_left == 0)
+ break;
+ }
+ }
+ else
+ ASSERT (0);
+}
- bi0 = from[0];
- to_tx[0] = bi0;
- from += 1;
- to_tx += 1;
- n_left_to_tx -= 1;
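+/*
+ * interface-output-template: count tx packets/bytes, resolve checksum
+ * offloads when the hardware cannot, handle feature arcs, sub-interfaces
+ * and multi-tx-queue interfaces, then enqueue buffers to the tx node
+ * (or to the next feature node).
+ */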
+VLIB_NODE_FN (vnet_interface_output_node)
+(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
+{
+ vnet_main_t *vnm = vnet_get_main ();
+ vnet_interface_main_t *im = &vnm->interface_main;
+ vlib_combined_counter_main_t *ccm;
+ vnet_hw_interface_t *hi;
+ vnet_sw_interface_t *si;
+ vnet_interface_output_runtime_t *rt = (void *) node->runtime_data;
+ vnet_hw_if_output_node_runtime_t *r = 0;
+ vlib_buffer_t *bufs[VLIB_FRAME_SIZE];
+ u32 n_bytes, n_buffers = frame->n_vectors;
+ u32 config_index = ~0;
+ u32 sw_if_index = rt->sw_if_index;
+ u32 next_index = VNET_INTERFACE_OUTPUT_NEXT_TX;
+ u32 ti = vm->thread_index;
+ u8 arc = im->output_feature_arc_index;
+ int arc_or_subif = 0;
+ int do_tx_offloads = 0;
+ void *ptr[VLIB_FRAME_SIZE], **p = ptr;
+ u8 is_parr = 0;
+ u32 *from;
- b0 = vlib_get_buffer (vm, bi0);
+ if (node->flags & VLIB_NODE_FLAG_TRACE)
+ vnet_interface_output_trace (vm, node, frame, n_buffers);
- /* Be grumpy about zero length buffers for benefit of
- driver tx function. */
- ASSERT (b0->current_length > 0);
+ from = vlib_frame_vector_args (frame);
- n_bytes_b0 = vlib_buffer_length_in_chain (vm, b0);
- tx_swif0 = vnet_buffer (b0)->sw_if_index[VLIB_TX];
- n_bytes += n_bytes_b0;
- n_packets += 1;
+ if (rt->is_deleted)
+ return vlib_error_drop_buffers (
+ vm, node, from,
+ /* buffer stride */ 1, n_buffers, VNET_INTERFACE_OUTPUT_NEXT_DROP,
+ node->node_index, VNET_INTERFACE_OUTPUT_ERROR_INTERFACE_DELETED);
- if (PREDICT_FALSE (current_config_index != ~0))
- {
- vnet_buffer (b0)->feature_arc_index = arc;
- b0->current_config_index = current_config_index;
- }
+ vnet_interface_pcap_tx_trace (vm, node, frame, 0 /* in_interface_ouput */);
- if (do_segmentation)
- {
- if (PREDICT_FALSE (b0->flags & VNET_BUFFER_F_GSO))
- {
- /*
- * Undo the enqueue of the b0 - it is not going anywhere,
- * and will be freed either after it's segmented or
- * when dropped, if there is no buffers to segment into.
- */
- to_tx -= 1;
- n_left_to_tx += 1;
- /* undo the counting. */
- n_bytes -= n_bytes_b0;
- n_packets -= 1;
-
- u32 n_tx_bytes = 0;
-
- n_tx_bytes =
- tso_segment_buffer (vm, ptd, do_tx_offloads, bi0, b0,
- n_bytes_b0);
-
- if (PREDICT_FALSE (n_tx_bytes == 0))
- {
- drop_one_buffer_and_count (vm, vnm, node, from - 1,
- VNET_INTERFACE_OUTPUT_ERROR_NO_BUFFERS_FOR_GSO);
- continue;
- }
-
- u16 n_tx_bufs = vec_len (ptd->split_buffers);
- u32 *from_tx_seg = ptd->split_buffers;
-
- while (n_tx_bufs > 0)
- {
- if (n_tx_bufs >= n_left_to_tx)
- {
- while (n_left_to_tx > 0)
- {
- to_tx[0] = from_tx_seg[0];
- to_tx += 1;
- from_tx_seg += 1;
- n_left_to_tx -= 1;
- n_tx_bufs -= 1;
- n_packets += 1;
- }
- vlib_put_next_frame (vm, node, next_index,
- n_left_to_tx);
- vlib_get_new_next_frame (vm, node, next_index,
- to_tx, n_left_to_tx);
- }
- else
- {
- while (n_tx_bufs > 0)
- {
- to_tx[0] = from_tx_seg[0];
- to_tx += 1;
- from_tx_seg += 1;
- n_left_to_tx -= 1;
- n_tx_bufs -= 1;
- n_packets += 1;
- }
- }
- }
- n_bytes += n_tx_bytes;
- if (PREDICT_FALSE (tx_swif0 != rt->sw_if_index))
- {
-
- vlib_increment_combined_counter
- (im->combined_sw_if_counters +
- VNET_INTERFACE_COUNTER_TX, thread_index, tx_swif0,
- _vec_len (ptd->split_buffers), n_tx_bytes);
- }
- /* The buffers were enqueued. Reset the length */
- _vec_len (ptd->split_buffers) = 0;
- /* Free the now segmented buffer */
- vlib_buffer_free_one (vm, bi0);
- continue;
- }
- }
+ vlib_get_buffers (vm, from, bufs, n_buffers);
- if (PREDICT_FALSE (tx_swif0 != rt->sw_if_index))
- {
+ si = vnet_get_sw_interface (vnm, sw_if_index);
+ hi = vnet_get_sup_hw_interface (vnm, sw_if_index);
- vlib_increment_combined_counter (im->combined_sw_if_counters +
- VNET_INTERFACE_COUNTER_TX,
- thread_index, tx_swif0, 1,
- n_bytes_b0);
- }
+ if (!(si->flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP) ||
+ !(hi->flags & VNET_HW_INTERFACE_FLAG_LINK_UP))
+ {
+ vlib_simple_counter_main_t *cm;
- if (do_tx_offloads)
- calc_checksums (vm, b0);
- }
+ cm = vec_elt_at_index (vnm->interface_main.sw_if_counters,
+ VNET_INTERFACE_COUNTER_TX_ERROR);
+ vlib_increment_simple_counter (cm, ti, sw_if_index, n_buffers);
- vlib_put_next_frame (vm, node, next_index, n_left_to_tx);
+ return vlib_error_drop_buffers (
+ vm, node, from,
+ /* buffer stride */ 1, n_buffers, VNET_INTERFACE_OUTPUT_NEXT_DROP,
+ node->node_index, VNET_INTERFACE_OUTPUT_ERROR_INTERFACE_DOWN);
}
- /* Update main interface stats. */
- vlib_increment_combined_counter (im->combined_sw_if_counters
- + VNET_INTERFACE_COUNTER_TX,
- thread_index,
- rt->sw_if_index, n_packets, n_bytes);
- return n_buffers;
-}
-#endif /* CLIB_MARCH_VARIANT */
-
-static_always_inline void vnet_interface_pcap_tx_trace
- (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame,
- int sw_if_index_from_buffer)
-{
- u32 n_left_from, *from;
- u32 sw_if_index;
+ if (hi->output_node_thread_runtimes)
+ r = vec_elt_at_index (hi->output_node_thread_runtimes, vm->thread_index);
- if (PREDICT_TRUE (vm->pcap[VLIB_TX].pcap_enable == 0))
- return;
+ if (r)
+ {
+ /*
+ * tx queue of given interface is not available on given thread
+ */
+ if (r->n_queues == 0)
+ return vlib_error_drop_buffers (
+ vm, node, from,
+ /* buffer stride */ 1, n_buffers, VNET_INTERFACE_OUTPUT_NEXT_DROP,
+ node->node_index, VNET_INTERFACE_OUTPUT_ERROR_NO_TX_QUEUE);
+ /*
+ * multiple tx queues available on given thread
+ */
+ else if (r->n_queues > 1)
+ /* construct array of packet pointers */
+ is_parr = 1;
+ }
- if (sw_if_index_from_buffer == 0)
+ /* interface-output feature arc handling */
+ if (PREDICT_FALSE (vnet_have_features (arc, sw_if_index)))
{
- vnet_interface_output_runtime_t *rt = (void *) node->runtime_data;
- sw_if_index = rt->sw_if_index;
+ vnet_feature_config_main_t *fcm;
+ fcm = vnet_feature_get_config_main (arc);
+ config_index = vnet_get_feature_config_index (arc, sw_if_index);
+ vnet_get_config_data (&fcm->config_main, &config_index, &next_index, 0);
+ arc_or_subif = 1;
}
+ else if (hash_elts (hi->sub_interface_sw_if_index_by_id))
+ arc_or_subif = 1;
+
+ ccm = im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX;
+
+ /* if the interface cannot offload all of the IP4, TCP and UDP checksums,
+ * compute them in software here before handing packets to the driver */
+ if ((hi->caps & VNET_HW_IF_CAP_TX_CKSUM) != VNET_HW_IF_CAP_TX_CKSUM)
+ do_tx_offloads = 1;
+
+ // basic processing
+ if (do_tx_offloads == 0 && arc_or_subif == 0 && is_parr == 0)
+ n_bytes = vnet_interface_output_node_inline (
+ vm, sw_if_index, ccm, bufs, NULL, config_index, arc, n_buffers, 0);
+ // basic processing + tx offloads
+ else if (do_tx_offloads == 1 && arc_or_subif == 0 && is_parr == 0)
+ n_bytes = vnet_interface_output_node_inline (
+ vm, sw_if_index, ccm, bufs, NULL, config_index, arc, n_buffers, 1);
+ // basic processing + tx offloads + vlans + arcs
+ else if (do_tx_offloads == 1 && arc_or_subif == 1 && is_parr == 0)
+ n_bytes = vnet_interface_output_node_inline (
+ vm, sw_if_index, ccm, bufs, NULL, config_index, arc, n_buffers, 2);
+ // basic processing + tx offloads + vlans + arcs + multi-txqs
else
- sw_if_index = ~0;
+ n_bytes = vnet_interface_output_node_inline (
+ vm, sw_if_index, ccm, bufs, p, config_index, arc, n_buffers, 3);
- n_left_from = frame->n_vectors;
from = vlib_frame_vector_args (frame);
-
- while (n_left_from > 0)
+ if (PREDICT_TRUE (next_index == VNET_INTERFACE_OUTPUT_NEXT_TX))
{
- u32 bi0 = from[0];
- vlib_buffer_t *b0 = vlib_get_buffer (vm, bi0);
-
- if (sw_if_index_from_buffer)
- sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_TX];
-
- if (vm->pcap[VLIB_TX].pcap_sw_if_index == 0 ||
- vm->pcap[VLIB_TX].pcap_sw_if_index == sw_if_index)
- pcap_add_buffer (&vm->pcap[VLIB_TX].pcap_main, vm, bi0, 512);
- from++;
- n_left_from--;
+ enqueue_to_tx_node (vm, node, hi, next_index, r, from, ptr,
+ frame->n_vectors);
}
-}
-
-#ifndef CLIB_MARCH_VARIANT
-static_always_inline uword
-vnet_interface_output_node_inline (vlib_main_t * vm,
- vlib_node_runtime_t * node,
- vlib_frame_t * frame, vnet_main_t * vnm,
- vnet_hw_interface_t * hi,
- int do_tx_offloads)
-{
- vnet_interface_pcap_tx_trace (vm, node, frame,
- 0 /* sw_if_index_from_buffer */ );
-
- /*
- * The 3-headed "if" is here because we want to err on the side
- * of not impacting the non-GSO performance - so for the more
- * common case of no GSO interfaces we want to prevent the
- * segmentation codepath from being there altogether.
- */
- if (PREDICT_TRUE (vnm->interface_main.gso_interface_count == 0))
- return vnet_interface_output_node_inline_gso (vm, node, frame, vnm, hi,
- do_tx_offloads,
- /* do_segmentation */ 0);
- else if (hi->flags & VNET_HW_INTERFACE_FLAG_SUPPORTS_GSO)
- return vnet_interface_output_node_inline_gso (vm, node, frame, vnm, hi,
- do_tx_offloads,
- /* do_segmentation */ 0);
else
- return vnet_interface_output_node_inline_gso (vm, node, frame, vnm, hi,
- do_tx_offloads,
- /* do_segmentation */ 1);
-}
-
-uword
-vnet_interface_output_node (vlib_main_t * vm, vlib_node_runtime_t * node,
- vlib_frame_t * frame)
-{
- vnet_main_t *vnm = vnet_get_main ();
- vnet_hw_interface_t *hi;
- vnet_interface_output_runtime_t *rt = (void *) node->runtime_data;
- hi = vnet_get_sup_hw_interface (vnm, rt->sw_if_index);
-
- vnet_interface_pcap_tx_trace (vm, node, frame,
- 0 /* sw_if_index_from_buffer */ );
+ {
+ vlib_buffer_enqueue_to_single_next (vm, node, from, next_index,
+ frame->n_vectors);
+ }
- if (hi->flags & VNET_HW_INTERFACE_FLAG_SUPPORTS_TX_L4_CKSUM_OFFLOAD)
- return vnet_interface_output_node_inline (vm, node, frame, vnm, hi,
- /* do_tx_offloads */ 0);
- else
- return vnet_interface_output_node_inline (vm, node, frame, vnm, hi,
- /* do_tx_offloads */ 1);
+ /* Update main interface stats. */
+ vlib_increment_combined_counter (ccm, ti, sw_if_index, n_buffers, n_bytes);
+ return n_buffers;
}
-#endif /* CLIB_MARCH_VARIANT */
+
+VLIB_REGISTER_NODE (vnet_interface_output_node) = {
+ .name = "interface-output-template",
+ .vector_size = sizeof (u32),
+};
/* Use buffer's sw_if_index[VNET_TX] to choose output interface. */
VLIB_NODE_FN (vnet_per_buffer_interface_output_node) (vlib_main_t * vm,
u32 n_left_to_next, *from, *to_next;
u32 n_left_from, next_index;
- vnet_interface_pcap_tx_trace (vm, node, frame,
- 1 /* sw_if_index_from_buffer */ );
+ vnet_interface_pcap_tx_trace (vm, node, frame, 1 /* in_interface_ouput */);
n_left_from = frame->n_vectors;
typedef struct vnet_error_trace_t_
{
u32 sw_if_index;
+ i8 details_valid;
+ u8 is_ip6;
+ u8 pad[2];
+ u16 mactype;
+ ip46_address_t src, dst;
} vnet_error_trace_t;
-
static u8 *
format_vnet_error_trace (u8 * s, va_list * va)
{
CLIB_UNUSED (vlib_node_t * node) = va_arg (*va, vlib_node_t *);
vnet_error_trace_t *t = va_arg (*va, vnet_error_trace_t *);
- s = format (s, "rx:%U", format_vnet_sw_if_index_name,
- vnet_get_main (), t->sw_if_index);
-
+ /* Normal, non-catchup trace */
+ if (t->details_valid == 0)
+ {
+ s = format (s, "rx:%U", format_vnet_sw_if_index_name,
+ vnet_get_main (), t->sw_if_index);
+ }
+ else if (t->details_valid == 1)
+ {
+ /* The trace capture code didn't understand the mactype */
+ s = format (s, "mactype 0x%4x (not decoded)", t->mactype);
+ }
+ else if (t->details_valid == 2)
+ {
+ /* Dump the src/dst addresses */
+ if (t->is_ip6 == 0)
+ s = format (s, "IP4: %U -> %U",
+ format_ip4_address, &t->src.ip4,
+ format_ip4_address, &t->dst.ip4);
+ else
+ s = format (s, "IP6: %U -> %U",
+ format_ip6_address, &t->src.ip6,
+ format_ip6_address, &t->dst.ip6);
+ }
return s;
}
if (b0->flags & VLIB_BUFFER_IS_TRACED)
{
- t0 = vlib_add_trace (vm, node, b0, sizeof (t0[0]));
+ t0 = vlib_add_trace (vm, node, b0,
+ STRUCT_OFFSET_OF (vnet_error_trace_t, pad));
t0->sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_RX];
+ t0->details_valid = 0;
}
if (b1->flags & VLIB_BUFFER_IS_TRACED)
{
- t1 = vlib_add_trace (vm, node, b1, sizeof (t1[0]));
+ t1 = vlib_add_trace (vm, node, b1,
+ STRUCT_OFFSET_OF (vnet_error_trace_t, pad));
t1->sw_if_index = vnet_buffer (b1)->sw_if_index[VLIB_RX];
+ t1->details_valid = 0;
}
buffers += 2;
n_left -= 2;
if (b0->flags & VLIB_BUFFER_IS_TRACED)
{
- t0 = vlib_add_trace (vm, node, b0, sizeof (t0[0]));
+ t0 = vlib_add_trace (vm, node, b0,
+ STRUCT_OFFSET_OF (vnet_error_trace_t, pad));
t0->sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_RX];
+ t0->details_valid = 0;
}
buffers += 1;
n_left -= 1;
VNET_ERROR_N_DISPOSITION,
} vnet_error_disposition_t;
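+/*
+ * Best-effort trace of a dropped packet: rewind to the L2 header when the
+ * offset is valid, record the mactype and, for IP4/IP6, the src and dst
+ * addresses.
+ */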
+static void
+drop_catchup_trace (vlib_main_t * vm,
+ vlib_node_runtime_t * node, vlib_buffer_t * b)
+{
+ /* Can we safely rewind the buffer? If not, fagedaboudit */
+ if (b->flags & VNET_BUFFER_F_L2_HDR_OFFSET_VALID)
+ {
+ vnet_error_trace_t *t;
+ ip4_header_t *ip4;
+ ip6_header_t *ip6;
+ ethernet_header_t *eh;
+ i16 delta;
+
+ t = vlib_add_trace (vm, node, b, sizeof (*t));
+ delta = vnet_buffer (b)->l2_hdr_offset - b->current_data;
+ vlib_buffer_advance (b, delta);
+
+ eh = vlib_buffer_get_current (b);
+ /* Save mactype */
+ t->mactype = clib_net_to_host_u16 (eh->type);
+ t->details_valid = 1;
+ switch (t->mactype)
+ {
+ case ETHERNET_TYPE_IP4:
+ ip4 = (void *) (eh + 1);
+ t->details_valid = 2;
+ t->is_ip6 = 0;
+ t->src.ip4.as_u32 = ip4->src_address.as_u32;
+ t->dst.ip4.as_u32 = ip4->dst_address.as_u32;
+ break;
+
+ case ETHERNET_TYPE_IP6:
+ ip6 = (void *) (eh + 1);
+ t->details_valid = 2;
+ t->is_ip6 = 1;
+ clib_memcpy_fast (t->src.as_u8, ip6->src_address.as_u8,
+ sizeof (ip6_address_t));
+ clib_memcpy_fast (t->dst.as_u8, ip6->dst_address.as_u8,
+ sizeof (ip6_address_t));
+ break;
+
+ default:
+ /* Dunno, do nothing, leave details_valid alone */
+ break;
+ }
+ /* Restore current data (probably unnecessary) */
+ vlib_buffer_advance (b, -delta);
+ }
+}
+
static_always_inline uword
interface_drop_punt (vlib_main_t * vm,
vlib_node_runtime_t * node,
u32 sw_if_indices[VLIB_FRAME_SIZE];
vlib_simple_counter_main_t *cm;
u16 nexts[VLIB_FRAME_SIZE];
+ u32 n_trace;
vnet_main_t *vnm;
vnm = vnet_get_main ();
vlib_get_buffers (vm, from, bufs, n_left);
+ /* "trace add error-drop NNN?" */
+ if (PREDICT_FALSE ((n_trace = vlib_get_trace_count (vm, node))))
+ {
+ /* If pkts aren't otherwise traced... */
+ if ((node->flags & VLIB_NODE_FLAG_TRACE) == 0)
+ {
+ /* Trace them from here */
+ node->flags |= VLIB_NODE_FLAG_TRACE;
+ while (n_trace && n_left)
+ {
+ if (PREDICT_TRUE
+ (vlib_trace_buffer (vm, node, 0 /* next_index */ , b[0],
+ 0 /* follow chain */ )))
+ {
+ /*
+ * Here we have a wireshark dissector problem.
+ * Packets may be well-formed, or not. We
+ * must not blow chunks in any case.
+ *
+ * Try to produce trace records which will help
+ * folks understand what's going on.
+ */
+ drop_catchup_trace (vm, node, b[0]);
+ n_trace--;
+ }
+ n_left--;
+ b++;
+ }
+ }
+
+ vlib_set_trace_count (vm, node, n_trace);
+ b = bufs;
+ n_left = frame->n_vectors;
+ }
+
if (node->flags & VLIB_NODE_FLAG_TRACE)
interface_trace_buffers (vm, node, frame);
static inline void
pcap_drop_trace (vlib_main_t * vm,
- vnet_interface_main_t * im, vlib_frame_t * f)
+ vnet_interface_main_t * im,
+ vnet_pcap_t * pp, vlib_frame_t * f)
{
u32 *from;
u32 n_left = f->n_vectors;
u32 bi0;
i16 save_current_data;
u16 save_current_length;
+ vlib_error_main_t *em = &vm->error_main;
from = vlib_frame_vector_args (f);
&& hash_get (im->pcap_drop_filter_hash, b0->error))
continue;
+ if (!vnet_is_packet_pcaped (pp, b0, ~0))
+ continue; /* not matching, skip */
+
/* Trace all drops, or drops received on a specific interface */
- if (im->pcap_sw_if_index == 0 ||
- im->pcap_sw_if_index == vnet_buffer (b0)->sw_if_index[VLIB_RX])
+ save_current_data = b0->current_data;
+ save_current_length = b0->current_length;
+
+ /*
+ * Typically, we'll need to rewind the buffer.
+ * If l2_hdr_offset is valid, rewind to the start of the L2 header;
+ * this may not be the buffer start if vlan tags were popped.
+ * Otherwise, rewind to the buffer start and hope for the best.
+ */
+ if (b0->flags & VNET_BUFFER_F_L2_HDR_OFFSET_VALID)
{
- save_current_data = b0->current_data;
- save_current_length = b0->current_length;
-
- /*
- * Typically, we'll need to rewind the buffer
- */
- if (b0->current_data > 0)
- vlib_buffer_advance (b0, (word) - b0->current_data);
-
- pcap_add_buffer (&im->pcap_main, vm, bi0, 512);
-
- b0->current_data = save_current_data;
- b0->current_length = save_current_length;
+ if (b0->current_data > vnet_buffer (b0)->l2_hdr_offset)
+ vlib_buffer_advance (b0, vnet_buffer (b0)->l2_hdr_offset -
+ b0->current_data);
}
+ else if (b0->current_data > 0)
+ {
+ vlib_buffer_advance (b0, (word) -b0->current_data);
+ }
+
+ {
+ vlib_buffer_t *last = b0;
+ u32 error_node_index;
+ int drop_string_len;
+ vlib_node_t *n;
+ /* Length of the error string */
+ int error_string_len =
+ clib_strnlen (em->counters_heap[b0->error].name, 128);
+
+ /* Dig up the drop node */
+ error_node_index = vm->node_main.node_by_error[b0->error];
+ n = vlib_get_node (vm, error_node_index);
+
+ /* Length of full drop string, w/ "nodename: " prepended */
+ drop_string_len = error_string_len + vec_len (n->name) + 2;
+
+ /* Find the last buffer in the chain */
+ while (last->flags & VLIB_BUFFER_NEXT_PRESENT)
+ last = vlib_get_buffer (vm, last->next_buffer);
+
+ /*
+ * Append <nodename>: <error-string> to the capture,
+ * only if we can do that without allocating a new buffer.
+ */
+ if (PREDICT_TRUE ((last->current_data + last->current_length) <
+ (VLIB_BUFFER_DEFAULT_DATA_SIZE - drop_string_len)))
+ {
+ clib_memcpy_fast (last->data + last->current_data +
+ last->current_length,
+ n->name, vec_len (n->name));
+ clib_memcpy_fast (last->data + last->current_data +
+ last->current_length + vec_len (n->name),
+ ": ", 2);
+ clib_memcpy_fast (last->data + last->current_data +
+ last->current_length + vec_len (n->name) + 2,
+ em->counters_heap[b0->error].name,
+ error_string_len);
+ last->current_length += drop_string_len;
+ b0->flags &= ~(VLIB_BUFFER_TOTAL_LENGTH_VALID);
+ pcap_add_buffer (&pp->pcap_main, vm, bi0, pp->max_bytes_per_pkt);
+ last->current_length -= drop_string_len;
+ b0->current_data = save_current_data;
+ b0->current_length = save_current_length;
+ continue;
+ }
+ }
+
+ /*
+ * Didn't have space in the last buffer, here's the dropped
+ * packet as-is
+ */
+ pcap_add_buffer (&pp->pcap_main, vm, bi0, pp->max_bytes_per_pkt);
+
+ b0->current_data = save_current_data;
+ b0->current_length = save_current_length;
}
}
vlib_node_runtime_t * node,
vlib_frame_t * frame)
{
+ vnet_main_t *vnm = vnet_get_main ();
vnet_interface_main_t *im = &vnet_get_main ()->interface_main;
+ vnet_pcap_t *pp = &vnm->pcap;
- if (PREDICT_FALSE (im->drop_pcap_enable))
- pcap_drop_trace (vm, im, frame);
+ if (PREDICT_FALSE (pp->pcap_drop_enable))
+ pcap_drop_trace (vm, im, pp, frame);
return interface_drop_punt (vm, node, frame, VNET_ERROR_DISPOSITION_DROP);
}
return interface_drop_punt (vm, node, frame, VNET_ERROR_DISPOSITION_PUNT);
}
-/* *INDENT-OFF* */
VLIB_REGISTER_NODE (interface_drop) = {
.name = "error-drop",
.vector_size = sizeof (u32),
.format_trace = format_vnet_error_trace,
+ .flags = VLIB_NODE_FLAG_TRACE_SUPPORTED,
.n_next_nodes = 1,
.next_nodes = {
[0] = "drop",
},
};
-/* *INDENT-ON* */
-/* *INDENT-OFF* */
VLIB_REGISTER_NODE (interface_punt) = {
.name = "error-punt",
.vector_size = sizeof (u32),
.format_trace = format_vnet_error_trace,
+ .flags = VLIB_NODE_FLAG_TRACE_SUPPORTED,
.n_next_nodes = 1,
.next_nodes = {
[0] = "punt",
},
};
-/* *INDENT-ON* */
-/* *INDENT-OFF* */
VLIB_REGISTER_NODE (vnet_per_buffer_interface_output_node) = {
.name = "interface-output",
.vector_size = sizeof (u32),
};
-/* *INDENT-ON* */
-static uword
-interface_tx_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
- vlib_frame_t * from_frame)
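+/*
+ * interface-output-arc-end: last node on the interface-output feature
+ * arc. Regroups buffers by tx sw_if_index and enqueues each group to the
+ * corresponding interface tx node.
+ */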
+VLIB_NODE_FN (vnet_interface_output_arc_end_node)
+(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
{
vnet_main_t *vnm = vnet_get_main ();
- u32 last_sw_if_index = ~0;
- vlib_frame_t *to_frame = 0;
- vnet_hw_interface_t *hw = 0;
- u32 *from, *to_next = 0;
- u32 n_left_from;
-
- from = vlib_frame_vector_args (from_frame);
- n_left_from = from_frame->n_vectors;
- while (n_left_from > 0)
+ vnet_interface_main_t *im = &vnm->interface_main;
+ vnet_hw_interface_t *hi;
+ vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
+ u32 sw_if_indices[VLIB_FRAME_SIZE], *sw_if_index = sw_if_indices;
+ vlib_frame_bitmap_t used_elts = {}, mask = {};
+ u32 *tmp, *from, n_left, n_comp, n_p_comp, swif, off;
+ u16 next_index;
+ void *ptr[VLIB_FRAME_SIZE], **p = ptr;
+
+ from = vlib_frame_vector_args (frame);
+ n_left = frame->n_vectors;
+ vlib_get_buffers (vm, from, bufs, n_left);
+
+ while (n_left >= 8)
{
- u32 bi0;
- vlib_buffer_t *b0;
- u32 sw_if_index0;
+ vlib_prefetch_buffer_header (b[4], LOAD);
+ vlib_prefetch_buffer_header (b[5], LOAD);
+ vlib_prefetch_buffer_header (b[6], LOAD);
+ vlib_prefetch_buffer_header (b[7], LOAD);
+
+ p[0] = vlib_buffer_get_current (b[0]);
+ p[1] = vlib_buffer_get_current (b[1]);
+ p[2] = vlib_buffer_get_current (b[2]);
+ p[3] = vlib_buffer_get_current (b[3]);
+ sw_if_index[0] = vnet_buffer (b[0])->sw_if_index[VLIB_TX];
+ sw_if_index[1] = vnet_buffer (b[1])->sw_if_index[VLIB_TX];
+ sw_if_index[2] = vnet_buffer (b[2])->sw_if_index[VLIB_TX];
+ sw_if_index[3] = vnet_buffer (b[3])->sw_if_index[VLIB_TX];
+
+ p += 4;
+ b += 4;
+ sw_if_index += 4;
+ n_left -= 4;
+ }
- bi0 = from[0];
- from++;
- n_left_from--;
- b0 = vlib_get_buffer (vm, bi0);
- sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_TX];
+ while (n_left)
+ {
+ p[0] = vlib_buffer_get_current (b[0]);
+ sw_if_index[0] = vnet_buffer (b[0])->sw_if_index[VLIB_TX];
+ p++;
+ b++;
+ sw_if_index++;
+ n_left--;
+ }
+
+ n_left = frame->n_vectors;
+ swif = sw_if_indices[0];
+ off = 0;
- if (PREDICT_FALSE ((last_sw_if_index != sw_if_index0) || to_frame == 0))
+ /* a bit ugly but it allows us to reuse stack space for temporary store
+ * which may also improve memory latency */
+ tmp = (u32 *) bufs;
+
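+ /*
+ * Handle one tx sw_if_index at a time: build a match mask, compress the
+ * matching buffer indices (and packet pointers for multi-tx-queue
+ * interfaces), enqueue them, then pick the next unhandled sw_if_index
+ * from the used-elements bitmap.
+ */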
+more:
+ next_index = vec_elt (im->if_out_arc_end_next_index_by_sw_if_index, swif);
+ hi = vnet_get_sup_hw_interface (vnm, swif);
+ vnet_hw_if_output_node_runtime_t *r = 0;
+ void *ptr_tmp[VLIB_FRAME_SIZE], **p_tmp = ptr_tmp;
+
+ if (hi->output_node_thread_runtimes)
+ r = vec_elt_at_index (hi->output_node_thread_runtimes, vm->thread_index);
+
+ /* compare and compress based on comparison mask */
+ clib_mask_compare_u32 (swif, sw_if_indices, mask, frame->n_vectors);
+ n_comp = clib_compress_u32 (tmp, from, mask, frame->n_vectors);
+
+ /*
+ * tx queue of given interface is not available on given thread
+ */
+ if (r)
+ {
+ if (r->n_queues == 0)
{
- if (to_frame)
- {
- hw = vnet_get_sup_hw_interface (vnm, last_sw_if_index);
- vlib_put_frame_to_node (vm, hw->tx_node_index, to_frame);
- }
- last_sw_if_index = sw_if_index0;
- hw = vnet_get_sup_hw_interface (vnm, sw_if_index0);
- to_frame = vlib_get_frame_to_node (vm, hw->tx_node_index);
- to_next = vlib_frame_vector_args (to_frame);
+ vlib_error_drop_buffers (
+ vm, node, tmp,
+ /* buffer stride */ 1, n_comp, VNET_INTERFACE_OUTPUT_NEXT_DROP,
+ node->node_index, VNET_INTERFACE_OUTPUT_ERROR_NO_TX_QUEUE);
+ goto drop;
}
+ else if (r->n_queues > 1)
+ {
+ n_p_comp = clib_compress_u64 ((u64 *) p_tmp, (u64 *) ptr, mask,
+ frame->n_vectors);
+ ASSERT (n_p_comp == n_comp);
+ }
+ }
- to_next[0] = bi0;
- to_next++;
- to_frame->n_vectors++;
+ enqueue_to_tx_node (vm, node, hi, next_index, r, tmp, ptr_tmp, n_comp);
+
+drop:
+ n_left -= n_comp;
+ if (n_left)
+ {
+ /* store comparison mask so we can find next unused element */
+ vlib_frame_bitmap_or (used_elts, mask);
+
+ /* find first unused sw_if_index by scanning through used_elts bitmap */
+ while (PREDICT_FALSE (used_elts[off] == ~0))
+ off++;
+
+ swif =
+ sw_if_indices[(off << 6) + count_trailing_zeros (~used_elts[off])];
+ goto more;
}
- vlib_put_frame_to_node (vm, hw->tx_node_index, to_frame);
- return from_frame->n_vectors;
+
+ return frame->n_vectors;
}
-/* *INDENT-OFF* */
-VLIB_REGISTER_NODE (interface_tx, static) = {
- .function = interface_tx_node_fn,
- .name = "interface-tx",
+VLIB_REGISTER_NODE (vnet_interface_output_arc_end_node) = {
+ .name = "interface-output-arc-end",
.vector_size = sizeof (u32),
.n_next_nodes = 1,
.next_nodes = {
},
};
-VNET_FEATURE_ARC_INIT (interface_output, static) =
-{
- .arc_name = "interface-output",
+VNET_FEATURE_ARC_INIT (interface_output, static) = {
+ .arc_name = "interface-output",
.start_nodes = VNET_FEATURES (0),
- .last_in_arc = "interface-tx",
+ .last_in_arc = "interface-output-arc-end",
.arc_index_ptr = &vnet_main.interface_main.output_feature_arc_index,
};
VNET_FEATURE_INIT (span_tx, static) = {
.arc_name = "interface-output",
.node_name = "span-output",
- .runs_before = VNET_FEATURES ("interface-tx"),
+ .runs_before = VNET_FEATURES ("interface-output-arc-end"),
};
VNET_FEATURE_INIT (ipsec_if_tx, static) = {
.arc_name = "interface-output",
.node_name = "ipsec-if-output",
- .runs_before = VNET_FEATURES ("interface-tx"),
+ .runs_before = VNET_FEATURES ("interface-output-arc-end"),
};
-VNET_FEATURE_INIT (interface_tx, static) = {
+VNET_FEATURE_INIT (interface_output_arc_end, static) = {
.arc_name = "interface-output",
- .node_name = "interface-tx",
+ .node_name = "interface-output-arc-end",
.runs_before = 0,
};
-/* *INDENT-ON* */
#ifndef CLIB_MARCH_VARIANT
clib_error_t *
}
#endif /* CLIB_MARCH_VARIANT */
-static clib_error_t *
-pcap_drop_trace_command_fn (vlib_main_t * vm,
- unformat_input_t * input,
- vlib_cli_command_t * cmd)
-{
- vnet_main_t *vnm = vnet_get_main ();
- vnet_interface_main_t *im = &vnm->interface_main;
- u8 *filename;
- u32 max;
- int matched = 0;
- clib_error_t *error = 0;
-
- while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
- {
- if (unformat (input, "on"))
- {
- if (im->drop_pcap_enable == 0)
- {
- if (im->pcap_filename == 0)
- im->pcap_filename = format (0, "/tmp/drop.pcap%c", 0);
-
- clib_memset (&im->pcap_main, 0, sizeof (im->pcap_main));
- im->pcap_main.file_name = (char *) im->pcap_filename;
- im->pcap_main.n_packets_to_capture = 100;
- if (im->pcap_pkts_to_capture)
- im->pcap_main.n_packets_to_capture = im->pcap_pkts_to_capture;
-
- im->pcap_main.packet_type = PCAP_PACKET_TYPE_ethernet;
- im->drop_pcap_enable = 1;
- matched = 1;
- vlib_cli_output (vm, "pcap drop capture on...");
- }
- else
- {
- vlib_cli_output (vm, "pcap drop capture already on...");
- }
- matched = 1;
- }
- else if (unformat (input, "off"))
- {
- matched = 1;
-
- if (im->drop_pcap_enable)
- {
- vlib_cli_output (vm, "captured %d pkts...",
- im->pcap_main.n_packets_captured);
- if (im->pcap_main.n_packets_captured)
- {
- im->pcap_main.n_packets_to_capture =
- im->pcap_main.n_packets_captured;
- error = pcap_write (&im->pcap_main);
- if (error)
- clib_error_report (error);
- else
- vlib_cli_output (vm, "saved to %s...", im->pcap_filename);
- }
- }
- else
- {
- vlib_cli_output (vm, "pcap drop capture already off...");
- }
-
- im->drop_pcap_enable = 0;
- }
- else if (unformat (input, "max %d", &max))
- {
- im->pcap_pkts_to_capture = max;
- matched = 1;
- }
-
- else if (unformat (input, "intfc %U",
- unformat_vnet_sw_interface, vnm,
- &im->pcap_sw_if_index))
- matched = 1;
- else if (unformat (input, "intfc any"))
- {
- im->pcap_sw_if_index = 0;
- matched = 1;
- }
- else if (unformat (input, "file %s", &filename))
- {
- u8 *chroot_filename;
- /* Brain-police user path input */
- if (strstr ((char *) filename, "..")
- || index ((char *) filename, '/'))
- {
- vlib_cli_output (vm, "illegal characters in filename '%s'",
- filename);
- continue;
- }
-
- chroot_filename = format (0, "/tmp/%s%c", filename, 0);
- vec_free (filename);
-
- if (im->pcap_filename)
- vec_free (im->pcap_filename);
- im->pcap_filename = chroot_filename;
- im->pcap_main.file_name = (char *) im->pcap_filename;
- matched = 1;
- }
- else if (unformat (input, "status"))
- {
- if (im->drop_pcap_enable == 0)
- {
- vlib_cli_output (vm, "pcap drop capture is off...");
- continue;
- }
-
- vlib_cli_output (vm, "pcap drop capture: %d of %d pkts...",
- im->pcap_main.n_packets_captured,
- im->pcap_main.n_packets_to_capture);
- matched = 1;
- }
-
- else
- break;
- }
-
- if (matched == 0)
- return clib_error_return (0, "unknown input `%U'",
- format_unformat_error, input);
-
- return 0;
-}
-
-/* *INDENT-OFF* */
-VLIB_CLI_COMMAND (pcap_trace_command, static) = {
- .path = "pcap drop trace",
- .short_help =
- "pcap drop trace on off max <nn> intfc <intfc> file <name> status",
- .function = pcap_drop_trace_command_fn,
-};
-/* *INDENT-ON* */
-
/*
* fd.io coding-style-patch-verification: ON
*