#include <vppinfra/error.h>
#include <flowprobe/flowprobe.h>
#include <vnet/ip/ip6_packet.h>
+#include <vlibmemory/api.h>
static void flowprobe_export_entry (vlib_main_t * vm, flowprobe_entry_t * e);
CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
flowprobe_trace_t *t = va_arg (*args, flowprobe_trace_t *);
- uword indent = format_get_indent (s);
+ u32 indent = format_get_indent (s);
s = format (s,
"FLOWPROBE[%s]: rx_sw_if_index %d, tx_sw_if_index %d, "
return which;
}
+/*
+ * RFC 868 / NTP epoch offset: 2208988800 is the number of seconds
+ * between 00:00 1 Jan 1900 GMT (NTP era 0) and 00:00 1 Jan 1970 GMT
+ * (the Unix epoch).
+ */
+#define NTP_TIMESTAMP 2208988800LU
+
static inline u32
flowprobe_common_add (vlib_buffer_t * to_b, flowprobe_entry_t * e, u16 offset)
{
/* Ingress interface */
u32 rx_if = clib_host_to_net_u32 (e->key.rx_sw_if_index);
- clib_memcpy (to_b->data + offset, &rx_if, sizeof (rx_if));
+ clib_memcpy_fast (to_b->data + offset, &rx_if, sizeof (rx_if));
offset += sizeof (rx_if);
/* Egress interface */
u32 tx_if = clib_host_to_net_u32 (e->key.tx_sw_if_index);
- clib_memcpy (to_b->data + offset, &tx_if, sizeof (tx_if));
+ clib_memcpy_fast (to_b->data + offset, &tx_if, sizeof (tx_if));
offset += sizeof (tx_if);
/* packet delta count */
u64 packetdelta = clib_host_to_net_u64 (e->packetcount);
- clib_memcpy (to_b->data + offset, &packetdelta, sizeof (u64));
+ clib_memcpy_fast (to_b->data + offset, &packetdelta, sizeof (u64));
offset += sizeof (u64);
+ /* flowStartNanoseconds */
+ u32 t = clib_host_to_net_u32 (e->flow_start.sec + NTP_TIMESTAMP);
+ clib_memcpy_fast (to_b->data + offset, &t, sizeof (u32));
+ offset += sizeof (u32);
+ t = clib_host_to_net_u32 (e->flow_start.nsec);
+ clib_memcpy_fast (to_b->data + offset, &t, sizeof (u32));
+ offset += sizeof (u32);
+
+ /* flowEndNanoseconds */
+ t = clib_host_to_net_u32 (e->flow_end.sec + NTP_TIMESTAMP);
+ clib_memcpy_fast (to_b->data + offset, &t, sizeof (u32));
+ offset += sizeof (u32);
+ t = clib_host_to_net_u32 (e->flow_end.nsec);
+ clib_memcpy_fast (to_b->data + offset, &t, sizeof (u32));
+ offset += sizeof (u32);
+
return offset - start;
}
u16 start = offset;
/* src mac address */
- clib_memcpy (to_b->data + offset, &e->key.src_mac, 6);
+ clib_memcpy_fast (to_b->data + offset, &e->key.src_mac, 6);
offset += 6;
/* dst mac address */
- clib_memcpy (to_b->data + offset, &e->key.dst_mac, 6);
+ clib_memcpy_fast (to_b->data + offset, &e->key.dst_mac, 6);
offset += 6;
/* ethertype */
- clib_memcpy (to_b->data + offset, &e->key.ethertype, 2);
+ clib_memcpy_fast (to_b->data + offset, &e->key.ethertype, 2);
offset += 2;
return offset - start;
u16 start = offset;
/* ip6 src address */
- clib_memcpy (to_b->data + offset, &e->key.src_address,
- sizeof (ip6_address_t));
+ clib_memcpy_fast (to_b->data + offset, &e->key.src_address,
+ sizeof (ip6_address_t));
offset += sizeof (ip6_address_t);
/* ip6 dst address */
- clib_memcpy (to_b->data + offset, &e->key.dst_address,
- sizeof (ip6_address_t));
+ clib_memcpy_fast (to_b->data + offset, &e->key.dst_address,
+ sizeof (ip6_address_t));
offset += sizeof (ip6_address_t);
/* Protocol */
/* octetDeltaCount */
u64 octetdelta = clib_host_to_net_u64 (e->octetcount);
- clib_memcpy (to_b->data + offset, &octetdelta, sizeof (u64));
+ clib_memcpy_fast (to_b->data + offset, &octetdelta, sizeof (u64));
offset += sizeof (u64);
return offset - start;
u16 start = offset;
/* ip4 src address */
- clib_memcpy (to_b->data + offset, &e->key.src_address.ip4,
- sizeof (ip4_address_t));
+ clib_memcpy_fast (to_b->data + offset, &e->key.src_address.ip4,
+ sizeof (ip4_address_t));
offset += sizeof (ip4_address_t);
/* ip4 dst address */
- clib_memcpy (to_b->data + offset, &e->key.dst_address.ip4,
- sizeof (ip4_address_t));
+ clib_memcpy_fast (to_b->data + offset, &e->key.dst_address.ip4,
+ sizeof (ip4_address_t));
offset += sizeof (ip4_address_t);
/* Protocol */
/* octetDeltaCount */
u64 octetdelta = clib_host_to_net_u64 (e->octetcount);
- clib_memcpy (to_b->data + offset, &octetdelta, sizeof (u64));
+ clib_memcpy_fast (to_b->data + offset, &octetdelta, sizeof (u64));
offset += sizeof (u64);
return offset - start;
u16 start = offset;
/* src port */
- clib_memcpy (to_b->data + offset, &e->key.src_port, 2);
+ clib_memcpy_fast (to_b->data + offset, &e->key.src_port, 2);
offset += 2;
/* dst port */
- clib_memcpy (to_b->data + offset, &e->key.dst_port, 2);
+ clib_memcpy_fast (to_b->data + offset, &e->key.dst_port, 2);
+ offset += 2;
+
+ /* tcp control bits */
+ u16 control_bits = htons (e->prot.tcp.flags);
+ clib_memcpy_fast (to_b->data + offset, &control_bits, 2);
offset += 2;
return offset - start;
u32 h = 0;
#ifdef clib_crc32c_uses_intrinsics
- h = clib_crc32c ((u8 *) k->as_u32, FLOWPROBE_KEY_IN_U32);
+ h = clib_crc32c ((u8 *) k, sizeof (*k));
#else
- u64 tmp =
- k->as_u32[0] ^ k->as_u32[1] ^ k->as_u32[2] ^ k->as_u32[3] ^ k->as_u32[4];
+ int i;
+ u64 tmp = 0;
+ for (i = 0; i < sizeof (*k) / 8; i++)
+ tmp ^= ((u64 *) k)[i];
+
h = clib_xxhash (tmp);
#endif
static inline void
add_to_flow_record_state (vlib_main_t * vm, vlib_node_runtime_t * node,
flowprobe_main_t * fm, vlib_buffer_t * b,
- u64 timestamp, u16 length,
+ timestamp_nsec_t timestamp, u16 length,
flowprobe_variant_t which, flowprobe_trace_t * t)
{
if (fm->disabled)
ethernet_header_t *eth = vlib_buffer_get_current (b);
u16 ethertype = clib_net_to_host_u16 (eth->type);
/* *INDENT-OFF* */
- flowprobe_key_t k = { {0} };
+ flowprobe_key_t k = {};
/* *INDENT-ON* */
ip4_header_t *ip4 = 0;
ip6_header_t *ip6 = 0;
udp_header_t *udp = 0;
+ tcp_header_t *tcp = 0;
+ u8 tcp_flags = 0;
if (flags & FLOW_RECORD_L3 || flags & FLOW_RECORD_L4)
{
if (flags & FLOW_RECORD_L2)
{
- clib_memcpy (k.src_mac, eth->src_address, 6);
- clib_memcpy (k.dst_mac, eth->dst_address, 6);
+ clib_memcpy_fast (k.src_mac, eth->src_address, 6);
+ clib_memcpy_fast (k.dst_mac, eth->dst_address, 6);
k.ethertype = ethertype;
}
if (collect_ip6 && ethertype == ETHERNET_TYPE_IP6)
{
ip6 = (ip6_header_t *) (eth + 1);
- udp = (udp_header_t *) (ip6 + 1);
if (flags & FLOW_RECORD_L3)
{
k.src_address.as_u64[0] = ip6->src_address.as_u64[0];
k.dst_address.as_u64[1] = ip6->dst_address.as_u64[1];
}
k.protocol = ip6->protocol;
+ if (k.protocol == IP_PROTOCOL_UDP)
+ udp = (udp_header_t *) (ip6 + 1);
+ else if (k.protocol == IP_PROTOCOL_TCP)
+ tcp = (tcp_header_t *) (ip6 + 1);
+
octets = clib_net_to_host_u16 (ip6->payload_length)
+ sizeof (ip6_header_t);
}
if (collect_ip4 && ethertype == ETHERNET_TYPE_IP4)
{
ip4 = (ip4_header_t *) (eth + 1);
- udp = (udp_header_t *) (ip4 + 1);
if (flags & FLOW_RECORD_L3)
{
k.src_address.ip4.as_u32 = ip4->src_address.as_u32;
k.dst_address.ip4.as_u32 = ip4->dst_address.as_u32;
}
k.protocol = ip4->protocol;
+ if ((flags & FLOW_RECORD_L4) && k.protocol == IP_PROTOCOL_UDP)
+ udp = (udp_header_t *) (ip4 + 1);
+ else if ((flags & FLOW_RECORD_L4) && k.protocol == IP_PROTOCOL_TCP)
+ tcp = (tcp_header_t *) (ip4 + 1);
+
octets = clib_net_to_host_u16 (ip4->length);
}
- if ((flags & FLOW_RECORD_L4) && udp &&
- (k.protocol == IP_PROTOCOL_TCP || k.protocol == IP_PROTOCOL_UDP))
+
+ if (udp)
{
k.src_port = udp->src_port;
k.dst_port = udp->dst_port;
}
+ else if (tcp)
+ {
+ k.src_port = tcp->src_port;
+ k.dst_port = tcp->dst_port;
+ tcp_flags = tcp->flags;
+ }
if (t)
{
t->rx_sw_if_index = k.rx_sw_if_index;
t->tx_sw_if_index = k.tx_sw_if_index;
- clib_memcpy (t->src_mac, k.src_mac, 6);
- clib_memcpy (t->dst_mac, k.dst_mac, 6);
+ clib_memcpy_fast (t->src_mac, k.src_mac, 6);
+ clib_memcpy_fast (t->dst_mac, k.dst_mac, 6);
t->ethertype = k.ethertype;
t->src_address.ip4.as_u32 = k.src_address.ip4.as_u32;
t->dst_address.ip4.as_u32 = k.dst_address.ip4.as_u32;
if (e->packetcount)
flowprobe_export_entry (vm, e);
e->key = k;
+ e->flow_start = timestamp;
vlib_node_increment_counter (vm, node->node_index,
FLOWPROBE_ERROR_COLLISION, 1);
}
{
e = flowprobe_create (my_cpu_number, &k, &poolindex);
e->last_exported = now;
+ e->flow_start = timestamp;
}
}
else
e->packetcount++;
e->octetcount += octets;
e->last_updated = now;
-
+ e->flow_end = timestamp;
+ e->prot.tcp.flags |= tcp_flags;
if (fm->active_timer == 0
|| (now > e->last_exported + fm->active_timer))
flowprobe_export_entry (vm, e);
flow_report_main_t *frm = &flow_report_main;
vlib_buffer_t *b0;
u32 bi0;
- vlib_buffer_free_list_t *fl;
u32 my_cpu_number = vm->thread_index;
/* Find or allocate a buffer */
/* Initialize the buffer */
b0 = fm->context[which].buffers_per_worker[my_cpu_number] =
vlib_get_buffer (vm, bi0);
- fl =
- vlib_buffer_get_free_list (vm, VLIB_BUFFER_DEFAULT_FREE_LIST_INDEX);
- vlib_buffer_init_for_free_list (b0, fl);
VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b0);
b0->current_data = 0;
b0->current_length = flowprobe_get_headersize ();
- b0->flags |= (VLIB_BUFFER_TOTAL_LENGTH_VALID | VLIB_BUFFER_FLOW_REPORT);
+ b0->flags |=
+ (VLIB_BUFFER_TOTAL_LENGTH_VALID | VNET_BUFFER_F_FLOW_REPORT);
vnet_buffer (b0)->sw_if_index[VLIB_RX] = 0;
vnet_buffer (b0)->sw_if_index[VLIB_TX] = frm->fib_index;
fm->context[which].next_record_offset_per_worker[my_cpu_number] =
u32 n_left_from, *from, *to_next;
flowprobe_next_t next_index;
flowprobe_main_t *fm = &flowprobe_main;
- u64 now;
+ timestamp_nsec_t timestamp;
- now = (u64) ((vlib_time_now (vm) - fm->vlib_time_0) * 1e9);
- now += fm->nanosecond_time_0;
+ unix_time_now_nsec_fraction (&timestamp.sec, &timestamp.nsec);
from = vlib_frame_vector_args (frame);
n_left_from = frame->n_vectors;
b0 = vlib_get_buffer (vm, bi0);
b1 = vlib_get_buffer (vm, bi1);
- vnet_feature_next (vnet_buffer (b0)->sw_if_index[VLIB_TX],
- &next0, b0);
- vnet_feature_next (vnet_buffer (b1)->sw_if_index[VLIB_TX],
- &next1, b1);
+ vnet_feature_next (&next0, b0);
+ vnet_feature_next (&next1, b1);
len0 = vlib_buffer_length_in_chain (vm, b0);
ethernet_header_t *eh0 = vlib_buffer_get_current (b0);
u16 ethertype0 = clib_net_to_host_u16 (eh0->type);
- if (PREDICT_TRUE ((b0->flags & VLIB_BUFFER_FLOW_REPORT) == 0))
- add_to_flow_record_state (vm, node, fm, b0, now, len0,
+ if (PREDICT_TRUE ((b0->flags & VNET_BUFFER_F_FLOW_REPORT) == 0))
+ add_to_flow_record_state (vm, node, fm, b0, timestamp, len0,
flowprobe_get_variant
(which, fm->context[which].flags,
ethertype0), 0);
ethernet_header_t *eh1 = vlib_buffer_get_current (b1);
u16 ethertype1 = clib_net_to_host_u16 (eh1->type);
- if (PREDICT_TRUE ((b1->flags & VLIB_BUFFER_FLOW_REPORT) == 0))
- add_to_flow_record_state (vm, node, fm, b1, now, len1,
+ if (PREDICT_TRUE ((b1->flags & VNET_BUFFER_F_FLOW_REPORT) == 0))
+ add_to_flow_record_state (vm, node, fm, b1, timestamp, len1,
flowprobe_get_variant
(which, fm->context[which].flags,
ethertype1), 0);
b0 = vlib_get_buffer (vm, bi0);
- vnet_feature_next (vnet_buffer (b0)->sw_if_index[VLIB_TX],
- &next0, b0);
+ vnet_feature_next (&next0, b0);
len0 = vlib_buffer_length_in_chain (vm, b0);
ethernet_header_t *eh0 = vlib_buffer_get_current (b0);
u16 ethertype0 = clib_net_to_host_u16 (eh0->type);
- if (PREDICT_TRUE ((b0->flags & VLIB_BUFFER_FLOW_REPORT) == 0))
+ if (PREDICT_TRUE ((b0->flags & VNET_BUFFER_F_FLOW_REPORT) == 0))
{
flowprobe_trace_t *t = 0;
if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)
&& (b0->flags & VLIB_BUFFER_IS_TRACED)))
t = vlib_add_trace (vm, node, b0, sizeof (*t));
- add_to_flow_record_state (vm, node, fm, b0, now, len0,
+ add_to_flow_record_state (vm, node, fm, b0, timestamp, len0,
flowprobe_get_variant
(which, fm->context[which].flags,
ethertype0), t);
* entry. Otherwise restart timer with what's left
* Premature passive timer by more than 10%
*/
- if ((now - e->last_updated) < (fm->passive_timer * 0.9))
+ if ((now - e->last_updated) < (u64) (fm->passive_timer * 0.9))
{
- f64 delta = fm->passive_timer - (now - e->last_updated);
+ u64 delta = fm->passive_timer - (now - e->last_updated);
e->passive_timer_handle = tw_timer_start_2t_1w_2048sl
(fm->timers_per_worker[cpu_index], *i, 0, delta);
}