2 * node.c - ipfix probe graph node
4 * Copyright (c) 2017 Cisco and/or its affiliates.
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at:
9 * http://www.apache.org/licenses/LICENSE-2.0
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
17 #include <vlib/vlib.h>
18 #include <vnet/vnet.h>
19 #include <vnet/pg/pg.h>
20 #include <vppinfra/crc32.h>
21 #include <vppinfra/error.h>
22 #include <flowprobe/flowprobe.h>
23 #include <vnet/ip/ip6_packet.h>
24 #include <vlibmemory/api.h>
/* Forward declaration: flush one flow entry into the per-worker export
 * buffer (definition appears later in this file). */
26 static void flowprobe_export_entry (vlib_main_t * vm, flowprobe_entry_t * e);
29 * @file flow record generator graph node
/* Fields below belong to the packet-trace record (flowprobe_trace_t);
 * the surrounding struct declaration is elided in this listing. */
34 /** interface handle */
37 /** packet timestamp */
39 /** size of the buffer */
/* Flow key addresses captured for tracing; ip46_address_t can hold
 * either an IPv4 or an IPv6 address. */
49 ip46_address_t src_address;
50 ip46_address_t dst_address;
/* Which node variant (IP4/IP6/L2/...) recorded this trace entry. */
58 flowprobe_variant_t which;
/* Human-readable names for each flowprobe variant, indexed by
 * flowprobe_variant_t; consumed by format_flowprobe_trace below. */
61 static char *flowprobe_variant_strings[] = {
62 [FLOW_VARIANT_IP4] = "IP4",
63 [FLOW_VARIANT_IP6] = "IP6",
64 [FLOW_VARIANT_L2] = "L2",
65 [FLOW_VARIANT_L2_IP4] = "L2-IP4",
66 [FLOW_VARIANT_L2_IP6] = "L2-IP6",
69 /* packet trace format function */
/* Formats one flowprobe_trace_t for "show trace": always prints the
 * interface/timestamp/size summary, then MAC addresses for the L2
 * variant and the IP 5-tuple-ish line for IP-carrying variants. */
71 format_flowprobe_trace (u8 * s, va_list * args)
/* Standard vlib trace formatter signature: (vm, node, trace *) via va_list. */
73 CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
74 CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
75 flowprobe_trace_t *t = va_arg (*args, flowprobe_trace_t *);
76 u32 indent = format_get_indent (s);
79 "FLOWPROBE[%s]: rx_sw_if_index %d, tx_sw_if_index %d, "
80 "timestamp %lld, size %d", flowprobe_variant_strings[t->which],
81 t->rx_sw_if_index, t->tx_sw_if_index,
82 t->timestamp, t->buffer_size);
/* L2 variant: also show src -> dst MAC addresses. */
84 if (t->which == FLOW_VARIANT_L2)
85 s = format (s, "\n%U -> %U", format_white_space, indent,
86 format_ethernet_address, &t->src_mac,
87 format_ethernet_address, &t->dst_mac);
/* IP-carrying variants: show protocol and src -> dst addresses
 * (IP46_TYPE_ANY lets the formatter decide v4 vs v6). */
90 && (t->which == FLOW_VARIANT_L2_IP4 || t->which == FLOW_VARIANT_IP4
91 || t->which == FLOW_VARIANT_L2_IP6 || t->which == FLOW_VARIANT_IP6))
93 format (s, "\n%U%U: %U -> %U", format_white_space, indent,
94 format_ip_protocol, t->protocol, format_ip46_address,
95 &t->src_address, IP46_TYPE_ANY, format_ip46_address,
96 &t->dst_address, IP46_TYPE_ANY);
/* Graph node registrations; defined via VLIB_REGISTER_NODE at the
 * bottom of this file. */
100 vlib_node_registration_t flowprobe_ip4_node;
101 vlib_node_registration_t flowprobe_ip6_node;
102 vlib_node_registration_t flowprobe_l2_node;
104 /* No counters at the moment */
/* X-macro list of per-node error/info counters. */
105 #define foreach_flowprobe_error \
106 _(COLLISION, "Hash table collisions") \
107 _(BUFFER, "Buffer allocation error") \
108 _(EXPORTED_PACKETS, "Exported packets") \
109 _(INPATH, "Exported packets in path")
/* Expand the list into enum values FLOWPROBE_ERROR_<sym>. */
113 #define _(sym,str) FLOWPROBE_ERROR_##sym,
114 foreach_flowprobe_error
/* Matching human-readable strings, in the same order as the enum. */
119 static char *flowprobe_error_strings[] = {
120 #define _(sym,string) string,
121 foreach_flowprobe_error
/* Next-node dispositions for packets leaving this node. */
128 FLOWPROBE_NEXT_IP4_LOOKUP,
132 #define FLOWPROBE_NEXT_NODES { \
133 [FLOWPROBE_NEXT_DROP] = "error-drop", \
134 [FLOWPROBE_NEXT_IP4_LOOKUP] = "ip4-lookup", \
/* Refine an L2 variant to L2-IP4/L2-IP6 when L3/L4 fields are being
 * collected, based on the frame's ethertype; other variants pass
 * through unchanged (elided fall-through presumably returns 'which'). */
137 static inline flowprobe_variant_t
138 flowprobe_get_variant (flowprobe_variant_t which,
139 flowprobe_record_t flags, u16 ethertype)
141 if (which == FLOW_VARIANT_L2
142 && (flags & FLOW_RECORD_L3 || flags & FLOW_RECORD_L4))
143 return ethertype == ETHERNET_TYPE_IP6 ? FLOW_VARIANT_L2_IP6 : ethertype ==
144 ETHERNET_TYPE_IP4 ? FLOW_VARIANT_L2_IP4 : FLOW_VARIANT_L2;
149 * NTP rfc868 : 2 208 988 800 corresponds to 00:00 1 Jan 1970 GMT
/* Offset added to Unix seconds to produce NTP-era timestamps required
 * by the IPFIX flowStart/flowEndNanoseconds elements. */
151 #define NTP_TIMESTAMP 2208988800LU
/* Append the fields common to every record variant (ingress/egress
 * interface, packet delta count, flowStart/EndNanoseconds) to the
 * export buffer at 'offset'.  All multi-byte fields are written in
 * network byte order.  Returns the number of bytes written. */
154 flowprobe_common_add (vlib_buffer_t * to_b, flowprobe_entry_t * e, u16 offset)
158 /* Ingress interface */
159 u32 rx_if = clib_host_to_net_u32 (e->key.rx_sw_if_index);
160 clib_memcpy_fast (to_b->data + offset, &rx_if, sizeof (rx_if));
161 offset += sizeof (rx_if);
163 /* Egress interface */
164 u32 tx_if = clib_host_to_net_u32 (e->key.tx_sw_if_index);
165 clib_memcpy_fast (to_b->data + offset, &tx_if, sizeof (tx_if));
166 offset += sizeof (tx_if);
168 /* packet delta count */
169 u64 packetdelta = clib_host_to_net_u64 (e->packetcount);
170 clib_memcpy_fast (to_b->data + offset, &packetdelta, sizeof (u64));
171 offset += sizeof (u64);
173 /* flowStartNanoseconds */
/* Seconds are rebased to the NTP era (see NTP_TIMESTAMP above). */
174 u32 t = clib_host_to_net_u32 (e->flow_start.sec + NTP_TIMESTAMP);
175 clib_memcpy_fast (to_b->data + offset, &t, sizeof (u32));
176 offset += sizeof (u32);
177 t = clib_host_to_net_u32 (e->flow_start.nsec);
178 clib_memcpy_fast (to_b->data + offset, &t, sizeof (u32));
179 offset += sizeof (u32);
181 /* flowEndNanoseconds */
182 t = clib_host_to_net_u32 (e->flow_end.sec + NTP_TIMESTAMP);
183 clib_memcpy_fast (to_b->data + offset, &t, sizeof (u32));
184 offset += sizeof (u32);
185 t = clib_host_to_net_u32 (e->flow_end.nsec);
186 clib_memcpy_fast (to_b->data + offset, &t, sizeof (u32));
187 offset += sizeof (u32);
/* 'start' is the entry offset (declaration elided in this listing). */
189 return offset - start;
/* Append the L2 record fields (src MAC, dst MAC, ethertype) to the
 * export buffer at 'offset'.  Returns the number of bytes written. */
193 flowprobe_l2_add (vlib_buffer_t * to_b, flowprobe_entry_t * e, u16 offset)
197 /* src mac address */
198 clib_memcpy_fast (to_b->data + offset, &e->key.src_mac, 6);
201 /* dst mac address */
202 clib_memcpy_fast (to_b->data + offset, &e->key.dst_mac, 6);
/* ethertype (2 bytes, already in key byte order) */
206 clib_memcpy_fast (to_b->data + offset, &e->key.ethertype, 2);
209 return offset - start;
/* Append the IPv6 L3 record fields (src/dst address, protocol,
 * octetDeltaCount) to the export buffer.  Returns bytes written. */
213 flowprobe_l3_ip6_add (vlib_buffer_t * to_b, flowprobe_entry_t * e, u16 offset)
217 /* ip6 src address */
218 clib_memcpy_fast (to_b->data + offset, &e->key.src_address,
219 sizeof (ip6_address_t));
220 offset += sizeof (ip6_address_t);
222 /* ip6 dst address */
223 clib_memcpy_fast (to_b->data + offset, &e->key.dst_address,
224 sizeof (ip6_address_t));
225 offset += sizeof (ip6_address_t);
/* protocolIdentifier (single byte) */
228 to_b->data[offset++] = e->key.protocol;
230 /* octetDeltaCount */
231 u64 octetdelta = clib_host_to_net_u64 (e->octetcount);
232 clib_memcpy_fast (to_b->data + offset, &octetdelta, sizeof (u64));
233 offset += sizeof (u64);
235 return offset - start;
/* Append the IPv4 L3 record fields (src/dst address, protocol,
 * octetDeltaCount) to the export buffer.  Returns bytes written. */
239 flowprobe_l3_ip4_add (vlib_buffer_t * to_b, flowprobe_entry_t * e, u16 offset)
243 /* ip4 src address */
244 clib_memcpy_fast (to_b->data + offset, &e->key.src_address.ip4,
245 sizeof (ip4_address_t));
246 offset += sizeof (ip4_address_t);
248 /* ip4 dst address */
249 clib_memcpy_fast (to_b->data + offset, &e->key.dst_address.ip4,
250 sizeof (ip4_address_t));
251 offset += sizeof (ip4_address_t);
/* protocolIdentifier (single byte) */
254 to_b->data[offset++] = e->key.protocol;
256 /* octetDeltaCount */
257 u64 octetdelta = clib_host_to_net_u64 (e->octetcount);
258 clib_memcpy_fast (to_b->data + offset, &octetdelta, sizeof (u64));
259 offset += sizeof (u64);
261 return offset - start;
/* Append the L4 record fields (src/dst port, accumulated TCP control
 * bits) to the export buffer.  Ports are copied as stored in the key
 * (network byte order from the wire).  Returns bytes written. */
265 flowprobe_l4_add (vlib_buffer_t * to_b, flowprobe_entry_t * e, u16 offset)
270 clib_memcpy_fast (to_b->data + offset, &e->key.src_port, 2);
274 clib_memcpy_fast (to_b->data + offset, &e->key.dst_port, 2);
277 /* tcp control bits */
278 u16 control_bits = htons (e->prot.tcp.flags);
279 clib_memcpy_fast (to_b->data + offset, &control_bits, 2);
282 return offset - start;
/* Hash a flow key into the per-worker hash table index space.
 * Uses hardware CRC32C when available, otherwise XOR-folds the key
 * into a u64 and runs it through xxhash.  The shift keeps only the
 * top fm->ht_log2len bits as the table index. */
286 flowprobe_hash (flowprobe_key_t * k)
288 flowprobe_main_t *fm = &flowprobe_main;
291 #ifdef clib_crc32c_uses_intrinsics
292 h = clib_crc32c ((u8 *) k, sizeof (*k));
/* Fallback: fold the key 8 bytes at a time (assumes sizeof(*k) is a
 * multiple of 8 — TODO confirm against flowprobe_key_t layout). */
296 for (i = 0; i < sizeof (*k) / 8; i++)
297 tmp ^= ((u64 *) k)[i];
299 h = clib_xxhash (tmp);
302 return h >> (32 - fm->ht_log2len);
/* Look up a flow key in this worker's single-slot-per-bucket hash
 * table.  On hit, *poolindex is the pool index of the stored entry;
 * a key mismatch in an occupied bucket is reported as a collision
 * (via the elided 'collision' out-parameter path). */
306 flowprobe_lookup (u32 my_cpu_number, flowprobe_key_t * k, u32 * poolindex,
309 flowprobe_main_t *fm = &flowprobe_main;
310 flowprobe_entry_t *e;
/* With the active timer disabled everything maps to bucket 0
 * (the stateless single-entry mode). */
313 h = (fm->active_timer) ? flowprobe_hash (k) : 0;
315 /* Lookup in the flow state pool */
316 *poolindex = fm->hash_per_worker[my_cpu_number][h];
317 if (*poolindex != ~0)
319 e = pool_elt_at_index (fm->pool_per_worker[my_cpu_number], *poolindex);
322 /* Verify key or report collision */
323 if (memcmp (k, &e->key, sizeof (flowprobe_key_t)))
/* Allocate a new flow entry from this worker's pool, install it in
 * the hash bucket (overwriting any previous index), and arm the
 * passive timeout timer when a passive timer is configured. */
333 flowprobe_create (u32 my_cpu_number, flowprobe_key_t * k, u32 * poolindex)
335 flowprobe_main_t *fm = &flowprobe_main;
338 flowprobe_entry_t *e;
341 h = (fm->active_timer) ? flowprobe_hash (k) : 0;
343 pool_get (fm->pool_per_worker[my_cpu_number], e);
344 *poolindex = e - fm->pool_per_worker[my_cpu_number];
345 fm->hash_per_worker[my_cpu_number][h] = *poolindex;
/* Start the passive (idle) timer; expiry is handled by the walker
 * process further down in this file. */
349 if (fm->passive_timer > 0)
351 e->passive_timer_handle = tw_timer_start_2t_1w_2048sl
352 (fm->timers_per_worker[my_cpu_number], *poolindex, 0,
/* Per-packet flow accounting.  Builds a flow key from the buffer's
 * L2/L3/L4 headers (according to the configured record flags), finds
 * or creates the per-worker flow entry, updates its counters and
 * timestamps, and exports the entry when the active timer fires (or
 * immediately in stateless mode).  Optionally fills the packet trace
 * record 't'. */
359 add_to_flow_record_state (vlib_main_t * vm, vlib_node_runtime_t * node,
360 flowprobe_main_t * fm, vlib_buffer_t * b,
361 timestamp_nsec_t timestamp, u16 length,
362 flowprobe_variant_t which, flowprobe_trace_t * t)
367 u32 my_cpu_number = vm->thread_index;
370 flowprobe_record_t flags = fm->context[which].flags;
371 bool collect_ip4 = false, collect_ip6 = false;
/* NOTE(review): parses from the ethernet header; assumes the buffer's
 * current data points at L2 — confirm for the ip4/ip6 feature arcs. */
373 ethernet_header_t *eth = vlib_buffer_get_current (b);
374 u16 ethertype = clib_net_to_host_u16 (eth->type);
376 flowprobe_key_t k = {};
378 ip4_header_t *ip4 = 0;
379 ip6_header_t *ip6 = 0;
380 udp_header_t *udp = 0;
381 tcp_header_t *tcp = 0;
/* Decide which address family to collect for this variant. */
384 if (flags & FLOW_RECORD_L3 || flags & FLOW_RECORD_L4)
386 collect_ip4 = which == FLOW_VARIANT_L2_IP4 || which == FLOW_VARIANT_IP4;
387 collect_ip6 = which == FLOW_VARIANT_L2_IP6 || which == FLOW_VARIANT_IP6;
/* The rx/tx interface pair is always part of the key. */
390 k.rx_sw_if_index = vnet_buffer (b)->sw_if_index[VLIB_RX];
391 k.tx_sw_if_index = vnet_buffer (b)->sw_if_index[VLIB_TX];
/* L2 key fields: MACs and ethertype. */
395 if (flags & FLOW_RECORD_L2)
397 clib_memcpy_fast (k.src_mac, eth->src_address, 6);
398 clib_memcpy_fast (k.dst_mac, eth->dst_address, 6);
399 k.ethertype = ethertype;
/* IPv6 key fields + locate L4 header.  Note: headers are assumed to
 * follow back-to-back (no v6 extension-header walk here). */
401 if (collect_ip6 && ethertype == ETHERNET_TYPE_IP6)
403 ip6 = (ip6_header_t *) (eth + 1);
404 if (flags & FLOW_RECORD_L3)
406 k.src_address.as_u64[0] = ip6->src_address.as_u64[0];
407 k.src_address.as_u64[1] = ip6->src_address.as_u64[1];
408 k.dst_address.as_u64[0] = ip6->dst_address.as_u64[0];
409 k.dst_address.as_u64[1] = ip6->dst_address.as_u64[1];
411 k.protocol = ip6->protocol;
412 if (k.protocol == IP_PROTOCOL_UDP)
413 udp = (udp_header_t *) (ip6 + 1);
414 else if (k.protocol == IP_PROTOCOL_TCP)
415 tcp = (tcp_header_t *) (ip6 + 1);
/* v6 counts header + payload explicitly. */
417 octets = clib_net_to_host_u16 (ip6->payload_length)
418 + sizeof (ip6_header_t);
/* IPv4 key fields + locate L4 header (assumes no IP options — the
 * L4 header is taken immediately after the fixed ip4 header). */
420 if (collect_ip4 && ethertype == ETHERNET_TYPE_IP4)
422 ip4 = (ip4_header_t *) (eth + 1);
423 if (flags & FLOW_RECORD_L3)
425 k.src_address.ip4.as_u32 = ip4->src_address.as_u32;
426 k.dst_address.ip4.as_u32 = ip4->dst_address.as_u32;
428 k.protocol = ip4->protocol;
429 if ((flags & FLOW_RECORD_L4) && k.protocol == IP_PROTOCOL_UDP)
430 udp = (udp_header_t *) (ip4 + 1);
431 else if ((flags & FLOW_RECORD_L4) && k.protocol == IP_PROTOCOL_TCP)
432 tcp = (tcp_header_t *) (ip4 + 1);
434 octets = clib_net_to_host_u16 (ip4->length);
/* L4 ports; kept in wire byte order inside the key. */
439 k.src_port = udp->src_port;
440 k.dst_port = udp->dst_port;
444 k.src_port = tcp->src_port;
445 k.dst_port = tcp->dst_port;
446 tcp_flags = tcp->flags;
/* Fill the packet trace record when tracing is enabled. */
451 t->rx_sw_if_index = k.rx_sw_if_index;
452 t->tx_sw_if_index = k.tx_sw_if_index;
453 clib_memcpy_fast (t->src_mac, k.src_mac, 6);
454 clib_memcpy_fast (t->dst_mac, k.dst_mac, 6);
455 t->ethertype = k.ethertype;
456 t->src_address.ip4.as_u32 = k.src_address.ip4.as_u32;
457 t->dst_address.ip4.as_u32 = k.dst_address.ip4.as_u32;
458 t->protocol = k.protocol;
459 t->src_port = k.src_port;
460 t->dst_port = k.dst_port;
/* Stateful path: look up (or create) the per-worker flow entry. */
464 flowprobe_entry_t *e = 0;
465 f64 now = vlib_time_now (vm);
466 if (fm->active_timer > 0)
469 bool collision = false;
471 e = flowprobe_lookup (my_cpu_number, &k, &poolindex, &collision);
474 /* Flush data and clean up entry for reuse. */
/* On a hash collision the resident entry is exported and recycled
 * for the new flow, and the collision counter is bumped. */
476 flowprobe_export_entry (vm, e);
478 e->flow_start = timestamp;
479 vlib_node_increment_counter (vm, node->node_index,
480 FLOWPROBE_ERROR_COLLISION, 1);
482 if (!e) /* Create new entry */
484 e = flowprobe_create (my_cpu_number, &k, &poolindex);
485 e->last_exported = now;
486 e->flow_start = timestamp;
/* Stateless path: reuse a single scratch entry per worker. */
491 e = &fm->stateless_entry[my_cpu_number];
/* Update counters/timestamps and export when the active timer has
 * elapsed (or unconditionally in stateless mode). */
499 e->octetcount += octets;
500 e->last_updated = now;
501 e->flow_end = timestamp;
502 e->prot.tcp.flags |= tcp_flags;
503 if (fm->active_timer == 0
504 || (now > e->last_exported + fm->active_timer))
505 flowprobe_export_entry (vm, e);
/* Size of the encapsulation prepended to every export packet:
 * IPv4 + UDP + IPFIX message header + IPFIX set header. */
510 flowprobe_get_headersize (void)
512 return sizeof (ip4_header_t) + sizeof (udp_header_t) +
513 sizeof (ipfix_message_header_t) + sizeof (ipfix_set_header_t);
/* Finalize the per-worker export buffer for variant 'which' (fix up
 * IP/UDP/IPFIX headers, lengths, checksums, sequence number) and hand
 * it to ip4-lookup for transmission toward the collector.  Afterwards
 * the per-worker buffer/frame/offset state is reset. */
517 flowprobe_export_send (vlib_main_t * vm, vlib_buffer_t * b0,
518 flowprobe_variant_t which)
520 flowprobe_main_t *fm = &flowprobe_main;
521 flow_report_main_t *frm = &flow_report_main;
523 ip4_ipfix_template_packet_t *tp;
524 ipfix_set_header_t *s;
525 ipfix_message_header_t *h;
528 flowprobe_record_t flags = fm->context[which].flags;
529 u32 my_cpu_number = vm->thread_index;
532 flow_report_stream_t *stream;
534 /* Nothing to send */
535 if (fm->context[which].next_record_offset_per_worker[my_cpu_number] <=
536 flowprobe_get_headersize ())
/* Find the flow-report stream with domain_id 1, creating it on
 * first use (domain id 1 is hard-coded for this plugin). */
539 u32 i, index = vec_len (frm->streams);
540 for (i = 0; i < index; i++)
541 if (frm->streams[i].domain_id == 1)
546 if (i == vec_len (frm->streams))
548 vec_validate (frm->streams, index);
549 frm->streams[index].domain_id = 1;
551 stream = &frm->streams[index];
/* Lay out header pointers over the start of the buffer. */
553 tp = vlib_buffer_get_current (b0);
554 ip = (ip4_header_t *) & tp->ip4;
555 udp = (udp_header_t *) (ip + 1);
556 h = (ipfix_message_header_t *) (udp + 1);
557 s = (ipfix_set_header_t *) (h + 1);
559 ip->ip_version_and_header_length = 0x45;
561 ip->protocol = IP_PROTOCOL_UDP;
562 ip->flags_and_fragment_offset = 0;
563 ip->src_address.as_u32 = frm->src_address.as_u32;
564 ip->dst_address.as_u32 = frm->ipfix_collector.as_u32;
/* Both ports set to the IPFIX port (4739). */
565 udp->src_port = clib_host_to_net_u16 (UDP_DST_PORT_ipfix);
566 udp->dst_port = clib_host_to_net_u16 (UDP_DST_PORT_ipfix);
569 /* FIXUP: message header export_time */
570 h->export_time = (u32)
571 (((f64) frm->unix_time_0) +
572 (vlib_time_now (frm->vlib_main) - frm->vlib_time_0));
573 h->export_time = clib_host_to_net_u32 (h->export_time);
574 h->domain_id = clib_host_to_net_u32 (stream->domain_id);
576 /* FIXUP: message header sequence_number */
577 h->sequence_number = stream->sequence_number++;
578 h->sequence_number = clib_host_to_net_u32 (h->sequence_number);
/* Set/message lengths derived from the final buffer length. */
580 s->set_id_length = ipfix_set_id_length (fm->template_reports[flags],
582 (sizeof (*ip) + sizeof (*udp) +
584 h->version_length = version_length (b0->current_length -
585 (sizeof (*ip) + sizeof (*udp)));
587 ip->length = clib_host_to_net_u16 (b0->current_length);
589 ip->checksum = ip4_header_checksum (ip);
590 udp->length = clib_host_to_net_u16 (b0->current_length - sizeof (*ip));
592 if (frm->udp_checksum)
594 /* RFC 7011 section 10.3.2. */
595 udp->checksum = ip4_tcp_udp_compute_checksum (vm, b0, ip);
596 if (udp->checksum == 0)
597 udp->checksum = 0xffff;
600 ASSERT (ip->checksum == ip4_header_checksum (ip));
602 /* Find or allocate a frame */
603 f = fm->context[which].frames_per_worker[my_cpu_number];
604 if (PREDICT_FALSE (f == 0))
607 f = vlib_get_frame_to_node (vm, ip4_lookup_node.index);
608 fm->context[which].frames_per_worker[my_cpu_number] = f;
609 u32 bi0 = vlib_get_buffer_index (vm, b0);
611 /* Enqueue the buffer */
612 to_next = vlib_frame_vector_args (f);
617 vlib_put_frame_to_node (vm, ip4_lookup_node.index, f);
/* Export counter is attributed to the l2 node regardless of variant. */
618 vlib_node_increment_counter (vm, flowprobe_l2_node.index,
619 FLOWPROBE_ERROR_EXPORTED_PACKETS, 1);
/* Reset per-worker export state for the next batch of records. */
621 fm->context[which].frames_per_worker[my_cpu_number] = 0;
622 fm->context[which].buffers_per_worker[my_cpu_number] = 0;
623 fm->context[which].next_record_offset_per_worker[my_cpu_number] =
624 flowprobe_get_headersize ();
/* Return this worker's in-progress export buffer for variant 'which',
 * allocating and initializing a fresh one (headroom reserved for the
 * IP/UDP/IPFIX headers) when none is outstanding.  Returns NULL-ish
 * on allocation failure (elided early-return path) after bumping the
 * BUFFER error counter. */
627 static vlib_buffer_t *
628 flowprobe_get_buffer (vlib_main_t * vm, flowprobe_variant_t which)
630 flowprobe_main_t *fm = &flowprobe_main;
631 flow_report_main_t *frm = &flow_report_main;
634 u32 my_cpu_number = vm->thread_index;
636 /* Find or allocate a buffer */
637 b0 = fm->context[which].buffers_per_worker[my_cpu_number];
639 /* Need to allocate a buffer? */
640 if (PREDICT_FALSE (b0 == 0))
642 if (vlib_buffer_alloc (vm, &bi0, 1) != 1)
644 vlib_node_increment_counter (vm, flowprobe_l2_node.index,
645 FLOWPROBE_ERROR_BUFFER, 1);
649 /* Initialize the buffer */
650 b0 = fm->context[which].buffers_per_worker[my_cpu_number] =
651 vlib_get_buffer (vm, bi0);
652 VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b0);
/* Records start right after the encapsulation headers. */
654 b0->current_data = 0;
655 b0->current_length = flowprobe_get_headersize ();
/* FLOW_REPORT flag prevents this buffer from being re-probed by
 * the flowprobe nodes themselves. */
657 (VLIB_BUFFER_TOTAL_LENGTH_VALID | VNET_BUFFER_F_FLOW_REPORT);
658 vnet_buffer (b0)->sw_if_index[VLIB_RX] = 0;
659 vnet_buffer (b0)->sw_if_index[VLIB_TX] = frm->fib_index;
660 fm->context[which].next_record_offset_per_worker[my_cpu_number] =
/* Serialize one flow entry into the per-worker export buffer,
 * appending the common fields plus whichever L2/L3/L4 sections the
 * configured record flags call for, then send the buffer if adding
 * one more record of this template would exceed the path MTU. */
668 flowprobe_export_entry (vlib_main_t * vm, flowprobe_entry_t * e)
670 u32 my_cpu_number = vm->thread_index;
671 flowprobe_main_t *fm = &flowprobe_main;
672 flow_report_main_t *frm = &flow_report_main;
674 bool collect_ip4 = false, collect_ip6 = false;
675 flowprobe_variant_t which = e->key.which;
676 flowprobe_record_t flags = fm->context[which].flags;
678 fm->context[which].next_record_offset_per_worker[my_cpu_number];
/* Never write into the reserved header area. */
680 if (offset < flowprobe_get_headersize ())
681 offset = flowprobe_get_headersize ();
683 b0 = flowprobe_get_buffer (vm, which);
684 /* No available buffer, what to do... */
688 if (flags & FLOW_RECORD_L3)
690 collect_ip4 = which == FLOW_VARIANT_L2_IP4 || which == FLOW_VARIANT_IP4;
691 collect_ip6 = which == FLOW_VARIANT_L2_IP6 || which == FLOW_VARIANT_IP6;
/* Append record sections in template order. */
694 offset += flowprobe_common_add (b0, e, offset);
696 if (flags & FLOW_RECORD_L2)
697 offset += flowprobe_l2_add (b0, e, offset);
699 offset += flowprobe_l3_ip6_add (b0, e, offset);
701 offset += flowprobe_l3_ip4_add (b0, e, offset);
702 if (flags & FLOW_RECORD_L4)
703 offset += flowprobe_l4_add (b0, e, offset);
705 /* Reset per flow-export counters */
708 e->last_exported = vlib_time_now (vm);
710 b0->current_length = offset;
712 fm->context[which].next_record_offset_per_worker[my_cpu_number] = offset;
713 /* Time to flush the buffer? */
714 if (offset + fm->template_size[flags] > frm->path_mtu)
715 flowprobe_export_send (vm, b0, which);
/* Shared worker for the three flowprobe graph nodes.  Standard VPP
 * dual/single-loop: for each buffer, determine the per-variant record
 * flags, update flow state via add_to_flow_record_state(), and pass
 * the packet on to the next feature.  Packets carrying
 * VNET_BUFFER_F_FLOW_REPORT (our own export packets) are skipped so
 * the probe never accounts for its own traffic. */
719 flowprobe_node_fn (vlib_main_t * vm,
720 vlib_node_runtime_t * node, vlib_frame_t * frame,
721 flowprobe_variant_t which)
723 u32 n_left_from, *from, *to_next;
724 flowprobe_next_t next_index;
725 flowprobe_main_t *fm = &flowprobe_main;
726 timestamp_nsec_t timestamp;
/* One timestamp per frame; fixed here: the listing had the '&'
 * address-of operators mangled into HTML '&times;' entities. */
728 unix_time_now_nsec_fraction (&timestamp.sec, &timestamp.nsec);
730 from = vlib_frame_vector_args (frame);
731 n_left_from = frame->n_vectors;
732 next_index = node->cached_next_index;
734 while (n_left_from > 0)
738 vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
/* Dual loop: process two buffers while prefetching the next two. */
740 while (n_left_from >= 4 && n_left_to_next >= 2)
742 u32 next0 = FLOWPROBE_NEXT_DROP;
743 u32 next1 = FLOWPROBE_NEXT_DROP;
746 vlib_buffer_t *b0, *b1;
748 /* Prefetch next iteration. */
750 vlib_buffer_t *p2, *p3;
752 p2 = vlib_get_buffer (vm, from[2]);
753 p3 = vlib_get_buffer (vm, from[3]);
755 vlib_prefetch_buffer_header (p2, LOAD);
756 vlib_prefetch_buffer_header (p3, LOAD);
758 CLIB_PREFETCH (p2->data, CLIB_CACHE_LINE_BYTES, STORE);
759 CLIB_PREFETCH (p3->data, CLIB_CACHE_LINE_BYTES, STORE);
762 /* speculatively enqueue b0 and b1 to the current next frame */
763 to_next[0] = bi0 = from[0];
764 to_next[1] = bi1 = from[1];
770 b0 = vlib_get_buffer (vm, bi0);
771 b1 = vlib_get_buffer (vm, bi1);
/* Let the feature arc pick the real next node. */
773 vnet_feature_next (&next0, b0);
774 vnet_feature_next (&next1, b1);
776 len0 = vlib_buffer_length_in_chain (vm, b0);
777 ethernet_header_t *eh0 = vlib_buffer_get_current (b0);
778 u16 ethertype0 = clib_net_to_host_u16 (eh0->type);
/* Skip our own IPFIX export packets. */
780 if (PREDICT_TRUE ((b0->flags & VNET_BUFFER_F_FLOW_REPORT) == 0))
781 add_to_flow_record_state (vm, node, fm, b0, timestamp, len0,
782 flowprobe_get_variant
783 (which, fm->context[which].flags,
786 len1 = vlib_buffer_length_in_chain (vm, b1);
787 ethernet_header_t *eh1 = vlib_buffer_get_current (b1);
788 u16 ethertype1 = clib_net_to_host_u16 (eh1->type);
790 if (PREDICT_TRUE ((b1->flags & VNET_BUFFER_F_FLOW_REPORT) == 0))
791 add_to_flow_record_state (vm, node, fm, b1, timestamp, len1,
792 flowprobe_get_variant
793 (which, fm->context[which].flags,
796 /* verify speculative enqueues, maybe switch current next frame */
797 vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
798 to_next, n_left_to_next,
799 bi0, bi1, next0, next1);
/* Single loop: one buffer at a time, with optional tracing. */
802 while (n_left_from > 0 && n_left_to_next > 0)
806 u32 next0 = FLOWPROBE_NEXT_DROP;
809 /* speculatively enqueue b0 to the current next frame */
817 b0 = vlib_get_buffer (vm, bi0);
819 vnet_feature_next (&next0, b0);
821 len0 = vlib_buffer_length_in_chain (vm, b0);
822 ethernet_header_t *eh0 = vlib_buffer_get_current (b0);
823 u16 ethertype0 = clib_net_to_host_u16 (eh0->type);
825 if (PREDICT_TRUE ((b0->flags & VNET_BUFFER_F_FLOW_REPORT) == 0))
827 flowprobe_trace_t *t = 0;
828 if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)
829 && (b0->flags & VLIB_BUFFER_IS_TRACED)))
830 t = vlib_add_trace (vm, node, b0, sizeof (*t));
832 add_to_flow_record_state (vm, node, fm, b0, timestamp, len0,
833 flowprobe_get_variant
834 (which, fm->context[which].flags,
838 /* verify speculative enqueue, maybe switch current next frame */
839 vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
840 to_next, n_left_to_next,
844 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
846 return frame->n_vectors;
/* Entry point for the flowprobe-ip4 node: delegate to the shared
 * worker with the IP4 variant. */
850 flowprobe_ip4_node_fn (vlib_main_t * vm,
851 vlib_node_runtime_t * node, vlib_frame_t * frame)
853 return flowprobe_node_fn (vm, node, frame, FLOW_VARIANT_IP4);
/* Entry point for the flowprobe-ip6 node: delegate to the shared
 * worker with the IP6 variant. */
857 flowprobe_ip6_node_fn (vlib_main_t * vm,
858 vlib_node_runtime_t * node, vlib_frame_t * frame)
860 return flowprobe_node_fn (vm, node, frame, FLOW_VARIANT_IP6);
/* Entry point for the flowprobe-l2 node: delegate to the shared
 * worker with the L2 variant. */
864 flowprobe_l2_node_fn (vlib_main_t * vm,
865 vlib_node_runtime_t * node, vlib_frame_t * frame)
867 return flowprobe_node_fn (vm, node, frame, FLOW_VARIANT_L2);
/* Force out whatever records are pending for one variant by sending
 * the current per-worker export buffer. */
871 flush_record (flowprobe_variant_t which)
873 vlib_main_t *vm = vlib_get_main ();
874 vlib_buffer_t *b = flowprobe_get_buffer (vm, which);
876 flowprobe_export_send (vm, b, which);
/* Flush callback registered for the IP4 report. */
880 flowprobe_flush_callback_ip4 (void)
882 flush_record (FLOW_VARIANT_IP4);
/* Flush callback registered for the IP6 report. */
886 flowprobe_flush_callback_ip6 (void)
888 flush_record (FLOW_VARIANT_IP6);
/* Flush callback for L2: flushes all three L2-related variants since
 * the L2 path can refine into L2-IP4/L2-IP6. */
892 flowprobe_flush_callback_l2 (void)
894 flush_record (FLOW_VARIANT_L2);
895 flush_record (FLOW_VARIANT_L2_IP4);
896 flush_record (FLOW_VARIANT_L2_IP6);
/* Remove a flow entry: clear its hash bucket (set to ~0, the empty
 * marker used by flowprobe_lookup) and return the element to the
 * per-worker pool. */
901 flowprobe_delete_by_index (u32 my_cpu_number, u32 poolindex)
903 flowprobe_main_t *fm = &flowprobe_main;
904 flowprobe_entry_t *e;
907 e = pool_elt_at_index (fm->pool_per_worker[my_cpu_number], poolindex);
910 h = flowprobe_hash (&e->key);
913 fm->hash_per_worker[my_cpu_number][h] = ~0;
915 pool_put_index (fm->pool_per_worker[my_cpu_number], poolindex);
919 /* Per worker process processing the active/passive expired entries */
/* Interrupt-mode input node: ticks the passive-timeout timer wheel,
 * then services the expired-entry vector — restarting timers that
 * fired early, exporting entries with pending data, and deleting
 * entries idle past the passive timeout.  Work per invocation is
 * bounded by wall time (~100us) and FLOW_MAXIMUM_EXPORT_ENTRIES. */
921 flowprobe_walker_process (vlib_main_t * vm,
922 vlib_node_runtime_t * rt, vlib_frame_t * f)
924 flowprobe_main_t *fm = &flowprobe_main;
925 flow_report_main_t *frm = &flow_report_main;
926 flowprobe_entry_t *e;
929 * $$$$ Remove this check from here and track FRM status and disable
930 * this process if required.
/* Idle until an IPFIX collector and source address are configured. */
932 if (frm->ipfix_collector.as_u32 == 0 || frm->src_address.as_u32 == 0)
937 fm->disabled = false;
939 u32 cpu_index = os_get_thread_index ();
940 u32 *to_be_removed = 0, *i;
943 * Tick the timer when required and process the vector of expired
946 f64 start_time = vlib_time_now (vm);
949 tw_timer_expire_timers_2t_1w_2048sl (fm->timers_per_worker[cpu_index],
952 vec_foreach (i, fm->expired_passive_per_worker[cpu_index])
/* Budget check: stop early to keep this node from hogging the
 * worker thread. */
955 f64 now = vlib_time_now (vm);
956 if (now > start_time + 100e-6
957 || exported > FLOW_MAXIMUM_EXPORT_ENTRIES - 1)
960 if (pool_is_free_index (fm->pool_per_worker[cpu_index], *i))
962 clib_warning ("Element is %d is freed already\n", *i);
966 e = pool_elt_at_index (fm->pool_per_worker[cpu_index], *i);
968 /* Check last update timestamp. If it is longer than passive time nuke
969 * entry. Otherwise restart timer with what's left
970 * Premature passive timer by more than 10%
972 if ((now - e->last_updated) < (u64) (fm->passive_timer * 0.9))
974 u64 delta = fm->passive_timer - (now - e->last_updated);
975 e->passive_timer_handle = tw_timer_start_2t_1w_2048sl
976 (fm->timers_per_worker[cpu_index], *i, 0, delta);
978 else /* Nuke entry */
980 vec_add1 (to_be_removed, *i);
982 /* If anything to report send it to the exporter */
983 if (e->packetcount && now > e->last_exported + fm->active_timer)
986 flowprobe_export_entry (vm, e);
/* Drop the serviced prefix of the expired vector, then delete the
 * entries queued for removal. */
991 vec_delete (fm->expired_passive_per_worker[cpu_index], count, 0);
993 vec_foreach (i, to_be_removed) flowprobe_delete_by_index (cpu_index, *i);
994 vec_free (to_be_removed);
/* Register the IP4 variant as an internal graph node. */
1000 VLIB_REGISTER_NODE (flowprobe_ip4_node) = {
1001 .function = flowprobe_ip4_node_fn,
1002 .name = "flowprobe-ip4",
1003 .vector_size = sizeof (u32),
1004 .format_trace = format_flowprobe_trace,
1005 .type = VLIB_NODE_TYPE_INTERNAL,
1006 .n_errors = ARRAY_LEN(flowprobe_error_strings),
1007 .error_strings = flowprobe_error_strings,
1008 .n_next_nodes = FLOWPROBE_N_NEXT,
1009 .next_nodes = FLOWPROBE_NEXT_NODES,
/* Register the IP6 variant as an internal graph node. */
1011 VLIB_REGISTER_NODE (flowprobe_ip6_node) = {
1012 .function = flowprobe_ip6_node_fn,
1013 .name = "flowprobe-ip6",
1014 .vector_size = sizeof (u32),
1015 .format_trace = format_flowprobe_trace,
1016 .type = VLIB_NODE_TYPE_INTERNAL,
1017 .n_errors = ARRAY_LEN(flowprobe_error_strings),
1018 .error_strings = flowprobe_error_strings,
1019 .n_next_nodes = FLOWPROBE_N_NEXT,
1020 .next_nodes = FLOWPROBE_NEXT_NODES,
/* Register the L2 variant as an internal graph node. */
1022 VLIB_REGISTER_NODE (flowprobe_l2_node) = {
1023 .function = flowprobe_l2_node_fn,
1024 .name = "flowprobe-l2",
1025 .vector_size = sizeof (u32),
1026 .format_trace = format_flowprobe_trace,
1027 .type = VLIB_NODE_TYPE_INTERNAL,
1028 .n_errors = ARRAY_LEN(flowprobe_error_strings),
1029 .error_strings = flowprobe_error_strings,
1030 .n_next_nodes = FLOWPROBE_N_NEXT,
1031 .next_nodes = FLOWPROBE_NEXT_NODES,
/* Register the timer walker as an interrupt-driven input node; it is
 * triggered by the passive/active timer machinery rather than by
 * packet arrival. */
1033 VLIB_REGISTER_NODE (flowprobe_walker_node) = {
1034 .function = flowprobe_walker_process,
1035 .name = "flowprobe-walker",
1036 .type = VLIB_NODE_TYPE_INPUT,
1037 .state = VLIB_NODE_STATE_INTERRUPT,
1042 * fd.io coding-style-patch-verification: ON
1045 * eval: (c-set-style "gnu")