2 * node.c - ipfix probe graph node
4 * Copyright (c) 2017 Cisco and/or its affiliates.
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at:
9 * http://www.apache.org/licenses/LICENSE-2.0
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
17 #include <vlib/vlib.h>
18 #include <vnet/vnet.h>
19 #include <vppinfra/crc32.h>
20 #include <vppinfra/xxhash.h>
21 #include <vppinfra/error.h>
22 #include <flowprobe/flowprobe.h>
23 #include <vnet/ip/ip6_packet.h>
24 #include <vnet/udp/udp_local.h>
25 #include <vlibmemory/api.h>
27 static void flowprobe_export_entry (vlib_main_t * vm, flowprobe_entry_t * e);
31 * flow record generator graph node
/* NOTE(review): excerpt — the typedef struct header/footer for the per-packet
 * trace record (flowprobe_trace_t) appears elided here; the doc comments and
 * fields below belong to that record. TODO confirm against the full file. */
36 /** interface handle */
39 /** packet timestamp */
41 /** size of the buffer */
51 ip46_address_t src_address;
52 ip46_address_t dst_address;
60 flowprobe_variant_t which;
/* Human-readable names for each flowprobe variant, indexed by
 * flowprobe_variant_t; used by the packet trace formatter below. */
63 static char *flowprobe_variant_strings[] = {
64 [FLOW_VARIANT_IP4] = "IP4",
65 [FLOW_VARIANT_IP6] = "IP6",
66 [FLOW_VARIANT_L2] = "L2",
67 [FLOW_VARIANT_L2_IP4] = "L2-IP4",
68 [FLOW_VARIANT_L2_IP6] = "L2-IP6",
71 /* packet trace format function */
/* Formats a flowprobe_trace_t for "show trace": always prints interfaces,
 * timestamp and size; adds MACs for the L2 variant and the IP 5-tuple for
 * the IP4/IP6 (plain or L2-) variants. */
73 format_flowprobe_trace (u8 * s, va_list * args)
75 CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
76 CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
77 flowprobe_trace_t *t = va_arg (*args, flowprobe_trace_t *);
78 u32 indent = format_get_indent (s);
81 "FLOWPROBE[%s]: rx_sw_if_index %d, tx_sw_if_index %d, "
82 "timestamp %lld, size %d", flowprobe_variant_strings[t->which],
83 t->rx_sw_if_index, t->tx_sw_if_index,
84 t->timestamp, t->buffer_size);
/* L2 variant: show the MAC pair on its own indented line. */
86 if (t->which == FLOW_VARIANT_L2)
87 s = format (s, "\n%U -> %U", format_white_space, indent,
88 format_ethernet_address, &t->src_mac,
89 format_ethernet_address, &t->dst_mac);
/* IP variants: show protocol and src -> dst addresses. */
92 && (t->which == FLOW_VARIANT_L2_IP4 || t->which == FLOW_VARIANT_IP4
93 || t->which == FLOW_VARIANT_L2_IP6 || t->which == FLOW_VARIANT_IP6))
95 format (s, "\n%U%U: %U -> %U", format_white_space, indent,
96 format_ip_protocol, t->protocol, format_ip46_address,
97 &t->src_address, IP46_TYPE_ANY, format_ip46_address,
98 &t->dst_address, IP46_TYPE_ANY);
/* Forward declarations of the graph-node registrations defined at the
 * bottom of this file (input/output feature nodes per variant, plus the
 * per-variant flush interrupt nodes). */
102 vlib_node_registration_t flowprobe_input_ip4_node;
103 vlib_node_registration_t flowprobe_input_ip6_node;
104 vlib_node_registration_t flowprobe_input_l2_node;
105 vlib_node_registration_t flowprobe_output_ip4_node;
106 vlib_node_registration_t flowprobe_output_ip6_node;
107 vlib_node_registration_t flowprobe_output_l2_node;
108 vlib_node_registration_t flowprobe_flush_ip4_node;
109 vlib_node_registration_t flowprobe_flush_ip6_node;
110 vlib_node_registration_t flowprobe_flush_l2_node;
/* Error/counter definitions (X-macro pattern: expands once into the
 * FLOWPROBE_ERROR_* enum and once into the string table) and the
 * next-node table shared by all node registrations below. */
112 /* No counters at the moment */
113 #define foreach_flowprobe_error \
114 _(COLLISION, "Hash table collisions") \
115 _(BUFFER, "Buffer allocation error") \
116 _(EXPORTED_PACKETS, "Exported packets") \
117 _(INPATH, "Exported packets in path")
121 #define _(sym,str) FLOWPROBE_ERROR_##sym,
122 foreach_flowprobe_error
127 static char *flowprobe_error_strings[] = {
128 #define _(sym,string) string,
129 foreach_flowprobe_error
136 FLOWPROBE_NEXT_IP4_LOOKUP,
140 #define FLOWPROBE_NEXT_NODES { \
141 [FLOWPROBE_NEXT_DROP] = "error-drop", \
142 [FLOWPROBE_NEXT_IP4_LOOKUP] = "ip4-lookup", \
/* Refine the L2 variant to L2-IP4/L2-IP6 when L3/L4 collection is enabled
 * and the ethertype identifies an IP payload; other variants pass through
 * (remaining lines of the function are elided in this excerpt). */
145 static inline flowprobe_variant_t
146 flowprobe_get_variant (flowprobe_variant_t which,
147 flowprobe_record_t flags, u16 ethertype)
149 if (which == FLOW_VARIANT_L2
150 && (flags & FLOW_RECORD_L3 || flags & FLOW_RECORD_L4))
151 return ethertype == ETHERNET_TYPE_IP6 ? FLOW_VARIANT_L2_IP6 : ethertype ==
152 ETHERNET_TYPE_IP4 ? FLOW_VARIANT_L2_IP4 : FLOW_VARIANT_L2;
157 * NTP rfc868 : 2 208 988 800 corresponds to 00:00 1 Jan 1970 GMT
159 #define NTP_TIMESTAMP 2208988800LU
/* Serialize the fields common to every IPFIX record (interfaces, direction,
 * packet delta count, flow start/end NTP timestamps) into to_b->data at
 * `offset`, in network byte order. Returns the number of bytes written. */
162 flowprobe_common_add (vlib_buffer_t * to_b, flowprobe_entry_t * e, u16 offset)
166 /* Ingress interface */
167 u32 rx_if = clib_host_to_net_u32 (e->key.rx_sw_if_index);
168 clib_memcpy_fast (to_b->data + offset, &rx_if, sizeof (rx_if));
169 offset += sizeof (rx_if);
171 /* Egress interface */
172 u32 tx_if = clib_host_to_net_u32 (e->key.tx_sw_if_index);
173 clib_memcpy_fast (to_b->data + offset, &tx_if, sizeof (tx_if));
174 offset += sizeof (tx_if);
/* Flow direction: single byte, 1 == egress (TX), 0 == ingress. */
179 to_b->data[offset++] = (e->key.direction == FLOW_DIRECTION_TX);
181 /* packet delta count */
182 u64 packetdelta = clib_host_to_net_u64 (e->packetcount);
183 clib_memcpy_fast (to_b->data + offset, &packetdelta, sizeof (u64));
184 offset += sizeof (u64);
186 /* flowStartNanoseconds */
/* Seconds are rebased from the Unix epoch to the NTP epoch (RFC 868). */
187 u32 t = clib_host_to_net_u32 (e->flow_start.sec + NTP_TIMESTAMP);
188 clib_memcpy_fast (to_b->data + offset, &t, sizeof (u32));
189 offset += sizeof (u32);
190 t = clib_host_to_net_u32 (e->flow_start.nsec);
191 clib_memcpy_fast (to_b->data + offset, &t, sizeof (u32));
192 offset += sizeof (u32);
194 /* flowEndNanoseconds */
195 t = clib_host_to_net_u32 (e->flow_end.sec + NTP_TIMESTAMP);
196 clib_memcpy_fast (to_b->data + offset, &t, sizeof (u32));
197 offset += sizeof (u32);
198 t = clib_host_to_net_u32 (e->flow_end.nsec);
199 clib_memcpy_fast (to_b->data + offset, &t, sizeof (u32));
200 offset += sizeof (u32);
202 return offset - start;
/* Serialize the L2 fields (src MAC, dst MAC, ethertype) into the export
 * buffer at `offset`; returns bytes written. Offset increments between
 * copies are elided in this excerpt. */
206 flowprobe_l2_add (vlib_buffer_t * to_b, flowprobe_entry_t * e, u16 offset)
210 /* src mac address */
211 clib_memcpy_fast (to_b->data + offset, &e->key.src_mac, 6);
214 /* dst mac address */
215 clib_memcpy_fast (to_b->data + offset, &e->key.dst_mac, 6);
/* ethertype (2 bytes) */
219 clib_memcpy_fast (to_b->data + offset, &e->key.ethertype, 2);
222 return offset - start;
/* Serialize the IPv6 L3 fields (src/dst address, protocol, octet delta
 * count) into the export buffer at `offset`; returns bytes written. */
226 flowprobe_l3_ip6_add (vlib_buffer_t * to_b, flowprobe_entry_t * e, u16 offset)
230 /* ip6 src address */
231 clib_memcpy_fast (to_b->data + offset, &e->key.src_address,
232 sizeof (ip6_address_t));
233 offset += sizeof (ip6_address_t);
235 /* ip6 dst address */
236 clib_memcpy_fast (to_b->data + offset, &e->key.dst_address,
237 sizeof (ip6_address_t));
238 offset += sizeof (ip6_address_t);
/* Protocol (single byte, no byte-order conversion needed). */
241 to_b->data[offset++] = e->key.protocol;
243 /* octetDeltaCount */
244 u64 octetdelta = clib_host_to_net_u64 (e->octetcount);
245 clib_memcpy_fast (to_b->data + offset, &octetdelta, sizeof (u64));
246 offset += sizeof (u64);
248 return offset - start;
/* Serialize the IPv4 L3 fields (src/dst address, protocol, octet delta
 * count) into the export buffer at `offset`; returns bytes written.
 * Mirrors flowprobe_l3_ip6_add with 4-byte addresses. */
252 flowprobe_l3_ip4_add (vlib_buffer_t * to_b, flowprobe_entry_t * e, u16 offset)
256 /* ip4 src address */
257 clib_memcpy_fast (to_b->data + offset, &e->key.src_address.ip4,
258 sizeof (ip4_address_t));
259 offset += sizeof (ip4_address_t);
261 /* ip4 dst address */
262 clib_memcpy_fast (to_b->data + offset, &e->key.dst_address.ip4,
263 sizeof (ip4_address_t));
264 offset += sizeof (ip4_address_t);
/* Protocol (single byte). */
267 to_b->data[offset++] = e->key.protocol;
269 /* octetDeltaCount */
270 u64 octetdelta = clib_host_to_net_u64 (e->octetcount);
271 clib_memcpy_fast (to_b->data + offset, &octetdelta, sizeof (u64));
272 offset += sizeof (u64);
274 return offset - start;
/* Serialize the L4 fields (src/dst port, accumulated TCP control bits)
 * into the export buffer at `offset`; returns bytes written. Ports are
 * copied as stored in the key (already in network byte order — TODO
 * confirm against key construction in add_to_flow_record_state). */
278 flowprobe_l4_add (vlib_buffer_t * to_b, flowprobe_entry_t * e, u16 offset)
283 clib_memcpy_fast (to_b->data + offset, &e->key.src_port, 2);
287 clib_memcpy_fast (to_b->data + offset, &e->key.dst_port, 2);
290 /* tcp control bits */
291 u16 control_bits = htons (e->prot.tcp.flags);
292 clib_memcpy_fast (to_b->data + offset, &control_bits, 2);
295 return offset - start;
/* Hash a flow key into the per-worker hash table index space.
 * Uses hardware CRC32C when available, otherwise XORs the key as u64
 * words and mixes with xxhash; the result is reduced to ht_log2len bits
 * by the final shift. */
299 flowprobe_hash (flowprobe_key_t * k)
301 flowprobe_main_t *fm = &flowprobe_main;
304 #ifdef clib_crc32c_uses_intrinsics
305 h = clib_crc32c ((u8 *) k, sizeof (*k));
/* Fallback path: fold the key into one u64, then xxhash it.
 * Assumes sizeof (flowprobe_key_t) is a multiple of 8 — TODO confirm. */
309 for (i = 0; i < sizeof (*k) / 8; i++)
310 tmp ^= ((u64 *) k)[i];
312 h = clib_xxhash (tmp);
315 return h >> (32 - fm->ht_log2len);
/* Look up a flow key in the per-worker hash/pool. On a hash hit with a
 * mismatched key, the slot is a collision (reported via *collision —
 * remaining lines elided in this excerpt). When the active timer is
 * disabled only slot 0 is used (stateless mode). */
319 flowprobe_lookup (u32 my_cpu_number, flowprobe_key_t * k, u32 * poolindex,
322 flowprobe_main_t *fm = &flowprobe_main;
323 flowprobe_entry_t *e;
326 h = (fm->active_timer) ? flowprobe_hash (k) : 0;
328 /* Lookup in the flow state pool */
329 *poolindex = fm->hash_per_worker[my_cpu_number][h];
330 if (*poolindex != ~0)
332 e = pool_elt_at_index (fm->pool_per_worker[my_cpu_number], *poolindex);
335 /* Verify key or report collision */
336 if (memcmp (k, &e->key, sizeof (flowprobe_key_t)))
/* Allocate a new flow entry in the per-worker pool, install it in the
 * hash table, and (when a passive timeout is configured) start its
 * passive timer. Writes the pool index to *poolindex. */
346 flowprobe_create (u32 my_cpu_number, flowprobe_key_t * k, u32 * poolindex)
348 flowprobe_main_t *fm = &flowprobe_main;
351 flowprobe_entry_t *e;
354 h = (fm->active_timer) ? flowprobe_hash (k) : 0;
356 pool_get (fm->pool_per_worker[my_cpu_number], e);
357 *poolindex = e - fm->pool_per_worker[my_cpu_number];
358 fm->hash_per_worker[my_cpu_number][h] = *poolindex;
/* Arm the passive (idle) timer for this entry; timer user data is the
 * pool index so the expiry handler can find the entry again. */
362 if (fm->passive_timer > 0)
364 e->passive_timer_handle = tw_timer_start_2t_1w_2048sl
365 (fm->timers_per_worker[my_cpu_number], *poolindex, 0,
/* Core per-packet path: build a flow key from the buffer (L2/L3/L4 fields
 * as selected by the configured record flags), find-or-create the flow
 * entry, and update its counters/timestamps. With the active timer
 * disabled, a single per-worker stateless entry is updated and exported
 * immediately. Fills the optional trace record `t` when tracing.
 * NOTE(review): excerpt — several lines (braces, some declarations such
 * as `octets`/`tcp_flags`) are elided. */
372 add_to_flow_record_state (vlib_main_t *vm, vlib_node_runtime_t *node,
373 flowprobe_main_t *fm, vlib_buffer_t *b,
374 timestamp_nsec_t timestamp, u16 length,
375 flowprobe_variant_t which,
376 flowprobe_direction_t direction,
377 flowprobe_trace_t *t)
382 ASSERT (direction == FLOW_DIRECTION_RX || direction == FLOW_DIRECTION_TX);
384 u32 my_cpu_number = vm->thread_index;
387 flowprobe_record_t flags = fm->context[which].flags;
388 bool collect_ip4 = false, collect_ip6 = false;
/* On TX the ethernet header is at the buffer's current data pointer;
 * on RX it is recovered via ethernet_buffer_get_header(). */
390 ethernet_header_t *eth = (direction == FLOW_DIRECTION_TX) ?
391 vlib_buffer_get_current (b) :
392 ethernet_buffer_get_header (b);
393 u16 ethertype = clib_net_to_host_u16 (eth->type);
394 i16 l3_hdr_offset = (u8 *) eth - b->data + sizeof (ethernet_header_t);
395 flowprobe_key_t k = {};
396 ip4_header_t *ip4 = 0;
397 ip6_header_t *ip6 = 0;
398 udp_header_t *udp = 0;
399 tcp_header_t *tcp = 0;
/* Decide which IP family to collect based on variant and record flags. */
402 if (flags & FLOW_RECORD_L3 || flags & FLOW_RECORD_L4)
404 collect_ip4 = which == FLOW_VARIANT_L2_IP4 || which == FLOW_VARIANT_IP4;
405 collect_ip6 = which == FLOW_VARIANT_L2_IP6 || which == FLOW_VARIANT_IP6;
408 k.rx_sw_if_index = vnet_buffer (b)->sw_if_index[VLIB_RX];
409 k.tx_sw_if_index = vnet_buffer (b)->sw_if_index[VLIB_TX];
412 k.direction = direction;
414 if (flags & FLOW_RECORD_L2)
416 clib_memcpy_fast (k.src_mac, eth->src_address, 6);
417 clib_memcpy_fast (k.dst_mac, eth->dst_address, 6);
418 k.ethertype = ethertype;
/* Walk any VLAN tags to find the real ethertype and L3 offset. */
420 if (ethertype == ETHERNET_TYPE_VLAN)
423 ethernet_vlan_header_tv_t *ethv =
424 (ethernet_vlan_header_tv_t *) (&(eth->type));
425 /*Q in Q possibility */
426 while (clib_net_to_host_u16 (ethv->type) == ETHERNET_TYPE_VLAN)
429 l3_hdr_offset += sizeof (ethernet_vlan_header_tv_t);
431 k.ethertype = ethertype = clib_net_to_host_u16 ((ethv)->type);
/* IPv6: copy addresses/protocol into the key, locate the L4 header.
 * NOTE(review): L4 header taken as (ip6 + 1) — extension headers are not
 * walked here. */
433 if (collect_ip6 && ethertype == ETHERNET_TYPE_IP6)
435 ip6 = (ip6_header_t *) (b->data + l3_hdr_offset);
436 if (flags & FLOW_RECORD_L3)
438 k.src_address.as_u64[0] = ip6->src_address.as_u64[0];
439 k.src_address.as_u64[1] = ip6->src_address.as_u64[1];
440 k.dst_address.as_u64[0] = ip6->dst_address.as_u64[0];
441 k.dst_address.as_u64[1] = ip6->dst_address.as_u64[1];
443 k.protocol = ip6->protocol;
444 if (k.protocol == IP_PROTOCOL_UDP)
445 udp = (udp_header_t *) (ip6 + 1);
446 else if (k.protocol == IP_PROTOCOL_TCP)
447 tcp = (tcp_header_t *) (ip6 + 1);
449 octets = clib_net_to_host_u16 (ip6->payload_length)
450 + sizeof (ip6_header_t);
/* IPv4: same treatment. NOTE(review): (ip4 + 1) assumes no IP options. */
452 if (collect_ip4 && ethertype == ETHERNET_TYPE_IP4)
454 ip4 = (ip4_header_t *) (b->data + l3_hdr_offset);
455 if (flags & FLOW_RECORD_L3)
457 k.src_address.ip4.as_u32 = ip4->src_address.as_u32;
458 k.dst_address.ip4.as_u32 = ip4->dst_address.as_u32;
460 k.protocol = ip4->protocol;
461 if ((flags & FLOW_RECORD_L4) && k.protocol == IP_PROTOCOL_UDP)
462 udp = (udp_header_t *) (ip4 + 1);
463 else if ((flags & FLOW_RECORD_L4) && k.protocol == IP_PROTOCOL_TCP)
464 tcp = (tcp_header_t *) (ip4 + 1);
466 octets = clib_net_to_host_u16 (ip4->length);
/* L4 ports: taken from whichever header was found above. */
471 k.src_port = udp->src_port;
472 k.dst_port = udp->dst_port;
476 k.src_port = tcp->src_port;
477 k.dst_port = tcp->dst_port;
478 tcp_flags = tcp->flags;
/* Populate the optional packet trace from the key just built. */
483 t->rx_sw_if_index = k.rx_sw_if_index;
484 t->tx_sw_if_index = k.tx_sw_if_index;
485 clib_memcpy_fast (t->src_mac, k.src_mac, 6);
486 clib_memcpy_fast (t->dst_mac, k.dst_mac, 6);
487 t->ethertype = k.ethertype;
488 t->src_address.ip4.as_u32 = k.src_address.ip4.as_u32;
489 t->dst_address.ip4.as_u32 = k.dst_address.ip4.as_u32;
490 t->protocol = k.protocol;
491 t->src_port = k.src_port;
492 t->dst_port = k.dst_port;
496 flowprobe_entry_t *e = 0;
497 f64 now = vlib_time_now (vm);
/* Stateful mode: look up / create a per-flow entry. */
498 if (fm->active_timer > 0)
501 bool collision = false;
503 e = flowprobe_lookup (my_cpu_number, &k, &poolindex, &collision);
506 /* Flush data and clean up entry for reuse. */
508 flowprobe_export_entry (vm, e);
510 e->flow_start = timestamp;
511 vlib_node_increment_counter (vm, node->node_index,
512 FLOWPROBE_ERROR_COLLISION, 1);
514 if (!e) /* Create new entry */
516 e = flowprobe_create (my_cpu_number, &k, &poolindex);
517 e->last_exported = now;
518 e->flow_start = timestamp;
/* Stateless mode: reuse the single per-worker scratch entry. */
523 e = &fm->stateless_entry[my_cpu_number];
/* Accumulate counters and refresh timestamps on every packet. */
531 e->octetcount += octets;
532 e->last_updated = now;
533 e->flow_end = timestamp;
534 e->prot.tcp.flags |= tcp_flags;
/* Export immediately in stateless mode, or when the active timer for
 * this entry has elapsed. */
535 if (fm->active_timer == 0
536 || (now > e->last_exported + fm->active_timer))
537 flowprobe_export_entry (vm, e);
/* Size of the fixed encapsulation prepended to every export packet:
 * IPv4 + UDP + IPFIX message header + IPFIX set header. */
542 flowprobe_get_headersize (void)
544 return sizeof (ip4_header_t) + sizeof (udp_header_t) +
545 sizeof (ipfix_message_header_t) + sizeof (ipfix_set_header_t);
/* Finalize and transmit the per-worker export buffer for `which`:
 * fill in the IPv4/UDP/IPFIX headers around the accumulated records,
 * fix up lengths/checksums/sequence number, and hand the buffer to
 * ip4-lookup. Resets the per-worker buffer/offset state afterwards.
 * NOTE(review): excerpt — some lines (e.g. early return, ip->ttl,
 * checksum/length fixups between the visible ones) are elided. */
549 flowprobe_export_send (vlib_main_t * vm, vlib_buffer_t * b0,
550 flowprobe_variant_t which)
552 flowprobe_main_t *fm = &flowprobe_main;
553 flow_report_main_t *frm = &flow_report_main;
554 ipfix_exporter_t *exp = pool_elt_at_index (frm->exporters, 0);
556 ip4_ipfix_template_packet_t *tp;
557 ipfix_set_header_t *s;
558 ipfix_message_header_t *h;
561 flowprobe_record_t flags = fm->context[which].flags;
562 u32 my_cpu_number = vm->thread_index;
565 flow_report_stream_t *stream;
567 /* Nothing to send */
568 if (fm->context[which].next_record_offset_per_worker[my_cpu_number] <=
569 flowprobe_get_headersize ())
/* Find (or create) the exporter stream with domain_id 1. */
572 u32 i, index = vec_len (exp->streams);
573 for (i = 0; i < index; i++)
574 if (exp->streams[i].domain_id == 1)
579 if (i == vec_len (exp->streams))
581 vec_validate (exp->streams, index);
582 exp->streams[index].domain_id = 1;
584 stream = &exp->streams[index];
/* Lay out header pointers over the start of the buffer. */
586 tp = vlib_buffer_get_current (b0);
587 ip = (ip4_header_t *) & tp->ip4;
588 udp = (udp_header_t *) (ip + 1);
589 h = (ipfix_message_header_t *) (udp + 1);
590 s = (ipfix_set_header_t *) (h + 1);
592 ip->ip_version_and_header_length = 0x45;
594 ip->protocol = IP_PROTOCOL_UDP;
595 ip->flags_and_fragment_offset = 0;
596 ip->src_address.as_u32 = exp->src_address.ip.ip4.as_u32;
597 ip->dst_address.as_u32 = exp->ipfix_collector.ip.ip4.as_u32;
598 udp->src_port = clib_host_to_net_u16 (stream->src_port);
599 udp->dst_port = clib_host_to_net_u16 (exp->collector_port);
602 /* FIXUP: message header export_time */
604 (u32) (((f64) frm->unix_time_0) + (vlib_time_now (vm) - frm->vlib_time_0));
605 h->export_time = clib_host_to_net_u32 (h->export_time);
606 h->domain_id = clib_host_to_net_u32 (stream->domain_id);
608 /* FIXUP: message header sequence_number */
609 h->sequence_number = stream->sequence_number++;
610 h->sequence_number = clib_host_to_net_u32 (h->sequence_number);
612 s->set_id_length = ipfix_set_id_length (fm->template_reports[flags],
614 (sizeof (*ip) + sizeof (*udp) +
616 h->version_length = version_length (b0->current_length -
617 (sizeof (*ip) + sizeof (*udp)));
619 ip->length = clib_host_to_net_u16 (b0->current_length);
621 ip->checksum = ip4_header_checksum (ip);
622 udp->length = clib_host_to_net_u16 (b0->current_length - sizeof (*ip));
624 if (exp->udp_checksum)
626 /* RFC 7011 section 10.3.2. */
627 udp->checksum = ip4_tcp_udp_compute_checksum (vm, b0, ip);
/* Per RFC 768 a computed checksum of zero is transmitted as all ones. */
628 if (udp->checksum == 0)
629 udp->checksum = 0xffff;
632 ASSERT (ip4_header_checksum_is_valid (ip));
634 /* Find or allocate a frame */
635 f = fm->context[which].frames_per_worker[my_cpu_number];
636 if (PREDICT_FALSE (f == 0))
639 f = vlib_get_frame_to_node (vm, ip4_lookup_node.index);
640 fm->context[which].frames_per_worker[my_cpu_number] = f;
641 u32 bi0 = vlib_get_buffer_index (vm, b0);
643 /* Enqueue the buffer */
644 to_next = vlib_frame_vector_args (f);
649 vlib_put_frame_to_node (vm, ip4_lookup_node.index, f);
650 vlib_node_increment_counter (vm, flowprobe_output_l2_node.index,
651 FLOWPROBE_ERROR_EXPORTED_PACKETS, 1);
/* Reset per-worker export state so the next record starts a new buffer. */
653 fm->context[which].frames_per_worker[my_cpu_number] = 0;
654 fm->context[which].buffers_per_worker[my_cpu_number] = 0;
655 fm->context[which].next_record_offset_per_worker[my_cpu_number] =
656 flowprobe_get_headersize ();
659 static vlib_buffer_t *
/* Return the per-worker export buffer for `which`, allocating and
 * initializing one (headers reserved, FLOW_REPORT flag set, TX fib set
 * to the exporter's fib) if none is in progress. Returns 0 on allocation
 * failure (counted as FLOWPROBE_ERROR_BUFFER). */
660 flowprobe_get_buffer (vlib_main_t * vm, flowprobe_variant_t which)
662 flowprobe_main_t *fm = &flowprobe_main;
663 ipfix_exporter_t *exp = pool_elt_at_index (flow_report_main.exporters, 0);
666 u32 my_cpu_number = vm->thread_index;
668 /* Find or allocate a buffer */
669 b0 = fm->context[which].buffers_per_worker[my_cpu_number];
671 /* Need to allocate a buffer? */
672 if (PREDICT_FALSE (b0 == 0))
674 if (vlib_buffer_alloc (vm, &bi0, 1) != 1)
676 vlib_node_increment_counter (vm, flowprobe_output_l2_node.index,
677 FLOWPROBE_ERROR_BUFFER, 1);
681 /* Initialize the buffer */
682 b0 = fm->context[which].buffers_per_worker[my_cpu_number] =
683 vlib_get_buffer (vm, bi0);
685 b0->current_data = 0;
686 b0->current_length = flowprobe_get_headersize ();
688 (VLIB_BUFFER_TOTAL_LENGTH_VALID | VNET_BUFFER_F_FLOW_REPORT);
689 vnet_buffer (b0)->sw_if_index[VLIB_RX] = 0;
690 vnet_buffer (b0)->sw_if_index[VLIB_TX] = exp->fib_index;
691 fm->context[which].next_record_offset_per_worker[my_cpu_number] =
/* Append one flow entry's record (common + L2/L3/L4 sections selected by
 * the variant's record flags) to the per-worker export buffer, reset the
 * entry's per-export counters, and send the buffer if the next record
 * would exceed the exporter's path MTU. */
699 flowprobe_export_entry (vlib_main_t * vm, flowprobe_entry_t * e)
701 u32 my_cpu_number = vm->thread_index;
702 flowprobe_main_t *fm = &flowprobe_main;
703 ipfix_exporter_t *exp = pool_elt_at_index (flow_report_main.exporters, 0);
705 bool collect_ip4 = false, collect_ip6 = false;
706 bool collect_l4 = false;
707 flowprobe_variant_t which = e->key.which;
708 flowprobe_record_t flags = fm->context[which].flags;
710 fm->context[which].next_record_offset_per_worker[my_cpu_number];
/* First record of a fresh buffer: skip past the reserved header space. */
712 if (offset < flowprobe_get_headersize ())
713 offset = flowprobe_get_headersize ();
715 b0 = flowprobe_get_buffer (vm, which);
716 /* No available buffer, what to do... */
720 if (flags & FLOW_RECORD_L3)
722 collect_ip4 = which == FLOW_VARIANT_L2_IP4 || which == FLOW_VARIANT_IP4;
723 collect_ip6 = which == FLOW_VARIANT_L2_IP6 || which == FLOW_VARIANT_IP6;
725 if (flags & FLOW_RECORD_L4)
727 collect_l4 = (which != FLOW_VARIANT_L2);
/* Serialize the record sections in template order. */
730 offset += flowprobe_common_add (b0, e, offset);
732 if (flags & FLOW_RECORD_L2)
733 offset += flowprobe_l2_add (b0, e, offset);
735 offset += flowprobe_l3_ip6_add (b0, e, offset);
737 offset += flowprobe_l3_ip4_add (b0, e, offset);
739 offset += flowprobe_l4_add (b0, e, offset);
741 /* Reset per flow-export counters */
744 e->last_exported = vlib_time_now (vm);
745 e->prot.tcp.flags = 0;
747 b0->current_length = offset;
749 fm->context[which].next_record_offset_per_worker[my_cpu_number] = offset;
750 /* Time to flush the buffer? */
751 if (offset + fm->template_size[flags] > exp->path_mtu)
752 flowprobe_export_send (vm, b0, which);
/* Common dispatch function for all flowprobe input/output feature nodes.
 * Walks the frame (dual-loop: pairs with prefetch, then singles), records
 * each non-export packet into the flow state via add_to_flow_record_state,
 * and forwards every buffer to its next feature (vnet_feature_next).
 * Packets flagged VNET_BUFFER_F_FLOW_REPORT are the probe's own export
 * packets and are skipped. Returns the number of vectors processed.
 * FIX(review): the timestamp call below had mojibake ("×tamp") from an
 * HTML-entity-mangled "&times" — restored to "&timestamp". */
756 flowprobe_node_fn (vlib_main_t *vm, vlib_node_runtime_t *node,
757 vlib_frame_t *frame, flowprobe_variant_t which,
758 flowprobe_direction_t direction)
760 u32 n_left_from, *from, *to_next;
761 flowprobe_next_t next_index;
762 flowprobe_main_t *fm = &flowprobe_main;
763 timestamp_nsec_t timestamp;
765 unix_time_now_nsec_fraction (&timestamp.sec, &timestamp.nsec);
767 from = vlib_frame_vector_args (frame);
768 n_left_from = frame->n_vectors;
769 next_index = node->cached_next_index;
771 while (n_left_from > 0)
775 vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
/* Dual loop: handle two buffers per iteration with lookahead prefetch. */
777 while (n_left_from >= 4 && n_left_to_next >= 2)
779 u32 next0 = FLOWPROBE_NEXT_DROP;
780 u32 next1 = FLOWPROBE_NEXT_DROP;
783 vlib_buffer_t *b0, *b1;
785 /* Prefetch next iteration. */
787 vlib_buffer_t *p2, *p3;
789 p2 = vlib_get_buffer (vm, from[2]);
790 p3 = vlib_get_buffer (vm, from[3]);
792 vlib_prefetch_buffer_header (p2, LOAD);
793 vlib_prefetch_buffer_header (p3, LOAD);
795 clib_prefetch_store (p2->data);
796 clib_prefetch_store (p3->data);
799 /* speculatively enqueue b0 and b1 to the current next frame */
800 to_next[0] = bi0 = from[0];
801 to_next[1] = bi1 = from[1];
807 b0 = vlib_get_buffer (vm, bi0);
808 b1 = vlib_get_buffer (vm, bi1);
810 vnet_feature_next (&next0, b0);
811 vnet_feature_next (&next1, b1);
813 len0 = vlib_buffer_length_in_chain (vm, b0);
814 ethernet_header_t *eh0 = vlib_buffer_get_current (b0);
815 u16 ethertype0 = clib_net_to_host_u16 (eh0->type);
/* Skip the probe's own export packets (FLOW_REPORT flag set). */
817 if (PREDICT_TRUE ((b0->flags & VNET_BUFFER_F_FLOW_REPORT) == 0))
818 add_to_flow_record_state (
819 vm, node, fm, b0, timestamp, len0,
820 flowprobe_get_variant (which, fm->context[which].flags,
824 len1 = vlib_buffer_length_in_chain (vm, b1);
825 ethernet_header_t *eh1 = vlib_buffer_get_current (b1);
826 u16 ethertype1 = clib_net_to_host_u16 (eh1->type);
828 if (PREDICT_TRUE ((b1->flags & VNET_BUFFER_F_FLOW_REPORT) == 0))
829 add_to_flow_record_state (
830 vm, node, fm, b1, timestamp, len1,
831 flowprobe_get_variant (which, fm->context[which].flags,
835 /* verify speculative enqueues, maybe switch current next frame */
836 vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
837 to_next, n_left_to_next,
838 bi0, bi1, next0, next1);
/* Single loop: remaining buffers one at a time (with optional tracing). */
841 while (n_left_from > 0 && n_left_to_next > 0)
845 u32 next0 = FLOWPROBE_NEXT_DROP;
848 /* speculatively enqueue b0 to the current next frame */
856 b0 = vlib_get_buffer (vm, bi0);
858 vnet_feature_next (&next0, b0);
860 len0 = vlib_buffer_length_in_chain (vm, b0);
861 ethernet_header_t *eh0 = vlib_buffer_get_current (b0);
862 u16 ethertype0 = clib_net_to_host_u16 (eh0->type);
864 if (PREDICT_TRUE ((b0->flags & VNET_BUFFER_F_FLOW_REPORT) == 0))
866 flowprobe_trace_t *t = 0;
867 if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)
868 && (b0->flags & VLIB_BUFFER_IS_TRACED)))
869 t = vlib_add_trace (vm, node, b0, sizeof (*t));
871 add_to_flow_record_state (
872 vm, node, fm, b0, timestamp, len0,
873 flowprobe_get_variant (which, fm->context[which].flags,
878 /* verify speculative enqueue, maybe switch current next frame */
879 vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
880 to_next, n_left_to_next,
884 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
886 return frame->n_vectors;
/* Thin per-node wrappers: each binds the common dispatch function
 * flowprobe_node_fn to one (variant, direction) pair. Input nodes run in
 * the RX feature arc, output nodes in TX (direction arguments elided in
 * this excerpt — TODO confirm). */
890 flowprobe_input_ip4_node_fn (vlib_main_t *vm, vlib_node_runtime_t *node,
893 return flowprobe_node_fn (vm, node, frame, FLOW_VARIANT_IP4,
898 flowprobe_input_ip6_node_fn (vlib_main_t *vm, vlib_node_runtime_t *node,
901 return flowprobe_node_fn (vm, node, frame, FLOW_VARIANT_IP6,
906 flowprobe_input_l2_node_fn (vlib_main_t *vm, vlib_node_runtime_t *node,
909 return flowprobe_node_fn (vm, node, frame, FLOW_VARIANT_L2,
914 flowprobe_output_ip4_node_fn (vlib_main_t *vm, vlib_node_runtime_t *node,
917 return flowprobe_node_fn (vm, node, frame, FLOW_VARIANT_IP4,
922 flowprobe_output_ip6_node_fn (vlib_main_t *vm, vlib_node_runtime_t *node,
925 return flowprobe_node_fn (vm, node, frame, FLOW_VARIANT_IP6,
930 flowprobe_output_l2_node_fn (vlib_main_t *vm, vlib_node_runtime_t *node,
933 return flowprobe_node_fn (vm, node, frame, FLOW_VARIANT_L2,
/* Flush the current thread's in-progress export buffer for `which`
 * (no-op inside export_send if there is nothing beyond the headers). */
938 flush_record (flowprobe_variant_t which)
940 vlib_main_t *vm = vlib_get_main ();
941 vlib_buffer_t *b = flowprobe_get_buffer (vm, which);
943 flowprobe_export_send (vm, b, which);
/* Flush IP4 export buffers on all threads: interrupt each worker's
 * flush node, then flush the main thread's buffer directly. */
947 flowprobe_flush_callback_ip4 (void)
949 vlib_main_t *worker_vm;
952 /* Flush for each worker thread */
953 for (i = 1; i < vlib_get_n_threads (); i++)
955 worker_vm = vlib_get_main_by_index (i);
957 vlib_node_set_interrupt_pending (worker_vm,
958 flowprobe_flush_ip4_node.index);
961 /* Flush for the main thread */
962 flush_record (FLOW_VARIANT_IP4);
/* Flush IP6 export buffers on all threads; same pattern as the IP4
 * callback above. */
966 flowprobe_flush_callback_ip6 (void)
968 vlib_main_t *worker_vm;
971 /* Flush for each worker thread */
972 for (i = 1; i < vlib_get_n_threads (); i++)
974 worker_vm = vlib_get_main_by_index (i);
976 vlib_node_set_interrupt_pending (worker_vm,
977 flowprobe_flush_ip6_node.index);
980 /* Flush for the main thread */
981 flush_record (FLOW_VARIANT_IP6);
/* Flush L2 export buffers on all threads. The main thread flushes all
 * three L2-family variants, since L2 flows may have been refined to
 * L2-IP4/L2-IP6 records. */
985 flowprobe_flush_callback_l2 (void)
987 vlib_main_t *worker_vm;
990 /* Flush for each worker thread */
991 for (i = 1; i < vlib_get_n_threads (); i++)
993 worker_vm = vlib_get_main_by_index (i);
995 vlib_node_set_interrupt_pending (worker_vm,
996 flowprobe_flush_l2_node.index);
999 /* Flush for the main thread */
1000 flush_record (FLOW_VARIANT_L2);
1001 flush_record (FLOW_VARIANT_L2_IP4);
1002 flush_record (FLOW_VARIANT_L2_IP6);
/* Remove a flow entry from the per-worker state: clear its hash slot
 * (recomputed from the key) and return the element to the pool. */
1006 flowprobe_delete_by_index (u32 my_cpu_number, u32 poolindex)
1008 flowprobe_main_t *fm = &flowprobe_main;
1009 flowprobe_entry_t *e;
1012 e = pool_elt_at_index (fm->pool_per_worker[my_cpu_number], poolindex);
1015 h = flowprobe_hash (&e->key);
1018 fm->hash_per_worker[my_cpu_number][h] = ~0;
1020 pool_put_index (fm->pool_per_worker[my_cpu_number], poolindex);
1024 /* Per worker process processing the active/passive expired entries */
/* Input-node (interrupt) walker: ticks the timer wheel, then services the
 * expired-passive vector — re-arming timers for entries that were updated
 * recently, exporting entries with pending data past their active timer,
 * and deleting truly idle entries. Work is bounded per invocation (time
 * budget and FLOW_MAXIMUM_EXPORT_ENTRIES). Disables itself (fm->disabled)
 * while the exporter has no collector/source address configured.
 * FIX(review): corrected the garbled warning string
 * "Element is %d is freed already" -> "Element %d is freed already". */
1026 flowprobe_walker_process (vlib_main_t * vm,
1027 vlib_node_runtime_t * rt, vlib_frame_t * f)
1029 flowprobe_main_t *fm = &flowprobe_main;
1030 flowprobe_entry_t *e;
1031 ipfix_exporter_t *exp = pool_elt_at_index (flow_report_main.exporters, 0);
1034 * $$$$ Remove this check from here and track FRM status and disable
1035 * this process if required.
1037 if (ip_address_is_zero (&exp->ipfix_collector) ||
1038 ip_address_is_zero (&exp->src_address))
1040 fm->disabled = true;
1043 fm->disabled = false;
1045 u32 cpu_index = os_get_thread_index ();
1046 u32 *to_be_removed = 0, *i;
1049 * Tick the timer when required and process the vector of expired
1052 f64 start_time = vlib_time_now (vm);
1055 tw_timer_expire_timers_2t_1w_2048sl (fm->timers_per_worker[cpu_index],
1058 vec_foreach (i, fm->expired_passive_per_worker[cpu_index])
/* Bound the work done in one interrupt: stop after ~100us or after
 * FLOW_MAXIMUM_EXPORT_ENTRIES exports. */
1061 f64 now = vlib_time_now (vm);
1062 if (now > start_time + 100e-6
1063 || exported > FLOW_MAXIMUM_EXPORT_ENTRIES - 1)
1066 if (pool_is_free_index (fm->pool_per_worker[cpu_index], *i))
1068 clib_warning ("Element %d is freed already\n", *i);
1072 e = pool_elt_at_index (fm->pool_per_worker[cpu_index], *i);
1074 /* Check last update timestamp. If it is longer than passive time nuke
1075 * entry. Otherwise restart timer with what's left
1076 * Premature passive timer by more than 10%
1078 if ((now - e->last_updated) < (u64) (fm->passive_timer * 0.9))
1080 u64 delta = fm->passive_timer - (now - e->last_updated);
1081 e->passive_timer_handle = tw_timer_start_2t_1w_2048sl
1082 (fm->timers_per_worker[cpu_index], *i, 0, delta);
1084 else /* Nuke entry */
1086 vec_add1 (to_be_removed, *i);
1088 /* If anything to report send it to the exporter */
1089 if (e->packetcount && now > e->last_exported + fm->active_timer)
1092 flowprobe_export_entry (vm, e);
/* Drop the serviced prefix of the expired vector, then delete the
 * nuked entries and free the scratch vector. */
1097 vec_delete (fm->expired_passive_per_worker[cpu_index], count, 0);
1099 vec_foreach (i, to_be_removed) flowprobe_delete_by_index (cpu_index, *i);
1100 vec_free (to_be_removed);
/* Interrupt-node bodies run on each worker by the flush callbacks above:
 * flush that worker's export buffer(s) for the corresponding variant(s). */
1106 flowprobe_flush_ip4 (vlib_main_t *vm, vlib_node_runtime_t *rt, vlib_frame_t *f)
1108 flush_record (FLOW_VARIANT_IP4);
1114 flowprobe_flush_ip6 (vlib_main_t *vm, vlib_node_runtime_t *rt, vlib_frame_t *f)
1116 flush_record (FLOW_VARIANT_IP6);
1122 flowprobe_flush_l2 (vlib_main_t *vm, vlib_node_runtime_t *rt, vlib_frame_t *f)
1124 flush_record (FLOW_VARIANT_L2);
1125 flush_record (FLOW_VARIANT_L2_IP4);
1126 flush_record (FLOW_VARIANT_L2_IP6);
/* Graph-node registrations: three input and three output feature nodes
 * (one per variant) sharing the error table and next-node map, plus the
 * walker and the three per-worker flush interrupt nodes. */
1131 VLIB_REGISTER_NODE (flowprobe_input_ip4_node) = {
1132 .function = flowprobe_input_ip4_node_fn,
1133 .name = "flowprobe-input-ip4",
1134 .vector_size = sizeof (u32),
1135 .format_trace = format_flowprobe_trace,
1136 .type = VLIB_NODE_TYPE_INTERNAL,
1137 .n_errors = ARRAY_LEN (flowprobe_error_strings),
1138 .error_strings = flowprobe_error_strings,
1139 .n_next_nodes = FLOWPROBE_N_NEXT,
1140 .next_nodes = FLOWPROBE_NEXT_NODES,
1142 VLIB_REGISTER_NODE (flowprobe_input_ip6_node) = {
1143 .function = flowprobe_input_ip6_node_fn,
1144 .name = "flowprobe-input-ip6",
1145 .vector_size = sizeof (u32),
1146 .format_trace = format_flowprobe_trace,
1147 .type = VLIB_NODE_TYPE_INTERNAL,
1148 .n_errors = ARRAY_LEN (flowprobe_error_strings),
1149 .error_strings = flowprobe_error_strings,
1150 .n_next_nodes = FLOWPROBE_N_NEXT,
1151 .next_nodes = FLOWPROBE_NEXT_NODES,
1153 VLIB_REGISTER_NODE (flowprobe_input_l2_node) = {
1154 .function = flowprobe_input_l2_node_fn,
1155 .name = "flowprobe-input-l2",
1156 .vector_size = sizeof (u32),
1157 .format_trace = format_flowprobe_trace,
1158 .type = VLIB_NODE_TYPE_INTERNAL,
1159 .n_errors = ARRAY_LEN (flowprobe_error_strings),
1160 .error_strings = flowprobe_error_strings,
1161 .n_next_nodes = FLOWPROBE_N_NEXT,
1162 .next_nodes = FLOWPROBE_NEXT_NODES,
1164 VLIB_REGISTER_NODE (flowprobe_output_ip4_node) = {
1165 .function = flowprobe_output_ip4_node_fn,
1166 .name = "flowprobe-output-ip4",
1167 .vector_size = sizeof (u32),
1168 .format_trace = format_flowprobe_trace,
1169 .type = VLIB_NODE_TYPE_INTERNAL,
1170 .n_errors = ARRAY_LEN (flowprobe_error_strings),
1171 .error_strings = flowprobe_error_strings,
1172 .n_next_nodes = FLOWPROBE_N_NEXT,
1173 .next_nodes = FLOWPROBE_NEXT_NODES,
1175 VLIB_REGISTER_NODE (flowprobe_output_ip6_node) = {
1176 .function = flowprobe_output_ip6_node_fn,
1177 .name = "flowprobe-output-ip6",
1178 .vector_size = sizeof (u32),
1179 .format_trace = format_flowprobe_trace,
1180 .type = VLIB_NODE_TYPE_INTERNAL,
1181 .n_errors = ARRAY_LEN (flowprobe_error_strings),
1182 .error_strings = flowprobe_error_strings,
1183 .n_next_nodes = FLOWPROBE_N_NEXT,
1184 .next_nodes = FLOWPROBE_NEXT_NODES,
1186 VLIB_REGISTER_NODE (flowprobe_output_l2_node) = {
1187 .function = flowprobe_output_l2_node_fn,
1188 .name = "flowprobe-output-l2",
1189 .vector_size = sizeof (u32),
1190 .format_trace = format_flowprobe_trace,
1191 .type = VLIB_NODE_TYPE_INTERNAL,
1192 .n_errors = ARRAY_LEN (flowprobe_error_strings),
1193 .error_strings = flowprobe_error_strings,
1194 .n_next_nodes = FLOWPROBE_N_NEXT,
1195 .next_nodes = FLOWPROBE_NEXT_NODES,
1197 VLIB_REGISTER_NODE (flowprobe_walker_node) = {
1198 .function = flowprobe_walker_process,
1199 .name = "flowprobe-walker",
1200 .type = VLIB_NODE_TYPE_INPUT,
1201 .state = VLIB_NODE_STATE_INTERRUPT,
1203 VLIB_REGISTER_NODE (flowprobe_flush_ip4_node) = {
1204 .function = flowprobe_flush_ip4,
1205 .name = "flowprobe-flush-ip4",
1206 .type = VLIB_NODE_TYPE_INPUT,
1207 .state = VLIB_NODE_STATE_INTERRUPT,
1209 VLIB_REGISTER_NODE (flowprobe_flush_ip6_node) = {
1210 .function = flowprobe_flush_ip6,
1211 .name = "flowprobe-flush-ip6",
1212 .type = VLIB_NODE_TYPE_INPUT,
1213 .state = VLIB_NODE_STATE_INTERRUPT,
1215 VLIB_REGISTER_NODE (flowprobe_flush_l2_node) = {
1216 .function = flowprobe_flush_l2,
1217 .name = "flowprobe-flush-l2",
1218 .type = VLIB_NODE_TYPE_INPUT,
1219 .state = VLIB_NODE_STATE_INTERRUPT,
1223 * fd.io coding-style-patch-verification: ON
1226 * eval: (c-set-style "gnu")