/*
 * Copyright (c) 2016 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
15 #ifndef __included_ioam_export_h__
16 #define __included_ioam_export_h__
18 #include <vnet/vnet.h>
19 #include <vnet/ip/ip.h>
20 #include <vnet/ip/ip_packet.h>
21 #include <vnet/ip/ip4_packet.h>
22 #include <vnet/ip/ip6_packet.h>
23 #include <vnet/ip/ip6_hop_by_hop.h>
24 #include <vnet/udp/udp_local.h>
25 #include <vnet/udp/udp_packet.h>
26 #include <vnet/ipfix-export/ipfix_packet.h>
28 #include <vppinfra/pool.h>
29 #include <vppinfra/hash.h>
30 #include <vppinfra/error.h>
31 #include <vppinfra/elog.h>
32 #include <vppinfra/lock.h>
34 #include <vlib/threads.h>
36 typedef struct ioam_export_buffer
38 /** Required for pool_get_aligned */
39 CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
40 /* Allocated buffer */
43 u8 records_in_this_buffer;
44 } ioam_export_buffer_t;
49 /* API message ID base */
53 /* TODO: to support multiple collectors all this has to be grouped and create a vector here */
58 /* ipfix collector, our ip address */
59 ip4_address_t ipfix_collector;
60 ip4_address_t src_address;
62 /* Pool of ioam_export_buffer_t */
63 ioam_export_buffer_t *buffer_pool;
64 /* Vector of per thread ioam_export_buffer_t to buffer pool index */
65 u32 *buffer_per_thread;
66 /* Lock per thread to swap buffers between worker and timer process */
67 clib_spinlock_t *lockp;
69 /* time scale transform */
74 vlib_main_t *vlib_main;
75 vnet_main_t *vnet_main;
76 ethernet_main_t *ethernet_main;
80 u32 export_process_node_index;
/* Bytes reserved in the export buffer for one record */
#define DEFAULT_EXPORT_SIZE (3 * CLIB_CACHE_LINE_BYTES)
/*
 * Number of records in a buffer
 * ~(MTU (1500) - [ip hdr(40) + UDP(8) + ipfix (24)]) / DEFAULT_EXPORT_SIZE
 */
#define DEFAULT_EXPORT_RECORDS 7
92 ioam_export_set_next_node (ioam_export_main_t * em, u8 * next_node_name)
94 vlib_node_t *next_node;
96 next_node = vlib_get_node_by_name (em->vlib_main, next_node_name);
97 em->next_node_index = next_node->index;
101 ioam_export_reset_next_node (ioam_export_main_t * em)
103 vlib_node_t *next_node;
105 next_node = vlib_get_node_by_name (em->vlib_main, (u8 *) "ip4-lookup");
106 em->next_node_index = next_node->index;
109 always_inline ioam_export_buffer_t *
110 ioam_export_get_my_buffer (ioam_export_main_t * em, u32 thread_id)
113 if (vec_len (em->buffer_per_thread) > thread_id)
114 return (pool_elt_at_index
115 (em->buffer_pool, em->buffer_per_thread[thread_id]));
120 ioam_export_buffer_add_header (ioam_export_main_t * em, vlib_buffer_t * b0)
122 clib_memcpy_fast (b0->data, em->record_header, vec_len (em->record_header));
123 b0->current_data = 0;
124 b0->current_length = vec_len (em->record_header);
125 b0->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID;
130 ioam_export_init_buffer (ioam_export_main_t * em, vlib_main_t * vm,
131 ioam_export_buffer_t * eb)
133 vlib_buffer_t *b = 0;
137 /* TODO: Perhaps buffer init from template here */
138 if (vlib_buffer_alloc (vm, &(eb->buffer_index), 1) != 1)
140 eb->records_in_this_buffer = 0;
141 eb->touched_at = vlib_time_now (vm);
142 b = vlib_get_buffer (vm, eb->buffer_index);
143 (void) ioam_export_buffer_add_header (em, b);
144 vnet_buffer (b)->sw_if_index[VLIB_RX] = 0;
145 vnet_buffer (b)->sw_if_index[VLIB_TX] = ~0;
150 ioam_export_thread_buffer_free (ioam_export_main_t * em)
152 vlib_main_t *vm = em->vlib_main;
153 ioam_export_buffer_t *eb = 0;
155 for (i = 0; i < vec_len (em->buffer_per_thread); i++)
157 eb = pool_elt_at_index (em->buffer_pool, em->buffer_per_thread[i]);
159 vlib_buffer_free (vm, &(eb->buffer_index), 1);
161 for (i = 0; i < vec_len (em->lockp); i++)
162 clib_mem_free ((void *) em->lockp[i]);
163 vec_free (em->buffer_per_thread);
164 pool_free (em->buffer_pool);
165 vec_free (em->lockp);
166 em->buffer_per_thread = 0;
172 ioam_export_thread_buffer_init (ioam_export_main_t * em, vlib_main_t * vm)
174 int no_of_threads = vec_len (vlib_worker_threads);
176 ioam_export_buffer_t *eb = 0;
178 pool_alloc_aligned (em->buffer_pool,
179 no_of_threads - 1, CLIB_CACHE_LINE_BYTES);
180 vec_validate_aligned (em->buffer_per_thread,
181 no_of_threads - 1, CLIB_CACHE_LINE_BYTES);
182 vec_validate_aligned (em->lockp, no_of_threads - 1, CLIB_CACHE_LINE_BYTES);
184 if (!em->buffer_per_thread || !em->buffer_pool || !em->lockp)
188 for (i = 0; i < no_of_threads; i++)
191 pool_get_aligned (em->buffer_pool, eb, CLIB_CACHE_LINE_BYTES);
192 clib_memset (eb, 0, sizeof (*eb));
193 em->buffer_per_thread[i] = eb - em->buffer_pool;
194 if (ioam_export_init_buffer (em, vm, eb) != 1)
196 ioam_export_thread_buffer_free (em);
199 clib_spinlock_init (&em->lockp[i]);
204 #define IPFIX_IOAM_EXPORT_ID 272
205 #define IPFIX_VXLAN_IOAM_EXPORT_ID 273
207 /* Used to build the rewrite */
208 /* data set packet */
211 ipfix_message_header_t h;
212 ipfix_set_header_t s;
213 } ipfix_data_packet_t;
219 ipfix_data_packet_t ipfix;
220 } ip4_ipfix_data_packet_t;
224 ioam_export_header_cleanup (ioam_export_main_t * em,
225 ip4_address_t * collector_address,
226 ip4_address_t * src_address)
228 vec_free (em->record_header);
229 em->record_header = 0;
233 ioam_export_header_create (ioam_export_main_t * em,
234 ip4_address_t * collector_address,
235 ip4_address_t * src_address)
239 ipfix_message_header_t *h;
240 ipfix_set_header_t *s;
242 ip4_ipfix_data_packet_t *tp;
245 /* allocate rewrite space */
246 vec_validate_aligned (rewrite,
247 sizeof (ip4_ipfix_data_packet_t) - 1,
248 CLIB_CACHE_LINE_BYTES);
250 tp = (ip4_ipfix_data_packet_t *) rewrite;
251 ip = (ip4_header_t *) & tp->ip4;
252 udp = (udp_header_t *) (ip + 1);
253 h = (ipfix_message_header_t *) (udp + 1);
254 s = (ipfix_set_header_t *) (h + 1);
256 ip->ip_version_and_header_length = 0x45;
258 ip->protocol = IP_PROTOCOL_UDP;
259 ip->src_address.as_u32 = src_address->as_u32;
260 ip->dst_address.as_u32 = collector_address->as_u32;
261 udp->src_port = clib_host_to_net_u16 (UDP_DST_PORT_ipfix);
262 udp->dst_port = clib_host_to_net_u16 (UDP_DST_PORT_ipfix);
263 /* FIXUP: UDP length */
264 udp->length = clib_host_to_net_u16 (vec_len (rewrite) +
265 (DEFAULT_EXPORT_RECORDS *
266 DEFAULT_EXPORT_SIZE) - sizeof (*ip));
268 /* FIXUP: message header export_time */
269 /* FIXUP: message header sequence_number */
270 h->domain_id = clib_host_to_net_u32 (em->domain_id);
272 /*FIXUP: Setid length in octets if records exported are not default */
273 s->set_id_length = ipfix_set_id_length (em->set_id,
275 (DEFAULT_EXPORT_RECORDS *
276 DEFAULT_EXPORT_SIZE)));
278 /* FIXUP: h version and length length in octets if records exported are not default */
279 h->version_length = version_length (sizeof (*h) +
281 (DEFAULT_EXPORT_RECORDS *
282 DEFAULT_EXPORT_SIZE)));
284 /* FIXUP: ip length if records exported are not default */
285 /* FIXUP: ip checksum if records exported are not default */
286 ip->length = clib_host_to_net_u16 (vec_len (rewrite) +
287 (DEFAULT_EXPORT_RECORDS *
288 DEFAULT_EXPORT_SIZE));
289 ip->checksum = ip4_header_checksum (ip);
290 _vec_len (rewrite) = sizeof (ip4_ipfix_data_packet_t);
291 em->record_header = rewrite;
296 ioam_export_send_buffer (ioam_export_main_t * em, vlib_main_t * vm,
297 ioam_export_buffer_t * eb)
301 ipfix_message_header_t *h;
302 ipfix_set_header_t *s;
303 ip4_ipfix_data_packet_t *tp;
307 vlib_frame_t *nf = 0;
310 b0 = vlib_get_buffer (vm, eb->buffer_index);
311 tp = vlib_buffer_get_current (b0);
312 ip = (ip4_header_t *) & tp->ip4;
313 udp = (udp_header_t *) (ip + 1);
314 h = (ipfix_message_header_t *) (udp + 1);
315 s = (ipfix_set_header_t *) (h + 1);
317 /* FIXUP: message header export_time */
318 h->export_time = clib_host_to_net_u32 ((u32)
319 (((f64) em->unix_time_0) +
320 (vlib_time_now (em->vlib_main) -
323 /* FIXUP: message header sequence_number */
324 h->sequence_number = clib_host_to_net_u32 (em->sequence_number++);
326 /* FIXUP: lengths if different from default */
327 if (PREDICT_FALSE (eb->records_in_this_buffer != DEFAULT_EXPORT_RECORDS))
329 s->set_id_length = ipfix_set_id_length (em->set_id /* set_id */ ,
331 (sizeof (*ip) + sizeof (*udp) +
334 version_length (b0->current_length - (sizeof (*ip) + sizeof (*udp)));
337 new_l0 = clib_host_to_net_u16 ((u16) b0->current_length);
338 sum0 = ip_csum_update (sum0, old_l0, new_l0, ip4_header_t,
339 length /* changed member */ );
340 ip->checksum = ip_csum_fold (sum0);
342 udp->length = clib_host_to_net_u16 (b0->current_length - sizeof (*ip));
345 /* Enqueue pkts to ip4-lookup */
347 nf = vlib_get_frame_to_node (vm, em->next_node_index);
349 to_next = vlib_frame_vector_args (nf);
351 to_next[0] = eb->buffer_index;
352 vlib_put_frame_to_node (vm, em->next_node_index, nf);
357 #define EXPORT_TIMEOUT (20.0)
358 #define THREAD_PERIOD (30.0)
360 ioam_export_process_common (ioam_export_main_t * em, vlib_main_t * vm,
361 vlib_node_runtime_t * rt, vlib_frame_t * f,
367 uword *event_data = 0;
369 ioam_export_buffer_t *eb = 0, *new_eb = 0;
370 u32 *vec_buffer_indices = 0;
371 u32 *vec_buffer_to_be_sent = 0;
372 u32 *thread_index = 0;
373 u32 new_pool_index = 0;
375 em->export_process_node_index = index;
376 /* Wait for Godot... */
377 vlib_process_wait_for_event_or_clock (vm, 1e9);
378 event_type = vlib_process_get_events (vm, &event_data);
380 clib_warning ("bogus kickoff event received, %d", event_type);
381 vec_reset_length (event_data);
385 vlib_process_wait_for_event_or_clock (vm, timeout);
386 event_type = vlib_process_get_events (vm, &event_data);
389 case 2: /* Stop and Wait for kickoff again */
392 case 1: /* kickoff : Check for unsent buffers */
393 timeout = THREAD_PERIOD;
395 case ~0: /* timeout */
398 vec_reset_length (event_data);
399 now = vlib_time_now (vm);
401 * Create buffers for threads that are not active enough
402 * to send out the export records
404 for (i = 0; i < vec_len (em->buffer_per_thread); i++)
406 /* If the worker thread is processing export records ignore further checks */
407 if (CLIB_SPINLOCK_IS_LOCKED (&em->lockp[i]))
409 eb = pool_elt_at_index (em->buffer_pool, em->buffer_per_thread[i]);
410 if (eb->records_in_this_buffer > 0
411 && now > (eb->touched_at + EXPORT_TIMEOUT))
413 pool_get_aligned (em->buffer_pool, new_eb,
414 CLIB_CACHE_LINE_BYTES);
415 clib_memset (new_eb, 0, sizeof (*new_eb));
416 if (ioam_export_init_buffer (em, vm, new_eb) == 1)
418 new_pool_index = new_eb - em->buffer_pool;
419 vec_add (vec_buffer_indices, &new_pool_index, 1);
420 vec_add (vec_buffer_to_be_sent, &em->buffer_per_thread[i],
422 vec_add (thread_index, &i, 1);
426 pool_put (em->buffer_pool, new_eb);
432 if (vec_len (thread_index) != 0)
435 * Now swap the buffers out
437 for (i = 0; i < vec_len (thread_index); i++)
439 clib_spinlock_lock (&em->lockp[thread_index[i]]);
440 em->buffer_per_thread[thread_index[i]] =
441 vec_pop (vec_buffer_indices);
442 clib_spinlock_unlock (&em->lockp[thread_index[i]]);
445 /* Send the buffers */
446 for (i = 0; i < vec_len (vec_buffer_to_be_sent); i++)
449 pool_elt_at_index (em->buffer_pool, vec_buffer_to_be_sent[i]);
450 ioam_export_send_buffer (em, vm, eb);
451 pool_put (em->buffer_pool, eb);
456 /* Free any leftover/unused buffers and everything that was allocated */
457 for (i = 0; i < vec_len (vec_buffer_indices); i++)
459 new_eb = pool_elt_at_index (em->buffer_pool, vec_buffer_indices[i]);
460 vlib_buffer_free (vm, &new_eb->buffer_index, 1);
461 pool_put (em->buffer_pool, new_eb);
463 vec_free (vec_buffer_indices);
464 vec_free (vec_buffer_to_be_sent);
465 vec_free (thread_index);
467 return 0; /* not so much */
/*
 * Shared fast-path body for the iOAM export worker nodes.
 *
 *   EM    - ioam_export_main_t *
 *   VM    - vlib_main_t *
 *   N     - vlib_node_runtime_t * (call sites also expose it as "node")
 *   F     - vlib_frame_t *
 *   HTYPE - IP header type of the traffic being recorded
 *   L     - payload-length field of HTYPE
 *   V     - header word copied into the trace record
 *   NEXT  - next-index used for all forwarded packets
 *   FIXUP_FUNC - per-record fixup applied after copying into the export buf
 *
 * Copies up to DEFAULT_EXPORT_SIZE bytes of each packet into the calling
 * thread's export buffer under that thread's spinlock, flushing via
 * ioam_export_send_buffer() whenever DEFAULT_EXPORT_RECORDS accumulate.
 * NOTE(review): structural lines (do/while wrapper, declarations, pointer
 * bumps, braces, break-on-no-buffer) were restored after extraction
 * damage - verify against upstream VPP before merging.
 */
#define ioam_export_node_common(EM, VM, N, F, HTYPE, L, V, NEXT, FIXUP_FUNC) \
do                                                                      \
  {                                                                     \
    u32 n_left_from, *from, *to_next;                                   \
    export_next_t next_index;                                           \
    u32 pkts_recorded = 0;                                              \
    ioam_export_buffer_t *my_buf = 0;                                   \
    vlib_buffer_t *eb0 = 0;                                             \
    u32 ebi0 = 0;                                                       \
    from = vlib_frame_vector_args (F);                                  \
    n_left_from = (F)->n_vectors;                                       \
    next_index = (N)->cached_next_index;                                \
    clib_spinlock_lock (&(EM)->lockp[(VM)->thread_index]);              \
    my_buf = ioam_export_get_my_buffer (EM, (VM)->thread_index);        \
    my_buf->touched_at = vlib_time_now (VM);                            \
    while (n_left_from > 0)                                             \
      {                                                                 \
	u32 n_left_to_next;                                             \
	vlib_get_next_frame (VM, N, next_index, to_next, n_left_to_next); \
	while (n_left_from >= 4 && n_left_to_next >= 2)                 \
	  {                                                             \
	    u32 next0 = NEXT;                                           \
	    u32 next1 = NEXT;                                           \
	    u32 bi0, bi1;                                               \
	    HTYPE *ip0, *ip1;                                           \
	    vlib_buffer_t *p0, *p1;                                     \
	    u32 ip_len0, ip_len1;                                       \
	    {                                                           \
	      vlib_buffer_t *p2, *p3;                                   \
	      p2 = vlib_get_buffer (VM, from[2]);                       \
	      p3 = vlib_get_buffer (VM, from[3]);                       \
	      vlib_prefetch_buffer_header (p2, LOAD);                   \
	      vlib_prefetch_buffer_header (p3, LOAD);                   \
	      CLIB_PREFETCH (p2->data, 3 * CLIB_CACHE_LINE_BYTES, LOAD); \
	      CLIB_PREFETCH (p3->data, 3 * CLIB_CACHE_LINE_BYTES, LOAD); \
	    }                                                           \
	    to_next[0] = bi0 = from[0];                                 \
	    to_next[1] = bi1 = from[1];                                 \
	    from += 2;                                                  \
	    to_next += 2;                                               \
	    n_left_from -= 2;                                           \
	    n_left_to_next -= 2;                                        \
	    p0 = vlib_get_buffer (VM, bi0);                             \
	    p1 = vlib_get_buffer (VM, bi1);                             \
	    ip0 = vlib_buffer_get_current (p0);                         \
	    ip1 = vlib_buffer_get_current (p1);                         \
	    ip_len0 =                                                   \
	      clib_net_to_host_u16 (ip0->L) + sizeof (HTYPE);           \
	    ip_len1 =                                                   \
	      clib_net_to_host_u16 (ip1->L) + sizeof (HTYPE);           \
	    ebi0 = my_buf->buffer_index;                                \
	    eb0 = vlib_get_buffer (VM, ebi0);                           \
	    if (PREDICT_FALSE (eb0 == 0))                               \
	      break;                                                    \
	    /* Clamp each copied slice to the fixed record size */      \
	    ip_len0 =                                                   \
	      ip_len0 > DEFAULT_EXPORT_SIZE ? DEFAULT_EXPORT_SIZE : ip_len0; \
	    ip_len1 =                                                   \
	      ip_len1 > DEFAULT_EXPORT_SIZE ? DEFAULT_EXPORT_SIZE : ip_len1; \
	    copy3cachelines (eb0->data + eb0->current_length, ip0, ip_len0); \
	    FIXUP_FUNC(eb0, p0);                                        \
	    eb0->current_length += DEFAULT_EXPORT_SIZE;                 \
	    my_buf->records_in_this_buffer++;                           \
	    if (my_buf->records_in_this_buffer >= DEFAULT_EXPORT_RECORDS) \
	      {                                                         \
		ioam_export_send_buffer (EM, VM, my_buf);               \
		ioam_export_init_buffer (EM, VM, my_buf);               \
	      }                                                         \
	    ebi0 = my_buf->buffer_index;                                \
	    eb0 = vlib_get_buffer (VM, ebi0);                           \
	    if (PREDICT_FALSE (eb0 == 0))                               \
	      break;                                                    \
	    copy3cachelines (eb0->data + eb0->current_length, ip1, ip_len1); \
	    FIXUP_FUNC(eb0, p1);                                        \
	    eb0->current_length += DEFAULT_EXPORT_SIZE;                 \
	    my_buf->records_in_this_buffer++;                           \
	    if (my_buf->records_in_this_buffer >= DEFAULT_EXPORT_RECORDS) \
	      {                                                         \
		ioam_export_send_buffer (EM, VM, my_buf);               \
		ioam_export_init_buffer (EM, VM, my_buf);               \
	      }                                                         \
	    pkts_recorded += 2;                                         \
	    if (PREDICT_FALSE (((node)->flags & VLIB_NODE_FLAG_TRACE))) \
	      {                                                         \
		if (p0->flags & VLIB_BUFFER_IS_TRACED)                  \
		  {                                                     \
		    export_trace_t *t =                                 \
		      vlib_add_trace (VM, node, p0, sizeof (*t));       \
		    t->flow_label =                                     \
		      clib_net_to_host_u32 (ip0->V);                    \
		    t->next_index = next0;                              \
		  }                                                     \
		if (p1->flags & VLIB_BUFFER_IS_TRACED)                  \
		  {                                                     \
		    export_trace_t *t =                                 \
		      vlib_add_trace (VM, N, p1, sizeof (*t));          \
		    t->flow_label =                                     \
		      clib_net_to_host_u32 (ip1->V);                    \
		    t->next_index = next1;                              \
		  }                                                     \
	      }                                                         \
	    vlib_validate_buffer_enqueue_x2 (VM, N, next_index,         \
					     to_next, n_left_to_next,   \
					     bi0, bi1, next0, next1);   \
	  }                                                             \
	while (n_left_from > 0 && n_left_to_next > 0)                   \
	  {                                                             \
	    u32 bi0;                                                    \
	    vlib_buffer_t *p0;                                          \
	    u32 ip_len0;                                                \
	    HTYPE *ip0;                                                 \
	    u32 next0 = NEXT;                                           \
	    bi0 = from[0];                                              \
	    to_next[0] = bi0;                                           \
	    from += 1;                                                  \
	    to_next += 1;                                               \
	    n_left_from -= 1;                                           \
	    n_left_to_next -= 1;                                        \
	    p0 = vlib_get_buffer (VM, bi0);                             \
	    ip0 = vlib_buffer_get_current (p0);                         \
	    ip_len0 =                                                   \
	      clib_net_to_host_u16 (ip0->L) + sizeof (HTYPE);           \
	    ebi0 = my_buf->buffer_index;                                \
	    eb0 = vlib_get_buffer (VM, ebi0);                           \
	    if (PREDICT_FALSE (eb0 == 0))                               \
	      break;                                                    \
	    ip_len0 =                                                   \
	      ip_len0 > DEFAULT_EXPORT_SIZE ? DEFAULT_EXPORT_SIZE : ip_len0; \
	    copy3cachelines (eb0->data + eb0->current_length, ip0, ip_len0); \
	    FIXUP_FUNC(eb0, p0);                                        \
	    eb0->current_length += DEFAULT_EXPORT_SIZE;                 \
	    my_buf->records_in_this_buffer++;                           \
	    if (my_buf->records_in_this_buffer >= DEFAULT_EXPORT_RECORDS) \
	      {                                                         \
		ioam_export_send_buffer (EM, VM, my_buf);               \
		ioam_export_init_buffer (EM, VM, my_buf);               \
	      }                                                         \
	    if (PREDICT_FALSE (((N)->flags & VLIB_NODE_FLAG_TRACE)      \
			       && (p0->flags & VLIB_BUFFER_IS_TRACED))) \
	      {                                                         \
		export_trace_t *t = vlib_add_trace (VM, (N), p0, sizeof (*t)); \
		t->flow_label =                                         \
		  clib_net_to_host_u32 (ip0->V);                        \
		t->next_index = next0;                                  \
	      }                                                         \
	    pkts_recorded += 1;                                         \
	    vlib_validate_buffer_enqueue_x1 (VM, N, next_index,         \
					     to_next, n_left_to_next,   \
					     bi0, next0);               \
	  }                                                             \
	vlib_put_next_frame (VM, N, next_index, n_left_to_next);        \
      }                                                                 \
    vlib_node_increment_counter (VM, export_node.index,                 \
				 EXPORT_ERROR_RECORDED, pkts_recorded); \
    clib_spinlock_unlock (&(EM)->lockp[(VM)->thread_index]);            \
  }                                                                     \
while (0)
627 #endif /* __included_ioam_export_h__ */
630 * fd.io coding-style-patch-verification: ON
633 * eval: (c-set-style "gnu")