/*
 * Copyright (c) 2016 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef __included_ioam_export_h__
#define __included_ioam_export_h__

#include <vnet/vnet.h>
#include <vnet/ip/ip.h>
#include <vnet/ip/ip_packet.h>
#include <vnet/ip/ip4_packet.h>
#include <vnet/ip/ip6_packet.h>
#include <vnet/ip/ip6_hop_by_hop.h>
#include <vnet/udp/udp_local.h>
#include <vnet/udp/udp_packet.h>
#include <vnet/ipfix-export/ipfix_packet.h>

#include <vppinfra/pool.h>
#include <vppinfra/hash.h>
#include <vppinfra/error.h>
#include <vppinfra/elog.h>
#include <vppinfra/lock.h>

#include <vlib/threads.h>

typedef struct ioam_export_buffer
{
  /** Required for pool_get_aligned */
  CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
  /* Allocated buffer */
  u32 buffer_index;
  u64 touched_at;
  u8 records_in_this_buffer;
} ioam_export_buffer_t;


typedef struct
{
  /* API message ID base */
  u16 msg_id_base;
  u16 set_id;

  /* TODO: to support multiple collectors, group all of this and turn it into a vector */
  u8 *record_header;
  u32 sequence_number;
  u32 domain_id;

  /* ipfix collector, our ip address */
  ip4_address_t ipfix_collector;
  ip4_address_t src_address;

  /* Pool of ioam_export_buffer_t */
  ioam_export_buffer_t *buffer_pool;
  /* Per-thread vector mapping each thread to its active buffer's index in buffer_pool */
  u32 *buffer_per_thread;
  /* Lock per thread to swap buffers between worker and timer process */
  clib_spinlock_t *lockp;

  /* time scale transform */
  u32 unix_time_0;
  f64 vlib_time_0;

  /* convenience */
  vlib_main_t *vlib_main;
  vnet_main_t *vnet_main;
  ethernet_main_t *ethernet_main;
  u32 next_node_index;

  uword my_hbh_slot;
  u32 export_process_node_index;
} ioam_export_main_t;
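
/*
 * Concurrency model, as implemented below: each worker thread appends
 * records to its own export buffer (the pool element indexed by
 * buffer_per_thread[thread_index]) while holding lockp[thread_index].
 * The export process takes the same lock only to swap in a freshly
 * initialized buffer once the old one has sat idle with data for
 * EXPORT_TIMEOUT seconds, and then sends the swapped-out buffer itself.
 */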


#define DEFAULT_EXPORT_SIZE (3 * CLIB_CACHE_LINE_BYTES)
/*
 *  Number of records in a buffer
 * ~(MTU (1500) - [ip hdr(40) + UDP(8) + ipfix (24)]) / DEFAULT_EXPORT_SIZE
 */
#define DEFAULT_EXPORT_RECORDS 7
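/*
 * Worked example, assuming 64-byte cache lines (DEFAULT_EXPORT_SIZE = 192):
 * (1500 - (40 + 8 + 24)) / 192 = 1428 / 192 ~= 7.4, i.e. 7 whole records.
 */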

inline static void
ioam_export_set_next_node (ioam_export_main_t * em, u8 * next_node_name)
{
  vlib_node_t *next_node;

  next_node = vlib_get_node_by_name (em->vlib_main, next_node_name);
  em->next_node_index = next_node->index;
}

inline static void
ioam_export_reset_next_node (ioam_export_main_t * em)
{
  vlib_node_t *next_node;

  next_node = vlib_get_node_by_name (em->vlib_main, (u8 *) "ip4-lookup");
  em->next_node_index = next_node->index;
}

always_inline ioam_export_buffer_t *
ioam_export_get_my_buffer (ioam_export_main_t * em, u32 thread_id)
{

  if (vec_len (em->buffer_per_thread) > thread_id)
    return (pool_elt_at_index
            (em->buffer_pool, em->buffer_per_thread[thread_id]));
  return (0);
}

inline static int
ioam_export_buffer_add_header (ioam_export_main_t * em, vlib_buffer_t * b0)
{
  clib_memcpy_fast (b0->data, em->record_header, vec_len (em->record_header));
  b0->current_data = 0;
  b0->current_length = vec_len (em->record_header);
  b0->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID;
  return (1);
}

inline static int
ioam_export_init_buffer (ioam_export_main_t * em, vlib_main_t * vm,
                         ioam_export_buffer_t * eb)
{
  vlib_buffer_t *b = 0;

  if (!eb)
    return (-1);
  /* TODO: Perhaps buffer init from template here */
  if (vlib_buffer_alloc (vm, &(eb->buffer_index), 1) != 1)
    return (-2);
  eb->records_in_this_buffer = 0;
  eb->touched_at = vlib_time_now (vm);
  b = vlib_get_buffer (vm, eb->buffer_index);
  (void) ioam_export_buffer_add_header (em, b);
  vnet_buffer (b)->sw_if_index[VLIB_RX] = 0;
  vnet_buffer (b)->sw_if_index[VLIB_TX] = ~0;
  return (1);
}

inline static void
ioam_export_thread_buffer_free (ioam_export_main_t * em)
{
  vlib_main_t *vm = em->vlib_main;
  ioam_export_buffer_t *eb = 0;
  int i;
  for (i = 0; i < vec_len (em->buffer_per_thread); i++)
    {
      eb = pool_elt_at_index (em->buffer_pool, em->buffer_per_thread[i]);
      if (eb)
        vlib_buffer_free (vm, &(eb->buffer_index), 1);
    }
  for (i = 0; i < vec_len (em->lockp); i++)
    clib_mem_free ((void *) em->lockp[i]);
  vec_free (em->buffer_per_thread);
  pool_free (em->buffer_pool);
  vec_free (em->lockp);
  em->buffer_per_thread = 0;
  em->buffer_pool = 0;
  em->lockp = 0;
}

inline static int
ioam_export_thread_buffer_init (ioam_export_main_t * em, vlib_main_t * vm)
{
  int no_of_threads = vec_len (vlib_worker_threads);
  int i;
  ioam_export_buffer_t *eb = 0;

  pool_alloc_aligned (em->buffer_pool,
                      no_of_threads - 1, CLIB_CACHE_LINE_BYTES);
  vec_validate_aligned (em->buffer_per_thread,
                        no_of_threads - 1, CLIB_CACHE_LINE_BYTES);
  vec_validate_aligned (em->lockp, no_of_threads - 1, CLIB_CACHE_LINE_BYTES);

  if (!em->buffer_per_thread || !em->buffer_pool || !em->lockp)
    {
      return (-1);
    }
  for (i = 0; i < no_of_threads; i++)
    {
      eb = 0;
      pool_get_aligned (em->buffer_pool, eb, CLIB_CACHE_LINE_BYTES);
      clib_memset (eb, 0, sizeof (*eb));
      em->buffer_per_thread[i] = eb - em->buffer_pool;
      if (ioam_export_init_buffer (em, vm, eb) != 1)
        {
          ioam_export_thread_buffer_free (em);
          return (-2);
        }
      clib_spinlock_init (&em->lockp[i]);
    }
  return (1);
}
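
/*
 * Typical setup sequence (a sketch only; the instance name and addresses
 * below are hypothetical, and the exact enable path depends on the plugin
 * using this header). The rewrite header is created first so that
 * ioam_export_thread_buffer_init() can seed every per-thread buffer with a
 * copy of it:
 *
 *   ioam_export_main_t *em = &my_export_main;
 *   em->set_id = IPFIX_IOAM_EXPORT_ID;
 *   ioam_export_header_create (em, &collector_addr, &src_addr);
 *   if (ioam_export_thread_buffer_init (em, vm) != 1)
 *     return -1;
 *   ioam_export_set_next_node (em, (u8 *) "ip4-lookup");
 *
 * Teardown reverses this with ioam_export_thread_buffer_free() and
 * ioam_export_header_cleanup().
 */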

#define IPFIX_IOAM_EXPORT_ID 272
#define IPFIX_VXLAN_IOAM_EXPORT_ID 273

/* Used to build the rewrite */
/* data set packet */
typedef struct
{
  ipfix_message_header_t h;
  ipfix_set_header_t s;
} ipfix_data_packet_t;

typedef struct
{
  ip4_header_t ip4;
  udp_header_t udp;
  ipfix_data_packet_t ipfix;
} ip4_ipfix_data_packet_t;


inline static void
ioam_export_header_cleanup (ioam_export_main_t * em,
                            ip4_address_t * collector_address,
                            ip4_address_t * src_address)
{
  vec_free (em->record_header);
  em->record_header = 0;
}

inline static int
ioam_export_header_create (ioam_export_main_t * em,
                           ip4_address_t * collector_address,
                           ip4_address_t * src_address)
{
  ip4_header_t *ip;
  udp_header_t *udp;
  ipfix_message_header_t *h;
  ipfix_set_header_t *s;
  u8 *rewrite = 0;
  ip4_ipfix_data_packet_t *tp;


  /* allocate rewrite space */
  vec_validate_aligned (rewrite,
                        sizeof (ip4_ipfix_data_packet_t) - 1,
                        CLIB_CACHE_LINE_BYTES);

  tp = (ip4_ipfix_data_packet_t *) rewrite;
  ip = (ip4_header_t *) & tp->ip4;
  udp = (udp_header_t *) (ip + 1);
  h = (ipfix_message_header_t *) (udp + 1);
  s = (ipfix_set_header_t *) (h + 1);

  ip->ip_version_and_header_length = 0x45;
  ip->ttl = 254;
  ip->protocol = IP_PROTOCOL_UDP;
  ip->src_address.as_u32 = src_address->as_u32;
  ip->dst_address.as_u32 = collector_address->as_u32;
  udp->src_port = clib_host_to_net_u16 (UDP_DST_PORT_ipfix);
  udp->dst_port = clib_host_to_net_u16 (UDP_DST_PORT_ipfix);
  /* FIXUP: UDP length */
  udp->length = clib_host_to_net_u16 (vec_len (rewrite) +
                                      (DEFAULT_EXPORT_RECORDS *
                                       DEFAULT_EXPORT_SIZE) - sizeof (*ip));

  /* FIXUP: message header export_time */
  /* FIXUP: message header sequence_number */
  h->domain_id = clib_host_to_net_u32 (em->domain_id);

  /* FIXUP: Setid length in octets if records exported are not default */
  s->set_id_length = ipfix_set_id_length (em->set_id,
                                          (sizeof (*s) +
                                           (DEFAULT_EXPORT_RECORDS *
                                            DEFAULT_EXPORT_SIZE)));

  /* FIXUP: h version_length (length in octets) if records exported are not default */
  h->version_length = version_length (sizeof (*h) +
                                      (sizeof (*s) +
                                       (DEFAULT_EXPORT_RECORDS *
                                        DEFAULT_EXPORT_SIZE)));

  /* FIXUP: ip length if records exported are not default */
  /* FIXUP: ip checksum if records exported are not default */
  ip->length = clib_host_to_net_u16 (vec_len (rewrite) +
                                     (DEFAULT_EXPORT_RECORDS *
                                      DEFAULT_EXPORT_SIZE));
  ip->checksum = ip4_header_checksum (ip);
  _vec_len (rewrite) = sizeof (ip4_ipfix_data_packet_t);
  em->record_header = rewrite;
  return (1);
}
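
/*
 * Note: the rewrite built above assumes a full buffer of
 * DEFAULT_EXPORT_RECORDS records. ioam_export_send_buffer() below patches
 * export_time and sequence_number on every send, and additionally rewrites
 * the IPFIX set/message lengths, the IP total length and checksum, and the
 * UDP length whenever fewer records than the default are present.
 */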

inline static int
ioam_export_send_buffer (ioam_export_main_t * em, vlib_main_t * vm,
                         ioam_export_buffer_t * eb)
{
  ip4_header_t *ip;
  udp_header_t *udp;
  ipfix_message_header_t *h;
  ipfix_set_header_t *s;
  ip4_ipfix_data_packet_t *tp;
  vlib_buffer_t *b0;
  u16 new_l0, old_l0;
  ip_csum_t sum0;
  vlib_frame_t *nf = 0;
  u32 *to_next;

  b0 = vlib_get_buffer (vm, eb->buffer_index);
  tp = vlib_buffer_get_current (b0);
  ip = (ip4_header_t *) & tp->ip4;
  udp = (udp_header_t *) (ip + 1);
  h = (ipfix_message_header_t *) (udp + 1);
  s = (ipfix_set_header_t *) (h + 1);

  /* FIXUP: message header export_time */
  h->export_time = clib_host_to_net_u32 ((u32)
                                         (((f64) em->unix_time_0) +
                                          (vlib_time_now (em->vlib_main) -
                                           em->vlib_time_0)));

  /* FIXUP: message header sequence_number */
  h->sequence_number = clib_host_to_net_u32 (em->sequence_number++);

  /* FIXUP: lengths if different from default */
  if (PREDICT_FALSE (eb->records_in_this_buffer != DEFAULT_EXPORT_RECORDS))
    {
      s->set_id_length = ipfix_set_id_length (em->set_id /* set_id */ ,
                                              b0->current_length -
                                              (sizeof (*ip) + sizeof (*udp) +
                                               sizeof (*h)));
      h->version_length =
        version_length (b0->current_length - (sizeof (*ip) + sizeof (*udp)));
      sum0 = ip->checksum;
      old_l0 = ip->length;
      new_l0 = clib_host_to_net_u16 ((u16) b0->current_length);
      sum0 = ip_csum_update (sum0, old_l0, new_l0, ip4_header_t,
                             length /* changed member */ );
      ip->checksum = ip_csum_fold (sum0);
      ip->length = new_l0;
      udp->length = clib_host_to_net_u16 (b0->current_length - sizeof (*ip));
    }

  /* Enqueue pkts to ip4-lookup */

  nf = vlib_get_frame_to_node (vm, em->next_node_index);
  nf->n_vectors = 0;
  to_next = vlib_frame_vector_args (nf);
  nf->n_vectors = 1;
  to_next[0] = eb->buffer_index;
  vlib_put_frame_to_node (vm, em->next_node_index, nf);
  return (1);

}

#define EXPORT_TIMEOUT (20.0)
#define THREAD_PERIOD (30.0)
inline static uword
ioam_export_process_common (ioam_export_main_t * em, vlib_main_t * vm,
                            vlib_node_runtime_t * rt, vlib_frame_t * f,
                            u32 index)
{
  f64 now;
  f64 timeout = 30.0;
  uword event_type;
  uword *event_data = 0;
  int i;
  ioam_export_buffer_t *eb = 0, *new_eb = 0;
  u32 *vec_buffer_indices = 0;
  u32 *vec_buffer_to_be_sent = 0;
  u32 *thread_index = 0;
  u32 new_pool_index = 0;

  em->export_process_node_index = index;
  /* Wait for Godot... */
  vlib_process_wait_for_event_or_clock (vm, 1e9);
  event_type = vlib_process_get_events (vm, &event_data);
  if (event_type != 1)
    clib_warning ("bogus kickoff event received, %d", event_type);
  vec_reset_length (event_data);

  while (1)
    {
      vlib_process_wait_for_event_or_clock (vm, timeout);
      event_type = vlib_process_get_events (vm, &event_data);
      switch (event_type)
        {
        case 2:         /* Stop and Wait for kickoff again */
          timeout = 1e9;
          break;
        case 1:         /* kickoff : Check for unsent buffers */
          timeout = THREAD_PERIOD;
          break;
        case ~0:                /* timeout */
          break;
        }
      vec_reset_length (event_data);
      now = vlib_time_now (vm);
      /*
       * Create buffers for threads that are not active enough
       * to send out the export records
       */
      for (i = 0; i < vec_len (em->buffer_per_thread); i++)
        {
          /* If the worker thread is processing export records ignore further checks */
          if (CLIB_SPINLOCK_IS_LOCKED (&em->lockp[i]))
            continue;
          eb = pool_elt_at_index (em->buffer_pool, em->buffer_per_thread[i]);
          if (eb->records_in_this_buffer > 0
              && now > (eb->touched_at + EXPORT_TIMEOUT))
            {
              pool_get_aligned (em->buffer_pool, new_eb,
                                CLIB_CACHE_LINE_BYTES);
              clib_memset (new_eb, 0, sizeof (*new_eb));
              if (ioam_export_init_buffer (em, vm, new_eb) == 1)
                {
                  new_pool_index = new_eb - em->buffer_pool;
                  vec_add (vec_buffer_indices, &new_pool_index, 1);
                  vec_add (vec_buffer_to_be_sent, &em->buffer_per_thread[i],
                           1);
                  vec_add (thread_index, &i, 1);
                }
              else
                {
                  pool_put (em->buffer_pool, new_eb);
                  /* Give up */
                  goto CLEANUP;
                }
            }
        }
      if (vec_len (thread_index) != 0)
        {
          /*
           * Now swap the buffers out
           */
          for (i = 0; i < vec_len (thread_index); i++)
            {
              clib_spinlock_lock (&em->lockp[thread_index[i]]);
              em->buffer_per_thread[thread_index[i]] =
                vec_pop (vec_buffer_indices);
              clib_spinlock_unlock (&em->lockp[thread_index[i]]);
            }

          /* Send the buffers */
          for (i = 0; i < vec_len (vec_buffer_to_be_sent); i++)
            {
              eb =
                pool_elt_at_index (em->buffer_pool, vec_buffer_to_be_sent[i]);
              ioam_export_send_buffer (em, vm, eb);
              pool_put (em->buffer_pool, eb);
            }
        }

    CLEANUP:
      /* Free any leftover/unused buffers and everything that was allocated */
      for (i = 0; i < vec_len (vec_buffer_indices); i++)
        {
          new_eb = pool_elt_at_index (em->buffer_pool, vec_buffer_indices[i]);
          vlib_buffer_free (vm, &new_eb->buffer_index, 1);
          pool_put (em->buffer_pool, new_eb);
        }
      vec_free (vec_buffer_indices);
      vec_free (vec_buffer_to_be_sent);
      vec_free (thread_index);
    }
  return 0;                     /* not so much */
}
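
/*
 * The process above waits for explicit events: event type 1 starts the
 * periodic flush loop (THREAD_PERIOD), event type 2 parks it again, and ~0
 * is just the timer expiring. A plugin's enable/disable path would
 * typically signal it along these lines (a sketch, assuming the caller has
 * the export process node index at hand, e.g. from
 * em->export_process_node_index or a node-by-name lookup):
 *
 *   vlib_process_signal_event (vm, em->export_process_node_index, 1, 0);
 *   ...
 *   vlib_process_signal_event (vm, em->export_process_node_index, 2, 0);
 */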

#define ioam_export_node_common(EM, VM, N, F, HTYPE, L, V, NEXT, FIXUP_FUNC)   \
do {                                                                           \
  u32 n_left_from, *from, *to_next;                                            \
  export_next_t next_index;                                                    \
  u32 pkts_recorded = 0;                                                       \
  ioam_export_buffer_t *my_buf = 0;                                            \
  vlib_buffer_t *eb0 = 0;                                                      \
  u32 ebi0 = 0;                                                                \
  from = vlib_frame_vector_args (F);                                           \
  n_left_from = (F)->n_vectors;                                                \
  next_index = (N)->cached_next_index;                                         \
  clib_spinlock_lock (&(EM)->lockp[(VM)->thread_index]);                       \
  my_buf = ioam_export_get_my_buffer (EM, (VM)->thread_index);                 \
  my_buf->touched_at = vlib_time_now (VM);                                     \
  while (n_left_from > 0)                                                      \
    {                                                                          \
      u32 n_left_to_next;                                                      \
      vlib_get_next_frame (VM, N, next_index, to_next, n_left_to_next);        \
      while (n_left_from >= 4 && n_left_to_next >= 2)                          \
        {                                                                      \
          u32 next0 = NEXT;                                                    \
          u32 next1 = NEXT;                                                    \
          u32 bi0, bi1;                                                        \
          HTYPE *ip0, *ip1;                                                    \
          vlib_buffer_t *p0, *p1;                                              \
          u32 ip_len0, ip_len1;                                                \
          {                                                                    \
            vlib_buffer_t *p2, *p3;                                            \
            p2 = vlib_get_buffer (VM, from[2]);                                \
            p3 = vlib_get_buffer (VM, from[3]);                                \
            vlib_prefetch_buffer_header (p2, LOAD);                            \
            vlib_prefetch_buffer_header (p3, LOAD);                            \
            CLIB_PREFETCH (p2->data, 3 * CLIB_CACHE_LINE_BYTES, LOAD);         \
            CLIB_PREFETCH (p3->data, 3 * CLIB_CACHE_LINE_BYTES, LOAD);         \
          }                                                                    \
          to_next[0] = bi0 = from[0];                                          \
          to_next[1] = bi1 = from[1];                                          \
          from += 2;                                                           \
          to_next += 2;                                                        \
          n_left_from -= 2;                                                    \
          n_left_to_next -= 2;                                                 \
          p0 = vlib_get_buffer (VM, bi0);                                      \
          p1 = vlib_get_buffer (VM, bi1);                                      \
          ip0 = vlib_buffer_get_current (p0);                                  \
          ip1 = vlib_buffer_get_current (p1);                                  \
          ip_len0 =                                                            \
            clib_net_to_host_u16 (ip0->L) + sizeof (HTYPE);                    \
          ip_len1 =                                                            \
            clib_net_to_host_u16 (ip1->L) + sizeof (HTYPE);                    \
          ebi0 = my_buf->buffer_index;                                         \
          eb0 = vlib_get_buffer (VM, ebi0);                                    \
          if (PREDICT_FALSE (eb0 == 0))                                        \
            goto NO_BUFFER1;                                                   \
          ip_len0 =                                                            \
            ip_len0 > DEFAULT_EXPORT_SIZE ? DEFAULT_EXPORT_SIZE : ip_len0;     \
          ip_len1 =                                                            \
            ip_len1 > DEFAULT_EXPORT_SIZE ? DEFAULT_EXPORT_SIZE : ip_len1;     \
          copy3cachelines (eb0->data + eb0->current_length, ip0, ip_len0);     \
          FIXUP_FUNC(eb0, p0);                                                 \
          eb0->current_length += DEFAULT_EXPORT_SIZE;                          \
          my_buf->records_in_this_buffer++;                                    \
          if (my_buf->records_in_this_buffer >= DEFAULT_EXPORT_RECORDS)        \
            {                                                                  \
              ioam_export_send_buffer (EM, VM, my_buf);                        \
              ioam_export_init_buffer (EM, VM, my_buf);                        \
            }                                                                  \
          ebi0 = my_buf->buffer_index;                                         \
          eb0 = vlib_get_buffer (VM, ebi0);                                    \
          if (PREDICT_FALSE (eb0 == 0))                                        \
            goto NO_BUFFER1;                                                   \
          copy3cachelines (eb0->data + eb0->current_length, ip1, ip_len1);     \
          FIXUP_FUNC(eb0, p1);                                                 \
          eb0->current_length += DEFAULT_EXPORT_SIZE;                          \
          my_buf->records_in_this_buffer++;                                    \
          if (my_buf->records_in_this_buffer >= DEFAULT_EXPORT_RECORDS)        \
            {                                                                  \
              ioam_export_send_buffer (EM, VM, my_buf);                        \
              ioam_export_init_buffer (EM, VM, my_buf);                        \
            }                                                                  \
          pkts_recorded += 2;                                                  \
          if (PREDICT_FALSE (((N)->flags & VLIB_NODE_FLAG_TRACE)))             \
            {                                                                  \
              if (p0->flags & VLIB_BUFFER_IS_TRACED)                           \
                {                                                              \
                  export_trace_t *t =                                          \
                    vlib_add_trace (VM, (N), p0, sizeof (*t));                 \
                  t->flow_label =                                              \
                    clib_net_to_host_u32 (ip0->V);                             \
                  t->next_index = next0;                                       \
                }                                                              \
              if (p1->flags & VLIB_BUFFER_IS_TRACED)                           \
                {                                                              \
                  export_trace_t *t =                                          \
                    vlib_add_trace (VM, N, p1, sizeof (*t));                   \
                  t->flow_label =                                              \
                    clib_net_to_host_u32 (ip1->V);                             \
                  t->next_index = next1;                                       \
                }                                                              \
            }                                                                  \
        NO_BUFFER1:                                                            \
          vlib_validate_buffer_enqueue_x2 (VM, N, next_index,                  \
                                           to_next, n_left_to_next,            \
                                           bi0, bi1, next0, next1);            \
        }                                                                      \
      while (n_left_from > 0 && n_left_to_next > 0)                            \
        {                                                                      \
          u32 bi0;                                                             \
          vlib_buffer_t *p0;                                                   \
          u32 next0 = NEXT;                                                    \
          HTYPE *ip0;                                                          \
          u32 ip_len0;                                                         \
          bi0 = from[0];                                                       \
          to_next[0] = bi0;                                                    \
          from += 1;                                                           \
          to_next += 1;                                                        \
          n_left_from -= 1;                                                    \
          n_left_to_next -= 1;                                                 \
          p0 = vlib_get_buffer (VM, bi0);                                      \
          ip0 = vlib_buffer_get_current (p0);                                  \
          ip_len0 =                                                            \
            clib_net_to_host_u16 (ip0->L) + sizeof (HTYPE);                    \
          ebi0 = my_buf->buffer_index;                                         \
          eb0 = vlib_get_buffer (VM, ebi0);                                    \
          if (PREDICT_FALSE (eb0 == 0))                                        \
            goto NO_BUFFER;                                                    \
          ip_len0 =                                                            \
            ip_len0 > DEFAULT_EXPORT_SIZE ? DEFAULT_EXPORT_SIZE : ip_len0;     \
          copy3cachelines (eb0->data + eb0->current_length, ip0, ip_len0);     \
          FIXUP_FUNC(eb0, p0);                                                 \
          eb0->current_length += DEFAULT_EXPORT_SIZE;                          \
          my_buf->records_in_this_buffer++;                                    \
          if (my_buf->records_in_this_buffer >= DEFAULT_EXPORT_RECORDS)        \
            {                                                                  \
              ioam_export_send_buffer (EM, VM, my_buf);                        \
              ioam_export_init_buffer (EM, VM, my_buf);                        \
            }                                                                  \
          if (PREDICT_FALSE (((N)->flags & VLIB_NODE_FLAG_TRACE)               \
                             && (p0->flags & VLIB_BUFFER_IS_TRACED)))          \
            {                                                                  \
              export_trace_t *t = vlib_add_trace (VM, (N), p0, sizeof (*t));   \
              t->flow_label =                                                  \
                clib_net_to_host_u32 (ip0->V);                                 \
              t->next_index = next0;                                           \
            }                                                                  \
          pkts_recorded += 1;                                                  \
        NO_BUFFER:                                                             \
          vlib_validate_buffer_enqueue_x1 (VM, N, next_index,                  \
                                           to_next, n_left_to_next,            \
                                           bi0, next0);                        \
        }                                                                      \
      vlib_put_next_frame (VM, N, next_index, n_left_to_next);                 \
    }                                                                          \
  vlib_node_increment_counter (VM, export_node.index,                          \
                               EXPORT_ERROR_RECORDED, pkts_recorded);          \
  clib_spinlock_unlock (&(EM)->lockp[(VM)->thread_index]);                     \
} while(0)
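
/*
 * Usage sketch for ioam_export_node_common(): a plugin's graph node wraps
 * it roughly as below. The function, enum and fixup names here are
 * illustrative only; the including file must also define export_next_t,
 * export_trace_t, copy3cachelines(), export_node and EXPORT_ERROR_RECORDED,
 * all of which the macro references.
 *
 *   static uword
 *   my_export_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
 *                      vlib_frame_t * frame)
 *   {
 *     ioam_export_main_t *em = &my_export_main;
 *     ioam_export_node_common (em, vm, node, frame, ip6_header_t,
 *                              payload_length,
 *                              ip_version_traffic_class_and_flow_label,
 *                              EXPORT_NEXT_POP_HBYH, my_fixup_func);
 *     return frame->n_vectors;
 *   }
 *
 * HTYPE/L/V name the copied header type and its length and flow-label
 * fields, NEXT is the next-node enum value for forwarded packets, and
 * FIXUP_FUNC(export_buffer, pkt_buffer) may patch the copied record in
 * place.
 */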

#endif /* __included_ioam_export_h__ */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */