/*
 * Copyright (c) 2016 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef __included_ioam_export_h__
#define __included_ioam_export_h__

#include <vnet/vnet.h>
#include <vnet/ip/ip.h>
#include <vnet/ip/ip_packet.h>
#include <vnet/ip/ip4_packet.h>
#include <vnet/ip/ip6_packet.h>
#include <vnet/ip/ip6_hop_by_hop.h>
#include <vnet/udp/udp.h>
#include <vnet/ipfix-export/ipfix_packet.h>

#include <vppinfra/pool.h>
#include <vppinfra/hash.h>
#include <vppinfra/error.h>
#include <vppinfra/elog.h>

#include <vlib/threads.h>

typedef struct ioam_export_buffer
{
  /** Required for pool_get_aligned */
  CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
  /* Allocated buffer */
  u32 buffer_index;
  u64 touched_at;
  u8 records_in_this_buffer;
} ioam_export_buffer_t;


typedef struct
{
  /* API message ID base */
  u16 msg_id_base;
  u16 set_id;

  /* TODO: to support multiple collectors, group all of this and keep a vector of it here */
  u8 *record_header;
  u32 sequence_number;
  u32 domain_id;

  /* IPFIX collector address and our own source address */
  ip4_address_t ipfix_collector;
  ip4_address_t src_address;

  /* Pool of ioam_export_buffer_t */
  ioam_export_buffer_t *buffer_pool;
  /* Per-thread vector of indices into buffer_pool */
  u32 *buffer_per_thread;
  /* Per-thread lock to swap buffers between worker and timer process */
  volatile u32 **lockp;

  /* time scale transform */
  u32 unix_time_0;
  f64 vlib_time_0;

  /* convenience */
  vlib_main_t *vlib_main;
  vnet_main_t *vnet_main;
  ethernet_main_t *ethernet_main;
  u32 next_node_index;

  uword my_hbh_slot;
  u32 export_process_node_index;
} ioam_export_main_t;


#define DEFAULT_EXPORT_SIZE (3 * CLIB_CACHE_LINE_BYTES)
/*
 * Number of records in a buffer
 * ~(MTU (1500) - [ip hdr(40) + UDP(8) + ipfix (24)]) / DEFAULT_EXPORT_SIZE
 */
#define DEFAULT_EXPORT_RECORDS 7
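
/*
 * Worked example of the arithmetic above (a sketch, assuming the common
 * case of 64-byte cache lines, i.e. CLIB_CACHE_LINE_BYTES == 64):
 *   DEFAULT_EXPORT_SIZE = 3 * 64 = 192 octets per record
 *   usable payload      = 1500 - (40 + 8 + 24) = 1428 octets
 *   records per buffer  = 1428 / 192 ~= 7.4, rounded down to 7
 */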

inline static void
ioam_export_set_next_node (ioam_export_main_t * em, u8 * next_node_name)
{
  vlib_node_t *next_node;

  next_node = vlib_get_node_by_name (em->vlib_main, next_node_name);
  em->next_node_index = next_node->index;
}

inline static void
ioam_export_reset_next_node (ioam_export_main_t * em)
{
  vlib_node_t *next_node;

  next_node = vlib_get_node_by_name (em->vlib_main, (u8 *) "ip4-lookup");
  em->next_node_index = next_node->index;
}

always_inline ioam_export_buffer_t *
ioam_export_get_my_buffer (ioam_export_main_t * em, u32 thread_id)
{

  if (vec_len (em->buffer_per_thread) > thread_id)
    return (pool_elt_at_index
            (em->buffer_pool, em->buffer_per_thread[thread_id]));
  return (0);
}

inline static int
ioam_export_buffer_add_header (ioam_export_main_t * em, vlib_buffer_t * b0)
{
  clib_memcpy (b0->data, em->record_header, vec_len (em->record_header));
  b0->current_data = 0;
  b0->current_length = vec_len (em->record_header);
  b0->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID;
  return (1);
}

inline static int
ioam_export_init_buffer (ioam_export_main_t * em, vlib_main_t * vm,
                         ioam_export_buffer_t * eb)
{
  vlib_buffer_t *b = 0;

  if (!eb)
    return (-1);
  /* TODO: Perhaps buffer init from template here */
  if (vlib_buffer_alloc (vm, &(eb->buffer_index), 1) != 1)
    return (-2);
  eb->records_in_this_buffer = 0;
  eb->touched_at = vlib_time_now (vm);
  b = vlib_get_buffer (vm, eb->buffer_index);
  (void) ioam_export_buffer_add_header (em, b);
  vnet_buffer (b)->sw_if_index[VLIB_RX] = 0;
  vnet_buffer (b)->sw_if_index[VLIB_TX] = ~0;
  return (1);
}

inline static void
ioam_export_thread_buffer_free (ioam_export_main_t * em)
{
  vlib_main_t *vm = em->vlib_main;
  ioam_export_buffer_t *eb = 0;
  int i;
  for (i = 0; i < vec_len (em->buffer_per_thread); i++)
    {
      eb = pool_elt_at_index (em->buffer_pool, em->buffer_per_thread[i]);
      if (eb)
        vlib_buffer_free (vm, &(eb->buffer_index), 1);
    }
  for (i = 0; i < vec_len (em->lockp); i++)
    clib_mem_free ((void *) em->lockp[i]);
  vec_free (em->buffer_per_thread);
  pool_free (em->buffer_pool);
  vec_free (em->lockp);
  em->buffer_per_thread = 0;
  em->buffer_pool = 0;
  em->lockp = 0;
}

inline static int
ioam_export_thread_buffer_init (ioam_export_main_t * em, vlib_main_t * vm)
{
  int no_of_threads = vec_len (vlib_worker_threads);
  int i;
  ioam_export_buffer_t *eb = 0;

  pool_alloc_aligned (em->buffer_pool,
                      no_of_threads - 1, CLIB_CACHE_LINE_BYTES);
  vec_validate_aligned (em->buffer_per_thread,
                        no_of_threads - 1, CLIB_CACHE_LINE_BYTES);
  vec_validate_aligned (em->lockp, no_of_threads - 1, CLIB_CACHE_LINE_BYTES);

  if (!em->buffer_per_thread || !em->buffer_pool || !em->lockp)
    {
      return (-1);
    }
  for (i = 0; i < no_of_threads; i++)
    {
      eb = 0;
      pool_get_aligned (em->buffer_pool, eb, CLIB_CACHE_LINE_BYTES);
      memset (eb, 0, sizeof (*eb));
      em->buffer_per_thread[i] = eb - em->buffer_pool;
      if (ioam_export_init_buffer (em, vm, eb) != 1)
        {
          ioam_export_thread_buffer_free (em);
          return (-2);
        }
      em->lockp[i] = clib_mem_alloc_aligned (CLIB_CACHE_LINE_BYTES,
                                             CLIB_CACHE_LINE_BYTES);
      memset ((void *) em->lockp[i], 0, CLIB_CACHE_LINE_BYTES);
    }
  return (1);
}
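
/*
 * Illustrative call sequence (a sketch, not part of this header): an
 * exporter typically allocates the per-thread buffers once when the
 * feature is enabled and frees them again on disable, along the lines of:
 *
 *   if (ioam_export_thread_buffer_init (em, vm) != 1)
 *     return -1;                          (buffer pool setup failed)
 *   ...
 *   ioam_export_thread_buffer_free (em);  (on disable / cleanup)
 */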

#define IPFIX_IOAM_EXPORT_ID 272
#define IPFIX_VXLAN_IOAM_EXPORT_ID 273

/* Used to build the rewrite */
/* data set packet */
typedef struct
{
  ipfix_message_header_t h;
  ipfix_set_header_t s;
} ipfix_data_packet_t;

typedef struct
{
  ip4_header_t ip4;
  udp_header_t udp;
  ipfix_data_packet_t ipfix;
} ip4_ipfix_data_packet_t;


inline static void
ioam_export_header_cleanup (ioam_export_main_t * em,
                            ip4_address_t * collector_address,
                            ip4_address_t * src_address)
{
  vec_free (em->record_header);
  em->record_header = 0;
}

inline static int
ioam_export_header_create (ioam_export_main_t * em,
                           ip4_address_t * collector_address,
                           ip4_address_t * src_address)
{
  ip4_header_t *ip;
  udp_header_t *udp;
  ipfix_message_header_t *h;
  ipfix_set_header_t *s;
  u8 *rewrite = 0;
  ip4_ipfix_data_packet_t *tp;


  /* allocate rewrite space */
  vec_validate_aligned (rewrite,
                        sizeof (ip4_ipfix_data_packet_t) - 1,
                        CLIB_CACHE_LINE_BYTES);

  tp = (ip4_ipfix_data_packet_t *) rewrite;
  ip = (ip4_header_t *) & tp->ip4;
  udp = (udp_header_t *) (ip + 1);
  h = (ipfix_message_header_t *) (udp + 1);
  s = (ipfix_set_header_t *) (h + 1);

  ip->ip_version_and_header_length = 0x45;
  ip->ttl = 254;
  ip->protocol = IP_PROTOCOL_UDP;
  ip->src_address.as_u32 = src_address->as_u32;
  ip->dst_address.as_u32 = collector_address->as_u32;
  udp->src_port = clib_host_to_net_u16 (UDP_DST_PORT_ipfix);
  udp->dst_port = clib_host_to_net_u16 (UDP_DST_PORT_ipfix);
  /* FIXUP: UDP length */
  udp->length = clib_host_to_net_u16 (vec_len (rewrite) +
                                      (DEFAULT_EXPORT_RECORDS *
                                       DEFAULT_EXPORT_SIZE) - sizeof (*ip));

  /* FIXUP: message header export_time */
  /* FIXUP: message header sequence_number */
  h->domain_id = clib_host_to_net_u32 (em->domain_id);

  /* FIXUP: set_id length in octets if records exported are not default */
  s->set_id_length = ipfix_set_id_length (em->set_id,
                                          (sizeof (*s) +
                                           (DEFAULT_EXPORT_RECORDS *
                                            DEFAULT_EXPORT_SIZE)));

  /* FIXUP: header version and length in octets if records exported are not default */
  h->version_length = version_length (sizeof (*h) +
                                      (sizeof (*s) +
                                       (DEFAULT_EXPORT_RECORDS *
                                        DEFAULT_EXPORT_SIZE)));

  /* FIXUP: ip length if records exported are not default */
  /* FIXUP: ip checksum if records exported are not default */
  ip->length = clib_host_to_net_u16 (vec_len (rewrite) +
                                     (DEFAULT_EXPORT_RECORDS *
                                      DEFAULT_EXPORT_SIZE));
  ip->checksum = ip4_header_checksum (ip);
  _vec_len (rewrite) = sizeof (ip4_ipfix_data_packet_t);
  em->record_header = rewrite;
  return (1);
}
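
/*
 * Sketch of how the pre-built header is managed when the collector or
 * source address changes (illustrative only; the variable names below
 * are hypothetical):
 *
 *   ioam_export_header_cleanup (em, &old_collector, &old_src);
 *   if (ioam_export_header_create (em, &collector, &src) != 1)
 *     ... handle template allocation failure ...
 */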

inline static int
ioam_export_send_buffer (ioam_export_main_t * em, vlib_main_t * vm,
                         ioam_export_buffer_t * eb)
{
  ip4_header_t *ip;
  udp_header_t *udp;
  ipfix_message_header_t *h;
  ipfix_set_header_t *s;
  ip4_ipfix_data_packet_t *tp;
  vlib_buffer_t *b0;
  u16 new_l0, old_l0;
  ip_csum_t sum0;
  vlib_frame_t *nf = 0;
  u32 *to_next;

  b0 = vlib_get_buffer (vm, eb->buffer_index);
  tp = vlib_buffer_get_current (b0);
  ip = (ip4_header_t *) & tp->ip4;
  udp = (udp_header_t *) (ip + 1);
  h = (ipfix_message_header_t *) (udp + 1);
  s = (ipfix_set_header_t *) (h + 1);

  /* FIXUP: message header export_time */
  h->export_time = clib_host_to_net_u32 ((u32)
                                         (((f64) em->unix_time_0) +
                                          (vlib_time_now (em->vlib_main) -
                                           em->vlib_time_0)));

  /* FIXUP: message header sequence_number */
  h->sequence_number = clib_host_to_net_u32 (em->sequence_number++);

  /* FIXUP: lengths if different from default */
  if (PREDICT_FALSE (eb->records_in_this_buffer != DEFAULT_EXPORT_RECORDS))
    {
      s->set_id_length = ipfix_set_id_length (em->set_id /* set_id */ ,
                                              b0->current_length -
                                              (sizeof (*ip) + sizeof (*udp) +
                                               sizeof (*h)));
      h->version_length =
        version_length (b0->current_length - (sizeof (*ip) + sizeof (*udp)));
      sum0 = ip->checksum;
      old_l0 = ip->length;
      new_l0 = clib_host_to_net_u16 ((u16) b0->current_length);
      sum0 = ip_csum_update (sum0, old_l0, new_l0, ip4_header_t,
                             length /* changed member */ );
      ip->checksum = ip_csum_fold (sum0);
      ip->length = new_l0;
      udp->length = clib_host_to_net_u16 (b0->current_length - sizeof (*ip));
    }

  /* Enqueue pkts to ip4-lookup */

  nf = vlib_get_frame_to_node (vm, em->next_node_index);
  nf->n_vectors = 0;
  to_next = vlib_frame_vector_args (nf);
  nf->n_vectors = 1;
  to_next[0] = eb->buffer_index;
  vlib_put_frame_to_node (vm, em->next_node_index, nf);
  return (1);

}

#define EXPORT_TIMEOUT (20.0)
#define THREAD_PERIOD (30.0)
inline static uword
ioam_export_process_common (ioam_export_main_t * em, vlib_main_t * vm,
                            vlib_node_runtime_t * rt, vlib_frame_t * f,
                            u32 index)
{
  f64 now;
  f64 timeout = 30.0;
  uword event_type;
  uword *event_data = 0;
  int i;
  ioam_export_buffer_t *eb = 0, *new_eb = 0;
  u32 *vec_buffer_indices = 0;
  u32 *vec_buffer_to_be_sent = 0;
  u32 *thread_index = 0;
  u32 new_pool_index = 0;

  em->export_process_node_index = index;
  /* Wait for Godot... */
  vlib_process_wait_for_event_or_clock (vm, 1e9);
  event_type = vlib_process_get_events (vm, &event_data);
  if (event_type != 1)
    clib_warning ("bogus kickoff event received, %d", event_type);
  vec_reset_length (event_data);

  while (1)
    {
      vlib_process_wait_for_event_or_clock (vm, timeout);
      event_type = vlib_process_get_events (vm, &event_data);
      switch (event_type)
        {
        case 2:         /* Stop and Wait for kickoff again */
          timeout = 1e9;
          break;
        case 1:         /* kickoff : Check for unsent buffers */
          timeout = THREAD_PERIOD;
          break;
        case ~0:                /* timeout */
          break;
        }
      vec_reset_length (event_data);
      now = vlib_time_now (vm);
      /*
       * Create buffers for threads that are not active enough
       * to send out the export records
       */
      for (i = 0; i < vec_len (em->buffer_per_thread); i++)
        {
          /* If the worker thread is processing export records, skip further checks */
          if (*em->lockp[i] == 1)
            continue;
          eb = pool_elt_at_index (em->buffer_pool, em->buffer_per_thread[i]);
          if (eb->records_in_this_buffer > 0
              && now > (eb->touched_at + EXPORT_TIMEOUT))
            {
              pool_get_aligned (em->buffer_pool, new_eb,
                                CLIB_CACHE_LINE_BYTES);
              memset (new_eb, 0, sizeof (*new_eb));
              if (ioam_export_init_buffer (em, vm, new_eb) == 1)
                {
                  new_pool_index = new_eb - em->buffer_pool;
                  vec_add (vec_buffer_indices, &new_pool_index, 1);
                  vec_add (vec_buffer_to_be_sent, &em->buffer_per_thread[i],
                           1);
                  vec_add (thread_index, &i, 1);
                }
              else
                {
                  pool_put (em->buffer_pool, new_eb);
                  /* Give up */
                  goto CLEANUP;
                }
            }
        }
      if (vec_len (thread_index) != 0)
        {
          /*
           * Now swap the buffers out
           */
          for (i = 0; i < vec_len (thread_index); i++)
            {
              while (__sync_lock_test_and_set (em->lockp[thread_index[i]], 1))
                ;
              em->buffer_per_thread[thread_index[i]] =
                vec_pop (vec_buffer_indices);
              *em->lockp[thread_index[i]] = 0;
            }

          /* Send the buffers */
          for (i = 0; i < vec_len (vec_buffer_to_be_sent); i++)
            {
              eb =
                pool_elt_at_index (em->buffer_pool, vec_buffer_to_be_sent[i]);
              ioam_export_send_buffer (em, vm, eb);
              pool_put (em->buffer_pool, eb);
            }
        }

    CLEANUP:
      /* Free any leftover/unused buffers and everything that was allocated */
      for (i = 0; i < vec_len (vec_buffer_indices); i++)
        {
          new_eb = pool_elt_at_index (em->buffer_pool, vec_buffer_indices[i]);
          vlib_buffer_free (vm, &new_eb->buffer_index, 1);
          pool_put (em->buffer_pool, new_eb);
        }
      vec_free (vec_buffer_indices);
      vec_free (vec_buffer_to_be_sent);
      vec_free (thread_index);
    }
  return 0;                     /* not so much */
}
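
/*
 * A process node built on top of this helper is a thin wrapper; a
 * minimal sketch (the node and main-struct names here are illustrative):
 *
 *   static uword
 *   ioam_export_process (vlib_main_t * vm, vlib_node_runtime_t * rt,
 *                        vlib_frame_t * f)
 *   {
 *     return (ioam_export_process_common (&ioam_export_main, vm, rt, f,
 *                                         export_process_node.index));
 *   }
 */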

#define ioam_export_node_common(EM, VM, N, F, HTYPE, L, V, NEXT, FIXUP_FUNC)   \
do {                                                                           \
  u32 n_left_from, *from, *to_next;                                            \
  export_next_t next_index;                                                    \
  u32 pkts_recorded = 0;                                                       \
  ioam_export_buffer_t *my_buf = 0;                                            \
  vlib_buffer_t *eb0 = 0;                                                      \
  u32 ebi0 = 0;                                                                \
  from = vlib_frame_vector_args (F);                                           \
  n_left_from = (F)->n_vectors;                                                \
  next_index = (N)->cached_next_index;                                         \
  while (__sync_lock_test_and_set ((EM)->lockp[(VM)->thread_index], 1));       \
  my_buf = ioam_export_get_my_buffer (EM, (VM)->thread_index);                 \
  my_buf->touched_at = vlib_time_now (VM);                                     \
  while (n_left_from > 0)                                                      \
    {                                                                          \
      u32 n_left_to_next;                                                      \
      vlib_get_next_frame (VM, N, next_index, to_next, n_left_to_next);        \
      while (n_left_from >= 4 && n_left_to_next >= 2)                          \
        {                                                                      \
          u32 next0 = NEXT;                                                    \
          u32 next1 = NEXT;                                                    \
          u32 bi0, bi1;                                                        \
          HTYPE *ip0, *ip1;                                                    \
          vlib_buffer_t *p0, *p1;                                              \
          u32 ip_len0, ip_len1;                                                \
          {                                                                    \
            vlib_buffer_t *p2, *p3;                                            \
            p2 = vlib_get_buffer (VM, from[2]);                                \
            p3 = vlib_get_buffer (VM, from[3]);                                \
            vlib_prefetch_buffer_header (p2, LOAD);                            \
            vlib_prefetch_buffer_header (p3, LOAD);                            \
            CLIB_PREFETCH (p2->data, 3 * CLIB_CACHE_LINE_BYTES, LOAD);         \
            CLIB_PREFETCH (p3->data, 3 * CLIB_CACHE_LINE_BYTES, LOAD);         \
          }                                                                    \
          to_next[0] = bi0 = from[0];                                          \
          to_next[1] = bi1 = from[1];                                          \
          from += 2;                                                           \
          to_next += 2;                                                        \
          n_left_from -= 2;                                                    \
          n_left_to_next -= 2;                                                 \
          p0 = vlib_get_buffer (VM, bi0);                                      \
          p1 = vlib_get_buffer (VM, bi1);                                      \
          ip0 = vlib_buffer_get_current (p0);                                  \
          ip1 = vlib_buffer_get_current (p1);                                  \
          ip_len0 =                                                            \
            clib_net_to_host_u16 (ip0->L) + sizeof (HTYPE);                    \
          ip_len1 =                                                            \
            clib_net_to_host_u16 (ip1->L) + sizeof (HTYPE);                    \
          ebi0 = my_buf->buffer_index;                                         \
          eb0 = vlib_get_buffer (VM, ebi0);                                    \
          if (PREDICT_FALSE (eb0 == 0))                                        \
            goto NO_BUFFER1;                                                   \
          ip_len0 =                                                            \
            ip_len0 > DEFAULT_EXPORT_SIZE ? DEFAULT_EXPORT_SIZE : ip_len0;     \
          ip_len1 =                                                            \
            ip_len1 > DEFAULT_EXPORT_SIZE ? DEFAULT_EXPORT_SIZE : ip_len1;     \
          copy3cachelines (eb0->data + eb0->current_length, ip0, ip_len0);     \
          FIXUP_FUNC(eb0, p0);                                                 \
          eb0->current_length += DEFAULT_EXPORT_SIZE;                          \
          my_buf->records_in_this_buffer++;                                    \
          if (my_buf->records_in_this_buffer >= DEFAULT_EXPORT_RECORDS)        \
            {                                                                  \
              ioam_export_send_buffer (EM, VM, my_buf);                        \
              ioam_export_init_buffer (EM, VM, my_buf);                        \
            }                                                                  \
          ebi0 = my_buf->buffer_index;                                         \
          eb0 = vlib_get_buffer (VM, ebi0);                                    \
          if (PREDICT_FALSE (eb0 == 0))                                        \
            goto NO_BUFFER1;                                                   \
          copy3cachelines (eb0->data + eb0->current_length, ip1, ip_len1);     \
          FIXUP_FUNC(eb0, p1);                                                 \
          eb0->current_length += DEFAULT_EXPORT_SIZE;                          \
          my_buf->records_in_this_buffer++;                                    \
          if (my_buf->records_in_this_buffer >= DEFAULT_EXPORT_RECORDS)        \
            {                                                                  \
              ioam_export_send_buffer (EM, VM, my_buf);                        \
              ioam_export_init_buffer (EM, VM, my_buf);                        \
            }                                                                  \
          pkts_recorded += 2;                                                  \
          if (PREDICT_FALSE (((N)->flags & VLIB_NODE_FLAG_TRACE)))             \
            {                                                                  \
              if (p0->flags & VLIB_BUFFER_IS_TRACED)                           \
                {                                                              \
                  export_trace_t *t =                                          \
                    vlib_add_trace (VM, (N), p0, sizeof (*t));                 \
                  t->flow_label =                                              \
                    clib_net_to_host_u32 (ip0->V);                             \
                  t->next_index = next0;                                       \
                }                                                              \
              if (p1->flags & VLIB_BUFFER_IS_TRACED)                           \
                {                                                              \
                  export_trace_t *t =                                          \
                    vlib_add_trace (VM, (N), p1, sizeof (*t));                 \
                  t->flow_label =                                              \
                    clib_net_to_host_u32 (ip1->V);                             \
                  t->next_index = next1;                                       \
                }                                                              \
            }                                                                  \
        NO_BUFFER1:                                                            \
          vlib_validate_buffer_enqueue_x2 (VM, N, next_index,                  \
                                           to_next, n_left_to_next,            \
                                           bi0, bi1, next0, next1);            \
        }                                                                      \
      while (n_left_from > 0 && n_left_to_next > 0)                            \
        {                                                                      \
          u32 bi0;                                                             \
          vlib_buffer_t *p0;                                                   \
          u32 next0 = NEXT;                                                    \
          HTYPE *ip0;                                                          \
          u32 ip_len0;                                                         \
          bi0 = from[0];                                                       \
          to_next[0] = bi0;                                                    \
          from += 1;                                                           \
          to_next += 1;                                                        \
          n_left_from -= 1;                                                    \
          n_left_to_next -= 1;                                                 \
          p0 = vlib_get_buffer (VM, bi0);                                      \
          ip0 = vlib_buffer_get_current (p0);                                  \
          ip_len0 =                                                            \
            clib_net_to_host_u16 (ip0->L) + sizeof (HTYPE);                    \
          ebi0 = my_buf->buffer_index;                                         \
          eb0 = vlib_get_buffer (VM, ebi0);                                    \
          if (PREDICT_FALSE (eb0 == 0))                                        \
            goto NO_BUFFER;                                                    \
          ip_len0 =                                                            \
            ip_len0 > DEFAULT_EXPORT_SIZE ? DEFAULT_EXPORT_SIZE : ip_len0;     \
          copy3cachelines (eb0->data + eb0->current_length, ip0, ip_len0);     \
          FIXUP_FUNC(eb0, p0);                                                 \
          eb0->current_length += DEFAULT_EXPORT_SIZE;                          \
          my_buf->records_in_this_buffer++;                                    \
          if (my_buf->records_in_this_buffer >= DEFAULT_EXPORT_RECORDS)        \
            {                                                                  \
              ioam_export_send_buffer (EM, VM, my_buf);                        \
              ioam_export_init_buffer (EM, VM, my_buf);                        \
            }                                                                  \
          if (PREDICT_FALSE (((N)->flags & VLIB_NODE_FLAG_TRACE)               \
                             && (p0->flags & VLIB_BUFFER_IS_TRACED)))          \
            {                                                                  \
              export_trace_t *t = vlib_add_trace (VM, (N), p0, sizeof (*t));   \
              t->flow_label =                                                  \
                clib_net_to_host_u32 (ip0->V);                                 \
              t->next_index = next0;                                           \
            }                                                                  \
          pkts_recorded += 1;                                                  \
        NO_BUFFER:                                                             \
          vlib_validate_buffer_enqueue_x1 (VM, N, next_index,                  \
                                           to_next, n_left_to_next,            \
                                           bi0, next0);                        \
        }                                                                      \
      vlib_put_next_frame (VM, N, next_index, n_left_to_next);                 \
    }                                                                          \
  vlib_node_increment_counter (VM, export_node.index,                          \
                               EXPORT_ERROR_RECORDED, pkts_recorded);          \
  *(EM)->lockp[(VM)->thread_index] = 0;                                        \
} while(0)
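
/*
 * The macro above is instantiated from a graph node's dispatch function.
 * It expects the including file to define export_next_t, export_trace_t,
 * export_node, EXPORT_ERROR_RECORDED and copy3cachelines. A sketch of an
 * IPv6 flavour of such an invocation (the next-index and fixup-function
 * names here are illustrative, not definitions from this header):
 *
 *   ioam_export_node_common (&ioam_export_main, vm, node, frame,
 *                            ip6_header_t, payload_length,
 *                            ip_version_traffic_class_and_flow_label,
 *                            EXPORT_NEXT_POP_HBYH, ip6_hbh_fixup);
 */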

#endif /* __included_ioam_export_h__ */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */