src/plugins/ioam/export-common/ioam_export.h
/*
 * Copyright (c) 2016 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef __included_ioam_export_h__
#define __included_ioam_export_h__

#include <vnet/vnet.h>
#include <vnet/ip/ip.h>
#include <vnet/ip/ip_packet.h>
#include <vnet/ip/ip4_packet.h>
#include <vnet/ip/ip6_packet.h>
#include <vnet/ip/ip6_hop_by_hop.h>
#include <vnet/udp/udp.h>
#include <vnet/flow/ipfix_packet.h>

#include <vppinfra/pool.h>
#include <vppinfra/hash.h>
#include <vppinfra/error.h>
#include <vppinfra/elog.h>

#include <vlib/threads.h>

typedef struct ioam_export_buffer
{
  /* Allocated buffer */
  u32 buffer_index;
  u64 touched_at;
  u8 records_in_this_buffer;
} ioam_export_buffer_t;

typedef struct
{
  /* API message ID base */
  u16 msg_id_base;
  u16 set_id;

  /* TODO: to support multiple collectors, group all of this and keep a vector of it here */
  u8 *record_header;
  u32 sequence_number;
  u32 domain_id;

  /* IPFIX collector address and our source address */
  ip4_address_t ipfix_collector;
  ip4_address_t src_address;

  /* Pool of ioam_export_buffer_t */
  ioam_export_buffer_t *buffer_pool;
  /* Vector mapping each thread to its buffer pool index */
  u32 *buffer_per_thread;
  /* Per-thread lock used to swap buffers between worker and timer process */
  volatile u32 **lockp;

  /* time scale transform */
  u32 unix_time_0;
  f64 vlib_time_0;

  /* convenience */
  vlib_main_t *vlib_main;
  vnet_main_t *vnet_main;
  ethernet_main_t *ethernet_main;
  u32 next_node_index;

  uword my_hbh_slot;
  u32 export_process_node_index;
} ioam_export_main_t;

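/*
 * Locking sketch (illustrative only, based on the pattern used by the
 * worker node macro and the export process below): the per-thread lock
 * word is taken with a test-and-set spin loop and released by storing 0.
 *
 *   while (__sync_lock_test_and_set (em->lockp[thread_id], 1))
 *     ;
 *   ... fill or swap the thread's export buffer ...
 *   *em->lockp[thread_id] = 0;
 */
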
extern ioam_export_main_t ioam_export_main;
extern ioam_export_main_t vxlan_gpe_ioam_export_main;

extern vlib_node_registration_t export_node;
extern vlib_node_registration_t vxlan_export_node;

#define DEFAULT_EXPORT_SIZE (3 * CLIB_CACHE_LINE_BYTES)
/*
 * Number of records in a buffer
 * ~(MTU (1500) - [ip hdr (40) + UDP (8) + ipfix (24)]) / DEFAULT_EXPORT_SIZE
 */
#define DEFAULT_EXPORT_RECORDS 7

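/*
 * Worked out (a sketch assuming 64-byte cache lines, i.e.
 * DEFAULT_EXPORT_SIZE = 3 * 64 = 192 octets per record):
 *   (1500 - (40 + 8 + 24)) / 192 = 1428 / 192 ~= 7.4, i.e. 7 whole records.
 */
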
inline static void
ioam_export_set_next_node (ioam_export_main_t * em, u8 * next_node_name)
{
  vlib_node_t *next_node;

  next_node = vlib_get_node_by_name (em->vlib_main, next_node_name);
  em->next_node_index = next_node->index;
}

inline static void
ioam_export_reset_next_node (ioam_export_main_t * em)
{
  vlib_node_t *next_node;

  next_node = vlib_get_node_by_name (em->vlib_main, (u8 *) "ip4-lookup");
  em->next_node_index = next_node->index;
}

always_inline ioam_export_buffer_t *
ioam_export_get_my_buffer (ioam_export_main_t * em, u32 thread_id)
{
  if (vec_len (em->buffer_per_thread) > thread_id)
    return (pool_elt_at_index
	    (em->buffer_pool, em->buffer_per_thread[thread_id]));
  return (0);
}

inline static int
ioam_export_buffer_add_header (ioam_export_main_t * em, vlib_buffer_t * b0)
{
  clib_memcpy (b0->data, em->record_header, vec_len (em->record_header));
  b0->current_data = 0;
  b0->current_length = vec_len (em->record_header);
  b0->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID;
  return (1);
}

inline static int
ioam_export_init_buffer (ioam_export_main_t * em, vlib_main_t * vm,
			 ioam_export_buffer_t * eb)
{
  vlib_buffer_t *b = 0;

  if (!eb)
    return (-1);
  /* TODO: Perhaps buffer init from template here */
  if (vlib_buffer_alloc (vm, &(eb->buffer_index), 1) != 1)
    return (-2);
  eb->records_in_this_buffer = 0;
  eb->touched_at = vlib_time_now (vm);
  b = vlib_get_buffer (vm, eb->buffer_index);
  (void) ioam_export_buffer_add_header (em, b);
  vnet_buffer (b)->sw_if_index[VLIB_RX] = 0;
  vnet_buffer (b)->sw_if_index[VLIB_TX] = ~0;
  return (1);
}

inline static void
ioam_export_thread_buffer_free (ioam_export_main_t * em)
{
  vlib_main_t *vm = em->vlib_main;
  ioam_export_buffer_t *eb = 0;
  int i;
  for (i = 0; i < vec_len (em->buffer_per_thread); i++)
    {
      eb = pool_elt_at_index (em->buffer_pool, em->buffer_per_thread[i]);
      if (eb)
	vlib_buffer_free (vm, &(eb->buffer_index), 1);
    }
  for (i = 0; i < vec_len (em->lockp); i++)
    clib_mem_free ((void *) em->lockp[i]);
  vec_free (em->buffer_per_thread);
  pool_free (em->buffer_pool);
  vec_free (em->lockp);
  em->buffer_per_thread = 0;
  em->buffer_pool = 0;
  em->lockp = 0;
}

inline static int
ioam_export_thread_buffer_init (ioam_export_main_t * em, vlib_main_t * vm)
{
  int no_of_threads = vec_len (vlib_worker_threads);
  int i;
  ioam_export_buffer_t *eb = 0;

  pool_alloc_aligned (em->buffer_pool,
		      no_of_threads - 1, CLIB_CACHE_LINE_BYTES);
  vec_validate_aligned (em->buffer_per_thread,
			no_of_threads - 1, CLIB_CACHE_LINE_BYTES);
  vec_validate_aligned (em->lockp, no_of_threads - 1, CLIB_CACHE_LINE_BYTES);

  if (!em->buffer_per_thread || !em->buffer_pool || !em->lockp)
    {
      return (-1);
    }
  for (i = 0; i < no_of_threads; i++)
    {
      eb = 0;
      pool_get_aligned (em->buffer_pool, eb, CLIB_CACHE_LINE_BYTES);
      memset (eb, 0, sizeof (*eb));
      em->buffer_per_thread[i] = eb - em->buffer_pool;
      if (ioam_export_init_buffer (em, vm, eb) != 1)
	{
	  ioam_export_thread_buffer_free (em);
	  return (-2);
	}
      em->lockp[i] = clib_mem_alloc_aligned (CLIB_CACHE_LINE_BYTES,
					     CLIB_CACHE_LINE_BYTES);
      memset ((void *) em->lockp[i], 0, CLIB_CACHE_LINE_BYTES);
    }
  return (1);
}

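/*
 * Usage sketch (hypothetical, for illustration): a feature-enable handler
 * would typically allocate the per-thread buffers before turning on the
 * export node, and tear them down on disable, e.g.
 *
 *   if (ioam_export_thread_buffer_init (em, em->vlib_main) != 1)
 *     return (-1);
 *   ...
 *   ioam_export_thread_buffer_free (em);
 */
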
#define IPFIX_IOAM_EXPORT_ID 272
#define IPFIX_VXLAN_IOAM_EXPORT_ID 273

/* Used to build the rewrite */
/* data set packet */
typedef struct
{
  ipfix_message_header_t h;
  ipfix_set_header_t s;
} ipfix_data_packet_t;

typedef struct
{
  ip4_header_t ip4;
  udp_header_t udp;
  ipfix_data_packet_t ipfix;
} ip4_ipfix_data_packet_t;

inline static void
ioam_export_header_cleanup (ioam_export_main_t * em,
			    ip4_address_t * collector_address,
			    ip4_address_t * src_address)
{
  vec_free (em->record_header);
  em->record_header = 0;
}

inline static int
ioam_export_header_create (ioam_export_main_t * em,
			   ip4_address_t * collector_address,
			   ip4_address_t * src_address)
{
  ip4_header_t *ip;
  udp_header_t *udp;
  ipfix_message_header_t *h;
  ipfix_set_header_t *s;
  u8 *rewrite = 0;
  ip4_ipfix_data_packet_t *tp;

  /* allocate rewrite space */
  vec_validate_aligned (rewrite,
			sizeof (ip4_ipfix_data_packet_t) - 1,
			CLIB_CACHE_LINE_BYTES);

  tp = (ip4_ipfix_data_packet_t *) rewrite;
  ip = (ip4_header_t *) & tp->ip4;
  udp = (udp_header_t *) (ip + 1);
  h = (ipfix_message_header_t *) (udp + 1);
  s = (ipfix_set_header_t *) (h + 1);

  ip->ip_version_and_header_length = 0x45;
  ip->ttl = 254;
  ip->protocol = IP_PROTOCOL_UDP;
  ip->src_address.as_u32 = src_address->as_u32;
  ip->dst_address.as_u32 = collector_address->as_u32;
  udp->src_port = clib_host_to_net_u16 (UDP_DST_PORT_ipfix);
  udp->dst_port = clib_host_to_net_u16 (UDP_DST_PORT_ipfix);
  /* FIXUP: UDP length */
  udp->length = clib_host_to_net_u16 (vec_len (rewrite) +
				      (DEFAULT_EXPORT_RECORDS *
				       DEFAULT_EXPORT_SIZE) - sizeof (*ip));

  /* FIXUP: message header export_time */
  /* FIXUP: message header sequence_number */
  h->domain_id = clib_host_to_net_u32 (em->domain_id);

  /* FIXUP: set_id_length in octets if records exported are not default */
  s->set_id_length = ipfix_set_id_length (em->set_id,
					  (sizeof (*s) +
					   (DEFAULT_EXPORT_RECORDS *
					    DEFAULT_EXPORT_SIZE)));

  /* FIXUP: h version_length in octets if records exported are not default */
  h->version_length = version_length (sizeof (*h) +
				      (sizeof (*s) +
				       (DEFAULT_EXPORT_RECORDS *
					DEFAULT_EXPORT_SIZE)));

  /* FIXUP: ip length if records exported are not default */
  /* FIXUP: ip checksum if records exported are not default */
  ip->length = clib_host_to_net_u16 (vec_len (rewrite) +
				     (DEFAULT_EXPORT_RECORDS *
				      DEFAULT_EXPORT_SIZE));
  ip->checksum = ip4_header_checksum (ip);
  _vec_len (rewrite) = sizeof (ip4_ipfix_data_packet_t);
  em->record_header = rewrite;
  return (1);
}

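/*
 * Usage sketch (hypothetical, for illustration): the configuration path
 * builds the IPFIX rewrite once and every export buffer then starts from
 * a copy of it (see ioam_export_buffer_add_header), e.g.
 *
 *   ioam_export_header_create (em, &collector_ip4, &source_ip4);
 *   ...
 *   ioam_export_header_cleanup (em, &collector_ip4, &source_ip4);
 *
 * collector_ip4 and source_ip4 here are illustrative ip4_address_t values.
 */
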
inline static int
ioam_export_send_buffer (ioam_export_main_t * em, vlib_main_t * vm,
			 ioam_export_buffer_t * eb)
{
  ip4_header_t *ip;
  udp_header_t *udp;
  ipfix_message_header_t *h;
  ipfix_set_header_t *s;
  ip4_ipfix_data_packet_t *tp;
  vlib_buffer_t *b0;
  u16 new_l0, old_l0;
  ip_csum_t sum0;
  vlib_frame_t *nf = 0;
  u32 *to_next;

  b0 = vlib_get_buffer (vm, eb->buffer_index);
  tp = vlib_buffer_get_current (b0);
  ip = (ip4_header_t *) & tp->ip4;
  udp = (udp_header_t *) (ip + 1);
  h = (ipfix_message_header_t *) (udp + 1);
  s = (ipfix_set_header_t *) (h + 1);

  /* FIXUP: message header export_time */
  h->export_time = clib_host_to_net_u32 ((u32)
					 (((f64) em->unix_time_0) +
					  (vlib_time_now (em->vlib_main) -
					   em->vlib_time_0)));

  /* FIXUP: message header sequence_number */
  h->sequence_number = clib_host_to_net_u32 (em->sequence_number++);

  /* FIXUP: lengths if different from default */
  if (PREDICT_FALSE (eb->records_in_this_buffer != DEFAULT_EXPORT_RECORDS))
    {
      s->set_id_length = ipfix_set_id_length (em->set_id /* set_id */ ,
					      b0->current_length -
					      (sizeof (*ip) + sizeof (*udp) +
					       sizeof (*h)));
      h->version_length =
	version_length (b0->current_length - (sizeof (*ip) + sizeof (*udp)));
      sum0 = ip->checksum;
      old_l0 = ip->length;
      new_l0 = clib_host_to_net_u16 ((u16) b0->current_length);
      sum0 = ip_csum_update (sum0, old_l0, new_l0, ip4_header_t,
			     length /* changed member */ );
      ip->checksum = ip_csum_fold (sum0);
      ip->length = new_l0;
      udp->length = clib_host_to_net_u16 (b0->current_length - sizeof (*ip));
    }

  /* Enqueue pkts to ip4-lookup */
  nf = vlib_get_frame_to_node (vm, em->next_node_index);
  nf->n_vectors = 0;
  to_next = vlib_frame_vector_args (nf);
  nf->n_vectors = 1;
  to_next[0] = eb->buffer_index;
  vlib_put_frame_to_node (vm, em->next_node_index, nf);
  return (1);
}

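/*
 * Note: vlib_put_frame_to_node hands the buffer to the next node
 * (ip4-lookup by default), so after a successful send the caller starts a
 * fresh buffer via ioam_export_init_buffer instead of reusing this one.
 */
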
#define EXPORT_TIMEOUT (20.0)
#define THREAD_PERIOD (30.0)
inline static uword
ioam_export_process_common (ioam_export_main_t * em, vlib_main_t * vm,
			    vlib_node_runtime_t * rt, vlib_frame_t * f,
			    u32 index)
{
  f64 now;
  f64 timeout = 30.0;
  uword event_type;
  uword *event_data = 0;
  int i;
  ioam_export_buffer_t *eb = 0, *new_eb = 0;
  u32 *vec_buffer_indices = 0;
  u32 *vec_buffer_to_be_sent = 0;
  u32 *thread_index = 0;
  u32 new_pool_index = 0;

  em->export_process_node_index = index;
  /* Wait for Godot... */
  vlib_process_wait_for_event_or_clock (vm, 1e9);
  event_type = vlib_process_get_events (vm, &event_data);
  if (event_type != 1)
    clib_warning ("bogus kickoff event received, %d", event_type);
  vec_reset_length (event_data);

  while (1)
    {
      vlib_process_wait_for_event_or_clock (vm, timeout);
      event_type = vlib_process_get_events (vm, &event_data);
      switch (event_type)
	{
	case 2:		/* Stop and wait for kickoff again */
	  timeout = 1e9;
	  break;
	case 1:		/* Kickoff: check for unsent buffers */
	  timeout = THREAD_PERIOD;
	  break;
	case ~0:		/* Timeout */
	  break;
	}
      vec_reset_length (event_data);
      now = vlib_time_now (vm);
      /*
       * Create buffers for threads that are not active enough
       * to send out the export records
       */
      for (i = 0; i < vec_len (em->buffer_per_thread); i++)
	{
	  /* If the worker thread is processing export records ignore further checks */
	  if (*em->lockp[i] == 1)
	    continue;
	  eb = pool_elt_at_index (em->buffer_pool, em->buffer_per_thread[i]);
	  if (eb->records_in_this_buffer > 0
	      && now > (eb->touched_at + EXPORT_TIMEOUT))
	    {
	      pool_get_aligned (em->buffer_pool, new_eb,
				CLIB_CACHE_LINE_BYTES);
	      memset (new_eb, 0, sizeof (*new_eb));
	      if (ioam_export_init_buffer (em, vm, new_eb) == 1)
		{
		  new_pool_index = new_eb - em->buffer_pool;
		  vec_add (vec_buffer_indices, &new_pool_index, 1);
		  vec_add (vec_buffer_to_be_sent, &em->buffer_per_thread[i],
			   1);
		  vec_add (thread_index, &i, 1);
		}
	      else
		{
		  pool_put (em->buffer_pool, new_eb);
		  /* Give up */
		  goto CLEANUP;
		}
	    }
	}
      if (vec_len (thread_index) != 0)
	{
	  /*
	   * Now swap the buffers out
	   */
	  for (i = 0; i < vec_len (thread_index); i++)
	    {
	      while (__sync_lock_test_and_set (em->lockp[thread_index[i]], 1))
		;
	      em->buffer_per_thread[thread_index[i]] =
		vec_pop (vec_buffer_indices);
	      *em->lockp[thread_index[i]] = 0;
	    }

	  /* Send the buffers */
	  for (i = 0; i < vec_len (vec_buffer_to_be_sent); i++)
	    {
	      eb =
		pool_elt_at_index (em->buffer_pool, vec_buffer_to_be_sent[i]);
	      ioam_export_send_buffer (em, vm, eb);
	      pool_put (em->buffer_pool, eb);
	    }
	}

    CLEANUP:
      /* Free any leftover/unused buffers and everything that was allocated */
      for (i = 0; i < vec_len (vec_buffer_indices); i++)
	{
	  new_eb = pool_elt_at_index (em->buffer_pool, vec_buffer_indices[i]);
	  vlib_buffer_free (vm, &new_eb->buffer_index, 1);
	  pool_put (em->buffer_pool, new_eb);
	}
      vec_free (vec_buffer_indices);
      vec_free (vec_buffer_to_be_sent);
      vec_free (thread_index);
    }
  return 0;			/* not so much */
}

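/*
 * Wiring sketch (illustrative; the function name below is not part of this
 * header): each exporter registers a VLIB process node whose function
 * simply delegates to ioam_export_process_common, e.g.
 *
 *   static uword
 *   ioam_export_process (vlib_main_t * vm, vlib_node_runtime_t * rt,
 *                        vlib_frame_t * f)
 *   {
 *     return ioam_export_process_common (&ioam_export_main, vm, rt, f,
 *                                        export_node.index);
 *   }
 */
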
#define ioam_export_node_common(EM, VM, N, F, HTYPE, L, V, NEXT, FIXUP_FUNC)   \
do {                                                                           \
  u32 n_left_from, *from, *to_next;                                            \
  export_next_t next_index;                                                    \
  u32 pkts_recorded = 0;                                                       \
  ioam_export_buffer_t *my_buf = 0;                                            \
  vlib_buffer_t *eb0 = 0;                                                      \
  u32 ebi0 = 0;                                                                \
  from = vlib_frame_vector_args (F);                                           \
  n_left_from = (F)->n_vectors;                                                \
  next_index = (N)->cached_next_index;                                         \
  while (__sync_lock_test_and_set ((EM)->lockp[(VM)->cpu_index], 1));          \
  my_buf = ioam_export_get_my_buffer (EM, (VM)->cpu_index);                    \
  my_buf->touched_at = vlib_time_now (VM);                                     \
  while (n_left_from > 0)                                                      \
    {                                                                          \
      u32 n_left_to_next;                                                      \
      vlib_get_next_frame (VM, N, next_index, to_next, n_left_to_next);        \
      while (n_left_from >= 4 && n_left_to_next >= 2)                          \
        {                                                                      \
          u32 next0 = NEXT;                                                    \
          u32 next1 = NEXT;                                                    \
          u32 bi0, bi1;                                                        \
          HTYPE *ip0, *ip1;                                                    \
          vlib_buffer_t *p0, *p1;                                              \
          u32 ip_len0, ip_len1;                                                \
          {                                                                    \
            vlib_buffer_t *p2, *p3;                                            \
            p2 = vlib_get_buffer (VM, from[2]);                                \
            p3 = vlib_get_buffer (VM, from[3]);                                \
            vlib_prefetch_buffer_header (p2, LOAD);                            \
            vlib_prefetch_buffer_header (p3, LOAD);                            \
            CLIB_PREFETCH (p2->data, 3 * CLIB_CACHE_LINE_BYTES, LOAD);         \
            CLIB_PREFETCH (p3->data, 3 * CLIB_CACHE_LINE_BYTES, LOAD);         \
          }                                                                    \
          to_next[0] = bi0 = from[0];                                          \
          to_next[1] = bi1 = from[1];                                          \
          from += 2;                                                           \
          to_next += 2;                                                        \
          n_left_from -= 2;                                                    \
          n_left_to_next -= 2;                                                 \
          p0 = vlib_get_buffer (VM, bi0);                                      \
          p1 = vlib_get_buffer (VM, bi1);                                      \
          ip0 = vlib_buffer_get_current (p0);                                  \
          ip1 = vlib_buffer_get_current (p1);                                  \
          ip_len0 =                                                            \
            clib_net_to_host_u16 (ip0->L) + sizeof (HTYPE);                    \
          ip_len1 =                                                            \
            clib_net_to_host_u16 (ip1->L) + sizeof (HTYPE);                    \
          ebi0 = my_buf->buffer_index;                                         \
          eb0 = vlib_get_buffer (VM, ebi0);                                    \
          if (PREDICT_FALSE (eb0 == 0))                                        \
            goto NO_BUFFER1;                                                   \
          ip_len0 =                                                            \
            ip_len0 > DEFAULT_EXPORT_SIZE ? DEFAULT_EXPORT_SIZE : ip_len0;     \
          ip_len1 =                                                            \
            ip_len1 > DEFAULT_EXPORT_SIZE ? DEFAULT_EXPORT_SIZE : ip_len1;     \
          copy3cachelines (eb0->data + eb0->current_length, ip0, ip_len0);     \
          FIXUP_FUNC(eb0, p0);                                                 \
          eb0->current_length += DEFAULT_EXPORT_SIZE;                          \
          my_buf->records_in_this_buffer++;                                    \
          if (my_buf->records_in_this_buffer >= DEFAULT_EXPORT_RECORDS)        \
            {                                                                  \
              ioam_export_send_buffer (EM, VM, my_buf);                        \
              ioam_export_init_buffer (EM, VM, my_buf);                        \
            }                                                                  \
          ebi0 = my_buf->buffer_index;                                         \
          eb0 = vlib_get_buffer (VM, ebi0);                                    \
          if (PREDICT_FALSE (eb0 == 0))                                        \
            goto NO_BUFFER1;                                                   \
          copy3cachelines (eb0->data + eb0->current_length, ip1, ip_len1);     \
          FIXUP_FUNC(eb0, p1);                                                 \
          eb0->current_length += DEFAULT_EXPORT_SIZE;                          \
          my_buf->records_in_this_buffer++;                                    \
          if (my_buf->records_in_this_buffer >= DEFAULT_EXPORT_RECORDS)        \
            {                                                                  \
              ioam_export_send_buffer (EM, VM, my_buf);                        \
              ioam_export_init_buffer (EM, VM, my_buf);                        \
            }                                                                  \
          pkts_recorded += 2;                                                  \
          if (PREDICT_FALSE (((N)->flags & VLIB_NODE_FLAG_TRACE)))             \
            {                                                                  \
              if (p0->flags & VLIB_BUFFER_IS_TRACED)                           \
                {                                                              \
                  export_trace_t *t =                                          \
                    vlib_add_trace (VM, (N), p0, sizeof (*t));                 \
                  t->flow_label =                                              \
                    clib_net_to_host_u32 (ip0->V);                             \
                  t->next_index = next0;                                       \
                }                                                              \
              if (p1->flags & VLIB_BUFFER_IS_TRACED)                           \
                {                                                              \
                  export_trace_t *t =                                          \
                    vlib_add_trace (VM, N, p1, sizeof (*t));                   \
                  t->flow_label =                                              \
                    clib_net_to_host_u32 (ip1->V);                             \
                  t->next_index = next1;                                       \
                }                                                              \
            }                                                                  \
        NO_BUFFER1:                                                            \
          vlib_validate_buffer_enqueue_x2 (VM, N, next_index,                  \
                                           to_next, n_left_to_next,            \
                                           bi0, bi1, next0, next1);            \
        }                                                                      \
      while (n_left_from > 0 && n_left_to_next > 0)                            \
        {                                                                      \
          u32 bi0;                                                             \
          vlib_buffer_t *p0;                                                   \
          u32 next0 = NEXT;                                                    \
          HTYPE *ip0;                                                          \
          u32 ip_len0;                                                         \
          bi0 = from[0];                                                       \
          to_next[0] = bi0;                                                    \
          from += 1;                                                           \
          to_next += 1;                                                        \
          n_left_from -= 1;                                                    \
          n_left_to_next -= 1;                                                 \
          p0 = vlib_get_buffer (VM, bi0);                                      \
          ip0 = vlib_buffer_get_current (p0);                                  \
          ip_len0 =                                                            \
            clib_net_to_host_u16 (ip0->L) + sizeof (HTYPE);                    \
          ebi0 = my_buf->buffer_index;                                         \
          eb0 = vlib_get_buffer (VM, ebi0);                                    \
          if (PREDICT_FALSE (eb0 == 0))                                        \
            goto NO_BUFFER;                                                    \
          ip_len0 =                                                            \
            ip_len0 > DEFAULT_EXPORT_SIZE ? DEFAULT_EXPORT_SIZE : ip_len0;     \
          copy3cachelines (eb0->data + eb0->current_length, ip0, ip_len0);     \
          FIXUP_FUNC(eb0, p0);                                                 \
          eb0->current_length += DEFAULT_EXPORT_SIZE;                          \
          my_buf->records_in_this_buffer++;                                    \
          if (my_buf->records_in_this_buffer >= DEFAULT_EXPORT_RECORDS)        \
            {                                                                  \
              ioam_export_send_buffer (EM, VM, my_buf);                        \
              ioam_export_init_buffer (EM, VM, my_buf);                        \
            }                                                                  \
          if (PREDICT_FALSE (((N)->flags & VLIB_NODE_FLAG_TRACE)               \
                             && (p0->flags & VLIB_BUFFER_IS_TRACED)))          \
            {                                                                  \
              export_trace_t *t = vlib_add_trace (VM, (N), p0, sizeof (*t));   \
              t->flow_label =                                                  \
                clib_net_to_host_u32 (ip0->V);                                 \
              t->next_index = next0;                                           \
            }                                                                  \
          pkts_recorded += 1;                                                  \
        NO_BUFFER:                                                             \
          vlib_validate_buffer_enqueue_x1 (VM, N, next_index,                  \
                                           to_next, n_left_to_next,            \
                                           bi0, next0);                        \
        }                                                                      \
      vlib_put_next_frame (VM, N, next_index, n_left_to_next);                 \
    }                                                                          \
  vlib_node_increment_counter (VM, export_node.index,                          \
                               EXPORT_ERROR_RECORDED, pkts_recorded);          \
  *(EM)->lockp[(VM)->cpu_index] = 0;                                           \
} while(0)

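/*
 * Invocation sketch (hypothetical identifiers, for illustration): a
 * per-protocol export node body expands the macro with its header type,
 * length field, trace field, next index and fixup callback, e.g.
 *
 *   ioam_export_node_common (&ioam_export_main, vm, node, frame,
 *                            ip6_header_t, payload_length,
 *                            ip_version_traffic_class_and_flow_label,
 *                            EXPORT_NEXT_POP_HBYH, ip6_export_fixup_func);
 *
 * The including file is expected to define export_next_t, export_trace_t,
 * EXPORT_ERROR_RECORDED and copy3cachelines before expanding the macro.
 */
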
#endif /* __included_ioam_export_h__ */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */