VPP-311: Coding standards cleanup for vnet/vnet/*.[ch]
vpp.git: vnet/vnet/interface_output.c
1 /*
2  * Copyright (c) 2015 Cisco and/or its affiliates.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at:
6  *
7  *     http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15 /*
16  * interface_output.c: interface output node
17  *
18  * Copyright (c) 2008 Eliot Dresselhaus
19  *
20  * Permission is hereby granted, free of charge, to any person obtaining
21  * a copy of this software and associated documentation files (the
22  * "Software"), to deal in the Software without restriction, including
23  * without limitation the rights to use, copy, modify, merge, publish,
24  * distribute, sublicense, and/or sell copies of the Software, and to
25  * permit persons to whom the Software is furnished to do so, subject to
26  * the following conditions:
27  *
28  * The above copyright notice and this permission notice shall be
29  * included in all copies or substantial portions of the Software.
30  *
31  *  THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
32  *  EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
33  *  MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
34  *  NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
35  *  LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
36  *  OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
37  *  WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
38  */
39
40 #include <vnet/vnet.h>
41
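/* Per-packet trace record for interface output: the TX sw_if_index plus the
   first bytes of packet data, padded so the whole record is 128 bytes. */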
42 typedef struct
43 {
44   u32 sw_if_index;
45   u8 data[128 - sizeof (u32)];
46 }
47 interface_output_trace_t;
48
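/* Format one interface-output trace record: the TX interface name followed by
   the captured packet bytes, using the node's registered buffer formatter when
   present, otherwise a hex dump. */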
49 u8 *
50 format_vnet_interface_output_trace (u8 * s, va_list * va)
51 {
52   CLIB_UNUSED (vlib_main_t * vm) = va_arg (*va, vlib_main_t *);
53   vlib_node_t *node = va_arg (*va, vlib_node_t *);
54   interface_output_trace_t *t = va_arg (*va, interface_output_trace_t *);
55   vnet_main_t *vnm = vnet_get_main ();
56   vnet_sw_interface_t *si;
57   uword indent;
58
59   if (t->sw_if_index != (u32) ~ 0)
60     {
61       si = vnet_get_sw_interface (vnm, t->sw_if_index);
62       indent = format_get_indent (s);
63
64       s = format (s, "%U\n%U%U",
65                   format_vnet_sw_interface_name, vnm, si,
66                   format_white_space, indent,
67                   node->format_buffer ? node->
68                   format_buffer : format_hex_bytes, t->data,
69                   sizeof (t->data));
70     }
71   return s;
72 }
73
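/* Walk the frame and add a trace record for every buffer that has
   VLIB_BUFFER_IS_TRACED set: two buffers per iteration with prefetch of the
   next pair, followed by a single-buffer cleanup loop. */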
74 static void
75 vnet_interface_output_trace (vlib_main_t * vm,
76                              vlib_node_runtime_t * node,
77                              vlib_frame_t * frame, uword n_buffers)
78 {
79   u32 n_left, *from;
80
81   n_left = n_buffers;
82   from = vlib_frame_args (frame);
83
84   while (n_left >= 4)
85     {
86       u32 bi0, bi1;
87       vlib_buffer_t *b0, *b1;
88       interface_output_trace_t *t0, *t1;
89
90       /* Prefetch next iteration. */
91       vlib_prefetch_buffer_with_index (vm, from[2], LOAD);
92       vlib_prefetch_buffer_with_index (vm, from[3], LOAD);
93
94       bi0 = from[0];
95       bi1 = from[1];
96
97       b0 = vlib_get_buffer (vm, bi0);
98       b1 = vlib_get_buffer (vm, bi1);
99
100       if (b0->flags & VLIB_BUFFER_IS_TRACED)
101         {
102           t0 = vlib_add_trace (vm, node, b0, sizeof (t0[0]));
103           t0->sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_TX];
104           clib_memcpy (t0->data, vlib_buffer_get_current (b0),
105                        sizeof (t0->data));
106         }
107       if (b1->flags & VLIB_BUFFER_IS_TRACED)
108         {
109           t1 = vlib_add_trace (vm, node, b1, sizeof (t1[0]));
110           t1->sw_if_index = vnet_buffer (b1)->sw_if_index[VLIB_TX];
111           clib_memcpy (t1->data, vlib_buffer_get_current (b1),
112                        sizeof (t1->data));
113         }
114       from += 2;
115       n_left -= 2;
116     }
117
118   while (n_left >= 1)
119     {
120       u32 bi0;
121       vlib_buffer_t *b0;
122       interface_output_trace_t *t0;
123
124       bi0 = from[0];
125
126       b0 = vlib_get_buffer (vm, bi0);
127
128       if (b0->flags & VLIB_BUFFER_IS_TRACED)
129         {
130           t0 = vlib_add_trace (vm, node, b0, sizeof (t0[0]));
131           t0->sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_TX];
132           clib_memcpy (t0->data, vlib_buffer_get_current (b0),
133                        sizeof (t0->data));
134         }
135       from += 1;
136       n_left -= 1;
137     }
138 }
139
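/* Slow path for chained (multi-segment) buffers: enqueue every buffer of the
   chain into the TX frame and accumulate byte counts for the stats rollup.
   Returns the number of buffer indices written, or 0 if the chain does not
   fit in the space left in the frame, in which case the caller closes the
   current frame and retries with a fresh one. */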
140 static never_inline u32
141 slow_path (vlib_main_t * vm,
142            u32 bi,
143            vlib_buffer_t * b,
144            u32 n_left_to_tx, u32 * to_tx, u32 * n_slow_bytes_result)
145 {
146   /* We've already enqueued a single buffer. */
147   u32 n_buffers = 0;
148   u32 n_slow_bytes = 0;
149
150   while (n_left_to_tx > 0)
151     {
152       to_tx[0] = bi;
153       to_tx += 1;
154       n_left_to_tx -= 1;
155       n_buffers += 1;
156       n_slow_bytes += vlib_buffer_length_in_chain (vm, b);
157
158       /* Be grumpy about zero length buffers for benefit of
159          driver tx function. */
160       ASSERT (b->current_length > 0);
161
162       if (!(b->flags & VLIB_BUFFER_NEXT_PRESENT))
163         break;
164
165       bi = b->next_buffer;
166       b = vlib_get_buffer (vm, bi);
167     }
168
169   /* Ran out of space in next frame trying to enqueue buffers? */
170   if (b->flags & VLIB_BUFFER_NEXT_PRESENT)
171     return 0;
172
173   *n_slow_bytes_result = n_slow_bytes;
174   return n_buffers;
175 }
176
177 /*
178  * Increment TX stats. Roll up consecutive increments to the same sw_if_index
179  * into one increment.
180  */
181 static_always_inline void
182 incr_output_stats (vnet_main_t * vnm,
183                    u32 cpu_index,
184                    u32 length,
185                    u32 sw_if_index,
186                    u32 * last_sw_if_index, u32 * n_packets, u32 * n_bytes)
187 {
188   vnet_interface_main_t *im;
189
190   if (PREDICT_TRUE (sw_if_index == *last_sw_if_index))
191     {
192       *n_packets += 1;
193       *n_bytes += length;
194     }
195   else
196     {
197       if (PREDICT_TRUE (*last_sw_if_index != ~0))
198         {
199           im = &vnm->interface_main;
200
201           vlib_increment_combined_counter (im->combined_sw_if_counters
202                                            + VNET_INTERFACE_COUNTER_TX,
203                                            cpu_index,
204                                            *last_sw_if_index,
205                                            *n_packets, *n_bytes);
206         }
207       *last_sw_if_index = sw_if_index;
208       *n_packets = 1;
209       *n_bytes = length;
210     }
211 }
212
213
214 /* Interface output functions. */
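/* vnet_interface_output_node: per-interface output node function.  Traces the
   frame if requested, drops everything if the interface has been deleted or is
   admin/link down (counting TX errors), and otherwise copies buffer indices
   into the interface TX frame, taking the slow path for chained buffers.
   TX packet/byte counters are rolled up per sw_if_index via
   incr_output_stats() and flushed at the end. */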
215 uword
216 vnet_interface_output_node (vlib_main_t * vm,
217                             vlib_node_runtime_t * node, vlib_frame_t * frame)
218 {
219   vnet_main_t *vnm = vnet_get_main ();
220   vnet_interface_output_runtime_t *rt = (void *) node->runtime_data;
221   vnet_sw_interface_t *si;
222   vnet_hw_interface_t *hi;
223   u32 n_left_to_tx, *from, *from_end, *to_tx;
224   u32 n_bytes, n_buffers, n_packets;
225   u32 last_sw_if_index;
226   u32 cpu_index = vm->cpu_index;
227
228   n_buffers = frame->n_vectors;
229
230   if (node->flags & VLIB_NODE_FLAG_TRACE)
231     vnet_interface_output_trace (vm, node, frame, n_buffers);
232
233   from = vlib_frame_args (frame);
234
235   if (rt->is_deleted)
236     return vlib_error_drop_buffers (vm, node, from,
237                                     /* buffer stride */ 1,
238                                     n_buffers,
239                                     VNET_INTERFACE_OUTPUT_NEXT_DROP,
240                                     node->node_index,
241                                     VNET_INTERFACE_OUTPUT_ERROR_INTERFACE_DELETED);
242
243   si = vnet_get_sw_interface (vnm, rt->sw_if_index);
244   hi = vnet_get_sup_hw_interface (vnm, rt->sw_if_index);
245   if (!(si->flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP) ||
246       !(hi->flags & VNET_HW_INTERFACE_FLAG_LINK_UP))
247     {
248       vlib_simple_counter_main_t *cm;
249
250       cm = vec_elt_at_index (vnm->interface_main.sw_if_counters,
251                              VNET_INTERFACE_COUNTER_TX_ERROR);
252       vlib_increment_simple_counter (cm, cpu_index,
253                                      rt->sw_if_index, n_buffers);
254       return vlib_error_drop_buffers (vm, node, from,
255                                       /* buffer stride */ 1,
256                                       n_buffers,
257                                       VNET_INTERFACE_OUTPUT_NEXT_DROP,
258                                       node->node_index,
259                                       VNET_INTERFACE_OUTPUT_ERROR_INTERFACE_DOWN);
260     }
261
262   from_end = from + n_buffers;
263
264   /* Total byte count of all buffers. */
265   n_bytes = 0;
266   n_packets = 0;
267   last_sw_if_index = ~0;
268
269   while (from < from_end)
270     {
271       /* Get new next frame since previous incomplete frame may have less
272          than VNET_FRAME_SIZE vectors in it. */
273       vlib_get_new_next_frame (vm, node, VNET_INTERFACE_OUTPUT_NEXT_TX,
274                                to_tx, n_left_to_tx);
275
276       while (from + 4 <= from_end && n_left_to_tx >= 2)
277         {
278           u32 bi0, bi1;
279           vlib_buffer_t *b0, *b1;
280
281           /* Prefetch next iteration. */
282           vlib_prefetch_buffer_with_index (vm, from[2], LOAD);
283           vlib_prefetch_buffer_with_index (vm, from[3], LOAD);
284
285           bi0 = from[0];
286           bi1 = from[1];
287           to_tx[0] = bi0;
288           to_tx[1] = bi1;
289           from += 2;
290           to_tx += 2;
291           n_left_to_tx -= 2;
292
293           b0 = vlib_get_buffer (vm, bi0);
294           b1 = vlib_get_buffer (vm, bi1);
295
296           /* Be grumpy about zero length buffers for benefit of
297              driver tx function. */
298           ASSERT (b0->current_length > 0);
299           ASSERT (b1->current_length > 0);
300
301           if (PREDICT_FALSE
302               ((b0->flags | b1->flags) & VLIB_BUFFER_NEXT_PRESENT))
303             {
304               u32 n_buffers, n_slow_bytes, i;
305
306               /* Undo. */
307               from -= 2;
308               to_tx -= 2;
309               n_left_to_tx += 2;
310
311               /* Do slow path two times. */
312               for (i = 0; i < 2; i++)
313                 {
314                   u32 bi = i ? bi1 : bi0;
315                   vlib_buffer_t *b = i ? b1 : b0;
316
317                   n_buffers = slow_path (vm, bi, b,
318                                          n_left_to_tx, to_tx, &n_slow_bytes);
319
320                   /* Not enough room for single packet? */
321                   if (n_buffers == 0)
322                     goto put;
323
324                   from += 1;
325                   to_tx += n_buffers;
326                   n_left_to_tx -= n_buffers;
327                   incr_output_stats (vnm, cpu_index, n_slow_bytes,
328                                      vnet_buffer (b)->sw_if_index[VLIB_TX],
329                                      &last_sw_if_index, &n_packets, &n_bytes);
330                 }
331             }
332           else
333             {
334               incr_output_stats (vnm, cpu_index,
335                                  vlib_buffer_length_in_chain (vm, b0),
336                                  vnet_buffer (b0)->sw_if_index[VLIB_TX],
337                                  &last_sw_if_index, &n_packets, &n_bytes);
338               incr_output_stats (vnm, cpu_index,
339                                  vlib_buffer_length_in_chain (vm, b1),
340                                  vnet_buffer (b1)->sw_if_index[VLIB_TX],
341                                  &last_sw_if_index, &n_packets, &n_bytes);
342             }
343         }
344
345       while (from + 1 <= from_end && n_left_to_tx >= 1)
346         {
347           u32 bi0;
348           vlib_buffer_t *b0;
349
350           bi0 = from[0];
351           to_tx[0] = bi0;
352           from += 1;
353           to_tx += 1;
354           n_left_to_tx -= 1;
355
356           b0 = vlib_get_buffer (vm, bi0);
357
358           /* Be grumpy about zero length buffers for benefit of
359              driver tx function. */
360           ASSERT (b0->current_length > 0);
361
362           if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_NEXT_PRESENT))
363             {
364               u32 n_buffers, n_slow_bytes;
365
366               /* Undo. */
367               from -= 1;
368               to_tx -= 1;
369               n_left_to_tx += 1;
370
371               n_buffers = slow_path (vm, bi0, b0,
372                                      n_left_to_tx, to_tx, &n_slow_bytes);
373
374               /* Not enough room for single packet? */
375               if (n_buffers == 0)
376                 goto put;
377
378               from += 1;
379               to_tx += n_buffers;
380               n_left_to_tx -= n_buffers;
381             }
382           incr_output_stats (vnm, cpu_index,
383                              vlib_buffer_length_in_chain (vm, b0),
384                              vnet_buffer (b0)->sw_if_index[VLIB_TX],
385                              &last_sw_if_index, &n_packets, &n_bytes);
386         }
387
388     put:
389       vlib_put_next_frame (vm, node, VNET_INTERFACE_OUTPUT_NEXT_TX,
390                            n_left_to_tx);
391     }
392
393   /* Final update of interface stats. */
394   incr_output_stats (vnm, cpu_index, 0, ~0,     /* ~0 will flush stats */
395                      &last_sw_if_index, &n_packets, &n_bytes);
396
397   return n_buffers;
398 }
399
400 VLIB_NODE_FUNCTION_MULTIARCH_CLONE (vnet_interface_output_node);
401 CLIB_MULTIARCH_SELECT_FN (vnet_interface_output_node);
402
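/* Shared body for the two variants of the no-flatten output node.
   'with_features' is a compile-time constant at each call site, so the
   compiler specializes two copies: one marks each buffer BUFFER_OUTPUT_FEAT_DONE,
   seeds its output feature bitmap from the interface, picks the first feature
   node with count_trailing_zeros and enqueues per-buffer next indices; the
   other sends everything straight to the TX next and bumps sub-interface TX
   counters when the buffer's TX sw_if_index differs from the node's interface.
   The main interface's combined TX counter is updated once at the end with the
   frame totals. */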
403 always_inline uword
404 vnet_interface_output_node_no_flatten_inline (vlib_main_t * vm,
405                                               vlib_node_runtime_t * node,
406                                               vlib_frame_t * frame,
407                                               int with_features)
408 {
409   vnet_main_t *vnm = vnet_get_main ();
410   vnet_interface_output_runtime_t *rt = (void *) node->runtime_data;
411   vnet_sw_interface_t *si;
412   vnet_hw_interface_t *hi;
413   u32 n_left_to_tx, *from, *from_end, *to_tx;
414   u32 n_bytes, n_buffers, n_packets;
415   u32 n_bytes_b0, n_bytes_b1;
416   u32 cpu_index = vm->cpu_index;
417   vnet_interface_main_t *im = &vnm->interface_main;
418   u32 next_index = VNET_INTERFACE_OUTPUT_NEXT_TX;
419
420   n_buffers = frame->n_vectors;
421
422   if (node->flags & VLIB_NODE_FLAG_TRACE)
423     vnet_interface_output_trace (vm, node, frame, n_buffers);
424
425   from = vlib_frame_args (frame);
426
427   if (rt->is_deleted)
428     return vlib_error_drop_buffers (vm, node, from,
429                                     /* buffer stride */ 1,
430                                     n_buffers,
431                                     VNET_INTERFACE_OUTPUT_NEXT_DROP,
432                                     node->node_index,
433                                     VNET_INTERFACE_OUTPUT_ERROR_INTERFACE_DELETED);
434
435   si = vnet_get_sw_interface (vnm, rt->sw_if_index);
436   hi = vnet_get_sup_hw_interface (vnm, rt->sw_if_index);
437   if (!(si->flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP) ||
438       !(hi->flags & VNET_HW_INTERFACE_FLAG_LINK_UP))
439     {
440       vlib_simple_counter_main_t *cm;
441
442       cm = vec_elt_at_index (vnm->interface_main.sw_if_counters,
443                              VNET_INTERFACE_COUNTER_TX_ERROR);
444       vlib_increment_simple_counter (cm, cpu_index,
445                                      rt->sw_if_index, n_buffers);
446
447       return vlib_error_drop_buffers (vm, node, from,
448                                       /* buffer stride */ 1,
449                                       n_buffers,
450                                       VNET_INTERFACE_OUTPUT_NEXT_DROP,
451                                       node->node_index,
452                                       VNET_INTERFACE_OUTPUT_ERROR_INTERFACE_DOWN);
453     }
454
455   from_end = from + n_buffers;
456
457   /* Total byte count of all buffers. */
458   n_bytes = 0;
459   n_packets = 0;
460
461   while (from < from_end)
462     {
463       /* Get new next frame since previous incomplete frame may have less
464          than VNET_FRAME_SIZE vectors in it. */
465       vlib_get_new_next_frame (vm, node, next_index, to_tx, n_left_to_tx);
466
467       while (from + 4 <= from_end && n_left_to_tx >= 2)
468         {
469           u32 bi0, bi1;
470           vlib_buffer_t *b0, *b1;
471           u32 tx_swif0, tx_swif1;
472           u32 next0, next1;
473
474           /* Prefetch next iteration. */
475           vlib_prefetch_buffer_with_index (vm, from[2], LOAD);
476           vlib_prefetch_buffer_with_index (vm, from[3], LOAD);
477
478           bi0 = from[0];
479           bi1 = from[1];
480           to_tx[0] = bi0;
481           to_tx[1] = bi1;
482           from += 2;
483           to_tx += 2;
484           n_left_to_tx -= 2;
485
486           b0 = vlib_get_buffer (vm, bi0);
487           b1 = vlib_get_buffer (vm, bi1);
488
489           /* Be grumpy about zero length buffers for benefit of
490              driver tx function. */
491           ASSERT (b0->current_length > 0);
492           ASSERT (b1->current_length > 0);
493
494           n_bytes_b0 = vlib_buffer_length_in_chain (vm, b0);
495           n_bytes_b1 = vlib_buffer_length_in_chain (vm, b1);
496           tx_swif0 = vnet_buffer (b0)->sw_if_index[VLIB_TX];
497           tx_swif1 = vnet_buffer (b1)->sw_if_index[VLIB_TX];
498
499           n_bytes += n_bytes_b0 + n_bytes_b1;
500           n_packets += 2;
501           if (with_features)
502             {
503               b0->flags |= BUFFER_OUTPUT_FEAT_DONE;
504               vnet_buffer (b0)->output_features.bitmap =
505                 si->output_feature_bitmap;
506               count_trailing_zeros (next0,
507                                     vnet_buffer (b0)->output_features.bitmap);
508               vnet_buffer (b0)->output_features.bitmap &= ~(1 << next0);
509             }
510           else
511             {
512               next0 = VNET_INTERFACE_OUTPUT_NEXT_TX;
513               vnet_buffer (b0)->output_features.bitmap = 0;
514
515               if (PREDICT_FALSE (tx_swif0 != rt->sw_if_index))
516                 {
517                   /* update vlan subif tx counts, if required */
518                   vlib_increment_combined_counter (im->combined_sw_if_counters
519                                                    +
520                                                    VNET_INTERFACE_COUNTER_TX,
521                                                    cpu_index, tx_swif0, 1,
522                                                    n_bytes_b0);
523                 }
524             }
525
526           if (with_features)
527             {
528               b1->flags |= BUFFER_OUTPUT_FEAT_DONE;
529               vnet_buffer (b1)->output_features.bitmap =
530                 si->output_feature_bitmap;
531               count_trailing_zeros (next1,
532                                     vnet_buffer (b1)->output_features.bitmap);
533               vnet_buffer (b1)->output_features.bitmap &= ~(1 << next1);
534             }
535           else
536             {
537               next1 = VNET_INTERFACE_OUTPUT_NEXT_TX;
538               vnet_buffer (b1)->output_features.bitmap = 0;
539
540               /* update vlan subif tx counts, if required */
541               if (PREDICT_FALSE (tx_swif1 != rt->sw_if_index))
542                 {
543
544                   vlib_increment_combined_counter (im->combined_sw_if_counters
545                                                    +
546                                                    VNET_INTERFACE_COUNTER_TX,
547                                                    cpu_index, tx_swif1, 1,
548                                                    n_bytes_b1);
549                 }
550             }
551           if (with_features)
552             vlib_validate_buffer_enqueue_x2 (vm, node, next_index, to_tx,
553                                              n_left_to_tx, bi0, bi1, next0,
554                                              next1);
555         }
556
557       while (from + 1 <= from_end && n_left_to_tx >= 1)
558         {
559           u32 bi0;
560           vlib_buffer_t *b0;
561           u32 tx_swif0;
562
563           bi0 = from[0];
564           to_tx[0] = bi0;
565           from += 1;
566           to_tx += 1;
567           n_left_to_tx -= 1;
568
569           b0 = vlib_get_buffer (vm, bi0);
570
571           /* Be grumpy about zero length buffers for benefit of
572              driver tx function. */
573           ASSERT (b0->current_length > 0);
574
575           n_bytes_b0 = vlib_buffer_length_in_chain (vm, b0);
576           tx_swif0 = vnet_buffer (b0)->sw_if_index[VLIB_TX];
577           n_bytes += n_bytes_b0;
578           n_packets += 1;
579
580           if (with_features)
581             {
582               u32 next0;
583               b0->flags |= BUFFER_OUTPUT_FEAT_DONE;
584               vnet_buffer (b0)->output_features.bitmap =
585                 si->output_feature_bitmap;
586               count_trailing_zeros (next0,
587                                     vnet_buffer (b0)->output_features.bitmap);
588               vnet_buffer (b0)->output_features.bitmap &= ~(1 << next0);
589               vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_tx,
590                                                n_left_to_tx, bi0, next0);
591             }
592           else
593             {
594               vnet_buffer (b0)->output_features.bitmap = 0;
595
596               if (PREDICT_FALSE (tx_swif0 != rt->sw_if_index))
597                 {
598
599                   vlib_increment_combined_counter (im->combined_sw_if_counters
600                                                    +
601                                                    VNET_INTERFACE_COUNTER_TX,
602                                                    cpu_index, tx_swif0, 1,
603                                                    n_bytes_b0);
604                 }
605             }
606         }
607
608       vlib_put_next_frame (vm, node, next_index, n_left_to_tx);
609     }
610
611   /* Update main interface stats. */
612   vlib_increment_combined_counter (im->combined_sw_if_counters
613                                    + VNET_INTERFACE_COUNTER_TX,
614                                    cpu_index,
615                                    rt->sw_if_index, n_packets, n_bytes);
616   return n_buffers;
617 }
618
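/* Entry point: if the interface has output features configured and the first
   buffer of the frame is not yet marked BUFFER_OUTPUT_FEAT_DONE, run the
   feature variant; otherwise run the plain TX variant. */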
619 uword
620 vnet_interface_output_node_no_flatten (vlib_main_t * vm,
621                                        vlib_node_runtime_t * node,
622                                        vlib_frame_t * frame)
623 {
624   vnet_main_t *vnm = vnet_get_main ();
625   vnet_interface_output_runtime_t *rt = (void *) node->runtime_data;
626   vnet_sw_interface_t *si;
627   si = vnet_get_sw_interface (vnm, rt->sw_if_index);
628
629   if (PREDICT_FALSE (si->output_feature_bitmap))
630     {
631       /* If the first packet in the frame has the BUFFER_OUTPUT_FEAT_DONE flag
632          set, then the whole frame is arriving from a feature node */
633
634       u32 *from = vlib_frame_args (frame);
635       vlib_buffer_t *b = vlib_get_buffer (vm, from[0]);
636
637       if ((b->flags & BUFFER_OUTPUT_FEAT_DONE) == 0)
638         return vnet_interface_output_node_no_flatten_inline (vm, node, frame,
639                                                              1);
640     }
641   return vnet_interface_output_node_no_flatten_inline (vm, node, frame, 0);
642 }
643
644 VLIB_NODE_FUNCTION_MULTIARCH_CLONE (vnet_interface_output_node_no_flatten);
645 CLIB_MULTIARCH_SELECT_FN (vnet_interface_output_node_no_flatten);
646
647 /* Use buffer's sw_if_index[VLIB_TX] to choose output interface. */
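/* The next-index arcs of this node are arranged so that next index ==
   hw_if_index: each hardware interface's output node is added at slot
   hw_if_index by vnet_per_buffer_interface_output_hw_interface_add_del()
   below, so next0/next1 can simply be the buffer's hw_if_index. */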
648 static uword
649 vnet_per_buffer_interface_output (vlib_main_t * vm,
650                                   vlib_node_runtime_t * node,
651                                   vlib_frame_t * frame)
652 {
653   vnet_main_t *vnm = vnet_get_main ();
654   u32 n_left_to_next, *from, *to_next;
655   u32 n_left_from, next_index;
656
657   n_left_from = frame->n_vectors;
658
659   from = vlib_frame_args (frame);
660   next_index = node->cached_next_index;
661
662   while (n_left_from > 0)
663     {
664       vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
665
666       while (n_left_from >= 4 && n_left_to_next >= 2)
667         {
668           u32 bi0, bi1, next0, next1;
669           vlib_buffer_t *b0, *b1;
670           vnet_hw_interface_t *hi0, *hi1;
671
672           /* Prefetch next iteration. */
673           vlib_prefetch_buffer_with_index (vm, from[2], LOAD);
674           vlib_prefetch_buffer_with_index (vm, from[3], LOAD);
675
676           bi0 = from[0];
677           bi1 = from[1];
678           to_next[0] = bi0;
679           to_next[1] = bi1;
680           from += 2;
681           to_next += 2;
682           n_left_to_next -= 2;
683           n_left_from -= 2;
684
685           b0 = vlib_get_buffer (vm, bi0);
686           b1 = vlib_get_buffer (vm, bi1);
687
688           hi0 =
689             vnet_get_sup_hw_interface (vnm,
690                                        vnet_buffer (b0)->sw_if_index
691                                        [VLIB_TX]);
692           hi1 =
693             vnet_get_sup_hw_interface (vnm,
694                                        vnet_buffer (b1)->sw_if_index
695                                        [VLIB_TX]);
696
697           next0 = hi0->hw_if_index;
698           next1 = hi1->hw_if_index;
699
700           vlib_validate_buffer_enqueue_x2 (vm, node, next_index, to_next,
701                                            n_left_to_next, bi0, bi1, next0,
702                                            next1);
703         }
704
705       while (n_left_from > 0 && n_left_to_next > 0)
706         {
707           u32 bi0, next0;
708           vlib_buffer_t *b0;
709           vnet_hw_interface_t *hi0;
710
711           bi0 = from[0];
712           to_next[0] = bi0;
713           from += 1;
714           to_next += 1;
715           n_left_to_next -= 1;
716           n_left_from -= 1;
717
718           b0 = vlib_get_buffer (vm, bi0);
719
720           hi0 =
721             vnet_get_sup_hw_interface (vnm,
722                                        vnet_buffer (b0)->sw_if_index
723                                        [VLIB_TX]);
724
725           next0 = hi0->hw_if_index;
726
727           vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
728                                            n_left_to_next, bi0, next0);
729         }
730
731       vlib_put_next_frame (vm, node, next_index, n_left_to_next);
732     }
733
734   return frame->n_vectors;
735 }
736
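/* Map a vlib_error_t (node index + per-node error code) to an absolute index
   into the shared error counter/string heaps. */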
737 always_inline u32
738 counter_index (vlib_main_t * vm, vlib_error_t e)
739 {
740   vlib_node_t *n;
741   u32 ci, ni;
742
743   ni = vlib_error_get_node (e);
744   n = vlib_get_node (vm, ni);
745
746   ci = vlib_error_get_code (e);
747   ASSERT (ci < n->n_errors);
748
749   ci += n->error_heap_index;
750
751   return ci;
752 }
753
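/* Format an error trace record: the name of the node that set the error and
   that node's error string. */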
754 static u8 *
755 format_vnet_error_trace (u8 * s, va_list * va)
756 {
757   vlib_main_t *vm = va_arg (*va, vlib_main_t *);
758   CLIB_UNUSED (vlib_node_t * node) = va_arg (*va, vlib_node_t *);
759   vlib_error_t *e = va_arg (*va, vlib_error_t *);
760   vlib_node_t *error_node;
761   vlib_error_main_t *em = &vm->error_main;
762   u32 i;
763
764   error_node = vlib_get_node (vm, vlib_error_get_node (e[0]));
765   i = counter_index (vm, e[0]);
766   s = format (s, "%v: %s", error_node->name, em->error_strings_heap[i]);
767
768   return s;
769 }
770
771 static void
772 trace_errors_with_buffers (vlib_main_t * vm,
773                            vlib_node_runtime_t * node, vlib_frame_t * frame)
774 {
775   u32 n_left, *buffers;
776
777   buffers = vlib_frame_vector_args (frame);
778   n_left = frame->n_vectors;
779
780   while (n_left >= 4)
781     {
782       u32 bi0, bi1;
783       vlib_buffer_t *b0, *b1;
784       vlib_error_t *t0, *t1;
785
786       /* Prefetch next iteration. */
787       vlib_prefetch_buffer_with_index (vm, buffers[2], LOAD);
788       vlib_prefetch_buffer_with_index (vm, buffers[3], LOAD);
789
790       bi0 = buffers[0];
791       bi1 = buffers[1];
792
793       b0 = vlib_get_buffer (vm, bi0);
794       b1 = vlib_get_buffer (vm, bi1);
795
796       if (b0->flags & VLIB_BUFFER_IS_TRACED)
797         {
798           t0 = vlib_add_trace (vm, node, b0, sizeof (t0[0]));
799           t0[0] = b0->error;
800         }
801       if (b1->flags & VLIB_BUFFER_IS_TRACED)
802         {
803           t1 = vlib_add_trace (vm, node, b1, sizeof (t1[0]));
804           t1[0] = b1->error;
805         }
806       buffers += 2;
807       n_left -= 2;
808     }
809
810   while (n_left >= 1)
811     {
812       u32 bi0;
813       vlib_buffer_t *b0;
814       vlib_error_t *t0;
815
816       bi0 = buffers[0];
817
818       b0 = vlib_get_buffer (vm, bi0);
819
820       if (b0->flags & VLIB_BUFFER_IS_TRACED)
821         {
822           t0 = vlib_add_trace (vm, node, b0, sizeof (t0[0]));
823           t0[0] = b0->error;
824         }
825       buffers += 1;
826       n_left -= 1;
827     }
828 }
829
830 static u8 *
831 validate_error (vlib_main_t * vm, vlib_error_t * e, u32 index)
832 {
833   uword node_index = vlib_error_get_node (e[0]);
834   uword code = vlib_error_get_code (e[0]);
835   vlib_node_t *n;
836
837   if (node_index >= vec_len (vm->node_main.nodes))
838     return format (0, "[%d], node index out of range 0x%x, error 0x%x",
839                    index, node_index, e[0]);
840
841   n = vlib_get_node (vm, node_index);
842   if (code >= n->n_errors)
843     return format (0, "[%d], code %d out of range for node %v",
844                    index, code, n->name);
845
846   return 0;
847 }
848
849 static u8 *
850 validate_error_frame (vlib_main_t * vm,
851                       vlib_node_runtime_t * node, vlib_frame_t * f)
852 {
853   u32 *buffers = vlib_frame_args (f);
854   vlib_buffer_t *b;
855   u8 *msg = 0;
856   uword i;
857
858   for (i = 0; i < f->n_vectors; i++)
859     {
860       b = vlib_get_buffer (vm, buffers[i]);
861       msg = validate_error (vm, &b->error, i);
862       if (msg)
863         return msg;
864     }
865
866   return msg;
867 }
868
869 typedef enum
870 {
871   VNET_ERROR_DISPOSITION_DROP,
872   VNET_ERROR_DISPOSITION_PUNT,
873   VNET_ERROR_N_DISPOSITION,
874 } vnet_error_disposition_t;
875
876 always_inline void
877 do_packet (vlib_main_t * vm, vlib_error_t a)
878 {
879   vlib_error_main_t *em = &vm->error_main;
880   u32 i = counter_index (vm, a);
881   em->counters[i] += 1;
882   vlib_error_elog_count (vm, i, 1);
883 }
884
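/* Common worker for error-drop and error-punt.  To avoid per-packet counter
   updates, it caches the counter of the most recent (node, code) error and the
   most recent RX sw_if_index, speculatively charges each buffer to those, and
   only falls back to per-buffer updates when the error or the interface
   changes.  Drop/punt simple counters are bumped per RX interface, and per
   super-interface for sub-interfaces.  Finally, dropped buffers are freed;
   punted frames are handed to vm->os_punt_frame when present, otherwise the
   buffers and the frame are freed. */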
885 static_always_inline uword
886 process_drop_punt (vlib_main_t * vm,
887                    vlib_node_runtime_t * node,
888                    vlib_frame_t * frame, vnet_error_disposition_t disposition)
889 {
890   vnet_main_t *vnm = vnet_get_main ();
891   vlib_error_main_t *em = &vm->error_main;
892   u32 *buffers, *first_buffer;
893   vlib_error_t current_error;
894   u32 current_counter_index, n_errors_left;
895   u32 current_sw_if_index, n_errors_current_sw_if_index;
896   u64 current_counter;
897   vlib_simple_counter_main_t *cm;
898   u32 cpu_index = vm->cpu_index;
899
900   static vlib_error_t memory[VNET_ERROR_N_DISPOSITION];
901   static char memory_init[VNET_ERROR_N_DISPOSITION];
902
903   buffers = vlib_frame_args (frame);
904   first_buffer = buffers;
905
906   {
907     vlib_buffer_t *b = vlib_get_buffer (vm, first_buffer[0]);
908
909     if (!memory_init[disposition])
910       {
911         memory_init[disposition] = 1;
912         memory[disposition] = b->error;
913       }
914
915     current_sw_if_index = vnet_buffer (b)->sw_if_index[VLIB_RX];
916     n_errors_current_sw_if_index = 0;
917   }
918
919   current_error = memory[disposition];
920   current_counter_index = counter_index (vm, memory[disposition]);
921   current_counter = em->counters[current_counter_index];
922
923   if (node->flags & VLIB_NODE_FLAG_TRACE)
924     trace_errors_with_buffers (vm, node, frame);
925
926   n_errors_left = frame->n_vectors;
927   cm = vec_elt_at_index (vnm->interface_main.sw_if_counters,
928                          (disposition == VNET_ERROR_DISPOSITION_PUNT
929                           ? VNET_INTERFACE_COUNTER_PUNT
930                           : VNET_INTERFACE_COUNTER_DROP));
931
932   while (n_errors_left >= 2)
933     {
934       vlib_buffer_t *b0, *b1;
935       vnet_sw_interface_t *sw_if0, *sw_if1;
936       vlib_error_t e0, e1;
937       u32 bi0, bi1;
938       u32 sw_if_index0, sw_if_index1;
939
940       bi0 = buffers[0];
941       bi1 = buffers[1];
942
943       buffers += 2;
944       n_errors_left -= 2;
945
946       b0 = vlib_get_buffer (vm, bi0);
947       b1 = vlib_get_buffer (vm, bi1);
948
949       e0 = b0->error;
950       e1 = b1->error;
951
952       sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
953       sw_if_index1 = vnet_buffer (b1)->sw_if_index[VLIB_RX];
954
955       /* Speculate that sw_if_index0 and sw_if_index1 both match current_sw_if_index. */
956       n_errors_current_sw_if_index += 2;
957
958       /* Speculatively assume both (node, code) pairs are equal
959          to the current (node, code). */
960       current_counter += 2;
961
962       if (PREDICT_FALSE (e0 != current_error
963                          || e1 != current_error
964                          || sw_if_index0 != current_sw_if_index
965                          || sw_if_index1 != current_sw_if_index))
966         {
967           current_counter -= 2;
968           n_errors_current_sw_if_index -= 2;
969
970           vlib_increment_simple_counter (cm, cpu_index, sw_if_index0, 1);
971           vlib_increment_simple_counter (cm, cpu_index, sw_if_index1, 1);
972
973           /* Increment super-interface drop/punt counters for
974              sub-interfaces. */
975           sw_if0 = vnet_get_sw_interface (vnm, sw_if_index0);
976           vlib_increment_simple_counter
977             (cm, cpu_index, sw_if0->sup_sw_if_index,
978              sw_if0->sup_sw_if_index != sw_if_index0);
979
980           sw_if1 = vnet_get_sw_interface (vnm, sw_if_index1);
981           vlib_increment_simple_counter
982             (cm, cpu_index, sw_if1->sup_sw_if_index,
983              sw_if1->sup_sw_if_index != sw_if_index1);
984
985           em->counters[current_counter_index] = current_counter;
986           do_packet (vm, e0);
987           do_packet (vm, e1);
988
989           /* For 2 repeated errors, change current error. */
990           if (e0 == e1 && e1 != current_error)
991             {
992               current_error = e0;
993               current_counter_index = counter_index (vm, e0);
994             }
995           current_counter = em->counters[current_counter_index];
996         }
997     }
998
999   while (n_errors_left >= 1)
1000     {
1001       vlib_buffer_t *b0;
1002       vnet_sw_interface_t *sw_if0;
1003       vlib_error_t e0;
1004       u32 bi0, sw_if_index0;
1005
1006       bi0 = buffers[0];
1007
1008       buffers += 1;
1009       n_errors_left -= 1;
1010       current_counter += 1;
1011
1012       b0 = vlib_get_buffer (vm, bi0);
1013       e0 = b0->error;
1014
1015       sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
1016
1017       /* Increment drop/punt counters. */
1018       vlib_increment_simple_counter (cm, cpu_index, sw_if_index0, 1);
1019
1020       /* Increment super-interface drop/punt counters for sub-interfaces. */
1021       sw_if0 = vnet_get_sw_interface (vnm, sw_if_index0);
1022       vlib_increment_simple_counter (cm, cpu_index, sw_if0->sup_sw_if_index,
1023                                      sw_if0->sup_sw_if_index != sw_if_index0);
1024
1025       if (PREDICT_FALSE (e0 != current_error))
1026         {
1027           current_counter -= 1;
1028
1029           vlib_error_elog_count (vm, current_counter_index,
1030                                  (current_counter
1031                                   - em->counters[current_counter_index]));
1032
1033           em->counters[current_counter_index] = current_counter;
1034
1035           do_packet (vm, e0);
1036           current_error = e0;
1037           current_counter_index = counter_index (vm, e0);
1038           current_counter = em->counters[current_counter_index];
1039         }
1040     }
1041
1042   if (n_errors_current_sw_if_index > 0)
1043     {
1044       vnet_sw_interface_t *si;
1045
1046       vlib_increment_simple_counter (cm, cpu_index, current_sw_if_index,
1047                                      n_errors_current_sw_if_index);
1048
1049       si = vnet_get_sw_interface (vnm, current_sw_if_index);
1050       if (si->sup_sw_if_index != current_sw_if_index)
1051         vlib_increment_simple_counter (cm, cpu_index, si->sup_sw_if_index,
1052                                        n_errors_current_sw_if_index);
1053     }
1054
1055   vlib_error_elog_count (vm, current_counter_index,
1056                          (current_counter
1057                           - em->counters[current_counter_index]));
1058
1059   /* Write the cached counter back. */
1060   em->counters[current_counter_index] = current_counter;
1061
1062   /* Save memory for next iteration. */
1063   memory[disposition] = current_error;
1064
1065   if (disposition == VNET_ERROR_DISPOSITION_DROP || !vm->os_punt_frame)
1066     {
1067       vlib_buffer_free (vm, first_buffer, frame->n_vectors);
1068
1069       /* If there is no punt function, free the frame as well. */
1070       if (disposition == VNET_ERROR_DISPOSITION_PUNT && !vm->os_punt_frame)
1071         vlib_frame_free (vm, node, frame);
1072     }
1073   else
1074     vm->os_punt_frame (vm, node, frame);
1075
1076   return frame->n_vectors;
1077 }
1078
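/* Mirror dropped packets into the pcap capture.  Skips errors listed in the
   drop filter hash; captures either all drops or only those received on
   im->pcap_sw_if_index.  The buffer is rewound to the start of packet data,
   up to 512 bytes are appended to the capture, and the buffer is restored. */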
1079 static inline void
1080 pcap_drop_trace (vlib_main_t * vm,
1081                  vnet_interface_main_t * im, vlib_frame_t * f)
1082 {
1083   u32 *from;
1084   u32 n_left = f->n_vectors;
1085   vlib_buffer_t *b0, *p1;
1086   u32 bi0;
1087   i16 save_current_data;
1088   u16 save_current_length;
1089
1090   from = vlib_frame_vector_args (f);
1091
1092   while (n_left > 0)
1093     {
1094       if (PREDICT_TRUE (n_left > 1))
1095         {
1096           p1 = vlib_get_buffer (vm, from[1]);
1097           vlib_prefetch_buffer_header (p1, LOAD);
1098         }
1099
1100       bi0 = from[0];
1101       b0 = vlib_get_buffer (vm, bi0);
1102       from++;
1103       n_left--;
1104
1105       /* See if we're pointedly ignoring this specific error */
1106       if (im->pcap_drop_filter_hash
1107           && hash_get (im->pcap_drop_filter_hash, b0->error))
1108         continue;
1109
1110       /* Trace all drops, or drops received on a specific interface */
1111       if (im->pcap_sw_if_index == 0 ||
1112           im->pcap_sw_if_index == vnet_buffer (b0)->sw_if_index[VLIB_RX])
1113         {
1114           save_current_data = b0->current_data;
1115           save_current_length = b0->current_length;
1116
1117           /*
1118            * Typically, we'll need to rewind the buffer
1119            */
1120           if (b0->current_data > 0)
1121             vlib_buffer_advance (b0, (word) - b0->current_data);
1122
1123           pcap_add_buffer (&im->pcap_main, vm, bi0, 512);
1124
1125           b0->current_data = save_current_data;
1126           b0->current_length = save_current_length;
1127         }
1128     }
1129 }
1130
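/* Add or remove an error index in the hash of errors excluded from the drop
   pcap capture (see pcap_drop_trace() above). */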
1131 void
1132 vnet_pcap_drop_trace_filter_add_del (u32 error_index, int is_add)
1133 {
1134   vnet_interface_main_t *im = &vnet_get_main ()->interface_main;
1135
1136   if (im->pcap_drop_filter_hash == 0)
1137     im->pcap_drop_filter_hash = hash_create (0, sizeof (uword));
1138
1139   if (is_add)
1140     hash_set (im->pcap_drop_filter_hash, error_index, 1);
1141   else
1142     hash_unset (im->pcap_drop_filter_hash, error_index);
1143 }
1144
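/* error-drop node function: optionally mirror the frame into the drop pcap
   capture, then run the common drop/punt path. */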
1145 static uword
1146 process_drop (vlib_main_t * vm,
1147               vlib_node_runtime_t * node, vlib_frame_t * frame)
1148 {
1149   vnet_interface_main_t *im = &vnet_get_main ()->interface_main;
1150
1151   if (PREDICT_FALSE (im->drop_pcap_enable))
1152     pcap_drop_trace (vm, im, frame);
1153
1154   return process_drop_punt (vm, node, frame, VNET_ERROR_DISPOSITION_DROP);
1155 }
1156
1157 static uword
1158 process_punt (vlib_main_t * vm,
1159               vlib_node_runtime_t * node, vlib_frame_t * frame)
1160 {
1161   return process_drop_punt (vm, node, frame, VNET_ERROR_DISPOSITION_PUNT);
1162 }
1163
1164 /* *INDENT-OFF* */
1165 VLIB_REGISTER_NODE (drop_buffers,static) = {
1166   .function = process_drop,
1167   .name = "error-drop",
1168   .flags = VLIB_NODE_FLAG_IS_DROP,
1169   .vector_size = sizeof (u32),
1170   .format_trace = format_vnet_error_trace,
1171   .validate_frame = validate_error_frame,
1172 };
1173 /* *INDENT-ON* */
1174
1175 VLIB_NODE_FUNCTION_MULTIARCH (drop_buffers, process_drop);
1176
1177 /* *INDENT-OFF* */
1178 VLIB_REGISTER_NODE (punt_buffers,static) = {
1179   .function = process_punt,
1180   .flags = (VLIB_NODE_FLAG_FRAME_NO_FREE_AFTER_DISPATCH
1181             | VLIB_NODE_FLAG_IS_PUNT),
1182   .name = "error-punt",
1183   .vector_size = sizeof (u32),
1184   .format_trace = format_vnet_error_trace,
1185   .validate_frame = validate_error_frame,
1186 };
1187 /* *INDENT-ON* */
1188
1189 VLIB_NODE_FUNCTION_MULTIARCH (punt_buffers, process_punt);
1190
1191 /* *INDENT-OFF* */
1192 VLIB_REGISTER_NODE (vnet_per_buffer_interface_output_node,static) = {
1193   .function = vnet_per_buffer_interface_output,
1194   .name = "interface-output",
1195   .vector_size = sizeof (u32),
1196 };
1197 /* *INDENT-ON* */
1198
1199 VLIB_NODE_FUNCTION_MULTIARCH (vnet_per_buffer_interface_output_node,
1200                               vnet_per_buffer_interface_output);
1201
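/* Hardware interface add/del callback: wire the interface's output node into
   "interface-output" at slot hw_if_index, maintaining the
   next index == hw_if_index invariant relied on by
   vnet_per_buffer_interface_output() above. */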
1202 clib_error_t *
1203 vnet_per_buffer_interface_output_hw_interface_add_del (vnet_main_t * vnm,
1204                                                        u32 hw_if_index,
1205                                                        u32 is_create)
1206 {
1207   vnet_hw_interface_t *hi = vnet_get_hw_interface (vnm, hw_if_index);
1208   u32 next_index;
1209
1210   next_index = vlib_node_add_next_with_slot
1211     (vnm->vlib_main, vnet_per_buffer_interface_output_node.index,
1212      hi->output_node_index,
1213      /* next_index */ hw_if_index);
1214
1215   ASSERT (next_index == hw_if_index);
1216
1217   return 0;
1218 }
1219
1220 VNET_HW_INTERFACE_ADD_DEL_FUNCTION
1221   (vnet_per_buffer_interface_output_hw_interface_add_del);
1222
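/* CLI handler for "pcap drop trace": "on" starts capturing (100 packets by
   default, or the configured "max"), "off" writes the capture file, "max <nn>"
   sets the packet limit, "intfc <interface>" / "intfc any" restricts or widens
   the capture, "file <name>" names the capture file under /tmp, and "status"
   reports progress. */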
1223 static clib_error_t *
1224 pcap_drop_trace_command_fn (vlib_main_t * vm,
1225                             unformat_input_t * input,
1226                             vlib_cli_command_t * cmd)
1227 {
1228   vnet_main_t *vnm = vnet_get_main ();
1229   vnet_interface_main_t *im = &vnm->interface_main;
1230   u8 *filename;
1231   u32 max;
1232   int matched = 0;
1233   clib_error_t *error = 0;
1234
1235   while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
1236     {
1237       if (unformat (input, "on"))
1238         {
1239           if (im->drop_pcap_enable == 0)
1240             {
1241               if (im->pcap_filename == 0)
1242                 im->pcap_filename = format (0, "/tmp/drop.pcap%c", 0);
1243
1244               memset (&im->pcap_main, 0, sizeof (im->pcap_main));
1245               im->pcap_main.file_name = (char *) im->pcap_filename;
1246               im->pcap_main.n_packets_to_capture = 100;
1247               if (im->pcap_pkts_to_capture)
1248                 im->pcap_main.n_packets_to_capture = im->pcap_pkts_to_capture;
1249
1250               im->pcap_main.packet_type = PCAP_PACKET_TYPE_ethernet;
1251               im->drop_pcap_enable = 1;
1252               matched = 1;
1253               vlib_cli_output (vm, "pcap drop capture on...");
1254             }
1255           else
1256             {
1257               vlib_cli_output (vm, "pcap drop capture already on...");
1258             }
1259           matched = 1;
1260         }
1261       else if (unformat (input, "off"))
1262         {
1263           matched = 1;
1264
1265           if (im->drop_pcap_enable)
1266             {
1267               vlib_cli_output (vm, "captured %d pkts...",
1268                                im->pcap_main.n_packets_captured);
1269               if (im->pcap_main.n_packets_captured)
1270                 {
1271                   im->pcap_main.n_packets_to_capture =
1272                     im->pcap_main.n_packets_captured;
1273                   error = pcap_write (&im->pcap_main);
1274                   if (error)
1275                     clib_error_report (error);
1276                   else
1277                     vlib_cli_output (vm, "saved to %s...", im->pcap_filename);
1278                 }
1279             }
1280           else
1281             {
1282               vlib_cli_output (vm, "pcap drop capture already off...");
1283             }
1284
1285           im->drop_pcap_enable = 0;
1286         }
1287       else if (unformat (input, "max %d", &max))
1288         {
1289           im->pcap_pkts_to_capture = max;
1290           matched = 1;
1291         }
1292
1293       else if (unformat (input, "intfc %U",
1294                          unformat_vnet_sw_interface, vnm,
1295                          &im->pcap_sw_if_index))
1296         matched = 1;
1297       else if (unformat (input, "intfc any"))
1298         {
1299           im->pcap_sw_if_index = 0;
1300           matched = 1;
1301         }
1302       else if (unformat (input, "file %s", &filename))
1303         {
1304           u8 *chroot_filename;
1305           /* Brain-police user path input */
1306           if (strstr ((char *) filename, "..")
1307               || index ((char *) filename, '/'))
1308             {
1309               vlib_cli_output (vm, "illegal characters in filename '%s'",
1310                                filename);
1311               continue;
1312             }
1313
1314           chroot_filename = format (0, "/tmp/%s%c", filename, 0);
1315           vec_free (filename);
1316
1317           if (im->pcap_filename)
1318             vec_free (im->pcap_filename);
1320           im->pcap_filename = chroot_filename;
1321           matched = 1;
1322         }
1323       else if (unformat (input, "status"))
1324         {
1325           if (im->drop_pcap_enable == 0)
1326             {
1327               vlib_cli_output (vm, "pcap drop capture is off...");
1328               continue;
1329             }
1330
1331           vlib_cli_output (vm, "pcap drop capture: %d of %d pkts...",
1332                            im->pcap_main.n_packets_captured,
1333                            im->pcap_main.n_packets_to_capture);
1334           matched = 1;
1335         }
1336
1337       else
1338         break;
1339     }
1340
1341   if (matched == 0)
1342     return clib_error_return (0, "unknown input `%U'",
1343                               format_unformat_error, input);
1344
1345   return 0;
1346 }
1347
1348 /* *INDENT-OFF* */
1349 VLIB_CLI_COMMAND (pcap_trace_command, static) = {
1350   .path = "pcap drop trace",
1351   .short_help =
1352   "pcap drop trace [on|off] [max <nn>] [intfc <interface>|any] [file <name>] [status]",
1353   .function = pcap_drop_trace_command_fn,
1354 };
1355 /* *INDENT-ON* */
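/* Example CLI session (interface and file names are illustrative):
     pcap drop trace on max 1000 intfc GigabitEthernet2/0/0 file drops.pcap
     pcap drop trace status
     pcap drop trace off          (writes /tmp/drops.pcap) */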
1356
1357 /*
1358  * fd.io coding-style-patch-verification: ON
1359  *
1360  * Local Variables:
1361  * eval: (c-set-style "gnu")
1362  * End:
1363  */