Fix double-enqueued packet in interface-output dual-loop, fixes VPP-116
[vpp.git] / vnet / vnet / interface_output.c
1 /*
2  * Copyright (c) 2015 Cisco and/or its affiliates.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at:
6  *
7  *     http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15 /*
16  * interface_output.c: interface output node
17  *
18  * Copyright (c) 2008 Eliot Dresselhaus
19  *
20  * Permission is hereby granted, free of charge, to any person obtaining
21  * a copy of this software and associated documentation files (the
22  * "Software"), to deal in the Software without restriction, including
23  * without limitation the rights to use, copy, modify, merge, publish,
24  * distribute, sublicense, and/or sell copies of the Software, and to
25  * permit persons to whom the Software is furnished to do so, subject to
26  * the following conditions:
27  *
28  * The above copyright notice and this permission notice shall be
29  * included in all copies or substantial portions of the Software.
30  *
31  *  THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
32  *  EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
33  *  MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
34  *  NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
35  *  LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
36  *  OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
37  *  WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
38  */
39
40 #include <vnet/vnet.h>
41
42 typedef struct {
43   u32 sw_if_index;
44   u8 data[128 - sizeof (u32)];
45 } interface_output_trace_t;
46
47 u8 * format_vnet_interface_output_trace (u8 * s, va_list * va)
48 {
49   CLIB_UNUSED (vlib_main_t * vm) = va_arg (*va, vlib_main_t *);
50   vlib_node_t * node = va_arg (*va, vlib_node_t *);
51   interface_output_trace_t * t = va_arg (*va, interface_output_trace_t *);
52   vnet_main_t * vnm = vnet_get_main();
53   vnet_sw_interface_t * si;
54   uword indent;
55
56   if (t->sw_if_index != (u32)~0)
57     {
58       si = vnet_get_sw_interface (vnm, t->sw_if_index);
59       indent = format_get_indent (s);
60       
61       s = format (s, "%U\n%U%U",
62                   format_vnet_sw_interface_name, vnm, si,
63                   format_white_space, indent,
64                   node->format_buffer ? node->format_buffer : format_hex_bytes,
65                   t->data, sizeof (t->data));
66     }
67   return s;
68 }
69
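/* Add a trace record for each traced buffer in the frame: save the TX
   sw_if_index and copy the start of the packet data. */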
70 static void
71 vnet_interface_output_trace (vlib_main_t * vm,
72                              vlib_node_runtime_t * node,
73                              vlib_frame_t * frame,
74                              uword n_buffers)
75 {
76   u32 n_left, * from;
77
78   n_left = n_buffers;
79   from = vlib_frame_args (frame);
80   
81   while (n_left >= 4)
82     {
83       u32 bi0, bi1;
84       vlib_buffer_t * b0, * b1;
85       interface_output_trace_t * t0, * t1;
86
87       /* Prefetch next iteration. */
88       vlib_prefetch_buffer_with_index (vm, from[2], LOAD);
89       vlib_prefetch_buffer_with_index (vm, from[3], LOAD);
90
91       bi0 = from[0];
92       bi1 = from[1];
93
94       b0 = vlib_get_buffer (vm, bi0);
95       b1 = vlib_get_buffer (vm, bi1);
96
97       if (b0->flags & VLIB_BUFFER_IS_TRACED)
98         {
99           t0 = vlib_add_trace (vm, node, b0, sizeof (t0[0]));
100           t0->sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_TX];
101           clib_memcpy (t0->data, vlib_buffer_get_current (b0),
102                   sizeof (t0->data));
103         }
104       if (b1->flags & VLIB_BUFFER_IS_TRACED)
105         {
106           t1 = vlib_add_trace (vm, node, b1, sizeof (t1[0]));
107           t1->sw_if_index = vnet_buffer (b1)->sw_if_index[VLIB_TX];
108           clib_memcpy (t1->data, vlib_buffer_get_current (b1),
109                   sizeof (t1->data));
110         }
111       from += 2;
112       n_left -= 2;
113     }
114
115   while (n_left >= 1)
116     {
117       u32 bi0;
118       vlib_buffer_t * b0;
119       interface_output_trace_t * t0;
120
121       bi0 = from[0];
122
123       b0 = vlib_get_buffer (vm, bi0);
124
125       if (b0->flags & VLIB_BUFFER_IS_TRACED)
126         {
127           t0 = vlib_add_trace (vm, node, b0, sizeof (t0[0]));
128           t0->sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_TX];
129           clib_memcpy (t0->data, vlib_buffer_get_current (b0),
130                   sizeof (t0->data));
131         }
132       from += 1;
133       n_left -= 1;
134     }
135 }
136
137 static never_inline u32
138 slow_path (vlib_main_t * vm,
139            u32 bi,
140            vlib_buffer_t * b,
141            u32 n_left_to_tx,
142            u32 * to_tx,
143            u32 * n_slow_bytes_result)
144 {
145   /* Enqueue every buffer in the chain starting at bi, counting buffers and bytes. */
146   u32 n_buffers = 0;
147   u32 n_slow_bytes = 0;
148
149   while (n_left_to_tx > 0)
150     {
151       to_tx[0] = bi;
152       to_tx += 1;
153       n_left_to_tx -= 1;
154       n_buffers += 1;
155       n_slow_bytes += vlib_buffer_length_in_chain (vm, b);
156
157       /* Be grumpy about zero length buffers for benefit of
158          driver tx function. */
159       ASSERT (b->current_length > 0);
160
161       if (! (b->flags & VLIB_BUFFER_NEXT_PRESENT))
162         break;
163
164       bi = b->next_buffer;
165       b = vlib_get_buffer (vm, bi);
166     }
167
168   /* Ran out of space in next frame trying to enqueue buffers? */
169   if (b->flags & VLIB_BUFFER_NEXT_PRESENT)
170     return 0;
171
172   *n_slow_bytes_result = n_slow_bytes;
173   return n_buffers;
174 }
175
176 /* 
177  * Increment TX stats. Roll up consecutive increments to the same sw_if_index 
178  * into one increment.
179  */
180 static_always_inline
181 void incr_output_stats (vnet_main_t * vnm,
182                         u32 cpu_index, 
183                         u32 length, 
184                         u32 sw_if_index,
185                         u32 * last_sw_if_index, 
186                         u32 * n_packets, 
187                         u32 * n_bytes) {
188   vnet_interface_main_t * im;
189
190   if (PREDICT_TRUE (sw_if_index == *last_sw_if_index)) {
191     *n_packets += 1;
192     *n_bytes += length;
193   } else {
194     if (PREDICT_TRUE (*last_sw_if_index != ~0)) {
195       im = &vnm->interface_main;
196
197       vlib_increment_combined_counter (im->combined_sw_if_counters
198                                        + VNET_INTERFACE_COUNTER_TX,
199                                        cpu_index, 
200                                        *last_sw_if_index,
201                                        *n_packets,
202                                        *n_bytes);
203     }
204     *last_sw_if_index = sw_if_index;
205     *n_packets = 1;
206     *n_bytes = length;
207   }
208 }
209
210
211 /* Interface output functions. */
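/* Main TX node: enqueue buffers to the device TX node, flattening buffer
   chains (each buffer index in a chain is enqueued separately via the slow
   path) and rolling up per-sw_if_index TX packet/byte counters. */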
212 uword
213 vnet_interface_output_node (vlib_main_t * vm,
214                             vlib_node_runtime_t * node,
215                             vlib_frame_t * frame)
216 {
217   vnet_main_t * vnm = vnet_get_main();
218   vnet_interface_output_runtime_t * rt = (void *) node->runtime_data;
219   vnet_sw_interface_t * si;
220   vnet_hw_interface_t * hi;
221   u32 n_left_to_tx, * from, * from_end, * to_tx;
222   u32 n_bytes, n_buffers, n_packets;
223   u32 last_sw_if_index;
224   u32 cpu_index = vm->cpu_index;
225
226   n_buffers = frame->n_vectors;
227
228   if (node->flags & VLIB_NODE_FLAG_TRACE)
229     vnet_interface_output_trace (vm, node, frame, n_buffers);
230
231   from = vlib_frame_args (frame);
232
233   if (rt->is_deleted)
234     return vlib_error_drop_buffers (vm, node,
235                                     from,
236                                     /* buffer stride */ 1,
237                                     n_buffers,
238                                     VNET_INTERFACE_OUTPUT_NEXT_DROP,
239                                     node->node_index,
240                                     VNET_INTERFACE_OUTPUT_ERROR_INTERFACE_DELETED);
241
242   si = vnet_get_sw_interface (vnm, rt->sw_if_index);
243   hi = vnet_get_sup_hw_interface (vnm, rt->sw_if_index);
244   if (! (si->flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP) ||
245       ! (hi->flags & VNET_HW_INTERFACE_FLAG_LINK_UP))
246     {
247       vlib_simple_counter_main_t * cm;
248      
249       cm = vec_elt_at_index (vnm->interface_main.sw_if_counters,
250                              VNET_INTERFACE_COUNTER_TX_ERROR);
251       vlib_increment_simple_counter (cm, cpu_index,
252                                      rt->sw_if_index, n_buffers);
253       return vlib_error_drop_buffers (vm, node,
254                                       from,
255                                       /* buffer stride */ 1,
256                                       n_buffers,
257                                       VNET_INTERFACE_OUTPUT_NEXT_DROP,
258                                       node->node_index,
259                                    VNET_INTERFACE_OUTPUT_ERROR_INTERFACE_DOWN);
260     }
261
262   from_end = from + n_buffers;
263
264   /* Total byte count of all buffers. */
265   n_bytes = 0;
266   n_packets = 0;
267   last_sw_if_index = ~0;
268
269   while (from < from_end)
270     {
271       /* Get new next frame since the previous incomplete frame may have fewer
272          than VNET_FRAME_SIZE vectors in it. */
273       vlib_get_new_next_frame (vm, node, VNET_INTERFACE_OUTPUT_NEXT_TX,
274                                to_tx, n_left_to_tx);
275
276       while (from + 4 <= from_end && n_left_to_tx >= 2)
277         {
278           u32 bi0, bi1;
279           vlib_buffer_t * b0, * b1;
280
281           /* Prefetch next iteration. */
282           vlib_prefetch_buffer_with_index (vm, from[2], LOAD);
283           vlib_prefetch_buffer_with_index (vm, from[3], LOAD);
284
285           bi0 = from[0];
286           bi1 = from[1];
287           to_tx[0] = bi0;
288           to_tx[1] = bi1;
289           from += 2;
290           to_tx += 2;
291           n_left_to_tx -= 2;
292
293           b0 = vlib_get_buffer (vm, bi0);
294           b1 = vlib_get_buffer (vm, bi1);
295
296           /* Be grumpy about zero length buffers for benefit of
297              driver tx function. */
298           ASSERT (b0->current_length > 0);
299           ASSERT (b1->current_length > 0);
300
301           if (PREDICT_FALSE ((b0->flags | b1->flags) & VLIB_BUFFER_NEXT_PRESENT))
302             {
303               u32 n_buffers, n_slow_bytes, i;
304
305               /* Undo the speculative enqueue of both buffers; the slow path re-enqueues each chain exactly once (the VPP-116 double-enqueue fix). */
306               from -= 2;
307               to_tx -= 2;
308               n_left_to_tx += 2;
309
310               /* Do slow path two times. */
311               for (i = 0; i < 2; i++)
312                 {
313                   u32 bi = i ? bi1 : bi0;
314                   vlib_buffer_t * b = i ? b1 : b0;
315
316                   n_buffers = slow_path (vm, bi, b,
317                                          n_left_to_tx, to_tx, &n_slow_bytes);
318
319                   /* Not enough room for single packet? */
320                   if (n_buffers == 0)
321                     goto put;
322
323                   from += 1;
324                   to_tx += n_buffers;
325                   n_left_to_tx -= n_buffers;
326                   incr_output_stats (vnm, cpu_index, n_slow_bytes,
327                                      vnet_buffer(b)->sw_if_index[VLIB_TX],
328                                      &last_sw_if_index, &n_packets, &n_bytes);
329                 }
330             } else {
331               incr_output_stats (vnm, cpu_index, 
332                                  vlib_buffer_length_in_chain (vm, b0),
333                                  vnet_buffer(b0)->sw_if_index[VLIB_TX],
334                                  &last_sw_if_index, &n_packets, &n_bytes);
335               incr_output_stats (vnm, cpu_index, 
336                                  vlib_buffer_length_in_chain (vm, b1),
337                                  vnet_buffer(b1)->sw_if_index[VLIB_TX],
338                                  &last_sw_if_index, &n_packets, &n_bytes);
339             }
340         }
341
342       while (from + 1 <= from_end && n_left_to_tx >= 1)
343         {
344           u32 bi0;
345           vlib_buffer_t * b0;
346
347           bi0 = from[0];
348           to_tx[0] = bi0;
349           from += 1;
350           to_tx += 1;
351           n_left_to_tx -= 1;
352
353           b0 = vlib_get_buffer (vm, bi0);
354
355           /* Be grumpy about zero length buffers for benefit of
356              driver tx function. */
357           ASSERT (b0->current_length > 0);
358
359           if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_NEXT_PRESENT))
360             {
361               u32 n_buffers, n_slow_bytes;
362
363               /* Undo the speculative enqueue; the slow path enqueues the whole chain. */
364               from -= 1;
365               to_tx -= 1;
366               n_left_to_tx += 1;
367
368               n_buffers = slow_path (vm, bi0, b0,
369                                      n_left_to_tx, to_tx, &n_slow_bytes);
370
371               /* Not enough room for single packet? */
372               if (n_buffers == 0)
373                 goto put;
374
375               from += 1;
376               to_tx += n_buffers;
377               n_left_to_tx -= n_buffers;
378             }
379           incr_output_stats (vnm, cpu_index, 
380                              vlib_buffer_length_in_chain (vm, b0),
381                              vnet_buffer(b0)->sw_if_index[VLIB_TX],
382                              &last_sw_if_index, &n_packets, &n_bytes);
383         }
384
385     put:
386       vlib_put_next_frame (vm, node, VNET_INTERFACE_OUTPUT_NEXT_TX, n_left_to_tx);
387     }
388
389   /* Final update of interface stats. */
390   incr_output_stats (vnm, cpu_index, 0, ~0, /* ~0 will flush stats */
391                      &last_sw_if_index, &n_packets, &n_bytes); 
392
393   return n_buffers;
394 }
395
396 VLIB_NODE_FUNCTION_MULTIARCH_CLONE (vnet_interface_output_node)
397 CLIB_MULTIARCH_SELECT_FN (vnet_interface_output_node)
398
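/* TX node variant that leaves buffer chains intact (only head buffer indices
   are enqueued). With with_features set, each buffer is redirected to the
   next output feature node taken from si->output_feature_bitmap and marked
   BUFFER_OUTPUT_FEAT_DONE. */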
399 always_inline uword
400 vnet_interface_output_node_no_flatten_inline  (vlib_main_t * vm,
401                                                vlib_node_runtime_t * node,
402                                                vlib_frame_t * frame,
403                                                int with_features)
404 {
405   vnet_main_t * vnm = vnet_get_main();
406   vnet_interface_output_runtime_t * rt = (void *) node->runtime_data;
407   vnet_sw_interface_t * si;
408   vnet_hw_interface_t * hi;
409   u32 n_left_to_tx, * from, * from_end, * to_tx;
410   u32 n_bytes, n_buffers, n_packets;
411   u32 n_bytes_b0, n_bytes_b1;
412   u32 cpu_index = vm->cpu_index;
413   vnet_interface_main_t * im = &vnm->interface_main;
414   u32 next_index = VNET_INTERFACE_OUTPUT_NEXT_TX;
415
416   n_buffers = frame->n_vectors;
417
418   if (node->flags & VLIB_NODE_FLAG_TRACE)
419     vnet_interface_output_trace (vm, node, frame, n_buffers);
420
421   from = vlib_frame_args (frame);
422
423   if (rt->is_deleted)
424     return vlib_error_drop_buffers (vm, node,
425                                     from,
426                                     /* buffer stride */ 1,
427                                     n_buffers,
428                                     VNET_INTERFACE_OUTPUT_NEXT_DROP,
429                                     node->node_index,
430                                     VNET_INTERFACE_OUTPUT_ERROR_INTERFACE_DELETED);
431
432   si = vnet_get_sw_interface (vnm, rt->sw_if_index);
433   hi = vnet_get_sup_hw_interface (vnm, rt->sw_if_index);
434   if (! (si->flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP) ||
435       ! (hi->flags & VNET_HW_INTERFACE_FLAG_LINK_UP))
436     {
437       vlib_simple_counter_main_t * cm;
438      
439       cm = vec_elt_at_index (vnm->interface_main.sw_if_counters,
440                              VNET_INTERFACE_COUNTER_TX_ERROR);
441       vlib_increment_simple_counter (cm, cpu_index,
442                                      rt->sw_if_index, n_buffers);
443       
444       return vlib_error_drop_buffers (vm, node,
445                                       from,
446                                       /* buffer stride */ 1,
447                                       n_buffers,
448                                       VNET_INTERFACE_OUTPUT_NEXT_DROP,
449                                       node->node_index,
450                                       VNET_INTERFACE_OUTPUT_ERROR_INTERFACE_DOWN);
451     }
452
453   from_end = from + n_buffers;
454
455   /* Total byte count of all buffers. */
456   n_bytes = 0;
457   n_packets = 0;
458
459   while (from < from_end)
460     {
461       /* Get new next frame since the previous incomplete frame may have fewer
462          than VNET_FRAME_SIZE vectors in it. */
463       vlib_get_new_next_frame (vm, node, next_index,
464                                to_tx, n_left_to_tx);
465
466       while (from + 4 <= from_end && n_left_to_tx >= 2)
467         {
468           u32 bi0, bi1;
469           vlib_buffer_t * b0, * b1;
470           u32 tx_swif0, tx_swif1;
471           u32 next0, next1;
472
473           /* Prefetch next iteration. */
474           vlib_prefetch_buffer_with_index (vm, from[2], LOAD);
475           vlib_prefetch_buffer_with_index (vm, from[3], LOAD);
476
477           bi0 = from[0];
478           bi1 = from[1];
479           to_tx[0] = bi0;
480           to_tx[1] = bi1;
481           from += 2;
482           to_tx += 2;
483           n_left_to_tx -= 2;
484
485           b0 = vlib_get_buffer (vm, bi0);
486           b1 = vlib_get_buffer (vm, bi1);
487
488           /* Be grumpy about zero length buffers for benefit of
489              driver tx function. */
490           ASSERT (b0->current_length > 0);
491           ASSERT (b1->current_length > 0);
492
493           n_bytes_b0 = vlib_buffer_length_in_chain (vm, b0);
494           n_bytes_b1 = vlib_buffer_length_in_chain (vm, b1);
495           tx_swif0 = vnet_buffer(b0)->sw_if_index[VLIB_TX];
496           tx_swif1 = vnet_buffer(b1)->sw_if_index[VLIB_TX];
497
498           n_bytes += n_bytes_b0 + n_bytes_b1;
499           n_packets += 2;
500           if (with_features)
501             {
502               b0->flags |= BUFFER_OUTPUT_FEAT_DONE;
503               vnet_buffer(b0)->output_features.bitmap = si->output_feature_bitmap;
504               count_trailing_zeros(next0, vnet_buffer(b0)->output_features.bitmap);
505               vnet_buffer(b0)->output_features.bitmap &= ~(1 << next0);
506             }
507           else
508             {
509               next0 = VNET_INTERFACE_OUTPUT_NEXT_TX;
510               vnet_buffer(b0)->output_features.bitmap = 0;
511
512               if (PREDICT_FALSE(tx_swif0 != rt->sw_if_index))
513                 {
514                   /* update vlan subif tx counts, if required */
515                   vlib_increment_combined_counter (im->combined_sw_if_counters
516                                                    + VNET_INTERFACE_COUNTER_TX,
517                                                    cpu_index,
518                                                    tx_swif0,
519                                                    1,
520                                                    n_bytes_b0);
521                 }
522             }
523
524           if (with_features)
525             {
526               b1->flags |= BUFFER_OUTPUT_FEAT_DONE;
527               vnet_buffer(b1)->output_features.bitmap = si->output_feature_bitmap;
528               count_trailing_zeros(next1, vnet_buffer(b1)->output_features.bitmap);
529               vnet_buffer(b1)->output_features.bitmap &= ~(1 << next1);
530             }
531           else
532             {
533               next1 = VNET_INTERFACE_OUTPUT_NEXT_TX;
534               vnet_buffer(b1)->output_features.bitmap = 0;
535
536               /* update vlan subif tx counts, if required */
537               if (PREDICT_FALSE(tx_swif1 != rt->sw_if_index))
538                 {
539
540                   vlib_increment_combined_counter (im->combined_sw_if_counters
541                                                    + VNET_INTERFACE_COUNTER_TX,
542                                                    cpu_index,
543                                                    tx_swif1,
544                                                    1,
545                                                    n_bytes_b1);
546                 }
547             }
548           if (with_features)
549             vlib_validate_buffer_enqueue_x2(vm, node, next_index, to_tx,
550                                             n_left_to_tx, bi0, bi1, next0, next1);
551         }
552
553       while (from + 1 <= from_end && n_left_to_tx >= 1)
554         {
555           u32 bi0;
556           vlib_buffer_t * b0;
557           u32 tx_swif0;
558
559           bi0 = from[0];
560           to_tx[0] = bi0;
561           from += 1;
562           to_tx += 1;
563           n_left_to_tx -= 1;
564
565           b0 = vlib_get_buffer (vm, bi0);
566
567           /* Be grumpy about zero length buffers for benefit of
568              driver tx function. */
569           ASSERT (b0->current_length > 0);
570
571           n_bytes_b0 = vlib_buffer_length_in_chain (vm, b0);
572           tx_swif0 = vnet_buffer(b0)->sw_if_index[VLIB_TX];
573           n_bytes += n_bytes_b0;
574           n_packets += 1;
575
576           if (with_features)
577             {
578               u32 next0;
579               b0->flags |= BUFFER_OUTPUT_FEAT_DONE;
580               vnet_buffer(b0)->output_features.bitmap = si->output_feature_bitmap;
581               count_trailing_zeros(next0, vnet_buffer(b0)->output_features.bitmap);
582               vnet_buffer(b0)->output_features.bitmap &= ~(1 << next0);
583               vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_tx,
584                                                n_left_to_tx, bi0, next0);
585             }
586           else
587             {
588               vnet_buffer(b0)->output_features.bitmap = 0;
589
590               if (PREDICT_FALSE(tx_swif0 != rt->sw_if_index))
591                 {
592
593                   vlib_increment_combined_counter (im->combined_sw_if_counters
594                                                    + VNET_INTERFACE_COUNTER_TX,
595                                                    cpu_index,
596                                                    tx_swif0,
597                                                    1,
598                                                    n_bytes_b0);
599                 }
600             }
601         }
602
603       vlib_put_next_frame (vm, node, next_index,
604                            n_left_to_tx);
605     }
606
607   /* Update main interface stats. */
608   vlib_increment_combined_counter (im->combined_sw_if_counters
609                                    + VNET_INTERFACE_COUNTER_TX,
610                                    cpu_index,
611                                    rt->sw_if_index,
612                                    n_packets,
613                                    n_bytes);
614   return n_buffers;
615 }
616
617 uword
618 vnet_interface_output_node_no_flatten (vlib_main_t * vm,
619                                        vlib_node_runtime_t * node,
620                                        vlib_frame_t * frame)
621 {
622   vnet_main_t * vnm = vnet_get_main ();
623   vnet_interface_output_runtime_t * rt = (void *) node->runtime_data;
624   vnet_sw_interface_t * si;
625   si = vnet_get_sw_interface (vnm, rt->sw_if_index);
626
627   if (PREDICT_FALSE(si->output_feature_bitmap))
628     {
629       /* if the first packet in the frame has the BUFFER_OUTPUT_FEAT_DONE flag set,
630          then the whole frame is arriving from a feature node */
631
632       u32 * from = vlib_frame_args (frame);
633       vlib_buffer_t * b = vlib_get_buffer (vm, from[0]);
634
635       if ((b->flags & BUFFER_OUTPUT_FEAT_DONE) == 0)
636         return vnet_interface_output_node_no_flatten_inline (vm, node, frame, 1);
637     }
638     return vnet_interface_output_node_no_flatten_inline (vm, node, frame, 0);
639 }
640
641 VLIB_NODE_FUNCTION_MULTIARCH_CLONE (vnet_interface_output_node_no_flatten)
642 CLIB_MULTIARCH_SELECT_FN (vnet_interface_output_node_no_flatten)
643
644 /* Use buffer's sw_if_index[VLIB_TX] to choose output interface. */
645 static uword
646 vnet_per_buffer_interface_output (vlib_main_t * vm,
647                                   vlib_node_runtime_t * node,
648                                   vlib_frame_t * frame)
649 {
650   vnet_main_t * vnm = vnet_get_main();
651   u32 n_left_to_next, * from, * to_next;
652   u32 n_left_from, next_index;
653
654   n_left_from = frame->n_vectors;
655
656   from = vlib_frame_args (frame);
657   next_index = node->cached_next_index;
658
659   while (n_left_from > 0)
660     {
661       vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
662
663       while (n_left_from >= 4 && n_left_to_next >= 2)
664         {
665           u32 bi0, bi1, next0, next1;
666           vlib_buffer_t * b0, * b1;
667           vnet_hw_interface_t * hi0, * hi1;
668
669           /* Prefetch next iteration. */
670           vlib_prefetch_buffer_with_index (vm, from[2], LOAD);
671           vlib_prefetch_buffer_with_index (vm, from[3], LOAD);
672
673           bi0 = from[0];
674           bi1 = from[1];
675           to_next[0] = bi0;
676           to_next[1] = bi1;
677           from += 2;
678           to_next += 2;
679           n_left_to_next -= 2;
680           n_left_from -= 2;
681
682           b0 = vlib_get_buffer (vm, bi0);
683           b1 = vlib_get_buffer (vm, bi1);
684
685           hi0 = vnet_get_sup_hw_interface (vnm, vnet_buffer (b0)->sw_if_index[VLIB_TX]);
686           hi1 = vnet_get_sup_hw_interface (vnm, vnet_buffer (b1)->sw_if_index[VLIB_TX]);
687
688           next0 = hi0->hw_if_index;
689           next1 = hi1->hw_if_index;
690
691           vlib_validate_buffer_enqueue_x2 (vm, node, next_index, to_next, n_left_to_next,
692                                            bi0, bi1, next0, next1);
693         }
694
695       while (n_left_from > 0 && n_left_to_next > 0)
696         {
697           u32 bi0, next0;
698           vlib_buffer_t * b0;
699           vnet_hw_interface_t * hi0;
700
701           bi0 = from[0];
702           to_next[0] = bi0;
703           from += 1;
704           to_next += 1;
705           n_left_to_next -= 1;
706           n_left_from -= 1;
707
708           b0 = vlib_get_buffer (vm, bi0);
709
710           hi0 = vnet_get_sup_hw_interface (vnm, vnet_buffer (b0)->sw_if_index[VLIB_TX]);
711
712           next0 = hi0->hw_if_index;
713
714           vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next, n_left_to_next,
715                                            bi0, next0);
716         }
717
718       vlib_put_next_frame (vm, node, next_index, n_left_to_next);
719     }
720
721   return frame->n_vectors;
722 }
723
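/* Map a vlib_error_t to its index in the error counter heap. */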
724 always_inline u32
725 counter_index (vlib_main_t * vm, vlib_error_t e)
726 {
727   vlib_node_t * n;
728   u32 ci, ni;
729
730   ni = vlib_error_get_node (e);
731   n = vlib_get_node (vm, ni);
732
733   ci = vlib_error_get_code (e);
734   ASSERT (ci < n->n_errors);
735
736   ci += n->error_heap_index;
737
738   return ci;
739 }
740
741 static u8 * format_vnet_error_trace (u8 * s, va_list * va)
742 {
743   vlib_main_t * vm = va_arg (*va, vlib_main_t *);
744   CLIB_UNUSED (vlib_node_t * node) = va_arg (*va, vlib_node_t *);
745   vlib_error_t * e = va_arg (*va, vlib_error_t *);
746   vlib_node_t * error_node;
747   vlib_error_main_t * em = &vm->error_main;
748   u32 i;
749
750   error_node = vlib_get_node (vm, vlib_error_get_node (e[0]));
751   i = counter_index (vm, e[0]);
752   s = format (s, "%v: %s", error_node->name, em->error_strings_heap[i]);
753
754   return s;
755 }
756
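/* Record each traced buffer's error code in the packet trace. */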
757 static void
758 trace_errors_with_buffers (vlib_main_t * vm,
759                            vlib_node_runtime_t * node,
760                            vlib_frame_t * frame)
761 {
762   u32 n_left, * buffers;
763
764   buffers = vlib_frame_vector_args (frame);
765   n_left = frame->n_vectors;
766   
767   while (n_left >= 4)
768     {
769       u32 bi0, bi1;
770       vlib_buffer_t * b0, * b1;
771       vlib_error_t * t0, * t1;
772
773       /* Prefetch next iteration. */
774       vlib_prefetch_buffer_with_index (vm, buffers[2], LOAD);
775       vlib_prefetch_buffer_with_index (vm, buffers[3], LOAD);
776
777       bi0 = buffers[0];
778       bi1 = buffers[1];
779
780       b0 = vlib_get_buffer (vm, bi0);
781       b1 = vlib_get_buffer (vm, bi1);
782
783       if (b0->flags & VLIB_BUFFER_IS_TRACED)
784         {
785           t0 = vlib_add_trace (vm, node, b0, sizeof (t0[0]));
786           t0[0] = b0->error;
787         }
788       if (b1->flags & VLIB_BUFFER_IS_TRACED)
789         {
790           t1 = vlib_add_trace (vm, node, b1, sizeof (t1[0]));
791           t1[0] = b1->error;
792         }
793       buffers += 2;
794       n_left -= 2;
795     }
796
797   while (n_left >= 1)
798     {
799       u32 bi0;
800       vlib_buffer_t * b0;
801       vlib_error_t * t0;
802
803       bi0 = buffers[0];
804
805       b0 = vlib_get_buffer (vm, bi0);
806
807       if (b0->flags & VLIB_BUFFER_IS_TRACED)
808         {
809           t0 = vlib_add_trace (vm, node, b0, sizeof (t0[0]));
810           t0[0] = b0->error;
811         }
812       buffers += 1;
813       n_left -= 1;
814     }
815 }
816
817 static u8 *
818 validate_error (vlib_main_t * vm, vlib_error_t * e, u32 index)
819 {
820   uword node_index = vlib_error_get_node (e[0]);
821   uword code = vlib_error_get_code (e[0]);
822   vlib_node_t * n;
823
824   if (node_index >= vec_len (vm->node_main.nodes))
825     return format (0, "[%d], node index out of range 0x%x, error 0x%x",
826                    index, node_index, e[0]);
827
828   n = vlib_get_node (vm, node_index);
829   if (code >= n->n_errors)
830     return format (0, "[%d], code %d out of range for node %v",
831                    index, code, n->name);
832
833   return 0;
834 }
835
836 static u8 *
837 validate_error_frame (vlib_main_t * vm,
838                       vlib_node_runtime_t * node,
839                       vlib_frame_t * f)
840 {
841   u32 * buffers = vlib_frame_args (f);
842   vlib_buffer_t * b;
843   u8 * msg = 0;
844   uword i;
845
846   for (i = 0; i < f->n_vectors; i++)
847     {
848       b = vlib_get_buffer (vm, buffers[i]);
849       msg = validate_error (vm, &b->error, i);
850       if (msg)
851         return msg;
852     }
853
854   return msg;
855 }
856
857 typedef enum {
858   VNET_ERROR_DISPOSITION_DROP,
859   VNET_ERROR_DISPOSITION_PUNT,
860   VNET_ERROR_N_DISPOSITION,
861 } vnet_error_disposition_t;
862
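/* Count a single packet against its error counter and the error event log. */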
863 always_inline void
864 do_packet (vlib_main_t * vm, vlib_error_t a)
865 {
866   vlib_error_main_t * em = &vm->error_main;
867   u32 i = counter_index (vm, a);
868   em->counters[i] += 1;
869   vlib_error_elog_count (vm, i, 1);
870 }
871     
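/* Drop or punt an entire frame. Counter updates are batched: runs of packets
   sharing the same (error, rx sw_if_index) are counted with a single cached
   increment; mismatches fall back to per-packet counting. The frame is then
   freed (drop) or handed to vm->os_punt_frame (punt). */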
872 static_always_inline uword
873 process_drop_punt (vlib_main_t * vm,
874                    vlib_node_runtime_t * node,
875                    vlib_frame_t * frame,
876                    vnet_error_disposition_t disposition)
877 {
878   vnet_main_t * vnm = vnet_get_main();
879   vlib_error_main_t * em = &vm->error_main;
880   u32 * buffers, * first_buffer;
881   vlib_error_t current_error;
882   u32 current_counter_index, n_errors_left;
883   u32 current_sw_if_index, n_errors_current_sw_if_index;
884   u64 current_counter;
885   vlib_simple_counter_main_t * cm;
886   u32 cpu_index = vm->cpu_index;
887
888   static vlib_error_t memory[VNET_ERROR_N_DISPOSITION];
889   static char memory_init[VNET_ERROR_N_DISPOSITION];
890
891   buffers = vlib_frame_args (frame);
892   first_buffer = buffers;
893
894   {
895     vlib_buffer_t * b = vlib_get_buffer (vm, first_buffer[0]);
896
897     if (! memory_init[disposition])
898       {
899         memory_init[disposition] = 1;
900         memory[disposition] = b->error;
901       }
902
903     current_sw_if_index = vnet_buffer (b)->sw_if_index[VLIB_RX];
904     n_errors_current_sw_if_index = 0;
905   }
906
907   current_error = memory[disposition];
908   current_counter_index = counter_index (vm, memory[disposition]);
909   current_counter = em->counters[current_counter_index];
910
911   if (node->flags & VLIB_NODE_FLAG_TRACE)
912     trace_errors_with_buffers (vm, node, frame);
913   
914   n_errors_left = frame->n_vectors;
915   cm = vec_elt_at_index (vnm->interface_main.sw_if_counters,
916                          (disposition == VNET_ERROR_DISPOSITION_PUNT
917                           ? VNET_INTERFACE_COUNTER_PUNT
918                           : VNET_INTERFACE_COUNTER_DROP));
919
920   while (n_errors_left >= 2)
921     {
922       vlib_buffer_t * b0, * b1;
923       vnet_sw_interface_t * sw_if0, * sw_if1;
924       vlib_error_t e0, e1;
925       u32 bi0, bi1;
926       u32 sw_if_index0, sw_if_index1;
927
928       bi0 = buffers[0];
929       bi1 = buffers[1];
930
931       buffers += 2;
932       n_errors_left -= 2;
933
934       b0 = vlib_get_buffer (vm, bi0);
935       b1 = vlib_get_buffer (vm, bi1);
936
937       e0 = b0->error;
938       e1 = b1->error;
939
940       sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
941       sw_if_index1 = vnet_buffer (b1)->sw_if_index[VLIB_RX];
942
943       /* Speculate that sw_if_index == sw_if_index[01]. */
944       n_errors_current_sw_if_index += 2;
945
946       /* Speculatively assume both (node, code) pairs are equal
947          to the current (node, code). */
948       current_counter += 2;
949
950       if (PREDICT_FALSE (e0 != current_error
951                          || e1 != current_error
952                          || sw_if_index0 != current_sw_if_index
953                          || sw_if_index1 != current_sw_if_index))
954         {
955           current_counter -= 2;
956           n_errors_current_sw_if_index -= 2;
957
958           vlib_increment_simple_counter (cm, cpu_index, sw_if_index0, 1);
959           vlib_increment_simple_counter (cm, cpu_index, sw_if_index1, 1);
960
961           /* Increment super-interface drop/punt counters for
962              sub-interfaces. */
963           sw_if0 = vnet_get_sw_interface (vnm, sw_if_index0);
964           vlib_increment_simple_counter
965               (cm, cpu_index, sw_if0->sup_sw_if_index,
966                sw_if0->sup_sw_if_index != sw_if_index0);
967
968           sw_if1 = vnet_get_sw_interface (vnm, sw_if_index1);
969           vlib_increment_simple_counter
970               (cm, cpu_index, sw_if1->sup_sw_if_index, 
971                sw_if1->sup_sw_if_index != sw_if_index1);
972
973           em->counters[current_counter_index] = current_counter;
974           do_packet (vm, e0);
975           do_packet (vm, e1);
976
977           /* For 2 repeated errors, change current error. */
978           if (e0 == e1 && e1 != current_error)
979             {
980               current_error = e0;
981               current_counter_index = counter_index (vm, e0);
982             }
983           current_counter = em->counters[current_counter_index];
984         }
985     }
986
987   while (n_errors_left >= 1)
988     {
989       vlib_buffer_t * b0;
990       vnet_sw_interface_t * sw_if0;
991       vlib_error_t e0;
992       u32 bi0, sw_if_index0;
993
994       bi0 = buffers[0];
995
996       buffers += 1;
997       n_errors_left -= 1;
998       current_counter += 1;
999
1000       b0 = vlib_get_buffer (vm, bi0);
1001       e0 = b0->error;
1002
1003       sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
1004
1005       /* Increment drop/punt counters. */
1006       vlib_increment_simple_counter (cm, cpu_index, sw_if_index0, 1);
1007
1008       /* Increment super-interface drop/punt counters for sub-interfaces. */
1009       sw_if0 = vnet_get_sw_interface (vnm, sw_if_index0);
1010       vlib_increment_simple_counter (cm, cpu_index, sw_if0->sup_sw_if_index, 
1011                                      sw_if0->sup_sw_if_index != sw_if_index0);
1012
1013       if (PREDICT_FALSE (e0 != current_error))
1014         {
1015           current_counter -= 1;
1016
1017           vlib_error_elog_count (vm, current_counter_index,
1018                                  (current_counter
1019                                   - em->counters[current_counter_index]));
1020             
1021           em->counters[current_counter_index] = current_counter;
1022
1023           do_packet (vm, e0);
1024           current_error = e0;
1025           current_counter_index = counter_index (vm, e0);
1026           current_counter = em->counters[current_counter_index];
1027         }
1028     }
1029
1030   if (n_errors_current_sw_if_index > 0)
1031     {
1032       vnet_sw_interface_t * si;
1033
1034       vlib_increment_simple_counter (cm, cpu_index, current_sw_if_index,
1035                                      n_errors_current_sw_if_index);
1036
1037       si = vnet_get_sw_interface (vnm, current_sw_if_index);
1038       if (si->sup_sw_if_index != current_sw_if_index)
1039           vlib_increment_simple_counter (cm, cpu_index, si->sup_sw_if_index,
1040                                          n_errors_current_sw_if_index);
1041     }
1042
1043   vlib_error_elog_count (vm, current_counter_index,
1044                          (current_counter
1045                           - em->counters[current_counter_index]));
1046
1047   /* Return cached counter. */
1048   em->counters[current_counter_index] = current_counter;
1049
1050   /* Save memory for next iteration. */
1051   memory[disposition] = current_error;
1052
1053   if (disposition == VNET_ERROR_DISPOSITION_DROP
1054       || ! vm->os_punt_frame)
1055     {
1056       vlib_buffer_free
1057         (vm,
1058          first_buffer,
1059          frame->n_vectors);
1060
1061       /* If there is no punt function, free the frame as well. */
1062       if (disposition == VNET_ERROR_DISPOSITION_PUNT && ! vm->os_punt_frame)
1063         vlib_frame_free (vm, node, frame);
1064     }
1065   else
1066     vm->os_punt_frame (vm, node, frame);
1067
1068   return frame->n_vectors;
1069 }
1070
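/* Append dropped packets to the drop pcap capture. Errors present in
   pcap_drop_filter_hash are skipped; capture is limited to pcap_sw_if_index
   unless it is 0 (any interface). Buffers are rewound to the start of the
   packet and restored afterwards. */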
1071 static inline void 
1072 pcap_drop_trace (vlib_main_t * vm, 
1073                  vnet_interface_main_t * im, 
1074                  vlib_frame_t * f)
1075 {
1076   u32 * from;
1077   u32 n_left = f->n_vectors;
1078   vlib_buffer_t * b0, * p1;
1079   u32 bi0;
1080   i16 save_current_data;
1081   u16 save_current_length;
1082
1083   from = vlib_frame_vector_args (f);
1084
1085   while (n_left > 0)
1086     {
1087       if (PREDICT_TRUE (n_left > 1))
1088         {
1089           p1 = vlib_get_buffer (vm, from[1]);
1090           vlib_prefetch_buffer_header (p1, LOAD);
1091         }
1092       
1093       bi0 = from[0];
1094       b0 = vlib_get_buffer (vm, bi0);
1095       from++;
1096       n_left--;
1097       
1098       /* See if we're pointedly ignoring this specific error */
1099       if (im->pcap_drop_filter_hash 
1100           && hash_get (im->pcap_drop_filter_hash, b0->error))
1101         continue;
1102
1103       /* Trace all drops, or drops received on a specific interface */
1104       if (im->pcap_sw_if_index == 0 ||
1105           im->pcap_sw_if_index == vnet_buffer(b0)->sw_if_index [VLIB_RX])
1106         {
1107           save_current_data = b0->current_data;
1108           save_current_length = b0->current_length;
1109           
1110           /* 
1111            * Typically, we'll need to rewind the buffer
1112            */
1113           if (b0->current_data > 0)
1114             vlib_buffer_advance (b0, (word) -b0->current_data);
1115
1116           pcap_add_buffer (&im->pcap_main, vm, bi0, 512);
1117
1118           b0->current_data = save_current_data;
1119           b0->current_length = save_current_length;
1120         }
1121     }
1122 }
1123
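/* Add or remove an error code from the pcap drop-trace filter hash. */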
1124 void vnet_pcap_drop_trace_filter_add_del (u32 error_index, int is_add)
1125 {
1126   vnet_interface_main_t * im = &vnet_get_main()->interface_main;
1127
1128   if (im->pcap_drop_filter_hash == 0)
1129       im->pcap_drop_filter_hash = hash_create (0, sizeof (uword));
1130
1131   if (is_add)
1132     hash_set (im->pcap_drop_filter_hash, error_index, 1);
1133   else
1134     hash_unset (im->pcap_drop_filter_hash, error_index);
1135 }
1136
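/* error-drop node function: optionally pcap-trace the drops, then count and
   free them. */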
1137 static uword
1138 process_drop (vlib_main_t * vm,
1139               vlib_node_runtime_t * node,
1140               vlib_frame_t * frame)
1141 {
1142   vnet_interface_main_t * im = &vnet_get_main()->interface_main;
1143
1144   if (PREDICT_FALSE (im->drop_pcap_enable))
1145     pcap_drop_trace (vm, im, frame);
1146
1147   return process_drop_punt (vm, node, frame, VNET_ERROR_DISPOSITION_DROP);
1148 }
1149
1150 static uword
1151 process_punt (vlib_main_t * vm,
1152               vlib_node_runtime_t * node,
1153               vlib_frame_t * frame)
1154 {
1155   return process_drop_punt (vm, node, frame, VNET_ERROR_DISPOSITION_PUNT);
1156 }
1157
1158 VLIB_REGISTER_NODE (drop_buffers,static) = {
1159   .function = process_drop,
1160   .name = "error-drop",
1161   .flags = VLIB_NODE_FLAG_IS_DROP,
1162   .vector_size = sizeof (u32),
1163   .format_trace = format_vnet_error_trace,
1164   .validate_frame = validate_error_frame,
1165 };
1166
1167 VLIB_NODE_FUNCTION_MULTIARCH (drop_buffers, process_drop)
1168
1169 VLIB_REGISTER_NODE (punt_buffers,static) = {
1170   .function = process_punt,
1171   .flags = (VLIB_NODE_FLAG_FRAME_NO_FREE_AFTER_DISPATCH
1172             | VLIB_NODE_FLAG_IS_PUNT),
1173   .name = "error-punt",
1174   .vector_size = sizeof (u32),
1175   .format_trace = format_vnet_error_trace,
1176   .validate_frame = validate_error_frame,
1177 };
1178
1179 VLIB_NODE_FUNCTION_MULTIARCH (punt_buffers, process_punt)
1180
1181 VLIB_REGISTER_NODE (vnet_per_buffer_interface_output_node,static) = {
1182   .function = vnet_per_buffer_interface_output,
1183   .name = "interface-output",
1184   .vector_size = sizeof (u32),
1185 };
1186
1187 VLIB_NODE_FUNCTION_MULTIARCH (vnet_per_buffer_interface_output_node, vnet_per_buffer_interface_output)
1188
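/* When a hardware interface is created, wire a next arc from interface-output
   to its output node so that the next slot equals hw_if_index. */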
1189 clib_error_t *
1190 vnet_per_buffer_interface_output_hw_interface_add_del (vnet_main_t * vnm,
1191                                                        u32 hw_if_index,
1192                                                        u32 is_create)
1193 {
1194   vnet_hw_interface_t * hi = vnet_get_hw_interface (vnm, hw_if_index);
1195   u32 next_index;
1196
1197   next_index = vlib_node_add_next_with_slot
1198     (vnm->vlib_main, vnet_per_buffer_interface_output_node.index,
1199      hi->output_node_index,
1200      /* next_index */ hw_if_index);
1201
1202   ASSERT (next_index == hw_if_index);
1203
1204   return 0;
1205 }
1206
1207 VNET_HW_INTERFACE_ADD_DEL_FUNCTION 
1208 (vnet_per_buffer_interface_output_hw_interface_add_del);
1209
1210 static clib_error_t *
1211 pcap_drop_trace_command_fn (vlib_main_t * vm,
1212                             unformat_input_t * input,
1213                             vlib_cli_command_t * cmd)
1214 {
1215   vnet_main_t * vnm = vnet_get_main();
1216   vnet_interface_main_t * im = &vnm->interface_main;
1217   u8 * filename;
1218   u32 max;
1219   int matched = 0;
1220   clib_error_t * error = 0;
1221
1222   while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT) 
1223     {
1224       if (unformat (input, "on"))
1225         {
1226           if (im->drop_pcap_enable == 0)
1227             {
1228               if (im->pcap_filename == 0)
1229                 im->pcap_filename = format (0, "/tmp/drop.pcap%c", 0);
1230               
1231               memset (&im->pcap_main, 0, sizeof (im->pcap_main));
1232               im->pcap_main.file_name = (char *) im->pcap_filename;
1233               im->pcap_main.n_packets_to_capture = 100;
1234               if (im->pcap_pkts_to_capture)
1235                 im->pcap_main.n_packets_to_capture = im->pcap_pkts_to_capture;
1236
1237               im->pcap_main.packet_type = PCAP_PACKET_TYPE_ethernet;
1238               im->drop_pcap_enable = 1;
1239               matched = 1;
1240               vlib_cli_output (vm, "pcap drop capture on...");
1241             }
1242           else
1243             {
1244               vlib_cli_output (vm, "pcap drop capture already on...");
1245             }
1246           matched = 1;
1247         }
1248       else if (unformat (input, "off"))
1249         {
1250           matched = 1;
1251
1252           if (im->drop_pcap_enable)
1253             {
1254               vlib_cli_output (vm, "captured %d pkts...", 
1255                                im->pcap_main.n_packets_captured);
1256               if (im->pcap_main.n_packets_captured)
1257                 {
1258                   im->pcap_main.n_packets_to_capture = 
1259                     im->pcap_main.n_packets_captured;
1260                   error = pcap_write (&im->pcap_main);
1261                   if (error)
1262                     clib_error_report (error);
1263                   else
1264                     vlib_cli_output (vm, "saved to %s...", im->pcap_filename);
1265                 }
1266             }
1267           else
1268             {
1269               vlib_cli_output (vm, "pcap drop capture already off...");
1270             }
1271
1272           im->drop_pcap_enable = 0;
1273         }
1274       else if (unformat (input, "max %d", &max))
1275         {
1276           im->pcap_pkts_to_capture = max;
1277           matched = 1;
1278         }
1279
1280       else if (unformat (input, "intfc %U", 
1281                          unformat_vnet_sw_interface, vnm,
1282                          &im->pcap_sw_if_index))
1283         matched = 1;
1284       else if (unformat (input, "intfc any"))
1285         {
1286           im->pcap_sw_if_index = 0;
1287           matched = 1;
1288         }
1289       else if (unformat (input, "file %s", &filename))
1290         {
1291           u8 * chroot_filename;
1292           /* Brain-police user path input */
1293           if (strstr((char *)filename, "..") || index((char *)filename, '/'))
1294             {
1295               vlib_cli_output (vm, "illegal characters in filename '%s'", 
1296                                filename);
1297               continue;
1298             }
1299
1300           chroot_filename = format (0, "/tmp/%s%c", filename, 0);
1301           vec_free (filename);
1302           
1303           if (im->pcap_filename)
1304             vec_free (im->pcap_filename);
1306           im->pcap_filename = chroot_filename;
1307           matched = 1;
1308         }
1309       else if (unformat (input, "status"))
1310         {
1311           if (im->drop_pcap_enable == 0)
1312             {
1313               vlib_cli_output (vm, "pcap drop capture is off...");
1314               continue;
1315             }
1316
1317           vlib_cli_output (vm, "pcap drop capture: %d of %d pkts...",
1318                            im->pcap_main.n_packets_captured,
1319                            im->pcap_main.n_packets_to_capture);
1320           matched = 1;
1321         }
1322
1323       else
1324         break;
1325     }
1326
1327   if (matched == 0)
1328     return clib_error_return (0, "unknown input `%U'", 
1329                               format_unformat_error, input);
1330
1331   return 0;
1332 }
1333
1334 VLIB_CLI_COMMAND (pcap_trace_command, static) = {
1335     .path = "pcap drop trace",
1336     .short_help = 
1337     "pcap drop trace [on|off] [max <nn>] [intfc <interface>|any] [file <name>] [status]",
1338     .function = pcap_drop_trace_command_fn,
1339 };