devices: add support for offloads
src/vnet/devices/af_packet/node.c
/*
 *------------------------------------------------------------------
 * node.c - linux kernel packet interface (af_packet input node)
 *
 * Copyright (c) 2016 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */

#include <linux/if_packet.h>

#include <vlib/vlib.h>
#include <vlib/unix/unix.h>
#include <vnet/ip/ip.h>
#include <vnet/ethernet/ethernet.h>
#include <vnet/interface/rx_queue_funcs.h>
#include <vnet/feature/feature.h>
#include <vnet/ethernet/packet.h>

#include <vnet/devices/af_packet/af_packet.h>
#include <vnet/devices/virtio/virtio_std.h>

#define foreach_af_packet_input_error \
  _(PARTIAL_PKT, "partial packet")

typedef enum
{
#define _(f,s) AF_PACKET_INPUT_ERROR_##f,
  foreach_af_packet_input_error
#undef _
    AF_PACKET_INPUT_N_ERROR,
} af_packet_input_error_t;

static char *af_packet_input_error_strings[] = {
#define _(n,s) s,
  foreach_af_packet_input_error
#undef _
};

typedef struct
{
  u32 next_index;
  u32 hw_if_index;
  int block;
  u32 pkt_num;
  void *block_start;
  block_desc_t bd;
  tpacket3_hdr_t tph;
  vnet_virtio_net_hdr_t vnet_hdr;
} af_packet_input_trace_t;

static u8 *
format_af_packet_input_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  af_packet_input_trace_t *t = va_arg (*args, af_packet_input_trace_t *);
  u32 indent = format_get_indent (s);

  s = format (s, "af_packet: hw_if_index %d next-index %d",
              t->hw_if_index, t->next_index);

  s = format (
    s, "\n%Ublock %u:\n%Uaddress %p version %u seq_num %lu pkt_num %u",
    format_white_space, indent + 2, t->block, format_white_space, indent + 4,
    t->block_start, t->bd.version, t->bd.hdr.bh1.seq_num, t->pkt_num);
  s =
    format (s,
            "\n%Utpacket3_hdr:\n%Ustatus 0x%x len %u snaplen %u mac %u net %u"
            "\n%Usec 0x%x nsec 0x%x vlan %U"
#ifdef TP_STATUS_VLAN_TPID_VALID
            " vlan_tpid %u"
#endif
            ,
            format_white_space, indent + 2, format_white_space, indent + 4,
            t->tph.tp_status, t->tph.tp_len, t->tph.tp_snaplen, t->tph.tp_mac,
            t->tph.tp_net, format_white_space, indent + 4, t->tph.tp_sec,
            t->tph.tp_nsec, format_ethernet_vlan_tci, t->tph.hv1.tp_vlan_tci
#ifdef TP_STATUS_VLAN_TPID_VALID
            ,
            t->tph.hv1.tp_vlan_tpid
#endif
    );

  s = format (s,
              "\n%Uvnet-hdr:\n%Uflags 0x%02x gso_type 0x%02x hdr_len %u"
              "\n%Ugso_size %u csum_start %u csum_offset %u",
              format_white_space, indent + 2, format_white_space, indent + 4,
              t->vnet_hdr.flags, t->vnet_hdr.gso_type, t->vnet_hdr.hdr_len,
              format_white_space, indent + 4, t->vnet_hdr.gso_size,
              t->vnet_hdr.csum_start, t->vnet_hdr.csum_offset);
  return s;
}

always_inline void
buffer_add_to_chain (vlib_buffer_t *b, vlib_buffer_t *first_b,
                     vlib_buffer_t *prev_b, u32 bi)
{
  /* update first buffer */
  first_b->total_length_not_including_first_buffer += b->current_length;

  /* update previous buffer */
  prev_b->next_buffer = bi;
  prev_b->flags |= VLIB_BUFFER_NEXT_PRESENT;

  /* update current buffer */
  b->next_buffer = ~0;
}

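/* Mark a buffer as GSO so downstream nodes (or the egress driver) can
 * segment it; gso_size is the MSS the kernel advertised in the virtio
 * net header, l4_hdr_sz the TCP/UDP header length found by
 * fill_cksum_offload() below. */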
static_always_inline void
fill_gso_offload (vlib_buffer_t *b, u32 gso_size, u8 l4_hdr_sz)
{
  b->flags |= VNET_BUFFER_F_GSO;
  vnet_buffer2 (b)->gso_size = gso_size;
  vnet_buffer2 (b)->gso_l4_hdr_sz = l4_hdr_sz;
}

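/* Frames arriving with VIRTIO_NET_HDR_F_NEEDS_CSUM carry only a partial
 * checksum, so parse the L2/L3 headers here, record the header offsets
 * in the buffer metadata, and set the TCP/UDP checksum offload flags so
 * the checksum is finished later in the graph (or by hardware on TX). */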
static_always_inline void
fill_cksum_offload (vlib_buffer_t *b, u8 *l4_hdr_sz, u8 is_ip)
{
  vnet_buffer_oflags_t oflags = 0;
  u16 l2hdr_sz = 0;
  u16 ethertype = 0;
  u8 l4_proto = 0;

  if (is_ip)
    {
      /* no L2 header: infer the ethertype from the IP version nibble */
      switch (b->data[0] & 0xf0)
        {
        case 0x40:
          ethertype = ETHERNET_TYPE_IP4;
          break;
        case 0x60:
          ethertype = ETHERNET_TYPE_IP6;
          break;
        }
    }
  else
    {
      ethernet_header_t *eth = vlib_buffer_get_current (b);
      ethertype = clib_net_to_host_u16 (eth->type);
      l2hdr_sz = sizeof (ethernet_header_t);
      if (ethernet_frame_is_tagged (ethertype))
        {
          ethernet_vlan_header_t *vlan = (ethernet_vlan_header_t *) (eth + 1);

          ethertype = clib_net_to_host_u16 (vlan->type);
          l2hdr_sz += sizeof (*vlan);
          if (ethertype == ETHERNET_TYPE_VLAN)
            {
              vlan++;
              ethertype = clib_net_to_host_u16 (vlan->type);
              l2hdr_sz += sizeof (*vlan);
            }
        }
    }

  vnet_buffer (b)->l2_hdr_offset = 0;
  vnet_buffer (b)->l3_hdr_offset = l2hdr_sz;

  if (ethertype == ETHERNET_TYPE_IP4)
    {
      ip4_header_t *ip4 = (vlib_buffer_get_current (b) + l2hdr_sz);
      vnet_buffer (b)->l4_hdr_offset = l2hdr_sz + ip4_header_bytes (ip4);
      b->flags |= (VNET_BUFFER_F_IS_IP4 | VNET_BUFFER_F_L2_HDR_OFFSET_VALID |
                   VNET_BUFFER_F_L3_HDR_OFFSET_VALID |
                   VNET_BUFFER_F_L4_HDR_OFFSET_VALID);

      l4_proto = ip4->protocol;
    }
  else if (ethertype == ETHERNET_TYPE_IP6)
    {
      ip6_header_t *ip6 = (vlib_buffer_get_current (b) + l2hdr_sz);
      b->flags |= (VNET_BUFFER_F_IS_IP6 | VNET_BUFFER_F_L2_HDR_OFFSET_VALID |
                   VNET_BUFFER_F_L3_HDR_OFFSET_VALID |
                   VNET_BUFFER_F_L4_HDR_OFFSET_VALID);
      u16 ip6_hdr_len = sizeof (ip6_header_t);

      if (ip6_ext_hdr (ip6->protocol))
        {
          /* walk the extension header chain, counting each header's
           * length exactly once before advancing */
          ip6_ext_header_t *p = (void *) (ip6 + 1);
          ip6_hdr_len += ip6_ext_header_len (p);
          while (ip6_ext_hdr (p->next_hdr))
            {
              p = ip6_ext_next_header (p);
              ip6_hdr_len += ip6_ext_header_len (p);
            }
          l4_proto = p->next_hdr;
        }
      else
        l4_proto = ip6->protocol;
      vnet_buffer (b)->l4_hdr_offset = l2hdr_sz + ip6_hdr_len;
    }

  if (l4_proto == IP_PROTOCOL_TCP)
    {
      oflags |= VNET_BUFFER_OFFLOAD_F_TCP_CKSUM;
      tcp_header_t *tcp = (tcp_header_t *) (vlib_buffer_get_current (b) +
                                            vnet_buffer (b)->l4_hdr_offset);
      *l4_hdr_sz = tcp_header_bytes (tcp);
    }
  else if (l4_proto == IP_PROTOCOL_UDP)
    {
      oflags |= VNET_BUFFER_OFFLOAD_F_UDP_CKSUM;
      *l4_hdr_sz = sizeof (udp_header_t);
    }

  if (oflags)
    vnet_buffer_offload_flags_set (b, oflags);
}

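/*
 * Per-interface RX function for a TPACKET_V3 ring. Each ring block the
 * kernel hands to user space (block_status & TP_STATUS_USER) contains
 * num_pkts frames chained via tp_next_offset; every frame is copied into
 * one or more vlib buffers, and the block is returned to the kernel
 * (TP_STATUS_KERNEL) once all of its frames have been consumed.
 * is_cksum_gso_enabled is a compile-time constant in the two
 * specializations instantiated by the node function below.
 */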
always_inline uword
af_packet_device_input_fn (vlib_main_t *vm, vlib_node_runtime_t *node,
                           vlib_frame_t *frame, af_packet_if_t *apif,
                           u8 is_cksum_gso_enabled)
{
  af_packet_main_t *apm = &af_packet_main;
  tpacket3_hdr_t *tph;
  u32 next_index;
  u32 n_free_bufs;
  u32 n_rx_packets = 0;
  u32 n_rx_bytes = 0;
  u32 *to_next = 0;
  u32 block = apif->next_rx_block;
  u32 block_nr = apif->rx_req->tp_block_nr;
  u8 *block_start = 0;
  uword n_trace = vlib_get_trace_count (vm, node);
  u32 thread_index = vm->thread_index;
  u32 n_buffer_bytes = vlib_buffer_get_default_data_size (vm);
  u32 min_bufs = apif->rx_req->tp_frame_size / n_buffer_bytes;
  u32 num_pkts = 0;
  u32 rx_frame_offset = 0;
  block_desc_t *bd = 0;
  vlib_buffer_t bt;
  u8 is_ip = (apif->mode == AF_PACKET_IF_MODE_IP);

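  /* in IP mode frames start directly at the IP header and are steered to
   * ip4/ip6-input per packet further down; in ethernet mode one next node
   * is chosen up front and the device-input feature arc may override it */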
  if (is_ip)
    next_index = VNET_DEVICE_INPUT_NEXT_IP4_INPUT;
  else
    {
      next_index = VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT;
      if (PREDICT_FALSE (apif->per_interface_next_index != ~0))
        next_index = apif->per_interface_next_index;

      /* redirect if feature path enabled */
      vnet_feature_start_device_input_x1 (apif->sw_if_index, &next_index, &bt);
    }

  while ((((block_desc_t *) (block_start = apif->rx_ring[block]))
            ->hdr.bh1.block_status &
          TP_STATUS_USER) != 0)
    {
      u32 n_required = 0;

      if (PREDICT_FALSE (num_pkts == 0))
        {
          bd = (block_desc_t *) block_start;
          num_pkts = bd->hdr.bh1.num_pkts;
          rx_frame_offset = sizeof (block_desc_t);
        }

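      /* top up the per-thread free-buffer cache so it can cover this
       * block: at least num_pkts buffers, and no fewer than one vlib
       * frame's worth */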
      n_required = clib_max (num_pkts, VLIB_FRAME_SIZE);
      n_free_bufs = vec_len (apm->rx_buffers[thread_index]);
      if (PREDICT_FALSE (n_free_bufs < n_required))
        {
          vec_validate (apm->rx_buffers[thread_index],
                        n_required + n_free_bufs - 1);
          n_free_bufs += vlib_buffer_alloc (
            vm, &apm->rx_buffers[thread_index][n_free_bufs], n_required);
          _vec_len (apm->rx_buffers[thread_index]) = n_free_bufs;
        }

      while (num_pkts && (n_free_bufs > min_bufs))
        {
          u32 next0 = next_index;
          u32 n_left_to_next;

          vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

          while (num_pkts && n_left_to_next && (n_free_bufs > min_bufs))
            {
              tph = (tpacket3_hdr_t *) (block_start + rx_frame_offset);

              if (num_pkts > 1)
                CLIB_PREFETCH (block_start + rx_frame_offset +
                                 tph->tp_next_offset,
                               2 * CLIB_CACHE_LINE_BYTES, LOAD);

              vlib_buffer_t *b0 = 0, *first_b0 = 0, *prev_b0 = 0;
              vnet_virtio_net_hdr_t *vnet_hdr = 0;
              u32 data_len = tph->tp_snaplen;
              u32 offset = 0;
              u32 bi0 = ~0, first_bi0 = ~0;
              u8 l4_hdr_sz = 0;

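              /* with offloads enabled the kernel prepends a virtio net
               * header to each frame (PACKET_VNET_HDR is assumed to be
               * set on the socket at interface creation), so it sits
               * immediately before the packet data at tp_mac */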
              if (is_cksum_gso_enabled)
                vnet_hdr =
                  (vnet_virtio_net_hdr_t *) ((u8 *) tph + tph->tp_mac -
                                             sizeof (vnet_virtio_net_hdr_t));

              if (PREDICT_FALSE (((data_len / n_buffer_bytes) + 1) >
                                 vec_len (apm->rx_buffers[thread_index])))
                {
                  vec_validate (apm->rx_buffers[thread_index],
                                VLIB_FRAME_SIZE + n_free_bufs - 1);
                  n_free_bufs += vlib_buffer_alloc (
                    vm, &apm->rx_buffers[thread_index][n_free_bufs],
                    VLIB_FRAME_SIZE);
                  _vec_len (apm->rx_buffers[thread_index]) = n_free_bufs;
                }

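              /* copy the frame out of the ring; frames larger than a
               * single vlib buffer are spread across a buffer chain */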
              while (data_len)
                {
                  /* grab free buffer */
                  u32 last_empty_buffer =
                    vec_len (apm->rx_buffers[thread_index]) - 1;
                  bi0 = apm->rx_buffers[thread_index][last_empty_buffer];
                  _vec_len (apm->rx_buffers[thread_index]) = last_empty_buffer;
                  n_free_bufs--;

                  /* copy data */
                  u32 bytes_to_copy =
                    data_len > n_buffer_bytes ? n_buffer_bytes : data_len;
                  u32 vlan_len = 0;
                  u32 bytes_copied = 0;

                  b0 = vlib_get_buffer (vm, bi0);
                  b0->current_data = 0;

                  /* Kernel removes VLAN headers, so reconstruct VLAN */
                  if (PREDICT_FALSE (tph->tp_status & TP_STATUS_VLAN_VALID))
                    {
                      if (PREDICT_TRUE (offset == 0))
                        {
                          clib_memcpy_fast (vlib_buffer_get_current (b0),
                                            (u8 *) tph + tph->tp_mac,
                                            sizeof (ethernet_header_t));
                          ethernet_header_t *eth =
                            vlib_buffer_get_current (b0);
                          ethernet_vlan_header_t *vlan =
                            (ethernet_vlan_header_t *) (eth + 1);
                          vlan->priority_cfi_and_id =
                            clib_host_to_net_u16 (tph->hv1.tp_vlan_tci);
                          vlan->type = eth->type;
                          eth->type =
                            clib_host_to_net_u16 (ETHERNET_TYPE_VLAN);
                          vlan_len = sizeof (ethernet_vlan_header_t);
                          bytes_copied = sizeof (ethernet_header_t);
                        }
                    }
                  clib_memcpy_fast (((u8 *) vlib_buffer_get_current (b0)) +
                                      bytes_copied + vlan_len,
                                    (u8 *) tph + tph->tp_mac + offset +
                                      bytes_copied,
                                    (bytes_to_copy - bytes_copied));

                  /* fill buffer header */
                  b0->current_length = bytes_to_copy + vlan_len;

                  if (offset == 0)
                    {
                      b0->total_length_not_including_first_buffer = 0;
                      b0->flags = VLIB_BUFFER_TOTAL_LENGTH_VALID;
                      vnet_buffer (b0)->sw_if_index[VLIB_RX] =
                        apif->sw_if_index;
                      vnet_buffer (b0)->sw_if_index[VLIB_TX] = (u32) ~0;
                      first_b0 = b0;
                      first_bi0 = bi0;
                      if (is_cksum_gso_enabled)
                        {
                          if (vnet_hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM)
                            fill_cksum_offload (first_b0, &l4_hdr_sz, is_ip);
                          if (vnet_hdr->gso_type & (VIRTIO_NET_HDR_GSO_TCPV4 |
                                                    VIRTIO_NET_HDR_GSO_TCPV6))
                            fill_gso_offload (first_b0, vnet_hdr->gso_size,
                                              l4_hdr_sz);
                        }
                    }
                  else
                    buffer_add_to_chain (b0, first_b0, prev_b0, bi0);

                  prev_b0 = b0;
                  offset += bytes_to_copy;
                  data_len -= bytes_to_copy;
                }
              n_rx_packets++;
              n_rx_bytes += tph->tp_snaplen;
              to_next[0] = first_bi0;
              to_next += 1;
              n_left_to_next--;

              /* drop partial packets */
              if (PREDICT_FALSE (tph->tp_len != tph->tp_snaplen))
                {
                  next0 = VNET_DEVICE_INPUT_NEXT_DROP;
                  first_b0->error =
                    node->errors[AF_PACKET_INPUT_ERROR_PARTIAL_PKT];
                }
              else
                {
                  if (PREDICT_FALSE (apif->mode == AF_PACKET_IF_MODE_IP))
                    {
                      switch (first_b0->data[0] & 0xf0)
                        {
                        case 0x40:
                          next0 = VNET_DEVICE_INPUT_NEXT_IP4_INPUT;
                          break;
                        case 0x60:
                          next0 = VNET_DEVICE_INPUT_NEXT_IP6_INPUT;
                          break;
                        default:
                          next0 = VNET_DEVICE_INPUT_NEXT_DROP;
                          break;
                        }
                      if (PREDICT_FALSE (apif->per_interface_next_index != ~0))
                        next0 = apif->per_interface_next_index;
                    }
                  else
                    {
                      /* copy feature arc data from template */
                      first_b0->current_config_index = bt.current_config_index;
                      vnet_buffer (first_b0)->feature_arc_index =
                        vnet_buffer (&bt)->feature_arc_index;
                    }
                }

              /* trace */
              if (PREDICT_FALSE (n_trace > 0 &&
                                 vlib_trace_buffer (vm, node, next0, first_b0,
                                                    /* follow_chain */ 0)))
                {
                  af_packet_input_trace_t *tr;
                  vlib_set_trace_count (vm, node, --n_trace);
                  tr = vlib_add_trace (vm, node, first_b0, sizeof (*tr));
                  tr->next_index = next0;
                  tr->hw_if_index = apif->hw_if_index;
                  tr->block = block;
                  tr->block_start = bd;
                  tr->pkt_num = bd->hdr.bh1.num_pkts - num_pkts;
                  clib_memcpy_fast (&tr->bd, bd, sizeof (block_desc_t));
                  clib_memcpy_fast (&tr->tph, tph, sizeof (tpacket3_hdr_t));
                  if (is_cksum_gso_enabled)
                    clib_memcpy_fast (&tr->vnet_hdr, vnet_hdr,
                                      sizeof (vnet_virtio_net_hdr_t));
                  else
                    clib_memset_u8 (&tr->vnet_hdr, 0,
                                    sizeof (vnet_virtio_net_hdr_t));
                }

              /* enqueue and take the next packet */
              vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
                                               n_left_to_next, first_bi0,
                                               next0);

              /* next packet */
              num_pkts--;
              rx_frame_offset += tph->tp_next_offset;
            }

          vlib_put_next_frame (vm, node, next_index, n_left_to_next);
        }

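      /* every frame in this block has been consumed: hand the block back
       * to the kernel and advance to the next one */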
      if (PREDICT_TRUE (num_pkts == 0))
        {
          bd->hdr.bh1.block_status = TP_STATUS_KERNEL;
          block = (block + 1) % block_nr;
        }
    }

  apif->next_rx_block = block;
  vlib_increment_combined_counter
    (vnet_get_main ()->interface_main.combined_sw_if_counters
     + VNET_INTERFACE_COUNTER_RX,
     vlib_get_thread_index (), apif->hw_if_index, n_rx_packets, n_rx_bytes);

  vnet_device_increment_rx_packets (thread_index, n_rx_packets);
  return n_rx_packets;
}

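/*
 * Node entry point: walk the interfaces with pending RX queues and call
 * the input function, selecting the checksum/GSO specialization so the
 * per-packet offload checks compile down to constants.
 */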
VLIB_NODE_FN (af_packet_input_node) (vlib_main_t * vm,
                                     vlib_node_runtime_t * node,
                                     vlib_frame_t * frame)
{
  u32 n_rx_packets = 0;
  af_packet_main_t *apm = &af_packet_main;
  vnet_hw_if_rxq_poll_vector_t *pv;
  pv = vnet_hw_if_get_rxq_poll_vector (vm, node);
  for (int i = 0; i < vec_len (pv); i++)
    {
      af_packet_if_t *apif;
      apif = vec_elt_at_index (apm->interfaces, pv[i].dev_instance);
      if (apif->is_admin_up)
        {
          if (apif->is_cksum_gso_enabled)
            n_rx_packets +=
              af_packet_device_input_fn (vm, node, frame, apif, 1);
          else
            n_rx_packets +=
              af_packet_device_input_fn (vm, node, frame, apif, 0);
        }
    }
  return n_rx_packets;
}

VLIB_REGISTER_NODE (af_packet_input_node) = {
  .name = "af-packet-input",
  .flags = VLIB_NODE_FLAG_TRACE_SUPPORTED,
  .sibling_of = "device-input",
  .format_trace = format_af_packet_input_trace,
  .type = VLIB_NODE_TYPE_INPUT,
  .state = VLIB_NODE_STATE_INTERRUPT,
  .n_errors = AF_PACKET_INPUT_N_ERROR,
  .error_strings = af_packet_input_error_strings,
};

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */