/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2016 6WIND S.A.
 */

#include <stdint.h>

#include <rte_mbuf.h>
#include <rte_mbuf_ptype.h>
#include <rte_byteorder.h>
#include <rte_ether.h>
#include <rte_ip.h>
#include <rte_tcp.h>
#include <rte_udp.h>
#include <rte_sctp.h>
#include <rte_gre.h>
#include <rte_net.h>

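/*
 * The map below stores the difference RTE_PTYPE_L3_IPV6_EXT -
 * RTE_PTYPE_L3_IPV6 for every IPv6 extension-header protocol, so adding
 * the looked-up value to RTE_PTYPE_L3_IPV6 yields IPV6_EXT when an
 * extension header follows and plain IPV6 otherwise.
 */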
/* get l3 packet type from ip6 next protocol */
static uint32_t
ptype_l3_ip6(uint8_t ip6_proto)
{
        static const uint32_t ip6_ext_proto_map[256] = {
                [IPPROTO_HOPOPTS] = RTE_PTYPE_L3_IPV6_EXT - RTE_PTYPE_L3_IPV6,
                [IPPROTO_ROUTING] = RTE_PTYPE_L3_IPV6_EXT - RTE_PTYPE_L3_IPV6,
                [IPPROTO_FRAGMENT] = RTE_PTYPE_L3_IPV6_EXT - RTE_PTYPE_L3_IPV6,
                [IPPROTO_ESP] = RTE_PTYPE_L3_IPV6_EXT - RTE_PTYPE_L3_IPV6,
                [IPPROTO_AH] = RTE_PTYPE_L3_IPV6_EXT - RTE_PTYPE_L3_IPV6,
                [IPPROTO_DSTOPTS] = RTE_PTYPE_L3_IPV6_EXT - RTE_PTYPE_L3_IPV6,
        };

        return RTE_PTYPE_L3_IPV6 + ip6_ext_proto_map[ip6_proto];
}

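/*
 * Index 0x45 is IP version 4 with IHL 5 (20 bytes, no options); 0x46-0x4F
 * cover IHL 6-15, i.e. headers carrying options, reported as IPV4_EXT.
 * Any other version/IHL combination maps to 0 (unknown L3 type).
 */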
/* get l3 packet type from ip version and header length */
static uint32_t
ptype_l3_ip(uint8_t ipv_ihl)
{
        static const uint32_t ptype_l3_ip_proto_map[256] = {
                [0x45] = RTE_PTYPE_L3_IPV4,
                [0x46] = RTE_PTYPE_L3_IPV4_EXT,
                [0x47] = RTE_PTYPE_L3_IPV4_EXT,
                [0x48] = RTE_PTYPE_L3_IPV4_EXT,
                [0x49] = RTE_PTYPE_L3_IPV4_EXT,
                [0x4A] = RTE_PTYPE_L3_IPV4_EXT,
                [0x4B] = RTE_PTYPE_L3_IPV4_EXT,
                [0x4C] = RTE_PTYPE_L3_IPV4_EXT,
                [0x4D] = RTE_PTYPE_L3_IPV4_EXT,
                [0x4E] = RTE_PTYPE_L3_IPV4_EXT,
                [0x4F] = RTE_PTYPE_L3_IPV4_EXT,
        };

        return ptype_l3_ip_proto_map[ipv_ihl];
}

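/*
 * Only TCP, UDP and SCTP are recognized here; any other protocol number
 * returns 0, which lets rte_net_get_ptype() fall through to tunnel
 * detection instead of reporting an L4 type.
 */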
/* get l4 packet type from proto */
static uint32_t
ptype_l4(uint8_t proto)
{
        static const uint32_t ptype_l4_proto[256] = {
                [IPPROTO_UDP] = RTE_PTYPE_L4_UDP,
                [IPPROTO_TCP] = RTE_PTYPE_L4_TCP,
                [IPPROTO_SCTP] = RTE_PTYPE_L4_SCTP,
        };

        return ptype_l4_proto[proto];
}

/* get inner l3 packet type from ip6 next protocol */
static uint32_t
ptype_inner_l3_ip6(uint8_t ip6_proto)
{
        static const uint32_t ptype_inner_ip6_ext_proto_map[256] = {
                [IPPROTO_HOPOPTS] = RTE_PTYPE_INNER_L3_IPV6_EXT -
                        RTE_PTYPE_INNER_L3_IPV6,
                [IPPROTO_ROUTING] = RTE_PTYPE_INNER_L3_IPV6_EXT -
                        RTE_PTYPE_INNER_L3_IPV6,
                [IPPROTO_FRAGMENT] = RTE_PTYPE_INNER_L3_IPV6_EXT -
                        RTE_PTYPE_INNER_L3_IPV6,
                [IPPROTO_ESP] = RTE_PTYPE_INNER_L3_IPV6_EXT -
                        RTE_PTYPE_INNER_L3_IPV6,
                [IPPROTO_AH] = RTE_PTYPE_INNER_L3_IPV6_EXT -
                        RTE_PTYPE_INNER_L3_IPV6,
                [IPPROTO_DSTOPTS] = RTE_PTYPE_INNER_L3_IPV6_EXT -
                        RTE_PTYPE_INNER_L3_IPV6,
        };

        return RTE_PTYPE_INNER_L3_IPV6 +
                ptype_inner_ip6_ext_proto_map[ip6_proto];
}

/* get inner l3 packet type from ip version and header length */
static uint32_t
ptype_inner_l3_ip(uint8_t ipv_ihl)
{
        static const uint32_t ptype_inner_l3_ip_proto_map[256] = {
                [0x45] = RTE_PTYPE_INNER_L3_IPV4,
                [0x46] = RTE_PTYPE_INNER_L3_IPV4_EXT,
                [0x47] = RTE_PTYPE_INNER_L3_IPV4_EXT,
                [0x48] = RTE_PTYPE_INNER_L3_IPV4_EXT,
                [0x49] = RTE_PTYPE_INNER_L3_IPV4_EXT,
                [0x4A] = RTE_PTYPE_INNER_L3_IPV4_EXT,
                [0x4B] = RTE_PTYPE_INNER_L3_IPV4_EXT,
                [0x4C] = RTE_PTYPE_INNER_L3_IPV4_EXT,
                [0x4D] = RTE_PTYPE_INNER_L3_IPV4_EXT,
                [0x4E] = RTE_PTYPE_INNER_L3_IPV4_EXT,
                [0x4F] = RTE_PTYPE_INNER_L3_IPV4_EXT,
        };

        return ptype_inner_l3_ip_proto_map[ipv_ihl];
}

/* get inner l4 packet type from proto */
static uint32_t
ptype_inner_l4(uint8_t proto)
{
        static const uint32_t ptype_inner_l4_proto[256] = {
                [IPPROTO_UDP] = RTE_PTYPE_INNER_L4_UDP,
                [IPPROTO_TCP] = RTE_PTYPE_INNER_L4_TCP,
                [IPPROTO_SCTP] = RTE_PTYPE_INNER_L4_SCTP,
        };

        return ptype_inner_l4_proto[proto];
}

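/*
 * For GRE, the top four bits of the first 16-bit word are the C (checksum),
 * R (routing), K (key) and S (sequence) flags. Each of C, K and S adds four
 * bytes to the 4-byte base header, which is what the opt_len table encodes;
 * combinations with the deprecated R bit are left at 0 and rejected. A TEB
 * payload means the tunnel carries Ethernet (NVGRE). For IP-in-IP, *proto
 * is rewritten to the matching ethertype so the caller can keep parsing the
 * inner L3 header with the same code path.
 */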
/* get the tunnel packet type if any, update proto and off. */
static uint32_t
ptype_tunnel(uint16_t *proto, const struct rte_mbuf *m,
        uint32_t *off)
{
        switch (*proto) {
        case IPPROTO_GRE: {
                static const uint8_t opt_len[16] = {
                        [0x0] = 4,
                        [0x1] = 8,
                        [0x2] = 8,
                        [0x8] = 8,
                        [0x3] = 12,
                        [0x9] = 12,
                        [0xa] = 12,
                        [0xb] = 16,
                };
                const struct gre_hdr *gh;
                struct gre_hdr gh_copy;
                uint16_t flags;

                gh = rte_pktmbuf_read(m, *off, sizeof(*gh), &gh_copy);
                if (unlikely(gh == NULL))
                        return 0;

                flags = rte_be_to_cpu_16(*(const uint16_t *)gh);
                flags >>= 12;
                if (opt_len[flags] == 0)
                        return 0;

                *off += opt_len[flags];
                *proto = gh->proto;
                if (*proto == rte_cpu_to_be_16(ETHER_TYPE_TEB))
                        return RTE_PTYPE_TUNNEL_NVGRE;
                else
                        return RTE_PTYPE_TUNNEL_GRE;
        }
        case IPPROTO_IPIP:
                *proto = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
                return RTE_PTYPE_TUNNEL_IP;
        case IPPROTO_IPV6:
                *proto = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
                return RTE_PTYPE_TUNNEL_IP; /* IP is also valid for IPv6 */
        default:
                return 0;
        }
}

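/* The IPv4 IHL field counts 32-bit words, so the byte length is IHL * 4. */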
/* get the ipv4 header length */
static uint8_t
ip4_hlen(const struct ipv4_hdr *hdr)
{
        return (hdr->version_ihl & 0xf) * 4;
}

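/*
 * Walks at most MAX_EXT_HDRS extension headers: hop-by-hop, routing and
 * destination options advance by (len + 1) * 8 bytes, a fragment header is
 * a fixed 8 bytes and terminates the walk with *frag set, IPPROTO_NONE
 * returns 0 (no upper layer), and any other value is returned as the next
 * protocol. A return of -1 means a read failure or too many extension
 * headers.
 *
 * A minimal caller sketch (ip6h, m, off and next_proto are hypothetical
 * caller locals; off must point just past the fixed IPv6 header):
 *
 *	int frag = 0;
 *	int ret = rte_net_skip_ip6_ext(ip6h->proto, m, &off, &frag);
 *	if (ret >= 0)
 *		next_proto = ret;
 */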
/* parse ipv6 extended headers, update offset and return next proto */
int __rte_experimental
rte_net_skip_ip6_ext(uint16_t proto, const struct rte_mbuf *m, uint32_t *off,
        int *frag)
{
        struct ext_hdr {
                uint8_t next_hdr;
                uint8_t len;
        };
        const struct ext_hdr *xh;
        struct ext_hdr xh_copy;
        unsigned int i;

        *frag = 0;

#define MAX_EXT_HDRS 5
        for (i = 0; i < MAX_EXT_HDRS; i++) {
                switch (proto) {
                case IPPROTO_HOPOPTS:
                case IPPROTO_ROUTING:
                case IPPROTO_DSTOPTS:
                        xh = rte_pktmbuf_read(m, *off, sizeof(*xh),
                                &xh_copy);
                        if (xh == NULL)
                                return -1;
                        *off += (xh->len + 1) * 8;
                        proto = xh->next_hdr;
                        break;
                case IPPROTO_FRAGMENT:
                        xh = rte_pktmbuf_read(m, *off, sizeof(*xh),
                                &xh_copy);
                        if (xh == NULL)
                                return -1;
                        *off += 8;
                        proto = xh->next_hdr;
                        *frag = 1;
                        return proto; /* this is always the last ext hdr */
                case IPPROTO_NONE:
                        return 0;
                default:
                        return proto;
                }
        }
        return -1;
}

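/*
 * The return value is an RTE_PTYPE_* bit mask and, when hdr_lens is not
 * NULL, the structure is filled with the length of each parsed layer.
 * The layers argument bounds how deep the parser goes.
 *
 * A minimal caller sketch (m is a hypothetical received mbuf and tcp_off a
 * hypothetical local):
 *
 *	struct rte_net_hdr_lens hdr_lens;
 *	uint32_t ptype = rte_net_get_ptype(m, &hdr_lens, RTE_PTYPE_ALL_MASK);
 *
 *	if ((ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_TCP)
 *		tcp_off = hdr_lens.l2_len + hdr_lens.l3_len;
 */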
/* parse mbuf data to get packet type */
uint32_t rte_net_get_ptype(const struct rte_mbuf *m,
        struct rte_net_hdr_lens *hdr_lens, uint32_t layers)
{
        struct rte_net_hdr_lens local_hdr_lens;
        const struct ether_hdr *eh;
        struct ether_hdr eh_copy;
        uint32_t pkt_type = RTE_PTYPE_L2_ETHER;
        uint32_t off = 0;
        uint16_t proto;
        int ret;

        if (hdr_lens == NULL)
                hdr_lens = &local_hdr_lens;

        eh = rte_pktmbuf_read(m, off, sizeof(*eh), &eh_copy);
        if (unlikely(eh == NULL))
                return 0;
        proto = eh->ether_type;
        off = sizeof(*eh);
        hdr_lens->l2_len = off;

        if ((layers & RTE_PTYPE_L2_MASK) == 0)
                return 0;

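        /*
         * Outer L2: IPv4 is the common case and jumps straight to L3.
         * A single VLAN tag or a QinQ pair is otherwise folded into
         * l2_len; for QinQ only the inner tag is read (at off +
         * sizeof(*vh)) since its eth_proto identifies the next layer.
         */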
        if (proto == rte_cpu_to_be_16(ETHER_TYPE_IPv4))
                goto l3; /* fast path if packet is IPv4 */

        if (proto == rte_cpu_to_be_16(ETHER_TYPE_VLAN)) {
                const struct vlan_hdr *vh;
                struct vlan_hdr vh_copy;

                pkt_type = RTE_PTYPE_L2_ETHER_VLAN;
                vh = rte_pktmbuf_read(m, off, sizeof(*vh), &vh_copy);
                if (unlikely(vh == NULL))
                        return pkt_type;
                off += sizeof(*vh);
                hdr_lens->l2_len += sizeof(*vh);
                proto = vh->eth_proto;
        } else if (proto == rte_cpu_to_be_16(ETHER_TYPE_QINQ)) {
                const struct vlan_hdr *vh;
                struct vlan_hdr vh_copy;

                pkt_type = RTE_PTYPE_L2_ETHER_QINQ;
                vh = rte_pktmbuf_read(m, off + sizeof(*vh), sizeof(*vh),
                        &vh_copy);
                if (unlikely(vh == NULL))
                        return pkt_type;
                off += 2 * sizeof(*vh);
                hdr_lens->l2_len += 2 * sizeof(*vh);
                proto = vh->eth_proto;
        }

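        /*
         * Outer L3: the IPv4 header length comes from the IHL field and
         * fragments are reported as RTE_PTYPE_L4_FRAG with l4_len = 0.
         * IPv6 has a fixed 40-byte header; when the first next-header is
         * an extension, the chain is skipped with rte_net_skip_ip6_ext()
         * and a returned protocol of 0 (IPPROTO_NONE) means there is no
         * L4 header at all.
         */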
 l3:
        if ((layers & RTE_PTYPE_L3_MASK) == 0)
                return pkt_type;

        if (proto == rte_cpu_to_be_16(ETHER_TYPE_IPv4)) {
                const struct ipv4_hdr *ip4h;
                struct ipv4_hdr ip4h_copy;

                ip4h = rte_pktmbuf_read(m, off, sizeof(*ip4h), &ip4h_copy);
                if (unlikely(ip4h == NULL))
                        return pkt_type;

                pkt_type |= ptype_l3_ip(ip4h->version_ihl);
                hdr_lens->l3_len = ip4_hlen(ip4h);
                off += hdr_lens->l3_len;

                if ((layers & RTE_PTYPE_L4_MASK) == 0)
                        return pkt_type;

                if (ip4h->fragment_offset & rte_cpu_to_be_16(
                                IPV4_HDR_OFFSET_MASK | IPV4_HDR_MF_FLAG)) {
                        pkt_type |= RTE_PTYPE_L4_FRAG;
                        hdr_lens->l4_len = 0;
                        return pkt_type;
                }
                proto = ip4h->next_proto_id;
                pkt_type |= ptype_l4(proto);
        } else if (proto == rte_cpu_to_be_16(ETHER_TYPE_IPv6)) {
                const struct ipv6_hdr *ip6h;
                struct ipv6_hdr ip6h_copy;
                int frag = 0;

                ip6h = rte_pktmbuf_read(m, off, sizeof(*ip6h), &ip6h_copy);
                if (unlikely(ip6h == NULL))
                        return pkt_type;

                proto = ip6h->proto;
                hdr_lens->l3_len = sizeof(*ip6h);
                off += hdr_lens->l3_len;
                pkt_type |= ptype_l3_ip6(proto);
                if ((pkt_type & RTE_PTYPE_L3_MASK) == RTE_PTYPE_L3_IPV6_EXT) {
                        ret = rte_net_skip_ip6_ext(proto, m, &off, &frag);
                        if (ret < 0)
                                return pkt_type;
                        proto = ret;
                        hdr_lens->l3_len = off - hdr_lens->l2_len;
                }
                if (proto == 0)
                        return pkt_type;

                if ((layers & RTE_PTYPE_L4_MASK) == 0)
                        return pkt_type;

                if (frag) {
                        pkt_type |= RTE_PTYPE_L4_FRAG;
                        hdr_lens->l4_len = 0;
                        return pkt_type;
                }
                pkt_type |= ptype_l4(proto);
        }

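        /*
         * Outer L4: UDP and SCTP headers have fixed sizes; the TCP header
         * length is the data offset field (upper nibble of data_off, in
         * 32-bit words) converted to bytes. Any other protocol is handed
         * to ptype_tunnel() and tunnel_len records the bytes it consumed.
         */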
        if ((pkt_type & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_UDP) {
                hdr_lens->l4_len = sizeof(struct udp_hdr);
                return pkt_type;
        } else if ((pkt_type & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_TCP) {
                const struct tcp_hdr *th;
                struct tcp_hdr th_copy;

                th = rte_pktmbuf_read(m, off, sizeof(*th), &th_copy);
                if (unlikely(th == NULL))
                        return pkt_type & (RTE_PTYPE_L2_MASK |
                                RTE_PTYPE_L3_MASK);
                hdr_lens->l4_len = (th->data_off & 0xf0) >> 2;
                return pkt_type;
        } else if ((pkt_type & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_SCTP) {
                hdr_lens->l4_len = sizeof(struct sctp_hdr);
                return pkt_type;
        } else {
                uint32_t prev_off = off;

                hdr_lens->l4_len = 0;

                if ((layers & RTE_PTYPE_TUNNEL_MASK) == 0)
                        return pkt_type;

                pkt_type |= ptype_tunnel(&proto, m, &off);
                hdr_lens->tunnel_len = off - prev_off;
        }

        /* same job for inner header: we need to duplicate the code
         * because the packet types do not have the same value.
         */
        if ((layers & RTE_PTYPE_INNER_L2_MASK) == 0)
                return pkt_type;

        hdr_lens->inner_l2_len = 0;
        if (proto == rte_cpu_to_be_16(ETHER_TYPE_TEB)) {
                eh = rte_pktmbuf_read(m, off, sizeof(*eh), &eh_copy);
                if (unlikely(eh == NULL))
                        return pkt_type;
                pkt_type |= RTE_PTYPE_INNER_L2_ETHER;
                proto = eh->ether_type;
                off += sizeof(*eh);
                hdr_lens->inner_l2_len = sizeof(*eh);
        }

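        /*
         * Inner VLAN/QinQ tags are handled like the outer ones, except the
         * RTE_PTYPE_INNER_L2 bits are cleared first since INNER_L2_ETHER
         * may already have been set for a TEB payload.
         */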
        if (proto == rte_cpu_to_be_16(ETHER_TYPE_VLAN)) {
                const struct vlan_hdr *vh;
                struct vlan_hdr vh_copy;

                pkt_type &= ~RTE_PTYPE_INNER_L2_MASK;
                pkt_type |= RTE_PTYPE_INNER_L2_ETHER_VLAN;
                vh = rte_pktmbuf_read(m, off, sizeof(*vh), &vh_copy);
                if (unlikely(vh == NULL))
                        return pkt_type;
                off += sizeof(*vh);
                hdr_lens->inner_l2_len += sizeof(*vh);
                proto = vh->eth_proto;
        } else if (proto == rte_cpu_to_be_16(ETHER_TYPE_QINQ)) {
                const struct vlan_hdr *vh;
                struct vlan_hdr vh_copy;

                pkt_type &= ~RTE_PTYPE_INNER_L2_MASK;
                pkt_type |= RTE_PTYPE_INNER_L2_ETHER_QINQ;
                vh = rte_pktmbuf_read(m, off + sizeof(*vh), sizeof(*vh),
                        &vh_copy);
                if (unlikely(vh == NULL))
                        return pkt_type;
                off += 2 * sizeof(*vh);
                hdr_lens->inner_l2_len += 2 * sizeof(*vh);
                proto = vh->eth_proto;
        }

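        /*
         * Inner L3/L4 parsing mirrors the outer code above but uses the
         * RTE_PTYPE_INNER_* values and fills inner_l3_len/inner_l4_len;
         * for IPv6 the extension-header length is accumulated via the
         * prev_off bookkeeping below.
         */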
        if ((layers & RTE_PTYPE_INNER_L3_MASK) == 0)
                return pkt_type;

        if (proto == rte_cpu_to_be_16(ETHER_TYPE_IPv4)) {
                const struct ipv4_hdr *ip4h;
                struct ipv4_hdr ip4h_copy;

                ip4h = rte_pktmbuf_read(m, off, sizeof(*ip4h), &ip4h_copy);
                if (unlikely(ip4h == NULL))
                        return pkt_type;

                pkt_type |= ptype_inner_l3_ip(ip4h->version_ihl);
                hdr_lens->inner_l3_len = ip4_hlen(ip4h);
                off += hdr_lens->inner_l3_len;

                if ((layers & RTE_PTYPE_INNER_L4_MASK) == 0)
                        return pkt_type;
                if (ip4h->fragment_offset &
                                rte_cpu_to_be_16(IPV4_HDR_OFFSET_MASK |
                                        IPV4_HDR_MF_FLAG)) {
                        pkt_type |= RTE_PTYPE_INNER_L4_FRAG;
                        hdr_lens->inner_l4_len = 0;
                        return pkt_type;
                }
                proto = ip4h->next_proto_id;
                pkt_type |= ptype_inner_l4(proto);
        } else if (proto == rte_cpu_to_be_16(ETHER_TYPE_IPv6)) {
                const struct ipv6_hdr *ip6h;
                struct ipv6_hdr ip6h_copy;
                int frag = 0;

                ip6h = rte_pktmbuf_read(m, off, sizeof(*ip6h), &ip6h_copy);
                if (unlikely(ip6h == NULL))
                        return pkt_type;

                proto = ip6h->proto;
                hdr_lens->inner_l3_len = sizeof(*ip6h);
                off += hdr_lens->inner_l3_len;
                pkt_type |= ptype_inner_l3_ip6(proto);
                if ((pkt_type & RTE_PTYPE_INNER_L3_MASK) ==
                                RTE_PTYPE_INNER_L3_IPV6_EXT) {
                        uint32_t prev_off;

                        prev_off = off;
                        ret = rte_net_skip_ip6_ext(proto, m, &off, &frag);
                        if (ret < 0)
                                return pkt_type;
                        proto = ret;
                        hdr_lens->inner_l3_len += off - prev_off;
                }
                if (proto == 0)
                        return pkt_type;

                if ((layers & RTE_PTYPE_INNER_L4_MASK) == 0)
                        return pkt_type;

                if (frag) {
                        pkt_type |= RTE_PTYPE_INNER_L4_FRAG;
                        hdr_lens->inner_l4_len = 0;
                        return pkt_type;
                }
                pkt_type |= ptype_inner_l4(proto);
        }

        if ((pkt_type & RTE_PTYPE_INNER_L4_MASK) == RTE_PTYPE_INNER_L4_UDP) {
                hdr_lens->inner_l4_len = sizeof(struct udp_hdr);
        } else if ((pkt_type & RTE_PTYPE_INNER_L4_MASK) ==
                        RTE_PTYPE_INNER_L4_TCP) {
                const struct tcp_hdr *th;
                struct tcp_hdr th_copy;

                th = rte_pktmbuf_read(m, off, sizeof(*th), &th_copy);
                if (unlikely(th == NULL))
                        return pkt_type & (RTE_PTYPE_INNER_L2_MASK |
                                RTE_PTYPE_INNER_L3_MASK);
                hdr_lens->inner_l4_len = (th->data_off & 0xf0) >> 2;
        } else if ((pkt_type & RTE_PTYPE_INNER_L4_MASK) ==
                        RTE_PTYPE_INNER_L4_SCTP) {
                hdr_lens->inner_l4_len = sizeof(struct sctp_hdr);
        } else {
                hdr_lens->inner_l4_len = 0;
        }

        return pkt_type;
}