/*
 * Copyright (c) 2016 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
16 #ifndef included_vnet_handoff_h
17 #define included_vnet_handoff_h
19 #include <vlib/vlib.h>
20 #include <vnet/ethernet/ethernet.h>
21 #include <vnet/ip/ip4_packet.h>
22 #include <vnet/ip/ip6_packet.h>
23 #include <vnet/mpls/packet.h>
/*
 * Next-node indices for the handoff-dispatch node: after a packet is
 * handed off to a worker thread, it is dispatched to one of these
 * input nodes (or dropped).
 */
typedef enum
{
  HANDOFF_DISPATCH_NEXT_IP4_INPUT,
  HANDOFF_DISPATCH_NEXT_IP6_INPUT,
  HANDOFF_DISPATCH_NEXT_MPLS_INPUT,
  HANDOFF_DISPATCH_NEXT_ETHERNET_INPUT,
  HANDOFF_DISPATCH_NEXT_DROP,
  HANDOFF_DISPATCH_N_NEXT,		/* number of next nodes, keep last */
} handoff_dispatch_next_t;
36 vlib_put_handoff_queue_elt (vlib_frame_queue_elt_t * hf)
38 CLIB_MEMORY_BARRIER ();
42 static inline vlib_frame_queue_elt_t *
43 vlib_get_handoff_queue_elt (u32 vlib_worker_index)
45 vlib_frame_queue_t *fq;
46 vlib_frame_queue_elt_t *elt;
49 fq = vlib_frame_queues[vlib_worker_index];
52 new_tail = __sync_add_and_fetch (&fq->tail, 1);
54 /* Wait until a ring slot is available */
55 while (new_tail >= fq->head_hint + fq->nelts)
56 vlib_worker_thread_barrier_check ();
58 elt = fq->elts + (new_tail & (fq->nelts - 1));
60 /* this would be very bad... */
64 elt->msg_type = VLIB_FRAME_QUEUE_ELT_DISPATCH_FRAME;
65 elt->last_n_vectors = elt->n_vectors = 0;
70 static inline vlib_frame_queue_t *
71 is_vlib_handoff_queue_congested (u32 vlib_worker_index,
74 handoff_queue_by_worker_index)
76 vlib_frame_queue_t *fq;
78 fq = handoff_queue_by_worker_index[vlib_worker_index];
79 if (fq != (vlib_frame_queue_t *) (~0))
82 fq = vlib_frame_queues[vlib_worker_index];
85 if (PREDICT_FALSE (fq->tail >= (fq->head_hint + queue_hi_thresh)))
87 /* a valid entry in the array will indicate the queue has reached
88 * the specified threshold and is congested
90 handoff_queue_by_worker_index[vlib_worker_index] = fq;
91 fq->enqueue_full_events++;
98 static inline vlib_frame_queue_elt_t *
99 dpdk_get_handoff_queue_elt (u32 vlib_worker_index,
100 vlib_frame_queue_elt_t **
101 handoff_queue_elt_by_worker_index)
103 vlib_frame_queue_elt_t *elt;
105 if (handoff_queue_elt_by_worker_index[vlib_worker_index])
106 return handoff_queue_elt_by_worker_index[vlib_worker_index];
108 elt = vlib_get_handoff_queue_elt (vlib_worker_index);
110 handoff_queue_elt_by_worker_index[vlib_worker_index] = elt;
116 ipv4_get_key (ip4_header_t * ip)
120 hash_key = *((u64 *) (&ip->address_pair)) ^ ip->protocol;
126 ipv6_get_key (ip6_header_t * ip)
130 hash_key = ip->src_address.as_u64[0] ^
131 rotate_left (ip->src_address.as_u64[1], 13) ^
132 rotate_left (ip->dst_address.as_u64[0], 26) ^
133 rotate_left (ip->dst_address.as_u64[1], 39) ^ ip->protocol;
138 #define MPLS_BOTTOM_OF_STACK_BIT_MASK 0x00000100U
139 #define MPLS_LABEL_MASK 0xFFFFF000U
142 mpls_get_key (mpls_unicast_header_t * m)
148 /* find the bottom of the MPLS label stack. */
149 if (PREDICT_TRUE (m->label_exp_s_ttl &
150 clib_net_to_host_u32 (MPLS_BOTTOM_OF_STACK_BIT_MASK)))
152 goto bottom_lbl_found;
156 if (PREDICT_TRUE (m->label_exp_s_ttl &
157 clib_net_to_host_u32 (MPLS_BOTTOM_OF_STACK_BIT_MASK)))
159 goto bottom_lbl_found;
163 if (m->label_exp_s_ttl &
164 clib_net_to_host_u32 (MPLS_BOTTOM_OF_STACK_BIT_MASK))
166 goto bottom_lbl_found;
170 if (m->label_exp_s_ttl &
171 clib_net_to_host_u32 (MPLS_BOTTOM_OF_STACK_BIT_MASK))
173 goto bottom_lbl_found;
177 if (m->label_exp_s_ttl &
178 clib_net_to_host_u32 (MPLS_BOTTOM_OF_STACK_BIT_MASK))
180 goto bottom_lbl_found;
183 /* the bottom label was not found - use the last label */
184 hash_key = m->label_exp_s_ttl & clib_net_to_host_u32 (MPLS_LABEL_MASK);
190 ip_ver = (*((u8 *) m) >> 4);
192 /* find out if it is IPV4 or IPV6 header */
193 if (PREDICT_TRUE (ip_ver == 4))
195 hash_key = ipv4_get_key ((ip4_header_t *) m);
197 else if (PREDICT_TRUE (ip_ver == 6))
199 hash_key = ipv6_get_key ((ip6_header_t *) m);
203 /* use the bottom label */
205 (m - 1)->label_exp_s_ttl & clib_net_to_host_u32 (MPLS_LABEL_MASK);
214 eth_get_key (ethernet_header_t * h0)
218 if (PREDICT_TRUE (h0->type) == clib_host_to_net_u16 (ETHERNET_TYPE_IP4))
220 hash_key = ipv4_get_key ((ip4_header_t *) (h0 + 1));
222 else if (h0->type == clib_host_to_net_u16 (ETHERNET_TYPE_IP6))
224 hash_key = ipv6_get_key ((ip6_header_t *) (h0 + 1));
226 else if (h0->type == clib_host_to_net_u16 (ETHERNET_TYPE_MPLS_UNICAST))
228 hash_key = mpls_get_key ((mpls_unicast_header_t *) (h0 + 1));
230 else if ((h0->type == clib_host_to_net_u16 (ETHERNET_TYPE_VLAN)) ||
231 (h0->type == clib_host_to_net_u16 (ETHERNET_TYPE_DOT1AD)))
233 ethernet_vlan_header_t *outer = (ethernet_vlan_header_t *) (h0 + 1);
235 outer = (outer->type == clib_host_to_net_u16 (ETHERNET_TYPE_VLAN)) ?
237 if (PREDICT_TRUE (outer->type) ==
238 clib_host_to_net_u16 (ETHERNET_TYPE_IP4))
240 hash_key = ipv4_get_key ((ip4_header_t *) (outer + 1));
242 else if (outer->type == clib_host_to_net_u16 (ETHERNET_TYPE_IP6))
244 hash_key = ipv6_get_key ((ip6_header_t *) (outer + 1));
246 else if (outer->type ==
247 clib_host_to_net_u16 (ETHERNET_TYPE_MPLS_UNICAST))
249 hash_key = mpls_get_key ((mpls_unicast_header_t *) (outer + 1));
253 hash_key = outer->type;
264 #endif /* included_vnet_handoff_h */
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */