/*
 * Copyright (c) 2016 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
16 #ifndef included_vnet_handoff_h
17 #define included_vnet_handoff_h
19 #include <vlib/vlib.h>
20 #include <vnet/ethernet/ethernet.h>
21 #include <vnet/ip/ip4_packet.h>
22 #include <vnet/ip/ip6_packet.h>
23 #include <vnet/mpls-gre/packet.h>
/*
 * Next-node indices used by the handoff-dispatch node to steer a
 * handed-off frame back into the proper input node on the target worker.
 */
typedef enum
{
  HANDOFF_DISPATCH_NEXT_IP4_INPUT,
  HANDOFF_DISPATCH_NEXT_IP6_INPUT,
  HANDOFF_DISPATCH_NEXT_MPLS_INPUT,
  HANDOFF_DISPATCH_NEXT_ETHERNET_INPUT,
  HANDOFF_DISPATCH_NEXT_DROP,
  HANDOFF_DISPATCH_N_NEXT,
} handoff_dispatch_next_t;
35 void vlib_put_handoff_queue_elt (vlib_frame_queue_elt_t * hf)
37 CLIB_MEMORY_BARRIER();
41 static inline vlib_frame_queue_elt_t *
42 vlib_get_handoff_queue_elt (u32 vlib_worker_index)
44 vlib_frame_queue_t *fq;
45 vlib_frame_queue_elt_t *elt;
48 fq = vlib_frame_queues[vlib_worker_index];
51 new_tail = __sync_add_and_fetch (&fq->tail, 1);
53 /* Wait until a ring slot is available */
54 while (new_tail >= fq->head_hint + fq->nelts)
55 vlib_worker_thread_barrier_check ();
57 elt = fq->elts + (new_tail & (fq->nelts-1));
59 /* this would be very bad... */
63 elt->msg_type = VLIB_FRAME_QUEUE_ELT_DISPATCH_FRAME;
64 elt->last_n_vectors = elt->n_vectors = 0;
69 static inline vlib_frame_queue_t *
70 is_vlib_handoff_queue_congested (
71 u32 vlib_worker_index,
73 vlib_frame_queue_t ** handoff_queue_by_worker_index)
75 vlib_frame_queue_t *fq;
77 fq = handoff_queue_by_worker_index [vlib_worker_index];
78 if (fq != (vlib_frame_queue_t *)(~0))
81 fq = vlib_frame_queues[vlib_worker_index];
84 if (PREDICT_FALSE(fq->tail >= (fq->head_hint + queue_hi_thresh))) {
85 /* a valid entry in the array will indicate the queue has reached
86 * the specified threshold and is congested
88 handoff_queue_by_worker_index [vlib_worker_index] = fq;
89 fq->enqueue_full_events++;
96 static inline vlib_frame_queue_elt_t *
97 dpdk_get_handoff_queue_elt (u32 vlib_worker_index,
98 vlib_frame_queue_elt_t **
99 handoff_queue_elt_by_worker_index)
101 vlib_frame_queue_elt_t *elt;
103 if (handoff_queue_elt_by_worker_index [vlib_worker_index])
104 return handoff_queue_elt_by_worker_index [vlib_worker_index];
106 elt = vlib_get_handoff_queue_elt (vlib_worker_index);
108 handoff_queue_elt_by_worker_index [vlib_worker_index] = elt;
113 static inline u64 ipv4_get_key (ip4_header_t *ip)
117 hash_key = *((u64*)(&ip->address_pair)) ^ ip->protocol;
122 static inline u64 ipv6_get_key (ip6_header_t *ip)
126 hash_key = ip->src_address.as_u64[0] ^
127 rotate_left(ip->src_address.as_u64[1],13) ^
128 rotate_left(ip->dst_address.as_u64[0],26) ^
129 rotate_left(ip->dst_address.as_u64[1],39) ^
135 #define MPLS_BOTTOM_OF_STACK_BIT_MASK 0x00000100U
136 #define MPLS_LABEL_MASK 0xFFFFF000U
138 static inline u64 mpls_get_key (mpls_unicast_header_t *m)
144 /* find the bottom of the MPLS label stack. */
145 if (PREDICT_TRUE(m->label_exp_s_ttl &
146 clib_net_to_host_u32(MPLS_BOTTOM_OF_STACK_BIT_MASK))) {
147 goto bottom_lbl_found;
151 if (PREDICT_TRUE(m->label_exp_s_ttl &
152 clib_net_to_host_u32(MPLS_BOTTOM_OF_STACK_BIT_MASK))) {
153 goto bottom_lbl_found;
157 if (m->label_exp_s_ttl & clib_net_to_host_u32(MPLS_BOTTOM_OF_STACK_BIT_MASK)) {
158 goto bottom_lbl_found;
162 if (m->label_exp_s_ttl & clib_net_to_host_u32(MPLS_BOTTOM_OF_STACK_BIT_MASK)) {
163 goto bottom_lbl_found;
167 if (m->label_exp_s_ttl & clib_net_to_host_u32(MPLS_BOTTOM_OF_STACK_BIT_MASK)) {
168 goto bottom_lbl_found;
171 /* the bottom label was not found - use the last label */
172 hash_key = m->label_exp_s_ttl & clib_net_to_host_u32(MPLS_LABEL_MASK);
178 ip_ver = (*((u8 *)m) >> 4);
180 /* find out if it is IPV4 or IPV6 header */
181 if (PREDICT_TRUE(ip_ver == 4)) {
182 hash_key = ipv4_get_key((ip4_header_t *)m);
183 } else if (PREDICT_TRUE(ip_ver == 6)) {
184 hash_key = ipv6_get_key((ip6_header_t *)m);
186 /* use the bottom label */
187 hash_key = (m-1)->label_exp_s_ttl & clib_net_to_host_u32(MPLS_LABEL_MASK);
196 eth_get_key (ethernet_header_t *h0)
200 if (PREDICT_TRUE(h0->type) == clib_host_to_net_u16(ETHERNET_TYPE_IP4)) {
201 hash_key = ipv4_get_key((ip4_header_t *)(h0+1));
202 } else if (h0->type == clib_host_to_net_u16(ETHERNET_TYPE_IP6)) {
203 hash_key = ipv6_get_key((ip6_header_t *)(h0+1));
204 } else if (h0->type == clib_host_to_net_u16(ETHERNET_TYPE_MPLS_UNICAST)) {
205 hash_key = mpls_get_key((mpls_unicast_header_t *)(h0+1));
206 } else if ((h0->type == clib_host_to_net_u16(ETHERNET_TYPE_VLAN)) ||
207 (h0->type == clib_host_to_net_u16(ETHERNET_TYPE_DOT1AD))) {
208 ethernet_vlan_header_t * outer = (ethernet_vlan_header_t *)(h0 + 1);
210 outer = (outer->type == clib_host_to_net_u16(ETHERNET_TYPE_VLAN)) ?
212 if (PREDICT_TRUE(outer->type) == clib_host_to_net_u16(ETHERNET_TYPE_IP4)) {
213 hash_key = ipv4_get_key((ip4_header_t *)(outer+1));
214 } else if (outer->type == clib_host_to_net_u16 (ETHERNET_TYPE_IP6)) {
215 hash_key = ipv6_get_key((ip6_header_t *)(outer+1));
216 } else if (outer->type == clib_host_to_net_u16(ETHERNET_TYPE_MPLS_UNICAST)) {
217 hash_key = mpls_get_key((mpls_unicast_header_t *)(outer+1));
219 hash_key = outer->type;
228 #endif /* included_vnet_handoff_h */