2 * Copyright (c) 2015 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
7 * http://www.apache.org/licenses/LICENSE-2.0
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
17 vlib_node_registration_t ssvm_eth_input_node;
/* Tail of the per-packet trace record; the struct opener and its
 * sw_if_index / next_index members lie outside this excerpt. */
22 } ssvm_eth_input_trace_t;
24 /* packet trace format function */
/* Render one ssvm_eth_input_trace_t for "show trace" output.  Consumes
 * the standard (vm, node, trace) va_list triple; only the trace record
 * is actually used.  NOTE(review): the opening brace and the trailing
 * "return s;" are not visible in this excerpt. */
25 static u8 * format_ssvm_eth_input_trace (u8 * s, va_list * args)
27 CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
28 CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
29 ssvm_eth_input_trace_t * t = va_arg (*args, ssvm_eth_input_trace_t *);
31 s = format (s, "SSVM_ETH_INPUT: sw_if_index %d, next index %d",
32 t->sw_if_index, t->next_index);
36 vlib_node_registration_t ssvm_eth_input_node;
/* Error counter table: one _(SYMBOL, "description") entry per counter;
 * expanded below into both the enum and the string array. */
38 #define foreach_ssvm_eth_input_error \
39 _(NO_BUFFERS, "Rx packet drops (no buffers)")
/* Expand into SSVM_ETH_INPUT_ERROR_* enum members; the "typedef enum {"
 * opener is outside this excerpt. */
42 #define _(sym,str) SSVM_ETH_INPUT_ERROR_##sym,
43 foreach_ssvm_eth_input_error
45 SSVM_ETH_INPUT_N_ERROR,
46 } ssvm_eth_input_error_t;
/* Human-readable strings, index-matched to ssvm_eth_input_error_t; the
 * closing "};" (and #undef _) lie outside this excerpt. */
48 static char * ssvm_eth_input_error_strings[] = {
49 #define _(sym,string) string,
50 foreach_ssvm_eth_input_error
/* Next-node dispositions for packets leaving this input node; indices
 * must match the .next_nodes table in the node registration below.
 * NOTE(review): the "typedef enum {" opener is outside this excerpt. */
55 SSVM_ETH_INPUT_NEXT_DROP,
56 SSVM_ETH_INPUT_NEXT_ETHERNET_INPUT,
57 SSVM_ETH_INPUT_NEXT_IP4_INPUT,
58 SSVM_ETH_INPUT_NEXT_IP6_INPUT,
59 SSVM_ETH_INPUT_NEXT_MPLS_INPUT,
60 SSVM_ETH_INPUT_N_NEXT,
61 } ssvm_eth_input_next_t;
/* Poll one ssvm interface: drain rx chunk indices from the shared-memory
 * queue, copy chunk data into freshly allocated vlib buffers (chaining
 * multi-chunk packets), classify by ethertype, enqueue to the proper next
 * node, return the chunk indices to the shared free pool, and bump
 * error / rx counters.  Returns the number of packets processed.
 * NOTE(review): this excerpt is sampled -- several interior lines
 * (closing braces, some declarations, the queue unlock, ssvm_unlock)
 * are not visible here. */
64 ssvm_eth_device_input (ssvm_eth_main_t * em,
65 ssvm_private_t * intfc,
66 vlib_node_runtime_t * node)
68 ssvm_shared_header_t * sh = intfc->sh;
69 vlib_main_t * vm = em->vlib_main;
70 unix_shared_memory_queue_t * q;
71 ssvm_eth_queue_elt_t * elt, * elts;
73 u32 my_pid = intfc->my_pid;
75 u32 n_to_alloc = VLIB_FRAME_SIZE * 2;
76 u32 n_allocated, n_present_in_cache;
/* BUGFIX: was DPDK_RX_NEXT_ETHERNET_INPUT -- that enum belongs to the
 * DPDK driver and is not guaranteed to match this node's next indices;
 * use our own SSVM_ETH_INPUT_NEXT_ETHERNET_INPUT. */
78 u32 next_index = SSVM_ETH_INPUT_NEXT_ETHERNET_INPUT;
82 vlib_buffer_free_list_t * fl;
83 u32 n_left_to_next, * to_next;
88 vlib_buffer_t * b0, * prev;
89 u32 saved_cache_size = 0;
90 ethernet_header_t * eh0;
92 u32 n_rx_bytes = 0, l3_offset0;
93 u32 cpu_index = os_get_cpu_number();
94 u32 trace_cnt __attribute__((unused)) = vlib_get_trace_count (vm, node);
98 /* Either side down? buh-bye... */
99 if ((u64)(sh->opaque [MASTER_ADMIN_STATE_INDEX]) == 0 ||
100 (u64)(sh->opaque [SLAVE_ADMIN_STATE_INDEX]) == 0)
/* Pick the queue carrying traffic *toward* us. */
103 if (intfc->i_am_master)
104 q = (unix_shared_memory_queue_t *)(sh->opaque [TO_MASTER_Q_INDEX]);
106 q = (unix_shared_memory_queue_t *)(sh->opaque [TO_SLAVE_Q_INDEX]);
112 fl = vlib_buffer_get_free_list (vm, VLIB_BUFFER_DEFAULT_FREE_LIST_INDEX);
114 vec_reset_length (intfc->rx_queue);
/* Spin on the shared-memory queue lock, then drain all pending chunk
 * indices into intfc->rx_queue. */
117 while (__sync_lock_test_and_set (lock, 1))
119 while (q->cursize > 0)
121 unix_shared_memory_queue_sub_raw (q, (u8 *)&elt_index);
122 ASSERT(elt_index < 2048);
123 vec_add1 (intfc->rx_queue, elt_index);
125 CLIB_MEMORY_BARRIER();
/* Top up the per-main buffer cache so the copy loop rarely stalls;
 * 2x chunks is a worst-case estimate of buffers needed. */
128 n_present_in_cache = vec_len (em->buffer_cache);
130 if (vec_len (em->buffer_cache) < vec_len (intfc->rx_queue) * 2)
132 vec_validate (em->buffer_cache,
133 n_to_alloc + vec_len (em->buffer_cache) - 1);
135 vlib_buffer_alloc (vm, &em->buffer_cache [n_present_in_cache],
138 n_present_in_cache += n_allocated;
139 _vec_len (em->buffer_cache) = n_present_in_cache;
142 elts = (ssvm_eth_queue_elt_t *) (sh->opaque [CHUNK_POOL_INDEX]);
144 n_buffers = vec_len (intfc->rx_queue);
/* Standard vlib dual-loop frame-filling pattern (outer: frames,
 * inner: packets). */
147 while (n_buffers > 0)
149 vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
151 while (n_buffers > 0 && n_left_to_next > 0)
153 elt = elts + intfc->rx_queue[rx_queue_index];
155 saved_cache_size = n_present_in_cache;
/* Out of buffers: put back the frame and bail to the error path. */
156 if (PREDICT_FALSE(saved_cache_size == 0))
158 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
161 saved_bi0 = bi0 = em->buffer_cache [--n_present_in_cache];
162 b0 = vlib_get_buffer (vm, bi0);
167 vlib_buffer_init_for_free_list (b0, fl);
/* Copy chunk metadata + payload out of shared memory. */
170 b0->current_data = elt->current_data_hint;
171 b0->current_length = elt->length_this_buffer;
172 b0->total_length_not_including_first_buffer =
173 elt->total_length_not_including_first_buffer;
175 memcpy (b0->data + b0->current_data, elt->data,
/* Chain continuation buffers for multi-chunk packets. */
178 if (PREDICT_FALSE(prev != 0))
179 prev->next_buffer = bi0;
181 if (PREDICT_FALSE(elt->flags & SSVM_BUFFER_NEXT_PRESENT))
184 if (PREDICT_FALSE(n_present_in_cache == 0))
186 vlib_put_next_frame (vm, node, next_index,
190 bi0 = em->buffer_cache [--n_present_in_cache];
191 b0 = vlib_get_buffer (vm, bi0);
197 saved_cache_size = n_present_in_cache;
199 to_next[0] = saved_bi0;
/* Classify the head buffer by ethertype to choose the next node;
 * unknown types fall through to ethernet-input. */
203 b0 = vlib_get_buffer (vm, saved_bi0);
204 eh0 = vlib_buffer_get_current (b0);
206 type0 = clib_net_to_host_u16 (eh0->type);
208 next0 = SSVM_ETH_INPUT_NEXT_ETHERNET_INPUT;
210 if (type0 == ETHERNET_TYPE_IP4)
211 next0 = SSVM_ETH_INPUT_NEXT_IP4_INPUT;
212 else if (type0 == ETHERNET_TYPE_IP6)
213 next0 = SSVM_ETH_INPUT_NEXT_IP6_INPUT;
214 else if (type0 == ETHERNET_TYPE_MPLS_UNICAST)
215 next0 = SSVM_ETH_INPUT_NEXT_MPLS_INPUT;
/* When bypassing ethernet-input, advance past the L2 header. */
217 l3_offset0 = ((next0 == SSVM_ETH_INPUT_NEXT_IP4_INPUT ||
218 next0 == SSVM_ETH_INPUT_NEXT_IP6_INPUT ||
219 next0 == SSVM_ETH_INPUT_NEXT_MPLS_INPUT) ?
220 sizeof (ethernet_header_t) : 0);
222 n_rx_bytes += b0->current_length
223 + b0->total_length_not_including_first_buffer;
225 b0->current_data += l3_offset0;
226 b0->current_length -= l3_offset0;
227 b0->flags = VLIB_BUFFER_TOTAL_LENGTH_VALID;
229 vnet_buffer(b0)->sw_if_index[VLIB_RX] = intfc->vlib_hw_if_index;
230 vnet_buffer(b0)->sw_if_index[VLIB_TX] = (u32)~0;
233 * Turn this on if you run into
234 * "bad monkey" contexts, and you want to know exactly
235 * which nodes they've visited... See main.c...
237 VLIB_BUFFER_TRACE_TRAJECTORY_INIT(b0);
241 vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
242 to_next, n_left_to_next,
248 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
/* Return unused cached buffers; on the drop path the cache must have
 * been fully consumed. */
252 if (em->buffer_cache)
253 _vec_len (em->buffer_cache) = saved_cache_size;
255 ASSERT (saved_cache_size == 0);
/* Give the consumed chunk indices back to the shared free pool under
 * the ssvm lock. */
257 ssvm_lock (sh, my_pid, 2);
259 ASSERT(vec_len(intfc->rx_queue) > 0);
261 n_available = (u32)(u64)(sh->opaque[CHUNK_POOL_NFREE]);
262 elt_indices = (u32 *)(sh->opaque[CHUNK_POOL_FREELIST_INDEX]);
264 memcpy (&elt_indices[n_available], intfc->rx_queue,
265 vec_len (intfc->rx_queue) * sizeof (u32));
267 n_available += vec_len (intfc->rx_queue);
268 sh->opaque[CHUNK_POOL_NFREE] = (void *) (u64) n_available;
272 vlib_error_count (vm, node->node_index, SSVM_ETH_INPUT_ERROR_NO_BUFFERS,
275 vlib_increment_combined_counter
276 (vnet_get_main()->interface_main.combined_sw_if_counters
277 + VNET_INTERFACE_COUNTER_RX, cpu_index,
278 intfc->vlib_hw_if_index,
279 rx_queue_index, n_rx_bytes);
281 return rx_queue_index;
/* Input-node dispatch function: polls every configured ssvm interface
 * and accumulates the per-interface rx packet counts.  NOTE(review):
 * the opening brace and the final "return n_rx_packets;" (and closing
 * brace) lie outside this excerpt. */
285 ssvm_eth_input_node_fn (vlib_main_t * vm,
286 vlib_node_runtime_t * node,
287 vlib_frame_t * frame)
289 ssvm_eth_main_t * em = &ssvm_eth_main;
290 ssvm_private_t * intfc;
291 uword n_rx_packets = 0;
293 vec_foreach (intfc, em->intfcs)
295 n_rx_packets += ssvm_eth_device_input (em, intfc, node);
/* Node registration: a polling input node, created DISABLED and enabled
 * when an ssvm interface comes up.  The .next_nodes indices must match
 * ssvm_eth_input_next_t above.  NOTE(review): the ".next_nodes = {"
 * opener line and the closing "};" lie outside this excerpt. */
301 VLIB_REGISTER_NODE (ssvm_eth_input_node) = {
302 .function = ssvm_eth_input_node_fn,
303 .name = "ssvm_eth_input",
304 .vector_size = sizeof (u32),
305 .format_trace = format_ssvm_eth_input_trace,
306 .type = VLIB_NODE_TYPE_INPUT,
307 .state = VLIB_NODE_STATE_DISABLED,
309 .n_errors = ARRAY_LEN(ssvm_eth_input_error_strings),
310 .error_strings = ssvm_eth_input_error_strings,
312 .n_next_nodes = SSVM_ETH_INPUT_N_NEXT,
314 /* edit / add dispositions here */
316 [SSVM_ETH_INPUT_NEXT_DROP] = "error-drop",
317 [SSVM_ETH_INPUT_NEXT_ETHERNET_INPUT] = "ethernet-input",
318 [SSVM_ETH_INPUT_NEXT_IP4_INPUT] = "ip4-input",
319 [SSVM_ETH_INPUT_NEXT_IP6_INPUT] = "ip6-input",
320 [SSVM_ETH_INPUT_NEXT_MPLS_INPUT] = "mpls-gre-input",