2 * Copyright (c) 2015 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
7 * http://www.apache.org/licenses/LICENSE-2.0
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
/* Singleton driver state for the SSVM ethernet device (interfaces, config,
 * chunk cache, vnet/vlib main pointers). Referenced throughout this file. */
17 ssvm_eth_main_t ssvm_eth_main;
/* X-macro list of TX-path error counters; expanded twice below: once to
 * build the ssvm_eth_tx_func_error_t enum, once (later in this file) to
 * build the matching human-readable string table. */
19 #define foreach_ssvm_eth_tx_func_error \
20 _(RING_FULL, "Tx packet drops (ring full)") \
21 _(NO_BUFFERS, "Tx packet drops (no buffers)") \
22 _(ADMIN_DOWN, "Tx packet drops (admin down)")
/* Enum of per-error counter indices (SSVM_ETH_TX_ERROR_RING_FULL, ...).
 * NOTE(review): the enum's opening line is not visible in this chunk. */
25 #define _(f,s) SSVM_ETH_TX_ERROR_##f,
26 foreach_ssvm_eth_tx_func_error
29 } ssvm_eth_tx_func_error_t;
/* Forward declaration: needed by ssvm_eth_create() below, which passes this
 * callback to ethernet_register_interface(). Definition near end of file.
 * NOTE(review): the trailing parameter/semicolon line is not visible here. */
31 static u32 ssvm_eth_flag_change (vnet_main_t * vnm,
32 vnet_hw_interface_t * hi,
/*
 * ssvm_eth_create - create one shared-memory ethernet interface.
 *
 * @param em        driver main (segment sizing / VA layout come from here)
 * @param name      segment name for the shared-memory region
 * @param is_master nonzero when this process creates the segment; zero when
 *                  it attaches as slave (presumably — TODO confirm: the
 *                  slave/master branch lines are partially missing here)
 * @return 0 on success, VNET_API_ERROR_* on failure.
 *
 * NOTE(review): this chunk is missing interior lines (braces, error checks);
 * comments below describe only the visible statements.
 */
35 int ssvm_eth_create (ssvm_eth_main_t * em, u8 * name, int is_master)
37 ssvm_private_t * intfc;
40 unix_shared_memory_queue_t * q;
41 ssvm_shared_header_t * sh;
42 ssvm_eth_queue_elt_t * elts;
/* Allocate a new per-interface record in the driver's interface vector. */
47 vec_add2 (em->intfcs, intfc, 1);
49 intfc->ssvm_size = em->segment_size;
50 intfc->i_am_master = 1;
/* Slave path: attach to an existing segment (bounded wait), then skip
 * straight to vnet interface registration — the master laid out the queues. */
54 rv = ssvm_slave_init (intfc, 20 /* timeout in seconds */);
57 goto create_vnet_interface;
/* Master path: carve the next base VA for this segment, then create it. */
60 intfc->requested_va = em->next_base_va;
61 em->next_base_va += em->segment_size;
62 rv = ssvm_master_init (intfc, intfc - em->intfcs /* master index */);
67 /* OK, segment created, set up queues and so forth. */
/* All structures below are allocated on the shared segment's heap so the
 * peer process can see them; restore our heap afterwards. */
70 oldheap = ssvm_push_heap (sh);
/* Two unidirectional rings of u32 chunk indices: slave->master ... */
72 q = unix_shared_memory_queue_init (em->queue_elts, sizeof (u32),
73 0 /* consumer pid not interesting */,
74 0 /* signal not sent */);
75 sh->opaque [TO_MASTER_Q_INDEX] = (void *)q;
/* ... and master->slave. Both are published via the shared header. */
76 q = unix_shared_memory_queue_init (em->queue_elts, sizeof (u32),
77 0 /* consumer pid not interesting */,
78 0 /* signal not sent */);
79 sh->opaque [TO_SLAVE_Q_INDEX] = (void *)q;
82 * Preallocate the requested number of buffer chunks
83 * There must be a better way to do this, etc.
84 * Add some slop to avoid pool reallocation, which will not go well
/* Chunk pool + freelist of indices, both cache-line aligned in shared mem. */
89 vec_validate_aligned (elts, em->nbuffers - 1, CLIB_CACHE_LINE_BYTES);
90 vec_validate_aligned (elt_indices, em->nbuffers - 1, CLIB_CACHE_LINE_BYTES);
/* NOTE(review): the loop body initializing elt_indices is missing here. */
92 for (i = 0; i < em->nbuffers; i++)
/* Publish pool, freelist, and free count through the shared header so the
 * peer (and the TX path below) can find them. Count is smuggled via cast. */
95 sh->opaque [CHUNK_POOL_INDEX] = (void *) elts;
96 sh->opaque [CHUNK_POOL_FREELIST_INDEX] = (void *) elt_indices;
97 sh->opaque [CHUNK_POOL_NFREE] = (void *) em->nbuffers;
99 ssvm_pop_heap (oldheap);
101 create_vnet_interface:
/* Synthesize a locally-administered MAC: byte 2 encodes master/slave role,
 * byte 5 the master index, so the pair gets distinct addresses. */
105 memset (enet_addr, 0, sizeof (enet_addr));
108 enet_addr[2] = is_master;
109 enet_addr[5] = sh->master_index;
111 e = ethernet_register_interface
112 (em->vnet_main, ssvm_eth_device_class.index,
114 /* ethernet address */ enet_addr,
115 &intfc->vlib_hw_if_index,
116 ssvm_eth_flag_change);
/* Registration failure: report and bail. Segment is leaked (see $$$$). */
120 clib_error_report (e);
121 /* $$$$ unmap offending region? */
122 return VNET_API_ERROR_INVALID_INTERFACE;
125 /* Declare link up */
126 vnet_hw_interface_set_flags (em->vnet_main, intfc->vlib_hw_if_index,
127 VNET_HW_INTERFACE_FLAG_LINK_UP);
129 /* Let the games begin... */
/*
 * ssvm_config - "ssvm_eth" startup-config handler.
 *
 * Parses base-va / segment-size / nbuffers / queue-elts / slave and one or
 * more segment names, then creates an interface per name and switches the
 * input node to polling. Registered via VLIB_CONFIG_FUNCTION below.
 * NOTE(review): interior lines (braces, error returns) are missing here.
 */
135 static clib_error_t *
136 ssvm_config (vlib_main_t * vm, unformat_input_t * input)
141 ssvm_eth_main_t * em = &ssvm_eth_main;
143 while (unformat_check_input(input) != UNFORMAT_END_OF_INPUT)
145 if (unformat (input, "base-va %llx", &em->next_base_va))
147 else if (unformat (input, "segment-size %lld", &em->segment_size))
/* Round the configured segment size up to a power of two. */
148 em->segment_size = 1ULL << (max_log2 (em->segment_size));
149 else if (unformat (input, "nbuffers %lld", &em->nbuffers))
151 else if (unformat (input, "queue-elts %lld", &em->queue_elts))
153 else if (unformat (input, "slave"))
/* Any bare token is taken as a segment name to instantiate. */
155 else if (unformat (input, "%s", &name))
156 vec_add1 (em->names, name);
161 /* No configured instances, we're done... */
162 if (vec_len (em->names) == 0)
/* One interface per configured name; abort on the first failure. */
165 for (i = 0; i < vec_len (em->names); i++)
167 rv = ssvm_eth_create (em, em->names[i], is_master);
169 return clib_error_return (0, "ssvm_eth_create '%s' failed, error %d",
/* Shared-memory queues have no interrupt source: poll the RX node. */
173 vlib_node_set_state (vm, ssvm_eth_input_node.index, VLIB_NODE_STATE_POLLING);
178 VLIB_CONFIG_FUNCTION (ssvm_config, "ssvm_eth");
/*
 * ssvm_eth_init - one-time plugin init: sanity-check layout assumptions
 * and install default configuration values (overridable by ssvm_config).
 * Registered via VLIB_INIT_FUNCTION below.
 */
181 static clib_error_t * ssvm_eth_init (vlib_main_t * vm)
183 ssvm_eth_main_t * em = &ssvm_eth_main;
/* The rings exchange cache-line-granular chunk elements; warn loudly if the
 * element struct is not an exact multiple of the cache-line size. */
185 if (((sizeof(ssvm_eth_queue_elt_t) / CLIB_CACHE_LINE_BYTES)
186 * CLIB_CACHE_LINE_BYTES) != sizeof(ssvm_eth_queue_elt_t))
187 clib_warning ("ssvm_eth_queue_elt_t size %d not a multiple of %d",
188 sizeof(ssvm_eth_queue_elt_t), CLIB_CACHE_LINE_BYTES);
191 em->vnet_main = vnet_get_main();
192 em->elog_main = &vm->elog_main;
194 /* default config param values... */
/* Default mapping address for the first shared segment (master side). */
196 em->next_base_va = 0x600000000ULL;
198 * Allocate 2 full superframes in each dir (256 x 2 x 2 x 2048 bytes),
199 * 2mb; double that so we have plenty of space... 4mb
/* 8 MB default segment. NOTE(review): comment above says 4mb but the code
 * sets 8<<20 — presumably the comment is stale; confirm intent. */
201 em->segment_size = 8<<20;
203 em->queue_elts = 512;
207 VLIB_INIT_FUNCTION (ssvm_eth_init);
/* Human-readable counter names, generated from the same X-macro list as the
 * error enum so the two stay in sync. Wired into the device class below. */
209 static char * ssvm_eth_tx_func_error_strings[] = {
211 foreach_ssvm_eth_tx_func_error
/* format callback: render the interface name as "ssvmEthernet<instance>". */
215 static u8 * format_ssvm_eth_device_name (u8 * s, va_list * args)
217 u32 i = va_arg (*args, u32);
219 s = format (s, "ssvmEthernet%d", i);
/* format callback for "show hardware": static device description only. */
223 static u8 * format_ssvm_eth_device (u8 * s, va_list * args)
225 s = format (s, "SSVM Ethernet");
/* TX trace formatter — placeholder; no trace data is decoded yet. */
229 static u8 * format_ssvm_eth_tx_trace (u8 * s, va_list * args)
231 s = format (s, "Unimplemented...");
/*
 * ssvm_eth_interface_tx - device TX function.
 *
 * Copies each vlib buffer chain into one or more fixed-size shared-memory
 * chunks taken from a per-thread chunk cache (refilled from the shared
 * freelist under the ssvm lock), then enqueues the head chunk index on the
 * peer-direction ring. Drops (with counters) on ring-full, no-buffers, or
 * admin-down.
 *
 * NOTE(review): many interior lines (braces, frees, loop heads) are missing
 * from this chunk; comments describe only the visible statements.
 */
237 ssvm_eth_interface_tx (vlib_main_t * vm,
238 vlib_node_runtime_t * node,
241 ssvm_eth_main_t * em = &ssvm_eth_main;
242 vnet_interface_output_runtime_t * rd = (void *) node->runtime_data;
243 ssvm_private_t * intfc = vec_elt_at_index (em->intfcs, rd->dev_instance);
244 ssvm_shared_header_t * sh = intfc->sh;
245 unix_shared_memory_queue_t * q;
248 ssvm_eth_queue_elt_t * elts, * elt, * prev_elt;
249 u32 my_pid = intfc->my_pid;
252 u32 size_this_buffer;
253 u32 chunks_this_buffer;
254 u8 i_am_master = intfc->i_am_master;
256 int is_ring_full, interface_down;
258 volatile u32 *queue_lock;
259 u32 n_to_alloc = VLIB_FRAME_SIZE;
260 u32 n_allocated, n_present_in_cache, n_available;
/* Master transmits on the to-slave ring and vice versa. */
264 q = (unix_shared_memory_queue_t *)sh->opaque [TO_SLAVE_Q_INDEX];
266 q = (unix_shared_memory_queue_t *)sh->opaque [TO_MASTER_Q_INDEX];
/* The queue's first word doubles as a spinlock word (see test-and-set
 * below). NOTE(review): confirm this layout against the queue struct. */
268 queue_lock = (u32 *) q;
270 from = vlib_frame_vector_args (f);
271 n_left = f->n_vectors;
275 n_present_in_cache = vec_len (em->chunk_cache);
277 /* admin / link up/down check */
/* Either side administratively down => drop the whole frame (counted). */
278 if ((u64)(sh->opaque [MASTER_ADMIN_STATE_INDEX]) == 0 ||
279 (u64)(sh->opaque [SLAVE_ADMIN_STATE_INDEX]) == 0)
/* Refill path: take the segment lock, then bulk-move free chunk indices
 * from the shared freelist into our local cache. */
285 ssvm_lock (sh, my_pid, 1);
287 elts = (ssvm_eth_queue_elt_t *) (sh->opaque [CHUNK_POOL_INDEX]);
288 elt_indices = (u32 *) (sh->opaque [CHUNK_POOL_FREELIST_INDEX]);
289 n_available = (u32) (u64) (sh->opaque [CHUNK_POOL_NFREE]);
/* Worst case each packet needs 2 chunks; refill if the cache may run dry. */
291 if (n_present_in_cache < n_left*2)
293 vec_validate (em->chunk_cache,
294 n_to_alloc + n_present_in_cache - 1)
296 n_allocated = n_to_alloc < n_available ? n_to_alloc : n_available;
298 if (PREDICT_TRUE(n_allocated > 0))
/* Grab indices from the tail of the shared freelist. */
300 memcpy (&em->chunk_cache[n_present_in_cache],
301 &elt_indices[n_available - n_allocated],
302 sizeof(u32) * n_allocated);
305 n_present_in_cache += n_allocated;
306 n_available -= n_allocated;
/* Publish the updated free count back through the opaque slot. */
307 sh->opaque [CHUNK_POOL_NFREE] = (void *) (u64) n_available;
308 _vec_len (em->chunk_cache) = n_present_in_cache;
/* Per-packet loop (head lines missing in this view). */
316 b0 = vlib_get_buffer (vm, bi0);
318 size_this_buffer = vlib_buffer_length_in_chain (vm, b0);
/* Round total chain length up to whole SSVM_BUFFER_SIZE chunks. */
319 chunks_this_buffer = (size_this_buffer + (SSVM_BUFFER_SIZE - 1))
322 /* If we're not going to be able to enqueue the buffer, tail drop. */
323 if (q->cursize >= q->maxsize)
/* Copy the vlib chain, one SSVM chunk per segment. */
331 for (i = 0; i < chunks_this_buffer; i++)
333 if (PREDICT_FALSE (n_present_in_cache == 0))
/* Pop a chunk index off the local cache (LIFO). */
336 elt_index = em->chunk_cache[--n_present_in_cache];
337 elt = elts + elt_index;
339 elt->type = SSVM_PACKET_TYPE;
/* Mirror vlib buffer metadata into the shared chunk header. */
341 elt->total_length_not_including_first_buffer =
342 b0->total_length_not_including_first_buffer;
343 elt->length_this_buffer = b0->current_length;
344 elt->current_data_hint = b0->current_data;
/* Ownership flips to the peer once enqueued. */
345 elt->owner = !i_am_master;
348 memcpy (elt->data, b0->data + b0->current_data, b0->current_length);
/* Link multi-chunk packets into a chain of chunk indices. */
350 if (PREDICT_FALSE (prev_elt != 0))
351 prev_elt->next_index = elt - elts;
353 if (PREDICT_FALSE(i < (chunks_this_buffer-1)))
355 elt->flags = SSVM_BUFFER_NEXT_PRESENT;
356 ASSERT (b0->flags & VLIB_BUFFER_NEXT_PRESENT);
357 b0 = vlib_get_buffer (vm, b0->next_buffer);
/* Spin on the queue lock word, then raw-enqueue the head chunk index;
 * barrier before (presumably) releasing the lock — release not visible. */
362 while (__sync_lock_test_and_set (queue_lock, 1))
365 unix_shared_memory_queue_add_raw (q, (u8 *)&elt_index);
366 CLIB_MEMORY_BARRIER();
/* Drop accounting: attribute remaining packets to the right counter. */
374 if (PREDICT_FALSE(n_left))
377 vlib_error_count (vm, node->node_index, SSVM_ETH_TX_ERROR_RING_FULL,
379 else if (interface_down)
380 vlib_error_count (vm, node->node_index, SSVM_ETH_TX_ERROR_ADMIN_DOWN,
383 vlib_error_count (vm, node->node_index, SSVM_ETH_TX_ERROR_NO_BUFFERS,
/* Free the un-sent tail of the frame... */
386 vlib_buffer_free (vm, from, n_left);
/* ...or (other path) the entire frame; buffers were copied, not loaned. */
389 vlib_buffer_free (vm, vlib_frame_vector_args (f), f->n_vectors);
/* Persist the (possibly shrunk) chunk-cache length for the next frame. */
391 if (PREDICT_TRUE(vec_len(em->chunk_cache)))
392 _vec_len(em->chunk_cache) = n_present_in_cache;
/* clear_counters device-class hook — intentionally a no-op for now. */
397 static void ssvm_eth_clear_hw_interface_counters (u32 instance)
399 /* Nothing for now */
/*
 * Admin up/down hook: publish our side's admin state into the shared
 * header so the peer's TX path can drop early instead of wasting chunks.
 * NOTE(review): the visible lines never assign `sh` (presumably
 * `sh = intfc->sh;` is among the missing lines) — confirm in full source.
 */
402 static clib_error_t *
403 ssvm_eth_interface_admin_up_down (vnet_main_t * vnm, u32 hw_if_index, u32 flags)
405 vnet_hw_interface_t * hif = vnet_get_hw_interface (vnm, hw_if_index);
406 uword is_up = (flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP) != 0;
407 ssvm_eth_main_t * em = &ssvm_eth_main;
408 ssvm_private_t * intfc = vec_elt_at_index (em->intfcs, hif->dev_instance);
409 ssvm_shared_header_t * sh;
411 /* publish link-state in shared-memory, to discourage buffer-wasting */
/* Each side owns its own opaque slot; the TX path checks both. */
413 if (intfc->i_am_master)
414 sh->opaque [MASTER_ADMIN_STATE_INDEX] = (void *) is_up;
416 sh->opaque [SLAVE_ADMIN_STATE_INDEX] = (void *) is_up;
/* Sub-interface add/del hook — no sub-interface support; no-op for now. */
421 static clib_error_t *
422 ssvm_eth_subif_add_del_function (vnet_main_t * vnm,
424 struct vnet_sw_interface_t * st,
427 /* Nothing for now */
432 * Dynamically redirect all pkts from a specific interface
433 * to the specified node
436 ssvm_eth_set_interface_next_node (vnet_main_t *vnm, u32 hw_if_index,
439 ssvm_eth_main_t * em = &ssvm_eth_main;
440 vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
/* NOTE(review): pool_elt_at_index here vs vec_elt_at_index elsewhere in
 * this file for the same em->intfcs vector — confirm which is intended. */
441 ssvm_private_t * intfc = pool_elt_at_index (em->intfcs, hw->dev_instance);
443 /* Shut off redirection */
/* ~0 sentinel disables redirection (stored as-is in next_index)... */
444 if (node_index == ~0)
446 intfc->per_interface_next_index = node_index;
/* ...otherwise register the target as a next node of the input node and
 * remember the resulting arc index. */
450 intfc->per_interface_next_index =
451 vlib_node_add_next (em->vlib_main, ssvm_eth_input_node.index, node_index);
/* Ethernet flag-change callback (registered in ssvm_eth_create) — no
 * flag handling required yet; placeholder. */
454 static u32 ssvm_eth_flag_change (vnet_main_t * vnm,
455 vnet_hw_interface_t * hi,
458 /* nothing for now */
/* Device-class registration: binds the TX function, error-counter strings,
 * format/clear/admin/subif/redirect hooks defined above into vnet.
 * no_flatten_output_chains: TX copies chains itself, chunk by chunk. */
462 VNET_DEVICE_CLASS (ssvm_eth_device_class) = {
464 .tx_function = ssvm_eth_interface_tx,
465 .tx_function_n_errors = SSVM_ETH_TX_N_ERROR,
466 .tx_function_error_strings = ssvm_eth_tx_func_error_strings,
467 .format_device_name = format_ssvm_eth_device_name,
468 .format_device = format_ssvm_eth_device,
469 .format_tx_trace = format_ssvm_eth_tx_trace,
470 .clear_counters = ssvm_eth_clear_hw_interface_counters,
471 .admin_up_down_function = ssvm_eth_interface_admin_up_down,
472 .subif_add_del_function = ssvm_eth_subif_add_del_function,
473 .rx_redirect_to_node = ssvm_eth_set_interface_next_node,
474 .no_flatten_output_chains = 1,