/*
 *------------------------------------------------------------------
 * Copyright (c) 2016 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */
#include <stdint.h>
#include <net/if.h>
#include <sys/ioctl.h>

#include <vlib/vlib.h>
#include <vlib/unix/unix.h>
#include <vnet/ethernet/ethernet.h>

#include <vnet/devices/netmap/net_netmap.h>
#include <vnet/devices/netmap/netmap.h>
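/*
 * netmap-input: input node for netmap interfaces.
 *
 * A netmap RX ring is a memory-mapped circular array of slots shared
 * with the kernel; nm_ring_space() reports how many slots between
 * ring->cur and ring->tail carry received frames.  This node copies
 * each frame out of its netmap buffer into one or more vlib buffers,
 * advances head/cur to hand the slots back to the kernel, and enqueues
 * the packets (by default) to ethernet-input.
 */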
#define foreach_netmap_input_error

typedef enum
{
#define _(f,s) NETMAP_INPUT_ERROR_##f,
  foreach_netmap_input_error
#undef _
  NETMAP_INPUT_N_ERROR,
} netmap_input_error_t;

static char *netmap_input_error_strings[] = {
#define _(n,s) s,
  foreach_netmap_input_error
#undef _
};
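/* The error list above uses the usual VPP X-macro pattern: each
   _(symbol, string) entry in foreach_netmap_input_error expands to a
   NETMAP_INPUT_ERROR_<symbol> enum value and a counter string.  The
   list is currently empty. */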
enum
{
  NETMAP_INPUT_NEXT_DROP,
  NETMAP_INPUT_NEXT_ETHERNET_INPUT,
  NETMAP_INPUT_N_NEXT,
};
typedef struct
{
  u32 next_index;
  u32 hw_if_index;
  struct netmap_slot slot;
} netmap_input_trace_t;
static u8 *
format_netmap_input_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  netmap_input_trace_t *t = va_arg (*args, netmap_input_trace_t *);
  uword indent = format_get_indent (s);

  s = format (s, "netmap: hw_if_index %d next-index %d",
              t->hw_if_index, t->next_index);
  s = format (s, "\n%Uslot: flags 0x%x len %u buf_idx %u",
              format_white_space, indent + 2,
              t->slot.flags, t->slot.len, t->slot.buf_idx);
  return s;
}
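/*
 * Frames larger than one vlib buffer are represented as a chain: the
 * head buffer accumulates total_length_not_including_first_buffer and
 * each buffer links to the next via next_buffer plus the
 * VLIB_BUFFER_NEXT_PRESENT flag.  buffer_add_to_chain() appends one
 * buffer to such a chain and, when built with DPDK, keeps the
 * underlying rte_mbuf segment list consistent as well.
 */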
always_inline void
buffer_add_to_chain (vlib_main_t * vm, u32 bi, u32 first_bi, u32 prev_bi)
{
  vlib_buffer_t *b = vlib_get_buffer (vm, bi);
  vlib_buffer_t *first_b = vlib_get_buffer (vm, first_bi);
  vlib_buffer_t *prev_b = vlib_get_buffer (vm, prev_bi);

  /* update first buffer */
  first_b->total_length_not_including_first_buffer += b->current_length;

  /* update previous buffer */
  prev_b->next_buffer = bi;
  prev_b->flags |= VLIB_BUFFER_NEXT_PRESENT;

  /* update current buffer */
  b->next_buffer = 0;

#if DPDK > 0
  struct rte_mbuf *mbuf = rte_mbuf_from_vlib_buffer (b);
  struct rte_mbuf *first_mbuf = rte_mbuf_from_vlib_buffer (first_b);
  struct rte_mbuf *prev_mbuf = rte_mbuf_from_vlib_buffer (prev_b);
  first_mbuf->nb_segs++;
  prev_mbuf->next = mbuf;
  mbuf->data_len = b->current_length;
  mbuf->data_off = RTE_PKTMBUF_HEADROOM + b->current_data;
  mbuf->next = 0;
#endif
}
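/*
 * Per-interface receive path.  A minimal sketch of the loop structure,
 * assuming a single ring with 'r' ready slots:
 *
 *   r = nm_ring_space (ring);           // slots between cur and tail
 *   while (r--)
 *     {
 *       // copy slot payload into vlib buffer(s), enqueue downstream
 *       cur_slot_index = (cur_slot_index + 1) % ring->num_slots;
 *     }
 *   ring->head = ring->cur = cur_slot_index;  // return slots to kernel
 *
 * The real loop below additionally batches packets into frames of
 * n_left_to_next and chains buffers for frames larger than one buffer.
 */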
always_inline uword
netmap_device_input_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
                        vlib_frame_t * frame, netmap_if_t * nif)
{
  u32 next_index = NETMAP_INPUT_NEXT_ETHERNET_INPUT;
  uword n_trace = vlib_get_trace_count (vm, node);
  netmap_main_t *nm = &netmap_main;
  u32 n_rx_packets = 0;
  u32 n_rx_bytes = 0;
  u32 *to_next = 0;
  u32 n_free_bufs;
  struct netmap_ring *ring;
  int cur_ring;
  u32 cpu_index = os_get_cpu_number ();
  u32 n_buffer_bytes = vlib_buffer_free_list_buffer_size (vm,
                                                          VLIB_BUFFER_DEFAULT_FREE_LIST_INDEX);
  if (nif->per_interface_next_index != ~0)
    next_index = nif->per_interface_next_index;
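  /* Top up this thread's cache of pre-allocated buffer indices before
     touching the rings so the per-packet loop never calls the
     allocator; if allocation falls short, n_free_bufs bounds how many
     slots we drain on this pass. */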
  n_free_bufs = vec_len (nm->rx_buffers[cpu_index]);
  if (PREDICT_FALSE (n_free_bufs < VLIB_FRAME_SIZE))
    {
      vec_validate (nm->rx_buffers[cpu_index],
                    VLIB_FRAME_SIZE + n_free_bufs - 1);
      n_free_bufs +=
        vlib_buffer_alloc (vm, &nm->rx_buffers[cpu_index][n_free_bufs],
                           VLIB_FRAME_SIZE);
      _vec_len (nm->rx_buffers[cpu_index]) = n_free_bufs;
    }
  cur_ring = nif->first_rx_ring;
  while (cur_ring <= nif->last_rx_ring && n_free_bufs)
    {
      int r = 0;
      u32 cur_slot_index;
      ring = NETMAP_RXRING (nif->nifp, cur_ring);
      r = nm_ring_space (ring);

      /* nothing to read on this ring; move on */
      if (!r)
        {
          cur_ring++;
          continue;
        }

      /* don't consume more slots than we have buffers for */
      if (r > n_free_bufs)
        r = n_free_bufs;

      cur_slot_index = ring->cur;
      while (r)
        {
          u32 n_left_to_next;
          u32 next0 = next_index;
          vlib_get_next_frame (vm, node, next_index, to_next,
                               n_left_to_next);
          while (r && n_left_to_next)
            {
              vlib_buffer_t *b0, *first_b0 = 0;
              u32 offset = 0;
              u32 bi0 = 0, first_bi0 = 0, prev_bi0;
              u32 next_slot_index = (cur_slot_index + 1) % ring->num_slots;
              u32 next2_slot_index = (cur_slot_index + 2) % ring->num_slots;
              struct netmap_slot *slot = &ring->slot[cur_slot_index];
              u32 data_len = slot->len;

              /* prefetch 2 slots in advance */
              CLIB_PREFETCH (&ring->slot[next2_slot_index],
                             CLIB_CACHE_LINE_BYTES, LOAD);
              /* prefetch start of next packet */
              CLIB_PREFETCH (NETMAP_BUF (ring,
                                         ring->slot[next_slot_index].buf_idx),
                             CLIB_CACHE_LINE_BYTES, LOAD);
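              /* copy the frame into as many vlib buffers as needed;
                 e.g. (hypothetical sizes) with 2048-byte buffers a
                 2500-byte frame becomes a two-buffer chain holding
                 2048 and 452 bytes */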
              while (data_len && n_free_bufs)
                {
                  /* grab free buffer */
                  u32 last_empty_buffer =
                    vec_len (nm->rx_buffers[cpu_index]) - 1;
                  prev_bi0 = bi0;
                  bi0 = nm->rx_buffers[cpu_index][last_empty_buffer];
                  b0 = vlib_get_buffer (vm, bi0);
                  _vec_len (nm->rx_buffers[cpu_index]) = last_empty_buffer;
                  n_free_bufs--;

                  /* copy data */
                  u32 bytes_to_copy =
                    data_len > n_buffer_bytes ? n_buffer_bytes : data_len;
                  b0->current_data = 0;
                  clib_memcpy (vlib_buffer_get_current (b0),
                               (u8 *) NETMAP_BUF (ring, slot->buf_idx) +
                               offset, bytes_to_copy);

                  /* fill buffer header */
                  b0->current_length = bytes_to_copy;

                  if (offset == 0)
                    {
#if DPDK > 0
                      struct rte_mbuf *mb = rte_mbuf_from_vlib_buffer (b0);
                      rte_pktmbuf_data_len (mb) = b0->current_length;
                      rte_pktmbuf_pkt_len (mb) = b0->current_length;
#endif
                      b0->total_length_not_including_first_buffer = 0;
                      b0->flags = VLIB_BUFFER_TOTAL_LENGTH_VALID;
                      vnet_buffer (b0)->sw_if_index[VLIB_RX] =
                        nif->sw_if_index;
                      vnet_buffer (b0)->sw_if_index[VLIB_TX] = (u32) ~0;
                      first_bi0 = bi0;
                      first_b0 = vlib_get_buffer (vm, first_bi0);
                    }
                  else
                    buffer_add_to_chain (vm, bi0, first_bi0, prev_bi0);

                  offset += bytes_to_copy;
                  data_len -= bytes_to_copy;
                }
              /* trace */
              VLIB_BUFFER_TRACE_TRAJECTORY_INIT (first_b0);
              if (PREDICT_FALSE (n_trace > 0))
                {
                  if (PREDICT_TRUE (first_b0 != 0))
                    {
                      netmap_input_trace_t *tr;
                      vlib_trace_buffer (vm, node, next0, first_b0,
                                         /* follow_chain */ 0);
                      vlib_set_trace_count (vm, node, --n_trace);
                      tr = vlib_add_trace (vm, node, first_b0,
                                           sizeof (*tr));
                      tr->next_index = next0;
                      tr->hw_if_index = nif->hw_if_index;
                      memcpy (&tr->slot, slot,
                              sizeof (struct netmap_slot));
                    }
                }
              /* next packet */
              n_rx_packets++;
              n_rx_bytes += slot->len;
              to_next[0] = first_bi0;
              to_next += 1;
              n_left_to_next--;

              /* enqueue and take next packet; the buffer index must
                 already be stored in the frame before this call, which
                 may relocate it if next0 differs from next_index */
              vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                               to_next, n_left_to_next,
                                               first_bi0, next0);

              /* next slot */
              r--;
              cur_slot_index = next_slot_index;
            }
          vlib_put_next_frame (vm, node, next_index, n_left_to_next);
        }
      ring->head = ring->cur = cur_slot_index;
      cur_ring++;
    }
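  /* head/cur were advanced past the consumed slots above; NIOCRXSYNC
     below lets the kernel reclaim those slots and publish newly
     received frames without blocking. */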
  if (n_rx_packets)
    ioctl (nif->fd, NIOCRXSYNC, NULL);

  vlib_increment_combined_counter
    (vnet_get_main ()->interface_main.combined_sw_if_counters
     + VNET_INTERFACE_COUNTER_RX,
     os_get_cpu_number (), nif->hw_if_index, n_rx_packets, n_rx_bytes);

  return n_rx_packets;
}
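/*
 * Node dispatch function.  Interfaces are statically balanced across
 * input threads: with input_cpu_count = 2 and input_cpu_first_index = 1,
 * for example, thread 1 polls interfaces 0, 2, 4, ... and thread 2
 * polls interfaces 1, 3, 5, ...
 */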
static uword
netmap_input_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
                 vlib_frame_t * frame)
{
  int i;
  u32 n_rx_packets = 0;
  u32 cpu_index = os_get_cpu_number ();
  netmap_main_t *nm = &netmap_main;
  netmap_if_t *nmi;

  for (i = 0; i < vec_len (nm->interfaces); i++)
    {
      nmi = vec_elt_at_index (nm->interfaces, i);
      if (nmi->is_admin_up &&
          (i % nm->input_cpu_count) ==
          (cpu_index - nm->input_cpu_first_index))
        n_rx_packets += netmap_device_input_fn (vm, node, frame, nmi);
    }

  return n_rx_packets;
}
VLIB_REGISTER_NODE (netmap_input_node) = {
  .function = netmap_input_fn,
  .name = "netmap-input",
  .format_trace = format_netmap_input_trace,
  .type = VLIB_NODE_TYPE_INPUT,
  /* default state is INTERRUPT mode, switch to POLLING if worker threads are enabled */
  .state = VLIB_NODE_STATE_INTERRUPT,
  .n_errors = NETMAP_INPUT_N_ERROR,
  .error_strings = netmap_input_error_strings,

  .n_next_nodes = NETMAP_INPUT_N_NEXT,
  .next_nodes = {
    [NETMAP_INPUT_NEXT_DROP] = "error-drop",
    [NETMAP_INPUT_NEXT_ETHERNET_INPUT] = "ethernet-input",
  },
};
VLIB_NODE_FUNCTION_MULTIARCH (netmap_input_node, netmap_input_fn)
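/*
 * Operational note (assuming the plugin's companion CLI in cli.c): once
 * a netmap interface is created and marked admin-up, the dispatch loop
 * above picks it up on its owning input thread, and "show trace" then
 * displays the per-slot flags/len/buf_idx recorded by the trace hook.
 */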