/*
 *------------------------------------------------------------------
 * Copyright (c) 2016 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>

#include <vlib/vlib.h>
#include <vlib/unix/unix.h>
#include <vnet/ethernet/ethernet.h>

#include <vnet/devices/netmap/netmap.h>
24 #define foreach_netmap_input_error
27 #define _(f,s) NETMAP_INPUT_ERROR_##f,
28 foreach_netmap_input_error
31 } netmap_input_error_t;
33 static char * netmap_input_error_strings[] = {
35 foreach_netmap_input_error
40 NETMAP_INPUT_NEXT_DROP,
41 NETMAP_INPUT_NEXT_ETHERNET_INPUT,
48 struct netmap_slot slot;
49 } netmap_input_trace_t;
51 static u8 * format_netmap_input_trace (u8 * s, va_list * args)
53 CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
54 CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
55 netmap_input_trace_t * t = va_arg (*args, netmap_input_trace_t *);
56 uword indent = format_get_indent (s);
58 s = format (s, "netmap: hw_if_index %d next-index %d",
59 t->hw_if_index, t->next_index);
60 s = format (s, "\n%Uslot: flags 0x%x len %u buf_idx %u",
61 format_white_space, indent + 2,
62 t->slot.flags, t->slot.len, t->slot.buf_idx);
67 buffer_add_to_chain(vlib_main_t *vm, u32 bi, u32 first_bi, u32 prev_bi)
69 vlib_buffer_t * b = vlib_get_buffer (vm, bi);
70 vlib_buffer_t * first_b = vlib_get_buffer (vm, first_bi);
71 vlib_buffer_t * prev_b = vlib_get_buffer (vm, prev_bi);
73 /* update first buffer */
74 first_b->total_length_not_including_first_buffer += b->current_length;
76 /* update previous buffer */
77 prev_b->next_buffer = bi;
78 prev_b->flags |= VLIB_BUFFER_NEXT_PRESENT;
80 /* update current buffer */
84 struct rte_mbuf * mbuf = rte_mbuf_from_vlib_buffer(b);
85 struct rte_mbuf * first_mbuf = rte_mbuf_from_vlib_buffer(first_b);
86 struct rte_mbuf * prev_mbuf = rte_mbuf_from_vlib_buffer(prev_b);
87 first_mbuf->nb_segs++;
88 prev_mbuf->next = mbuf;
89 mbuf->data_len = b->current_length;
90 mbuf->data_off = RTE_PKTMBUF_HEADROOM + b->current_data;
96 netmap_device_input_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
97 vlib_frame_t * frame, u32 device_idx)
99 u32 next_index = NETMAP_INPUT_NEXT_ETHERNET_INPUT;
100 uword n_trace = vlib_get_trace_count (vm, node);
101 netmap_main_t * nm = &netmap_main;
102 netmap_if_t * nif = pool_elt_at_index(nm->interfaces, device_idx);
103 u32 n_rx_packets = 0;
107 struct netmap_ring * ring;
109 u32 n_buffer_bytes = vlib_buffer_free_list_buffer_size (vm,
110 VLIB_BUFFER_DEFAULT_FREE_LIST_INDEX);
112 if (nif->per_interface_next_index != ~0)
113 next_index = nif->per_interface_next_index;
115 n_free_bufs = vec_len (nm->rx_buffers);
116 if (PREDICT_FALSE(n_free_bufs < VLIB_FRAME_SIZE))
118 vec_validate(nm->rx_buffers, VLIB_FRAME_SIZE + n_free_bufs - 1);
119 n_free_bufs += vlib_buffer_alloc(vm, &nm->rx_buffers[n_free_bufs], VLIB_FRAME_SIZE);
120 _vec_len (nm->rx_buffers) = n_free_bufs;
123 cur_ring = nif->first_rx_ring;
124 while (cur_ring <= nif->last_rx_ring && n_free_bufs)
128 ring = NETMAP_RXRING(nif->nifp, cur_ring);
129 r = nm_ring_space(ring);
140 cur_slot_index = ring->cur;
144 u32 next0 = next_index;
145 vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
147 while (r && n_left_to_next)
149 vlib_buffer_t * b0, * first_b0 = 0;
151 u32 bi0 = 0, first_bi0 = 0, prev_bi0;
152 u32 next_slot_index = (cur_slot_index + 1) % ring->num_slots;
153 u32 next2_slot_index = (cur_slot_index + 2) % ring->num_slots;
154 struct netmap_slot * slot = &ring->slot[cur_slot_index];
155 u32 data_len = slot->len;
157 /* prefetch 2 slots in advance */
158 CLIB_PREFETCH (&ring->slot[next2_slot_index], CLIB_CACHE_LINE_BYTES, LOAD);
159 /* prefetch start of next packet */
160 CLIB_PREFETCH (NETMAP_BUF(ring, ring->slot[next_slot_index].buf_idx),
161 CLIB_CACHE_LINE_BYTES, LOAD);
163 while (data_len && n_free_bufs)
165 /* grab free buffer */
166 u32 last_empty_buffer = vec_len (nm->rx_buffers) - 1;
168 bi0 = nm->rx_buffers[last_empty_buffer];
169 b0 = vlib_get_buffer (vm, bi0);
170 _vec_len (nm->rx_buffers) = last_empty_buffer;
174 u32 bytes_to_copy = data_len > n_buffer_bytes ? n_buffer_bytes : data_len;
175 b0->current_data = 0;
176 clib_memcpy (vlib_buffer_get_current (b0),
177 (u8 *) NETMAP_BUF(ring, slot->buf_idx) + offset,
180 /* fill buffer header */
182 b0->current_length = bytes_to_copy;
187 struct rte_mbuf * mb = rte_mbuf_from_vlib_buffer(b0);
188 rte_pktmbuf_data_len (mb) = b0->current_length;
189 rte_pktmbuf_pkt_len (mb) = b0->current_length;
191 b0->total_length_not_including_first_buffer = 0;
192 b0->flags = VLIB_BUFFER_TOTAL_LENGTH_VALID;
193 vnet_buffer(b0)->sw_if_index[VLIB_RX] = nif->sw_if_index;
194 vnet_buffer(b0)->sw_if_index[VLIB_TX] = (u32)~0;
196 first_b0 = vlib_get_buffer(vm, first_bi0);
199 buffer_add_to_chain(vm, bi0, first_bi0, prev_bi0);
201 offset += bytes_to_copy;
202 data_len -= bytes_to_copy;
206 VLIB_BUFFER_TRACE_TRAJECTORY_INIT(first_b0);
207 if (PREDICT_FALSE(n_trace > 0))
209 netmap_input_trace_t *tr;
210 vlib_trace_buffer (vm, node, next0, first_b0, /* follow_chain */ 0);
211 vlib_set_trace_count (vm, node, --n_trace);
212 tr = vlib_add_trace (vm, node, first_b0, sizeof (*tr));
213 tr->next_index = next0;
214 tr->hw_if_index = nif->hw_if_index;
215 memcpy (&tr->slot, slot, sizeof (struct netmap_slot));
217 /* enque and take next packet */
218 vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
219 n_left_to_next, first_bi0, next0);
223 n_rx_bytes += slot->len;
224 to_next[0] = first_bi0;
227 cur_slot_index = next_slot_index;
231 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
233 ring->head = ring->cur = cur_slot_index;
238 ioctl(nif->fd, NIOCTXSYNC, NULL);
240 vlib_increment_combined_counter
241 (vnet_get_main()->interface_main.combined_sw_if_counters
242 + VNET_INTERFACE_COUNTER_RX,
245 n_rx_packets, n_rx_bytes);
251 netmap_input_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
252 vlib_frame_t * frame)
255 u32 n_rx_packets = 0;
257 netmap_main_t * nm = &netmap_main;
259 clib_bitmap_foreach (i, nm->pending_input_bitmap,
261 clib_bitmap_set (nm->pending_input_bitmap, i, 0);
262 n_rx_packets += netmap_device_input_fn(vm, node, frame, i);
269 VLIB_REGISTER_NODE (netmap_input_node) = {
270 .function = netmap_input_fn,
271 .name = "netmap-input",
272 .format_trace = format_netmap_input_trace,
273 .type = VLIB_NODE_TYPE_INPUT,
274 .state = VLIB_NODE_STATE_INTERRUPT,
275 .n_errors = NETMAP_INPUT_N_ERROR,
276 .error_strings = netmap_input_error_strings,
278 .n_next_nodes = NETMAP_INPUT_N_NEXT,
280 [NETMAP_INPUT_NEXT_DROP] = "error-drop",
281 [NETMAP_INPUT_NEXT_ETHERNET_INPUT] = "ethernet-input",