 *------------------------------------------------------------------
 * Copyright (c) 2018 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
21 #include <sys/ioctl.h>
24 #include <vlib/vlib.h>
25 #include <vlib/unix/unix.h>
26 #include <vnet/ethernet/ethernet.h>
27 #include <vnet/devices/devices.h>
29 #include <marvell/pp2/pp2.h>
/* X-macro list of per-node error counters for the pp2 input path.  Each
 * _(SYMBOL, "description") pair is expanded twice below: once into the
 * mrvl_pp2_input_error_t enum and once into the matching string table. */
#define foreach_mrvl_pp2_input_error \
_(PPIO_RECV, "pp2_ppio_recv error") \
_(BPOOL_GET_NUM_BUFFS, "pp2_bpool_get_num_buffs error") \
_(BPOOL_PUT_BUFFS, "pp2_bpool_put_buffs error") \
_(BUFFER_ALLOC, "buffer alloc error") \
_(MAC_CE, "MAC error (CRC error)") \
_(MAC_OR, "overrun error") \
_(MAC_RSVD, "unknown MAC error") \
_(MAC_RE, "resource error") \
_(IP_HDR, "ip4 header error")

/* Error code enum: one MRVL_PP2_INPUT_ERROR_* member per list entry above,
 * plus the MRVL_PP2_INPUT_N_ERROR count sentinel.
 * NOTE(review): the 'typedef enum' opener and the '#undef _' lines are not
 * visible in this excerpt -- confirm they are present in the full file. */
#define _(f,s) MRVL_PP2_INPUT_ERROR_##f,
  foreach_mrvl_pp2_input_error
  MRVL_PP2_INPUT_N_ERROR,
} mrvl_pp2_input_error_t;

/* Human-readable counter descriptions, indexed by mrvl_pp2_input_error_t;
 * referenced by the node registration's .error_strings member. */
static __clib_unused char *mrvl_pp2_input_error_strings[] = {
  foreach_mrvl_pp2_input_error
/* Append a per-packet trace record for buffer b0.
 *
 * Consumes one trace slot (decrements *n_trace via vlib_set_trace_count)
 * and stores the chosen next index, the interface hw_if_index and a
 * verbatim copy of the HW RX descriptor into the trace entry; the
 * descriptor copy is what format_mrvl_pp2_input_trace renders later.
 * Caller is expected to have checked n_trace > 0 first.
 * NOTE(review): the function's opening/closing braces are not visible in
 * this excerpt. */
static_always_inline void
mrvl_pp2_input_trace (vlib_main_t * vm, vlib_node_runtime_t * node, u32 next0,
		      vlib_buffer_t * b0, uword * n_trace,
		      mrvl_pp2_if_t * ppif, struct pp2_ppio_desc *d)
  mrvl_pp2_input_trace_t *tr;
  vlib_trace_buffer (vm, node, next0, b0,
		     /* follow_chain */ 0);
  /* one trace slot used */
  vlib_set_trace_count (vm, node, --(*n_trace));
  tr = vlib_add_trace (vm, node, b0, sizeof (*tr));
  tr->next_index = next0;
  tr->hw_if_index = ppif->hw_if_index;
  /* raw descriptor copy for the trace formatter */
  clib_memcpy_fast (&tr->desc, d, sizeof (struct pp2_ppio_desc));
/* Initialize vlib buffer 'b' from HW RX descriptor 'd'.
 *
 * Sets current_data/current_length from the descriptor packet length,
 * records l2/l3/l4 header offsets for whichever VNET_BUFFER_F_*_VALID
 * bits are requested in add_flags, and returns the packet length so the
 * caller can accumulate byte counters.
 * NOTE(review): this excerpt is missing the tail of the parameter list
 * (the add_flags parameter and opening brace), the 'len' declaration,
 * and the brace/else lines of the L3-offset branch -- review against the
 * full file. */
static_always_inline u16
mrvl_pp2_set_buf_data_len_flags (vlib_buffer_t * b, struct pp2_ppio_desc *d,
  len = pp2_ppio_inq_desc_get_pkt_len (d);
  b->total_length_not_including_first_buffer = 0;
  b->flags = VLIB_BUFFER_TOTAL_LENGTH_VALID | add_flags;

  /* a 2-byte Marvell header precedes the ethernet header in buffer data */
  if (add_flags & VNET_BUFFER_F_L2_HDR_OFFSET_VALID)
    vnet_buffer (b)->l2_hdr_offset = 2;

  /* with a parsed L3 offset, point current_data at the L3 header;
   * otherwise (else arm, partially missing here) deliver the whole frame */
  if (add_flags & VNET_BUFFER_F_L3_HDR_OFFSET_VALID)
      u16 offset = DM_RXD_GET_L3_OFF (d);
      vnet_buffer (b)->l3_hdr_offset = offset;
      b->current_data = offset;
      b->current_length = len - offset + 2;
      b->current_length = len;

  /* L4 offset = L3 offset + IHL words * 4.
   * NOTE(review): guarding this with the L3 flag instead of
   * VNET_BUFFER_F_L4_HDR_OFFSET_VALID looks suspicious -- confirm against
   * upstream before relying on l4_hdr_offset validity. */
  if (add_flags & VNET_BUFFER_F_L3_HDR_OFFSET_VALID)
    vnet_buffer (b)->l4_hdr_offset = vnet_buffer (b)->l3_hdr_offset +
      DM_RXD_GET_IPHDR_LEN (d) * 4;
/* Classify the packet described by RX descriptor 'd' and choose the next
 * node for buffer 'b': drop (MAC or ip4 header error, with b->error set
 * to the matching counter), ip4-input-no-checksum, ip6-input, or
 * ethernet-input for everything else.  Buffer metadata is initialized via
 * mrvl_pp2_set_buf_data_len_flags and the descriptor packet length is
 * returned so the caller can sum rx bytes.
 * NOTE(review): braces, the 'l3_info' declaration and the 'if (ec == N)'
 * dispatch chain around the b->error assignments are missing from this
 * excerpt -- review against the full file. */
static_always_inline u16
mrvl_pp2_next_from_desc (vlib_node_runtime_t * node, struct pp2_ppio_desc * d,
			 vlib_buffer_t * b, u32 * next)
  /* ES bit set means MAC error - drop and count */
  if (PREDICT_FALSE (DM_RXD_GET_ES (d)))
      *next = VNET_DEVICE_INPUT_NEXT_DROP;
      u8 ec = DM_RXD_GET_EC (d);
      /* map the 2-bit HW error code onto the four MAC error counters */
      b->error = node->errors[MRVL_PP2_INPUT_ERROR_MAC_CE];
      b->error = node->errors[MRVL_PP2_INPUT_ERROR_MAC_OR];
      b->error = node->errors[MRVL_PP2_INPUT_ERROR_MAC_RSVD];
      b->error = node->errors[MRVL_PP2_INPUT_ERROR_MAC_RE];
      return mrvl_pp2_set_buf_data_len_flags (b, d, 0);
  l3_info = DM_RXD_GET_L3_PRS_INFO (d);

  /* ipv4 packet can be value 1, 2 or 3 */
  if (PREDICT_TRUE ((l3_info - 1) < 3))
      /* drop packets whose ip4 header failed HW validation */
      if (PREDICT_FALSE (DM_RXD_GET_L3_IP4_HDR_ERR (d) != 0))
	  *next = VNET_DEVICE_INPUT_NEXT_DROP;
	  b->error = node->errors[MRVL_PP2_INPUT_ERROR_IP_HDR];
	  return mrvl_pp2_set_buf_data_len_flags (b, d, 0);
      /* HW already checked the checksum, so skip the SW check */
      *next = VNET_DEVICE_INPUT_NEXT_IP4_NCS_INPUT;
      return mrvl_pp2_set_buf_data_len_flags
	 VNET_BUFFER_F_L2_HDR_OFFSET_VALID |
	 VNET_BUFFER_F_L3_HDR_OFFSET_VALID |
	 VNET_BUFFER_F_L4_HDR_OFFSET_VALID | VNET_BUFFER_F_IS_IP4);

  /* ipv6 packet can be value 4 or 5 */
  if (PREDICT_TRUE ((l3_info - 4) < 2))
      *next = VNET_DEVICE_INPUT_NEXT_IP6_INPUT;
      return mrvl_pp2_set_buf_data_len_flags
	 VNET_BUFFER_F_L2_HDR_OFFSET_VALID |
	 VNET_BUFFER_F_L3_HDR_OFFSET_VALID |
	 VNET_BUFFER_F_L4_HDR_OFFSET_VALID | VNET_BUFFER_F_IS_IP6);

  /* neither ip4 nor ip6: hand the frame to ethernet-input */
  *next = VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT;
  return mrvl_pp2_set_buf_data_len_flags (b, d,
					  VNET_BUFFER_F_L2_HDR_OFFSET_VALID);
/* RX burst for one (interface, queue) pair.
 *
 * 1. pp2_ppio_recv fills the per-thread descriptor array (up to one
 *    VLIB frame's worth); the cookie stored in each descriptor at refill
 *    time is the vlib buffer index.
 * 2. Packets are enqueued to next nodes in a standard dual/single loop,
 *    honoring per_interface_next_index redirect and device-input features.
 * 3. RX counters are bumped and the HW buffer pool is refilled back up to
 *    the ring size in MRVL_PP2_BUFF_BATCH_SZ batches.
 * Returns the number of packets received.
 * NOTE(review): many lines were dropped from this excerpt (the qid
 * parameter, loop openers/braces, bi0/bi1 loads, several call
 * continuation lines, the final n_bufs accounting and return) -- review
 * against the full file. */
static_always_inline uword
mrvl_pp2_device_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
			      vlib_frame_t * frame, mrvl_pp2_if_t * ppif,
  vnet_main_t *vnm = vnet_get_main ();
  mrvl_pp2_main_t *ppm = &mrvl_pp2_main;
  u32 thread_index = vm->thread_index;
  mrvl_pp2_inq_t *inq = vec_elt_at_index (ppif->inqs, qid);
  uword n_trace = vlib_get_trace_count (vm, node);
  mrvl_pp2_per_thread_data_t *ptd =
    vec_elt_at_index (ppm->per_thread_data, thread_index);
  u32 next_index = VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT;
  u32 sw_if_index[VLIB_N_RX_TX];
  u32 n_rx_packets = 0;
  struct pp2_ppio_desc *d;
  u16 n_desc = VLIB_FRAME_SIZE;

  /* ensure the per-thread descriptor array can hold a full frame */
  vec_validate_aligned (ptd->descs, n_desc, CLIB_CACHE_LINE_BYTES);
  if (PREDICT_FALSE (pp2_ppio_recv (ppif->ppio, 0, qid, ptd->descs, &n_desc)))
    vlib_error_count (vm, node->node_index, MRVL_PP2_INPUT_ERROR_PPIO_RECV,
  n_rx_packets = n_desc;

  /* descriptor cookie == vlib buffer index (set at refill time below) */
  for (i = 0; i < n_desc; i++)
    ptd->buffers[i] = pp2_ppio_inq_desc_get_cookie (&ptd->descs[i]);
  buffers = ptd->buffers;
  sw_if_index[VLIB_RX] = ppif->sw_if_index;
  sw_if_index[VLIB_TX] = (u32) ~ 0;
  vlib_buffer_t *b0, *b1;

  vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
  /* dual loop: two packets per iteration while descriptors and frame
   * slots remain */
  while (n_desc >= 4 && n_left_to_next >= 2)
      b0 = vlib_get_buffer (vm, bi0);
      b1 = vlib_get_buffer (vm, bi1);
      /* ~0 means no per-interface redirect: classify from descriptors */
      if (PREDICT_TRUE (ppif->per_interface_next_index == ~0))
	  n_rx_bytes += mrvl_pp2_next_from_desc (node, d, b0, &next0);
	  n_rx_bytes += mrvl_pp2_next_from_desc (node, d + 1, b1, &next1);
	  vnet_feature_start_device_input_x2 (ppif->sw_if_index, &next0,
	  /* redirect in force: fixed next index, minimal metadata */
	  n_rx_bytes += mrvl_pp2_set_buf_data_len_flags (b0, d, 0);
	  n_rx_bytes += mrvl_pp2_set_buf_data_len_flags (b1, d + 1, 0);
	  next0 = next1 = ppif->per_interface_next_index;
      clib_memcpy_fast (vnet_buffer (b0)->sw_if_index, sw_if_index,
			sizeof (sw_if_index));
      clib_memcpy_fast (vnet_buffer (b1)->sw_if_index, sw_if_index,
			sizeof (sw_if_index));
      VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b0);
      VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b1);
      if (PREDICT_FALSE (n_trace > 0))
	  mrvl_pp2_input_trace (vm, node, next0, b0, &n_trace, ppif, d);
	  mrvl_pp2_input_trace (vm, node, next1, b1, &n_trace, ppif,
      vlib_validate_buffer_enqueue_x2 (vm, node, next_index, to_next,
				       n_left_to_next, bi0, bi1, next0,
  /* single loop: drain the remaining descriptors one at a time */
  while (n_desc && n_left_to_next)
      u32 bi0 = buffers[0];
      b0 = vlib_get_buffer (vm, bi0);
      if (PREDICT_TRUE (ppif->per_interface_next_index == ~0))
	  n_rx_bytes += mrvl_pp2_next_from_desc (node, d, b0, &next0);
	  vnet_feature_start_device_input_x1 (ppif->sw_if_index, &next0,
	  n_rx_bytes += mrvl_pp2_set_buf_data_len_flags (b0, d, 0);
	  next0 = ppif->per_interface_next_index;
      clib_memcpy_fast (vnet_buffer (b0)->sw_if_index, sw_if_index,
			sizeof (sw_if_index));
      VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b0);
      if (PREDICT_FALSE (n_trace > 0))
	mrvl_pp2_input_trace (vm, node, next0, b0, &n_trace, ppif, d);
      vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
				       n_left_to_next, bi0, next0);
  vlib_put_next_frame (vm, node, next_index, n_left_to_next);

  /* per-interface RX packet/byte counters */
  vlib_increment_combined_counter (vnm->
				   interface_main.combined_sw_if_counters +
				   VNET_INTERFACE_COUNTER_RX, thread_index,
				   ppif->hw_if_index, n_rx_packets,

  /* refill: top the HW buffer pool back up to the input queue size */
  if (PREDICT_FALSE (pp2_bpool_get_num_buffs (inq->bpool, &n_bufs)))
      vlib_error_count (vm, node->node_index,
			MRVL_PP2_INPUT_ERROR_BPOOL_GET_NUM_BUFFS, 1);
  n_bufs = inq->size - n_bufs;
  while (n_bufs >= MRVL_PP2_BUFF_BATCH_SZ)
      struct buff_release_entry *e = ptd->bre;
      u32 *buffers = ptd->buffers;

      n_alloc = vlib_buffer_alloc (vm, ptd->buffers, MRVL_PP2_BUFF_BATCH_SZ);
      if (PREDICT_FALSE (n_alloc == 0))
	  vlib_error_count (vm, node->node_index,
			    MRVL_PP2_INPUT_ERROR_BUFFER_ALLOC, 1);
	  vlib_buffer_t *b = vlib_get_buffer (vm, bi);
	  /* HW wants the physical address 64 bytes before packet data
	   * (headroom reserved for the Marvell metadata) --
	   * NOTE(review): confirm the 64-byte constant against the full
	   * file / MUSDK docs */
	  e->buff.addr = vlib_buffer_get_pa (vm, b) - 64;
	  e->bpool = inq->bpool;
      if (PREDICT_FALSE (pp2_bpool_put_buffs (ptd->hif, ptd->bre, &i)))
	  /* put failed: return everything we allocated to the free list */
	  vlib_error_count (vm, node->node_index,
			    MRVL_PP2_INPUT_ERROR_BPOOL_PUT_BUFFS, 1);
	  vlib_buffer_free (vm, ptd->buffers, n_alloc);
      /* partial put: free only the buffers the HW did not accept */
      if (PREDICT_FALSE (i != n_alloc))
	vlib_buffer_free (vm, ptd->buffers + i, n_alloc - i);
/* Input node function: poll every (device, queue) pair assigned to this
 * node's runtime and accumulate the number of packets received.  Only
 * admin-up interfaces are polled.
 * NOTE(review): the return-type line, braces and the n_rx/ppif local
 * declarations are not visible in this excerpt -- review against the
 * full file. */
mrvl_pp2_input_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
		   vlib_frame_t * frame)
  mrvl_pp2_main_t *ppm = &mrvl_pp2_main;
  vnet_device_input_runtime_t *rt = (void *) node->runtime_data;
  vnet_device_and_queue_t *dq;

  foreach_device_and_queue (dq, rt->devices_and_queues)
    ppif = vec_elt_at_index (ppm->interfaces, dq->dev_instance);
    /* skip interfaces that are not admin-up */
    if (ppif->flags & MRVL_PP2_IF_F_ADMIN_UP)
      n_rx += mrvl_pp2_device_input_inline (vm, node, frame, ppif,
/* Node registration: a polling input node, declared sibling of
 * device-input so it inherits the standard device-input next-node arcs
 * (ethernet-input, ip4-input-no-checksum, ip6-input, drop, ...).
 * NOTE(review): the closing '};' is not visible in this excerpt. */
VLIB_REGISTER_NODE (mrvl_pp2_input_node) = {
  .function = mrvl_pp2_input_fn,
  .name = "mrvl-pp2-input",
  .sibling_of = "device-input",
  .format_trace = format_mrvl_pp2_input_trace,
  .type = VLIB_NODE_TYPE_INPUT,
  .state = VLIB_NODE_STATE_POLLING,
  .n_errors = MRVL_PP2_INPUT_N_ERROR,
  .error_strings = mrvl_pp2_input_error_strings,
390 * fd.io coding-style-patch-verification: ON
393 * eval: (c-set-style "gnu")