Add support for AArch32
[vpp.git] / vnet / vnet / devices / ssvm / node.c
1 /*
2  * Copyright (c) 2015 Cisco and/or its affiliates.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at:
6  *
7  *     http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15 #include "ssvm_eth.h"
16
17 vlib_node_registration_t ssvm_eth_input_node;
18
/* Per-packet RX trace record, captured when packet tracing is enabled. */
typedef struct {
  u32 next_index;   /* next-node disposition chosen for this packet */
  u32 sw_if_index;  /* interface index the packet arrived on */
} ssvm_eth_input_trace_t;
23
24 /* packet trace format function */
25 static u8 * format_ssvm_eth_input_trace (u8 * s, va_list * args)
26 {
27   CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
28   CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
29   ssvm_eth_input_trace_t * t = va_arg (*args, ssvm_eth_input_trace_t *);
30   
31   s = format (s, "SSVM_ETH_INPUT: sw_if_index %d, next index %d",
32               t->sw_if_index, t->next_index);
33   return s;
34 }
35
/*
 * Error (drop) counters for this node. The _() macro expands each
 * entry twice: once into an enum symbol (SSVM_ETH_INPUT_ERROR_<sym>)
 * and once into the matching human-readable counter string, keeping
 * the two tables in sync by construction.
 *
 * Note: a redundant re-declaration of ssvm_eth_input_node that
 * appeared here has been removed; the node is already declared at the
 * top of the file.
 */
#define foreach_ssvm_eth_input_error \
_(NO_BUFFERS, "Rx packet drops (no buffers)")

typedef enum {
#define _(sym,str) SSVM_ETH_INPUT_ERROR_##sym,
  foreach_ssvm_eth_input_error
#undef _
  SSVM_ETH_INPUT_N_ERROR,               /* number of error counters */
} ssvm_eth_input_error_t;

/* Counter names, index-matched to ssvm_eth_input_error_t. */
static char * ssvm_eth_input_error_strings[] = {
#define _(sym,string) string,
  foreach_ssvm_eth_input_error
#undef _
};

/* Next-node dispositions; must stay in step with the .next_nodes
   table in the node registration at the bottom of this file. */
typedef enum {
  SSVM_ETH_INPUT_NEXT_DROP,
  SSVM_ETH_INPUT_NEXT_ETHERNET_INPUT,
  SSVM_ETH_INPUT_NEXT_IP4_INPUT,
  SSVM_ETH_INPUT_NEXT_IP6_INPUT,
  SSVM_ETH_INPUT_NEXT_MPLS_INPUT,
  SSVM_ETH_INPUT_N_NEXT,
} ssvm_eth_input_next_t;
62
/*
 * Poll one ssvm shared-memory interface for received packets.
 *
 * Drains the peer's queue of chunk-pool element indices under a
 * test-and-set spin lock, copies each chunk into freshly allocated
 * vlib buffers, classifies by ethertype and enqueues to the proper
 * next node, then returns the consumed chunk indices to the shared
 * free list. Returns the number of packets processed.
 */
static inline uword 
ssvm_eth_device_input (ssvm_eth_main_t * em,
                       ssvm_private_t * intfc,
                       vlib_node_runtime_t * node)
{
  ssvm_shared_header_t * sh = intfc->sh;
  vlib_main_t * vm = em->vlib_main;
  unix_shared_memory_queue_t * q;
  ssvm_eth_queue_elt_t * elt, * elts;
  u32 elt_index;
  u32 my_pid = intfc->my_pid;
  int rx_queue_index;
  u32 n_to_alloc = VLIB_FRAME_SIZE * 2;
  u32 n_allocated, n_present_in_cache;
#if DPDK > 0
  u32 next_index = DPDK_RX_NEXT_ETHERNET_INPUT;
#else
  u32 next_index = 0;
#endif
  vlib_buffer_free_list_t * fl;
  u32 n_left_to_next, * to_next;
  u32 next0;
  u32 n_buffers;
  u32 n_available;
  u32 bi0, saved_bi0;
  vlib_buffer_t * b0, * prev;
  u32 saved_cache_size = 0;
  ethernet_header_t * eh0;
  u16 type0;
  u32 n_rx_bytes = 0, l3_offset0;
  u32 cpu_index = os_get_cpu_number();
  /* NOTE(review): trace_cnt duplicates n_trace below and is unused. */
  u32 trace_cnt __attribute__((unused)) = vlib_get_trace_count (vm, node);
  volatile u32 * lock;
  u32 * elt_indices;
  uword n_trace = vlib_get_trace_count (vm, node);

  /* Either side down? buh-bye... */
  if (pointer_to_uword(sh->opaque [MASTER_ADMIN_STATE_INDEX]) == 0 ||
      pointer_to_uword(sh->opaque [SLAVE_ADMIN_STATE_INDEX]) == 0)
    return 0;

  /* Packets addressed to us sit on the queue matching our role. */
  if (intfc->i_am_master)
    q = (unix_shared_memory_queue_t *)(sh->opaque [TO_MASTER_Q_INDEX]);
  else
    q = (unix_shared_memory_queue_t *)(sh->opaque [TO_SLAVE_Q_INDEX]);

  /* Nothing to do? */
  if (q->cursize == 0)
    return 0;

  fl = vlib_buffer_get_free_list (vm, VLIB_BUFFER_DEFAULT_FREE_LIST_INDEX);

  vec_reset_length (intfc->rx_queue);
  
  /*
   * Drain all pending element indices into intfc->rx_queue under a
   * raw test-and-set spin lock: the first word of the queue structure
   * is reused as the lock word, and the *_raw dequeue variant skips
   * the queue's own mutex/condvar machinery.
   */
  lock = (u32 *) q;
  while (__sync_lock_test_and_set (lock, 1))
    ;
  while (q->cursize > 0)
    {
      unix_shared_memory_queue_sub_raw (q, (u8 *)&elt_index);
      /* NOTE(review): 2048 is a magic chunk-pool bound here -- confirm
         it matches the pool size configured in ssvm_eth.h. */
      ASSERT(elt_index < 2048);
      vec_add1 (intfc->rx_queue, elt_index);
    }
  /* Release: publish the dequeues before clearing the lock word. */
  CLIB_MEMORY_BARRIER();
  *lock = 0;

  n_present_in_cache = vec_len (em->buffer_cache);

  /* Top up the local buffer cache if it may not cover this batch
     (factor of 2 gives headroom for chained elements). */
  if (vec_len (em->buffer_cache) < vec_len (intfc->rx_queue) * 2)
    {
      vec_validate (em->buffer_cache, 
                    n_to_alloc + vec_len (em->buffer_cache) - 1);
      n_allocated = 
        vlib_buffer_alloc (vm, &em->buffer_cache [n_present_in_cache], 
                           n_to_alloc);
      
      n_present_in_cache += n_allocated;
      _vec_len (em->buffer_cache) = n_present_in_cache;
    }

  elts = (ssvm_eth_queue_elt_t *) (sh->opaque [CHUNK_POOL_INDEX]);

  n_buffers = vec_len (intfc->rx_queue);
  rx_queue_index = 0;

  while (n_buffers > 0)
    {
      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
      
      while (n_buffers > 0 && n_left_to_next > 0)
        {
          elt = elts + intfc->rx_queue[rx_queue_index];
          
          saved_cache_size = n_present_in_cache;
          if (PREDICT_FALSE(saved_cache_size == 0))
            {
              /* Out of buffers: stop here; the unprocessed remainder
                 (n_buffers) is counted as drops after the out: label. */
              vlib_put_next_frame (vm, node, next_index, n_left_to_next);
              goto out;
            }
          saved_bi0 = bi0 = em->buffer_cache [--n_present_in_cache];
          b0 = vlib_get_buffer (vm, bi0);
          prev = 0;

          /* Copy the shared-memory chunk (chain) into vlib buffer(s). */
          while (1)
            {
              vlib_buffer_init_for_free_list (b0, fl);
              b0->clone_count = 0;
              
              b0->current_data = elt->current_data_hint;
              b0->current_length = elt->length_this_buffer;
              b0->total_length_not_including_first_buffer =
                elt->total_length_not_including_first_buffer;
              
              memcpy (b0->data + b0->current_data, elt->data, 
                      b0->current_length);

              if (PREDICT_FALSE(prev != 0))
                  prev->next_buffer = bi0;

              if (PREDICT_FALSE(elt->flags & SSVM_BUFFER_NEXT_PRESENT))
                {
                  /* NOTE(review): elt is never advanced to the next
                     chunk inside this loop, so a multi-chunk packet
                     copies the same chunk repeatedly and cannot
                     terminate; VLIB_BUFFER_NEXT_PRESENT is also never
                     set on prev, so downstream would not walk the
                     chain. Chained rx looks broken/untested -- verify
                     against the tx-side chunk layout. */
                  prev = b0;
                  if (PREDICT_FALSE(n_present_in_cache == 0))
                    {
                      vlib_put_next_frame (vm, node, next_index, 
                                           n_left_to_next);
                      goto out;
                    }
                  bi0 = em->buffer_cache [--n_present_in_cache];
                  b0 = vlib_get_buffer (vm, bi0);
                }
              else
                break;
            }
          
          saved_cache_size = n_present_in_cache;

          /* Speculatively enqueue the head buffer to the current frame. */
          to_next[0] = saved_bi0;
          to_next++;
          n_left_to_next--;
          
          b0 = vlib_get_buffer (vm, saved_bi0);
          eh0 = vlib_buffer_get_current (b0);

          type0 = clib_net_to_host_u16 (eh0->type);

          /* Classify by ethertype; anything unrecognized goes to
             ethernet-input for generic handling. */
          next0 = SSVM_ETH_INPUT_NEXT_ETHERNET_INPUT;

          if (type0 == ETHERNET_TYPE_IP4)
            next0 = SSVM_ETH_INPUT_NEXT_IP4_INPUT;
          else if (type0 == ETHERNET_TYPE_IP6)
            next0 = SSVM_ETH_INPUT_NEXT_IP6_INPUT;
          else if (type0 == ETHERNET_TYPE_MPLS_UNICAST)
            next0 = SSVM_ETH_INPUT_NEXT_MPLS_INPUT;
          
          /* L3 nodes expect current_data past the ethernet header;
             ethernet-input expects it at the header. */
          l3_offset0 = ((next0 == SSVM_ETH_INPUT_NEXT_IP4_INPUT ||
                         next0 == SSVM_ETH_INPUT_NEXT_IP6_INPUT ||
                         next0 == SSVM_ETH_INPUT_NEXT_MPLS_INPUT) ? 
                        sizeof (ethernet_header_t) : 0);
          
          n_rx_bytes += b0->current_length 
            + b0->total_length_not_including_first_buffer;

          b0->current_data += l3_offset0;
          b0->current_length -= l3_offset0;
          /* NOTE(review): plain assignment clobbers any flag set
             earlier (including NEXT_PRESENT on a chained head) --
             confirm this is intentional. */
          b0->flags = VLIB_BUFFER_TOTAL_LENGTH_VALID;

          vnet_buffer(b0)->sw_if_index[VLIB_RX] = intfc->vlib_hw_if_index;
          vnet_buffer(b0)->sw_if_index[VLIB_TX] = (u32)~0;

          /*
           * Turn this on if you run into
           * "bad monkey" contexts, and you want to know exactly
           * which nodes they've visited... See main.c...
           */
          VLIB_BUFFER_TRACE_TRAJECTORY_INIT(b0);

          if (PREDICT_FALSE(n_trace > 0))
          {
              ssvm_eth_input_trace_t *tr;
              
              vlib_trace_buffer (vm, node, next0,
                                 b0, /* follow_chain */ 1);
              vlib_set_trace_count (vm, node, --n_trace);

              tr = vlib_add_trace (vm, node, b0, sizeof (*tr));

              tr->next_index = next0;
              tr->sw_if_index = intfc->vlib_hw_if_index;
          }

          /* NOTE(review): passes bi0, which for a chained packet is
             the tail buffer rather than the enqueued head saved_bi0 --
             harmless for single-buffer packets where bi0==saved_bi0. */
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, next0);
          n_buffers--;
          rx_queue_index++;
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }
  
 out:
  /* Shrink the cache vector back to the buffers we did not consume. */
  if (em->buffer_cache)
    _vec_len (em->buffer_cache) = saved_cache_size;
  else
    ASSERT (saved_cache_size == 0);

  /* Return every drained chunk index to the shared free list; the
     shared-header lock serializes against the peer process. */
  ssvm_lock (sh, my_pid, 2);

  ASSERT(vec_len(intfc->rx_queue) > 0);

  n_available = (u32)pointer_to_uword(sh->opaque[CHUNK_POOL_NFREE]);
  elt_indices = (u32 *)(sh->opaque[CHUNK_POOL_FREELIST_INDEX]);

  memcpy (&elt_indices[n_available], intfc->rx_queue, 
          vec_len (intfc->rx_queue) * sizeof (u32));

  n_available += vec_len (intfc->rx_queue);
  sh->opaque[CHUNK_POOL_NFREE] = uword_to_pointer(n_available, void* );

  ssvm_unlock (sh);

  /* Anything left unprocessed was dropped for lack of buffers. */
  vlib_error_count (vm, node->node_index, SSVM_ETH_INPUT_ERROR_NO_BUFFERS,
                    n_buffers);

  vlib_increment_combined_counter 
    (vnet_get_main()->interface_main.combined_sw_if_counters
     + VNET_INTERFACE_COUNTER_RX, cpu_index, 
     intfc->vlib_hw_if_index,
     rx_queue_index, n_rx_bytes);

  return rx_queue_index;
}
296                                            
297 static uword
298 ssvm_eth_input_node_fn (vlib_main_t * vm,
299                   vlib_node_runtime_t * node,
300                   vlib_frame_t * frame)
301 {
302   ssvm_eth_main_t * em = &ssvm_eth_main;
303   ssvm_private_t * intfc;
304   uword n_rx_packets = 0;
305
306   vec_foreach (intfc, em->intfcs)
307     {
308       n_rx_packets += ssvm_eth_device_input (em, intfc, node);
309     }
310
311   return n_rx_packets;
312 }
313
/*
 * Node registration: wires the dispatch function, trace formatter,
 * error-counter strings and next-node dispositions into the vlib
 * graph. Registered DISABLED; presumably switched on when an ssvm
 * interface comes up elsewhere in the driver -- TODO confirm.
 */
VLIB_REGISTER_NODE (ssvm_eth_input_node) = {
  .function = ssvm_eth_input_node_fn,
  .name = "ssvm_eth_input",
  .vector_size = sizeof (u32),
  .format_trace = format_ssvm_eth_input_trace,
  .type = VLIB_NODE_TYPE_INPUT,
  .state = VLIB_NODE_STATE_DISABLED,
  
  .n_errors = ARRAY_LEN(ssvm_eth_input_error_strings),
  .error_strings = ssvm_eth_input_error_strings,

  .n_next_nodes = SSVM_ETH_INPUT_N_NEXT,

  /* edit / add dispositions here */
  /* Indices must match ssvm_eth_input_next_t above. */
  .next_nodes = {
        [SSVM_ETH_INPUT_NEXT_DROP] = "error-drop",
        [SSVM_ETH_INPUT_NEXT_ETHERNET_INPUT] = "ethernet-input",
        [SSVM_ETH_INPUT_NEXT_IP4_INPUT] = "ip4-input",
        [SSVM_ETH_INPUT_NEXT_IP6_INPUT] = "ip6-input",
        [SSVM_ETH_INPUT_NEXT_MPLS_INPUT] = "mpls-gre-input",
  },
};
336