/*
 * Copyright (c) 2012-2014 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
18 #include <vlib/vlib.h>
19 #include <vnet/pg/pg.h>
20 #include <vnet/mpls/mpls.h>
21 #include <vnet/feature/feature.h>
22 #include <vnet/mpls/mpls.api_enum.h>
26 u32 label_net_byte_order;
/* Dispositions for packets leaving mpls-input: drop on error, or
 * continue to the MPLS FIB lookup node. */
#define foreach_mpls_input_next                 \
_(DROP, "error-drop")                           \
_(LOOKUP, "mpls-lookup")

typedef enum
{
#define _(s,n) MPLS_INPUT_NEXT_##s,
  foreach_mpls_input_next
#undef _
  MPLS_INPUT_N_NEXT,
} mpls_input_next_t;
41 format_mpls_input_trace (u8 * s, va_list * args)
43 CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
44 CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
45 mpls_input_trace_t * t = va_arg (*args, mpls_input_trace_t *);
49 label = clib_net_to_host_u32(t->label_net_byte_order);
51 #define _(a,b) if (t->next_index == MPLS_INPUT_NEXT_##a) next_name = b;
52 foreach_mpls_input_next;
55 s = format (s, "MPLS: next %s[%d] label %d ttl %d exp %d",
56 next_name, t->next_index,
57 vnet_mpls_uc_get_label(label),
58 vnet_mpls_uc_get_ttl(label),
59 vnet_mpls_uc_get_exp(label));
66 u32 last_inner_fib_index;
67 u32 last_outer_fib_index;
68 mpls_main_t * mpls_main;
69 } mpls_input_runtime_t;
72 mpls_input_inline (vlib_main_t * vm,
73 vlib_node_runtime_t * node,
74 vlib_frame_t * from_frame)
76 u32 n_left_from, next_index, * from, * to_next;
77 mpls_main_t * mm = &mpls_main;
78 u32 thread_index = vlib_get_thread_index();
79 vlib_simple_counter_main_t * cm;
80 vnet_main_t * vnm = vnet_get_main();
82 from = vlib_frame_vector_args (from_frame);
83 n_left_from = from_frame->n_vectors;
85 next_index = node->cached_next_index;
87 cm = vec_elt_at_index (vnm->interface_main.sw_if_counters,
88 VNET_INTERFACE_COUNTER_MPLS);
90 while (n_left_from > 0)
94 vlib_get_next_frame (vm, node, next_index,
95 to_next, n_left_to_next);
97 while (n_left_from >= 4 && n_left_to_next >= 2)
99 u32 bi0, next0, sw_if_index0;
100 u32 bi1, next1, sw_if_index1;
101 vlib_buffer_t *b0, *b1;
104 /* Prefetch next iteration. */
106 vlib_buffer_t * p2, * p3;
108 p2 = vlib_get_buffer (vm, from[2]);
109 p3 = vlib_get_buffer (vm, from[3]);
111 vlib_prefetch_buffer_header (p2, LOAD);
112 vlib_prefetch_buffer_header (p3, LOAD);
114 CLIB_PREFETCH (p2->data, sizeof (h0[0]), LOAD);
115 CLIB_PREFETCH (p3->data, sizeof (h1[0]), LOAD);
118 bi0 = to_next[0] = from[0];
119 bi1 = to_next[1] = from[1];
126 b0 = vlib_get_buffer (vm, bi0);
127 b1 = vlib_get_buffer (vm, bi1);
129 h0 = vlib_buffer_get_current (b0);
130 h1 = vlib_buffer_get_current (b1);
132 sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
133 sw_if_index1 = vnet_buffer (b1)->sw_if_index[VLIB_RX];
136 if (PREDICT_FALSE(h0[3] == 0))
138 next0 = MPLS_INPUT_NEXT_DROP;
139 b0->error = node->errors[MPLS_ERROR_TTL_EXPIRED];
143 next0 = MPLS_INPUT_NEXT_LOOKUP;
144 vnet_feature_arc_start(mm->input_feature_arc_index,
145 sw_if_index0, &next0, b0);
146 vlib_increment_simple_counter (cm, thread_index, sw_if_index0, 1);
149 if (PREDICT_FALSE(h1[3] == 0))
151 next1 = MPLS_INPUT_NEXT_DROP;
152 b1->error = node->errors[MPLS_ERROR_TTL_EXPIRED];
156 next1 = MPLS_INPUT_NEXT_LOOKUP;
157 vnet_feature_arc_start(mm->input_feature_arc_index,
158 sw_if_index1, &next1, b1);
159 vlib_increment_simple_counter (cm, thread_index, sw_if_index1, 1);
162 if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
164 mpls_input_trace_t *tr = vlib_add_trace (vm, node,
166 tr->next_index = next0;
167 tr->label_net_byte_order = *((u32*)h0);
169 if (PREDICT_FALSE(b1->flags & VLIB_BUFFER_IS_TRACED))
171 mpls_input_trace_t *tr = vlib_add_trace (vm, node,
173 tr->next_index = next1;
174 tr->label_net_byte_order = *((u32*)h1);
177 vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
178 to_next, n_left_to_next,
183 while (n_left_from > 0 && n_left_to_next > 0)
185 u32 sw_if_index0, next0, bi0;
196 b0 = vlib_get_buffer (vm, bi0);
197 h0 = vlib_buffer_get_current (b0);
198 sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
201 if (PREDICT_FALSE(h0[3] == 0))
203 next0 = MPLS_INPUT_NEXT_DROP;
204 b0->error = node->errors[MPLS_ERROR_TTL_EXPIRED];
208 next0 = MPLS_INPUT_NEXT_LOOKUP;
209 vnet_feature_arc_start(mm->input_feature_arc_index, sw_if_index0, &next0, b0);
210 vlib_increment_simple_counter (cm, thread_index, sw_if_index0, 1);
213 if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
215 mpls_input_trace_t *tr = vlib_add_trace (vm, node,
217 tr->next_index = next0;
218 tr->label_net_byte_order = *(u32*)h0;
221 vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
222 to_next, n_left_to_next,
226 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
228 vlib_node_increment_counter (vm, mpls_input_node.index,
229 MPLS_ERROR_PKTS_DECAP, from_frame->n_vectors);
230 return from_frame->n_vectors;
233 VLIB_NODE_FN (mpls_input_node) (vlib_main_t * vm,
234 vlib_node_runtime_t * node,
235 vlib_frame_t * from_frame)
237 return mpls_input_inline (vm, node, from_frame);
240 VLIB_REGISTER_NODE (mpls_input_node) = {
241 .name = "mpls-input",
242 /* Takes a vector of packets. */
243 .vector_size = sizeof (u32),
245 .runtime_data_bytes = sizeof(mpls_input_runtime_t),
247 .n_errors = MPLS_N_ERROR,
248 .error_counters = mpls_error_counters,
250 .n_next_nodes = MPLS_INPUT_N_NEXT,
252 #define _(s,n) [MPLS_INPUT_NEXT_##s] = n,
253 foreach_mpls_input_next
257 .format_buffer = format_mpls_unicast_header_net_byte_order,
258 .format_trace = format_mpls_input_trace,
261 #ifndef CLIB_MARCH_VARIANT
263 mpls_setup_nodes (vlib_main_t * vm)
267 pn = pg_get_node (mpls_input_node.index);
268 pn->unformat_edit = unformat_pg_mpls_header;
270 ethernet_register_input_type (vm, ETHERNET_TYPE_MPLS,
271 mpls_input_node.index);
274 static clib_error_t * mpls_input_init (vlib_main_t * vm)
276 mpls_setup_nodes (vm);
281 VLIB_INIT_FUNCTION (mpls_input_init) =
283 .runs_after = VLIB_INITS("mpls_init"),
285 #endif /* CLIB_MARCH_VARIANT */