/**
 * @brief Packet trace structure recorded by the mpls-tunnel-tx node.
 */
typedef struct mpls_tunnel_trace_t_
{
 /**
 * Tunnel-id / index in tunnel vector of the tunnel the packet was sent on
 */
 u32 tunnel_id;
} mpls_tunnel_trace_t;
+
+static u8 *
+format_mpls_tunnel_tx_trace (u8 * s,
+ va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ mpls_tunnel_trace_t * t = va_arg (*args, mpls_tunnel_trace_t *);
+
+ s = format (s, "MPLS: tunnel %d", t->tunnel_id);
+ return s;
+}
+
/**
 * @brief Next-node indices for the mpls-tunnel-tx node.
 *
 * NOTE(review): currently unreferenced — the node registration below sets
 * n_next_nodes to 0 and per-packet dispatch uses the DPO's dpoi_next_node;
 * confirm whether this enum is still needed.
 */
typedef enum
{
 MPLS_TUNNEL_ENCAP_NEXT_L2_MIDCHAIN,
 MPLS_TUNNEL_ENCAP_N_NEXT,
} mpls_tunnel_encap_next_t;
+
+/**
+ * @brief TX function. Only called L2. L3 traffic uses the adj-midchains
+ */
+VLIB_NODE_FN (mpls_tunnel_tx) (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ u32 *from = vlib_frame_vector_args (frame);
+ vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b;
+ u16 nexts[VLIB_FRAME_SIZE], *next;
+ u32 n_left;
+
+ n_left = frame->n_vectors;
+ b = bufs;
+ next = nexts;
+
+ vlib_get_buffers (vm, from, bufs, n_left);
+
+ while (n_left > 2)
+ {
+ const mpls_tunnel_t *mt0, *mt1;
+ u32 sw_if_index0, sw_if_index1;
+
+ sw_if_index0 = vnet_buffer(b[0])->sw_if_index[VLIB_TX];
+ sw_if_index1 = vnet_buffer(b[1])->sw_if_index[VLIB_TX];
+
+ mt0 = pool_elt_at_index(mpls_tunnel_pool,
+ mpls_tunnel_db[sw_if_index0]);
+ mt1 = pool_elt_at_index(mpls_tunnel_pool,
+ mpls_tunnel_db[sw_if_index1]);
+
+ vnet_buffer(b[0])->ip.adj_index[VLIB_TX] = mt0->mt_l2_lb.dpoi_index;
+ vnet_buffer(b[1])->ip.adj_index[VLIB_TX] = mt1->mt_l2_lb.dpoi_index;
+ next[0] = mt0->mt_l2_lb.dpoi_next_node;
+ next[1] = mt1->mt_l2_lb.dpoi_next_node;
+
+ /* since we are coming out of the L2 world, where the vlib_buffer
+ * union is used for other things, make sure it is clean for
+ * MPLS from now on.
+ */
+ vnet_buffer(b[0])->mpls.first = 0;
+ vnet_buffer(b[1])->mpls.first = 0;
+
+ if (PREDICT_FALSE(b[0]->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ mpls_tunnel_trace_t *tr = vlib_add_trace (vm, node,
+ b[0], sizeof (*tr));
+ tr->tunnel_id = mpls_tunnel_db[sw_if_index0];
+ }
+ if (PREDICT_FALSE(b[1]->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ mpls_tunnel_trace_t *tr = vlib_add_trace (vm, node,
+ b[1], sizeof (*tr));
+ tr->tunnel_id = mpls_tunnel_db[sw_if_index1];
+ }
+
+ b += 2;
+ n_left -= 2;
+ next += 2;
+ }
+ while (n_left)
+ {
+ const mpls_tunnel_t *mt0;
+ u32 sw_if_index0;
+
+ sw_if_index0 = vnet_buffer(b[0])->sw_if_index[VLIB_TX];
+ mt0 = pool_elt_at_index(mpls_tunnel_pool,
+ mpls_tunnel_db[sw_if_index0]);
+
+ vnet_buffer(b[0])->ip.adj_index[VLIB_TX] = mt0->mt_l2_lb.dpoi_index;
+ next[0] = mt0->mt_l2_lb.dpoi_next_node;
+
+ /* since we are coming out of the L2 world, where the vlib_buffer
+ * union is used for other things, make sure it is clean for
+ * MPLS from now on.
+ */
+ vnet_buffer(b[0])->mpls.first = 0;
+
+ if (PREDICT_FALSE(b[0]->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ mpls_tunnel_trace_t *tr = vlib_add_trace (vm, node,
+ b[0], sizeof (*tr));
+ tr->tunnel_id = mpls_tunnel_db[sw_if_index0];
+ }
+
+ b += 1;
+ n_left -= 1;
+ next += 1;
+ }
+
+ vlib_buffer_enqueue_to_next (vm, node, from, nexts, frame->n_vectors);
+
+ return frame->n_vectors;
+}
+
+VLIB_REGISTER_NODE (mpls_tunnel_tx) =
+{
+ .name = "mpls-tunnel-tx",
+ .vector_size = sizeof (u32),
+ .format_trace = format_mpls_tunnel_tx_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+ .n_errors = 0,
+ .n_next_nodes = 0,
+ /* MPLS_TUNNEL_ENCAP_N_NEXT, */
+ /* .next_nodes = { */
+ /* [MPLS_TUNNEL_ENCAP_NEXT_L2_MIDCHAIN] = "mpls-load-balance", */
+ /* }, */
+};
+