1 ;;; tunnel-encap-skel.el --- tunnel interface output skeleton
5 (define-skeleton skel-tunnel-encap
6 "Insert a tunnel encap implementation"
8 '(setq encap_stack (skeleton-read "encap_stack (e.g ip4_udp_lisp): "))
9 '(setq ENCAP_STACK (upcase encap_stack))
10 '(setq encap-stack (replace-regexp-in-string "_" "-" encap_stack))
11 '(setq ENCAP-STACK (upcase encap-stack))
13 #include <vppinfra/error.h>
14 #include <vppinfra/hash.h>
15 #include <vnet/vnet.h>
16 #include <vnet/ip/ip.h>
17 #include <vnet/ethernet/ethernet.h>
18 #include <vnet/" encap-stack "/" encap_stack ".h>
20 /* Statistics (not really errors) */
21 #define foreach_" encap_stack "_encap_error \\
22 _(ENCAPSULATED, \"good packets encapsulated\")
24 static char * " encap_stack "_encap_error_strings[] = {
25 #define _(sym,string) string,
26 foreach_" encap_stack "_encap_error
31 #define _(sym,str) " ENCAP_STACK "_ENCAP_ERROR_##sym,
32 foreach_" encap_stack "_encap_error
34 " ENCAP_STACK "_ENCAP_N_ERROR,
35 } " encap_stack "_encap_error_t;
38 " ENCAP_STACK "_ENCAP_NEXT_IP4_LOOKUP,
39 " ENCAP_STACK "_ENCAP_NEXT_DROP,
40 " ENCAP_STACK "_ENCAP_N_NEXT,
41 } " encap_stack "_encap_next_t;
45 } " encap_stack "_encap_trace_t;
47 u8 * format_" encap_stack "_encap_trace (u8 * s, va_list * args)
49 CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
50 CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
51 " encap_stack "_encap_trace_t * t
52 = va_arg (*args, " encap_stack "_encap_trace_t *);
54 s = format (s, \"" ENCAP-STACK ": tunnel %d\", t->tunnel_index);
58 /* $$$$ FIXME adjust to match the rewrite string */
59 #define foreach_fixed_header_offset \\
60 _(0) _(1) _(2) _(3) _(FIXME)
63 " encap_stack "_encap (vlib_main_t * vm,
64 vlib_node_runtime_t * node,
65 vlib_frame_t * from_frame)
67 u32 n_left_from, next_index, * from, * to_next;
68 " encap_stack "_main_t * ngm = &" encap_stack "_main;
69 vnet_main_t * vnm = ngm->vnet_main;
70 u32 pkts_encapsulated = 0;
71 u16 old_l0 = 0, old_l1 = 0;
73 from = vlib_frame_vector_args (from_frame);
74 n_left_from = from_frame->n_vectors;
76 next_index = node->cached_next_index;
78 while (n_left_from > 0)
82 vlib_get_next_frame (vm, node, next_index,
83 to_next, n_left_to_next);
85 #if 0 /* $$$ dual loop when the single loop works */
86 while (n_left_from >= 4 && n_left_to_next >= 2)
89 vlib_buffer_t * b0, * b1;
90 nsh_unicast_header_t * h0, * h1;
95 /* Prefetch next iteration. */
97 vlib_buffer_t * p2, * p3;
99 p2 = vlib_get_buffer (vm, from[2]);
100 p3 = vlib_get_buffer (vm, from[3]);
102 vlib_prefetch_buffer_header (p2, LOAD);
103 vlib_prefetch_buffer_header (p3, LOAD);
105 CLIB_PREFETCH (p2->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
106 CLIB_PREFETCH (p3->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
118 b0 = vlib_get_buffer (vm, bi0);
119 b1 = vlib_get_buffer (vm, bi1);
121 h0 = vlib_buffer_get_current (b0);
122 h1 = vlib_buffer_get_current (b1);
124 next0 = next1 = " ENCAP_STACK "_ENCAP_NEXT_IP4_LOOKUP;
126 vlib_buffer_advance (b0, sizeof (*h0));
127 vlib_buffer_advance (b1, sizeof (*h1));
129 vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
130 to_next, n_left_to_next,
131 bi0, bi1, next0, next1);
135 while (n_left_from > 0 && n_left_to_next > 0)
139 u32 next0 = " ENCAP_STACK "_ENCAP_NEXT_IP4_LOOKUP;
140 vnet_hw_interface_t * hi0;
143 u64 * copy_src0, * copy_dst0;
144 u32 * copy_src_last0, * copy_dst_last0;
145 " encap_stack "_tunnel_t * t0;
156 b0 = vlib_get_buffer (vm, bi0);
159 hi0 = vnet_get_sup_hw_interface
160 (vnm, vnet_buffer(b0)->sw_if_index[VLIB_TX]);
162 t0 = pool_elt_at_index (ngm->tunnels, hi0->dev_instance);
164 ASSERT(vec_len(t0->rewrite) >= 24);
166 /* Apply the rewrite string. $$$$ vnet_rewrite? */
167 vlib_buffer_advance (b0, -(word)_vec_len(t0->rewrite));
169 ip0 = vlib_buffer_get_current(b0);
170 /* Copy the fixed header */
171 copy_dst0 = (u64 *) ip0;
172 copy_src0 = (u64 *) t0->rewrite;
174 ASSERT (sizeof (ip4_udp_" encap_stack "_header_t) == FIXME);
176 /* Copy first N octets 8-bytes at a time */
177 #define _(offs) copy_dst0[offs] = copy_src0[offs];
178 foreach_fixed_header_offset;
180 #if 0 /* needed if encap not a multiple of 8 bytes */
181 /* Last 4 octets. Hopefully gcc will be our friend */
182 copy_dst_last0 = (u32 *)(©_dst0[FIXME]);
183 copy_src_last0 = (u32 *)(©_src0[FIXME]);
184 copy_dst_last0[0] = copy_src_last0[0];
187 /* fix the <bleep>ing outer-IP checksum */
188 sum0 = ip0->checksum;
189 /* old_l0 always 0, see the rewrite setup */
191 clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0));
193 sum0 = ip_csum_update (sum0, old_l0, new_l0, ip4_header_t,
194 length /* changed member */);
195 ip0->checksum = ip_csum_fold (sum0);
196 ip0->length = new_l0;
199 udp0 = (udp_header_t *)(ip0+1);
200 new_l0 = clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0)
203 udp0->length = new_l0;
205 /* Reset to look up tunnel partner in the configured FIB */
206 vnet_buffer(b0)->sw_if_index[VLIB_TX] = t0->encap_fib_index;
207 pkts_encapsulated ++;
209 if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
211 " encap_stack "_encap_trace_t *tr =
212 vlib_add_trace (vm, node, b0, sizeof (*tr));
213 tr->tunnel_index = t0 - ngm->tunnels;
215 vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
216 to_next, n_left_to_next,
220 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
222 vlib_node_increment_counter (vm, node->node_index,
223 " ENCAP_STACK "_ENCAP_ERROR_ENCAPSULATED,
225 return from_frame->n_vectors;
228 VLIB_REGISTER_NODE (" encap_stack "_encap_node) = {
229 .function = " encap_stack "_encap,
230 .name = \"" encap-stack "-encap\",
231 .vector_size = sizeof (u32),
232 .format_trace = format_" encap_stack "_encap_trace,
233 .type = VLIB_NODE_TYPE_INTERNAL,
235 .n_errors = ARRAY_LEN(" encap_stack "_encap_error_strings),
236 .error_strings = " encap_stack "_encap_error_strings,
238 .n_next_nodes = " ENCAP_STACK "_ENCAP_N_NEXT,
241 [" ENCAP_STACK "_ENCAP_NEXT_IP4_LOOKUP] = \"ip4-lookup\",
242 [" ENCAP_STACK "_ENCAP_NEXT_DROP] = \"error-drop\",