/*
 * replication.c : packet replication
 *
 * Copyright (c) 2013 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
18 #include <vlib/vlib.h>
19 #include <vnet/vnet.h>
20 #include <vppinfra/error.h>
21 #include <vnet/ip/ip4_packet.h>
22 #include <vnet/replication.h>
25 replication_main_t replication_main;
28 replication_context_t *
29 replication_prep (vlib_main_t * vm,
31 u32 recycle_node_index,
34 replication_main_t * rm = &replication_main;
35 replication_context_t * ctx;
36 uword cpu_number = vm->cpu_index;
40 // Allocate a context, reserve context 0
41 if (PREDICT_FALSE(rm->contexts[cpu_number] == 0))
42 pool_get_aligned (rm->contexts[cpu_number], ctx, CLIB_CACHE_LINE_BYTES);
44 pool_get_aligned (rm->contexts[cpu_number], ctx, CLIB_CACHE_LINE_BYTES);
45 ctx_id = ctx - rm->contexts[cpu_number];
47 // Save state from vlib buffer
48 ctx->saved_clone_count = b0->clone_count;
49 ctx->saved_free_list_index = b0->free_list_index;
50 ctx->current_data = b0->current_data;
52 // Set up vlib buffer hooks
53 b0->clone_count = ctx_id;
54 b0->free_list_index = rm->recycle_list_index;
57 ctx->recycle_node_index = recycle_node_index;
60 memcpy (ctx->vnet_buffer, vnet_buffer(b0), sizeof(vnet_buffer_opaque_t));
62 // Save packet contents
63 ctx->l2_packet = l2_packet;
64 ip = (ip4_header_t *)vlib_buffer_get_current (b0);
66 // Save ethernet header
67 ctx->l2_header[0] = ((u64 *)ip)[0];
68 ctx->l2_header[1] = ((u64 *)ip)[1];
69 ctx->l2_header[2] = ((u64 *)ip)[2];
70 // set ip to the true ip header
71 ip = (ip4_header_t *)(((u8 *)ip) + vnet_buffer(b0)->l2.l2_len);
75 // We need to save TOS for ip4 and ip6 packets. Fortunately the TOS field is
76 // in the first two bytes of both the ip4 and ip6 headers.
77 ctx->ip_tos = *((u16 *)(ip));
79 // Save the ip4 checksum as well. We just blindly save the corresponding two
80 // bytes even for ip6 packets.
81 ctx->ip4_checksum = ip->checksum;
87 replication_context_t *
88 replication_recycle (vlib_main_t * vm,
92 replication_main_t * rm = &replication_main;
93 replication_context_t * ctx;
94 uword cpu_number = vm->cpu_index;
97 // Get access to the replication context
98 ctx = pool_elt_at_index (rm->contexts[cpu_number], b0->clone_count);
100 // Restore vnet buffer state
101 memcpy (vnet_buffer(b0), ctx->vnet_buffer, sizeof(vnet_buffer_opaque_t));
103 // Restore the packet start (current_data) and length
104 vlib_buffer_advance(b0, ctx->current_data - b0->current_data);
106 // Restore packet contents
107 ip = (ip4_header_t *)vlib_buffer_get_current (b0);
108 if (ctx->l2_packet) {
109 // Restore ethernet header
110 ((u64 *)ip)[0] = ctx->l2_header[0];
111 ((u64 *)ip)[1] = ctx->l2_header[1];
112 ((u64 *)ip)[2] = ctx->l2_header[2];
113 // set ip to the true ip header
114 ip = (ip4_header_t *)(((u8 *)ip) + vnet_buffer(b0)->l2.l2_len);
118 *((u16 *)(ip)) = ctx->ip_tos;
119 ip->checksum = ctx->ip4_checksum;
122 // This is the last replication in the list.
123 // Restore original buffer free functionality.
124 b0->clone_count = ctx->saved_clone_count;
125 b0->free_list_index = ctx->saved_free_list_index;
127 // Free context back to its pool
128 pool_put (rm->contexts[cpu_number], ctx);
137 * fish pkts back from the recycle queue/freelist
138 * un-flatten the context chains
140 static void replication_recycle_callback (vlib_main_t *vm,
141 vlib_buffer_free_list_t * fl)
143 vlib_frame_t * f = 0;
145 u32 n_left_to_next = 0;
146 u32 n_this_frame = 0;
151 vlib_buffer_t *bnext0;
153 replication_main_t * rm = &replication_main;
154 replication_context_t * ctx;
155 u32 feature_node_index = 0;
156 uword cpu_number = vm->cpu_index;
158 // All buffers in the list are destined to the same recycle node.
159 // Pull the recycle node index from the first buffer.
160 // Note: this could be sped up if the node index were stuffed into
161 // the freelist itself.
162 if (vec_len (fl->aligned_buffers) > 0) {
163 bi0 = fl->aligned_buffers[0];
164 b0 = vlib_get_buffer (vm, bi0);
165 ctx = pool_elt_at_index (rm->contexts[cpu_number],
167 feature_node_index = ctx->recycle_node_index;
168 } else if (vec_len (fl->unaligned_buffers) > 0) {
169 bi0 = fl->unaligned_buffers[0];
170 b0 = vlib_get_buffer (vm, bi0);
171 ctx = pool_elt_at_index (rm->contexts[cpu_number], b0->clone_count);
172 feature_node_index = ctx->recycle_node_index;
175 /* aligned, unaligned buffers */
176 for (i = 0; i < 2; i++)
180 from = fl->aligned_buffers;
181 n_left_from = vec_len (from);
185 from = fl->unaligned_buffers;
186 n_left_from = vec_len (from);
189 while (n_left_from > 0)
191 if (PREDICT_FALSE(n_left_to_next == 0))
195 f->n_vectors = n_this_frame;
196 vlib_put_frame_to_node (vm, feature_node_index, f);
199 f = vlib_get_frame_to_node (vm, feature_node_index);
200 to_next = vlib_frame_vector_args (f);
201 n_left_to_next = VLIB_FRAME_SIZE;
206 if (PREDICT_TRUE(n_left_from > 1))
209 vlib_prefetch_buffer_with_index(vm,pi0,LOAD);
212 bnext0 = b0 = vlib_get_buffer (vm, bi0);
214 // Mark that this buffer was just recycled
215 b0->flags |= VLIB_BUFFER_IS_RECYCLED;
217 // If buffer is traced, mark frame as traced
218 if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
219 f->flags |= VLIB_FRAME_TRACE;
221 while (bnext0->flags & VLIB_BUFFER_NEXT_PRESENT)
225 bnext0 = vlib_get_buffer (vm, bnext0->next_buffer);
237 vec_reset_length (fl->aligned_buffers);
238 vec_reset_length (fl->unaligned_buffers);
242 ASSERT(n_this_frame);
243 f->n_vectors = n_this_frame;
244 vlib_put_frame_to_node (vm, feature_node_index, f);
250 clib_error_t *replication_init (vlib_main_t *vm)
252 replication_main_t * rm = &replication_main;
253 vlib_buffer_main_t * bm = vm->buffer_main;
254 vlib_buffer_free_list_t * fl;
255 __attribute__((unused)) replication_context_t * ctx;
256 vlib_thread_main_t * tm = vlib_get_thread_main();
259 rm->vnet_main = vnet_get_main();
260 rm->recycle_list_index =
261 vlib_buffer_create_free_list (vm, 1024 /* fictional */,
262 "replication-recycle");
264 fl = pool_elt_at_index (bm->buffer_free_list_pool,
265 rm->recycle_list_index);
267 fl->buffers_added_to_freelist_function = replication_recycle_callback;
269 // Verify the replication context is the expected size
270 ASSERT(sizeof(replication_context_t) == 128); // 2 cache lines
272 vec_validate (rm->contexts, tm->n_vlib_mains - 1);
276 VLIB_INIT_FUNCTION (replication_init);