VPP-108 : API calls to read classify table and sessions
diff --git a/vnet/vnet/replication.c b/vnet/vnet/replication.c
index 6842684..999e1b1 100644
--- a/vnet/vnet/replication.c
+++ b/vnet/vnet/replication.c
@@ -45,19 +45,19 @@ replication_prep (vlib_main_t * vm,
   ctx_id = ctx - rm->contexts[cpu_number];
 
   // Save state from vlib buffer
-  ctx->saved_clone_count = b0->clone_count;
   ctx->saved_free_list_index = b0->free_list_index;
   ctx->current_data = b0->current_data;
 
   // Set up vlib buffer hooks
-  b0->clone_count = ctx_id;
+  b0->recycle_count = ctx_id;
   b0->free_list_index = rm->recycle_list_index;
+  b0->flags |= VLIB_BUFFER_RECYCLE;
 
   // Save feature state
   ctx->recycle_node_index = recycle_node_index;
 
   // Save vnet state
-  memcpy (ctx->vnet_buffer, vnet_buffer(b0), sizeof(vnet_buffer_opaque_t));
+  clib_memcpy (ctx->vnet_buffer, vnet_buffer(b0), sizeof(vnet_buffer_opaque_t));
 
   // Save packet contents
   ctx->l2_packet = l2_packet;
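
Context: replication_prep() arms the buffer so that "freeing" it lands in the
recycler. The context pool index rides in b0->recycle_count, the free list is
swapped for rm->recycle_list_index, and VLIB_BUFFER_RECYCLE tags the buffer.
A minimal caller-side sketch, using only the two signatures visible in this
diff; the node index and the is_last test are illustrative, not from this file:

    replication_context_t *ctx;

    /* First copy of a packet: save buffer state, arm the recycler. */
    ctx = replication_prep (vm, b0, my_feature_node.index /* hypothetical */,
                            1 /* l2_packet */);

    /* Every later copy: restore the saved state.  On the final copy,
     * is_last = 1 releases the context and the original free list. */
    ctx = replication_recycle (vm, b0, is_last);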
@@ -95,10 +95,10 @@ replication_recycle (vlib_main_t * vm,
   ip4_header_t * ip;
 
   // Get access to the replication context
-  ctx = pool_elt_at_index (rm->contexts[cpu_number], b0->clone_count);
+  ctx = pool_elt_at_index (rm->contexts[cpu_number], b0->recycle_count);
 
   // Restore vnet buffer state
-  memcpy (vnet_buffer(b0), ctx->vnet_buffer, sizeof(vnet_buffer_opaque_t));
+  clib_memcpy (vnet_buffer(b0), ctx->vnet_buffer, sizeof(vnet_buffer_opaque_t));
 
   // Restore the packet start (current_data) and length
   vlib_buffer_advance(b0, ctx->current_data - b0->current_data);
@@ -121,7 +121,6 @@ replication_recycle (vlib_main_t * vm,
   if (is_last) {
     // This is the last replication in the list. 
     // Restore original buffer free functionality.
-    b0->clone_count = ctx->saved_clone_count;
     b0->free_list_index = ctx->saved_free_list_index;
 
     // Free context back to its pool
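
For reference, the fields the recycle path restores mirror exactly what
replication_prep() saved. An abridged, illustrative view of the context
follows; the real replication_context_t in replication.h carries more
(saved L2/L3 header bytes, feature replica state), and this subset only
names the members the hunks above touch:

    typedef struct {
      u8 vnet_buffer[sizeof (vnet_buffer_opaque_t)]; /* saved opaque data  */
      u32 saved_free_list_index;    /* original free list, restored on the */
                                    /* last replica                        */
      u32 recycle_node_index;       /* node to resume in after "free"      */
      i16 current_data;             /* original packet start offset        */
      u8 l2_packet;                 /* restore L2 vs L3 header             */
    } replication_context_t;        /* abridged sketch, not the real layout */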
@@ -148,7 +147,6 @@ static void replication_recycle_callback (vlib_main_t *vm,
   u32 * to_next = 0;
   u32 bi0, pi0;
   vlib_buffer_t *b0;
-  vlib_buffer_t *bnext0;
   int i;
   replication_main_t * rm = &replication_main;
   replication_context_t * ctx;
@@ -163,12 +161,12 @@ static void replication_recycle_callback (vlib_main_t *vm,
     bi0 = fl->aligned_buffers[0];
     b0 = vlib_get_buffer (vm, bi0);
     ctx = pool_elt_at_index (rm->contexts[cpu_number],
-                             b0->clone_count);
+                             b0->recycle_count);
     feature_node_index = ctx->recycle_node_index;
   } else if (vec_len (fl->unaligned_buffers) > 0) {
     bi0 = fl->unaligned_buffers[0];
     b0 = vlib_get_buffer (vm, bi0);
-    ctx = pool_elt_at_index (rm->contexts[cpu_number], b0->clone_count);
+    ctx = pool_elt_at_index (rm->contexts[cpu_number], b0->recycle_count);
     feature_node_index = ctx->recycle_node_index;
   }
 
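The callback runs when buffers land on the replication free list, so peeking
at the first buffer (aligned or unaligned) is enough to recover the feature
node that every buffer in the batch is then dispatched to. How the hook is
attached at init time, as a sketch assuming the
buffers_added_to_freelist_function member this generation of vlib exposes on
vlib_buffer_free_list_t:

    /* replication_init-style wiring (sketch, error handling omitted). */
    vlib_buffer_free_list_t *fl =
      vlib_buffer_get_free_list (vm, rm->recycle_list_index);
    fl->buffers_added_to_freelist_function = replication_recycle_callback;
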
@@ -208,8 +206,8 @@ static void replication_recycle_callback (vlib_main_t *vm,
               pi0 = from[1];
               vlib_prefetch_buffer_with_index(vm,pi0,LOAD);
             }
-        
-          bnext0 = b0 = vlib_get_buffer (vm, bi0);
+
+          b0 = vlib_get_buffer (vm, bi0);
 
           // Mark that this buffer was just recycled
           b0->flags |= VLIB_BUFFER_IS_RECYCLED;
@@ -218,12 +216,6 @@ static void replication_recycle_callback (vlib_main_t *vm,
           if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
               f->flags |= VLIB_FRAME_TRACE;
 
-          while (bnext0->flags & VLIB_BUFFER_NEXT_PRESENT)
-            {
-              from += 1;
-              n_left_from -= 1;
-              bnext0 = vlib_get_buffer (vm, bnext0->next_buffer);
-            }
           to_next[0] = bi0;
 
           from++;
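
With the per-buffer recycle_count lookup above, the old bnext0 walk over
VLIB_BUFFER_NEXT_PRESENT chains is dropped; each recycled buffer index goes
straight into the frame for the saved feature node. The surrounding handoff
follows the standard vlib frame pattern; a simplified single-buffer sketch of
what the loop above does in batches:

    /* Hand one recycled buffer back to the feature node (sketch). */
    vlib_frame_t *f = vlib_get_frame_to_node (vm, feature_node_index);
    u32 *to_next = vlib_frame_vector_args (f);
    to_next[0] = bi0;        /* buffer re-enters the graph here */
    f->n_vectors = 1;
    vlib_put_frame_to_node (vm, feature_node_index, f);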