vom: Add support for redirect contracts in gbp
[vpp.git] / src / plugins / gbp / gbp_policy_dpo.c
index e2af1e6..7c53d1b 100644 (file)
@@ -17,6 +17,8 @@
 #include <vnet/fib/ip4_fib.h>
 #include <vnet/fib/ip6_fib.h>
 #include <vnet/dpo/load_balance.h>
+#include <vnet/dpo/drop_dpo.h>
+#include <vnet/vxlan-gbp/vxlan_gbp_packet.h>
 
 #include <plugins/gbp/gbp.h>
 #include <plugins/gbp/gbp_policy_dpo.h>
@@ -49,7 +51,7 @@ gbp_policy_dpo_alloc (void)
 {
   gbp_policy_dpo_t *gpd;
 
-  pool_get (gbp_policy_dpo_pool, gpd);
+  pool_get_zero (gbp_policy_dpo_pool, gpd);
 
   return (gpd);
 }
@@ -110,19 +112,24 @@ gbp_policy_dpo_add_or_lock (dpo_proto_t dproto,
   dpo_id_t parent = DPO_INVALID;
 
   gpd = gbp_policy_dpo_alloc ();
-  memset (gpd, 0, sizeof (*gpd));
 
   gpd->gpd_proto = dproto;
   gpd->gpd_sw_if_index = sw_if_index;
   gpd->gpd_epg = epg;
 
-  /*
-   * stack on the DVR DPO for the output interface
-   */
-  dvr_dpo_add_or_lock (sw_if_index, dproto, &parent);
+  if (~0 != sw_if_index)
+    {
+      /*
+       * stack on the DVR DPO for the output interface
+       */
+      dvr_dpo_add_or_lock (sw_if_index, dproto, &parent);
+    }
+  else
+    {
+      dpo_copy (&parent, drop_dpo_get (dproto));
+    }
 
   dpo_stack (gbp_policy_dpo_type, dproto, &gpd->gpd_dpo, &parent);
-
   dpo_set (dpo, gbp_policy_dpo_type, dproto, gbp_policy_dpo_get_index (gpd));
 }
 
@@ -144,11 +151,36 @@ format_gbp_policy_dpo (u8 * s, va_list * ap)
   return (s);
 }
 
+/**
+ * Interpose a policy DPO.
+ *
+ * dpo_vft_t.dv_mk_interpose callback: build a clone of @a original that
+ * carries the same policy attributes (proto, EPG, sw_if_index) but is
+ * stacked on @a parent instead of the original's parent, and write the
+ * resulting DPO into @a clone.
+ *
+ * NOTE: the clone is allocated *before* the original is looked up; the
+ * pool may grow (and reallocate) during the alloc, which would invalidate
+ * a pointer fetched earlier — do not reorder these two calls.
+ */
+static void
+gbp_policy_dpo_interpose (const dpo_id_t * original,
+                         const dpo_id_t * parent, dpo_id_t * clone)
+{
+  gbp_policy_dpo_t *gpd, *gpd_clone;
+
+  gpd_clone = gbp_policy_dpo_alloc ();
+  gpd = gbp_policy_dpo_get (original->dpoi_index);
+
+  /* copy the policy attributes of the original onto the clone */
+  gpd_clone->gpd_proto = gpd->gpd_proto;
+  gpd_clone->gpd_epg = gpd->gpd_epg;
+  gpd_clone->gpd_sw_if_index = gpd->gpd_sw_if_index;
+
+  /* re-parent the clone on the interposed DPO */
+  dpo_stack (gbp_policy_dpo_type,
+            gpd_clone->gpd_proto, &gpd_clone->gpd_dpo, parent);
+
+  dpo_set (clone,
+          gbp_policy_dpo_type,
+          gpd_clone->gpd_proto, gbp_policy_dpo_get_index (gpd_clone));
+}
+
 const static dpo_vft_t gbp_policy_dpo_vft = {
   .dv_lock = gbp_policy_dpo_lock,
   .dv_unlock = gbp_policy_dpo_unlock,
   .dv_format = format_gbp_policy_dpo,
   .dv_get_urpf = gbp_policy_dpo_get_urpf,
+  .dv_mk_interpose = gbp_policy_dpo_interpose,
 };
 
 /**
@@ -195,6 +227,7 @@ typedef struct gbp_policy_dpo_trace_t_
   u32 src_epg;
   u32 dst_epg;
   u32 acl_index;
+  u32 a_bit;
 } gbp_policy_dpo_trace_t;
 
 typedef enum
@@ -203,6 +236,23 @@ typedef enum
   GBP_POLICY_N_NEXT,
 } gbp_policy_next_t;
 
+/**
+ * Apply a GBP redirect rule to an L3 packet.
+ *
+ * Select the rule's pre-stacked redirect DPO for the policy node and
+ * IP protocol in question, store its index in the buffer's TX adjacency
+ * so the next node forwards via the redirect chain, and return the
+ * graph-node next index to enqueue the buffer to.
+ */
+always_inline u32
+gbp_rule_l3_redirect (const gbp_rule_t * gu, vlib_buffer_t * b0, int is_ip6)
+{
+  gbp_policy_node_t pnode;
+  const dpo_id_t *dpo;
+  dpo_proto_t dproto;
+
+  pnode = (is_ip6 ? GBP_POLICY_NODE_IP6 : GBP_POLICY_NODE_IP4);
+  dproto = (is_ip6 ? DPO_PROTO_IP6 : DPO_PROTO_IP4);
+  dpo = &gu->gu_dpo[pnode][dproto];
+
+  /* The flow hash is still valid as this is a IP packet being switched */
+  vnet_buffer (b0)->ip.adj_index[VLIB_TX] = dpo->dpoi_index;
+
+  return (dpo->dpoi_next_node);
+}
+
 always_inline uword
 gbp_policy_dpo_inline (vlib_main_t * vm,
                       vlib_node_runtime_t * node,
@@ -210,6 +260,7 @@ gbp_policy_dpo_inline (vlib_main_t * vm,
 {
   gbp_main_t *gm = &gbp_main;
   u32 n_left_from, next_index, *from, *to_next;
+  gbp_rule_t *gu;
 
   from = vlib_frame_vector_args (from_frame);
   n_left_from = from_frame->n_vectors;
@@ -227,10 +278,9 @@ gbp_policy_dpo_inline (vlib_main_t * vm,
          const gbp_policy_dpo_t *gpd0;
          u32 bi0, next0;
          gbp_contract_key_t key0;
-         gbp_contract_value_t value0 = {
-           .as_u64 = ~0,
-         };
+         gbp_contract_t *gc0;
          vlib_buffer_t *b0;
+         index_t gci0;
 
          bi0 = from[0];
          to_next[0] = bi0;
@@ -241,14 +291,23 @@ gbp_policy_dpo_inline (vlib_main_t * vm,
          next0 = GBP_POLICY_DROP;
 
          b0 = vlib_get_buffer (vm, bi0);
+
+         gc0 = NULL;
          gpd0 =
            gbp_policy_dpo_get_i (vnet_buffer (b0)->ip.adj_index[VLIB_TX]);
          vnet_buffer (b0)->ip.adj_index[VLIB_TX] = gpd0->gpd_dpo.dpoi_index;
 
+         if (vnet_buffer2 (b0)->gbp.flags & VXLAN_GBP_GPFLAGS_A)
+           {
+             next0 = gpd0->gpd_dpo.dpoi_next_node;
+             key0.as_u32 = ~0;
+             goto trace;
+           }
+
          key0.gck_src = vnet_buffer2 (b0)->gbp.src_epg;
          key0.gck_dst = gpd0->gpd_epg;
 
-         if (~0 != key0.gck_src)
+         if (EPG_INVALID != key0.gck_src)
            {
              if (PREDICT_FALSE (key0.gck_src == key0.gck_dst))
                {
@@ -256,12 +315,13 @@ gbp_policy_dpo_inline (vlib_main_t * vm,
                   * intra-epg allowed
                   */
                  next0 = gpd0->gpd_dpo.dpoi_next_node;
+                 vnet_buffer2 (b0)->gbp.flags |= VXLAN_GBP_GPFLAGS_A;
                }
              else
                {
-                 value0.as_u64 = gbp_acl_lookup (&key0);
+                 gci0 = gbp_contract_find (&key0);
 
-                 if (~0 != value0.gc_lc_index)
+                 if (INDEX_INVALID != gci0)
                    {
                      fa_5tuple_opaque_t pkt_5tuple0;
                      u8 action0 = 0;
@@ -270,16 +330,17 @@ gbp_policy_dpo_inline (vlib_main_t * vm,
                      /*
                       * tests against the ACL
                       */
+                     gc0 = gbp_contract_get (gci0);
                      acl_plugin_fill_5tuple_inline (gm->
                                                     acl_plugin.p_acl_main,
-                                                    value0.gc_lc_index, b0,
+                                                    gc0->gc_lc_index, b0,
                                                     is_ip6,
                                                     /* is_input */ 1,
                                                     /* is_l2_path */ 0,
                                                     &pkt_5tuple0);
                      acl_plugin_match_5tuple_inline (gm->
                                                      acl_plugin.p_acl_main,
-                                                     value0.gc_lc_index,
+                                                     gc0->gc_lc_index,
                                                      &pkt_5tuple0, is_ip6,
                                                      &action0, &acl_pos_p0,
                                                      &acl_match_p0,
@@ -287,7 +348,25 @@ gbp_policy_dpo_inline (vlib_main_t * vm,
                                                      &trace_bitmap0);
 
                      if (action0 > 0)
-                       next0 = gpd0->gpd_dpo.dpoi_next_node;
+                       {
+
+                         vnet_buffer2 (b0)->gbp.flags |= VXLAN_GBP_GPFLAGS_A;
+                         gu = gbp_rule_get (gc0->gc_rules[rule_match_p0]);
+
+                         switch (gu->gu_action)
+                           {
+                           case GBP_RULE_PERMIT:
+                             next0 = gpd0->gpd_dpo.dpoi_next_node;
+                             break;
+                           case GBP_RULE_DENY:
+                             ASSERT (0);
+                             next0 = 0;
+                             break;
+                           case GBP_RULE_REDIRECT:
+                             next0 = gbp_rule_l3_redirect (gu, b0, is_ip6);
+                             break;
+                           }
+                       }
                    }
                }
            }
@@ -299,7 +378,7 @@ gbp_policy_dpo_inline (vlib_main_t * vm,
               */
              next0 = gpd0->gpd_dpo.dpoi_next_node;
            }
-
+       trace:
          if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              gbp_policy_dpo_trace_t *tr;
@@ -307,7 +386,8 @@ gbp_policy_dpo_inline (vlib_main_t * vm,
              tr = vlib_add_trace (vm, node, b0, sizeof (*tr));
              tr->src_epg = key0.gck_src;
              tr->dst_epg = key0.gck_dst;
-             tr->acl_index = value0.gc_acl_index;
+             tr->acl_index = (gc0 ? gc0->gc_acl_index : ~0);
+             tr->a_bit = vnet_buffer2 (b0)->gbp.flags & VXLAN_GBP_GPFLAGS_A;
            }
 
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
@@ -325,8 +405,8 @@ format_gbp_policy_dpo_trace (u8 * s, va_list * args)
   CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
   gbp_policy_dpo_trace_t *t = va_arg (*args, gbp_policy_dpo_trace_t *);
 
-  s = format (s, " src-epg:%d dst-epg:%d acl-index:%d",
-             t->src_epg, t->dst_epg, t->acl_index);
+  s = format (s, " src-epg:%d dst-epg:%d acl-index:%d a-bit:%d",
+             t->src_epg, t->dst_epg, t->acl_index, t->a_bit);
 
   return s;
 }
@@ -455,8 +535,7 @@ gbp_lpm_classify_inline (vlib_main_t * vm,
            {
              gpd0 = gbp_policy_dpo_get_i (dpo0->dpoi_index);
              src_epg0 = gpd0->gpd_epg;
-             vnet_feature_next (vnet_buffer (b0)->sw_if_index[VLIB_RX],
-                                &next0, b0);
+             vnet_feature_next (&next0, b0);
            }
          else
            {