return (dfct);
}
+/**
+ * Return the DPO protocol of the payload carried by the prefix.
+ *
+ * For IPv4/IPv6 prefixes the payload protocol is implied by fp_proto.
+ * For an MPLS prefix the label itself says nothing about the payload,
+ * so the explicitly stored fp_payload_proto is returned instead.
+ */
+static dpo_proto_t
+fib_prefix_get_payload_proto (const fib_prefix_t *pfx)
+{
+ switch (pfx->fp_proto)
+ {
+ case FIB_PROTOCOL_IP4:
+ return (DPO_PROTO_IP4);
+ case FIB_PROTOCOL_IP6:
+ return (DPO_PROTO_IP6);
+ case FIB_PROTOCOL_MPLS:
+ return (pfx->fp_payload_proto);
+ }
+
+ /* Unreachable for a valid fib_protocol_t; default keeps callers safe */
+ ASSERT(0);
+ return (DPO_PROTO_IP4);
+}
+
static void
fib_entry_src_get_path_forwarding (fib_node_index_t path_index,
fib_entry_src_collect_forwarding_ctx_t *ctx)
ctx->fct),
&nh->path_dpo);
fib_path_stack_mpls_disp(path_index,
- ctx->fib_entry->fe_prefix.fp_payload_proto,
+ fib_prefix_get_payload_proto(&ctx->fib_entry->fe_prefix),
&nh->path_dpo);
break;
fib_entry_src_mpls_format (fib_entry_src_t *src,
u8* s)
{
- return (format(s, "MPLS local-label:%d", src->mpls.fesm_label));
+ return (format(s, " local-label:%d", src->mpls.fesm_label));
}
const static fib_entry_src_vft_t mpls_src_vft = {
switch (path->fp_type)
{
+ case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
+ {
+ dpo_id_t tmp = DPO_INVALID;
+
+ dpo_copy(&tmp, dpo);
+ dpo_set(dpo,
+ DPO_MPLS_DISPOSITION,
+ payload_proto,
+ mpls_disp_dpo_create(payload_proto, ~0, &tmp));
+ dpo_reset(&tmp);
+ break;
+ }
case FIB_PATH_TYPE_DEAG:
{
dpo_id_t tmp = DPO_INVALID;
}
case FIB_PATH_TYPE_RECEIVE:
case FIB_PATH_TYPE_ATTACHED:
- case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
case FIB_PATH_TYPE_RECURSIVE:
case FIB_PATH_TYPE_INTF_RX:
case FIB_PATH_TYPE_UDP_ENCAP:
chain_proto,
mldi);
}
+ else if (child_fct == FIB_FORW_CHAIN_TYPE_MPLS_EOS)
+ {
+ /*
+ * MPLS EOS packets using an imp-null. Insert the disposition.
+ */
+ fib_path_stack_mpls_disp(nh->path_index,
+ fib_forw_chain_type_to_dpo_proto(parent_fct),
+ &nh->path_dpo);
+ }
}
dpo_reset(&via_dpo);
#include <vnet/dpo/interface_rx_dpo.h>
#include <vnet/dpo/replicate_dpo.h>
#include <vnet/dpo/l2_bridge_dpo.h>
+#include <vnet/dpo/mpls_disposition.h>
#include <vnet/mpls/mpls.h>
bucket,
exp->adj.adj);
break;
+ case FT_LB_MPLS_DISP_O_ADJ:
+ {
+ const mpls_disp_dpo_t *mdd;
+
+ FIB_TEST_I((DPO_MPLS_DISPOSITION == dpo->dpoi_type),
+ "bucket %d stacks on %U",
+ bucket,
+ format_dpo_type, dpo->dpoi_type);
+
+ mdd = mpls_disp_dpo_get(dpo->dpoi_index);
+
+ dpo = &mdd->mdd_dpo;
+
+ FIB_TEST_I(((DPO_ADJACENCY == dpo->dpoi_type) ||
+ (DPO_ADJACENCY_INCOMPLETE == dpo->dpoi_type)),
+ "bucket %d stacks on %U",
+ bucket,
+ format_dpo_type, dpo->dpoi_type);
+ FIB_TEST_LB((exp->adj.adj == dpo->dpoi_index),
+ "bucket %d stacks on adj %d",
+ bucket,
+ exp->adj.adj);
+ break;
+ }
case FT_LB_INTF:
FIB_TEST_I((DPO_INTERFACE_RX == dpo->dpoi_type),
"bucket %d stacks on %U",
.fp_label = 24001,
.fp_eos = MPLS_NON_EOS,
};
+ fib_test_lb_bucket_t disp_o_10_10_11_1 = {
+ .type = FT_LB_MPLS_DISP_O_ADJ,
+ .adj = {
+ .adj = ai_v4_10_10_11_1,
+ },
+ };
/*
* The EOS entry should link to both the paths,
FIB_FORW_CHAIN_TYPE_MPLS_EOS,
2,
&l99_eos_o_10_10_10_1,
- &a_o_10_10_11_1),
+ &disp_o_10_10_11_1),
"24001/eos LB 2 buckets via: "
"label 99 over 10.10.10.1, "
- "adj over 10.10.11.1");
+ "mpls disp adj over 10.10.11.1");
fei = fib_table_lookup(MPLS_FIB_DEFAULT_TABLE_ID,
.adj = ai_v4_10_10_11_2,
},
};
+ fib_test_lb_bucket_t disp_o_10_10_11_2 = {
+ .type = FT_LB_MPLS_DISP_O_ADJ,
+ .adj = {
+ .adj = ai_v4_10_10_11_2,
+ },
+ };
+
fei = fib_table_entry_path_add(fib_index,
&pfx_1_1_1_1_s_32,
FIB_TEST(fib_test_validate_entry(fei,
FIB_FORW_CHAIN_TYPE_MPLS_EOS,
2,
- &a_o_10_10_11_1,
- &adj_o_10_10_11_2),
+ &disp_o_10_10_11_1,
+ &disp_o_10_10_11_2),
"24001/eos LB 2 buckets via: "
- "adj over 10.10.11.1, ",
- "adj-v4 over 10.10.11.2");
+ "mpls-disp adj over 10.10.11.1, ",
+ "mpls-disp adj-v4 over 10.10.11.2");
fei = fib_table_lookup(MPLS_FIB_DEFAULT_TABLE_ID,
&pfx_24001_neos);
&l99_eos_o_10_10_10_1,
&l99_eos_o_10_10_10_1,
&l99_eos_o_10_10_10_1,
- &a_o_10_10_11_1,
- &a_o_10_10_11_1,
- &a_o_10_10_11_1,
- &a_o_10_10_11_1,
- &a_o_10_10_11_1,
- &adj_o_10_10_11_2,
- &adj_o_10_10_11_2,
- &adj_o_10_10_11_2,
- &adj_o_10_10_11_2,
- &adj_o_10_10_11_2),
+ &disp_o_10_10_11_1,
+ &disp_o_10_10_11_1,
+ &disp_o_10_10_11_1,
+ &disp_o_10_10_11_1,
+ &disp_o_10_10_11_1,
+ &disp_o_10_10_11_2,
+ &disp_o_10_10_11_2,
+ &disp_o_10_10_11_2,
+ &disp_o_10_10_11_2,
+ &disp_o_10_10_11_2),
"24001/eos LB 16 buckets via: "
"label 99 over 10.10.10.1, "
- "adj over 10.10.11.1",
- "adj-v4 over 10.10.11.2");
+ "MPLS disp adj over 10.10.11.1",
+ "MPLS disp adj-v4 over 10.10.11.2");
fei = fib_table_lookup(MPLS_FIB_DEFAULT_TABLE_ID,
&pfx_24001_neos);
FIB_TEST(fib_test_validate_entry(fei,
FIB_FORW_CHAIN_TYPE_MPLS_EOS,
2,
- &a_o_10_10_11_1,
- &adj_o_10_10_11_2),
+ &disp_o_10_10_11_1,
+ &disp_o_10_10_11_2),
"24001/eos LB 2 buckets via: "
- "adj over 10.10.11.1, "
- "adj-v4 over 10.10.11.2");
+ "MPLS disp adj over 10.10.11.1, "
+ "MPLS disp adj-v4 over 10.10.11.2");
fei = fib_table_lookup(MPLS_FIB_DEFAULT_TABLE_ID,
&pfx_24001_neos);
FIB_TEST(fib_test_validate_entry(fei,
FIB_FORW_CHAIN_TYPE_MPLS_EOS,
1,
- &adj_o_10_10_11_2),
+ &disp_o_10_10_11_2),
"24001/eos LB 1 buckets via: "
- "adj over 10.10.11.2");
+ "MPLS disp adj over 10.10.11.2");
fei = fib_table_lookup(MPLS_FIB_DEFAULT_TABLE_ID,
&pfx_24001_neos);
FIB_FORW_CHAIN_TYPE_MPLS_EOS,
2,
&l99_eos_o_10_10_10_1,
- &adj_o_10_10_11_2),
+ &disp_o_10_10_11_2),
"24001/eos LB 2 buckets via: "
"label 99 over 10.10.10.1, "
- "adj over 10.10.11.2");
+ "MPLS disp adj over 10.10.11.2");
fei = fib_table_lookup(MPLS_FIB_DEFAULT_TABLE_ID,
&pfx_24001_neos);
FIB_FORW_CHAIN_TYPE_MPLS_EOS,
2,
&l99_eos_o_10_10_10_1,
- &adj_o_10_10_11_2),
+ &disp_o_10_10_11_2),
"25005/eos LB 2 buckets via: "
"label 99 over 10.10.10.1, "
- "adj over 10.10.11.2");
+ "MPLS disp adj over 10.10.11.2");
fei = fib_table_lookup(MPLS_FIB_DEFAULT_TABLE_ID,
&pfx_25005_neos);
FT_LB_LABEL_STACK_O_ADJ,
FT_LB_LABEL_O_LB,
FT_LB_O_LB,
+ FT_LB_MPLS_DISP_O_ADJ,
FT_LB_INTF,
FT_LB_L2,
FT_LB_BIER_TABLE,
class TestDVR(VppTestCase):
- """ IPv4 Load-Balancing """
+ """ Distributed Virtual Router """
def setUp(self):
super(TestDVR, self).setUp()
L2_VTR_OP.L2_POP_1,
93)
- self.logger.error(self.vapi.ppcli("show bridge-domain 1 detail"))
-
#
# Add routes to bridge the traffic via a tagged and an untagged interface
#
self.assertEqual(icmp.src, self.pg0.remote_ip4)
self.assertEqual(icmp.dst, self.pg1.remote_ip4)
- self.logger.error(self.vapi.cli("sh error"))
if __name__ == '__main__':
unittest.main(testRunner=VppTestRunner)
super(TestIP6LoadBalance, self).tearDown()
def send_and_expect_load_balancing(self, input, pkts, outputs):
+ self.vapi.cli("clear trace")
input.add_stream(pkts)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
self.assertNotEqual(0, len(rx))
def send_and_expect_one_itf(self, input, pkts, itf):
+ self.vapi.cli("clear trace")
input.add_stream(pkts)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
# 0: "hop limit exceeded in transit",
self.assertEqual(icmp.code, 0)
- self.logger.error(self.vapi.cli("sh error"))
-
if __name__ == '__main__':
unittest.main(testRunner=VppTestRunner)
from scapy.packet import Raw
from scapy.layers.l2 import Ether
from scapy.layers.inet import IP, UDP, ICMP
-from scapy.layers.inet6 import IPv6
+from scapy.layers.inet6 import IPv6, ICMPv6TimeExceeded
from scapy.contrib.mpls import MPLS
except:
raise
+ def verify_capture_ip6_icmp(self, src_if, capture, sent):
+ # Verify each rx'd packet is an ICMPv6 Time Exceeded reply to the
+ # corresponding tx'd packet, sourced from src_if's local address.
+ try:
+ self.assertEqual(len(capture), len(sent))
+
+ for i in range(len(capture)):
+ tx = sent[i]
+ rx = capture[i]
+
+ # the rx'd packet has the MPLS label popped
+ eth = rx[Ether]
+ self.assertEqual(eth.type, 0x86DD)
+
+ tx_ip = tx[IPv6]
+ rx_ip = rx[IPv6]
+
+ # ICMP error is returned to the original sender
+ self.assertEqual(rx_ip.dst, tx_ip.src)
+ # ICMP sourced from the interface's address
+ self.assertEqual(rx_ip.src, src_if.local_ip6)
+ # hop-limit is set to 255 at the ICMP source and decremented
+ # once in forwarding before capture, hence 254
+ self.assertEqual(rx_ip.hlim, 254)
+
+ # raises if the Time Exceeded layer is absent
+ icmp = rx[ICMPv6TimeExceeded]
+
+ except:
+ raise
+
def send_and_assert_no_replies(self, intf, pkts, remark):
intf.add_stream(pkts)
self.pg_enable_capture(self.pg_interfaces)
rx = self.pg0.get_capture()
self.verify_capture_ip4(self.pg0, rx, tx)
+ #
+ # disposed packets have an invalid IPv4 checksum
+ #
+ tx = self.create_stream_labelled_ip4(self.pg0, [33],
+ dst_ip=self.pg0.remote_ip4,
+ n=65,
+ chksum=1)
+ self.send_and_assert_no_replies(self.pg0, tx, "Invalid Checksum")
+
+ #
+ # An MPLS xconnect - EOS label in IPv6 out
+ #
+ route_333_eos = VppMplsRoute(
+ self, 333, 1,
+ [VppRoutePath(self.pg0.remote_ip6,
+ self.pg0.sw_if_index,
+ labels=[],
+ proto=DpoProto.DPO_PROTO_IP6)])
+ route_333_eos.add_vpp_config()
+
+ self.vapi.cli("clear trace")
+ tx = self.create_stream_labelled_ip6(self.pg0, [333], 64)
+ self.pg0.add_stream(tx)
+
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ rx = self.pg0.get_capture()
+ self.verify_capture_ip6(self.pg0, rx, tx)
+
+ #
+ # disposed packets have an expired TTL
+ #
+ tx = self.create_stream_labelled_ip6(self.pg0, [333], 64,
+ dst_ip=self.pg1.remote_ip6,
+ hlim=1)
+
+ self.vapi.cli("clear trace")
+ tx = self.create_stream_labelled_ip6(self.pg0, [333], 64,
+ dst_ip=self.pg1.remote_ip6,
+ hlim=0)
+ self.pg0.add_stream(tx)
+
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ rx = self.pg0.get_capture()
+ self.verify_capture_ip6_icmp(self.pg0, rx, tx)
+
+ #
+ # An MPLS xconnect - EOS label in IPv6 out w imp-null
+ #
+ route_334_eos = VppMplsRoute(
+ self, 334, 1,
+ [VppRoutePath(self.pg0.remote_ip6,
+ self.pg0.sw_if_index,
+ labels=[3],
+ proto=DpoProto.DPO_PROTO_IP6)])
+ route_334_eos.add_vpp_config()
+
+ self.vapi.cli("clear trace")
+ tx = self.create_stream_labelled_ip6(self.pg0, [334], 64)
+ self.pg0.add_stream(tx)
+
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ rx = self.pg0.get_capture()
+ self.verify_capture_ip6(self.pg0, rx, tx)
+
+ #
+ # disposed packets have an expired TTL
+ #
+ self.vapi.cli("clear trace")
+ tx = self.create_stream_labelled_ip6(self.pg0, [334], 64,
+ dst_ip=self.pg1.remote_ip6,
+ hlim=0)
+ self.pg0.add_stream(tx)
+
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ rx = self.pg0.get_capture()
+ self.verify_capture_ip6_icmp(self.pg0, rx, tx)
+
#
# An MPLS xconnect - non-EOS label in IP out - an invalid configuration
# so this traffic should be dropped.
tx = self.create_stream_labelled_ip4(self.pg0, [34],
dst_ip="232.1.1.1")
self.send_and_assert_no_replies(self.pg0, tx, "RPF-ID drop 56")
- self.logger.error(self.vapi.cli("sh error"))
def test_mcast_ip6_tail(self):
""" MPLS IPv6 Multicast Tail """