+ vlib_main_t * vm = vlib_get_main();
+ vl_api_policer_add_del_reply_t *rmp;
+ int rv = 0;
+ u8 *name = NULL;
+ sse2_qos_pol_cfg_params_st cfg;
+ clib_error_t * error;
+
+ name = format(0, "%s", mp->name);
+
+ memset (&cfg, 0, sizeof (cfg));
+ cfg.rfc = mp->type;
+ cfg.rnd_type = mp->round_type;
+ cfg.rate_type = mp->rate_type;
+ cfg.rb.kbps.cir_kbps = mp->cir;
+ cfg.rb.kbps.eir_kbps = mp->eir;
+ cfg.rb.kbps.cb_bytes = mp->cb;
+ cfg.rb.kbps.eb_bytes = mp->eb;
+
+ error = policer_add_del(vm, name, &cfg, mp->is_add);
+
+ if (error)
+ rv = VNET_API_ERROR_UNSPECIFIED;
+
+ REPLY_MACRO(VL_API_POLICER_ADD_DEL_REPLY);
+}
+
+/*
+ * Marshal one policer's configuration and compiled runtime template into
+ * a VL_API_POLICER_DETAILS message and send it to client queue q.
+ * Scalar fields are converted to network byte order; "name" is copied
+ * into the fixed-size reply field (truncated if longer).
+ */
+static void
+send_policer_details (u8 *name,
+                      sse2_qos_pol_cfg_params_st *config,
+                      policer_read_response_type_st *templ,
+                      unix_shared_memory_queue_t *q,
+                      u32 context)
+{
+  vl_api_policer_details_t * mp;
+
+  mp = vl_msg_api_alloc (sizeof (*mp));
+  memset (mp, 0, sizeof (*mp));
+  mp->_vl_msg_id = ntohs (VL_API_POLICER_DETAILS);
+  mp->context = context;
+  /* configured rates/bursts (kbps view of the rate union) */
+  mp->cir = htonl(config->rb.kbps.cir_kbps);
+  mp->eir = htonl(config->rb.kbps.eir_kbps);
+  mp->cb = htonl(config->rb.kbps.cb_bytes);
+  mp->eb = htonl(config->rb.kbps.eb_bytes);
+  mp->rate_type = config->rate_type;
+  mp->round_type = config->rnd_type;
+  mp->type = config->rfc;
+  /* runtime state from the compiled policer template */
+  mp->single_rate = templ->single_rate ? 1 : 0;
+  mp->color_aware = templ->color_aware ? 1 : 0;
+  mp->scale = htonl(templ->scale);
+  mp->cir_tokens_per_period = htonl(templ->cir_tokens_per_period);
+  mp->pir_tokens_per_period = htonl(templ->pir_tokens_per_period);
+  mp->current_limit = htonl(templ->current_limit);
+  mp->current_bucket = htonl(templ->current_bucket);
+  mp->extended_limit = htonl(templ->extended_limit);
+  mp->extended_bucket = htonl(templ->extended_bucket);
+  mp->last_update_time = clib_host_to_net_u64(templ->last_update_time);
+
+  /* mp was zeroed above, so the copy is always NUL-terminated */
+  strncpy ((char *) mp->name, (char *) name, ARRAY_LEN(mp->name) - 1);
+
+  vl_msg_api_send_shmem (q, (u8 *)&mp);
+}
+
+/*
+ * Dump policer configuration(s) to the requesting client.  If
+ * match_name_valid is set, only the named policer is reported;
+ * otherwise every configured policer is walked.
+ */
+static void
+vl_api_policer_dump_t_handler
+(vl_api_policer_dump_t *mp)
+{
+  unix_shared_memory_queue_t * q;
+  vnet_policer_main_t * pm = &vnet_policer_main;
+  hash_pair_t * hp;
+  uword * p;
+  u32 pool_index;
+  u8 * match_name = 0;
+  u8 * name;
+  sse2_qos_pol_cfg_params_st *config;
+  policer_read_response_type_st *templ;
+
+  q = vl_api_client_index_to_input_queue (mp->client_index);
+  if (q == 0)
+    return;
+
+  if (mp->match_name_valid) {
+    /* NUL-terminate so the name can be used as a hash key
+       (single check; the duplicated test was merged) */
+    match_name = format(0, "%s%c", mp->match_name, 0);
+    p = hash_get_mem (pm->policer_config_by_name, match_name);
+    if (p) {
+      pool_index = p[0];
+      config = pool_elt_at_index (pm->configs, pool_index);
+      templ = pool_elt_at_index (pm->policer_templates, pool_index);
+      send_policer_details(match_name, config, templ, q, mp->context);
+    }
+    /* fix: the format() vector was previously leaked */
+    vec_free (match_name);
+  } else {
+    hash_foreach_pair (hp, pm->policer_config_by_name,
+    ({
+      name = (u8 *) hp->key;
+      pool_index = hp->value[0];
+      config = pool_elt_at_index (pm->configs, pool_index);
+      templ = pool_elt_at_index (pm->policer_templates, pool_index);
+      send_policer_details(name, config, templ, q, mp->context);
+    }));
+  }
+}
+
+/*
+ * Create a netmap interface as requested by the binary API client.
+ * rv from netmap_create_if() is returned verbatim in the reply.
+ */
+static void
+vl_api_netmap_create_t_handler
+(vl_api_netmap_create_t *mp)
+{
+  vlib_main_t *vm = vlib_get_main();
+  vl_api_netmap_create_reply_t *rmp;
+  int rv = 0;
+  u8 *host_if_name;
+
+  /* Build a NUL-terminated copy of the requested interface name */
+  host_if_name = format (0, "%s%c", mp->netmap_if_name, 0);
+
+  rv = netmap_create_if (vm, host_if_name,
+                         mp->use_random_hw_addr ? 0 : mp->hw_addr,
+                         mp->is_pipe, mp->is_master, 0);
+  vec_free (host_if_name);
+
+  REPLY_MACRO(VL_API_NETMAP_CREATE_REPLY);
+}
+
+/*
+ * Delete a netmap interface by name on behalf of a binary API client.
+ */
+static void
+vl_api_netmap_delete_t_handler
+(vl_api_netmap_delete_t *mp)
+{
+  vlib_main_t * vm = vlib_get_main();
+  vl_api_netmap_delete_reply_t *rmp;
+  int rv = 0;
+  u8 *host_if_name;
+
+  /* Build a NUL-terminated copy of the interface name from the request */
+  host_if_name = format (0, "%s%c", mp->netmap_if_name, 0);
+
+  rv = netmap_delete_if (vm, host_if_name);
+  vec_free (host_if_name);
+
+  REPLY_MACRO(VL_API_NETMAP_DELETE_REPLY);
+}
+
+/*
+ * _details messages normally flow from vpe to clients; receiving one
+ * here indicates a misdirected message, so just log it.
+ */
+static void vl_api_mpls_gre_tunnel_details_t_handler (
+    vl_api_mpls_gre_tunnel_details_t * mp)
+{
+    clib_warning ("BUG");
+}
+
+/*
+ * Marshal one MPLS-over-GRE tunnel into a MPLS_GRE_TUNNEL_DETAILS
+ * message and send it to client queue q.  When gt is NULL an empty
+ * (zeroed) details message carrying only the context is sent.
+ */
+static void send_mpls_gre_tunnel_entry (vpe_api_main_t * am,
+                                        unix_shared_memory_queue_t *q,
+                                        mpls_gre_tunnel_t * gt,
+                                        u32 index,
+                                        u32 context)
+{
+  vl_api_mpls_gre_tunnel_details_t * mp;
+  mpls_main_t * mm = &mpls_main;
+  mpls_encap_t * e;
+  u32 i;
+  u32 len = 0;
+
+  mp = vl_msg_api_alloc (sizeof (*mp));
+  memset (mp, 0, sizeof (*mp));
+  mp->_vl_msg_id = ntohs(VL_API_MPLS_GRE_TUNNEL_DETAILS);
+  mp->context = context;
+
+  if (gt != NULL) {
+    mp->tunnel_index = htonl(index);
+    /* addresses copied as-is (no byte swap), as in the original code */
+    mp->tunnel_src = gt->tunnel_src.as_u32;
+    mp->tunnel_dst = gt->tunnel_dst.as_u32;
+    mp->intfc_address = gt->intfc_address.as_u32;
+    mp->mask_width = htonl(gt->mask_width);
+    mp->inner_fib_index = htonl(gt->inner_fib_index);
+    mp->outer_fib_index = htonl(gt->outer_fib_index);
+    mp->encap_index = htonl(gt->encap_index);
+    mp->hw_if_index = htonl(gt->hw_if_index);
+    mp->l2_only = htonl(gt->l2_only);
+
+    /* fix: this label copy used to run unconditionally and dereferenced
+       gt->encap_index even when gt was NULL, defeating the guard above */
+    e = pool_elt_at_index (mm->encaps, gt->encap_index);
+    len = vec_len (e->labels);
+    mp->nlabels = htonl(len);
+
+    for (i = 0; i < len; i++) {
+      mp->labels[i] = htonl(vnet_mpls_uc_get_label(
+          clib_host_to_net_u32(e->labels[i].label_exp_s_ttl)));
+    }
+  }
+
+  vl_msg_api_send_shmem (q, (u8 *)&mp);
+}
+
+/*
+ * Dump one or all MPLS-over-GRE tunnels to the requesting client.
+ * Also echoes a summary via vlib_cli_output -- NOTE(review): CLI
+ * output from an API handler looks like leftover debug; confirm.
+ */
+static void
+vl_api_mpls_gre_tunnel_dump_t_handler (vl_api_mpls_gre_tunnel_dump_t *mp)
+{
+  vpe_api_main_t * am = &vpe_api_main;
+  unix_shared_memory_queue_t * q;
+  vlib_main_t * vm = &vlib_global_main;
+  mpls_main_t * mm = &mpls_main;
+  mpls_gre_tunnel_t * gt;
+  u32 index = ntohl(mp->tunnel_index);
+
+  q = vl_api_client_index_to_input_queue (mp->client_index);
+  if (q == 0)
+    return;
+
+  if (pool_elts (mm->gre_tunnels)) {
+    /* NOTE(review): if tunnel_index is an unsigned API field, ">= 0"
+       is always true and the dump-all branch below is unreachable --
+       verify the field's declared type and the "dump all" sentinel
+       (likely ~0) against the .api definition. */
+    if(mp->tunnel_index >= 0) {
+      vlib_cli_output (vm, "MPLS-GRE tunnel %u", index);
+      /* NOTE(review): index comes from the client and is not validated
+         against the pool; pool_elt_at_index may assert/crash on a bad
+         index -- confirm whether validation is required here. */
+      gt = pool_elt_at_index (mm->gre_tunnels, index);
+      send_mpls_gre_tunnel_entry (am, q, gt, gt - mm->gre_tunnels, mp->context);
+    } else {
+      vlib_cli_output (vm, "MPLS-GRE tunnels");
+      pool_foreach (gt, mm->gre_tunnels,
+      ({
+        send_mpls_gre_tunnel_entry (am, q, gt, gt - mm->gre_tunnels, mp->context);
+      }));
+    }
+  } else {
+    vlib_cli_output (vm, "No MPLS-GRE tunnels");
+  }
+}
+
+/*
+ * _details messages normally flow from vpe to clients; receiving one
+ * here indicates a misdirected message, so just log it.
+ */
+static void vl_api_mpls_eth_tunnel_details_t_handler (
+    vl_api_mpls_eth_tunnel_details_t * mp)
+{
+    clib_warning ("BUG");
+}
+
+/*
+ * Marshal one MPLS-over-Ethernet tunnel into a MPLS_ETH_TUNNEL_DETAILS
+ * message and send it to client queue q.  When et is NULL an empty
+ * (zeroed) details message carrying only the context is sent.
+ */
+static void send_mpls_eth_tunnel_entry (vpe_api_main_t * am,
+                                        unix_shared_memory_queue_t *q,
+                                        mpls_eth_tunnel_t * et,
+                                        u32 index,
+                                        u32 context)
+{
+  vl_api_mpls_eth_tunnel_details_t * mp;
+  mpls_main_t * mm = &mpls_main;
+  mpls_encap_t * e;
+  u32 i;
+  u32 len = 0;
+
+  mp = vl_msg_api_alloc (sizeof (*mp));
+  memset (mp, 0, sizeof (*mp));
+  mp->_vl_msg_id = ntohs(VL_API_MPLS_ETH_TUNNEL_DETAILS);
+  mp->context = context;
+
+  if (et != NULL) {
+    mp->tunnel_index = htonl(index);
+    memcpy(mp->tunnel_dst_mac, et->tunnel_dst, 6);
+    mp->intfc_address = et->intfc_address.as_u32;
+    mp->tx_sw_if_index = htonl(et->tx_sw_if_index);
+    mp->inner_fib_index = htonl(et->inner_fib_index);
+    mp->mask_width = htonl(et->mask_width);
+    mp->encap_index = htonl(et->encap_index);
+    mp->hw_if_index = htonl(et->hw_if_index);
+    mp->l2_only = htonl(et->l2_only);
+
+    /* fix: this label copy used to run unconditionally and dereferenced
+       et->encap_index even when et was NULL, defeating the guard above */
+    e = pool_elt_at_index (mm->encaps, et->encap_index);
+    len = vec_len (e->labels);
+    mp->nlabels = htonl(len);
+
+    for (i = 0; i < len; i++) {
+      mp->labels[i] = htonl(vnet_mpls_uc_get_label(
+          clib_host_to_net_u32(e->labels[i].label_exp_s_ttl)));
+    }
+  }
+
+  vl_msg_api_send_shmem (q, (u8 *)&mp);
+}
+
+/*
+ * Dump one or all MPLS-over-Ethernet tunnels to the requesting client.
+ * Leftover per-request debug clib_warning() calls were removed, and the
+ * status lines now use vlib_cli_output to match the GRE twin handler.
+ */
+static void
+vl_api_mpls_eth_tunnel_dump_t_handler (vl_api_mpls_eth_tunnel_dump_t *mp)
+{
+  vpe_api_main_t * am = &vpe_api_main;
+  unix_shared_memory_queue_t * q;
+  vlib_main_t * vm = &vlib_global_main;
+  mpls_main_t * mm = &mpls_main;
+  mpls_eth_tunnel_t * et;
+  u32 index = ntohl(mp->tunnel_index);
+
+  q = vl_api_client_index_to_input_queue (mp->client_index);
+  if (q == 0)
+    return;
+
+  if (pool_elts (mm->eth_tunnels)) {
+    /* NOTE(review): if tunnel_index is an unsigned API field, ">= 0" is
+       always true; confirm the "dump all" sentinel clients send. */
+    if(mp->tunnel_index >= 0) {
+      vlib_cli_output (vm, "MPLS-Ethernet tunnel %u", index);
+      et = pool_elt_at_index (mm->eth_tunnels, index);
+      send_mpls_eth_tunnel_entry (am, q, et, et - mm->eth_tunnels, mp->context);
+    } else {
+      vlib_cli_output (vm, "MPLS-Ethernet tunnels");
+      pool_foreach (et, mm->eth_tunnels,
+      ({
+        send_mpls_eth_tunnel_entry (am, q, et, et - mm->eth_tunnels, mp->context);
+      }));
+    }
+  } else {
+    vlib_cli_output (vm, "No MPLS-Ethernet tunnels");
+  }
+}
+
+/*
+ * _details messages normally flow from vpe to clients; receiving one
+ * here indicates a misdirected message, so just log it.
+ */
+static void vl_api_mpls_fib_encap_details_t_handler (
+    vl_api_mpls_fib_encap_details_t * mp)
+{
+    clib_warning ("BUG");
+}
+
+/*
+ * Marshal one MPLS encap table entry (plus its label stack) into a
+ * MPLS_FIB_ENCAP_DETAILS message and send it to client queue q.
+ */
+static void send_mpls_fib_encap_details (vpe_api_main_t * am,
+                                         unix_shared_memory_queue_t *q,
+                                         show_mpls_fib_t *s,
+                                         u32 context)
+{
+  vl_api_mpls_fib_encap_details_t * mp;
+
+  mp = vl_msg_api_alloc (sizeof (*mp));
+  memset (mp, 0, sizeof (*mp));
+  mp->_vl_msg_id = ntohs(VL_API_MPLS_FIB_ENCAP_DETAILS);
+  mp->context = context;
+
+  mp->fib_index = htonl(s->fib_index);
+  mp->entry_index = htonl(s->entry_index);
+  mp->dest = s->dest;
+  mp->s_bit = htonl(s->s_bit);
+
+  mpls_main_t * mm = &mpls_main;
+  mpls_encap_t * e;
+  int i;
+  u32 len = 0;
+
+  e = pool_elt_at_index (mm->encaps, s->entry_index);
+  len = vec_len (e->labels);
+  mp->nlabels = htonl(len);
+
+  /* NOTE(review): len is not clamped to the size of mp->labels[]; if a
+     label stack can exceed the API message's fixed array this overflows
+     the reply -- confirm the array bound in the .api definition. */
+  for (i = 0; i < len; i++) {
+    mp->labels[i] = htonl(vnet_mpls_uc_get_label(
+        clib_host_to_net_u32(e->labels[i].label_exp_s_ttl)));
+  }
+
+  vl_msg_api_send_shmem (q, (u8 *)&mp);
+}
+
+/*
+ * Dump the MPLS encap table to the requesting client as a stream of
+ * MPLS_FIB_ENCAP_DETAILS messages.  Also prints a formatted copy of
+ * the table via vlib_cli_output -- NOTE(review): CLI output from an
+ * API handler looks like leftover debug; confirm it is intentional.
+ */
+static void
+vl_api_mpls_fib_encap_dump_t_handler (vl_api_mpls_fib_encap_dump_t *mp)
+{
+  vpe_api_main_t * am = &vpe_api_main;
+  unix_shared_memory_queue_t * q;
+  vlib_main_t * vm = &vlib_global_main;
+  u64 key;
+  u32 value;
+  show_mpls_fib_t *records = 0;
+  show_mpls_fib_t *s;
+  mpls_main_t * mm = &mpls_main;
+  ip4_main_t * im = &ip4_main;
+  ip4_fib_t * rx_fib;
+
+  q = vl_api_client_index_to_input_queue (mp->client_index);
+  if (q == 0)
+    return;
+
+  /* hash key packs fib index in the high 32 bits, dest in the low 32 */
+  hash_foreach (key, value, mm->mpls_encap_by_fib_and_dest,
+  ({
+    vec_add2 (records, s, 1);
+    s->fib_index = (u32)(key>>32);
+    s->dest = (u32)(key & 0xFFFFFFFF);
+    s->entry_index = (u32) value;
+  }));
+
+  if (0 == vec_len(records)) {
+    vlib_cli_output(vm, "MPLS encap table empty");
+    goto out;
+  }
+
+  /* sort output by dst address within fib */
+  vec_sort_with_function(records, mpls_dest_cmp);
+  vec_sort_with_function(records, mpls_fib_index_cmp);
+  vlib_cli_output(vm, "MPLS encap table");
+  vlib_cli_output(vm, "%=6s%=16s%=16s", "Table", "Dest address", "Labels");
+  vec_foreach (s, records)
+  {
+    rx_fib = vec_elt_at_index(im->fibs, s->fib_index);
+    vlib_cli_output(vm, "%=6d%=16U%=16U", rx_fib->table_id,
+                    format_ip4_address, &s->dest, format_mpls_encap_index, mm,
+                    s->entry_index);
+    send_mpls_fib_encap_details (am, q, s, mp->context);
+  }
+
+out:
+  vec_free(records);
+}
+
+/*
+ * _details messages normally flow from vpe to clients; receiving one
+ * here indicates a misdirected message, so just log it.
+ */
+static void vl_api_mpls_fib_decap_details_t_handler (
+    vl_api_mpls_fib_decap_details_t * mp)
+{
+    clib_warning ("BUG");
+}
+
+/*
+ * Marshal one MPLS decap table entry into a MPLS_FIB_DECAP_DETAILS
+ * message and send it to client queue q.  rx/tx table ids and the
+ * interface tag are resolved by the caller.
+ */
+static void send_mpls_fib_decap_details (vpe_api_main_t * am,
+                                         unix_shared_memory_queue_t *q,
+                                         show_mpls_fib_t *s,
+                                         u32 rx_table_id,
+                                         u32 tx_table_id,
+                                         char *swif_tag,
+                                         u32 context)
+{
+  vl_api_mpls_fib_decap_details_t * rmp;
+
+  /* allocate and zero the reply, then fill it in */
+  rmp = vl_msg_api_alloc (sizeof (*rmp));
+  memset (rmp, 0, sizeof (*rmp));
+  rmp->_vl_msg_id = ntohs(VL_API_MPLS_FIB_DECAP_DETAILS);
+  rmp->context = context;
+
+  /* decap entry identity */
+  rmp->fib_index = htonl(s->fib_index);
+  rmp->entry_index = htonl(s->entry_index);
+  rmp->dest = s->dest;
+  rmp->s_bit = htonl(s->s_bit);
+  rmp->label = htonl(s->label);
+
+  /* rx/tx fib table ids as resolved by the caller */
+  rmp->rx_table_id = htonl(rx_table_id);
+  rmp->tx_table_id = htonl(tx_table_id);
+
+  /* rmp was zeroed above, so the tag stays NUL-terminated */
+  strncpy ((char *) rmp->swif_tag,
+           (char *) swif_tag, ARRAY_LEN(rmp->swif_tag)-1);
+
+  vl_msg_api_send_shmem (q, (u8 *)&rmp);
+}
+
+/*
+ * Dump the MPLS decap table to the requesting client as a stream of
+ * MPLS_FIB_DECAP_DETAILS messages.  Also prints a formatted copy via
+ * vlib_cli_output -- NOTE(review): CLI output from an API handler
+ * looks like leftover debug; confirm it is intentional.
+ */
+static void
+vl_api_mpls_fib_decap_dump_t_handler (vl_api_mpls_fib_decap_dump_t *mp)
+{
+  vpe_api_main_t * am = &vpe_api_main;
+  unix_shared_memory_queue_t * q;
+  vlib_main_t * vm = &vlib_global_main;
+  u64 key;
+  u32 value;
+  show_mpls_fib_t *records = 0;
+  show_mpls_fib_t *s;
+  mpls_main_t * mm = &mpls_main;
+  ip4_main_t * im = &ip4_main;
+  ip4_fib_t * rx_fib;
+  ip4_fib_t *tx_fib;
+  u32 tx_table_id;
+  char *swif_tag;
+
+  q = vl_api_client_index_to_input_queue (mp->client_index);
+  if (q == 0)
+    return;
+
+  hash_foreach (key, value, mm->mpls_decap_by_rx_fib_and_label,
+  ({
+    vec_add2 (records, s, 1);
+    /* unpack the hash key: rx fib index lives in the high 32 bits;
+       NOTE(review): the label appears to sit above bit 12 of the low
+       word and the s-bit at bit 8 -- confirm against the key-packing
+       helper used when entries are inserted. */
+    s->fib_index = (u32)(key>>32);
+    s->entry_index = (u32) value;
+    s->label = ((u32) key)>>12;
+    s->s_bit = (key & (1<<8)) != 0;
+  }));
+
+  if (!vec_len(records)) {
+    vlib_cli_output(vm, "MPLS decap table empty");
+    goto out;
+  }
+
+  vec_sort_with_function(records, mpls_label_cmp);
+  vlib_cli_output(vm, "MPLS decap table");
+  vlib_cli_output(vm, "%=10s%=15s%=6s%=6s", "RX Table", "TX Table/Intfc",
+                  "Label", "S-bit");
+  vec_foreach (s, records)
+  {
+    mpls_decap_t * d;
+    d = pool_elt_at_index(mm->decaps, s->entry_index);
+    /* IP4 decap entries resolve to a tx fib; others are interfaces */
+    if (d->next_index == MPLS_INPUT_NEXT_IP4_INPUT) {
+      tx_fib = vec_elt_at_index(im->fibs, d->tx_fib_index);
+      tx_table_id = tx_fib->table_id;
+      swif_tag = "    ";
+    } else {
+      tx_table_id = d->tx_fib_index;
+      swif_tag = "(i) ";
+    }
+    rx_fib = vec_elt_at_index(im->fibs, s->fib_index);
+
+    vlib_cli_output(vm, "%=10d%=10d%=5s%=6d%=6d", rx_fib->table_id,
+                    tx_table_id, swif_tag, s->label, s->s_bit);
+
+    send_mpls_fib_decap_details (am, q, s, rx_fib->table_id,
+                                 tx_table_id, swif_tag, mp->context);
+  }
+
+out:
+  vec_free(records);
+}
+
+/*
+ * Reply with the list of all classify table indices.  The reply is a
+ * variable-length message: header plus count u32 ids.
+ */
+static void vl_api_classify_table_ids_t_handler (vl_api_classify_table_ids_t *mp)
+{
+  unix_shared_memory_queue_t * q;
+
+  q = vl_api_client_index_to_input_queue (mp->client_index);
+  if (q == 0)
+    return;
+
+  vnet_classify_main_t * cm = &vnet_classify_main;
+  vnet_classify_table_t * t;
+  u32 * table_ids = 0;
+  u32 count;
+
+  /* collect every live table index, already in network byte order */
+  pool_foreach (t, cm->tables,
+  ({
+    vec_add1 (table_ids, ntohl(t - cm->tables));
+  }));
+  count = vec_len(table_ids);
+
+  vl_api_classify_table_ids_reply_t *rmp;
+  /* fix: the trailing array is count u32s; the old "+ count" allocated
+     only count BYTES, so the clib_memcpy below overflowed the reply */
+  rmp = vl_msg_api_alloc_as_if_client(sizeof (*rmp) + count * sizeof(u32));
+  rmp->_vl_msg_id = ntohs (VL_API_CLASSIFY_TABLE_IDS_REPLY);
+  rmp->context = mp->context;
+  rmp->count = ntohl(count);
+  clib_memcpy(rmp->ids, table_ids, count * sizeof(u32));
+  rmp->retval = 0;
+
+  vl_msg_api_send_shmem (q, (u8 *)&rmp);
+
+  vec_free (table_ids);
+}
+
+/*
+ * Reply with the l2/ip4/ip6 input-ACL classify table ids attached to
+ * the given interface; ~0 marks "no table attached".
+ */
+static void vl_api_classify_table_by_interface_t_handler (vl_api_classify_table_by_interface_t *mp)
+{
+  vl_api_classify_table_by_interface_reply_t *rmp;
+  int rv = 0;
+
+  u32 sw_if_index = ntohl(mp->sw_if_index);
+  u32 * acl = 0;
+
+  /* one slot per input-ACL table type, defaulting to "none" (~0) */
+  vec_validate (acl, INPUT_ACL_N_TABLES - 1);
+  vec_set (acl, ~0);
+
+  VALIDATE_SW_IF_INDEX(mp);
+
+  input_acl_main_t * am = &input_acl_main;
+
+  u32 type;
+
+  for (type = 0; type < INPUT_ACL_N_TABLES; type++)
+    {
+      u32 * vec_tbl = am->classify_table_index_by_sw_if_index[type];
+      /* direct O(1) lookup; the previous code scanned every interface
+         slot looking for the one equal to sw_if_index.  Unset entries
+         hold ~0, which equals the default already in acl[type], so the
+         result is identical. */
+      if (sw_if_index < vec_len (vec_tbl))
+        acl[type] = vec_elt (vec_tbl, sw_if_index);
+    }
+
+  BAD_SW_IF_INDEX_LABEL;
+
+  REPLY_MACRO2(VL_API_CLASSIFY_TABLE_BY_INTERFACE_REPLY,
+  ({
+    rmp->sw_if_index = ntohl(sw_if_index);
+    rmp->l2_table_id = ntohl(acl[INPUT_ACL_TABLE_L2]);
+    rmp->ip4_table_id = ntohl(acl[INPUT_ACL_TABLE_IP4]);
+    rmp->ip6_table_id = ntohl(acl[INPUT_ACL_TABLE_IP6]);
+  }));
+  vec_free(acl);
+}
+
+/*
+ * Reply with the parameters and mask of one classify table, or
+ * CLASSIFY_TABLE_NOT_FOUND if the table id does not exist.
+ */
+static void vl_api_classify_table_info_t_handler (vl_api_classify_table_info_t *mp)
+{
+  unix_shared_memory_queue_t * q;
+
+  q = vl_api_client_index_to_input_queue (mp->client_index);
+  if (q == 0)
+    return;
+
+  vl_api_classify_table_info_reply_t *rmp = 0;
+
+  vnet_classify_main_t * cm = &vnet_classify_main;
+  u32 table_id = ntohl(mp->table_id);
+  vnet_classify_table_t * t;
+
+  pool_foreach (t, cm->tables,
+  ({
+    if (table_id == t - cm->tables) {
+      rmp = vl_msg_api_alloc_as_if_client(sizeof (*rmp) + t->match_n_vectors * sizeof (u32x4));
+      rmp->_vl_msg_id = ntohs (VL_API_CLASSIFY_TABLE_INFO_REPLY);
+      rmp->context = mp->context;
+      rmp->table_id = ntohl(table_id);
+      rmp->nbuckets = ntohl(t->nbuckets);
+      rmp->match_n_vectors = ntohl(t->match_n_vectors);
+      rmp->skip_n_vectors = ntohl(t->skip_n_vectors);
+      rmp->active_sessions = ntohl(t->active_elements);
+      rmp->next_table_index = ntohl(t->next_table_index);
+      rmp->miss_next_index = ntohl(t->miss_next_index);
+      rmp->mask_length = ntohl(t->match_n_vectors * sizeof (u32x4));
+      clib_memcpy(rmp->mask, t->mask, t->match_n_vectors * sizeof(u32x4));
+      rmp->retval = 0;
+      break;
+    }
+  }));
+
+  if (rmp == 0) {
+    rmp = vl_msg_api_alloc (sizeof (*rmp));
+    /* fix: zero the reply so uninitialized shared memory (mask_length,
+       nbuckets, etc.) is not leaked to the client on the error path */
+    memset (rmp, 0, sizeof (*rmp));
+    rmp->_vl_msg_id = ntohs((VL_API_CLASSIFY_TABLE_INFO_REPLY));
+    rmp->context = mp->context;
+    rmp->retval = ntohl(VNET_API_ERROR_CLASSIFY_TABLE_NOT_FOUND);
+  }
+
+  vl_msg_api_send_shmem (q, (u8 *)&rmp);
+}
+
+/*
+ * _details messages normally flow from vpe to clients; receiving one
+ * here indicates a misdirected message, so just log it.
+ */
+static void vl_api_classify_session_details_t_handler (vl_api_classify_session_details_t * mp)
+{
+    clib_warning ("BUG");
+}
+
+/*
+ * Marshal one classify session entry into a CLASSIFY_SESSION_DETAILS
+ * message and send it to client queue q.  match_length is the byte
+ * length of the match key (match_n_vectors * sizeof(u32x4)).
+ */
+static void send_classify_session_details (unix_shared_memory_queue_t * q,
+                                           u32 table_id,
+                                           u32 match_length,
+                                           vnet_classify_entry_t * e,
+                                           u32 context)
+{
+  vl_api_classify_session_details_t *rmp;
+
+  rmp = vl_msg_api_alloc (sizeof (*rmp));
+  memset (rmp, 0, sizeof (*rmp));
+  rmp->_vl_msg_id = ntohs(VL_API_CLASSIFY_SESSION_DETAILS);
+  rmp->context = context;
+  rmp->table_id = ntohl(table_id);
+  rmp->hit_next_index = ntohl(e->next_index);
+  rmp->advance = ntohl(e->advance);
+  rmp->opaque_index = ntohl(e->opaque_index);
+  rmp->match_length = ntohl(match_length);
+  /* NOTE(review): match_length is not clamped to sizeof(rmp->match);
+     confirm the API message's match array can hold the table's
+     maximum match_n_vectors. */
+  clib_memcpy(rmp->match, e->key, match_length);
+
+  vl_msg_api_send_shmem (q, (u8 *)&rmp);
+}
+
+/*
+ * Walk every bucket/page/entry of the requested classify table and
+ * stream each live session to the client as CLASSIFY_SESSION_DETAILS.
+ */
+static void vl_api_classify_session_dump_t_handler (vl_api_classify_session_dump_t *mp)
+{
+  vnet_classify_main_t * cm = &vnet_classify_main;
+  unix_shared_memory_queue_t * q;
+
+  u32 table_id = ntohl(mp->table_id);
+  vnet_classify_table_t * t;
+
+  q = vl_api_client_index_to_input_queue (mp->client_index);
+  /* fix: every sibling handler bails when the client queue is gone;
+     without this check a disconnected client led to a NULL q deref */
+  if (q == 0)
+    return;
+
+  pool_foreach (t, cm->tables,
+  ({
+    if (table_id == t - cm->tables) {
+      vnet_classify_bucket_t * b;
+      vnet_classify_entry_t * v, * save_v;
+      int i, j, k;
+
+      for (i = 0; i < t->nbuckets; i++)
+        {
+          b = &t->buckets [i];
+          /* offset 0 means the bucket has no pages allocated */
+          if (b->offset == 0)
+            continue;
+
+          save_v = vnet_classify_get_entry (t, b->offset);
+          for (j = 0; j < (1<<b->log2_pages); j++)
+            {
+              for (k = 0; k < t->entries_per_page; k++)
+                {
+                  v = vnet_classify_entry_at_index (t, save_v, j*t->entries_per_page + k);
+                  if (vnet_classify_entry_is_free (v))
+                    continue;
+
+                  send_classify_session_details(q, table_id,
+                      t->match_n_vectors * sizeof (u32x4), v, mp->context);
+                }
+            }
+        }
+      break;
+    }
+  }));
+}
+
+/*
+ * BOUNCE_HANDLER(nn): generate a handler that forwards message "nn"
+ * verbatim to the single registered client for that message type.
+ * The message is freed (not forwarded) when no registered client has
+ * a live queue, or when the client's queue is full.
+ */
+#define BOUNCE_HANDLER(nn)                                              \
+static void vl_api_##nn##_t_handler (                                   \
+    vl_api_##nn##_t *mp)                                                \
+{                                                                       \
+    vpe_client_registration_t *reg;                                     \
+    vpe_api_main_t * vam = &vpe_api_main;                               \
+    unix_shared_memory_queue_t * q;                                     \
+                                                                        \
+    /* One registration only... */                                      \
+    pool_foreach(reg, vam->nn##_registrations,                          \
+    ({                                                                  \
+        q = vl_api_client_index_to_input_queue (reg->client_index);     \
+        if (q) {                                                        \
+            /*                                                          \
+             * If the queue is stuffed, turf the msg and complain       \
+             * It's unlikely that the intended recipient is             \
+             * alive; avoid deadlock at all costs.                      \
+             */                                                         \
+            if (q->cursize == q->maxsize) {                             \
+                clib_warning ("ERROR: receiver queue full, drop msg");  \
+                vl_msg_api_free (mp);                                   \
+                return;                                                 \
+            }                                                           \
+            vl_msg_api_send_shmem (q, (u8 *)&mp);                       \
+            return;                                                     \
+        }                                                               \
+    }));                                                                \
+    vl_msg_api_free (mp);                                               \
+}
+
+/*
+ * vpe_api_hookup
+ * Add vpe's API message handlers to the table.
+ * vlib has already mapped shared memory and
+ * added the client registration handlers.
+ * See .../open-repo/vlib/memclnt_vlib.c:memclnt_process()
+ */
+
+static clib_error_t *
+vpe_api_hookup (vlib_main_t *vm)
+{
+  api_main_t * am = &api_main;
+
+  /* Register handler/endian/print callbacks for every message in
+     foreach_vpe_api_msg; the trailing 1 marks the message traceable. */
+#define _(N,n)                                                  \
+    vl_msg_api_set_handlers(VL_API_##N, #n,                     \
+                           vl_api_##n##_t_handler,              \
+                           vl_noop_handler,                     \
+                           vl_api_##n##_t_endian,               \
+                           vl_api_##n##_t_print,                \
+                           sizeof(vl_api_##n##_t), 1);
+    foreach_vpe_api_msg;
+#undef _
+
+    /*
+     * Manually register the sr tunnel add del msg, so we trace
+     * enough bytes to capture a typical segment list
+     */
+    vl_msg_api_set_handlers (VL_API_SR_TUNNEL_ADD_DEL,
+                             "sr_tunnel_add_del",
+                             vl_api_sr_tunnel_add_del_t_handler,
+                             vl_noop_handler,
+                             vl_api_sr_tunnel_add_del_t_endian,
+                             vl_api_sr_tunnel_add_del_t_print,
+                             256, 1);
+
+
+    /*
+     * Manually register the sr policy add del msg, so we trace
+     * enough bytes to capture a typical tunnel name list
+     */
+    vl_msg_api_set_handlers (VL_API_SR_POLICY_ADD_DEL,
+                             "sr_policy_add_del",
+                             vl_api_sr_policy_add_del_t_handler,
+                             vl_noop_handler,
+                             vl_api_sr_policy_add_del_t_endian,
+                             vl_api_sr_policy_add_del_t_print,
+                             256, 1);
+
+    /*
+     * Trace space for 8 MPLS encap labels, classifier mask+match
+     */
+    am->api_trace_cfg [VL_API_MPLS_ADD_DEL_ENCAP].size += 8 * sizeof(u32);
+    am->api_trace_cfg [VL_API_CLASSIFY_ADD_DEL_TABLE].size
+        += 5 * sizeof (u32x4);
+    am->api_trace_cfg [VL_API_CLASSIFY_ADD_DEL_SESSION].size
+        += 5 * sizeof (u32x4);
+    am->api_trace_cfg [VL_API_VXLAN_ADD_DEL_TUNNEL].size
+        += 16 * sizeof (u32);
+
+    /*
+     * Thread-safe API messages
+     */
+    am->is_mp_safe [VL_API_IP_ADD_DEL_ROUTE] = 1;
+    am->is_mp_safe [VL_API_GET_NODE_GRAPH] = 1;
+
+    return 0;
+}
+
+VLIB_API_INIT_FUNCTION(vpe_api_hookup);
+
+static clib_error_t *
+vpe_api_init (vlib_main_t *vm)
+{
+ vpe_api_main_t *am = &vpe_api_main;