2 *------------------------------------------------------------------
3 * Copyright (c) 2017 Cisco and/or its affiliates.
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at:
8 * http://www.apache.org/licenses/LICENSE-2.0
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 *------------------------------------------------------------------
18 #include <igmp/igmp_pkt.h>
/**
 * Advance the buffer's write cursor by @l bytes: the caller has just
 * written l bytes at vlib_buffer_get_current(b).
 * NOTE(review): only part of the body is visible in this excerpt; it
 * appears to do nothing beyond bumping current_length - confirm.
 */
21 vlib_buffer_append (vlib_buffer_t * b, uword l)
24 b->current_length += l;
/**
 * Allocate and prime one vlib buffer for the packet builder @bk.
 *
 * The buffer is initialised from the default free-list, marked as
 * locally originated and traced, and its RX sw_if_index invalidated
 * to clear stale data. Side effects on the builder: the buffer index
 * is appended to bk->buffers and bk->n_avail is reset to the
 * interface's IP4 MTU - the byte budget for the packet to be built.
 *
 * Returns the buffer; presumably NULL on allocation failure - the
 * failure branch after vlib_buffer_alloc() is not visible here.
 */
27 static vlib_buffer_t *
28 igmp_pkt_get_buffer (igmp_pkt_build_t * bk)
30 vlib_buffer_free_list_t *fl;
35 vm = vlib_get_main ();
37 if (vlib_buffer_alloc (vm, &bi, 1) != 1)
40 b = vlib_get_buffer (vm, bi);
41 fl = vlib_buffer_get_free_list (vm, VLIB_BUFFER_DEFAULT_FREE_LIST_INDEX);
42 vlib_buffer_init_for_free_list (b, fl);
43 VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b);
45 b->flags |= VNET_BUFFER_F_LOCALLY_ORIGINATED;
46 b->flags |= VLIB_BUFFER_IS_TRACED;
48 /* clear out stale data */
49 vnet_buffer (b)->sw_if_index[VLIB_RX] = ~0;
52 * save progress in the builder
54 vec_add1 (bk->buffers, bi);
55 bk->n_avail = vnet_sw_interface_get_mtu (vnet_get_main (),
56 bk->sw_if_index, VNET_MTU_IP4);
/**
 * Start a new packet: allocate a buffer and write the IPv4 header
 * followed by a 4-byte Router Alert option (0x46 = version 4,
 * IHL 6 words, i.e. 20-byte header + 4 option bytes).
 *
 * Destination selection: membership reports go to the IGMPv3 report
 * address; queries go to the group's address when @group is non-NULL
 * (group-specific query) or to the general-query address otherwise.
 * The source address is taken from the interface via
 * ip4_src_address_for_packet().
 *
 * bk->n_avail is debited for the IP header; NOTE(review): no debit
 * for the 4 option bytes is visible in this excerpt - confirm
 * against the full source.
 */
61 static vlib_buffer_t *
62 igmp_pkt_build_ip_header (igmp_pkt_build_t * bk,
63 igmp_msg_type_t msg_type,
64 const igmp_group_t * group)
70 b = igmp_pkt_get_buffer (bk);
75 ip4 = vlib_buffer_get_current (b);
76 memset (ip4, 0, sizeof (ip4_header_t));
77 ip4->ip_version_and_header_length = 0x46;
79 ip4->protocol = IP_PROTOCOL_IGMP;
82 ip4_src_address_for_packet (&ip4_main.lookup_main,
83 bk->sw_if_index, &ip4->src_address);
85 vlib_buffer_append (b, sizeof (*ip4));
86 bk->n_avail -= sizeof (*ip4);
91 ip4->dst_address.as_u32 = IGMP_MEMBERSHIP_REPORT_ADDRESS;
95 clib_memcpy (&ip4->dst_address, &group->key->ip4,
96 sizeof (ip4_address_t));
98 ip4->dst_address.as_u32 = IGMP_GENERAL_QUERY_ADDRESS;
102 /* add the router alert options */
103 option = vlib_buffer_get_current (b);
104 option[0] = 0x80 | 20; // IP4_ROUTER_ALERT_OPTION;
105 option[1] = 4; // length
106 option[2] = option[3] = 0;
108 vlib_buffer_append (b, 4);
/**
 * Begin a new IGMPv3 Membership Report packet: build the IP header
 * (report destination) then lay down a v3 report header with zeroed
 * code/checksum, updating the builder's byte accounting
 * (n_avail down, n_bytes up, by the report header size).
 *
 * Returns the buffer; presumably NULL when igmp_pkt_build_ip_header()
 * failed - that branch is not visible in this excerpt.
 */
114 static vlib_buffer_t *
115 igmp_pkt_build_report_v3 (igmp_pkt_build_report_t * br,
116 const igmp_group_t * group)
118 igmp_membership_report_v3_t *report;
121 b = igmp_pkt_build_ip_header (&br->base, IGMP_MSG_REPORT, group);
126 report = vlib_buffer_get_current (b);
127 report->header.type = IGMP_TYPE_membership_report_v3;
128 report->header.code = 0;
129 report->header.checksum = 0;
132 vlib_buffer_append (b, sizeof (igmp_membership_report_v3_t));
133 br->base.n_avail -= sizeof (igmp_membership_report_v3_t);
134 br->base.n_bytes += sizeof (igmp_membership_report_v3_t);
/**
 * Transmit every buffer accumulated in the builder: stamp each with
 * the interface config's multicast adjacency, enqueue them all in a
 * single frame to the ip4-rewrite-mcast node, then free the
 * builder's buffer vector.
 *
 * NOTE(review): config from igmp_config_lookup() is dereferenced
 * with no NULL check visible here - confirm callers only invoke this
 * for interfaces with an IGMP config. The frame's n_vectors
 * assignment is also not visible in this excerpt.
 */
140 igmp_pkt_tx (igmp_pkt_build_t * bk)
142 const igmp_config_t *config;
149 vm = vlib_get_main ();
150 config = igmp_config_lookup (bk->sw_if_index);
151 f = vlib_get_frame_to_node (vm, ip4_rewrite_mcast_node.index);
152 to_next = vlib_frame_vector_args (f);
154 vec_foreach_index (ii, bk->buffers)
156 b = vlib_get_buffer (vm, bk->buffers[ii]);
157 vnet_buffer (b)->ip.adj_index[VLIB_TX] = config->adj_index;
158 to_next[ii] = bk->buffers[ii];
162 vlib_put_frame_to_node (vm, ip4_rewrite_mcast_node.index, f);
164 IGMP_DBG (" ..tx: %U", format_vnet_sw_if_index_name,
165 vnet_get_main (), bk->sw_if_index);
167 vec_free (bk->buffers);
/**
 * Return the report buffer currently under construction - the last
 * entry in the builder's buffer vector - or (presumably) NULL when
 * no buffer has been started yet; the early-return body after the
 * NULL check is not visible in this excerpt.
 */
171 static vlib_buffer_t *
172 igmp_pkt_build_report_get_active (igmp_pkt_build_report_t * br)
174 if (NULL == br->base.buffers)
177 return (vlib_get_buffer (vlib_get_main (),
178 br->base.buffers[vec_len (br->base.buffers) - 1]));
/**
 * Finalise the active report buffer so it is ready to send:
 * - write the group count into the v3 report header,
 * - compute the IGMP checksum over the n_bytes of IGMP payload,
 * - fill in the IP total length and IP header checksum,
 * - reset the builder's counters so the next buffer starts fresh.
 *
 * The IGMP header is located 6 u32s (24 bytes: 20-byte IP header +
 * 4-byte router-alert option) past the start of the IP header.
 * NOTE(review): ip4 comes from vlib_buffer_get_current(b) here -
 * presumably the buffer's current data pointer still references the
 * packet start at this point; confirm against the full source.
 */
182 igmp_pkt_build_report_bake (igmp_pkt_build_report_t * br)
184 igmp_membership_report_v3_t *igmp;
188 b = igmp_pkt_build_report_get_active (br);
192 ip4 = vlib_buffer_get_current (b);
193 igmp = (igmp_membership_report_v3_t *) (((u32 *) ip4) + 6);
195 igmp->n_groups = clib_host_to_net_u16 (br->n_groups);
197 igmp->header.checksum =
198 ~ip_csum_fold (ip_incremental_checksum (0, igmp, br->base.n_bytes));
200 ip4->length = clib_host_to_net_u16 (b->current_length);
201 ip4->checksum = ip4_header_checksum (ip4);
203 br->base.n_bytes = br->base.n_avail = br->n_groups = 0;
/**
 * Send the report under construction: no-op when nothing was built
 * (no buffers), otherwise bake (checksums/lengths) and transmit.
 */
207 igmp_pkt_report_v3_send (igmp_pkt_build_report_t * br)
209 if (NULL == br->base.buffers)
212 igmp_pkt_build_report_bake (br);
213 igmp_pkt_tx (&br->base);
/**
 * Size in bytes this group's v3 group record would occupy in a
 * report: the fixed record header plus one IPv4 address per INCLUDE
 * source. Only valid for groups in INCLUDE filter mode (asserted).
 */
217 igmp_pkt_report_v3_get_size (const igmp_group_t * group)
219 ASSERT (IGMP_FILTER_MODE_INCLUDE == group->router_filter_mode);
221 return ((hash_elts (group->igmp_src_by_key[IGMP_FILTER_MODE_INCLUDE]) *
222 sizeof (ip4_address_t)) + sizeof (igmp_membership_group_v3_t));
/**
 * Append an empty v3 group record (type @type, group address @grp,
 * zero sources/aux data) to the active report buffer.
 *
 * If the record header does not fit in the remaining MTU budget
 * (n_avail), the current report is baked and a fresh report buffer
 * is started first. Accounting is debited/credited for the record
 * header; the caller fills in source addresses afterwards.
 *
 * Returns a pointer into the buffer at which the record was written.
 * NOTE(review): the n_groups increment and any NULL-buffer handling
 * are not visible in this excerpt - confirm against the full source.
 */
225 static igmp_membership_group_v3_t *
226 igmp_pkt_report_v3_append_group (igmp_pkt_build_report_t * br,
227 const ip46_address_t * grp,
228 igmp_membership_group_v3_type_t type)
230 igmp_membership_group_v3_t *igmp_group;
233 b = igmp_pkt_build_report_get_active (br);
235 if (br->base.n_avail < sizeof (igmp_membership_group_v3_t))
237 igmp_pkt_build_report_bake (br);
238 b = igmp_pkt_build_report_v3 (br, NULL);
242 br->base.n_avail -= sizeof (igmp_membership_group_v3_t);
243 br->base.n_bytes += sizeof (igmp_membership_group_v3_t);
247 igmp_group = vlib_buffer_get_current (b);
248 vlib_buffer_append (b, sizeof (igmp_membership_group_v3_t));
250 igmp_group->type = type;
251 igmp_group->n_aux_u32s = 0;
252 igmp_group->n_src_addresses = 0;
253 igmp_group->group_address.as_u32 = grp->ip4.as_u32;
260 " If the set of Group Records required in a Report does not fit within
261 * the size limit of a single Report message (as determined by the MTU
262 * of the network on which it will be sent), the Group Records are sent
263 * in as many Report messages as needed to report the entire set.
265 * If a single Group Record contains so many source addresses that it
266 * does not fit within the size limit of a single Report message, if its
267 * Type is not MODE_IS_EXCLUDE or CHANGE_TO_EXCLUDE_MODE, it is split
268 * into multiple Group Records, each containing a different subset of
269 * the source addresses and each sent in a separate Report message. If
270 * its Type is MODE_IS_EXCLUDE or CHANGE_TO_EXCLUDE_MODE, a single Group
271 * Record is sent, containing as many source addresses as can fit, and
272 * the remaining source addresses are not reported; though the choice of
273 * which sources to report is arbitrary, it is preferable to report the
274 * same set of sources in each subsequent report, rather than reporting
275 * different sources each time."
/**
 * Append one source address @src to the group record @igmp_group in
 * the active report buffer.
 *
 * When the address does not fit in the remaining budget, the current
 * record's source count is sealed, the report is baked, a new report
 * buffer is started and the group record re-appended there - per the
 * RFC 3376 rule quoted above about splitting records across reports.
 *
 * Returns the (possibly relocated) group record pointer; the caller
 * must use the returned pointer from then on.
 * NOTE(review): br->n_srcs maintenance (increment/reset) is not
 * visible in this excerpt - confirm against the full source.
 */
277 static igmp_membership_group_v3_t *
278 igmp_pkt_report_v3_append_src (igmp_pkt_build_report_t * br,
279 igmp_membership_group_v3_t * igmp_group,
280 const ip46_address_t * grp,
281 igmp_membership_group_v3_type_t type,
282 const ip46_address_t * src)
286 b = igmp_pkt_build_report_get_active (br);
288 if (br->base.n_avail < sizeof (ip4_address_t))
290 igmp_group->n_src_addresses = clib_host_to_net_u16 (br->n_srcs);
291 igmp_pkt_build_report_bake (br);
292 b = igmp_pkt_build_report_v3 (br, NULL);
295 igmp_group = igmp_pkt_report_v3_append_group (br, grp, type);
298 igmp_group->src_addresses[br->n_srcs].as_u32 = src->ip4.as_u32;
300 br->base.n_avail -= sizeof (ip4_address_t);
301 br->base.n_bytes += sizeof (ip4_address_t);
302 vlib_buffer_append (b, sizeof (ip4_address_t));
/**
 * Add a group record of type @type for group @grp with the explicit
 * source list @srcs (a vector) to the report being built, starting a
 * first report buffer on demand. Sources are appended one by one;
 * appending may roll over into a new report buffer, in which case the
 * group-record pointer is replaced by the append-src return value.
 * The record's source count is sealed at the end.
 *
 * NOTE(review): the source-iteration loop header and the early
 * returns on allocation failure are not visible in this excerpt.
 */
308 igmp_pkt_report_v3_add_report (igmp_pkt_build_report_t * br,
309 const ip46_address_t * grp,
310 const ip46_address_t * srcs,
311 igmp_membership_group_v3_type_t type)
313 igmp_membership_group_v3_t *igmp_group;
314 const ip46_address_t *s;
317 b = igmp_pkt_build_report_get_active (br);
321 b = igmp_pkt_build_report_v3 (br, NULL);
323 /* failed to allocate buffer */
327 igmp_group = igmp_pkt_report_v3_append_group (br, grp, type);
329 if (NULL == igmp_group)
335 igmp_group = igmp_pkt_report_v3_append_src(br, igmp_group,
337 if (NULL == igmp_group)
342 igmp_group->n_src_addresses = clib_host_to_net_u16 (br->n_srcs);
344 IGMP_DBG (" ..add-group: %U", format_ip46_address, grp, IP46_TYPE_IP4);
/**
 * Add a group record of type @type for the IGMP group object @group,
 * taking the sources from the group's INCLUDE-mode source hash.
 *
 * If the whole record would not fit in a partially-filled buffer,
 * the current report is baked and a fresh one started so the record
 * begins in an empty report (see the RFC 3376 rule quoted above).
 * As with add_report, append_src may relocate the record into a new
 * buffer, so its return value replaces igmp_group each iteration,
 * and the source count is sealed at the end.
 *
 * NOTE(review): failure-path bodies after the NULL checks are not
 * visible in this excerpt.
 */
348 igmp_pkt_report_v3_add_group (igmp_pkt_build_report_t * br,
349 const igmp_group_t * group,
350 igmp_membership_group_v3_type_t type)
352 igmp_membership_group_v3_t *igmp_group;
356 b = igmp_pkt_build_report_get_active (br);
360 b = igmp_pkt_build_report_v3 (br, NULL);
362 /* failed to allocate buffer */
367 * if the group won't fit in a partially full buffer, start again
369 if ((0 != br->n_groups) &&
370 (igmp_pkt_report_v3_get_size (group) > br->base.n_avail))
372 igmp_pkt_build_report_bake (br);
373 b = igmp_pkt_build_report_v3 (br, NULL);
375 /* failed to allocate buffer */
379 igmp_group = igmp_pkt_report_v3_append_group (br, group->key, type);
382 FOR_EACH_SRC (src, group, IGMP_FILTER_MODE_INCLUDE,
384 igmp_group = igmp_pkt_report_v3_append_src(br, igmp_group,
387 if (NULL == igmp_group)
391 igmp_group->n_src_addresses = clib_host_to_net_u16 (br->n_srcs);
393 IGMP_DBG (" ..add-group: %U srcs:%d",
394 format_igmp_key, group->key,
395 hash_elts (group->igmp_src_by_key[IGMP_FILTER_MODE_INCLUDE]));
/**
 * Reset a report builder to a clean state bound to @sw_if_index;
 * must be called before any add/send operation on @br.
 */
399 igmp_pkt_build_report_init (igmp_pkt_build_report_t * br, u32 sw_if_index)
401 memset (br, 0, sizeof (*br));
402 br->base.sw_if_index = sw_if_index;
/**
 * Return the query buffer currently under construction - the last
 * entry in the builder's buffer vector - or (presumably) NULL when
 * no buffer has been started; the early-return body after the NULL
 * check is not visible in this excerpt.
 */
405 static vlib_buffer_t *
406 igmp_pkt_build_query_get_active (igmp_pkt_build_query_t * bq)
408 if (NULL == bq->base.buffers)
411 return (vlib_get_buffer (vlib_get_main (),
412 bq->base.buffers[vec_len (bq->base.buffers) - 1]));
/**
 * Begin a new IGMPv3 Membership Query packet: build the IP header
 * (group-specific or general destination) then lay down a v3 query
 * header. The group address field is the group's address for a
 * group-specific query (@group non-NULL) or zero for a general
 * query. Builder accounting is updated for the query header.
 *
 * Returns the buffer; presumably NULL when the IP-header build
 * failed - that branch is not visible in this excerpt.
 */
415 static vlib_buffer_t *
416 igmp_pkt_build_query_v3 (igmp_pkt_build_query_t * bq,
417 const igmp_group_t * group)
419 igmp_membership_query_v3_t *query;
422 b = igmp_pkt_build_ip_header (&bq->base, IGMP_MSG_QUERY, group);
427 query = vlib_buffer_get_current (b);
428 query->header.type = IGMP_TYPE_membership_query;
429 query->header.code = 0;
430 query->header.checksum = 0;
432 query->resv_s_qrv = 0;
435 query->group_address.as_u32 = group->key->ip4.as_u32;
437 query->group_address.as_u32 = 0;
439 vlib_buffer_append (b, sizeof (igmp_membership_query_v3_t));
440 bq->base.n_avail -= sizeof (igmp_membership_query_v3_t);
441 bq->base.n_bytes += sizeof (igmp_membership_query_v3_t);
447 igmp_pkt_query_v3_add_group (igmp_pkt_build_query_t * bq,
448 const igmp_group_t * group,
449 const ip46_address_t * srcs)
453 b = igmp_pkt_build_query_get_active (bq);
457 b = igmp_pkt_build_query_v3 (bq, group);
459 /* failed to allocate buffer */
465 igmp_membership_query_v3_t *query;
466 const ip46_address_t *src;
468 query = vlib_buffer_get_current (b);
470 vec_foreach (src, srcs)
472 query->src_addresses[bq->n_srcs++].as_u32 = src->ip4.as_u32;
474 vlib_buffer_append (b, sizeof (ip4_address_t));
475 bq->base.n_bytes += sizeof (ip4_address_t);
476 bq->base.n_avail += sizeof (ip4_address_t);
481 * general query and we're done
/**
 * Finalise the active query buffer: write the source count into the
 * v3 query header, compute the IGMP checksum over the n_bytes of
 * IGMP payload, fill in the IP total length and header checksum,
 * then reset the builder's counters.
 *
 * The query header sits 6 u32s (24 bytes: 20-byte IP header + 4-byte
 * router-alert option) past the start of the IP header - mirrors
 * igmp_pkt_build_report_bake().
 */
486 igmp_pkt_build_query_bake (igmp_pkt_build_query_t * bq)
488 igmp_membership_query_v3_t *igmp;
492 b = igmp_pkt_build_query_get_active (bq);
496 ip4 = vlib_buffer_get_current (b);
497 // account for options
498 igmp = (igmp_membership_query_v3_t *) (((u32 *) ip4) + 6);
500 igmp->n_src_addresses = clib_host_to_net_u16 (bq->n_srcs);
502 igmp->header.checksum =
503 ~ip_csum_fold (ip_incremental_checksum (0, igmp, bq->base.n_bytes));
505 ip4->length = clib_host_to_net_u16 (b->current_length);
506 ip4->checksum = ip4_header_checksum (ip4);
508 bq->base.n_bytes = bq->base.n_avail = bq->n_srcs = 0;
/**
 * Send the query under construction: no-op when nothing was built
 * (no buffers), otherwise bake (checksums/lengths) and transmit.
 */
512 igmp_pkt_query_v3_send (igmp_pkt_build_query_t * bq)
514 if (NULL == bq->base.buffers)
517 igmp_pkt_build_query_bake (bq);
518 igmp_pkt_tx (&bq->base);
/**
 * Reset a query builder to a clean state bound to @sw_if_index;
 * must be called before any add/send operation on @bq.
 */
522 igmp_pkt_build_query_init (igmp_pkt_build_query_t * bq, u32 sw_if_index)
524 memset (bq, 0, sizeof (*bq));
525 bq->base.sw_if_index = sw_if_index;
529 * fd.io coding-style-patch-verification: ON
532 * eval: (c-set-style "gnu")