/*
 *------------------------------------------------------------------
 * Copyright (c) 2017 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */
18 #include <igmp/igmp_pkt.h>
21 vlib_buffer_append (vlib_buffer_t * b, uword l)
24 b->current_length += l;
27 static vlib_buffer_t *
28 igmp_pkt_get_buffer (igmp_pkt_build_t * bk)
30 vlib_buffer_free_list_t *fl;
35 vm = vlib_get_main ();
37 if (vlib_buffer_alloc (vm, &bi, 1) != 1)
40 b = vlib_get_buffer (vm, bi);
41 fl = vlib_buffer_get_free_list (vm, VLIB_BUFFER_DEFAULT_FREE_LIST_INDEX);
42 vlib_buffer_init_for_free_list (b, fl);
43 VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b);
45 b->flags |= VNET_BUFFER_F_LOCALLY_ORIGINATED;
46 b->flags |= VLIB_BUFFER_IS_TRACED;
48 /* clear out stale data */
49 vnet_buffer (b)->sw_if_index[VLIB_RX] = ~0;
52 * save progress in the builder
54 vec_add1 (bk->buffers, bi);
55 bk->n_avail = vnet_sw_interface_get_mtu (vnet_get_main (),
56 bk->sw_if_index, VNET_MTU_IP4);
61 static vlib_buffer_t *
62 igmp_pkt_build_ip_header (igmp_pkt_build_t * bk,
63 igmp_msg_type_t msg_type,
64 const igmp_group_t * group)
70 b = igmp_pkt_get_buffer (bk);
75 ip4 = vlib_buffer_get_current (b);
76 clib_memset (ip4, 0, sizeof (ip4_header_t));
77 ip4->ip_version_and_header_length = 0x46;
79 ip4->protocol = IP_PROTOCOL_IGMP;
82 ip4_src_address_for_packet (&ip4_main.lookup_main,
83 bk->sw_if_index, &ip4->src_address);
85 vlib_buffer_append (b, sizeof (*ip4));
86 bk->n_avail -= sizeof (*ip4);
91 ip4->dst_address.as_u32 = IGMP_MEMBERSHIP_REPORT_ADDRESS;
95 clib_memcpy_fast (&ip4->dst_address, &group->key->ip4,
96 sizeof (ip4_address_t));
98 ip4->dst_address.as_u32 = IGMP_GENERAL_QUERY_ADDRESS;
102 /* add the router alert options */
103 option = vlib_buffer_get_current (b);
104 option[0] = 0x80 | 20; // IP4_ROUTER_ALERT_OPTION;
105 option[1] = 4; // length
106 option[2] = option[3] = 0;
108 vlib_buffer_append (b, 4);
114 static vlib_buffer_t *
115 igmp_pkt_build_report_v3 (igmp_pkt_build_report_t * br,
116 const igmp_group_t * group)
118 igmp_membership_report_v3_t *report;
121 b = igmp_pkt_build_ip_header (&br->base, IGMP_MSG_REPORT, group);
126 report = vlib_buffer_get_current (b);
127 report->header.type = IGMP_TYPE_membership_report_v3;
128 report->header.code = 0;
129 report->header.checksum = 0;
132 vlib_buffer_append (b, sizeof (igmp_membership_report_v3_t));
133 br->base.n_avail -= sizeof (igmp_membership_report_v3_t);
134 br->base.n_bytes += sizeof (igmp_membership_report_v3_t);
140 igmp_pkt_tx (igmp_pkt_build_t * bk)
142 const igmp_config_t *config;
149 vm = vlib_get_main ();
150 config = igmp_config_lookup (bk->sw_if_index);
155 f = vlib_get_frame_to_node (vm, ip4_rewrite_mcast_node.index);
156 to_next = vlib_frame_vector_args (f);
158 vec_foreach_index (ii, bk->buffers)
160 b = vlib_get_buffer (vm, bk->buffers[ii]);
161 vnet_buffer (b)->ip.adj_index[VLIB_TX] = config->adj_index;
162 to_next[ii] = bk->buffers[ii];
166 vlib_put_frame_to_node (vm, ip4_rewrite_mcast_node.index, f);
168 IGMP_DBG (" ..tx: %U", format_vnet_sw_if_index_name,
169 vnet_get_main (), bk->sw_if_index);
171 vec_free (bk->buffers);
175 static vlib_buffer_t *
176 igmp_pkt_build_report_get_active (igmp_pkt_build_report_t * br)
178 if (NULL == br->base.buffers)
181 return (vlib_get_buffer (vlib_get_main (),
182 br->base.buffers[vec_len (br->base.buffers) - 1]));
186 igmp_pkt_build_report_bake (igmp_pkt_build_report_t * br)
188 igmp_membership_report_v3_t *igmp;
192 b = igmp_pkt_build_report_get_active (br);
196 ip4 = vlib_buffer_get_current (b);
197 igmp = (igmp_membership_report_v3_t *) (((u32 *) ip4) + 6);
199 igmp->n_groups = clib_host_to_net_u16 (br->n_groups);
201 igmp->header.checksum =
202 ~ip_csum_fold (ip_incremental_checksum (0, igmp, br->base.n_bytes));
204 ip4->length = clib_host_to_net_u16 (b->current_length);
205 ip4->checksum = ip4_header_checksum (ip4);
207 br->base.n_bytes = br->base.n_avail = br->n_groups = 0;
211 igmp_pkt_report_v3_send (igmp_pkt_build_report_t * br)
213 if (NULL == br->base.buffers)
216 igmp_pkt_build_report_bake (br);
217 igmp_pkt_tx (&br->base);
221 igmp_pkt_report_v3_get_size (const igmp_group_t * group)
223 ASSERT (IGMP_FILTER_MODE_INCLUDE == group->router_filter_mode);
225 return ((hash_elts (group->igmp_src_by_key[IGMP_FILTER_MODE_INCLUDE]) *
226 sizeof (ip4_address_t)) + sizeof (igmp_membership_group_v3_t));
229 static igmp_membership_group_v3_t *
230 igmp_pkt_report_v3_append_group (igmp_pkt_build_report_t * br,
231 const ip46_address_t * grp,
232 igmp_membership_group_v3_type_t type)
234 igmp_membership_group_v3_t *igmp_group;
237 b = igmp_pkt_build_report_get_active (br);
239 if (br->base.n_avail < sizeof (igmp_membership_group_v3_t))
241 igmp_pkt_build_report_bake (br);
242 b = igmp_pkt_build_report_v3 (br, NULL);
246 br->base.n_avail -= sizeof (igmp_membership_group_v3_t);
247 br->base.n_bytes += sizeof (igmp_membership_group_v3_t);
251 igmp_group = vlib_buffer_get_current (b);
252 vlib_buffer_append (b, sizeof (igmp_membership_group_v3_t));
254 igmp_group->type = type;
255 igmp_group->n_aux_u32s = 0;
256 igmp_group->n_src_addresses = 0;
257 igmp_group->group_address.as_u32 = grp->ip4.as_u32;
/*
 * From RFC 3376:
 *
 * " If the set of Group Records required in a Report does not fit within
 *  the size limit of a single Report message (as determined by the MTU
 *  of the network on which it will be sent), the Group Records are sent
 *  in as many Report messages as needed to report the entire set.
 *
 *  If a single Group Record contains so many source addresses that it
 *  does not fit within the size limit of a single Report message, if its
 *  Type is not MODE_IS_EXCLUDE or CHANGE_TO_EXCLUDE_MODE, it is split
 *  into multiple Group Records, each containing a different subset of
 *  the source addresses and each sent in a separate Report message. If
 *  its Type is MODE_IS_EXCLUDE or CHANGE_TO_EXCLUDE_MODE, a single Group
 *  Record is sent, containing as many source addresses as can fit, and
 *  the remaining source addresses are not reported; though the choice of
 *  which sources to report is arbitrary, it is preferable to report the
 *  same set of sources in each subsequent report, rather than reporting
 *  different sources each time."
 */
281 static igmp_membership_group_v3_t *
282 igmp_pkt_report_v3_append_src (igmp_pkt_build_report_t * br,
283 igmp_membership_group_v3_t * igmp_group,
284 const ip46_address_t * grp,
285 igmp_membership_group_v3_type_t type,
286 const ip46_address_t * src)
290 b = igmp_pkt_build_report_get_active (br);
292 if (br->base.n_avail < sizeof (ip4_address_t))
294 igmp_group->n_src_addresses = clib_host_to_net_u16 (br->n_srcs);
295 igmp_pkt_build_report_bake (br);
296 b = igmp_pkt_build_report_v3 (br, NULL);
299 igmp_group = igmp_pkt_report_v3_append_group (br, grp, type);
302 igmp_group->src_addresses[br->n_srcs].as_u32 = src->ip4.as_u32;
304 br->base.n_avail -= sizeof (ip4_address_t);
305 br->base.n_bytes += sizeof (ip4_address_t);
306 vlib_buffer_append (b, sizeof (ip4_address_t));
312 igmp_pkt_report_v3_add_report (igmp_pkt_build_report_t * br,
313 const ip46_address_t * grp,
314 const ip46_address_t * srcs,
315 igmp_membership_group_v3_type_t type)
317 igmp_membership_group_v3_t *igmp_group;
318 const ip46_address_t *s;
321 b = igmp_pkt_build_report_get_active (br);
325 b = igmp_pkt_build_report_v3 (br, NULL);
327 /* failed to allocate buffer */
331 igmp_group = igmp_pkt_report_v3_append_group (br, grp, type);
333 if (NULL == igmp_group)
339 igmp_group = igmp_pkt_report_v3_append_src(br, igmp_group,
341 if (NULL == igmp_group)
346 igmp_group->n_src_addresses = clib_host_to_net_u16 (br->n_srcs);
348 IGMP_DBG (" ..add-group: %U", format_ip46_address, grp, IP46_TYPE_IP4);
352 igmp_pkt_report_v3_add_group (igmp_pkt_build_report_t * br,
353 const igmp_group_t * group,
354 igmp_membership_group_v3_type_t type)
356 igmp_membership_group_v3_t *igmp_group;
360 b = igmp_pkt_build_report_get_active (br);
364 b = igmp_pkt_build_report_v3 (br, NULL);
366 /* failed to allocate buffer */
371 * if the group won't fit in a partially full buffer, start again
373 if ((0 != br->n_groups) &&
374 (igmp_pkt_report_v3_get_size (group) > br->base.n_avail))
376 igmp_pkt_build_report_bake (br);
377 b = igmp_pkt_build_report_v3 (br, NULL);
379 /* failed to allocate buffer */
383 igmp_group = igmp_pkt_report_v3_append_group (br, group->key, type);
386 FOR_EACH_SRC (src, group, IGMP_FILTER_MODE_INCLUDE,
388 igmp_group = igmp_pkt_report_v3_append_src(br, igmp_group,
391 if (NULL == igmp_group)
395 igmp_group->n_src_addresses = clib_host_to_net_u16 (br->n_srcs);
397 IGMP_DBG (" ..add-group: %U srcs:%d",
398 format_igmp_key, group->key,
399 hash_elts (group->igmp_src_by_key[IGMP_FILTER_MODE_INCLUDE]));
403 igmp_pkt_build_report_init (igmp_pkt_build_report_t * br, u32 sw_if_index)
405 clib_memset (br, 0, sizeof (*br));
406 br->base.sw_if_index = sw_if_index;
409 static vlib_buffer_t *
410 igmp_pkt_build_query_get_active (igmp_pkt_build_query_t * bq)
412 if (NULL == bq->base.buffers)
415 return (vlib_get_buffer (vlib_get_main (),
416 bq->base.buffers[vec_len (bq->base.buffers) - 1]));
419 static vlib_buffer_t *
420 igmp_pkt_build_query_v3 (igmp_pkt_build_query_t * bq,
421 const igmp_group_t * group)
423 igmp_membership_query_v3_t *query;
426 b = igmp_pkt_build_ip_header (&bq->base, IGMP_MSG_QUERY, group);
431 query = vlib_buffer_get_current (b);
432 query->header.type = IGMP_TYPE_membership_query;
433 query->header.code = 0;
434 query->header.checksum = 0;
436 query->resv_s_qrv = 0;
439 query->group_address.as_u32 = group->key->ip4.as_u32;
441 query->group_address.as_u32 = 0;
443 vlib_buffer_append (b, sizeof (igmp_membership_query_v3_t));
444 bq->base.n_avail -= sizeof (igmp_membership_query_v3_t);
445 bq->base.n_bytes += sizeof (igmp_membership_query_v3_t);
451 igmp_pkt_query_v3_add_group (igmp_pkt_build_query_t * bq,
452 const igmp_group_t * group,
453 const ip46_address_t * srcs)
457 b = igmp_pkt_build_query_get_active (bq);
461 b = igmp_pkt_build_query_v3 (bq, group);
463 /* failed to allocate buffer */
469 igmp_membership_query_v3_t *query;
470 const ip46_address_t *src;
472 query = vlib_buffer_get_current (b);
474 vec_foreach (src, srcs)
476 query->src_addresses[bq->n_srcs++].as_u32 = src->ip4.as_u32;
478 vlib_buffer_append (b, sizeof (ip4_address_t));
479 bq->base.n_bytes += sizeof (ip4_address_t);
480 bq->base.n_avail += sizeof (ip4_address_t);
485 * general query and we're done
490 igmp_pkt_build_query_bake (igmp_pkt_build_query_t * bq)
492 igmp_membership_query_v3_t *igmp;
496 b = igmp_pkt_build_query_get_active (bq);
500 ip4 = vlib_buffer_get_current (b);
501 // account for options
502 igmp = (igmp_membership_query_v3_t *) (((u32 *) ip4) + 6);
504 igmp->n_src_addresses = clib_host_to_net_u16 (bq->n_srcs);
506 igmp->header.checksum =
507 ~ip_csum_fold (ip_incremental_checksum (0, igmp, bq->base.n_bytes));
509 ip4->length = clib_host_to_net_u16 (b->current_length);
510 ip4->checksum = ip4_header_checksum (ip4);
512 bq->base.n_bytes = bq->base.n_avail = bq->n_srcs = 0;
516 igmp_pkt_query_v3_send (igmp_pkt_build_query_t * bq)
518 if (NULL == bq->base.buffers)
521 igmp_pkt_build_query_bake (bq);
522 igmp_pkt_tx (&bq->base);
526 igmp_pkt_build_query_init (igmp_pkt_build_query_t * bq, u32 sw_if_index)
528 clib_memset (bq, 0, sizeof (*bq));
529 bq->base.sw_if_index = sw_if_index;
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */