/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*
 * node.c: srp packet processing
 *
 * Copyright (c) 2011 Eliot Dresselhaus
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
40 #include <vlib/vlib.h>
41 #include <vnet/ip/ip_packet.h> /* for ip_csum_fold */
42 #include <vnet/srp/srp.h>
50 static u8 * format_srp_input_trace (u8 * s, va_list * va)
52 CLIB_UNUSED (vlib_main_t * vm) = va_arg (*va, vlib_main_t *);
53 CLIB_UNUSED (vlib_node_t * node) = va_arg (*va, vlib_node_t *);
54 srp_input_trace_t * t = va_arg (*va, srp_input_trace_t *);
56 s = format (s, "%U", format_srp_header, t->packet_data);
63 SRP_INPUT_NEXT_ETHERNET_INPUT,
64 SRP_INPUT_NEXT_CONTROL,
72 } srp_input_disposition_t;
74 static srp_input_disposition_t srp_input_disposition_by_mode[8] = {
75 [SRP_MODE_reserved0] = {
76 .next_index = SRP_INPUT_NEXT_ERROR,
77 .error = SRP_ERROR_UNKNOWN_MODE,
79 [SRP_MODE_reserved1] = {
80 .next_index = SRP_INPUT_NEXT_ERROR,
81 .error = SRP_ERROR_UNKNOWN_MODE,
83 [SRP_MODE_reserved2] = {
84 .next_index = SRP_INPUT_NEXT_ERROR,
85 .error = SRP_ERROR_UNKNOWN_MODE,
87 [SRP_MODE_reserved3] = {
88 .next_index = SRP_INPUT_NEXT_ERROR,
89 .error = SRP_ERROR_UNKNOWN_MODE,
91 [SRP_MODE_keep_alive] = {
92 .next_index = SRP_INPUT_NEXT_ERROR,
93 .error = SRP_ERROR_KEEP_ALIVE_DROPPED,
96 .next_index = SRP_INPUT_NEXT_ETHERNET_INPUT,
97 .buffer_advance = sizeof (srp_header_t),
99 [SRP_MODE_control_pass_to_host] = {
100 .next_index = SRP_INPUT_NEXT_CONTROL,
102 [SRP_MODE_control_locally_buffered_for_host] = {
103 .next_index = SRP_INPUT_NEXT_CONTROL,
108 srp_input (vlib_main_t * vm,
109 vlib_node_runtime_t * node,
110 vlib_frame_t * from_frame)
112 vnet_main_t * vnm = vnet_get_main();
113 srp_main_t * sm = &srp_main;
114 u32 n_left_from, next_index, * from, * to_next;
116 from = vlib_frame_vector_args (from_frame);
117 n_left_from = from_frame->n_vectors;
119 if (node->flags & VLIB_NODE_FLAG_TRACE)
120 vlib_trace_frame_buffers_only (vm, node,
124 sizeof (srp_input_trace_t));
126 next_index = node->cached_next_index;
128 while (n_left_from > 0)
132 vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
134 while (n_left_from >= 4 && n_left_to_next >= 2)
136 u32 bi0, bi1, sw_if_index0, sw_if_index1;
137 vlib_buffer_t * b0, * b1;
138 u8 next0, next1, error0, error1;
139 srp_header_t * s0, * s1;
140 srp_input_disposition_t * d0, * d1;
141 vnet_hw_interface_t * hi0, * hi1;
142 srp_interface_t * si0, * si1;
144 /* Prefetch next iteration. */
146 vlib_buffer_t * b2, * b3;
148 b2 = vlib_get_buffer (vm, from[2]);
149 b3 = vlib_get_buffer (vm, from[3]);
151 vlib_prefetch_buffer_header (b2, LOAD);
152 vlib_prefetch_buffer_header (b3, LOAD);
154 CLIB_PREFETCH (b2->data, sizeof (srp_header_t), LOAD);
155 CLIB_PREFETCH (b3->data, sizeof (srp_header_t), LOAD);
167 b0 = vlib_get_buffer (vm, bi0);
168 b1 = vlib_get_buffer (vm, bi1);
170 s0 = (void *) (b0->data + b0->current_data);
171 s1 = (void *) (b1->data + b1->current_data);
173 /* Data packets are always assigned to side A (outer ring) interface. */
174 sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
175 sw_if_index1 = vnet_buffer (b1)->sw_if_index[VLIB_RX];
177 hi0 = vnet_get_sup_hw_interface (vnm, sw_if_index0);
178 hi1 = vnet_get_sup_hw_interface (vnm, sw_if_index1);
180 si0 = pool_elt_at_index (sm->interface_pool, hi0->hw_instance);
181 si1 = pool_elt_at_index (sm->interface_pool, hi1->hw_instance);
183 sw_if_index0 = (s0->mode == SRP_MODE_data
184 ? si0->rings[SRP_RING_OUTER].sw_if_index
186 sw_if_index1 = (s1->mode == SRP_MODE_data
187 ? si1->rings[SRP_RING_OUTER].sw_if_index
190 vnet_buffer (b0)->sw_if_index[VLIB_RX] = sw_if_index0;
191 vnet_buffer (b1)->sw_if_index[VLIB_RX] = sw_if_index1;
193 d0 = srp_input_disposition_by_mode + s0->mode;
194 d1 = srp_input_disposition_by_mode + s1->mode;
196 next0 = d0->next_index;
197 next1 = d1->next_index;
202 vlib_buffer_advance (b0, d0->buffer_advance);
203 vlib_buffer_advance (b1, d1->buffer_advance);
205 b0->error = node->errors[error0];
206 b1->error = node->errors[error1];
208 vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
209 to_next, n_left_to_next,
210 bi0, bi1, next0, next1);
213 while (n_left_from > 0 && n_left_to_next > 0)
215 u32 bi0, sw_if_index0;
219 srp_input_disposition_t * d0;
220 srp_interface_t * si0;
221 vnet_hw_interface_t * hi0;
230 b0 = vlib_get_buffer (vm, bi0);
232 s0 = (void *) (b0->data + b0->current_data);
234 /* Data packets are always assigned to side A (outer ring) interface. */
235 sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
237 hi0 = vnet_get_sup_hw_interface (vnm, sw_if_index0);
239 si0 = pool_elt_at_index (sm->interface_pool, hi0->hw_instance);
241 sw_if_index0 = (s0->mode == SRP_MODE_data
242 ? si0->rings[SRP_RING_OUTER].sw_if_index
245 vnet_buffer (b0)->sw_if_index[VLIB_RX] = sw_if_index0;
247 d0 = srp_input_disposition_by_mode + s0->mode;
249 next0 = d0->next_index;
253 vlib_buffer_advance (b0, d0->buffer_advance);
255 b0->error = node->errors[error0];
257 vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
258 to_next, n_left_to_next,
262 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
265 return from_frame->n_vectors;
268 static char * srp_error_strings[] = {
274 static vlib_node_registration_t srp_input_node = {
275 .function = srp_input,
277 /* Takes a vector of packets. */
278 .vector_size = sizeof (u32),
280 .n_errors = SRP_N_ERROR,
281 .error_strings = srp_error_strings,
283 .n_next_nodes = SRP_INPUT_N_NEXT,
285 [SRP_INPUT_NEXT_ERROR] = "error-drop",
286 [SRP_INPUT_NEXT_ETHERNET_INPUT] = "ethernet-input",
287 [SRP_INPUT_NEXT_CONTROL] = "srp-control",
290 .format_buffer = format_srp_header_with_length,
291 .format_trace = format_srp_input_trace,
292 .unformat_buffer = unformat_srp_header,
296 srp_topology_packet (vlib_main_t * vm, u32 sw_if_index, u8 ** contents)
298 vnet_main_t * vnm = vnet_get_main();
299 vnet_hw_interface_t * hi = vnet_get_sup_hw_interface (vnm, sw_if_index);
300 srp_topology_header_t * t;
301 srp_topology_mac_binding_t * mb;
304 t = (void *) *contents;
306 nb = clib_net_to_host_u16 (t->n_bytes_of_data_that_follows);
307 nmb = (nb - sizeof (t->originator_address)) / sizeof (mb[0]);
308 if (vec_len (*contents) < sizeof (t[0]) + nmb * sizeof (mb[0]))
309 return SRP_ERROR_TOPOLOGY_BAD_LENGTH;
311 /* Fill in our source MAC address. */
312 clib_memcpy_fast (t->ethernet.src_address, hi->hw_address, vec_len (hi->hw_address));
314 /* Make space for our MAC binding. */
315 vec_resize (*contents, sizeof (srp_topology_mac_binding_t));
316 t = (void *) *contents;
317 t->n_bytes_of_data_that_follows = clib_host_to_net_u16 (nb + sizeof (mb[0]));
319 mb = t->bindings + nmb;
322 ((t->srp.is_inner_ring ? SRP_TOPOLOGY_MAC_BINDING_FLAG_IS_INNER_RING : 0)
323 | (/* is wrapped FIXME */ 0));
324 clib_memcpy_fast (mb->address, hi->hw_address, vec_len (hi->hw_address));
327 = ~ip_csum_fold (ip_incremental_checksum (0, &t->control,
328 vec_len (*contents) - STRUCT_OFFSET_OF (srp_generic_control_header_t, control)));
336 if (vlib_buffer_add_data (vm, /* buffer to append to */ &bi,
337 *contents, vec_len (*contents)))
339 /* complete or partial buffer allocation failure */
341 vlib_buffer_free (vm, &bi, 1);
342 return SRP_ERROR_CONTROL_PACKETS_PROCESSED;
344 b = vlib_get_buffer (vm, bi);
345 vnet_buffer (b)->sw_if_index[VLIB_RX] = vnet_buffer (b)->sw_if_index[VLIB_TX] = sw_if_index;
346 f = vlib_get_frame_to_node (vm, hi->output_node_index);
347 to_next = vlib_frame_vector_args (f);
350 vlib_put_frame_to_node (vm, hi->output_node_index, f);
353 return SRP_ERROR_CONTROL_PACKETS_PROCESSED;
356 typedef uword (srp_control_handler_function_t) (vlib_main_t * vm,
361 srp_control_input (vlib_main_t * vm,
362 vlib_node_runtime_t * node,
363 vlib_frame_t * from_frame)
365 u32 n_left_from, next_index, * from, * to_next;
366 vlib_node_runtime_t * error_node;
367 static u8 * contents;
369 error_node = vlib_node_get_runtime (vm, srp_input_node.index);
371 from = vlib_frame_vector_args (from_frame);
372 n_left_from = from_frame->n_vectors;
374 if (node->flags & VLIB_NODE_FLAG_TRACE)
375 vlib_trace_frame_buffers_only (vm, node,
379 sizeof (srp_input_trace_t));
381 next_index = node->cached_next_index;
383 while (n_left_from > 0)
387 vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
389 while (n_left_from > 0 && n_left_to_next > 0)
391 u32 bi0, l2_len0, l3_len0;
394 srp_generic_control_header_t * s0;
403 b0 = vlib_get_buffer (vm, bi0);
405 s0 = (void *) (b0->data + b0->current_data);
406 l2_len0 = vlib_buffer_length_in_chain (vm, b0);
407 l3_len0 = l2_len0 - STRUCT_OFFSET_OF (srp_generic_control_header_t, control);
409 error0 = SRP_ERROR_CONTROL_PACKETS_PROCESSED;
411 error0 = s0->control.version != 0 ? SRP_ERROR_CONTROL_VERSION_NON_ZERO : error0;
414 u16 save0 = s0->control.checksum;
416 s0->control.checksum = 0;
417 computed0 = ~ip_csum_fold (ip_incremental_checksum (0, &s0->control, l3_len0));
418 error0 = save0 != computed0 ? SRP_ERROR_CONTROL_BAD_CHECKSUM : error0;
421 if (error0 == SRP_ERROR_CONTROL_PACKETS_PROCESSED)
423 static srp_control_handler_function_t * t[SRP_N_CONTROL_PACKET_TYPE] = {
424 [SRP_CONTROL_PACKET_TYPE_topology] = srp_topology_packet,
426 srp_control_handler_function_t * f;
429 if (s0->control.type < ARRAY_LEN (t))
430 f = t[s0->control.type];
434 vec_validate (contents, l2_len0 - 1);
435 vlib_buffer_contents (vm, bi0, contents);
436 error0 = f (vm, vnet_buffer (b0)->sw_if_index[VLIB_RX], &contents);
439 error0 = SRP_ERROR_UNKNOWN_CONTROL;
442 b0->error = error_node->errors[error0];
445 vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
446 to_next, n_left_to_next,
450 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
453 return from_frame->n_vectors;
456 static vlib_node_registration_t srp_control_input_node = {
457 .function = srp_control_input,
458 .name = "srp-control",
459 /* Takes a vector of packets. */
460 .vector_size = sizeof (u32),
467 .format_buffer = format_srp_header_with_length,
468 .format_trace = format_srp_input_trace,
469 .unformat_buffer = unformat_srp_header,
472 static u8 * format_srp_ips_request_type (u8 * s, va_list * args)
474 u32 x = va_arg (*args, u32);
478 #define _(f,n) case SRP_IPS_REQUEST_##f: t = #f; break;
479 foreach_srp_ips_request_type
482 return format (s, "unknown 0x%x", x);
484 return format (s, "%U", format_c_identifier, t);
487 static u8 * format_srp_ips_status (u8 * s, va_list * args)
489 u32 x = va_arg (*args, u32);
493 #define _(f,n) case SRP_IPS_STATUS_##f: t = #f; break;
494 foreach_srp_ips_status
497 return format (s, "unknown 0x%x", x);
499 return format (s, "%U", format_c_identifier, t);
502 static u8 * format_srp_ips_state (u8 * s, va_list * args)
504 u32 x = va_arg (*args, u32);
508 #define _(f) case SRP_IPS_STATE_##f: t = #f; break;
509 foreach_srp_ips_state
512 return format (s, "unknown 0x%x", x);
514 return format (s, "%U", format_c_identifier, t);
517 static u8 * format_srp_ring (u8 * s, va_list * args)
519 u32 ring = va_arg (*args, u32);
520 return format (s, "%s", ring == SRP_RING_INNER ? "inner" : "outer");
523 static u8 * format_srp_ips_header (u8 * s, va_list * args)
525 srp_ips_header_t * h = va_arg (*args, srp_ips_header_t *);
527 s = format (s, "%U, %U, %U, %s-path",
528 format_srp_ips_request_type, h->request_type,
529 format_ethernet_address, h->originator_address,
530 format_srp_ips_status, h->status,
531 h->is_long_path ? "long" : "short");
536 static u8 * format_srp_interface (u8 * s, va_list * args)
538 srp_interface_t * si = va_arg (*args, srp_interface_t *);
539 srp_interface_ring_t * ir;
541 s = format (s, "address %U, IPS state %U",
542 format_ethernet_address, si->my_address,
543 format_srp_ips_state, si->current_ips_state);
544 for (ir = si->rings; ir < si->rings + SRP_N_RING; ir++)
545 if (ir->rx_neighbor_address_valid)
546 s = format (s, ", %U neighbor %U",
547 format_srp_ring, ir->ring,
548 format_ethernet_address, ir->rx_neighbor_address);
553 u8 * format_srp_device (u8 * s, va_list * args)
555 u32 hw_if_index = va_arg (*args, u32);
556 CLIB_UNUSED (int verbose) = va_arg (*args, int);
557 vnet_main_t * vnm = vnet_get_main();
558 srp_main_t * sm = &srp_main;
559 vnet_hw_interface_t * hi = vnet_get_hw_interface (vnm, hw_if_index);
560 srp_interface_t * si = pool_elt_at_index (sm->interface_pool, hi->hw_instance);
561 return format (s, "%U", format_srp_interface, si);
564 always_inline srp_interface_t *
565 srp_get_interface (u32 sw_if_index, srp_ring_type_t * ring)
567 vnet_main_t * vnm = vnet_get_main();
568 srp_main_t * sm = &srp_main;
569 vnet_hw_interface_t * hi = vnet_get_sup_hw_interface (vnm, sw_if_index);
570 srp_interface_t * si;
572 ASSERT (hi->hw_class_index == srp_hw_interface_class.index);
573 si = pool_elt_at_index (sm->interface_pool, hi->hw_instance);
575 ASSERT (si->rings[SRP_RING_INNER].hw_if_index == hi->hw_if_index
576 || si->rings[SRP_RING_OUTER].hw_if_index == hi->hw_if_index);
579 (hi->hw_if_index == si->rings[SRP_RING_INNER].hw_if_index
586 static void init_ips_packet (srp_interface_t * si,
587 srp_ring_type_t tx_ring,
588 srp_ips_header_t * i)
590 clib_memset (i, 0, sizeof (i[0]));
593 i->srp.is_inner_ring = tx_ring;
595 i->srp.mode = SRP_MODE_control_locally_buffered_for_host;
596 srp_header_compute_parity (&i->srp);
598 clib_memcpy_fast (&i->ethernet.src_address, &si->my_address, sizeof (si->my_address));
599 i->ethernet.type = clib_host_to_net_u16 (ETHERNET_TYPE_SRP_CONTROL);
601 /* Checksum will be filled in later. */
602 i->control.version = 0;
603 i->control.type = SRP_CONTROL_PACKET_TYPE_ips;
604 i->control.ttl = 255;
606 clib_memcpy_fast (&i->originator_address, &si->my_address, sizeof (si->my_address));
609 static void tx_ips_packet (srp_interface_t * si,
610 srp_ring_type_t tx_ring,
611 srp_ips_header_t * i)
613 srp_main_t * sm = &srp_main;
614 vnet_main_t * vnm = vnet_get_main();
615 vlib_main_t * vm = sm->vlib_main;
616 vnet_hw_interface_t * hi = vnet_get_hw_interface (vnm, si->rings[tx_ring].hw_if_index);
619 u32 * to_next, bi = ~0;
621 if (! vnet_sw_interface_is_admin_up (vnm, hi->sw_if_index))
623 if (hi->hw_class_index != srp_hw_interface_class.index)
627 = ~ip_csum_fold (ip_incremental_checksum (0, &i->control,
628 sizeof (i[0]) - STRUCT_OFFSET_OF (srp_ips_header_t, control)));
630 if (vlib_buffer_add_data (vm, /* buffer to append to */ &bi, i,
633 /* complete or partial allocation failure */
635 vlib_buffer_free (vm, &bi, 1);
641 clib_warning ("%U %U",
642 format_vnet_sw_if_index_name, vnm, hi->sw_if_index,
643 format_srp_ips_header, i);
645 b = vlib_get_buffer (vm, bi);
646 vnet_buffer (b)->sw_if_index[VLIB_RX] = vnet_buffer (b)->sw_if_index[VLIB_TX] = hi->sw_if_index;
648 f = vlib_get_frame_to_node (vm, hi->output_node_index);
649 to_next = vlib_frame_vector_args (f);
652 vlib_put_frame_to_node (vm, hi->output_node_index, f);
655 static int requests_switch (srp_ips_request_type_t r)
658 [SRP_IPS_REQUEST_forced_switch] = 1,
659 [SRP_IPS_REQUEST_manual_switch] = 1,
660 [SRP_IPS_REQUEST_signal_fail] = 1,
661 [SRP_IPS_REQUEST_signal_degrade] = 1,
663 return (int) r < ARRAY_LEN (t) ? t[r] : 0;
666 /* Called when an IPS control packet is received on given interface. */
667 void srp_ips_rx_packet (u32 sw_if_index, srp_ips_header_t * h)
669 vnet_main_t * vnm = vnet_get_main();
670 vlib_main_t * vm = srp_main.vlib_main;
671 srp_ring_type_t rx_ring;
672 srp_interface_t * si = srp_get_interface (sw_if_index, &rx_ring);
673 srp_interface_ring_t * ir = &si->rings[rx_ring];
677 clib_warning ("%U %U %U",
678 format_time_interval, "h:m:s:u", vlib_time_now (vm),
679 format_vnet_sw_if_index_name, vnm, sw_if_index,
680 format_srp_ips_header, h);
682 /* Ignore self-generated IPS packets. */
683 if (! memcmp (h->originator_address, si->my_address, sizeof (h->originator_address)))
686 /* Learn neighbor address from short path messages. */
687 if (! h->is_long_path)
689 if (ir->rx_neighbor_address_valid
690 && memcmp (ir->rx_neighbor_address, h->originator_address, sizeof (ir->rx_neighbor_address)))
694 ir->rx_neighbor_address_valid = 1;
695 clib_memcpy_fast (ir->rx_neighbor_address, h->originator_address, sizeof (ir->rx_neighbor_address));
698 switch (si->current_ips_state)
700 case SRP_IPS_STATE_idle:
701 /* Received {REQ,NEIGHBOR,W,S} in idle state: wrap. */
702 if (requests_switch (h->request_type)
704 && h->status == SRP_IPS_STATUS_wrapped)
706 srp_ips_header_t to_tx[2];
708 si->current_ips_state = SRP_IPS_STATE_wrapped;
709 si->hw_wrap_function (si->rings[SRP_SIDE_A].hw_if_index, /* enable_wrap */ 1);
710 si->hw_wrap_function (si->rings[SRP_SIDE_B].hw_if_index, /* enable_wrap */ 1);
712 init_ips_packet (si, rx_ring ^ 0, &to_tx[0]);
713 to_tx[0].request_type = SRP_IPS_REQUEST_idle;
714 to_tx[0].status = SRP_IPS_STATUS_wrapped;
715 to_tx[0].is_long_path = 0;
716 tx_ips_packet (si, rx_ring ^ 0, &to_tx[0]);
718 init_ips_packet (si, rx_ring ^ 1, &to_tx[1]);
719 to_tx[1].request_type = h->request_type;
720 to_tx[1].status = SRP_IPS_STATUS_wrapped;
721 to_tx[1].is_long_path = 1;
722 tx_ips_packet (si, rx_ring ^ 1, &to_tx[1]);
726 case SRP_IPS_STATE_wrapped:
727 if (! h->is_long_path
728 && h->request_type == SRP_IPS_REQUEST_idle
729 && h->status == SRP_IPS_STATUS_idle)
731 si->current_ips_state = SRP_IPS_STATE_idle;
732 si->hw_wrap_function (si->rings[SRP_SIDE_A].hw_if_index, /* enable_wrap */ 0);
733 si->hw_wrap_function (si->rings[SRP_SIDE_B].hw_if_index, /* enable_wrap */ 0);
737 case SRP_IPS_STATE_pass_thru:
749 /* Preform local IPS request on given interface. */
750 void srp_ips_local_request (u32 sw_if_index, srp_ips_request_type_t request)
752 vnet_main_t * vnm = vnet_get_main();
753 srp_main_t * sm = &srp_main;
754 srp_ring_type_t rx_ring;
755 srp_interface_t * si = srp_get_interface (sw_if_index, &rx_ring);
756 srp_interface_ring_t * ir = &si->rings[rx_ring];
758 if (request == SRP_IPS_REQUEST_wait_to_restore)
760 if (si->current_ips_state != SRP_IPS_STATE_wrapped)
762 if (! ir->waiting_to_restore)
764 ir->wait_to_restore_start_time = vlib_time_now (sm->vlib_main);
765 ir->waiting_to_restore = 1;
770 /* FIXME handle local signal fail. */
771 ir->wait_to_restore_start_time = 0;
772 ir->waiting_to_restore = 0;
777 clib_warning ("%U %U",
778 format_vnet_sw_if_index_name, vnm, sw_if_index,
779 format_srp_ips_request_type, request);
783 static void maybe_send_ips_message (srp_interface_t * si)
785 srp_main_t * sm = &srp_main;
786 srp_ips_header_t to_tx[2];
787 srp_ring_type_t rx_ring = SRP_RING_OUTER;
788 srp_interface_ring_t * r0 = &si->rings[rx_ring ^ 0];
789 srp_interface_ring_t * r1 = &si->rings[rx_ring ^ 1];
790 f64 now = vlib_time_now (sm->vlib_main);
792 if (! si->ips_process_enable)
795 if (si->current_ips_state == SRP_IPS_STATE_wrapped
796 && r0->waiting_to_restore
797 && r1->waiting_to_restore
798 && now >= r0->wait_to_restore_start_time + si->config.wait_to_restore_idle_delay
799 && now >= r1->wait_to_restore_start_time + si->config.wait_to_restore_idle_delay)
801 si->current_ips_state = SRP_IPS_STATE_idle;
802 r0->waiting_to_restore = r1->waiting_to_restore = 0;
803 r0->wait_to_restore_start_time = r1->wait_to_restore_start_time = 0;
806 if (si->current_ips_state != SRP_IPS_STATE_idle)
809 init_ips_packet (si, rx_ring ^ 0, &to_tx[0]);
810 init_ips_packet (si, rx_ring ^ 1, &to_tx[1]);
812 if (si->current_ips_state == SRP_IPS_STATE_idle)
814 to_tx[0].request_type = to_tx[1].request_type = SRP_IPS_REQUEST_idle;
815 to_tx[0].status = to_tx[1].status = SRP_IPS_STATUS_idle;
816 to_tx[0].is_long_path = to_tx[1].is_long_path = 0;
819 else if (si->current_ips_state == SRP_IPS_STATE_wrapped)
821 to_tx[0].request_type =
822 (si->rings[rx_ring ^ 0].waiting_to_restore
823 ? SRP_IPS_REQUEST_wait_to_restore
824 : SRP_IPS_REQUEST_signal_fail);
825 to_tx[1].request_type =
826 (si->rings[rx_ring ^ 1].waiting_to_restore
827 ? SRP_IPS_REQUEST_wait_to_restore
828 : SRP_IPS_REQUEST_signal_fail);
829 to_tx[0].status = to_tx[1].status = SRP_IPS_STATUS_wrapped;
830 to_tx[0].is_long_path = 0;
831 to_tx[1].is_long_path = 1;
834 tx_ips_packet (si, rx_ring ^ 0, &to_tx[0]);
835 tx_ips_packet (si, rx_ring ^ 1, &to_tx[1]);
839 srp_ips_process (vlib_main_t * vm,
840 vlib_node_runtime_t * rt,
843 srp_main_t * sm = &srp_main;
844 srp_interface_t * si;
848 pool_foreach (si, sm->interface_pool, ({
849 maybe_send_ips_message (si);
851 vlib_process_suspend (vm, 1.0);
857 vlib_node_registration_t srp_ips_process_node = {
858 .function = srp_ips_process,
859 .type = VLIB_NODE_TYPE_PROCESS,
860 .name = "srp-ips-process",
861 .state = VLIB_NODE_STATE_DISABLED,
864 static clib_error_t * srp_init (vlib_main_t * vm)
866 srp_main_t * sm = &srp_main;
868 sm->default_data_ttl = 255;
870 vlib_register_node (vm, &srp_ips_process_node);
871 vlib_register_node (vm, &srp_input_node);
872 vlib_register_node (vm, &srp_control_input_node);
873 srp_setup_node (vm, srp_input_node.index);
878 VLIB_INIT_FUNCTION (srp_init);