2 * Copyright (c) 2017 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
7 * http://www.apache.org/licenses/LICENSE-2.0
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
16 #include <vppinfra/error.h>
/* Return non-zero iff rules @key and @r are identical, i.e. every 64-bit
 * word of both their match and mask fields compares equal.
 * NOTE(review): braces / early-return lines are elided in this view;
 * presumably each failed comparison returns 0 and the function returns 1
 * at the end — confirm against the full source. */
18 u8 RT (rule_is_exact_match) (RTT (mma_rule) * key, RTT (mma_rule) * r)
/* Compare the match halves word-by-word. */
22 for (i = 0; i < ARRAY_LEN (key->match.as_u64); i++)
24 if (key->match.as_u64[i] != r->match.as_u64[i])
/* Match halves agree; now compare the mask halves the same way. */
27 for (i = 0; i < ARRAY_LEN (key->mask.as_u64); i++)
29 if (key->mask.as_u64[i] != r->mask.as_u64[i])
/* Return non-zero iff @key matches rule @r: mask @key with r->mask and
 * compare the result word-by-word against r->match.
 * NOTE(review): the line that copies *key into the stack temporary
 * _tmp_key is elided in this view — tkp is presumably initialized from
 * @key before the masking loop (otherwise the AND below would operate on
 * uninitialized stack memory); confirm against the full source. */
36 RT (rule_is_match_for_key) (RTT (mma_mask_or_match) * key, RTT (mma_rule) * r)
/* Work on a stack copy so the caller's key is left untouched. */
38 RTT (mma_mask_or_match) _tmp_key, *tkp = &_tmp_key;
/* Apply the rule's mask to the (copied) key... */
42 for (i = 0; i < ARRAY_LEN (tkp->as_u64); i++)
43 tkp->as_u64[i] &= r->mask.as_u64[i];
/* ...then the masked key must equal the rule's match exactly. */
44 for (i = 0; i < ARRAY_LEN (tkp->as_u64); i++)
46 if (tkp->as_u64[i] != r->match.as_u64[i])
/* Allocate a fresh, zero-initialized rule from the table's pool.
 * NOTE(review): the `return rule;` line is elided in this view. */
52 RTT (mma_rule) * RT (mma_rules_table_rule_alloc) (RTT (mma_rules_table) * srt)
54 RTT (mma_rule) * rule;
/* pool_get may grow/reallocate srt->rules; `rule` points into the pool. */
55 pool_get (srt->rules, rule);
/* Pool elements are recycled, so clear any stale contents. */
56 clib_memset (rule, 0, sizeof (*rule));
/* Return @rule to the table's pool and poison its memory with 0xfa so
 * use-after-free reads are easy to spot in a debugger.
 * NOTE(review): the poison write happens AFTER pool_put — pool_put only
 * marks the slot free (the backing memory stays mapped), so this is
 * intentional debug poisoning rather than a use-after-free bug; confirm
 * this ordering is deliberate against the full source. */
61 RT (mma_rule_free) (RTT (mma_rules_table) * srt, RTT (mma_rule) * rule)
63 pool_put (srt->rules, rule);
64 clib_memset (rule, 0xfa, sizeof (*rule));
/* Return a pointer to the rule at pool index @srt_index, or (per the
 * elided fall-through path — confirm) a null/0 result when the index is
 * not a live pool element. */
69 RT (mma_rules_table_get_rule) (RTT (mma_rules_table) * srt, u32 srt_index)
/* Only dereference indices that are currently allocated in the pool. */
71 if (!pool_is_free_index (srt->rules, srt_index))
72 return (srt->rules + srt_index);
/* Convert a rule pointer back into its pool index via pointer
 * subtraction; valid only for pointers obtained from srt->rules. */
77 RT (mma_rules_table_rule_index) (RTT (mma_rules_table) * srt,
81 return (sr - srt->rules);
87 * This should be optimized .. eventually
/* Recursive longest/deepest-match lookup starting at @rule_index:
 * returns the action_index of the most specific rule matching @key, or
 * MMA_TABLE_INVALID_INDEX if the rule at @rule_index does not match.
 * NOTE(review): the early `return rv;` taken when a child matches is
 * elided in this view — presumably a matching child's result is returned
 * in preference to this rule's own action; confirm. */
90 RT (mma_rules_table_lookup) (RTT (mma_rules_table) * srt,
91 RTT (mma_mask_or_match) * key, u32 rule_index)
97 ASSERT (rule_index != MMA_TABLE_INVALID_INDEX);
98 rp = RT (mma_rules_table_get_rule) (srt, rule_index);
/* Prune: if this rule doesn't cover the key, no descendant can. */
101 if (!RT (rule_is_match_for_key) (key, rp))
102 return MMA_TABLE_INVALID_INDEX;
/* Depth-first search of the children for a more specific match. */
103 for (i = 0; i < vec_len (rp->next_indices); i++)
105 rv = RT (mma_rules_table_lookup) (srt, key, rp->next_indices[i]);
106 if (rv != MMA_TABLE_INVALID_INDEX)
/* No child was more specific: this rule's action wins. */
109 return (rp->action_index);
/* Same depth-first descent as mma_rules_table_lookup, but returns the
 * INDEX of the deepest matching rule rather than its action_index.
 * Returns MMA_TABLE_INVALID_INDEX when the rule at @rule_index does not
 * match @key.
 * NOTE(review): the success returns (child's rv, and this rule's own
 * index as fallback) are on lines elided from this view — confirm. */
113 RT (mma_rules_table_lookup_rule) (RTT (mma_rules_table) * srt,
114 RTT (mma_mask_or_match) * key,
121 ASSERT (rule_index != MMA_TABLE_INVALID_INDEX);
122 rp = RT (mma_rules_table_get_rule) (srt, rule_index);
/* Prune subtrees whose root doesn't cover the key. */
125 if (!RT (rule_is_match_for_key) (key, rp))
126 return MMA_TABLE_INVALID_INDEX;
/* Recurse into children looking for a more specific matching rule. */
127 for (i = 0; i < vec_len (rp->next_indices); i++)
129 rv = RT (mma_rules_table_lookup_rule) (srt, key, rp->next_indices[i]);
130 if (rv != MMA_TABLE_INVALID_INDEX)
137 RTT (mma_rules_table) *
/* qsort-style comparator over two rule-index elements (u32 *), used by
 * vec_sort_with_function below. Dereferences the indices through the
 * file-static table pointer RTT(sort_srt) — set by RT(mma_sort) — since
 * the comparator signature carries no user context. NOTE(review): this
 * static-context scheme is not thread-safe; presumably callers serialize
 * sorting — confirm. */
140 int RT (mma_sort_indices) (void *e1, void *e2)
142 u32 *ri1 = e1, *ri2 = e2;
143 RTT (mma_rule) * rule1, *rule2;
/* Resolve both indices to rules in the table being sorted. */
144 rule1 = RT (mma_rules_table_get_rule) (RTT (sort_srt), *ri1);
145 rule2 = RT (mma_rules_table_get_rule) (RTT (sort_srt), *ri2);
/* Delegate the ordering to the table's user-supplied comparison fn. */
146 return RTT (sort_srt)->rule_cmp_fn (rule1, rule2);
/* Sort a vector of rule indices by the table's rule_cmp_fn. Stashes
 * @srt in the file-static RTT(sort_srt) so the contextless comparator
 * RT(mma_sort_indices) can reach the table. */
149 void RT (mma_sort) (RTT (mma_rules_table) * srt, u32 * next_indices)
151 RTT (sort_srt) = srt;
152 vec_sort_with_function (next_indices, RT (mma_sort_indices));
/* Insert @rule (already allocated from srt's pool) into the match tree.
 *
 * Outline of the visible logic:
 *  1. Find the deepest existing rule ("parent") that covers the new
 *     rule's match.
 *  2. If the parent is an exact duplicate, just update its action and
 *     free the new rule.
 *  3. If the parent has no children, append the new rule as its only
 *     child.
 *  4. Otherwise rebuild the parent's child list: children covered by
 *     the new rule are re-parented under it; the new rule itself is
 *     inserted in rule_cmp_fn order among the remaining children.
 *
 * NOTE(review): several lines (braces, `added = 1`, early returns, the
 * root-index argument to the lookup on the wrapped call) are elided in
 * this view — the flow above is inferred from what is visible; confirm
 * against the full source. */
156 RT (mma_rules_table_add_rule) (RTT (mma_rules_table) * srt,
157 RTT (mma_rule) * rule)
159 u32 parent_index, i, *next_indices = 0, added = 0, rule_index;
160 RTT (mma_rule) * parent, *child;
162 rule_index = RT (mma_rules_table_rule_index) (srt, rule);
/* Deepest existing rule that covers the new rule's match. */
163 parent_index = RT (mma_rules_table_lookup_rule) (srt, &rule->match,
165 parent = RT (mma_rules_table_get_rule) (srt, parent_index);
/* Duplicate rule: overwrite the action in place and drop the new one. */
166 if (RT (rule_is_exact_match) (rule, parent))
168 parent->action_index = rule->action_index;
169 RT (mma_rule_free) (srt, rule);
/* Childless parent: the new rule simply becomes its first child. */
173 if (vec_len (parent->next_indices) == 0)
175 vec_add1 (parent->next_indices, rule_index);
179 /* Check if new rule is parent of some of the existing children */
180 for (i = 0; i < vec_len (parent->next_indices); i++)
182 child = RT (mma_rules_table_get_rule) (srt, parent->next_indices[i]);
/* Child covered by the new rule: re-parent it under the new rule. */
183 if (RT (rule_is_match_for_key) (&child->match, rule))
185 vec_add1 (rule->next_indices, parent->next_indices[i]);
188 vec_add1 (next_indices, rule_index);
/* Not yet inserted and new rule sorts before this child: insert here. */
194 if (!added && srt->rule_cmp_fn (rule, child) < 0)
196 vec_add1 (next_indices, rule_index);
/* Keep children the new rule does not cover in the rebuilt list. */
199 vec_add1 (next_indices, parent->next_indices[i]);
/* Presumably: if still not inserted after the loop, append at the end
 * (confirm — the guarding condition is elided). */
203 vec_add1 (next_indices, rule_index);
/* Swap the rebuilt child list into the parent. */
204 vec_free (parent->next_indices);
205 parent->next_indices = next_indices;
/* Recursively delete the rule equal to @rule from the subtree rooted at
 * @rule_index.
 *
 * Visible logic: prune subtrees that don't cover @rule's match; if the
 * current node IS the rule and it is the root, just invalidate its
 * action (the root itself is never freed); otherwise recurse into the
 * children, and when a child turns out to be the exact rule, splice it
 * out of rp->next_indices — promoting the deleted child's own children
 * into its position — and free it.
 *
 * NOTE(review): the success/failure return values of the recursion and
 * several braces are elided in this view; the splice reconstruction
 * below is inferred from the visible vec_add2/clib_memcpy_fast calls —
 * confirm against the full source. */
210 RT (mma_rules_table_del_rule) (RTT (mma_rules_table) * srt,
211 RTT (mma_rule) * rule, u32 rule_index)
217 ASSERT (rule_index != MMA_TABLE_INVALID_INDEX);
218 rp = RT (mma_rules_table_get_rule) (srt, rule_index);
/* Prune: if this subtree's root doesn't cover the rule, it's not here. */
220 if (!RT (rule_is_match_for_key) (&rule->match, rp))
221 return MMA_TABLE_INVALID_INDEX;
222 if (RT (rule_is_exact_match) (rule, rp))
/* The root rule is never removed — only its action is cleared. */
224 if (rule_index == srt->root_index)
225 rp->action_index = MMA_TABLE_INVALID_INDEX;
/* Search the children for the rule to delete. */
228 for (i = 0; i < vec_len (rp->next_indices); i++)
230 rv = RT (mma_rules_table_del_rule) (srt, rule, rp->next_indices[i]);
233 RTT (mma_rule) * child;
234 u32 *next_indices = 0, *new_elts, left_to_add;
/* Child i is the rule being deleted: splice it out of the list. */
235 child = RT (mma_rules_table_get_rule) (srt, rp->next_indices[i]);
236 ASSERT (RT (rule_is_exact_match) (rule, child));
/* Copy the children that precede the deleted one... */
240 vec_add2 (next_indices, new_elts, i);
241 clib_memcpy_fast (new_elts, rp->next_indices, i * sizeof (u32));
/* ...promote the deleted child's own children into its slot... */
243 if (vec_len (child->next_indices))
244 vec_append (next_indices, child->next_indices);
/* ...then copy the children that followed it. */
245 left_to_add = vec_len (rp->next_indices) - i - 1;
248 vec_add2 (next_indices, new_elts, left_to_add);
249 clib_memcpy_fast (new_elts, &rp->next_indices[i + 1],
250 left_to_add * sizeof (u32));
/* Release the deleted child and install the rebuilt child list. */
252 RT (mma_rule_free) (srt, child);
253 vec_free (rp->next_indices);
254 rp->next_indices = next_indices;
/* Rule not found anywhere in this subtree. */
260 return MMA_TABLE_INVALID_INDEX;
264 * fd.io coding-style-patch-verification: ON
267 * eval: (c-set-style "gnu")