Branch data Line data Source code
1 : : /*
2 : : * Copyright (c) 2014, 2015, 2016 Nicira, Inc.
3 : : *
4 : : * Licensed under the Apache License, Version 2.0 (the "License");
5 : : * you may not use this file except in compliance with the License.
6 : : * You may obtain a copy of the License at:
7 : : *
8 : : * http://www.apache.org/licenses/LICENSE-2.0
9 : : *
10 : : * Unless required by applicable law or agreed to in writing, software
11 : : * distributed under the License is distributed on an "AS IS" BASIS,
12 : : * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 : : * See the License for the specific language governing permissions and
14 : : * limitations under the License.
15 : : */
16 : :
17 : : #ifndef CLASSIFIER_PRIVATE_H
18 : : #define CLASSIFIER_PRIVATE_H 1
19 : :
20 : : #include "ccmap.h"
21 : : #include "cmap.h"
22 : : #include "flow.h"
23 : : #include "hash.h"
24 : : #include "rculist.h"
25 : :
26 : : /* Classifier internal definitions, subject to change at any time. */
27 : :
/* A set of rules that all have the same fields wildcarded. */
struct cls_subtable {
    struct cmap_node cmap_node;    /* Within classifier's 'subtables_map'. */

    /* These fields are only used by writers. */
    int max_priority;              /* Max priority of any rule in subtable. */
    unsigned int max_count;        /* Count of max_priority rules. */

    /* Accessed by iterators. */
    struct rculist rules_list;     /* Unordered. */

    /* Identical, but lower priority rules are not inserted to any of the
     * following data structures. */

    /* These fields are accessed by readers who care about wildcarding.
     * The 'const' members are fixed for the lifetime of the subtable;
     * NOTE(review): presumably set once at subtable creation — confirm in
     * classifier.c. */
    const uint8_t n_indices;                   /* How many indices to use. */
    const struct flowmap index_maps[CLS_MAX_INDICES + 1]; /* Stage maps. */
    unsigned int trie_plen[CLS_MAX_TRIES];     /* Trie prefix length in 'mask'
                                                * (runtime configurable). */
    const int ports_mask_len;
    struct ccmap indices[CLS_MAX_INDICES];     /* Staged lookup indices. */
    rcu_trie_ptr ports_trie;                   /* NULL if none. */

    /* These fields are accessed by all readers. */
    struct cmap rules;             /* Contains 'cls_match'es. */
    const struct minimask mask;    /* Wildcards for fields. */
    /* 'mask' must be the last field.  NOTE(review): presumably because the
     * minimask carries inline variable-length data — confirm in flow.h. */
};
/* Internal representation of a rule in a "struct cls_subtable".
 *
 * The 'next' member is an element in a singly linked, null-terminated list.
 * This list links together identical "cls_match"es in order of decreasing
 * priority.  The classifier code maintains the invariant that at most one rule
 * of a given priority is visible for any given lookup version.
 */
struct cls_match {
    /* Accessed by everybody. */
    OVSRCU_TYPE(struct cls_match *) next; /* Equal, lower-priority matches. */
    OVSRCU_TYPE(struct cls_conjunction_set *) conj_set;

    /* Accessed by readers interested in wildcarding. */
    const int priority;          /* Larger numbers are higher priorities. */

    /* Accessed by all readers. */
    struct cmap_node cmap_node;  /* Within struct cls_subtable 'rules'. */

    /* Rule versioning: controls in which lookup versions this match is
     * visible (see cls_match_visible_in_version()). */
    struct versions versions;

    const struct cls_rule *cls_rule;   /* NOTE(review): looks like a
                                        * back-pointer to the public rule
                                        * (see get_cls_match()) — confirm. */
    const struct miniflow flow;  /* Matching rule. Mask is in the subtable. */
    /* 'flow' must be the last field.  NOTE(review): presumably because the
     * miniflow carries inline variable-length data — confirm in flow.h. */
};
82 : :
83 : : /* Utilities for accessing the 'cls_match' member of struct cls_rule. */
84 : : static inline struct cls_match *
85 : 972199 : get_cls_match_protected(const struct cls_rule *rule)
86 : : {
87 : 972199 : return ovsrcu_get_protected(struct cls_match *, &rule->cls_match);
88 : : }
89 : :
90 : : static inline struct cls_match *
91 : 1850516 : get_cls_match(const struct cls_rule *rule)
92 : : {
93 : 1850516 : return ovsrcu_get(struct cls_match *, &rule->cls_match);
94 : : }
95 : :
/* Frees a cls_match.  Must be RCU postponed (e.g. via ovsrcu_postpone()) so
 * that all concurrent readers have quiesced before the memory is released. */
void cls_match_free_cb(struct cls_match *);
98 : :
99 : : static inline void
100 : 523023 : cls_match_set_remove_version(struct cls_match *rule, ovs_version_t version)
101 : : {
102 : 523023 : versions_set_remove_version(&rule->versions, version);
103 : 523023 : }
104 : :
105 : : static inline bool
106 : 13358727 : cls_match_visible_in_version(const struct cls_match *rule,
107 : : ovs_version_t version)
108 : : {
109 : 13358727 : return versions_visible_in_version(&rule->versions, version);
110 : : }
111 : :
112 : : static inline bool
113 : 468166 : cls_match_is_eventually_invisible(const struct cls_match *rule)
114 : : {
115 : 468166 : return versions_is_eventually_invisible(&rule->versions);
116 : : }
117 : :
118 : :
119 : : /* cls_match 'next' */
120 : :
121 : : static inline const struct cls_match *
122 : 38921 : cls_match_next(const struct cls_match *rule)
123 : : {
124 : 38921 : return ovsrcu_get(struct cls_match *, &rule->next);
125 : : }
126 : :
127 : : static inline struct cls_match *
128 : 1174237 : cls_match_next_protected(const struct cls_match *rule)
129 : : {
130 : 1174237 : return ovsrcu_get_protected(struct cls_match *, &rule->next);
131 : : }
132 : :
/* Puts 'rule' in the position between 'prev' and 'next'.  If 'prev' == NULL,
 * then the 'rule' is the new list head, and if 'next' == NULL, the rule is the
 * new list tail.
 * If there are any nodes between 'prev' and 'next', they are dropped from the
 * list. */
static inline void
cls_match_insert(struct cls_match *prev, struct cls_match *next,
                 struct cls_match *rule)
{
    /* Link 'rule' forward first, before it is published to readers.
     * NOTE(review): ovsrcu_set_hidden() is presumably the variant without a
     * publication barrier, valid only because 'rule' is not yet reachable by
     * readers — confirm against ovs-rcu.h. */
    ovsrcu_set_hidden(&rule->next, next);

    if (prev) {
        /* Publish 'rule' to readers through 'prev''s next pointer. */
        ovsrcu_set(&prev->next, rule);
    }
    /* If 'prev' == NULL, the caller is responsible for updating its list
     * head pointer (see cls_match_remove() for the symmetric case). */
}
148 : :
/* Puts 'new_rule' in the position of 'old_rule', which is the next node after
 * 'prev'.  If 'prev' == NULL, then the 'new_rule' is the new list head.
 *
 * The replaced cls_match still links to the later rules, and may still be
 * referenced by other threads until all other threads quiesce.  The replaced
 * rule may not be re-inserted, re-initialized, or deleted until after all
 * other threads have quiesced (use ovsrcu_postpone). */
static inline void
cls_match_replace(struct cls_match *prev,
                  struct cls_match *old_rule, struct cls_match *new_rule)
{
    struct cls_match *tail = cls_match_next_protected(old_rule);

    cls_match_insert(prev, tail, new_rule);
}
162 : :
163 : : /* Removes 'rule' following 'prev' from the list. If 'prev' is NULL, then the
164 : : * 'rule' is a list head, and the caller is responsible for maintaining its
165 : : * list head pointer (if any).
166 : : *
167 : : * Afterward, the removed rule is not linked to any more, but still links to
168 : : * the following rules, and may still be referenced by other threads until all
169 : : * other threads quiesce. The removed rule may not be re-inserted,
170 : : * re-initialized, or deleted until after all other threads have quiesced (use
171 : : * ovsrcu_postpone).
172 : : */
173 : : static inline void
174 : 300 : cls_match_remove(struct cls_match *prev, struct cls_match *rule)
175 : : {
176 [ + - ]: 300 : if (prev) {
177 : 300 : ovsrcu_set(&prev->next, cls_match_next_protected(rule));
178 : : }
179 : 300 : }
180 : :
/* Iterates 'ITER' over the whole null-terminated list headed by 'HEAD',
 * using RCU reader accesses. */
#define CLS_MATCH_FOR_EACH(ITER, HEAD)                              \
    for ((ITER) = (HEAD); (ITER); (ITER) = cls_match_next(ITER))

/* As CLS_MATCH_FOR_EACH, but skips 'HEAD' itself. */
#define CLS_MATCH_FOR_EACH_AFTER_HEAD(ITER, HEAD)   \
    CLS_MATCH_FOR_EACH(ITER, cls_match_next(HEAD))

/* Iterate cls_matches keeping the previous pointer for modifications.
 * 'PREV' is NULL while 'ITER' is the head; uses "protected" RCU accesses,
 * so the caller must hold the required external mutual exclusion. */
#define FOR_EACH_RULE_IN_LIST_PROTECTED(ITER, PREV, HEAD)           \
    for ((PREV) = NULL, (ITER) = (HEAD);                            \
         (ITER);                                                    \
         (PREV) = (ITER), (ITER) = cls_match_next_protected(ITER))
192 : :
193 : :
/* A longest-prefix match tree. */
struct trie_node {
    uint32_t prefix;           /* Prefix bits for this node, MSB first. */
    uint8_t  n_bits;           /* Never zero, except for the root node. */
    unsigned int n_rules;      /* Number of rules that have this prefix. */
    rcu_trie_ptr edges[2];     /* Both NULL if leaf. */
};

/* Max bits per node.  Must fit in struct trie_node's 'prefix'.
 * Also tested with 16, 8, and 5 to stress the implementation. */
#define TRIE_PREFIX_BITS 32
205 : :
/* flow/miniflow/minimask/minimatch utilities.
 * These are only used by the classifier, so place them here to allow
 * for better optimization. */

/* Returns a hash value for the bits of 'flow' where there are 1-bits in
 * 'mask', given 'basis'.
 *
 * The hash values returned by this function are the same as those returned by
 * miniflow_hash_in_minimask(), only the form of the arguments differ. */
static inline uint32_t
flow_hash_in_minimask(const struct flow *flow, const struct minimask *mask,
                      uint32_t basis)
{
    const uint64_t *mask_values = miniflow_get_values(&mask->masks);
    const uint64_t *flow_u64 = (const uint64_t *)flow;
    const uint64_t *p = mask_values;
    uint32_t hash = basis;
    map_t map;

    /* 'p' walks the mask's packed value array in lock step with the map
     * iteration, so each masked 64-bit unit of 'flow' is mixed with its
     * corresponding mask value. */
    FLOWMAP_FOR_EACH_MAP (map, mask->masks.map) {
        size_t idx;

        MAP_FOR_EACH_INDEX (idx, map) {
            hash = hash_add64(hash, flow_u64[idx] & *p++);
        }
        /* Advance 'flow_u64' past this map unit's span of 64-bit words. */
        flow_u64 += MAP_T_BITS;
    }

    /* Finish with the number of mask bytes consumed, matching
     * miniflow_hash_in_minimask() so the two produce identical hashes. */
    return hash_finish(hash, (p - mask_values) * 8);
}
236 : :
/* Returns a hash value for the bits of 'flow' where there are 1-bits in
 * 'mask', given 'basis'.
 *
 * The hash values returned by this function are the same as those returned by
 * flow_hash_in_minimask(), only the form of the arguments differ. */
static inline uint32_t
miniflow_hash_in_minimask(const struct miniflow *flow,
                          const struct minimask *mask, uint32_t basis)
{
    const uint64_t *mask_values = miniflow_get_values(&mask->masks);
    const uint64_t *p = mask_values;
    uint32_t hash = basis;
    uint64_t value;

    /* The iteration is driven by the mask's map, so each 'value' from 'flow'
     * corresponds to the mask unit '*p' consumed in the same step. */
    MINIFLOW_FOR_EACH_IN_FLOWMAP(value, flow, mask->masks.map) {
        hash = hash_add64(hash, value & *p++);
    }

    /* Same finish as flow_hash_in_minimask(): number of mask bytes used. */
    return hash_finish(hash, (p - mask_values) * 8);
}
257 : :
/* Returns a hash value for the values of 'flow', indicated by 'range', where
 * there are 1-bits in 'mask', given 'basis'.  'range' must be a continuous
 * subset of the bits in 'mask''s map, representing a continuous range of the
 * minimask's mask data.  '*offset' must be the number of 64-bit units of the
 * minimask's data to skip to get to the first unit covered by 'range'.  On
 * return '*offset' is updated with the number of 64-bit units of the minimask
 * consumed.
 *
 * Typically this function is called for successive ranges of minimask's masks,
 * and the first invocation passes '*offset' as zero.
 *
 * The hash values returned by this function are the same as those returned by
 * minimatch_hash_range(), only the form of the arguments differ. */
static inline uint32_t
flow_hash_in_minimask_range(const struct flow *flow,
                            const struct minimask *mask,
                            const struct flowmap range,
                            unsigned int *offset,
                            uint32_t *basis)
{
    const uint64_t *mask_values = miniflow_get_values(&mask->masks);
    const uint64_t *flow_u64 = (const uint64_t *)flow;
    /* Resume in the mask's value array where the previous range stopped. */
    const uint64_t *p = mask_values + *offset;
    uint32_t hash = *basis;
    map_t map;

    /* Same mixing scheme as flow_hash_in_minimask(), restricted to the
     * map bits in 'range'. */
    FLOWMAP_FOR_EACH_MAP (map, range) {
        size_t idx;

        MAP_FOR_EACH_INDEX (idx, map) {
            hash = hash_add64(hash, flow_u64[idx] & *p++);
        }
        flow_u64 += MAP_T_BITS;
    }

    *basis = hash; /* Allow continuation from the unfinished value. */
    *offset = p - mask_values;
    return hash_finish(hash, *offset * 8);
}
297 : :
/* Fold minimask 'mask''s wildcard mask into 'wc's wildcard mask, i.e. OR
 * every bit set in 'mask' into 'wc->masks'. */
static inline void
flow_wildcards_fold_minimask(struct flow_wildcards *wc,
                             const struct minimask *mask)
{
    flow_union_with_miniflow(&wc->masks, &mask->masks);
}
305 : :
/* Fold minimask 'mask''s wildcard mask into 'wc's wildcard mask for bits in
 * 'fmap'.  1-bits in 'fmap' are a subset of 1-bits in 'mask''s map.  This is
 * the partial-fold counterpart of flow_wildcards_fold_minimask(). */
static inline void
flow_wildcards_fold_minimask_in_map(struct flow_wildcards *wc,
                                    const struct minimask *mask,
                                    const struct flowmap fmap)
{
    flow_union_with_miniflow_subset(&wc->masks, &mask->masks, fmap);
}
315 : :
/* Returns a hash value for 'mask', given 'basis'. */
static inline uint32_t
minimask_hash(const struct minimask *mask, uint32_t basis)
{
    const uint64_t *p = miniflow_get_values(&mask->masks);
    size_t n_values = miniflow_n_values(&mask->masks);
    uint32_t hash = basis;

    /* Mix in the packed mask values... */
    for (size_t i = 0; i < n_values; i++) {
        hash = hash_add64(hash, *p++);
    }

    /* ...and the map itself, so masks with equal values under different
     * maps still hash differently. */
    map_t map;
    FLOWMAP_FOR_EACH_MAP (map, mask->masks.map) {
        hash = hash_add64(hash, map);
    }

    return hash_finish(hash, n_values);
}
335 : :
/* Returns a hash value for the values of 'match->flow', indicated by 'range',
 * where there are 1-bits in 'match->mask', given 'basis'.  'range' must be a
 * continuous subset of the bits in the map of 'match', representing a
 * continuous range of the mask data of 'match'.  '*offset' must be the number
 * of 64-bit units of the match data to skip to get to the first unit covered
 * by 'range'.  On return '*offset' is updated with the number of 64-bit units
 * of the match consumed.
 *
 * Typically this function is called for successive ranges of minimask's masks,
 * and the first invocation passes '*offset' as zero.
 *
 * The hash values returned by this function are the same as those returned by
 * flow_hash_in_minimask_range(), only the form of the arguments differ. */
static inline uint32_t
minimatch_hash_range(const struct minimatch *match,
                     const struct flowmap range, unsigned int *offset,
                     uint32_t *basis)
{
    /* Flow and mask values are stored in the same packed order, so the same
     * '*offset' and index address corresponding units in both arrays. */
    const uint64_t *p = miniflow_get_values(match->flow) + *offset;
    const uint64_t *q = miniflow_get_values(&match->mask->masks) + *offset;
    unsigned int n = flowmap_n_1bits(range);
    uint32_t hash = *basis;

    for (unsigned int i = 0; i < n; i++) {
        hash = hash_add64(hash, p[i] & q[i]);
    }
    *basis = hash; /* Allow continuation from the unfinished value. */
    *offset += n;
    return hash_finish(hash, *offset * 8);
}
366 : :
367 : : #endif
|