From bf22d3006f7a7d528ac7b339a5855e1b0e99fb53 Mon Sep 17 00:00:00 2001 From: yaakov-stein Date: Tue, 10 Feb 2026 14:10:20 -0800 Subject: [PATCH 1/4] lib: helper: add FNV-1a hash function This will be used by the counter restoration logic to content-hash rules and sets for matching across chain updates. --- src/libbpfilter/helper.c | 14 +++++++++++++ src/libbpfilter/include/bpfilter/helper.h | 19 +++++++++++++++++ tests/unit/libbpfilter/helper.c | 25 +++++++++++++++++++++++ 3 files changed, 58 insertions(+) diff --git a/src/libbpfilter/helper.c b/src/libbpfilter/helper.c index bbbfc710..bb6af2b5 100644 --- a/src/libbpfilter/helper.c +++ b/src/libbpfilter/helper.c @@ -157,3 +157,17 @@ char *bf_trim(char *str) return bf_rtrim(bf_ltrim(str)); } + +uint64_t bf_fnv1a(const void *data, size_t len, uint64_t hash) +{ + assert(data); + + const uint8_t *bytes = data; + + for (size_t i = 0; i < len; ++i) { + hash ^= bytes[i]; + hash *= BF_FNV1A_PRIME; + } + + return hash; +} diff --git a/src/libbpfilter/include/bpfilter/helper.h b/src/libbpfilter/include/bpfilter/helper.h index 4a4977ce..b5b3b091 100644 --- a/src/libbpfilter/include/bpfilter/helper.h +++ b/src/libbpfilter/include/bpfilter/helper.h @@ -9,6 +9,7 @@ #include #include #include +#include #include #include #include @@ -423,3 +424,21 @@ int bf_read_file(const char *path, void **buf, size_t *len); * @return 0 on success, negative errno value on error. */ int bf_write_file(const char *path, const void *buf, size_t len); + +// FNV-1a constants from Fowler-Noll-Vo hash specification: +// https://en.wikipedia.org/wiki/Fowler-Noll-Vo_hash_function +#define BF_FNV1A_INIT 0xcbf29ce484222325ULL +#define BF_FNV1A_PRIME 0x100000001b3ULL + +/** + * @brief Compute a FNV-1a 64-bit hash. + * + * Pass @ref BF_FNV1A_INIT as @p hash for the initial call. To hash + * multiple fields, chain calls by passing the previous return value. + * + * @param data Data to hash. Can't be NULL. + * @param len Number of bytes to hash. + * @param hash Initial or chained hash value. + * @return Updated hash value. 
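+ *
+ * A minimal chaining sketch (the struct and its values are illustrative
+ * only, not part of bpfilter):
+ *
+ * @code
+ * struct { uint32_t type; uint16_t port; } obj = {1, 80};
+ * uint64_t hash = BF_FNV1A_INIT;
+ *
+ * hash = bf_fnv1a(&obj.type, sizeof(obj.type), hash);
+ * hash = bf_fnv1a(&obj.port, sizeof(obj.port), hash);
+ * @endcode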
+ */ +uint64_t bf_fnv1a(const void *data, size_t len, uint64_t hash); diff --git a/tests/unit/libbpfilter/helper.c b/tests/unit/libbpfilter/helper.c index 8897e926..7cefcddc 100644 --- a/tests/unit/libbpfilter/helper.c +++ b/tests/unit/libbpfilter/helper.c @@ -356,12 +356,37 @@ static void overwrite_existing_file(void **state) assert_memory_equal(read_buf, second_data, strlen(second_data)); } +static void fnv1a_hash(void **state) +{ + uint32_t val_a = 42; + uint32_t val_b = 99; + uint64_t hash_a; + uint64_t hash_b; + uint64_t hash_ab; + uint64_t hash_ba; + + (void)state; + + hash_a = bf_fnv1a(&val_a, sizeof(val_a), BF_FNV1A_INIT); + hash_b = bf_fnv1a(&val_b, sizeof(val_b), BF_FNV1A_INIT); + + assert_int_equal(hash_a, + bf_fnv1a(&val_a, sizeof(val_a), BF_FNV1A_INIT)); + assert_int_not_equal(hash_a, hash_b); + + // Chaining: order matters for sequential hashing + hash_ab = bf_fnv1a(&val_b, sizeof(val_b), hash_a); + hash_ba = bf_fnv1a(&val_a, sizeof(val_a), hash_b); + assert_int_not_equal(hash_ab, hash_ba); +} + int main(void) { const struct CMUnitTest tests[] = { cmocka_unit_test(close_fd), cmocka_unit_test(string_copy), cmocka_unit_test(realloc_mem), + cmocka_unit_test(fnv1a_hash), cmocka_unit_test(trim_left), cmocka_unit_test(trim_right), cmocka_unit_test(trim_both), From c8f1157fb4f4c2e3d4f90993377c9687fc82e2da Mon Sep 17 00:00:00 2001 From: yaakov-stein Date: Tue, 10 Feb 2026 14:41:12 -0800 Subject: [PATCH 2/4] lib: set: add order-independent set equality Add bf_set_equal() to compare two sets by key structure and element content, independent of insertion order. Elements are sorted internally for efficient comparison. This will be used to verify set identity when restoring counters across chain updates. --- src/libbpfilter/include/bpfilter/set.h | 15 +++++ src/libbpfilter/set.c | 77 ++++++++++++++++++++++++++ tests/unit/libbpfilter/set.c | 67 ++++++++++++++++++++++ 3 files changed, 159 insertions(+) diff --git a/src/libbpfilter/include/bpfilter/set.h b/src/libbpfilter/include/bpfilter/set.h index dbc57daa..6894e359 100644 --- a/src/libbpfilter/include/bpfilter/set.h +++ b/src/libbpfilter/include/bpfilter/set.h @@ -167,3 +167,18 @@ int bf_set_add_many(struct bf_set *dest, struct bf_set **to_add); * - `-EINVAL`: set key format doesn't match between dest and to_remove. */ int bf_set_remove_many(struct bf_set *dest, struct bf_set **to_remove); + +/** + * @brief Check if two sets have identical content. + * + * Compares key structure and elements (order-independent). Uses internal + * static state for sorting, so this function is not reentrant. + * + * @todo Use a hash set instead of sorting to make this function reentrant. + * + * @param lhs First set. Can't be NULL. + * @param rhs Second set. Can't be NULL. + * @return 1 if the sets are equal, 0 if not, or a negative errno value + * on failure. + */ +int bf_set_equal(const struct bf_set *lhs, const struct bf_set *rhs); diff --git a/src/libbpfilter/set.c b/src/libbpfilter/set.c index 57c50e64..3e12297e 100644 --- a/src/libbpfilter/set.c +++ b/src/libbpfilter/set.c @@ -524,3 +524,80 @@ int bf_set_remove_many(struct bf_set *dest, struct bf_set **to_remove) return 0; } + +// qsort comparator for set elements. Uses a static size since qsort +// doesn't support context and the daemon is single-threaded. 
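+// qsort_r() would avoid the global, but its prototype differs between glibc
+// and the BSDs; the resulting non-reentrancy is documented (and tracked by a
+// @todo) on bf_set_equal() in set.h.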
+static size_t _bf_set_elem_cmp_size; + +static int _bf_set_elem_cmp(const void *lhs, const void *rhs) +{ + return memcmp(lhs, rhs, _bf_set_elem_cmp_size); +} + +static int _bf_set_get_sorted_elems(const struct bf_set *set, void **out) +{ + size_t n_elems; + void *arr; + size_t offset = 0; + + assert(set); + assert(out); + + n_elems = bf_list_size(&set->elems); + + if (n_elems == 0) { + *out = NULL; + return 0; + } + + arr = malloc(n_elems * set->elem_size); + if (!arr) + return -ENOMEM; + + bf_list_foreach (&set->elems, elem_node) { + memcpy((uint8_t *)arr + offset, bf_list_node_get_data(elem_node), + set->elem_size); + offset += set->elem_size; + } + + _bf_set_elem_cmp_size = set->elem_size; + qsort(arr, n_elems, set->elem_size, _bf_set_elem_cmp); + + *out = arr; + + return 0; +} + +int bf_set_equal(const struct bf_set *lhs, const struct bf_set *rhs) +{ + _cleanup_free_ void *lhs_sorted = NULL; + _cleanup_free_ void *rhs_sorted = NULL; + size_t n_elems; + int r; + + assert(lhs); + assert(rhs); + + if (lhs->n_comps != rhs->n_comps) + return 0; + if (memcmp(lhs->key, rhs->key, + lhs->n_comps * sizeof(enum bf_matcher_type)) != 0) + return 0; + if (lhs->elem_size != rhs->elem_size) + return 0; + + n_elems = bf_list_size(&lhs->elems); + if (bf_list_size(&rhs->elems) != n_elems) + return 0; + if (n_elems == 0) + return 1; + + r = _bf_set_get_sorted_elems(lhs, &lhs_sorted); + if (r) + return r; + r = _bf_set_get_sorted_elems(rhs, &rhs_sorted); + if (r) + return r; + + return memcmp(lhs_sorted, rhs_sorted, n_elems * lhs->elem_size) == 0; +} diff --git a/tests/unit/libbpfilter/set.c b/tests/unit/libbpfilter/set.c index 496a6527..227c8631 100644 --- a/tests/unit/libbpfilter/set.c +++ b/tests/unit/libbpfilter/set.c @@ -396,6 +396,71 @@ static void remove_many_mismatched_key_type(void **state) assert_non_null(to_remove); } +static void equal_with_reordered_elements(void **state) +{ + _free_bf_set_ struct bf_set *lhs = NULL; + _free_bf_set_ struct bf_set *rhs = NULL; + enum bf_matcher_type key[] = {BF_MATCHER_IP4_DADDR, BF_MATCHER_TCP_SPORT}; + uint8_t elem_a[8] = {10, 0, 0, 1, 0, 80, 0, 0}; + uint8_t elem_b[8] = {10, 0, 0, 2, 0, 90, 0, 0}; + uint8_t elem_c[8] = {10, 0, 0, 3, 1, 0, 0, 0}; + + (void)state; + + assert_ok(bf_set_new(&lhs, "set_a", key, ARRAY_SIZE(key))); + assert_ok(bf_set_add_elem(lhs, elem_a)); + assert_ok(bf_set_add_elem(lhs, elem_b)); + assert_ok(bf_set_add_elem(lhs, elem_c)); + + assert_ok(bf_set_new(&rhs, "set_b", key, ARRAY_SIZE(key))); + assert_ok(bf_set_add_elem(rhs, elem_c)); + assert_ok(bf_set_add_elem(rhs, elem_a)); + assert_ok(bf_set_add_elem(rhs, elem_b)); + + assert_int_equal(bf_set_equal(lhs, rhs), 1); + assert_int_equal(bf_set_equal(lhs, lhs), 1); +} + +static void not_equal(void **state) +{ + _free_bf_set_ struct bf_set *set_a = NULL; + _free_bf_set_ struct bf_set *set_b = NULL; + _free_bf_set_ struct bf_set *set_diff_key = NULL; + _free_bf_set_ struct bf_set *set_empty = NULL; + _free_bf_set_ struct bf_set *set_empty2 = NULL; + enum bf_matcher_type key[] = {BF_MATCHER_IP4_DADDR, BF_MATCHER_TCP_SPORT}; + enum bf_matcher_type key2[] = {BF_MATCHER_IP4_SADDR, BF_MATCHER_TCP_SPORT}; + uint8_t elem_a[8] = {10, 0, 0, 1, 0, 80, 0, 0}; + uint8_t elem_b[8] = {10, 0, 0, 2, 0, 90, 0, 0}; + uint8_t elem_c[8] = {10, 0, 0, 3, 1, 0, 0, 0}; + + (void)state; + + assert_ok(bf_set_new(&set_a, NULL, key, ARRAY_SIZE(key))); + assert_ok(bf_set_add_elem(set_a, elem_a)); + assert_ok(bf_set_add_elem(set_a, elem_b)); + + assert_ok(bf_set_new(&set_b, NULL, key, ARRAY_SIZE(key))); + 
assert_ok(bf_set_add_elem(set_b, elem_a)); + assert_ok(bf_set_add_elem(set_b, elem_c)); + + assert_int_equal(bf_set_equal(set_a, set_b), 0); + + assert_ok(bf_set_add_elem(set_b, elem_b)); + assert_int_equal(bf_set_equal(set_a, set_b), 0); + + assert_ok(bf_set_new(&set_diff_key, NULL, key2, ARRAY_SIZE(key2))); + assert_ok(bf_set_add_elem(set_diff_key, elem_a)); + assert_ok(bf_set_add_elem(set_diff_key, elem_b)); + assert_int_equal(bf_set_equal(set_a, set_diff_key), 0); + + assert_ok(bf_set_new(&set_empty, NULL, key, ARRAY_SIZE(key))); + assert_int_equal(bf_set_equal(set_a, set_empty), 0); + + assert_ok(bf_set_new(&set_empty2, NULL, key, ARRAY_SIZE(key))); + assert_int_equal(bf_set_equal(set_empty, set_empty2), 1); +} + int main(void) { const struct CMUnitTest tests[] = { @@ -420,6 +485,8 @@ int main(void) cmocka_unit_test(remove_many_disjoint_sets), cmocka_unit_test(remove_many_mismatched_key_count), cmocka_unit_test(remove_many_mismatched_key_type), + cmocka_unit_test(equal_with_reordered_elements), + cmocka_unit_test(not_equal), }; return cmocka_run_group_tests(tests, NULL, NULL); From 3701672bc11bd1294bbe10ed6241abc29154cd6a Mon Sep 17 00:00:00 2001 From: yaakov-stein Date: Tue, 10 Feb 2026 14:55:23 -0800 Subject: [PATCH 3/4] daemon: cgen: restore counters on chain update When bf_cgen_update() replaces a chain, counter values were lost because the new BPF program starts with a zeroed counter map. Restore counters by content-matching old rules to new rules before freeing the old program. The algorithm first builds a set index mapping using FNV-1a hashing with sorted-array binary search, so that set matchers can be compared across chains where set positions may differ. It then hashes and matches rules using the same sort-and-search approach. Matched counters are copied from the old program's counter map to the new one. Policy and error counters are always preserved. --- src/bpfilter/cgen/cgen.c | 478 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 478 insertions(+) diff --git a/src/bpfilter/cgen/cgen.c b/src/bpfilter/cgen/cgen.c index 0087a1b0..79ddc608 100644 --- a/src/bpfilter/cgen/cgen.c +++ b/src/bpfilter/cgen/cgen.c @@ -12,6 +12,7 @@ #include #include +#include #include #include #include @@ -21,11 +22,15 @@ #include #include #include +#include #include #include +#include +#include #include "cgen/dump.h" #include "cgen/prog/link.h" +#include "cgen/prog/map.h" #include "cgen/program.h" #include "ctx.h" #include "opts.h" @@ -343,9 +348,473 @@ int bf_cgen_attach(struct bf_cgen *cgen, const struct bf_ns *ns, return r; } +/** + * @brief Compute a content hash for a set. + * + * Hashes the key definition and a commutative sum of element hashes to + * produce a position-independent identifier. Element order does not + * affect the result. + * + * @param set Set to hash. Can't be NULL. + * @return 64-bit hash. + */ +static uint64_t _bf_set_hash(const struct bf_set *set) +{ + uint64_t hash = BF_FNV1A_INIT; + uint64_t elem_sum = 0; + + assert(set); + + hash = bf_fnv1a(&set->n_comps, sizeof(set->n_comps), hash); + hash = + bf_fnv1a(set->key, set->n_comps * sizeof(enum bf_matcher_type), hash); + + bf_list_foreach (&set->elems, elem_node) { + elem_sum += bf_fnv1a(bf_list_node_get_data(elem_node), + set->elem_size, BF_FNV1A_INIT); + } + + hash = bf_fnv1a(&elem_sum, sizeof(elem_sum), hash); + + return hash; +} + +struct _bf_hash_entry +{ + uint64_t hash; + size_t index; + const void *data; + + // Prevents duplicate entries from mapping to the same old entry. 
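+    // Set once the entry has been claimed by a matching new rule or set.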
+ bool matched; +}; + +static int _bf_hash_entry_cmp(const void *lhs, const void *rhs) +{ + const struct _bf_hash_entry *entry_l = lhs; + const struct _bf_hash_entry *entry_r = rhs; + + // Avoids unsigned overflow from naive subtraction. + return (entry_l->hash > entry_r->hash) - (entry_l->hash < entry_r->hash); +} + +/** + * @brief Build a mapping from new set indices to old set indices. + * + * Sorts old sets by hash and binary-searches for each new set. + * Hash matches are verified with full sorted element comparison + * for deterministic correctness. + * + * @param old_chain Old chain. Can't be NULL. + * @param new_chain New chain. Can't be NULL. + * @param set_map On success, the caller will own a mapping array where + * set_map[new_idx] is the matching old index, or -1 if unmatched. + * NULL if the new chain has no sets. Can't be NULL. + * @return 0 on success, or a negative errno value on failure. + */ +static int _bf_cgen_build_set_mapping(const struct bf_chain *old_chain, + const struct bf_chain *new_chain, + int **set_map) +{ + _cleanup_free_ struct _bf_hash_entry *old_entries = NULL; + _cleanup_free_ int *_map = NULL; + size_t old_n; + size_t new_n; + size_t new_idx = 0; + size_t i = 0; + + assert(old_chain); + assert(new_chain); + assert(set_map); + + old_n = bf_list_size(&old_chain->sets); + new_n = bf_list_size(&new_chain->sets); + + if (new_n == 0) + return 0; + + _map = malloc(new_n * sizeof(*_map)); + if (!_map) + return -ENOMEM; + + for (i = 0; i < new_n; ++i) + _map[i] = -1; + + if (old_n == 0) { + *set_map = TAKE_PTR(_map); + return 0; + } + + old_entries = calloc(old_n, sizeof(*old_entries)); + if (!old_entries) + return -ENOMEM; + + i = 0; + bf_list_foreach (&old_chain->sets, set_node) { + const struct bf_set *set = bf_list_node_get_data(set_node); + old_entries[i].data = set; + old_entries[i].hash = _bf_set_hash(set); + old_entries[i].index = i; + old_entries[i].matched = false; + ++i; + } + + qsort(old_entries, old_n, sizeof(*old_entries), _bf_hash_entry_cmp); + + bf_list_foreach (&new_chain->sets, set_node) { + const struct bf_set *new_set = bf_list_node_get_data(set_node); + uint64_t new_hash = _bf_set_hash(new_set); + + size_t low = 0, high = old_n; + while (low < high) { + size_t mid = low + ((high - low) / 2); + if (old_entries[mid].hash < new_hash) + low = mid + 1; + else + high = mid; + } + + for (size_t k = low; k < old_n && old_entries[k].hash == new_hash; ++k) { + int equal; + + if (old_entries[k].matched) + continue; + + equal = bf_set_equal(old_entries[k].data, new_set); + if (equal < 0) + return equal; + if (!equal) + continue; + + _map[new_idx] = (int)old_entries[k].index; + old_entries[k].matched = true; + break; + } + + ++new_idx; + } + + *set_map = TAKE_PTR(_map); + + return 0; +} + +/** + * @brief Hash a rule's content for matching. + * + * Matcher hashes are summed (commutative) so matcher order does not + * affect the result. For set matchers, if a mapping is provided, the + * mapped index is hashed; otherwise the raw set index is hashed. + * + * @param rule Rule to hash. Can't be NULL. + * @param chain Chain the rule belongs to (for set count). Can't be NULL. + * @param set_map Set index mapping, or NULL to hash raw set indices. + * @return 64-bit content hash. 
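+ *
+ * The intended pairing (mirroring _bf_cgen_build_counter_mappings() below):
+ * old rules are hashed against their own raw set indices, while new rules
+ * are hashed with their set indices remapped into the old chain's index
+ * space, so equivalent rules collide even when sets were reordered.
+ *
+ * @code
+ * uint64_t old_hash = _bf_rule_hash(old_rule, old_chain, NULL);
+ * uint64_t new_hash = _bf_rule_hash(new_rule, new_chain, set_map);
+ * @endcode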
+ */ +static uint64_t _bf_rule_hash(const struct bf_rule *rule, + const struct bf_chain *chain, const int *set_map) +{ + uint64_t hash = BF_FNV1A_INIT; + uint64_t matcher_sum = 0; + + assert(rule); + assert(chain); + + hash = bf_fnv1a(&rule->log, sizeof(rule->log), hash); + hash = bf_fnv1a(&rule->mark, sizeof(rule->mark), hash); + hash = bf_fnv1a(&rule->counters, sizeof(rule->counters), hash); + hash = bf_fnv1a(&rule->verdict, sizeof(rule->verdict), hash); + hash = + bf_fnv1a(&rule->redirect_ifindex, sizeof(rule->redirect_ifindex), hash); + hash = bf_fnv1a(&rule->redirect_dir, sizeof(rule->redirect_dir), hash); + + bf_list_foreach (&rule->matchers, matcher_node) { + const struct bf_matcher *matcher = bf_list_node_get_data(matcher_node); + enum bf_matcher_type type = bf_matcher_get_type(matcher); + enum bf_matcher_op op = bf_matcher_get_op(matcher); + uint64_t matcher_hash = BF_FNV1A_INIT; + + matcher_hash = bf_fnv1a(&type, sizeof(type), matcher_hash); + matcher_hash = bf_fnv1a(&op, sizeof(op), matcher_hash); + + if (type == BF_MATCHER_SET) { + uint32_t set_idx = *(const uint32_t *)bf_matcher_payload(matcher); + uint32_t hash_idx = set_idx; + + if (set_map && set_idx < bf_list_size(&chain->sets) && + set_map[set_idx] >= 0) + hash_idx = (uint32_t)set_map[set_idx]; + + matcher_hash = bf_fnv1a(&hash_idx, sizeof(hash_idx), matcher_hash); + } else { + matcher_hash = + bf_fnv1a(bf_matcher_payload(matcher), + bf_matcher_payload_len(matcher), matcher_hash); + } + + matcher_sum += matcher_hash; + } + + hash = bf_fnv1a(&matcher_sum, sizeof(matcher_sum), hash); + + return hash; +} + +/** + * @brief Compare two matchers for equality, resolving set indices via + * the pre-computed set mapping. + * + * @param old_matcher Matcher from the old chain. Can't be NULL. + * @param new_matcher Matcher from the new chain. Can't be NULL. + * @param set_map Set index mapping (new to old), or NULL if no sets. + * @param new_n_sets Number of sets in the new chain. + * @return True if the matchers are equal. + */ +static bool _bf_matcher_equal(const struct bf_matcher *old_matcher, + const struct bf_matcher *new_matcher, + const int *set_map, size_t new_n_sets) +{ + assert(old_matcher); + assert(new_matcher); + + if (bf_matcher_get_type(old_matcher) != bf_matcher_get_type(new_matcher)) + return false; + if (bf_matcher_get_op(old_matcher) != bf_matcher_get_op(new_matcher)) + return false; + if (bf_matcher_payload_len(old_matcher) != + bf_matcher_payload_len(new_matcher)) + return false; + + if (bf_matcher_get_type(old_matcher) == BF_MATCHER_SET) { + uint32_t idx_old = *(const uint32_t *)bf_matcher_payload(old_matcher); + uint32_t idx_new = *(const uint32_t *)bf_matcher_payload(new_matcher); + + if (!set_map || idx_new >= new_n_sets) + return false; + if (set_map[idx_new] < 0) + return false; + + return (uint32_t)set_map[idx_new] == idx_old; + } + + return memcmp(bf_matcher_payload(old_matcher), + bf_matcher_payload(new_matcher), + bf_matcher_payload_len(old_matcher)) == 0; +} + +/** + * @brief Check if two rules are content-equal with order-independent + * matcher comparison and set-aware matching. + * + * @param old_rule Rule from the old chain. Can't be NULL. + * @param new_rule Rule from the new chain. Can't be NULL. + * @param set_map Set index mapping (new to old), or NULL if no sets. + * @param new_n_sets Number of sets in the new chain. + * @return True if the rules are content-equal. 
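+ *
+ * @note The matcher comparison is quadratic in the number of matchers per
+ * rule; it is only reached for rule pairs that already share a content hash.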
+ */ +static bool _bf_rules_equal(const struct bf_rule *old_rule, + const struct bf_rule *new_rule, const int *set_map, + size_t new_n_sets) +{ + size_t n; + + assert(old_rule); + assert(new_rule); + + if (old_rule->log != new_rule->log || old_rule->mark != new_rule->mark || + old_rule->counters != new_rule->counters || + old_rule->verdict != new_rule->verdict || + old_rule->redirect_ifindex != new_rule->redirect_ifindex || + old_rule->redirect_dir != new_rule->redirect_dir) + return false; + + n = bf_list_size(&old_rule->matchers); + if (bf_list_size(&new_rule->matchers) != n) + return false; + + if (n == 0) + return true; + + bool used[n]; + memset(used, 0, sizeof(used)); + + bf_list_foreach (&old_rule->matchers, a_node) { + const struct bf_matcher *old_m = bf_list_node_get_data(a_node); + bool found = false; + size_t idx = 0; + + bf_list_foreach (&new_rule->matchers, b_node) { + const struct bf_matcher *new_m = bf_list_node_get_data(b_node); + + if (!used[idx] && + _bf_matcher_equal(old_m, new_m, set_map, new_n_sets)) { + used[idx] = true; + found = true; + break; + } + ++idx; + } + + if (!found) + return false; + } + + return true; +} + +/** + * @brief Build a mapping from new rule indices to old rule indices + * based on content hashing. + * + * Returns an array where counter_map[new_idx] = old_idx, or -1 if the + * new rule has no match. The last two entries map the policy and error + * counters. + * + * @param old_chain Old chain. Can't be NULL. + * @param new_chain New chain. Can't be NULL. + * @param counter_map On success, the caller will own a mapping array + * of size new_rule_count + 2. Can't be NULL. + * @return 0 on success, or a negative errno value on failure. + */ +static int _bf_cgen_build_counter_mappings(const struct bf_chain *old_chain, + const struct bf_chain *new_chain, + int **counter_map) +{ + _cleanup_free_ int *set_map = NULL; + _cleanup_free_ struct _bf_hash_entry *old_entries = NULL; + _cleanup_free_ int *_map = NULL; + size_t old_n; + size_t new_n; + size_t new_n_sets; + size_t map_size; + size_t i = 0; + int r; + + assert(old_chain); + assert(new_chain); + assert(counter_map); + + old_n = bf_list_size(&old_chain->rules); + new_n = bf_list_size(&new_chain->rules); + new_n_sets = bf_list_size(&new_chain->sets); + map_size = new_n + 2; + + _map = malloc(map_size * sizeof(*_map)); + if (!_map) + return -ENOMEM; + + for (i = 0; i < map_size; ++i) + _map[i] = -1; + + // Policy and error counters always map: they sit after the rule + // counters at indices rule_count and rule_count+1. 
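+    // Resulting counter map layout for a chain with N rules:
+    //   [0 .. N-1]  per-rule counters
+    //   [N]         policy counter
+    //   [N+1]       error counter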
+ _map[new_n] = (int)old_n; + _map[new_n + 1] = (int)(old_n + 1); + + if (old_n == 0 || new_n == 0) { + *counter_map = TAKE_PTR(_map); + return 0; + } + + r = _bf_cgen_build_set_mapping(old_chain, new_chain, &set_map); + if (r) + return r; + + old_entries = calloc(old_n, sizeof(*old_entries)); + if (!old_entries) + return -ENOMEM; + + i = 0; + bf_list_foreach (&old_chain->rules, rule_node) { + const struct bf_rule *rule = bf_list_node_get_data(rule_node); + old_entries[i].hash = _bf_rule_hash(rule, old_chain, NULL); + old_entries[i].index = rule->index; + old_entries[i].data = rule; + old_entries[i].matched = false; + ++i; + } + + qsort(old_entries, old_n, sizeof(*old_entries), _bf_hash_entry_cmp); + + bf_list_foreach (&new_chain->rules, rule_node) { + const struct bf_rule *new_rule = bf_list_node_get_data(rule_node); + uint64_t new_hash = _bf_rule_hash(new_rule, new_chain, set_map); + + size_t low = 0, high = old_n; + while (low < high) { + size_t mid = low + ((high - low) / 2); + if (old_entries[mid].hash < new_hash) + low = mid + 1; + else + high = mid; + } + + for (size_t k = low; k < old_n && old_entries[k].hash == new_hash; + ++k) { + // Skip already-matched entries to handle duplicate rules + if (old_entries[k].matched) + continue; + + if (!_bf_rules_equal(old_entries[k].data, new_rule, set_map, + new_n_sets)) + continue; + + old_entries[k].matched = true; + _map[new_rule->index] = (int)old_entries[k].index; + break; + } + } + + *counter_map = TAKE_PTR(_map); + + return 0; +} + +/** + * @brief Transfer counters from old program to new program. + * + * @param old_prog Old program to read counters from. Can't be NULL. + * @param new_prog New program to write counters into. Can't be NULL. + * @param counter_map Mapping array where counter_map[new_idx] = old_idx, + * or -1 if unmatched. Can't be NULL. + * @param map_size Number of entries in counter_map. + * @return 0 on success, or a negative errno value on failure. 
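+ *
+ * @note Counters that read back as zero are skipped: the new program's
+ * counter map starts zeroed, so writing them again would be redundant.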
+ */ +static int _bf_cgen_transfer_counters(const struct bf_program *old_prog, + struct bf_program *new_prog, + const int *counter_map, size_t map_size) +{ + assert(old_prog); + assert(new_prog); + assert(counter_map); + + for (size_t i = 0; i < map_size; ++i) { + struct bf_counter counter = {}; + uint32_t new_idx = i; + int r; + + if (counter_map[i] < 0) + continue; + + r = bf_program_get_counter(old_prog, counter_map[i], &counter); + if (r) + return bf_err_r(r, "failed to read old counter %d", counter_map[i]); + + if (counter.packets == 0 && counter.bytes == 0) + continue; + + r = bf_bpf_map_update_elem(new_prog->cmap->fd, &new_idx, &counter, 0); + if (r) + return bf_err_r(r, "failed to write counter %u", new_idx); + } + + return 0; +} + int bf_cgen_update(struct bf_cgen *cgen, struct bf_chain **new_chain) { _free_bf_program_ struct bf_program *new_prog = NULL; + _cleanup_free_ int *counter_map = NULL; _cleanup_close_ int pindir_fd = -1; struct bf_program *old_prog; int r; @@ -375,6 +844,15 @@ int bf_cgen_update(struct bf_cgen *cgen, struct bf_chain **new_chain) if (r) return bf_err_r(r, "failed to load new program"); + r = _bf_cgen_build_counter_mappings(cgen->chain, *new_chain, &counter_map); + if (r) + return bf_err_r(r, "failed to build counter mappings for update"); + + r = _bf_cgen_transfer_counters(old_prog, new_prog, counter_map, + bf_list_size(&(*new_chain)->rules) + 2); + if (r) + return bf_err_r(r, "failed to transfer counters for update"); + if (bf_opts_persist()) bf_program_unpin(old_prog, pindir_fd); From 63375bfdee89736f00bf55190235a13a1435f2e3 Mon Sep 17 00:00:00 2001 From: yaakov-stein Date: Tue, 10 Feb 2026 16:01:07 -0800 Subject: [PATCH 4/4] tests: e2e: add counter restoration tests Test that counters survive chain updates with rule reordering and set index shuffling. Verifies both rule-level and policy counter preservation. --- tests/e2e/CMakeLists.txt | 1 + tests/e2e/cli/counter_restore.sh | 88 ++++++++++++++++++++++++++++++++ 2 files changed, 89 insertions(+) create mode 100755 tests/e2e/cli/counter_restore.sh diff --git a/tests/e2e/CMakeLists.txt b/tests/e2e/CMakeLists.txt index dbfe8d1d..8f890a03 100644 --- a/tests/e2e/CMakeLists.txt +++ b/tests/e2e/CMakeLists.txt @@ -57,6 +57,7 @@ bf_add_e2e_test(e2e cli/chain_load.sh ROOT) bf_add_e2e_test(e2e cli/chain_set.sh ROOT) bf_add_e2e_test(e2e cli/chain_update.sh ROOT) bf_add_e2e_test(e2e cli/chain_update_set.sh ROOT) +bf_add_e2e_test(e2e cli/counter_restore.sh ROOT) bf_add_e2e_test(e2e cli/hookopts.sh) bf_add_e2e_test(e2e cli/nf_inet_dual_stack.sh ROOT) bf_add_e2e_test(e2e cli/options_error.sh) diff --git a/tests/e2e/cli/counter_restore.sh b/tests/e2e/cli/counter_restore.sh new file mode 100755 index 00000000..30a62ac3 --- /dev/null +++ b/tests/e2e/cli/counter_restore.sh @@ -0,0 +1,88 @@ +#!/usr/bin/env bash + +set -eux +set -o pipefail + +. "$(dirname "$0")"/../e2e_test_util.sh + +make_sandbox +start_bpfilter + +get_rule_counter() { + local chain_name=$1 + local rule_idx=$2 + ${FROM_NS} bfcli chain get --name "${chain_name}" \ + | grep "counters" \ + | grep -v "policy\|error" \ + | sed -n "${rule_idx}p" \ + | awk '{print $2}' +} + +get_policy_counter() { + local chain_name=$1 + ${FROM_NS} bfcli chain get --name "${chain_name}" \ + | grep "counters policy" \ + | awk '{print $3}' +} + +CHAIN="counter_restore" + +# Test 1: counter preservation with rule reordering + +${FROM_NS} bfcli chain set --from-str \ + "chain ${CHAIN} BF_HOOK_XDP{ifindex=${NS_IFINDEX}} ACCEPT rule ip4.proto icmp counter DROP" +(! 
ping -c 1 -W 0.1 ${NS_IP_ADDR}) +COUNTER=$(get_rule_counter ${CHAIN} 1) +POLICY=$(get_policy_counter ${CHAIN}) +test "${COUNTER}" -eq 1 + +${FROM_NS} bfcli chain update --from-str \ + "chain ${CHAIN} BF_HOOK_XDP ACCEPT rule ip4.saddr eq ${HOST_IP_ADDR} counter ACCEPT rule ip4.proto icmp counter DROP" +test "$(get_rule_counter ${CHAIN} 1)" -eq 0 +test "$(get_rule_counter ${CHAIN} 2)" -eq "${COUNTER}" +test "$(get_policy_counter ${CHAIN})" -ge "${POLICY}" + +POLICY=$(get_policy_counter ${CHAIN}) +${FROM_NS} bfcli chain update --from-str \ + "chain ${CHAIN} BF_HOOK_XDP ACCEPT rule ip4.proto icmp counter ACCEPT" +test "$(get_rule_counter ${CHAIN} 1)" -eq 0 +test "$(get_policy_counter ${CHAIN})" -ge "${POLICY}" + +${FROM_NS} bfcli chain flush --name ${CHAIN} + +# Test 2: set index reordering +# Rules reference sets by content, so counters should be preserved +# despite the set index shuffle. + +${FROM_NS} bfcli chain set --from-str \ + "chain ${CHAIN} BF_HOOK_XDP{ifindex=${NS_IFINDEX}} ACCEPT \ + set s1 (ip4.saddr) in { 10.0.0.1 } \ + set s2 (ip4.saddr) in { 10.0.0.2 } \ + rule (ip4.saddr) in s1 counter ACCEPT \ + rule (ip4.saddr) in s2 counter DROP" + +ping -c 1 -W 0.1 ${NS_IP_ADDR} +COUNTER_S1=$(get_rule_counter ${CHAIN} 1) +test "${COUNTER_S1}" -eq 1 + +${FROM_NS} bfcli chain update --from-str \ + "chain ${CHAIN} BF_HOOK_XDP ACCEPT \ + set s2 (ip4.saddr) in { 10.0.0.2 } \ + set s1 (ip4.saddr) in { 10.0.0.1 } \ + set s3 (ip4.saddr) in { 10.0.0.3 } \ + rule (ip4.saddr) in s1 counter ACCEPT \ + rule (ip4.saddr) in s2 counter DROP" +test "$(get_rule_counter ${CHAIN} 1)" -eq "${COUNTER_S1}" +test "$(get_rule_counter ${CHAIN} 2)" -eq 0 + +COUNTER_S2=$(get_rule_counter ${CHAIN} 2) +${FROM_NS} bfcli chain update --from-str \ + "chain ${CHAIN} BF_HOOK_XDP ACCEPT \ + set s1 (ip4.saddr) in { 10.0.0.99 } \ + set s2 (ip4.saddr) in { 10.0.0.2 } \ + rule (ip4.saddr) in s1 counter ACCEPT \ + rule (ip4.saddr) in s2 counter DROP" +test "$(get_rule_counter ${CHAIN} 1)" -eq 0 +test "$(get_rule_counter ${CHAIN} 2)" -eq "${COUNTER_S2}" + +${FROM_NS} bfcli chain flush --name ${CHAIN}