From 0cf63c998bc7e6e71e9bb33ba84f3d60fc8e3109 Mon Sep 17 00:00:00 2001 From: jaronoff97 Date: Thu, 12 Feb 2026 12:53:12 -0500 Subject: [PATCH 1/2] feat: use new policy-zig lib --- README.md | 234 +- Taskfile.yml | 12 - build.zig | 170 +- build.zig.zon | 8 +- proto/google/api/annotations.proto | 31 - proto/google/api/http.proto | 370 -- .../collector/logs/v1/logs_service.proto | 77 - .../metrics/v1/metrics_service.proto | 77 - .../v1development/profiles_service.proto | 79 - .../collector/trace/v1/trace_service.proto | 77 - .../proto/common/v1/common.proto | 129 - proto/opentelemetry/proto/logs/v1/logs.proto | 227 - .../proto/metrics/v1/metrics.proto | 735 ---- .../proto/policy/v1/policy.proto | 378 -- .../profiles/v1development/profiles.proto | 480 --- .../proto/resource/v1/resource.proto | 45 - .../opentelemetry/proto/trace/v1/trace.proto | 359 -- proto/tero/policy/v1/log.proto | 251 -- proto/tero/policy/v1/metric.proto | 126 - proto/tero/policy/v1/policy.proto | 178 - proto/tero/policy/v1/shared.proto | 52 - proto/tero/policy/v1/trace.proto | 207 - src/config/types.zig | 2 +- src/datadog_main.zig | 2 +- src/hyperscan/hyperscan.zig | 1203 ------ src/lambda/extension_api.zig | 2 +- src/lambda_main.zig | 2 +- src/main.zig | 2 +- src/modules/datadog_logs_v2.zig | 4 +- src/modules/datadog_metrics_v2.zig | 4 +- src/modules/datadog_module.zig | 4 +- src/modules/otlp_logs.zig | 4 +- src/modules/otlp_metrics.zig | 4 +- src/modules/otlp_module.zig | 4 +- src/modules/otlp_traces.zig | 4 +- src/modules/prometheus_module.zig | 4 +- src/observability/event_bus.zig | 553 --- src/observability/formatters.zig | 50 - src/observability/level.zig | 93 - src/observability/root.zig | 16 - src/observability/span.zig | 134 - src/observability/std_log_adapter.zig | 219 - src/otlp_main.zig | 2 +- src/policy/loader.zig | 370 -- src/policy/log_transform.zig | 688 --- src/policy/matcher_index.zig | 2131 ---------- src/policy/parser.zig | 2132 ---------- src/policy/policy_engine.zig | 3700 ----------------- src/policy/provider.zig | 138 - src/policy/provider_file.zig | 638 --- src/policy/provider_http.zig | 686 --- src/policy/rate_limiter.zig | 671 --- src/policy/registry.zig | 1985 --------- src/policy/root.zig | 145 - src/policy/sampler.zig | 243 -- src/policy/source.zig | 52 - src/policy/trace_sampler.zig | 608 --- src/policy/types.zig | 442 -- src/prometheus/field_accessor.zig | 2 +- src/prometheus/streaming_filter.zig | 4 +- src/prometheus_main.zig | 2 +- src/proto/google/api.pb.zig | 526 --- src/proto/google/protobuf.pb.zig | 2940 ------------- .../opentelemetry/proto/common/v1.pb.zig | 492 --- src/proto/opentelemetry/proto/logs/v1.pb.zig | 379 -- .../opentelemetry/proto/metrics/v1.pb.zig | 1491 ------- .../opentelemetry/proto/resource/v1.pb.zig | 84 - src/proto/opentelemetry/proto/trace/v1.pb.zig | 519 --- src/proto/root.zig | 6 - src/proto/tero/policy/v1.pb.zig | 2011 --------- src/proxy/server.zig | 2 +- src/root.zig | 21 +- 72 files changed, 101 insertions(+), 29621 deletions(-) delete mode 100644 proto/google/api/annotations.proto delete mode 100644 proto/google/api/http.proto delete mode 100644 proto/opentelemetry/proto/collector/logs/v1/logs_service.proto delete mode 100644 proto/opentelemetry/proto/collector/metrics/v1/metrics_service.proto delete mode 100644 proto/opentelemetry/proto/collector/profiles/v1development/profiles_service.proto delete mode 100644 proto/opentelemetry/proto/collector/trace/v1/trace_service.proto delete mode 100644 proto/opentelemetry/proto/common/v1/common.proto delete mode 
100644 proto/opentelemetry/proto/logs/v1/logs.proto delete mode 100644 proto/opentelemetry/proto/metrics/v1/metrics.proto delete mode 100644 proto/opentelemetry/proto/policy/v1/policy.proto delete mode 100644 proto/opentelemetry/proto/profiles/v1development/profiles.proto delete mode 100644 proto/opentelemetry/proto/resource/v1/resource.proto delete mode 100644 proto/opentelemetry/proto/trace/v1/trace.proto delete mode 100644 proto/tero/policy/v1/log.proto delete mode 100644 proto/tero/policy/v1/metric.proto delete mode 100644 proto/tero/policy/v1/policy.proto delete mode 100644 proto/tero/policy/v1/shared.proto delete mode 100644 proto/tero/policy/v1/trace.proto delete mode 100644 src/hyperscan/hyperscan.zig delete mode 100644 src/observability/event_bus.zig delete mode 100644 src/observability/formatters.zig delete mode 100644 src/observability/level.zig delete mode 100644 src/observability/root.zig delete mode 100644 src/observability/span.zig delete mode 100644 src/observability/std_log_adapter.zig delete mode 100644 src/policy/loader.zig delete mode 100644 src/policy/log_transform.zig delete mode 100644 src/policy/matcher_index.zig delete mode 100644 src/policy/parser.zig delete mode 100644 src/policy/policy_engine.zig delete mode 100644 src/policy/provider.zig delete mode 100644 src/policy/provider_file.zig delete mode 100644 src/policy/provider_http.zig delete mode 100644 src/policy/rate_limiter.zig delete mode 100644 src/policy/registry.zig delete mode 100644 src/policy/root.zig delete mode 100644 src/policy/sampler.zig delete mode 100644 src/policy/source.zig delete mode 100644 src/policy/trace_sampler.zig delete mode 100644 src/policy/types.zig delete mode 100644 src/proto/google/api.pb.zig delete mode 100644 src/proto/google/protobuf.pb.zig delete mode 100644 src/proto/opentelemetry/proto/common/v1.pb.zig delete mode 100644 src/proto/opentelemetry/proto/logs/v1.pb.zig delete mode 100644 src/proto/opentelemetry/proto/metrics/v1.pb.zig delete mode 100644 src/proto/opentelemetry/proto/resource/v1.pb.zig delete mode 100644 src/proto/opentelemetry/proto/trace/v1.pb.zig delete mode 100644 src/proto/root.zig delete mode 100644 src/proto/tero/policy/v1.pb.zig diff --git a/README.md b/README.md index 0b03624..521f79e 100644 --- a/README.md +++ b/README.md @@ -37,100 +37,32 @@ src/ ├── datadog_main.zig # Datadog-focused distribution entry point ├── otlp_main.zig # OTLP-focused distribution entry point ├── prometheus_main.zig # Prometheus-focused distribution entry point +├── lambda_main.zig # AWS Lambda extension entry point ├── root.zig # Library root (public API exports) │ -├── policy/ # Policy management package -├── hyperscan/ # Pattern matching package (Vectorscan bindings) ├── modules/ # Protocol-specific processing modules ├── proxy/ # HTTP proxy infrastructure -├── observability/ # Logging and tracing infrastructure +├── prometheus/ # Prometheus metric parsing and filtering ├── config/ # Configuration parsing (non-policy) -└── proto/ # Protobuf definitions and generated code +├── lambda/ # AWS Lambda extension support +└── zonfig/ # Comptime configuration with env overrides ``` -## Package Overview - -Each package is designed to be as independent as possible, with clear interfaces -for integration. - -### `policy/` - Policy Management - -The policy package provides centralized policy lifecycle management including -loading, hot-reloading, and evaluation. 
- -**Key exports:** +## External Dependencies -- `Registry` - Thread-safe policy store with lock-free reads via atomic snapshot - pointers -- `Provider` - Vtable interface for policy sources (file, HTTP, etc.) -- `FileProvider` - File-based policy loading with inotify/kqueue watching -- `HttpProvider` - HTTP-based policy loading with polling and sync state -- `FilterEngine` - Hyperscan-based policy evaluation engine -- `SourceType` - Policy source classification (file, http) +Edge consumes the following shared modules from +[policy-zig](https://github.com/usetero/policy-zig): -**Integration:** +- **`policy_zig`** - Policy engine, registry, matchers, transforms, and + Hyperscan/Vectorscan bindings +- **`proto`** - Protobuf types (policy, common, OTLP + logs/metrics/trace/resource) +- **`o11y`** - Observability (EventBus, structured logging, spans) -```zig -const policy = @import("policy/root.zig"); - -// Create registry -var registry = policy.Registry.init(allocator, event_bus); -defer registry.deinit(); - -// Create and register a file provider -const file_provider = try policy.FileProvider.init(allocator, bus, "local", "/path/to/policies.json"); -const provider = policy.Provider.init(file_provider); -try registry.registerProvider(&provider); - -// Evaluate policies (lock-free read) -var engine = policy.FilterEngine.init(allocator, bus, ®istry); - -// Evaluate data where ctx is the data you want to evaluate -// and field_accessor_fn is a function that informs the engine how to access fields in the data. -const result = engine.evaluate(ctx, field_accessor_fn); -``` - -**Dependencies:** `hyperscan/` (for FilterEngine), `observability/`, `proto/` - ---- - -### `hyperscan/` - Pattern Matching - -High-performance regex matching via Vectorscan (Hyperscan fork). Provides both -low-level bindings and a higher-level matcher index for policy evaluation. - -**Key exports:** - -- `Database` - Compiled Hyperscan database -- `Scratch` - Per-thread scratch space for scanning -- `Pattern` - Pattern definition with flags -- `MatcherIndex` - Inverted index mapping (MatchCase, key) tuples to compiled - databases -- `MatcherDatabase` - Single compiled database with pattern metadata - -**Integration:** - -```zig -const hyperscan = @import("hyperscan/hyperscan.zig"); - -// Compile patterns -const patterns = [_]hyperscan.Pattern{ - .{ .expression = "error", .id = 0, .flags = .{} }, - .{ .expression = "warning", .id = 1, .flags = .{} }, -}; -var db = try hyperscan.Database.compileMulti(allocator, &patterns, .{}); -defer db.deinit(); - -// Create scratch and scan -var scratch = try hyperscan.Scratch.init(&db); -defer scratch.deinit(); -_ = try db.scanWithCallback(&scratch, input_text, &ctx, callback_fn); -``` - -**Dependencies:** `observability/`, `proto/` (for MatcherIndex), links to libhs -(Vectorscan) +## Package Overview ---- +Each package is designed to be as independent as possible, with clear interfaces +for integration. ### `modules/` - Protocol Modules @@ -147,45 +79,7 @@ infrastructure. 
filtering - `PassthroughModule` - No-op passthrough for unhandled routes -**Integration:** - -```zig -const modules = @import("modules/proxy_module.zig"); -const datadog = @import("modules/datadog_module.zig"); - -var datadog_logs_module = datadog.DatadogModule{}; -var datadog_metrics_module = datadog.DatadogModule{}; -var passthrough = modules.PassthroughModule{}; - -const registrations = [_]modules.ModuleRegistration{ - .{ - .module = datadog_logs_module.asProxyModule(), - .routes = &datadog.logs_routes, - .upstream_url = "https://intake.logs.datadoghq.com", - .max_request_body = 10 * 1024 * 1024, - .max_response_body = 1024 * 1024, - .module_data = @ptrCast(&config), - }, - .{ - .module = datadog_metrics_module.asProxyModule(), - .routes = &datadog.metrics_routes, - .upstream_url = "https://api.datadoghq.com", - .max_request_body = 10 * 1024 * 1024, - .max_response_body = 1024 * 1024, - .module_data = @ptrCast(&config), - }, - .{ - .module = passthrough.asProxyModule(), - .routes = &passthrough.default_routes, - .upstream_url = upstream_url, - .max_request_body = max_body, - .max_response_body = max_body, - .module_data = null, - }, -}; -``` - -**Dependencies:** `policy/` (for DatadogModule, OtlpModule), `observability/` +**Dependencies:** `policy_zig`, `proto`, `o11y` #### OtlpModule @@ -221,60 +115,22 @@ Core HTTP proxy server and routing infrastructure. - `UpstreamClient` - HTTP client for forwarding requests - `compress` - gzip compression/decompression utilities -**Integration:** - -```zig -const proxy = @import("proxy/server.zig"); - -var server = try proxy.ProxyServer.init( - allocator, - event_bus, - listen_address, - listen_port, - &module_registrations, -); -defer server.deinit(); - -try server.listen(); // Blocks until server.stop() called -``` - -**Dependencies:** `modules/`, `observability/`, links to httpz +**Dependencies:** `o11y`, links to httpz --- -### `observability/` - Logging and Tracing +### `prometheus/` - Prometheus Metrics -Structured event-based observability infrastructure. All logging goes through an -EventBus that can route to different backends. +Streaming Prometheus exposition format parsing and policy-based filtering. **Key exports:** -- `EventBus` - Vtable interface for event emission -- `StdioEventBus` - JSON-formatted output to stdout/stderr -- `NoopEventBus` - Silent bus for testing -- `Level` - Log levels (trace, debug, info, warn, err) -- `Span` - Duration tracking with start/complete events -- `StdLogAdapter` - Routes `std.log` through the EventBus - -**Integration:** - -```zig -const o11y = @import("observability/root.zig"); - -var stdio_bus: o11y.StdioEventBus = undefined; -stdio_bus.init(); -const bus = stdio_bus.eventBus(); +- `PolicyStreamingFilter` - Streaming filter that applies policies to metrics +- `FilteringWriter` - Writer that filters metrics line-by-line +- `FieldAccessor` - Maps policy field references to Prometheus metric fields +- `LineParser` - Prometheus exposition format parser -bus.setLevel(.info); -bus.info(MyEvent{ .field = value }); - -// Spans for timing -var span = bus.started(.info, StartEvent{}); -// ... do work ... -span.completed(EndEvent{}); -``` - -**Dependencies:** None (leaf package) +**Dependencies:** `policy_zig`, `proto`, `o11y` --- @@ -284,38 +140,11 @@ Application configuration loading and parsing (non-policy configuration). 
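+
+The provider settings described below are handed to the policy-zig
+registry at startup. A minimal wiring sketch, assuming policy-zig
+preserves the `Registry`/`FileProvider`/`Provider` API of the in-tree
+`policy/` package this change removes (signatures unverified against the
+library itself; `allocator` and `event_bus` are caller-supplied):
+
+```zig
+const policy = @import("policy_zig");
+
+// Registry stores active policies; reads were lock-free in the in-tree version.
+var registry = policy.Registry.init(allocator, event_bus);
+defer registry.deinit();
+
+// File-backed provider; the path here is illustrative, not a real default.
+const file_provider = try policy.FileProvider.init(allocator, event_bus, "local", "/path/to/policies.json");
+const provider = policy.Provider.init(file_provider);
+try registry.registerProvider(&provider);
+```
+
+Consult policy-zig directly for the authoritative shapes of
+`ProviderConfig` and the registration calls.
+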
**Key exports:** -- `Config` - Main configuration struct -- `parseConfigFile` - JSON config file parsing - -**Integration:** +- `ProxyConfig` - Main proxy configuration struct +- `ProviderConfig` - Policy provider configuration (re-exported from policy-zig) +- `ServiceMetadata` - Service metadata (re-exported from policy-zig) -```zig -const config = @import("config/parser.zig"); - -const cfg = try config.parseConfigFile(allocator, "config.json"); -defer { - allocator.free(cfg.upstream_url); - // ... cleanup ... - allocator.destroy(cfg); -} -``` - -**Dependencies:** `policy/` (for ProviderConfig types) - ---- - -### `proto/` - Protocol Buffers - -Generated Zig code from protobuf definitions. Provides policy and telemetry data -structures. - -**Key exports:** - -- `policy.Policy` - Policy definition -- `policy.LogMatcher` - Log matching rules -- `policy.FilterAction` - DROP/KEEP actions - -**Dependencies:** None (leaf package) +**Dependencies:** `policy_zig` --- @@ -393,6 +222,12 @@ Focused distribution for Prometheus metrics scraping with streaming filtering: Build: `zig build prometheus` Run: `zig build run-prometheus` or `./zig-out/bin/edge-prometheus [config-file]` +### Lambda (`lambda_main.zig`) + +AWS Lambda extension distribution for Datadog telemetry processing. + +Build: `zig build lambda` Run: Deployed as a Lambda layer + ## Building ```bash @@ -407,6 +242,7 @@ zig build edge # Full distribution (Datadog + OTLP + Prometheus) zig build datadog # Datadog-only distribution zig build otlp # OTLP-only distribution zig build prometheus # Prometheus-only distribution +zig build lambda # Lambda extension distribution # Run specific distribution zig build run-edge diff --git a/Taskfile.yml b/Taskfile.yml index df58959..ae3c339 100644 --- a/Taskfile.yml +++ b/Taskfile.yml @@ -19,11 +19,6 @@ tasks: cmds: - zig build -Doptimize={{.OPTIMIZE | default "Debug"}} - build:protos: - desc: "Build the project with protos" - cmds: - - zig build -Dgen-proto=true gen-proto - build:release: desc: "Build the project in release mode (ReleaseFast)" aliases: [br, release] @@ -275,13 +270,6 @@ tasks: echo "No Zig REPL found. Install ziggy: https://github.com/zigtools/ziggy" fi - update:protos: - desc: "Update proto files" - cmds: - - buf export buf.build/tero/policy -o proto - - buf export buf.build/opentelemetry/opentelemetry -o proto - - task: build:protos - # ============================================================================= # Build & Release # ============================================================================= diff --git a/build.zig b/build.zig index dc4513b..02ede79 100644 --- a/build.zig +++ b/build.zig @@ -1,135 +1,74 @@ const std = @import("std"); -const protobuf = @import("protobuf"); -// Although this function looks imperative, it does not perform the build -// directly and instead it mutates the build graph (`b`) that will be then -// executed by an external runner. The functions in `std.Build` implement a DSL -// for defining build steps and express dependencies between them, allowing the -// build runner to parallelize the build automatically (and the cache system to -// know when a step doesn't need to be re-run). pub fn build(b: *std.Build) void { - // Standard target options allow the person running `zig build` to choose - // what target to build for. Here we do not override the defaults, which - // means any target is allowed, and the default is native. Other options - // for restricting supported target set are available. 
const target = b.standardTargetOptions(.{}); - // Standard optimization options allow the person running `zig build` to select - // between Debug, ReleaseSafe, ReleaseFast, and ReleaseSmall. Here we do not - // set a preferred release mode, allowing the user to decide how to optimize. const optimize = b.standardOptimizeOption(.{}); - // It's also possible to define more custom flags to toggle optional features - // of this build script using `b.option()`. All defined flags (including - // target and optimize options) will be listed when running `zig build --help` - // in this directory. + + // ========================================================================== + // Dependencies + // ========================================================================== const httpz = b.dependency("httpz", .{ .target = target, .optimize = optimize, }); - const protobuf_dep = b.dependency("protobuf", .{ + const zimdjson = b.dependency("zimdjson", .{ .target = target, .optimize = optimize, }); - const zimdjson = b.dependency("zimdjson", .{ + const policy_dep = b.dependency("policy_zig", .{ .target = target, .optimize = optimize, }); - // Create a proto module for generated protobuf files - const proto_mod = b.addModule("proto", .{ - .root_source_file = b.path("src/proto/root.zig"), - .target = target, - }); - proto_mod.addImport("protobuf", protobuf_dep.module("protobuf")); + // Shared modules from policy-zig ensure type identity across boundaries. + const proto_mod = policy_dep.module("proto"); + const o11y_mod = policy_dep.module("o11y"); + + // ========================================================================== + // Edge Library Module + // ========================================================================== - // This creates a module, which represents a collection of source files alongside - // some compilation options, such as optimization mode and linked system libraries. - // Zig modules are the preferred way of making Zig code available to consumers. - // addModule defines a module that we intend to make available for importing - // to our consumers. We must give it a name because a Zig package can expose - // multiple modules and consumers will need to be able to specify which - // module they want to access. const mod = b.addModule("edge", .{ - // The root source file is the "entry point" of this module. Users of - // this module will only be able to access public declarations contained - // in this file, which means that if you have declarations that you - // intend to expose to consumers that were defined in other files part - // of this module, you will have to make sure to re-export them from - // the root file. .root_source_file = b.path("src/root.zig"), - // Later on we'll use this module as the root module of a test executable - // which requires us to specify a target. .target = target, .imports = &.{ .{ .name = "proto", .module = proto_mod }, .{ .name = "zimdjson", .module = zimdjson.module("zimdjson") }, + .{ .name = "policy_zig", .module = policy_dep.module("policy_zig") }, + .{ .name = "o11y", .module = o11y_mod }, }, }); - // Here we define an executable. An executable needs to have a root module - // which needs to expose a `main` function. While we could add a main function - // to the module defined above, it's sometimes preferable to split business - // logic and the CLI into two separate modules. - // - // If your goal is to create a Zig library for others to use, consider if - // it might benefit from also exposing a CLI tool. 
A parser library for a - // data serialization format could also bundle a CLI syntax checker, for example. - // - // If instead your goal is to create an executable, consider if users might - // be interested in also being able to embed the core functionality of your - // program in their own executable in order to avoid the overhead involved in - // subprocessing your CLI tool. - // - // If neither case applies to you, feel free to delete the declaration you - // don't need and to put everything under a single module. + // ========================================================================== + // Main Executable + // ========================================================================== + const exe = b.addExecutable(.{ .name = "edge", .root_module = b.createModule(.{ - // b.createModule defines a new module just like b.addModule but, - // unlike b.addModule, it does not expose the module to consumers of - // this package, which is why in this case we don't have to give it a name. .root_source_file = b.path("src/main.zig"), - // Target and optimization levels must be explicitly wired in when - // defining an executable or library (in the root module), and you - // can also hardcode a specific target for an executable or library - // definition if desireable (e.g. firmware for embedded devices). .target = target, .optimize = optimize, - // List of modules available for import in source files part of the - // root module. .imports = &.{ - // Here "edge" is the name you will use in your source code to - // import this module (e.g. `@import("edge")`). The name is - // repeated because you are allowed to rename your imports, which - // can be extremely useful in case of collisions (which can happen - // importing modules from different packages). .{ .name = "edge", .module = mod }, }, }), }); exe.root_module.addImport("httpz", httpz.module("httpz")); - exe.root_module.addImport("protobuf", protobuf_dep.module("protobuf")); exe.root_module.addImport("proto", proto_mod); exe.root_module.addImport("zimdjson", zimdjson.module("zimdjson")); - - // Link zlib for gzip compression + exe.root_module.addImport("policy_zig", policy_dep.module("policy_zig")); + exe.root_module.addImport("o11y", o11y_mod); exe.root_module.link_libc = true; exe.root_module.linkSystemLibrary("z", .{}); exe.root_module.linkSystemLibrary("zstd", .{}); - exe.root_module.linkSystemLibrary("hs", .{}); - // This declares intent for the executable to be installed into the - // install prefix when running `zig build` (i.e. when executing the default - // step). By default the install prefix is `zig-out/` but can be overridden - // by passing `--prefix` or `-p`. b.installArtifact(exe); // ========================================================================== // Distribution Builds // ========================================================================== - // Each distribution is a focused edge proxy for a specific backend. 
- // Build with: zig build - // Run with: zig build run- const distributions = .{ .{ "datadog", "src/datadog_main.zig", "Datadog ingestion" }, @@ -153,13 +92,13 @@ pub fn build(b: *std.Build) void { }), }); dist_exe.root_module.addImport("httpz", httpz.module("httpz")); - dist_exe.root_module.addImport("protobuf", protobuf_dep.module("protobuf")); dist_exe.root_module.addImport("proto", proto_mod); dist_exe.root_module.addImport("zimdjson", zimdjson.module("zimdjson")); + dist_exe.root_module.addImport("policy_zig", policy_dep.module("policy_zig")); + dist_exe.root_module.addImport("o11y", o11y_mod); dist_exe.root_module.link_libc = true; dist_exe.root_module.linkSystemLibrary("z", .{}); dist_exe.root_module.linkSystemLibrary("zstd", .{}); - dist_exe.root_module.linkSystemLibrary("hs", .{}); const dist_step = b.step(name, "Build the " ++ name ++ " distribution (" ++ desc ++ ")"); dist_step.dependOn(&b.addInstallArtifact(dist_exe, .{}).step); @@ -173,45 +112,30 @@ pub fn build(b: *std.Build) void { } } - // This creates a top level step. Top level steps have a name and can be - // invoked by name when running `zig build` (e.g. `zig build run`). - // This will evaluate the `run` step rather than the default step. - // For a top level step to actually do something, it must depend on other - // steps (e.g. a Run step, as we will see in a moment). - const run_step = b.step("run", "Run the app"); + // ========================================================================== + // Run Step + // ========================================================================== - // This creates a RunArtifact step in the build graph. A RunArtifact step - // invokes an executable compiled by Zig. Steps will only be executed by the - // runner if invoked directly by the user (in the case of top level steps) - // or if another step depends on it, so it's up to you to define when and - // how this Run step will be executed. In our case we want to run it when - // the user runs `zig build run`, so we create a dependency link. + const run_step = b.step("run", "Run the app"); const run_cmd = b.addRunArtifact(exe); run_step.dependOn(&run_cmd.step); - - // By making the run step depend on the default step, it will be run from the - // installation directory rather than directly from within the cache directory. run_cmd.step.dependOn(b.getInstallStep()); - - // This allows the user to pass arguments to the application in the build - // command itself, like this: `zig build run -- arg1 arg2 etc` if (b.args) |args| { run_cmd.addArgs(args); } - // Creates an executable that will run `test` blocks from the provided module. + // ========================================================================== + // Tests + // ========================================================================== + const mod_tests = b.addTest(.{ .root_module = mod, }); mod_tests.root_module.link_libc = true; mod_tests.root_module.linkSystemLibrary("z", .{}); mod_tests.root_module.linkSystemLibrary("zstd", .{}); - mod_tests.root_module.linkSystemLibrary("hs", .{}); - // A run step that will run the test executable. const run_mod_tests = b.addRunArtifact(mod_tests); - - // A top level step for running all tests. 
const test_step = b.step("test", "Run tests"); test_step.dependOn(&run_mod_tests.step); @@ -219,7 +143,6 @@ pub fn build(b: *std.Build) void { // Benchmark Tools // ========================================================================== - // Echo server for benchmarking - a minimal HTTP server that returns 202 const echo_server = b.addExecutable(.{ .name = "echo-server", .root_module = b.createModule(.{ @@ -240,35 +163,4 @@ pub fn build(b: *std.Build) void { if (b.args) |args| { run_echo_cmd.addArgs(args); } - - // Proto generation step - only create when explicitly requested via build option. - // This avoids fetching protoc binary during normal builds (which fails on ARM64 CI). - const gen_proto_opt = b.option(bool, "gen-proto", "Generate protobuf files") orelse false; - if (gen_proto_opt) { - const gen_proto = b.step("gen-proto", "generates zig files from protocol buffer definitions"); - const protoc_step = protobuf.RunProtocStep.create(protobuf_dep.builder, target, .{ - .destination_directory = b.path("src/proto"), - .source_files = &.{ - "proto/tero/policy/v1/policy.proto", - "proto/tero/policy/v1/log.proto", - "proto/opentelemetry/proto/logs/v1/logs.proto", - "proto/opentelemetry/proto/metrics/v1/metrics.proto", - }, - .include_directories = &.{ - "proto", - }, - }); - gen_proto.dependOn(&protoc_step.step); - } - // Just like flags, top level steps are also listed in the `--help` menu. - // - // The Zig build system is entirely implemented in userland, which means - // that it cannot hook into private compiler APIs. All compilation work - // orchestrated by the build system will result in other Zig compiler - // subcommands being invoked with the right flags defined. You can observe - // these invocations when one fails (or you pass a flag to increase - // verbosity) to validate assumptions and diagnose problems. - // - // Lastly, the Zig build system is relatively simple and self-contained, - // and reading its source code will allow you to master it. } diff --git a/build.zig.zon b/build.zig.zon index a9d6253..7573e10 100644 --- a/build.zig.zon +++ b/build.zig.zon @@ -36,14 +36,14 @@ .url = "git+https://github.com/karlseguin/http.zig?ref=master#a4477b1dbdc5b820fd4ad301797ca6399f7eb6e4", .hash = "httpz-0.0.0-PNVzrBgtBwCVkSJyophIX6WHwDR0r8XhBGQr96Kk-1El", }, - .protobuf = .{ - .url = "git+https://github.com/jaronoff97/zig-protobuf?ref=fix-json-oneof-shadowing#464170e22be3460e44f9e79dad0184e969d1407b", - .hash = "protobuf-3.0.0-0e82atQRKQAeWaBvhs4mtvWwJ6oZ42Deivtk1rZLUKx3", - }, .zimdjson = .{ .url = "git+https://github.com/jaronoff97/zimdjson?ref=zig-0.15.1-support#f31efc9e698246f994df8eba44646134a93fa2b6", .hash = "zimdjson-0.1.1-89pgxT5VBgBMxsGpAd90DI0e-DzHGKiRVlquliE5FivH", }, + .policy_zig = .{ + .url = "git+https://github.com/usetero/policy-zig?ref=v0.1.3", + .hash = "policy_zig-0.1.3-5_dp3l37FgCw1ZfsRdVHkOJONEbu01hNh1xJD5jl_TeR", + }, }, .paths = .{ "build.zig", diff --git a/proto/google/api/annotations.proto b/proto/google/api/annotations.proto deleted file mode 100644 index 417edd8..0000000 --- a/proto/google/api/annotations.proto +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright 2025 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.api; - -import "google/api/http.proto"; -import "google/protobuf/descriptor.proto"; - -option go_package = "google.golang.org/genproto/googleapis/api/annotations;annotations"; -option java_multiple_files = true; -option java_outer_classname = "AnnotationsProto"; -option java_package = "com.google.api"; -option objc_class_prefix = "GAPI"; - -extend google.protobuf.MethodOptions { - // See `HttpRule`. - HttpRule http = 72295728; -} diff --git a/proto/google/api/http.proto b/proto/google/api/http.proto deleted file mode 100644 index 57621b5..0000000 --- a/proto/google/api/http.proto +++ /dev/null @@ -1,370 +0,0 @@ -// Copyright 2025 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.api; - -option go_package = "google.golang.org/genproto/googleapis/api/annotations;annotations"; -option java_multiple_files = true; -option java_outer_classname = "HttpProto"; -option java_package = "com.google.api"; -option objc_class_prefix = "GAPI"; - -// Defines the HTTP configuration for an API service. It contains a list of -// [HttpRule][google.api.HttpRule], each specifying the mapping of an RPC method -// to one or more HTTP REST API methods. -message Http { - // A list of HTTP configuration rules that apply to individual API methods. - // - // **NOTE:** All service configuration rules follow "last one wins" order. - repeated HttpRule rules = 1; - - // When set to true, URL path parameters will be fully URI-decoded except in - // cases of single segment matches in reserved expansion, where "%2F" will be - // left encoded. - // - // The default behavior is to not decode RFC 6570 reserved characters in multi - // segment matches. - bool fully_decode_reserved_expansion = 2; -} - -// gRPC Transcoding -// -// gRPC Transcoding is a feature for mapping between a gRPC method and one or -// more HTTP REST endpoints. It allows developers to build a single API service -// that supports both gRPC APIs and REST APIs. Many systems, including [Google -// APIs](https://github.com/googleapis/googleapis), -// [Cloud Endpoints](https://cloud.google.com/endpoints), [gRPC -// Gateway](https://github.com/grpc-ecosystem/grpc-gateway), -// and [Envoy](https://github.com/envoyproxy/envoy) proxy support this feature -// and use it for large scale production services. -// -// `HttpRule` defines the schema of the gRPC/REST mapping. The mapping specifies -// how different portions of the gRPC request message are mapped to the URL -// path, URL query parameters, and HTTP request body. 
It also controls how the -// gRPC response message is mapped to the HTTP response body. `HttpRule` is -// typically specified as an `google.api.http` annotation on the gRPC method. -// -// Each mapping specifies a URL path template and an HTTP method. The path -// template may refer to one or more fields in the gRPC request message, as long -// as each field is a non-repeated field with a primitive (non-message) type. -// The path template controls how fields of the request message are mapped to -// the URL path. -// -// Example: -// -// service Messaging { -// rpc GetMessage(GetMessageRequest) returns (Message) { -// option (google.api.http) = { -// get: "/v1/{name=messages/*}" -// }; -// } -// } -// message GetMessageRequest { -// string name = 1; // Mapped to URL path. -// } -// message Message { -// string text = 1; // The resource content. -// } -// -// This enables an HTTP REST to gRPC mapping as below: -// -// - HTTP: `GET /v1/messages/123456` -// - gRPC: `GetMessage(name: "messages/123456")` -// -// Any fields in the request message which are not bound by the path template -// automatically become HTTP query parameters if there is no HTTP request body. -// For example: -// -// service Messaging { -// rpc GetMessage(GetMessageRequest) returns (Message) { -// option (google.api.http) = { -// get:"/v1/messages/{message_id}" -// }; -// } -// } -// message GetMessageRequest { -// message SubMessage { -// string subfield = 1; -// } -// string message_id = 1; // Mapped to URL path. -// int64 revision = 2; // Mapped to URL query parameter `revision`. -// SubMessage sub = 3; // Mapped to URL query parameter `sub.subfield`. -// } -// -// This enables a HTTP JSON to RPC mapping as below: -// -// - HTTP: `GET /v1/messages/123456?revision=2&sub.subfield=foo` -// - gRPC: `GetMessage(message_id: "123456" revision: 2 sub: -// SubMessage(subfield: "foo"))` -// -// Note that fields which are mapped to URL query parameters must have a -// primitive type or a repeated primitive type or a non-repeated message type. -// In the case of a repeated type, the parameter can be repeated in the URL -// as `...?param=A¶m=B`. In the case of a message type, each field of the -// message is mapped to a separate parameter, such as -// `...?foo.a=A&foo.b=B&foo.c=C`. -// -// For HTTP methods that allow a request body, the `body` field -// specifies the mapping. Consider a REST update method on the -// message resource collection: -// -// service Messaging { -// rpc UpdateMessage(UpdateMessageRequest) returns (Message) { -// option (google.api.http) = { -// patch: "/v1/messages/{message_id}" -// body: "message" -// }; -// } -// } -// message UpdateMessageRequest { -// string message_id = 1; // mapped to the URL -// Message message = 2; // mapped to the body -// } -// -// The following HTTP JSON to RPC mapping is enabled, where the -// representation of the JSON in the request body is determined by -// protos JSON encoding: -// -// - HTTP: `PATCH /v1/messages/123456 { "text": "Hi!" }` -// - gRPC: `UpdateMessage(message_id: "123456" message { text: "Hi!" })` -// -// The special name `*` can be used in the body mapping to define that -// every field not bound by the path template should be mapped to the -// request body. 
This enables the following alternative definition of -// the update method: -// -// service Messaging { -// rpc UpdateMessage(Message) returns (Message) { -// option (google.api.http) = { -// patch: "/v1/messages/{message_id}" -// body: "*" -// }; -// } -// } -// message Message { -// string message_id = 1; -// string text = 2; -// } -// -// -// The following HTTP JSON to RPC mapping is enabled: -// -// - HTTP: `PATCH /v1/messages/123456 { "text": "Hi!" }` -// - gRPC: `UpdateMessage(message_id: "123456" text: "Hi!")` -// -// Note that when using `*` in the body mapping, it is not possible to -// have HTTP parameters, as all fields not bound by the path end in -// the body. This makes this option more rarely used in practice when -// defining REST APIs. The common usage of `*` is in custom methods -// which don't use the URL at all for transferring data. -// -// It is possible to define multiple HTTP methods for one RPC by using -// the `additional_bindings` option. Example: -// -// service Messaging { -// rpc GetMessage(GetMessageRequest) returns (Message) { -// option (google.api.http) = { -// get: "/v1/messages/{message_id}" -// additional_bindings { -// get: "/v1/users/{user_id}/messages/{message_id}" -// } -// }; -// } -// } -// message GetMessageRequest { -// string message_id = 1; -// string user_id = 2; -// } -// -// This enables the following two alternative HTTP JSON to RPC mappings: -// -// - HTTP: `GET /v1/messages/123456` -// - gRPC: `GetMessage(message_id: "123456")` -// -// - HTTP: `GET /v1/users/me/messages/123456` -// - gRPC: `GetMessage(user_id: "me" message_id: "123456")` -// -// Rules for HTTP mapping -// -// 1. Leaf request fields (recursive expansion nested messages in the request -// message) are classified into three categories: -// - Fields referred by the path template. They are passed via the URL path. -// - Fields referred by the [HttpRule.body][google.api.HttpRule.body]. They -// are passed via the HTTP -// request body. -// - All other fields are passed via the URL query parameters, and the -// parameter name is the field path in the request message. A repeated -// field can be represented as multiple query parameters under the same -// name. -// 2. If [HttpRule.body][google.api.HttpRule.body] is "*", there is no URL -// query parameter, all fields -// are passed via URL path and HTTP request body. -// 3. If [HttpRule.body][google.api.HttpRule.body] is omitted, there is no HTTP -// request body, all -// fields are passed via URL path and URL query parameters. -// -// Path template syntax -// -// Template = "/" Segments [ Verb ] ; -// Segments = Segment { "/" Segment } ; -// Segment = "*" | "**" | LITERAL | Variable ; -// Variable = "{" FieldPath [ "=" Segments ] "}" ; -// FieldPath = IDENT { "." IDENT } ; -// Verb = ":" LITERAL ; -// -// The syntax `*` matches a single URL path segment. The syntax `**` matches -// zero or more URL path segments, which must be the last part of the URL path -// except the `Verb`. -// -// The syntax `Variable` matches part of the URL path as specified by its -// template. A variable template must not contain other variables. If a variable -// matches a single path segment, its template may be omitted, e.g. `{var}` -// is equivalent to `{var=*}`. -// -// The syntax `LITERAL` matches literal text in the URL path. If the `LITERAL` -// contains any reserved character, such characters should be percent-encoded -// before the matching. 
-// -// If a variable contains exactly one path segment, such as `"{var}"` or -// `"{var=*}"`, when such a variable is expanded into a URL path on the client -// side, all characters except `[-_.~0-9a-zA-Z]` are percent-encoded. The -// server side does the reverse decoding. Such variables show up in the -// [Discovery -// Document](https://developers.google.com/discovery/v1/reference/apis) as -// `{var}`. -// -// If a variable contains multiple path segments, such as `"{var=foo/*}"` -// or `"{var=**}"`, when such a variable is expanded into a URL path on the -// client side, all characters except `[-_.~/0-9a-zA-Z]` are percent-encoded. -// The server side does the reverse decoding, except "%2F" and "%2f" are left -// unchanged. Such variables show up in the -// [Discovery -// Document](https://developers.google.com/discovery/v1/reference/apis) as -// `{+var}`. -// -// Using gRPC API Service Configuration -// -// gRPC API Service Configuration (service config) is a configuration language -// for configuring a gRPC service to become a user-facing product. The -// service config is simply the YAML representation of the `google.api.Service` -// proto message. -// -// As an alternative to annotating your proto file, you can configure gRPC -// transcoding in your service config YAML files. You do this by specifying a -// `HttpRule` that maps the gRPC method to a REST endpoint, achieving the same -// effect as the proto annotation. This can be particularly useful if you -// have a proto that is reused in multiple services. Note that any transcoding -// specified in the service config will override any matching transcoding -// configuration in the proto. -// -// The following example selects a gRPC method and applies an `HttpRule` to it: -// -// http: -// rules: -// - selector: example.v1.Messaging.GetMessage -// get: /v1/messages/{message_id}/{sub.subfield} -// -// Special notes -// -// When gRPC Transcoding is used to map a gRPC to JSON REST endpoints, the -// proto to JSON conversion must follow the [proto3 -// specification](https://developers.google.com/protocol-buffers/docs/proto3#json). -// -// While the single segment variable follows the semantics of -// [RFC 6570](https://tools.ietf.org/html/rfc6570) Section 3.2.2 Simple String -// Expansion, the multi segment variable **does not** follow RFC 6570 Section -// 3.2.3 Reserved Expansion. The reason is that the Reserved Expansion -// does not expand special characters like `?` and `#`, which would lead -// to invalid URLs. As the result, gRPC Transcoding uses a custom encoding -// for multi segment variables. -// -// The path variables **must not** refer to any repeated or mapped field, -// because client libraries are not capable of handling such variable expansion. -// -// The path variables **must not** capture the leading "/" character. The reason -// is that the most common use case "{var}" does not capture the leading "/" -// character. For consistency, all path variables must share the same behavior. -// -// Repeated message fields must not be mapped to URL query parameters, because -// no client library can support such complicated mapping. -// -// If an API needs to use a JSON array for request or response body, it can map -// the request or response body to a repeated field. However, some gRPC -// Transcoding implementations may not support this feature. -message HttpRule { - // Selects a method to which this rule applies. - // - // Refer to [selector][google.api.DocumentationRule.selector] for syntax - // details. 
- string selector = 1; - - // Determines the URL pattern is matched by this rules. This pattern can be - // used with any of the {get|put|post|delete|patch} methods. A custom method - // can be defined using the 'custom' field. - oneof pattern { - // Maps to HTTP GET. Used for listing and getting information about - // resources. - string get = 2; - - // Maps to HTTP PUT. Used for replacing a resource. - string put = 3; - - // Maps to HTTP POST. Used for creating a resource or performing an action. - string post = 4; - - // Maps to HTTP DELETE. Used for deleting a resource. - string delete = 5; - - // Maps to HTTP PATCH. Used for updating a resource. - string patch = 6; - - // The custom pattern is used for specifying an HTTP method that is not - // included in the `pattern` field, such as HEAD, or "*" to leave the - // HTTP method unspecified for this rule. The wild-card rule is useful - // for services that provide content to Web (HTML) clients. - CustomHttpPattern custom = 8; - } - - // The name of the request field whose value is mapped to the HTTP request - // body, or `*` for mapping all request fields not captured by the path - // pattern to the HTTP body, or omitted for not having any HTTP request body. - // - // NOTE: the referred field must be present at the top-level of the request - // message type. - string body = 7; - - // Optional. The name of the response field whose value is mapped to the HTTP - // response body. When omitted, the entire response message will be used - // as the HTTP response body. - // - // NOTE: The referred field must be present at the top-level of the response - // message type. - string response_body = 12; - - // Additional HTTP bindings for the selector. Nested bindings must - // not contain an `additional_bindings` field themselves (that is, - // the nesting may only be one level deep). - repeated HttpRule additional_bindings = 11; -} - -// A custom pattern is used for defining custom HTTP verb. -message CustomHttpPattern { - // The name of this custom HTTP verb. - string kind = 1; - - // The path matched by this custom verb. - string path = 2; -} diff --git a/proto/opentelemetry/proto/collector/logs/v1/logs_service.proto b/proto/opentelemetry/proto/collector/logs/v1/logs_service.proto deleted file mode 100644 index 8be5cf7..0000000 --- a/proto/opentelemetry/proto/collector/logs/v1/logs_service.proto +++ /dev/null @@ -1,77 +0,0 @@ -// Copyright 2020, OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -syntax = "proto3"; - -package opentelemetry.proto.collector.logs.v1; - -import "opentelemetry/proto/logs/v1/logs.proto"; - -option csharp_namespace = "OpenTelemetry.Proto.Collector.Logs.V1"; -option java_multiple_files = true; -option java_package = "io.opentelemetry.proto.collector.logs.v1"; -option java_outer_classname = "LogsServiceProto"; -option go_package = "go.opentelemetry.io/proto/otlp/collector/logs/v1"; - -// Service that can be used to push logs between one Application instrumented with -// OpenTelemetry and an collector, or between an collector and a central collector (in this -// case logs are sent/received to/from multiple Applications). -service LogsService { - rpc Export(ExportLogsServiceRequest) returns (ExportLogsServiceResponse) {} -} - -message ExportLogsServiceRequest { - // An array of ResourceLogs. - // For data coming from a single resource this array will typically contain one - // element. Intermediary nodes (such as OpenTelemetry Collector) that receive - // data from multiple origins typically batch the data before forwarding further and - // in that case this array will contain multiple elements. - repeated opentelemetry.proto.logs.v1.ResourceLogs resource_logs = 1; -} - -message ExportLogsServiceResponse { - // The details of a partially successful export request. - // - // If the request is only partially accepted - // (i.e. when the server accepts only parts of the data and rejects the rest) - // the server MUST initialize the `partial_success` field and MUST - // set the `rejected_` with the number of items it rejected. - // - // Servers MAY also make use of the `partial_success` field to convey - // warnings/suggestions to senders even when the request was fully accepted. - // In such cases, the `rejected_` MUST have a value of `0` and - // the `error_message` MUST be non-empty. - // - // A `partial_success` message with an empty value (rejected_ = 0 and - // `error_message` = "") is equivalent to it not being set/present. Senders - // SHOULD interpret it the same way as in the full success case. - ExportLogsPartialSuccess partial_success = 1; -} - -message ExportLogsPartialSuccess { - // The number of rejected log records. - // - // A `rejected_` field holding a `0` value indicates that the - // request was fully accepted. - int64 rejected_log_records = 1; - - // A developer-facing human-readable message in English. It should be used - // either to explain why the server rejected parts of the data during a partial - // success or to convey warnings/suggestions during a full success. The message - // should offer guidance on how users can address such issues. - // - // error_message is an optional field. An error_message with an empty value - // is equivalent to it not being set. - string error_message = 2; -} diff --git a/proto/opentelemetry/proto/collector/metrics/v1/metrics_service.proto b/proto/opentelemetry/proto/collector/metrics/v1/metrics_service.proto deleted file mode 100644 index bc02428..0000000 --- a/proto/opentelemetry/proto/collector/metrics/v1/metrics_service.proto +++ /dev/null @@ -1,77 +0,0 @@ -// Copyright 2019, OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package opentelemetry.proto.collector.metrics.v1; - -import "opentelemetry/proto/metrics/v1/metrics.proto"; - -option csharp_namespace = "OpenTelemetry.Proto.Collector.Metrics.V1"; -option java_multiple_files = true; -option java_package = "io.opentelemetry.proto.collector.metrics.v1"; -option java_outer_classname = "MetricsServiceProto"; -option go_package = "go.opentelemetry.io/proto/otlp/collector/metrics/v1"; - -// Service that can be used to push metrics between one Application -// instrumented with OpenTelemetry and a collector, or between a collector and a -// central collector. -service MetricsService { - rpc Export(ExportMetricsServiceRequest) returns (ExportMetricsServiceResponse) {} -} - -message ExportMetricsServiceRequest { - // An array of ResourceMetrics. - // For data coming from a single resource this array will typically contain one - // element. Intermediary nodes (such as OpenTelemetry Collector) that receive - // data from multiple origins typically batch the data before forwarding further and - // in that case this array will contain multiple elements. - repeated opentelemetry.proto.metrics.v1.ResourceMetrics resource_metrics = 1; -} - -message ExportMetricsServiceResponse { - // The details of a partially successful export request. - // - // If the request is only partially accepted - // (i.e. when the server accepts only parts of the data and rejects the rest) - // the server MUST initialize the `partial_success` field and MUST - // set the `rejected_` with the number of items it rejected. - // - // Servers MAY also make use of the `partial_success` field to convey - // warnings/suggestions to senders even when the request was fully accepted. - // In such cases, the `rejected_` MUST have a value of `0` and - // the `error_message` MUST be non-empty. - // - // A `partial_success` message with an empty value (rejected_ = 0 and - // `error_message` = "") is equivalent to it not being set/present. Senders - // SHOULD interpret it the same way as in the full success case. - ExportMetricsPartialSuccess partial_success = 1; -} - -message ExportMetricsPartialSuccess { - // The number of rejected data points. - // - // A `rejected_` field holding a `0` value indicates that the - // request was fully accepted. - int64 rejected_data_points = 1; - - // A developer-facing human-readable message in English. It should be used - // either to explain why the server rejected parts of the data during a partial - // success or to convey warnings/suggestions during a full success. The message - // should offer guidance on how users can address such issues. - // - // error_message is an optional field. An error_message with an empty value - // is equivalent to it not being set. 
- string error_message = 2; -} diff --git a/proto/opentelemetry/proto/collector/profiles/v1development/profiles_service.proto b/proto/opentelemetry/proto/collector/profiles/v1development/profiles_service.proto deleted file mode 100644 index 81bb210..0000000 --- a/proto/opentelemetry/proto/collector/profiles/v1development/profiles_service.proto +++ /dev/null @@ -1,79 +0,0 @@ -// Copyright 2023, OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package opentelemetry.proto.collector.profiles.v1development; - -import "opentelemetry/proto/profiles/v1development/profiles.proto"; - -option csharp_namespace = "OpenTelemetry.Proto.Collector.Profiles.V1Development"; -option java_multiple_files = true; -option java_package = "io.opentelemetry.proto.collector.profiles.v1development"; -option java_outer_classname = "ProfilesServiceProto"; -option go_package = "go.opentelemetry.io/proto/otlp/collector/profiles/v1development"; - -// Service that can be used to push profiles between one Application instrumented with -// OpenTelemetry and a collector, or between a collector and a central collector. -service ProfilesService { - rpc Export(ExportProfilesServiceRequest) returns (ExportProfilesServiceResponse) {} -} - -message ExportProfilesServiceRequest { - // An array of ResourceProfiles. - // For data coming from a single resource this array will typically contain one - // element. Intermediary nodes (such as OpenTelemetry Collector) that receive - // data from multiple origins typically batch the data before forwarding further and - // in that case this array will contain multiple elements. - repeated opentelemetry.proto.profiles.v1development.ResourceProfiles resource_profiles = 1; - - // The reference table containing all data shared by profiles across the message being sent. - opentelemetry.proto.profiles.v1development.ProfilesDictionary dictionary = 2; -} - -message ExportProfilesServiceResponse { - // The details of a partially successful export request. - // - // If the request is only partially accepted - // (i.e. when the server accepts only parts of the data and rejects the rest) - // the server MUST initialize the `partial_success` field and MUST - // set the `rejected_` with the number of items it rejected. - // - // Servers MAY also make use of the `partial_success` field to convey - // warnings/suggestions to senders even when the request was fully accepted. - // In such cases, the `rejected_` MUST have a value of `0` and - // the `error_message` MUST be non-empty. - // - // A `partial_success` message with an empty value (rejected_ = 0 and - // `error_message` = "") is equivalent to it not being set/present. Senders - // SHOULD interpret it the same way as in the full success case. - ExportProfilesPartialSuccess partial_success = 1; -} - -message ExportProfilesPartialSuccess { - // The number of rejected profiles. - // - // A `rejected_` field holding a `0` value indicates that the - // request was fully accepted. 
- int64 rejected_profiles = 1; - - // A developer-facing human-readable message in English. It should be used - // either to explain why the server rejected parts of the data during a partial - // success or to convey warnings/suggestions during a full success. The message - // should offer guidance on how users can address such issues. - // - // error_message is an optional field. An error_message with an empty value - // is equivalent to it not being set. - string error_message = 2; -} diff --git a/proto/opentelemetry/proto/collector/trace/v1/trace_service.proto b/proto/opentelemetry/proto/collector/trace/v1/trace_service.proto deleted file mode 100644 index efbbedb..0000000 --- a/proto/opentelemetry/proto/collector/trace/v1/trace_service.proto +++ /dev/null @@ -1,77 +0,0 @@ -// Copyright 2019, OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package opentelemetry.proto.collector.trace.v1; - -import "opentelemetry/proto/trace/v1/trace.proto"; - -option csharp_namespace = "OpenTelemetry.Proto.Collector.Trace.V1"; -option java_multiple_files = true; -option java_package = "io.opentelemetry.proto.collector.trace.v1"; -option java_outer_classname = "TraceServiceProto"; -option go_package = "go.opentelemetry.io/proto/otlp/collector/trace/v1"; - -// Service that can be used to push spans between one Application instrumented with -// OpenTelemetry and a collector, or between a collector and a central collector (in this -// case spans are sent/received to/from multiple Applications). -service TraceService { - rpc Export(ExportTraceServiceRequest) returns (ExportTraceServiceResponse) {} -} - -message ExportTraceServiceRequest { - // An array of ResourceSpans. - // For data coming from a single resource this array will typically contain one - // element. Intermediary nodes (such as OpenTelemetry Collector) that receive - // data from multiple origins typically batch the data before forwarding further and - // in that case this array will contain multiple elements. - repeated opentelemetry.proto.trace.v1.ResourceSpans resource_spans = 1; -} - -message ExportTraceServiceResponse { - // The details of a partially successful export request. - // - // If the request is only partially accepted - // (i.e. when the server accepts only parts of the data and rejects the rest) - // the server MUST initialize the `partial_success` field and MUST - // set the `rejected_` with the number of items it rejected. - // - // Servers MAY also make use of the `partial_success` field to convey - // warnings/suggestions to senders even when the request was fully accepted. - // In such cases, the `rejected_` MUST have a value of `0` and - // the `error_message` MUST be non-empty. - // - // A `partial_success` message with an empty value (rejected_ = 0 and - // `error_message` = "") is equivalent to it not being set/present. Senders - // SHOULD interpret it the same way as in the full success case. 
- ExportTracePartialSuccess partial_success = 1; -} - -message ExportTracePartialSuccess { - // The number of rejected spans. - // - // A `rejected_<signal>` field holding a `0` value indicates that the - // request was fully accepted. - int64 rejected_spans = 1; - - // A developer-facing human-readable message in English. It should be used - // either to explain why the server rejected parts of the data during a partial - // success or to convey warnings/suggestions during a full success. The message - // should offer guidance on how users can address such issues. - // - // error_message is an optional field. An error_message with an empty value - // is equivalent to it not being set. - string error_message = 2; -} diff --git a/proto/opentelemetry/proto/common/v1/common.proto b/proto/opentelemetry/proto/common/v1/common.proto deleted file mode 100644 index 7f9ffab..0000000 --- a/proto/opentelemetry/proto/common/v1/common.proto +++ /dev/null @@ -1,129 +0,0 @@ -// Copyright 2019, OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package opentelemetry.proto.common.v1; - -option csharp_namespace = "OpenTelemetry.Proto.Common.V1"; -option java_multiple_files = true; -option java_package = "io.opentelemetry.proto.common.v1"; -option java_outer_classname = "CommonProto"; -option go_package = "go.opentelemetry.io/proto/otlp/common/v1"; - -// Represents any type of attribute value. AnyValue may contain a -// primitive value such as a string or integer or it may contain an arbitrary nested -// object containing arrays, key-value lists and primitives. -message AnyValue { - // The value is one of the listed fields. It is valid for all values to be unspecified - // in which case this AnyValue is considered to be "empty". - oneof value { - string string_value = 1; - bool bool_value = 2; - int64 int_value = 3; - double double_value = 4; - ArrayValue array_value = 5; - KeyValueList kvlist_value = 6; - bytes bytes_value = 7; - } -} - -// ArrayValue is a list of AnyValue messages. We need ArrayValue as a message -// since oneof in AnyValue does not allow repeated fields. -message ArrayValue { - // Array of values. The array may be empty (contain 0 elements). - repeated AnyValue values = 1; -} - -// KeyValueList is a list of KeyValue messages. We need KeyValueList as a message -// since `oneof` in AnyValue does not allow repeated fields. Everywhere else where we need -// a list of KeyValue messages (e.g. in Span) we use `repeated KeyValue` directly to -// avoid unnecessary extra wrapping (which slows down the protocol). The 2 approaches -// are semantically equivalent. -message KeyValueList { - // A collection of key/value pairs. The list may be empty (may - // contain 0 elements). - // - // The keys MUST be unique (it is not allowed to have more than one - // value with the same key). - // The behavior of software that receives duplicated keys can be unpredictable.
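Several messages below repeat the rule that attribute keys MUST be unique and that receiver behavior is otherwise unpredictable. A small sketch of the kind of pre-send validation a producer might do; the function name is illustrative, not from this repo (the `repeated KeyValue values` field declaration continues below):

```python
def find_duplicate_keys(attributes: list[tuple[str, object]]) -> set[str]:
    """Return attribute keys that occur more than once in a KeyValue list."""
    seen: set[str] = set()
    duplicates: set[str] = set()
    for key, _value in attributes:
        if key in seen:
            duplicates.add(key)
        seen.add(key)
    return duplicates

# A duplicated "service.name" would leave receiver behavior unpredictable.
assert find_duplicate_keys(
    [("service.name", "api"), ("service.name", "web"), ("host", "a")]
) == {"service.name"}
```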
- repeated KeyValue values = 1; -} - -// Represents a key-value pair that is used to store Span attributes, Link -// attributes, etc. -message KeyValue { - // The key name of the pair. - string key = 1; - - // The value of the pair. - AnyValue value = 2; -} - -// InstrumentationScope is a message representing the instrumentation scope information -// such as the fully qualified name and version. -message InstrumentationScope { - // A name denoting the Instrumentation scope. - // An empty instrumentation scope name means the name is unknown. - string name = 1; - - // Defines the version of the instrumentation scope. - // An empty instrumentation scope version means the version is unknown. - string version = 2; - - // Additional attributes that describe the scope. [Optional]. - // Attribute keys MUST be unique (it is not allowed to have more than one - // attribute with the same key). - // The behavior of software that receives duplicated keys can be unpredictable. - repeated KeyValue attributes = 3; - - // The number of attributes that were discarded. Attributes - // can be discarded because their keys are too long or because there are too many - // attributes. If this value is 0, then no attributes were dropped. - uint32 dropped_attributes_count = 4; -} - -// A reference to an Entity. -// Entity represents an object of interest associated with produced telemetry: e.g. spans, metrics, profiles, or logs. -// -// Status: [Development] -message EntityRef { - // The Schema URL, if known. This is the identifier of the Schema that the entity data - // is recorded in. To learn more about Schema URL see - // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url - // - // This schema_url applies to the data in this message and to the Resource attributes - // referenced by id_keys and description_keys. - // TODO: discuss if we are happy with this somewhat complicated definition of what - // the schema_url applies to. - // - // This field obsoletes the schema_url field in ResourceMetrics/ResourceSpans/ResourceLogs. - string schema_url = 1; - - // Defines the type of the entity. MUST not change during the lifetime of the entity. - // For example: "service" or "host". This field is required and MUST not be empty - // for valid entities. - string type = 2; - - // Attribute Keys that identify the entity. - // MUST not change during the lifetime of the entity. The Id must contain at least one attribute. - // These keys MUST exist in the containing {message}.attributes. - repeated string id_keys = 3; - - // Descriptive (non-identifying) attribute keys of the entity. - // MAY change over the lifetime of the entity. MAY be empty. - // These attribute keys are not part of entity's identity. - // These keys MUST exist in the containing {message}.attributes. - repeated string description_keys = 4; -} \ No newline at end of file diff --git a/proto/opentelemetry/proto/logs/v1/logs.proto b/proto/opentelemetry/proto/logs/v1/logs.proto deleted file mode 100644 index 842c93c..0000000 --- a/proto/opentelemetry/proto/logs/v1/logs.proto +++ /dev/null @@ -1,227 +0,0 @@ -// Copyright 2020, OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package opentelemetry.proto.logs.v1; - -import "opentelemetry/proto/common/v1/common.proto"; -import "opentelemetry/proto/resource/v1/resource.proto"; - -option csharp_namespace = "OpenTelemetry.Proto.Logs.V1"; -option java_multiple_files = true; -option java_package = "io.opentelemetry.proto.logs.v1"; -option java_outer_classname = "LogsProto"; -option go_package = "go.opentelemetry.io/proto/otlp/logs/v1"; - -// LogsData represents the logs data that can be stored in a persistent storage, -// OR can be embedded by other protocols that transfer OTLP logs data but do not -// implement the OTLP protocol. -// -// The main difference between this message and collector protocol is that -// in this message there will not be any "control" or "metadata" specific to -// OTLP protocol. -// -// When new fields are added into this message, the OTLP request MUST be updated -// as well. -message LogsData { - // An array of ResourceLogs. - // For data coming from a single resource this array will typically contain - // one element. Intermediary nodes that receive data from multiple origins - // typically batch the data before forwarding further and in that case this - // array will contain multiple elements. - repeated ResourceLogs resource_logs = 1; -} - -// A collection of ScopeLogs from a Resource. -message ResourceLogs { - reserved 1000; - - // The resource for the logs in this message. - // If this field is not set then resource info is unknown. - opentelemetry.proto.resource.v1.Resource resource = 1; - - // A list of ScopeLogs that originate from a resource. - repeated ScopeLogs scope_logs = 2; - - // The Schema URL, if known. This is the identifier of the Schema that the resource data - // is recorded in. Notably, the last part of the URL path is the version number of the - // schema: http[s]://server[:port]/path/. To learn more about Schema URL see - // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url - // This schema_url applies to the data in the "resource" field. It does not apply - // to the data in the "scope_logs" field which have their own schema_url field. - string schema_url = 3; -} - -// A collection of Logs produced by a Scope. -message ScopeLogs { - // The instrumentation scope information for the logs in this message. - // Semantically when InstrumentationScope isn't set, it is equivalent with - // an empty instrumentation scope name (unknown). - opentelemetry.proto.common.v1.InstrumentationScope scope = 1; - - // A list of log records. - repeated LogRecord log_records = 2; - - // The Schema URL, if known. This is the identifier of the Schema that the log data - // is recorded in. Notably, the last part of the URL path is the version number of the - // schema: http[s]://server[:port]/path/. To learn more about Schema URL see - // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url - // This schema_url applies to the data in the "scope" field and all logs in the - // "log_records" field. - string schema_url = 3; -} - -// Possible values for LogRecord.SeverityNumber. 
-enum SeverityNumber { - // UNSPECIFIED is the default SeverityNumber, it MUST NOT be used. - SEVERITY_NUMBER_UNSPECIFIED = 0; - SEVERITY_NUMBER_TRACE = 1; - SEVERITY_NUMBER_TRACE2 = 2; - SEVERITY_NUMBER_TRACE3 = 3; - SEVERITY_NUMBER_TRACE4 = 4; - SEVERITY_NUMBER_DEBUG = 5; - SEVERITY_NUMBER_DEBUG2 = 6; - SEVERITY_NUMBER_DEBUG3 = 7; - SEVERITY_NUMBER_DEBUG4 = 8; - SEVERITY_NUMBER_INFO = 9; - SEVERITY_NUMBER_INFO2 = 10; - SEVERITY_NUMBER_INFO3 = 11; - SEVERITY_NUMBER_INFO4 = 12; - SEVERITY_NUMBER_WARN = 13; - SEVERITY_NUMBER_WARN2 = 14; - SEVERITY_NUMBER_WARN3 = 15; - SEVERITY_NUMBER_WARN4 = 16; - SEVERITY_NUMBER_ERROR = 17; - SEVERITY_NUMBER_ERROR2 = 18; - SEVERITY_NUMBER_ERROR3 = 19; - SEVERITY_NUMBER_ERROR4 = 20; - SEVERITY_NUMBER_FATAL = 21; - SEVERITY_NUMBER_FATAL2 = 22; - SEVERITY_NUMBER_FATAL3 = 23; - SEVERITY_NUMBER_FATAL4 = 24; -} - -// LogRecordFlags represents constants used to interpret the -// LogRecord.flags field, which is protobuf 'fixed32' type and is to -// be used as bit-fields. Each non-zero value defined in this enum is -// a bit-mask. To extract the bit-field, for example, use an -// expression like: -// -// (logRecord.flags & LOG_RECORD_FLAGS_TRACE_FLAGS_MASK) -// -enum LogRecordFlags { - // The zero value for the enum. Should not be used for comparisons. - // Instead use bitwise "and" with the appropriate mask as shown above. - LOG_RECORD_FLAGS_DO_NOT_USE = 0; - - // Bits 0-7 are used for trace flags. - LOG_RECORD_FLAGS_TRACE_FLAGS_MASK = 0x000000FF; - - // Bits 8-31 are reserved for future use. -} - -// A log record according to OpenTelemetry Log Data Model: -// https://github.com/open-telemetry/oteps/blob/main/text/logs/0097-log-data-model.md -message LogRecord { - reserved 4; - - // time_unix_nano is the time when the event occurred. - // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. - // Value of 0 indicates unknown or missing timestamp. - fixed64 time_unix_nano = 1; - - // Time when the event was observed by the collection system. - // For events that originate in OpenTelemetry (e.g. using OpenTelemetry Logging SDK) - // this timestamp is typically set at the generation time and is equal to Timestamp. - // For events originating externally and collected by OpenTelemetry (e.g. using - // Collector) this is the time when OpenTelemetry's code observed the event measured - // by the clock of the OpenTelemetry code. This field MUST be set once the event is - // observed by OpenTelemetry. - // - // For converting OpenTelemetry log data to formats that support only one timestamp or - // when receiving OpenTelemetry log data by recipients that support only one timestamp - // internally the following logic is recommended: - // - Use time_unix_nano if it is present, otherwise use observed_time_unix_nano. - // - // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. - // Value of 0 indicates unknown or missing timestamp. - fixed64 observed_time_unix_nano = 11; - - // Numerical value of the severity, normalized to values described in Log Data Model. - // [Optional]. - SeverityNumber severity_number = 2; - - // The severity text (also known as log level). The original string representation as - // it is known at the source. [Optional]. - string severity_text = 3; - - // A value containing the body of the log record. Can be for example a human-readable - // string message (including multi-line) describing the event in a free form or it can - // be a structured data composed of arrays and maps of other values. 
[Optional]. - opentelemetry.proto.common.v1.AnyValue body = 5; - - // Additional attributes that describe the specific event occurrence. [Optional]. - // Attribute keys MUST be unique (it is not allowed to have more than one - // attribute with the same key). - // The behavior of software that receives duplicated keys can be unpredictable. - repeated opentelemetry.proto.common.v1.KeyValue attributes = 6; - uint32 dropped_attributes_count = 7; - - // Flags, a bit field. 8 least significant bits are the trace flags as - // defined in W3C Trace Context specification. 24 most significant bits are reserved - // and must be set to 0. Readers must not assume that 24 most significant bits - // will be zero and must correctly mask the bits when reading 8-bit trace flag (use - // flags & LOG_RECORD_FLAGS_TRACE_FLAGS_MASK). [Optional]. - fixed32 flags = 8; - - // A unique identifier for a trace. All logs from the same trace share - // the same `trace_id`. The ID is a 16-byte array. An ID with all zeroes OR - // of length other than 16 bytes is considered invalid (empty string in OTLP/JSON - // is zero-length and thus is also invalid). - // - // This field is optional. - // - // The receivers SHOULD assume that the log record is not associated with a - // trace if any of the following is true: - // - the field is not present, - // - the field contains an invalid value. - bytes trace_id = 9; - - // A unique identifier for a span within a trace, assigned when the span - // is created. The ID is an 8-byte array. An ID with all zeroes OR of length - // other than 8 bytes is considered invalid (empty string in OTLP/JSON - // is zero-length and thus is also invalid). - // - // This field is optional. If the sender specifies a valid span_id then it SHOULD also - // specify a valid trace_id. - // - // The receivers SHOULD assume that the log record is not associated with a - // span if any of the following is true: - // - the field is not present, - // - the field contains an invalid value. - bytes span_id = 10; - - // A unique identifier of event category/type. - // All events with the same event_name are expected to conform to the same - // schema for both their attributes and their body. - // - // Recommended to be fully qualified and short (no longer than 256 characters). - // - // Presence of event_name on the log record identifies this record - // as an event. - // - // [Optional]. - string event_name = 12; -} diff --git a/proto/opentelemetry/proto/metrics/v1/metrics.proto b/proto/opentelemetry/proto/metrics/v1/metrics.proto deleted file mode 100644 index a6fab4e..0000000 --- a/proto/opentelemetry/proto/metrics/v1/metrics.proto +++ /dev/null @@ -1,735 +0,0 @@ -// Copyright 2019, OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
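Before the metrics proto body begins, one illustration of the logs.proto definitions above: SeverityNumber packs four fine-grained levels into each named band (TRACE=1..4 through FATAL=21..24), and LogRecord.flags must be masked before reading the W3C trace flags because the top 24 bits are not guaranteed to be zero. A sketch with assumed helper names, not part of the patch:

```python
LOG_RECORD_FLAGS_TRACE_FLAGS_MASK = 0x000000FF

# Four fine-grained levels per band, per the SeverityNumber enum above.
_BANDS = ["TRACE", "DEBUG", "INFO", "WARN", "ERROR", "FATAL"]

def severity_band(severity_number: int) -> str:
    if not 1 <= severity_number <= 24:
        raise ValueError("SEVERITY_NUMBER_UNSPECIFIED (0) MUST NOT be used")
    return _BANDS[(severity_number - 1) // 4]

def trace_flags(flags: int) -> int:
    # Readers must mask: bits 8-31 are reserved and not guaranteed zero.
    return flags & LOG_RECORD_FLAGS_TRACE_FLAGS_MASK

assert severity_band(17) == "ERROR"   # SEVERITY_NUMBER_ERROR
assert trace_flags(0xABCD01) == 0x01  # only the low 8 bits survive
```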
- -syntax = "proto3"; - -package opentelemetry.proto.metrics.v1; - -import "opentelemetry/proto/common/v1/common.proto"; -import "opentelemetry/proto/resource/v1/resource.proto"; - -option csharp_namespace = "OpenTelemetry.Proto.Metrics.V1"; -option java_multiple_files = true; -option java_package = "io.opentelemetry.proto.metrics.v1"; -option java_outer_classname = "MetricsProto"; -option go_package = "go.opentelemetry.io/proto/otlp/metrics/v1"; - -// MetricsData represents the metrics data that can be stored in a persistent -// storage, OR can be embedded by other protocols that transfer OTLP metrics -// data but do not implement the OTLP protocol. -// -// MetricsData -// └─── ResourceMetrics -// ├── Resource -// ├── SchemaURL -// └── ScopeMetrics -// ├── Scope -// ├── SchemaURL -// └── Metric -// ├── Name -// ├── Description -// ├── Unit -// └── data -// ├── Gauge -// ├── Sum -// ├── Histogram -// ├── ExponentialHistogram -// └── Summary -// -// The main difference between this message and collector protocol is that -// in this message there will not be any "control" or "metadata" specific to -// OTLP protocol. -// -// When new fields are added into this message, the OTLP request MUST be updated -// as well. -message MetricsData { - // An array of ResourceMetrics. - // For data coming from a single resource this array will typically contain - // one element. Intermediary nodes that receive data from multiple origins - // typically batch the data before forwarding further and in that case this - // array will contain multiple elements. - repeated ResourceMetrics resource_metrics = 1; -} - -// A collection of ScopeMetrics from a Resource. -message ResourceMetrics { - reserved 1000; - - // The resource for the metrics in this message. - // If this field is not set then no resource info is known. - opentelemetry.proto.resource.v1.Resource resource = 1; - - // A list of metrics that originate from a resource. - repeated ScopeMetrics scope_metrics = 2; - - // The Schema URL, if known. This is the identifier of the Schema that the resource data - // is recorded in. Notably, the last part of the URL path is the version number of the - // schema: http[s]://server[:port]/path/. To learn more about Schema URL see - // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url - // This schema_url applies to the data in the "resource" field. It does not apply - // to the data in the "scope_metrics" field which have their own schema_url field. - string schema_url = 3; -} - -// A collection of Metrics produced by a Scope. -message ScopeMetrics { - // The instrumentation scope information for the metrics in this message. - // Semantically when InstrumentationScope isn't set, it is equivalent with - // an empty instrumentation scope name (unknown). - opentelemetry.proto.common.v1.InstrumentationScope scope = 1; - - // A list of metrics that originate from an instrumentation library. - repeated Metric metrics = 2; - - // The Schema URL, if known. This is the identifier of the Schema that the metric data - // is recorded in. Notably, the last part of the URL path is the version number of the - // schema: http[s]://server[:port]/path/. To learn more about Schema URL see - // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url - // This schema_url applies to the data in the "scope" field and all metrics in the - // "metrics" field. - string schema_url = 3; -} - -// Defines a Metric which has one or more timeseries. The following is a -// brief summary of the Metric data model.
For more details, see: -// -// https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/data-model.md -// -// The data model and relation between entities is shown in the -// diagram below. Here, "DataPoint" is the term used to refer to any -// one of the specific data point value types, and "points" is the term used -// to refer to any one of the lists of points contained in the Metric. -// -// - Metric is composed of a metadata and data. -// - Metadata part contains a name, description, unit. -// - Data is one of the possible types (Sum, Gauge, Histogram, Summary). -// - DataPoint contains timestamps, attributes, and one of the possible value type -// fields. -// -// Metric -// +------------+ -// |name | -// |description | -// |unit | +------------------------------------+ -// |data |---> |Gauge, Sum, Histogram, Summary, ... | -// +------------+ +------------------------------------+ -// -// Data [One of Gauge, Sum, Histogram, Summary, ...] -// +-----------+ -// |... | // Metadata about the Data. -// |points |--+ -// +-----------+ | -// | +---------------------------+ -// | |DataPoint 1 | -// v |+------+------+ +------+ | -// +-----+ ||label |label |...|label | | -// | 1 |-->||value1|value2|...|valueN| | -// +-----+ |+------+------+ +------+ | -// | . | |+-----+ | -// | . | ||value| | -// | . | |+-----+ | -// | . | +---------------------------+ -// | . | . -// | . | . -// | . | . -// | . | +---------------------------+ -// | . | |DataPoint M | -// +-----+ |+------+------+ +------+ | -// | M |-->||label |label |...|label | | -// +-----+ ||value1|value2|...|valueN| | -// |+------+------+ +------+ | -// |+-----+ | -// ||value| | -// |+-----+ | -// +---------------------------+ -// -// Each distinct type of DataPoint represents the output of a specific -// aggregation function, the result of applying the DataPoint's -// associated function to one or more measurements. -// -// All DataPoint types have three common fields: -// - Attributes includes key-value pairs associated with the data point -// - TimeUnixNano is required, set to the end time of the aggregation -// - StartTimeUnixNano is optional, but strongly encouraged for DataPoints -// having an AggregationTemporality field, as discussed below. -// -// Both TimeUnixNano and StartTimeUnixNano values are expressed as -// UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. -// -// # TimeUnixNano -// -// This field is required, having consistent interpretation across -// DataPoint types. TimeUnixNano is the moment corresponding to when -// the data point's aggregate value was captured. -// -// Data points with the 0 value for TimeUnixNano SHOULD be rejected -// by consumers. -// -// # StartTimeUnixNano -// -// StartTimeUnixNano in general allows detecting when a sequence of -// observations is unbroken. This field indicates to consumers the -// start time for points with cumulative and delta -// AggregationTemporality, and it should be included whenever possible -// to support correct rate calculation. Although it may be omitted -// when the start time is truly unknown, setting StartTimeUnixNano is -// strongly encouraged. -message Metric { - reserved 4, 6, 8; - - // The name of the metric. - string name = 1; - - // A description of the metric, which can be used in documentation. - string description = 2; - - // The unit in which the metric value is reported. Follows the format - // described by https://unitsofmeasure.org/ucum.html.
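The TimeUnixNano/StartTimeUnixNano rules above translate directly into consumer-side validation. A hedged sketch; the function name and return shape are made up for illustration, and the Metric message continues below:

```python
def validate_point_times(time_unix_nano: int, start_time_unix_nano: int,
                         has_temporality: bool) -> list[str]:
    """Check a data point against the timestamp rules quoted above."""
    problems = []
    if time_unix_nano == 0:
        # "Data points with the 0 value for TimeUnixNano SHOULD be
        # rejected by consumers."
        problems.append("TimeUnixNano is required; 0 should be rejected")
    if has_temporality and start_time_unix_nano == 0:
        # StartTimeUnixNano is optional but strongly encouraged for points
        # with an AggregationTemporality, to support rate calculation.
        problems.append("StartTimeUnixNano missing; rates may be wrong")
    return problems

assert validate_point_times(0, 0, has_temporality=True) != []
assert validate_point_times(1_700_000_000_000_000_000, 1, False) == []
```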
- string unit = 3; - - // Data determines the aggregation type (if any) of the metric, what is the - // reported value type for the data points, as well as the relationship to - // the time interval over which they are reported. - oneof data { - Gauge gauge = 5; - Sum sum = 7; - Histogram histogram = 9; - ExponentialHistogram exponential_histogram = 10; - Summary summary = 11; - } - - // Additional metadata attributes that describe the metric. [Optional]. - // Attributes are non-identifying. - // Consumers SHOULD NOT need to be aware of these attributes. - // These attributes MAY be used to encode information allowing - // for lossless roundtrip translation to / from another data model. - // Attribute keys MUST be unique (it is not allowed to have more than one - // attribute with the same key). - // The behavior of software that receives duplicated keys can be unpredictable. - repeated opentelemetry.proto.common.v1.KeyValue metadata = 12; -} - -// Gauge represents the type of a scalar metric that always exports the -// "current value" for every data point. It should be used for an "unknown" -// aggregation. -// -// A Gauge does not support different aggregation temporalities. Given the -// aggregation is unknown, points cannot be combined using the same -// aggregation, regardless of aggregation temporalities. Therefore, -// AggregationTemporality is not included. Consequently, this also means -// "StartTimeUnixNano" is ignored for all data points. -message Gauge { - // The time series data points. - // Note: Multiple time series may be included (same timestamp, different attributes). - repeated NumberDataPoint data_points = 1; -} - -// Sum represents the type of a scalar metric that is calculated as a sum of all -// reported measurements over a time interval. -message Sum { - // The time series data points. - // Note: Multiple time series may be included (same timestamp, different attributes). - repeated NumberDataPoint data_points = 1; - - // aggregation_temporality describes if the aggregator reports delta changes - // since last report time, or cumulative changes since a fixed start time. - AggregationTemporality aggregation_temporality = 2; - - // Represents whether the sum is monotonic. - bool is_monotonic = 3; -} - -// Histogram represents the type of a metric that is calculated by aggregating -// as a Histogram of all reported measurements over a time interval. -message Histogram { - // The time series data points. - // Note: Multiple time series may be included (same timestamp, different attributes). - repeated HistogramDataPoint data_points = 1; - - // aggregation_temporality describes if the aggregator reports delta changes - // since last report time, or cumulative changes since a fixed start time. - AggregationTemporality aggregation_temporality = 2; -} - -// ExponentialHistogram represents the type of a metric that is calculated by aggregating -// as an ExponentialHistogram of all reported double measurements over a time interval. -message ExponentialHistogram { - // The time series data points. - // Note: Multiple time series may be included (same timestamp, different attributes). - repeated ExponentialHistogramDataPoint data_points = 1; - - // aggregation_temporality describes if the aggregator reports delta changes - // since last report time, or cumulative changes since a fixed start time.
- AggregationTemporality aggregation_temporality = 2; -} - -// Summary metric data are used to convey quantile summaries, -// a Prometheus (see: https://prometheus.io/docs/concepts/metric_types/#summary) -// and OpenMetrics (see: https://github.com/prometheus/OpenMetrics/blob/4dbf6075567ab43296eed941037c12951faafb92/protos/prometheus.proto#L45) -// data type. These data points cannot always be merged in a meaningful way. -// While they can be useful in some applications, histogram data points are -// recommended for new applications. -// Summary metrics do not have an aggregation temporality field. This is -// because the count and sum fields of a SummaryDataPoint are assumed to be -// cumulative values. -message Summary { - // The time series data points. - // Note: Multiple time series may be included (same timestamp, different attributes). - repeated SummaryDataPoint data_points = 1; -} - -// AggregationTemporality defines how a metric aggregator reports aggregated -// values. It describes how those values relate to the time interval over -// which they are aggregated. -enum AggregationTemporality { - // UNSPECIFIED is the default AggregationTemporality, it MUST not be used. - AGGREGATION_TEMPORALITY_UNSPECIFIED = 0; - - // DELTA is an AggregationTemporality for a metric aggregator which reports - // changes since last report time. Successive metrics contain aggregation of - // values from continuous and non-overlapping intervals. - // - // The values for a DELTA metric are based only on the time interval - // associated with one measurement cycle. There is no dependency on - // previous measurements like is the case for CUMULATIVE metrics. - // - // For example, consider a system measuring the number of requests that - // it receives and reports the sum of these requests every second as a - // DELTA metric: - // - // 1. The system starts receiving at time=t_0. - // 2. A request is received, the system measures 1 request. - // 3. A request is received, the system measures 1 request. - // 4. A request is received, the system measures 1 request. - // 5. The 1 second collection cycle ends. A metric is exported for the - // number of requests received over the interval of time t_0 to - // t_0+1 with a value of 3. - // 6. A request is received, the system measures 1 request. - // 7. A request is received, the system measures 1 request. - // 8. The 1 second collection cycle ends. A metric is exported for the - // number of requests received over the interval of time t_0+1 to - // t_0+2 with a value of 2. - AGGREGATION_TEMPORALITY_DELTA = 1; - - // CUMULATIVE is an AggregationTemporality for a metric aggregator which - // reports changes since a fixed start time. This means that current values - // of a CUMULATIVE metric depend on all previous measurements since the - // start time. Because of this, the sender is required to retain this state - // in some form. If this state is lost or invalidated, the CUMULATIVE metric - // values MUST be reset and a new fixed start time following the last - // reported measurement time sent MUST be used. - // - // For example, consider a system measuring the number of requests that - // it receives and reports the sum of these requests every second as a - // CUMULATIVE metric: - // - // 1. The system starts receiving at time=t_0. - // 2. A request is received, the system measures 1 request. - // 3. A request is received, the system measures 1 request. - // 4. A request is received, the system measures 1 request. - // 5. The 1 second collection cycle ends. 
A metric is exported for the - // number of requests received over the interval of time t_0 to - // t_0+1 with a value of 3. - // 6. A request is received, the system measures 1 request. - // 7. A request is received, the system measures 1 request. - // 8. The 1 second collection cycle ends. A metric is exported for the - // number of requests received over the interval of time t_0 to - // t_0+2 with a value of 5. - // 9. The system experiences a fault and loses state. - // 10. The system recovers and resumes receiving at time=t_1. - // 11. A request is received, the system measures 1 request. - // 12. The 1 second collection cycle ends. A metric is exported for the - // number of requests received over the interval of time t_1 to - // t_1+1 with a value of 1. - // - // Note: Even though, when reporting changes since last report time, using - // CUMULATIVE is valid, it is not recommended. This may cause problems for - // systems that do not use start_time to determine when the aggregation - // value was reset (e.g. Prometheus). - AGGREGATION_TEMPORALITY_CUMULATIVE = 2; -} - -// DataPointFlags is defined as a protobuf 'uint32' type and is to be used as a -// bit-field representing 32 distinct boolean flags. Each flag defined in this -// enum is a bit-mask. To test the presence of a single flag in the flags of -// a data point, for example, use an expression like: -// -// (point.flags & DATA_POINT_FLAGS_NO_RECORDED_VALUE_MASK) == DATA_POINT_FLAGS_NO_RECORDED_VALUE_MASK -// -enum DataPointFlags { - // The zero value for the enum. Should not be used for comparisons. - // Instead use bitwise "and" with the appropriate mask as shown above. - DATA_POINT_FLAGS_DO_NOT_USE = 0; - - // This DataPoint is valid but has no recorded value. This value - // SHOULD be used to reflect explicitly missing data in a series, as - // for an equivalent to the Prometheus "staleness marker". - DATA_POINT_FLAGS_NO_RECORDED_VALUE_MASK = 1; - - // Bits 2-31 are reserved for future use. -} - -// NumberDataPoint is a single data point in a timeseries that describes the -// time-varying scalar value of a metric. -message NumberDataPoint { - reserved 1; - - // The set of key/value pairs that uniquely identify the timeseries from - // where this point belongs. The list may be empty (may contain 0 elements). - // Attribute keys MUST be unique (it is not allowed to have more than one - // attribute with the same key). - // The behavior of software that receives duplicated keys can be unpredictable. - repeated opentelemetry.proto.common.v1.KeyValue attributes = 7; - - // StartTimeUnixNano is optional but strongly encouraged, see the - // detailed comments above Metric. - // - // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January - // 1970. - fixed64 start_time_unix_nano = 2; - - // TimeUnixNano is required, see the detailed comments above Metric. - // - // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January - // 1970. - fixed64 time_unix_nano = 3; - - // The value itself. A point is considered invalid when one of the recognized - // value fields is not present inside this oneof. - oneof value { - double as_double = 4; - sfixed64 as_int = 6; - } - - // (Optional) List of exemplars collected from - // measurements that were used to form the data point - repeated Exemplar exemplars = 5; - - // Flags that apply to this specific data point. See DataPointFlags - // for the available flags and their meaning.
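The DELTA/CUMULATIVE walk-through above, including the reset in steps 9 to 12, implies how a consumer can derive deltas from a cumulative stream: a changed start time marks a reset, and the new cumulative value is itself the delta. A minimal sketch, assuming points arrive ordered per series (the `flags` field declaration continues below):

```python
def cumulative_to_deltas(points: list[tuple[int, int, float]]) -> list[float]:
    """Convert cumulative points (start_time, time, value) to deltas.

    A changed start_time signals the reset described in steps 9-12 above,
    in which case the new cumulative value is itself the delta.
    """
    deltas = []
    prev_start = prev_value = None
    for start, _time, value in points:
        if prev_start is None or start != prev_start:
            deltas.append(value)          # first point, or state was reset
        else:
            deltas.append(value - prev_value)
        prev_start, prev_value = start, value
    return deltas

# t_0-based stream reports 3 then 5; after the fault, a t_1-based point of 1.
assert cumulative_to_deltas([(0, 1, 3.0), (0, 2, 5.0), (10, 11, 1.0)]) == [3.0, 2.0, 1.0]
```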
- uint32 flags = 8; -} - -// HistogramDataPoint is a single data point in a timeseries that describes the -// time-varying values of a Histogram. A Histogram contains summary statistics -// for a population of values; it may optionally contain the distribution of -// those values across a set of buckets. -// -// If the histogram contains the distribution of values, then both -// "explicit_bounds" and "bucket_counts" fields must be defined. -// If the histogram does not contain the distribution of values, then both -// "explicit_bounds" and "bucket_counts" must be omitted and only "count" and -// "sum" are known. -message HistogramDataPoint { - reserved 1; - - // The set of key/value pairs that uniquely identify the timeseries from - // where this point belongs. The list may be empty (may contain 0 elements). - // Attribute keys MUST be unique (it is not allowed to have more than one - // attribute with the same key). - // The behavior of software that receives duplicated keys can be unpredictable. - repeated opentelemetry.proto.common.v1.KeyValue attributes = 9; - - // StartTimeUnixNano is optional but strongly encouraged, see the - // detailed comments above Metric. - // - // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January - // 1970. - fixed64 start_time_unix_nano = 2; - - // TimeUnixNano is required, see the detailed comments above Metric. - // - // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January - // 1970. - fixed64 time_unix_nano = 3; - - // count is the number of values in the population. Must be non-negative. This - // value must be equal to the sum of the "count" fields in buckets if a - // histogram is provided. - fixed64 count = 4; - - // sum of the values in the population. If count is zero then this field - // must be zero. - // - // Note: Sum should only be filled out when measuring non-negative discrete - // events, and is assumed to be monotonic over the values of these events. - // Negative events *can* be recorded, but sum should not be filled out when - // doing so. This is specifically to enforce compatibility w/ OpenMetrics, - // see: https://github.com/prometheus/OpenMetrics/blob/v1.0.0/specification/OpenMetrics.md#histogram - optional double sum = 5; - - // bucket_counts is an optional field that contains the count values of the histogram - // for each bucket. - // - // The sum of the bucket_counts must equal the value in the count field. - // - // The number of elements in the bucket_counts array must be one greater than - // the number of elements in the explicit_bounds array. The exception to this rule - // is when the length of bucket_counts is 0, then the length of explicit_bounds - // must also be 0. - repeated fixed64 bucket_counts = 6; - - // explicit_bounds specifies buckets with explicitly defined bounds for values. - // - // The boundaries for bucket at index i are: - // - // (-infinity, explicit_bounds[i]] for i == 0 - // (explicit_bounds[i-1], explicit_bounds[i]] for 0 < i < size(explicit_bounds) - // (explicit_bounds[i-1], +infinity) for i == size(explicit_bounds) - // - // The values in the explicit_bounds array must be strictly increasing. - // - // Histogram buckets are inclusive of their upper boundary, except the last - // bucket where the boundary is at infinity. This format is intentionally - // compatible with the OpenMetrics histogram definition. - // - // If bucket_counts length is 0 then explicit_bounds length must also be 0, - // otherwise the data point is invalid.
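The explicit_bounds boundary rules above map cleanly onto a binary search: since buckets are inclusive of their upper bound and the bounds are strictly increasing, `bisect_left` gives the bucket index directly. An illustrative sketch (the `explicit_bounds` field declaration follows below):

```python
import bisect

def bucket_index(value: float, explicit_bounds: list[float]) -> int:
    """Index of the bucket holding `value`, per the (lower, upper] rule above."""
    # bisect_left returns the first bound >= value, which is exactly the
    # bucket whose inclusive upper boundary captures the value.
    return bisect.bisect_left(explicit_bounds, value)

bounds = [1.0, 10.0, 100.0]              # 4 buckets -> len(bucket_counts) == 4
assert bucket_index(0.5, bounds) == 0    # (-infinity, 1.0]
assert bucket_index(10.0, bounds) == 1   # (1.0, 10.0], upper bound inclusive
assert bucket_index(250.0, bounds) == 3  # (100.0, +infinity)
```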
- repeated double explicit_bounds = 7; - - // (Optional) List of exemplars collected from - // measurements that were used to form the data point - repeated Exemplar exemplars = 8; - - // Flags that apply to this specific data point. See DataPointFlags - // for the available flags and their meaning. - uint32 flags = 10; - - // min is the minimum value over (start_time, end_time]. - optional double min = 11; - - // max is the maximum value over (start_time, end_time]. - optional double max = 12; -} - -// ExponentialHistogramDataPoint is a single data point in a timeseries that describes the -// time-varying values of an ExponentialHistogram of double values. An ExponentialHistogram contains -// summary statistics for a population of values; it may optionally contain the -// distribution of those values across a set of buckets. -// -message ExponentialHistogramDataPoint { - // The set of key/value pairs that uniquely identify the timeseries from - // where this point belongs. The list may be empty (may contain 0 elements). - // Attribute keys MUST be unique (it is not allowed to have more than one - // attribute with the same key). - // The behavior of software that receives duplicated keys can be unpredictable. - repeated opentelemetry.proto.common.v1.KeyValue attributes = 1; - - // StartTimeUnixNano is optional but strongly encouraged, see the - // detailed comments above Metric. - // - // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January - // 1970. - fixed64 start_time_unix_nano = 2; - - // TimeUnixNano is required, see the detailed comments above Metric. - // - // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January - // 1970. - fixed64 time_unix_nano = 3; - - // The number of values in the population. Must be - // non-negative. This value must be equal to the sum of the "bucket_counts" - // values in the positive and negative Buckets plus the "zero_count" field. - fixed64 count = 4; - - // The sum of the values in the population. If count is zero then this field - // must be zero. - // - // Note: Sum should only be filled out when measuring non-negative discrete - // events, and is assumed to be monotonic over the values of these events. - // Negative events *can* be recorded, but sum should not be filled out when - // doing so. This is specifically to enforce compatibility w/ OpenMetrics, - // see: https://github.com/prometheus/OpenMetrics/blob/v1.0.0/specification/OpenMetrics.md#histogram - optional double sum = 5; - - // scale describes the resolution of the histogram. Boundaries are - // located at powers of the base, where: - // - // base = (2^(2^-scale)) - // - // The histogram bucket identified by `index`, a signed integer, - // contains values that are greater than (base^index) and - // less than or equal to (base^(index+1)). - // - // The positive and negative ranges of the histogram are expressed - // separately. Negative values are mapped by their absolute value - // into the negative range using the same scale as the positive range. - // - // scale is not restricted by the protocol, as the permissible - // values depend on the range of the data. - sint32 scale = 6; - - // The count of values that are either exactly zero or - // within the region considered zero by the instrumentation at the - // tolerated degree of precision. This bucket stores values that - // cannot be expressed using the standard exponential formula as - // well as values that have been rounded to zero.
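The scale comment above fully determines the bucket geometry: base = 2^(2^-scale), and bucket `index` covers (base^index, base^(index+1)]. A small sketch to make the arithmetic concrete (helper name is illustrative):

```python
def exp_bucket_bounds(scale: int, index: int) -> tuple[float, float]:
    """(lower, upper] boundaries of exponential bucket `index`.

    Per the comment above: base = 2**(2**-scale), and bucket `index`
    holds values in (base**index, base**(index + 1)].
    """
    base = 2.0 ** (2.0 ** -scale)
    return base ** index, base ** (index + 1)

# scale 0 gives base 2, so bucket 3 covers (8, 16].
assert exp_bucket_bounds(0, 3) == (8.0, 16.0)
# Higher scale means finer buckets: scale 2 gives base 2**(1/4).
lo, hi = exp_bucket_bounds(2, 0)
assert (lo, round(hi, 6)) == (1.0, round(2 ** 0.25, 6))
```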
- // - // Implementations MAY consider the zero bucket to have probability - // mass equal to (zero_count / count). - fixed64 zero_count = 7; - - // positive carries the positive range of exponential bucket counts. - Buckets positive = 8; - - // negative carries the negative range of exponential bucket counts. - Buckets negative = 9; - - // Buckets are a set of bucket counts, encoded in a contiguous array - // of counts. - message Buckets { - // The bucket index of the first entry in the bucket_counts array. - // - // Note: This uses a varint encoding as a simple form of compression. - sint32 offset = 1; - - // An array of count values, where bucket_counts[i] carries - // the count of the bucket at index (offset+i). bucket_counts[i] is the count - // of values greater than base^(offset+i) and less than or equal to - // base^(offset+i+1). - // - // Note: By contrast, the explicit HistogramDataPoint uses - // fixed64. This field is expected to have many buckets, - // especially zeros, so uint64 has been selected to ensure - // varint encoding. - repeated uint64 bucket_counts = 2; - } - - // Flags that apply to this specific data point. See DataPointFlags - // for the available flags and their meaning. - uint32 flags = 10; - - // (Optional) List of exemplars collected from - // measurements that were used to form the data point - repeated Exemplar exemplars = 11; - - // The minimum value over (start_time, end_time]. - optional double min = 12; - - // The maximum value over (start_time, end_time]. - optional double max = 13; - - // ZeroThreshold may be optionally set to convey the width of the zero - // region, where the zero region is defined as the closed interval - // [-ZeroThreshold, ZeroThreshold]. - // When ZeroThreshold is 0, the zero count bucket stores values that cannot be - // expressed using the standard exponential formula as well as values that - // have been rounded to zero. - double zero_threshold = 14; -} - -// SummaryDataPoint is a single data point in a timeseries that describes the -// time-varying values of a Summary metric. The count and sum fields represent -// cumulative values. -message SummaryDataPoint { - reserved 1; - - // The set of key/value pairs that uniquely identify the timeseries from - // where this point belongs. The list may be empty (may contain 0 elements). - // Attribute keys MUST be unique (it is not allowed to have more than one - // attribute with the same key). - // The behavior of software that receives duplicated keys can be unpredictable. - repeated opentelemetry.proto.common.v1.KeyValue attributes = 7; - - // StartTimeUnixNano is optional but strongly encouraged, see the - // detailed comments above Metric. - // - // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January - // 1970. - fixed64 start_time_unix_nano = 2; - - // TimeUnixNano is required, see the detailed comments above Metric. - // - // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January - // 1970. - fixed64 time_unix_nano = 3; - - // count is the number of values in the population. Must be non-negative. - fixed64 count = 4; - - // sum of the values in the population. If count is zero then this field - // must be zero. - // - // Note: Sum should only be filled out when measuring non-negative discrete - // events, and is assumed to be monotonic over the values of these events. - // Negative events *can* be recorded, but sum should not be filled out when - // doing so.
This is specifically to enforce compatibility w/ OpenMetrics, - // see: https://github.com/prometheus/OpenMetrics/blob/v1.0.0/specification/OpenMetrics.md#summary - double sum = 5; - - // Represents the value at a given quantile of a distribution. - // - // To record Min and Max values, the following conventions are used: - // - The 1.0 quantile is equivalent to the maximum value observed. - // - The 0.0 quantile is equivalent to the minimum value observed. - // - // See the following issue for more context: - // https://github.com/open-telemetry/opentelemetry-proto/issues/125 - message ValueAtQuantile { - // The quantile of a distribution. Must be in the interval - // [0.0, 1.0]. - double quantile = 1; - - // The value at the given quantile of a distribution. - // - // Quantile values must NOT be negative. - double value = 2; - } - - // (Optional) list of values at different quantiles of the distribution calculated - // from the current snapshot. The quantiles must be strictly increasing. - repeated ValueAtQuantile quantile_values = 6; - - // Flags that apply to this specific data point. See DataPointFlags - // for the available flags and their meaning. - uint32 flags = 8; -} - -// A representation of an exemplar, which is a sample input measurement. -// Exemplars also hold information about the environment when the measurement -// was recorded, for example the span and trace ID of the active span when the -// exemplar was recorded. -message Exemplar { - reserved 1; - - // The set of key/value pairs that were filtered out by the aggregator, but - // recorded alongside the original measurement. Only key/value pairs that were - // filtered out by the aggregator should be included. - repeated opentelemetry.proto.common.v1.KeyValue filtered_attributes = 7; - - // time_unix_nano is the exact time when this exemplar was recorded. - // - // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January - // 1970. - fixed64 time_unix_nano = 2; - - // The value of the measurement that was recorded. An exemplar is - // considered invalid when one of the recognized value fields is not present - // inside this oneof. - oneof value { - double as_double = 3; - sfixed64 as_int = 6; - } - - // (Optional) Span ID of the exemplar trace. - // span_id may be missing if the measurement is not recorded inside a trace - // or if the trace is not sampled. - bytes span_id = 4; - - // (Optional) Trace ID of the exemplar trace. - // trace_id may be missing if the measurement is not recorded inside a trace - // or if the trace is not sampled. - bytes trace_id = 5; -} diff --git a/proto/opentelemetry/proto/policy/v1/policy.proto b/proto/opentelemetry/proto/policy/v1/policy.proto deleted file mode 100644 index f22dbe9..0000000 --- a/proto/opentelemetry/proto/policy/v1/policy.proto +++ /dev/null @@ -1,378 +0,0 @@ -syntax = "proto3"; - -package opentelemetry.proto.policy.v1; - -import "google/api/annotations.proto"; - -import "opentelemetry/proto/common/v1/common.proto"; - -option go_package = "github.com/open-telemetry/opentelemetry-proto/gen/go/policy/v1"; -option java_multiple_files = true; -option java_package = "io.opentelemetry.proto.policy.v1"; - -// ============================================================================= -// Matching -// ============================================================================= - -// LogMatcher provides a way to match against log telemetry data using known fields.
-// -// IMPORTANT CONSTRAINTS: -// - Multiple matchers are ANDed together: all matchers must match for the -// overall match to succeed. -// - The list of matchers should uniquely identify a specific pattern of telemetry -// for that policy. Matchers should NOT be used as a catch-all; they should be -// specific enough to target the intended telemetry precisely. -// -// All regex fields use RE2 syntax for consistency across implementations. -// -// Examples: -// Match logs from payment service with ERROR severity: -// matchers: [ -// {resource_attribute: {key: "service.name", regex: "^payment-service$"}}, -// {log_severity_text: {regex: "^ERROR$"}} -// ] -// -// Match logs containing PII in body from any service in prod: -// matchers: [ -// {resource_attribute: {key: "deployment.environment", regex: "^prod.*"}}, -// {log_body: {regex: "\\b[0-9]{3}-[0-9]{2}-[0-9]{4}\\b"}} -// ] -message LogMatcher { - // If true, inverts the match (matches when the field does NOT match) - bool negate = 1; - - // The field to match against. Exactly one must be set. - oneof match { - // Resource-level matching - ResourceSchemaUrlMatch resource_schema_url = 10; - ResourceAttributeMatch resource_attribute = 11; - - // Scope-level matching - ScopeSchemaUrlMatch scope_schema_url = 12; - ScopeNameMatch scope_name = 13; - ScopeVersionMatch scope_version = 14; - ScopeAttributeMatch scope_attribute = 15; - - // Log-specific matching - LogBodyMatch log_body = 20; - LogSeverityTextMatch log_severity_text = 21; - LogSeverityNumberMatch log_severity_number = 22; - LogAttributeMatch log_attribute = 23; - } -} - -// ============================================================================= -// Resource and Scope Matchers (apply to all telemetry types) -// ============================================================================= - -// ResourceSchemaUrlMatch matches against the resource schema URL. -message ResourceSchemaUrlMatch { - // Regex pattern to match the resource schema URL. - string regex = 1; -} - -// ResourceAttributeMatch matches against a resource attribute by key. -message ResourceAttributeMatch { - // The attribute key to match (e.g., "service.name", "deployment.environment") - string key = 1; - - // Regex pattern to match the attribute value. Empty matches any value (existence check). - string regex = 2; -} - -// ScopeNameMatch matches against the instrumentation scope name. -message ScopeNameMatch { - // Regex pattern to match the scope name. - string regex = 1; -} - -// ScopeVersionMatch matches against the instrumentation scope version. -message ScopeVersionMatch { - // Regex pattern to match the scope version. - string regex = 1; -} - -// ScopeAttributeMatch matches against a scope attribute by key. -message ScopeAttributeMatch { - // The attribute key to match. - string key = 1; - - // Regex pattern to match the attribute value. Empty matches any value (existence check). - string regex = 2; -} - -// ScopeSchemaUrlMatch matches against the instrumentation scope schema URL. -message ScopeSchemaUrlMatch { - // Regex pattern to match the scope schema URL. - string regex = 1; -} - -// ============================================================================= -// Log Matchers -// ============================================================================= - -// LogBodyMatch matches against the log record body. -message LogBodyMatch { - // Regex pattern to match the log body content. - string regex = 1; -} - -// LogSeverityTextMatch matches against the log severity text. 
-message LogSeverityTextMatch { - // Regex pattern to match the severity text (e.g., "ERROR", "WARN", "INFO"). - string regex = 1; -} - -// LogSeverityNumberMatch matches against the log severity number. -message LogSeverityNumberMatch { - // Minimum severity number (inclusive). Range: 1-24. - int32 min = 1; - - // Maximum severity number (inclusive). Range: 1-24. - // If 0, only min is used for exact match. - int32 max = 2; -} - -// LogAttributeMatch matches against a log record attribute by key. -message LogAttributeMatch { - // The attribute key to match (e.g., "user.id", "http.method"). - string key = 1; - - // Regex pattern to match the attribute value. Empty matches any value (existence check). - string regex = 2; -} - -// ============================================================================= -// Metric Matchers (future) -// ============================================================================= - -// // MetricNameMatch matches against the metric name. -// message MetricNameMatch { -// // Regex pattern to match the metric name. -// string regex = 1; -// } - -// // MetricAttributeMatch matches against a metric data point attribute by key. -// message MetricAttributeMatch { -// // The attribute key to match. -// string key = 1; - -// // Regex pattern to match the attribute value. Empty matches any value (existence check). -// string regex = 2; -// } - -// ============================================================================= -// Span Matchers (future) -// ============================================================================= - -// // SpanNameMatch matches against the span name. -// message SpanNameMatch { -// // Regex pattern to match the span name. -// string regex = 1; -// } - -// // SpanKindMatch matches against the span kind. -// message SpanKindMatch { -// // The span kind to match. -// // See opentelemetry.proto.trace.v1.Span.SpanKind for valid values. -// opentelemetry.proto.trace.v1.Span.SpanKind kind = 1; -// } - -// // SpanStatusMatch matches against the span status code. -// // Uses opentelemetry.proto.trace.v1.Status.StatusCode from the trace proto. -// message SpanStatusMatch { -// // The status code to match. -// // See opentelemetry.proto.trace.v1.Status.StatusCode for valid values. -// opentelemetry.proto.trace.v1.Status.StatusCode code = 1; -// } - -// // SpanAttributeMatch matches against a span attribute by key. -// message SpanAttributeMatch { -// // The attribute key to match. -// string key = 1; - -// // Regex pattern to match the attribute value. Empty matches any value (existence check). -// string regex = 2; -// } - -// ============================================================================= -// Policy Type Configurations -// ============================================================================= - -// FilterAction specifies what to do with matched logs. -enum FilterAction { - FILTER_ACTION_UNSPECIFIED = 0; - FILTER_ACTION_KEEP = 1; - FILTER_ACTION_DROP = 2; -} - -// LogFilterConfig defines configuration for log filter policies. -// Matches logs based on conditions and keeps or drops them. 
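Putting the matcher and filter pieces together: matchers are ANDed, each can be negated, and the resulting action is keep or drop. A hedged sketch of an evaluator; Python's `re` stands in for the RE2 engine the comments call for, and `Matcher`/`filter_matches` are illustrative names, not this repo's API (the LogFilterConfig message follows below):

```python
import re
from dataclasses import dataclass

@dataclass
class Matcher:
    """Stand-in for one LogMatcher: a field selector plus a regex."""
    field: str       # e.g. "body", "severity_text"
    regex: str
    negate: bool = False

def filter_matches(matchers: list[Matcher], record: dict) -> bool:
    # Matchers are ANDed: every matcher must match for the filter to apply.
    for m in matchers:
        value = record.get(m.field, "")
        matched = re.search(m.regex, value) is not None
        if m.negate:
            matched = not matched
        if not matched:
            return False
    return True

record = {"severity_text": "ERROR", "body": "card 123-45-6789 rejected"}
drop = [Matcher("severity_text", r"^ERROR$"),
        Matcher("body", r"\b\d{3}-\d{2}-\d{4}\b")]
assert filter_matches(drop, record)  # both matched, so FILTER_ACTION_DROP applies
```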
-message LogFilterConfig { - // Matchers to identify which logs this filter applies to (AND logic) - repeated LogMatcher matchers = 1; - - // Action to take on matched logs - FilterAction action = 2; -} - -// ============================================================================= -// Policy Definition -// ============================================================================= - -// Policy represents a complete telemetry policy definition. -// Policies are designed to be: -// - Implementation Agnostic: Works in SDK, Collector, or any component -// - Standalone: No need to understand pipeline configuration -// - Dynamic: Can be updated post-instantiation -// - Idempotent: Safe to apply to multiple components -// - Fail-Open: Does not interfere with telemetry on failure -// -// Merge Strategy: Policies are merged using priority. -// Higher priority values take precedence when rules conflict. -message Policy { - // Unique identifier for this policy - string id = 1; - - // Human-readable name - string name = 2; - - // Optional description - string description = 3; - - // Priority for merge conflict resolution (higher = more important) - int32 priority = 4; - - // Whether this policy is enabled - bool enabled = 5; - - // Timestamp when this policy was created (Unix epoch nanoseconds) - fixed64 created_at_unix_nano = 6; - - // Timestamp when this policy was last modified (Unix epoch nanoseconds) - fixed64 modified_at_unix_nano = 7; - - // These labels can be used by an implementation to intelligently - // compile and route telemetry. - // Labels MAY contain metadata about the telemetry types this policy applies to. - // The key could be telemetry.types="type1,type2,type3" - repeated opentelemetry.proto.common.v1.KeyValue labels = 8; - - // Policy configuration. Exactly one must be set. - oneof config { - LogFilterConfig log_filter = 9; - } -} - -// ============================================================================= -// Core Policy Types -// ============================================================================= - -// PolicyType identifies the specific use case for a policy. -// Policies of the same type can be merged; different types cannot. -enum PolicyType { - POLICY_TYPE_UNSPECIFIED = 0; - POLICY_TYPE_LOG_FILTER = 1; // Define how logs are filtered (keep/drop) -} - -// ============================================================================= -// Sync Protocol (for Policy Providers) -// ============================================================================= - -// ClientMetadata contains information about the client requesting policies. -message ClientMetadata { - // Policy types this client supports/wants - repeated PolicyType supported_policy_types = 1; - - // Additional metadata labels - // NOTE FOR TERO: - // * workspace.id is required for usage. - repeated opentelemetry.proto.common.v1.KeyValue labels = 2; - - // Resource attributes describing this client's identity - // REQUIRED: - // * service.instance.id - // * service.name - // * service.namespace - // * service.version - repeated opentelemetry.proto.common.v1.KeyValue resource_attributes = 3; -} - -// PolicySyncStatus reports the status of an individual policy during sync. -// Used to communicate policy execution metrics and errors back to the provider. -message PolicySyncStatus { - // The policy ID this status refers to. - string id = 1; - - // Number of times this policy matched telemetry since the last sync. - int64 hits = 2; - - // Number of times this policy was evaluated but did not match. 
- int64 misses = 3; - - // Error messages encountered while applying this policy. - repeated string errors = 4; -} - -// SyncRequest is sent by clients to request policy updates. -message SyncRequest { - // Client identification and capabilities - ClientMetadata client_metadata = 1; - - // Request full sync (ignore policy_statuses) - bool full_sync = 2; - - // Last sync timestamp (Unix epoch nanoseconds) - fixed64 last_sync_timestamp_unix_nano = 3; - - // The hash of the policy list as last received by the client. - string last_successful_hash = 4; - - // Status of individual policies within this set. - repeated PolicySyncStatus policy_statuses = 5; -} - -enum SyncType { - SYNC_TYPE_UNSPECIFIED = 0; - SYNC_TYPE_FULL = 1; - - // These are future fields for when we may want to support diffing. - // SYNC_TYPE_ADD = 2; - // SYNC_TYPE_REMOVE = 3; - // SYNC_TYPE_UPDATE = 4; -} - -// SyncResponse contains policy updates for the client. -message SyncResponse { - // The policies to sync - repeated Policy policies = 1; - - // Hash of the entire list of policies (for change detection) - string hash = 2; - - // Timestamp of this sync (Unix epoch nanoseconds) - fixed64 sync_timestamp_unix_nano = 3; - - // Suggested interval before next sync (in seconds) - uint32 recommended_sync_interval_seconds = 4; - - // Whether this is a full replacement or incremental update - SyncType sync_type = 5; - - // Error message if sync failed - string error_message = 6; -} - -// ============================================================================= -// Policy Provider Service (optional - for gRPC providers) -// ============================================================================= - -// PolicyService defines the gRPC service for policy providers. -service PolicyService { - // Sync policies with the provider - rpc Sync(SyncRequest) returns (SyncResponse) { - option (google.api.http) = { - post: "/v1/policy/sync" - body: "*" - }; - } -} diff --git a/proto/opentelemetry/proto/profiles/v1development/profiles.proto b/proto/opentelemetry/proto/profiles/v1development/profiles.proto deleted file mode 100644 index a6af56d..0000000 --- a/proto/opentelemetry/proto/profiles/v1development/profiles.proto +++ /dev/null @@ -1,480 +0,0 @@ -// Copyright 2023, OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// This file includes work covered by the following copyright and permission notices: -// -// Copyright 2016 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package opentelemetry.proto.profiles.v1development; - -import "opentelemetry/proto/common/v1/common.proto"; -import "opentelemetry/proto/resource/v1/resource.proto"; - -option csharp_namespace = "OpenTelemetry.Proto.Profiles.V1Development"; -option java_multiple_files = true; -option java_package = "io.opentelemetry.proto.profiles.v1development"; -option java_outer_classname = "ProfilesProto"; -option go_package = "go.opentelemetry.io/proto/otlp/profiles/v1development"; - -// Relationships Diagram -// -// ┌──────────────────┐ LEGEND -// │ ProfilesData │ ─────┐ -// └──────────────────┘ │ ─────▶ embedded -// │ │ -// │ 1-n │ ─────▷ referenced by index -// ▼ ▼ -// ┌──────────────────┐ ┌────────────────────┐ -// │ ResourceProfiles │ │ ProfilesDictionary │ -// └──────────────────┘ └────────────────────┘ -// │ -// │ 1-n -// ▼ -// ┌──────────────────┐ -// │ ScopeProfiles │ -// └──────────────────┘ -// │ -// │ 1-n -// ▼ -// ┌──────────────────┐ -// │ Profile │ -// └──────────────────┘ -// │ n-1 -// │ 1-n ┌───────────────────────────────────────┐ -// ▼ │ ▽ -// ┌──────────────────┐ 1-n ┌─────────────────┐ ┌──────────┐ -// │ Sample │ ──────▷ │ KeyValueAndUnit │ │ Link │ -// └──────────────────┘ └─────────────────┘ └──────────┘ -// │ △ △ -// │ n-1 │ │ 1-n -// ▽ │ │ -// ┌──────────────────┐ │ │ -// │ Stack │ │ │ -// └──────────────────┘ │ │ -// │ 1-n │ │ -// │ 1-n ┌────────────────┘ │ -// ▽ │ │ -// ┌──────────────────┐ n-1 ┌─────────────┐ -// │ Location │ ──────▷ │ Mapping │ -// └──────────────────┘ └─────────────┘ -// │ -// │ 1-n -// ▼ -// ┌──────────────────┐ -// │ Line │ -// └──────────────────┘ -// │ -// │ 1-1 -// ▽ -// ┌──────────────────┐ -// │ Function │ -// └──────────────────┘ -// - -// ProfilesDictionary represents the profiles data shared across the -// entire message being sent. The following applies to all fields in this -// message: -// -// - A dictionary is an array of dictionary items. Users of the dictionary -// compactly reference the items using the index within the array. -// -// - A dictionary MUST have a zero value encoded as the first element. This -// allows for _index fields pointing into the dictionary to use a 0 pointer -// value to indicate 'null' / 'not set'. Unless otherwise defined, a 'zero -// value' message value is one with all default field values, so as to -// minimize wire encoded size. -// -// - There SHOULD NOT be dupes in a dictionary. The identity of dictionary -// items is based on their value, recursively as needed. If a particular -// implementation does emit duplicated items, it MUST NOT attempt to give them -// meaning based on the index or order. A profile processor may remove -// duplicate items and this MUST NOT have any observable effects for -// consumers. -// -// - There SHOULD NOT be orphaned (unreferenced) items in a dictionary. A -// profile processor may remove ("garbage-collect") orphaned items and this -// MUST NOT have any observable effects for consumers. -// -message ProfilesDictionary { - // Mappings from address ranges to the image/binary/library mapped - // into that address range referenced by locations via Location.mapping_index. - // - // mapping_table[0] must always be zero value (Mapping{}) and present. - repeated Mapping mapping_table = 1; - - // Locations referenced by samples via Stack.location_indices. - // - // location_table[0] must always be zero value (Location{}) and present. 
- repeated Location location_table = 2;
-
- // Functions referenced by locations via Line.function_index.
- //
- // function_table[0] must always be zero value (Function{}) and present.
- repeated Function function_table = 3;
-
- // Links referenced by samples via Sample.link_index.
- //
- // link_table[0] must always be zero value (Link{}) and present.
- repeated Link link_table = 4;
-
- // A common table for strings referenced by various messages.
- //
- // string_table[0] must always be "" and present.
- repeated string string_table = 5;
-
- // A common table for attributes referenced by the Profile, Sample, Mapping
- // and Location messages below through the attribute_indices field. Each entry is
- // a key/value pair with an optional unit. Since this is a dictionary table,
- // multiple entries with the same key may be present, unlike direct attribute
- // tables like Resource.attributes. The referencing attribute_indices fields,
- // though, do maintain the key uniqueness requirement.
- //
- // It's recommended to use attributes for variables with bounded cardinality,
- // such as categorical variables
- // (https://en.wikipedia.org/wiki/Categorical_variable). Using an attribute of
- // a floating point type (e.g., CPU time) in a sample can quickly make every
- // attribute value unique, defeating the purpose of the dictionary and
- // impractically increasing the profile size.
- //
- // Examples of attributes:
- // "/http/user_agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36"
- // "abc.com/myattribute": true
- // "allocation_size": 128 bytes
- //
- // attribute_table[0] must always be zero value (KeyValueAndUnit{}) and present.
- repeated KeyValueAndUnit attribute_table = 6;
-
- // Stacks referenced by samples via Sample.stack_index.
- //
- // stack_table[0] must always be zero value (Stack{}) and present.
- repeated Stack stack_table = 7;
-}
-
-// ProfilesData represents the profiles data that can be stored in persistent storage,
-// OR can be embedded by other protocols that transfer OTLP profiles data but do not
-// implement the OTLP protocol.
-//
-// The main difference between this message and collector protocol is that
-// in this message there will not be any "control" or "metadata" specific to
-// OTLP protocol.
-//
-// When new fields are added into this message, the OTLP request MUST be updated
-// as well.
-message ProfilesData {
- // An array of ResourceProfiles.
- // For data coming from an SDK profiler, this array will typically contain one
- // element. Host-level profilers will usually create one ResourceProfile per
- // container, as well as one additional ResourceProfile grouping all samples
- // from non-containerized processes.
- // Other resource groupings are possible as well and clarified via
- // Resource.attributes and semantic conventions.
- // Tools that visualize profiles should prefer displaying
- // resource_profiles[0].scope_profiles[0].profiles[0] by default.
- repeated ResourceProfiles resource_profiles = 1;
-
- // One instance of ProfilesDictionary
- ProfilesDictionary dictionary = 2;
-}
-
-
-// A collection of ScopeProfiles from a Resource.
-message ResourceProfiles {
- reserved 1000;
-
- // The resource for the profiles in this message.
- // If this field is not set then no resource info is known.
- opentelemetry.proto.resource.v1.Resource resource = 1;
-
- // A list of ScopeProfiles that originate from a resource. 
- repeated ScopeProfiles scope_profiles = 2; - - // The Schema URL, if known. This is the identifier of the Schema that the resource data - // is recorded in. Notably, the last part of the URL path is the version number of the - // schema: http[s]://server[:port]/path/. To learn more about Schema URL see - // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url - // This schema_url applies to the data in the "resource" field. It does not apply - // to the data in the "scope_profiles" field which have their own schema_url field. - string schema_url = 3; -} - -// A collection of Profiles produced by an InstrumentationScope. -message ScopeProfiles { - // The instrumentation scope information for the profiles in this message. - // Semantically when InstrumentationScope isn't set, it is equivalent with - // an empty instrumentation scope name (unknown). - opentelemetry.proto.common.v1.InstrumentationScope scope = 1; - - // A list of Profiles that originate from an instrumentation scope. - repeated Profile profiles = 2; - - // The Schema URL, if known. This is the identifier of the Schema that the profile data - // is recorded in. Notably, the last part of the URL path is the version number of the - // schema: http[s]://server[:port]/path/. To learn more about Schema URL see - // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url - // This schema_url applies to the data in the "scope" field and all profiles in the - // "profiles" field. - string schema_url = 3; -} - -// Profile is a common stacktrace profile format. -// -// Measurements represented with this format should follow the -// following conventions: -// -// - Consumers should treat unset optional fields as if they had been -// set with their default value. -// -// - When possible, measurements should be stored in "unsampled" form -// that is most useful to humans. There should be enough -// information present to determine the original sampled values. -// -// - The profile is represented as a set of samples, where each sample -// references a stack trace which is a list of locations, each belonging -// to a mapping. -// - There is a N->1 relationship from Stack.location_indices entries to -// locations. For every Stack.location_indices entry there must be a -// unique Location with that index. -// - There is an optional N->1 relationship from locations to -// mappings. For every nonzero Location.mapping_id there must be a -// unique Mapping with that index. - -// Represents a complete profile, including sample types, samples, mappings to -// binaries, stacks, locations, functions, string table, and additional -// metadata. It modifies and annotates pprof Profile with OpenTelemetry -// specific fields. -// -// Note that whilst fields in this message retain the name and field id from pprof in most cases -// for ease of understanding data migration, it is not intended that pprof:Profile and -// OpenTelemetry:Profile encoding be wire compatible. -message Profile { - // The type and unit of all Sample.values in this profile. - // For a cpu or off-cpu profile this might be: - // ["cpu","nanoseconds"] or ["off_cpu","nanoseconds"] - // For a heap profile, this might be: - // ["allocated_objects","count"] or ["allocated_space","bytes"], - ValueType sample_type = 1; - // The set of samples recorded in this profile. - repeated Sample samples = 2; - - // The following fields 3-12 are informational, do not affect - // interpretation of results. - - // Time of collection (UTC) represented as nanoseconds past the epoch. 
- fixed64 time_unix_nano = 3;
- // Duration of the profile, if a duration makes sense.
- uint64 duration_nano = 4;
- // The kind of events between sampled occurrences.
- // e.g. [ "cpu","cycles" ] or [ "heap","bytes" ]
- ValueType period_type = 5;
- // The number of events between sampled occurrences.
- int64 period = 6;
-
- // A globally unique identifier for a profile. The ID is a 16-byte array. An ID with
- // all zeroes is considered invalid. It may be used for deduplication and signal
- // correlation purposes. It is acceptable to treat two profiles with different values
- // in this field as not equal, even if they represented the same object at an earlier
- // time.
- // This field is optional; an ID may be assigned to an ID-less profile in a later step.
- bytes profile_id = 7;
-
- // The number of attributes that were discarded. Attributes
- // can be discarded because their keys are too long or because there are too many
- // attributes. If this value is 0, then no attributes were dropped.
- uint32 dropped_attributes_count = 8;
-
- // The original payload format. See also original_payload. Optional, but the
- // format and the bytes must be set or unset together.
- //
- // The allowed values for the format string are defined by the OpenTelemetry
- // specification. Some examples are "jfr", "pprof", "linux_perf".
- //
- // The original payload may be optionally provided when the conversion to the
- // OTLP format was done from a different format with some loss of fidelity
- // and the receiver may want to store the original payload to allow future
- // lossless export or reinterpretation. Some examples of the original format
- // are JFR (Java Flight Recorder), pprof, Linux perf.
- //
- // Even when the original payload is in a format that is semantically close to
- // OTLP, such as pprof, a conversion may still be lossy in some cases (e.g. if
- // the pprof file contains custom extensions or conventions).
- //
- // The original payload can be large in size, so including the original
- // payload should be configurable by the profiler or collector options. The
- // default behavior should be to not include the original payload.
- string original_payload_format = 9;
- // The original payload bytes. See also original_payload_format. Optional, but
- // format and the bytes must be set or unset together.
- bytes original_payload = 10;
-
- // References to attributes in attribute_table. [optional]
- repeated int32 attribute_indices = 11;
-}
-
-// A pointer from a profile Sample to a trace Span.
-// Connects a profile sample to a trace span, identified by unique trace and span IDs.
-message Link {
- // A unique identifier of a trace that this linked span is part of. The ID is a
- // 16-byte array.
- bytes trace_id = 1;
-
- // A unique identifier for the linked span. The ID is an 8-byte array.
- bytes span_id = 2;
-}
-
-// ValueType describes the type and units of a value.
-message ValueType {
- // Index into ProfilesDictionary.string_table.
- int32 type_strindex = 1;
-
- // Index into ProfilesDictionary.string_table.
- int32 unit_strindex = 2;
-}
-
-// Each Sample records values encountered in some program context. The program
-// context is typically a stack trace, perhaps augmented with auxiliary
-// information like the thread-id, some indicator of a higher level request
-// being handled etc.
-//
-// A Sample MUST have at least one values or timestamps_unix_nano entry. 
If -// both fields are populated, they MUST contain the same number of elements, and -// the elements at the same index MUST refer to the same event. -// -// Examples of different ways of representing a sample with the total value of 10: -// -// Report of a stacktrace at 10 timestamps (consumers must assume the value is 1 for each point): -// values: [] -// timestamps_unix_nano: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] -// -// Report of a stacktrace with an aggregated value without timestamps: -// values: [10] -// timestamps_unix_nano: [] -// -// Report of a stacktrace at 4 timestamps where each point records a specific value: -// values: [2, 2, 3, 3] -// timestamps_unix_nano: [1, 2, 3, 4] -message Sample { - // Reference to stack in ProfilesDictionary.stack_table. - int32 stack_index = 1; - // The type and unit of each value is defined by Profile.sample_type. - repeated int64 values = 2; - // References to attributes in ProfilesDictionary.attribute_table. [optional] - repeated int32 attribute_indices = 3; - - // Reference to link in ProfilesDictionary.link_table. [optional] - // It can be unset / set to 0 if no link exists, as link_table[0] is always a 'null' default value. - int32 link_index = 4; - - // Timestamps associated with Sample represented in nanoseconds. These - // timestamps should fall within the Profile's time range. - repeated fixed64 timestamps_unix_nano = 5; -} - -// Describes the mapping of a binary in memory, including its address range, -// file offset, and metadata like build ID -message Mapping { - // Address at which the binary (or DLL) is loaded into memory. - uint64 memory_start = 1; - // The limit of the address range occupied by this mapping. - uint64 memory_limit = 2; - // Offset in the binary that corresponds to the first mapped address. - uint64 file_offset = 3; - // The object this entry is loaded from. This can be a filename on - // disk for the main binary and shared libraries, or virtual - // abstractions like "[vdso]". - int32 filename_strindex = 4; // Index into ProfilesDictionary.string_table. - // References to attributes in ProfilesDictionary.attribute_table. [optional] - repeated int32 attribute_indices = 5; -} - -// A Stack represents a stack trace as a list of locations. -message Stack { - // References to locations in ProfilesDictionary.location_table. - // The first location is the leaf frame. - repeated int32 location_indices = 1; -} - -// Describes function and line table debug information. -message Location { - // Reference to mapping in ProfilesDictionary.mapping_table. - // It can be unset / set to 0 if the mapping is unknown or not applicable for - // this profile type, as mapping_table[0] is always a 'null' default mapping. - int32 mapping_index = 1; - // The instruction address for this location, if available. It - // should be within [Mapping.memory_start...Mapping.memory_limit] - // for the corresponding mapping. A non-leaf address may be in the - // middle of a call instruction. It is up to display tools to find - // the beginning of the instruction if necessary. - uint64 address = 2; - // Multiple line indicates this location has inlined functions, - // where the last entry represents the caller into which the - // preceding entries were inlined. - // - // E.g., if memcpy() is inlined into printf: - // lines[0].function_name == "memcpy" - // lines[1].function_name == "printf" - repeated Line lines = 3; - // References to attributes in ProfilesDictionary.attribute_table. 
[optional] - repeated int32 attribute_indices = 4; -} - -// Details a specific line in a source code, linked to a function. -message Line { - // Reference to function in ProfilesDictionary.function_table. - int32 function_index = 1; - // Line number in source code. 0 means unset. - int64 line = 2; - // Column number in source code. 0 means unset. - int64 column = 3; -} - -// Describes a function, including its human-readable name, system name, -// source file, and starting line number in the source. -message Function { - // The function name. Empty string if not available. - int32 name_strindex = 1; - // Function name, as identified by the system. For instance, - // it can be a C++ mangled name. Empty string if not available. - int32 system_name_strindex = 2; - // Source file containing the function. Empty string if not available. - int32 filename_strindex = 3; - // Line number in source file. 0 means unset. - int64 start_line = 4; -} - -// A custom 'dictionary native' style of encoding attributes which is more convenient -// for profiles than opentelemetry.proto.common.v1.KeyValue -// Specifically, uses the string table for keys and allows optional unit information. -message KeyValueAndUnit { - // The index into the string table for the attribute's key. - int32 key_strindex = 1; - // The value of the attribute. - opentelemetry.proto.common.v1.AnyValue value = 2; - // The index into the string table for the attribute's unit. - // zero indicates implicit (by semconv) or non-defined unit. - int32 unit_strindex = 3; -} diff --git a/proto/opentelemetry/proto/resource/v1/resource.proto b/proto/opentelemetry/proto/resource/v1/resource.proto deleted file mode 100644 index 42c5913..0000000 --- a/proto/opentelemetry/proto/resource/v1/resource.proto +++ /dev/null @@ -1,45 +0,0 @@ -// Copyright 2019, OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package opentelemetry.proto.resource.v1; - -import "opentelemetry/proto/common/v1/common.proto"; - -option csharp_namespace = "OpenTelemetry.Proto.Resource.V1"; -option java_multiple_files = true; -option java_package = "io.opentelemetry.proto.resource.v1"; -option java_outer_classname = "ResourceProto"; -option go_package = "go.opentelemetry.io/proto/otlp/resource/v1"; - -// Resource information. -message Resource { - // Set of attributes that describe the resource. - // Attribute keys MUST be unique (it is not allowed to have more than one - // attribute with the same key). - // The behavior of software that receives duplicated keys can be unpredictable. - repeated opentelemetry.proto.common.v1.KeyValue attributes = 1; - - // The number of dropped attributes. If the value is 0, then - // no attributes were dropped. - uint32 dropped_attributes_count = 2; - - // Set of entities that participate in this Resource. - // - // Note: keys in the references MUST exist in attributes of this message. 
- // - // Status: [Development] - repeated opentelemetry.proto.common.v1.EntityRef entity_refs = 3; -} diff --git a/proto/opentelemetry/proto/trace/v1/trace.proto b/proto/opentelemetry/proto/trace/v1/trace.proto deleted file mode 100644 index 8a992c1..0000000 --- a/proto/opentelemetry/proto/trace/v1/trace.proto +++ /dev/null @@ -1,359 +0,0 @@ -// Copyright 2019, OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package opentelemetry.proto.trace.v1; - -import "opentelemetry/proto/common/v1/common.proto"; -import "opentelemetry/proto/resource/v1/resource.proto"; - -option csharp_namespace = "OpenTelemetry.Proto.Trace.V1"; -option java_multiple_files = true; -option java_package = "io.opentelemetry.proto.trace.v1"; -option java_outer_classname = "TraceProto"; -option go_package = "go.opentelemetry.io/proto/otlp/trace/v1"; - -// TracesData represents the traces data that can be stored in a persistent storage, -// OR can be embedded by other protocols that transfer OTLP traces data but do -// not implement the OTLP protocol. -// -// The main difference between this message and collector protocol is that -// in this message there will not be any "control" or "metadata" specific to -// OTLP protocol. -// -// When new fields are added into this message, the OTLP request MUST be updated -// as well. -message TracesData { - // An array of ResourceSpans. - // For data coming from a single resource this array will typically contain - // one element. Intermediary nodes that receive data from multiple origins - // typically batch the data before forwarding further and in that case this - // array will contain multiple elements. - repeated ResourceSpans resource_spans = 1; -} - -// A collection of ScopeSpans from a Resource. -message ResourceSpans { - reserved 1000; - - // The resource for the spans in this message. - // If this field is not set then no resource info is known. - opentelemetry.proto.resource.v1.Resource resource = 1; - - // A list of ScopeSpans that originate from a resource. - repeated ScopeSpans scope_spans = 2; - - // The Schema URL, if known. This is the identifier of the Schema that the resource data - // is recorded in. Notably, the last part of the URL path is the version number of the - // schema: http[s]://server[:port]/path/. To learn more about Schema URL see - // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url - // This schema_url applies to the data in the "resource" field. It does not apply - // to the data in the "scope_spans" field which have their own schema_url field. - string schema_url = 3; -} - -// A collection of Spans produced by an InstrumentationScope. -message ScopeSpans { - // The instrumentation scope information for the spans in this message. - // Semantically when InstrumentationScope isn't set, it is equivalent with - // an empty instrumentation scope name (unknown). 
- opentelemetry.proto.common.v1.InstrumentationScope scope = 1; - - // A list of Spans that originate from an instrumentation scope. - repeated Span spans = 2; - - // The Schema URL, if known. This is the identifier of the Schema that the span data - // is recorded in. Notably, the last part of the URL path is the version number of the - // schema: http[s]://server[:port]/path/. To learn more about Schema URL see - // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url - // This schema_url applies to the data in the "scope" field and all spans and span - // events in the "spans" field. - string schema_url = 3; -} - -// A Span represents a single operation performed by a single component of the system. -// -// The next available field id is 17. -message Span { - // A unique identifier for a trace. All spans from the same trace share - // the same `trace_id`. The ID is a 16-byte array. An ID with all zeroes OR - // of length other than 16 bytes is considered invalid (empty string in OTLP/JSON - // is zero-length and thus is also invalid). - // - // This field is required. - bytes trace_id = 1; - - // A unique identifier for a span within a trace, assigned when the span - // is created. The ID is an 8-byte array. An ID with all zeroes OR of length - // other than 8 bytes is considered invalid (empty string in OTLP/JSON - // is zero-length and thus is also invalid). - // - // This field is required. - bytes span_id = 2; - - // trace_state conveys information about request position in multiple distributed tracing graphs. - // It is a trace_state in w3c-trace-context format: https://www.w3.org/TR/trace-context/#tracestate-header - // See also https://github.com/w3c/distributed-tracing for more details about this field. - string trace_state = 3; - - // The `span_id` of this span's parent span. If this is a root span, then this - // field must be empty. The ID is an 8-byte array. - bytes parent_span_id = 4; - - // Flags, a bit field. - // - // Bits 0-7 (8 least significant bits) are the trace flags as defined in W3C Trace - // Context specification. To read the 8-bit W3C trace flag, use - // `flags & SPAN_FLAGS_TRACE_FLAGS_MASK`. - // - // See https://www.w3.org/TR/trace-context-2/#trace-flags for the flag definitions. - // - // Bits 8 and 9 represent the 3 states of whether a span's parent - // is remote. The states are (unknown, is not remote, is remote). - // To read whether the value is known, use `(flags & SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK) != 0`. - // To read whether the span is remote, use `(flags & SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK) != 0`. - // - // When creating span messages, if the message is logically forwarded from another source - // with an equivalent flags fields (i.e., usually another OTLP span message), the field SHOULD - // be copied as-is. If creating from a source that does not have an equivalent flags field - // (such as a runtime representation of an OpenTelemetry span), the high 22 bits MUST - // be set to zero. - // Readers MUST NOT assume that bits 10-31 (22 most significant bits) will be zero. - // - // [Optional]. - fixed32 flags = 16; - - // A description of the span's operation. - // - // For example, the name can be a qualified method name or a file name - // and a line number where the operation is called. A best practice is to use - // the same display name at the same call point in an application. - // This makes it easier to correlate spans in different traces. - // - // This field is semantically required to be set to non-empty string. 
- // Empty value is equivalent to an unknown span name. - // - // This field is required. - string name = 5; - - // SpanKind is the type of span. Can be used to specify additional relationships between spans - // in addition to a parent/child relationship. - enum SpanKind { - // Unspecified. Do NOT use as default. - // Implementations MAY assume SpanKind to be INTERNAL when receiving UNSPECIFIED. - SPAN_KIND_UNSPECIFIED = 0; - - // Indicates that the span represents an internal operation within an application, - // as opposed to an operation happening at the boundaries. Default value. - SPAN_KIND_INTERNAL = 1; - - // Indicates that the span covers server-side handling of an RPC or other - // remote network request. - SPAN_KIND_SERVER = 2; - - // Indicates that the span describes a request to some remote service. - SPAN_KIND_CLIENT = 3; - - // Indicates that the span describes a producer sending a message to a broker. - // Unlike CLIENT and SERVER, there is often no direct critical path latency relationship - // between producer and consumer spans. A PRODUCER span ends when the message was accepted - // by the broker while the logical processing of the message might span a much longer time. - SPAN_KIND_PRODUCER = 4; - - // Indicates that the span describes consumer receiving a message from a broker. - // Like the PRODUCER kind, there is often no direct critical path latency relationship - // between producer and consumer spans. - SPAN_KIND_CONSUMER = 5; - } - - // Distinguishes between spans generated in a particular context. For example, - // two spans with the same name may be distinguished using `CLIENT` (caller) - // and `SERVER` (callee) to identify queueing latency associated with the span. - SpanKind kind = 6; - - // The start time of the span. On the client side, this is the time - // kept by the local machine where the span execution starts. On the server side, this - // is the time when the server's application handler starts running. - // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. - // - // This field is semantically required and it is expected that end_time >= start_time. - fixed64 start_time_unix_nano = 7; - - // The end time of the span. On the client side, this is the time - // kept by the local machine where the span execution ends. On the server side, this - // is the time when the server application handler stops running. - // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. - // - // This field is semantically required and it is expected that end_time >= start_time. - fixed64 end_time_unix_nano = 8; - - // A collection of key/value pairs. Note, global attributes - // like server name can be set using the resource API. Examples of attributes: - // - // "/http/user_agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36" - // "/http/server_latency": 300 - // "example.com/myattribute": true - // "example.com/score": 10.239 - // - // Attribute keys MUST be unique (it is not allowed to have more than one - // attribute with the same key). - // The behavior of software that receives duplicated keys can be unpredictable. - repeated opentelemetry.proto.common.v1.KeyValue attributes = 9; - - // The number of attributes that were discarded. Attributes - // can be discarded because their keys are too long or because there are too many - // attributes. If this value is 0, then no attributes were dropped. 
- uint32 dropped_attributes_count = 10; - - // Event is a time-stamped annotation of the span, consisting of user-supplied - // text description and key-value pairs. - message Event { - // The time the event occurred. - fixed64 time_unix_nano = 1; - - // The name of the event. - // This field is semantically required to be set to non-empty string. - string name = 2; - - // A collection of attribute key/value pairs on the event. - // Attribute keys MUST be unique (it is not allowed to have more than one - // attribute with the same key). - // The behavior of software that receives duplicated keys can be unpredictable. - repeated opentelemetry.proto.common.v1.KeyValue attributes = 3; - - // The number of dropped attributes. If the value is 0, - // then no attributes were dropped. - uint32 dropped_attributes_count = 4; - } - - // A collection of Event items. - repeated Event events = 11; - - // The number of dropped events. If the value is 0, then no - // events were dropped. - uint32 dropped_events_count = 12; - - // A pointer from the current span to another span in the same trace or in a - // different trace. For example, this can be used in batching operations, - // where a single batch handler processes multiple requests from different - // traces or when the handler receives a request from a different project. - message Link { - // A unique identifier of a trace that this linked span is part of. The ID is a - // 16-byte array. - bytes trace_id = 1; - - // A unique identifier for the linked span. The ID is an 8-byte array. - bytes span_id = 2; - - // The trace_state associated with the link. - string trace_state = 3; - - // A collection of attribute key/value pairs on the link. - // Attribute keys MUST be unique (it is not allowed to have more than one - // attribute with the same key). - // The behavior of software that receives duplicated keys can be unpredictable. - repeated opentelemetry.proto.common.v1.KeyValue attributes = 4; - - // The number of dropped attributes. If the value is 0, - // then no attributes were dropped. - uint32 dropped_attributes_count = 5; - - // Flags, a bit field. - // - // Bits 0-7 (8 least significant bits) are the trace flags as defined in W3C Trace - // Context specification. To read the 8-bit W3C trace flag, use - // `flags & SPAN_FLAGS_TRACE_FLAGS_MASK`. - // - // See https://www.w3.org/TR/trace-context-2/#trace-flags for the flag definitions. - // - // Bits 8 and 9 represent the 3 states of whether the link is remote. - // The states are (unknown, is not remote, is remote). - // To read whether the value is known, use `(flags & SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK) != 0`. - // To read whether the link is remote, use `(flags & SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK) != 0`. - // - // Readers MUST NOT assume that bits 10-31 (22 most significant bits) will be zero. - // When creating new spans, bits 10-31 (most-significant 22-bits) MUST be zero. - // - // [Optional]. - fixed32 flags = 6; - } - - // A collection of Links, which are references from this span to a span - // in the same or different trace. - repeated Link links = 13; - - // The number of dropped links after the maximum size was - // enforced. If this value is 0, then no links were dropped. - uint32 dropped_links_count = 14; - - // An optional final status for this span. Semantically when Status isn't set, it means - // span's status code is unset, i.e. assume STATUS_CODE_UNSET (code = 0). 
- Status status = 15; -} - -// The Status type defines a logical error model that is suitable for different -// programming environments, including REST APIs and RPC APIs. -message Status { - reserved 1; - - // A developer-facing human readable error message. - string message = 2; - - // For the semantics of status codes see - // https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/api.md#set-status - enum StatusCode { - // The default status. - STATUS_CODE_UNSET = 0; - // The Span has been validated by an Application developer or Operator to - // have completed successfully. - STATUS_CODE_OK = 1; - // The Span contains an error. - STATUS_CODE_ERROR = 2; - }; - - // The status code. - StatusCode code = 3; -} - -// SpanFlags represents constants used to interpret the -// Span.flags field, which is protobuf 'fixed32' type and is to -// be used as bit-fields. Each non-zero value defined in this enum is -// a bit-mask. To extract the bit-field, for example, use an -// expression like: -// -// (span.flags & SPAN_FLAGS_TRACE_FLAGS_MASK) -// -// See https://www.w3.org/TR/trace-context-2/#trace-flags for the flag definitions. -// -// Note that Span flags were introduced in version 1.1 of the -// OpenTelemetry protocol. Older Span producers do not set this -// field, consequently consumers should not rely on the absence of a -// particular flag bit to indicate the presence of a particular feature. -enum SpanFlags { - // The zero value for the enum. Should not be used for comparisons. - // Instead use bitwise "and" with the appropriate mask as shown above. - SPAN_FLAGS_DO_NOT_USE = 0; - - // Bits 0-7 are used for trace flags. - SPAN_FLAGS_TRACE_FLAGS_MASK = 0x000000FF; - - // Bits 8 and 9 are used to indicate that the parent span or link span is remote. - // Bit 8 (`HAS_IS_REMOTE`) indicates whether the value is known. - // Bit 9 (`IS_REMOTE`) indicates whether the span or link is remote. - SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK = 0x00000100; - SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK = 0x00000200; - - // Bits 10-31 are reserved for future use. -} diff --git a/proto/tero/policy/v1/log.proto b/proto/tero/policy/v1/log.proto deleted file mode 100644 index f959aae..0000000 --- a/proto/tero/policy/v1/log.proto +++ /dev/null @@ -1,251 +0,0 @@ -syntax = "proto3"; - -package tero.policy.v1; - -import "tero/policy/v1/shared.proto"; - -option go_package = "github.com/usetero/policy/gen/go/tero/policy/v1"; - -// ============================================================================= -// Log Target -// ============================================================================= - -// LogTarget defines matching and actions for logs. -message LogTarget { - // Matchers to identify which logs this policy applies to (AND logic) - repeated LogMatcher match = 1; - - // The keep field controls whether matching telemetry survives. It unifies - // dropping, sampling, and rate limiting into a single concept: what percentage - // or amount of matching telemetry continues to the next stage? - // - // Valid values: - // "all" - Keep everything (default, can be omitted) - // "none" - Drop everything - // "N%" - Keep N percent (0-100), e.g. "50%" - // "N/s" - Keep at most N per second, e.g. "100/s" - // "N/m" - Keep at most N per minute, e.g. "1000/m" - string keep = 2; - - // Transform operations to apply - LogTransform transform = 3; - - // Field to use as the sampling key for consistent sampling. 
- // When set, all logs with the same value for this field get the same - // keep/drop decision. Use for lifecycle events (request_id, trace_id, job_id) - // to avoid sampling individual log lines independently. - // - // Only applies when keep is a sampling value (N%, N/s, N/m). - // Example: sample_key = log_attribute["request_id"] with keep = "10%" means - // 10% of requests are kept, with all logs from each kept request preserved. - LogSampleKey sample_key = 4; -} - -// LogSampleKey specifies which field to use as the sampling key for consistent -// sampling decisions. -message LogSampleKey { - // FIELD SELECTION (subset of LogMatcher fields appropriate for sampling keys) - // The field to use as the sampling key. Exactly one must be set. - oneof field { - // Simple fields (trace_id, span_id, etc.) - LogField log_field = 1; - - // Log record attribute by key or path - AttributePath log_attribute = 2; - - // Resource attribute by key or path - AttributePath resource_attribute = 3; - - // Scope attribute by key or path - AttributePath scope_attribute = 4; - } -} - -// ============================================================================= -// Log Field Selection -// ============================================================================= - -// LogField identifies simple log fields (non-keyed). -enum LogField { - LOG_FIELD_UNSPECIFIED = 0; - - // Log record fields - LOG_FIELD_BODY = 1; - LOG_FIELD_SEVERITY_TEXT = 2; - LOG_FIELD_TRACE_ID = 3; - LOG_FIELD_SPAN_ID = 4; - LOG_FIELD_EVENT_NAME = 5; - - // Schema URLs - LOG_FIELD_RESOURCE_SCHEMA_URL = 10; - LOG_FIELD_SCOPE_SCHEMA_URL = 11; -} - -// ============================================================================= -// Log Matching -// ============================================================================= - -// LogMatcher provides a way to match against log telemetry data using known fields. -// -// IMPORTANT CONSTRAINTS: -// - Multiple matchers are ANDed together: all matchers must match for the -// overall match to succeed. -// - The list of matchers should uniquely identify a specific pattern of telemetry -// for that policy. Matchers should NOT be used as a catch-all; they should be -// specific enough to target the intended telemetry precisely. -// -// All regex fields use RE2 syntax for consistency across implementations. -message LogMatcher { - // FIELD SELECTION (keep in sync with LogRedact, LogRename, LogAdd, LogRemove) - // The field to match against. Exactly one must be set. - oneof field { - // Simple fields (body, severity_text, trace_id, span_id, etc.) - LogField log_field = 1; - - // Log record attribute by key or path - AttributePath log_attribute = 2; - - // Resource attribute by key or path - AttributePath resource_attribute = 3; - - // Scope attribute by key or path - AttributePath scope_attribute = 4; - } - - // Match type. Exactly one must be set. 
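- //
- // A sketch of a complete LogTarget that uses these matchers, in the YAML
- // shorthand documented on AttributePath in shared.proto (values
- // illustrative, not normative):
- //
- //   match:
- //     - log_field: LOG_FIELD_SEVERITY_TEXT
- //       regex: "ERROR|FATAL"
- //     - log_attribute: ["http", "route"]
- //       starts_with: "/api/"
- //     - resource_attribute: "service.name"
- //       exact: "checkout"
- //   keep: "10%"
- //   transform:
- //     redact:
- //       - log_attribute: ["user", "email"]
- //         replacement: "[REDACTED]"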
- oneof match { - // Exact string match - string exact = 10; - - // Regular expression match - string regex = 11; - - // Field existence check - bool exists = 12; - - // Literal prefix match - string starts_with = 13; - - // Literal suffix match - string ends_with = 14; - - // Literal substring match - string contains = 15; - } - - // If true, inverts the match result - bool negate = 20; - - // If true, applies case-insensitive matching to all match types - bool case_insensitive = 21; -} - -// ============================================================================= -// Log Transform -// ============================================================================= - -// LogTransform defines modifications to logs. -message LogTransform { - // Fields to remove - repeated LogRemove remove = 1; - - // Fields to redact - repeated LogRedact redact = 2; - - // Fields to rename - repeated LogRename rename = 3; - - // Fields to add - repeated LogAdd add = 4; -} - -// LogRemove removes a field. -message LogRemove { - // FIELD SELECTION (keep in sync with LogMatcher, LogRedact, LogRename, LogAdd) - // The field to remove. Exactly one must be set. - oneof field { - // Simple fields (body, severity_text, trace_id, span_id, etc.) - LogField log_field = 1; - - // Log record attribute by key or path - AttributePath log_attribute = 2; - - // Resource attribute by key or path - AttributePath resource_attribute = 3; - - // Scope attribute by key or path - AttributePath scope_attribute = 4; - } -} - -// LogRedact masks a field value. -message LogRedact { - // FIELD SELECTION (keep in sync with LogMatcher, LogRemove, LogRename, LogAdd) - // The field to redact. Exactly one must be set. - oneof field { - // Simple fields (body, severity_text, trace_id, span_id, etc.) - LogField log_field = 1; - - // Log record attribute by key or path - AttributePath log_attribute = 2; - - // Resource attribute by key or path - AttributePath resource_attribute = 3; - - // Scope attribute by key or path - AttributePath scope_attribute = 4; - } - - // Replacement value (e.g., "[REDACTED]") - string replacement = 10; -} - -// LogRename changes a field name. -message LogRename { - // FIELD SELECTION (keep in sync with LogMatcher, LogRemove, LogRedact, LogAdd) - // The field to rename. Exactly one must be set. - oneof from { - // Simple fields (body, severity_text, trace_id, span_id, etc.) - LogField from_log_field = 1; - - // Log record attribute by key or path - AttributePath from_log_attribute = 2; - - // Resource attribute by key or path - AttributePath from_resource_attribute = 3; - - // Scope attribute by key or path - AttributePath from_scope_attribute = 4; - } - - // The new field name - string to = 10; - - // If true, overwrites the target field if it already exists - bool upsert = 11; -} - -// LogAdd inserts a field. -message LogAdd { - // FIELD SELECTION (keep in sync with LogMatcher, LogRemove, LogRedact, LogRename) - // The field to add. Exactly one must be set. - oneof field { - // Simple fields (body, severity_text, trace_id, span_id, etc.) 
- LogField log_field = 1; - - // Log record attribute by key or path - AttributePath log_attribute = 2; - - // Resource attribute by key or path - AttributePath resource_attribute = 3; - - // Scope attribute by key or path - AttributePath scope_attribute = 4; - } - - // The value to set - string value = 10; - - // If true, overwrites the field if it already exists - bool upsert = 11; -} diff --git a/proto/tero/policy/v1/metric.proto b/proto/tero/policy/v1/metric.proto deleted file mode 100644 index 2b7af25..0000000 --- a/proto/tero/policy/v1/metric.proto +++ /dev/null @@ -1,126 +0,0 @@ -syntax = "proto3"; - -package tero.policy.v1; - -import "tero/policy/v1/shared.proto"; - -option go_package = "github.com/usetero/policy/gen/go/tero/policy/v1"; - -// ============================================================================= -// Metric Target -// ============================================================================= - -// MetricTarget defines matching and actions for metrics. -message MetricTarget { - // Matchers to identify which metrics this policy applies to (AND logic) - repeated MetricMatcher match = 1; - - // Whether to keep matching metrics (true) or drop them (false) - bool keep = 2; -} - -// ============================================================================= -// Metric Field Selection -// ============================================================================= - -// MetricField identifies simple metric fields (non-keyed). -enum MetricField { - METRIC_FIELD_UNSPECIFIED = 0; - - // Metric descriptor fields - METRIC_FIELD_NAME = 1; - METRIC_FIELD_DESCRIPTION = 2; - METRIC_FIELD_UNIT = 3; - - // Schema URLs - METRIC_FIELD_RESOURCE_SCHEMA_URL = 10; - METRIC_FIELD_SCOPE_SCHEMA_URL = 11; - - // Scope fields (InstrumentationScope) - METRIC_FIELD_SCOPE_NAME = 12; - METRIC_FIELD_SCOPE_VERSION = 13; -} - -// MetricType identifies the type of metric for matching. -enum MetricType { - METRIC_TYPE_UNSPECIFIED = 0; - METRIC_TYPE_GAUGE = 1; - METRIC_TYPE_SUM = 2; - METRIC_TYPE_HISTOGRAM = 3; - METRIC_TYPE_EXPONENTIAL_HISTOGRAM = 4; - METRIC_TYPE_SUMMARY = 5; -} - -// AggregationTemporality defines how a metric aggregator reports aggregated values. -// Mirrors opentelemetry.proto.metrics.v1.AggregationTemporality. -enum AggregationTemporality { - AGGREGATION_TEMPORALITY_UNSPECIFIED = 0; - AGGREGATION_TEMPORALITY_DELTA = 1; - AGGREGATION_TEMPORALITY_CUMULATIVE = 2; -} - -// ============================================================================= -// Metric Matching -// ============================================================================= - -// MetricMatcher provides a way to match against metric telemetry data using known fields. -// -// IMPORTANT CONSTRAINTS: -// - Multiple matchers are ANDed together: all matchers must match for the -// overall match to succeed. -// - The list of matchers should uniquely identify a specific pattern of telemetry -// for that policy. Matchers should NOT be used as a catch-all; they should be -// specific enough to target the intended telemetry precisely. -// -// All regex fields use RE2 syntax for consistency across implementations. -message MetricMatcher { - // FIELD SELECTION - // The field to match against. Exactly one must be set. - oneof field { - // Simple fields (name, description, unit, etc.) 
- MetricField metric_field = 1; - - // Data point attribute by key or path - AttributePath datapoint_attribute = 2; - - // Resource attribute by key or path - AttributePath resource_attribute = 3; - - // Scope attribute by key or path - AttributePath scope_attribute = 4; - - // Metric type matcher - MetricType metric_type = 5; - - // Aggregation temporality matcher (applies to Sum, Histogram, ExponentialHistogram) - AggregationTemporality aggregation_temporality = 6; - } - - // Match type. Exactly one must be set. - // Note: For metric_type field, only exists is valid (type equality is implicit). - oneof match { - // Exact string match - string exact = 10; - - // Regular expression match - string regex = 11; - - // Field existence check - bool exists = 12; - - // Literal prefix match - string starts_with = 13; - - // Literal suffix match - string ends_with = 14; - - // Literal substring match - string contains = 15; - } - - // If true, inverts the match result - bool negate = 20; - - // If true, applies case-insensitive matching to all match types - bool case_insensitive = 21; -} diff --git a/proto/tero/policy/v1/policy.proto b/proto/tero/policy/v1/policy.proto deleted file mode 100644 index 02d8d8a..0000000 --- a/proto/tero/policy/v1/policy.proto +++ /dev/null @@ -1,178 +0,0 @@ -syntax = "proto3"; - -package tero.policy.v1; - -import "google/api/annotations.proto"; -import "opentelemetry/proto/common/v1/common.proto"; -import "tero/policy/v1/log.proto"; -import "tero/policy/v1/metric.proto"; -import "tero/policy/v1/trace.proto"; - -option go_package = "github.com/usetero/policy/gen/go/tero/policy/v1"; - -// ============================================================================= -// Policy Definition -// ============================================================================= - -// Policy represents a complete telemetry policy definition. -// Policies are designed to be: -// - Implementation Agnostic: Works in SDK, Collector, or any component -// - Standalone: No need to understand pipeline configuration -// - Dynamic: Can be updated post-instantiation -// - Idempotent: Safe to apply to multiple components -// - Fail-Open: Does not interfere with telemetry on failure -message Policy { - // Unique identifier for this policy - string id = 1; - - // Human-readable name - string name = 2; - - // Optional description - string description = 3; - - // Whether this policy is enabled - bool enabled = 4; - - // Timestamp when this policy was created (Unix epoch nanoseconds) - fixed64 created_at_unix_nano = 5; - - // Timestamp when this policy was last modified (Unix epoch nanoseconds) - fixed64 modified_at_unix_nano = 6; - - // Labels for metadata and routing - repeated opentelemetry.proto.common.v1.KeyValue labels = 7; - - // Target configuration. Exactly one must be set. - oneof target { - LogTarget log = 10; - MetricTarget metric = 11; - TraceTarget trace = 12; - } -} - -// ============================================================================= -// Policy Stages -// ============================================================================= - -// PolicyStage identifies the execution stage for a policy. 
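-//
-// For example, a client that only evaluates log policies could advertise the
-// following in ClientMetadata.supported_policy_stages below (YAML sketch,
-// values illustrative):
-//
-//   supported_policy_stages:
-//     - POLICY_STAGE_LOG_FILTER
-//     - POLICY_STAGE_LOG_TRANSFORM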
-enum PolicyStage { - POLICY_STAGE_UNSPECIFIED = 0; - POLICY_STAGE_LOG_FILTER = 1; // Log filtering stage (keep/drop decisions) - POLICY_STAGE_LOG_TRANSFORM = 2; // Log transformation stage (field modifications) - POLICY_STAGE_METRIC_FILTER = 3; // Metric filtering stage (keep/drop decisions) - POLICY_STAGE_TRACE_SAMPLING = 4; // Trace probabilistic sampling stage -} - -// ============================================================================= -// Sync Protocol (for HTTP/gRPC Policy Providers) -// ============================================================================= - -// ClientMetadata contains information about the client requesting policies. -message ClientMetadata { - // Policy stages this client supports - repeated PolicyStage supported_policy_stages = 1; - - // Additional metadata labels - repeated opentelemetry.proto.common.v1.KeyValue labels = 2; - - // Resource attributes describing this client's identity - // REQUIRED: - // * service.instance.id - // * service.name - // * service.namespace - // * service.version - repeated opentelemetry.proto.common.v1.KeyValue resource_attributes = 3; -} - -// TransformStageStatus reports hits and misses for a single transform stage. -message TransformStageStatus { - // Number of times this stage was applied. - int64 hits = 1; - - // Number of times this stage was evaluated but the field selected nothing. - int64 misses = 2; -} - -// PolicySyncStatus reports the status of an individual policy during sync. -// Used to communicate policy execution metrics and errors back to the provider. -message PolicySyncStatus { - // The policy ID this status refers to. - string id = 1; - - // Number of times this policy matched telemetry since the last sync. - int64 match_hits = 2; - - // Number of times this policy was evaluated but did not match. - int64 match_misses = 3; - - // Error messages encountered while applying this policy. - repeated string errors = 4; - - // Transform stage statistics - TransformStageStatus remove = 10; - TransformStageStatus redact = 11; - TransformStageStatus rename = 12; - TransformStageStatus add = 13; -} - -// SyncRequest is sent by clients to request policy updates. -message SyncRequest { - // Client identification and capabilities - ClientMetadata client_metadata = 1; - - // Request full sync (ignore policy_statuses) - bool full_sync = 2; - - // Last sync timestamp (Unix epoch nanoseconds) - fixed64 last_sync_timestamp_unix_nano = 3; - - // The hash of the policy list as last received by the client. - string last_successful_hash = 4; - - // Status of individual policies within this set. - repeated PolicySyncStatus policy_statuses = 5; -} - -enum SyncType { - SYNC_TYPE_UNSPECIFIED = 0; - SYNC_TYPE_FULL = 1; - - // In the future we may support diffing with verbs here. -} - -// SyncResponse contains policy updates for the client. 
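-//
-// A sketch of a steady-state exchange (field names as defined in this file,
-// values illustrative): the client reports the hash it last received along
-// with per-policy execution stats, and the server replies with the full
-// policy set when the set has changed:
-//
-//   SyncRequest:  { full_sync: false, last_successful_hash: "abc123",
-//                   policy_statuses: [{ id: "p1", match_hits: 42 }] }
-//   SyncResponse: { sync_type: SYNC_TYPE_FULL, hash: "def456",
-//                   recommended_sync_interval_seconds: 60 }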
-message SyncResponse { - // The policies to sync - repeated Policy policies = 1; - - // Hash of the entire list of policies (for change detection) - string hash = 2; - - // Timestamp of this sync (Unix epoch nanoseconds) - fixed64 sync_timestamp_unix_nano = 3; - - // Suggested interval before next sync (in seconds) - uint32 recommended_sync_interval_seconds = 4; - - // Whether this is a full replacement or incremental update - SyncType sync_type = 5; - - // Error message if sync failed - string error_message = 6; -} - -// ============================================================================= -// Policy Provider Service (optional - for gRPC providers) -// ============================================================================= - -// PolicyService defines the gRPC service for policy providers. -service PolicyService { - // Sync policies with the provider - rpc Sync(SyncRequest) returns (SyncResponse) { - option (google.api.http) = { - post: "/v1/policy/sync" - body: "*" - }; - } -} diff --git a/proto/tero/policy/v1/shared.proto b/proto/tero/policy/v1/shared.proto deleted file mode 100644 index 95ec59f..0000000 --- a/proto/tero/policy/v1/shared.proto +++ /dev/null @@ -1,52 +0,0 @@ -syntax = "proto3"; - -package tero.policy.v1; - -option go_package = "github.com/usetero/policy/gen/go/tero/policy/v1"; - -// ============================================================================= -// Shared Types -// ============================================================================= - -// AttributePath specifies how to access an attribute value. -// -// The path is represented as an array of string segments. Each segment represents -// a key to traverse into nested maps. -// -// Example usage: -// -// For an attribute structure like: -// Attributes: map[string]any{ -// "http": map[string]any{ -// "method": "POST", -// "status_code": 200, -// }, -// "user_id": "u123", -// } -// -// - To access "user_id": ["user_id"] -// - To access http.method: ["http", "method"] -// -// YAML/JSON Unmarshaling: -// -// Implementations MUST accept both the canonical proto form and shorthand forms -// for ergonomic policy authoring: -// -// Canonical (proto-native): -// log_attribute: -// path: ["http", "method"] -// -// Shorthand array (MUST be supported): -// log_attribute: ["http", "method"] -// -// Shorthand string (MUST be supported for single-segment paths): -// log_attribute: "user_id" # equivalent to ["user_id"] -// -// When marshaling, implementations SHOULD use the shorthand array form for -// cleaner output. -message AttributePath { - // Path segments for attribute traversal. - // A single-element array accesses a flat attribute. - // Multiple elements traverse nested maps. - repeated string path = 1; -} diff --git a/proto/tero/policy/v1/trace.proto b/proto/tero/policy/v1/trace.proto deleted file mode 100644 index ff98fe1..0000000 --- a/proto/tero/policy/v1/trace.proto +++ /dev/null @@ -1,207 +0,0 @@ -syntax = "proto3"; - -package tero.policy.v1; - -import "tero/policy/v1/shared.proto"; - -option go_package = "github.com/usetero/policy/gen/go/tero/policy/v1"; - -// ============================================================================= -// Trace Target -// ============================================================================= - -// TraceTarget defines matching and sampling actions for traces/spans. 
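One note on `AttributePath` from `shared.proto` above before moving on to traces: the traversal rule is simple, but the leaf-versus-map edge case is easy to get wrong. A sketch of the lookup, assuming a hypothetical nested `Value` union (the same recursive shape `std.json.Value` uses):

```zig
const std = @import("std");

// Hypothetical attribute value: a leaf string or a nested map.
const Value = union(enum) {
    string: []const u8,
    map: std.StringHashMapUnmanaged(Value),
};

// ["http", "method"] walks attrs["http"]["method"]; a single-segment
// path reads a flat attribute, exactly as the AttributePath comment says.
fn resolvePath(attrs: *const std.StringHashMapUnmanaged(Value), path: []const []const u8) ?Value {
    if (path.len == 0) return null;
    var current = attrs.get(path[0]) orelse return null;
    for (path[1..]) |segment| {
        switch (current) {
            .map => |m| current = m.get(segment) orelse return null,
            // Path continues but we hit a leaf: no match, not an error.
            else => return null,
        }
    }
    return current;
}
```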
-message TraceTarget { - // Matchers to identify which spans this policy applies to (AND logic) - repeated TraceMatcher match = 1; - - // The keep field controls whether matching spans are sampled. - // For traces, this uses probabilistic sampling with tracestate support. - TraceSamplingConfig keep = 2; -} - -// ============================================================================= -// Trace Field Selection -// ============================================================================= - -// TraceField identifies simple span fields (non-keyed). -enum TraceField { - TRACE_FIELD_UNSPECIFIED = 0; - - // Span fields - TRACE_FIELD_NAME = 1; - TRACE_FIELD_TRACE_ID = 2; - TRACE_FIELD_SPAN_ID = 3; - TRACE_FIELD_PARENT_SPAN_ID = 4; - TRACE_FIELD_TRACE_STATE = 5; - - // Schema URLs - TRACE_FIELD_RESOURCE_SCHEMA_URL = 10; - TRACE_FIELD_SCOPE_SCHEMA_URL = 11; - - // Scope fields (InstrumentationScope) - TRACE_FIELD_SCOPE_NAME = 12; - TRACE_FIELD_SCOPE_VERSION = 13; -} - -// SpanKind identifies the type of span for matching. -// Mirrors opentelemetry.proto.trace.v1.Span.SpanKind. -enum SpanKind { - SPAN_KIND_UNSPECIFIED = 0; - SPAN_KIND_INTERNAL = 1; - SPAN_KIND_SERVER = 2; - SPAN_KIND_CLIENT = 3; - SPAN_KIND_PRODUCER = 4; - SPAN_KIND_CONSUMER = 5; -} - -// SpanStatusCode identifies the span status for matching. -// Mirrors opentelemetry.proto.trace.v1.Status.StatusCode. -enum SpanStatusCode { - SPAN_STATUS_CODE_UNSPECIFIED = 0; - SPAN_STATUS_CODE_OK = 1; - SPAN_STATUS_CODE_ERROR = 2; -} - -// ============================================================================= -// Trace Matching -// ============================================================================= - -// TraceMatcher provides a way to match against trace/span telemetry data using known fields. -// -// IMPORTANT CONSTRAINTS: -// - Multiple matchers are ANDed together: all matchers must match for the -// overall match to succeed. -// - The list of matchers should uniquely identify a specific pattern of telemetry -// for that policy. Matchers should NOT be used as a catch-all; they should be -// specific enough to target the intended telemetry precisely. -// -// All regex fields use RE2 syntax for consistency across implementations. -message TraceMatcher { - // FIELD SELECTION - // The field to match against. Exactly one must be set. - oneof field { - // Simple fields (name, trace_id, span_id, etc.) - TraceField trace_field = 1; - - // Span attribute by key or path - AttributePath span_attribute = 2; - - // Resource attribute by key or path - AttributePath resource_attribute = 3; - - // Scope attribute by key or path - AttributePath scope_attribute = 4; - - // Span kind matcher - SpanKind span_kind = 5; - - // Span status code matcher - SpanStatusCode span_status = 6; - - // Event name matcher (matches if span contains an event with this name) - string event_name = 7; - - // Event attribute matcher (matches if span contains an event with this attribute) - AttributePath event_attribute = 8; - - // Link trace ID matcher (matches if span has a link to this trace) - string link_trace_id = 9; - } - - // Match type. Exactly one must be set. - // Note: For span_kind and span_status fields, only exists is valid (equality is implicit). 
- oneof match { - // Exact string match - string exact = 10; - - // Regular expression match - string regex = 11; - - // Field existence check - bool exists = 12; - - // Literal prefix match - string starts_with = 13; - - // Literal suffix match - string ends_with = 14; - - // Literal substring match - string contains = 15; - } - - // If true, inverts the match result - bool negate = 20; - - // If true, applies case-insensitive matching to all match types - bool case_insensitive = 21; -} - -// ============================================================================= -// Probabilistic Sampling Configuration -// ============================================================================= - -// TraceSamplingConfig configures probabilistic sampling for traces. -// -// This configuration follows the OpenTelemetry probability sampling specification: -// https://opentelemetry.io/docs/specs/otel/trace/tracestate-probability-sampling/ -// -// Implementations MUST follow tracestate standards to allow multi-stage sampling: -// https://opentelemetry.io/docs/specs/otel/trace/tracestate-handling/#sampling-threshold-value-th -// -// The sampling decision is based on comparing a 56-bit randomness value (R) against -// a rejection threshold (T). If R >= T, the span is kept; otherwise it is dropped. -// The threshold is derived from the configured percentage: -// T = (1 - percentage/100) * 2^56 -message TraceSamplingConfig { - // Percentage at which items are sampled (0-100). - // >= 100 samples all items, 0 rejects all items. - // This is a 32-bit floating point value for precision. - float percentage = 1; - - // Sampling mode determines how the sampling decision is made. - // Optional. Default is SAMPLING_MODE_HASH_SEED. - optional SamplingMode mode = 2; - - // Determines the number of hexadecimal digits used to encode the sampling threshold - // in the tracestate. Permitted values are 1-14. - // Optional. Default is 4. - // Higher precision allows finer-grained sampling probabilities. - // The threshold is encoded with trailing zeros removed. - optional uint32 sampling_precision = 3; - - // An integer used to compute the hash algorithm. - // All collectors for a given tier (e.g., behind the same load balancer) - // should have the same hash_seed to ensure consistent sampling decisions. - // Optional. Default is 0. - optional uint32 hash_seed = 4; - - // Determines behavior when sampling errors occur. - // When true (default), items with errors are rejected (fail closed). - // When false, items with errors are accepted (fail open). - // Optional. Default is true. - optional bool fail_closed = 5; -} - -// SamplingMode determines how the sampling decision is made. -enum SamplingMode { - SAMPLING_MODE_UNSPECIFIED = 0; - - // hash_seed mode: Uses a hash of the trace ID combined with the hash_seed - // to make deterministic sampling decisions. This is the default mode. - // Suitable when you want consistent sampling across multiple collectors. - SAMPLING_MODE_HASH_SEED = 1; - - // proportional mode: Respects existing sampling probability in tracestate. - // Adjusts the effective probability to achieve the target percentage - // relative to the incoming probability. For example, if incoming spans - // are already sampled at 50% and target is 10%, this mode will sample - // 20% of incoming spans to achieve 10% overall. - SAMPLING_MODE_PROPORTIONAL = 2; - - // equalizing mode: Attempts to achieve the target percentage by preferentially - // sampling spans that have been sampled at higher rates. 
This helps balance
-  // the sampling across different sources while respecting existing thresholds.
-  SAMPLING_MODE_EQUALIZING = 3;
-}
diff --git a/src/config/types.zig b/src/config/types.zig
index 8e0ff63..64a4ba0 100644
--- a/src/config/types.zig
+++ b/src/config/types.zig
@@ -1,5 +1,5 @@
 const std = @import("std");
-const policy = @import("../policy/root.zig");
+const policy = @import("policy_zig");
 
 pub const ProviderConfig = policy.ProviderConfig;
 pub const ServiceMetadata = policy.ServiceMetadata;
diff --git a/src/datadog_main.zig b/src/datadog_main.zig
index 95a9c6e..5945d7e 100644
--- a/src/datadog_main.zig
+++ b/src/datadog_main.zig
@@ -25,7 +25,7 @@ const policy = edge.policy;
 
 const ProxyConfig = config_types.ProxyConfig;
 
-const o11y = @import("observability/root.zig");
+const o11y = @import("o11y");
 const EventBus = o11y.EventBus;
 const StdLogAdapter = o11y.StdLogAdapter;
 const Level = o11y.Level;
diff --git a/src/hyperscan/hyperscan.zig b/src/hyperscan/hyperscan.zig
deleted file mode 100644
index 1d0c4b7..0000000
--- a/src/hyperscan/hyperscan.zig
+++ /dev/null
@@ -1,1203 +0,0 @@
-//! Zig bindings for Vectorscan/Hyperscan - High-performance regex matching library
-//!
-//! This module provides idiomatic Zig wrappers around the Vectorscan C API,
-//! offering RAII-style resource management and type-safe interfaces.
-//!
-//! ## Quick Start
-//! ```zig
-//! const hs = @import("hyperscan");
-//!
-//! // Single pattern matching
-//! var db = try hs.Database.compile("hello\\s+world", .{});
-//! defer db.deinit();
-//!
-//! var scratch = try hs.Scratch.init(&db);
-//! defer scratch.deinit();
-//!
-//! var scanner = db.scan(&scratch, "say hello world!");
-//! while (scanner.next()) |match| {
-//!     std.debug.print("Match at {}-{}\n", .{ match.start, match.end });
-//! }
-//! ```
-//!
-//! ## Multi-pattern matching
-//! ```zig
-//! var db = try hs.Database.compileMulti(allocator, &.{
-//!     .{ .expression = "error", .id = 1 },
-//!     .{ .expression = "warn", .id = 2 },
-//!     .{ .expression = "info", .id = 3 },
-//! }, .{});
-//! 
``` - -const std = @import("std"); - -// ============================================================================= -// C Bindings -// ============================================================================= - -const c = struct { - // Opaque types - const hs_database_t = opaque {}; - const hs_scratch_t = opaque {}; - const hs_stream_t = opaque {}; - - const hs_compile_error_t = extern struct { - message: [*:0]const u8, - expression: c_int, - }; - - // Error codes - const HS_SUCCESS: c_int = 0; - const HS_INVALID: c_int = -1; - const HS_NOMEM: c_int = -2; - const HS_SCAN_TERMINATED: c_int = -3; - const HS_COMPILER_ERROR: c_int = -4; - const HS_DB_VERSION_ERROR: c_int = -5; - const HS_DB_PLATFORM_ERROR: c_int = -6; - const HS_DB_MODE_ERROR: c_int = -7; - const HS_BAD_ALIGN: c_int = -8; - const HS_BAD_ALLOC: c_int = -9; - const HS_SCRATCH_IN_USE: c_int = -10; - const HS_ARCH_ERROR: c_int = -11; - const HS_INSUFFICIENT_SPACE: c_int = -12; - const HS_UNKNOWN_ERROR: c_int = -13; - - // Compile flags - const HS_FLAG_CASELESS: c_uint = 1; - const HS_FLAG_DOTALL: c_uint = 2; - const HS_FLAG_MULTILINE: c_uint = 4; - const HS_FLAG_SINGLEMATCH: c_uint = 8; - const HS_FLAG_ALLOWEMPTY: c_uint = 16; - const HS_FLAG_UTF8: c_uint = 32; - const HS_FLAG_UCP: c_uint = 64; - const HS_FLAG_PREFILTER: c_uint = 128; - const HS_FLAG_SOM_LEFTMOST: c_uint = 256; - - // Mode flags - const HS_MODE_BLOCK: c_uint = 1; - const HS_MODE_STREAM: c_uint = 2; - const HS_MODE_VECTORED: c_uint = 4; - const HS_MODE_SOM_HORIZON_LARGE: c_uint = 1 << 24; - const HS_MODE_SOM_HORIZON_MEDIUM: c_uint = 1 << 25; - const HS_MODE_SOM_HORIZON_SMALL: c_uint = 1 << 26; - - // Callback type - const match_event_handler = *const fn ( - id: c_uint, - from: c_ulonglong, - to: c_ulonglong, - flags: c_uint, - context: ?*anyopaque, - ) callconv(.c) c_int; - - // Compilation functions - extern fn hs_compile( - expression: [*:0]const u8, - flags: c_uint, - mode: c_uint, - platform: ?*anyopaque, - db: *?*hs_database_t, - compile_error: *?*hs_compile_error_t, - ) c_int; - - extern fn hs_compile_multi( - expressions: [*]const [*:0]const u8, - flags: ?[*]const c_uint, - ids: ?[*]const c_uint, - elements: c_uint, - mode: c_uint, - platform: ?*anyopaque, - db: *?*hs_database_t, - compile_error: *?*hs_compile_error_t, - ) c_int; - - extern fn hs_compile_lit( - expression: [*]const u8, - flags: c_uint, - len: usize, - mode: c_uint, - platform: ?*anyopaque, - db: *?*hs_database_t, - compile_error: *?*hs_compile_error_t, - ) c_int; - - extern fn hs_compile_lit_multi( - expressions: [*]const [*]const u8, - flags: ?[*]const c_uint, - ids: ?[*]const c_uint, - lens: [*]const usize, - elements: c_uint, - mode: c_uint, - platform: ?*anyopaque, - db: *?*hs_database_t, - compile_error: *?*hs_compile_error_t, - ) c_int; - - extern fn hs_free_compile_error(compile_error: ?*hs_compile_error_t) c_int; - - // Database functions - extern fn hs_free_database(db: ?*hs_database_t) c_int; - extern fn hs_database_size(db: ?*const hs_database_t, size: *usize) c_int; - extern fn hs_database_info(db: ?*const hs_database_t, info: *?[*:0]u8) c_int; - extern fn hs_serialize_database(db: ?*const hs_database_t, bytes: *?[*]u8, length: *usize) c_int; - extern fn hs_deserialize_database(bytes: [*]const u8, length: usize, db: *?*hs_database_t) c_int; - - // Scratch functions - extern fn hs_alloc_scratch(db: ?*const hs_database_t, scratch: *?*hs_scratch_t) c_int; - extern fn hs_free_scratch(scratch: ?*hs_scratch_t) c_int; - extern fn hs_clone_scratch(src: ?*const hs_scratch_t, dest: 
*?*hs_scratch_t) c_int; - extern fn hs_scratch_size(scratch: ?*const hs_scratch_t, size: *usize) c_int; - - // Block scanning - extern fn hs_scan( - db: ?*const hs_database_t, - data: [*]const u8, - length: c_uint, - flags: c_uint, - scratch: ?*hs_scratch_t, - onEvent: ?match_event_handler, - context: ?*anyopaque, - ) c_int; - - // Vectored scanning - extern fn hs_scan_vector( - db: ?*const hs_database_t, - data: [*]const [*]const u8, - length: [*]const c_uint, - count: c_uint, - flags: c_uint, - scratch: ?*hs_scratch_t, - onEvent: ?match_event_handler, - context: ?*anyopaque, - ) c_int; - - // Stream functions - extern fn hs_open_stream(db: ?*const hs_database_t, flags: c_uint, stream: *?*hs_stream_t) c_int; - extern fn hs_scan_stream( - stream: ?*hs_stream_t, - data: [*]const u8, - length: c_uint, - flags: c_uint, - scratch: ?*hs_scratch_t, - onEvent: ?match_event_handler, - context: ?*anyopaque, - ) c_int; - extern fn hs_close_stream( - stream: ?*hs_stream_t, - scratch: ?*hs_scratch_t, - onEvent: ?match_event_handler, - context: ?*anyopaque, - ) c_int; - extern fn hs_reset_stream( - stream: ?*hs_stream_t, - flags: c_uint, - scratch: ?*hs_scratch_t, - onEvent: ?match_event_handler, - context: ?*anyopaque, - ) c_int; - - // Utility - extern fn hs_version() [*:0]const u8; - extern fn hs_valid_platform() c_int; -}; - -// ============================================================================= -// Error Handling -// ============================================================================= - -/// Errors that can occur during Hyperscan operations -pub const Error = error{ - /// Invalid parameter passed to function - Invalid, - /// Memory allocation failed - OutOfMemory, - /// Pattern compilation failed - CompileError, - /// Database version mismatch - DatabaseVersionError, - /// Database platform mismatch - DatabasePlatformError, - /// Database mode mismatch (e.g., using streaming API with block database) - DatabaseModeError, - /// Memory alignment error - BadAlignment, - /// Allocator returned misaligned memory - BadAlloc, - /// Scratch space already in use - ScratchInUse, - /// Unsupported CPU architecture - ArchitectureError, - /// Buffer too small - InsufficientSpace, - /// Unknown internal error - UnknownError, -}; - -/// Compile-specific error with detailed message -pub const CompileErrorDetails = struct { - message: []const u8, - /// Expression index that caused the error (-1 if not expression-specific) - expression_index: i32, -}; - -fn mapError(code: c_int) Error { - return switch (code) { - c.HS_INVALID => error.Invalid, - c.HS_NOMEM => error.OutOfMemory, - c.HS_COMPILER_ERROR => error.CompileError, - c.HS_DB_VERSION_ERROR => error.DatabaseVersionError, - c.HS_DB_PLATFORM_ERROR => error.DatabasePlatformError, - c.HS_DB_MODE_ERROR => error.DatabaseModeError, - c.HS_BAD_ALIGN => error.BadAlignment, - c.HS_BAD_ALLOC => error.BadAlloc, - c.HS_SCRATCH_IN_USE => error.ScratchInUse, - c.HS_ARCH_ERROR => error.ArchitectureError, - c.HS_INSUFFICIENT_SPACE => error.InsufficientSpace, - else => error.UnknownError, - }; -} - -// ============================================================================= -// Compile Flags -// ============================================================================= - -/// Flags that modify pattern matching behavior -pub const Flags = packed struct(c_uint) { - /// Case-insensitive matching - caseless: bool = false, - /// Dot (.) 
matches newlines - dotall: bool = false, - /// ^ and $ match at newlines - multiline: bool = false, - /// Only report first match per pattern - single_match: bool = false, - /// Allow patterns that match empty strings - allow_empty: bool = false, - /// Treat pattern as UTF-8 - utf8: bool = false, - /// Use Unicode character properties - ucp: bool = false, - /// Compile in prefilter mode - prefilter: bool = false, - /// Report start of match (leftmost) - som_leftmost: bool = false, - - _padding: u23 = 0, - - /// Combine multiple flags - pub fn with(self: Flags, other: Flags) Flags { - return @bitCast(@as(c_uint, @bitCast(self)) | @as(c_uint, @bitCast(other))); - } -}; - -// ============================================================================= -// Database Mode -// ============================================================================= - -/// Database compilation mode -pub const Mode = enum(c_uint) { - /// Block mode - scan complete data in single call - block = c.HS_MODE_BLOCK, - /// Stream mode - scan data incrementally - stream = c.HS_MODE_STREAM, - /// Vectored mode - scan non-contiguous data blocks - vectored = c.HS_MODE_VECTORED, - - /// Add start-of-match tracking with large horizon - pub fn withSomLarge(self: Mode) c_uint { - return @intFromEnum(self) | c.HS_MODE_SOM_HORIZON_LARGE; - } - - /// Add start-of-match tracking with medium horizon - pub fn withSomMedium(self: Mode) c_uint { - return @intFromEnum(self) | c.HS_MODE_SOM_HORIZON_MEDIUM; - } - - /// Add start-of-match tracking with small horizon - pub fn withSomSmall(self: Mode) c_uint { - return @intFromEnum(self) | c.HS_MODE_SOM_HORIZON_SMALL; - } -}; - -// ============================================================================= -// Match Result -// ============================================================================= - -/// A single match result from scanning -pub const Match = struct { - /// Pattern ID (0 for single-pattern databases) - id: u32, - /// Start offset of match (only valid if SOM flag was used) - start: u64, - /// End offset of match (exclusive) - end: u64, -}; - -// ============================================================================= -// Pattern Definition -// ============================================================================= - -/// A pattern with optional ID and flags for multi-pattern compilation -pub const Pattern = struct { - /// The regex pattern string - expression: []const u8, - /// Unique identifier for this pattern (returned in match callbacks) - id: u32 = 0, - /// Pattern-specific flags - flags: Flags = .{}, -}; - -// ============================================================================= -// Database -// ============================================================================= - -/// A compiled pattern database -/// -/// Databases are immutable after compilation and can be shared across threads. -/// Each thread needs its own Scratch space for scanning. 
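Those two sentences carry the whole threading contract, so a concrete sketch may help before the struct itself: one shared, immutable `Database`, one `Scratch` per thread. The worker, pattern, and inputs are illustrative; the API calls are the ones defined below:

```zig
const std = @import("std");
const hs = @import("hyperscan");

// Each worker allocates its own Scratch; the shared Database is read-only.
fn worker(db: *const hs.Database, lines: []const []const u8, hits: *std.atomic.Value(usize)) void {
    var scratch = hs.Scratch.init(db) catch return;
    defer scratch.deinit();
    for (lines) |line| {
        const matched = db.matches(&scratch, line) catch return;
        if (matched) _ = hits.fetchAdd(1, .monotonic);
    }
}

pub fn main() !void {
    var db = try hs.Database.compile("ERROR|FATAL", .{});
    defer db.deinit();

    var hits = std.atomic.Value(usize).init(0);
    const chunk_a = [_][]const u8{ "ERROR: disk full", "all good" };
    const chunk_b = [_][]const u8{ "FATAL: out of memory" };

    // Sharing &db across threads is safe: databases are immutable.
    const t1 = try std.Thread.spawn(.{}, worker, .{ &db, &chunk_a, &hits });
    const t2 = try std.Thread.spawn(.{}, worker, .{ &db, &chunk_b, &hits });
    t1.join();
    t2.join();

    std.debug.print("matched lines: {d}\n", .{hits.load(.monotonic)});
}
```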
-pub const Database = struct { - handle: *c.hs_database_t, - - const Self = @This(); - - /// Compile options - pub const CompileOptions = struct { - flags: Flags = .{}, - mode: Mode = .block, - }; - - /// Compile a single regex pattern - pub fn compile(pattern: []const u8, options: CompileOptions) Error!Self { - return compileWithDetails(pattern, options) catch |err| switch (err) { - error.CompileErrorWithDetails => error.CompileError, - else => |e| e, - }; - } - - /// Error type that includes compile details - pub const CompileWithDetailsError = Error || error{CompileErrorWithDetails}; - - /// Compile a single regex pattern, storing error details on failure - pub fn compileWithDetails( - pattern: []const u8, - options: CompileOptions, - ) CompileWithDetailsError!Self { - // Pattern must be null-terminated for C API - var pattern_buf: [4096]u8 = undefined; - if (pattern.len >= pattern_buf.len) return error.Invalid; - - @memcpy(pattern_buf[0..pattern.len], pattern); - pattern_buf[pattern.len] = 0; - - var db: ?*c.hs_database_t = null; - var comp_err: ?*c.hs_compile_error_t = null; - - const rc = c.hs_compile( - @ptrCast(&pattern_buf), - @bitCast(options.flags), - @intFromEnum(options.mode), - null, - &db, - &comp_err, - ); - - if (rc != c.HS_SUCCESS) { - if (comp_err) |err| { - _ = c.hs_free_compile_error(err); - } - if (rc == c.HS_COMPILER_ERROR) { - return error.CompileErrorWithDetails; - } - return mapError(rc); - } - - return .{ .handle = db.? }; - } - - /// Compile multiple patterns into a single database - /// - /// The allocator is used for temporary storage during compilation and - /// is not retained after this function returns. - pub fn compileMulti( - allocator: std.mem.Allocator, - patterns: []const Pattern, - options: CompileOptions, - ) (Error || std.mem.Allocator.Error)!Self { - if (patterns.len == 0) return error.Invalid; - if (patterns.len > std.math.maxInt(c_uint)) return error.Invalid; - - // Allocate temporary arrays for C API - var arena = std.heap.ArenaAllocator.init(allocator); - defer arena.deinit(); - const alloc = arena.allocator(); - - const expressions = try alloc.alloc([*:0]const u8, patterns.len); - const flags = try alloc.alloc(c_uint, patterns.len); - const ids = try alloc.alloc(c_uint, patterns.len); - const pattern_bufs = try alloc.alloc([4096]u8, patterns.len); - - for (patterns, 0..) |pat, i| { - if (pat.expression.len >= 4096) return error.Invalid; - @memcpy(pattern_bufs[i][0..pat.expression.len], pat.expression); - pattern_bufs[i][pat.expression.len] = 0; - expressions[i] = @ptrCast(&pattern_bufs[i]); - flags[i] = @bitCast(options.flags.with(pat.flags)); - ids[i] = pat.id; - } - - var db: ?*c.hs_database_t = null; - var comp_err: ?*c.hs_compile_error_t = null; - - const rc = c.hs_compile_multi( - expressions.ptr, - flags.ptr, - ids.ptr, - @intCast(patterns.len), - @intFromEnum(options.mode), - null, - &db, - &comp_err, - ); - - if (rc != c.HS_SUCCESS) { - if (comp_err) |err| { - _ = c.hs_free_compile_error(err); - } - return mapError(rc); - } - - return .{ .handle = db.? 
}; - } - - /// Compile a literal (non-regex) pattern for exact matching - pub fn compileLiteral(pattern: []const u8, options: CompileOptions) Error!Self { - var db: ?*c.hs_database_t = null; - var comp_err: ?*c.hs_compile_error_t = null; - - const rc = c.hs_compile_lit( - pattern.ptr, - @bitCast(options.flags), - pattern.len, - @intFromEnum(options.mode), - null, - &db, - &comp_err, - ); - - if (rc != c.HS_SUCCESS) { - if (comp_err) |err| { - _ = c.hs_free_compile_error(err); - } - return mapError(rc); - } - - return .{ .handle = db.? }; - } - - /// Compile multiple literal patterns - /// - /// The allocator is used for temporary storage during compilation and - /// is not retained after this function returns. - pub fn compileLiteralMulti( - allocator: std.mem.Allocator, - patterns: []const Pattern, - options: CompileOptions, - ) (Error || std.mem.Allocator.Error)!Self { - if (patterns.len == 0) return error.Invalid; - if (patterns.len > std.math.maxInt(c_uint)) return error.Invalid; - - var arena = std.heap.ArenaAllocator.init(allocator); - defer arena.deinit(); - const alloc = arena.allocator(); - - const expressions = try alloc.alloc([*]const u8, patterns.len); - const flags = try alloc.alloc(c_uint, patterns.len); - const ids = try alloc.alloc(c_uint, patterns.len); - const lens = try alloc.alloc(usize, patterns.len); - - for (patterns, 0..) |pat, i| { - expressions[i] = pat.expression.ptr; - flags[i] = @bitCast(options.flags.with(pat.flags)); - ids[i] = pat.id; - lens[i] = pat.expression.len; - } - - var db: ?*c.hs_database_t = null; - var comp_err: ?*c.hs_compile_error_t = null; - - const rc = c.hs_compile_lit_multi( - expressions.ptr, - flags.ptr, - ids.ptr, - lens.ptr, - @intCast(patterns.len), - @intFromEnum(options.mode), - null, - &db, - &comp_err, - ); - - if (rc != c.HS_SUCCESS) { - if (comp_err) |err| { - _ = c.hs_free_compile_error(err); - } - return mapError(rc); - } - - return .{ .handle = db.? }; - } - - /// Deserialize a database from bytes - pub fn deserialize(bytes: []const u8) Error!Self { - var db: ?*c.hs_database_t = null; - const rc = c.hs_deserialize_database(bytes.ptr, bytes.len, &db); - if (rc != c.HS_SUCCESS) return mapError(rc); - return .{ .handle = db.? }; - } - - /// Free the database - pub fn deinit(self: *Self) void { - _ = c.hs_free_database(self.handle); - self.handle = undefined; - } - - /// Get the size of the database in bytes - pub fn size(self: *const Self) Error!usize { - var sz: usize = 0; - const rc = c.hs_database_size(self.handle, &sz); - if (rc != c.HS_SUCCESS) return mapError(rc); - return sz; - } - - /// Serialize the database to bytes - pub fn serialize(self: *const Self, allocator: std.mem.Allocator) (Error || std.mem.Allocator.Error)![]u8 { - var bytes: ?[*]u8 = null; - var length: usize = 0; - - const rc = c.hs_serialize_database(self.handle, &bytes, &length); - if (rc != c.HS_SUCCESS) return mapError(rc); - - // Copy to Zig-managed memory - const result = try allocator.alloc(u8, length); - @memcpy(result, bytes.?[0..length]); - - // Free C-allocated memory (uses libc free) - std.c.free(bytes.?); - - return result; - } - - /// Scan data for matches using block mode - /// - /// Returns an iterator over matches. The scratch space must remain valid - /// for the duration of iteration. 
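A note on the `serialize`/`deserialize` pair above before the scanning API: together they let a process cache a compiled database on disk and skip pattern compilation on later startups, which dominates startup cost for large pattern sets. A sketch of such a cache; the path handling and size cap are illustrative:

```zig
const std = @import("std");
const hs = @import("hyperscan");

// Persist a compiled database so later runs skip pattern compilation.
fn cacheDatabase(allocator: std.mem.Allocator, db: *const hs.Database, path: []const u8) !void {
    const bytes = try db.serialize(allocator);
    defer allocator.free(bytes);
    try std.fs.cwd().writeFile(.{ .sub_path = path, .data = bytes });
}

fn loadCachedDatabase(allocator: std.mem.Allocator, path: []const u8) !hs.Database {
    const file = try std.fs.cwd().openFile(path, .{});
    defer file.close();
    const bytes = try file.readToEndAlloc(allocator, 64 * 1024 * 1024);
    defer allocator.free(bytes);
    // Freeing `bytes` afterwards is fine: deserialize copies into a fresh
    // database, and rejects stale caches with DatabaseVersionError or
    // DatabasePlatformError on a version/platform mismatch.
    return hs.Database.deserialize(bytes);
}
```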
- pub fn scan(self: *const Self, scratch: *Scratch, data: []const u8) BlockScanner { - return BlockScanner.init(self, scratch, data); - } - - /// Scan and call a callback for each match - /// - /// This is more efficient than the iterator when you don't need to - /// collect matches or when early termination is desired. - pub fn scanWithCallback( - self: *const Self, - scratch: *Scratch, - data: []const u8, - context: anytype, - comptime callback: fn (@TypeOf(context), Match) bool, - ) Error!bool { - const Context = @TypeOf(context); - const Wrapper = struct { - fn handler( - id: c_uint, - from: c_ulonglong, - to: c_ulonglong, - _: c_uint, - ctx: ?*anyopaque, - ) callconv(.c) c_int { - const match = Match{ - .id = id, - .start = from, - .end = to, - }; - const user_ctx: Context = @ptrCast(@alignCast(ctx)); - // Return 1 to stop scanning, 0 to continue - return if (callback(user_ctx, match)) 0 else 1; - } - }; - - const rc = c.hs_scan( - self.handle, - data.ptr, - @intCast(data.len), - 0, - scratch.handle, - Wrapper.handler, - @ptrCast(@alignCast(context)), - ); - - if (rc == c.HS_SCAN_TERMINATED) return false; - if (rc != c.HS_SUCCESS) return mapError(rc); - return true; - } - - /// Check if any pattern matches (short-circuit on first match) - pub fn matches(self: *const Self, scratch: *Scratch, data: []const u8) Error!bool { - var found = false; - - const rc = c.hs_scan( - self.handle, - data.ptr, - @intCast(data.len), - 0, - scratch.handle, - struct { - fn handler(_: c_uint, _: c_ulonglong, _: c_ulonglong, _: c_uint, ctx: ?*anyopaque) callconv(.c) c_int { - const ptr: *bool = @ptrCast(@alignCast(ctx)); - ptr.* = true; - return 1; // Stop scanning - } - }.handler, - &found, - ); - - if (rc == c.HS_SCAN_TERMINATED) return true; - if (rc != c.HS_SUCCESS) return mapError(rc); - return found; - } - - /// Find the first matching pattern and return its ID - /// Returns null if no pattern matches - pub fn findFirstMatch(self: *const Self, scratch: *Scratch, data: []const u8) Error!?u32 { - const Context = struct { - found: bool = false, - pattern_id: u32 = 0, - }; - var ctx = Context{}; - - const rc = c.hs_scan( - self.handle, - data.ptr, - @intCast(data.len), - 0, - scratch.handle, - struct { - fn handler(id: c_uint, _: c_ulonglong, _: c_ulonglong, _: c_uint, context: ?*anyopaque) callconv(.c) c_int { - const ptr: *Context = @ptrCast(@alignCast(context)); - ptr.found = true; - ptr.pattern_id = id; - return 1; // Stop scanning after first match - } - }.handler, - &ctx, - ); - - if (rc == c.HS_SCAN_TERMINATED) return ctx.pattern_id; - if (rc != c.HS_SUCCESS) return mapError(rc); - if (ctx.found) return ctx.pattern_id; - return null; - } - - /// Open a stream for incremental scanning (streaming mode only) - pub fn openStream(self: *const Self) Error!Stream { - var stream: ?*c.hs_stream_t = null; - const rc = c.hs_open_stream(self.handle, 0, &stream); - if (rc != c.HS_SUCCESS) return mapError(rc); - return .{ .handle = stream.?, .database = self }; - } -}; - -// ============================================================================= -// Scratch Space -// ============================================================================= - -/// Per-thread scratch space for scanning operations -/// -/// Each thread performing scans needs its own scratch space. Scratch spaces -/// can be reused across multiple scan calls but not concurrently. 
-pub const Scratch = struct { - handle: *c.hs_scratch_t, - - const Self = @This(); - - /// Allocate scratch space for the given database - pub fn init(db: *const Database) Error!Self { - var scratch: ?*c.hs_scratch_t = null; - const rc = c.hs_alloc_scratch(db.handle, &scratch); - if (rc != c.HS_SUCCESS) return mapError(rc); - return .{ .handle = scratch.? }; - } - - /// Allocate scratch space for multiple databases - /// - /// The resulting scratch can be used with any of the provided databases. - pub fn initMulti(databases: []const *const Database) Error!Self { - var scratch: ?*c.hs_scratch_t = null; - for (databases) |db| { - const rc = c.hs_alloc_scratch(db.handle, &scratch); - if (rc != c.HS_SUCCESS) { - if (scratch) |s| _ = c.hs_free_scratch(s); - return mapError(rc); - } - } - return .{ .handle = scratch.? }; - } - - /// Free the scratch space - pub fn deinit(self: *Self) void { - _ = c.hs_free_scratch(self.handle); - self.handle = undefined; - } - - /// Clone this scratch space - pub fn clone(self: *const Self) Error!Self { - var new_scratch: ?*c.hs_scratch_t = null; - const rc = c.hs_clone_scratch(self.handle, &new_scratch); - if (rc != c.HS_SUCCESS) return mapError(rc); - return .{ .handle = new_scratch.? }; - } - - /// Get the size of the scratch space in bytes - pub fn size(self: *const Self) Error!usize { - var sz: usize = 0; - const rc = c.hs_scratch_size(self.handle, &sz); - if (rc != c.HS_SUCCESS) return mapError(rc); - return sz; - } -}; - -// ============================================================================= -// Block Scanner (Iterator) -// ============================================================================= - -/// Iterator for block-mode scanning -pub const BlockScanner = struct { - database: *const Database, - scratch: *Scratch, - data: []const u8, - matches_buf: [64]Match = undefined, - matches_len: usize = 0, - match_index: usize = 0, - scan_complete: bool = false, - scan_error: ?Error = null, - - const Self = @This(); - - fn init(database: *const Database, scratch: *Scratch, data: []const u8) Self { - return .{ - .database = database, - .scratch = scratch, - .data = data, - }; - } - - /// Get the next match, or null if no more matches - pub fn next(self: *Self) ?Match { - // Return buffered matches first - if (self.match_index < self.matches_len) { - const match = self.matches_buf[self.match_index]; - self.match_index += 1; - return match; - } - - // If scan is complete, no more matches - if (self.scan_complete) return null; - - // Perform the scan - self.matches_len = 0; - self.match_index = 0; - - const rc = c.hs_scan( - self.database.handle, - self.data.ptr, - @intCast(self.data.len), - 0, - self.scratch.handle, - matchCallback, - self, - ); - - self.scan_complete = true; - - if (rc != c.HS_SUCCESS and rc != c.HS_SCAN_TERMINATED) { - self.scan_error = mapError(rc); - return null; - } - - if (self.matches_len > 0) { - const match = self.matches_buf[0]; - self.match_index = 1; - return match; - } - - return null; - } - - fn matchCallback( - id: c_uint, - from: c_ulonglong, - to: c_ulonglong, - _: c_uint, - ctx: ?*anyopaque, - ) callconv(.c) c_int { - const self: *Self = @ptrCast(@alignCast(ctx)); - if (self.matches_len >= self.matches_buf.len) { - // Buffer full, stop scanning - return 1; - } - self.matches_buf[self.matches_len] = .{ - .id = id, - .start = from, - .end = to, - }; - self.matches_len += 1; - return 0; - } - - /// Check if an error occurred during scanning - pub fn err(self: *const Self) ?Error { - return self.scan_error; - } -}; - 
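One caveat on the `BlockScanner` just shown: it buffers at most 64 matches and terminates the scan once the buffer fills, so match-dense inputs can silently truncate results. When completeness matters, `scanWithCallback` avoids the cap. A sketch that collects every match; the allocator plumbing is illustrative:

```zig
const std = @import("std");
const hs = @import("hyperscan");

// Collect every match via the callback API, sidestepping BlockScanner's
// fixed 64-entry buffer. The caller owns and frees the returned slice.
fn collectAll(
    allocator: std.mem.Allocator,
    db: *const hs.Database,
    scratch: *hs.Scratch,
    data: []const u8,
) ![]hs.Match {
    var list: std.ArrayListUnmanaged(hs.Match) = .{};
    errdefer list.deinit(allocator);

    const Ctx = struct { gpa: std.mem.Allocator, list: *std.ArrayListUnmanaged(hs.Match) };
    var ctx = Ctx{ .gpa = allocator, .list = &list };

    // Returning true from the callback keeps the scan going; on OOM we
    // return false, which terminates the scan and yields a partial list.
    _ = try db.scanWithCallback(scratch, data, &ctx, struct {
        fn onMatch(c: *Ctx, m: hs.Match) bool {
            c.list.append(c.gpa, m) catch return false;
            return true;
        }
    }.onMatch);

    return list.toOwnedSlice(allocator);
}
```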
-// ============================================================================= -// Stream (Streaming Mode) -// ============================================================================= - -/// A stream for incremental pattern matching -/// -/// Streams allow scanning data that arrives in chunks while maintaining -/// state between chunks. Matches that span chunks are correctly detected. -pub const Stream = struct { - handle: *c.hs_stream_t, - database: *const Database, - - const Self = @This(); - - /// Write data to the stream and scan for matches - pub fn scan( - self: *Self, - scratch: *Scratch, - data: []const u8, - context: anytype, - comptime callback: fn (@TypeOf(context), Match) bool, - ) Error!bool { - const Context = @TypeOf(context); - const Wrapper = struct { - fn handler( - id: c_uint, - from: c_ulonglong, - to: c_ulonglong, - _: c_uint, - ctx: ?*anyopaque, - ) callconv(.c) c_int { - const match = Match{ - .id = id, - .start = from, - .end = to, - }; - const user_ctx: Context = @ptrCast(@alignCast(ctx)); - return if (callback(user_ctx, match)) 0 else 1; - } - }; - - const rc = c.hs_scan_stream( - self.handle, - data.ptr, - @intCast(data.len), - 0, - scratch.handle, - Wrapper.handler, - @ptrCast(@alignCast(@constCast(context))), - ); - - if (rc == c.HS_SCAN_TERMINATED) return false; - if (rc != c.HS_SUCCESS) return mapError(rc); - return true; - } - - /// Write data without callbacks (useful for building up context) - pub fn write(self: *Self, scratch: *Scratch, data: []const u8) Error!void { - const rc = c.hs_scan_stream( - self.handle, - data.ptr, - @intCast(data.len), - 0, - scratch.handle, - null, - null, - ); - if (rc != c.HS_SUCCESS) return mapError(rc); - } - - /// Reset stream to initial state - pub fn reset(self: *Self, scratch: *Scratch) Error!void { - const rc = c.hs_reset_stream(self.handle, 0, scratch.handle, null, null); - if (rc != c.HS_SUCCESS) return mapError(rc); - } - - /// Close the stream and free resources - /// - /// This may trigger end-of-data matches (e.g., $ anchors). 
- pub fn close( - self: *Self, - scratch: *Scratch, - context: anytype, - comptime callback: ?fn (@TypeOf(context), Match) bool, - ) Error!void { - if (callback) |cb| { - const Context = @TypeOf(context); - const Wrapper = struct { - fn handler( - id: c_uint, - from: c_ulonglong, - to: c_ulonglong, - _: c_uint, - ctx: ?*anyopaque, - ) callconv(.c) c_int { - const match = Match{ - .id = id, - .start = from, - .end = to, - }; - const user_ctx: Context = @ptrCast(@alignCast(ctx)); - return if (cb(user_ctx, match)) 0 else 1; - } - }; - - const rc = c.hs_close_stream( - self.handle, - scratch.handle, - Wrapper.handler, - @ptrCast(@alignCast(context)), - ); - if (rc != c.HS_SUCCESS) return mapError(rc); - } else { - const rc = c.hs_close_stream(self.handle, scratch.handle, null, null); - if (rc != c.HS_SUCCESS) return mapError(rc); - } - self.handle = undefined; - } - - /// Close the stream without checking for EOD matches - pub fn deinit(self: *Self) void { - _ = c.hs_close_stream(self.handle, null, null, null); - self.handle = undefined; - } -}; - -// ============================================================================= -// Utility Functions -// ============================================================================= - -/// Get Hyperscan library version string -pub fn version() []const u8 { - const ver = c.hs_version(); - return std.mem.sliceTo(ver, 0); -} - -/// Check if the current platform supports Hyperscan -pub fn isPlatformValid() bool { - return c.hs_valid_platform() == c.HS_SUCCESS; -} - -// ============================================================================= -// Tests -// ============================================================================= - -test "compile single pattern" { - var db = try Database.compile("hello", .{}); - defer db.deinit(); - - const db_size = try db.size(); - try std.testing.expect(db_size > 0); -} - -test "compile with flags" { - var db = try Database.compile("hello", .{ - .flags = (Flags{ .caseless = true }).with(.{ .utf8 = true }), - }); - defer db.deinit(); -} - -test "compile multi-pattern" { - var db = try Database.compileMulti(std.testing.allocator, &.{ - .{ .expression = "error", .id = 1 }, - .{ .expression = "warn", .id = 2 }, - .{ .expression = "info", .id = 3 }, - }, .{}); - defer db.deinit(); -} - -test "scratch allocation" { - var db = try Database.compile("test", .{}); - defer db.deinit(); - - var scratch = try Scratch.init(&db); - defer scratch.deinit(); - - const scratch_size = try scratch.size(); - try std.testing.expect(scratch_size > 0); -} - -test "scratch clone" { - var db = try Database.compile("test", .{}); - defer db.deinit(); - - var scratch1 = try Scratch.init(&db); - defer scratch1.deinit(); - - var scratch2 = try scratch1.clone(); - defer scratch2.deinit(); -} - -test "block scan - matches found" { - var db = try Database.compile("hello", .{}); - defer db.deinit(); - - var scratch = try Scratch.init(&db); - defer scratch.deinit(); - - var scanner = db.scan(&scratch, "say hello world"); - var count: usize = 0; - while (scanner.next()) |match| { - try std.testing.expectEqual(@as(u32, 0), match.id); - try std.testing.expectEqual(@as(u64, 9), match.end); - count += 1; - } - try std.testing.expectEqual(@as(usize, 1), count); - try std.testing.expect(scanner.err() == null); -} - -test "block scan - no matches" { - var db = try Database.compile("hello", .{}); - defer db.deinit(); - - var scratch = try Scratch.init(&db); - defer scratch.deinit(); - - var scanner = db.scan(&scratch, "goodbye world"); - try 
std.testing.expect(scanner.next() == null); - try std.testing.expect(scanner.err() == null); -} - -test "matches helper" { - var db = try Database.compile("hello", .{}); - defer db.deinit(); - - var scratch = try Scratch.init(&db); - defer scratch.deinit(); - - try std.testing.expect(try db.matches(&scratch, "hello world")); - try std.testing.expect(!try db.matches(&scratch, "goodbye world")); -} - -test "multi-pattern scan" { - var db = try Database.compileMulti(std.testing.allocator, &.{ - .{ .expression = "error", .id = 1 }, - .{ .expression = "warn", .id = 2 }, - .{ .expression = "info", .id = 3 }, - }, .{}); - defer db.deinit(); - - var scratch = try Scratch.init(&db); - defer scratch.deinit(); - - var scanner = db.scan(&scratch, "error: something bad, warn: be careful"); - var found_error = false; - var found_warn = false; - - while (scanner.next()) |match| { - if (match.id == 1) found_error = true; - if (match.id == 2) found_warn = true; - } - - try std.testing.expect(found_error); - try std.testing.expect(found_warn); -} - -test "literal pattern" { - // Literal patterns treat special chars as-is - var db = try Database.compileLiteral("hello?", .{}); - defer db.deinit(); - - var scratch = try Scratch.init(&db); - defer scratch.deinit(); - - // Should NOT match "hell" (? is literal, not optional) - try std.testing.expect(!try db.matches(&scratch, "hell")); - // Should match "hello?" - try std.testing.expect(try db.matches(&scratch, "hello?")); -} - -test "database serialization" { - var db = try Database.compile("test", .{}); - defer db.deinit(); - - const bytes = try db.serialize(std.testing.allocator); - defer std.testing.allocator.free(bytes); - - try std.testing.expect(bytes.len > 0); - - var db2 = try Database.deserialize(bytes); - defer db2.deinit(); - - var scratch = try Scratch.init(&db2); - defer scratch.deinit(); - - try std.testing.expect(try db2.matches(&scratch, "test")); -} - -test "streaming mode" { - var db = try Database.compile("hello world", .{ .mode = .stream }); - defer db.deinit(); - - var scratch = try Scratch.init(&db); - defer scratch.deinit(); - - var stream = try db.openStream(); - defer stream.deinit(); - - var match_count: usize = 0; - const Ctx = struct { - count: *usize, - }; - - // Write data in chunks - _ = try stream.scan(&scratch, "hello ", &Ctx{ .count = &match_count }, struct { - fn cb(_: *const Ctx, _: Match) bool { - return true; - } - }.cb); - - _ = try stream.scan(&scratch, "world", &Ctx{ .count = &match_count }, struct { - fn cb(ctx: *const Ctx, _: Match) bool { - ctx.count.* += 1; - return true; - } - }.cb); - - try std.testing.expectEqual(@as(usize, 1), match_count); -} - -test "case insensitive matching" { - var db = try Database.compile("hello", .{ .flags = .{ .caseless = true } }); - defer db.deinit(); - - var scratch = try Scratch.init(&db); - defer scratch.deinit(); - - try std.testing.expect(try db.matches(&scratch, "HELLO")); - try std.testing.expect(try db.matches(&scratch, "HeLLo")); - try std.testing.expect(try db.matches(&scratch, "hello")); -} - -test "version check" { - const ver = version(); - try std.testing.expect(ver.len > 0); -} - -test "platform validation" { - // Should succeed on supported platforms - try std.testing.expect(isPlatformValid()); -} diff --git a/src/lambda/extension_api.zig b/src/lambda/extension_api.zig index b86ae54..b56d784 100644 --- a/src/lambda/extension_api.zig +++ b/src/lambda/extension_api.zig @@ -10,7 +10,7 @@ //! 3. 
Shut down gracefully when receiving SHUTDOWN event const std = @import("std"); -const o11y = @import("../observability/root.zig"); +const o11y = @import("o11y"); const EventBus = o11y.EventBus; // ============================================================================= diff --git a/src/lambda_main.zig b/src/lambda_main.zig index fb3a368..ed3a628 100644 --- a/src/lambda_main.zig +++ b/src/lambda_main.zig @@ -28,7 +28,7 @@ const zonfig = edge.zonfig; const lambda = @import("lambda/root.zig"); const ExtensionClient = lambda.ExtensionClient; -const o11y = @import("observability/root.zig"); +const o11y = @import("o11y"); const EventBus = o11y.EventBus; const StdLogAdapter = o11y.StdLogAdapter; const Level = o11y.Level; diff --git a/src/main.zig b/src/main.zig index 72ee191..4cae8b2 100644 --- a/src/main.zig +++ b/src/main.zig @@ -28,7 +28,7 @@ const policy = edge.policy; const ProxyConfig = config_types.ProxyConfig; -const o11y = @import("observability/root.zig"); +const o11y = @import("o11y"); const EventBus = o11y.EventBus; const StdLogAdapter = o11y.StdLogAdapter; const Level = o11y.Level; diff --git a/src/modules/datadog_logs_v2.zig b/src/modules/datadog_logs_v2.zig index 1a98658..3cfb155 100644 --- a/src/modules/datadog_logs_v2.zig +++ b/src/modules/datadog_logs_v2.zig @@ -1,7 +1,7 @@ const std = @import("std"); const zimdjson = @import("zimdjson"); -const policy = @import("../policy/root.zig"); -const o11y = @import("../observability/root.zig"); +const policy = @import("policy_zig"); +const o11y = @import("o11y"); const datadog_log = @import("datadog_log.zig"); const PolicyEngine = policy.PolicyEngine; diff --git a/src/modules/datadog_metrics_v2.zig b/src/modules/datadog_metrics_v2.zig index 9d7af15..f1e7ab0 100644 --- a/src/modules/datadog_metrics_v2.zig +++ b/src/modules/datadog_metrics_v2.zig @@ -1,7 +1,7 @@ const std = @import("std"); const zimdjson = @import("zimdjson"); -const policy = @import("../policy/root.zig"); -const o11y = @import("../observability/root.zig"); +const policy = @import("policy_zig"); +const o11y = @import("o11y"); const datadog_metric = @import("datadog_metric.zig"); const PolicyEngine = policy.PolicyEngine; diff --git a/src/modules/datadog_module.zig b/src/modules/datadog_module.zig index 1cbe07c..d30b3ae 100644 --- a/src/modules/datadog_module.zig +++ b/src/modules/datadog_module.zig @@ -1,9 +1,9 @@ const std = @import("std"); const proxy_module = @import("./proxy_module.zig"); -const policy = @import("../policy/root.zig"); +const policy = @import("policy_zig"); const logs_v2 = @import("./datadog_logs_v2.zig"); const metrics_v2 = @import("./datadog_metrics_v2.zig"); -const o11y = @import("../observability/root.zig"); +const o11y = @import("o11y"); const ProxyModule = proxy_module.ProxyModule; const ModuleConfig = proxy_module.ModuleConfig; diff --git a/src/modules/otlp_logs.zig b/src/modules/otlp_logs.zig index 91ea9fd..0cc423a 100644 --- a/src/modules/otlp_logs.zig +++ b/src/modules/otlp_logs.zig @@ -1,7 +1,7 @@ const std = @import("std"); const proto = @import("proto"); -const policy = @import("../policy/root.zig"); -const o11y = @import("../observability/root.zig"); +const policy = @import("policy_zig"); +const o11y = @import("o11y"); const LogsData = proto.logs.LogsData; const ResourceLogs = proto.logs.ResourceLogs; diff --git a/src/modules/otlp_metrics.zig b/src/modules/otlp_metrics.zig index f128914..2a76145 100644 --- a/src/modules/otlp_metrics.zig +++ b/src/modules/otlp_metrics.zig @@ -19,8 +19,8 @@ const std = @import("std"); const proto = 
@import("proto"); -const policy = @import("../policy/root.zig"); -const o11y = @import("../observability/root.zig"); +const policy = @import("policy_zig"); +const o11y = @import("o11y"); const MetricsData = proto.metrics.MetricsData; const ResourceMetrics = proto.metrics.ResourceMetrics; diff --git a/src/modules/otlp_module.zig b/src/modules/otlp_module.zig index bf206f3..726d9b1 100644 --- a/src/modules/otlp_module.zig +++ b/src/modules/otlp_module.zig @@ -1,10 +1,10 @@ const std = @import("std"); const proxy_module = @import("./proxy_module.zig"); -const policy = @import("../policy/root.zig"); +const policy = @import("policy_zig"); const otlp_logs = @import("./otlp_logs.zig"); const otlp_metrics = @import("./otlp_metrics.zig"); const otlp_traces = @import("./otlp_traces.zig"); -const o11y = @import("../observability/root.zig"); +const o11y = @import("o11y"); const ProxyModule = proxy_module.ProxyModule; const ModuleConfig = proxy_module.ModuleConfig; diff --git a/src/modules/otlp_traces.zig b/src/modules/otlp_traces.zig index a9e44c1..4647594 100644 --- a/src/modules/otlp_traces.zig +++ b/src/modules/otlp_traces.zig @@ -22,8 +22,8 @@ const std = @import("std"); const proto = @import("proto"); -const policy = @import("../policy/root.zig"); -const o11y = @import("../observability/root.zig"); +const policy = @import("policy_zig"); +const o11y = @import("o11y"); const TracesData = proto.trace.TracesData; const ResourceSpans = proto.trace.ResourceSpans; diff --git a/src/modules/prometheus_module.zig b/src/modules/prometheus_module.zig index 68076ab..46c4b44 100644 --- a/src/modules/prometheus_module.zig +++ b/src/modules/prometheus_module.zig @@ -6,8 +6,8 @@ const std = @import("std"); const proxy_module = @import("./proxy_module.zig"); const prometheus = @import("../prometheus/root.zig"); -const policy = @import("../policy/root.zig"); -const o11y = @import("../observability/root.zig"); +const policy = @import("policy_zig"); +const o11y = @import("o11y"); const ProxyModule = proxy_module.ProxyModule; const ModuleConfig = proxy_module.ModuleConfig; diff --git a/src/observability/event_bus.zig b/src/observability/event_bus.zig deleted file mode 100644 index 4a7d75c..0000000 --- a/src/observability/event_bus.zig +++ /dev/null @@ -1,553 +0,0 @@ -const std = @import("std"); -const Level = @import("level.zig").Level; -const Span = @import("span.zig").Span; - -/// Derive event name from type name -/// UserCreated -> user.created -/// PatternGenerationStarted -> pattern.generation.started -fn eventName(comptime T: type) []const u8 { - const type_name = @typeName(T); - - // Find the last component after any dots (module path) - const start = comptime blk: { - var s: usize = 0; - for (type_name, 0..) |c, i| { - if (c == '.') s = i + 1; - } - break :blk s; - }; - - const name = type_name[start..]; - - // Convert PascalCase to snake.case at comptime - const result = comptime blk: { - var buf: [256]u8 = undefined; - var len: usize = 0; - var prev_was_upper = false; - - for (name) |c| { - if (c >= 'A' and c <= 'Z') { - // Insert dot before uppercase (if not first char and prev wasn't upper) - if (len > 0 and !prev_was_upper) { - buf[len] = '.'; - len += 1; - } - buf[len] = c + 32; // toLowerCase - len += 1; - prev_was_upper = true; - } else { - buf[len] = c; - len += 1; - prev_was_upper = false; - } - } - - break :blk buf[0..len].*; - }; - - return &result; -} - -/// EventBus emits structured events for observability. 
-/// Events are structs that describe what happened - the EventBus formats and outputs them. -/// -/// Usage: -/// ``` -/// const events = @import("observability"); -/// -/// // Simple event -/// bus.info(UserLoggedIn{ .username = "alice" }); -/// -/// // Timed operation with span -/// var span = bus.started(.info, RequestStarted{ .path = "/api" }); -/// defer span.completed(RequestCompleted{ .status = 200 }); -/// -/// // Events within the span get timing automatically -/// bus.withSpan(&span).info(StepCompleted{ .step = "validation" }); -/// ``` -/// No-op event bus that discards all events. -/// Useful for tests where you need a non-null EventBus but don't care about output. -/// -/// IMPORTANT: Like StdioEventBus, this struct must NOT be moved after init() is called. -pub const NoopEventBus = struct { - bus: EventBus, - discarding: std.Io.Writer.Discarding, - buf: [64]u8, - - /// Initialize an uninitialized NoopEventBus in place. - /// Call this on a variable that is already at its final address. - pub fn init(self: *NoopEventBus) void { - self.buf = undefined; - self.discarding = std.Io.Writer.Discarding.init(&self.buf); - self.bus = EventBus.init(&self.discarding.writer); - } - - pub fn eventBus(self: *NoopEventBus) *EventBus { - return &self.bus; - } -}; - -/// Wrapper that owns stdout/stderr writers with buffers. -/// Routes error-level events to stderr, everything else to stdout. -/// -/// IMPORTANT: This struct must NOT be moved after init() is called. -/// The EventBus holds pointers to the writers inside this struct. -pub const StdioEventBus = struct { - stdout_writer: std.fs.File.Writer, - stderr_writer: std.fs.File.Writer, - bus: EventBus, - - /// Buffers for writers - static to avoid lifetime issues - var stdout_buf: [4096]u8 = undefined; - var stderr_buf: [4096]u8 = undefined; - - /// Initialize an uninitialized StdioEventBus in place. - /// Call this on a variable that is already at its final address. 
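That pinning requirement is the part callers most often get wrong, so a usage sketch before the implementation may help. It assumes the `o11y` module root re-exports `StdioEventBus`, as the imports elsewhere in this patch suggest; the event type and address are illustrative:

```zig
const o11y = @import("o11y");

var stdio_bus: o11y.StdioEventBus = undefined; // a global never moves

pub fn main() !void {
    // init() wires the inner EventBus to writers stored inside
    // `stdio_bus` itself, so the struct must already sit at its final
    // address when init() runs. A global (or heap slot) satisfies that;
    // returning the struct by value from a helper would not.
    stdio_bus.init();
    const bus = stdio_bus.eventBus();
    bus.setLevel(.debug);

    const StartupCompleted = struct { listen_addr: []const u8 };
    bus.info(StartupCompleted{ .listen_addr = "127.0.0.1:4318" });
}
```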
-    pub fn init(self: *StdioEventBus) void {
-        self.stdout_writer = std.fs.File.stdout().writer(&stdout_buf);
-        self.stderr_writer = std.fs.File.stderr().writer(&stderr_buf);
-        self.bus = EventBus.initDual(&self.stdout_writer.interface, &self.stderr_writer.interface);
-    }
-
-    /// Get a pointer to the EventBus for use
-    pub fn eventBus(self: *StdioEventBus) *EventBus {
-        return &self.bus;
-    }
-};
-
-pub const EventBus = struct {
-    /// Writer for non-error events (debug, info, warn)
-    writer: *std.Io.Writer,
-    /// Writer for error events (defaults to same as writer if not set)
-    err_writer: *std.Io.Writer,
-    min_level: Level,
-    current_span: ?*const Span,
-    /// Mutex for thread-safe writes (writers are not thread-safe)
-    mutex: std.Thread.Mutex,
-
-    /// Initialize with a single std.Io.Writer for all levels
-    pub fn init(writer: *std.Io.Writer) EventBus {
-        return .{
-            .writer = writer,
-            .err_writer = writer,
-            .min_level = .info,
-            .current_span = null,
-            .mutex = .{},
-        };
-    }
-
-    /// Initialize with separate writers: stdout for non-errors, stderr for errors
-    pub fn initDual(stdout_writer: *std.Io.Writer, stderr_writer: *std.Io.Writer) EventBus {
-        return .{
-            .writer = stdout_writer,
-            .err_writer = stderr_writer,
-            .min_level = .info,
-            .current_span = null,
-            .mutex = .{},
-        };
-    }
-
-    /// Set minimum log level
-    pub fn setLevel(self: *EventBus, level: Level) void {
-        self.min_level = level;
-    }
-
-    /// Create a child EventBus with a span context.
-    /// The child shares the parent's writers but gets a fresh mutex.
-    pub fn withSpan(self: EventBus, span: *const Span) EventBus {
-        return .{
-            .writer = self.writer,
-            .err_writer = self.err_writer,
-            .min_level = self.min_level,
-            .current_span = span,
-            .mutex = .{},
-        };
-    }
-
-    /// Get the appropriate writer for the given level
-    fn writerForLevel(self: *EventBus, level: Level) *std.Io.Writer {
-        return if (level == .err) self.err_writer else self.writer;
-    }
-
-    /// Start a timed span. Returns a SpanGuard that should be deferred.
-    /// The started event is emitted immediately.
-    /// The event type name becomes the span name (e.g., BatchProcessingStarted -> batch.processing)
-    pub fn started(
-        self: *EventBus,
-        comptime level: Level,
-        event: anytype,
-    ) SpanGuard(@TypeOf(event)) {
-        const full_name = comptime eventName(@TypeOf(event));
-        // Remove ".started" suffix if present to get base span name
-        const span_name = comptime blk: {
-            const suffix = ".started";
-            if (std.mem.endsWith(u8, full_name, suffix)) {
-                break :blk full_name[0 ..
full_name.len - suffix.len]; - } - break :blk full_name; - }; - - var guard = SpanGuard(@TypeOf(event)){ - .bus = self, - .span = .{ - .id = Span.generateSpanId(), - .name = span_name, - .level = level, - .start_time = std.time.microTimestamp(), - .parent = self.current_span, - }, - }; - - // Emit the started event - guard.bus.emitInternal(level, &guard.span, event); - - return guard; - } - - /// Emit a debug event - pub fn debug(self: *EventBus, event: anytype) void { - self.emit(.debug, event); - } - - /// Emit an info event - pub fn info(self: *EventBus, event: anytype) void { - self.emit(.info, event); - } - - /// Emit a warning event - pub fn warn(self: *EventBus, event: anytype) void { - self.emit(.warn, event); - } - - /// Emit an error event - pub fn err(self: *EventBus, event: anytype) void { - self.emit(.err, event); - } - - /// Emit an event at the specified level - pub fn emit(self: *EventBus, level: Level, event: anytype) void { - self.emitInternal(level, self.current_span, event); - } - - fn emitInternal( - self: *EventBus, - level: Level, - span: ?*const Span, - event: anytype, - ) void { - const name = comptime eventName(@TypeOf(event)); - // Check level filter - if (@intFromEnum(level) < @intFromEnum(self.min_level)) { - return; - } - - // Acquire mutex for thread-safe writes - self.mutex.lock(); - defer self.mutex.unlock(); - - // Select writer based on level (errors go to err_writer) - const writer = self.writerForLevel(level); - - // Timestamp (ISO 8601 with milliseconds: 2025-12-03T14:30:45.123Z) - const millis = std.time.milliTimestamp(); - const secs: u64 = @intCast(@divFloor(millis, 1000)); - const ms: u64 = @intCast(@mod(millis, 1000)); - const epoch_seconds = std.time.epoch.EpochSeconds{ .secs = secs }; - const epoch_day = epoch_seconds.getEpochDay(); - const year_day = epoch_day.calculateYearDay(); - const month_day = year_day.calculateMonthDay(); - const day_seconds = epoch_seconds.getDaySeconds(); - - writer.print("{d}-{d:0>2}-{d:0>2}T{d:0>2}:{d:0>2}:{d:0>2}.{d:0>3}Z [{s}] {s}", .{ - year_day.year, - month_day.month.numeric(), - month_day.day_index + 1, - day_seconds.getHoursIntoDay(), - day_seconds.getMinutesIntoHour(), - day_seconds.getSecondsIntoMinute(), - ms, - level.asText(), - name, - }) catch return; - - // Event fields - const T = @TypeOf(event); - const fields = @typeInfo(T).@"struct".fields; - inline for (fields) |field| { - const value = @field(event, field.name); - writeField(writer, field.name, value); - } - - // Span ID and elapsed time if we have a span - if (span) |s| { - var span_id_buf: [16]u8 = undefined; - const span_id = Span.formatSpanId(s.id, &span_id_buf); - writer.print(" span_id={s}", .{span_id}) catch return; - - var elapsed_buf: [32]u8 = undefined; - const elapsed = s.formatElapsed(&elapsed_buf); - writer.print(" elapsed={s}", .{elapsed}) catch return; - } - - // Newline and flush - writer.writeAll("\n") catch return; - writer.flush() catch return; - } - - fn writeField(writer: *std.Io.Writer, name: []const u8, value: anytype) void { - const T = @TypeOf(value); - - if (T == []const u8 or T == []u8) { - writer.print(" {s}=\"{s}\"", .{ name, value }) catch return; - } else if (@typeInfo(T) == .pointer) { - // Handle pointer to array (like *const [N]u8) - const child = @typeInfo(T).pointer.child; - if (@typeInfo(child) == .array) { - const elem_type = @typeInfo(child).array.child; - if (elem_type == u8) { - writer.print(" {s}=\"{s}\"", .{ name, value }) catch return; - return; - } - } - writer.print(" {s}={any}", .{ name, value }) 
catch return; - } else if (@typeInfo(T) == .int or @typeInfo(T) == .comptime_int) { - writer.print(" {s}={d}", .{ name, value }) catch return; - } else if (@typeInfo(T) == .float or @typeInfo(T) == .comptime_float) { - writer.print(" {s}={d:.2}", .{ name, value }) catch return; - } else if (@typeInfo(T) == .bool) { - writer.print(" {s}={}", .{ name, value }) catch return; - } else if (@typeInfo(T) == .@"enum") { - writer.print(" {s}={s}", .{ name, @tagName(value) }) catch return; - } else if (@typeInfo(T) == .optional) { - if (value) |v| { - writeField(writer, name, v); - } else { - writer.print(" {s}=null", .{name}) catch return; - } - } else if (@typeInfo(T) == .error_union) { - if (value) |v| { - writeField(writer, name, v); - } else |e| { - writer.print(" {s}={s}", .{ name, @errorName(e) }) catch return; - } - } else if (T == anyerror or @typeInfo(T) == .error_set) { - writer.print(" {s}={s}", .{ name, @errorName(value) }) catch return; - } else { - writer.print(" {s}={any}", .{ name, value }) catch return; - } - } -}; - -/// SpanGuard wraps a span and provides scoped completion -/// Generic over the started event type to derive the span name at comptime -pub fn SpanGuard(comptime StartedEvent: type) type { - const full_name = comptime eventName(StartedEvent); - // Remove ".started" suffix if present to get base span name - const span_name = comptime blk: { - const suffix = ".started"; - if (std.mem.endsWith(u8, full_name, suffix)) { - break :blk full_name[0 .. full_name.len - suffix.len]; - } - break :blk full_name; - }; - - return struct { - bus: *EventBus, - span: Span, - - const Self = @This(); - - /// Get an EventBus that includes this span's context - pub fn eventBus(self: *Self) EventBus { - return self.bus.withSpan(&self.span); - } - - /// Complete the span with a completion event - pub fn completed(self: *Self, event: anytype) void { - _ = span_name; // Use the comptime span_name - self.bus.emitInternal( - self.span.level, - &self.span, - event, - ); - } - - /// Complete the span without an event (just log completion) - pub fn done(self: *Self) void { - const CompletedEvent = struct {}; - self.bus.emitInternal( - self.span.level, - &self.span, - CompletedEvent{}, - ); - } - }; -} - -// ============================================================================= -// Tests -// ============================================================================= - -const testing = std.testing; - -/// Test writer that captures output to an ArrayList -const TestWriter = struct { - output: std.ArrayListUnmanaged(u8), - allocator: std.mem.Allocator, - io_writer: std.Io.Writer.Allocating, - - fn init(allocator: std.mem.Allocator) TestWriter { - return .{ - .output = .{}, - .allocator = allocator, - .io_writer = std.Io.Writer.Allocating.init(allocator), - }; - } - - fn deinit(self: *TestWriter) void { - self.output.deinit(self.allocator); - self.io_writer.deinit(); - } - - fn writer(self: *TestWriter) *std.Io.Writer { - return &self.io_writer.writer; - } - - fn getOutput(self: *TestWriter) []const u8 { - return self.io_writer.written(); - } - - fn reset(self: *TestWriter) void { - self.io_writer.clearRetainingCapacity(); - } -}; - -test "EventBus: simple info event" { - var tw = TestWriter.init(testing.allocator); - defer tw.deinit(); - - var bus = EventBus.init(tw.writer()); - bus.setLevel(.debug); - - const UserLoggedIn = struct { - username: []const u8, - method: []const u8, - }; - - bus.info(UserLoggedIn{ .username = "alice", .method = "oauth" }); - - const output = tw.getOutput(); - try 
testing.expect(std.mem.containsAtLeast(u8, output, 1, "[INFO]")); - try testing.expect(std.mem.containsAtLeast(u8, output, 1, "user.logged.in")); - try testing.expect(std.mem.containsAtLeast(u8, output, 1, "username=\"alice\"")); - try testing.expect(std.mem.containsAtLeast(u8, output, 1, "method=\"oauth\"")); -} - -test "EventBus: level filtering" { - var tw = TestWriter.init(testing.allocator); - defer tw.deinit(); - - var bus = EventBus.init(tw.writer()); - bus.setLevel(.warn); // Only warn and above - - const DebugEvent = struct { detail: []const u8 }; - const WarnEvent = struct { message: []const u8 }; - - bus.debug(DebugEvent{ .detail = "should not appear" }); - bus.warn(WarnEvent{ .message = "should appear" }); - - const output = tw.getOutput(); - try testing.expect(!std.mem.containsAtLeast(u8, output, 1, "should not appear")); - try testing.expect(std.mem.containsAtLeast(u8, output, 1, "should appear")); -} - -test "EventBus: span with timing" { - var tw = TestWriter.init(testing.allocator); - defer tw.deinit(); - - var bus = EventBus.init(tw.writer()); - bus.setLevel(.debug); - - const BatchProcessingStarted = struct { batch_id: u32 }; - const BatchProcessingCompleted = struct { items_processed: u32 }; - - var span = bus.started(.info, BatchProcessingStarted{ .batch_id = 123 }); - - // Simulate some work - std.Thread.sleep(10 * std.time.ns_per_ms); - - span.completed(BatchProcessingCompleted{ .items_processed = 5 }); - - const output = tw.getOutput(); - try testing.expect(std.mem.containsAtLeast(u8, output, 1, "batch.processing.started")); - try testing.expect(std.mem.containsAtLeast(u8, output, 1, "batch_id=123")); - try testing.expect(std.mem.containsAtLeast(u8, output, 1, "batch.processing.completed")); - try testing.expect(std.mem.containsAtLeast(u8, output, 1, "items_processed=5")); - try testing.expect(std.mem.containsAtLeast(u8, output, 1, "elapsed=")); -} - -test "EventBus: numeric and boolean fields" { - var tw = TestWriter.init(testing.allocator); - defer tw.deinit(); - - var bus = EventBus.init(tw.writer()); - bus.setLevel(.debug); - - const MetricsEvent = struct { - count: u32, - rate: f32, - success: bool, - }; - - bus.info(MetricsEvent{ .count = 42, .rate = 3.14, .success = true }); - - const output = tw.getOutput(); - try testing.expect(std.mem.containsAtLeast(u8, output, 1, "count=42")); - try testing.expect(std.mem.containsAtLeast(u8, output, 1, "rate=3.14")); - try testing.expect(std.mem.containsAtLeast(u8, output, 1, "success=true")); -} - -test "EventBus: enum fields" { - var tw = TestWriter.init(testing.allocator); - defer tw.deinit(); - - var bus = EventBus.init(tw.writer()); - bus.setLevel(.debug); - - const Status = enum { pending, running, completed }; - const StatusEvent = struct { status: Status }; - - bus.info(StatusEvent{ .status = .running }); - - const output = tw.getOutput(); - try testing.expect(std.mem.containsAtLeast(u8, output, 1, "status=running")); -} - -test "EventBus: optional fields" { - var tw = TestWriter.init(testing.allocator); - defer tw.deinit(); - - var bus = EventBus.init(tw.writer()); - bus.setLevel(.debug); - - const OptionalEvent = struct { - name: []const u8, - error_msg: ?[]const u8, - }; - - bus.info(OptionalEvent{ .name = "test", .error_msg = null }); - bus.info(OptionalEvent{ .name = "test2", .error_msg = "something failed" }); - - const output = tw.getOutput(); - try testing.expect(std.mem.containsAtLeast(u8, output, 1, "error_msg=null")); - try testing.expect(std.mem.containsAtLeast(u8, output, 1, "error_msg=\"something 
failed\"")); -} - -test "eventName: PascalCase to snake.case" { - const UserCreated = struct {}; - const name1 = eventName(UserCreated); - // Name will include module path, but should end with the converted name - try testing.expect(std.mem.endsWith(u8, name1, "user.created")); - - const PatternGenerationStarted = struct {}; - const name2 = eventName(PatternGenerationStarted); - try testing.expect(std.mem.endsWith(u8, name2, "pattern.generation.started")); -} diff --git a/src/observability/formatters.zig b/src/observability/formatters.zig deleted file mode 100644 index d31ad98..0000000 --- a/src/observability/formatters.zig +++ /dev/null @@ -1,50 +0,0 @@ -const std = @import("std"); -const Level = @import("level.zig").Level; - -/// Format a timestamp as HH:MM:SS -pub fn formatTimestamp(buf: []u8) []const u8 { - const timestamp = std.time.timestamp(); - const epoch_seconds = std.time.epoch.EpochSeconds{ .secs = @intCast(timestamp) }; - const day_seconds = epoch_seconds.getDaySeconds(); - - return std.fmt.bufPrint(buf, "{d:0>2}:{d:0>2}:{d:0>2}", .{ - day_seconds.getHoursIntoDay(), - day_seconds.getMinutesIntoHour(), - day_seconds.getSecondsIntoMinute(), - }) catch "??:??:??"; -} - -/// Format a timestamp as ISO8601 -pub fn formatTimestampISO(buf: []u8) []const u8 { - const timestamp = std.time.timestamp(); - const epoch_seconds = std.time.epoch.EpochSeconds{ .secs = @intCast(timestamp) }; - const epoch_day = epoch_seconds.getEpochDay(); - const year_day = epoch_day.calculateYearDay(); - const month_day = year_day.calculateMonthDay(); - const day_seconds = epoch_seconds.getDaySeconds(); - - return std.fmt.bufPrint(buf, "{d:0>4}-{d:0>2}-{d:0>2}T{d:0>2}:{d:0>2}:{d:0>2}Z", .{ - year_day.year, - month_day.month.numeric(), - month_day.day_index + 1, - day_seconds.getHoursIntoDay(), - day_seconds.getMinutesIntoHour(), - day_seconds.getSecondsIntoMinute(), - }) catch "????-??-??T??:??:??Z"; -} - -test "formatTimestamp" { - var buf: [16]u8 = undefined; - const ts = formatTimestamp(&buf); - try std.testing.expectEqual(@as(usize, 8), ts.len); // HH:MM:SS - try std.testing.expectEqual(@as(u8, ':'), ts[2]); - try std.testing.expectEqual(@as(u8, ':'), ts[5]); -} - -test "formatTimestampISO" { - var buf: [32]u8 = undefined; - const ts = formatTimestampISO(&buf); - try std.testing.expectEqual(@as(usize, 20), ts.len); - try std.testing.expectEqual(@as(u8, 'T'), ts[10]); - try std.testing.expectEqual(@as(u8, 'Z'), ts[19]); -} diff --git a/src/observability/level.zig b/src/observability/level.zig deleted file mode 100644 index 5034e72..0000000 --- a/src/observability/level.zig +++ /dev/null @@ -1,93 +0,0 @@ -const std = @import("std"); - -/// Event severity levels, matching std.log.Level for compatibility -pub const Level = enum { - debug, - info, - warn, - err, - - pub fn asText(self: Level) []const u8 { - return switch (self) { - .debug => "DEBUG", - .info => "INFO", - .warn => "WARN", - .err => "ERROR", - }; - } - - pub fn asTextLower(self: Level) []const u8 { - return switch (self) { - .debug => "debug", - .info => "info", - .warn => "warn", - .err => "error", - }; - } - - /// Parse log level from a string (case-insensitive). - /// Returns null if the string is not a valid level. 
- pub fn parse(value: []const u8) ?Level { - if (std.ascii.eqlIgnoreCase(value, "debug")) return .debug; - if (std.ascii.eqlIgnoreCase(value, "info")) return .info; - if (std.ascii.eqlIgnoreCase(value, "warn")) return .warn; - if (std.ascii.eqlIgnoreCase(value, "warning")) return .warn; - if (std.ascii.eqlIgnoreCase(value, "error")) return .err; - if (std.ascii.eqlIgnoreCase(value, "err")) return .err; - return null; - } - - /// Parse log level from an environment variable. - /// Returns the default level if the env var is not set or invalid. - pub fn parseFromEnv(env_var: []const u8, default: Level) Level { - const env_value = std.posix.getenv(env_var) orelse return default; - return parse(env_value) orelse default; - } - - /// Convert from std.log.Level - pub fn fromStd(level: std.log.Level) Level { - return switch (level) { - .debug => .debug, - .info => .info, - .warn => .warn, - .err => .err, - }; - } - - /// Convert to std.log.Level - pub fn toStd(self: Level) std.log.Level { - return switch (self) { - .debug => .debug, - .info => .info, - .warn => .warn, - .err => .err, - }; - } -}; - -test "Level.asText" { - const testing = std.testing; - try testing.expectEqualStrings("DEBUG", Level.debug.asText()); - try testing.expectEqualStrings("INFO", Level.info.asText()); - try testing.expectEqualStrings("WARN", Level.warn.asText()); - try testing.expectEqualStrings("ERROR", Level.err.asText()); -} - -test "Level.parse" { - const testing = std.testing; - - // Valid values (case-insensitive) - try testing.expectEqual(Level.debug, Level.parse("debug").?); - try testing.expectEqual(Level.debug, Level.parse("DEBUG").?); - try testing.expectEqual(Level.debug, Level.parse("Debug").?); - try testing.expectEqual(Level.info, Level.parse("info").?); - try testing.expectEqual(Level.warn, Level.parse("warn").?); - try testing.expectEqual(Level.warn, Level.parse("warning").?); - try testing.expectEqual(Level.err, Level.parse("error").?); - try testing.expectEqual(Level.err, Level.parse("err").?); - - // Invalid values - try testing.expect(Level.parse("invalid") == null); - try testing.expect(Level.parse("") == null); - try testing.expect(Level.parse("trace") == null); -} diff --git a/src/observability/root.zig b/src/observability/root.zig deleted file mode 100644 index 9a59d09..0000000 --- a/src/observability/root.zig +++ /dev/null @@ -1,16 +0,0 @@ -const std = @import("std"); - -const event_bus = @import("event_bus.zig"); - -pub const EventBus = event_bus.EventBus; -pub const StdioEventBus = event_bus.StdioEventBus; -pub const NoopEventBus = event_bus.NoopEventBus; -pub const SpanGuard = event_bus.SpanGuard; -pub const Level = @import("level.zig").Level; -pub const Span = @import("span.zig").Span; -pub const formatters = @import("formatters.zig"); -pub const StdLogAdapter = @import("std_log_adapter.zig").StdLogAdapter; - -test { - std.testing.refAllDecls(@This()); -} diff --git a/src/observability/span.zig b/src/observability/span.zig deleted file mode 100644 index c7f2028..0000000 --- a/src/observability/span.zig +++ /dev/null @@ -1,134 +0,0 @@ -const std = @import("std"); -const Level = @import("level.zig").Level; - -/// 8-byte span identifier, displayed as 16 hex characters (e.g., "00f067aa0ba902b7") -pub const SpanId = [8]u8; - -/// Generate a random span ID -pub fn generateSpanId() SpanId { - var id: SpanId = undefined; - std.crypto.random.bytes(&id); - return id; -} - -/// Format a span ID as a 16-character hex string -pub fn formatSpanId(id: SpanId, buf: *[16]u8) []const u8 { - const hex_chars = 
"0123456789abcdef"; - for (id, 0..) |byte, i| { - buf[i * 2] = hex_chars[byte >> 4]; - buf[i * 2 + 1] = hex_chars[byte & 0x0f]; - } - return buf[0..16]; -} - -/// A span represents a timed operation with a start and end. -/// Created via EventBus.started() and completed via span.completed(). -pub const Span = struct { - id: SpanId, - name: []const u8, - level: Level, - start_time: i64, - parent: ?*const Span, - - /// Generate a random span ID - pub fn generateSpanId() SpanId { - var id: SpanId = undefined; - std.crypto.random.bytes(&id); - return id; - } - - /// Format a span ID as a 16-character hex string - pub fn formatSpanId(id: SpanId, buf: *[16]u8) []const u8 { - const hex_chars = "0123456789abcdef"; - for (id, 0..) |byte, i| { - buf[i * 2] = hex_chars[byte >> 4]; - buf[i * 2 + 1] = hex_chars[byte & 0x0f]; - } - return buf[0..16]; - } - - /// Get elapsed time in nanoseconds since span started - pub fn elapsedNs(self: *const Span) i64 { - const now = std.time.microTimestamp(); - return (now - self.start_time) * 1000; - } - - /// Get elapsed time in milliseconds since span started - pub fn elapsedMs(self: *const Span) i64 { - return @divFloor(self.elapsedNs(), std.time.ns_per_ms); - } - - /// Format elapsed time as a human-readable string - pub fn formatElapsed(self: *const Span, buf: []u8) []const u8 { - const elapsed_ns = self.elapsedNs(); - - if (elapsed_ns < std.time.ns_per_ms) { - // Microseconds - const us = @divFloor(elapsed_ns, std.time.ns_per_us); - return std.fmt.bufPrint(buf, "{d}µs", .{us}) catch "?"; - } else if (elapsed_ns < std.time.ns_per_s) { - // Milliseconds - const ms = @divFloor(elapsed_ns, std.time.ns_per_ms); - return std.fmt.bufPrint(buf, "{d}ms", .{ms}) catch "?"; - } else { - // Seconds with one decimal - const ms = @divFloor(elapsed_ns, std.time.ns_per_ms); - const secs = @divFloor(ms, 1000); - const frac = @divFloor(@mod(ms, 1000), 100); - return std.fmt.bufPrint(buf, "{d}.{d}s", .{ secs, frac }) catch "?"; - } - } -}; - -test "Span.elapsedMs" { - const span = Span{ - .id = .{ 0, 1, 2, 3, 4, 5, 6, 7 }, - .name = "test", - .level = .info, - .start_time = std.time.microTimestamp() - 100_000, // 100ms ago - .parent = null, - }; - - const elapsed = span.elapsedMs(); - try std.testing.expect(elapsed >= 99 and elapsed <= 110); -} - -test "Span.formatElapsed" { - var buf: [32]u8 = undefined; - - // Test microseconds - const span_us = Span{ - .id = .{ 0, 1, 2, 3, 4, 5, 6, 7 }, - .name = "test", - .level = .info, - .start_time = std.time.microTimestamp() - 500, // 500µs ago - .parent = null, - }; - const us_str = span_us.formatElapsed(&buf); - try std.testing.expect(std.mem.endsWith(u8, us_str, "µs")); - - // Test milliseconds - const span_ms = Span{ - .id = .{ 0, 1, 2, 3, 4, 5, 6, 7 }, - .name = "test", - .level = .info, - .start_time = std.time.microTimestamp() - 50_000, // 50ms ago - .parent = null, - }; - const ms_str = span_ms.formatElapsed(&buf); - try std.testing.expect(std.mem.endsWith(u8, ms_str, "ms")); -} - -test "Span.formatSpanId" { - var buf: [16]u8 = undefined; - const id: SpanId = .{ 0x00, 0xf0, 0x67, 0xaa, 0x0b, 0xa9, 0x02, 0xb7 }; - const formatted = Span.formatSpanId(id, &buf); - try std.testing.expectEqualStrings("00f067aa0ba902b7", formatted); -} - -test "Span.generateSpanId" { - const id1 = Span.generateSpanId(); - const id2 = Span.generateSpanId(); - // IDs should be different (with extremely high probability) - try std.testing.expect(!std.mem.eql(u8, &id1, &id2)); -} diff --git a/src/observability/std_log_adapter.zig 
b/src/observability/std_log_adapter.zig deleted file mode 100644 index a741c70..0000000 --- a/src/observability/std_log_adapter.zig +++ /dev/null @@ -1,219 +0,0 @@ -//! Std.log Adapter for EventBus -//! -//! Provides integration between Zig's std.log and the EventBus observability system. -//! This allows legacy code using std.log to emit structured events through the EventBus. -//! -//! Usage: -//! ```zig -//! const std = @import("std"); -//! const o11y = @import("observability"); -//! -//! // Override std.log options to use the adapter -//! pub const std_options: std.Options = .{ -//! .logFn = o11y.StdLogAdapter.logFn, -//! }; -//! -//! // Initialize the adapter with your EventBus (call once at startup) -//! var stdio_bus: o11y.StdioEventBus = undefined; -//! stdio_bus.init(); -//! o11y.StdLogAdapter.init(stdio_bus.eventBus()); -//! -//! // Now std.log calls go through EventBus -//! const log = std.log.scoped(.my_module); -//! log.info("User logged in", .{}); -//! ``` - -const std = @import("std"); -const EventBus = @import("event_bus.zig").EventBus; -const Level = @import("level.zig").Level; - -/// Global EventBus pointer for std.log adapter. -/// Must be initialized before any std.log calls. -var global_bus: ?*EventBus = null; - -/// Std.log adapter that routes log messages through the EventBus. -/// -/// IMPORTANT: Call init() with your EventBus before any std.log calls. -pub const StdLogAdapter = struct { - /// Initialize the adapter with an EventBus. - /// This must be called before any std.log calls are made. - pub fn init(bus: *EventBus) void { - global_bus = bus; - } - - /// Reset the adapter (mainly for testing). - pub fn deinit() void { - global_bus = null; - } - - /// Log function compatible with std.Options.logFn - pub fn logFn( - comptime level: std.log.Level, - comptime scope: @Type(.enum_literal), - comptime format: []const u8, - args: anytype, - ) void { - const bus = global_bus orelse { - // Fallback to default log if not initialized - std.log.defaultLog(level, scope, format, args); - return; - }; - - const event_level = mapLevel(level); - - // Check level filter - if (@intFromEnum(event_level) < @intFromEnum(bus.min_level)) { - return; - } - - // Select writer based on level - const writer = if (event_level == .err) bus.err_writer else bus.writer; - - // Timestamp (ISO 8601 with milliseconds) - const millis = std.time.milliTimestamp(); - const secs: u64 = @intCast(@divFloor(millis, 1000)); - const ms: u64 = @intCast(@mod(millis, 1000)); - const epoch_seconds = std.time.epoch.EpochSeconds{ .secs = secs }; - const epoch_day = epoch_seconds.getEpochDay(); - const year_day = epoch_day.calculateYearDay(); - const month_day = year_day.calculateMonthDay(); - const day_seconds = epoch_seconds.getDaySeconds(); - - // Format: 2025-12-03T14:30:45.123Z [INFO] scope: message - writer.print("{d}-{d:0>2}-{d:0>2}T{d:0>2}:{d:0>2}:{d:0>2}.{d:0>3}Z [{s}] {s}: ", .{ - year_day.year, - month_day.month.numeric(), - month_day.day_index + 1, - day_seconds.getHoursIntoDay(), - day_seconds.getMinutesIntoHour(), - day_seconds.getSecondsIntoMinute(), - ms, - event_level.asText(), - @tagName(scope), - }) catch return; - - // Print the formatted message - writer.print(format, args) catch return; - - // Newline and flush - writer.writeAll("\n") catch return; - writer.flush() catch return; - } - - /// Map std.log.Level to our Level - fn mapLevel(level: std.log.Level) Level { - return switch (level) { - .debug => .debug, - .info => .info, - .warn => .warn, - .err => .err, - }; - } -}; - -// 
============================================================================= -// Tests -// ============================================================================= - -const testing = std.testing; - -/// Test writer that captures output -const TestWriter = struct { - io_writer: std.Io.Writer.Allocating, - - fn init(allocator: std.mem.Allocator) TestWriter { - return .{ - .io_writer = std.Io.Writer.Allocating.init(allocator), - }; - } - - fn deinit(self: *TestWriter) void { - self.io_writer.deinit(); - } - - fn writer(self: *TestWriter) *std.Io.Writer { - return &self.io_writer.writer; - } - - fn getOutput(self: *TestWriter) []const u8 { - return self.io_writer.written(); - } - - fn reset(self: *TestWriter) void { - self.io_writer.clearRetainingCapacity(); - } -}; - -test "StdLogAdapter: formats log message correctly" { - var tw = TestWriter.init(testing.allocator); - defer tw.deinit(); - - var bus = EventBus.init(tw.writer()); - bus.setLevel(.debug); - - StdLogAdapter.init(&bus); - defer StdLogAdapter.deinit(); - - // Call logFn directly since we can't override std_options in tests - StdLogAdapter.logFn(.info, .test_scope, "hello {s}", .{"world"}); - - const output = tw.getOutput(); - try testing.expect(std.mem.containsAtLeast(u8, output, 1, "[INFO]")); - try testing.expect(std.mem.containsAtLeast(u8, output, 1, "test_scope:")); - try testing.expect(std.mem.containsAtLeast(u8, output, 1, "hello world")); -} - -test "StdLogAdapter: respects level filtering" { - var tw = TestWriter.init(testing.allocator); - defer tw.deinit(); - - var bus = EventBus.init(tw.writer()); - bus.setLevel(.warn); // Only warn and above - - StdLogAdapter.init(&bus); - defer StdLogAdapter.deinit(); - - StdLogAdapter.logFn(.debug, .test_scope, "debug message", .{}); - StdLogAdapter.logFn(.info, .test_scope, "info message", .{}); - StdLogAdapter.logFn(.warn, .test_scope, "warn message", .{}); - StdLogAdapter.logFn(.err, .test_scope, "error message", .{}); - - const output = tw.getOutput(); - try testing.expect(!std.mem.containsAtLeast(u8, output, 1, "debug message")); - try testing.expect(!std.mem.containsAtLeast(u8, output, 1, "info message")); - try testing.expect(std.mem.containsAtLeast(u8, output, 1, "warn message")); - try testing.expect(std.mem.containsAtLeast(u8, output, 1, "error message")); -} - -test "StdLogAdapter: maps levels correctly" { - try testing.expectEqual(Level.debug, StdLogAdapter.mapLevel(.debug)); - try testing.expectEqual(Level.info, StdLogAdapter.mapLevel(.info)); - try testing.expectEqual(Level.warn, StdLogAdapter.mapLevel(.warn)); - try testing.expectEqual(Level.err, StdLogAdapter.mapLevel(.err)); -} - -test "StdLogAdapter: routes errors to err_writer" { - var stdout_tw = TestWriter.init(testing.allocator); - defer stdout_tw.deinit(); - - var stderr_tw = TestWriter.init(testing.allocator); - defer stderr_tw.deinit(); - - var bus = EventBus.initDual(stdout_tw.writer(), stderr_tw.writer()); - bus.setLevel(.debug); - - StdLogAdapter.init(&bus); - defer StdLogAdapter.deinit(); - - StdLogAdapter.logFn(.info, .test_scope, "info goes to stdout", .{}); - StdLogAdapter.logFn(.err, .test_scope, "error goes to stderr", .{}); - - const stdout_output = stdout_tw.getOutput(); - const stderr_output = stderr_tw.getOutput(); - - try testing.expect(std.mem.containsAtLeast(u8, stdout_output, 1, "info goes to stdout")); - try testing.expect(!std.mem.containsAtLeast(u8, stdout_output, 1, "error goes to stderr")); - - try testing.expect(!std.mem.containsAtLeast(u8, stderr_output, 1, "info goes to stdout")); 
- try testing.expect(std.mem.containsAtLeast(u8, stderr_output, 1, "error goes to stderr")); -} diff --git a/src/otlp_main.zig b/src/otlp_main.zig index 9f14308..9b04b07 100644 --- a/src/otlp_main.zig +++ b/src/otlp_main.zig @@ -25,7 +25,7 @@ const policy = edge.policy; const ProxyConfig = config_types.ProxyConfig; -const o11y = @import("observability/root.zig"); +const o11y = @import("o11y"); const EventBus = o11y.EventBus; const StdLogAdapter = o11y.StdLogAdapter; const Level = o11y.Level; diff --git a/src/policy/loader.zig b/src/policy/loader.zig deleted file mode 100644 index 661da8c..0000000 --- a/src/policy/loader.zig +++ /dev/null @@ -1,370 +0,0 @@ -//! Async Policy Loader -//! -//! Provides shared logic for loading policy providers asynchronously, -//! allowing the server to start responding to requests immediately -//! while policies are loaded in the background. -//! -//! ## Usage -//! -//! ```zig -//! var loader = try PolicyLoader.init(allocator, bus, ®istry, config.policy_providers); -//! defer loader.deinit(); -//! -//! // Start loading providers asynchronously (non-blocking) -//! try loader.startAsync(); -//! -//! // Server can now start handling requests... -//! -//! // Optionally wait for initial load to complete -//! loader.waitForInitialLoad(); -//! ``` - -const std = @import("std"); -const policy = @import("./root.zig"); - -const Registry = policy.Registry; -const Provider = policy.Provider; -const FileProvider = policy.FileProvider; -const HttpProvider = policy.HttpProvider; -const PolicyCallback = policy.PolicyCallback; -const PolicyUpdate = policy.PolicyUpdate; -const SourceType = policy.SourceType; -const ProviderConfig = policy.ProviderConfig; -const ServiceMetadata = policy.ServiceMetadata; - -const o11y = @import("../observability/root.zig"); -const EventBus = o11y.EventBus; - -// ============================================================================= -// Observability Events -// ============================================================================= - -const PolicyLoaderStarting = struct { provider_count: usize }; -const PolicyLoaderReady = struct { loaded_count: usize, failed_count: usize }; -const ProviderLoadStarted = struct { provider_id: []const u8, provider_type: []const u8 }; -const ProviderLoadCompleted = struct { provider_id: []const u8, policy_count: usize }; -const ProviderLoadFailed = struct { provider_id: []const u8, err: []const u8 }; -const FileProviderConfigured = struct { path: []const u8 }; -const HttpProviderConfigured = struct { url: []const u8, poll_interval: u64 }; -const PolicyRegistryUpdated = struct { provider_id: []const u8, policy_count: usize }; - -// ============================================================================= -// Policy Callback Context -// ============================================================================= - -const CallbackContext = struct { - registry: *Registry, - bus: *EventBus, - source_type: SourceType, - - fn handleUpdate(context: *anyopaque, update: PolicyUpdate) !void { - const self: *CallbackContext = @ptrCast(@alignCast(context)); - try self.registry.updatePolicies(update.policies, update.provider_id, self.source_type); - self.bus.info(PolicyRegistryUpdated{ - .provider_id = update.provider_id, - .policy_count = update.policies.len, - }); - } -}; - -// ============================================================================= -// Provider State -// ============================================================================= - -const ProviderState = struct { - config: 
ProviderConfig, - provider: ?Provider = null, - callback_context: ?CallbackContext = null, - load_error: ?[]const u8 = null, - loaded: std.atomic.Value(bool) = std.atomic.Value(bool).init(false), -}; - -// ============================================================================= -// Policy Loader -// ============================================================================= - -pub const PolicyLoader = struct { - allocator: std.mem.Allocator, - bus: *EventBus, - registry: *Registry, - service: ServiceMetadata, - - /// Provider states (one per configured provider) - provider_states: []ProviderState, - - /// Background loading thread - load_thread: ?std.Thread = null, - - /// Shutdown flag for clean termination - shutdown_flag: std.atomic.Value(bool) = std.atomic.Value(bool).init(false), - - /// Signal when initial load is complete - initial_load_complete: std.atomic.Value(bool) = std.atomic.Value(bool).init(false), - - /// Initialize the policy loader with provider configurations. - /// Does not start loading - call `startAsync()` or `loadSync()` to begin. - pub fn init( - allocator: std.mem.Allocator, - bus: *EventBus, - registry: *Registry, - provider_configs: []const ProviderConfig, - service: ServiceMetadata, - ) !*PolicyLoader { - const self = try allocator.create(PolicyLoader); - errdefer allocator.destroy(self); - - // Allocate provider states - const states = try allocator.alloc(ProviderState, provider_configs.len); - errdefer allocator.free(states); - - for (provider_configs, 0..) |config, i| { - states[i] = .{ .config = config }; - } - - self.* = .{ - .allocator = allocator, - .bus = bus, - .registry = registry, - .service = service, - .provider_states = states, - }; - - return self; - } - - /// Start loading providers asynchronously in a background thread. - /// Returns immediately, allowing the server to start handling requests. - pub fn startAsync(self: *PolicyLoader) !void { - self.bus.info(PolicyLoaderStarting{ .provider_count = self.provider_states.len }); - self.load_thread = try std.Thread.spawn(.{}, loadProvidersThread, .{self}); - } - - /// Load all providers synchronously (blocks until complete). - /// Use this if you need policies loaded before accepting requests. - pub fn loadSync(self: *PolicyLoader) void { - self.bus.info(PolicyLoaderStarting{ .provider_count = self.provider_states.len }); - self.loadAllProviders(); - } - - /// Wait for the initial load to complete. - /// Call this after `startAsync()` if you need to block until ready. - pub fn waitForInitialLoad(self: *PolicyLoader) void { - while (!self.initial_load_complete.load(.acquire)) { - std.Thread.sleep(10 * std.time.ns_per_ms); - } - } - - /// Check if initial load is complete (non-blocking). - pub fn isReady(self: *PolicyLoader) bool { - return self.initial_load_complete.load(.acquire); - } - - /// Get the number of successfully loaded providers. - pub fn getLoadedCount(self: *PolicyLoader) usize { - var count: usize = 0; - for (self.provider_states) |state| { - if (state.loaded.load(.acquire) and state.load_error == null) { - count += 1; - } - } - return count; - } - - /// Get the number of providers that failed to load. - pub fn getFailedCount(self: *PolicyLoader) usize { - var count: usize = 0; - for (self.provider_states) |state| { - if (state.load_error != null) { - count += 1; - } - } - return count; - } - - /// Shutdown all providers and clean up resources. 
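-    /// Safe to call while the background load thread is still running: the
-    /// shutdown flag is set and the thread is joined before any provider
-    /// state is freed.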
- pub fn deinit(self: *PolicyLoader) void { - // Signal shutdown - self.shutdown_flag.store(true, .release); - - // Wait for load thread to finish - if (self.load_thread) |thread| { - thread.join(); - self.load_thread = null; - } - - // Deinit all providers - for (self.provider_states) |*state| { - if (state.provider) |provider| { - provider.deinit(); - } - if (state.load_error) |err| { - self.allocator.free(err); - } - } - - self.allocator.free(self.provider_states); - self.allocator.destroy(self); - } - - // ========================================================================= - // Private Implementation - // ========================================================================= - - fn loadProvidersThread(self: *PolicyLoader) void { - self.loadAllProviders(); - } - - fn loadAllProviders(self: *PolicyLoader) void { - var loaded_count: usize = 0; - var failed_count: usize = 0; - - for (self.provider_states) |*state| { - if (self.shutdown_flag.load(.acquire)) break; - - self.loadProvider(state) catch |err| { - const err_str = self.allocator.dupe(u8, @errorName(err)) catch "allocation_failed"; - state.load_error = err_str; - self.bus.err(ProviderLoadFailed{ - .provider_id = state.config.id, - .err = err_str, - }); - failed_count += 1; - continue; - }; - - loaded_count += 1; - } - - self.initial_load_complete.store(true, .release); - self.bus.info(PolicyLoaderReady{ - .loaded_count = loaded_count, - .failed_count = failed_count, - }); - } - - fn loadProvider(self: *PolicyLoader, state: *ProviderState) !void { - const config = state.config; - const provider_type_str = switch (config.type) { - .file => "file", - .http => "http", - }; - - self.bus.debug(ProviderLoadStarted{ - .provider_id = config.id, - .provider_type = provider_type_str, - }); - - switch (config.type) { - .file => { - const path = config.path orelse return error.FileProviderRequiresPath; - self.bus.info(FileProviderConfigured{ .path = path }); - - const file_provider = try FileProvider.init( - self.allocator, - self.bus, - config.id, - path, - ); - errdefer file_provider.deinit(); - - // Set up callback context - state.callback_context = .{ - .registry = self.registry, - .bus = self.bus, - .source_type = .file, - }; - - const callback = PolicyCallback{ - .context = @ptrCast(&state.callback_context.?), - .onUpdate = CallbackContext.handleUpdate, - }; - - try file_provider.subscribe(callback); - - // Store provider interface - state.provider = Provider.init(file_provider); - try self.registry.registerProvider(&state.provider.?); - state.loaded.store(true, .release); - - self.bus.debug(ProviderLoadCompleted{ - .provider_id = config.id, - .policy_count = self.registry.getPolicyCount(), - }); - }, - .http => { - const url = config.url orelse return error.HttpProviderRequiresUrl; - const poll_interval = config.poll_interval orelse 60; - self.bus.info(HttpProviderConfigured{ .url = url, .poll_interval = poll_interval }); - - const http_provider = try HttpProvider.init( - self.allocator, - self.bus, - config.id, - url, - poll_interval, - self.service, - config.headers, - ); - errdefer http_provider.deinit(); - - // Set up callback context - state.callback_context = .{ - .registry = self.registry, - .bus = self.bus, - .source_type = .http, - }; - - const callback = PolicyCallback{ - .context = @ptrCast(&state.callback_context.?), - .onUpdate = CallbackContext.handleUpdate, - }; - - try http_provider.subscribe(callback); - - // Store provider interface - state.provider = Provider.init(http_provider); - try 
self.registry.registerProvider(&state.provider.?); - state.loaded.store(true, .release); - - self.bus.debug(ProviderLoadCompleted{ - .provider_id = config.id, - .policy_count = self.registry.getPolicyCount(), - }); - }, - } - } -}; - -// ============================================================================= -// Tests -// ============================================================================= - -test "PolicyLoader: init and deinit" { - const allocator = std.testing.allocator; - - var stdio_bus: o11y.StdioEventBus = undefined; - stdio_bus.init(); - const bus = stdio_bus.eventBus(); - - var registry = Registry.init(allocator, bus); - defer registry.deinit(); - - const configs = [_]ProviderConfig{}; - - var loader = try PolicyLoader.init( - allocator, - bus, - ®istry, - &configs, - .{ - .namespace = "test", - .name = "test-service", - .instance_id = "test-instance", - .version = "1.0.0", - }, - ); - defer loader.deinit(); - - try std.testing.expect(!loader.isReady()); - try std.testing.expectEqual(@as(usize, 0), loader.getLoadedCount()); -} diff --git a/src/policy/log_transform.zig b/src/policy/log_transform.zig deleted file mode 100644 index 1dd3f66..0000000 --- a/src/policy/log_transform.zig +++ /dev/null @@ -1,688 +0,0 @@ -const std = @import("std"); -const proto = @import("proto"); -const types = @import("./types.zig"); - -const LogTransform = proto.policy.LogTransform; -const LogRemove = proto.policy.LogRemove; -const LogRedact = proto.policy.LogRedact; -const LogRename = proto.policy.LogRename; -const LogAdd = proto.policy.LogAdd; - -// Re-export types for convenience -pub const FieldRef = types.FieldRef; -pub const LogFieldAccessor = types.LogFieldAccessor; -pub const LogFieldMutator = types.LogFieldMutator; -pub const MutateOp = types.MutateOp; -pub const TransformResult = types.TransformResult; - -/// Apply all transforms from a LogTransform in order: remove → redact → rename → add -pub fn applyTransforms( - transform: *const LogTransform, - ctx: *anyopaque, - accessor: LogFieldAccessor, - mutator: LogFieldMutator, -) TransformResult { - var result = TransformResult{}; - - // 1. Remove - result.removes_attempted = transform.remove.items.len; - for (transform.remove.items) |*rule| { - if (applyRemove(rule, ctx, mutator)) { - result.removes_applied += 1; - } - } - - // 2. Redact - result.redacts_attempted = transform.redact.items.len; - for (transform.redact.items) |*rule| { - if (applyRedact(rule, ctx, accessor, mutator)) { - result.redacts_applied += 1; - } - } - - // 3. Rename - result.renames_attempted = transform.rename.items.len; - for (transform.rename.items) |*rule| { - if (applyRename(rule, ctx, accessor, mutator)) { - result.renames_applied += 1; - } - } - - // 4. 
Add - result.adds_attempted = transform.add.items.len; - for (transform.add.items) |*rule| { - if (applyAdd(rule, ctx, accessor, mutator)) { - result.adds_applied += 1; - } - } - - return result; -} - -/// Apply a single remove rule -/// Returns true if the field was removed -pub fn applyRemove( - rule: *const LogRemove, - ctx: *anyopaque, - mutator: LogFieldMutator, -) bool { - const field_ref = FieldRef.fromRemoveField(rule.field) orelse return false; - return mutator(ctx, .{ .remove = field_ref }); -} - -/// Apply a single redact rule -/// Replaces the field value with the replacement string -/// Returns true if the field was redacted -pub fn applyRedact( - rule: *const LogRedact, - ctx: *anyopaque, - accessor: LogFieldAccessor, - mutator: LogFieldMutator, -) bool { - const field_ref = FieldRef.fromRedactField(rule.field) orelse return false; - - // Only redact if the field exists - if (accessor(ctx, field_ref) == null) return false; - - // Replace the value with the replacement string - return mutator(ctx, .{ - .set = .{ - .field = field_ref, - .value = rule.replacement, - .upsert = false, // Must exist to redact - }, - }); -} - -/// Apply a single rename rule -/// Moves the value from one field to another -/// Returns true if the field was renamed -pub fn applyRename( - rule: *const LogRename, - ctx: *anyopaque, - accessor: LogFieldAccessor, - mutator: LogFieldMutator, -) bool { - const from_ref = FieldRef.fromRenameFrom(rule.from) orelse return false; - - // Check the source value exists - if (accessor(ctx, from_ref) == null) { - // Source doesn't exist - nothing to rename - return false; - } - - // Perform the rename operation - return mutator(ctx, .{ - .rename = .{ - .from = from_ref, - .to = rule.to, - .upsert = rule.upsert, - }, - }); -} - -/// Apply a single add rule -/// Inserts a field with the given value -/// If upsert is false, only adds if field doesn't exist -/// Returns true if the field was added/updated -pub fn applyAdd( - rule: *const LogAdd, - ctx: *anyopaque, - accessor: LogFieldAccessor, - mutator: LogFieldMutator, -) bool { - const field_ref = FieldRef.fromAddField(rule.field) orelse return false; - - // Check if field already exists - const exists = accessor(ctx, field_ref) != null; - - // If not upsert and field exists, don't overwrite - if (!rule.upsert and exists) return false; - - return mutator(ctx, .{ - .set = .{ - .field = field_ref, - .value = rule.value, - .upsert = rule.upsert, - }, - }); -} - -// ============================================================================= -// Tests -// ============================================================================= - -const testing = std.testing; - -/// Test context that holds a simple key-value store -const TestContext = struct { - fields: std.StringHashMap([]const u8), - allocator: std.mem.Allocator, - - fn init(allocator: std.mem.Allocator) TestContext { - return .{ - .fields = std.StringHashMap([]const u8).init(allocator), - .allocator = allocator, - }; - } - - fn deinit(self: *TestContext) void { - var it = self.fields.iterator(); - while (it.next()) |entry| { - self.allocator.free(entry.key_ptr.*); - self.allocator.free(entry.value_ptr.*); - } - self.fields.deinit(); - } - - fn set(self: *TestContext, key: []const u8, value: []const u8) !void { - const value_copy = try self.allocator.dupe(u8, value); - errdefer self.allocator.free(value_copy); - - const gop = try self.fields.getOrPut(key); - if (gop.found_existing) { - // Key already exists - just update the value - 
self.allocator.free(gop.value_ptr.*);
-            gop.value_ptr.* = value_copy;
-        } else {
-            // New key - need to dupe it
-            gop.key_ptr.* = try self.allocator.dupe(u8, key);
-            gop.value_ptr.* = value_copy;
-        }
-    }
-
-    /// Get first path segment for flat test storage
-    fn getFirstPathSegment(path: []const []const u8) ?[]const u8 {
-        if (path.len == 0) return null;
-        return path[0];
-    }
-
-    fn fieldAccessor(ctx: *const anyopaque, field: FieldRef) ?[]const u8 {
-        const self: *const TestContext = @ptrCast(@alignCast(ctx));
-        const key: ?[]const u8 = switch (field) {
-            .log_field => |f| @tagName(f),
-            .log_attribute => |p| getFirstPathSegment(p.path.items),
-            .resource_attribute => |p| getFirstPathSegment(p.path.items),
-            .scope_attribute => |p| getFirstPathSegment(p.path.items),
-        };
-        return if (key) |k| self.fields.get(k) else null;
-    }
-
-    fn fieldMutator(ctx: *anyopaque, op: MutateOp) bool {
-        const self: *TestContext = @ptrCast(@alignCast(ctx));
-        switch (op) {
-            .remove => |field| {
-                const key: ?[]const u8 = switch (field) {
-                    .log_field => |f| @tagName(f),
-                    .log_attribute => |p| getFirstPathSegment(p.path.items),
-                    .resource_attribute => |p| getFirstPathSegment(p.path.items),
-                    .scope_attribute => |p| getFirstPathSegment(p.path.items),
-                };
-                const k = key orelse return false;
-                if (self.fields.fetchRemove(k)) |removed| {
-                    self.allocator.free(removed.key);
-                    self.allocator.free(removed.value);
-                    return true;
-                }
-                return false;
-            },
-            .set => |s| {
-                const key: ?[]const u8 = switch (s.field) {
-                    .log_field => |f| @tagName(f),
-                    .log_attribute => |p| getFirstPathSegment(p.path.items),
-                    .resource_attribute => |p| getFirstPathSegment(p.path.items),
-                    .scope_attribute => |p| getFirstPathSegment(p.path.items),
-                };
-                const k = key orelse return false;
-                const exists = self.fields.contains(k);
-                if (!s.upsert and !exists) return false;
-
-                self.set(k, s.value) catch return false;
-                return true;
-            },
-            .rename => |r| {
-                const from_key: ?[]const u8 = switch (r.from) {
-                    .log_field => |f| @tagName(f),
-                    .log_attribute => |p| getFirstPathSegment(p.path.items),
-                    .resource_attribute => |p| getFirstPathSegment(p.path.items),
-                    .scope_attribute => |p| getFirstPathSegment(p.path.items),
-                };
-                const fk = from_key orelse return false;
-
-                // Get and remove the source value; we own removed.key and
-                // removed.value until the rename either succeeds or is undone
-                const removed = self.fields.fetchRemove(fk) orelse return false;
-
-                // Check if target exists
-                const target_exists = self.fields.contains(r.to);
-                if (!r.upsert and target_exists) {
-                    // Can't rename: put the entry back; the map re-owns key and value
-                    self.fields.put(removed.key, removed.value) catch {};
-                    return false;
-                }
-
-                // Set the new field (set() dupes the value)
-                self.set(r.to, removed.value) catch {
-                    // Put the entry back on failure; the map re-owns key and value
-                    self.fields.put(removed.key, removed.value) catch {};
-                    return false;
-                };
-                // Rename succeeded: the moved-out copies are no longer referenced
-                self.allocator.free(removed.key);
-                self.allocator.free(removed.value);
-                return true;
-            },
-        }
-    }
-};
-
-/// Helper to create AttributePath for tests
-fn testAttrPath(comptime key: []const u8) proto.policy.AttributePath {
-    return .{ .path = .{ .items = @constCast(&[_][]const u8{key}), .capacity = 1 } };
-}
-
-test "applyRemove: removes existing field" {
-    const allocator = testing.allocator;
-    var ctx = TestContext.init(allocator);
-    defer ctx.deinit();
-
-    try ctx.set("service", "payment-api");
-
-    var rule = LogRemove{
-        .field = .{ .log_attribute = testAttrPath("service") },
-    };
-
-    const result = applyRemove(&rule, @ptrCast(&ctx), TestContext.fieldMutator);
-    try testing.expect(result);
-    try testing.expect(ctx.fields.get("service") == null);
-}
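-
-// For reference, a production caller wires applyTransforms the same way the
-// TestContext above does. A minimal sketch (`pipeline` and `MyPipeline` are
-// hypothetical stand-ins for a real accessor/mutator implementation):
-//
-//     const result = applyTransforms(&transform, @ptrCast(&pipeline),
-//         MyPipeline.fieldAccessor, MyPipeline.fieldMutator);
-//     if (result.totalApplied() < result.totalAttempted()) {
-//         // at least one rule missed; per-operation counts are on `result`
-//     }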
- -test "applyRemove: returns false for non-existent field" { - const allocator = testing.allocator; - var ctx = TestContext.init(allocator); - defer ctx.deinit(); - - var rule = LogRemove{ - .field = .{ .log_attribute = testAttrPath("nonexistent") }, - }; - - const result = applyRemove(&rule, @ptrCast(&ctx), TestContext.fieldMutator); - try testing.expect(!result); -} - -test "TestContext: set and update" { - const allocator = testing.allocator; - var ctx = TestContext.init(allocator); - defer ctx.deinit(); - - try ctx.set("key", "value1"); - try testing.expectEqualStrings("value1", ctx.fields.get("key").?); - - try ctx.set("key", "value2"); - try testing.expectEqualStrings("value2", ctx.fields.get("key").?); -} - -test "applyRedact: replaces field value" { - const allocator = testing.allocator; - var ctx = TestContext.init(allocator); - defer ctx.deinit(); - - try ctx.set("password", "secret123"); - try testing.expectEqualStrings("secret123", ctx.fields.get("password").?); - - var rule = LogRedact{ - .field = .{ .log_attribute = testAttrPath("password") }, - .replacement = "[REDACTED]", - }; - - const result = applyRedact(&rule, @ptrCast(&ctx), TestContext.fieldAccessor, TestContext.fieldMutator); - try testing.expect(result); - try testing.expectEqualStrings("[REDACTED]", ctx.fields.get("password").?); -} - -test "applyRedact: returns false for non-existent field" { - const allocator = testing.allocator; - var ctx = TestContext.init(allocator); - defer ctx.deinit(); - - var rule = LogRedact{ - .field = .{ .log_attribute = testAttrPath("nonexistent") }, - .replacement = "[REDACTED]", - }; - - const result = applyRedact(&rule, @ptrCast(&ctx), TestContext.fieldAccessor, TestContext.fieldMutator); - try testing.expect(!result); -} - -test "applyRename: renames existing field" { - const allocator = testing.allocator; - var ctx = TestContext.init(allocator); - defer ctx.deinit(); - - try ctx.set("old_name", "value123"); - - var rule = LogRename{ - .from = .{ .from_log_attribute = testAttrPath("old_name") }, - .to = "new_name", - .upsert = true, - }; - - const result = applyRename(&rule, @ptrCast(&ctx), TestContext.fieldAccessor, TestContext.fieldMutator); - try testing.expect(result); - try testing.expect(ctx.fields.get("old_name") == null); - try testing.expectEqualStrings("value123", ctx.fields.get("new_name").?); -} - -test "applyRename: returns false for non-existent source" { - const allocator = testing.allocator; - var ctx = TestContext.init(allocator); - defer ctx.deinit(); - - var rule = LogRename{ - .from = .{ .from_log_attribute = testAttrPath("nonexistent") }, - .to = "new_name", - .upsert = true, - }; - - const result = applyRename(&rule, @ptrCast(&ctx), TestContext.fieldAccessor, TestContext.fieldMutator); - try testing.expect(!result); -} - -test "applyAdd: adds new field" { - const allocator = testing.allocator; - var ctx = TestContext.init(allocator); - defer ctx.deinit(); - - var rule = LogAdd{ - .field = .{ .log_attribute = testAttrPath("new_field") }, - .value = "new_value", - .upsert = true, - }; - - const result = applyAdd(&rule, @ptrCast(&ctx), TestContext.fieldAccessor, TestContext.fieldMutator); - try testing.expect(result); - try testing.expectEqualStrings("new_value", ctx.fields.get("new_field").?); -} - -test "applyAdd: upsert=false skips existing field" { - const allocator = testing.allocator; - var ctx = TestContext.init(allocator); - defer ctx.deinit(); - - try ctx.set("existing", "original"); - - var rule = LogAdd{ - .field = .{ .log_attribute = 
testAttrPath("existing") }, - .value = "new_value", - .upsert = false, - }; - - const result = applyAdd(&rule, @ptrCast(&ctx), TestContext.fieldAccessor, TestContext.fieldMutator); - try testing.expect(!result); - try testing.expectEqualStrings("original", ctx.fields.get("existing").?); -} - -test "applyAdd: upsert=true overwrites existing field" { - const allocator = testing.allocator; - var ctx = TestContext.init(allocator); - defer ctx.deinit(); - - try ctx.set("existing", "original"); - - var rule = LogAdd{ - .field = .{ .log_attribute = testAttrPath("existing") }, - .value = "new_value", - .upsert = true, - }; - - const result = applyAdd(&rule, @ptrCast(&ctx), TestContext.fieldAccessor, TestContext.fieldMutator); - try testing.expect(result); - try testing.expectEqualStrings("new_value", ctx.fields.get("existing").?); -} - -test "applyTransforms: applies in correct order" { - const allocator = testing.allocator; - var ctx = TestContext.init(allocator); - defer ctx.deinit(); - - // Setup initial state - try ctx.set("to_remove", "value1"); - try ctx.set("to_redact", "sensitive"); - try ctx.set("to_rename", "rename_me"); - - // Build transform with all operation types - var transform = LogTransform{}; - - // Remove - try transform.remove.append(allocator, .{ - .field = .{ .log_attribute = testAttrPath("to_remove") }, - }); - - // Redact - try transform.redact.append(allocator, .{ - .field = .{ .log_attribute = testAttrPath("to_redact") }, - .replacement = "[HIDDEN]", - }); - - // Rename - try transform.rename.append(allocator, .{ - .from = .{ .from_log_attribute = testAttrPath("to_rename") }, - .to = "renamed", - .upsert = true, - }); - - // Add - try transform.add.append(allocator, .{ - .field = .{ .log_attribute = testAttrPath("added") }, - .value = "new_value", - .upsert = true, - }); - - defer transform.remove.deinit(allocator); - defer transform.redact.deinit(allocator); - defer transform.rename.deinit(allocator); - defer transform.add.deinit(allocator); - - const result = applyTransforms( - &transform, - @ptrCast(&ctx), - TestContext.fieldAccessor, - TestContext.fieldMutator, - ); - - // Verify attempted counts - try testing.expectEqual(@as(usize, 1), result.removes_attempted); - try testing.expectEqual(@as(usize, 1), result.redacts_attempted); - try testing.expectEqual(@as(usize, 1), result.renames_attempted); - try testing.expectEqual(@as(usize, 1), result.adds_attempted); - try testing.expectEqual(@as(usize, 4), result.totalAttempted()); - - // Verify applied counts - try testing.expectEqual(@as(usize, 1), result.removes_applied); - try testing.expectEqual(@as(usize, 1), result.redacts_applied); - try testing.expectEqual(@as(usize, 1), result.renames_applied); - try testing.expectEqual(@as(usize, 1), result.adds_applied); - try testing.expectEqual(@as(usize, 4), result.totalApplied()); - - // Verify final state - try testing.expect(ctx.fields.get("to_remove") == null); - try testing.expectEqualStrings("[HIDDEN]", ctx.fields.get("to_redact").?); - try testing.expect(ctx.fields.get("to_rename") == null); - try testing.expectEqualStrings("rename_me", ctx.fields.get("renamed").?); - try testing.expectEqualStrings("new_value", ctx.fields.get("added").?); -} - -test "applyTransforms: counts attempted vs applied when some operations fail" { - const allocator = testing.allocator; - var ctx = TestContext.init(allocator); - defer ctx.deinit(); - - // Setup: only some fields exist - try ctx.set("exists1", "value1"); - try ctx.set("exists2", "value2"); - try ctx.set("existing_field", 
"original"); - - var transform = LogTransform{}; - - // 3 removes: 2 exist, 1 doesn't - try transform.remove.append(allocator, .{ .field = .{ .log_attribute = testAttrPath("exists1") } }); - try transform.remove.append(allocator, .{ .field = .{ .log_attribute = testAttrPath("missing1") } }); - try transform.remove.append(allocator, .{ .field = .{ .log_attribute = testAttrPath("exists2") } }); - - // 2 redacts: 1 exists, 1 doesn't - try transform.redact.append(allocator, .{ - .field = .{ .log_attribute = testAttrPath("missing2") }, - .replacement = "[REDACTED]", - }); - try transform.redact.append(allocator, .{ - .field = .{ .log_attribute = testAttrPath("existing_field") }, - .replacement = "[REDACTED]", - }); - - // 2 renames: 1 source exists, 1 doesn't - try transform.rename.append(allocator, .{ - .from = .{ .from_log_attribute = testAttrPath("existing_field") }, - .to = "renamed_field", - .upsert = true, - }); - try transform.rename.append(allocator, .{ - .from = .{ .from_log_attribute = testAttrPath("missing3") }, - .to = "wont_exist", - .upsert = true, - }); - - // 3 adds: 2 with upsert=true succeed, 1 with upsert=false on existing field fails - try transform.add.append(allocator, .{ - .field = .{ .log_attribute = testAttrPath("new1") }, - .value = "added1", - .upsert = true, - }); - try transform.add.append(allocator, .{ - .field = .{ .log_attribute = testAttrPath("renamed_field") }, // Will exist after rename - .value = "should_not_overwrite", - .upsert = false, // Won't overwrite existing - }); - try transform.add.append(allocator, .{ - .field = .{ .log_attribute = testAttrPath("new2") }, - .value = "added2", - .upsert = true, - }); - - defer transform.remove.deinit(allocator); - defer transform.redact.deinit(allocator); - defer transform.rename.deinit(allocator); - defer transform.add.deinit(allocator); - - const result = applyTransforms( - &transform, - @ptrCast(&ctx), - TestContext.fieldAccessor, - TestContext.fieldMutator, - ); - - // Verify attempted counts (total rules defined) - try testing.expectEqual(@as(usize, 3), result.removes_attempted); - try testing.expectEqual(@as(usize, 2), result.redacts_attempted); - try testing.expectEqual(@as(usize, 2), result.renames_attempted); - try testing.expectEqual(@as(usize, 3), result.adds_attempted); - - // Verify applied counts (only successful operations) - try testing.expectEqual(@as(usize, 2), result.removes_applied); // exists1, exists2 - try testing.expectEqual(@as(usize, 1), result.redacts_applied); // existing_field - try testing.expectEqual(@as(usize, 1), result.renames_applied); // existing_field -> renamed_field - try testing.expectEqual(@as(usize, 2), result.adds_applied); // new1, new2 (not renamed_field due to upsert=false) - - // Verify misses can be computed - try testing.expectEqual(@as(usize, 1), result.removes_attempted - result.removes_applied); - try testing.expectEqual(@as(usize, 1), result.redacts_attempted - result.redacts_applied); - try testing.expectEqual(@as(usize, 1), result.renames_attempted - result.renames_applied); - try testing.expectEqual(@as(usize, 1), result.adds_attempted - result.adds_applied); -} - -test "applyTransforms: empty transform returns zero counts" { - const allocator = testing.allocator; - var ctx = TestContext.init(allocator); - defer ctx.deinit(); - - try ctx.set("field", "value"); - - const transform = LogTransform{}; - - const result = applyTransforms( - &transform, - @ptrCast(&ctx), - TestContext.fieldAccessor, - TestContext.fieldMutator, - ); - - try 
testing.expectEqual(@as(usize, 0), result.removes_attempted);
-    try testing.expectEqual(@as(usize, 0), result.redacts_attempted);
-    try testing.expectEqual(@as(usize, 0), result.renames_attempted);
-    try testing.expectEqual(@as(usize, 0), result.adds_attempted);
-    try testing.expectEqual(@as(usize, 0), result.totalAttempted());
-    try testing.expectEqual(@as(usize, 0), result.totalApplied());
-
-    // Field should be unchanged
-    try testing.expectEqualStrings("value", ctx.fields.get("field").?);
-}
-
-test "applyTransforms: all operations fail returns zero applied" {
-    const allocator = testing.allocator;
-    var ctx = TestContext.init(allocator);
-    defer ctx.deinit();
-
-    // Don't set any fields - all operations will fail
-
-    var transform = LogTransform{};
-
-    // Remove non-existent field
-    try transform.remove.append(allocator, .{ .field = .{ .log_attribute = testAttrPath("missing") } });
-
-    // Redact non-existent field
-    try transform.redact.append(allocator, .{
-        .field = .{ .log_attribute = testAttrPath("missing") },
-        .replacement = "[REDACTED]",
-    });
-
-    // Rename non-existent field
-    try transform.rename.append(allocator, .{
-        .from = .{ .from_log_attribute = testAttrPath("missing") },
-        .to = "new_name",
-        .upsert = true,
-    });
-
-    // Add with upsert=false to a non-existent field actually succeeds (it's an insert),
-    // so instead test add with upsert=false against a field that already exists.
-    try ctx.set("blocker", "blocks_add");
-    try transform.add.append(allocator, .{
-        .field = .{ .log_attribute = testAttrPath("blocker") },
-        .value = "wont_work",
-        .upsert = false,
-    });
-
-    defer transform.remove.deinit(allocator);
-    defer transform.redact.deinit(allocator);
-    defer transform.rename.deinit(allocator);
-    defer transform.add.deinit(allocator);
-
-    const result = applyTransforms(
-        &transform,
-        &ctx,
-        TestContext.fieldAccessor,
-        TestContext.fieldMutator,
-    );
-
-    // All attempted
-    try testing.expectEqual(@as(usize, 1), result.removes_attempted);
-    try testing.expectEqual(@as(usize, 1), result.redacts_attempted);
-    try testing.expectEqual(@as(usize, 1), result.renames_attempted);
-    try testing.expectEqual(@as(usize, 1), result.adds_attempted);
-
-    // None applied
-    try testing.expectEqual(@as(usize, 0), result.removes_applied);
-    try testing.expectEqual(@as(usize, 0), result.redacts_applied);
-    try testing.expectEqual(@as(usize, 0), result.renames_applied);
-    try testing.expectEqual(@as(usize, 0), result.adds_applied);
-    try testing.expectEqual(@as(usize, 0), result.totalApplied());
-
-    // Blocker field unchanged
-    try testing.expectEqualStrings("blocks_add", ctx.fields.get("blocker").?);
-}
diff --git a/src/policy/matcher_index.zig b/src/policy/matcher_index.zig
deleted file mode 100644
index 9e73e31..0000000
--- a/src/policy/matcher_index.zig
+++ /dev/null
@@ -1,2131 +0,0 @@
-//! Matcher Index - Inverted index for efficient policy matching
-//!
-//! This module compiles policies into Hyperscan databases indexed by MatcherKey.
-//! At evaluation time, we scan each field value against its corresponding database
-//! and aggregate matches to determine which policies fully match.
-//!
-//! ## Architecture
-//!
-//! 1. **LogMatcherIndex / MetricMatcherIndex**: Type-specific indices for each telemetry type
-//! 2. **MatcherDatabase**: Compiled Hyperscan DBs for one MatcherKey (positive + negated)
-//! 3. **IndexBuilder(T)**: Generic builder for constructing type-specific indices
-//!
-//! ## Performance Optimizations
-//!
-//! - **Compile-time dispatch**: No runtime telemetry type filtering
-//! - **Numeric policy indices**: O(1) array lookups instead of string hash lookups
-//! - **Separate positive/negated databases**: Clean separation, no per-pattern negate flag
-//! - **Dense policy array**: Cache-friendly iteration over matched policies
-
-const std = @import("std");
-const proto = @import("proto");
-const hyperscan = @import("../hyperscan/hyperscan.zig");
-const policy_types = @import("./types.zig");
-const sampler_mod = @import("./sampler.zig");
-const rate_limiter_mod = @import("./rate_limiter.zig");
-const o11y = @import("../observability/root.zig");
-const EventBus = o11y.EventBus;
-const NoopEventBus = o11y.NoopEventBus;
-
-const Sampler = sampler_mod.Sampler;
-const RateLimiter = rate_limiter_mod.RateLimiter;
-
-const FieldRef = policy_types.FieldRef;
-const MetricFieldRef = policy_types.MetricFieldRef;
-const TraceFieldRef = policy_types.TraceFieldRef;
-pub const TelemetryType = policy_types.TelemetryType;
-
-const Policy = proto.policy.Policy;
-const LogMatcher = proto.policy.LogMatcher;
-const MatchType = LogMatcher._match_case;
-const LogTarget = proto.policy.LogTarget;
-const LogField = proto.policy.LogField;
-const MetricMatcher = proto.policy.MetricMatcher;
-const MetricTarget = proto.policy.MetricTarget;
-const MetricField = proto.policy.MetricField;
-const TraceMatcher = proto.policy.TraceMatcher;
-const TraceTarget = proto.policy.TraceTarget;
-const TraceField = proto.policy.TraceField;
-const AttributePath = proto.policy.AttributePath;
-const LogSampleKey = proto.policy.LogSampleKey;
-
-// =============================================================================
-// Observability Events
-// =============================================================================
-
-const MatcherIndexBuildStarted = struct { policy_count: usize, telemetry_type: TelemetryType };
-const MatcherIndexBuildCompleted = struct { database_count: usize, matcher_key_count: usize, policy_count: usize };
-const ScanMatched = struct { pattern_count: usize, value_len: usize, value_preview: []const u8, is_negated: bool };
-const ScanMatchDetail = struct { pattern_id: u32, policy_index: PolicyIndex };
-const ScanError = struct { err: []const u8 };
-const ProcessingPolicy = struct { id: []const u8, name: []const u8, enabled: bool, index: PolicyIndex, telemetry_type: TelemetryType };
-const SkippingPolicyWrongType = struct { id: []const u8 };
-const PolicyMatcherCount = struct { id: []const u8, matcher_count: usize };
-const MatcherNullField = struct { matcher_idx: usize };
-const MatcherNullMatch = struct { matcher_idx: usize };
-const MatcherEmptyRegex = struct { matcher_idx: usize };
-const MatcherDetail = struct { matcher_idx: usize, regex: []const u8, negate: bool };
-const PolicyStored = struct { id: []const u8, index: PolicyIndex, required_matches: u16, negated_count: u16 };
-
-// =============================================================================
-// Policy Index - Numeric identifier for O(1) lookups
-// =============================================================================
-
-/// Numeric policy index for efficient array-based lookups at runtime.
-pub const PolicyIndex = u16;
-
-/// Maximum number of policies supported
-pub const MAX_POLICIES: usize = 8192;
-
-// =============================================================================
-// MatcherKey Types - Separate types for log, metric, and trace
-// =============================================================================
-
-/// Key for indexing Hyperscan databases for log policies.
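-///
-/// A minimal usage sketch (illustrative, mirroring this file's own tests): two
-/// matchers that target the same field produce equal keys, so their patterns are
-/// compiled into one shared database:
-///
-///     const a = LogMatcherKey{ .field = .{ .log_field = .LOG_FIELD_BODY } };
-///     const b = LogMatcherKey{ .field = .{ .log_field = .LOG_FIELD_BODY } };
-///     std.debug.assert(a.eql(b) and a.hash() == b.hash());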
-pub const LogMatcherKey = struct {
-    field: FieldRef,
-
-    const Self = @This();
-
-    pub fn hash(self: Self) u64 {
-        return hashFieldRef(FieldRef, self.field);
-    }
-
-    pub fn eql(a: Self, b: Self) bool {
-        return eqlFieldRef(FieldRef, a.field, b.field);
-    }
-};
-
-/// Key for indexing Hyperscan databases for metric policies.
-pub const MetricMatcherKey = struct {
-    field: MetricFieldRef,
-
-    const Self = @This();
-
-    pub fn hash(self: Self) u64 {
-        return hashFieldRef(MetricFieldRef, self.field);
-    }
-
-    pub fn eql(a: Self, b: Self) bool {
-        return eqlFieldRef(MetricFieldRef, a.field, b.field);
-    }
-};
-
-/// Generic hash implementation for field refs
-fn hashFieldRef(comptime FieldRefT: type, field: FieldRefT) u64 {
-    var h = std.hash.Wyhash.init(0);
-    switch (field) {
-        inline else => |val, tag| {
-            h.update(std.mem.asBytes(&tag));
-            const T = @TypeOf(val);
-            if (T == AttributePath) {
-                // Hash AttributePath: each path segment plus separator
-                for (val.path.items) |segment| {
-                    h.update(segment);
-                    h.update(&[_]u8{0}); // null separator between segments
-                }
-            } else if (T == []const []const u8) {
-                // Hash path array: each segment plus separator
-                for (val) |segment| {
-                    h.update(segment);
-                    h.update(&[_]u8{0}); // null separator between segments
-                }
-            } else if (T == []const u8) {
-                h.update(val);
-            } else {
-                h.update(std.mem.asBytes(&val));
-            }
-        },
-    }
-    return h.final();
-}
-
-/// Generic equality implementation for field refs
-fn eqlFieldRef(comptime FieldRefT: type, a: FieldRefT, b: FieldRefT) bool {
-    const tag_a = std.meta.activeTag(a);
-    const tag_b = std.meta.activeTag(b);
-    if (tag_a != tag_b) return false;
-
-    switch (a) {
-        inline else => |val_a, tag| {
-            const val_b = @field(b, @tagName(tag));
-            const T = @TypeOf(val_a);
-            if (T == AttributePath) {
-                // Compare AttributePath: same length and all segments equal
-                const path_a = val_a.path.items;
-                const path_b = val_b.path.items;
-                if (path_a.len != path_b.len) return false;
-                for (path_a, path_b) |seg_a, seg_b| {
-                    if (!std.mem.eql(u8, seg_a, seg_b)) return false;
-                }
-                return true;
-            } else if (T == []const []const u8) {
-                // Compare path arrays: same length and all segments equal
-                if (val_a.len != val_b.len) return false;
-                for (val_a, val_b) |seg_a, seg_b| {
-                    if (!std.mem.eql(u8, seg_a, seg_b)) return false;
-                }
-                return true;
-            } else if (T == []const u8) {
-                return std.mem.eql(u8, val_a, val_b);
-            } else {
-                return val_a == val_b;
-            }
-        },
-    }
-}
-
-/// Hash context for LogMatcherKey in hash maps
-pub const LogMatcherKeyContext = struct {
-    pub fn hash(_: LogMatcherKeyContext, key: LogMatcherKey) u64 {
-        return key.hash();
-    }
-    pub fn eql(_: LogMatcherKeyContext, a: LogMatcherKey, b: LogMatcherKey) bool {
-        return a.eql(b);
-    }
-};
-
-/// Hash context for MetricMatcherKey in hash maps
-pub const MetricMatcherKeyContext = struct {
-    pub fn hash(_: MetricMatcherKeyContext, key: MetricMatcherKey) u64 {
-        return key.hash();
-    }
-    pub fn eql(_: MetricMatcherKeyContext, a: MetricMatcherKey, b: MetricMatcherKey) bool {
-        return a.eql(b);
-    }
-};
-
-/// Key for indexing Hyperscan databases for trace policies.
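-///
-/// Attribute-path keys hash and compare segment by segment; a minimal sketch
-/// (mirroring the nested-path tests below):
-///
-///     // {"http", "method"} and {"http", "status"} are distinct keys,
-///     // so their patterns are compiled into distinct databases.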
-pub const TraceMatcherKey = struct {
-    field: TraceFieldRef,
-
-    const Self = @This();
-
-    pub fn hash(self: Self) u64 {
-        return hashFieldRef(TraceFieldRef, self.field);
-    }
-
-    pub fn eql(a: Self, b: Self) bool {
-        return eqlFieldRef(TraceFieldRef, a.field, b.field);
-    }
-};
-
-/// Hash context for TraceMatcherKey in hash maps
-pub const TraceMatcherKeyContext = struct {
-    pub fn hash(_: TraceMatcherKeyContext, key: TraceMatcherKey) u64 {
-        return key.hash();
-    }
-    pub fn eql(_: TraceMatcherKeyContext, a: TraceMatcherKey, b: TraceMatcherKey) bool {
-        return a.eql(b);
-    }
-};
-
-// =============================================================================
-// KeepValue - Parsed keep configuration
-// =============================================================================
-
-/// Parsed keep value from policy.
-/// Priority order (most restrictive first): none > rate_limit > percentage > all
-pub const KeepValue = union(enum) {
-    all,
-    none,
-    percentage: u8,
-    per_second: u32,
-    per_minute: u32,
-
-    pub fn parse(s: []const u8) KeepValue {
-        if (s.len == 0 or std.mem.eql(u8, s, "all")) return .all;
-        if (std.mem.eql(u8, s, "none")) return .none;
-
-        if (s.len >= 2 and s[s.len - 1] == '%') {
-            const pct = std.fmt.parseInt(u8, s[0 .. s.len - 1], 10) catch return .all;
-            if (pct > 100) return .all;
-            return .{ .percentage = pct };
-        }
-
-        if (s.len >= 3 and s[s.len - 2] == '/') {
-            const rate = std.fmt.parseInt(u32, s[0 .. s.len - 2], 10) catch return .all;
-            return switch (s[s.len - 1]) {
-                's' => .{ .per_second = rate },
-                'm' => .{ .per_minute = rate },
-                else => .all,
-            };
-        }
-        return .all;
-    }
-
-    pub fn isMoreRestrictiveThan(self: KeepValue, other: KeepValue) bool {
-        const self_rank = self.restrictiveness();
-        const other_rank = other.restrictiveness();
-        if (self_rank != other_rank) return self_rank < other_rank;
-        return switch (self) {
-            .percentage => |p| switch (other) {
-                .percentage => |op| p < op,
-                else => false,
-            },
-            else => false,
-        };
-    }
-
-    fn restrictiveness(self: KeepValue) u8 {
-        // Lower rank = more restrictive
-        // none: drop everything (most restrictive)
-        // rate limit: keep up to N per time unit
-        // percentage: keep N% of data
-        // all: keep everything (least restrictive)
-        return switch (self) {
-            .none => 0,
-            .per_second, .per_minute => 1,
-            .percentage => 2,
-            .all => 3,
-        };
-    }
-};
-
-// =============================================================================
-// PolicyInfo - Policy metadata for match aggregation
-// =============================================================================
-
-/// Policy information needed for match aggregation and action determination.
-/// No telemetry_type field - that's implicit in the index type.
-pub const PolicyInfo = struct {
-    id: []const u8,
-    index: PolicyIndex,
-    required_match_count: u16,
-    negated_count: u16,
-    keep: KeepValue,
-    enabled: bool,
-    /// Rate limiter for per_second/per_minute policies. Null for other keep types.
-    /// Pointer because RateLimiter contains atomics that need stable addresses.
-    rate_limiter: ?*RateLimiter,
-    /// Sample key for deterministic log sampling. When set, the specified field's
-    /// value is hashed for consistent sampling decisions (e.g., same trace_id always
-    /// gets same decision). Only applicable for log policies with percentage keep.
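-    ///
-    /// A minimal sketch of how this combines with `keep`: `KeepValue.parse("10%")`
-    /// yields `.{ .percentage = 10 }`, and with a trace_id sample key the same 10%
-    /// of trace ids is kept consistently, rather than an independent coin flip per
-    /// record.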
-    sample_key: ?LogSampleKey = null,
-};
-
-// =============================================================================
-// PatternMeta - Metadata for each pattern in a database
-// =============================================================================
-
-const PatternMeta = struct {
-    policy_index: PolicyIndex,
-};
-
-const PatternCollector = struct {
-    policy_index: PolicyIndex,
-    pattern: []const u8,
-    match_type: MatchType = .regex,
-    case_insensitive: bool = false,
-};
-
-// =============================================================================
-// ScanResult - Result of scanning a value
-// =============================================================================
-
-pub const ScanResult = struct {
-    count: usize,
-    buf: []u32,
-
-    pub fn matches(self: ScanResult) []const u32 {
-        return self.buf[0..self.count];
-    }
-};
-
-// =============================================================================
-// MatcherDatabase - Compiled Hyperscan DBs for one MatcherKey
-// =============================================================================
-
-pub const MatcherDatabase = struct {
-    positive_db: ?hyperscan.Database,
-    negated_db: ?hyperscan.Database,
-    scratch: ?hyperscan.Scratch,
-    mutex: std.Thread.Mutex,
-    positive_patterns: []const PatternMeta,
-    negated_patterns: []const PatternMeta,
-    allocator: std.mem.Allocator,
-    bus: *EventBus,
-
-    const Self = @This();
-
-    pub fn scanPositive(self: *Self, value: []const u8, result_buf: []u32) ScanResult {
-        return self.scanDb(self.positive_db, self.positive_patterns, value, result_buf, false);
-    }
-
-    pub fn scanNegated(self: *Self, value: []const u8, result_buf: []u32) ScanResult {
-        return self.scanDb(self.negated_db, self.negated_patterns, value, result_buf, true);
-    }
-
-    fn scanDb(self: *Self, db: ?hyperscan.Database, patterns: []const PatternMeta, value: []const u8, result_buf: []u32, is_negated: bool) ScanResult {
-        const database = db orelse return ScanResult{ .count = 0, .buf = result_buf };
-        const scratch = &(self.scratch orelse return ScanResult{ .count = 0, .buf = result_buf });
-
-        self.mutex.lock();
-        defer self.mutex.unlock();
-
-        var result = ScanResult{ .count = 0, .buf = result_buf };
-        _ = database.scanWithCallback(scratch, value, &result, scanCallback) catch |err| {
-            self.bus.warn(ScanError{ .err = @errorName(err) });
-            return result;
-        };
-
-        if (result.count > 0) {
-            self.bus.debug(ScanMatched{
-                .pattern_count = result.count,
-                .value_len = value.len,
-                .value_preview = if (value.len > 100) value[0..100] else value,
-                .is_negated = is_negated,
-            });
-            for (result.matches()) |pattern_id| {
-                if (pattern_id < patterns.len) {
-                    self.bus.debug(ScanMatchDetail{ .pattern_id = pattern_id, .policy_index = patterns[pattern_id].policy_index });
-                }
-            }
-        }
-        return result;
-    }
-
-    fn scanCallback(ctx: *ScanResult, match: hyperscan.Match) bool {
-        if (ctx.count < ctx.buf.len) {
-            // Deduplicate: only store each pattern ID once
-            // (Hyperscan calls back multiple times for different match positions)
-            for (ctx.buf[0..ctx.count]) |existing_id| {
-                if (existing_id == match.id) {
-                    return true; // Already recorded, skip
-                }
-            }
-            ctx.buf[ctx.count] = match.id;
-            ctx.count += 1;
-            return true;
-        }
-        return false;
-    }
-
-    pub fn deinit(self: *Self) void {
-        if (self.scratch) |*s| s.deinit();
-        if (self.positive_db) |*db| db.deinit();
-        if (self.negated_db) |*db| db.deinit();
-        self.allocator.free(self.positive_patterns);
-        self.allocator.free(self.negated_patterns);
-    }
-};
-
-// =============================================================================
-// Comptime Type Helpers
-// =============================================================================
-
-/// Returns the MatcherKey type for a given telemetry type
-pub fn MatcherKeyType(comptime T: TelemetryType) type {
-    return switch (T) {
-        .log => LogMatcherKey,
-        .metric => MetricMatcherKey,
-        .trace => TraceMatcherKey,
-    };
-}
-
-/// Returns the FieldRef type for a given telemetry type
-pub fn FieldRefType(comptime T: TelemetryType) type {
-    return switch (T) {
-        .log => FieldRef,
-        .metric => MetricFieldRef,
-        .trace => TraceFieldRef,
-    };
-}
-
-/// Returns the Matcher type for a given telemetry type
-fn MatcherType(comptime T: TelemetryType) type {
-    return switch (T) {
-        .log => LogMatcher,
-        .metric => MetricMatcher,
-        .trace => TraceMatcher,
-    };
-}
-
-/// Returns the Target type for a given telemetry type
-fn TargetType(comptime T: TelemetryType) type {
-    return switch (T) {
-        .log => LogTarget,
-        .metric => MetricTarget,
-        .trace => TraceTarget,
-    };
-}
-
-/// Returns the HashContext type for a given telemetry type
-fn HashContextType(comptime T: TelemetryType) type {
-    return switch (T) {
-        .log => LogMatcherKeyContext,
-        .metric => MetricMatcherKeyContext,
-        .trace => TraceMatcherKeyContext,
-    };
-}
-
-/// Returns the MatcherIndex type for a given telemetry type
-pub fn MatcherIndexType(comptime T: TelemetryType) type {
-    return switch (T) {
-        .log => LogMatcherIndex,
-        .metric => MetricMatcherIndex,
-        .trace => TraceMatcherIndex,
-    };
-}
-
-// =============================================================================
-// PatternsPerKey - Collected patterns before compilation
-// =============================================================================
-
-const PatternsPerKey = struct {
-    positive: std.ArrayListUnmanaged(PatternCollector),
-    negated: std.ArrayListUnmanaged(PatternCollector),
-};
-
-// =============================================================================
-// IndexBuilder - Generic builder for type-specific indices
-// =============================================================================
-
-fn IndexBuilder(comptime T: TelemetryType) type {
-    const MatcherKeyT = MatcherKeyType(T);
-    const FieldRefT = FieldRefType(T);
-    const MatcherT = MatcherType(T);
-    const TargetT = TargetType(T);
-    const HashContextT = HashContextType(T);
-    const IndexT = MatcherIndexType(T);
-
-    return struct {
-        allocator: std.mem.Allocator,
-        temp_allocator: std.mem.Allocator,
-        bus: *EventBus,
-        patterns_by_key: std.HashMap(MatcherKeyT, PatternsPerKey, HashContextT, std.hash_map.default_max_load_percentage),
-        policy_info_list: std.ArrayListUnmanaged(PolicyInfo),
-        path_storage: std.ArrayListUnmanaged([]const []const u8),
-        policy_id_storage: std.ArrayListUnmanaged([]const u8),
-        policy_index: PolicyIndex,
-        current_positive_count: u16,
-        current_negated_count: u16,
-
-        const Self = @This();
-
-        fn init(allocator: std.mem.Allocator, temp_allocator: std.mem.Allocator, bus: *EventBus) Self {
-            return .{
-                .allocator = allocator,
-                .temp_allocator = temp_allocator,
-                .bus = bus,
-                .patterns_by_key = std.HashMap(MatcherKeyT, PatternsPerKey, HashContextT, std.hash_map.default_max_load_percentage).init(temp_allocator),
-                .policy_info_list = .{},
-                .path_storage = .{},
-                .policy_id_storage = .{},
-                .policy_index = 0,
-                .current_positive_count = 0,
-                .current_negated_count = 0,
-            };
-        }
-
-        fn processPolicy(self: *Self, policy: *const Policy) !void {
-            const target = getTarget(policy) orelse {
-                self.bus.debug(SkippingPolicyWrongType{ .id = policy.id });
-                return;
-            };
-
-            self.bus.debug(ProcessingPolicy{
-                .id = policy.id,
-                .name = policy.name,
-                .enabled = policy.enabled,
-                .index = self.policy_index,
-                .telemetry_type = T,
-            });
-
-            self.current_positive_count = 0;
-            self.current_negated_count = 0;
-
-            self.bus.debug(PolicyMatcherCount{ .id = policy.id, .matcher_count = target.match.items.len });
-
-            for (target.match.items, 0..) |matcher, matcher_idx| {
-                try self.processMatcher(&matcher, matcher_idx);
-            }
-
-            const keep_value = parseKeepValue(target);
-            try self.storePolicyInfo(policy, target, keep_value);
-        }
-
-        fn getTarget(policy: *const Policy) ?*const TargetT {
-            const target_ptr = &(policy.target orelse return null);
-            return switch (T) {
-                .log => switch (target_ptr.*) {
-                    .log => |*log| log,
-                    .metric, .trace => null,
-                },
-                .metric => switch (target_ptr.*) {
-                    .metric => |*metric| metric,
-                    .log, .trace => null,
-                },
-                .trace => switch (target_ptr.*) {
-                    .trace => |*trace| trace,
-                    .log, .metric => null,
-                },
-            };
-        }
-
-        fn parseKeepValue(target: *const TargetT) KeepValue {
-            return switch (T) {
-                .log => KeepValue.parse(target.keep),
-                .metric => if (target.keep) .all else .none,
-                .trace => blk: {
-                    // Trace uses TraceSamplingConfig with percentage (0-100)
-                    const keep_config = target.keep orelse break :blk .all;
-                    const percentage = keep_config.percentage;
-                    if (percentage >= 100.0) break :blk .all;
-                    if (percentage <= 0.0) break :blk .none;
-                    break :blk .{ .percentage = @intFromFloat(@min(100.0, @max(0.0, percentage))) };
-                },
-            };
-        }
-
-        fn processMatcher(self: *Self, matcher: *const MatcherT, matcher_idx: usize) !void {
-            const field_ref = getFieldRef(matcher) orelse {
-                self.bus.debug(MatcherNullField{ .matcher_idx = matcher_idx });
-                return;
-            };
-
-            const m = matcher.match orelse {
-                self.bus.debug(MatcherNullMatch{ .matcher_idx = matcher_idx });
-                return;
-            };
-
-            const pattern, const match_type, const negate = switch (m) {
-                .regex => |r| .{ r, MatchType.regex, matcher.negate },
-                .exact => |e| .{ e, MatchType.exact, matcher.negate },
-                // negate XOR !exists: `exists = false` behaves as a negated existence check
-                .exists => |exists| .{ "", MatchType.exists, matcher.negate != !exists },
-                .starts_with => |s| .{ s, MatchType.starts_with, matcher.negate },
-                .ends_with => |s| .{ s, MatchType.ends_with, matcher.negate },
-                .contains => |s| .{ s, MatchType.contains, matcher.negate },
-            };
-
-            if (pattern.len == 0 and match_type != .exists) {
-                self.bus.debug(MatcherEmptyRegex{ .matcher_idx = matcher_idx });
-                return;
-            }
-
-            self.bus.debug(MatcherDetail{
-                .matcher_idx = matcher_idx,
-                .regex = pattern,
-                .negate = negate,
-            });
-
-            const matcher_key = MatcherKeyT{ .field = field_ref };
-            try self.addPattern(matcher_key, .{
-                .pattern = pattern,
-                .match_type = match_type,
-                .case_insensitive = matcher.case_insensitive,
-            }, negate, field_ref);
-        }
-
-        fn getFieldRef(matcher: *const MatcherT) ?FieldRefT {
-            return switch (T) {
-                .log => FieldRef.fromMatcherField(matcher.field),
-                .metric => MetricFieldRef.fromMatcherField(matcher.field),
-                .trace => TraceFieldRef.fromMatcherField(matcher.field),
-            };
-        }
-
-        const PatternInfo = struct { pattern: []const u8, match_type: MatchType, case_insensitive: bool };
-
-        fn addPattern(self: *Self, key: MatcherKeyT, info: PatternInfo, negate: bool, field_ref: FieldRefT) !void {
-            if (negate) {
-                self.current_negated_count += 1;
-            } else {
-                self.current_positive_count += 1;
-            }
-
-            const gop = try self.patterns_by_key.getOrPut(key);
-            if (!gop.found_existing) {
-                try self.dupeKeyIfNeeded(gop.key_ptr, field_ref);
-                gop.value_ptr.* = .{ .positive = .{}, .negated = .{} };
-            }
-
-            const collector = PatternCollector{
-                .policy_index = self.policy_index,
-                .pattern = info.pattern,
-                .match_type = info.match_type,
-                .case_insensitive = info.case_insensitive,
-            };
-            if (negate) {
-                try gop.value_ptr.negated.append(self.temp_allocator, collector);
-            } else {
-                try gop.value_ptr.positive.append(self.temp_allocator, collector);
-            }
-        }
-
-        fn dupeKeyIfNeeded(self: *Self, key_ptr: *MatcherKeyT, field_ref: FieldRefT) !void {
-            const path = field_ref.getPath();
-            if (path.len == 0) return;
-
-            // Dupe each segment of the path
-            const path_copy = try self.allocator.alloc([]const u8, path.len);
-            errdefer self.allocator.free(path_copy);
-
-            for (path, 0..) |segment, i| {
-                path_copy[i] = try self.allocator.dupe(u8, segment);
-            }
-
-            try self.path_storage.append(self.allocator, path_copy);
-
-            // Update the key's field to point to the duped path
-            key_ptr.field = dupeFieldRef(FieldRefT, field_ref, path_copy);
-        }
-
-        fn dupeFieldRef(comptime FieldRefTT: type, field_ref: FieldRefTT, path_copy: []const []const u8) FieldRefTT {
-            // Create an AttributePath with the copied path segments
-            // Cast is safe because we own these allocations and they won't be mutated
-            const attr_path = AttributePath{ .path = .{ .items = @constCast(path_copy) } };
-
-            switch (T) {
-                .log => return switch (field_ref) {
-                    .log_attribute => .{ .log_attribute = attr_path },
-                    .resource_attribute => .{ .resource_attribute = attr_path },
-                    .scope_attribute => .{ .scope_attribute = attr_path },
-                    .log_field => field_ref,
-                },
-                .metric => return switch (field_ref) {
-                    .datapoint_attribute => .{ .datapoint_attribute = attr_path },
-                    .resource_attribute => .{ .resource_attribute = attr_path },
-                    .scope_attribute => .{ .scope_attribute = attr_path },
-                    .metric_field, .metric_type, .aggregation_temporality => field_ref,
-                },
-                .trace => return switch (field_ref) {
-                    .span_attribute => .{ .span_attribute = attr_path },
-                    .resource_attribute => .{ .resource_attribute = attr_path },
-                    .scope_attribute => .{ .scope_attribute = attr_path },
-                    .event_attribute => .{ .event_attribute = attr_path },
-                    // These fields use []const u8, not AttributePath
-                    .event_name => .{ .event_name = if (path_copy.len > 0) path_copy[0] else "" },
-                    .link_trace_id => .{ .link_trace_id = if (path_copy.len > 0) path_copy[0] else "" },
-                    .trace_field, .span_kind, .span_status => field_ref,
-                },
-            }
-        }
-
-        fn storePolicyInfo(self: *Self, policy: *const Policy, target: *const TargetT, keep: KeepValue) !void {
-            const policy_id_copy = try self.allocator.dupe(u8, policy.id);
-            try self.policy_id_storage.append(self.allocator, policy_id_copy);
-
-            // Create rate limiter for rate limit policies
-            const rate_limiter: ?*RateLimiter = switch (keep) {
-                .per_second => |limit| blk: {
-                    const rl = try self.allocator.create(RateLimiter);
-                    rl.* = RateLimiter.initPerSecond(limit);
-                    break :blk rl;
-                },
-                .per_minute => |limit| blk: {
-                    const rl = try self.allocator.create(RateLimiter);
-                    rl.* = RateLimiter.initPerMinute(limit);
-                    break :blk rl;
-                },
-                else => null,
-            };
-
-            // Extract sample_key for log policies
-            const sample_key: ?LogSampleKey = if (T == .log) target.sample_key else null;
-
-            try self.policy_info_list.append(self.temp_allocator, .{
-                .id = policy_id_copy,
-                .index = self.policy_index,
-                .required_match_count = self.current_positive_count + self.current_negated_count,
-                .negated_count = self.current_negated_count,
-                .keep = keep,
-                .enabled = policy.enabled,
-                .rate_limiter = rate_limiter,
-                .sample_key = sample_key,
-            });
-
-            self.bus.debug(PolicyStored{
-                .id = policy.id,
-                .index = self.policy_index,
-                .required_matches = self.current_positive_count,
-                .negated_count = self.current_negated_count,
-            });
-
-            self.policy_index += 1;
-        }
-
-        fn finish(self: *Self) !IndexT {
-            const policies = try self.allocator.dupe(PolicyInfo, self.policy_info_list.items);
-
-            var negation_indices = std.ArrayListUnmanaged(PolicyIndex){};
-            for (policies) |p| {
-                if (p.negated_count > 0) {
-                    try negation_indices.append(self.temp_allocator, p.index);
-                }
-            }
-            const policies_with_negation = try self.allocator.dupe(PolicyIndex, negation_indices.items);
-
-            var databases = std.HashMap(MatcherKeyT, *MatcherDatabase, HashContextT, std.hash_map.default_max_load_percentage).init(self.allocator);
-            var keys_list = std.ArrayListUnmanaged(MatcherKeyT){};
-
-            var key_it = self.patterns_by_key.iterator();
-            while (key_it.next()) |entry| {
-                const matcher_key = entry.key_ptr.*;
-                const patterns = entry.value_ptr.*;
-
-                if (patterns.positive.items.len == 0 and patterns.negated.items.len == 0) continue;
-
-                const db = try compileDatabase(self.allocator, self.bus, patterns.positive.items, patterns.negated.items);
-                try databases.put(matcher_key, db);
-                try keys_list.append(self.temp_allocator, matcher_key);
-            }
-
-            const matcher_keys = try self.allocator.dupe(MatcherKeyT, keys_list.items);
-
-            return IndexT{
-                .allocator = self.allocator,
-                .databases = databases,
-                .policies = policies,
-                .policies_with_negation = policies_with_negation,
-                .matcher_keys = matcher_keys,
-                .path_storage = self.path_storage,
-                .policy_id_storage = self.policy_id_storage,
-                .bus = self.bus,
-            };
-        }
-    };
-}
-
-// =============================================================================
-// LogMatcherIndex - Index for log policies only
-// =============================================================================
-
-pub const LogMatcherIndex = struct {
-    allocator: std.mem.Allocator,
-    databases: std.HashMap(LogMatcherKey, *MatcherDatabase, LogMatcherKeyContext, std.hash_map.default_max_load_percentage),
-    policies: []PolicyInfo,
-    policies_with_negation: []PolicyIndex,
-    matcher_keys: []LogMatcherKey,
-    path_storage: std.ArrayListUnmanaged([]const []const u8),
-    policy_id_storage: std.ArrayListUnmanaged([]const u8),
-    bus: *EventBus,
-
-    const Self = @This();
-
-    pub fn build(allocator: std.mem.Allocator, bus: *EventBus, policies_slice: []const Policy) !Self {
-        var span = bus.started(.info, MatcherIndexBuildStarted{ .policy_count = policies_slice.len, .telemetry_type = .log });
-
-        if (policies_slice.len > MAX_POLICIES) {
-            return error.TooManyPolicies;
-        }
-
-        var arena = std.heap.ArenaAllocator.init(allocator);
-        defer arena.deinit();
-
-        var builder = IndexBuilder(.log).init(allocator, arena.allocator(), bus);
-
-        for (policies_slice) |*policy| {
-            try builder.processPolicy(policy);
-        }
-
-        var index = try builder.finish();
-
-        span.completed(MatcherIndexBuildCompleted{
-            .database_count = index.databases.count(),
-            .matcher_key_count = index.matcher_keys.len,
-            .policy_count = index.policies.len,
-        });
-
-        return index;
-    }
-
-    pub fn getDatabase(self: *const Self, key: LogMatcherKey) ?*MatcherDatabase {
-        return self.databases.get(key);
-    }
-
-    pub fn getPolicyByIndex(self: *const Self, index: PolicyIndex) ?PolicyInfo {
-        if (index >= self.policies.len) return null;
-        return self.policies[index];
-    }
-
-    pub fn getPolicy(self: *const Self, id: []const u8) ?PolicyInfo {
-        for (self.policies) |info| {
-            if (std.mem.eql(u8, info.id, id)) return info;
-        }
-        return null;
-    }
-
-    pub fn getMatcherKeys(self: *const Self) []const LogMatcherKey {
-        return self.matcher_keys;
-    }
-
-    pub fn getPolicies(self: *const Self) []const PolicyInfo {
-        return self.policies;
-    }
-
-    pub fn getPoliciesWithNegation(self: *const Self) []const PolicyIndex {
-        return self.policies_with_negation;
-    }
-
-    pub fn isEmpty(self: *const Self) bool {
-        return self.databases.count() == 0;
-    }
-
-    pub fn getDatabaseCount(self: *const Self) usize {
-        return self.databases.count();
-    }
-
-    pub fn getPolicyCount(self: *const Self) usize {
-        return self.policies.len;
-    }
-
-    pub fn deinit(self: *Self) void {
-        var db_it = self.databases.valueIterator();
-        while (db_it.next()) |db| {
-            db.*.deinit();
-            self.allocator.destroy(db.*);
-        }
-        self.databases.deinit();
-
-        // Free rate limiters
-        for (self.policies) |policy_info| {
-            if (policy_info.rate_limiter) |rl| {
-                self.allocator.destroy(rl);
-            }
-        }
-        self.allocator.free(self.policies);
-        self.allocator.free(self.policies_with_negation);
-        self.allocator.free(self.matcher_keys);
-
-        for (self.path_storage.items) |path| {
-            for (path) |segment| {
-                self.allocator.free(segment);
-            }
-            self.allocator.free(path);
-        }
-        self.path_storage.deinit(self.allocator);
-
-        for (self.policy_id_storage.items) |id| {
-            self.allocator.free(id);
-        }
-        self.policy_id_storage.deinit(self.allocator);
-    }
-};
-
-// =============================================================================
-// MetricMatcherIndex - Index for metric policies only
-// =============================================================================
-
-pub const MetricMatcherIndex = struct {
-    allocator: std.mem.Allocator,
-    databases: std.HashMap(MetricMatcherKey, *MatcherDatabase, MetricMatcherKeyContext, std.hash_map.default_max_load_percentage),
-    policies: []PolicyInfo,
-    policies_with_negation: []PolicyIndex,
-    matcher_keys: []MetricMatcherKey,
-    path_storage: std.ArrayListUnmanaged([]const []const u8),
-    policy_id_storage: std.ArrayListUnmanaged([]const u8),
-    bus: *EventBus,
-
-    const Self = @This();
-
-    pub fn build(allocator: std.mem.Allocator, bus: *EventBus, policies_slice: []const Policy) !Self {
-        var span = bus.started(.info, MatcherIndexBuildStarted{ .policy_count = policies_slice.len, .telemetry_type = .metric });
-
-        if (policies_slice.len > MAX_POLICIES) {
-            return error.TooManyPolicies;
-        }
-
-        var arena = std.heap.ArenaAllocator.init(allocator);
-        defer arena.deinit();
-
-        var builder = IndexBuilder(.metric).init(allocator, arena.allocator(), bus);
-
-        for (policies_slice) |*policy| {
-            try builder.processPolicy(policy);
-        }
-
-        var index = try builder.finish();
-
-        span.completed(MatcherIndexBuildCompleted{
-            .database_count = index.databases.count(),
-            .matcher_key_count = index.matcher_keys.len,
-            .policy_count = index.policies.len,
-        });
-
-        return index;
-    }
-
-    pub fn getDatabase(self: *const Self, key: MetricMatcherKey) ?*MatcherDatabase {
-        return self.databases.get(key);
-    }
-
-    pub fn getPolicyByIndex(self: *const Self, index: PolicyIndex) ?PolicyInfo {
-        if (index >= self.policies.len) return null;
-        return self.policies[index];
-    }
-
-    pub fn getPolicy(self: *const Self, id: []const u8) ?PolicyInfo {
-        for (self.policies) |info| {
-            if (std.mem.eql(u8, info.id, id)) return info;
-        }
-        return null;
-    }
-
-    pub fn getMatcherKeys(self: *const Self) []const MetricMatcherKey {
-        return self.matcher_keys;
-    }
-
-    pub fn getPolicies(self: *const Self) []const PolicyInfo {
-        return self.policies;
-    }
-
-    pub fn getPoliciesWithNegation(self: *const Self) []const PolicyIndex {
-        return self.policies_with_negation;
-    }
-
-    pub fn isEmpty(self: *const Self) bool {
-        return self.databases.count() == 0;
-    }
-
-    pub fn getDatabaseCount(self: *const Self) usize {
-        return self.databases.count();
-    }
-
-    pub fn getPolicyCount(self: *const Self) usize {
-        return self.policies.len;
-    }
-
-    pub fn deinit(self: *Self) void {
-        var db_it = self.databases.valueIterator();
-        while (db_it.next()) |db| {
-            db.*.deinit();
-            self.allocator.destroy(db.*);
-        }
-        self.databases.deinit();
-
-        // Free rate limiters
-        for (self.policies) |policy_info| {
-            if (policy_info.rate_limiter) |rl| {
-                self.allocator.destroy(rl);
-            }
-        }
-        self.allocator.free(self.policies);
-        self.allocator.free(self.policies_with_negation);
-        self.allocator.free(self.matcher_keys);
-
-        for (self.path_storage.items) |path| {
-            for (path) |segment| {
-                self.allocator.free(segment);
-            }
-            self.allocator.free(path);
-        }
-        self.path_storage.deinit(self.allocator);
-
-        for (self.policy_id_storage.items) |id| {
-            self.allocator.free(id);
-        }
-        self.policy_id_storage.deinit(self.allocator);
-    }
-};
-
-// =============================================================================
-// TraceMatcherIndex - Index for trace policies only (OTLP traces)
-// =============================================================================
-
-pub const TraceMatcherIndex = struct {
-    allocator: std.mem.Allocator,
-    databases: std.HashMap(TraceMatcherKey, *MatcherDatabase, TraceMatcherKeyContext, std.hash_map.default_max_load_percentage),
-    policies: []PolicyInfo,
-    policies_with_negation: []PolicyIndex,
-    matcher_keys: []TraceMatcherKey,
-    path_storage: std.ArrayListUnmanaged([]const []const u8),
-    policy_id_storage: std.ArrayListUnmanaged([]const u8),
-    bus: *EventBus,
-
-    const Self = @This();
-
-    pub fn build(allocator: std.mem.Allocator, bus: *EventBus, policies_slice: []const Policy) !Self {
-        var span = bus.started(.info, MatcherIndexBuildStarted{ .policy_count = policies_slice.len, .telemetry_type = .trace });
-
-        if (policies_slice.len > MAX_POLICIES) {
-            return error.TooManyPolicies;
-        }
-
-        var arena = std.heap.ArenaAllocator.init(allocator);
-        defer arena.deinit();
-
-        var builder = IndexBuilder(.trace).init(allocator, arena.allocator(), bus);
-
-        for (policies_slice) |*policy| {
-            try builder.processPolicy(policy);
-        }
-
-        var index = try builder.finish();
-
-        span.completed(MatcherIndexBuildCompleted{
-            .database_count = index.databases.count(),
-            .matcher_key_count = index.matcher_keys.len,
-            .policy_count = index.policies.len,
-        });
-
-        return index;
-    }
-
-    pub fn getDatabase(self: *const Self, key: TraceMatcherKey) ?*MatcherDatabase {
-        return self.databases.get(key);
-    }
-
-    pub fn getPolicyByIndex(self: *const Self, index: PolicyIndex) ?PolicyInfo {
-        if (index >= self.policies.len) return null;
-        return self.policies[index];
-    }
-
-    pub fn getPolicy(self: *const Self, id: []const u8) ?PolicyInfo {
-        for (self.policies) |info| {
-            if (std.mem.eql(u8, info.id, id)) return info;
-        }
-        return null;
-    }
-
-    pub fn getMatcherKeys(self: *const Self) []const TraceMatcherKey {
-        return self.matcher_keys;
-    }
-
-    pub fn getPolicies(self: *const Self) []const PolicyInfo {
-        return self.policies;
-    }
-
-    pub fn getPoliciesWithNegation(self: *const Self) []const PolicyIndex {
-        return self.policies_with_negation;
-    }
-
-    pub fn isEmpty(self: *const Self) bool {
-        return self.databases.count() == 0;
-    }
-
-    pub fn getDatabaseCount(self: *const Self) usize {
-        return self.databases.count();
-    }
-
-    pub fn getPolicyCount(self: *const Self) usize {
-        return self.policies.len;
-    }
-
-    pub fn deinit(self: *Self) void {
-        var db_it = self.databases.valueIterator();
-        while (db_it.next()) |db| {
-            db.*.deinit();
-            self.allocator.destroy(db.*);
-        }
-        self.databases.deinit();
-
-        // Free rate limiters
-        for (self.policies) |policy_info| {
-            if (policy_info.rate_limiter) |rl| {
-                self.allocator.destroy(rl);
-            }
-        }
-        self.allocator.free(self.policies);
-        self.allocator.free(self.policies_with_negation);
-        self.allocator.free(self.matcher_keys);
-
-        for (self.path_storage.items) |path| {
-            for (path) |segment| {
-                self.allocator.free(segment);
-            }
-            self.allocator.free(path);
-        }
-        self.path_storage.deinit(self.allocator);
-
-        for (self.policy_id_storage.items) |id| {
-            self.allocator.free(id);
-        }
-        self.policy_id_storage.deinit(self.allocator);
-    }
-};
-
-// =============================================================================
-// Database Compilation
-// =============================================================================
-
-fn compileDatabase(
-    allocator: std.mem.Allocator,
-    bus: *EventBus,
-    positive_collectors: []const PatternCollector,
-    negated_collectors: []const PatternCollector,
-) !*MatcherDatabase {
-    var positive_db: ?hyperscan.Database = null;
-    var negated_db: ?hyperscan.Database = null;
-    var scratch: ?hyperscan.Scratch = null;
-
-    errdefer {
-        if (scratch) |*s| s.deinit();
-        if (positive_db) |*db| db.deinit();
-        if (negated_db) |*db| db.deinit();
-    }
-
-    var positive_patterns: []PatternMeta = &.{};
-    if (positive_collectors.len > 0) {
-        const result = try compilePatterns(allocator, positive_collectors);
-        positive_db = result.db;
-        positive_patterns = result.meta;
-    }
-
-    var negated_patterns: []PatternMeta = &.{};
-    if (negated_collectors.len > 0) {
-        const result = try compilePatterns(allocator, negated_collectors);
-        negated_db = result.db;
-        negated_patterns = result.meta;
-    }
-
-    if (positive_db) |*db| {
-        scratch = try hyperscan.Scratch.init(db);
-        if (negated_db) |*ndb| {
-            _ = try hyperscan.Scratch.init(ndb);
-        }
-    } else if (negated_db) |*db| {
-        scratch = try hyperscan.Scratch.init(db);
-    }
-
-    const matcher_db = try allocator.create(MatcherDatabase);
-    matcher_db.* = .{
-        .positive_db = positive_db,
-        .negated_db = negated_db,
-        .scratch = scratch,
-        .mutex = .{},
-        .positive_patterns = positive_patterns,
-        .negated_patterns = negated_patterns,
-        .allocator = allocator,
-        .bus = bus,
-    };
-
-    return matcher_db;
-}
-
-fn compilePatterns(allocator: std.mem.Allocator, collectors: []const PatternCollector) !struct { db: hyperscan.Database, meta: []PatternMeta } {
-    // Calculate buffer size: max len+2 per pattern (for anchors)
-    var buf_size: usize = 0;
-    for (collectors) |c| {
-        buf_size += switch (c.match_type) {
-            .regex, .exists, .contains => 0,
-            .exact => c.pattern.len + 2,
-            .starts_with, .ends_with => c.pattern.len + 1,
-        };
-    }
-
-    // Single allocation for hs_patterns + pattern buffer
-    const hs_size = collectors.len * @sizeOf(hyperscan.Pattern);
-    const temp = try allocator.alloc(u8, hs_size + buf_size);
-    defer allocator.free(temp);
-
-    const hs_patterns: []hyperscan.Pattern = @alignCast(std.mem.bytesAsSlice(hyperscan.Pattern, temp[0..hs_size]));
-    var buf = temp[hs_size..];
-
-    const meta = try allocator.alloc(PatternMeta, collectors.len);
-    errdefer allocator.free(meta);
-
-    for (collectors, 0..) |c, i| {
-        hs_patterns[i] = .{
-            .expression = formatPattern(&buf, c.pattern, c.match_type),
-            .id = @intCast(i),
-            .flags = .{ .caseless = c.case_insensitive, .single_match = true },
-        };
-        meta[i] = .{ .policy_index = c.policy_index };
-    }
-
-    const db = try hyperscan.Database.compileMulti(allocator, hs_patterns, .{});
-    return .{ .db = db, .meta = meta };
-}
-
-fn formatPattern(buf: *[]u8, pattern: []const u8, match_type: MatchType) []const u8 {
-    const anchor_start, const anchor_end = switch (match_type) {
-        .regex => return pattern,
-        .exists => return "^.+$",
-        .exact => .{ true, true },
-        .starts_with => .{ true, false },
-        .ends_with => .{ false, true },
-        .contains => return pattern,
-    };
-
-    const out = std.fmt.bufPrint(buf.*, "{s}{s}{s}", .{
-        if (anchor_start) "^" else "",
-        pattern,
-        if (anchor_end) "$" else "",
-    }) catch unreachable;
-
-    buf.* = buf.*[out.len..];
-    return out;
-}
-
-// =============================================================================
-// Tests
-// =============================================================================
-
-const testing = std.testing;
-
-test "LogMatcherKey: hash and equality" {
-    const key1 = LogMatcherKey{ .field = .{ .log_field = .LOG_FIELD_BODY } };
-    const key2 = LogMatcherKey{ .field = .{ .log_field = .LOG_FIELD_BODY } };
-    const key3 = LogMatcherKey{ .field = .{ .log_field = .LOG_FIELD_SEVERITY_TEXT } };
-    const key4 = LogMatcherKey{ .field = .{ .log_attribute = .{ .path = .{ .items = @constCast(&[_][]const u8{"service"}) } } } };
-    const key5 = LogMatcherKey{ .field = .{ .log_attribute = .{ .path = .{ .items = @constCast(&[_][]const u8{"service"}) } } } };
-    const key6 = LogMatcherKey{ .field = .{ .log_attribute = .{ .path = .{ .items = @constCast(&[_][]const u8{"env"}) } } } };
-
-    try testing.expect(key1.eql(key2));
-    try testing.expect(key4.eql(key5));
-    try testing.expect(!key1.eql(key3));
-    try testing.expect(!key4.eql(key6));
-    try testing.expectEqual(key1.hash(), key2.hash());
-    try testing.expectEqual(key4.hash(), key5.hash());
-
-    // Test nested paths
-    const key7 = LogMatcherKey{ .field = .{ .log_attribute = .{ .path = .{ .items = @constCast(&[_][]const u8{ "http", "method" }) } } } };
-    const key8 = LogMatcherKey{ .field = .{ .log_attribute = .{ .path = .{ .items = @constCast(&[_][]const u8{ "http", "method" }) } } } };
-    const key9 = LogMatcherKey{ .field = .{ .log_attribute = .{ .path = .{ .items = @constCast(&[_][]const u8{ "http", "status" }) } } } };
-
-    try testing.expect(key7.eql(key8));
-    try testing.expect(!key7.eql(key9));
-    try testing.expectEqual(key7.hash(), key8.hash());
-}
-
-test "MetricMatcherKey: hash and equality" {
-    const key1 = MetricMatcherKey{ .field = .{ .metric_field = .METRIC_FIELD_NAME } };
-    const key2 = MetricMatcherKey{ .field = .{ .metric_field = .METRIC_FIELD_NAME } };
-    const key3 = MetricMatcherKey{ .field = .{ .metric_field = .METRIC_FIELD_UNIT } };
-    const key4 = MetricMatcherKey{ .field = .{ .datapoint_attribute = .{ .path = .{ .items = @constCast(&[_][]const u8{"status"}) } } } };
-    const key5 = MetricMatcherKey{ .field = .{ .datapoint_attribute = .{ .path = .{ .items = @constCast(&[_][]const u8{"status"}) } } } };
-    const key6 = MetricMatcherKey{ .field = .{ .datapoint_attribute = .{ .path = .{ .items = @constCast(&[_][]const u8{"env"}) } } } };
-
-    try testing.expect(key1.eql(key2));
-    try testing.expect(key4.eql(key5));
-    try testing.expect(!key1.eql(key3));
-    try testing.expect(!key4.eql(key6));
-    try testing.expectEqual(key1.hash(), key2.hash());
-    try testing.expectEqual(key4.hash(), key5.hash());
-}
-
-test "FieldRef: isKeyed" {
-    const log_field = FieldRef{ .log_field = .LOG_FIELD_BODY };
-    const log_attr = FieldRef{ .log_attribute = .{ .path = .{ .items = @constCast(&[_][]const u8{"service"}) } } };
-    const resource_attr = FieldRef{ .resource_attribute = .{ .path = .{ .items = @constCast(&[_][]const u8{"env"}) } } };
-
-    try testing.expect(!log_field.isKeyed());
-    try testing.expect(log_attr.isKeyed());
-    try testing.expect(resource_attr.isKeyed());
-}
-
-test "KeepValue: parse" {
-    try testing.expectEqual(KeepValue.all, KeepValue.parse(""));
-    try testing.expectEqual(KeepValue.all, KeepValue.parse("all"));
-    try testing.expectEqual(KeepValue.none, KeepValue.parse("none"));
-    try testing.expectEqual(KeepValue{ .percentage = 50 }, KeepValue.parse("50%"));
-    try testing.expectEqual(KeepValue{ .per_second = 100 }, KeepValue.parse("100/s"));
-    try testing.expectEqual(KeepValue{ .per_minute = 1000 }, KeepValue.parse("1000/m"));
-}
-
-test "KeepValue: restrictiveness comparison" {
-    const all: KeepValue = .all;
-    const none: KeepValue = .none;
-    const pct50: KeepValue = .{ .percentage = 50 };
-    const pct25: KeepValue = .{ .percentage = 25 };
-    const rate: KeepValue = .{ .per_second = 100 };
-
-    try testing.expect(none.isMoreRestrictiveThan(all));
-    try testing.expect(none.isMoreRestrictiveThan(pct50));
-    try testing.expect(pct50.isMoreRestrictiveThan(all));
-    try testing.expect(pct25.isMoreRestrictiveThan(pct50));
-    try testing.expect(!all.isMoreRestrictiveThan(rate));
-}
-
-test "LogMatcherIndex: build empty" {
-    const allocator = testing.allocator;
-    var noop_bus: NoopEventBus = undefined;
-    noop_bus.init();
-
-    var index = try LogMatcherIndex.build(allocator, noop_bus.eventBus(), &.{});
-    defer index.deinit();
-
-    try testing.expect(index.isEmpty());
-    try testing.expectEqual(@as(usize, 0), index.getPolicyCount());
-}
-
-test "MetricMatcherIndex: build empty" {
-    const allocator = testing.allocator;
-    var noop_bus: NoopEventBus = undefined;
-    noop_bus.init();
-
-    var index = try MetricMatcherIndex.build(allocator, noop_bus.eventBus(), &.{});
-    defer index.deinit();
-
-    try testing.expect(index.isEmpty());
-    try testing.expectEqual(@as(usize, 0), index.getPolicyCount());
-}
-
-test "LogMatcherIndex: build with single policy" {
-    const allocator = testing.allocator;
-    var noop_bus: NoopEventBus = undefined;
-    noop_bus.init();
-
-    var policy = Policy{
-        .id = try allocator.dupe(u8, "policy-1"),
-        .name = try allocator.dupe(u8, "test-policy"),
-        .enabled = true,
-        .target = .{ .log = .{
-            .keep = try allocator.dupe(u8, "none"),
-        } },
-    };
-    try policy.target.?.log.match.append(allocator, .{
-        .field = .{ .log_field = .LOG_FIELD_BODY },
-        .match = .{ .regex = try allocator.dupe(u8, "error") },
-    });
-    defer policy.deinit(allocator);
-
-    var index = try LogMatcherIndex.build(allocator, noop_bus.eventBus(), &.{policy});
-    defer index.deinit();
-
-    try testing.expect(!index.isEmpty());
-    try testing.expectEqual(@as(usize, 1), index.getDatabaseCount());
-    try testing.expectEqual(@as(usize, 1), index.getPolicyCount());
-
-    const policy_info = index.getPolicyByIndex(0);
-    try testing.expect(policy_info != null);
-    try testing.expectEqual(KeepValue.none, policy_info.?.keep);
-    try testing.expectEqual(@as(u16, 1), policy_info.?.required_match_count);
-
-    const policy_info_by_id = index.getPolicy("policy-1");
-    try testing.expect(policy_info_by_id != null);
-    try testing.expectEqualStrings("policy-1", policy_info_by_id.?.id);
-}
-
-test "MetricMatcherIndex: build with single policy" {
-    const allocator = testing.allocator;
-    var noop_bus: NoopEventBus = undefined;
-    noop_bus.init();
-
-    var policy = Policy{
-        .id = try allocator.dupe(u8, "metric-policy-1"),
-        .name = try allocator.dupe(u8, "test-metric-policy"),
-        .enabled = true,
-        .target = .{ .metric = .{
-            .keep = false,
-        } },
-    };
-    try policy.target.?.metric.match.append(allocator, .{
-        .field = .{ .metric_field = .METRIC_FIELD_NAME },
-        .match = .{ .regex = try allocator.dupe(u8, "debug_.*") },
-    });
-    defer policy.deinit(allocator);
-
-    var index = try MetricMatcherIndex.build(allocator, noop_bus.eventBus(), &.{policy});
-    defer index.deinit();
-
-    try testing.expect(!index.isEmpty());
-    try testing.expectEqual(@as(usize, 1), index.getDatabaseCount());
-    try testing.expectEqual(@as(usize, 1), index.getPolicyCount());
-
-    const policy_info = index.getPolicyByIndex(0);
-    try testing.expect(policy_info != null);
-    try testing.expectEqual(KeepValue.none, policy_info.?.keep);
-}
-
-test "LogMatcherIndex: build with keyed matchers" {
-    const allocator = testing.allocator;
-    var noop_bus: NoopEventBus = undefined;
-    noop_bus.init();
-
-    var policy = Policy{
-        .id = try allocator.dupe(u8, "policy-1"),
-        .name = try allocator.dupe(u8, "test-policy"),
-        .enabled = true,
-        .target = .{ .log = .{
-            .keep = try allocator.dupe(u8, "all"),
-        } },
-    };
-    // Create AttributePath with "service" as single path segment
-    var attr_path = proto.policy.AttributePath{};
-    try attr_path.path.append(allocator, try allocator.dupe(u8, "service"));
-    try policy.target.?.log.match.append(allocator, .{
-        .field = .{ .log_attribute = attr_path },
-        .match = .{ .regex = try allocator.dupe(u8, "payment-api") },
-    });
-    defer policy.deinit(allocator);
-
-    var index = try LogMatcherIndex.build(allocator, noop_bus.eventBus(), &.{policy});
-    defer index.deinit();
-
-    try testing.expect(!index.isEmpty());
-
-    const expected_key = LogMatcherKey{ .field = .{ .log_attribute = .{ .path = .{ .items = @constCast(&[_][]const u8{"service"}) } } } };
-    const db = index.getDatabase(expected_key);
-    try testing.expect(db != null);
-}
-
-test "LogMatcherIndex: negated matcher creates negated database" {
-    const allocator = testing.allocator;
-    var noop_bus: NoopEventBus = undefined;
-    noop_bus.init();
-
-    var policy = Policy{
-        .id = try allocator.dupe(u8, "policy-1"),
-        .name = try allocator.dupe(u8, "test-policy"),
-        .enabled = true,
-        .target = .{ .log = .{
-            .keep = try allocator.dupe(u8, "none"),
-        } },
-    };
-    try policy.target.?.log.match.append(allocator, .{
-        .field = .{ .log_field = .LOG_FIELD_BODY },
-        .match = .{ .regex = try allocator.dupe(u8, "important") },
-        .negate = true,
-    });
-    defer policy.deinit(allocator);
-
-    var index = try LogMatcherIndex.build(allocator, noop_bus.eventBus(), &.{policy});
-    defer index.deinit();
-
-    try testing.expect(!index.isEmpty());
-
-    const expected_key = LogMatcherKey{ .field = .{ .log_field = .LOG_FIELD_BODY } };
-    const db = index.getDatabase(expected_key);
-    try testing.expect(db != null);
-    try testing.expect(db.?.negated_db != null);
-    try testing.expect(db.?.positive_db == null);
-}
-
-test "LogMatcherIndex: scan database" {
-    const allocator = testing.allocator;
-    var noop_bus: NoopEventBus = undefined;
-    noop_bus.init();
-
-    var policy = Policy{
-        .id = try allocator.dupe(u8, "policy-1"),
-        .name = try allocator.dupe(u8, "test-policy"),
-        .enabled = true,
-        .target = .{ .log = .{
-            .keep = try allocator.dupe(u8, "none"),
-        } },
-    };
-    try policy.target.?.log.match.append(allocator, .{
-        .field = .{ .log_field = .LOG_FIELD_BODY },
-        .match = .{ .regex = try allocator.dupe(u8, "error") },
-    });
-    defer policy.deinit(allocator);
-
-    var index = try LogMatcherIndex.build(allocator, noop_bus.eventBus(), &.{policy});
-    defer index.deinit();
-
-    const db = index.getDatabase(.{ .field = .{ .log_field = .LOG_FIELD_BODY } });
-    try testing.expect(db != null);
-
-    var result_buf: [256]u32 = undefined;
-
-    const match_result = db.?.scanPositive("an error occurred", &result_buf);
-    try testing.expectEqual(@as(usize, 1), match_result.count);
-    try testing.expectEqual(@as(u32, 0), match_result.matches()[0]);
-
-    const no_match_result = db.?.scanPositive("everything is fine", &result_buf);
-    try testing.expectEqual(@as(usize, 0), no_match_result.count);
-}
-
-test "LogMatcherIndex: exists=true matcher uses ^.+$ pattern" {
-    const allocator = testing.allocator;
-    var noop_bus: NoopEventBus = undefined;
-    noop_bus.init();
-
-    var policy = Policy{
-        .id = try allocator.dupe(u8, "policy-1"),
-        .name = try allocator.dupe(u8, "test-policy"),
-        .enabled = true,
-        .target = .{ .log = .{
-            .keep = try allocator.dupe(u8, "all"),
-        } },
-    };
-    // Create AttributePath with "trace_id" as single path segment
-    var attr_path = AttributePath{};
-    try attr_path.path.append(allocator, try allocator.dupe(u8, "trace_id"));
-    try policy.target.?.log.match.append(allocator, .{
-        .field = .{ .log_attribute = attr_path },
-        .match = .{ .exists = true },
-    });
-    defer policy.deinit(allocator);
-
-    var index = try LogMatcherIndex.build(allocator, noop_bus.eventBus(), &.{policy});
-    defer index.deinit();
-
-    // exists=true should create a database with ^.+$ pattern
-    try testing.expect(!index.isEmpty());
-    try testing.expectEqual(@as(usize, 1), index.getDatabaseCount());
-    try testing.expectEqual(@as(usize, 1), index.getPolicyCount());
-
-    // The database should match any non-empty string
-    const keys = index.getMatcherKeys();
-    try testing.expectEqual(@as(usize, 1), keys.len);
-
-    const db = index.getDatabase(keys[0]).?;
-    var result_buf: [MAX_POLICIES]u32 = undefined;
-    var result = db.scanPositive("some-trace-id-value", &result_buf);
-    try testing.expectEqual(@as(usize, 1), result.count);
-
-    // Empty string should not match
-    result = db.scanPositive("", &result_buf);
-    try testing.expectEqual(@as(usize, 0), result.count);
-}
-
-test "LogMatcherIndex: exists=false matcher creates negated pattern" {
-    const allocator = testing.allocator;
-    var noop_bus: NoopEventBus = undefined;
-    noop_bus.init();
-
-    var policy = Policy{
-        .id = try allocator.dupe(u8, "policy-1"),
-        .name = try allocator.dupe(u8, "test-policy"),
-        .enabled = true,
-        .target = .{ .log = .{
-            .keep = try allocator.dupe(u8, "all"),
-        } },
-    };
-    // Create AttributePath with "trace_id" as single path segment
-    var attr_path = AttributePath{};
-    try attr_path.path.append(allocator, try allocator.dupe(u8, "trace_id"));
-    try policy.target.?.log.match.append(allocator, .{
-        .field = .{ .log_attribute = attr_path },
-        .match = .{ .exists = false },
-    });
-    defer policy.deinit(allocator);
-
-    var index = try LogMatcherIndex.build(allocator, noop_bus.eventBus(), &.{policy});
-    defer index.deinit();
-
-    // exists=false should create a negated pattern entry
-    try testing.expect(!index.isEmpty());
-    try testing.expectEqual(@as(usize, 1), index.getPolicyCount());
-
-    // Verify the pattern is in the negated database
-    const key = LogMatcherKey{ .field = .{ .log_attribute = .{ .path = .{ .items = @constCast(&[_][]const u8{"trace_id"}) } } } };
-    const db = index.getDatabase(key);
-    try testing.expect(db != null);
-    try testing.expectEqual(@as(usize, 1), db.?.negated_patterns.len);
-    try testing.expectEqual(@as(usize, 0), db.?.positive_patterns.len);
-}
-
-test "MetricMatcherIndex: exists=true matcher uses ^.+$ pattern" {
-    const allocator = testing.allocator;
-    var noop_bus: NoopEventBus = undefined;
-    noop_bus.init();
-
-    var policy = Policy{
-        .id = try allocator.dupe(u8, "policy-1"),
-        .name = try allocator.dupe(u8, "test-policy"),
-        .enabled = true,
-        .target = .{ .metric = .{
-            .keep = false,
-        } },
-    };
-    // Create AttributePath with "service.name" as single path segment
-    var attr_path = AttributePath{};
-    try attr_path.path.append(allocator, try allocator.dupe(u8, "service.name"));
-    try policy.target.?.metric.match.append(allocator, .{
-        .field = .{ .resource_attribute = attr_path },
-        .match = .{ .exists = true },
-    });
-    defer policy.deinit(allocator);
-
-    var index = try MetricMatcherIndex.build(allocator, noop_bus.eventBus(), &.{policy});
-    defer index.deinit();
-
-    // exists=true should create a database with ^.+$ pattern
-    try testing.expect(!index.isEmpty());
-    try testing.expectEqual(@as(usize, 1), index.getDatabaseCount());
-    try testing.expectEqual(@as(usize, 1), index.getPolicyCount());
-
-    // The database should match any non-empty string
-    const keys = index.getMatcherKeys();
-    try testing.expectEqual(@as(usize, 1), keys.len);
-
-    const db = index.getDatabase(keys[0]).?;
-    var result_buf: [MAX_POLICIES]u32 = undefined;
-    var result = db.scanPositive("my-service", &result_buf);
-    try testing.expectEqual(@as(usize, 1), result.count);
-
-    // Empty string should not match
-    result = db.scanPositive("", &result_buf);
-    try testing.expectEqual(@as(usize, 0), result.count);
-}
-
-test "MetricMatcherIndex: metric_type field creates Hyperscan database" {
-    // metric_type is matched as a string via Hyperscan. The field accessor returns
-    // the type as a string (e.g., "gauge", "sum", "histogram") which is then matched
-    // against the regex pattern.
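-    //
-    // (For orientation, a sketch of the flow this exercises end-to-end: scanPositive
-    // returns the ids of matched patterns, each id maps back to its policy via
-    // PatternMeta.policy_index, and a policy matches once its distinct hits reach
-    // PolicyInfo.required_match_count.)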
- const allocator = testing.allocator; - var noop_bus: NoopEventBus = undefined; - noop_bus.init(); - - const MetricType = proto.policy.MetricType; - - var policy = Policy{ - .id = try allocator.dupe(u8, "policy-1"), - .name = try allocator.dupe(u8, "match-gauge-metrics"), - .enabled = true, - .target = .{ .metric = .{ - .keep = false, - } }, - }; - // Match on metric_type with exists=true (uses ^.+$ pattern) - // The proto field uses the enum value, but we only care that it's metric_type - try policy.target.?.metric.match.append(allocator, .{ - .field = .{ .metric_type = MetricType.METRIC_TYPE_GAUGE }, - .match = .{ .exists = true }, - }); - defer policy.deinit(allocator); - - var index = try MetricMatcherIndex.build(allocator, noop_bus.eventBus(), &.{policy}); - defer index.deinit(); - - // metric_type field should create a database with ^.+$ pattern - try testing.expect(!index.isEmpty()); - try testing.expectEqual(@as(usize, 1), index.getDatabaseCount()); - try testing.expectEqual(@as(usize, 1), index.getPolicyCount()); - - // The policy should have 1 required match - const policy_info = index.getPolicy("policy-1"); - try testing.expect(policy_info != null); - try testing.expectEqual(@as(u16, 1), policy_info.?.required_match_count); - - // The database should match metric type strings - const keys = index.getMatcherKeys(); - try testing.expectEqual(@as(usize, 1), keys.len); - - const db = index.getDatabase(keys[0]).?; - var result_buf: [MAX_POLICIES]u32 = undefined; - - // Should match any non-empty metric type string - var result = db.scanPositive("gauge", &result_buf); - try testing.expectEqual(@as(usize, 1), result.count); - - result = db.scanPositive("histogram", &result_buf); - try testing.expectEqual(@as(usize, 1), result.count); - - // Empty string should not match - result = db.scanPositive("", &result_buf); - try testing.expectEqual(@as(usize, 0), result.count); -} - -test "MetricMatcherIndex: metric_type with regex pattern" { - const allocator = testing.allocator; - var noop_bus: NoopEventBus = undefined; - noop_bus.init(); - - const MetricType = proto.policy.MetricType; - - var policy = Policy{ - .id = try allocator.dupe(u8, "policy-1"), - .name = try allocator.dupe(u8, "match-gauge-metrics"), - .enabled = true, - .target = .{ .metric = .{ - .keep = false, - } }, - }; - // Match on metric_type with regex pattern for "gauge" - try policy.target.?.metric.match.append(allocator, .{ - .field = .{ .metric_type = MetricType.METRIC_TYPE_GAUGE }, - .match = .{ .exact = try allocator.dupe(u8, "gauge") }, - }); - defer policy.deinit(allocator); - - var index = try MetricMatcherIndex.build(allocator, noop_bus.eventBus(), &.{policy}); - defer index.deinit(); - - try testing.expect(!index.isEmpty()); - - const keys = index.getMatcherKeys(); - const db = index.getDatabase(keys[0]).?; - var result_buf: [MAX_POLICIES]u32 = undefined; - - // Should match "gauge" - var result = db.scanPositive("gauge", &result_buf); - try testing.expectEqual(@as(usize, 1), result.count); - - // Should NOT match other types - result = db.scanPositive("histogram", &result_buf); - try testing.expectEqual(@as(usize, 0), result.count); - - result = db.scanPositive("sum", &result_buf); - try testing.expectEqual(@as(usize, 0), result.count); -} - -test "MetricMatcherIndex: aggregation_temporality field creates Hyperscan database" { - const allocator = testing.allocator; - var noop_bus: NoopEventBus = undefined; - noop_bus.init(); - - const AggregationTemporality = proto.policy.AggregationTemporality; - - var policy = 
- -test "MetricMatcherIndex: aggregation_temporality field creates Hyperscan database" { - const allocator = testing.allocator; - var noop_bus: NoopEventBus = undefined; - noop_bus.init(); - - const AggregationTemporality = proto.policy.AggregationTemporality; - - var policy = Policy{ - .id = try allocator.dupe(u8, "policy-1"), - .name = try allocator.dupe(u8, "match-delta-metrics"), - .enabled = true, - .target = .{ .metric = .{ - .keep = false, - } }, - }; - // Match on aggregation_temporality with an exact match for "delta" - try policy.target.?.metric.match.append(allocator, .{ - .field = .{ .aggregation_temporality = AggregationTemporality.AGGREGATION_TEMPORALITY_DELTA }, - .match = .{ .exact = try allocator.dupe(u8, "delta") }, - }); - defer policy.deinit(allocator); - - var index = try MetricMatcherIndex.build(allocator, noop_bus.eventBus(), &.{policy}); - defer index.deinit(); - - try testing.expect(!index.isEmpty()); - try testing.expectEqual(@as(usize, 1), index.getDatabaseCount()); - try testing.expectEqual(@as(usize, 1), index.getPolicyCount()); - - const policy_info = index.getPolicy("policy-1"); - try testing.expect(policy_info != null); - try testing.expectEqual(@as(u16, 1), policy_info.?.required_match_count); - - const keys = index.getMatcherKeys(); - const db = index.getDatabase(keys[0]).?; - var result_buf: [MAX_POLICIES]u32 = undefined; - - // Should match "delta" - var result = db.scanPositive("delta", &result_buf); - try testing.expectEqual(@as(usize, 1), result.count); - - // Should NOT match "cumulative" - result = db.scanPositive("cumulative", &result_buf); - try testing.expectEqual(@as(usize, 0), result.count); -} - -test "Mixed log and metric policies: each index only gets its type" { - const allocator = testing.allocator; - var noop_bus: NoopEventBus = undefined; - noop_bus.init(); - - var log_policy = Policy{ - .id = try allocator.dupe(u8, "log-policy-1"), - .name = try allocator.dupe(u8, "test-log"), - .enabled = true, - .target = .{ .log = .{ - .keep = try allocator.dupe(u8, "all"), - } }, - }; - try log_policy.target.?.log.match.append(allocator, .{ - .field = .{ .log_field = .LOG_FIELD_BODY }, - .match = .{ .regex = try allocator.dupe(u8, "error") }, - }); - defer log_policy.deinit(allocator); - - var metric_policy = Policy{ - .id = try allocator.dupe(u8, "metric-policy-1"), - .name = try allocator.dupe(u8, "test-metric"), - .enabled = true, - .target = .{ .metric = .{ - .keep = false, - } }, - }; - try metric_policy.target.?.metric.match.append(allocator, .{ - .field = .{ .metric_field = .METRIC_FIELD_NAME }, - .match = .{ .regex = try allocator.dupe(u8, "debug_.*") }, - }); - defer metric_policy.deinit(allocator); - - const policies = &[_]Policy{ log_policy, metric_policy }; - - // Log index should only have log policy - var log_index = try LogMatcherIndex.build(allocator, noop_bus.eventBus(), policies); - defer log_index.deinit(); - try testing.expectEqual(@as(usize, 1), log_index.getPolicyCount()); - try testing.expectEqual(@as(usize, 1), log_index.getDatabaseCount()); - - const log_info = log_index.getPolicy("log-policy-1"); - try testing.expect(log_info != null); - - const metric_in_log = log_index.getPolicy("metric-policy-1"); - try testing.expect(metric_in_log == null); - - // Metric index should only have metric policy - var metric_index = try MetricMatcherIndex.build(allocator, noop_bus.eventBus(), policies); - defer metric_index.deinit(); - try testing.expectEqual(@as(usize, 1), metric_index.getPolicyCount()); - try testing.expectEqual(@as(usize, 1), metric_index.getDatabaseCount()); - - const metric_info = metric_index.getPolicy("metric-policy-1"); - try testing.expect(metric_info != null); - - const log_in_metric = metric_index.getPolicy("log-policy-1"); - try testing.expect(log_in_metric == null); -} - -// 
============================================================================= -// Tests for match types (starts_with, ends_with, contains, exact, exists) -// ============================================================================= - -test "formatPattern: regex returns pattern unchanged" { - var buf: [64]u8 = undefined; - var slice: []u8 = &buf; - const result = formatPattern(&slice, "^hello.*world$", .regex); - try testing.expectEqualStrings("^hello.*world$", result); -} - -test "formatPattern: exists returns fixed pattern" { - var buf: [64]u8 = undefined; - var slice: []u8 = &buf; - const result = formatPattern(&slice, "", .exists); - try testing.expectEqualStrings("^.+$", result); -} - -test "formatPattern: exact adds both anchors" { - var buf: [64]u8 = undefined; - var slice: []u8 = &buf; - const result = formatPattern(&slice, "hello", .exact); - try testing.expectEqualStrings("^hello$", result); -} - -test "formatPattern: starts_with adds start anchor" { - var buf: [64]u8 = undefined; - var slice: []u8 = &buf; - const result = formatPattern(&slice, "ERROR:", .starts_with); - try testing.expectEqualStrings("^ERROR:", result); -} - -test "formatPattern: ends_with adds end anchor" { - var buf: [64]u8 = undefined; - var slice: []u8 = &buf; - const result = formatPattern(&slice, ".json", .ends_with); - try testing.expectEqualStrings(".json$", result); -} - -test "formatPattern: contains returns pattern unchanged" { - var buf: [64]u8 = undefined; - var slice: []u8 = &buf; - const result = formatPattern(&slice, "password", .contains); - try testing.expectEqualStrings("password", result); -} - -test "Log matcher with starts_with" { - const allocator = testing.allocator; - var noop_bus: NoopEventBus = undefined; - noop_bus.init(); - - var policy = Policy{ - .id = try allocator.dupe(u8, "starts-with-policy"), - .name = try allocator.dupe(u8, "test"), - .enabled = true, - .target = .{ .log = .{ .keep = try allocator.dupe(u8, "all") } }, - }; - try policy.target.?.log.match.append(allocator, .{ - .field = .{ .log_field = .LOG_FIELD_BODY }, - .match = .{ .starts_with = try allocator.dupe(u8, "ERROR") }, - }); - defer policy.deinit(allocator); - - var index = try LogMatcherIndex.build(allocator, noop_bus.eventBus(), &[_]Policy{policy}); - defer index.deinit(); - - const db = index.getDatabase(.{ .field = .{ .log_field = .LOG_FIELD_BODY } }).?; - var result_buf: [8]u32 = undefined; - - // Should match strings starting with ERROR - var result = db.scanPositive("ERROR: something failed", &result_buf); - try testing.expectEqual(@as(usize, 1), result.count); - - result = db.scanPositive("ERROR", &result_buf); - try testing.expectEqual(@as(usize, 1), result.count); - - // Should not match strings not starting with ERROR - result = db.scanPositive("Warning: ERROR occurred", &result_buf); - try testing.expectEqual(@as(usize, 0), result.count); - - result = db.scanPositive("Some ERROR here", &result_buf); - try testing.expectEqual(@as(usize, 0), result.count); -} - -test "Log matcher with ends_with" { - const allocator = testing.allocator; - var noop_bus: NoopEventBus = undefined; - noop_bus.init(); - - var policy = Policy{ - .id = try allocator.dupe(u8, "ends-with-policy"), - .name = try allocator.dupe(u8, "test"), - .enabled = true, - .target = .{ .log = .{ .keep = try allocator.dupe(u8, "all") } }, - }; - try policy.target.?.log.match.append(allocator, .{ - .field = .{ .log_field = .LOG_FIELD_BODY }, - .match = .{ .ends_with = try allocator.dupe(u8, ".json") }, - }); - defer policy.deinit(allocator); 
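// ---------------------------------------------------------------------------
// Illustrative sketch (hypothetical, not from the deleted file): the
// formatPattern tests above fully pin down the anchoring rules — regex and
// contains pass through unchanged, exists becomes ^.+$, exact gets both
// anchors, starts_with/ends_with get one. A self-contained version consistent
// with those tests follows; the deleted implementation's exact signature
// differs (per the tests it takes a *[]u8), and note that regex
// metacharacters in literal patterns are not escaped at this step, as the
// ".json" -> ".json$" case above shows.
const std = @import("std");

const MatchKind = enum { regex, exact, exists, starts_with, ends_with, contains };

fn formatPatternSketch(buf: []u8, pattern: []const u8, kind: MatchKind) ![]const u8 {
    return switch (kind) {
        .regex, .contains => pattern, // used verbatim
        .exists => "^.+$", // any non-empty value
        .exact => try std.fmt.bufPrint(buf, "^{s}$", .{pattern}),
        .starts_with => try std.fmt.bufPrint(buf, "^{s}", .{pattern}),
        .ends_with => try std.fmt.bufPrint(buf, "{s}$", .{pattern}),
    };
}

test "formatPatternSketch matches the documented anchoring" {
    var buf: [64]u8 = undefined;
    try std.testing.expectEqualStrings("^hello$", try formatPatternSketch(&buf, "hello", .exact));
    try std.testing.expectEqualStrings("^ERROR:", try formatPatternSketch(&buf, "ERROR:", .starts_with));
    try std.testing.expectEqualStrings(".json$", try formatPatternSketch(&buf, ".json", .ends_with));
    try std.testing.expectEqualStrings("^.+$", try formatPatternSketch(&buf, "", .exists));
}
// ---------------------------------------------------------------------------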
- - var index = try LogMatcherIndex.build(allocator, noop_bus.eventBus(), &[_]Policy{policy}); - defer index.deinit(); - - const db = index.getDatabase(.{ .field = .{ .log_field = .LOG_FIELD_BODY } }).?; - var result_buf: [8]u32 = undefined; - - // Should match strings ending with .json - var result = db.scanPositive("config.json", &result_buf); - try testing.expectEqual(@as(usize, 1), result.count); - - result = db.scanPositive("path/to/file.json", &result_buf); - try testing.expectEqual(@as(usize, 1), result.count); - - // Should not match strings not ending with .json - result = db.scanPositive("config.json.bak", &result_buf); - try testing.expectEqual(@as(usize, 0), result.count); - - result = db.scanPositive("json file here", &result_buf); - try testing.expectEqual(@as(usize, 0), result.count); -} - -test "Log matcher with contains" { - const allocator = testing.allocator; - var noop_bus: NoopEventBus = undefined; - noop_bus.init(); - - var policy = Policy{ - .id = try allocator.dupe(u8, "contains-policy"), - .name = try allocator.dupe(u8, "test"), - .enabled = true, - .target = .{ .log = .{ .keep = try allocator.dupe(u8, "all") } }, - }; - try policy.target.?.log.match.append(allocator, .{ - .field = .{ .log_field = .LOG_FIELD_BODY }, - .match = .{ .contains = try allocator.dupe(u8, "password") }, - }); - defer policy.deinit(allocator); - - var index = try LogMatcherIndex.build(allocator, noop_bus.eventBus(), &[_]Policy{policy}); - defer index.deinit(); - - const db = index.getDatabase(.{ .field = .{ .log_field = .LOG_FIELD_BODY } }).?; - var result_buf: [8]u32 = undefined; - - // Should match strings containing password anywhere - var result = db.scanPositive("password", &result_buf); - try testing.expectEqual(@as(usize, 1), result.count); - - result = db.scanPositive("user password here", &result_buf); - try testing.expectEqual(@as(usize, 1), result.count); - - result = db.scanPositive("my_password_field", &result_buf); - try testing.expectEqual(@as(usize, 1), result.count); - - // Should not match strings without password - result = db.scanPositive("pass word", &result_buf); - try testing.expectEqual(@as(usize, 0), result.count); - - result = db.scanPositive("secret", &result_buf); - try testing.expectEqual(@as(usize, 0), result.count); -} - -test "Log matcher with exact" { - const allocator = testing.allocator; - var noop_bus: NoopEventBus = undefined; - noop_bus.init(); - - var policy = Policy{ - .id = try allocator.dupe(u8, "exact-policy"), - .name = try allocator.dupe(u8, "test"), - .enabled = true, - .target = .{ .log = .{ .keep = try allocator.dupe(u8, "all") } }, - }; - try policy.target.?.log.match.append(allocator, .{ - .field = .{ .log_field = .LOG_FIELD_BODY }, - .match = .{ .exact = try allocator.dupe(u8, "hello") }, - }); - defer policy.deinit(allocator); - - var index = try LogMatcherIndex.build(allocator, noop_bus.eventBus(), &[_]Policy{policy}); - defer index.deinit(); - - const db = index.getDatabase(.{ .field = .{ .log_field = .LOG_FIELD_BODY } }).?; - var result_buf: [8]u32 = undefined; - - // Should match exactly "hello" - var result = db.scanPositive("hello", &result_buf); - try testing.expectEqual(@as(usize, 1), result.count); - - // Should not match anything else - result = db.scanPositive("hello world", &result_buf); - try testing.expectEqual(@as(usize, 0), result.count); - - result = db.scanPositive("say hello", &result_buf); - try testing.expectEqual(@as(usize, 0), result.count); - - result = db.scanPositive("Hello", &result_buf); - try 
testing.expectEqual(@as(usize, 0), result.count); -} - -test "Log matcher with case_insensitive" { - const allocator = testing.allocator; - var noop_bus: NoopEventBus = undefined; - noop_bus.init(); - - var policy = Policy{ - .id = try allocator.dupe(u8, "case-insensitive-policy"), - .name = try allocator.dupe(u8, "test"), - .enabled = true, - .target = .{ .log = .{ .keep = try allocator.dupe(u8, "all") } }, - }; - try policy.target.?.log.match.append(allocator, .{ - .field = .{ .log_field = .LOG_FIELD_BODY }, - .match = .{ .exact = try allocator.dupe(u8, "hello") }, - .case_insensitive = true, - }); - defer policy.deinit(allocator); - - var index = try LogMatcherIndex.build(allocator, noop_bus.eventBus(), &[_]Policy{policy}); - defer index.deinit(); - - const db = index.getDatabase(.{ .field = .{ .log_field = .LOG_FIELD_BODY } }).?; - var result_buf: [8]u32 = undefined; - - // Should match case variations - var result = db.scanPositive("hello", &result_buf); - try testing.expectEqual(@as(usize, 1), result.count); - - result = db.scanPositive("Hello", &result_buf); - try testing.expectEqual(@as(usize, 1), result.count); - - result = db.scanPositive("HELLO", &result_buf); - try testing.expectEqual(@as(usize, 1), result.count); - - result = db.scanPositive("HeLLo", &result_buf); - try testing.expectEqual(@as(usize, 1), result.count); - - // Should still not match partial - result = db.scanPositive("hello world", &result_buf); - try testing.expectEqual(@as(usize, 0), result.count); -} - -test "Log matcher with starts_with case_insensitive" { - const allocator = testing.allocator; - var noop_bus: NoopEventBus = undefined; - noop_bus.init(); - - var policy = Policy{ - .id = try allocator.dupe(u8, "starts-with-ci-policy"), - .name = try allocator.dupe(u8, "test"), - .enabled = true, - .target = .{ .log = .{ .keep = try allocator.dupe(u8, "all") } }, - }; - try policy.target.?.log.match.append(allocator, .{ - .field = .{ .log_field = .LOG_FIELD_BODY }, - .match = .{ .starts_with = try allocator.dupe(u8, "error") }, - .case_insensitive = true, - }); - defer policy.deinit(allocator); - - var index = try LogMatcherIndex.build(allocator, noop_bus.eventBus(), &[_]Policy{policy}); - defer index.deinit(); - - const db = index.getDatabase(.{ .field = .{ .log_field = .LOG_FIELD_BODY } }).?; - var result_buf: [8]u32 = undefined; - - var result = db.scanPositive("error: failed", &result_buf); - try testing.expectEqual(@as(usize, 1), result.count); - - result = db.scanPositive("ERROR: failed", &result_buf); - try testing.expectEqual(@as(usize, 1), result.count); - - result = db.scanPositive("Error: failed", &result_buf); - try testing.expectEqual(@as(usize, 1), result.count); - - result = db.scanPositive("warning: error", &result_buf); - try testing.expectEqual(@as(usize, 0), result.count); -} diff --git a/src/policy/parser.zig b/src/policy/parser.zig deleted file mode 100644 index a54b654..0000000 --- a/src/policy/parser.zig +++ /dev/null @@ -1,2132 +0,0 @@ -const std = @import("std"); -const proto = @import("proto"); - -const Policy = proto.policy.Policy; -const LogTarget = proto.policy.LogTarget; -const LogMatcher = proto.policy.LogMatcher; -const LogField = proto.policy.LogField; -const LogTransform = proto.policy.LogTransform; -const LogRemove = proto.policy.LogRemove; -const LogRedact = proto.policy.LogRedact; -const LogRename = proto.policy.LogRename; -const LogAdd = proto.policy.LogAdd; -const MetricTarget = proto.policy.MetricTarget; -const MetricMatcher = proto.policy.MetricMatcher; -const 
MetricField = proto.policy.MetricField; -const TraceTarget = proto.policy.TraceTarget; -const TraceMatcher = proto.policy.TraceMatcher; -const TraceField = proto.policy.TraceField; -const TraceSamplingConfig = proto.policy.TraceSamplingConfig; -const SamplingMode = proto.policy.SamplingMode; -const SpanKind = proto.policy.SpanKind; -const SpanStatusCode = proto.policy.SpanStatusCode; -const AttributePath = proto.policy.AttributePath; -const LogSampleKey = proto.policy.LogSampleKey; - -/// Parse an AttributePath from a JSON value. -/// Supports three formats: -/// - String shorthand: "key" -> ["key"] -/// - Array shorthand: ["http", "method"] -> ["http", "method"] -/// - Canonical: {"path": ["http", "method"]} -> ["http", "method"] -fn parseAttributePath(allocator: std.mem.Allocator, value: std.json.Value) !AttributePath { - var attr_path = AttributePath{}; - errdefer { - for (attr_path.path.items) |segment| { - allocator.free(segment); - } - attr_path.path.deinit(allocator); - } - - switch (value) { - .string => |s| { - // String shorthand: "key" -> single-element path - try attr_path.path.append(allocator, try allocator.dupe(u8, s)); - }, - .array => |arr| { - // Array shorthand: ["http", "method"] - try attr_path.path.ensureTotalCapacity(allocator, arr.items.len); - for (arr.items) |item| { - switch (item) { - .string => |s| { - attr_path.path.appendAssumeCapacity(try allocator.dupe(u8, s)); - }, - else => return error.InvalidAttributePath, - } - } - }, - .object => |obj| { - // Canonical format: {"path": ["http", "method"]} - const path_value = obj.get("path") orelse return error.InvalidAttributePath; - switch (path_value) { - .array => |arr| { - try attr_path.path.ensureTotalCapacity(allocator, arr.items.len); - for (arr.items) |item| { - switch (item) { - .string => |s| { - attr_path.path.appendAssumeCapacity(try allocator.dupe(u8, s)); - }, - else => return error.InvalidAttributePath, - } - } - }, - else => return error.InvalidAttributePath, - } - }, - else => return error.InvalidAttributePath, - } - - if (attr_path.path.items.len == 0) { - return error.EmptyAttributePath; - } - - return attr_path; -} - -/// Create an AttributePath from a simple key string. -/// For backward compatibility, a single key becomes a single-element path. -fn makeAttributePath(allocator: std.mem.Allocator, key: []const u8) !AttributePath { - var attr_path = AttributePath{}; - try attr_path.path.append(allocator, try allocator.dupe(u8, key)); - return attr_path; -} - -// ============================================================================= -// New JSON Schema - matches YAML format closely -// ============================================================================= - -/// JSON schema for a log matcher -/// Example: { "log_field": "body", "regex": "GET /health" } -/// Example: { "log_attribute": "service", "regex": "payment.*" } -/// Example: { "log_attribute": ["http", "method"], "regex": "GET" } -/// Example: { "log_attribute": {"path": ["http", "method"]}, "regex": "GET" } -/// Example: { "log_field": "body", "starts_with": "ERROR", "case_insensitive": true } -const LogMatcherJson = struct { - // Field selectors (one of these should be set) - log_field: ?[]const u8 = null, // "body", "severity_text", etc. 
- log_attribute: ?std.json.Value = null, // attribute path (string, array, or object) - resource_attribute: ?std.json.Value = null, // resource attribute path - scope_attribute: ?std.json.Value = null, // scope attribute path - - // Match type (one of these should be set) - regex: ?[]const u8 = null, - exact: ?[]const u8 = null, - exists: ?bool = null, - starts_with: ?[]const u8 = null, - ends_with: ?[]const u8 = null, - contains: ?[]const u8 = null, - - // Optional flags - negate: bool = false, - case_insensitive: bool = false, -}; - -/// JSON schema for a metric matcher -/// Example: { "metric_field": "name", "regex": "^debug\\." } -/// Example: { "datapoint_attribute": "env", "exact": "dev" } -/// Example: { "datapoint_attribute": ["tags", "env"], "exact": "prod" } -const MetricMatcherJson = struct { - // Field selectors (one of these should be set) - metric_field: ?[]const u8 = null, // "name", "unit", etc. - datapoint_attribute: ?std.json.Value = null, // datapoint attribute path - resource_attribute: ?std.json.Value = null, // resource attribute path - scope_attribute: ?std.json.Value = null, // scope attribute path - - // Match type (one of these should be set) - regex: ?[]const u8 = null, - exact: ?[]const u8 = null, - exists: ?bool = null, - starts_with: ?[]const u8 = null, - ends_with: ?[]const u8 = null, - contains: ?[]const u8 = null, - - // Optional flags - negate: bool = false, - case_insensitive: bool = false, -}; - -/// JSON schema for a remove transform -const RemoveJson = struct { - log_field: ?[]const u8 = null, - log_attribute: ?std.json.Value = null, - resource_attribute: ?std.json.Value = null, - scope_attribute: ?std.json.Value = null, -}; - -/// JSON schema for a redact transform -const RedactJson = struct { - log_field: ?[]const u8 = null, - log_attribute: ?std.json.Value = null, - resource_attribute: ?std.json.Value = null, - scope_attribute: ?std.json.Value = null, - replacement: []const u8 = "[REDACTED]", -}; - -/// JSON schema for a rename transform -const RenameJson = struct { - from_log_field: ?[]const u8 = null, - from_log_attribute: ?std.json.Value = null, - from_resource_attribute: ?std.json.Value = null, - from_scope_attribute: ?std.json.Value = null, - to: []const u8, - upsert: bool = true, -}; - -/// JSON schema for an add transform -const AddJson = struct { - log_field: ?[]const u8 = null, - log_attribute: ?std.json.Value = null, - resource_attribute: ?std.json.Value = null, - scope_attribute: ?std.json.Value = null, - value: []const u8, - upsert: bool = true, -}; - -/// JSON schema for transforms -const TransformJson = struct { - remove: ?[]RemoveJson = null, - redact: ?[]RedactJson = null, - rename: ?[]RenameJson = null, - add: ?[]AddJson = null, -}; - -/// JSON schema for log sample key -/// Example: { "log_field": "body" } -/// Example: { "log_attribute": "trace_id" } -/// Example: { "log_attribute": ["request", "id"] } -const LogSampleKeyJson = struct { - log_field: ?[]const u8 = null, - log_attribute: ?std.json.Value = null, - resource_attribute: ?std.json.Value = null, - scope_attribute: ?std.json.Value = null, -}; - -/// JSON schema for log target -/// Example: -/// "log": { -/// "match": [{ "log_field": "body", "regex": "GET /health" }], -/// "keep": "none", -/// "transform": { ... 
}, -/// "sample_key": { "log_attribute": "trace_id" } -/// } -const LogTargetJson = struct { - match: ?[]LogMatcherJson = null, - keep: []const u8 = "all", - transform: ?TransformJson = null, - sample_key: ?LogSampleKeyJson = null, -}; - -/// JSON schema for metric target -/// Example: -/// "metric": { -/// "match": [{ "metric_field": "name", "regex": "^debug\\." }], -/// "keep": false -/// } -const MetricTargetJson = struct { - match: ?[]MetricMatcherJson = null, - keep: bool = true, -}; - -/// JSON schema for a trace matcher -/// Example: { "trace_field": "TRACE_FIELD_NAME", "regex": "^ping$" } -/// Example: { "span_attribute": "peer.service", "exists": true } -/// Example: { "span_attribute": ["http", "method"], "regex": "GET" } -/// Example: { "span_kind": "SPAN_KIND_INTERNAL", "exists": true } -const TraceMatcherJson = struct { - // Field selectors (one of these should be set) - trace_field: ?[]const u8 = null, // "TRACE_FIELD_NAME", "TRACE_FIELD_TRACE_ID", etc. - span_attribute: ?std.json.Value = null, // span attribute path - resource_attribute: ?std.json.Value = null, // resource attribute path - scope_attribute: ?std.json.Value = null, // scope attribute path - span_kind: ?[]const u8 = null, // "SPAN_KIND_INTERNAL", "SPAN_KIND_SERVER", etc. - span_status: ?[]const u8 = null, // "SPAN_STATUS_CODE_OK", "SPAN_STATUS_CODE_ERROR" - event_name: ?[]const u8 = null, // event name to match - event_attribute: ?std.json.Value = null, // event attribute path - link_trace_id: ?[]const u8 = null, // link trace_id matcher - - // Match type (one of these should be set) - regex: ?[]const u8 = null, - exact: ?[]const u8 = null, - exists: ?bool = null, - starts_with: ?[]const u8 = null, - ends_with: ?[]const u8 = null, - contains: ?[]const u8 = null, - - // Optional flags - negate: bool = false, - case_insensitive: bool = false, -}; - -/// JSON schema for trace sampling config -const TraceSamplingConfigJson = struct { - percentage: f32 = 100.0, - mode: ?[]const u8 = null, // "SAMPLING_MODE_HASH_SEED", "SAMPLING_MODE_PROPORTIONAL", etc. - sampling_precision: ?u32 = null, - hash_seed: ?u32 = null, - fail_closed: ?bool = null, -}; - -/// JSON schema for trace target -/// Example: -/// "trace": { -/// "match": [{ "trace_field": "TRACE_FIELD_NAME", "regex": "^ping$" }], -/// "keep": { "percentage": 50.0, "mode": "SAMPLING_MODE_HASH_SEED" } -/// } -const TraceTargetJson = struct { - match: ?[]TraceMatcherJson = null, - keep: ?TraceSamplingConfigJson = null, -}; - -/// JSON schema for a policy -/// Example: -/// { -/// "id": "drop-debug-metrics", -/// "name": "Drop debug metrics", -/// "metric": { ... } -/// } -/// or: -/// { -/// "id": "drop-health-checks", -/// "name": "Drop health check logs", -/// "log": { ... 
} -/// } -const PolicyJson = struct { - id: []const u8, - name: []const u8, - description: ?[]const u8 = null, - enabled: bool = true, - - // Target type (one of these should be set) - log: ?LogTargetJson = null, - metric: ?MetricTargetJson = null, - trace: ?TraceTargetJson = null, -}; - -/// JSON schema for a policies file -const PoliciesFileJson = struct { - policies: []PolicyJson, -}; - -// ============================================================================= -// Public API -// ============================================================================= - -/// Parse policies-only JSON file -pub fn parsePoliciesFile(allocator: std.mem.Allocator, path: []const u8) ![]Policy { - const file = try std.fs.cwd().openFile(path, .{}); - defer file.close(); - - const contents = try file.readToEndAlloc(allocator, 1024 * 1024); - defer allocator.free(contents); - - return parsePoliciesBytes(allocator, contents); -} - -/// Parse policies from JSON bytes -pub fn parsePoliciesBytes(allocator: std.mem.Allocator, json_bytes: []const u8) ![]Policy { - const parsed = try std.json.parseFromSlice( - PoliciesFileJson, - allocator, - json_bytes, - .{ .allocate = .alloc_always }, - ); - defer parsed.deinit(); - - const json_policies = parsed.value; - return parsePolicies(allocator, json_policies.policies); -} - -/// Parse policies from JSON array -pub fn parsePolicies(allocator: std.mem.Allocator, json_policies: []PolicyJson) ![]Policy { - var policies = try allocator.alloc(Policy, json_policies.len); - - for (json_policies, 0..) |json_policy, i| { - policies[i] = try parsePolicy(allocator, json_policy); - } - - return policies; -} - -/// Parse a single policy -fn parsePolicy(allocator: std.mem.Allocator, json_policy: PolicyJson) !Policy { - const id = try allocator.dupe(u8, json_policy.id); - const name = try allocator.dupe(u8, json_policy.name); - const description = if (json_policy.description) |desc| try allocator.dupe(u8, desc) else &.{}; - - // Determine target type - var target: ?Policy.target_union = null; - - if (json_policy.log) |log_json| { - target = .{ .log = try parseLogTarget(allocator, log_json) }; - } else if (json_policy.metric) |metric_json| { - target = .{ .metric = try parseMetricTarget(allocator, metric_json) }; - } else if (json_policy.trace) |trace_json| { - target = .{ .trace = try parseTraceTarget(allocator, trace_json) }; - } - - return Policy{ - .id = id, - .name = name, - .description = description, - .enabled = json_policy.enabled, - .target = target, - }; -} - -// ============================================================================= -// Log Target Parsing -// ============================================================================= - -fn parseLogTarget(allocator: std.mem.Allocator, json: LogTargetJson) !LogTarget { - var matchers = std.ArrayListUnmanaged(LogMatcher){}; - - if (json.match) |json_matchers| { - try matchers.ensureTotalCapacity(allocator, json_matchers.len); - for (json_matchers) |jm| { - const matcher = try parseLogMatcher(allocator, jm); - matchers.appendAssumeCapacity(matcher); - } - } - - var transform: ?LogTransform = null; - if (json.transform) |jt| { - transform = try parseLogTransform(allocator, jt); - } - - var sample_key: ?LogSampleKey = null; - if (json.sample_key) |sk| { - sample_key = try parseLogSampleKey(allocator, sk); - } - - return LogTarget{ - .match = matchers, - .keep = try allocator.dupe(u8, json.keep), - .transform = transform, - .sample_key = sample_key, - }; -} - -fn parseLogSampleKey(allocator: std.mem.Allocator, json: 
LogSampleKeyJson) !LogSampleKey { - const field: LogSampleKey.field_union = blk: { - if (json.log_field) |field_name| { - break :blk .{ .log_field = try parseLogFieldName(field_name) }; - } else if (json.log_attribute) |value| { - break :blk .{ .log_attribute = try parseAttributePath(allocator, value) }; - } else if (json.resource_attribute) |value| { - break :blk .{ .resource_attribute = try parseAttributePath(allocator, value) }; - } else if (json.scope_attribute) |value| { - break :blk .{ .scope_attribute = try parseAttributePath(allocator, value) }; - } else { - return error.MissingSampleKeyField; - } - }; - return LogSampleKey{ .field = field }; -} - -fn parseLogMatcher(allocator: std.mem.Allocator, jm: LogMatcherJson) !LogMatcher { - // Parse field - const field: LogMatcher.field_union = blk: { - if (jm.log_field) |field_name| { - break :blk .{ .log_field = try parseLogFieldName(field_name) }; - } else if (jm.log_attribute) |value| { - break :blk .{ .log_attribute = try parseAttributePath(allocator, value) }; - } else if (jm.resource_attribute) |value| { - break :blk .{ .resource_attribute = try parseAttributePath(allocator, value) }; - } else if (jm.scope_attribute) |value| { - break :blk .{ .scope_attribute = try parseAttributePath(allocator, value) }; - } else { - return error.MissingField; - } - }; - - // Parse match - const match: LogMatcher.match_union = blk: { - if (jm.regex) |pattern| { - break :blk .{ .regex = try allocator.dupe(u8, pattern) }; - } else if (jm.exact) |pattern| { - break :blk .{ .exact = try allocator.dupe(u8, pattern) }; - } else if (jm.exists) |exists| { - break :blk .{ .exists = exists }; - } else if (jm.starts_with) |pattern| { - break :blk .{ .starts_with = try allocator.dupe(u8, pattern) }; - } else if (jm.ends_with) |pattern| { - break :blk .{ .ends_with = try allocator.dupe(u8, pattern) }; - } else if (jm.contains) |pattern| { - break :blk .{ .contains = try allocator.dupe(u8, pattern) }; - } else { - return error.MissingMatch; - } - }; - - return LogMatcher{ - .negate = jm.negate, - .case_insensitive = jm.case_insensitive, - .field = field, - .match = match, - }; -} - -fn parseLogFieldName(name: []const u8) !LogField { - if (std.mem.eql(u8, name, "body")) return .LOG_FIELD_BODY; - if (std.mem.eql(u8, name, "severity_text")) return .LOG_FIELD_SEVERITY_TEXT; - if (std.mem.eql(u8, name, "trace_id")) return .LOG_FIELD_TRACE_ID; - if (std.mem.eql(u8, name, "span_id")) return .LOG_FIELD_SPAN_ID; - if (std.mem.eql(u8, name, "event_name")) return .LOG_FIELD_EVENT_NAME; - if (std.mem.eql(u8, name, "resource_schema_url")) return .LOG_FIELD_RESOURCE_SCHEMA_URL; - if (std.mem.eql(u8, name, "scope_schema_url")) return .LOG_FIELD_SCOPE_SCHEMA_URL; - return error.InvalidLogField; -} - -fn parseLogTransform(allocator: std.mem.Allocator, jt: TransformJson) !LogTransform { - var transform = LogTransform{}; - - if (jt.remove) |removes| { - try transform.remove.ensureTotalCapacity(allocator, removes.len); - for (removes) |jr| { - const remove = try parseLogRemove(allocator, jr); - transform.remove.appendAssumeCapacity(remove); - } - } - - if (jt.redact) |redacts| { - try transform.redact.ensureTotalCapacity(allocator, redacts.len); - for (redacts) |jr| { - const redact = try parseLogRedact(allocator, jr); - transform.redact.appendAssumeCapacity(redact); - } - } - - if (jt.rename) |renames| { - try transform.rename.ensureTotalCapacity(allocator, renames.len); - for (renames) |jr| { - const rename = try parseLogRename(allocator, jr); - 
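// ---------------------------------------------------------------------------
// Illustrative sketch (hypothetical, not from the deleted file): the
// parseLogFieldName helper above maps user-facing names ("body",
// "severity_text") to proto enum tags with chained std.mem.eql calls. An
// equivalent table-driven alternative, assuming a recent Zig whose standard
// library provides std.StaticStringMap (older releases named it
// std.ComptimeStringMap); the enum here is a local stand-in for
// proto.policy.LogField.
const std = @import("std");

const LogFieldSketch = enum { LOG_FIELD_BODY, LOG_FIELD_SEVERITY_TEXT, LOG_FIELD_TRACE_ID };

const log_field_names = std.StaticStringMap(LogFieldSketch).initComptime(.{
    .{ "body", .LOG_FIELD_BODY },
    .{ "severity_text", .LOG_FIELD_SEVERITY_TEXT },
    .{ "trace_id", .LOG_FIELD_TRACE_ID },
});

fn parseLogFieldNameSketch(name: []const u8) !LogFieldSketch {
    // The map does the comparisons at lookup time against a comptime-built
    // table, so adding a field is a one-line change instead of a new branch.
    return log_field_names.get(name) orelse error.InvalidLogField;
}

test "parseLogFieldNameSketch resolves known names and rejects others" {
    try std.testing.expectEqual(LogFieldSketch.LOG_FIELD_BODY, try parseLogFieldNameSketch("body"));
    try std.testing.expectError(error.InvalidLogField, parseLogFieldNameSketch("nope"));
}
// ---------------------------------------------------------------------------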
transform.rename.appendAssumeCapacity(rename); - } - } - - if (jt.add) |adds| { - try transform.add.ensureTotalCapacity(allocator, adds.len); - for (adds) |ja| { - const add = try parseLogAdd(allocator, ja); - transform.add.appendAssumeCapacity(add); - } - } - - return transform; -} - -fn parseLogRemove(allocator: std.mem.Allocator, jr: RemoveJson) !LogRemove { - const field: LogRemove.field_union = blk: { - if (jr.log_field) |field_name| { - break :blk .{ .log_field = try parseLogFieldName(field_name) }; - } else if (jr.log_attribute) |value| { - break :blk .{ .log_attribute = try parseAttributePath(allocator, value) }; - } else if (jr.resource_attribute) |value| { - break :blk .{ .resource_attribute = try parseAttributePath(allocator, value) }; - } else if (jr.scope_attribute) |value| { - break :blk .{ .scope_attribute = try parseAttributePath(allocator, value) }; - } else { - return error.MissingField; - } - }; - - return LogRemove{ .field = field }; -} - -fn parseLogRedact(allocator: std.mem.Allocator, jr: RedactJson) !LogRedact { - const field: LogRedact.field_union = blk: { - if (jr.log_field) |field_name| { - break :blk .{ .log_field = try parseLogFieldName(field_name) }; - } else if (jr.log_attribute) |value| { - break :blk .{ .log_attribute = try parseAttributePath(allocator, value) }; - } else if (jr.resource_attribute) |value| { - break :blk .{ .resource_attribute = try parseAttributePath(allocator, value) }; - } else if (jr.scope_attribute) |value| { - break :blk .{ .scope_attribute = try parseAttributePath(allocator, value) }; - } else { - return error.MissingField; - } - }; - - return LogRedact{ - .field = field, - .replacement = try allocator.dupe(u8, jr.replacement), - }; -} - -fn parseLogRename(allocator: std.mem.Allocator, jr: RenameJson) !LogRename { - const from: LogRename.from_union = blk: { - if (jr.from_log_field) |field_name| { - break :blk .{ .from_log_field = try parseLogFieldName(field_name) }; - } else if (jr.from_log_attribute) |value| { - break :blk .{ .from_log_attribute = try parseAttributePath(allocator, value) }; - } else if (jr.from_resource_attribute) |value| { - break :blk .{ .from_resource_attribute = try parseAttributePath(allocator, value) }; - } else if (jr.from_scope_attribute) |value| { - break :blk .{ .from_scope_attribute = try parseAttributePath(allocator, value) }; - } else { - return error.MissingField; - } - }; - - return LogRename{ - .from = from, - .to = try allocator.dupe(u8, jr.to), - .upsert = jr.upsert, - }; -} - -fn parseLogAdd(allocator: std.mem.Allocator, ja: AddJson) !LogAdd { - const field: LogAdd.field_union = blk: { - if (ja.log_field) |field_name| { - break :blk .{ .log_field = try parseLogFieldName(field_name) }; - } else if (ja.log_attribute) |value| { - break :blk .{ .log_attribute = try parseAttributePath(allocator, value) }; - } else if (ja.resource_attribute) |value| { - break :blk .{ .resource_attribute = try parseAttributePath(allocator, value) }; - } else if (ja.scope_attribute) |value| { - break :blk .{ .scope_attribute = try parseAttributePath(allocator, value) }; - } else { - return error.MissingField; - } - }; - - return LogAdd{ - .field = field, - .value = try allocator.dupe(u8, ja.value), - .upsert = ja.upsert, - }; -} - -// ============================================================================= -// Metric Target Parsing -// ============================================================================= - -fn parseMetricTarget(allocator: std.mem.Allocator, json: MetricTargetJson) !MetricTarget { - var 
matchers = std.ArrayListUnmanaged(MetricMatcher){}; - - if (json.match) |json_matchers| { - try matchers.ensureTotalCapacity(allocator, json_matchers.len); - for (json_matchers) |jm| { - const matcher = try parseMetricMatcher(allocator, jm); - matchers.appendAssumeCapacity(matcher); - } - } - - return MetricTarget{ - .match = matchers, - .keep = json.keep, - }; -} - -fn parseMetricMatcher(allocator: std.mem.Allocator, jm: MetricMatcherJson) !MetricMatcher { - // Parse field - const field: MetricMatcher.field_union = blk: { - if (jm.metric_field) |field_name| { - break :blk .{ .metric_field = try parseMetricFieldName(field_name) }; - } else if (jm.datapoint_attribute) |value| { - break :blk .{ .datapoint_attribute = try parseAttributePath(allocator, value) }; - } else if (jm.resource_attribute) |value| { - break :blk .{ .resource_attribute = try parseAttributePath(allocator, value) }; - } else if (jm.scope_attribute) |value| { - break :blk .{ .scope_attribute = try parseAttributePath(allocator, value) }; - } else { - return error.MissingField; - } - }; - - // Parse match - const match: MetricMatcher.match_union = blk: { - if (jm.regex) |pattern| { - break :blk .{ .regex = try allocator.dupe(u8, pattern) }; - } else if (jm.exact) |pattern| { - break :blk .{ .exact = try allocator.dupe(u8, pattern) }; - } else if (jm.exists) |exists| { - break :blk .{ .exists = exists }; - } else if (jm.starts_with) |pattern| { - break :blk .{ .starts_with = try allocator.dupe(u8, pattern) }; - } else if (jm.ends_with) |pattern| { - break :blk .{ .ends_with = try allocator.dupe(u8, pattern) }; - } else if (jm.contains) |pattern| { - break :blk .{ .contains = try allocator.dupe(u8, pattern) }; - } else { - return error.MissingMatch; - } - }; - - return MetricMatcher{ - .negate = jm.negate, - .case_insensitive = jm.case_insensitive, - .field = field, - .match = match, - }; -} - -fn parseMetricFieldName(name: []const u8) !MetricField { - if (std.mem.eql(u8, name, "name")) return .METRIC_FIELD_NAME; - if (std.mem.eql(u8, name, "description")) return .METRIC_FIELD_DESCRIPTION; - if (std.mem.eql(u8, name, "unit")) return .METRIC_FIELD_UNIT; - if (std.mem.eql(u8, name, "resource_schema_url")) return .METRIC_FIELD_RESOURCE_SCHEMA_URL; - if (std.mem.eql(u8, name, "scope_schema_url")) return .METRIC_FIELD_SCOPE_SCHEMA_URL; - if (std.mem.eql(u8, name, "scope_name")) return .METRIC_FIELD_SCOPE_NAME; - if (std.mem.eql(u8, name, "scope_version")) return .METRIC_FIELD_SCOPE_VERSION; - return error.InvalidMetricField; -} - -// ============================================================================= -// Trace Target Parsing -// ============================================================================= - -fn parseTraceTarget(allocator: std.mem.Allocator, json: TraceTargetJson) !TraceTarget { - var matchers = std.ArrayListUnmanaged(TraceMatcher){}; - - if (json.match) |json_matchers| { - try matchers.ensureTotalCapacity(allocator, json_matchers.len); - for (json_matchers) |jm| { - const matcher = try parseTraceMatcher(allocator, jm); - matchers.appendAssumeCapacity(matcher); - } - } - - var sampling_config: ?TraceSamplingConfig = null; - if (json.keep) |jk| { - sampling_config = try parseTraceSamplingConfig(jk); - } - - return TraceTarget{ - .match = matchers, - .keep = sampling_config, - }; -} - -fn parseTraceMatcher(allocator: std.mem.Allocator, jm: TraceMatcherJson) !TraceMatcher { - // Parse field - const field: TraceMatcher.field_union = blk: { - if (jm.trace_field) |field_name| { - break :blk .{ .trace_field = try 
parseTraceFieldName(field_name) }; - } else if (jm.span_attribute) |value| { - break :blk .{ .span_attribute = try parseAttributePath(allocator, value) }; - } else if (jm.resource_attribute) |value| { - break :blk .{ .resource_attribute = try parseAttributePath(allocator, value) }; - } else if (jm.scope_attribute) |value| { - break :blk .{ .scope_attribute = try parseAttributePath(allocator, value) }; - } else if (jm.span_kind) |kind_name| { - break :blk .{ .span_kind = try parseSpanKind(kind_name) }; - } else if (jm.span_status) |status_name| { - break :blk .{ .span_status = try parseSpanStatusCode(status_name) }; - } else if (jm.event_name) |name| { - break :blk .{ .event_name = try allocator.dupe(u8, name) }; - } else if (jm.event_attribute) |value| { - break :blk .{ .event_attribute = try parseAttributePath(allocator, value) }; - } else if (jm.link_trace_id) |id| { - break :blk .{ .link_trace_id = try allocator.dupe(u8, id) }; - } else { - return error.MissingField; - } - }; - - // Parse match - const match: TraceMatcher.match_union = blk: { - if (jm.regex) |pattern| { - break :blk .{ .regex = try allocator.dupe(u8, pattern) }; - } else if (jm.exact) |pattern| { - break :blk .{ .exact = try allocator.dupe(u8, pattern) }; - } else if (jm.exists) |exists| { - break :blk .{ .exists = exists }; - } else if (jm.starts_with) |pattern| { - break :blk .{ .starts_with = try allocator.dupe(u8, pattern) }; - } else if (jm.ends_with) |pattern| { - break :blk .{ .ends_with = try allocator.dupe(u8, pattern) }; - } else if (jm.contains) |pattern| { - break :blk .{ .contains = try allocator.dupe(u8, pattern) }; - } else { - return error.MissingMatch; - } - }; - - return TraceMatcher{ - .negate = jm.negate, - .case_insensitive = jm.case_insensitive, - .field = field, - .match = match, - }; -} - -fn parseTraceFieldName(name: []const u8) !TraceField { - if (std.mem.eql(u8, name, "TRACE_FIELD_NAME")) return .TRACE_FIELD_NAME; - if (std.mem.eql(u8, name, "TRACE_FIELD_TRACE_ID")) return .TRACE_FIELD_TRACE_ID; - if (std.mem.eql(u8, name, "TRACE_FIELD_SPAN_ID")) return .TRACE_FIELD_SPAN_ID; - if (std.mem.eql(u8, name, "TRACE_FIELD_PARENT_SPAN_ID")) return .TRACE_FIELD_PARENT_SPAN_ID; - if (std.mem.eql(u8, name, "TRACE_FIELD_TRACE_STATE")) return .TRACE_FIELD_TRACE_STATE; - if (std.mem.eql(u8, name, "TRACE_FIELD_RESOURCE_SCHEMA_URL")) return .TRACE_FIELD_RESOURCE_SCHEMA_URL; - if (std.mem.eql(u8, name, "TRACE_FIELD_SCOPE_SCHEMA_URL")) return .TRACE_FIELD_SCOPE_SCHEMA_URL; - if (std.mem.eql(u8, name, "TRACE_FIELD_SCOPE_NAME")) return .TRACE_FIELD_SCOPE_NAME; - if (std.mem.eql(u8, name, "TRACE_FIELD_SCOPE_VERSION")) return .TRACE_FIELD_SCOPE_VERSION; - return error.InvalidTraceField; -} - -fn parseSpanKind(name: []const u8) !SpanKind { - if (std.mem.eql(u8, name, "SPAN_KIND_UNSPECIFIED")) return .SPAN_KIND_UNSPECIFIED; - if (std.mem.eql(u8, name, "SPAN_KIND_INTERNAL")) return .SPAN_KIND_INTERNAL; - if (std.mem.eql(u8, name, "SPAN_KIND_SERVER")) return .SPAN_KIND_SERVER; - if (std.mem.eql(u8, name, "SPAN_KIND_CLIENT")) return .SPAN_KIND_CLIENT; - if (std.mem.eql(u8, name, "SPAN_KIND_PRODUCER")) return .SPAN_KIND_PRODUCER; - if (std.mem.eql(u8, name, "SPAN_KIND_CONSUMER")) return .SPAN_KIND_CONSUMER; - return error.InvalidSpanKind; -} - -fn parseSpanStatusCode(name: []const u8) !SpanStatusCode { - if (std.mem.eql(u8, name, "SPAN_STATUS_CODE_UNSPECIFIED")) return .SPAN_STATUS_CODE_UNSPECIFIED; - if (std.mem.eql(u8, name, "SPAN_STATUS_CODE_OK")) return .SPAN_STATUS_CODE_OK; - if (std.mem.eql(u8, name, 
"SPAN_STATUS_CODE_ERROR")) return .SPAN_STATUS_CODE_ERROR; - return error.InvalidSpanStatusCode; -} - -fn parseSamplingMode(name: []const u8) !SamplingMode { - if (std.mem.eql(u8, name, "SAMPLING_MODE_UNSPECIFIED")) return .SAMPLING_MODE_UNSPECIFIED; - if (std.mem.eql(u8, name, "SAMPLING_MODE_HASH_SEED")) return .SAMPLING_MODE_HASH_SEED; - if (std.mem.eql(u8, name, "SAMPLING_MODE_PROPORTIONAL")) return .SAMPLING_MODE_PROPORTIONAL; - if (std.mem.eql(u8, name, "SAMPLING_MODE_EQUALIZING")) return .SAMPLING_MODE_EQUALIZING; - return error.InvalidSamplingMode; -} - -fn parseTraceSamplingConfig(jk: TraceSamplingConfigJson) !TraceSamplingConfig { - var config = TraceSamplingConfig{ - .percentage = jk.percentage, - }; - - if (jk.mode) |mode_name| { - config.mode = try parseSamplingMode(mode_name); - } - - config.sampling_precision = jk.sampling_precision; - config.hash_seed = jk.hash_seed; - config.fail_closed = jk.fail_closed; - - return config; -} - -// ============================================================================= -// Keep Value Parsing -// ============================================================================= - -/// Parse keep value - validates format -/// Valid formats: "all", "none", "N%", "N/s", "N/m" -pub fn parseKeepValue(s: []const u8) !void { - if (s.len == 0 or std.mem.eql(u8, s, "all") or std.mem.eql(u8, s, "none")) { - return; - } - // Check for percentage: "N%" - if (s.len >= 2 and s[s.len - 1] == '%') { - const num_str = s[0 .. s.len - 1]; - const pct = std.fmt.parseInt(u8, num_str, 10) catch return error.InvalidKeepValue; - if (pct > 100) return error.InvalidKeepValue; - return; - } - // Check for rate limit: "N/s" or "N/m" - if (s.len >= 3 and s[s.len - 2] == '/') { - const num_str = s[0 .. s.len - 2]; - _ = std.fmt.parseInt(u32, num_str, 10) catch return error.InvalidKeepValue; - if (s[s.len - 1] != 's' and s[s.len - 1] != 'm') { - return error.InvalidKeepValue; - } - return; - } - return error.InvalidKeepValue; -} - -// ============================================================================= -// Tests -// ============================================================================= - -test "parseKeepValue" { - try parseKeepValue("all"); - try parseKeepValue("none"); - try parseKeepValue(""); - try parseKeepValue("50%"); - try parseKeepValue("0%"); - try parseKeepValue("100%"); - try parseKeepValue("100/s"); - try parseKeepValue("1000/m"); - - try std.testing.expectError(error.InvalidKeepValue, parseKeepValue("101%")); - try std.testing.expectError(error.InvalidKeepValue, parseKeepValue("invalid")); - try std.testing.expectError(error.InvalidKeepValue, parseKeepValue("100/x")); -} - -test "parsePoliciesBytes: log policy with new format" { - const allocator = std.testing.allocator; - - const json = - \\{ - \\ "policies": [ - \\ { - \\ "id": "drop-health-checks", - \\ "name": "Drop health check logs", - \\ "log": { - \\ "match": [ - \\ { "log_field": "body", "regex": "GET /health" } - \\ ], - \\ "keep": "none" - \\ } - \\ } - \\ ] - \\} - ; - - const policies = try parsePoliciesBytes(allocator, json); - defer { - for (policies) |*p| { - p.deinit(allocator); - } - allocator.free(policies); - } - - try std.testing.expectEqual(@as(usize, 1), policies.len); - try std.testing.expectEqualStrings("drop-health-checks", policies[0].id); - try std.testing.expectEqualStrings("Drop health check logs", policies[0].name); - try std.testing.expect(policies[0].enabled); - - // Verify it's a log target - try std.testing.expect(policies[0].target != null); - try 
std.testing.expect(policies[0].target.? == .log); - - const log_target = policies[0].target.?.log; - try std.testing.expectEqualStrings("none", log_target.keep); - try std.testing.expectEqual(@as(usize, 1), log_target.match.items.len); - - const matcher = log_target.match.items[0]; - try std.testing.expect(matcher.field != null); - try std.testing.expect(matcher.field.? == .log_field); - try std.testing.expectEqual(LogField.LOG_FIELD_BODY, matcher.field.?.log_field); - try std.testing.expect(matcher.match != null); - try std.testing.expect(matcher.match.? == .regex); - try std.testing.expectEqualStrings("GET /health", matcher.match.?.regex); -} - -test "parsePoliciesBytes: metric policy with new format" { - const allocator = std.testing.allocator; - - const json = - \\{ - \\ "policies": [ - \\ { - \\ "id": "drop-debug-metrics", - \\ "name": "Drop debug metrics", - \\ "metric": { - \\ "match": [ - \\ { "metric_field": "name", "regex": "^debug\\." } - \\ ], - \\ "keep": false - \\ } - \\ } - \\ ] - \\} - ; - - const policies = try parsePoliciesBytes(allocator, json); - defer { - for (policies) |*p| { - p.deinit(allocator); - } - allocator.free(policies); - } - - try std.testing.expectEqual(@as(usize, 1), policies.len); - try std.testing.expectEqualStrings("drop-debug-metrics", policies[0].id); - try std.testing.expectEqualStrings("Drop debug metrics", policies[0].name); - try std.testing.expect(policies[0].enabled); - - // Verify it's a metric target - try std.testing.expect(policies[0].target != null); - try std.testing.expect(policies[0].target.? == .metric); - - const metric_target = policies[0].target.?.metric; - try std.testing.expectEqual(false, metric_target.keep); - try std.testing.expectEqual(@as(usize, 1), metric_target.match.items.len); - - const matcher = metric_target.match.items[0]; - try std.testing.expect(matcher.field != null); - try std.testing.expect(matcher.field.? == .metric_field); - try std.testing.expectEqual(MetricField.METRIC_FIELD_NAME, matcher.field.?.metric_field); - try std.testing.expect(matcher.match != null); - try std.testing.expect(matcher.match.? == .regex); - try std.testing.expectEqualStrings("^debug\\.", matcher.match.?.regex); -} - -test "parsePoliciesBytes: log policy with attribute matcher" { - const allocator = std.testing.allocator; - - const json = - \\{ - \\ "policies": [ - \\ { - \\ "id": "drop-dev-logs", - \\ "name": "Drop development logs", - \\ "log": { - \\ "match": [ - \\ { "log_attribute": "environment", "exact": "development" } - \\ ], - \\ "keep": "none" - \\ } - \\ } - \\ ] - \\} - ; - - const policies = try parsePoliciesBytes(allocator, json); - defer { - for (policies) |*p| { - p.deinit(allocator); - } - allocator.free(policies); - } - - try std.testing.expectEqual(@as(usize, 1), policies.len); - - const log_target = policies[0].target.?.log; - const matcher = log_target.match.items[0]; - try std.testing.expect(matcher.field.? == .log_attribute); - try std.testing.expectEqualStrings("environment", matcher.field.?.log_attribute.path.items[0]); - try std.testing.expect(matcher.match.? 
== .exact); - try std.testing.expectEqualStrings("development", matcher.match.?.exact); -} - -test "parsePoliciesBytes: metric policy with datapoint attribute" { - const allocator = std.testing.allocator; - - const json = - \\{ - \\ "policies": [ - \\ { - \\ "id": "drop-dev-metrics", - \\ "name": "Drop development metrics", - \\ "metric": { - \\ "match": [ - \\ { "datapoint_attribute": "env", "regex": "dev" } - \\ ], - \\ "keep": false - \\ } - \\ } - \\ ] - \\} - ; - - const policies = try parsePoliciesBytes(allocator, json); - defer { - for (policies) |*p| { - p.deinit(allocator); - } - allocator.free(policies); - } - - try std.testing.expectEqual(@as(usize, 1), policies.len); - - const metric_target = policies[0].target.?.metric; - const matcher = metric_target.match.items[0]; - try std.testing.expect(matcher.field.? == .datapoint_attribute); - try std.testing.expectEqualStrings("env", matcher.field.?.datapoint_attribute.path.items[0]); - try std.testing.expect(matcher.match.? == .regex); - try std.testing.expectEqualStrings("dev", matcher.match.?.regex); -} - -test "parsePoliciesBytes: log policy with transform" { - const allocator = std.testing.allocator; - - const json = - \\{ - \\ "policies": [ - \\ { - \\ "id": "redact-sensitive", - \\ "name": "Redact sensitive data", - \\ "log": { - \\ "match": [ - \\ { "log_field": "body", "regex": "password" } - \\ ], - \\ "keep": "all", - \\ "transform": { - \\ "redact": [ - \\ { "log_attribute": "password", "replacement": "***" } - \\ ], - \\ "remove": [ - \\ { "log_attribute": "secret_key" } - \\ ] - \\ } - \\ } - \\ } - \\ ] - \\} - ; - - const policies = try parsePoliciesBytes(allocator, json); - defer { - for (policies) |*p| { - p.deinit(allocator); - } - allocator.free(policies); - } - - try std.testing.expectEqual(@as(usize, 1), policies.len); - - const log_target = policies[0].target.?.log; - try std.testing.expectEqualStrings("all", log_target.keep); - try std.testing.expect(log_target.transform != null); - - const transform = log_target.transform.?; - try std.testing.expectEqual(@as(usize, 1), transform.redact.items.len); - try std.testing.expectEqual(@as(usize, 1), transform.remove.items.len); - - // Check redact - const redact = transform.redact.items[0]; - try std.testing.expect(redact.field.? == .log_attribute); - try std.testing.expectEqualStrings("password", redact.field.?.log_attribute.path.items[0]); - try std.testing.expectEqualStrings("***", redact.replacement); - - // Check remove - const remove = transform.remove.items[0]; - try std.testing.expect(remove.field.? 
== .log_attribute); - try std.testing.expectEqualStrings("secret_key", remove.field.?.log_attribute.path.items[0]); -} - -test "parsePoliciesBytes: mixed log and metric policies" { - const allocator = std.testing.allocator; - - const json = - \\{ - \\ "policies": [ - \\ { - \\ "id": "log-policy", - \\ "name": "Log Policy", - \\ "log": { - \\ "match": [ - \\ { "log_field": "body", "regex": "error" } - \\ ], - \\ "keep": "none" - \\ } - \\ }, - \\ { - \\ "id": "metric-policy", - \\ "name": "Metric Policy", - \\ "metric": { - \\ "match": [ - \\ { "metric_field": "name", "regex": "test_.*" } - \\ ], - \\ "keep": true - \\ } - \\ } - \\ ] - \\} - ; - - const policies = try parsePoliciesBytes(allocator, json); - defer { - for (policies) |*p| { - p.deinit(allocator); - } - allocator.free(policies); - } - - try std.testing.expectEqual(@as(usize, 2), policies.len); - - // First policy should be log - try std.testing.expectEqualStrings("log-policy", policies[0].id); - try std.testing.expect(policies[0].target != null); - try std.testing.expect(policies[0].target.? == .log); - - // Second policy should be metric - try std.testing.expectEqualStrings("metric-policy", policies[1].id); - try std.testing.expect(policies[1].target != null); - try std.testing.expect(policies[1].target.? == .metric); -} - -test "parsePoliciesBytes: negated matcher" { - const allocator = std.testing.allocator; - - const json = - \\{ - \\ "policies": [ - \\ { - \\ "id": "keep-non-debug", - \\ "name": "Keep non-debug logs", - \\ "log": { - \\ "match": [ - \\ { "log_field": "severity_text", "regex": "DEBUG", "negate": true } - \\ ], - \\ "keep": "all" - \\ } - \\ } - \\ ] - \\} - ; - - const policies = try parsePoliciesBytes(allocator, json); - defer { - for (policies) |*p| { - p.deinit(allocator); - } - allocator.free(policies); - } - - try std.testing.expectEqual(@as(usize, 1), policies.len); - - const log_target = policies[0].target.?.log; - const matcher = log_target.match.items[0]; - try std.testing.expect(matcher.negate); - try std.testing.expect(matcher.field.? == .log_field); - try std.testing.expectEqual(LogField.LOG_FIELD_SEVERITY_TEXT, matcher.field.?.log_field); -} - -test "parsePoliciesBytes: exists matcher" { - const allocator = std.testing.allocator; - - const json = - \\{ - \\ "policies": [ - \\ { - \\ "id": "require-trace", - \\ "name": "Require trace ID", - \\ "log": { - \\ "match": [ - \\ { "log_attribute": "trace_id", "exists": true } - \\ ], - \\ "keep": "all" - \\ } - \\ } - \\ ] - \\} - ; - - const policies = try parsePoliciesBytes(allocator, json); - defer { - for (policies) |*p| { - p.deinit(allocator); - } - allocator.free(policies); - } - - try std.testing.expectEqual(@as(usize, 1), policies.len); - - const log_target = policies[0].target.?.log; - const matcher = log_target.match.items[0]; - try std.testing.expect(matcher.field.? == .log_attribute); - try std.testing.expectEqualStrings("trace_id", matcher.field.?.log_attribute.path.items[0]); - try std.testing.expect(matcher.match.? 
== .exists); - try std.testing.expectEqual(true, matcher.match.?.exists); -} - -test "parsePoliciesBytes: disabled policy" { - const allocator = std.testing.allocator; - - const json = - \\{ - \\ "policies": [ - \\ { - \\ "id": "disabled-policy", - \\ "name": "Disabled Policy", - \\ "enabled": false, - \\ "log": { - \\ "match": [ - \\ { "log_field": "body", "regex": "test" } - \\ ], - \\ "keep": "none" - \\ } - \\ } - \\ ] - \\} - ; - - const policies = try parsePoliciesBytes(allocator, json); - defer { - for (policies) |*p| { - p.deinit(allocator); - } - allocator.free(policies); - } - - try std.testing.expectEqual(@as(usize, 1), policies.len); - try std.testing.expect(!policies[0].enabled); -} - -test "parsePoliciesBytes: trace policy with span name matcher" { - const allocator = std.testing.allocator; - - const json = - \\{ - \\ "policies": [ - \\ { - \\ "id": "sample-ping-spans", - \\ "name": "Sample ping spans at 50%", - \\ "trace": { - \\ "match": [ - \\ { "trace_field": "TRACE_FIELD_NAME", "regex": "^ping$" } - \\ ], - \\ "keep": { - \\ "percentage": 50.0, - \\ "mode": "SAMPLING_MODE_HASH_SEED", - \\ "sampling_precision": 4 - \\ } - \\ } - \\ } - \\ ] - \\} - ; - - const policies = try parsePoliciesBytes(allocator, json); - defer { - for (policies) |*p| { - p.deinit(allocator); - } - allocator.free(policies); - } - - try std.testing.expectEqual(@as(usize, 1), policies.len); - try std.testing.expectEqualStrings("sample-ping-spans", policies[0].id); - - // Verify it's a trace target - try std.testing.expect(policies[0].target != null); - try std.testing.expect(policies[0].target.? == .trace); - - const trace_target = policies[0].target.?.trace; - try std.testing.expectEqual(@as(usize, 1), trace_target.match.items.len); - - // Check matcher - const matcher = trace_target.match.items[0]; - try std.testing.expect(matcher.field != null); - try std.testing.expect(matcher.field.? == .trace_field); - try std.testing.expectEqual(TraceField.TRACE_FIELD_NAME, matcher.field.?.trace_field); - try std.testing.expect(matcher.match != null); - try std.testing.expect(matcher.match.? == .regex); - try std.testing.expectEqualStrings("^ping$", matcher.match.?.regex); - - // Check sampling config - try std.testing.expect(trace_target.keep != null); - const sampling = trace_target.keep.?; - try std.testing.expectEqual(@as(f32, 50.0), sampling.percentage); - try std.testing.expect(sampling.mode != null); - try std.testing.expectEqual(SamplingMode.SAMPLING_MODE_HASH_SEED, sampling.mode.?); - try std.testing.expect(sampling.sampling_precision != null); - try std.testing.expectEqual(@as(u32, 4), sampling.sampling_precision.?); -} - -test "parsePoliciesBytes: trace policy with span kind matcher" { - const allocator = std.testing.allocator; - - const json = - \\{ - \\ "policies": [ - \\ { - \\ "id": "sample-internal-spans", - \\ "name": "Sample internal spans", - \\ "trace": { - \\ "match": [ - \\ { "span_kind": "SPAN_KIND_INTERNAL", "exists": true } - \\ ], - \\ "keep": { - \\ "percentage": 75.0, - \\ "mode": "SAMPLING_MODE_EQUALIZING" - \\ } - \\ } - \\ } - \\ ] - \\} - ; - - const policies = try parsePoliciesBytes(allocator, json); - defer { - for (policies) |*p| { - p.deinit(allocator); - } - allocator.free(policies); - } - - try std.testing.expectEqual(@as(usize, 1), policies.len); - - const trace_target = policies[0].target.?.trace; - const matcher = trace_target.match.items[0]; - try std.testing.expect(matcher.field.? 
== .span_kind); - try std.testing.expectEqual(SpanKind.SPAN_KIND_INTERNAL, matcher.field.?.span_kind); - try std.testing.expect(matcher.match.? == .exists); - try std.testing.expectEqual(true, matcher.match.?.exists); - - const sampling = trace_target.keep.?; - try std.testing.expectEqual(@as(f32, 75.0), sampling.percentage); - try std.testing.expectEqual(SamplingMode.SAMPLING_MODE_EQUALIZING, sampling.mode.?); -} - -test "parsePoliciesBytes: trace policy with span attribute" { - const allocator = std.testing.allocator; - - const json = - \\{ - \\ "policies": [ - \\ { - \\ "id": "sample-peer-service", - \\ "name": "Sample spans with peer.service", - \\ "trace": { - \\ "match": [ - \\ { "span_attribute": "peer.service", "exists": true } - \\ ], - \\ "keep": { - \\ "percentage": 10.0, - \\ "hash_seed": 12345 - \\ } - \\ } - \\ } - \\ ] - \\} - ; - - const policies = try parsePoliciesBytes(allocator, json); - defer { - for (policies) |*p| { - p.deinit(allocator); - } - allocator.free(policies); - } - - try std.testing.expectEqual(@as(usize, 1), policies.len); - - const trace_target = policies[0].target.?.trace; - const matcher = trace_target.match.items[0]; - try std.testing.expect(matcher.field.? == .span_attribute); - try std.testing.expectEqualStrings("peer.service", matcher.field.?.span_attribute.path.items[0]); - - const sampling = trace_target.keep.?; - try std.testing.expectEqual(@as(f32, 10.0), sampling.percentage); - try std.testing.expect(sampling.hash_seed != null); - try std.testing.expectEqual(@as(u32, 12345), sampling.hash_seed.?); -} - -test "parsePoliciesBytes: trace policy with resource attribute and exact match" { - const allocator = std.testing.allocator; - - const json = - \\{ - \\ "policies": [ - \\ { - \\ "id": "sample-test-service", - \\ "name": "Sample test-service spans", - \\ "trace": { - \\ "match": [ - \\ { "resource_attribute": "service.name", "exact": "test-service" } - \\ ], - \\ "keep": { - \\ "percentage": 25.0, - \\ "mode": "SAMPLING_MODE_PROPORTIONAL", - \\ "sampling_precision": 6 - \\ } - \\ } - \\ } - \\ ] - \\} - ; - - const policies = try parsePoliciesBytes(allocator, json); - defer { - for (policies) |*p| { - p.deinit(allocator); - } - allocator.free(policies); - } - - try std.testing.expectEqual(@as(usize, 1), policies.len); - - const trace_target = policies[0].target.?.trace; - const matcher = trace_target.match.items[0]; - try std.testing.expect(matcher.field.? == .resource_attribute); - try std.testing.expectEqualStrings("service.name", matcher.field.?.resource_attribute.path.items[0]); - try std.testing.expect(matcher.match.? 
== .exact); - try std.testing.expectEqualStrings("test-service", matcher.match.?.exact); - - const sampling = trace_target.keep.?; - try std.testing.expectEqual(@as(f32, 25.0), sampling.percentage); - try std.testing.expectEqual(SamplingMode.SAMPLING_MODE_PROPORTIONAL, sampling.mode.?); - try std.testing.expectEqual(@as(u32, 6), sampling.sampling_precision.?); -} - -test "parsePoliciesBytes: trace policy with span status matcher" { - const allocator = std.testing.allocator; - - const json = - \\{ - \\ "policies": [ - \\ { - \\ "id": "keep-error-spans", - \\ "name": "Keep all error spans", - \\ "trace": { - \\ "match": [ - \\ { "span_status": "SPAN_STATUS_CODE_ERROR", "exists": true } - \\ ], - \\ "keep": { - \\ "percentage": 100.0 - \\ } - \\ } - \\ } - \\ ] - \\} - ; - - const policies = try parsePoliciesBytes(allocator, json); - defer { - for (policies) |*p| { - p.deinit(allocator); - } - allocator.free(policies); - } - - try std.testing.expectEqual(@as(usize, 1), policies.len); - - const trace_target = policies[0].target.?.trace; - const matcher = trace_target.match.items[0]; - try std.testing.expect(matcher.field.? == .span_status); - try std.testing.expectEqual(SpanStatusCode.SPAN_STATUS_CODE_ERROR, matcher.field.?.span_status); - - const sampling = trace_target.keep.?; - try std.testing.expectEqual(@as(f32, 100.0), sampling.percentage); -} - -// ============================================================================= -// Tests for AttributePath parsing formats -// ============================================================================= - -test "parseAttributePath: string shorthand" { - const allocator = std.testing.allocator; - - // Parse JSON value representing a string - const json_str = - \\"service" - ; - const parsed = try std.json.parseFromSlice(std.json.Value, allocator, json_str, .{}); - defer parsed.deinit(); - - var attr_path = try parseAttributePath(allocator, parsed.value); - defer { - for (attr_path.path.items) |segment| { - allocator.free(segment); - } - attr_path.path.deinit(allocator); - } - - try std.testing.expectEqual(@as(usize, 1), attr_path.path.items.len); - try std.testing.expectEqualStrings("service", attr_path.path.items[0]); -} - -test "parseAttributePath: array shorthand" { - const allocator = std.testing.allocator; - - // Parse JSON value representing an array - const json_str = - \\["http", "method"] - ; - const parsed = try std.json.parseFromSlice(std.json.Value, allocator, json_str, .{}); - defer parsed.deinit(); - - var attr_path = try parseAttributePath(allocator, parsed.value); - defer { - for (attr_path.path.items) |segment| { - allocator.free(segment); - } - attr_path.path.deinit(allocator); - } - - try std.testing.expectEqual(@as(usize, 2), attr_path.path.items.len); - try std.testing.expectEqualStrings("http", attr_path.path.items[0]); - try std.testing.expectEqualStrings("method", attr_path.path.items[1]); -} - -test "parseAttributePath: canonical object format" { - const allocator = std.testing.allocator; - - // Parse JSON value representing canonical format - const json_str = - \\{"path": ["request", "headers", "content-type"]} - ; - const parsed = try std.json.parseFromSlice(std.json.Value, allocator, json_str, .{}); - defer parsed.deinit(); - - var attr_path = try parseAttributePath(allocator, parsed.value); - defer { - for (attr_path.path.items) |segment| { - allocator.free(segment); - } - attr_path.path.deinit(allocator); - } - - try std.testing.expectEqual(@as(usize, 3), attr_path.path.items.len); - try 
std.testing.expectEqualStrings("request", attr_path.path.items[0]); - try std.testing.expectEqualStrings("headers", attr_path.path.items[1]); - try std.testing.expectEqualStrings("content-type", attr_path.path.items[2]); -} - -test "parseAttributePath: empty array returns error" { - const allocator = std.testing.allocator; - - const json_str = - \\[] - ; - const parsed = try std.json.parseFromSlice(std.json.Value, allocator, json_str, .{}); - defer parsed.deinit(); - - const result = parseAttributePath(allocator, parsed.value); - try std.testing.expectError(error.EmptyAttributePath, result); -} - -test "parseAttributePath: invalid type returns error" { - const allocator = std.testing.allocator; - - const json_str = - \\123 - ; - const parsed = try std.json.parseFromSlice(std.json.Value, allocator, json_str, .{}); - defer parsed.deinit(); - - const result = parseAttributePath(allocator, parsed.value); - try std.testing.expectError(error.InvalidAttributePath, result); -} - -test "parseAttributePath: array with non-string element returns error" { - const allocator = std.testing.allocator; - - const json_str = - \\["http", 123, "method"] - ; - const parsed = try std.json.parseFromSlice(std.json.Value, allocator, json_str, .{}); - defer parsed.deinit(); - - const result = parseAttributePath(allocator, parsed.value); - try std.testing.expectError(error.InvalidAttributePath, result); -} - -test "parsePoliciesBytes: log policy with array attribute path" { - const allocator = std.testing.allocator; - - const json = - \\{ - \\ "policies": [{ - \\ "id": "drop-get-requests", - \\ "name": "Drop GET requests", - \\ "log": { - \\ "match": [{ - \\ "log_attribute": ["http", "method"], - \\ "regex": "GET" - \\ }], - \\ "keep": "none" - \\ } - \\ }] - \\} - ; - - const policies = try parsePoliciesBytes(allocator, json); - defer { - for (policies) |*p| { - p.deinit(allocator); - } - allocator.free(policies); - } - - try std.testing.expectEqual(@as(usize, 1), policies.len); - - const log_target = policies[0].target.?.log; - const matcher = log_target.match.items[0]; - - // Verify the path has two segments - const path = matcher.field.?.log_attribute.path.items; - try std.testing.expectEqual(@as(usize, 2), path.len); - try std.testing.expectEqualStrings("http", path[0]); - try std.testing.expectEqualStrings("method", path[1]); -} - -test "parsePoliciesBytes: log policy with canonical attribute path" { - const allocator = std.testing.allocator; - - const json = - \\{ - \\ "policies": [{ - \\ "id": "drop-json-content", - \\ "name": "Drop JSON content type", - \\ "log": { - \\ "match": [{ - \\ "log_attribute": {"path": ["request", "headers", "content-type"]}, - \\ "regex": "application/json" - \\ }], - \\ "keep": "none" - \\ } - \\ }] - \\} - ; - - const policies = try parsePoliciesBytes(allocator, json); - defer { - for (policies) |*p| { - p.deinit(allocator); - } - allocator.free(policies); - } - - try std.testing.expectEqual(@as(usize, 1), policies.len); - - const log_target = policies[0].target.?.log; - const matcher = log_target.match.items[0]; - - // Verify the path has three segments - const path = matcher.field.?.log_attribute.path.items; - try std.testing.expectEqual(@as(usize, 3), path.len); - try std.testing.expectEqualStrings("request", path[0]); - try std.testing.expectEqualStrings("headers", path[1]); - try std.testing.expectEqualStrings("content-type", path[2]); -} - -test "parsePoliciesBytes: metric policy with nested datapoint attribute" { - const allocator = std.testing.allocator; - - const json 
= - \\{ - \\ "policies": [{ - \\ "id": "filter-by-nested-tag", - \\ "name": "Filter by nested tag", - \\ "metric": { - \\ "match": [{ - \\ "datapoint_attribute": ["tags", "env"], - \\ "exact": "production" - \\ }], - \\ "keep": false - \\ } - \\ }] - \\} - ; - - const policies = try parsePoliciesBytes(allocator, json); - defer { - for (policies) |*p| { - p.deinit(allocator); - } - allocator.free(policies); - } - - try std.testing.expectEqual(@as(usize, 1), policies.len); - - const metric_target = policies[0].target.?.metric; - const matcher = metric_target.match.items[0]; - - // Verify the path has two segments - const path = matcher.field.?.datapoint_attribute.path.items; - try std.testing.expectEqual(@as(usize, 2), path.len); - try std.testing.expectEqualStrings("tags", path[0]); - try std.testing.expectEqualStrings("env", path[1]); -} - -test "parsePoliciesBytes: trace policy with nested span attribute" { - const allocator = std.testing.allocator; - - const json = - \\{ - \\ "policies": [{ - \\ "id": "sample-by-http-status", - \\ "name": "Sample by HTTP status", - \\ "trace": { - \\ "match": [{ - \\ "span_attribute": ["http", "response", "status_code"], - \\ "regex": "5[0-9]{2}" - \\ }], - \\ "keep": {"percentage": 100.0} - \\ } - \\ }] - \\} - ; - - const policies = try parsePoliciesBytes(allocator, json); - defer { - for (policies) |*p| { - p.deinit(allocator); - } - allocator.free(policies); - } - - try std.testing.expectEqual(@as(usize, 1), policies.len); - - const trace_target = policies[0].target.?.trace; - const matcher = trace_target.match.items[0]; - - // Verify the path has three segments - const path = matcher.field.?.span_attribute.path.items; - try std.testing.expectEqual(@as(usize, 3), path.len); - try std.testing.expectEqualStrings("http", path[0]); - try std.testing.expectEqualStrings("response", path[1]); - try std.testing.expectEqualStrings("status_code", path[2]); -} - -// ============================================================================= -// Tests for optimized literal matchers and case_insensitive -// ============================================================================= - -test "parsePoliciesBytes: log policy with starts_with matcher" { - const allocator = std.testing.allocator; - - const json = - \\{ - \\ "policies": [{ - \\ "id": "drop-error-logs", - \\ "name": "Drop error logs", - \\ "log": { - \\ "match": [{ - \\ "log_field": "body", - \\ "starts_with": "ERROR:" - \\ }], - \\ "keep": "none" - \\ } - \\ }] - \\} - ; - - const policies = try parsePoliciesBytes(allocator, json); - defer { - for (policies) |*p| { - p.deinit(allocator); - } - allocator.free(policies); - } - - try std.testing.expectEqual(@as(usize, 1), policies.len); - - const log_target = policies[0].target.?.log; - const matcher = log_target.match.items[0]; - - try std.testing.expect(matcher.match.? 
== .starts_with); - try std.testing.expectEqualStrings("ERROR:", matcher.match.?.starts_with); - try std.testing.expect(!matcher.case_insensitive); -} - -test "parsePoliciesBytes: log policy with ends_with matcher" { - const allocator = std.testing.allocator; - - const json = - \\{ - \\ "policies": [{ - \\ "id": "drop-json-logs", - \\ "name": "Drop JSON logs", - \\ "log": { - \\ "match": [{ - \\ "log_field": "body", - \\ "ends_with": ".json" - \\ }], - \\ "keep": "none" - \\ } - \\ }] - \\} - ; - - const policies = try parsePoliciesBytes(allocator, json); - defer { - for (policies) |*p| { - p.deinit(allocator); - } - allocator.free(policies); - } - - try std.testing.expectEqual(@as(usize, 1), policies.len); - - const log_target = policies[0].target.?.log; - const matcher = log_target.match.items[0]; - - try std.testing.expect(matcher.match.? == .ends_with); - try std.testing.expectEqualStrings(".json", matcher.match.?.ends_with); -} - -test "parsePoliciesBytes: log policy with contains matcher" { - const allocator = std.testing.allocator; - - const json = - \\{ - \\ "policies": [{ - \\ "id": "drop-secret-logs", - \\ "name": "Drop secret logs", - \\ "log": { - \\ "match": [{ - \\ "log_field": "body", - \\ "contains": "password" - \\ }], - \\ "keep": "none" - \\ } - \\ }] - \\} - ; - - const policies = try parsePoliciesBytes(allocator, json); - defer { - for (policies) |*p| { - p.deinit(allocator); - } - allocator.free(policies); - } - - try std.testing.expectEqual(@as(usize, 1), policies.len); - - const log_target = policies[0].target.?.log; - const matcher = log_target.match.items[0]; - - try std.testing.expect(matcher.match.? == .contains); - try std.testing.expectEqualStrings("password", matcher.match.?.contains); -} - -test "parsePoliciesBytes: log policy with case_insensitive flag" { - const allocator = std.testing.allocator; - - const json = - \\{ - \\ "policies": [{ - \\ "id": "drop-error-logs-ci", - \\ "name": "Drop error logs case insensitive", - \\ "log": { - \\ "match": [{ - \\ "log_field": "body", - \\ "starts_with": "error", - \\ "case_insensitive": true - \\ }], - \\ "keep": "none" - \\ } - \\ }] - \\} - ; - - const policies = try parsePoliciesBytes(allocator, json); - defer { - for (policies) |*p| { - p.deinit(allocator); - } - allocator.free(policies); - } - - try std.testing.expectEqual(@as(usize, 1), policies.len); - - const log_target = policies[0].target.?.log; - const matcher = log_target.match.items[0]; - - try std.testing.expect(matcher.match.? == .starts_with); - try std.testing.expectEqualStrings("error", matcher.match.?.starts_with); - try std.testing.expect(matcher.case_insensitive); -} - -test "parsePoliciesBytes: metric policy with contains and case_insensitive" { - const allocator = std.testing.allocator; - - const json = - \\{ - \\ "policies": [{ - \\ "id": "filter-debug-metrics", - \\ "name": "Filter debug metrics", - \\ "metric": { - \\ "match": [{ - \\ "metric_field": "name", - \\ "contains": "debug", - \\ "case_insensitive": true - \\ }], - \\ "keep": false - \\ } - \\ }] - \\} - ; - - const policies = try parsePoliciesBytes(allocator, json); - defer { - for (policies) |*p| { - p.deinit(allocator); - } - allocator.free(policies); - } - - try std.testing.expectEqual(@as(usize, 1), policies.len); - - const metric_target = policies[0].target.?.metric; - const matcher = metric_target.match.items[0]; - - try std.testing.expect(matcher.match.? 
== .contains); - try std.testing.expectEqualStrings("debug", matcher.match.?.contains); - try std.testing.expect(matcher.case_insensitive); -} - -// ============================================================================= -// Tests for sample_key parsing -// ============================================================================= - -test "parsePoliciesBytes: log policy with sample_key log_field" { - const allocator = std.testing.allocator; - - const json = - \\{ - \\ "policies": [{ - \\ "id": "sample-by-body", - \\ "name": "Sample by body", - \\ "log": { - \\ "match": [{ "log_field": "body", "regex": ".*" }], - \\ "keep": "50%", - \\ "sample_key": { "log_field": "body" } - \\ } - \\ }] - \\} - ; - - const policies = try parsePoliciesBytes(allocator, json); - defer { - for (policies) |*p| p.deinit(allocator); - allocator.free(policies); - } - - try std.testing.expectEqual(@as(usize, 1), policies.len); - - const log_target = policies[0].target.?.log; - try std.testing.expect(log_target.sample_key != null); - - const sample_key = log_target.sample_key.?; - try std.testing.expect(sample_key.field != null); - try std.testing.expect(sample_key.field.? == .log_field); - try std.testing.expectEqual(LogField.LOG_FIELD_BODY, sample_key.field.?.log_field); -} - -test "parsePoliciesBytes: log policy with sample_key log_attribute string" { - const allocator = std.testing.allocator; - - const json = - \\{ - \\ "policies": [{ - \\ "id": "sample-by-trace", - \\ "name": "Sample by trace_id", - \\ "log": { - \\ "match": [{ "log_field": "body", "regex": ".*" }], - \\ "keep": "50%", - \\ "sample_key": { "log_attribute": "trace_id" } - \\ } - \\ }] - \\} - ; - - const policies = try parsePoliciesBytes(allocator, json); - defer { - for (policies) |*p| p.deinit(allocator); - allocator.free(policies); - } - - const log_target = policies[0].target.?.log; - try std.testing.expect(log_target.sample_key != null); - - const sample_key = log_target.sample_key.?; - try std.testing.expect(sample_key.field.? == .log_attribute); - try std.testing.expectEqual(@as(usize, 1), sample_key.field.?.log_attribute.path.items.len); - try std.testing.expectEqualStrings("trace_id", sample_key.field.?.log_attribute.path.items[0]); -} - -test "parsePoliciesBytes: log policy with sample_key log_attribute array" { - const allocator = std.testing.allocator; - - const json = - \\{ - \\ "policies": [{ - \\ "id": "sample-by-request-id", - \\ "name": "Sample by request.id", - \\ "log": { - \\ "match": [{ "log_field": "body", "regex": ".*" }], - \\ "keep": "50%", - \\ "sample_key": { "log_attribute": ["request", "id"] } - \\ } - \\ }] - \\} - ; - - const policies = try parsePoliciesBytes(allocator, json); - defer { - for (policies) |*p| p.deinit(allocator); - allocator.free(policies); - } - - const log_target = policies[0].target.?.log; - const sample_key = log_target.sample_key.?; - - try std.testing.expect(sample_key.field.? 
== .log_attribute); - try std.testing.expectEqual(@as(usize, 2), sample_key.field.?.log_attribute.path.items.len); - try std.testing.expectEqualStrings("request", sample_key.field.?.log_attribute.path.items[0]); - try std.testing.expectEqualStrings("id", sample_key.field.?.log_attribute.path.items[1]); -} - -test "parsePoliciesBytes: log policy with sample_key resource_attribute" { - const allocator = std.testing.allocator; - - const json = - \\{ - \\ "policies": [{ - \\ "id": "sample-by-host", - \\ "name": "Sample by host", - \\ "log": { - \\ "match": [{ "log_field": "body", "regex": ".*" }], - \\ "keep": "50%", - \\ "sample_key": { "resource_attribute": "host.name" } - \\ } - \\ }] - \\} - ; - - const policies = try parsePoliciesBytes(allocator, json); - defer { - for (policies) |*p| p.deinit(allocator); - allocator.free(policies); - } - - const log_target = policies[0].target.?.log; - const sample_key = log_target.sample_key.?; - - try std.testing.expect(sample_key.field.? == .resource_attribute); - try std.testing.expectEqualStrings("host.name", sample_key.field.?.resource_attribute.path.items[0]); -} - -test "parsePoliciesBytes: log policy with sample_key scope_attribute" { - const allocator = std.testing.allocator; - - const json = - \\{ - \\ "policies": [{ - \\ "id": "sample-by-scope", - \\ "name": "Sample by scope name", - \\ "log": { - \\ "match": [{ "log_field": "body", "regex": ".*" }], - \\ "keep": "50%", - \\ "sample_key": { "scope_attribute": "name" } - \\ } - \\ }] - \\} - ; - - const policies = try parsePoliciesBytes(allocator, json); - defer { - for (policies) |*p| p.deinit(allocator); - allocator.free(policies); - } - - const log_target = policies[0].target.?.log; - const sample_key = log_target.sample_key.?; - - try std.testing.expect(sample_key.field.? == .scope_attribute); - try std.testing.expectEqualStrings("name", sample_key.field.?.scope_attribute.path.items[0]); -} - -test "parsePoliciesBytes: log policy without sample_key" { - const allocator = std.testing.allocator; - - const json = - \\{ - \\ "policies": [{ - \\ "id": "no-sample-key", - \\ "name": "No sample key", - \\ "log": { - \\ "match": [{ "log_field": "body", "regex": ".*" }], - \\ "keep": "50%" - \\ } - \\ }] - \\} - ; - - const policies = try parsePoliciesBytes(allocator, json); - defer { - for (policies) |*p| p.deinit(allocator); - allocator.free(policies); - } - - const log_target = policies[0].target.?.log; - try std.testing.expect(log_target.sample_key == null); -} diff --git a/src/policy/policy_engine.zig b/src/policy/policy_engine.zig deleted file mode 100644 index 7d67928..0000000 --- a/src/policy/policy_engine.zig +++ /dev/null @@ -1,3700 +0,0 @@ -//! Policy Engine - Hyperscan-based policy evaluation -//! -//! This module provides efficient policy evaluation using an inverted index -//! of Hyperscan databases. Instead of iterating through policies and checking -//! matchers, we: -//! -//! 1. Iterate through matcher keys (MatchCase, attribute_key) -//! 2. Scan field values against pre-compiled Hyperscan databases -//! 3. Aggregate match counts per policy using O(1) array operations -//! 4. Select the highest priority policy where all matchers matched -//! -//! ## Policy Stages -//! -//! Policies now contain both filter and transform stages: -//! 1. **Filter Stage**: Determines keep/drop based on `keep` field -//! 2. **Transform Stage**: Applies modifications (redact, remove, rename, add) -//! -//! The engine evaluates the filter stage first. If the decision is to drop, -//! 
evaluation stops early. Otherwise, matched policies are returned for -//! transform processing. -//! -//! ## Performance Characteristics -//! -//! - O(k * n) where k = number of unique matcher keys, n = input text length -//! - O(1) per-pattern match aggregation using numeric policy indices -//! - Independent of the number of policies or patterns per key -//! - Lock-free reads from atomic snapshot pointer - -const std = @import("std"); -const proto = @import("proto"); -const matcher_index = @import("./matcher_index.zig"); -const policy_mod = @import("./root.zig"); -const policy_types = @import("./types.zig"); -const log_transform = @import("./log_transform.zig"); -const sampler_mod = @import("./sampler.zig"); -const rate_limiter_mod = @import("./rate_limiter.zig"); - -const o11y = @import("../observability/root.zig"); -const NoopEventBus = o11y.NoopEventBus; -const EventBus = o11y.EventBus; - -const LogMatcher = proto.policy.LogMatcher; - -const KeepValue = matcher_index.KeepValue; -const PolicyIndex = matcher_index.PolicyIndex; -const PolicyInfo = matcher_index.PolicyInfo; -const MAX_POLICIES = matcher_index.MAX_POLICIES; -const Sampler = sampler_mod.Sampler; -const RateLimiter = rate_limiter_mod.RateLimiter; - -const MatcherDatabase = matcher_index.MatcherDatabase; -pub const PolicyRegistry = policy_mod.Registry; -pub const PolicySnapshot = policy_mod.Snapshot; - -// Re-export types for callers -pub const FieldRef = policy_types.FieldRef; -pub const MetricFieldRef = policy_types.MetricFieldRef; -pub const LogFieldAccessor = policy_types.LogFieldAccessor; -pub const LogFieldMutator = policy_types.LogFieldMutator; -pub const MetricFieldAccessor = policy_types.MetricFieldAccessor; -pub const MetricFieldMutator = policy_types.MetricFieldMutator; -pub const TraceFieldRef = policy_types.TraceFieldRef; -pub const TraceFieldAccessor = policy_types.TraceFieldAccessor; -pub const TraceFieldMutator = policy_types.TraceFieldMutator; -pub const MutateOp = policy_types.MutateOp; -pub const MetricMutateOp = policy_types.MetricMutateOp; -pub const TraceMutateOp = policy_types.TraceMutateOp; -pub const TelemetryType = policy_types.TelemetryType; - -// Proto types -const Policy = proto.policy.Policy; -const LogTarget = proto.policy.LogTarget; -const MetricTarget = proto.policy.MetricTarget; -const TraceTarget = proto.policy.TraceTarget; - -/// Maximum number of pattern matches to track per scan -pub const MAX_MATCHES_PER_SCAN: usize = 256; - -/// Helper to extract the log target from a policy (handles target union) -pub fn getLogTarget(policy: *const Policy) ?*const LogTarget { - const target_ptr = &(policy.target orelse return null); - return switch (target_ptr.*) { - .log => |*log| log, - .metric, .trace => null, - }; -} - -/// Helper to extract the metric target from a policy (handles target union) -pub fn getMetricTarget(policy: *const Policy) ?*const MetricTarget { - const target_ptr = &(policy.target orelse return null); - return switch (target_ptr.*) { - .metric => |*metric| metric, - .log, .trace => null, - }; -} - -/// Helper to extract the trace target from a policy (handles target union) -pub fn getTraceTarget(policy: *const Policy) ?*const TraceTarget { - const target_ptr = &(policy.target orelse return null); - return switch (target_ptr.*) { - .trace => |*trace| trace, - .log, .metric => null, - }; -} - -// ============================================================================= -// FilterDecision - Result of filter stage evaluation -// 
============================================================================= - -/// Decision from the filter stage of policy evaluation -pub const FilterDecision = enum { - /// Keep the telemetry (explicitly matched a keep policy) - keep, - /// Drop the telemetry (matched a drop policy) - drop, - /// No policy matched - default behavior (keep) - unset, - - /// Returns true if telemetry should continue to next stage - pub fn shouldContinue(self: FilterDecision) bool { - return self != .drop; - } -}; - -// ============================================================================= -// PolicyResult - Complete evaluation result -// ============================================================================= - -/// Result of policy evaluation containing filter decision and matched policies -pub const PolicyResult = struct { - /// The filter decision (keep/drop/unset) - decision: FilterDecision, - /// IDs of policies that matched (for transform stage lookup) - /// Only populated when decision is keep or unset - matched_policy_ids: []const []const u8, - /// Whether any transformations were applied to the telemetry - /// Callers should use this to determine if re-encoding is needed - was_transformed: bool = false, - - /// Empty result for dropped telemetry - pub const dropped = PolicyResult{ - .decision = .drop, - .matched_policy_ids = &.{}, - .was_transformed = false, - }; - - /// Default result when no policies match - pub const unmatched = PolicyResult{ - .decision = .unset, - .matched_policy_ids = &.{}, - .was_transformed = false, - }; -}; - -// ============================================================================= -// Comptime Type Helpers - Select types based on telemetry type -// ============================================================================= - -/// Returns the field reference type for the given telemetry type -fn FieldRefType(comptime T: TelemetryType) type { - return switch (T) { - .log => FieldRef, - .metric => MetricFieldRef, - .trace => TraceFieldRef, - }; -} - -/// Returns the field accessor function type for the given telemetry type -fn FieldAccessorType(comptime T: TelemetryType) type { - return switch (T) { - .log => LogFieldAccessor, - .metric => MetricFieldAccessor, - .trace => TraceFieldAccessor, - }; -} - -/// Returns the field mutator function type for the given telemetry type -fn FieldMutatorType(comptime T: TelemetryType) type { - return switch (T) { - .log => LogFieldMutator, - .metric => MetricFieldMutator, - .trace => TraceFieldMutator, - }; -} - -// ============================================================================= -// Observability Events -// ============================================================================= - -const EvaluateEmpty = struct {}; -const EvaluateStart = struct { matcher_key_count: usize, policy_count: usize }; -const MatcherKeyFieldNotPresent = struct { - telemetry_type: TelemetryType, - field: MatcherFieldRef, -}; -const MatcherKeyFieldValue = struct { - telemetry_type: TelemetryType, - field: MatcherFieldRef, - value: []const u8, -}; -const MatcherFieldRef = union(TelemetryType) { - log: FieldRef, - metric: MetricFieldRef, - trace: TraceFieldRef, -}; -const MatcherKeyNoDatabase = struct {}; -const ScanResult = struct { positive_count: usize, negated_count: usize }; -const PolicyFullMatch = struct { policy_index: PolicyIndex, policy_id: []const u8 }; -const PolicyNegationFailed = struct { policy_index: PolicyIndex }; -const EvaluateResult = struct { decision: FilterDecision, matched_count: usize }; -const 
TransformApplied = struct { - policy_id: []const u8, - removes: usize, - redacts: usize, - renames: usize, - adds: usize, -}; - -// ============================================================================= -// PolicyEngine - Main evaluation engine -// ============================================================================= - -/// Policy engine that evaluates telemetry against policies using Hyperscan. -/// Uses an inverted index for O(k*n) evaluation regardless of policy count. -/// -/// The engine runs two stages: -/// 1. Filter stage: Determines keep/drop decision -/// 2. Transform stage: Returns matched policies for modification (caller handles) -pub const PolicyEngine = struct { - /// Event bus for observability - bus: *EventBus, - /// Policy registry for getting snapshots and recording stats/errors - registry: *PolicyRegistry, - - const Self = @This(); - - pub fn init(bus: *EventBus, registry: *PolicyRegistry) Self { - return .{ - .bus = bus, - .registry = registry, - }; - } - - /// Evaluate telemetry against all policies in the current snapshot. - /// - /// Returns a PolicyResult containing: - /// - The filter decision (keep/drop/unset) - /// - List of matched policy IDs (for transform stage lookup) - /// - /// A policy "fully matches" when: - /// - All positive matchers have their patterns found in the field value - /// - No negated matchers have their patterns found in the field value - /// - /// Thread-safe: uses only stack-allocated buffers for match aggregation, and - /// automatically gets the current snapshot from the registry. - /// - /// The `policy_id_buf` parameter stores matched policy IDs; the caller - /// provides the buffer so evaluation performs no allocation. - /// - /// If `field_mutator` is provided, transforms from matched policies are - /// applied; pass null to skip transform application.
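// A minimal standalone sketch (not from the original module) of the
// decision-combining rule evaluate() relies on, implemented in
// findMatchingPolicies below: drop beats keep, and keep beats unset.
// `Decision` and `merge` are illustrative stand-ins for FilterDecision.
const std = @import("std");

const Decision = enum { keep, drop, unset };

fn merge(current: Decision, policy_decision: Decision) Decision {
    if (policy_decision == .drop) return .drop; // drop always wins
    if (policy_decision == .keep and current == .unset) return .keep;
    return current; // unset never overrides an earlier keep/drop
}

test "drop beats keep, keep beats unset" {
    var decision: Decision = .unset;
    decision = merge(decision, .keep);
    try std.testing.expectEqual(Decision.keep, decision);
    decision = merge(decision, .drop);
    try std.testing.expectEqual(Decision.drop, decision);
}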
- /// Result of scanning all matcher keys against field values - const ScanState = struct { - match_counts: [MAX_MATCHES_PER_SCAN]u16, - active_policies: [MAX_MATCHES_PER_SCAN]PolicyIndex, - is_active: [MAX_MATCHES_PER_SCAN]bool, - active_count: usize, - }; - - /// Result of finding matching policies from scan state - const MatchState = struct { - matched_indices: [MAX_MATCHES_PER_SCAN]PolicyIndex, - matched_policies: [MAX_MATCHES_PER_SCAN]PolicyInfo, - matched_decisions: [MAX_MATCHES_PER_SCAN]FilterDecision, - matched_count: usize, - decision: FilterDecision, - }; - - pub fn evaluate( - self: *const Self, - comptime T: TelemetryType, - ctx: *anyopaque, - field_accessor: FieldAccessorType(T), - field_mutator: ?FieldMutatorType(T), - policy_id_buf: [][]const u8, - ) PolicyResult { - // Get current snapshot from registry (lock-free) - const snapshot = self.registry.getSnapshot() orelse { - self.bus.debug(EvaluateEmpty{}); - return PolicyResult.unmatched; - }; - - // Select the appropriate index based on telemetry type (compile-time dispatch) - const index = switch (T) { - .log => &snapshot.log_index, - .metric => &snapshot.metric_index, - .trace => &snapshot.trace_index, - }; - - if (index.isEmpty()) { - self.bus.debug(EvaluateEmpty{}); - return PolicyResult.unmatched; - } - - self.bus.debug(EvaluateStart{ .matcher_key_count = index.getMatcherKeys().len, .policy_count = index.getPolicyCount() }); - - // Phase 1: Scan all matcher keys and compute match counts - var scan_state = self.scanMatcherKeys(T, ctx, field_accessor, index); - - // Phase 2: Find matching policies and determine decision - const match_state = self.findMatchingPolicies(T, ctx, field_accessor, index, &scan_state, policy_id_buf); - - self.bus.debug(EvaluateResult{ .decision = match_state.decision, .matched_count = match_state.matched_count }); - - // Record hit/miss stats using lock-free atomics - self.recordMatchedPolicyStats(snapshot, &match_state); - - if (match_state.decision == .drop) { - return PolicyResult.dropped; - } - - // Phase 3: Apply transforms (log only) and record stats - var was_transformed = false; - if (T == .log) { - if (field_mutator) |mutator| { - for (0..match_state.matched_count) |i| { - const policy_index = match_state.matched_indices[i]; - const result = self.applyLogTransforms( - ctx, - field_accessor, - mutator, - snapshot, - policy_index, - policy_id_buf[i], - ); - if (result.totalApplied() > 0) { - was_transformed = true; - // Record transform stats using lock-free atomics - if (snapshot.getStats(policy_index)) |stats| { - stats.addTransform(@intCast(result.totalApplied())); - } - } - } - } - } - - return PolicyResult{ - .decision = match_state.decision, - .matched_policy_ids = policy_id_buf[0..match_state.matched_count], - .was_transformed = was_transformed, - }; - } - - /// Scan all matcher keys and compute match counts for each policy. - /// Returns state needed for determining which policies matched. 
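// A standalone sketch of the counting scheme that scanMatcherKeys (below)
// implements, with plain substring search standing in for Hyperscan.
// `fullyMatches` and its parameters are illustrative names only.
const std = @import("std");

// Seed the counter with the number of negated matchers; positive hits add one,
// negated hits subtract one. A policy fully matches when the counter equals
// positive.len + negated.len (the policy's required_match_count).
fn fullyMatches(
    value: []const u8,
    positive: []const []const u8,
    negated: []const []const u8,
) bool {
    const required: i64 = @intCast(positive.len + negated.len);
    var count: i64 = @intCast(negated.len);
    for (positive) |pattern| {
        if (std.mem.indexOf(u8, value, pattern) != null) count += 1;
    }
    for (negated) |pattern| {
        if (std.mem.indexOf(u8, value, pattern) != null) count -= 1; // negation failed
    }
    return count == required;
}

test "positive and negated matchers share one counter" {
    try std.testing.expect(fullyMatches("error in staging", &.{"error"}, &.{"prod"}));
    try std.testing.expect(!fullyMatches("error in prod", &.{"error"}, &.{"prod"}));
    try std.testing.expect(!fullyMatches("all good", &.{"error"}, &.{"prod"}));
}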
- inline fn scanMatcherKeys( - self: *const Self, - comptime T: TelemetryType, - ctx: *anyopaque, - field_accessor: FieldAccessorType(T), - index: *const matcher_index.MatcherIndexType(T), - ) ScanState { - var state = ScanState{ - .match_counts = undefined, - .active_policies = undefined, - .is_active = undefined, - .active_count = 0, - }; - @memset(&state.match_counts, 0); - @memset(&state.is_active, false); - - // Initialize match counts for policies with negated patterns - // No telemetry type filtering needed - index only contains policies of type T - for (index.getPoliciesWithNegation()) |policy_index| { - const policy_info = index.getPolicyByIndex(policy_index) orelse continue; - state.match_counts[policy_index] = policy_info.negated_count; - state.is_active[policy_index] = true; - state.active_policies[state.active_count] = policy_index; - state.active_count += 1; - } - - var result_buf: [MAX_MATCHES_PER_SCAN]u32 = undefined; - - // Iterate type-specific matcher keys - no runtime type filtering needed - for (index.getMatcherKeys()) |matcher_key| { - const field_ref = matcher_key.field; - - const value = field_accessor(ctx, field_ref) orelse { - self.bus.debug(MatcherKeyFieldNotPresent{ - .telemetry_type = T, - .field = switch (T) { - .log => .{ .log = field_ref }, - .metric => .{ .metric = field_ref }, - .trace => .{ .trace = field_ref }, - }, - }); - continue; - }; - - self.bus.debug(MatcherKeyFieldValue{ - .telemetry_type = T, - .field = switch (T) { - .log => .{ .log = field_ref }, - .metric => .{ .metric = field_ref }, - .trace => .{ .trace = field_ref }, - }, - .value = if (value.len > 100) value[0..100] else value, - }); - - const db = index.getDatabase(matcher_key) orelse { - self.bus.debug(MatcherKeyNoDatabase{}); - continue; - }; - - // Scan positive patterns - increment match counts - const positive_result = db.scanPositive(value, &result_buf); - for (positive_result.matches()) |pattern_id| { - if (pattern_id < db.positive_patterns.len) { - const meta = db.positive_patterns[pattern_id]; - state.match_counts[meta.policy_index] += 1; - if (!state.is_active[meta.policy_index]) { - state.is_active[meta.policy_index] = true; - state.active_policies[state.active_count] = meta.policy_index; - state.active_count += 1; - } - } - } - - // Scan negated patterns - decrement match counts - const negated_result = db.scanNegated(value, &result_buf); - for (negated_result.matches()) |pattern_id| { - if (pattern_id < db.negated_patterns.len) { - const meta = db.negated_patterns[pattern_id]; - state.match_counts[meta.policy_index] -= 1; - if (!state.is_active[meta.policy_index]) { - state.is_active[meta.policy_index] = true; - state.active_policies[state.active_count] = meta.policy_index; - state.active_count += 1; - } - self.bus.debug(PolicyNegationFailed{ .policy_index = meta.policy_index }); - } - } - - self.bus.debug(ScanResult{ .positive_count = positive_result.count, .negated_count = negated_result.count }); - } - - return state; - } - - /// Compute hash for deterministic sampling based on telemetry type and policy config. 
- /// - Traces: hash trace_id so all spans in a trace get the same sampling decision - /// - Logs with sample_key: hash the specified field value for consistent sampling - /// - Otherwise: use context pointer as fallback - inline fn getSamplingHash( - comptime T: TelemetryType, - ctx: *anyopaque, - field_accessor: FieldAccessorType(T), - policy_info: PolicyInfo, - ) u64 { - if (T == .trace) { - const trace_id_ref: FieldRefType(T) = .{ .trace_field = .TRACE_FIELD_TRACE_ID }; - if (field_accessor(ctx, trace_id_ref)) |trace_id_hex| { - return hashTraceId(trace_id_hex); - } - } else if (T == .log) { - if (policy_info.sample_key) |sample_key| { - if (FieldRef.fromSampleKeyField(sample_key.field)) |field_ref| { - if (field_accessor(ctx, field_ref)) |value| { - return hashString(value); - } - } - } - } - return @intFromPtr(ctx); - } - - /// Find all matching policies, apply sampling/rate limiting, and determine final decision. - /// Drop always beats keep: if any policy returns drop, final decision is drop. - inline fn findMatchingPolicies( - self: *const Self, - comptime T: TelemetryType, - ctx: *anyopaque, - field_accessor: FieldAccessorType(T), - index: *const matcher_index.MatcherIndexType(T), - scan_state: *const ScanState, - policy_id_buf: [][]const u8, - ) MatchState { - var state = MatchState{ - .matched_indices = undefined, - .matched_policies = undefined, - .matched_decisions = undefined, - .matched_count = 0, - .decision = .unset, - }; - - for (scan_state.active_policies[0..scan_state.active_count]) |policy_index| { - const policy_info = index.getPolicyByIndex(policy_index) orelse continue; - - if (!policy_info.enabled) continue; - - if (scan_state.match_counts[policy_index] == policy_info.required_match_count) { - self.bus.debug(PolicyFullMatch{ .policy_index = policy_info.index, .policy_id = policy_info.id }); - - const hash_input = getSamplingHash(T, ctx, field_accessor, policy_info); - - // Apply sampling/rate limiting to get this policy's decision - const decision = applyKeepValue(policy_info, hash_input); - - if (state.matched_count < policy_id_buf.len) { - policy_id_buf[state.matched_count] = policy_info.id; - state.matched_indices[state.matched_count] = policy_index; - state.matched_policies[state.matched_count] = policy_info; - state.matched_decisions[state.matched_count] = decision; - state.matched_count += 1; - } - - // Update final decision: drop beats keep, keep beats unset - if (decision == .drop) { - state.decision = .drop; - } else if (decision == .keep and state.decision == .unset) { - state.decision = .keep; - } - } - } - - return state; - } - - /// Apply transforms to log context for a matched policy. - /// Returns the transform result for stats recording. 
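// A standalone mirror (illustrative, not the real struct) of the
// log_transform.TransformResult bookkeeping consumed by applyLogTransforms
// below: stats are recorded and was_transformed is set only when
// totalApplied() is nonzero. The field names match the real struct;
// everything else is a sketch.
const std = @import("std");

const TransformTally = struct {
    removes_applied: usize = 0,
    redacts_applied: usize = 0,
    renames_applied: usize = 0,
    adds_applied: usize = 0,

    pub fn totalApplied(self: @This()) usize {
        return self.removes_applied + self.redacts_applied +
            self.renames_applied + self.adds_applied;
    }
};

test "transforms only count when something was applied" {
    var result = TransformTally{};
    try std.testing.expectEqual(@as(usize, 0), result.totalApplied());
    result.redacts_applied += 1;
    try std.testing.expect(result.totalApplied() > 0); // would set was_transformed
}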
- inline fn applyLogTransforms( - self: *const Self, - ctx: *anyopaque, - field_accessor: LogFieldAccessor, - field_mutator: LogFieldMutator, - snapshot: *const PolicySnapshot, - policy_index: PolicyIndex, - policy_id: []const u8, - ) log_transform.TransformResult { - const policy = snapshot.getPolicy(policy_index) orelse return .{}; - const log_target = getLogTarget(policy) orelse return .{}; - const transform = log_target.transform orelse return .{}; - - const result = log_transform.applyTransforms(&transform, ctx, field_accessor, field_mutator); - - if (result.totalApplied() > 0) { - self.bus.debug(TransformApplied{ - .policy_id = policy_id, - .removes = result.removes_applied, - .redacts = result.redacts_applied, - .renames = result.renames_applied, - .adds = result.adds_applied, - }); - } - - return result; - } - - /// Record stats for all matched policies using lock-free atomics. - /// Hit if policy's decision matches final decision, miss otherwise. - inline fn recordMatchedPolicyStats( - self: *const Self, - snapshot: *const PolicySnapshot, - match_state: *const MatchState, - ) void { - _ = self; // Observability bus not used for stats currently - for (0..match_state.matched_count) |i| { - const policy_index = match_state.matched_indices[i]; - const policy_decision = match_state.matched_decisions[i]; - - if (snapshot.getStats(policy_index)) |stats| { - if (policy_decision == match_state.decision) { - stats.addHit(); - } else { - stats.addMiss(); - } - } - } - } - - /// Apply policy's keep value with sampling/rate limiting to get decision. - /// - none: always drop - /// - all: always keep - /// - percentage: hash-based deterministic sampling - /// - per_second/per_minute: uses the policy's RateLimiter - fn applyKeepValue(policy_info: PolicyInfo, hash_input: u64) FilterDecision { - return switch (policy_info.keep) { - .none => .drop, - .all => .keep, - .percentage => |pct| { - const sampler = Sampler{ .percentage = pct }; - return if (sampler.shouldKeep(hash_input)) .keep else .drop; - }, - .per_second, .per_minute => { - if (policy_info.rate_limiter) |rl| { - return if (rl.shouldKeep()) .keep else .drop; - } - return .keep; // No rate limiter configured, default to keep - }, - }; - } -}; - -/// Hash a trace ID hex string to u64 for deterministic sampling. -/// This ensures all spans with the same trace_id get the same sampling decision. -/// -/// Following OTel probability sampling spec, we use the rightmost 56 bits -/// of the trace ID for randomness. For a 32-char hex string (16 bytes), -/// we use the last 14 hex chars (56 bits). -fn hashTraceId(trace_id_hex: []const u8) u64 { - if (trace_id_hex.len == 0) return 0; - - var hash: u64 = 0; - - // Use the last 14 hex characters (56 bits) if available - // This follows OTel spec which uses rightmost bits for randomness - const start = if (trace_id_hex.len > 14) trace_id_hex.len - 14 else 0; - - for (trace_id_hex[start..]) |c| { - const nibble: u64 = switch (c) { - '0'...'9' => c - '0', - 'a'...'f' => c - 'a' + 10, - 'A'...'F' => c - 'A' + 10, - else => 0, - }; - hash = (hash << 4) | nibble; - } - - return hash; -} - -/// Hash a string value for deterministic sampling. -/// Uses FNV-1a for good distribution and speed. 
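// A standalone toy (illustrative only) of how a deterministic hash feeds a
// percentage decision, in the spirit of applyKeepValue's
// Sampler{ .percentage = pct } above; the real Sampler lives in sampler.zig
// and may use a different threshold scheme. hashString below uses the same
// FNV-1a constants as std.hash.Fnv1a_64.
const std = @import("std");

fn toyShouldKeep(hash: u64, percentage: f32) bool {
    // Compare the top 32 bits of the hash against a 32-bit threshold.
    const threshold: u64 = @intFromFloat(@as(f64, percentage) / 100.0 * 4294967296.0);
    return (hash >> 32) < threshold;
}

test "same sample key always gets the same decision" {
    const h = std.hash.Fnv1a_64.hash("request-42");
    const first = toyShouldKeep(h, 50.0);
    try std.testing.expectEqual(first, toyShouldKeep(h, 50.0)); // deterministic
    try std.testing.expect(toyShouldKeep(h, 100.0)); // 100% always keeps
    try std.testing.expect(!toyShouldKeep(h, 0.0)); // 0% always drops
}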
-fn hashString(s: []const u8) u64 { - if (s.len == 0) return 0; - - // FNV-1a 64-bit - var hash: u64 = 0xcbf29ce484222325; // FNV offset basis - for (s) |byte| { - hash ^= byte; - hash *%= 0x100000001b3; // FNV prime - } - return hash; -} - -// ============================================================================= -// Tests -// ============================================================================= - -const testing = std.testing; -const SourceType = policy_mod.SourceType; -const LogField = proto.policy.LogField; -const AttributePath = proto.policy.AttributePath; - -/// Helper to create AttributePath for tests -fn testMakeAttrPath(allocator: std.mem.Allocator, key: []const u8) !AttributePath { - var attr_path = AttributePath{}; - try attr_path.path.append(allocator, try allocator.dupe(u8, key)); - return attr_path; -} - -/// Test context for unit tests - simple struct with known fields -const TestLogContext = struct { - level: ?[]const u8 = null, - message: ?[]const u8 = null, - service: ?[]const u8 = null, - ddtags: ?[]const u8 = null, - env: ?[]const u8 = null, - trace_id: ?[]const u8 = null, - - pub fn fieldAccessor(ctx_ptr: *const anyopaque, field: FieldRef) ?[]const u8 { - const self: *const TestLogContext = @ptrCast(@alignCast(ctx_ptr)); - return switch (field) { - .log_field => |lf| switch (lf) { - .LOG_FIELD_BODY => self.message, - .LOG_FIELD_SEVERITY_TEXT => self.level, - else => null, - }, - .log_attribute => |attr_path| { - const key = if (attr_path.path.items.len > 0) attr_path.path.items[0] else return null; - if (std.mem.eql(u8, key, "service")) return self.service; - if (std.mem.eql(u8, key, "ddtags")) return self.ddtags; - if (std.mem.eql(u8, key, "message")) return self.message; - if (std.mem.eql(u8, key, "env")) return self.env; - if (std.mem.eql(u8, key, "trace_id")) return self.trace_id; - return null; - }, - .resource_attribute, .scope_attribute => null, - }; - } -}; - -test "PolicyEngine: empty registry returns unset" { - const allocator = testing.allocator; - - var noop_bus: NoopEventBus = undefined; - noop_bus.init(); - var registry = PolicyRegistry.init(allocator, noop_bus.eventBus()); - defer registry.deinit(); - - const engine = PolicyEngine.init(noop_bus.eventBus(), ®istry); - - var test_log = TestLogContext{ .message = "hello" }; - var policy_id_buf: [16][]const u8 = undefined; - - const result = engine.evaluate(.log, &test_log, TestLogContext.fieldAccessor, null, &policy_id_buf); - - try testing.expectEqual(FilterDecision.unset, result.decision); - try testing.expectEqual(@as(usize, 0), result.matched_policy_ids.len); -} - -test "PolicyEngine: single policy drop match" { - const allocator = testing.allocator; - - var policy = Policy{ - .id = try allocator.dupe(u8, "policy-1"), - .name = try allocator.dupe(u8, "drop-errors"), - .enabled = true, - .target = .{ .log = .{ - .keep = try allocator.dupe(u8, "none"), - } }, - }; - try policy.target.?.log.match.append(allocator, .{ - .field = .{ .log_field = .LOG_FIELD_BODY }, - .match = .{ .regex = try allocator.dupe(u8, "error") }, - }); - defer policy.deinit(allocator); - - var noop_bus: NoopEventBus = undefined; - noop_bus.init(); - var registry = PolicyRegistry.init(allocator, noop_bus.eventBus()); - defer registry.deinit(); - try registry.updatePolicies(&.{policy}, "file-provider", .file); - - const engine = PolicyEngine.init(noop_bus.eventBus(), ®istry); - - // Matching log should be dropped - var error_log = TestLogContext{ .message = "an error occurred" }; - var policy_id_buf: [16][]const u8 = 
undefined; - - const result = engine.evaluate(.log, &error_log, TestLogContext.fieldAccessor, null, &policy_id_buf); - try testing.expectEqual(FilterDecision.drop, result.decision); - // Dropped results don't include policy IDs (no transform needed) - try testing.expectEqual(@as(usize, 0), result.matched_policy_ids.len); - - // Non-matching log should be unset (no policy matched) - var info_log = TestLogContext{ .message = "all good" }; - const result2 = engine.evaluate(.log, &info_log, TestLogContext.fieldAccessor, null, &policy_id_buf); - try testing.expectEqual(FilterDecision.unset, result2.decision); -} - -test "PolicyEngine: single policy keep match returns policy ID" { - const allocator = testing.allocator; - - var policy = Policy{ - .id = try allocator.dupe(u8, "policy-1"), - .name = try allocator.dupe(u8, "keep-errors"), - .enabled = true, - .target = .{ .log = .{ - .keep = try allocator.dupe(u8, "all"), - } }, - }; - try policy.target.?.log.match.append(allocator, .{ - .field = .{ .log_field = .LOG_FIELD_BODY }, - .match = .{ .regex = try allocator.dupe(u8, "error") }, - }); - defer policy.deinit(allocator); - - var noop_bus: NoopEventBus = undefined; - noop_bus.init(); - var registry = PolicyRegistry.init(allocator, noop_bus.eventBus()); - defer registry.deinit(); - try registry.updatePolicies(&.{policy}, "file-provider", .file); - - const engine = PolicyEngine.init(noop_bus.eventBus(), ®istry); - - // Matching log should be kept with policy ID returned - var error_log = TestLogContext{ .message = "an error occurred" }; - var policy_id_buf: [16][]const u8 = undefined; - - const result = engine.evaluate(.log, &error_log, TestLogContext.fieldAccessor, null, &policy_id_buf); - - try testing.expectEqual(FilterDecision.keep, result.decision); - try testing.expectEqual(@as(usize, 1), result.matched_policy_ids.len); - try testing.expectEqualStrings("policy-1", result.matched_policy_ids[0]); -} - -test "PolicyEngine: multiple matchers AND logic" { - const allocator = testing.allocator; - - var policy = Policy{ - .id = try allocator.dupe(u8, "policy-1"), - .name = try allocator.dupe(u8, "drop-payment-errors"), - .enabled = true, - .target = .{ .log = .{ - .keep = try allocator.dupe(u8, "none"), - } }, - }; - // Two matchers - both must match - try policy.target.?.log.match.append(allocator, .{ - .field = .{ .log_field = .LOG_FIELD_BODY }, - .match = .{ .regex = try allocator.dupe(u8, "error") }, - }); - try policy.target.?.log.match.append(allocator, .{ - .field = .{ .log_attribute = try testMakeAttrPath(allocator, "service") }, - .match = .{ .regex = try allocator.dupe(u8, "payment") }, - }); - defer policy.deinit(allocator); - - var noop_bus: NoopEventBus = undefined; - noop_bus.init(); - var registry = PolicyRegistry.init(allocator, noop_bus.eventBus()); - defer registry.deinit(); - try registry.updatePolicies(&.{policy}, "file-provider", .file); - - const engine = PolicyEngine.init(noop_bus.eventBus(), ®istry); - var policy_id_buf: [16][]const u8 = undefined; - - // Both match - dropped - var payment_error = TestLogContext{ .message = "an error occurred", .service = "payment-api" }; - try testing.expectEqual(FilterDecision.drop, engine.evaluate(.log, &payment_error, TestLogContext.fieldAccessor, null, &policy_id_buf).decision); - - // Only message matches - unset - var other_error = TestLogContext{ .message = "an error occurred", .service = "auth-api" }; - try testing.expectEqual(FilterDecision.unset, engine.evaluate(.log, &other_error, TestLogContext.fieldAccessor, null, 
&policy_id_buf).decision); - - // Only service matches - unset - var payment_info = TestLogContext{ .message = "request completed", .service = "payment-api" }; - try testing.expectEqual(FilterDecision.unset, engine.evaluate(.log, &payment_info, TestLogContext.fieldAccessor, null, &policy_id_buf).decision); -} - -test "PolicyEngine: negated matcher" { - const allocator = testing.allocator; - - // Drop logs that do NOT contain "important" - var policy = Policy{ - .id = try allocator.dupe(u8, "policy-1"), - .name = try allocator.dupe(u8, "drop-non-important"), - .enabled = true, - .target = .{ .log = .{ - .keep = try allocator.dupe(u8, "none"), - } }, - }; - try policy.target.?.log.match.append(allocator, .{ - .field = .{ .log_field = .LOG_FIELD_BODY }, - .match = .{ .regex = try allocator.dupe(u8, "important") }, - .negate = true, - }); - defer policy.deinit(allocator); - - var noop_bus: NoopEventBus = undefined; - noop_bus.init(); - var registry = PolicyRegistry.init(allocator, noop_bus.eventBus()); - defer registry.deinit(); - try registry.updatePolicies(&.{policy}, "file-provider", .file); - - const engine = PolicyEngine.init(noop_bus.eventBus(), ®istry); - var policy_id_buf: [16][]const u8 = undefined; - - // Non-important log should be dropped (negate: pattern NOT found = success) - var boring = TestLogContext{ .message = "just a regular log" }; - try testing.expectEqual(FilterDecision.drop, engine.evaluate(.log, &boring, TestLogContext.fieldAccessor, null, &policy_id_buf).decision); - - // Important log should be unset (negate: pattern found = failure, no match) - var important = TestLogContext{ .message = "this is important data" }; - try testing.expectEqual(FilterDecision.unset, engine.evaluate(.log, &important, TestLogContext.fieldAccessor, null, &policy_id_buf).decision); -} - -test "PolicyEngine: mixed negated and non-negated matchers" { - const allocator = testing.allocator; - - // Drop errors that are NOT from production - var policy = Policy{ - .id = try allocator.dupe(u8, "policy-1"), - .name = try allocator.dupe(u8, "drop-non-prod-errors"), - .enabled = true, - .target = .{ .log = .{ - .keep = try allocator.dupe(u8, "none"), - } }, - }; - // Must contain "error" - try policy.target.?.log.match.append(allocator, .{ - .field = .{ .log_field = .LOG_FIELD_BODY }, - .match = .{ .regex = try allocator.dupe(u8, "error") }, - }); - // Must NOT be from production - try policy.target.?.log.match.append(allocator, .{ - .field = .{ .log_attribute = try testMakeAttrPath(allocator, "env") }, - .match = .{ .regex = try allocator.dupe(u8, "prod") }, - .negate = true, - }); - defer policy.deinit(allocator); - - var noop_bus: NoopEventBus = undefined; - noop_bus.init(); - var registry = PolicyRegistry.init(allocator, noop_bus.eventBus()); - defer registry.deinit(); - try registry.updatePolicies(&.{policy}, "file-provider", .file); - - const engine = PolicyEngine.init(noop_bus.eventBus(), ®istry); - var policy_id_buf: [16][]const u8 = undefined; - - // Error from staging - dropped (error matches, prod not found = both conditions satisfied) - var staging_error = TestLogContext{ .message = "an error occurred", .env = "staging" }; - try testing.expectEqual(FilterDecision.drop, engine.evaluate(.log, &staging_error, TestLogContext.fieldAccessor, null, &policy_id_buf).decision); - - // Error from production - unset (error matches, but prod IS found = negation failed) - var prod_error = TestLogContext{ .message = "an error occurred", .env = "production" }; - try 
testing.expectEqual(FilterDecision.unset, engine.evaluate(.log, &prod_error, TestLogContext.fieldAccessor, null, &policy_id_buf).decision); - - // Non-error from staging - unset (error doesn't match) - var staging_info = TestLogContext{ .message = "all good", .env = "staging" }; - try testing.expectEqual(FilterDecision.unset, engine.evaluate(.log, &staging_info, TestLogContext.fieldAccessor, null, &policy_id_buf).decision); -} - -test "PolicyEngine: most restrictive wins - drop beats keep" { - const allocator = testing.allocator; - - // Policy that keeps errors - var keep_policy = Policy{ - .id = try allocator.dupe(u8, "keep-errors"), - .name = try allocator.dupe(u8, "keep-errors"), - .enabled = true, - .target = .{ .log = .{ - .keep = try allocator.dupe(u8, "all"), - } }, - }; - try keep_policy.target.?.log.match.append(allocator, .{ - .field = .{ .log_field = .LOG_FIELD_BODY }, - .match = .{ .regex = try allocator.dupe(u8, "error") }, - }); - defer keep_policy.deinit(allocator); - - // Policy that drops errors from payment service (more specific AND more restrictive) - var drop_policy = Policy{ - .id = try allocator.dupe(u8, "drop-payment-errors"), - .name = try allocator.dupe(u8, "drop-payment-errors"), - .enabled = true, - .target = .{ .log = .{ - .keep = try allocator.dupe(u8, "none"), - } }, - }; - try drop_policy.target.?.log.match.append(allocator, .{ - .field = .{ .log_field = .LOG_FIELD_BODY }, - .match = .{ .regex = try allocator.dupe(u8, "error") }, - }); - try drop_policy.target.?.log.match.append(allocator, .{ - .field = .{ .log_attribute = try testMakeAttrPath(allocator, "service") }, - .match = .{ .regex = try allocator.dupe(u8, "payment") }, - }); - defer drop_policy.deinit(allocator); - - var noop_bus: NoopEventBus = undefined; - noop_bus.init(); - var registry = PolicyRegistry.init(allocator, noop_bus.eventBus()); - defer registry.deinit(); - try registry.updatePolicies(&.{ keep_policy, drop_policy }, "file-provider", .file); - - const engine = PolicyEngine.init(noop_bus.eventBus(), ®istry); - var policy_id_buf: [16][]const u8 = undefined; - - // Error from payment - both policies match, most restrictive (DROP) wins - var payment_error = TestLogContext{ .message = "an error occurred", .service = "payment-api" }; - try testing.expectEqual(FilterDecision.drop, engine.evaluate(.log, &payment_error, TestLogContext.fieldAccessor, null, &policy_id_buf).decision); - - // Error from auth - only keep_policy matches (KEEP) - var auth_error = TestLogContext{ .message = "an error occurred", .service = "auth-api" }; - const result = engine.evaluate(.log, &auth_error, TestLogContext.fieldAccessor, null, &policy_id_buf); - try testing.expectEqual(FilterDecision.keep, result.decision); - try testing.expectEqual(@as(usize, 1), result.matched_policy_ids.len); -} - -test "PolicyEngine: disabled policies are skipped" { - const allocator = testing.allocator; - - var policy = Policy{ - .id = try allocator.dupe(u8, "policy-1"), - .name = try allocator.dupe(u8, "disabled-drop"), - .enabled = false, // Disabled! 
- .target = .{ .log = .{ - .keep = try allocator.dupe(u8, "none"), - } }, - }; - try policy.target.?.log.match.append(allocator, .{ - .field = .{ .log_field = .LOG_FIELD_BODY }, - .match = .{ .regex = try allocator.dupe(u8, "error") }, - }); - defer policy.deinit(allocator); - - var noop_bus: NoopEventBus = undefined; - noop_bus.init(); - var registry = PolicyRegistry.init(allocator, noop_bus.eventBus()); - defer registry.deinit(); - try registry.updatePolicies(&.{policy}, "file-provider", .file); - - const engine = PolicyEngine.init(noop_bus.eventBus(), &registry); - var policy_id_buf: [16][]const u8 = undefined; - - // Would match but policy is disabled - unset - var error_log = TestLogContext{ .message = "an error occurred" }; - try testing.expectEqual(FilterDecision.unset, engine.evaluate(.log, &error_log, TestLogContext.fieldAccessor, null, &policy_id_buf).decision); -} - -test "PolicyEngine: regex pattern matching" { - const allocator = testing.allocator; - - var policy = Policy{ - .id = try allocator.dupe(u8, "policy-1"), - .name = try allocator.dupe(u8, "drop-error-pattern"), - .enabled = true, - .target = .{ .log = .{ - .keep = try allocator.dupe(u8, "none"), - } }, - }; - // Regex pattern "^.*rror" matches "error" or "Error" by leaving the first letter unanchored - try policy.target.?.log.match.append(allocator, .{ - .field = .{ .log_field = .LOG_FIELD_BODY }, - .match = .{ .regex = try allocator.dupe(u8, "^.*rror") }, - }); - defer policy.deinit(allocator); - - var noop_bus: NoopEventBus = undefined; - noop_bus.init(); - var registry = PolicyRegistry.init(allocator, noop_bus.eventBus()); - defer registry.deinit(); - try registry.updatePolicies(&.{policy}, "file-provider", .file); - - const engine = PolicyEngine.init(noop_bus.eventBus(), &registry); - var policy_id_buf: [16][]const u8 = undefined; - - // Various error formats should match - var error1 = TestLogContext{ .message = "an error occurred" }; - try testing.expectEqual(FilterDecision.drop, engine.evaluate(.log, &error1, TestLogContext.fieldAccessor, null, &policy_id_buf).decision); - - var error2 = TestLogContext{ .message = "Error: something went wrong" }; - try testing.expectEqual(FilterDecision.drop, engine.evaluate(.log, &error2, TestLogContext.fieldAccessor, null, &policy_id_buf).decision); - - // Non-matching should be unset - var info = TestLogContext{ .message = "everything is fine" }; - try testing.expectEqual(FilterDecision.unset, engine.evaluate(.log, &info, TestLogContext.fieldAccessor, null, &policy_id_buf).decision); -} - -test "PolicyEngine: missing field with negated matcher succeeds" { - const allocator = testing.allocator; - - // Drop logs where the service attribute does NOT match "^critical-s.*$" - var policy = Policy{ - .id = try allocator.dupe(u8, "policy-1"), - .name = try allocator.dupe(u8, "drop-non-critical"), - .enabled = true, - .target = .{ .log = .{ - .keep = try allocator.dupe(u8, "none"), - } }, - }; - try policy.target.?.log.match.append(allocator, .{ - .field = .{ .log_attribute = try testMakeAttrPath(allocator, "service") }, - .match = .{ .regex = try allocator.dupe(u8, "^critical-s.*$") }, - .negate = true, - }); - defer policy.deinit(allocator); - - var noop_bus: NoopEventBus = undefined; - noop_bus.init(); - var registry = PolicyRegistry.init(allocator, noop_bus.eventBus()); - defer registry.deinit(); - try registry.updatePolicies(&.{policy}, "file-provider", .file); - - const engine = PolicyEngine.init(noop_bus.eventBus(), &registry); - var policy_id_buf: [16][]const u8 = undefined; - - // No service attribute = pattern
cannot be found = negation succeeds = dropped - var no_service = TestLogContext{ .message = "hello" }; - try testing.expectEqual(FilterDecision.drop, engine.evaluate(.log, &no_service, TestLogContext.fieldAccessor, null, &policy_id_buf).decision); - - // Service without "critical" = negation succeeds = dropped - var non_critical = TestLogContext{ .message = "hello", .service = "normal-service" }; - try testing.expectEqual(FilterDecision.drop, engine.evaluate(.log, &non_critical, TestLogContext.fieldAccessor, null, &policy_id_buf).decision); - - // Service with "critical" = negation fails = unset - var critical = TestLogContext{ .message = "hello", .service = "critical-service" }; - try testing.expectEqual(FilterDecision.unset, engine.evaluate(.log, &critical, TestLogContext.fieldAccessor, null, &policy_id_buf).decision); -} - -test "PolicyEngine: multiple policies with different matcher keys" { - const allocator = testing.allocator; - - // Policy 1: Drop based on log_body - var policy1 = Policy{ - .id = try allocator.dupe(u8, "policy-1"), - .name = try allocator.dupe(u8, "drop-errors"), - .enabled = true, - .target = .{ .log = .{ - .keep = try allocator.dupe(u8, "none"), - } }, - }; - try policy1.target.?.log.match.append(allocator, .{ - .field = .{ .log_field = .LOG_FIELD_BODY }, - .match = .{ .regex = try allocator.dupe(u8, "error") }, - }); - defer policy1.deinit(allocator); - - // Policy 2: Drop based on log_attribute - var policy2 = Policy{ - .id = try allocator.dupe(u8, "policy-2"), - .name = try allocator.dupe(u8, "drop-debug-service"), - .enabled = true, - .target = .{ .log = .{ - .keep = try allocator.dupe(u8, "none"), - } }, - }; - try policy2.target.?.log.match.append(allocator, .{ - .field = .{ .log_attribute = try testMakeAttrPath(allocator, "service") }, - .match = .{ .regex = try allocator.dupe(u8, "debug") }, - }); - defer policy2.deinit(allocator); - - var noop_bus: NoopEventBus = undefined; - noop_bus.init(); - var registry = PolicyRegistry.init(allocator, noop_bus.eventBus()); - defer registry.deinit(); - try registry.updatePolicies(&.{ policy1, policy2 }, "file-provider", .file); - - const engine = PolicyEngine.init(noop_bus.eventBus(), ®istry); - var policy_id_buf: [16][]const u8 = undefined; - - // Matches policy1 - var error_log = TestLogContext{ .message = "an error occurred", .service = "payment" }; - try testing.expectEqual(FilterDecision.drop, engine.evaluate(.log, &error_log, TestLogContext.fieldAccessor, null, &policy_id_buf).decision); - - // Matches policy2 - var debug_log = TestLogContext{ .message = "all good", .service = "debug-service" }; - try testing.expectEqual(FilterDecision.drop, engine.evaluate(.log, &debug_log, TestLogContext.fieldAccessor, null, &policy_id_buf).decision); - - // Matches neither - var normal_log = TestLogContext{ .message = "all good", .service = "payment" }; - try testing.expectEqual(FilterDecision.unset, engine.evaluate(.log, &normal_log, TestLogContext.fieldAccessor, null, &policy_id_buf).decision); -} - -test "PolicyEngine: evaluate with null mutator" { - const allocator = testing.allocator; - - var policy = Policy{ - .id = try allocator.dupe(u8, "policy-1"), - .name = try allocator.dupe(u8, "drop-errors"), - .enabled = true, - .target = .{ .log = .{ - .keep = try allocator.dupe(u8, "none"), - } }, - }; - try policy.target.?.log.match.append(allocator, .{ - .field = .{ .log_field = .LOG_FIELD_BODY }, - .match = .{ .regex = try allocator.dupe(u8, "error") }, - }); - defer policy.deinit(allocator); - - var noop_bus: NoopEventBus 
= undefined; - noop_bus.init(); - var registry = PolicyRegistry.init(allocator, noop_bus.eventBus()); - defer registry.deinit(); - try registry.updatePolicies(&.{policy}, "file-provider", .file); - - const engine = PolicyEngine.init(noop_bus.eventBus(), ®istry); - - // Test evaluate with null mutator returns full PolicyResult - var error_log = TestLogContext{ .message = "an error occurred" }; - var policy_id_buf: [MAX_POLICIES][]const u8 = undefined; - try testing.expectEqual(FilterDecision.drop, engine.evaluate(.log, &error_log, TestLogContext.fieldAccessor, null, &policy_id_buf).decision); - - var info_log = TestLogContext{ .message = "all good" }; - try testing.expectEqual(FilterDecision.unset, engine.evaluate(.log, &info_log, TestLogContext.fieldAccessor, null, &policy_id_buf).decision); -} - -test "FilterDecision: shouldContinue" { - try testing.expect(FilterDecision.keep.shouldContinue()); - try testing.expect(FilterDecision.unset.shouldContinue()); - try testing.expect(!FilterDecision.drop.shouldContinue()); -} - -// ============================================================================= -// Edge case tests for active policy tracking optimization -// ============================================================================= - -test "PolicyEngine: all policies positive only - none start active" { - // Edge case: No policies have negated patterns, so policies_with_negation is empty. - // Policies should only become active when positive patterns match. - const allocator = testing.allocator; - - var policy1 = Policy{ - .id = try allocator.dupe(u8, "policy-1"), - .name = try allocator.dupe(u8, "drop-errors"), - .enabled = true, - .target = .{ .log = .{ .keep = try allocator.dupe(u8, "none") } }, - }; - try policy1.target.?.log.match.append(allocator, .{ - .field = .{ .log_field = .LOG_FIELD_BODY }, - .match = .{ .regex = try allocator.dupe(u8, "error") }, - }); - defer policy1.deinit(allocator); - - var policy2 = Policy{ - .id = try allocator.dupe(u8, "policy-2"), - .name = try allocator.dupe(u8, "drop-warning"), - .enabled = true, - .target = .{ .log = .{ .keep = try allocator.dupe(u8, "none") } }, - }; - try policy2.target.?.log.match.append(allocator, .{ - .field = .{ .log_field = .LOG_FIELD_BODY }, - .match = .{ .regex = try allocator.dupe(u8, "warning") }, - }); - defer policy2.deinit(allocator); - - var noop_bus: NoopEventBus = undefined; - noop_bus.init(); - var registry = PolicyRegistry.init(allocator, noop_bus.eventBus()); - defer registry.deinit(); - try registry.updatePolicies(&.{ policy1, policy2 }, "file-provider", .file); - - const engine = PolicyEngine.init(noop_bus.eventBus(), ®istry); - var policy_id_buf: [16][]const u8 = undefined; - - // No match - no policies become active - var normal = TestLogContext{ .message = "all good" }; - try testing.expectEqual(FilterDecision.unset, engine.evaluate(.log, &normal, TestLogContext.fieldAccessor, null, &policy_id_buf).decision); - - // Match policy1 only - var error_log = TestLogContext{ .message = "error occurred" }; - try testing.expectEqual(FilterDecision.drop, engine.evaluate(.log, &error_log, TestLogContext.fieldAccessor, null, &policy_id_buf).decision); - - // Match policy2 only - var warning_log = TestLogContext{ .message = "warning issued" }; - try testing.expectEqual(FilterDecision.drop, engine.evaluate(.log, &warning_log, TestLogContext.fieldAccessor, null, &policy_id_buf).decision); -} - -test "PolicyEngine: all policies negated only - all start active" { - // Edge case: All policies have only negated 
patterns. - // All policies start active and match if their negated patterns don't match. - const allocator = testing.allocator; - - var policy1 = Policy{ - .id = try allocator.dupe(u8, "policy-1"), - .name = try allocator.dupe(u8, "drop-non-important"), - .enabled = true, - .target = .{ .log = .{ .keep = try allocator.dupe(u8, "none") } }, - }; - try policy1.target.?.log.match.append(allocator, .{ - .field = .{ .log_field = .LOG_FIELD_BODY }, - .match = .{ .regex = try allocator.dupe(u8, "important") }, - .negate = true, - }); - defer policy1.deinit(allocator); - - var policy2 = Policy{ - .id = try allocator.dupe(u8, "policy-2"), - .name = try allocator.dupe(u8, "drop-non-critical"), - .enabled = true, - .target = .{ .log = .{ .keep = try allocator.dupe(u8, "none") } }, - }; - try policy2.target.?.log.match.append(allocator, .{ - .field = .{ .log_field = .LOG_FIELD_BODY }, - .match = .{ .regex = try allocator.dupe(u8, "critical") }, - .negate = true, - }); - defer policy2.deinit(allocator); - - var noop_bus: NoopEventBus = undefined; - noop_bus.init(); - var registry = PolicyRegistry.init(allocator, noop_bus.eventBus()); - defer registry.deinit(); - try registry.updatePolicies(&.{ policy1, policy2 }, "file-provider", .file); - - const engine = PolicyEngine.init(noop_bus.eventBus(), ®istry); - var policy_id_buf: [16][]const u8 = undefined; - - // Neither "important" nor "critical" - both policies match - var boring = TestLogContext{ .message = "just a normal log" }; - try testing.expectEqual(FilterDecision.drop, engine.evaluate(.log, &boring, TestLogContext.fieldAccessor, null, &policy_id_buf).decision); - - // Contains "important" - policy1 fails, policy2 still matches - var important = TestLogContext{ .message = "important data here" }; - try testing.expectEqual(FilterDecision.drop, engine.evaluate(.log, &important, TestLogContext.fieldAccessor, null, &policy_id_buf).decision); - - // Contains "critical" - policy1 still matches, policy2 fails - var critical = TestLogContext{ .message = "critical issue" }; - try testing.expectEqual(FilterDecision.drop, engine.evaluate(.log, &critical, TestLogContext.fieldAccessor, null, &policy_id_buf).decision); - - // Contains both - both policies fail - var both = TestLogContext{ .message = "important and critical" }; - try testing.expectEqual(FilterDecision.unset, engine.evaluate(.log, &both, TestLogContext.fieldAccessor, null, &policy_id_buf).decision); -} - -test "PolicyEngine: mix of positive-only and negated policies" { - // Edge case: Some policies have negated patterns (start active), others don't. - // Verifies both paths work correctly together. 
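- // Expected outcomes for the four message shapes exercised in this test:
- //   "error"? | "debug"? | positive policy | negated policy | final decision
- //   no       | no       | no match        | match (drop)   | drop
- //   yes      | no       | match (drop)    | match (drop)   | drop
- //   yes      | yes      | match (drop)    | no match       | drop
- //   no       | yes      | no match        | no match       | unset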
- const allocator = testing.allocator; - - // Policy with only positive pattern - var positive_policy = Policy{ - .id = try allocator.dupe(u8, "positive-policy"), - .name = try allocator.dupe(u8, "drop-errors"), - .enabled = true, - .target = .{ .log = .{ .keep = try allocator.dupe(u8, "none") } }, - }; - try positive_policy.target.?.log.match.append(allocator, .{ - .field = .{ .log_field = .LOG_FIELD_BODY }, - .match = .{ .regex = try allocator.dupe(u8, "error") }, - }); - defer positive_policy.deinit(allocator); - - // Policy with only negated pattern - var negated_policy = Policy{ - .id = try allocator.dupe(u8, "negated-policy"), - .name = try allocator.dupe(u8, "drop-non-debug"), - .enabled = true, - .target = .{ .log = .{ .keep = try allocator.dupe(u8, "none") } }, - }; - try negated_policy.target.?.log.match.append(allocator, .{ - .field = .{ .log_field = .LOG_FIELD_BODY }, - .match = .{ .regex = try allocator.dupe(u8, "debug") }, - .negate = true, - }); - defer negated_policy.deinit(allocator); - - var noop_bus: NoopEventBus = undefined; - noop_bus.init(); - var registry = PolicyRegistry.init(allocator, noop_bus.eventBus()); - defer registry.deinit(); - try registry.updatePolicies(&.{ positive_policy, negated_policy }, "file-provider", .file); - - const engine = PolicyEngine.init(noop_bus.eventBus(), ®istry); - var policy_id_buf: [16][]const u8 = undefined; - - // No "error", no "debug" - negated policy matches (drops) - var normal = TestLogContext{ .message = "normal log" }; - try testing.expectEqual(FilterDecision.drop, engine.evaluate(.log, &normal, TestLogContext.fieldAccessor, null, &policy_id_buf).decision); - - // Contains "error", no "debug" - both policies match - var error_log = TestLogContext{ .message = "error occurred" }; - try testing.expectEqual(FilterDecision.drop, engine.evaluate(.log, &error_log, TestLogContext.fieldAccessor, null, &policy_id_buf).decision); - - // Contains "debug" - negated policy fails, positive policy doesn't match - var debug_log = TestLogContext{ .message = "debug info" }; - try testing.expectEqual(FilterDecision.unset, engine.evaluate(.log, &debug_log, TestLogContext.fieldAccessor, null, &policy_id_buf).decision); - - // Contains both "error" and "debug" - positive matches, negated fails - var error_debug = TestLogContext{ .message = "error in debug mode" }; - try testing.expectEqual(FilterDecision.drop, engine.evaluate(.log, &error_debug, TestLogContext.fieldAccessor, null, &policy_id_buf).decision); -} - -test "PolicyEngine: multiple negated patterns same policy" { - // Edge case: Policy with multiple negated patterns - all must "pass" (not match) - const allocator = testing.allocator; - - var policy = Policy{ - .id = try allocator.dupe(u8, "policy-1"), - .name = try allocator.dupe(u8, "drop-non-special"), - .enabled = true, - .target = .{ .log = .{ .keep = try allocator.dupe(u8, "none") } }, - }; - // Must NOT contain "skip" AND must NOT contain "ignore" - try policy.target.?.log.match.append(allocator, .{ - .field = .{ .log_field = .LOG_FIELD_BODY }, - .match = .{ .regex = try allocator.dupe(u8, "skip") }, - .negate = true, - }); - try policy.target.?.log.match.append(allocator, .{ - .field = .{ .log_field = .LOG_FIELD_BODY }, - .match = .{ .regex = try allocator.dupe(u8, "ignore") }, - .negate = true, - }); - defer policy.deinit(allocator); - - var noop_bus: NoopEventBus = undefined; - noop_bus.init(); - var registry = PolicyRegistry.init(allocator, noop_bus.eventBus()); - defer registry.deinit(); - try 
registry.updatePolicies(&.{policy}, "file-provider", .file); - - const engine = PolicyEngine.init(noop_bus.eventBus(), ®istry); - var policy_id_buf: [16][]const u8 = undefined; - - // Neither word - both negations pass - policy matches - var normal = TestLogContext{ .message = "normal message" }; - try testing.expectEqual(FilterDecision.drop, engine.evaluate(.log, &normal, TestLogContext.fieldAccessor, null, &policy_id_buf).decision); - - // Contains "skip" - first negation fails - policy doesn't match - var skip = TestLogContext{ .message = "skip this one" }; - try testing.expectEqual(FilterDecision.unset, engine.evaluate(.log, &skip, TestLogContext.fieldAccessor, null, &policy_id_buf).decision); - - // Contains "ignore" - second negation fails - policy doesn't match - var ignore = TestLogContext{ .message = "ignore this" }; - try testing.expectEqual(FilterDecision.unset, engine.evaluate(.log, &ignore, TestLogContext.fieldAccessor, null, &policy_id_buf).decision); - - // Contains both - both negations fail - policy doesn't match - var both = TestLogContext{ .message = "skip and ignore" }; - try testing.expectEqual(FilterDecision.unset, engine.evaluate(.log, &both, TestLogContext.fieldAccessor, null, &policy_id_buf).decision); -} - -test "PolicyEngine: policy becomes active via positive then fails via negated" { - // Edge case: Policy has both positive and negated patterns. - // Positive matches first (becomes active), then negated also matches (fails). - const allocator = testing.allocator; - - var policy = Policy{ - .id = try allocator.dupe(u8, "policy-1"), - .name = try allocator.dupe(u8, "drop-errors-not-debug"), - .enabled = true, - .target = .{ .log = .{ .keep = try allocator.dupe(u8, "none") } }, - }; - // Must contain "error" AND must NOT contain "debug" - try policy.target.?.log.match.append(allocator, .{ - .field = .{ .log_field = .LOG_FIELD_BODY }, - .match = .{ .regex = try allocator.dupe(u8, "error") }, - }); - try policy.target.?.log.match.append(allocator, .{ - .field = .{ .log_field = .LOG_FIELD_BODY }, - .match = .{ .regex = try allocator.dupe(u8, "debug") }, - .negate = true, - }); - defer policy.deinit(allocator); - - var noop_bus: NoopEventBus = undefined; - noop_bus.init(); - var registry = PolicyRegistry.init(allocator, noop_bus.eventBus()); - defer registry.deinit(); - try registry.updatePolicies(&.{policy}, "file-provider", .file); - - const engine = PolicyEngine.init(noop_bus.eventBus(), ®istry); - var policy_id_buf: [16][]const u8 = undefined; - - // Has "error", no "debug" - positive matches, negation passes - policy matches - var error_only = TestLogContext{ .message = "error occurred" }; - try testing.expectEqual(FilterDecision.drop, engine.evaluate(.log, &error_only, TestLogContext.fieldAccessor, null, &policy_id_buf).decision); - - // Has both "error" and "debug" - positive matches but negation fails - // required_match_count = 2 (1 positive + 1 negated) - // match_counts starts at 1 (negated_count) - // positive match: +1 -> 2 - // negated match: -1 -> 1 - // Final: 1 != 2 - policy doesn't match - var error_debug = TestLogContext{ .message = "debug error message" }; - try testing.expectEqual(FilterDecision.unset, engine.evaluate(.log, &error_debug, TestLogContext.fieldAccessor, null, &policy_id_buf).decision); - - // Has "debug" but no "error" - positive doesn't match, negation fails - // match_counts starts at 1, negated match: -1 -> 0, final: 0 != 2 - var debug_only = TestLogContext{ .message = "debug info" }; - try 
testing.expectEqual(FilterDecision.unset, engine.evaluate(.log, &debug_only, TestLogContext.fieldAccessor, null, &policy_id_buf).decision); - - // Has neither - positive doesn't match, negation passes - // match_counts stays at 1 (negated_count), final: 1 != 2 - var neither = TestLogContext{ .message = "normal log" }; - try testing.expectEqual(FilterDecision.unset, engine.evaluate(.log, &neither, TestLogContext.fieldAccessor, null, &policy_id_buf).decision); -} - -// ============================================================================= -// Tests for evaluate() with transforms -// ============================================================================= - -/// Mutable test context that supports both FieldAccessor and FieldMutator -const MutableTestLogContext = struct { - level: ?[]const u8 = null, - message: ?[]const u8 = null, - service: ?[]const u8 = null, - ddtags: ?[]const u8 = null, - env: ?[]const u8 = null, - - // Dynamic attributes stored in a hash map - attributes: std.StringHashMap([]const u8), - allocator: std.mem.Allocator, - - pub fn init(allocator: std.mem.Allocator) MutableTestLogContext { - return .{ - .attributes = std.StringHashMap([]const u8).init(allocator), - .allocator = allocator, - }; - } - - pub fn deinit(self: *MutableTestLogContext) void { - var it = self.attributes.iterator(); - while (it.next()) |entry| { - self.allocator.free(entry.key_ptr.*); - self.allocator.free(entry.value_ptr.*); - } - self.attributes.deinit(); - } - - pub fn setAttribute(self: *MutableTestLogContext, key: []const u8, value: []const u8) !void { - const value_copy = try self.allocator.dupe(u8, value); - errdefer self.allocator.free(value_copy); - - const gop = try self.attributes.getOrPut(key); - if (gop.found_existing) { - self.allocator.free(gop.value_ptr.*); - gop.value_ptr.* = value_copy; - } else { - gop.key_ptr.* = try self.allocator.dupe(u8, key); - gop.value_ptr.* = value_copy; - } - } - - pub fn removeAttribute(self: *MutableTestLogContext, key: []const u8) bool { - if (self.attributes.fetchRemove(key)) |removed| { - self.allocator.free(removed.key); - self.allocator.free(removed.value); - return true; - } - return false; - } - - pub fn fieldAccessor(ctx_ptr: *const anyopaque, field: FieldRef) ?[]const u8 { - const self: *const MutableTestLogContext = @ptrCast(@alignCast(ctx_ptr)); - return switch (field) { - .log_field => |lf| switch (lf) { - .LOG_FIELD_BODY => self.message, - .LOG_FIELD_SEVERITY_TEXT => self.level, - else => null, - }, - .log_attribute => |attr_path| { - const key = if (attr_path.path.items.len > 0) attr_path.path.items[0] else return null; - // Check fixed fields first - if (std.mem.eql(u8, key, "service")) return self.service; - if (std.mem.eql(u8, key, "ddtags")) return self.ddtags; - if (std.mem.eql(u8, key, "message")) return self.message; - if (std.mem.eql(u8, key, "env")) return self.env; - // Check dynamic attributes - return self.attributes.get(key); - }, - .resource_attribute, .scope_attribute => null, - }; - } - - pub fn fieldMutator(ctx_ptr: *anyopaque, op: policy_types.MutateOp) bool { - const self: *MutableTestLogContext = @ptrCast(@alignCast(ctx_ptr)); - switch (op) { - .remove => |field| { - switch (field) { - .log_attribute => |attr_path| { - const key = if (attr_path.path.items.len > 0) attr_path.path.items[0] else return false; - // Handle fixed fields - if (std.mem.eql(u8, key, "service")) { - if (self.service != null) { - self.service = null; - return true; - } - return false; - } - if (std.mem.eql(u8, key, "env")) { - if 
(self.env != null) { - self.env = null; - return true; - } - return false; - } - // Handle dynamic attributes - return self.removeAttribute(key); - }, - else => return false, - } - }, - .set => |s| { - switch (s.field) { - .log_attribute => |attr_path| { - const key = if (attr_path.path.items.len > 0) attr_path.path.items[0] else return false; - // For fixed fields, just update the pointer - if (std.mem.eql(u8, key, "service")) { - self.service = s.value; - return true; - } - if (std.mem.eql(u8, key, "env")) { - self.env = s.value; - return true; - } - // For dynamic attributes, store a copy - self.setAttribute(key, s.value) catch return false; - return true; - }, - else => return false, - } - }, - .rename => |r| { - switch (r.from) { - .log_attribute => |attr_path| { - const from_key = if (attr_path.path.items.len > 0) attr_path.path.items[0] else return false; - // Get the source value - var value: ?[]const u8 = null; - if (std.mem.eql(u8, from_key, "service")) { - value = self.service; - if (value != null) self.service = null; - } else if (std.mem.eql(u8, from_key, "env")) { - value = self.env; - if (value != null) self.env = null; - } else { - if (self.attributes.get(from_key)) |v| { - // Copy into the target before removing: removeAttribute frees the stored value, so removing first would leave a dangling slice. - self.setAttribute(r.to, v) catch return false; - _ = self.removeAttribute(from_key); - return true; - } - } - - if (value == null) return false; - - // Set the target (only fixed fields reach here; their values are borrowed, not owned by the map) - self.setAttribute(r.to, value.?) catch return false; - return true; - }, - else => return false, - } - }, - } - } -}; - -test "evaluate: policy with keep=all and no transform" { - const allocator = testing.allocator; - - var policy = Policy{ - .id = try allocator.dupe(u8, "keep-policy"), - .name = try allocator.dupe(u8, "keep-errors"), - .enabled = true, - .target = .{ .log = .{ - .keep = try allocator.dupe(u8, "all"), - } }, - }; - try policy.target.?.log.match.append(allocator, .{ - .field = .{ .log_field = .LOG_FIELD_BODY }, - .match = .{ .regex = try allocator.dupe(u8, "error") }, - }); - defer policy.deinit(allocator); - - var noop_bus: NoopEventBus = undefined; - noop_bus.init(); - var registry = PolicyRegistry.init(allocator, noop_bus.eventBus()); - defer registry.deinit(); - try registry.updatePolicies(&.{policy}, "test", .file); - - const engine = PolicyEngine.init(noop_bus.eventBus(), &registry); - - var ctx = MutableTestLogContext.init(allocator); - defer ctx.deinit(); - ctx.message = "an error occurred"; - ctx.service = "payment-api"; - - var policy_id_buf: [16][]const u8 = undefined; - const result = engine.evaluate(.log, &ctx, MutableTestLogContext.fieldAccessor, MutableTestLogContext.fieldMutator, &policy_id_buf); - - try testing.expectEqual(FilterDecision.keep, result.decision); - try testing.expectEqual(@as(usize, 1), result.matched_policy_ids.len); - try testing.expectEqualStrings("keep-policy", result.matched_policy_ids[0]); - // No transform, so context unchanged - try testing.expectEqualStrings("payment-api", ctx.service.?); -} - -test "evaluate: policy with keep=all and remove transform" { - const allocator = testing.allocator; - - var transform = proto.policy.LogTransform{}; - try transform.remove.append(allocator, .{ - .field = .{ .log_attribute = try testMakeAttrPath(allocator, "env") }, - }); - - var policy = Policy{ - .id = try allocator.dupe(u8, "transform-policy"), - .name = try allocator.dupe(u8, "remove-env"), - .enabled = true, - .target = .{ .log = .{ - .keep = try allocator.dupe(u8, "all"), - .transform = transform, - } }, - }; - try policy.target.?.log.match.append(allocator, .{ - .field = .{ .log_field = .LOG_FIELD_BODY }, - .match = .{ .regex = try
allocator.dupe(u8, "error") }, - }); - defer policy.deinit(allocator); - - var noop_bus: NoopEventBus = undefined; - noop_bus.init(); - var registry = PolicyRegistry.init(allocator, noop_bus.eventBus()); - defer registry.deinit(); - try registry.updatePolicies(&.{policy}, "test", .file); - - const engine = PolicyEngine.init(noop_bus.eventBus(), ®istry); - - var ctx = MutableTestLogContext.init(allocator); - defer ctx.deinit(); - ctx.message = "an error occurred"; - ctx.service = "payment-api"; - ctx.env = "production"; - - var policy_id_buf: [16][]const u8 = undefined; - const result = engine.evaluate(.log, &ctx, MutableTestLogContext.fieldAccessor, MutableTestLogContext.fieldMutator, &policy_id_buf); - - try testing.expectEqual(FilterDecision.keep, result.decision); - // Transform should have removed 'env' - try testing.expect(ctx.env == null); - // Other fields unchanged - try testing.expectEqualStrings("payment-api", ctx.service.?); -} - -test "evaluate: policy with keep=all and redact transform" { - const allocator = testing.allocator; - - var transform = proto.policy.LogTransform{}; - try transform.redact.append(allocator, .{ - .field = .{ .log_attribute = try testMakeAttrPath(allocator, "service") }, - .replacement = try allocator.dupe(u8, "[REDACTED]"), - }); - - var policy = Policy{ - .id = try allocator.dupe(u8, "redact-policy"), - .name = try allocator.dupe(u8, "redact-service"), - .enabled = true, - .target = .{ .log = .{ - .keep = try allocator.dupe(u8, "all"), - .transform = transform, - } }, - }; - try policy.target.?.log.match.append(allocator, .{ - .field = .{ .log_field = .LOG_FIELD_BODY }, - .match = .{ .regex = try allocator.dupe(u8, "sensitive") }, - }); - defer policy.deinit(allocator); - - var noop_bus: NoopEventBus = undefined; - noop_bus.init(); - var registry = PolicyRegistry.init(allocator, noop_bus.eventBus()); - defer registry.deinit(); - try registry.updatePolicies(&.{policy}, "test", .file); - - const engine = PolicyEngine.init(noop_bus.eventBus(), ®istry); - - var ctx = MutableTestLogContext.init(allocator); - defer ctx.deinit(); - ctx.message = "sensitive data here"; - ctx.service = "secret-service"; - - var policy_id_buf: [16][]const u8 = undefined; - const result = engine.evaluate(.log, &ctx, MutableTestLogContext.fieldAccessor, MutableTestLogContext.fieldMutator, &policy_id_buf); - - try testing.expectEqual(FilterDecision.keep, result.decision); - // Transform should have redacted 'service' - try testing.expectEqualStrings("[REDACTED]", ctx.service.?); -} - -test "evaluate: policy with keep=all and add transform" { - const allocator = testing.allocator; - - var transform = proto.policy.LogTransform{}; - try transform.add.append(allocator, .{ - .field = .{ .log_attribute = try testMakeAttrPath(allocator, "processed") }, - .value = try allocator.dupe(u8, "true"), - .upsert = true, - }); - - var policy = Policy{ - .id = try allocator.dupe(u8, "add-policy"), - .name = try allocator.dupe(u8, "add-processed"), - .enabled = true, - .target = .{ .log = .{ - .keep = try allocator.dupe(u8, "all"), - .transform = transform, - } }, - }; - try policy.target.?.log.match.append(allocator, .{ - .field = .{ .log_field = .LOG_FIELD_BODY }, - .match = .{ .regex = try allocator.dupe(u8, "error") }, - }); - defer policy.deinit(allocator); - - var noop_bus: NoopEventBus = undefined; - noop_bus.init(); - var registry = PolicyRegistry.init(allocator, noop_bus.eventBus()); - defer registry.deinit(); - try registry.updatePolicies(&.{policy}, "test", .file); - - const engine = 
PolicyEngine.init(noop_bus.eventBus(), ®istry); - - var ctx = MutableTestLogContext.init(allocator); - defer ctx.deinit(); - ctx.message = "an error occurred"; - - var policy_id_buf: [16][]const u8 = undefined; - const result = engine.evaluate(.log, &ctx, MutableTestLogContext.fieldAccessor, MutableTestLogContext.fieldMutator, &policy_id_buf); - - try testing.expectEqual(FilterDecision.keep, result.decision); - // Transform should have added 'processed' - try testing.expectEqualStrings("true", ctx.attributes.get("processed").?); -} - -test "evaluate: policy with no keep (drop) skips transform" { - const allocator = testing.allocator; - - var transform = proto.policy.LogTransform{}; - try transform.remove.append(allocator, .{ - .field = .{ .log_attribute = try testMakeAttrPath(allocator, "env") }, - }); - - var policy = Policy{ - .id = try allocator.dupe(u8, "drop-policy"), - .name = try allocator.dupe(u8, "drop-errors"), - .enabled = true, - .target = .{ - .log = .{ - .keep = try allocator.dupe(u8, "none"), - .transform = transform, // Transform should NOT be applied for drops - }, - }, - }; - try policy.target.?.log.match.append(allocator, .{ - .field = .{ .log_field = .LOG_FIELD_BODY }, - .match = .{ .regex = try allocator.dupe(u8, "error") }, - }); - defer policy.deinit(allocator); - - var noop_bus: NoopEventBus = undefined; - noop_bus.init(); - var registry = PolicyRegistry.init(allocator, noop_bus.eventBus()); - defer registry.deinit(); - try registry.updatePolicies(&.{policy}, "test", .file); - - const engine = PolicyEngine.init(noop_bus.eventBus(), ®istry); - - var ctx = MutableTestLogContext.init(allocator); - defer ctx.deinit(); - ctx.message = "an error occurred"; - ctx.env = "production"; - - var policy_id_buf: [16][]const u8 = undefined; - const result = engine.evaluate(.log, &ctx, MutableTestLogContext.fieldAccessor, MutableTestLogContext.fieldMutator, &policy_id_buf); - - try testing.expectEqual(FilterDecision.drop, result.decision); - // Transform should NOT have been applied (log is dropped) - try testing.expectEqualStrings("production", ctx.env.?); -} - -test "evaluate: multiple policies with different transforms" { - const allocator = testing.allocator; - - // Policy 1: matches "error", adds tag - var transform1 = proto.policy.LogTransform{}; - try transform1.add.append(allocator, .{ - .field = .{ .log_attribute = try testMakeAttrPath(allocator, "error_tag") }, - .value = try allocator.dupe(u8, "true"), - .upsert = true, - }); - - var policy1 = Policy{ - .id = try allocator.dupe(u8, "policy-1"), - .name = try allocator.dupe(u8, "tag-errors"), - .enabled = true, - .target = .{ .log = .{ - .keep = try allocator.dupe(u8, "all"), - .transform = transform1, - } }, - }; - try policy1.target.?.log.match.append(allocator, .{ - .field = .{ .log_field = .LOG_FIELD_BODY }, - .match = .{ .regex = try allocator.dupe(u8, "error") }, - }); - - // Policy 2: matches "payment", removes env - var transform2 = proto.policy.LogTransform{}; - try transform2.remove.append(allocator, .{ - .field = .{ .log_attribute = try testMakeAttrPath(allocator, "env") }, - }); - - var policy2 = Policy{ - .id = try allocator.dupe(u8, "policy-2"), - .name = try allocator.dupe(u8, "clean-payment"), - .enabled = true, - .target = .{ .log = .{ - .keep = try allocator.dupe(u8, "all"), - .transform = transform2, - } }, - }; - try policy2.target.?.log.match.append(allocator, .{ - .field = .{ .log_attribute = try testMakeAttrPath(allocator, "service") }, - .match = .{ .regex = try allocator.dupe(u8, "payment") }, - 
}); - - defer policy1.deinit(allocator); - defer policy2.deinit(allocator); - - var noop_bus: NoopEventBus = undefined; - noop_bus.init(); - var registry = PolicyRegistry.init(allocator, noop_bus.eventBus()); - defer registry.deinit(); - try registry.updatePolicies(&.{ policy1, policy2 }, "test", .file); - - const engine = PolicyEngine.init(noop_bus.eventBus(), ®istry); - - // Log matches BOTH policies - var ctx = MutableTestLogContext.init(allocator); - defer ctx.deinit(); - ctx.message = "payment error occurred"; - ctx.service = "payment-api"; - ctx.env = "production"; - - var policy_id_buf: [16][]const u8 = undefined; - const result = engine.evaluate(.log, &ctx, MutableTestLogContext.fieldAccessor, MutableTestLogContext.fieldMutator, &policy_id_buf); - - try testing.expectEqual(FilterDecision.keep, result.decision); - try testing.expectEqual(@as(usize, 2), result.matched_policy_ids.len); - - // Both transforms should have been applied - try testing.expectEqualStrings("true", ctx.attributes.get("error_tag").?); - try testing.expect(ctx.env == null); -} - -test "evaluate: policy with unset keep applies transform" { - const allocator = testing.allocator; - - var transform = proto.policy.LogTransform{}; - try transform.add.append(allocator, .{ - .field = .{ .log_attribute = try testMakeAttrPath(allocator, "tagged") }, - .value = try allocator.dupe(u8, "yes"), - .upsert = true, - }); - - var policy = Policy{ - .id = try allocator.dupe(u8, "unset-policy"), - .name = try allocator.dupe(u8, "tag-only"), - .enabled = true, - .target = .{ - .log = .{ - // keep is null (unset) - should still apply transforms - .transform = transform, - }, - }, - }; - try policy.target.?.log.match.append(allocator, .{ - .field = .{ .log_field = .LOG_FIELD_BODY }, - .match = .{ .regex = try allocator.dupe(u8, "info") }, - }); - defer policy.deinit(allocator); - - var noop_bus: NoopEventBus = undefined; - noop_bus.init(); - var registry = PolicyRegistry.init(allocator, noop_bus.eventBus()); - defer registry.deinit(); - try registry.updatePolicies(&.{policy}, "test", .file); - - const engine = PolicyEngine.init(noop_bus.eventBus(), ®istry); - - var ctx = MutableTestLogContext.init(allocator); - defer ctx.deinit(); - ctx.message = "info log message"; - - var policy_id_buf: [16][]const u8 = undefined; - const result = engine.evaluate(.log, &ctx, MutableTestLogContext.fieldAccessor, MutableTestLogContext.fieldMutator, &policy_id_buf); - - // When keep is not specified, it defaults to "all" which means keep - try testing.expectEqual(FilterDecision.keep, result.decision); - try testing.expectEqual(@as(usize, 1), result.matched_policy_ids.len); - try testing.expectEqualStrings("yes", ctx.attributes.get("tagged").?); -} - -test "evaluate: null mutator skips transforms" { - const allocator = testing.allocator; - - var transform = proto.policy.LogTransform{}; - try transform.remove.append(allocator, .{ - .field = .{ .log_attribute = try testMakeAttrPath(allocator, "env") }, - }); - - var policy = Policy{ - .id = try allocator.dupe(u8, "transform-policy"), - .name = try allocator.dupe(u8, "remove-env"), - .enabled = true, - .target = .{ .log = .{ - .keep = try allocator.dupe(u8, "all"), - .transform = transform, - } }, - }; - try policy.target.?.log.match.append(allocator, .{ - .field = .{ .log_field = .LOG_FIELD_BODY }, - .match = .{ .regex = try allocator.dupe(u8, "error") }, - }); - defer policy.deinit(allocator); - - var noop_bus: NoopEventBus = undefined; - noop_bus.init(); - var registry = PolicyRegistry.init(allocator, 
noop_bus.eventBus()); - defer registry.deinit(); - try registry.updatePolicies(&.{policy}, "test", .file); - - const engine = PolicyEngine.init(noop_bus.eventBus(), ®istry); - - var ctx = MutableTestLogContext.init(allocator); - defer ctx.deinit(); - ctx.message = "an error occurred"; - ctx.env = "production"; - - var policy_id_buf: [16][]const u8 = undefined; - // Pass null for mutator - transforms should be skipped - const result = engine.evaluate(.log, &ctx, MutableTestLogContext.fieldAccessor, null, &policy_id_buf); - - try testing.expectEqual(FilterDecision.keep, result.decision); - // Transform should NOT have been applied (null mutator) - try testing.expectEqualStrings("production", ctx.env.?); -} - -test "evaluate: policy without transform field" { - const allocator = testing.allocator; - - var policy = Policy{ - .id = try allocator.dupe(u8, "no-transform"), - .name = try allocator.dupe(u8, "just-keep"), - .enabled = true, - .target = .{ - .log = .{ - .keep = try allocator.dupe(u8, "all"), - // No transform field - }, - }, - }; - try policy.target.?.log.match.append(allocator, .{ - .field = .{ .log_field = .LOG_FIELD_BODY }, - .match = .{ .regex = try allocator.dupe(u8, "error") }, - }); - defer policy.deinit(allocator); - - var noop_bus: NoopEventBus = undefined; - noop_bus.init(); - var registry = PolicyRegistry.init(allocator, noop_bus.eventBus()); - defer registry.deinit(); - try registry.updatePolicies(&.{policy}, "test", .file); - - const engine = PolicyEngine.init(noop_bus.eventBus(), ®istry); - - var ctx = MutableTestLogContext.init(allocator); - defer ctx.deinit(); - ctx.message = "an error occurred"; - ctx.env = "production"; - - var policy_id_buf: [16][]const u8 = undefined; - const result = engine.evaluate(.log, &ctx, MutableTestLogContext.fieldAccessor, MutableTestLogContext.fieldMutator, &policy_id_buf); - - try testing.expectEqual(FilterDecision.keep, result.decision); - // No transform, env unchanged - try testing.expectEqualStrings("production", ctx.env.?); -} - -test "evaluate: mixed keep and drop policies - only keep applies transforms" { - const allocator = testing.allocator; - - // Policy 1: drop errors (no transform should apply) - var drop_transform = proto.policy.LogTransform{}; - try drop_transform.add.append(allocator, .{ - .field = .{ .log_attribute = try testMakeAttrPath(allocator, "dropped") }, - .value = try allocator.dupe(u8, "should-not-appear"), - .upsert = true, - }); - - var drop_policy = Policy{ - .id = try allocator.dupe(u8, "drop-policy"), - .name = try allocator.dupe(u8, "drop-debug"), - .enabled = true, - .target = .{ .log = .{ - .keep = try allocator.dupe(u8, "none"), - .transform = drop_transform, - } }, - }; - try drop_policy.target.?.log.match.append(allocator, .{ - .field = .{ .log_field = .LOG_FIELD_BODY }, - .match = .{ .regex = try allocator.dupe(u8, "debug") }, - }); - - // Policy 2: keep errors with transform - var keep_transform = proto.policy.LogTransform{}; - try keep_transform.add.append(allocator, .{ - .field = .{ .log_attribute = try testMakeAttrPath(allocator, "kept") }, - .value = try allocator.dupe(u8, "yes"), - .upsert = true, - }); - - var keep_policy = Policy{ - .id = try allocator.dupe(u8, "keep-policy"), - .name = try allocator.dupe(u8, "keep-errors"), - .enabled = true, - .target = .{ .log = .{ - .keep = try allocator.dupe(u8, "all"), - .transform = keep_transform, - } }, - }; - try keep_policy.target.?.log.match.append(allocator, .{ - .field = .{ .log_field = .LOG_FIELD_BODY }, - .match = .{ .regex = try 
allocator.dupe(u8, "error") }, - }); - - defer drop_policy.deinit(allocator); - defer keep_policy.deinit(allocator); - - var noop_bus: NoopEventBus = undefined; - noop_bus.init(); - var registry = PolicyRegistry.init(allocator, noop_bus.eventBus()); - defer registry.deinit(); - try registry.updatePolicies(&.{ drop_policy, keep_policy }, "test", .file); - - const engine = PolicyEngine.init(noop_bus.eventBus(), ®istry); - - // Test 1: Log matches only drop policy - { - var ctx = MutableTestLogContext.init(allocator); - defer ctx.deinit(); - ctx.message = "debug message"; - - var policy_id_buf: [16][]const u8 = undefined; - const result = engine.evaluate(.log, &ctx, MutableTestLogContext.fieldAccessor, MutableTestLogContext.fieldMutator, &policy_id_buf); - - try testing.expectEqual(FilterDecision.drop, result.decision); - // Transform should NOT be applied for drop - try testing.expect(ctx.attributes.get("dropped") == null); - } - - // Test 2: Log matches only keep policy - { - var ctx = MutableTestLogContext.init(allocator); - defer ctx.deinit(); - ctx.message = "error occurred"; - - var policy_id_buf: [16][]const u8 = undefined; - const result = engine.evaluate(.log, &ctx, MutableTestLogContext.fieldAccessor, MutableTestLogContext.fieldMutator, &policy_id_buf); - - try testing.expectEqual(FilterDecision.keep, result.decision); - // Transform should be applied for keep - try testing.expectEqualStrings("yes", ctx.attributes.get("kept").?); - } -} - -// ============================================================================= -// Stats Recording Tests -// ============================================================================= - -const policy_provider = @import("./provider.zig"); - -/// Test provider that tracks recordPolicyStats calls -const StatsTrackingProvider = struct { - allocator: std.mem.Allocator, - stats_calls: std.ArrayListUnmanaged(StatsCall), - - const StatsCall = struct { - policy_id: []const u8, - hits: i64, - misses: i64, - transform_result: log_transform.TransformResult, - }; - - pub fn init(allocator: std.mem.Allocator) StatsTrackingProvider { - return .{ - .allocator = allocator, - .stats_calls = .{}, - }; - } - - pub fn deinit(self: *StatsTrackingProvider) void { - for (self.stats_calls.items) |call| { - self.allocator.free(call.policy_id); - } - self.stats_calls.deinit(self.allocator); - } - - pub fn getId(self: *StatsTrackingProvider) []const u8 { - _ = self; - return "stats-tracking-provider"; - } - - pub fn subscribe(self: *StatsTrackingProvider, callback: policy_mod.PolicyCallback) !void { - _ = self; - _ = callback; - } - - pub fn recordPolicyError(self: *StatsTrackingProvider, policy_id: []const u8, error_message: []const u8) void { - _ = self; - _ = policy_id; - _ = error_message; - } - - pub fn recordPolicyStats(self: *StatsTrackingProvider, policy_id: []const u8, hits: i64, misses: i64, transform_result: log_transform.TransformResult) void { - const id_copy = self.allocator.dupe(u8, policy_id) catch return; - self.stats_calls.append(self.allocator, .{ - .policy_id = id_copy, - .hits = hits, - .misses = misses, - .transform_result = transform_result, - }) catch { - self.allocator.free(id_copy); - }; - } - - pub fn provider(self: *StatsTrackingProvider) policy_provider.PolicyProvider { - return policy_provider.PolicyProvider.init(self); - } - - /// Find stats for a given policy ID - pub fn getStats(self: *const StatsTrackingProvider, policy_id: []const u8) ?StatsCall { - for (self.stats_calls.items) |call| { - if (std.mem.eql(u8, call.policy_id, policy_id)) 
{ - return call; - } - } - return null; - } - - /// Count total stats calls - pub fn callCount(self: *const StatsTrackingProvider) usize { - return self.stats_calls.items.len; - } -}; - -test "PolicyEngine stats: all DROP policies get hits" { - const allocator = testing.allocator; - - // Two DROP policies that both match - var drop_policy1 = Policy{ - .id = try allocator.dupe(u8, "drop-1"), - .name = try allocator.dupe(u8, "drop-errors"), - .enabled = true, - .target = .{ .log = .{ .keep = try allocator.dupe(u8, "none") } }, - }; - try drop_policy1.target.?.log.match.append(allocator, .{ - .field = .{ .log_field = .LOG_FIELD_BODY }, - .match = .{ .regex = try allocator.dupe(u8, "error") }, - }); - defer drop_policy1.deinit(allocator); - - var drop_policy2 = Policy{ - .id = try allocator.dupe(u8, "drop-2"), - .name = try allocator.dupe(u8, "drop-critical"), - .enabled = true, - .target = .{ .log = .{ .keep = try allocator.dupe(u8, "none") } }, - }; - try drop_policy2.target.?.log.match.append(allocator, .{ - .field = .{ .log_field = .LOG_FIELD_BODY }, - .match = .{ .regex = try allocator.dupe(u8, "critical") }, - }); - defer drop_policy2.deinit(allocator); - - var noop_bus: NoopEventBus = undefined; - noop_bus.init(); - - var registry = PolicyRegistry.init(allocator, noop_bus.eventBus()); - defer registry.deinit(); - try registry.updatePolicies(&.{ drop_policy1, drop_policy2 }, "file-provider", .file); - - const engine = PolicyEngine.init(noop_bus.eventBus(), ®istry); - - // Log matches both DROP policies - var test_log = TestLogContext{ .message = "critical error occurred" }; - var policy_id_buf: [16][]const u8 = undefined; - const result = engine.evaluate(.log, &test_log, TestLogContext.fieldAccessor, null, &policy_id_buf); - - try testing.expectEqual(FilterDecision.drop, result.decision); - - // Both policies should get hits via lock-free atomic stats on snapshot - const snapshot = registry.getSnapshot().?; - - // Policy 0 (drop-1) should have 1 hit - const stats0 = snapshot.getStats(0).?; - try testing.expectEqual(@as(i64, 1), stats0.hits.load(.monotonic)); - try testing.expectEqual(@as(i64, 0), stats0.misses.load(.monotonic)); - - // Policy 1 (drop-2) should have 1 hit - const stats1 = snapshot.getStats(1).?; - try testing.expectEqual(@as(i64, 1), stats1.hits.load(.monotonic)); - try testing.expectEqual(@as(i64, 0), stats1.misses.load(.monotonic)); -} - -test "PolicyEngine stats: all KEEP policies get hits" { - const allocator = testing.allocator; - - // Two KEEP policies that both match - var keep_policy1 = Policy{ - .id = try allocator.dupe(u8, "keep-1"), - .name = try allocator.dupe(u8, "keep-errors"), - .enabled = true, - .target = .{ .log = .{ .keep = try allocator.dupe(u8, "all") } }, - }; - try keep_policy1.target.?.log.match.append(allocator, .{ - .field = .{ .log_field = .LOG_FIELD_BODY }, - .match = .{ .regex = try allocator.dupe(u8, "error") }, - }); - defer keep_policy1.deinit(allocator); - - var keep_policy2 = Policy{ - .id = try allocator.dupe(u8, "keep-2"), - .name = try allocator.dupe(u8, "keep-critical"), - .enabled = true, - .target = .{ .log = .{ .keep = try allocator.dupe(u8, "all") } }, - }; - try keep_policy2.target.?.log.match.append(allocator, .{ - .field = .{ .log_field = .LOG_FIELD_BODY }, - .match = .{ .regex = try allocator.dupe(u8, "critical") }, - }); - defer keep_policy2.deinit(allocator); - - var noop_bus: NoopEventBus = undefined; - noop_bus.init(); - - var registry = PolicyRegistry.init(allocator, noop_bus.eventBus()); - defer registry.deinit(); - try 
registry.updatePolicies(&.{ keep_policy1, keep_policy2 }, "file-provider", .file); - - const engine = PolicyEngine.init(noop_bus.eventBus(), ®istry); - - // Log matches both KEEP policies - var test_log = TestLogContext{ .message = "critical error occurred" }; - var policy_id_buf: [16][]const u8 = undefined; - const result = engine.evaluate(.log, &test_log, TestLogContext.fieldAccessor, null, &policy_id_buf); - - try testing.expectEqual(FilterDecision.keep, result.decision); - - // Both policies should get hits via lock-free atomic stats on snapshot - const snapshot = registry.getSnapshot().?; - - // Policy 0 (keep-1) should have 1 hit - const stats0 = snapshot.getStats(0).?; - try testing.expectEqual(@as(i64, 1), stats0.hits.load(.monotonic)); - try testing.expectEqual(@as(i64, 0), stats0.misses.load(.monotonic)); - - // Policy 1 (keep-2) should have 1 hit - const stats1 = snapshot.getStats(1).?; - try testing.expectEqual(@as(i64, 1), stats1.hits.load(.monotonic)); - try testing.expectEqual(@as(i64, 0), stats1.misses.load(.monotonic)); -} - -test "PolicyEngine stats: mixed KEEP and DROP - DROP gets hits, KEEP gets misses" { - const allocator = testing.allocator; - - // One KEEP and one DROP policy that both match - var keep_policy = Policy{ - .id = try allocator.dupe(u8, "keep-policy"), - .name = try allocator.dupe(u8, "keep-errors"), - .enabled = true, - .target = .{ .log = .{ .keep = try allocator.dupe(u8, "all") } }, - }; - try keep_policy.target.?.log.match.append(allocator, .{ - .field = .{ .log_field = .LOG_FIELD_BODY }, - .match = .{ .regex = try allocator.dupe(u8, "error") }, - }); - defer keep_policy.deinit(allocator); - - var drop_policy = Policy{ - .id = try allocator.dupe(u8, "drop-policy"), - .name = try allocator.dupe(u8, "drop-critical"), - .enabled = true, - .target = .{ .log = .{ .keep = try allocator.dupe(u8, "none") } }, - }; - try drop_policy.target.?.log.match.append(allocator, .{ - .field = .{ .log_field = .LOG_FIELD_BODY }, - .match = .{ .regex = try allocator.dupe(u8, "critical") }, - }); - defer drop_policy.deinit(allocator); - - var noop_bus: NoopEventBus = undefined; - noop_bus.init(); - - var registry = PolicyRegistry.init(allocator, noop_bus.eventBus()); - defer registry.deinit(); - try registry.updatePolicies(&.{ keep_policy, drop_policy }, "file-provider", .file); - - const engine = PolicyEngine.init(noop_bus.eventBus(), ®istry); - - // Log matches both policies (KEEP and DROP) - var test_log = TestLogContext{ .message = "critical error occurred" }; - var policy_id_buf: [16][]const u8 = undefined; - const result = engine.evaluate(.log, &test_log, TestLogContext.fieldAccessor, null, &policy_id_buf); - - // DROP wins (most restrictive) - try testing.expectEqual(FilterDecision.drop, result.decision); - - // Both policies should have stats recorded via lock-free atomics - const snapshot = registry.getSnapshot().?; - - // KEEP policy (index 0) gets miss (its decision differs from final decision) - const keep_stats = snapshot.getStats(0).?; - try testing.expectEqual(@as(i64, 0), keep_stats.hits.load(.monotonic)); - try testing.expectEqual(@as(i64, 1), keep_stats.misses.load(.monotonic)); - - // DROP policy (index 1) gets hit (its decision matches final decision) - const drop_stats = snapshot.getStats(1).?; - try testing.expectEqual(@as(i64, 1), drop_stats.hits.load(.monotonic)); - try testing.expectEqual(@as(i64, 0), drop_stats.misses.load(.monotonic)); -} - -test "PolicyEngine stats: single policy match gets hit" { - const allocator = testing.allocator; - - var 
policy = Policy{ - .id = try allocator.dupe(u8, "single-policy"), - .name = try allocator.dupe(u8, "drop-errors"), - .enabled = true, - .target = .{ .log = .{ .keep = try allocator.dupe(u8, "none") } }, - }; - try policy.target.?.log.match.append(allocator, .{ - .field = .{ .log_field = .LOG_FIELD_BODY }, - .match = .{ .regex = try allocator.dupe(u8, "error") }, - }); - defer policy.deinit(allocator); - - var noop_bus: NoopEventBus = undefined; - noop_bus.init(); - - var registry = PolicyRegistry.init(allocator, noop_bus.eventBus()); - defer registry.deinit(); - try registry.updatePolicies(&.{policy}, "file-provider", .file); - - const engine = PolicyEngine.init(noop_bus.eventBus(), ®istry); - - var test_log = TestLogContext{ .message = "an error occurred" }; - var policy_id_buf: [16][]const u8 = undefined; - _ = engine.evaluate(.log, &test_log, TestLogContext.fieldAccessor, null, &policy_id_buf); - - // Single policy should get a hit via lock-free atomic stats - const snapshot = registry.getSnapshot().?; - const stats = snapshot.getStats(0).?; - try testing.expectEqual(@as(i64, 1), stats.hits.load(.monotonic)); - try testing.expectEqual(@as(i64, 0), stats.misses.load(.monotonic)); -} - -test "PolicyEngine stats: multiple KEEPs and DROPs - all DROPs get hits, all KEEPs get misses" { - const allocator = testing.allocator; - - // Two KEEP policies - var keep_policy1 = Policy{ - .id = try allocator.dupe(u8, "keep-1"), - .name = try allocator.dupe(u8, "keep-errors"), - .enabled = true, - .target = .{ .log = .{ .keep = try allocator.dupe(u8, "all") } }, - }; - try keep_policy1.target.?.log.match.append(allocator, .{ - .field = .{ .log_field = .LOG_FIELD_BODY }, - .match = .{ .regex = try allocator.dupe(u8, "error") }, - }); - defer keep_policy1.deinit(allocator); - - var keep_policy2 = Policy{ - .id = try allocator.dupe(u8, "keep-2"), - .name = try allocator.dupe(u8, "keep-critical"), - .enabled = true, - .target = .{ .log = .{ .keep = try allocator.dupe(u8, "all") } }, - }; - try keep_policy2.target.?.log.match.append(allocator, .{ - .field = .{ .log_field = .LOG_FIELD_BODY }, - .match = .{ .regex = try allocator.dupe(u8, "critical") }, - }); - defer keep_policy2.deinit(allocator); - - // Two DROP policies - var drop_policy1 = Policy{ - .id = try allocator.dupe(u8, "drop-1"), - .name = try allocator.dupe(u8, "drop-warning"), - .enabled = true, - .target = .{ .log = .{ .keep = try allocator.dupe(u8, "none") } }, - }; - try drop_policy1.target.?.log.match.append(allocator, .{ - .field = .{ .log_field = .LOG_FIELD_BODY }, - .match = .{ .regex = try allocator.dupe(u8, "warning") }, - }); - defer drop_policy1.deinit(allocator); - - var drop_policy2 = Policy{ - .id = try allocator.dupe(u8, "drop-2"), - .name = try allocator.dupe(u8, "drop-debug"), - .enabled = true, - .target = .{ .log = .{ .keep = try allocator.dupe(u8, "none") } }, - }; - try drop_policy2.target.?.log.match.append(allocator, .{ - .field = .{ .log_field = .LOG_FIELD_BODY }, - .match = .{ .regex = try allocator.dupe(u8, "debug") }, - }); - defer drop_policy2.deinit(allocator); - - var noop_bus: NoopEventBus = undefined; - noop_bus.init(); - - var registry = PolicyRegistry.init(allocator, noop_bus.eventBus()); - defer registry.deinit(); - try registry.updatePolicies(&.{ keep_policy1, keep_policy2, drop_policy1, drop_policy2 }, "file-provider", .file); - - const engine = PolicyEngine.init(noop_bus.eventBus(), ®istry); - - // Log matches all 4 policies (2 KEEP, 2 DROP) - var test_log = TestLogContext{ .message = "critical error with 
warning and debug info" }; - var policy_id_buf: [16][]const u8 = undefined; - const result = engine.evaluate(.log, &test_log, TestLogContext.fieldAccessor, null, &policy_id_buf); - - // DROP wins (most restrictive) - try testing.expectEqual(FilterDecision.drop, result.decision); - - // All 4 policies should have stats recorded via lock-free atomics - const snapshot = registry.getSnapshot().?; - - // Both KEEP policies (index 0,1) get misses (their decision differs from final decision) - const keep1_stats = snapshot.getStats(0).?; - try testing.expectEqual(@as(i64, 0), keep1_stats.hits.load(.monotonic)); - try testing.expectEqual(@as(i64, 1), keep1_stats.misses.load(.monotonic)); - - const keep2_stats = snapshot.getStats(1).?; - try testing.expectEqual(@as(i64, 0), keep2_stats.hits.load(.monotonic)); - try testing.expectEqual(@as(i64, 1), keep2_stats.misses.load(.monotonic)); - - // Both DROP policies (index 2,3) get hits (their decision matches final decision) - const drop1_stats = snapshot.getStats(2).?; - try testing.expectEqual(@as(i64, 1), drop1_stats.hits.load(.monotonic)); - try testing.expectEqual(@as(i64, 0), drop1_stats.misses.load(.monotonic)); - - const drop2_stats = snapshot.getStats(3).?; - try testing.expectEqual(@as(i64, 1), drop2_stats.hits.load(.monotonic)); - try testing.expectEqual(@as(i64, 0), drop2_stats.misses.load(.monotonic)); -} - -test "PolicyEngine stats: no match records no stats" { - const allocator = testing.allocator; - - var policy = Policy{ - .id = try allocator.dupe(u8, "no-match-policy"), - .name = try allocator.dupe(u8, "drop-errors"), - .enabled = true, - .target = .{ .log = .{ .keep = try allocator.dupe(u8, "none") } }, - }; - try policy.target.?.log.match.append(allocator, .{ - .field = .{ .log_field = .LOG_FIELD_BODY }, - .match = .{ .regex = try allocator.dupe(u8, "error") }, - }); - defer policy.deinit(allocator); - - var noop_bus: NoopEventBus = undefined; - noop_bus.init(); - - var registry = PolicyRegistry.init(allocator, noop_bus.eventBus()); - defer registry.deinit(); - try registry.updatePolicies(&.{policy}, "file-provider", .file); - - const engine = PolicyEngine.init(noop_bus.eventBus(), ®istry); - - // Log doesn't match the policy - var test_log = TestLogContext{ .message = "all good here" }; - var policy_id_buf: [16][]const u8 = undefined; - _ = engine.evaluate(.log, &test_log, TestLogContext.fieldAccessor, null, &policy_id_buf); - - // No stats should be recorded - policy should have 0 hits and 0 misses - const snapshot = registry.getSnapshot().?; - const stats = snapshot.getStats(0).?; - try testing.expectEqual(@as(i64, 0), stats.hits.load(.monotonic)); - try testing.expectEqual(@as(i64, 0), stats.misses.load(.monotonic)); -} - -// ============================================================================= -// Metric Policy Tests -// ============================================================================= - -const MetricField = proto.policy.MetricField; -const MetricMatcher = proto.policy.MetricMatcher; - -/// Test context for metric unit tests - simple struct with known fields -const TestMetricContext = struct { - name: ?[]const u8 = null, - description: ?[]const u8 = null, - unit: ?[]const u8 = null, - scope_name: ?[]const u8 = null, - datapoint_attributes: ?std.StringHashMap([]const u8) = null, - resource_attributes: ?std.StringHashMap([]const u8) = null, - - pub fn fieldAccessor(ctx_ptr: *const anyopaque, field: MetricFieldRef) ?[]const u8 { - const self: *const TestMetricContext = @ptrCast(@alignCast(ctx_ptr)); - return 
switch (field) { - .metric_field => |mf| switch (mf) { - .METRIC_FIELD_NAME => self.name, - .METRIC_FIELD_DESCRIPTION => self.description, - .METRIC_FIELD_UNIT => self.unit, - .METRIC_FIELD_SCOPE_NAME => self.scope_name, - else => null, - }, - .datapoint_attribute => |attr_path| { - const key = if (attr_path.path.items.len > 0) attr_path.path.items[0] else return null; - if (self.datapoint_attributes) |attrs| { - return attrs.get(key); - } - return null; - }, - .resource_attribute => |attr_path| { - const key = if (attr_path.path.items.len > 0) attr_path.path.items[0] else return null; - if (self.resource_attributes) |attrs| { - return attrs.get(key); - } - return null; - }, - .scope_attribute => null, - .metric_type => null, // Test context doesn't track metric type - .aggregation_temporality => null, // Test context doesn't track temporality - }; - } -}; - -test "MetricPolicyEngine: empty registry returns unset" { - const allocator = testing.allocator; - - var noop_bus: NoopEventBus = undefined; - noop_bus.init(); - var registry = PolicyRegistry.init(allocator, noop_bus.eventBus()); - defer registry.deinit(); - - const engine = PolicyEngine.init(noop_bus.eventBus(), ®istry); - - var test_metric = TestMetricContext{ .name = "http_requests_total" }; - var policy_id_buf: [16][]const u8 = undefined; - - const result = engine.evaluate(.metric, &test_metric, TestMetricContext.fieldAccessor, null, &policy_id_buf); - - try testing.expectEqual(FilterDecision.unset, result.decision); - try testing.expectEqual(@as(usize, 0), result.matched_policy_ids.len); -} - -test "MetricPolicyEngine: single policy drop match" { - const allocator = testing.allocator; - - var policy = Policy{ - .id = try allocator.dupe(u8, "metric-policy-1"), - .name = try allocator.dupe(u8, "drop-debug-metrics"), - .enabled = true, - .target = .{ .metric = .{ - .keep = false, - } }, - }; - try policy.target.?.metric.match.append(allocator, .{ - .field = .{ .metric_field = .METRIC_FIELD_NAME }, - .match = .{ .regex = try allocator.dupe(u8, "debug_.*") }, - }); - defer policy.deinit(allocator); - - var noop_bus: NoopEventBus = undefined; - noop_bus.init(); - var registry = PolicyRegistry.init(allocator, noop_bus.eventBus()); - defer registry.deinit(); - try registry.updatePolicies(&.{policy}, "file-provider", .file); - - // Verify the registry has the policy - const snapshot = registry.getSnapshot().?; - try testing.expectEqual(@as(usize, 1), snapshot.policies.len); - - // Verify the metric index has the matcher key - const index = &snapshot.metric_index; - try testing.expect(!index.isEmpty()); - try testing.expectEqual(@as(usize, 1), index.getDatabaseCount()); - - // Verify we can get the database for the metric key - const db = index.getDatabase(.{ .field = .{ .metric_field = .METRIC_FIELD_NAME } }); - try testing.expect(db != null); - - // Test that scanning works directly - var result_buf: [256]u32 = undefined; - const scan_result = db.?.scanPositive("debug_memory_usage", &result_buf); - try testing.expect(scan_result.count > 0); // Pattern should match - - const engine = PolicyEngine.init(noop_bus.eventBus(), ®istry); - var policy_id_buf: [16][]const u8 = undefined; - - // Matching metric should be dropped - var debug_metric = TestMetricContext{ .name = "debug_memory_usage" }; - const result = engine.evaluate(.metric, &debug_metric, TestMetricContext.fieldAccessor, null, &policy_id_buf); - try testing.expectEqual(FilterDecision.drop, result.decision); - - // Non-matching metric should pass - var normal_metric = 
TestMetricContext{ .name = "http_requests_total" }; - const result2 = engine.evaluate(.metric, &normal_metric, TestMetricContext.fieldAccessor, null, &policy_id_buf); - try testing.expectEqual(FilterDecision.unset, result2.decision); -} - -test "MetricPolicyEngine: single policy keep match returns policy ID" { - const allocator = testing.allocator; - - var policy = Policy{ - .id = try allocator.dupe(u8, "metric-policy-keep"), - .name = try allocator.dupe(u8, "keep-important-metrics"), - .enabled = true, - .target = .{ .metric = .{ - .keep = true, - } }, - }; - try policy.target.?.metric.match.append(allocator, .{ - .field = .{ .metric_field = .METRIC_FIELD_NAME }, - .match = .{ .regex = try allocator.dupe(u8, "http_.*") }, - }); - defer policy.deinit(allocator); - - var noop_bus: NoopEventBus = undefined; - noop_bus.init(); - var registry = PolicyRegistry.init(allocator, noop_bus.eventBus()); - defer registry.deinit(); - try registry.updatePolicies(&.{policy}, "file-provider", .file); - - const engine = PolicyEngine.init(noop_bus.eventBus(), ®istry); - - var test_metric = TestMetricContext{ .name = "http_requests_total" }; - var policy_id_buf: [16][]const u8 = undefined; - - const result = engine.evaluate(.metric, &test_metric, TestMetricContext.fieldAccessor, null, &policy_id_buf); - - try testing.expectEqual(FilterDecision.keep, result.decision); - try testing.expectEqual(@as(usize, 1), result.matched_policy_ids.len); - try testing.expectEqualStrings("metric-policy-keep", result.matched_policy_ids[0]); -} - -test "MetricPolicyEngine: multiple matchers AND logic" { - const allocator = testing.allocator; - - // Policy requires BOTH metric name AND unit to match - // Use anchored regex patterns to ensure exact matching - var policy = Policy{ - .id = try allocator.dupe(u8, "metric-policy-and"), - .name = try allocator.dupe(u8, "drop-slow-requests"), - .enabled = true, - .target = .{ .metric = .{ - .keep = false, - } }, - }; - try policy.target.?.metric.match.append(allocator, .{ - .field = .{ .metric_field = .METRIC_FIELD_NAME }, - .match = .{ .regex = try allocator.dupe(u8, "^request_duration$") }, - }); - try policy.target.?.metric.match.append(allocator, .{ - .field = .{ .metric_field = .METRIC_FIELD_UNIT }, - .match = .{ .regex = try allocator.dupe(u8, "^seconds$") }, - }); - defer policy.deinit(allocator); - - var noop_bus: NoopEventBus = undefined; - noop_bus.init(); - var registry = PolicyRegistry.init(allocator, noop_bus.eventBus()); - defer registry.deinit(); - try registry.updatePolicies(&.{policy}, "file-provider", .file); - - const engine = PolicyEngine.init(noop_bus.eventBus(), ®istry); - var policy_id_buf: [16][]const u8 = undefined; - - // Both match - should drop - var both_match = TestMetricContext{ .name = "request_duration", .unit = "seconds" }; - const result1 = engine.evaluate(.metric, &both_match, TestMetricContext.fieldAccessor, null, &policy_id_buf); - try testing.expectEqual(FilterDecision.drop, result1.decision); - - // Only name matches - should pass (unit "milliseconds" doesn't match "^seconds$") - var name_only = TestMetricContext{ .name = "request_duration", .unit = "milliseconds" }; - const result2 = engine.evaluate(.metric, &name_only, TestMetricContext.fieldAccessor, null, &policy_id_buf); - try testing.expectEqual(FilterDecision.unset, result2.decision); - - // Only unit matches - should pass (name "response_size" doesn't match "^request_duration$") - var unit_only = TestMetricContext{ .name = "response_size", .unit = "seconds" }; - const result3 = 
engine.evaluate(.metric, &unit_only, TestMetricContext.fieldAccessor, null, &policy_id_buf); - try testing.expectEqual(FilterDecision.unset, result3.decision); -} - -test "MetricPolicyEngine: negated matcher" { - const allocator = testing.allocator; - - // Drop metrics whose name does NOT match "internal_.*" (negated matcher) - var policy = Policy{ - .id = try allocator.dupe(u8, "metric-policy-negate"), - .name = try allocator.dupe(u8, "drop-internal-metrics"), - .enabled = true, - .target = .{ .metric = .{ - .keep = false, - } }, - }; - try policy.target.?.metric.match.append(allocator, .{ - .field = .{ .metric_field = .METRIC_FIELD_NAME }, - .match = .{ .regex = try allocator.dupe(u8, "internal_.*") }, - .negate = true, - }); - defer policy.deinit(allocator); - - var noop_bus: NoopEventBus = undefined; - noop_bus.init(); - var registry = PolicyRegistry.init(allocator, noop_bus.eventBus()); - defer registry.deinit(); - try registry.updatePolicies(&.{policy}, "file-provider", .file); - - const engine = PolicyEngine.init(noop_bus.eventBus(), &registry); - var policy_id_buf: [16][]const u8 = undefined; - - // Internal metric matches pattern, negation fails -> policy doesn't match -> passes - var internal_metric = TestMetricContext{ .name = "internal_queue_size" }; - const result1 = engine.evaluate(.metric, &internal_metric, TestMetricContext.fieldAccessor, null, &policy_id_buf); - try testing.expectEqual(FilterDecision.unset, result1.decision); - - // Non-internal metric doesn't match pattern, negation succeeds -> policy matches -> drops - var public_metric = TestMetricContext{ .name = "http_requests_total" }; - const result2 = engine.evaluate(.metric, &public_metric, TestMetricContext.fieldAccessor, null, &policy_id_buf); - try testing.expectEqual(FilterDecision.drop, result2.decision); -} - -test "MetricPolicyEngine: datapoint attribute matching" { - const allocator = testing.allocator; - - var policy = Policy{ - .id = try allocator.dupe(u8, "metric-policy-dp-attr"), - .name = try allocator.dupe(u8, "drop-error-status"), - .enabled = true, - .target = .{ .metric = .{ - .keep = false, - } }, - }; - try policy.target.?.metric.match.append(allocator, .{ - .field = .{ .datapoint_attribute = try testMakeAttrPath(allocator, "status_code") }, - .match = .{ .regex = try allocator.dupe(u8, "5[0-9][0-9]") }, - }); - defer policy.deinit(allocator); - - var noop_bus: NoopEventBus = undefined; - noop_bus.init(); - var registry = PolicyRegistry.init(allocator, noop_bus.eventBus()); - defer registry.deinit(); - try registry.updatePolicies(&.{policy}, "file-provider", .file); - - const engine = PolicyEngine.init(noop_bus.eventBus(), &registry); - var policy_id_buf: [16][]const u8 = undefined; - - // Metric with a 5xx status should be dropped - var dp_attrs = std.StringHashMap([]const u8).init(allocator); - defer dp_attrs.deinit(); - try dp_attrs.put("status_code", "503"); - - var error_metric = TestMetricContext{ - .name = "http_response", - .datapoint_attributes = dp_attrs, - }; - const result1 = engine.evaluate(.metric, &error_metric, TestMetricContext.fieldAccessor, null, &policy_id_buf); - try testing.expectEqual(FilterDecision.drop, result1.decision); - - // Metric with 200 status should pass - var ok_attrs = std.StringHashMap([]const u8).init(allocator); - defer ok_attrs.deinit(); - try ok_attrs.put("status_code", "200"); - - var ok_metric = TestMetricContext{ - .name = "http_response", - .datapoint_attributes = ok_attrs, - }; - const result2 = engine.evaluate(.metric, &ok_metric, TestMetricContext.fieldAccessor, null,
&policy_id_buf); - try testing.expectEqual(FilterDecision.unset, result2.decision); -} - -test "MetricPolicyEngine: resource attribute matching" { - const allocator = testing.allocator; - - var policy = Policy{ - .id = try allocator.dupe(u8, "metric-policy-res-attr"), - .name = try allocator.dupe(u8, "drop-test-env"), - .enabled = true, - .target = .{ .metric = .{ - .keep = false, - } }, - }; - try policy.target.?.metric.match.append(allocator, .{ - .field = .{ .resource_attribute = try testMakeAttrPath(allocator, "deployment.environment") }, - .match = .{ .regex = try allocator.dupe(u8, "test|staging") }, - }); - defer policy.deinit(allocator); - - var noop_bus: NoopEventBus = undefined; - noop_bus.init(); - var registry = PolicyRegistry.init(allocator, noop_bus.eventBus()); - defer registry.deinit(); - try registry.updatePolicies(&.{policy}, "file-provider", .file); - - const engine = PolicyEngine.init(noop_bus.eventBus(), ®istry); - var policy_id_buf: [16][]const u8 = undefined; - - // Metric from test environment should be dropped - var test_attrs = std.StringHashMap([]const u8).init(allocator); - defer test_attrs.deinit(); - try test_attrs.put("deployment.environment", "test"); - - var test_metric = TestMetricContext{ - .name = "http_requests_total", - .resource_attributes = test_attrs, - }; - const result1 = engine.evaluate(.metric, &test_metric, TestMetricContext.fieldAccessor, null, &policy_id_buf); - try testing.expectEqual(FilterDecision.drop, result1.decision); - - // Metric from production environment should pass - var prod_attrs = std.StringHashMap([]const u8).init(allocator); - defer prod_attrs.deinit(); - try prod_attrs.put("deployment.environment", "production"); - - var prod_metric = TestMetricContext{ - .name = "http_requests_total", - .resource_attributes = prod_attrs, - }; - const result2 = engine.evaluate(.metric, &prod_metric, TestMetricContext.fieldAccessor, null, &policy_id_buf); - try testing.expectEqual(FilterDecision.unset, result2.decision); -} - -test "MetricPolicyEngine: log policies don't affect metrics" { - const allocator = testing.allocator; - - // Create a log policy that would match if applied to metrics - var log_policy = Policy{ - .id = try allocator.dupe(u8, "log-policy-only"), - .name = try allocator.dupe(u8, "drop-error-logs"), - .enabled = true, - .target = .{ .log = .{ - .keep = try allocator.dupe(u8, "none"), - } }, - }; - try log_policy.target.?.log.match.append(allocator, .{ - .field = .{ .log_field = .LOG_FIELD_BODY }, - .match = .{ .regex = try allocator.dupe(u8, "error") }, - }); - defer log_policy.deinit(allocator); - - var noop_bus: NoopEventBus = undefined; - noop_bus.init(); - var registry = PolicyRegistry.init(allocator, noop_bus.eventBus()); - defer registry.deinit(); - try registry.updatePolicies(&.{log_policy}, "file-provider", .file); - - const engine = PolicyEngine.init(noop_bus.eventBus(), ®istry); - var policy_id_buf: [16][]const u8 = undefined; - - // Metric evaluation should not be affected by log policies - var test_metric = TestMetricContext{ .name = "error_count" }; - const result = engine.evaluate(.metric, &test_metric, TestMetricContext.fieldAccessor, null, &policy_id_buf); - - try testing.expectEqual(FilterDecision.unset, result.decision); - try testing.expectEqual(@as(usize, 0), result.matched_policy_ids.len); -} - -test "MetricPolicyEngine: metric policies don't affect logs" { - const allocator = testing.allocator; - - // Create a metric policy - var metric_policy = Policy{ - .id = try allocator.dupe(u8, 
"metric-policy-only"), - .name = try allocator.dupe(u8, "drop-debug-metrics"), - .enabled = true, - .target = .{ .metric = .{ - .keep = false, - } }, - }; - try metric_policy.target.?.metric.match.append(allocator, .{ - .field = .{ .metric_field = .METRIC_FIELD_NAME }, - .match = .{ .regex = try allocator.dupe(u8, "debug_.*") }, - }); - defer metric_policy.deinit(allocator); - - var noop_bus: NoopEventBus = undefined; - noop_bus.init(); - var registry = PolicyRegistry.init(allocator, noop_bus.eventBus()); - defer registry.deinit(); - try registry.updatePolicies(&.{metric_policy}, "file-provider", .file); - - const engine = PolicyEngine.init(noop_bus.eventBus(), ®istry); - var policy_id_buf: [16][]const u8 = undefined; - - // Log evaluation should not be affected by metric policies - var test_log = TestLogContext{ .message = "debug_info: something happened" }; - const result = engine.evaluate(.log, &test_log, TestLogContext.fieldAccessor, null, &policy_id_buf); - - try testing.expectEqual(FilterDecision.unset, result.decision); - try testing.expectEqual(@as(usize, 0), result.matched_policy_ids.len); -} - -test "MetricPolicyEngine: most restrictive wins - drop beats keep" { - const allocator = testing.allocator; - - // Keep policy for all http metrics - var keep_policy = Policy{ - .id = try allocator.dupe(u8, "metric-keep-http"), - .name = try allocator.dupe(u8, "keep-http-metrics"), - .enabled = true, - .target = .{ .metric = .{ - .keep = true, - } }, - }; - try keep_policy.target.?.metric.match.append(allocator, .{ - .field = .{ .metric_field = .METRIC_FIELD_NAME }, - .match = .{ .regex = try allocator.dupe(u8, "http_.*") }, - }); - defer keep_policy.deinit(allocator); - - // Drop policy for error metrics - var drop_policy = Policy{ - .id = try allocator.dupe(u8, "metric-drop-errors"), - .name = try allocator.dupe(u8, "drop-error-metrics"), - .enabled = true, - .target = .{ .metric = .{ - .keep = false, - } }, - }; - try drop_policy.target.?.metric.match.append(allocator, .{ - .field = .{ .metric_field = .METRIC_FIELD_NAME }, - .match = .{ .regex = try allocator.dupe(u8, "http_errors") }, - }); - defer drop_policy.deinit(allocator); - - var noop_bus: NoopEventBus = undefined; - noop_bus.init(); - var registry = PolicyRegistry.init(allocator, noop_bus.eventBus()); - defer registry.deinit(); - try registry.updatePolicies(&.{ keep_policy, drop_policy }, "file-provider", .file); - - const engine = PolicyEngine.init(noop_bus.eventBus(), ®istry); - var policy_id_buf: [16][]const u8 = undefined; - - // http_errors matches both policies - drop should win - var error_metric = TestMetricContext{ .name = "http_errors" }; - const result1 = engine.evaluate(.metric, &error_metric, TestMetricContext.fieldAccessor, null, &policy_id_buf); - try testing.expectEqual(FilterDecision.drop, result1.decision); - - // http_requests matches only keep policy - var requests_metric = TestMetricContext{ .name = "http_requests_total" }; - const result2 = engine.evaluate(.metric, &requests_metric, TestMetricContext.fieldAccessor, null, &policy_id_buf); - try testing.expectEqual(FilterDecision.keep, result2.decision); -} - -test "MetricPolicyEngine: disabled policies are skipped" { - const allocator = testing.allocator; - - var policy = Policy{ - .id = try allocator.dupe(u8, "metric-policy-disabled"), - .name = try allocator.dupe(u8, "disabled-drop-policy"), - .enabled = false, // Disabled! 
- .target = .{ .metric = .{ - .keep = false, - } }, - }; - try policy.target.?.metric.match.append(allocator, .{ - .field = .{ .metric_field = .METRIC_FIELD_NAME }, - .match = .{ .regex = try allocator.dupe(u8, ".+") }, // Match any non-empty name - }); - defer policy.deinit(allocator); - - var noop_bus: NoopEventBus = undefined; - noop_bus.init(); - var registry = PolicyRegistry.init(allocator, noop_bus.eventBus()); - defer registry.deinit(); - try registry.updatePolicies(&.{policy}, "file-provider", .file); - - const engine = PolicyEngine.init(noop_bus.eventBus(), ®istry); - var policy_id_buf: [16][]const u8 = undefined; - - // Even though the pattern matches, the disabled policy should be skipped - var test_metric = TestMetricContext{ .name = "any_metric_name" }; - const result = engine.evaluate(.metric, &test_metric, TestMetricContext.fieldAccessor, null, &policy_id_buf); - - try testing.expectEqual(FilterDecision.unset, result.decision); -} - -test "MetricPolicyEngine: mixed log and metric policies" { - const allocator = testing.allocator; - - // Metric drop policy - var metric_policy = Policy{ - .id = try allocator.dupe(u8, "metric-drop"), - .name = try allocator.dupe(u8, "drop-debug-metrics"), - .enabled = true, - .target = .{ .metric = .{ - .keep = false, - } }, - }; - try metric_policy.target.?.metric.match.append(allocator, .{ - .field = .{ .metric_field = .METRIC_FIELD_NAME }, - .match = .{ .regex = try allocator.dupe(u8, "debug_.*") }, - }); - defer metric_policy.deinit(allocator); - - // Log drop policy - var log_policy = Policy{ - .id = try allocator.dupe(u8, "log-drop"), - .name = try allocator.dupe(u8, "drop-debug-logs"), - .enabled = true, - .target = .{ .log = .{ - .keep = try allocator.dupe(u8, "none"), - } }, - }; - try log_policy.target.?.log.match.append(allocator, .{ - .field = .{ .log_field = .LOG_FIELD_BODY }, - .match = .{ .regex = try allocator.dupe(u8, "DEBUG:.*") }, - }); - defer log_policy.deinit(allocator); - - var noop_bus: NoopEventBus = undefined; - noop_bus.init(); - var registry = PolicyRegistry.init(allocator, noop_bus.eventBus()); - defer registry.deinit(); - try registry.updatePolicies(&.{ metric_policy, log_policy }, "file-provider", .file); - - const engine = PolicyEngine.init(noop_bus.eventBus(), ®istry); - var policy_id_buf: [16][]const u8 = undefined; - - // Debug metric should be dropped by metric policy - var debug_metric = TestMetricContext{ .name = "debug_memory" }; - const result1 = engine.evaluate(.metric, &debug_metric, TestMetricContext.fieldAccessor, null, &policy_id_buf); - try testing.expectEqual(FilterDecision.drop, result1.decision); - - // Debug log should be dropped by log policy - var debug_log = TestLogContext{ .message = "DEBUG: test message" }; - const result2 = engine.evaluate(.log, &debug_log, TestLogContext.fieldAccessor, null, &policy_id_buf); - try testing.expectEqual(FilterDecision.drop, result2.decision); - - // Non-debug metric should pass - var normal_metric = TestMetricContext{ .name = "http_requests" }; - const result3 = engine.evaluate(.metric, &normal_metric, TestMetricContext.fieldAccessor, null, &policy_id_buf); - try testing.expectEqual(FilterDecision.unset, result3.decision); - - // Non-debug log should pass - var normal_log = TestLogContext{ .message = "INFO: test message" }; - const result4 = engine.evaluate(.log, &normal_log, TestLogContext.fieldAccessor, null, &policy_id_buf); - try testing.expectEqual(FilterDecision.unset, result4.decision); -} - -test "MetricPolicyEngine: regex pattern matching" { - const 
allocator = testing.allocator; - - var policy = Policy{ - .id = try allocator.dupe(u8, "metric-regex"), - .name = try allocator.dupe(u8, "drop-by-regex"), - .enabled = true, - .target = .{ .metric = .{ - .keep = false, - } }, - }; - // Match any metric starting with "internal_" or ending with "_debug" - try policy.target.?.metric.match.append(allocator, .{ - .field = .{ .metric_field = .METRIC_FIELD_NAME }, - .match = .{ .regex = try allocator.dupe(u8, "(^internal_|_debug$)") }, - }); - defer policy.deinit(allocator); - - var noop_bus: NoopEventBus = undefined; - noop_bus.init(); - var registry = PolicyRegistry.init(allocator, noop_bus.eventBus()); - defer registry.deinit(); - try registry.updatePolicies(&.{policy}, "file-provider", .file); - - const engine = PolicyEngine.init(noop_bus.eventBus(), ®istry); - var policy_id_buf: [16][]const u8 = undefined; - - // Matches: starts with internal_ - var m1 = TestMetricContext{ .name = "internal_queue_size" }; - try testing.expectEqual(FilterDecision.drop, engine.evaluate(.metric, &m1, TestMetricContext.fieldAccessor, null, &policy_id_buf).decision); - - // Matches: ends with _debug - var m2 = TestMetricContext{ .name = "http_latency_debug" }; - try testing.expectEqual(FilterDecision.drop, engine.evaluate(.metric, &m2, TestMetricContext.fieldAccessor, null, &policy_id_buf).decision); - - // Does not match - var m3 = TestMetricContext{ .name = "http_requests_total" }; - try testing.expectEqual(FilterDecision.unset, engine.evaluate(.metric, &m3, TestMetricContext.fieldAccessor, null, &policy_id_buf).decision); -} - -test "MetricPolicyEngine: stats recording for matched policies" { - const allocator = testing.allocator; - - var policy = Policy{ - .id = try allocator.dupe(u8, "metric-stats-test"), - .name = try allocator.dupe(u8, "drop-test-metrics"), - .enabled = true, - .target = .{ .metric = .{ - .keep = false, - } }, - }; - try policy.target.?.metric.match.append(allocator, .{ - .field = .{ .metric_field = .METRIC_FIELD_NAME }, - .match = .{ .regex = try allocator.dupe(u8, "test_.*") }, - }); - defer policy.deinit(allocator); - - var noop_bus: NoopEventBus = undefined; - noop_bus.init(); - - var registry = PolicyRegistry.init(allocator, noop_bus.eventBus()); - defer registry.deinit(); - try registry.updatePolicies(&.{policy}, "file-provider", .file); - - const engine = PolicyEngine.init(noop_bus.eventBus(), ®istry); - var policy_id_buf: [16][]const u8 = undefined; - - // Matching metric - should record stats - var test_metric = TestMetricContext{ .name = "test_counter" }; - const result = engine.evaluate(.metric, &test_metric, TestMetricContext.fieldAccessor, null, &policy_id_buf); - try testing.expectEqual(FilterDecision.drop, result.decision); - - // Verify stats were recorded via lock-free atomics - const snapshot = registry.getSnapshot().?; - const stats = snapshot.getStats(0).?; - try testing.expectEqual(@as(i64, 1), stats.hits.load(.monotonic)); - try testing.expectEqual(@as(i64, 0), stats.misses.load(.monotonic)); -} - -// ============================================================================= -// Sampling and Rate Limiting Tests -// ============================================================================= - -test "PolicyEngine: percentage sampling - 0% drops all" { - const allocator = testing.allocator; - - var policy = Policy{ - .id = try allocator.dupe(u8, "policy-1"), - .name = try allocator.dupe(u8, "sample-0-percent"), - .enabled = true, - .target = .{ .log = .{ - .keep = try allocator.dupe(u8, "0%"), - } }, - }; - try 
policy.target.?.log.match.append(allocator, .{ - .field = .{ .log_field = .LOG_FIELD_BODY }, - .match = .{ .regex = try allocator.dupe(u8, "test") }, - }); - defer policy.deinit(allocator); - - var noop_bus: NoopEventBus = undefined; - noop_bus.init(); - var registry = PolicyRegistry.init(allocator, noop_bus.eventBus()); - defer registry.deinit(); - try registry.updatePolicies(&.{policy}, "file-provider", .file); - - const engine = PolicyEngine.init(noop_bus.eventBus(), ®istry); - var policy_id_buf: [16][]const u8 = undefined; - - // 0% sampling should drop all matching logs - var test_log = TestLogContext{ .message = "test message" }; - const result = engine.evaluate(.log, &test_log, TestLogContext.fieldAccessor, null, &policy_id_buf); - try testing.expectEqual(FilterDecision.drop, result.decision); -} - -test "PolicyEngine: percentage sampling - 100% keeps all" { - const allocator = testing.allocator; - - var policy = Policy{ - .id = try allocator.dupe(u8, "policy-1"), - .name = try allocator.dupe(u8, "sample-100-percent"), - .enabled = true, - .target = .{ .log = .{ - .keep = try allocator.dupe(u8, "100%"), - } }, - }; - try policy.target.?.log.match.append(allocator, .{ - .field = .{ .log_field = .LOG_FIELD_BODY }, - .match = .{ .regex = try allocator.dupe(u8, "test") }, - }); - defer policy.deinit(allocator); - - var noop_bus: NoopEventBus = undefined; - noop_bus.init(); - var registry = PolicyRegistry.init(allocator, noop_bus.eventBus()); - defer registry.deinit(); - try registry.updatePolicies(&.{policy}, "file-provider", .file); - - const engine = PolicyEngine.init(noop_bus.eventBus(), ®istry); - var policy_id_buf: [16][]const u8 = undefined; - - // 100% sampling should keep all matching logs - var test_log = TestLogContext{ .message = "test message" }; - const result = engine.evaluate(.log, &test_log, TestLogContext.fieldAccessor, null, &policy_id_buf); - try testing.expectEqual(FilterDecision.keep, result.decision); -} - -test "PolicyEngine: percentage sampling - deterministic per context" { - const allocator = testing.allocator; - - var policy = Policy{ - .id = try allocator.dupe(u8, "policy-1"), - .name = try allocator.dupe(u8, "sample-50-percent"), - .enabled = true, - .target = .{ .log = .{ - .keep = try allocator.dupe(u8, "50%"), - } }, - }; - try policy.target.?.log.match.append(allocator, .{ - .field = .{ .log_field = .LOG_FIELD_BODY }, - .match = .{ .regex = try allocator.dupe(u8, "test") }, - }); - defer policy.deinit(allocator); - - var noop_bus: NoopEventBus = undefined; - noop_bus.init(); - var registry = PolicyRegistry.init(allocator, noop_bus.eventBus()); - defer registry.deinit(); - try registry.updatePolicies(&.{policy}, "file-provider", .file); - - const engine = PolicyEngine.init(noop_bus.eventBus(), ®istry); - var policy_id_buf: [16][]const u8 = undefined; - - // Same context should produce same decision (deterministic) - var test_log = TestLogContext{ .message = "test message" }; - const result1 = engine.evaluate(.log, &test_log, TestLogContext.fieldAccessor, null, &policy_id_buf); - const result2 = engine.evaluate(.log, &test_log, TestLogContext.fieldAccessor, null, &policy_id_buf); - try testing.expectEqual(result1.decision, result2.decision); -} - -test "PolicyEngine: rate limiting - respects limit" { - const allocator = testing.allocator; - - var policy = Policy{ - .id = try allocator.dupe(u8, "policy-1"), - .name = try allocator.dupe(u8, "rate-limit-5-per-second"), - .enabled = true, - .target = .{ .log = .{ - .keep = try allocator.dupe(u8, "5/s"), - } }, 
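// Hedged summary of the keep-string grammar exercised across these tests:
//   "all" / "none"  keep or drop every matching record
//   "N%"            deterministic sampling (hashed on sample_key when set)
//   "N/s", "N/m"    rate limits per second / minute ("0/s" drops everything)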
- }; - try policy.target.?.log.match.append(allocator, .{ - .field = .{ .log_field = .LOG_FIELD_BODY }, - .match = .{ .regex = try allocator.dupe(u8, "test") }, - }); - defer policy.deinit(allocator); - - var noop_bus: NoopEventBus = undefined; - noop_bus.init(); - var registry = PolicyRegistry.init(allocator, noop_bus.eventBus()); - defer registry.deinit(); - try registry.updatePolicies(&.{policy}, "file-provider", .file); - - const engine = PolicyEngine.init(noop_bus.eventBus(), ®istry); - var policy_id_buf: [16][]const u8 = undefined; - - // First 5 should be kept - var kept_count: u32 = 0; - for (0..10) |_| { - var test_log = TestLogContext{ .message = "test message" }; - const result = engine.evaluate(.log, &test_log, TestLogContext.fieldAccessor, null, &policy_id_buf); - if (result.decision == .keep) { - kept_count += 1; - } - } - - // Should keep exactly 5 (rate limit) - try testing.expectEqual(@as(u32, 5), kept_count); -} - -test "PolicyEngine: rate limiting per minute" { - const allocator = testing.allocator; - - var policy = Policy{ - .id = try allocator.dupe(u8, "policy-1"), - .name = try allocator.dupe(u8, "rate-limit-3-per-minute"), - .enabled = true, - .target = .{ .log = .{ - .keep = try allocator.dupe(u8, "3/m"), - } }, - }; - try policy.target.?.log.match.append(allocator, .{ - .field = .{ .log_field = .LOG_FIELD_BODY }, - .match = .{ .regex = try allocator.dupe(u8, "test") }, - }); - defer policy.deinit(allocator); - - var noop_bus: NoopEventBus = undefined; - noop_bus.init(); - var registry = PolicyRegistry.init(allocator, noop_bus.eventBus()); - defer registry.deinit(); - try registry.updatePolicies(&.{policy}, "file-provider", .file); - - const engine = PolicyEngine.init(noop_bus.eventBus(), ®istry); - var policy_id_buf: [16][]const u8 = undefined; - - // First 3 should be kept - var kept_count: u32 = 0; - for (0..10) |_| { - var test_log = TestLogContext{ .message = "test message" }; - const result = engine.evaluate(.log, &test_log, TestLogContext.fieldAccessor, null, &policy_id_buf); - if (result.decision == .keep) { - kept_count += 1; - } - } - - // Should keep exactly 3 (rate limit per minute) - try testing.expectEqual(@as(u32, 3), kept_count); -} - -test "PolicyEngine: rate limiting with zero limit drops all" { - const allocator = testing.allocator; - - var policy = Policy{ - .id = try allocator.dupe(u8, "policy-1"), - .name = try allocator.dupe(u8, "rate-limit-0"), - .enabled = true, - .target = .{ .log = .{ - .keep = try allocator.dupe(u8, "0/s"), - } }, - }; - try policy.target.?.log.match.append(allocator, .{ - .field = .{ .log_field = .LOG_FIELD_BODY }, - .match = .{ .regex = try allocator.dupe(u8, "test") }, - }); - defer policy.deinit(allocator); - - var noop_bus: NoopEventBus = undefined; - noop_bus.init(); - var registry = PolicyRegistry.init(allocator, noop_bus.eventBus()); - defer registry.deinit(); - try registry.updatePolicies(&.{policy}, "file-provider", .file); - - const engine = PolicyEngine.init(noop_bus.eventBus(), ®istry); - var policy_id_buf: [16][]const u8 = undefined; - - // 0/s rate limit should drop all - var test_log = TestLogContext{ .message = "test message" }; - const result = engine.evaluate(.log, &test_log, TestLogContext.fieldAccessor, null, &policy_id_buf); - try testing.expectEqual(FilterDecision.drop, result.decision); -} - -test "PolicyEngine: sampling does not affect non-matching logs" { - const allocator = testing.allocator; - - var policy = Policy{ - .id = try allocator.dupe(u8, "policy-1"), - .name = try allocator.dupe(u8, 
"sample-policy"), - .enabled = true, - .target = .{ .log = .{ - .keep = try allocator.dupe(u8, "50%"), - } }, - }; - try policy.target.?.log.match.append(allocator, .{ - .field = .{ .log_field = .LOG_FIELD_BODY }, - .match = .{ .regex = try allocator.dupe(u8, "specific_pattern") }, - }); - defer policy.deinit(allocator); - - var noop_bus: NoopEventBus = undefined; - noop_bus.init(); - var registry = PolicyRegistry.init(allocator, noop_bus.eventBus()); - defer registry.deinit(); - try registry.updatePolicies(&.{policy}, "file-provider", .file); - - const engine = PolicyEngine.init(noop_bus.eventBus(), ®istry); - var policy_id_buf: [16][]const u8 = undefined; - - // Non-matching log should return unset (not affected by sampling) - var test_log = TestLogContext{ .message = "different message" }; - const result = engine.evaluate(.log, &test_log, TestLogContext.fieldAccessor, null, &policy_id_buf); - try testing.expectEqual(FilterDecision.unset, result.decision); -} - -test "PolicyEngine: more matching policies than policy_id_buf capacity" { - // When more policies match than fit in policy_id_buf, the engine should: - // 1. Still compute the correct final decision (including from policies beyond buffer) - // 2. Only return as many policy IDs as fit in the buffer - // 3. Not crash or have undefined behavior - const allocator = testing.allocator; - - // Create 5 KEEP policies that all match, but we'll only provide a buffer for 2 - var policies: [5]Policy = undefined; - for (&policies, 0..) |*p, i| { - var id_buf: [16]u8 = undefined; - const id = std.fmt.bufPrint(&id_buf, "policy-{d}", .{i}) catch unreachable; - - p.* = Policy{ - .id = try allocator.dupe(u8, id), - .name = try allocator.dupe(u8, id), - .enabled = true, - .target = .{ .log = .{ - .keep = try allocator.dupe(u8, "all"), - } }, - }; - // All policies match on "test" in body - try p.target.?.log.match.append(allocator, .{ - .field = .{ .log_field = .LOG_FIELD_BODY }, - .match = .{ .regex = try allocator.dupe(u8, "test") }, - }); - } - defer for (&policies) |*p| p.deinit(allocator); - - var noop_bus: NoopEventBus = undefined; - noop_bus.init(); - var registry = PolicyRegistry.init(allocator, noop_bus.eventBus()); - defer registry.deinit(); - try registry.updatePolicies(&policies, "file-provider", .file); - - const engine = PolicyEngine.init(noop_bus.eventBus(), ®istry); - - // Buffer only fits 2 policy IDs, but 5 policies will match - var small_policy_id_buf: [2][]const u8 = undefined; - - var test_log = TestLogContext{ .message = "test message" }; - const result = engine.evaluate(.log, &test_log, TestLogContext.fieldAccessor, null, &small_policy_id_buf); - - // Decision should be KEEP (all 5 policies want to keep) - try testing.expectEqual(FilterDecision.keep, result.decision); - - // Only 2 policy IDs returned (buffer capacity), even though 5 matched - try testing.expectEqual(@as(usize, 2), result.matched_policy_ids.len); -} - -test "PolicyEngine: exists=false matches when field is missing or empty" { - const allocator = testing.allocator; - - // Drop logs where trace_id does NOT exist (exists: false) - var policy = Policy{ - .id = try allocator.dupe(u8, "policy-1"), - .name = try allocator.dupe(u8, "drop-missing-trace"), - .enabled = true, - .target = .{ .log = .{ - .keep = try allocator.dupe(u8, "none"), - } }, - }; - try policy.target.?.log.match.append(allocator, .{ - .field = .{ .log_attribute = try testMakeAttrPath(allocator, "trace_id") }, - .match = .{ .exists = false }, - }); - defer policy.deinit(allocator); - - var 
noop_bus: NoopEventBus = undefined; - noop_bus.init(); - var registry = PolicyRegistry.init(allocator, noop_bus.eventBus()); - defer registry.deinit(); - try registry.updatePolicies(&.{policy}, "file-provider", .file); - - const engine = PolicyEngine.init(noop_bus.eventBus(), ®istry); - var policy_id_buf: [16][]const u8 = undefined; - - // No trace_id attribute = field missing = exists:false matches = dropped - var no_trace = TestLogContext{ .message = "log without trace" }; - try testing.expectEqual(FilterDecision.drop, engine.evaluate(.log, &no_trace, TestLogContext.fieldAccessor, null, &policy_id_buf).decision); - - // Has trace_id = field exists = exists:false does NOT match = unset - var with_trace = TestLogContext{ .message = "log with trace", .trace_id = "abc123" }; - try testing.expectEqual(FilterDecision.unset, engine.evaluate(.log, &with_trace, TestLogContext.fieldAccessor, null, &policy_id_buf).decision); -} - -test "PolicyEngine: exists=false with negate=true matches when field exists" { - const allocator = testing.allocator; - - // Drop logs where trace_id DOES exist (exists: false + negate: true = double negation) - var policy = Policy{ - .id = try allocator.dupe(u8, "policy-1"), - .name = try allocator.dupe(u8, "drop-with-trace"), - .enabled = true, - .target = .{ .log = .{ - .keep = try allocator.dupe(u8, "none"), - } }, - }; - try policy.target.?.log.match.append(allocator, .{ - .field = .{ .log_attribute = try testMakeAttrPath(allocator, "trace_id") }, - .match = .{ .exists = false }, - .negate = true, - }); - defer policy.deinit(allocator); - - var noop_bus: NoopEventBus = undefined; - noop_bus.init(); - var registry = PolicyRegistry.init(allocator, noop_bus.eventBus()); - defer registry.deinit(); - try registry.updatePolicies(&.{policy}, "file-provider", .file); - - const engine = PolicyEngine.init(noop_bus.eventBus(), ®istry); - var policy_id_buf: [16][]const u8 = undefined; - - // Has trace_id = field exists = exists:false+negate:true matches = dropped - var with_trace = TestLogContext{ .message = "log with trace", .trace_id = "abc123" }; - try testing.expectEqual(FilterDecision.drop, engine.evaluate(.log, &with_trace, TestLogContext.fieldAccessor, null, &policy_id_buf).decision); - - // No trace_id = field missing = exists:false+negate:true does NOT match = unset - var no_trace = TestLogContext{ .message = "log without trace" }; - try testing.expectEqual(FilterDecision.unset, engine.evaluate(.log, &no_trace, TestLogContext.fieldAccessor, null, &policy_id_buf).decision); -} - -test "PolicyEngine: sample_key provides deterministic sampling" { - const allocator = testing.allocator; - - // Policy with 50% sampling using trace_id as sample_key - var policy = Policy{ - .id = try allocator.dupe(u8, "sample-policy"), - .name = try allocator.dupe(u8, "sample-by-trace"), - .enabled = true, - .target = .{ .log = .{ - .keep = try allocator.dupe(u8, "50%"), - .sample_key = .{ .field = .{ .log_attribute = try testMakeAttrPath(allocator, "trace_id") } }, - } }, - }; - try policy.target.?.log.match.append(allocator, .{ - .field = .{ .log_field = .LOG_FIELD_BODY }, - .match = .{ .regex = try allocator.dupe(u8, "^.*") }, - }); - defer policy.deinit(allocator); - - var noop_bus: NoopEventBus = undefined; - noop_bus.init(); - var registry = PolicyRegistry.init(allocator, noop_bus.eventBus()); - defer registry.deinit(); - try registry.updatePolicies(&.{policy}, "file-provider", .file); - - const engine = PolicyEngine.init(noop_bus.eventBus(), ®istry); - var policy_id_buf: [16][]const u8 
= undefined; - - // Same trace_id should always get the same decision - var log1 = TestLogContext{ .message = "log one", .trace_id = "trace-abc-123" }; - const decision1 = engine.evaluate(.log, &log1, TestLogContext.fieldAccessor, null, &policy_id_buf).decision; - - var log2 = TestLogContext{ .message = "log two", .trace_id = "trace-abc-123" }; - const decision2 = engine.evaluate(.log, &log2, TestLogContext.fieldAccessor, null, &policy_id_buf).decision; - - var log3 = TestLogContext{ .message = "log three", .trace_id = "trace-abc-123" }; - const decision3 = engine.evaluate(.log, &log3, TestLogContext.fieldAccessor, null, &policy_id_buf).decision; - - // All logs with same trace_id get same decision - try testing.expectEqual(decision1, decision2); - try testing.expectEqual(decision2, decision3); - - // Different trace_id may get different decision (though not guaranteed with only 2 values) - // But the decision for each trace_id is consistent - var log4 = TestLogContext{ .message = "log four", .trace_id = "trace-xyz-789" }; - const decision4a = engine.evaluate(.log, &log4, TestLogContext.fieldAccessor, null, &policy_id_buf).decision; - const decision4b = engine.evaluate(.log, &log4, TestLogContext.fieldAccessor, null, &policy_id_buf).decision; - try testing.expectEqual(decision4a, decision4b); -} - -test "PolicyEngine: sample_key with log_field" { - const allocator = testing.allocator; - - // Policy with 50% sampling using body as sample_key - var policy = Policy{ - .id = try allocator.dupe(u8, "sample-by-body"), - .name = try allocator.dupe(u8, "sample-by-body"), - .enabled = true, - .target = .{ .log = .{ - .keep = try allocator.dupe(u8, "50%"), - .sample_key = .{ .field = .{ .log_field = .LOG_FIELD_BODY } }, - } }, - }; - try policy.target.?.log.match.append(allocator, .{ - .field = .{ .log_field = .LOG_FIELD_BODY }, - .match = .{ .regex = try allocator.dupe(u8, "^.*") }, - }); - defer policy.deinit(allocator); - - var noop_bus: NoopEventBus = undefined; - noop_bus.init(); - var registry = PolicyRegistry.init(allocator, noop_bus.eventBus()); - defer registry.deinit(); - try registry.updatePolicies(&.{policy}, "file-provider", .file); - - const engine = PolicyEngine.init(noop_bus.eventBus(), ®istry); - var policy_id_buf: [16][]const u8 = undefined; - - // Same message body should always get the same decision - var log1 = TestLogContext{ .message = "exact same message" }; - const decision1 = engine.evaluate(.log, &log1, TestLogContext.fieldAccessor, null, &policy_id_buf).decision; - - var log2 = TestLogContext{ .message = "exact same message" }; - const decision2 = engine.evaluate(.log, &log2, TestLogContext.fieldAccessor, null, &policy_id_buf).decision; - - try testing.expectEqual(decision1, decision2); -} - -test "PolicyEngine: sample_key missing field falls back to default" { - const allocator = testing.allocator; - - // Policy with sample_key pointing to a field that doesn't exist - var policy = Policy{ - .id = try allocator.dupe(u8, "sample-missing-key"), - .name = try allocator.dupe(u8, "sample-missing-key"), - .enabled = true, - .target = .{ .log = .{ - .keep = try allocator.dupe(u8, "50%"), - .sample_key = .{ .field = .{ .log_attribute = try testMakeAttrPath(allocator, "nonexistent_field") } }, - } }, - }; - try policy.target.?.log.match.append(allocator, .{ - .field = .{ .log_field = .LOG_FIELD_BODY }, - .match = .{ .regex = try allocator.dupe(u8, "^.*") }, - }); - defer policy.deinit(allocator); - - var noop_bus: NoopEventBus = undefined; - noop_bus.init(); - var registry = 
PolicyRegistry.init(allocator, noop_bus.eventBus()); - defer registry.deinit(); - try registry.updatePolicies(&.{policy}, "file-provider", .file); - - const engine = PolicyEngine.init(noop_bus.eventBus(), ®istry); - var policy_id_buf: [16][]const u8 = undefined; - - // Should still work (falls back to context pointer hash) - var log1 = TestLogContext{ .message = "test message" }; - const result = engine.evaluate(.log, &log1, TestLogContext.fieldAccessor, null, &policy_id_buf); - - // Should get a decision (either keep or drop based on hash) - try testing.expect(result.decision == .keep or result.decision == .drop); -} - -test "hashString: deterministic" { - const hash1 = hashString("trace-abc-123"); - const hash2 = hashString("trace-abc-123"); - const hash3 = hashString("trace-xyz-789"); - - try testing.expectEqual(hash1, hash2); - try testing.expect(hash1 != hash3); -} - -test "hashString: empty string" { - try testing.expectEqual(@as(u64, 0), hashString("")); -} diff --git a/src/policy/provider.zig b/src/policy/provider.zig deleted file mode 100644 index 1bbf073..0000000 --- a/src/policy/provider.zig +++ /dev/null @@ -1,138 +0,0 @@ -const std = @import("std"); -const proto = @import("proto"); -const types = @import("types.zig"); - -const Policy = proto.policy.Policy; - -/// Re-export TransformResult for use by providers -pub const TransformResult = types.TransformResult; - -/// Update notification sent by providers to subscribers -pub const PolicyUpdate = struct { - policies: []const Policy, - /// ID of the provider that sent this update - provider_id: []const u8, -}; - -/// Callback signature for policy updates -/// Context is provider-specific state, onUpdate is called when policies change -pub const PolicyCallback = struct { - context: *anyopaque, - onUpdate: *const fn (context: *anyopaque, update: PolicyUpdate) anyerror!void, - - pub fn call(self: PolicyCallback, update: PolicyUpdate) !void { - try self.onUpdate(self.context, update); - } -}; - -/// PolicyProvider interface - implemented by file, http, and future providers -/// Uses vtable pattern for polymorphism without heap allocation -pub const PolicyProvider = struct { - ptr: *anyopaque, - vtable: *const VTable, - - pub const VTable = struct { - getId: *const fn (ptr: *anyopaque) []const u8, - subscribe: *const fn (ptr: *anyopaque, callback: PolicyCallback) anyerror!void, - recordPolicyError: *const fn (ptr: *anyopaque, policy_id: []const u8, error_message: []const u8) void, - recordPolicyStats: *const fn (ptr: *anyopaque, policy_id: []const u8, hits: i64, misses: i64, transform_result: TransformResult) void, - deinit: *const fn (ptr: *anyopaque) void, - }; - - /// Get the unique identifier for this provider - pub fn getId(self: PolicyProvider) []const u8 { - return self.vtable.getId(self.ptr); - } - - /// Subscribe to policy updates from this provider - /// Provider will call callback immediately with current policies, - /// then on each subsequent update - pub fn subscribe(self: PolicyProvider, callback: PolicyCallback) !void { - try self.vtable.subscribe(self.ptr, callback); - } - - /// Report an error encountered when applying a policy. 
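/// (The vtable signature returns void, so error reporting is fire-and-forget;
/// callers cannot observe a failure here.)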
- /// How this is handled depends on the provider: - /// - HttpProvider: Records error to send in next sync request - /// - FileProvider: Logs error to stderr - pub fn recordPolicyError(self: PolicyProvider, policy_id: []const u8, error_message: []const u8) void { - self.vtable.recordPolicyError(self.ptr, policy_id, error_message); - } - - /// Report statistics about policy hits, misses, and transform results. - /// How this is handled depends on the provider: - /// - HttpProvider: Records stats to send in next sync request - /// - FileProvider: Logs stats to stdout - pub fn recordPolicyStats(self: PolicyProvider, policy_id: []const u8, hits: i64, misses: i64, transform_result: TransformResult) void { - self.vtable.recordPolicyStats(self.ptr, policy_id, hits, misses, transform_result); - } - - /// Cleanup provider resources - pub fn deinit(self: PolicyProvider) void { - self.vtable.deinit(self.ptr); - } - - /// Create a PolicyProvider from a concrete provider implementation - /// Provider must implement: - /// - getId(*Self) []const u8 - /// - subscribe(*Self, PolicyCallback) !void - /// - recordPolicyError(*Self, []const u8, []const u8) void - /// - recordPolicyStats(*Self, []const u8, i64, i64, TransformResult) void - /// - deinit(*Self) void - pub fn init(provider: anytype) PolicyProvider { - const Ptr = @TypeOf(provider); - const ptr_info = @typeInfo(Ptr); - - if (ptr_info != .pointer) @compileError("provider must be a pointer"); - if (ptr_info.pointer.size != .one) @compileError("provider must be single-item pointer"); - - const T = ptr_info.pointer.child; - - // Verify provider has required methods - if (!@hasDecl(T, "getId")) @compileError("provider must have getId method"); - if (!@hasDecl(T, "subscribe")) @compileError("provider must have subscribe method"); - if (!@hasDecl(T, "recordPolicyError")) @compileError("provider must have recordPolicyError method"); - if (!@hasDecl(T, "recordPolicyStats")) @compileError("provider must have recordPolicyStats method"); - if (!@hasDecl(T, "deinit")) @compileError("provider must have deinit method"); - - const gen = struct { - fn getIdImpl(ptr: *anyopaque) []const u8 { - const self: Ptr = @ptrCast(@alignCast(ptr)); - return self.getId(); - } - - fn subscribeImpl(ptr: *anyopaque, callback: PolicyCallback) anyerror!void { - const self: Ptr = @ptrCast(@alignCast(ptr)); - return self.subscribe(callback); - } - - fn recordPolicyErrorImpl(ptr: *anyopaque, policy_id: []const u8, error_message: []const u8) void { - const self: Ptr = @ptrCast(@alignCast(ptr)); - self.recordPolicyError(policy_id, error_message); - } - - fn recordPolicyStatsImpl(ptr: *anyopaque, policy_id: []const u8, hits: i64, misses: i64, transform_result: TransformResult) void { - const self: Ptr = @ptrCast(@alignCast(ptr)); - self.recordPolicyStats(policy_id, hits, misses, transform_result); - } - - fn deinitImpl(ptr: *anyopaque) void { - const self: Ptr = @ptrCast(@alignCast(ptr)); - self.deinit(); - } - - const vtable = VTable{ - .getId = getIdImpl, - .subscribe = subscribeImpl, - .recordPolicyError = recordPolicyErrorImpl, - .recordPolicyStats = recordPolicyStatsImpl, - .deinit = deinitImpl, - }; - }; - - return .{ - .ptr = provider, - .vtable = &gen.vtable, - }; - } -}; diff --git a/src/policy/provider_file.zig b/src/policy/provider_file.zig deleted file mode 100644 index a21dbbc..0000000 --- a/src/policy/provider_file.zig +++ /dev/null @@ -1,638 +0,0 @@ -const std = @import("std"); -const builtin = @import("builtin"); -const proto = @import("proto"); -const policy_provider = 
@import("./provider.zig"); -const parser = @import("./parser.zig"); -const o11y = @import("../observability/root.zig"); - -const Policy = proto.policy.Policy; -const PolicyCallback = policy_provider.PolicyCallback; -const EventBus = o11y.EventBus; - -const Sha256 = std.crypto.hash.sha2.Sha256; - -// ============================================================================= -// Observability Events -// ============================================================================= - -const PolicyError = struct { policy_id: []const u8, message: []const u8 }; -const TransformResult = policy_provider.TransformResult; -const PolicyStats = struct { - policy_id: []const u8, - hits: i64, - misses: i64, - transform_result: TransformResult, -}; -const PoliciesLoading = struct { path: []const u8 }; -const PoliciesLoaded = struct { count: usize, path: []const u8 }; -const PoliciesUnchanged = struct { hash: []const u8 }; -const FileWatcherError = struct { err: []const u8 }; -const FileWatcherUnsupported = struct {}; -const PolicyReloadFailed = struct { err: []const u8 }; - -/// File-based policy provider that watches a config file for changes -pub const FileProvider = struct { - allocator: std.mem.Allocator, - /// Unique identifier for this provider - id: []const u8, - config_path: []const u8, - callback: ?PolicyCallback, - watch_thread: ?std.Thread, - shutdown_flag: std.atomic.Value(bool), - /// SHA256 hash of the last loaded file contents - content_hash: ?[Sha256.digest_length]u8, - /// Event bus for observability - bus: *EventBus, - - pub fn init(allocator: std.mem.Allocator, bus: *EventBus, id: []const u8, config_path: []const u8) !*FileProvider { - const self = try allocator.create(FileProvider); - errdefer allocator.destroy(self); - - const id_copy = try allocator.dupe(u8, id); - errdefer allocator.free(id_copy); - - const path_copy = try allocator.dupe(u8, config_path); - errdefer allocator.free(path_copy); - - self.* = .{ - .allocator = allocator, - .id = id_copy, - .config_path = path_copy, - .callback = null, - .watch_thread = null, - .shutdown_flag = std.atomic.Value(bool).init(false), - .content_hash = null, - .bus = bus, - }; - - return self; - } - - /// Get the unique identifier for this provider - pub fn getId(self: *FileProvider) []const u8 { - return self.id; - } - - pub fn subscribe(self: *FileProvider, callback: PolicyCallback) !void { - self.callback = callback; - - // Initial load and notify - try self.loadAndNotify(); - - // Start watching for changes - self.watch_thread = try std.Thread.spawn(.{}, watchLoop, .{self}); - } - - pub fn shutdown(self: *FileProvider) void { - self.shutdown_flag.store(true, .release); - - if (self.watch_thread) |thread| { - thread.join(); - self.watch_thread = null; - } - } - - pub fn deinit(self: *FileProvider) void { - // Ensure shutdown is called first - self.shutdown(); - - self.allocator.free(self.id); - self.allocator.free(self.config_path); - self.allocator.destroy(self); - } - - /// Report an error encountered when applying a policy. - /// For file provider, this logs to stderr since there's no remote server to report to. - pub fn recordPolicyError(self: *FileProvider, policy_id: []const u8, error_message: []const u8) void { - self.bus.err(PolicyError{ .policy_id = policy_id, .message = error_message }); - } - - /// Report statistics about policy hits, misses, and transform results. - /// For file provider, this logs to stdout since there's no remote server to report to. 
- pub fn recordPolicyStats(self: *FileProvider, policy_id: []const u8, hits: i64, misses: i64, transform_result: TransformResult) void { - self.bus.debug(PolicyStats{ .policy_id = policy_id, .hits = hits, .misses = misses, .transform_result = transform_result }); - } - - fn loadAndNotify(self: *FileProvider) !void { - self.bus.info(PoliciesLoading{ .path = self.config_path }); - - // Read file contents and compute hash - const file = try std.fs.cwd().openFile(self.config_path, .{}); - defer file.close(); - - const contents = try file.readToEndAlloc(self.allocator, 10 * 1024 * 1024); // 10MB max - defer self.allocator.free(contents); - - var new_hash: [Sha256.digest_length]u8 = undefined; - Sha256.hash(contents, &new_hash, .{}); - - // Check if content has changed - if (self.content_hash) |old_hash| { - if (std.mem.eql(u8, &old_hash, &new_hash)) { - self.bus.debug(PoliciesUnchanged{ .hash = &new_hash }); - return; - } - } - - // Update stored hash - self.content_hash = new_hash; - - const policies = try parser.parsePoliciesBytes(self.allocator, contents); - defer { - // Registry duplicates policies, so we must free our parsed copies - for (policies) |*policy| { - @constCast(policy).deinit(self.allocator); - } - self.allocator.free(policies); - } - - if (self.callback) |cb| { - try cb.call(.{ - .policies = policies, - .provider_id = self.id, - }); - } - - self.bus.info(PoliciesLoaded{ .count = policies.len, .path = self.config_path }); - } - - fn watchLoop(self: *FileProvider) void { - if (builtin.os.tag == .linux or builtin.os.tag == .macos) { - self.watchLoopPoll() catch |err| { - self.bus.err(FileWatcherError{ .err = @errorName(err) }); - }; - } else { - self.bus.warn(FileWatcherUnsupported{}); - } - } - - fn watchLoopPoll(self: *FileProvider) !void { - var last_mtime: i128 = 0; - - while (!self.shutdown_flag.load(.acquire)) { - std.Thread.sleep(1 * std.time.ns_per_s); // Check every second - - const file = std.fs.cwd().openFile(self.config_path, .{}) catch continue; - defer file.close(); - - const stat = file.stat() catch continue; - - // Only attempt reload if mtime changed (optimization to avoid reading file every second) - if (stat.mtime != last_mtime) { - last_mtime = stat.mtime; - // loadAndNotify will check content hash and skip if unchanged - self.loadAndNotify() catch |err| { - self.bus.err(PolicyReloadFailed{ .err = @errorName(err) }); - }; - } - } - } -}; - -// ============================================================================= -// Tests -// ============================================================================= - -const testing = std.testing; -const Registry = @import("./registry.zig").PolicyRegistry; -const NoopEventBus = o11y.NoopEventBus; - -test "FileProvider: init and deinit" { - const allocator = testing.allocator; - var noop_bus: NoopEventBus = undefined; - noop_bus.init(); - - const provider = try FileProvider.init( - allocator, - noop_bus.eventBus(), - "test-provider", - "/nonexistent/path/policies.json", - ); - defer provider.deinit(); - - try testing.expectEqualStrings("test-provider", provider.getId()); -} - -test "FileProvider: subscribe fails when file does not exist" { - const allocator = testing.allocator; - var noop_bus: NoopEventBus = undefined; - noop_bus.init(); - - const provider = try FileProvider.init( - allocator, - noop_bus.eventBus(), - "test-provider", - "/nonexistent/path/policies.json", - ); - defer provider.deinit(); - - // Subscribe should fail because file doesn't exist - const result = provider.subscribe(.{ - .context = 
undefined, - .onUpdate = struct { - fn cb(_: *anyopaque, _: policy_provider.PolicyUpdate) !void {} - }.cb, - }); - - try testing.expectError(error.FileNotFound, result); -} - -test "FileProvider: subscribe fails with invalid JSON" { - const allocator = testing.allocator; - var noop_bus: NoopEventBus = undefined; - noop_bus.init(); - - // Create a temporary file with invalid JSON - var tmp_dir = testing.tmpDir(.{}); - defer tmp_dir.cleanup(); - - const file = try tmp_dir.dir.createFile("invalid.json", .{}); - try file.writeAll("{ this is not valid json }"); - file.close(); - - // Get the full path - var path_buf: [std.fs.max_path_bytes]u8 = undefined; - const tmp_path = try tmp_dir.dir.realpath("invalid.json", &path_buf); - - const provider = try FileProvider.init( - allocator, - noop_bus.eventBus(), - "test-provider", - tmp_path, - ); - defer provider.deinit(); - - // Subscribe should fail because JSON is invalid - const result = provider.subscribe(.{ - .context = undefined, - .onUpdate = struct { - fn cb(_: *anyopaque, _: policy_provider.PolicyUpdate) !void {} - }.cb, - }); - - try testing.expectError(error.SyntaxError, result); -} - -test "FileProvider: subscribe fails with invalid policy structure" { - const allocator = testing.allocator; - var noop_bus: NoopEventBus = undefined; - noop_bus.init(); - - // Create a temporary file with valid JSON but invalid policy structure - var tmp_dir = testing.tmpDir(.{}); - defer tmp_dir.cleanup(); - - const file = try tmp_dir.dir.createFile("bad_policy.json", .{}); - // Missing required fields like "id" - try file.writeAll( - \\{ - \\ "policies": [ - \\ { - \\ "name": "missing-id-policy", - \\ "log": { - \\ "match": [{ "log_field": "body", "regex": "test" }] - \\ } - \\ } - \\ ] - \\} - ); - file.close(); - - var path_buf: [std.fs.max_path_bytes]u8 = undefined; - const tmp_path = try tmp_dir.dir.realpath("bad_policy.json", &path_buf); - - const provider = try FileProvider.init( - allocator, - noop_bus.eventBus(), - "test-provider", - tmp_path, - ); - defer provider.deinit(); - - // Subscribe should fail because policy structure is invalid - const result = provider.subscribe(.{ - .context = undefined, - .onUpdate = struct { - fn cb(_: *anyopaque, _: policy_provider.PolicyUpdate) !void {} - }.cb, - }); - - try testing.expectError(error.MissingField, result); -} - -test "FileProvider: registry remains usable after provider fails to load" { - const allocator = testing.allocator; - var noop_bus: NoopEventBus = undefined; - noop_bus.init(); - - // Create registry - var registry = Registry.init(allocator, noop_bus.eventBus()); - defer registry.deinit(); - - // Try to load a provider with a non-existent file - const provider = try FileProvider.init( - allocator, - noop_bus.eventBus(), - "failing-provider", - "/nonexistent/path/policies.json", - ); - defer provider.deinit(); - - // This should fail - const subscribe_result = provider.subscribe(.{ - .context = undefined, - .onUpdate = struct { - fn cb(_: *anyopaque, _: policy_provider.PolicyUpdate) !void {} - }.cb, - }); - try testing.expectError(error.FileNotFound, subscribe_result); - - // Registry should still be usable - no policies loaded - try testing.expectEqual(@as(usize, 0), registry.getPolicyCount()); - try testing.expect(registry.getSnapshot() == null); - - // Now load a valid policy file and verify registry works - var tmp_dir = testing.tmpDir(.{}); - defer tmp_dir.cleanup(); - - const file = try tmp_dir.dir.createFile("valid.json", .{}); - try file.writeAll( - \\{ - \\ "policies": [ - \\ { - \\ 
"id": "test-policy", - \\ "name": "test-policy", - \\ "log": { - \\ "match": [{ "log_field": "body", "regex": "test" }], - \\ "keep": "all" - \\ } - \\ } - \\ ] - \\} - ); - file.close(); - - var path_buf: [std.fs.max_path_bytes]u8 = undefined; - const tmp_path = try tmp_dir.dir.realpath("valid.json", &path_buf); - - const good_provider = try FileProvider.init( - allocator, - noop_bus.eventBus(), - "good-provider", - tmp_path, - ); - defer good_provider.deinit(); - - // Create callback that updates registry - const CallbackContext = struct { - registry: *Registry, - - fn handleUpdate(ctx: *anyopaque, update: policy_provider.PolicyUpdate) !void { - const self: *@This() = @ptrCast(@alignCast(ctx)); - try self.registry.updatePolicies(update.policies, update.provider_id, .file); - } - }; - - var ctx = CallbackContext{ .registry = ®istry }; - - try good_provider.subscribe(.{ - .context = @ptrCast(&ctx), - .onUpdate = CallbackContext.handleUpdate, - }); - - // Registry should now have the policy - try testing.expectEqual(@as(usize, 1), registry.getPolicyCount()); - - const snapshot = registry.getSnapshot(); - try testing.expect(snapshot != null); - try testing.expectEqual(@as(usize, 1), snapshot.?.policies.len); - try testing.expectEqualStrings("test-policy", snapshot.?.policies[0].name); -} - -test "FileProvider: registry retains policies after reload with invalid JSON" { - const allocator = testing.allocator; - var noop_bus: NoopEventBus = undefined; - noop_bus.init(); - - var registry = Registry.init(allocator, noop_bus.eventBus()); - defer registry.deinit(); - - // Create a valid policy file - var tmp_dir = testing.tmpDir(.{}); - defer tmp_dir.cleanup(); - - const file = try tmp_dir.dir.createFile("policies.json", .{}); - try file.writeAll( - \\{ - \\ "policies": [ - \\ { - \\ "id": "test-policy", - \\ "name": "test-policy", - \\ "log": { - \\ "match": [{ "log_field": "body", "regex": "test" }], - \\ "keep": "all" - \\ } - \\ } - \\ ] - \\} - ); - file.close(); - - var path_buf: [std.fs.max_path_bytes]u8 = undefined; - const tmp_path = try tmp_dir.dir.realpath("policies.json", &path_buf); - - const provider = try FileProvider.init( - allocator, - noop_bus.eventBus(), - "test-provider", - tmp_path, - ); - defer provider.deinit(); - - // Create callback that updates registry - const CallbackContext = struct { - registry: *Registry, - - fn handleUpdate(ctx: *anyopaque, update: policy_provider.PolicyUpdate) !void { - const self: *@This() = @ptrCast(@alignCast(ctx)); - try self.registry.updatePolicies(update.policies, update.provider_id, .file); - } - }; - - var ctx = CallbackContext{ .registry = ®istry }; - - // Subscribe - this should load the valid policy - try provider.subscribe(.{ - .context = @ptrCast(&ctx), - .onUpdate = CallbackContext.handleUpdate, - }); - - // Verify the policy was loaded - try testing.expectEqual(@as(usize, 1), registry.getPolicyCount()); - { - const snapshot = registry.getSnapshot(); - try testing.expect(snapshot != null); - try testing.expectEqual(@as(usize, 1), snapshot.?.policies.len); - try testing.expectEqualStrings("test-policy", snapshot.?.policies[0].name); - } - - // Now overwrite the file with invalid JSON - const file2 = try tmp_dir.dir.createFile("policies.json", .{}); - try file2.writeAll("{ this is not valid json }"); - file2.close(); - - // Manually trigger a reload (simulates what the watch loop does) - // This should fail but not crash - const reload_result = provider.loadAndNotify(); - try testing.expectError(error.SyntaxError, reload_result); - - // 
Registry should still have the original policy - reload failure doesn't clear it - try testing.expectEqual(@as(usize, 1), registry.getPolicyCount()); - { - const snapshot = registry.getSnapshot(); - try testing.expect(snapshot != null); - try testing.expectEqual(@as(usize, 1), snapshot.?.policies.len); - try testing.expectEqualStrings("test-policy", snapshot.?.policies[0].name); - } - - // Now overwrite with valid JSON but invalid policy structure (missing "id" field) - const file2b = try tmp_dir.dir.createFile("policies.json", .{}); - try file2b.writeAll( - \\{ - \\ "policies": [ - \\ { - \\ "name": "missing-id-policy", - \\ "log": { - \\ "match": [{ "log_field": "body", "regex": "test" }] - \\ } - \\ } - \\ ] - \\} - ); - file2b.close(); - - // Reload should fail due to missing required field - const reload_result2 = provider.loadAndNotify(); - try testing.expectError(error.MissingField, reload_result2); - - // Registry should still have the original policy - try testing.expectEqual(@as(usize, 1), registry.getPolicyCount()); - { - const snapshot = registry.getSnapshot(); - try testing.expect(snapshot != null); - try testing.expectEqual(@as(usize, 1), snapshot.?.policies.len); - try testing.expectEqualStrings("test-policy", snapshot.?.policies[0].name); - } - - // Fix the file with valid JSON again - const file3 = try tmp_dir.dir.createFile("policies.json", .{}); - try file3.writeAll( - \\{ - \\ "policies": [ - \\ { - \\ "id": "updated-policy", - \\ "name": "updated-policy", - \\ "log": { - \\ "match": [{ "log_field": "body", "regex": "updated" }], - \\ "keep": "all" - \\ } - \\ } - \\ ] - \\} - ); - file3.close(); - - // Reload should now succeed - try provider.loadAndNotify(); - - // Registry should now have the updated policy - try testing.expectEqual(@as(usize, 1), registry.getPolicyCount()); - { - const snapshot = registry.getSnapshot(); - try testing.expect(snapshot != null); - try testing.expectEqual(@as(usize, 1), snapshot.?.policies.len); - try testing.expectEqualStrings("updated-policy", snapshot.?.policies[0].name); - } -} - -test "FileProvider: multiple providers, one fails, registry has policies from successful one" { - const allocator = testing.allocator; - var noop_bus: NoopEventBus = undefined; - noop_bus.init(); - - var registry = Registry.init(allocator, noop_bus.eventBus()); - defer registry.deinit(); - - // Create a valid policy file - var tmp_dir = testing.tmpDir(.{}); - defer tmp_dir.cleanup(); - - const file = try tmp_dir.dir.createFile("valid.json", .{}); - try file.writeAll( - \\{ - \\ "policies": [ - \\ { - \\ "id": "policy-from-valid-provider", - \\ "name": "policy-from-valid-provider", - \\ "log": { - \\ "match": [{ "log_field": "body", "regex": "info" }], - \\ "keep": "all" - \\ } - \\ } - \\ ] - \\} - ); - file.close(); - - var path_buf: [std.fs.max_path_bytes]u8 = undefined; - const tmp_path = try tmp_dir.dir.realpath("valid.json", &path_buf); - - // First provider - will fail - const failing_provider = try FileProvider.init( - allocator, - noop_bus.eventBus(), - "failing-provider", - "/nonexistent/policies.json", - ); - defer failing_provider.deinit(); - - const fail_result = failing_provider.subscribe(.{ - .context = undefined, - .onUpdate = struct { - fn cb(_: *anyopaque, _: policy_provider.PolicyUpdate) !void {} - }.cb, - }); - try testing.expectError(error.FileNotFound, fail_result); - - // Second provider - will succeed - const good_provider = try FileProvider.init( - allocator, - noop_bus.eventBus(), - "good-provider", - tmp_path, - ); - defer 
good_provider.deinit(); - - const CallbackContext = struct { - registry: *Registry, - - fn handleUpdate(ctx: *anyopaque, update: policy_provider.PolicyUpdate) !void { - const self: *@This() = @ptrCast(@alignCast(ctx)); - try self.registry.updatePolicies(update.policies, update.provider_id, .file); - } - }; - - var ctx = CallbackContext{ .registry = ®istry }; - - try good_provider.subscribe(.{ - .context = @ptrCast(&ctx), - .onUpdate = CallbackContext.handleUpdate, - }); - - // Registry should have policy from the successful provider only - try testing.expectEqual(@as(usize, 1), registry.getPolicyCount()); - - const snapshot = registry.getSnapshot(); - try testing.expect(snapshot != null); - try testing.expectEqualStrings("policy-from-valid-provider", snapshot.?.policies[0].name); -} diff --git a/src/policy/provider_http.zig b/src/policy/provider_http.zig deleted file mode 100644 index ab5fab4..0000000 --- a/src/policy/provider_http.zig +++ /dev/null @@ -1,686 +0,0 @@ -const std = @import("std"); -const policy_provider = @import("./provider.zig"); -const types = @import("./types.zig"); -const proto = @import("proto"); -const protobuf = @import("protobuf"); -const o11y = @import("../observability/root.zig"); - -const PolicyCallback = policy_provider.PolicyCallback; -const TransformResult = policy_provider.TransformResult; -const SyncRequest = proto.policy.SyncRequest; -const SyncResponse = proto.policy.SyncResponse; -const ClientMetadata = proto.policy.ClientMetadata; -const PolicySyncStatus = proto.policy.PolicySyncStatus; -const TransformStageStatus = proto.policy.TransformStageStatus; -const PolicyStage = proto.policy.PolicyStage; -const KeyValue = proto.common.KeyValue; -const AnyValue = proto.common.AnyValue; -const ServiceMetadata = types.ServiceMetadata; -const EventBus = o11y.EventBus; - -/// A header to be sent with HTTP requests -pub const Header = struct { - name: []const u8 = "", - value: []const u8 = "", -}; - -// ============================================================================= -// Observability Events -// ============================================================================= - -const PolicyErrorRecordFailed = struct { policy_id: []const u8 }; -const HttpInitialFetchFailed = struct { err: []const u8 }; -const HttpFetchFailed = struct { url: []const u8, err: []const u8 }; -const HttpJsonDecodeFailed = struct { err: []const u8, body_preview: []const u8 }; -const HttpPoliciesUnchanged = struct { reason: []const u8 }; -const HttpPolicyHashUpdated = struct { hash: []const u8 }; -const HttpPoliciesLoaded = struct { count: usize, url: []const u8, sync_timestamp: u64 }; -const HttpSyncRequestFailed = struct { url: []const u8, status: u16 }; -const HTTPFetchStarted = struct {}; -const HTTPFetchCompleted = struct {}; - -/// Tracks status for a specific policy (hits, misses, errors, transform results) -const PolicyStatusRecord = struct { - hits: i64 = 0, - misses: i64 = 0, - errors: std.ArrayListUnmanaged([]const u8) = .{}, - /// Accumulated transform results (attempted/applied counts) - transform_result: TransformResult = .{}, - - fn deinit(self: *PolicyStatusRecord, allocator: std.mem.Allocator) void { - for (self.errors.items) |msg| { - allocator.free(msg); - } - self.errors.deinit(allocator); - } - - fn addTransformResult(self: *PolicyStatusRecord, result: TransformResult) void { - self.transform_result.removes_attempted += result.removes_attempted; - self.transform_result.removes_applied += result.removes_applied; - self.transform_result.redacts_attempted += 
result.redacts_attempted; - self.transform_result.redacts_applied += result.redacts_applied; - self.transform_result.renames_attempted += result.renames_attempted; - self.transform_result.renames_applied += result.renames_applied; - self.transform_result.adds_attempted += result.adds_attempted; - self.transform_result.adds_applied += result.adds_applied; - } -}; - -/// HTTP-based policy provider that polls a remote endpoint -pub const HttpProvider = struct { - allocator: std.mem.Allocator, - /// Unique identifier for this provider - id: []const u8, - http_client: std.http.Client, - config_url: []const u8, - poll_interval_ns: u64, - callback: ?PolicyCallback, - poll_thread: ?std.Thread, - shutdown_flag: std.atomic.Value(bool), - - // Service metadata for sync requests (not owned, references config) - service: ServiceMetadata, - last_sync_timestamp: u64, - last_successful_hash: ?[]u8, - - // Policy status tracking: maps policy_id -> PolicyStatusRecord - // Used to report hits/misses/errors encountered when applying policies - policy_statuses: std.StringHashMapUnmanaged(PolicyStatusRecord), - - // Custom headers to send with HTTP requests (owned, copied from config) - custom_headers: []Header, - - // Mutex for thread-safe access to synced state - sync_state_mutex: std.Thread.Mutex, - - // Event bus for observability - bus: *EventBus, - - pub fn init( - allocator: std.mem.Allocator, - bus: *EventBus, - id: []const u8, - config_url: []const u8, - poll_interval_seconds: u64, - service: ServiceMetadata, - headers: []const Header, - ) !*HttpProvider { - const self = try allocator.create(HttpProvider); - errdefer allocator.destroy(self); - - const id_copy = try allocator.dupe(u8, id); - errdefer allocator.free(id_copy); - - const url_copy = try allocator.dupe(u8, config_url); - errdefer allocator.free(url_copy); - - // Copy headers (both the slice and the string contents) - const headers_copy = try allocator.alloc(Header, headers.len); - errdefer allocator.free(headers_copy); - - var headers_initialized: usize = 0; - errdefer { - for (headers_copy[0..headers_initialized]) |h| { - allocator.free(h.name); - allocator.free(h.value); - } - } - - for (headers, 0..) |h, i| { - const name_copy = try allocator.dupe(u8, h.name); - errdefer allocator.free(name_copy); - const value_copy = try allocator.dupe(u8, h.value); - headers_copy[i] = .{ .name = name_copy, .value = value_copy }; - headers_initialized = i + 1; - } - - self.* = .{ - .allocator = allocator, - .id = id_copy, - .http_client = std.http.Client{ .allocator = allocator }, - .config_url = url_copy, - .poll_interval_ns = poll_interval_seconds * std.time.ns_per_s, - .callback = null, - .poll_thread = null, - .shutdown_flag = std.atomic.Value(bool).init(false), - .service = service, - .last_sync_timestamp = 0, - .last_successful_hash = null, - .policy_statuses = .{}, - .custom_headers = headers_copy, - .sync_state_mutex = .{}, - .bus = bus, - }; - - return self; - } - - /// Get the unique identifier for this provider - pub fn getId(self: *HttpProvider) []const u8 { - return self.id; - } - - /// Record the hash from a successful sync. - /// This hash will be sent in subsequent sync requests. 
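-    /// (Round trip, per fetchAndNotify/fetchPolicies below: the server's
-    /// SyncResponse.hash is recorded here, then echoed back as
-    /// last_successful_hash on the next SyncRequest so the server can
-    /// answer "unchanged" cheaply.)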
-    pub fn recordSyncedHash(self: *HttpProvider, hash: []const u8) !void {
-        self.sync_state_mutex.lock();
-        defer self.sync_state_mutex.unlock();
-
-        // Free old hash if exists
-        if (self.last_successful_hash) |old_hash| {
-            self.allocator.free(old_hash);
-        }
-
-        self.last_successful_hash = try self.allocator.dupe(u8, hash);
-    }
-
-    /// Record an error for a specific policy.
-    /// These errors will be sent in subsequent sync requests.
-    /// Conforms to PolicyProvider interface (void return, logs errors internally).
-    pub fn recordPolicyError(self: *HttpProvider, policy_id: []const u8, error_message: []const u8) void {
-        self.sync_state_mutex.lock();
-        defer self.sync_state_mutex.unlock();
-
-        const msg_copy = self.allocator.dupe(u8, error_message) catch {
-            self.bus.err(PolicyErrorRecordFailed{ .policy_id = policy_id });
-            return;
-        };
-
-        if (self.policy_statuses.getPtr(policy_id)) |record| {
-            // Append to existing error list
-            record.errors.append(self.allocator, msg_copy) catch {
-                self.allocator.free(msg_copy);
-                self.bus.err(PolicyErrorRecordFailed{ .policy_id = policy_id });
-                return;
-            };
-        } else {
-            // Create new entry
-            const id_copy = self.allocator.dupe(u8, policy_id) catch {
-                self.allocator.free(msg_copy);
-                self.bus.err(PolicyErrorRecordFailed{ .policy_id = policy_id });
-                return;
-            };
-
-            var record = PolicyStatusRecord{};
-            record.errors.append(self.allocator, msg_copy) catch {
-                self.allocator.free(msg_copy);
-                self.allocator.free(id_copy);
-                self.bus.err(PolicyErrorRecordFailed{ .policy_id = policy_id });
-                return;
-            };
-
-            self.policy_statuses.put(self.allocator, id_copy, record) catch {
-                // msg_copy is owned by record.errors at this point;
-                // record.deinit frees it, so it must not be freed separately.
-                self.allocator.free(id_copy);
-                record.deinit(self.allocator);
-                self.bus.err(PolicyErrorRecordFailed{ .policy_id = policy_id });
-                return;
-            };
-        }
-    }
-
-    /// Record statistics about policy hits, misses, and transform stats.
-    /// These stats will be sent in subsequent sync requests.
-    /// Conforms to PolicyProvider interface (void return, logs errors internally).
-    pub fn recordPolicyStats(self: *HttpProvider, policy_id: []const u8, hits: i64, misses: i64, transform_result: TransformResult) void {
-        self.sync_state_mutex.lock();
-        defer self.sync_state_mutex.unlock();
-
-        if (self.policy_statuses.getPtr(policy_id)) |record| {
-            // Update existing record
-            record.hits += hits;
-            record.misses += misses;
-            record.addTransformResult(transform_result);
-        } else {
-            // Create new entry
-            const id_copy = self.allocator.dupe(u8, policy_id) catch {
-                self.bus.err(PolicyErrorRecordFailed{ .policy_id = policy_id });
-                return;
-            };
-
-            self.policy_statuses.put(self.allocator, id_copy, .{
-                .hits = hits,
-                .misses = misses,
-                .transform_result = transform_result,
-            }) catch {
-                self.allocator.free(id_copy);
-                self.bus.err(PolicyErrorRecordFailed{ .policy_id = policy_id });
-                return;
-            };
-        }
-    }
-
-    /// Clear all recorded policy statuses.
-    /// Call this after statuses have been successfully reported to the server.
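-    /// (fetchAndNotify already does this after every successful sync, so most
-    /// callers never need to invoke it directly.)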
- pub fn clearPolicyStatuses(self: *HttpProvider) void { - self.sync_state_mutex.lock(); - defer self.sync_state_mutex.unlock(); - - var it = self.policy_statuses.iterator(); - while (it.next()) |entry| { - self.allocator.free(entry.key_ptr.*); - entry.value_ptr.deinit(self.allocator); - } - self.policy_statuses.clearRetainingCapacity(); - } - - pub fn subscribe(self: *HttpProvider, callback: PolicyCallback) !void { - self.callback = callback; - - // Initial fetch and notify (non-fatal if it fails) - self.fetchAndNotify() catch |err| { - self.bus.warn(HttpInitialFetchFailed{ .err = @errorName(err) }); - }; - - // Start polling - self.poll_thread = try std.Thread.spawn(.{}, pollLoop, .{self}); - } - - pub fn shutdown(self: *HttpProvider) void { - self.shutdown_flag.store(true, .release); - - if (self.poll_thread) |thread| { - thread.join(); - self.poll_thread = null; - } - } - - pub fn deinit(self: *HttpProvider) void { - // Ensure shutdown is called first - self.shutdown(); - - if (self.last_successful_hash) |hash| { - self.allocator.free(hash); - } - - // Free policy statuses - var ps_it = self.policy_statuses.iterator(); - while (ps_it.next()) |entry| { - self.allocator.free(entry.key_ptr.*); - entry.value_ptr.deinit(self.allocator); - } - self.policy_statuses.deinit(self.allocator); - - // Free custom headers - for (self.custom_headers) |h| { - self.allocator.free(h.name); - self.allocator.free(h.value); - } - self.allocator.free(self.custom_headers); - - self.http_client.deinit(); - self.allocator.free(self.id); - self.allocator.free(self.config_url); - - self.allocator.destroy(self); - } - - fn pollLoop(self: *HttpProvider) void { - while (!self.shutdown_flag.load(.acquire)) { - // Sleep in small increments so we can respond quickly to shutdown - const sleep_increment_ns = 100 * std.time.ns_per_ms; // 100ms - var slept_ns: u64 = 0; - - while (slept_ns < self.poll_interval_ns and !self.shutdown_flag.load(.acquire)) { - std.Thread.sleep(sleep_increment_ns); - slept_ns += sleep_increment_ns; - } - - if (self.shutdown_flag.load(.acquire)) break; - - self.fetchAndNotify() catch |err| { - self.bus.err(HttpFetchFailed{ .url = self.config_url, .err = @errorName(err) }); - }; - } - } - - const FetchResult = struct { - parsed: std.json.Parsed(SyncResponse), - response_body: []u8, - }; - - fn fetchAndNotify(self: *HttpProvider) !void { - var span = self.bus.started(.debug, HTTPFetchStarted{}); - defer span.completed(HTTPFetchCompleted{}); - var result = try self.fetchPolicies(); - defer result.parsed.deinit(); - defer self.allocator.free(result.response_body); - - const response = result.parsed.value; - - // Update last sync timestamp - self.last_sync_timestamp = response.sync_timestamp_unix_nano; - - // Check if content has changed by comparing hashes - const hash_unchanged = blk: { - if (response.hash.len == 0) break :blk false; - if (self.last_successful_hash) |old_hash| { - break :blk std.mem.eql(u8, old_hash, response.hash); - } - break :blk false; - }; - - if (hash_unchanged) { - self.bus.debug(HttpPoliciesUnchanged{ .reason = "hash" }); - return; - } - - // Record the hash for future sync requests - if (response.hash.len > 0) { - try self.recordSyncedHash(response.hash); - self.bus.info(HttpPolicyHashUpdated{ .hash = response.hash }); - } - - // Notify callback with policies from response - if (self.callback) |cb| { - try cb.call(.{ - .policies = response.policies.items, - .provider_id = self.id, - }); - } - - self.bus.info(HttpPoliciesLoaded{ - .count = response.policies.items.len, - 
.url = self.config_url, - .sync_timestamp = response.sync_timestamp_unix_nano, - }); - - // Clear policy statuses after successful sync - self.clearPolicyStatuses(); - } - - fn fetchPolicies(self: *HttpProvider) !FetchResult { - // Use arena allocator for all temporary structures during fetch. - // This reduces fragmentation by freeing all temporary memory at once. - var arena = std.heap.ArenaAllocator.init(self.allocator); - defer arena.deinit(); - const temp_allocator = arena.allocator(); - - // Build resource_attributes with required fields: - // - service.name - // - service.instance.id - // - service.version - // - service.namespace - const resource_attributes = [_]KeyValue{ - .{ .key = "service.name", .value = .{ .value = .{ .string_value = self.service.name } } }, - .{ .key = "service.instance.id", .value = .{ .value = .{ .string_value = self.service.instance_id } } }, - .{ .key = "service.version", .value = .{ .value = .{ .string_value = self.service.version } } }, - .{ .key = "service.namespace", .value = .{ .value = .{ .string_value = self.service.namespace } } }, - }; - - // Labels (empty - workspace.id is no longer required) - const labels = [_]KeyValue{}; - - // Build supported_policy_stages from service metadata - // Different binaries support different stages (e.g., OTLP supports traces, Datadog does not) - const supported_policy_stages = self.service.supported_stages; - - // Build policy_statuses from our tracked state - var policy_statuses_list = std.ArrayListUnmanaged(PolicySyncStatus){}; - // No defer needed - arena handles cleanup - - // Get last successful hash (if any) - read under lock - var last_hash: []const u8 = &.{}; - { - self.sync_state_mutex.lock(); - defer self.sync_state_mutex.unlock(); - - // Build PolicySyncStatus entries from tracked policy statuses - var ps_it = self.policy_statuses.iterator(); - while (ps_it.next()) |entry| { - const tr = entry.value_ptr.transform_result; - // Convert TransformResult to TransformStageStatus: hits = applied, misses = attempted - applied - try policy_statuses_list.append(temp_allocator, .{ - .id = entry.key_ptr.*, - .match_hits = entry.value_ptr.hits, - .match_misses = entry.value_ptr.misses, - .errors = entry.value_ptr.errors, - .remove = if (tr.removes_attempted > 0) - TransformStageStatus{ - .hits = @intCast(tr.removes_applied), - .misses = @intCast(tr.removes_attempted - tr.removes_applied), - } - else - null, - .redact = if (tr.redacts_attempted > 0) - TransformStageStatus{ - .hits = @intCast(tr.redacts_applied), - .misses = @intCast(tr.redacts_attempted - tr.redacts_applied), - } - else - null, - .rename = if (tr.renames_attempted > 0) - TransformStageStatus{ - .hits = @intCast(tr.renames_applied), - .misses = @intCast(tr.renames_attempted - tr.renames_applied), - } - else - null, - .add = if (tr.adds_attempted > 0) - TransformStageStatus{ - .hits = @intCast(tr.adds_applied), - .misses = @intCast(tr.adds_attempted - tr.adds_applied), - } - else - null, - }); - } - - last_hash = self.last_successful_hash orelse &.{}; - } - - // Create SyncRequest with the new structure - const sync_request = SyncRequest{ - .client_metadata = ClientMetadata{ - .supported_policy_stages = .{ .items = @constCast(supported_policy_stages), .capacity = supported_policy_stages.len }, - .resource_attributes = .{ .items = @constCast(&resource_attributes), .capacity = resource_attributes.len }, - .labels = .{ .items = @constCast(&labels), .capacity = labels.len }, - }, - .full_sync = self.last_sync_timestamp == 0, - 
.last_sync_timestamp_unix_nano = self.last_sync_timestamp, - .last_successful_hash = last_hash, - .policy_statuses = policy_statuses_list, - }; - - // Encode SyncRequest to JSON - protobuf.json.pb_options.emit_oneof_field_name = false; - const request_body = try sync_request.jsonEncode(.{}, temp_allocator); - // No defer needed - arena handles cleanup - - // Prepare headers: content-type + custom headers - const max_builtin_headers: usize = 1; - const total_headers = max_builtin_headers + self.custom_headers.len; - const headers_buffer = try temp_allocator.alloc(std.http.Header, total_headers); - // No defer needed - arena handles cleanup - - var headers_count: usize = 0; - - headers_buffer[headers_count] = .{ - .name = "content-type", - .value = "application/json", - }; - headers_count += 1; - - // Add custom headers - for (self.custom_headers) |h| { - headers_buffer[headers_count] = .{ - .name = h.name, - .value = h.value, - }; - headers_count += 1; - } - - const extra_headers = headers_buffer[0..headers_count]; - - var body: std.Io.Writer.Allocating = .init(self.allocator); - defer body.deinit(); - - // Create request - const result = try self.http_client.fetch(.{ - .location = .{ .url = self.config_url }, - .extra_headers = extra_headers, - .method = .POST, - .payload = request_body, - .response_writer = &body.writer, - }); - - // Check status code - if (result.status != .ok) { - self.bus.err(HttpSyncRequestFailed{ - .url = self.config_url, - .status = @intFromEnum(result.status), - }); - return error.HttpRequestFailed; - } - - // Read response body - take ownership to keep memory alive for parsed result - const response_body = try body.toOwnedSlice(); - errdefer self.allocator.free(response_body); - - // Decode SyncResponse from JSON - const parsed = SyncResponse.jsonDecode(response_body, .{}, self.allocator) catch |err| { - // Log the error with a preview of the response body for debugging - const preview_len = @min(response_body.len, 200); - self.bus.err(HttpJsonDecodeFailed{ - .err = @errorName(err), - .body_preview = response_body[0..preview_len], - }); - return err; - }; - - return .{ - .parsed = parsed, - .response_body = response_body, - }; - } -}; - -// ============================================================================= -// Tests -// ============================================================================= - -const testing = std.testing; - -test "HttpProvider: recordPolicyStats accumulates hits and misses" { - const allocator = testing.allocator; - - var noop_bus: o11y.NoopEventBus = undefined; - noop_bus.init(); - - var provider = try HttpProvider.init( - allocator, - noop_bus.eventBus(), - "test-provider", - "http://test.local/policies", - 60, - .{}, - &.{}, - ); - defer provider.deinit(); - - // Record initial stats - provider.recordPolicyStats("policy-1", 10, 5, .{}); - - // Verify stats were recorded - { - provider.sync_state_mutex.lock(); - defer provider.sync_state_mutex.unlock(); - - const record = provider.policy_statuses.get("policy-1"); - try testing.expect(record != null); - try testing.expectEqual(@as(i64, 10), record.?.hits); - try testing.expectEqual(@as(i64, 5), record.?.misses); - } - - // Accumulate more stats for the same policy - provider.recordPolicyStats("policy-1", 20, 10, .{}); - - // Verify stats were accumulated - { - provider.sync_state_mutex.lock(); - defer provider.sync_state_mutex.unlock(); - - const record = provider.policy_statuses.get("policy-1"); - try testing.expect(record != null); - try testing.expectEqual(@as(i64, 30), 
record.?.hits); - try testing.expectEqual(@as(i64, 15), record.?.misses); - } -} - -test "HttpProvider: clearPolicyStatuses resets all counters" { - const allocator = testing.allocator; - - var noop_bus: o11y.NoopEventBus = undefined; - noop_bus.init(); - - var provider = try HttpProvider.init( - allocator, - noop_bus.eventBus(), - "test-provider", - "http://test.local/policies", - 60, - .{}, - &.{}, - ); - defer provider.deinit(); - - // Record stats for multiple policies - provider.recordPolicyStats("policy-1", 10, 5, .{}); - provider.recordPolicyStats("policy-2", 20, 10, .{}); - provider.recordPolicyStats("policy-3", 30, 15, .{}); - - // Verify all policies have stats - { - provider.sync_state_mutex.lock(); - defer provider.sync_state_mutex.unlock(); - try testing.expectEqual(@as(usize, 3), provider.policy_statuses.count()); - } - - // Clear all statuses - provider.clearPolicyStatuses(); - - // Verify all stats are cleared - { - provider.sync_state_mutex.lock(); - defer provider.sync_state_mutex.unlock(); - try testing.expectEqual(@as(usize, 0), provider.policy_statuses.count()); - } -} - -test "HttpProvider: recordPolicyStats after clear starts fresh" { - const allocator = testing.allocator; - - var noop_bus: o11y.NoopEventBus = undefined; - noop_bus.init(); - - var provider = try HttpProvider.init( - allocator, - noop_bus.eventBus(), - "test-provider", - "http://test.local/policies", - 60, - .{}, - &.{}, - ); - defer provider.deinit(); - - // Record initial stats - provider.recordPolicyStats("policy-1", 100, 50, .{}); - - // Clear - provider.clearPolicyStatuses(); - - // Record new stats for the same policy - provider.recordPolicyStats("policy-1", 5, 2, .{}); - - // Verify stats start fresh (not accumulated with previous values) - { - provider.sync_state_mutex.lock(); - defer provider.sync_state_mutex.unlock(); - - const record = provider.policy_statuses.get("policy-1"); - try testing.expect(record != null); - try testing.expectEqual(@as(i64, 5), record.?.hits); - try testing.expectEqual(@as(i64, 2), record.?.misses); - } -} diff --git a/src/policy/rate_limiter.zig b/src/policy/rate_limiter.zig deleted file mode 100644 index 671d701..0000000 --- a/src/policy/rate_limiter.zig +++ /dev/null @@ -1,671 +0,0 @@ -//! Lock-free rate limiting for telemetry policies. -//! -//! Uses atomic operations for thread-safe access without locks. -//! Designed to be embedded directly in policy structs (24 bytes). -//! -//! ## Design Principles -//! -//! - Lock-free: All operations use atomics, no mutexes -//! - Predictable memory: Fixed size, no allocations -//! - Embeddable: 24 bytes, embed directly in policy structs -//! -//! ## Usage -//! -//! ```zig -//! var limiter = RateLimiter.initPerSecond(100); -//! if (limiter.shouldKeep()) { -//! // Under rate limit -//! } -//! ``` - -const std = @import("std"); -const testing = std.testing; - -/// Lock-free rate limiter for a single policy. -/// -/// Designed to be embedded directly in policy structs (24 bytes). -/// Uses atomic operations for thread-safe access without locks. -/// -/// Window reset happens inline on first request after expiry via CAS, -/// eliminating the need for a background reset task. -/// -/// ## Memory Ordering -/// -/// - `window_start`: acquire/release to synchronize window boundaries -/// - `count`: monotonic for increment (relaxed ordering acceptable for counters) -/// -/// ## Race Conditions -/// -/// At window boundaries, there's a brief race where: -/// 1. 
Multiple threads may attempt reset simultaneously (CAS ensures only one wins) -/// 2. Threads may increment the old counter after reset (acceptable over-admission) -/// -/// The maximum over-admission is bounded by `limit + num_concurrent_threads - 1`. -pub const RateLimiter = struct { - /// Current request count in this window. - count: std.atomic.Value(u32) = .init(0), - - /// Window start timestamp in milliseconds since epoch. - window_start: std.atomic.Value(i64) = .init(0), - - /// Maximum requests allowed per window. - limit: u32, - - /// Window duration in milliseconds. - window_ms: u32, - - /// For testing: injectable time source - time_source: *const fn () i64 = &defaultTimeSource, - - fn defaultTimeSource() i64 { - return std.time.milliTimestamp(); - } - - /// Initialize a rate limiter with custom window duration. - pub fn init(limit: u32, window_ms: u32) RateLimiter { - return initWithTimeSource(limit, window_ms, &defaultTimeSource); - } - - /// Initialize with injectable time source (for testing). - pub fn initWithTimeSource( - limit: u32, - window_ms: u32, - time_source: *const fn () i64, - ) RateLimiter { - const now = time_source(); - var limiter = RateLimiter{ - .limit = limit, - .window_ms = window_ms, - .time_source = time_source, - }; - limiter.window_start.store(now, .release); - return limiter; - } - - /// Initialize a rate limiter with per-second window. - pub fn initPerSecond(limit: u32) RateLimiter { - return init(limit, 1000); - } - - /// Initialize a rate limiter with per-minute window. - pub fn initPerMinute(limit: u32) RateLimiter { - return init(limit, 60_000); - } - - /// Check if request should be allowed. Increments counter atomically. - /// - /// Returns true if under the rate limit, false if limit exceeded. - /// Automatically resets window when expired. - /// - /// This is the only public method - checking and incrementing are atomic. - pub fn shouldKeep(self: *RateLimiter) bool { - const now = self.time_source(); - - // Fast path: check if window might be expired - const window_start = self.window_start.load(.acquire); - const elapsed = now - window_start; - - if (elapsed >= self.window_ms) { - // Window expired - try to reset - self.tryResetWindow(window_start, now); - } - - // Increment and check limit - // fetchAdd returns previous value, so if prev < limit, we're allowed - const prev = self.count.fetchAdd(1, .monotonic); - return prev < self.limit; - } - - /// Attempt to reset the window. Only one thread wins the CAS race. - fn tryResetWindow(self: *RateLimiter, expected_start: i64, now: i64) void { - // CAS to claim the reset - only one thread succeeds - const result = self.window_start.cmpxchgStrong( - expected_start, - now, - .acq_rel, - .acquire, - ); - - if (result == null) { - // We won the race, reset the counter - self.count.store(0, .release); - } - // If CAS failed, another thread already reset - that's fine - } - - /// Get current count (for testing/debugging only). - pub fn currentCount(self: *const RateLimiter) u32 { - return self.count.load(.acquire); - } - - /// Get window start (for testing/debugging only). - pub fn currentWindowStart(self: *const RateLimiter) i64 { - return self.window_start.load(.acquire); - } - - /// Force reset (for testing only). 
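-    /// Unlike the CAS rollover in tryResetWindow, this unconditionally zeroes
-    /// the counter and restamps the window. Illustrative use:
-    ///
-    /// ```zig
-    /// var limiter = RateLimiter.initPerSecond(5);
-    /// _ = limiter.shouldKeep();
-    /// limiter.reset(); // count back to 0, window restarts at "now"
-    /// ```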
- pub fn reset(self: *RateLimiter) void { - self.count.store(0, .release); - self.window_start.store(self.time_source(), .release); - } -}; - -// ============================================================================= -// Test Helpers -// ============================================================================= - -/// Thread-safe mock time source for testing. -/// Uses atomic value that can be advanced from any thread. -const MockTime = struct { - value: std.atomic.Value(i64), - - fn init(start: i64) MockTime { - return .{ .value = std.atomic.Value(i64).init(start) }; - } - - fn get(self: *const MockTime) i64 { - return self.value.load(.acquire); - } - - fn set(self: *MockTime, time: i64) void { - self.value.store(time, .release); - } - - fn advance(self: *MockTime, delta: i64) void { - _ = self.value.fetchAdd(delta, .acq_rel); - } -}; - -// ============================================================================= -// Tests - Basic Functionality -// ============================================================================= - -test "RateLimiter: init sets correct values" { - var limiter = RateLimiter.init(100, 1000); - - try testing.expectEqual(@as(u32, 100), limiter.limit); - try testing.expectEqual(@as(u32, 1000), limiter.window_ms); - try testing.expectEqual(@as(u32, 0), limiter.currentCount()); -} - -test "RateLimiter: initPerSecond convenience" { - const limiter = RateLimiter.initPerSecond(50); - - try testing.expectEqual(@as(u32, 50), limiter.limit); - try testing.expectEqual(@as(u32, 1000), limiter.window_ms); -} - -test "RateLimiter: initPerMinute convenience" { - const limiter = RateLimiter.initPerMinute(1000); - - try testing.expectEqual(@as(u32, 1000), limiter.limit); - try testing.expectEqual(@as(u32, 60_000), limiter.window_ms); -} - -test "RateLimiter: allows requests under limit" { - var limiter = RateLimiter.initPerSecond(5); - - for (0..5) |_| { - try testing.expect(limiter.shouldKeep()); - } - - try testing.expectEqual(@as(u32, 5), limiter.currentCount()); -} - -test "RateLimiter: blocks at limit" { - var limiter = RateLimiter.initPerSecond(3); - - try testing.expect(limiter.shouldKeep()); // 1 - try testing.expect(limiter.shouldKeep()); // 2 - try testing.expect(limiter.shouldKeep()); // 3 - try testing.expect(!limiter.shouldKeep()); // 4 - blocked - - try testing.expectEqual(@as(u32, 4), limiter.currentCount()); -} - -test "RateLimiter: limit of 1" { - var limiter = RateLimiter.initPerSecond(1); - - try testing.expect(limiter.shouldKeep()); - try testing.expect(!limiter.shouldKeep()); - try testing.expect(!limiter.shouldKeep()); -} - -test "RateLimiter: limit of 0 blocks everything" { - var limiter = RateLimiter.initPerSecond(0); - - try testing.expect(!limiter.shouldKeep()); - try testing.expect(!limiter.shouldKeep()); -} - -test "RateLimiter: high limit" { - var limiter = RateLimiter.initPerSecond(1_000_000); - - for (0..10000) |_| { - try testing.expect(limiter.shouldKeep()); - } -} - -test "RateLimiter: reset clears count" { - var limiter = RateLimiter.initPerSecond(5); - - _ = limiter.shouldKeep(); - _ = limiter.shouldKeep(); - try testing.expectEqual(@as(u32, 2), limiter.currentCount()); - - limiter.reset(); - try testing.expectEqual(@as(u32, 0), limiter.currentCount()); - - // Should allow again - try testing.expect(limiter.shouldKeep()); -} - -// ============================================================================= -// Tests - Window Expiry (using injectable time) -// 
============================================================================= - -test "RateLimiter: window expiry resets count" { - var mock_time: i64 = 1000; - const mockTime = struct { - var time_ptr: *i64 = undefined; - fn get() i64 { - return time_ptr.*; - } - }; - mockTime.time_ptr = &mock_time; - - var limiter = RateLimiter.initWithTimeSource(3, 100, &mockTime.get); - - // Use up limit - try testing.expect(limiter.shouldKeep()); - try testing.expect(limiter.shouldKeep()); - try testing.expect(limiter.shouldKeep()); - try testing.expect(!limiter.shouldKeep()); - - // Advance time past window - mock_time = 1150; - - // Should allow again - try testing.expect(limiter.shouldKeep()); - try testing.expectEqual(@as(u32, 1), limiter.currentCount()); -} - -test "RateLimiter: window expiry exactly at boundary" { - var mock_time: i64 = 0; - const mockTime = struct { - var time_ptr: *i64 = undefined; - fn get() i64 { - return time_ptr.*; - } - }; - mockTime.time_ptr = &mock_time; - - var limiter = RateLimiter.initWithTimeSource(2, 100, &mockTime.get); - - _ = limiter.shouldKeep(); - _ = limiter.shouldKeep(); - try testing.expect(!limiter.shouldKeep()); - - // Exactly at window boundary - mock_time = 100; - try testing.expect(limiter.shouldKeep()); -} - -test "RateLimiter: multiple window rollovers" { - var mock_time: i64 = 0; - const mockTime = struct { - var time_ptr: *i64 = undefined; - fn get() i64 { - return time_ptr.*; - } - }; - mockTime.time_ptr = &mock_time; - - var limiter = RateLimiter.initWithTimeSource(2, 100, &mockTime.get); - - // Window 1 - try testing.expect(limiter.shouldKeep()); - try testing.expect(limiter.shouldKeep()); - try testing.expect(!limiter.shouldKeep()); - - // Window 2 - mock_time = 100; - try testing.expect(limiter.shouldKeep()); - try testing.expect(limiter.shouldKeep()); - try testing.expect(!limiter.shouldKeep()); - - // Window 3 - mock_time = 200; - try testing.expect(limiter.shouldKeep()); - try testing.expect(limiter.shouldKeep()); - try testing.expect(!limiter.shouldKeep()); - - // Skip to window 10 - mock_time = 900; - try testing.expect(limiter.shouldKeep()); -} - -test "RateLimiter: time going backwards handled gracefully" { - var mock_time: i64 = 1000; - const mockTime = struct { - var time_ptr: *i64 = undefined; - fn get() i64 { - return time_ptr.*; - } - }; - mockTime.time_ptr = &mock_time; - - var limiter = RateLimiter.initWithTimeSource(3, 100, &mockTime.get); - - _ = limiter.shouldKeep(); - _ = limiter.shouldKeep(); - - // Time goes backwards (NTP adjustment, etc.) 
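-    // elapsed = now - window_start goes negative here, so the
-    // `elapsed >= window_ms` check in shouldKeep() never triggers a reset.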
- mock_time = 500; - - // Should still work - elapsed will be negative, won't trigger reset - try testing.expect(limiter.shouldKeep()); - try testing.expect(!limiter.shouldKeep()); - - // When time catches up, normal operation resumes - mock_time = 1100; - try testing.expect(limiter.shouldKeep()); -} - -test "RateLimiter: very short window with mock time" { - var mock_time: i64 = 0; - const mockTime = struct { - var time_ptr: *i64 = undefined; - fn get() i64 { - return time_ptr.*; - } - }; - mockTime.time_ptr = &mock_time; - - var limiter = RateLimiter.initWithTimeSource(5, 1, &mockTime.get); // 1ms window - - // Should allow 5, then block - for (0..5) |_| { - try testing.expect(limiter.shouldKeep()); - } - try testing.expect(!limiter.shouldKeep()); - - // Advance time past window - mock_time = 2; - try testing.expect(limiter.shouldKeep()); -} - -test "RateLimiter: very long window" { - var mock_time: i64 = 0; - const mockTime = struct { - var time_ptr: *i64 = undefined; - fn get() i64 { - return time_ptr.*; - } - }; - mockTime.time_ptr = &mock_time; - - // 1 hour window - var limiter = RateLimiter.initWithTimeSource(100, 3_600_000, &mockTime.get); - - for (0..100) |_| { - try testing.expect(limiter.shouldKeep()); - } - try testing.expect(!limiter.shouldKeep()); - - // Advance 30 minutes - still blocked - mock_time = 1_800_000; - try testing.expect(!limiter.shouldKeep()); - - // Advance to 1 hour - reset - mock_time = 3_600_000; - try testing.expect(limiter.shouldKeep()); -} - -// ============================================================================= -// Tests - Concurrent Access -// ============================================================================= - -test "RateLimiter: concurrent increments respect limit" { - if (@import("builtin").single_threaded) return error.SkipZigTest; - - // Use a high limit that won't expire during test - var limiter = RateLimiter.initPerSecond(1000); - var kept = std.atomic.Value(u32).init(0); - - const thread_count = 8; - const iterations_per_thread = 200; - var threads: [thread_count]std.Thread = undefined; - - for (&threads) |*t| { - t.* = try std.Thread.spawn(.{}, struct { - fn run(lim: *RateLimiter, k: *std.atomic.Value(u32)) void { - for (0..iterations_per_thread) |_| { - if (lim.shouldKeep()) { - _ = k.fetchAdd(1, .monotonic); - } - } - } - }.run, .{ &limiter, &kept }); - } - - for (&threads) |*t| { - t.join(); - } - - // Should keep exactly 1000 (the limit) - const kept_count = kept.load(.acquire); - try testing.expectEqual(@as(u32, 1000), kept_count); -} - -test "RateLimiter: concurrent access with window reset" { - if (@import("builtin").single_threaded) return error.SkipZigTest; - - // Shared mock time that threads will advance - var mock_time = MockTime.init(0); - const getMockTime = struct { - var time: *MockTime = undefined; - fn get() i64 { - return time.get(); - } - }; - getMockTime.time = &mock_time; - - var limiter = RateLimiter.initWithTimeSource(10, 100, &getMockTime.get); - var total_kept = std.atomic.Value(u32).init(0); - var windows_processed = std.atomic.Value(u32).init(0); - - const thread_count = 4; - var threads: [thread_count]std.Thread = undefined; - - for (&threads) |*t| { - t.* = try std.Thread.spawn(.{}, struct { - fn run( - lim: *RateLimiter, - k: *std.atomic.Value(u32), - w: *std.atomic.Value(u32), - mt: *MockTime, - ) void { - // Each thread processes multiple "windows" - for (0..5) |_| { - // Try to get through limit - for (0..20) |_| { - if (lim.shouldKeep()) { - _ = k.fetchAdd(1, .monotonic); - } - } - _ = 
w.fetchAdd(1, .monotonic); - // Advance time (all threads do this, but that's fine) - mt.advance(100); - } - } - }.run, .{ &limiter, &total_kept, &windows_processed, &mock_time }); - } - - for (&threads) |*t| { - t.join(); - } - - // Each window allows 10, we have 5 windows per thread, 4 threads - // Due to races at window boundaries, we allow some variance - const kept = total_kept.load(.acquire); - // Should be roughly 10 * 5 = 50 (per logical window advance) - // But with concurrent advances and races, bounds are wider - try testing.expect(kept >= 40); // At least got a reasonable amount - try testing.expect(kept <= 200); // Didn't explode -} - -test "RateLimiter: no data races under contention" { - if (@import("builtin").single_threaded) return error.SkipZigTest; - - var limiter = RateLimiter.initPerSecond(1000); - var iterations = std.atomic.Value(u32).init(0); - - const thread_count = 8; - var threads: [thread_count]std.Thread = undefined; - - for (&threads) |*t| { - t.* = try std.Thread.spawn(.{}, struct { - fn run(lim: *RateLimiter, iters: *std.atomic.Value(u32)) void { - for (0..1000) |_| { - _ = lim.shouldKeep(); - _ = iters.fetchAdd(1, .monotonic); - } - } - }.run, .{ &limiter, &iterations }); - } - - for (&threads) |*t| { - t.join(); - } - - // All iterations should complete - try testing.expectEqual(@as(u32, thread_count * 1000), iterations.load(.acquire)); - - // Count should be exactly thread_count * 1000 - try testing.expectEqual(@as(u32, thread_count * 1000), limiter.currentCount()); -} - -test "RateLimiter: CAS race at window boundary" { - if (@import("builtin").single_threaded) return error.SkipZigTest; - - // Test that CAS correctly handles multiple threads trying to reset - var mock_time = MockTime.init(0); - const getMockTime = struct { - var time: *MockTime = undefined; - fn get() i64 { - return time.get(); - } - }; - getMockTime.time = &mock_time; - - var limiter = RateLimiter.initWithTimeSource(5, 100, &getMockTime.get); - var reset_count = std.atomic.Value(u32).init(0); - - // Exhaust limit - for (0..5) |_| { - _ = limiter.shouldKeep(); - } - - // Advance time to trigger reset - mock_time.set(100); - - // Spawn threads that all try to trigger reset simultaneously - const thread_count = 8; - var threads: [thread_count]std.Thread = undefined; - - for (&threads) |*t| { - t.* = try std.Thread.spawn(.{}, struct { - fn run(lim: *RateLimiter, rc: *std.atomic.Value(u32)) void { - const before = lim.currentWindowStart(); - _ = lim.shouldKeep(); - const after = lim.currentWindowStart(); - // If window changed, we observed a reset - if (after != before) { - _ = rc.fetchAdd(1, .monotonic); - } - } - }.run, .{ &limiter, &reset_count }); - } - - for (&threads) |*t| { - t.join(); - } - - // Window should have been reset (new start time) - try testing.expectEqual(@as(i64, 100), limiter.currentWindowStart()); - - // Count should be thread_count (all threads incremented after reset) - try testing.expectEqual(@as(u32, thread_count), limiter.currentCount()); -} - -// ============================================================================= -// Tests - Edge Cases -// ============================================================================= - -test "RateLimiter: max u32 limit" { - var limiter = RateLimiter.initPerSecond(std.math.maxInt(u32)); - - for (0..10000) |_| { - try testing.expect(limiter.shouldKeep()); - } -} - -test "RateLimiter: count overflow protection" { - var limiter = RateLimiter.initPerSecond(5); - - // Exhaust limit - for (0..5) |_| { - _ = 
limiter.shouldKeep(); - } - - // Hammer it many times past limit - for (0..10000) |_| { - try testing.expect(!limiter.shouldKeep()); - } - - // Count will be high but shouldKeep still works correctly - try testing.expect(limiter.currentCount() > 5); -} - -test "RateLimiter: window_ms of 0 always resets" { - var mock_time: i64 = 0; - const mockTime = struct { - var time_ptr: *i64 = undefined; - fn get() i64 { - return time_ptr.*; - } - }; - mockTime.time_ptr = &mock_time; - - // Edge case: 0ms window means always expired - var limiter = RateLimiter.initWithTimeSource(2, 0, &mockTime.get); - - // First two should be allowed (reset happens, then increment) - try testing.expect(limiter.shouldKeep()); - try testing.expect(limiter.shouldKeep()); - // Third triggers reset again since elapsed >= 0 is always true - try testing.expect(limiter.shouldKeep()); -} - -test "RateLimiter: i64 time overflow edge case" { - var mock_time: i64 = std.math.maxInt(i64) - 50; - const mockTime = struct { - var time_ptr: *i64 = undefined; - fn get() i64 { - return time_ptr.*; - } - }; - mockTime.time_ptr = &mock_time; - - var limiter = RateLimiter.initWithTimeSource(3, 100, &mockTime.get); - - try testing.expect(limiter.shouldKeep()); - try testing.expect(limiter.shouldKeep()); - try testing.expect(limiter.shouldKeep()); - try testing.expect(!limiter.shouldKeep()); - - // Would overflow if we add 100, but subtraction handles this - // This is technically undefined behavior territory, but practically - // we won't hit i64 max milliseconds (292 million years) -} diff --git a/src/policy/registry.zig b/src/policy/registry.zig deleted file mode 100644 index 5dddae7..0000000 --- a/src/policy/registry.zig +++ /dev/null @@ -1,1985 +0,0 @@ -const std = @import("std"); -const proto = @import("proto"); -const policy_source = @import("./source.zig"); -const policy_provider = @import("./provider.zig"); -const matcher_index = @import("./matcher_index.zig"); -const o11y = @import("../observability/root.zig"); -const EventBus = o11y.EventBus; -const NoopEventBus = o11y.NoopEventBus; - -const Policy = proto.policy.Policy; -const SourceType = policy_source.SourceType; -const PolicyMetadata = policy_source.PolicyMetadata; -const LogMatcherIndex = matcher_index.LogMatcherIndex; -const MetricMatcherIndex = matcher_index.MetricMatcherIndex; -const TraceMatcherIndex = matcher_index.TraceMatcherIndex; - -// ============================================================================= -// Lock-free Policy Stats -// ============================================================================= - -/// Atomic counters for policy statistics - lock-free updates -pub const PolicyAtomicStats = struct { - hits: std.atomic.Value(i64) = std.atomic.Value(i64).init(0), - misses: std.atomic.Value(i64) = std.atomic.Value(i64).init(0), - // Transform stats (adds, removes, etc.) 
- less frequent, can batch - transforms_applied: std.atomic.Value(i64) = std.atomic.Value(i64).init(0), - - /// Atomically increment hits - pub inline fn addHit(self: *PolicyAtomicStats) void { - _ = self.hits.fetchAdd(1, .monotonic); - } - - /// Atomically increment misses - pub inline fn addMiss(self: *PolicyAtomicStats) void { - _ = self.misses.fetchAdd(1, .monotonic); - } - - /// Atomically increment transforms applied - pub inline fn addTransform(self: *PolicyAtomicStats, count: i64) void { - _ = self.transforms_applied.fetchAdd(count, .monotonic); - } - - /// Read and reset stats atomically (for flushing) - pub fn readAndReset(self: *PolicyAtomicStats) struct { hits: i64, misses: i64, transforms: i64 } { - return .{ - .hits = self.hits.swap(0, .monotonic), - .misses = self.misses.swap(0, .monotonic), - .transforms = self.transforms_applied.swap(0, .monotonic), - }; - } -}; - -// ============================================================================= -// Observability Events -// ============================================================================= - -const PolicyErrorNoProvider = struct { - policy_id: []const u8, - message: []const u8, -}; - -const PolicyErrorNotFound = struct { - policy_id: []const u8, - message: []const u8, -}; - -const PolicyRegistryUnchanged = struct {}; - -/// Policy config types - derived from the Policy.target field -pub const PolicyConfigType = enum { - /// Policy has a LogTarget (target.log) - log_target, - /// Policy has a MetricTarget (target.metric) - metric_target, - /// Policy has a TraceTarget (target.trace) - trace_target, - /// Policy has no config set - none, - - /// Get the config type from a policy - pub fn fromPolicy(policy: *const Policy) PolicyConfigType { - const target = policy.target orelse return .none; - return switch (target) { - .log => .log_target, - .metric => .metric_target, - .trace => .trace_target, - }; - } -}; - -/// Immutable snapshot of policies for lock-free reads -pub const PolicySnapshot = struct { - /// All policies in this snapshot - policies: []const Policy, - - /// Indices into policies array for log target policies - /// Allows efficient lookup of policies by their config type - log_target_indices: []const u32, - - /// Indices into policies array for metric target policies - metric_target_indices: []const u32, - - /// Indices into policies array for trace target policies - trace_target_indices: []const u32, - - /// Compiled Hyperscan-based matcher index for efficient log evaluation - log_index: LogMatcherIndex, - - /// Compiled Hyperscan-based matcher index for efficient metric evaluation - metric_index: MetricMatcherIndex, - - /// Compiled Hyperscan-based matcher index for efficient trace evaluation (OTLP only) - trace_index: TraceMatcherIndex, - - /// Lock-free atomic stats per policy (indexed by policy position) - /// Mutable even though snapshot is "immutable" - stats are append-only - policy_stats: []PolicyAtomicStats, - - version: u64, - allocator: std.mem.Allocator, - - pub fn deinit(self: *PolicySnapshot) void { - self.log_index.deinit(); - self.metric_index.deinit(); - self.trace_index.deinit(); - self.allocator.free(self.policies); - self.allocator.free(self.log_target_indices); - self.allocator.free(self.metric_target_indices); - self.allocator.free(self.trace_target_indices); - self.allocator.free(self.policy_stats); - } - - /// Get atomic stats for a policy by index (for lock-free updates) - pub fn getStats(self: *const PolicySnapshot, idx: u32) ?*PolicyAtomicStats { - if (idx >= 
self.policy_stats.len) { - return null; - } - return &self.policy_stats[idx]; - } - - /// Get a policy by index - pub fn getPolicy(self: *const PolicySnapshot, idx: u32) ?*const Policy { - if (idx >= self.policies.len) { - return null; - } - return &self.policies[idx]; - } - - /// Get all log target policies - pub fn getLogTargetPolicies(self: *const PolicySnapshot) []const Policy { - if (self.log_target_indices.len == 0) { - return &.{}; - } - // Return a slice view - caller iterates using indices - return self.policies; - } - - /// Get log target policy indices for iteration - pub fn getLogTargetIndices(self: *const PolicySnapshot) []const u32 { - return self.log_target_indices; - } - - /// Get metric target policy indices for iteration - pub fn getMetricTargetIndices(self: *const PolicySnapshot) []const u32 { - return self.metric_target_indices; - } - - /// Iterator for log target policies - pub fn iterateLogTargetPolicies(self: *const PolicySnapshot) LogTargetPolicyIterator { - return .{ - .snapshot = self, - .index = 0, - }; - } - - /// Iterator for metric target policies - pub fn iterateMetricTargetPolicies(self: *const PolicySnapshot) MetricTargetPolicyIterator { - return .{ - .snapshot = self, - .index = 0, - }; - } - - pub const LogTargetPolicyIterator = struct { - snapshot: *const PolicySnapshot, - index: usize, - - pub fn next(self: *LogTargetPolicyIterator) ?*const Policy { - if (self.index >= self.snapshot.log_target_indices.len) { - return null; - } - const policy_idx = self.snapshot.log_target_indices[self.index]; - self.index += 1; - return &self.snapshot.policies[policy_idx]; - } - }; - - pub const MetricTargetPolicyIterator = struct { - snapshot: *const PolicySnapshot, - index: usize, - - pub fn next(self: *MetricTargetPolicyIterator) ?*const Policy { - if (self.index >= self.snapshot.metric_target_indices.len) { - return null; - } - const policy_idx = self.snapshot.metric_target_indices[self.index]; - self.index += 1; - return &self.snapshot.policies[policy_idx]; - } - }; -}; - -/// Grace period in nanoseconds before freeing old snapshots. -/// This allows in-flight readers to complete before memory is reclaimed. -const SNAPSHOT_GRACE_PERIOD_NS: u64 = 100 * std.time.ns_per_ms; // 100ms - -/// Maximum number of pending snapshots waiting for cleanup. -/// If this limit is reached, we force cleanup of the oldest snapshots. 
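-/// (With the 100ms grace period above, filling every slot implies snapshots
-/// being retired faster than roughly once per 12ms.)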
-const MAX_PENDING_SNAPSHOTS: usize = 8; - -/// A snapshot pending cleanup after its grace period expires -const PendingSnapshot = struct { - snapshot: *const PolicySnapshot, - retire_time: i128, // Timestamp when snapshot was retired -}; - -/// Centralized policy registry with multi-source support -pub const PolicyRegistry = struct { - // All policies stored together - policies: std.ArrayListUnmanaged(Policy), - - // Source tracking for deduplication and priority - // Key: policy id, Value: PolicyMetadata - policy_sources: std.StringHashMap(PolicyMetadata), - - // Synchronization - mutex: std.Thread.Mutex, - allocator: std.mem.Allocator, - version: std.atomic.Value(u64), - - // Current immutable snapshot for lock-free reads - current_snapshot: std.atomic.Value(?*const PolicySnapshot), - - // Snapshots pending cleanup after grace period - pending_snapshots: std.ArrayListUnmanaged(PendingSnapshot), - - // Provider references for error routing, keyed by provider ID - // These are not owned by the registry - caller must ensure they outlive the registry - providers: std.StringHashMapUnmanaged(*policy_provider.PolicyProvider), - - // Event bus for observability - bus: *EventBus, - - pub fn init(allocator: std.mem.Allocator, bus: *EventBus) PolicyRegistry { - return .{ - .policies = .empty, - .policy_sources = std.StringHashMap(PolicyMetadata).init(allocator), - .mutex = .{}, - .allocator = allocator, - .version = std.atomic.Value(u64).init(0), - .current_snapshot = std.atomic.Value(?*const PolicySnapshot).init(null), - .pending_snapshots = .{}, - .providers = .{}, - .bus = bus, - }; - } - - /// Register a provider for error routing. - /// The provider must outlive the registry. - pub fn registerProvider(self: *PolicyRegistry, provider: *policy_provider.PolicyProvider) !void { - const id = provider.getId(); - const id_copy = try self.allocator.dupe(u8, id); - errdefer self.allocator.free(id_copy); - try self.providers.put(self.allocator, id_copy, provider); - } - - /// Report an error encountered when applying a policy. - /// Routes the error to the appropriate provider based on the policy's source. - pub fn recordPolicyError(self: *PolicyRegistry, policy_id: []const u8, error_message: []const u8) void { - self.mutex.lock(); - defer self.mutex.unlock(); - - if (self.policy_sources.get(policy_id)) |metadata| { - if (self.providers.get(metadata.provider_id)) |provider| { - provider.recordPolicyError(policy_id, error_message); - } else { - // No provider registered, log as fallback - self.bus.err(PolicyErrorNoProvider{ .policy_id = policy_id, .message = error_message }); - } - } else { - // Policy not found, log - self.bus.err(PolicyErrorNotFound{ .policy_id = policy_id, .message = error_message }); - } - } - - /// Report statistics about policy hits, misses, and transform results. - /// Routes the stats to the appropriate provider based on the policy's source. 
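-    /// Illustrative flush flow (hypothetical caller; `snapshot`, `idx`, and
-    /// `policy_id` are assumed): drain the lock-free counters into this path:
-    ///
-    /// ```zig
-    /// if (snapshot.getStats(idx)) |stats| {
-    ///     const s = stats.readAndReset();
-    ///     registry.recordPolicyStats(policy_id, s.hits, s.misses, .{});
-    /// }
-    /// ```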
- pub fn recordPolicyStats(self: *PolicyRegistry, policy_id: []const u8, hits: i64, misses: i64, transform_result: policy_provider.TransformResult) void { - self.mutex.lock(); - defer self.mutex.unlock(); - - if (self.policy_sources.get(policy_id)) |metadata| { - if (self.providers.get(metadata.provider_id)) |provider| { - provider.recordPolicyStats(policy_id, hits, misses, transform_result); - } - // No fallback logging for stats - silent drop if no provider - } - // Silent drop if policy not found - stats are best-effort - } - - pub fn deinit(self: *PolicyRegistry) void { - // Free all stored policies (we own them via dupe) - for (self.policies.items) |*policy| { - policy.deinit(self.allocator); - } - self.policies.deinit(self.allocator); - - // Free source tracking keys and hashmap - var it = self.policy_sources.keyIterator(); - while (it.next()) |key| { - self.allocator.free(key.*); - } - self.policy_sources.deinit(); - - // Free provider keys - var prov_it = self.providers.keyIterator(); - while (prov_it.next()) |key| { - self.allocator.free(key.*); - } - self.providers.deinit(self.allocator); - - // Free all pending snapshots (force cleanup, no grace period on shutdown) - for (self.pending_snapshots.items) |pending| { - @constCast(pending.snapshot).deinit(); - self.allocator.destroy(pending.snapshot); - } - self.pending_snapshots.deinit(self.allocator); - - // Free current snapshot if exists - // Note: snapshot.policies is a shallow copy of self.policies, so its Policy - // structs share pointers with the originals we just freed. snapshot.deinit() - // only frees the array itself and matcher_index, not the policy contents. - if (self.current_snapshot.load(.acquire)) |snapshot| { - @constCast(snapshot).deinit(); - self.allocator.destroy(snapshot); - } - } - - /// Update policies from a specific provider - /// Deduplicates by id and applies priority rules based on source_type - pub fn updatePolicies( - self: *PolicyRegistry, - policies: []const Policy, - provider_id: []const u8, - source_type: SourceType, - ) !void { - self.mutex.lock(); - defer self.mutex.unlock(); - - // Track if any changes were made - var changed = false; - - // Track which policy ids from this provider are in the new set - var new_policy_ids = std.StringHashMap(void).init(self.allocator); - defer { - var it = new_policy_ids.keyIterator(); - while (it.next()) |key| { - self.allocator.free(key.*); - } - new_policy_ids.deinit(); - } - - // Process each incoming policy - for (policies) |policy| { - const id_copy = try self.allocator.dupe(u8, policy.id); - errdefer self.allocator.free(id_copy); - - // Track this id as present in new set - try new_policy_ids.put(id_copy, {}); - - // Check if policy already exists - if (self.policy_sources.get(policy.id)) |existing_meta| { - // Apply priority rules - if (existing_meta.shouldReplace(source_type)) { - // Remove old policy and its source tracking - self.removePolicyById(policy.id); - if (self.policy_sources.fetchRemove(policy.id)) |kv| { - self.allocator.free(kv.key); - } - - // Add new policy - try self.addPolicyInternal(policy, provider_id, source_type); - changed = true; - } - // else: higher priority source has priority, keep existing - } else { - // New policy, add it - try self.addPolicyInternal(policy, provider_id, source_type); - changed = true; - } - } - - // Remove policies from this provider that are no longer present - const removed = try self.removeStalePolicies(provider_id, &new_policy_ids); - if (removed > 0) { - changed = true; - } - - // Only create new 
snapshot if something changed - if (changed) { - try self.createSnapshot(); - } else { - self.bus.debug(PolicyRegistryUnchanged{}); - } - } - - /// Add a policy and track its source - /// Deep copies the policy so the registry owns the memory - fn addPolicyInternal( - self: *PolicyRegistry, - policy: Policy, - provider_id: []const u8, - source_type: SourceType, - ) !void { - // Deep copy the policy so we own the memory - var policy_copy = try policy.dupe(self.allocator); - errdefer policy_copy.deinit(self.allocator); - - try self.policies.append(self.allocator, policy_copy); - - // Track source metadata by policy id - const id_key = try self.allocator.dupe(u8, policy.id); - errdefer self.allocator.free(id_key); - - try self.policy_sources.put(id_key, PolicyMetadata.init(provider_id, source_type)); - } - - /// Remove a policy by id and free its memory - fn removePolicyById(self: *PolicyRegistry, id: []const u8) void { - for (self.policies.items, 0..) |*policy, i| { - if (std.mem.eql(u8, policy.id, id)) { - policy.deinit(self.allocator); - _ = self.policies.swapRemove(i); - break; - } - } - } - - /// Remove policies from provider that are no longer in the new set - /// Returns the number of policies removed - fn removeStalePolicies( - self: *PolicyRegistry, - provider_id: []const u8, - new_ids: *const std.StringHashMap(void), - ) !usize { - var ids_to_remove = std.ArrayListUnmanaged([]const u8){}; - defer ids_to_remove.deinit(self.allocator); - - // Find policies from this provider not in new set - var it = self.policy_sources.iterator(); - while (it.next()) |entry| { - const id = entry.key_ptr.*; - const metadata = entry.value_ptr.*; - - // Only consider policies from this provider - if (!std.mem.eql(u8, metadata.provider_id, provider_id)) continue; - - // If not in new set, mark for removal - if (!new_ids.contains(id)) { - try ids_to_remove.append(self.allocator, id); - } - } - - // Remove stale policies - for (ids_to_remove.items) |id| { - self.removePolicyById(id); - - // Remove from source tracking - _ = self.policy_sources.remove(id); - self.allocator.free(id); - } - - return ids_to_remove.items.len; - } - - /// Create immutable snapshot of current policies - fn createSnapshot(self: *PolicyRegistry) !void { - const policies_slice = try self.allocator.alloc(Policy, self.policies.items.len); - errdefer self.allocator.free(policies_slice); - - @memcpy(policies_slice, self.policies.items); - - // Build indices by config type - // First pass: count policies of each type - var log_target_count: usize = 0; - var metric_target_count: usize = 0; - var trace_target_count: usize = 0; - for (policies_slice) |*policy| { - const config_type = PolicyConfigType.fromPolicy(policy); - switch (config_type) { - .log_target => log_target_count += 1, - .metric_target => metric_target_count += 1, - .trace_target => trace_target_count += 1, - .none => {}, - } - } - - // Allocate index arrays - const log_target_indices = try self.allocator.alloc(u32, log_target_count); - errdefer self.allocator.free(log_target_indices); - - const metric_target_indices = try self.allocator.alloc(u32, metric_target_count); - errdefer self.allocator.free(metric_target_indices); - - const trace_target_indices = try self.allocator.alloc(u32, trace_target_count); - errdefer self.allocator.free(trace_target_indices); - - // Second pass: populate indices - var log_target_idx: usize = 0; - var metric_target_idx: usize = 0; - var trace_target_idx: usize = 0; - for (policies_slice, 0..) 
|*policy, i| { - const config_type = PolicyConfigType.fromPolicy(policy); - switch (config_type) { - .log_target => { - log_target_indices[log_target_idx] = @intCast(i); - log_target_idx += 1; - }, - .metric_target => { - metric_target_indices[metric_target_idx] = @intCast(i); - metric_target_idx += 1; - }, - .trace_target => { - trace_target_indices[trace_target_idx] = @intCast(i); - trace_target_idx += 1; - }, - .none => {}, - } - } - - // Build matcher indices for Hyperscan-based matching - var log_idx = try LogMatcherIndex.build(self.allocator, self.bus, policies_slice); - errdefer log_idx.deinit(); - - var metric_idx = try MetricMatcherIndex.build(self.allocator, self.bus, policies_slice); - errdefer metric_idx.deinit(); - - var trace_idx = try TraceMatcherIndex.build(self.allocator, self.bus, policies_slice); - errdefer trace_idx.deinit(); - - // Increment version - const new_version = self.version.load(.monotonic) + 1; - self.version.store(new_version, .monotonic); - - // Allocate atomic stats array for lock-free per-policy counters - const policy_stats = try self.allocator.alloc(PolicyAtomicStats, policies_slice.len); - errdefer self.allocator.free(policy_stats); - // Initialize all stats to zero (default init does this) - for (policy_stats) |*stat| { - stat.* = .{}; - } - - // Create new snapshot with indices - const snapshot = try self.allocator.create(PolicySnapshot); - snapshot.* = .{ - .policies = policies_slice, - .log_target_indices = log_target_indices, - .metric_target_indices = metric_target_indices, - .trace_target_indices = trace_target_indices, - .log_index = log_idx, - .metric_index = metric_idx, - .trace_index = trace_idx, - .policy_stats = policy_stats, - .version = new_version, - .allocator = self.allocator, - }; - - // Swap snapshot atomically - const old_snapshot = self.current_snapshot.swap(snapshot, .acq_rel); - - // Defer cleanup of old snapshot to allow in-flight readers to complete. - // This implements a simple grace period mechanism to prevent use-after-free. - if (old_snapshot) |old| { - const now = std.time.nanoTimestamp(); - try self.pending_snapshots.append(self.allocator, .{ - .snapshot = old, - .retire_time = now, - }); - } - - // Clean up snapshots whose grace period has expired - self.cleanupExpiredSnapshots(); - } - - /// Clean up snapshots whose grace period has expired. - /// Also forces cleanup if we have too many pending snapshots. 
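-    /// Worked example (illustrative): with SNAPSHOT_GRACE_PERIOD_NS = 100ms,
-    /// a snapshot retired at time `t` is freed by the first call where
-    /// `now - t >= 100ms`, or earlier if more than MAX_PENDING_SNAPSHOTS (8)
-    /// retirements accumulate before then.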
-    fn cleanupExpiredSnapshots(self: *PolicyRegistry) void {
-        const now = std.time.nanoTimestamp();
-        var i: usize = 0;
-
-        while (i < self.pending_snapshots.items.len) {
-            const pending = self.pending_snapshots.items[i];
-            const elapsed = now - pending.retire_time;
-            const grace_expired = elapsed >= SNAPSHOT_GRACE_PERIOD_NS;
-            const force_cleanup = self.pending_snapshots.items.len > MAX_PENDING_SNAPSHOTS;
-
-            if (grace_expired or force_cleanup) {
-                // Grace period expired or too many pending - free this snapshot
-                @constCast(pending.snapshot).deinit();
-                self.allocator.destroy(pending.snapshot);
-                _ = self.pending_snapshots.swapRemove(i);
-                // Don't increment i - swapRemove moved an element into this position
-            } else {
-                i += 1;
-            }
-        }
-    }
-
-    /// Get current policy snapshot (lock-free read)
-    pub fn getSnapshot(self: *const PolicyRegistry) ?*const PolicySnapshot {
-        return self.current_snapshot.load(.acquire);
-    }
-
-    /// Clear all policies from a specific provider
-    pub fn clearProvider(self: *PolicyRegistry, provider_id: []const u8) !void {
-        self.mutex.lock();
-        defer self.mutex.unlock();
-
-        var ids_to_remove = std.ArrayListUnmanaged([]const u8){};
-        defer ids_to_remove.deinit(self.allocator);
-
-        // Find all policies from this provider
-        var it = self.policy_sources.iterator();
-        while (it.next()) |entry| {
-            if (std.mem.eql(u8, entry.value_ptr.provider_id, provider_id)) {
-                try ids_to_remove.append(self.allocator, entry.key_ptr.*);
-            }
-        }
-
-        // Remove each policy
-        for (ids_to_remove.items) |id| {
-            self.removePolicyById(id);
-            _ = self.policy_sources.remove(id);
-            self.allocator.free(id);
-        }
-
-        // Create new snapshot
-        try self.createSnapshot();
-    }
-
-    /// Get total policy count
-    pub fn getPolicyCount(self: *const PolicyRegistry) usize {
-        return self.policies.items.len;
-    }
-};
-
-// =============================================================================
-// Tests
-// =============================================================================
-
-const testing = std.testing;
-const PolicyCallback = policy_provider.PolicyCallback;
-const PolicyUpdate = policy_provider.PolicyUpdate;
-
-/// Test policy provider that can be configured to emit policies on demand
-/// Implements the PolicyProvider interface for integration testing
-pub const TestPolicyProvider = struct {
-    allocator: std.mem.Allocator,
-    id: []const u8,
-    source_type: SourceType,
-    policies: std.ArrayListUnmanaged(Policy),
-    callbacks: std.ArrayListUnmanaged(PolicyCallback),
-
-    pub fn init(allocator: std.mem.Allocator, id: []const u8, source_type: SourceType) TestPolicyProvider {
-        return .{
-            .allocator = allocator,
-            .id = id,
-            .source_type = source_type,
-            .policies = .empty,
-            .callbacks = .empty,
-        };
-    }
-
-    pub fn deinit(self: *TestPolicyProvider) void {
-        for (self.policies.items) |*policy| {
-            policy.deinit(self.allocator);
-        }
-        self.policies.deinit(self.allocator);
-        self.callbacks.deinit(self.allocator);
-    }
-
-    /// Get the unique identifier for this provider
-    pub fn getId(self: *TestPolicyProvider) []const u8 {
-        return self.id;
-    }
-
-    /// Add a policy to the provider's set
-    pub fn addPolicy(self: *TestPolicyProvider, policy: Policy) !void {
-        const policy_copy = try policy.dupe(self.allocator);
-        try self.policies.append(self.allocator, policy_copy);
-    }
-
-    /// Remove a policy by name
-    pub fn removePolicy(self: *TestPolicyProvider, name: []const u8) void {
-        for (self.policies.items, 0..) 
|*policy, i| { - if (std.mem.eql(u8, policy.name, name)) { - policy.deinit(self.allocator); - _ = self.policies.swapRemove(i); - return; - } - } - } - - /// Clear all policies - pub fn clearPolicies(self: *TestPolicyProvider) void { - for (self.policies.items) |*policy| { - policy.deinit(self.allocator); - } - self.policies.clearRetainingCapacity(); - } - - /// Notify all subscribers of the current policy set - pub fn notifySubscribers(self: *TestPolicyProvider) !void { - const update = PolicyUpdate{ - .policies = self.policies.items, - .provider_id = self.id, - }; - for (self.callbacks.items) |callback| { - try callback.call(update); - } - } - - /// Subscribe to policy updates (PolicyProvider interface) - pub fn subscribe(self: *TestPolicyProvider, callback: PolicyCallback) !void { - try self.callbacks.append(self.allocator, callback); - // Immediately notify with current policies - const update = PolicyUpdate{ - .policies = self.policies.items, - .provider_id = self.id, - }; - try callback.call(update); - } - - /// Record policy errors (no-op for tests) - pub fn recordPolicyError(self: *TestPolicyProvider, policy_id: []const u8, error_message: []const u8) void { - _ = self; - _ = policy_id; - _ = error_message; - } - - /// Record policy stats (no-op for tests) - pub fn recordPolicyStats(self: *TestPolicyProvider, policy_id: []const u8, hits: i64, misses: i64, transform_result: policy_provider.TransformResult) void { - _ = self; - _ = policy_id; - _ = hits; - _ = misses; - _ = transform_result; - } - - /// Get as PolicyProvider interface - pub fn provider(self: *TestPolicyProvider) policy_provider.PolicyProvider { - return policy_provider.PolicyProvider.init(self); - } -}; - -/// Helper to create a test policy with minimal required fields -fn createTestPolicy( - allocator: std.mem.Allocator, - name: []const u8, -) !Policy { - var policy = Policy{ - .id = try allocator.dupe(u8, name), // Use name as id for tests - .name = try allocator.dupe(u8, name), - .enabled = true, - }; - _ = &policy; - - return policy; -} - -/// Helper to free a test policy created with createTestPolicy -fn freeTestPolicy(allocator: std.mem.Allocator, policy: *Policy) void { - policy.deinit(allocator); -} - -// ----------------------------------------------------------------------------- -// Basic Registry Operations Tests -// ----------------------------------------------------------------------------- - -test "PolicyRegistry: init and deinit with no policies" { - const allocator = testing.allocator; - var noop_bus: NoopEventBus = undefined; - noop_bus.init(); - var registry = PolicyRegistry.init(allocator, noop_bus.eventBus()); - defer registry.deinit(); - - try testing.expectEqual(@as(usize, 0), registry.getPolicyCount()); - try testing.expect(registry.getSnapshot() == null); -} - -test "PolicyRegistry: add single policy" { - const allocator = testing.allocator; - var noop_bus: NoopEventBus = undefined; - noop_bus.init(); - var registry = PolicyRegistry.init(allocator, noop_bus.eventBus()); - defer registry.deinit(); - - var policy = try createTestPolicy(allocator, "test-policy"); - defer freeTestPolicy(allocator, &policy); - - try registry.updatePolicies(&.{policy}, "file-provider", .file); - - try testing.expectEqual(@as(usize, 1), registry.getPolicyCount()); - - const snapshot = registry.getSnapshot(); - try testing.expect(snapshot != null); - try testing.expectEqual(@as(usize, 1), snapshot.?.policies.len); - try testing.expectEqualStrings("test-policy", snapshot.?.policies[0].name); -} - -test "PolicyRegistry: 
add multiple policies" { - const allocator = testing.allocator; - var noop_bus: NoopEventBus = undefined; - noop_bus.init(); - var registry = PolicyRegistry.init(allocator, noop_bus.eventBus()); - defer registry.deinit(); - - var policy1 = try createTestPolicy(allocator, "policy-1"); - defer freeTestPolicy(allocator, &policy1); - - var policy2 = try createTestPolicy(allocator, "policy-2"); - defer freeTestPolicy(allocator, &policy2); - - var policy3 = try createTestPolicy(allocator, "policy-3"); - defer freeTestPolicy(allocator, &policy3); - - try registry.updatePolicies(&.{ policy1, policy2, policy3 }, "file-provider", .file); - - try testing.expectEqual(@as(usize, 3), registry.getPolicyCount()); - - const snapshot = registry.getSnapshot(); - try testing.expect(snapshot != null); - try testing.expectEqual(@as(usize, 3), snapshot.?.policies.len); -} - -test "PolicyRegistry: update existing policy from same source" { - const allocator = testing.allocator; - var noop_bus: NoopEventBus = undefined; - noop_bus.init(); - var registry = PolicyRegistry.init(allocator, noop_bus.eventBus()); - defer registry.deinit(); - - // Add initial policy - var policy1 = try createTestPolicy(allocator, "test-policy"); - defer freeTestPolicy(allocator, &policy1); - - try registry.updatePolicies(&.{policy1}, "file-provider", .file); - try testing.expectEqual(@as(usize, 1), registry.getPolicyCount()); - - // Update with same name but different description - var policy2 = try createTestPolicy(allocator, "test-policy"); - policy2.description = try allocator.dupe(u8, "updated description"); - defer freeTestPolicy(allocator, &policy2); - - try registry.updatePolicies(&.{policy2}, "file-provider", .file); - - // Should still have 1 policy, but updated - try testing.expectEqual(@as(usize, 1), registry.getPolicyCount()); - - const snapshot = registry.getSnapshot(); - try testing.expect(snapshot != null); - try testing.expectEqualStrings("updated description", snapshot.?.policies[0].description); -} - -// ----------------------------------------------------------------------------- -// Source Priority Tests -// ----------------------------------------------------------------------------- - -test "PolicyRegistry: HTTP source takes priority over file source" { - const allocator = testing.allocator; - var noop_bus: NoopEventBus = undefined; - noop_bus.init(); - var registry = PolicyRegistry.init(allocator, noop_bus.eventBus()); - defer registry.deinit(); - - // Add policy from HTTP source - var http_policy = try createTestPolicy(allocator, "shared-policy"); - http_policy.description = try allocator.dupe(u8, "http version"); - defer freeTestPolicy(allocator, &http_policy); - - try registry.updatePolicies(&.{http_policy}, "http-provider", .http); - - // Try to update with file source (should be ignored) - var file_policy = try createTestPolicy(allocator, "shared-policy"); - file_policy.description = try allocator.dupe(u8, "file version"); - defer freeTestPolicy(allocator, &file_policy); - - try registry.updatePolicies(&.{file_policy}, "file-provider", .file); - - // Should still have the HTTP version - const snapshot = registry.getSnapshot(); - try testing.expect(snapshot != null); - try testing.expectEqual(@as(usize, 1), snapshot.?.policies.len); - try testing.expectEqualStrings("http version", snapshot.?.policies[0].description); -} - -test "PolicyRegistry: HTTP source can update file source policy" { - const allocator = testing.allocator; - var noop_bus: NoopEventBus = undefined; - noop_bus.init(); - var registry = 
PolicyRegistry.init(allocator, noop_bus.eventBus()); - defer registry.deinit(); - - // Add policy from file source - var file_policy = try createTestPolicy(allocator, "shared-policy"); - file_policy.description = try allocator.dupe(u8, "file version"); - defer freeTestPolicy(allocator, &file_policy); - - try registry.updatePolicies(&.{file_policy}, "file-provider", .file); - - // Update with HTTP source (should replace) - var http_policy = try createTestPolicy(allocator, "shared-policy"); - http_policy.description = try allocator.dupe(u8, "http version"); - defer freeTestPolicy(allocator, &http_policy); - - try registry.updatePolicies(&.{http_policy}, "http-provider", .http); - - // Should have the HTTP version - const snapshot = registry.getSnapshot(); - try testing.expect(snapshot != null); - try testing.expectEqual(@as(usize, 1), snapshot.?.policies.len); - try testing.expectEqualStrings("http version", snapshot.?.policies[0].description); -} - -test "PolicyRegistry: multiple sources with different policies" { - const allocator = testing.allocator; - var noop_bus: NoopEventBus = undefined; - noop_bus.init(); - var registry = PolicyRegistry.init(allocator, noop_bus.eventBus()); - defer registry.deinit(); - - // Add policies from file source - var file_policy = try createTestPolicy(allocator, "file-only-policy"); - defer freeTestPolicy(allocator, &file_policy); - - try registry.updatePolicies(&.{file_policy}, "file-provider", .file); - - // Add policies from HTTP source - var http_policy = try createTestPolicy(allocator, "http-only-policy"); - defer freeTestPolicy(allocator, &http_policy); - - try registry.updatePolicies(&.{http_policy}, "http-provider", .http); - - // Should have both policies - try testing.expectEqual(@as(usize, 2), registry.getPolicyCount()); -} - -// ----------------------------------------------------------------------------- -// Stale Policy Removal Tests -// ----------------------------------------------------------------------------- - -test "PolicyRegistry: stale policies are removed when source updates" { - const allocator = testing.allocator; - var noop_bus: NoopEventBus = undefined; - noop_bus.init(); - var registry = PolicyRegistry.init(allocator, noop_bus.eventBus()); - defer registry.deinit(); - - // Add two policies from file source - var policy1 = try createTestPolicy(allocator, "policy-1"); - defer freeTestPolicy(allocator, &policy1); - - var policy2 = try createTestPolicy(allocator, "policy-2"); - defer freeTestPolicy(allocator, &policy2); - - try registry.updatePolicies(&.{ policy1, policy2 }, "file-provider", .file); - try testing.expectEqual(@as(usize, 2), registry.getPolicyCount()); - - // Update with only one policy (policy-2 should be removed) - try registry.updatePolicies(&.{policy1}, "file-provider", .file); - try testing.expectEqual(@as(usize, 1), registry.getPolicyCount()); - - const snapshot = registry.getSnapshot(); - try testing.expect(snapshot != null); - try testing.expectEqualStrings("policy-1", snapshot.?.policies[0].name); -} - -test "PolicyRegistry: stale removal only affects same source" { - const allocator = testing.allocator; - var noop_bus: NoopEventBus = undefined; - noop_bus.init(); - var registry = PolicyRegistry.init(allocator, noop_bus.eventBus()); - defer registry.deinit(); - - // Add policy from file source - var file_policy = try createTestPolicy(allocator, "file-policy"); - defer freeTestPolicy(allocator, &file_policy); - - try registry.updatePolicies(&.{file_policy}, "file-provider", .file); - - // Add policy from HTTP 
source - var http_policy = try createTestPolicy(allocator, "http-policy"); - defer freeTestPolicy(allocator, &http_policy); - - try registry.updatePolicies(&.{http_policy}, "http-provider", .http); - try testing.expectEqual(@as(usize, 2), registry.getPolicyCount()); - - // Update file source with empty set (should only remove file-policy) - try registry.updatePolicies(&.{}, "file-provider", .file); - try testing.expectEqual(@as(usize, 1), registry.getPolicyCount()); - - const snapshot = registry.getSnapshot(); - try testing.expect(snapshot != null); - try testing.expectEqualStrings("http-policy", snapshot.?.policies[0].name); -} - -test "PolicyRegistry: clearProvider removes all policies from provider" { - const allocator = testing.allocator; - var noop_bus: NoopEventBus = undefined; - noop_bus.init(); - var registry = PolicyRegistry.init(allocator, noop_bus.eventBus()); - defer registry.deinit(); - - // Add policies from both sources - var file_policy = try createTestPolicy(allocator, "file-policy"); - defer freeTestPolicy(allocator, &file_policy); - - var http_policy = try createTestPolicy(allocator, "http-policy"); - defer freeTestPolicy(allocator, &http_policy); - - try registry.updatePolicies(&.{file_policy}, "file-provider", .file); - try registry.updatePolicies(&.{http_policy}, "http-provider", .http); - try testing.expectEqual(@as(usize, 2), registry.getPolicyCount()); - - // Clear file provider - try registry.clearProvider("file-provider"); - try testing.expectEqual(@as(usize, 1), registry.getPolicyCount()); - - const snapshot = registry.getSnapshot(); - try testing.expect(snapshot != null); - try testing.expectEqualStrings("http-policy", snapshot.?.policies[0].name); -} - -// ----------------------------------------------------------------------------- -// Snapshot Versioning Tests -// ----------------------------------------------------------------------------- - -test "PolicyRegistry: snapshot version increments on update" { - const allocator = testing.allocator; - var noop_bus: NoopEventBus = undefined; - noop_bus.init(); - var registry = PolicyRegistry.init(allocator, noop_bus.eventBus()); - defer registry.deinit(); - - var policy = try createTestPolicy(allocator, "test-policy"); - defer freeTestPolicy(allocator, &policy); - - // First update - try registry.updatePolicies(&.{policy}, "file-provider", .file); - const snapshot1 = registry.getSnapshot(); - try testing.expect(snapshot1 != null); - try testing.expectEqual(@as(u64, 1), snapshot1.?.version); - - // Second update - try registry.updatePolicies(&.{policy}, "file-provider", .file); - const snapshot2 = registry.getSnapshot(); - try testing.expect(snapshot2 != null); - try testing.expectEqual(@as(u64, 2), snapshot2.?.version); - - // Third update - try registry.updatePolicies(&.{}, "file-provider", .file); - const snapshot3 = registry.getSnapshot(); - try testing.expect(snapshot3 != null); - try testing.expectEqual(@as(u64, 3), snapshot3.?.version); -} - -test "PolicyRegistry: clearProvider increments version" { - const allocator = testing.allocator; - var noop_bus: NoopEventBus = undefined; - noop_bus.init(); - var registry = PolicyRegistry.init(allocator, noop_bus.eventBus()); - defer registry.deinit(); - - var policy = try createTestPolicy(allocator, "test-policy"); - defer freeTestPolicy(allocator, &policy); - - try registry.updatePolicies(&.{policy}, "file-provider", .file); - const version_before = registry.getSnapshot().?.version; - - try registry.clearProvider("file-provider"); - const version_after = 
registry.getSnapshot().?.version; - - try testing.expect(version_after > version_before); -} - -// ----------------------------------------------------------------------------- -// TestPolicyProvider Integration Tests -// ----------------------------------------------------------------------------- - -test "TestPolicyProvider: basic functionality" { - const allocator = testing.allocator; - - var prov = TestPolicyProvider.init(allocator, "file-provider", .file); - defer prov.deinit(); - - // Add a policy - var policy = try createTestPolicy(allocator, "provider-policy"); - defer freeTestPolicy(allocator, &policy); - - try prov.addPolicy(policy); - try testing.expectEqual(@as(usize, 1), prov.policies.items.len); - - // Remove the policy - prov.removePolicy("provider-policy"); - try testing.expectEqual(@as(usize, 0), prov.policies.items.len); -} - -test "TestPolicyProvider: integrates with PolicyRegistry" { - const allocator = testing.allocator; - - var noop_bus: NoopEventBus = undefined; - noop_bus.init(); - var registry = PolicyRegistry.init(allocator, noop_bus.eventBus()); - defer registry.deinit(); - - var file_provider = TestPolicyProvider.init(allocator, "file-provider", .file); - defer file_provider.deinit(); - - // Add policy to provider - var policy = try createTestPolicy(allocator, "provider-policy"); - defer freeTestPolicy(allocator, &policy); - - try file_provider.addPolicy(policy); - - // Create callback that updates registry - const Ctx = struct { - registry: *PolicyRegistry, - source_type: SourceType, - - fn onUpdate(ctx_ptr: *anyopaque, update: PolicyUpdate) anyerror!void { - const self: *@This() = @ptrCast(@alignCast(ctx_ptr)); - try self.registry.updatePolicies(update.policies, update.provider_id, self.source_type); - } - }; - - var ctx = Ctx{ .registry = ®istry, .source_type = .file }; - const callback = PolicyCallback{ - .context = &ctx, - .onUpdate = Ctx.onUpdate, - }; - - // Subscribe - should immediately update registry - try file_provider.subscribe(callback); - - try testing.expectEqual(@as(usize, 1), registry.getPolicyCount()); - const snapshot = registry.getSnapshot(); - try testing.expect(snapshot != null); - try testing.expectEqualStrings("provider-policy", snapshot.?.policies[0].name); -} - -test "TestPolicyProvider: multiple providers with different sources" { - const allocator = testing.allocator; - - var noop_bus: NoopEventBus = undefined; - noop_bus.init(); - var registry = PolicyRegistry.init(allocator, noop_bus.eventBus()); - defer registry.deinit(); - - var file_provider = TestPolicyProvider.init(allocator, "file-provider", .file); - defer file_provider.deinit(); - - var http_provider = TestPolicyProvider.init(allocator, "http-provider", .http); - defer http_provider.deinit(); - - // Add policies to providers - var file_policy = try createTestPolicy(allocator, "file-policy"); - defer freeTestPolicy(allocator, &file_policy); - - var http_policy = try createTestPolicy(allocator, "http-policy"); - defer freeTestPolicy(allocator, &http_policy); - - try file_provider.addPolicy(file_policy); - try http_provider.addPolicy(http_policy); - - // Create callbacks with source types - const Ctx = struct { - registry: *PolicyRegistry, - source_type: SourceType, - - fn onUpdate(ctx_ptr: *anyopaque, update: PolicyUpdate) anyerror!void { - const self: *@This() = @ptrCast(@alignCast(ctx_ptr)); - try self.registry.updatePolicies(update.policies, update.provider_id, self.source_type); - } - }; - - var file_ctx = Ctx{ .registry = ®istry, .source_type = .file }; - const 
file_callback = PolicyCallback{ - .context = &file_ctx, - .onUpdate = Ctx.onUpdate, - }; - - var http_ctx = Ctx{ .registry = ®istry, .source_type = .http }; - const http_callback = PolicyCallback{ - .context = &http_ctx, - .onUpdate = Ctx.onUpdate, - }; - - // Subscribe to both providers - try file_provider.subscribe(file_callback); - try http_provider.subscribe(http_callback); - - // Registry should have both policies - try testing.expectEqual(@as(usize, 2), registry.getPolicyCount()); -} - -test "TestPolicyProvider: notifySubscribers updates registry" { - const allocator = testing.allocator; - - var noop_bus: NoopEventBus = undefined; - noop_bus.init(); - var registry = PolicyRegistry.init(allocator, noop_bus.eventBus()); - defer registry.deinit(); - - var prov = TestPolicyProvider.init(allocator, "file-provider", .file); - defer prov.deinit(); - - // Create and subscribe callback - const Ctx = struct { - registry: *PolicyRegistry, - source_type: SourceType, - - fn onUpdate(ctx_ptr: *anyopaque, update: PolicyUpdate) anyerror!void { - const self: *@This() = @ptrCast(@alignCast(ctx_ptr)); - try self.registry.updatePolicies(update.policies, update.provider_id, self.source_type); - } - }; - - var ctx = Ctx{ .registry = ®istry, .source_type = .file }; - const callback = PolicyCallback{ - .context = &ctx, - .onUpdate = Ctx.onUpdate, - }; - - try prov.subscribe(callback); - try testing.expectEqual(@as(usize, 0), registry.getPolicyCount()); - - // Add policy and notify - var policy1 = try createTestPolicy(allocator, "policy-1"); - defer freeTestPolicy(allocator, &policy1); - - try prov.addPolicy(policy1); - try prov.notifySubscribers(); - - try testing.expectEqual(@as(usize, 1), registry.getPolicyCount()); - - // Add another policy and notify - var policy2 = try createTestPolicy(allocator, "policy-2"); - defer freeTestPolicy(allocator, &policy2); - - try prov.addPolicy(policy2); - try prov.notifySubscribers(); - - try testing.expectEqual(@as(usize, 2), registry.getPolicyCount()); - - // Remove policy and notify - prov.removePolicy("policy-1"); - try prov.notifySubscribers(); - - try testing.expectEqual(@as(usize, 1), registry.getPolicyCount()); - - const snapshot = registry.getSnapshot(); - try testing.expect(snapshot != null); - try testing.expectEqualStrings("policy-2", snapshot.?.policies[0].name); -} - -test "TestPolicyProvider: HTTP provider overrides file provider" { - const allocator = testing.allocator; - - var noop_bus: NoopEventBus = undefined; - noop_bus.init(); - var registry = PolicyRegistry.init(allocator, noop_bus.eventBus()); - defer registry.deinit(); - - var file_provider = TestPolicyProvider.init(allocator, "file-provider", .file); - defer file_provider.deinit(); - - var http_provider = TestPolicyProvider.init(allocator, "http-provider", .http); - defer http_provider.deinit(); - - // Create callbacks with source types - const Ctx = struct { - registry: *PolicyRegistry, - source_type: SourceType, - - fn onUpdate(ctx_ptr: *anyopaque, update: PolicyUpdate) anyerror!void { - const self: *@This() = @ptrCast(@alignCast(ctx_ptr)); - try self.registry.updatePolicies(update.policies, update.provider_id, self.source_type); - } - }; - - var file_ctx = Ctx{ .registry = ®istry, .source_type = .file }; - const file_callback = PolicyCallback{ - .context = &file_ctx, - .onUpdate = Ctx.onUpdate, - }; - - var http_ctx = Ctx{ .registry = ®istry, .source_type = .http }; - const http_callback = PolicyCallback{ - .context = &http_ctx, - .onUpdate = Ctx.onUpdate, - }; - - // Add same-named policy to 
file provider first - var file_policy = try createTestPolicy(allocator, "shared-policy"); - file_policy.description = try allocator.dupe(u8, "file version 1"); - defer freeTestPolicy(allocator, &file_policy); - - try file_provider.addPolicy(file_policy); - try file_provider.subscribe(file_callback); - - // Verify file policy is in registry - try testing.expectEqual(@as(usize, 1), registry.getPolicyCount()); - try testing.expectEqualStrings( - "file version 1", - registry.getSnapshot().?.policies[0].description, - ); - - // Add same-named policy to HTTP provider (should override) - var http_policy = try createTestPolicy(allocator, "shared-policy"); - http_policy.description = try allocator.dupe(u8, "http version"); - defer freeTestPolicy(allocator, &http_policy); - - try http_provider.addPolicy(http_policy); - try http_provider.subscribe(http_callback); - - // Verify HTTP policy replaced file policy - try testing.expectEqual(@as(usize, 1), registry.getPolicyCount()); - try testing.expectEqualStrings( - "http version", - registry.getSnapshot().?.policies[0].description, - ); - - // Update file provider - should NOT override HTTP - file_provider.clearPolicies(); - var file_policy2 = try createTestPolicy(allocator, "shared-policy"); - file_policy2.description = try allocator.dupe(u8, "file version 2"); - defer freeTestPolicy(allocator, &file_policy2); - - try file_provider.addPolicy(file_policy2); - try file_provider.notifySubscribers(); - - // Should still have HTTP version - try testing.expectEqual(@as(usize, 1), registry.getPolicyCount()); - try testing.expectEqualStrings( - "http version", - registry.getSnapshot().?.policies[0].description, - ); -} - -// ----------------------------------------------------------------------------- -// Policy Config Type Indexing Tests -// ----------------------------------------------------------------------------- - -const LogTarget = proto.policy.LogTarget; - -/// Helper to create a test policy with a log target config -fn createTestPolicyWithFilter( - allocator: std.mem.Allocator, - name: []const u8, -) !Policy { - var policy = Policy{ - .id = try allocator.dupe(u8, name), // Use name as id for tests - .name = try allocator.dupe(u8, name), - .enabled = true, - .target = .{ .log = LogTarget{ - .match = .empty, - .keep = try allocator.dupe(u8, "none"), - } }, - }; - _ = &policy; - - return policy; -} - -test "PolicyConfigType: fromPolicy returns log_target when log is set" { - const allocator = testing.allocator; - - var policy = try createTestPolicyWithFilter(allocator, "filter-policy"); - defer freeTestPolicy(allocator, &policy); - - const config_type = PolicyConfigType.fromPolicy(&policy); - try testing.expectEqual(PolicyConfigType.log_target, config_type); -} - -test "PolicyConfigType: fromPolicy returns none when log is null" { - const allocator = testing.allocator; - - var policy = try createTestPolicy(allocator, "no-filter-policy"); - defer freeTestPolicy(allocator, &policy); - - const config_type = PolicyConfigType.fromPolicy(&policy); - try testing.expectEqual(PolicyConfigType.none, config_type); -} - -test "PolicySnapshot: log_target_indices contains only log policies" { - const allocator = testing.allocator; - var noop_bus: NoopEventBus = undefined; - noop_bus.init(); - var registry = PolicyRegistry.init(allocator, noop_bus.eventBus()); - defer registry.deinit(); - - // Create mix of policies with and without log targets - var policy_no_filter = try createTestPolicy(allocator, "no-filter"); - defer freeTestPolicy(allocator, &policy_no_filter); 
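-
-    // Of the three policies in this test, only "with-filter" carries a log
-    // target, so the snapshot should index exactly one log-target policy.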
- - var policy_with_filter = try createTestPolicyWithFilter(allocator, "with-filter"); - defer freeTestPolicy(allocator, &policy_with_filter); - - var another_no_filter = try createTestPolicy(allocator, "another-no-filter"); - defer freeTestPolicy(allocator, &another_no_filter); - - try registry.updatePolicies(&.{ policy_no_filter, policy_with_filter, another_no_filter }, "file-provider", .file); - - const snapshot = registry.getSnapshot(); - try testing.expect(snapshot != null); - - // Should have 3 total policies but only 1 log target index - try testing.expectEqual(@as(usize, 3), snapshot.?.policies.len); - try testing.expectEqual(@as(usize, 1), snapshot.?.log_target_indices.len); - - // The indexed policy should be the one with log target - const indexed_policy = snapshot.?.policies[snapshot.?.log_target_indices[0]]; - try testing.expectEqualStrings("with-filter", indexed_policy.name); - try testing.expect(indexed_policy.target != null); -} - -test "PolicySnapshot: multiple log policies are indexed" { - const allocator = testing.allocator; - var noop_bus: NoopEventBus = undefined; - noop_bus.init(); - var registry = PolicyRegistry.init(allocator, noop_bus.eventBus()); - defer registry.deinit(); - - var filter1 = try createTestPolicyWithFilter(allocator, "filter-1"); - defer freeTestPolicy(allocator, &filter1); - - var filter2 = try createTestPolicyWithFilter(allocator, "filter-2"); - defer freeTestPolicy(allocator, &filter2); - - var filter3 = try createTestPolicyWithFilter(allocator, "filter-3"); - defer freeTestPolicy(allocator, &filter3); - - try registry.updatePolicies(&.{ filter1, filter2, filter3 }, "file-provider", .file); - - const snapshot = registry.getSnapshot(); - try testing.expect(snapshot != null); - - // All 3 policies should be indexed - try testing.expectEqual(@as(usize, 3), snapshot.?.policies.len); - try testing.expectEqual(@as(usize, 3), snapshot.?.log_target_indices.len); -} - -test "PolicySnapshot: empty when no log policies" { - const allocator = testing.allocator; - var noop_bus: NoopEventBus = undefined; - noop_bus.init(); - var registry = PolicyRegistry.init(allocator, noop_bus.eventBus()); - defer registry.deinit(); - - var policy1 = try createTestPolicy(allocator, "policy-1"); - defer freeTestPolicy(allocator, &policy1); - - var policy2 = try createTestPolicy(allocator, "policy-2"); - defer freeTestPolicy(allocator, &policy2); - - try registry.updatePolicies(&.{ policy1, policy2 }, "file-provider", .file); - - const snapshot = registry.getSnapshot(); - try testing.expect(snapshot != null); - - // No log target indices - try testing.expectEqual(@as(usize, 2), snapshot.?.policies.len); - try testing.expectEqual(@as(usize, 0), snapshot.?.log_target_indices.len); -} - -test "PolicySnapshot: iterateLogTargetPolicies returns all log policies" { - const allocator = testing.allocator; - var noop_bus: NoopEventBus = undefined; - noop_bus.init(); - var registry = PolicyRegistry.init(allocator, noop_bus.eventBus()); - defer registry.deinit(); - - var no_filter = try createTestPolicy(allocator, "no-filter"); - defer freeTestPolicy(allocator, &no_filter); - - var filter1 = try createTestPolicyWithFilter(allocator, "filter-1"); - defer freeTestPolicy(allocator, &filter1); - - var filter2 = try createTestPolicyWithFilter(allocator, "filter-2"); - defer freeTestPolicy(allocator, &filter2); - - try registry.updatePolicies(&.{ no_filter, filter1, filter2 }, "file-provider", .file); - - const snapshot = registry.getSnapshot(); - try testing.expect(snapshot != null); - - // 
Iterate and collect names - var iter = snapshot.?.iterateLogTargetPolicies(); - var count: usize = 0; - var found_filter1 = false; - var found_filter2 = false; - - while (iter.next()) |policy| { - count += 1; - try testing.expect(policy.target != null); - - if (std.mem.eql(u8, policy.name, "filter-1")) { - found_filter1 = true; - } else if (std.mem.eql(u8, policy.name, "filter-2")) { - found_filter2 = true; - } - } - - try testing.expectEqual(@as(usize, 2), count); - try testing.expect(found_filter1); - try testing.expect(found_filter2); -} - -test "PolicySnapshot: iterator returns null when no log policies" { - const allocator = testing.allocator; - var noop_bus: NoopEventBus = undefined; - noop_bus.init(); - var registry = PolicyRegistry.init(allocator, noop_bus.eventBus()); - defer registry.deinit(); - - var policy = try createTestPolicy(allocator, "no-filter"); - defer freeTestPolicy(allocator, &policy); - - try registry.updatePolicies(&.{policy}, "file-provider", .file); - - const snapshot = registry.getSnapshot(); - try testing.expect(snapshot != null); - - var iter = snapshot.?.iterateLogTargetPolicies(); - try testing.expect(iter.next() == null); -} - -test "PolicySnapshot: indices update when policies change" { - const allocator = testing.allocator; - var noop_bus: NoopEventBus = undefined; - noop_bus.init(); - var registry = PolicyRegistry.init(allocator, noop_bus.eventBus()); - defer registry.deinit(); - - // Start with one log policy - var filter1 = try createTestPolicyWithFilter(allocator, "filter-1"); - defer freeTestPolicy(allocator, &filter1); - - try registry.updatePolicies(&.{filter1}, "file-provider", .file); - - var snapshot = registry.getSnapshot(); - try testing.expectEqual(@as(usize, 1), snapshot.?.log_target_indices.len); - - // Add another log policy - var filter2 = try createTestPolicyWithFilter(allocator, "filter-2"); - defer freeTestPolicy(allocator, &filter2); - - try registry.updatePolicies(&.{ filter1, filter2 }, "file-provider", .file); - - snapshot = registry.getSnapshot(); - try testing.expectEqual(@as(usize, 2), snapshot.?.log_target_indices.len); - - // Remove log policies, add non-log - var no_filter = try createTestPolicy(allocator, "no-filter"); - defer freeTestPolicy(allocator, &no_filter); - - try registry.updatePolicies(&.{no_filter}, "file-provider", .file); - - snapshot = registry.getSnapshot(); - try testing.expectEqual(@as(usize, 0), snapshot.?.log_target_indices.len); -} - -// ----------------------------------------------------------------------------- -// Policy Error Routing Tests -// ----------------------------------------------------------------------------- - -/// Mock provider that records errors for testing -const MockErrorProvider = struct { - recorded_errors: std.ArrayListUnmanaged(struct { policy_id: []const u8, message: []const u8 }), - allocator: std.mem.Allocator, - id: []const u8, - - fn init(allocator: std.mem.Allocator, id: []const u8) MockErrorProvider { - return .{ - .recorded_errors = .{}, - .allocator = allocator, - .id = id, - }; - } - - pub fn getId(self: *MockErrorProvider) []const u8 { - return self.id; - } - - // Must be pub for PolicyProvider.init to find it - pub fn deinit(self: *MockErrorProvider) void { - for (self.recorded_errors.items) |entry| { - self.allocator.free(entry.policy_id); - self.allocator.free(entry.message); - } - self.recorded_errors.deinit(self.allocator); - } - - pub fn subscribe(self: *MockErrorProvider, callback: PolicyCallback) !void { - _ = self; - _ = callback; - } - - pub fn 
recordPolicyError(self: *MockErrorProvider, policy_id: []const u8, error_message: []const u8) void { - const id_copy = self.allocator.dupe(u8, policy_id) catch return; - const msg_copy = self.allocator.dupe(u8, error_message) catch { - self.allocator.free(id_copy); - return; - }; - self.recorded_errors.append(self.allocator, .{ - .policy_id = id_copy, - .message = msg_copy, - }) catch { - self.allocator.free(id_copy); - self.allocator.free(msg_copy); - }; - } - - pub fn recordPolicyStats(self: *MockErrorProvider, policy_id: []const u8, hits: i64, misses: i64, transform_result: policy_provider.TransformResult) void { - // No-op for mock - stats tracking not needed for error tests - _ = self; - _ = policy_id; - _ = hits; - _ = misses; - _ = transform_result; - } - - fn getErrorCount(self: *MockErrorProvider) usize { - return self.recorded_errors.items.len; - } - - fn hasError(self: *MockErrorProvider, policy_id: []const u8, message: []const u8) bool { - for (self.recorded_errors.items) |entry| { - if (std.mem.eql(u8, entry.policy_id, policy_id) and - std.mem.eql(u8, entry.message, message)) - { - return true; - } - } - return false; - } -}; - -test "PolicyRegistry: registerProvider and recordPolicyError routes to correct provider" { - const allocator = testing.allocator; - var noop_bus: NoopEventBus = undefined; - noop_bus.init(); - var registry = PolicyRegistry.init(allocator, noop_bus.eventBus()); - defer registry.deinit(); - - // Create mock providers - var file_mock = MockErrorProvider.init(allocator, "file-provider"); - defer file_mock.deinit(); - - var http_mock = MockErrorProvider.init(allocator, "http-provider"); - defer http_mock.deinit(); - - // Register providers - var file_provider = policy_provider.PolicyProvider.init(&file_mock); - var http_provider = policy_provider.PolicyProvider.init(&http_mock); - - try registry.registerProvider(&file_provider); - try registry.registerProvider(&http_provider); - - // Add policies from different sources - var file_policy = try createTestPolicy(allocator, "file-policy-1"); - defer freeTestPolicy(allocator, &file_policy); - - var http_policy = try createTestPolicy(allocator, "http-policy-1"); - defer freeTestPolicy(allocator, &http_policy); - - try registry.updatePolicies(&.{file_policy}, "file-provider", .file); - try registry.updatePolicies(&.{http_policy}, "http-provider", .http); - - // Record errors - registry.recordPolicyError("file-policy-1", "Invalid regex in file policy"); - registry.recordPolicyError("http-policy-1", "Invalid regex in http policy"); - - // Verify errors routed to correct providers - try testing.expectEqual(@as(usize, 1), file_mock.getErrorCount()); - try testing.expectEqual(@as(usize, 1), http_mock.getErrorCount()); - - try testing.expect(file_mock.hasError("file-policy-1", "Invalid regex in file policy")); - try testing.expect(http_mock.hasError("http-policy-1", "Invalid regex in http policy")); - - // Verify no cross-contamination - try testing.expect(!file_mock.hasError("http-policy-1", "Invalid regex in http policy")); - try testing.expect(!http_mock.hasError("file-policy-1", "Invalid regex in file policy")); -} - -test "PolicyRegistry: recordPolicyError for unknown policy does not route to provider" { - const allocator = testing.allocator; - var noop_bus: NoopEventBus = undefined; - noop_bus.init(); - var registry = PolicyRegistry.init(allocator, noop_bus.eventBus()); - defer registry.deinit(); - - var mock = MockErrorProvider.init(allocator, "file-provider"); - defer mock.deinit(); - - var provider = 
policy_provider.PolicyProvider.init(&mock);
-    try registry.registerProvider(&provider);
-
-    // Add a real policy so we can verify that routing works for known ids
-    var real_policy = try createTestPolicy(allocator, "real-policy");
-    defer freeTestPolicy(allocator, &real_policy);
-    try registry.updatePolicies(&.{real_policy}, "file-provider", .file);
-
-    // Record error for the real policy - should be routed
-    registry.recordPolicyError("real-policy", "Some error");
-
-    // Verify the error was recorded
-    try testing.expectEqual(@as(usize, 1), mock.getErrorCount());
-    try testing.expect(mock.hasError("real-policy", "Some error"));
-
-    // Record an error for an id that was never registered - it must not be
-    // routed to the provider (the registry falls back to the event bus)
-    registry.recordPolicyError("unknown-policy", "Unknown error");
-
-    try testing.expectEqual(@as(usize, 1), mock.getErrorCount());
-    try testing.expect(!mock.hasError("unknown-policy", "Unknown error"));
-}
-
-test "PolicyRegistry: multiple errors for same policy accumulate" {
-    const allocator = testing.allocator;
-    var noop_bus: NoopEventBus = undefined;
-    noop_bus.init();
-    var registry = PolicyRegistry.init(allocator, noop_bus.eventBus());
-    defer registry.deinit();
-
-    var mock = MockErrorProvider.init(allocator, "file-provider");
-    defer mock.deinit();
-
-    var provider = policy_provider.PolicyProvider.init(&mock);
-    try registry.registerProvider(&provider);
-
-    var policy = try createTestPolicy(allocator, "error-prone-policy");
-    defer freeTestPolicy(allocator, &policy);
-    try registry.updatePolicies(&.{policy}, "file-provider", .file);
-
-    // Record multiple errors for the same policy
-    registry.recordPolicyError("error-prone-policy", "First error");
-    registry.recordPolicyError("error-prone-policy", "Second error");
-    registry.recordPolicyError("error-prone-policy", "Third error");
-
-    // All errors should be recorded
-    try testing.expectEqual(@as(usize, 3), mock.getErrorCount());
-    try testing.expect(mock.hasError("error-prone-policy", "First error"));
-    try testing.expect(mock.hasError("error-prone-policy", "Second error"));
-    try testing.expect(mock.hasError("error-prone-policy", "Third error"));
-}
-
-test "PolicyRegistry: policies keyed by id not name" {
-    const allocator = testing.allocator;
-    var noop_bus: NoopEventBus = undefined;
-    noop_bus.init();
-    var registry = PolicyRegistry.init(allocator, noop_bus.eventBus());
-    defer registry.deinit();
-
-    // Create two policies with same name but different ids
-    var policy1 = Policy{
-        .id = try allocator.dupe(u8, "id-1"),
-        .name = try allocator.dupe(u8, "same-name"),
-        .enabled = true,
-    };
-    defer policy1.deinit(allocator);
-
-    var policy2 = Policy{
-        .id = try allocator.dupe(u8, "id-2"),
-        .name = try allocator.dupe(u8, "same-name"),
-        .enabled = true,
-    };
-    defer policy2.deinit(allocator);
-
-    // Both should be added (different ids)
-    try registry.updatePolicies(&.{ policy1, policy2 }, "file-provider", .file);
-
-    try testing.expectEqual(@as(usize, 2), registry.getPolicyCount());
-
-    const snapshot = registry.getSnapshot();
-    try testing.expect(snapshot != null);
-    try testing.expectEqual(@as(usize, 2), snapshot.?.policies.len);
-}
-
-test "PolicyRegistry: policy update by id replaces correctly" {
-    const allocator = testing.allocator;
-    var noop_bus: NoopEventBus = undefined;
-    noop_bus.init();
-    var registry = PolicyRegistry.init(allocator, noop_bus.eventBus());
-    defer registry.deinit();
-
-    // Add initial policy
-    var policy_v1 = Policy{
-        .id = try allocator.dupe(u8, "policy-123"),
-        .name = try allocator.dupe(u8, "my-policy"),
-        .enabled = true,
-        .description = try allocator.dupe(u8, "version 1"),
-    };
-    defer policy_v1.deinit(allocator);
-
-    try registry.updatePolicies(&.{policy_v1}, "file-provider", .file);
-    try testing.expectEqual(@as(usize, 1), registry.getPolicyCount());
-
-    var snapshot = 
registry.getSnapshot(); - try testing.expectEqualStrings("version 1", snapshot.?.policies[0].description); - - // Update with same id, different description - var policy_v2 = Policy{ - .id = try allocator.dupe(u8, "policy-123"), - .name = try allocator.dupe(u8, "my-policy-renamed"), - .enabled = true, - .description = try allocator.dupe(u8, "version 2"), - }; - defer policy_v2.deinit(allocator); - - try registry.updatePolicies(&.{policy_v2}, "file-provider", .file); - - // Should still have 1 policy, but updated - try testing.expectEqual(@as(usize, 1), registry.getPolicyCount()); - - snapshot = registry.getSnapshot(); - try testing.expectEqualStrings("version 2", snapshot.?.policies[0].description); - try testing.expectEqualStrings("my-policy-renamed", snapshot.?.policies[0].name); -} - -// ----------------------------------------------------------------------------- -// Metric Policy Tests -// ----------------------------------------------------------------------------- - -const MetricTarget = proto.policy.MetricTarget; - -test "PolicyConfigType: fromPolicy returns metric_target when metric is set" { - const allocator = testing.allocator; - - var policy = Policy{ - .id = try allocator.dupe(u8, "metric-policy"), - .name = try allocator.dupe(u8, "metric-policy"), - .enabled = true, - .target = .{ .metric = MetricTarget{ - .match = .empty, - .keep = true, - } }, - }; - defer policy.deinit(allocator); - - const config_type = PolicyConfigType.fromPolicy(&policy); - try testing.expectEqual(PolicyConfigType.metric_target, config_type); -} - -test "PolicySnapshot: metric_target_indices contains only metric policies" { - const allocator = testing.allocator; - var noop_bus: NoopEventBus = undefined; - noop_bus.init(); - var registry = PolicyRegistry.init(allocator, noop_bus.eventBus()); - defer registry.deinit(); - - // Create a log policy - var log_policy = Policy{ - .id = try allocator.dupe(u8, "log-policy"), - .name = try allocator.dupe(u8, "log-policy"), - .enabled = true, - .target = .{ .log = LogTarget{ - .match = .empty, - .keep = try allocator.dupe(u8, "none"), - } }, - }; - defer log_policy.deinit(allocator); - - // Create a metric policy - var metric_policy = Policy{ - .id = try allocator.dupe(u8, "metric-policy"), - .name = try allocator.dupe(u8, "metric-policy"), - .enabled = true, - .target = .{ .metric = MetricTarget{ - .match = .empty, - .keep = true, - } }, - }; - defer metric_policy.deinit(allocator); - - // Create a policy with no target - var no_target_policy = try createTestPolicy(allocator, "no-target"); - defer freeTestPolicy(allocator, &no_target_policy); - - // Add all policies - try registry.updatePolicies(&.{ log_policy, metric_policy, no_target_policy }, "file-provider", .file); - - const snapshot = registry.getSnapshot(); - try testing.expect(snapshot != null); - - // Should have 3 policies total - try testing.expectEqual(@as(usize, 3), snapshot.?.policies.len); - - // Should have 1 log policy indexed - try testing.expectEqual(@as(usize, 1), snapshot.?.log_target_indices.len); - - // Should have 1 metric policy indexed - try testing.expectEqual(@as(usize, 1), snapshot.?.metric_target_indices.len); - - // Verify the log policy is correct - const log_policy_idx = snapshot.?.log_target_indices[0]; - try testing.expectEqualStrings("log-policy", snapshot.?.policies[log_policy_idx].name); - - // Verify the metric policy is correct - const metric_policy_idx = snapshot.?.metric_target_indices[0]; - try testing.expectEqualStrings("metric-policy", 
snapshot.?.policies[metric_policy_idx].name);
-}
-
-test "PolicySnapshot: iterateMetricTargetPolicies iterates only metric policies" {
-    const allocator = testing.allocator;
-    var noop_bus: NoopEventBus = undefined;
-    noop_bus.init();
-    var registry = PolicyRegistry.init(allocator, noop_bus.eventBus());
-    defer registry.deinit();
-
-    // Create two metric policies
-    var metric_policy1 = Policy{
-        .id = try allocator.dupe(u8, "metric-1"),
-        .name = try allocator.dupe(u8, "metric-1"),
-        .enabled = true,
-        .target = .{ .metric = MetricTarget{
-            .match = .empty,
-            .keep = true,
-        } },
-    };
-    defer metric_policy1.deinit(allocator);
-
-    var metric_policy2 = Policy{
-        .id = try allocator.dupe(u8, "metric-2"),
-        .name = try allocator.dupe(u8, "metric-2"),
-        .enabled = true,
-        .target = .{ .metric = MetricTarget{
-            .match = .empty,
-            .keep = false,
-        } },
-    };
-    defer metric_policy2.deinit(allocator);
-
-    // Create a log policy (should not be in metric iteration)
-    var log_policy = Policy{
-        .id = try allocator.dupe(u8, "log-policy"),
-        .name = try allocator.dupe(u8, "log-policy"),
-        .enabled = true,
-        .target = .{ .log = LogTarget{
-            .match = .empty,
-            .keep = try allocator.dupe(u8, "all"),
-        } },
-    };
-    defer log_policy.deinit(allocator);
-
-    try registry.updatePolicies(&.{ metric_policy1, log_policy, metric_policy2 }, "file-provider", .file);
-
-    const snapshot = registry.getSnapshot();
-    try testing.expect(snapshot != null);
-
-    // Iterate metric policies
-    var iter = snapshot.?.iterateMetricTargetPolicies();
-    var count: usize = 0;
-    var found_metric1 = false;
-    var found_metric2 = false;
-
-    while (iter.next()) |policy| {
-        count += 1;
-        try testing.expect(policy.target != null);
-
-        if (std.mem.eql(u8, policy.name, "metric-1")) {
-            found_metric1 = true;
-        } else if (std.mem.eql(u8, policy.name, "metric-2")) {
-            found_metric2 = true;
-        }
-    }
-
-    try testing.expectEqual(@as(usize, 2), count);
-    try testing.expect(found_metric1);
-    try testing.expect(found_metric2);
-}
diff --git a/src/policy/root.zig b/src/policy/root.zig
deleted file mode 100644
index b1bd68d..0000000
--- a/src/policy/root.zig
+++ /dev/null
@@ -1,145 +0,0 @@
-//! Policy management package for Tero Edge
-//!
-//! This package provides policy loading, management, and evaluation capabilities.
-//! Policies can be loaded from multiple sources (file, HTTP) with priority-based
-//! conflict resolution.
-//!
-//! ## Usage
-//!
-//! ```zig
-//! const policy = @import("policy");
-//!
-//! // Create a registry
-//! var registry = policy.Registry.init(allocator, bus);
-//! defer registry.deinit();
-//!
-//! // Create and register a file provider
-//! const file_provider = try policy.FileProvider.init(allocator, bus, "local", "policies.json");
-//! defer file_provider.deinit();
-//!
-//! // Note: the interface must be `var` so a mutable pointer can be passed
-//! var provider_interface = policy.Provider.init(file_provider);
-//! try registry.registerProvider(&provider_interface);
-//! 
``` - -const std = @import("std"); - -// ============================================================================= -// Core Types -// ============================================================================= - -/// Re-export source types -pub const source = @import("./source.zig"); -pub const SourceType = source.SourceType; -pub const PolicyMetadata = source.PolicyMetadata; - -/// Re-export provider interface -pub const provider = @import("./provider.zig"); -pub const Provider = provider.PolicyProvider; -pub const PolicyCallback = provider.PolicyCallback; -pub const PolicyUpdate = provider.PolicyUpdate; - -/// Re-export registry -pub const registry = @import("./registry.zig"); -pub const Registry = registry.PolicyRegistry; -pub const Snapshot = registry.PolicySnapshot; -pub const ConfigType = registry.PolicyConfigType; -pub const TestPolicyProvider = registry.TestPolicyProvider; - -// ============================================================================= -// Provider Implementations -// ============================================================================= - -/// File-based policy provider -pub const FileProvider = @import("./provider_file.zig").FileProvider; - -/// HTTP-based policy provider -pub const HttpProvider = @import("./provider_http.zig").HttpProvider; - -/// Async policy loader for off-hot-path initialization -pub const Loader = @import("./loader.zig").PolicyLoader; - -// ============================================================================= -// Configuration Types -// ============================================================================= - -pub const types = @import("./types.zig"); -pub const ServiceMetadata = types.ServiceMetadata; -pub const ProviderType = types.ProviderType; -pub const ProviderConfig = types.ProviderConfig; -pub const Header = types.Header; - -// Field reference types (shared across policy engine and transforms) -pub const FieldRef = types.FieldRef; -pub const MetricFieldRef = types.MetricFieldRef; -pub const TraceFieldRef = types.TraceFieldRef; -pub const LogFieldAccessor = types.LogFieldAccessor; -pub const LogFieldMutator = types.LogFieldMutator; -pub const MetricFieldAccessor = types.MetricFieldAccessor; -pub const MetricFieldMutator = types.MetricFieldMutator; -pub const TraceFieldAccessor = types.TraceFieldAccessor; -pub const TraceFieldMutator = types.TraceFieldMutator; -pub const MutateOp = types.MutateOp; -pub const MetricMutateOp = types.MetricMutateOp; -pub const TraceMutateOp = types.TraceMutateOp; -pub const TelemetryType = types.TelemetryType; - -// ============================================================================= -// Matcher Index (Hyperscan-based pattern matching) -// ============================================================================= - -pub const matcher_index = @import("./matcher_index.zig"); -pub const LogMatcherIndex = matcher_index.LogMatcherIndex; -pub const MetricMatcherIndex = matcher_index.MetricMatcherIndex; -pub const LogMatcherKey = matcher_index.LogMatcherKey; -pub const MetricMatcherKey = matcher_index.MetricMatcherKey; -pub const MAX_POLICIES = matcher_index.MAX_POLICIES; - -// ============================================================================= -// Sampling and Rate Limiting -// ============================================================================= - -pub const sampler = @import("./sampler.zig"); -pub const Sampler = sampler.Sampler; - -pub const trace_sampler = @import("./trace_sampler.zig"); -pub const TraceSampler = trace_sampler.TraceSampler; - -pub 
const rate_limiter = @import("./rate_limiter.zig"); -pub const RateLimiter = rate_limiter.RateLimiter; - -// ============================================================================= -// Policy Engine -// ============================================================================= - -pub const policy_engine = @import("./policy_engine.zig"); -pub const PolicyEngine = policy_engine.PolicyEngine; -pub const PolicyResult = policy_engine.PolicyResult; -pub const FilterDecision = policy_engine.FilterDecision; -pub const MAX_MATCHES_PER_SCAN = policy_engine.MAX_MATCHES_PER_SCAN; - -// ============================================================================= -// Parsing -// ============================================================================= - -pub const parser = @import("./parser.zig"); - -// ============================================================================= -// Transforms -// ============================================================================= - -pub const log_transform = @import("./log_transform.zig"); -pub const TransformResult = log_transform.TransformResult; -pub const applyTransforms = log_transform.applyTransforms; -pub const applyRemove = log_transform.applyRemove; -pub const applyRedact = log_transform.applyRedact; -pub const applyRename = log_transform.applyRename; -pub const applyAdd = log_transform.applyAdd; - -// ============================================================================= -// Tests -// ============================================================================= - -test { - // Run all tests in submodules - std.testing.refAllDecls(@This()); -} diff --git a/src/policy/sampler.zig b/src/policy/sampler.zig deleted file mode 100644 index 68f20ef..0000000 --- a/src/policy/sampler.zig +++ /dev/null @@ -1,243 +0,0 @@ -//! Stateless percentage-based sampling for telemetry policies. -//! -//! Uses a hash of the input to make deterministic keep/drop decisions. -//! The same input always produces the same result, ensuring consistent -//! sampling across distributed systems (e.g., all spans of a trace are -//! either kept or dropped together when using trace_id as input). -//! -//! ## Design Principles -//! -//! - Stateless: No mutable state, purely functional -//! - Deterministic: Same input always produces same result -//! - Uniform distribution: Uses splitmix64 for good avalanche properties -//! -//! ## Usage -//! -//! ```zig -//! const sampler = Sampler{ .percentage = 50 }; -//! if (sampler.shouldKeep(trace_id)) { -//! // Keep this trace -//! } -//! ``` - -const std = @import("std"); -const testing = std.testing; - -/// Stateless percentage-based sampler. -/// -/// Uses a hash of the input to make deterministic keep/drop decisions. -/// The same input always produces the same result, ensuring consistent -/// sampling across distributed systems (e.g., all spans of a trace are -/// either kept or dropped together when using trace_id as input). -pub const Sampler = struct { - /// Percentage of items to keep (0-100). - /// Values > 100 are treated as 100. - percentage: u8, - - pub fn init(percentage: u8) Sampler { - return Sampler{ .percentage = percentage }; - } - - /// Decide whether to keep based on hash of input. - /// - /// Deterministic: same input always produces same result. - /// Distribution: approximately `percentage`% of inputs will return true. 
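To make the keep/drop rule documented above concrete before the implementation: a minimal, self-contained sketch (illustrative only, not part of the original file) of the same bucket decision. The constants mirror the splitmix64 mix that `Sampler` uses below.

```zig
const std = @import("std");

/// Illustrative sketch of the bucket rule: mix the input with splitmix64,
/// reduce it to a bucket in [0, 100), and keep the item when the bucket
/// falls below the configured percentage.
fn keeps(percentage: u8, input: u64) bool {
    var h = input +% 0x9e3779b97f4a7c15;
    h = (h ^ (h >> 30)) *% 0xbf58476d1ce4e5b9;
    h = (h ^ (h >> 27)) *% 0x94d049bb133111eb;
    h ^= h >> 31;
    return @as(u8, @truncate(h % 100)) < percentage;
}

test "same input always yields the same verdict" {
    try std.testing.expectEqual(keeps(50, 42), keeps(50, 42));
}
```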
- pub fn shouldKeep(self: Sampler, hash_input: u64) bool { - if (self.percentage == 0) return false; - if (self.percentage >= 100) return true; - - const hash = mixHash(hash_input); - const bucket = @as(u8, @truncate(hash % 100)); - return bucket < self.percentage; - } - - /// splitmix64 hash mixing function. - /// Provides good avalanche properties to ensure uniform distribution - /// even for sequential or poorly-distributed inputs. - fn mixHash(x: u64) u64 { - var h = x +% 0x9e3779b97f4a7c15; - h = (h ^ (h >> 30)) *% 0xbf58476d1ce4e5b9; - h = (h ^ (h >> 27)) *% 0x94d049bb133111eb; - return h ^ (h >> 31); - } -}; - -// ============================================================================= -// Tests -// ============================================================================= - -test "Sampler: zero percentage always rejects" { - const sampler = Sampler{ .percentage = 0 }; - - // Test with various inputs - try testing.expect(!sampler.shouldKeep(0)); - try testing.expect(!sampler.shouldKeep(1)); - try testing.expect(!sampler.shouldKeep(std.math.maxInt(u64))); - try testing.expect(!sampler.shouldKeep(0xDEADBEEF)); - - // Test sequential inputs - for (0..1000) |i| { - try testing.expect(!sampler.shouldKeep(i)); - } -} - -test "Sampler: 100 percentage always accepts" { - const sampler = Sampler{ .percentage = 100 }; - - try testing.expect(sampler.shouldKeep(0)); - try testing.expect(sampler.shouldKeep(1)); - try testing.expect(sampler.shouldKeep(std.math.maxInt(u64))); - try testing.expect(sampler.shouldKeep(0xDEADBEEF)); - - for (0..1000) |i| { - try testing.expect(sampler.shouldKeep(i)); - } -} - -test "Sampler: over 100 percentage treated as 100" { - const sampler = Sampler{ .percentage = 255 }; - - for (0..1000) |i| { - try testing.expect(sampler.shouldKeep(i)); - } -} - -test "Sampler: deterministic for same input" { - const sampler = Sampler{ .percentage = 50 }; - - // Same input should always produce same result - const inputs = [_]u64{ 0, 1, 42, 12345, 0xDEADBEEF, std.math.maxInt(u64) }; - - for (inputs) |input| { - const first_result = sampler.shouldKeep(input); - // Check 100 times - for (0..100) |_| { - try testing.expectEqual(first_result, sampler.shouldKeep(input)); - } - } -} - -test "Sampler: different percentages are independent" { - const low = Sampler{ .percentage = 10 }; - const high = Sampler{ .percentage = 90 }; - - // An input accepted by low should definitely be accepted by high - // (since low samples a subset of what high samples - both use same hash) - for (0..1000) |i| { - if (low.shouldKeep(i)) { - try testing.expect(high.shouldKeep(i)); - } - } -} - -test "Sampler: approximate distribution for 50%" { - const sampler = Sampler{ .percentage = 50 }; - var kept: u32 = 0; - const total: u32 = 10000; - - for (0..total) |i| { - if (sampler.shouldKeep(i)) kept += 1; - } - - // Should be roughly 50% (within 5% tolerance for statistical significance) - const ratio = @as(f64, @floatFromInt(kept)) / @as(f64, @floatFromInt(total)); - try testing.expect(ratio > 0.45 and ratio < 0.55); -} - -test "Sampler: approximate distribution for 10%" { - const sampler = Sampler{ .percentage = 10 }; - var kept: u32 = 0; - const total: u32 = 10000; - - for (0..total) |i| { - if (sampler.shouldKeep(i)) kept += 1; - } - - const ratio = @as(f64, @floatFromInt(kept)) / @as(f64, @floatFromInt(total)); - try testing.expect(ratio > 0.07 and ratio < 0.13); -} - -test "Sampler: approximate distribution for 90%" { - const sampler = Sampler{ .percentage = 90 }; - var kept: u32 = 0; - const 
total: u32 = 10000; - - for (0..total) |i| { - if (sampler.shouldKeep(i)) kept += 1; - } - - const ratio = @as(f64, @floatFromInt(kept)) / @as(f64, @floatFromInt(total)); - try testing.expect(ratio > 0.87 and ratio < 0.93); -} - -test "Sampler: 1% edge case" { - const sampler = Sampler{ .percentage = 1 }; - var kept: u32 = 0; - const total: u32 = 100000; - - for (0..total) |i| { - if (sampler.shouldKeep(i)) kept += 1; - } - - const ratio = @as(f64, @floatFromInt(kept)) / @as(f64, @floatFromInt(total)); - try testing.expect(ratio > 0.005 and ratio < 0.015); -} - -test "Sampler: 99% edge case" { - const sampler = Sampler{ .percentage = 99 }; - var kept: u32 = 0; - const total: u32 = 100000; - - for (0..total) |i| { - if (sampler.shouldKeep(i)) kept += 1; - } - - const ratio = @as(f64, @floatFromInt(kept)) / @as(f64, @floatFromInt(total)); - try testing.expect(ratio > 0.985 and ratio < 0.995); -} - -test "Sampler: hash avalanche - sequential inputs distribute well" { - // Verify that sequential inputs don't cluster - const sampler = Sampler{ .percentage = 50 }; - - // Count runs of same decision - var max_run: u32 = 0; - var current_run: u32 = 1; - var last_decision = sampler.shouldKeep(0); - - for (1..10000) |i| { - const decision = sampler.shouldKeep(i); - if (decision == last_decision) { - current_run += 1; - if (current_run > max_run) max_run = current_run; - } else { - current_run = 1; - } - last_decision = decision; - } - - // With good distribution, max run should be reasonable - // For true 50/50, expected max run in 10000 samples is ~13 - // Allow up to 30 to account for variance - try testing.expect(max_run < 30); -} - -test "Sampler: combined usage with RateLimiter" { - const rate_limiter = @import("rate_limiter.zig"); - - // Simulate using both: first sample, then rate limit - const sampler = Sampler{ .percentage = 50 }; - var limiter = rate_limiter.RateLimiter.initPerSecond(5); - - var kept: u32 = 0; - - for (0..100) |i| { - // First check sampling, then rate limit - if (sampler.shouldKeep(i) and limiter.shouldKeep()) { - kept += 1; - } - } - - // Should keep at most 5 (rate limit) - try testing.expectEqual(@as(u32, 5), kept); -} diff --git a/src/policy/source.zig b/src/policy/source.zig deleted file mode 100644 index b087687..0000000 --- a/src/policy/source.zig +++ /dev/null @@ -1,52 +0,0 @@ -const std = @import("std"); -const proto = @import("proto"); -const TelemetryType = proto.policy.TelemetryType; - -/// Source type for policies with priority ordering -/// Higher numeric value = higher priority (HTTP overlays file) -pub const SourceType = enum(u8) { - file = 0, - http = 1, - - pub fn priority(self: SourceType) u8 { - return @intFromEnum(self); - } -}; - -/// Metadata about a policy's source and history -/// Cold data: only accessed during updates, not during evaluation -pub const PolicyMetadata = struct { - /// Provider ID that owns this policy - provider_id: []const u8, - /// Source type for priority ordering - source_type: SourceType, - last_updated: i128, // Unix timestamp in nanoseconds - - pub fn init(provider_id: []const u8, source_type: SourceType) PolicyMetadata { - return .{ - .provider_id = provider_id, - .source_type = source_type, - .last_updated = std.time.nanoTimestamp(), - }; - } - - /// Check if this policy should be replaced by a new one from the given source - pub fn shouldReplace(self: PolicyMetadata, new_source_type: SourceType) bool { - return new_source_type.priority() >= self.source_type.priority(); - } -}; - -test "PolicyMetadata.shouldReplace 
prioritizes HTTP over file" { - const file_meta = PolicyMetadata.init("file-provider", .file); - const http_meta = PolicyMetadata.init("http-provider", .http); - - // HTTP should replace file - try std.testing.expect(file_meta.shouldReplace(.http)); - - // File should not replace HTTP - try std.testing.expect(!http_meta.shouldReplace(.file)); - - // Same source should replace (update) - try std.testing.expect(file_meta.shouldReplace(.file)); - try std.testing.expect(http_meta.shouldReplace(.http)); -} diff --git a/src/policy/trace_sampler.zig b/src/policy/trace_sampler.zig deleted file mode 100644 index c869bfd..0000000 --- a/src/policy/trace_sampler.zig +++ /dev/null @@ -1,608 +0,0 @@ -//! Probabilistic Trace Sampler -//! -//! Implements the OpenTelemetry probability sampling specification: -//! https://opentelemetry.io/docs/specs/otel/trace/tracestate-probability-sampling/ -//! -//! The sampling decision is based on comparing a 56-bit randomness value (R) against -//! a rejection threshold (T). If R >= T, the span is kept; otherwise it is dropped. -//! -//! ## Threshold Calculation -//! -//! The threshold is derived from the configured percentage: -//! T = floor((1 - percentage/100) * 2^56) -//! -//! For example: -//! - 100% sampling: T = 0 (keep everything) -//! - 50% sampling: T = 2^55 (keep half) -//! - 0% sampling: T = 2^56 (keep nothing) -//! -//! ## Randomness Value (R) -//! -//! The randomness value is derived from the trace_id: -//! - For hash_seed mode: R = hash(trace_id, seed) & 0x00FFFFFFFFFFFFFF -//! - For proportional/equalizing modes: R is extracted from existing tracestate -//! -//! ## Tracestate Handling -//! -//! The sampler reads and writes the `th` (threshold) key in the tracestate header -//! following the W3C tracestate specification. - -const std = @import("std"); -const proto = @import("proto"); -const testing = std.testing; - -const TraceSamplingConfig = proto.policy.TraceSamplingConfig; -const SamplingMode = proto.policy.SamplingMode; - -/// Maximum value for 56-bit randomness/threshold (2^56) -const MAX_56BIT: u64 = 1 << 56; - -/// Default sampling precision (hex digits) -const DEFAULT_PRECISION: u32 = 4; - -/// Default hash seed -const DEFAULT_HASH_SEED: u32 = 0; - -/// Probabilistic trace sampler following OTel probability sampling spec. -pub const TraceSampler = struct { - /// Rejection threshold (T). Spans with R >= T are kept. 
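As a worked instance of the threshold formula documented above (an illustrative check, not from the original file): 25% sampling rejects three quarters of the 56-bit randomness space, so T = floor((1 - 25/100) * 2^56) = 3 * 2^54 = 0xC0000000000000.

```zig
const std = @import("std");

test "illustrative: threshold arithmetic for 25% sampling" {
    // T = floor((1 - 25/100) * 2^56) = 3 * 2^54
    const max_56bit: u64 = 1 << 56;
    const t = (max_56bit / 4) * 3;
    try std.testing.expectEqual(@as(u64, 0xC0000000000000), t);
}
```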
- threshold: u64, - /// Sampling mode - mode: SamplingMode, - /// Hash seed for deterministic sampling - hash_seed: u32, - /// Precision for threshold encoding (1-14 hex digits) - precision: u32, - /// Whether to reject on errors - fail_closed: bool, - /// Original percentage for reference - percentage: f32, - - /// Initialize sampler from TraceSamplingConfig - pub fn init(config: ?*const TraceSamplingConfig) TraceSampler { - if (config == null) { - // No config = keep all - return .{ - .threshold = 0, - .mode = .SAMPLING_MODE_HASH_SEED, - .hash_seed = DEFAULT_HASH_SEED, - .precision = DEFAULT_PRECISION, - .fail_closed = true, - .percentage = 100.0, - }; - } - - const cfg = config.?; - const percentage = cfg.percentage; - - // Calculate threshold: T = floor((1 - percentage/100) * 2^56) - const threshold = calculateThreshold(percentage); - - return .{ - .threshold = threshold, - .mode = cfg.mode orelse .SAMPLING_MODE_HASH_SEED, - .hash_seed = cfg.hash_seed orelse DEFAULT_HASH_SEED, - .precision = @min(14, @max(1, cfg.sampling_precision orelse DEFAULT_PRECISION)), - .fail_closed = cfg.fail_closed orelse true, - .percentage = percentage, - }; - } - - /// Calculate threshold from percentage - /// T = floor((1 - percentage/100) * 2^56) - fn calculateThreshold(percentage: f32) u64 { - if (percentage >= 100.0) return 0; // Keep all - if (percentage <= 0.0) return MAX_56BIT; // Keep none - - const ratio = 1.0 - (@as(f64, percentage) / 100.0); - const threshold_f = ratio * @as(f64, @floatFromInt(MAX_56BIT)); - return @intFromFloat(@min(@as(f64, @floatFromInt(MAX_56BIT)), @max(0.0, threshold_f))); - } - - /// Make sampling decision for a span. - /// - /// Returns a SamplingResult with: - /// - keep: whether to keep the span - /// - new_threshold: threshold to write to tracestate (if sampling) - pub fn sample(self: TraceSampler, trace_id: []const u8, tracestate: []const u8) SamplingResult { - // Edge cases - if (self.percentage >= 100.0) { - return .{ .keep = true, .new_threshold = null }; - } - if (self.percentage <= 0.0) { - return .{ .keep = false, .new_threshold = null }; - } - - return switch (self.mode) { - .SAMPLING_MODE_UNSPECIFIED, .SAMPLING_MODE_HASH_SEED => self.sampleHashSeed(trace_id), - .SAMPLING_MODE_PROPORTIONAL => self.sampleProportional(trace_id, tracestate), - .SAMPLING_MODE_EQUALIZING => self.sampleEqualizing(trace_id, tracestate), - _ => self.sampleHashSeed(trace_id), // Unknown mode defaults to hash_seed - }; - } - - /// Hash seed mode: deterministic sampling based on trace_id hash - fn sampleHashSeed(self: TraceSampler, trace_id: []const u8) SamplingResult { - const r = self.computeRandomness(trace_id); - const keep = r >= self.threshold; - return .{ - .keep = keep, - .new_threshold = if (keep) self.encodeThreshold() else null, - }; - } - - /// Proportional mode: adjust sampling relative to existing probability - fn sampleProportional(self: TraceSampler, trace_id: []const u8, tracestate: []const u8) SamplingResult { - // Parse existing threshold from tracestate - const existing_threshold = parseThresholdFromTracestate(tracestate); - - if (existing_threshold) |existing_t| { - // If existing threshold is more restrictive (higher), respect it - if (existing_t >= self.threshold) { - // Already sampled at lower rate, check if it passes our threshold - const r = self.computeRandomness(trace_id); - const keep = r >= self.threshold; - return .{ - .keep = keep, - .new_threshold = if (keep) self.encodeThreshold() else null, - }; - } - // Existing threshold is less restrictive - apply our 
more restrictive threshold - const r = self.computeRandomness(trace_id); - const keep = r >= self.threshold; - return .{ - .keep = keep, - .new_threshold = if (keep) self.encodeThreshold() else null, - }; - } - - // No existing threshold - use hash seed behavior - return self.sampleHashSeed(trace_id); - } - - /// Equalizing mode: preferentially sample spans with higher existing rates - fn sampleEqualizing(self: TraceSampler, trace_id: []const u8, tracestate: []const u8) SamplingResult { - // Parse existing threshold from tracestate - const existing_threshold = parseThresholdFromTracestate(tracestate); - - if (existing_threshold) |existing_t| { - // Calculate effective threshold for equalizing - // Spans that were sampled at high rates (low threshold) should be - // more likely to be dropped to equalize overall sampling - const r = self.computeRandomness(trace_id); - - // Use the more restrictive threshold - const effective_threshold = @max(existing_t, self.threshold); - const keep = r >= effective_threshold; - return .{ - .keep = keep, - .new_threshold = if (keep) self.encodeThreshold() else null, - }; - } - - // No existing threshold - use hash seed behavior - return self.sampleHashSeed(trace_id); - } - - /// Compute 56-bit randomness value from trace_id and hash_seed - fn computeRandomness(self: TraceSampler, trace_id: []const u8) u64 { - // Use the last 7 bytes of trace_id XORed with hash_seed - // This follows the OTel spec which uses the rightmost bits - var r: u64 = 0; - - if (trace_id.len >= 16) { - // Standard 16-byte trace_id - use last 7 bytes for randomness - // Bytes 9-15 (indices 9, 10, 11, 12, 13, 14, 15) = 7 bytes = 56 bits - for (trace_id[9..16]) |b| { - r = (r << 8) | b; - } - } else if (trace_id.len > 0) { - // Shorter trace_id - hash the whole thing - for (trace_id) |b| { - r = (r << 8) ^ b; - } - } - - // Mix with hash_seed for deterministic but varied sampling - r ^= @as(u64, self.hash_seed); - - // Apply splitmix64 mixing to ensure good distribution - r = mixHash(r); - - // Mask to 56 bits - return r & (MAX_56BIT - 1); - } - - /// Encode threshold as hex string for tracestate. - /// Returns a thread-local buffer - caller should copy if needed. - pub fn encodeThreshold(self: TraceSampler) []const u8 { - // Thread-local buffer for threshold encoding - const S = struct { - threadlocal var buf: [14]u8 = undefined; - }; - - // Encode the leading nibbles as hex; the loop stops once the shift register - // clears, so most trailing zeros are trimmed (up to two can survive in the - // 8 bits of u64 slack above the 56-bit window) - const hex_chars = "0123456789abcdef"; - var len: usize = 0; - var t = self.threshold; - - // Encode up to precision digits - var i: u32 = 0; - while (i < self.precision and t > 0) : (i += 1) { - const nibble = @as(u4, @truncate(t >> 52)); - S.buf[len] = hex_chars[nibble]; - len += 1; - t <<= 4; - } - - // If threshold is 0, encode as "0" - if (len == 0) { - S.buf[0] = '0'; - len = 1; - } - - return S.buf[0..len]; - } - - /// splitmix64 hash mixing function for good avalanche properties - fn mixHash(x: u64) u64 { - var h = x +% 0x9e3779b97f4a7c15; - h = (h ^ (h >> 30)) *% 0xbf58476d1ce4e5b9; - h = (h ^ (h >> 27)) *% 0x94d049bb133111eb; - return h ^ (h >> 31); - } -}; - -/// Result of sampling decision -pub const SamplingResult = struct { - /// Whether to keep the span - keep: bool, - /// New threshold to write to tracestate (null if not sampling) - new_threshold: ?[]const u8, -}; - -/// Parse threshold value from tracestate header. -/// Looks for the `th` key in the `ot` vendor section. -/// Returns null if not found or invalid.
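The `ot` vendor entry may carry several `;`-separated sub-keys (the OTel spec also defines an `rv` randomness key); only `th` matters here. A small illustrative check against the parser that follows (header values are hypothetical):

```zig
test "illustrative: th extracted from a multi-key ot entry" {
    // "th:c" fills to 0xC0000000000000; the rv sub-key is ignored.
    const t = parseThresholdFromTracestate("ot=rv:deadbeefdeadbe;th:c");
    try testing.expectEqual(@as(?u64, 0xC0000000000000), t);
}
```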
-fn parseThresholdFromTracestate(tracestate: []const u8) ?u64 { - if (tracestate.len == 0) return null; - - // Look for "ot=..." vendor section - var it = std.mem.splitScalar(u8, tracestate, ','); - while (it.next()) |entry| { - const trimmed = std.mem.trim(u8, entry, " "); - if (std.mem.startsWith(u8, trimmed, "ot=")) { - // Parse the ot value for th key - const ot_value = trimmed[3..]; - return parseOtThreshold(ot_value); - } - } - - return null; -} - -/// Parse threshold from OT vendor tracestate value -/// Format: "th:HEXVALUE" or "th:HEXVALUE;other:value" -fn parseOtThreshold(ot_value: []const u8) ?u64 { - var it = std.mem.splitScalar(u8, ot_value, ';'); - while (it.next()) |kv| { - if (std.mem.startsWith(u8, kv, "th:")) { - const hex_value = kv[3..]; - return parseHexThreshold(hex_value); - } - } - return null; -} - -/// Parse hex threshold value to u64 -fn parseHexThreshold(hex: []const u8) ?u64 { - if (hex.len == 0 or hex.len > 14) return null; - - var threshold: u64 = 0; - for (hex) |c| { - const digit: u64 = switch (c) { - '0'...'9' => c - '0', - 'a'...'f' => c - 'a' + 10, - 'A'...'F' => c - 'A' + 10, - else => return null, - }; - threshold = (threshold << 4) | digit; - } - - // Shift to fill 56 bits (14 hex digits = 56 bits) - // If fewer digits provided, shift left to fill - const shift: u6 = @intCast((14 - hex.len) * 4); - return threshold << shift; -} - -/// Maximum size for tracestate buffer (W3C spec allows up to ~8KB, but we use a reasonable limit) -pub const MAX_TRACESTATE_LEN: usize = 512; - -/// Update tracestate with sampling threshold. -/// Adds or updates the `ot=th:THRESHOLD` entry in the tracestate. -/// -/// Writes the result to the provided buffer and returns a slice of the written data. -/// Returns null if the buffer is too small. -/// -/// Per W3C tracestate spec: -/// - Maximum 32 entries -/// - Our entry goes at the beginning (most recent sampler) -/// - If ot vendor already exists, update the th value -pub fn updateTracestateInPlace( - buf: []u8, - existing_tracestate: []const u8, - threshold_hex: []const u8, -) ?[]u8 { - // Build the new ot entry: "ot=th:THRESHOLD" - var new_ot_buf: [32]u8 = undefined; - const new_ot = std.fmt.bufPrint(&new_ot_buf, "ot=th:{s}", .{threshold_hex}) catch return null; - - var pos: usize = 0; - - // Add our ot entry first (most recent sampler) - if (pos + new_ot.len > buf.len) return null; - @memcpy(buf[pos..][0..new_ot.len], new_ot); - pos += new_ot.len; - - if (existing_tracestate.len == 0) { - return buf[0..pos]; - } - - // Process existing entries - var it = std.mem.splitScalar(u8, existing_tracestate, ','); - var entry_count: usize = 1; // We already added one - - while (it.next()) |entry| { - const trimmed = std.mem.trim(u8, entry, " "); - if (trimmed.len == 0) continue; - - // Skip existing ot entry (we're replacing it) - if (std.mem.startsWith(u8, trimmed, "ot=")) continue; - - // Check entry limit (W3C spec: max 32 entries) - if (entry_count >= 32) break; - - // Check buffer space: need comma + entry - if (pos + 1 + trimmed.len > buf.len) break; - - buf[pos] = ','; - pos += 1; - @memcpy(buf[pos..][0..trimmed.len], trimmed); - pos += trimmed.len; - entry_count += 1; - } - - return buf[0..pos]; -} - -/// Compute the threshold hex string for a given percentage. -/// This is a standalone helper for when you don't have a full TraceSampler. -/// Returns a thread-local buffer - caller should copy if persistence needed. 
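Because both the encoder and the parser live in this file, a quick value-level round trip (illustrative, not from the original file; the exact digit string the encoder emits depends on the trimming loop above):

```zig
test "illustrative: encode/parse round trip at 50%" {
    // 50% sampling corresponds to T = 2^55; whatever hex digits the
    // encoder emits, parsing them must restore the same threshold.
    const hex = thresholdHexFromPercentage(50.0, 4);
    try testing.expectEqual(@as(?u64, 1 << 55), parseHexThreshold(hex));
}
```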
-pub fn thresholdHexFromPercentage(percentage: f32, precision: u32) []const u8 { - const sampler = TraceSampler{ - .threshold = TraceSampler.calculateThreshold(percentage), - .mode = .SAMPLING_MODE_HASH_SEED, - .hash_seed = 0, - .precision = @min(14, @max(1, precision)), - .fail_closed = true, - .percentage = percentage, - }; - return sampler.encodeThreshold(); -} - -// ============================================================================= -// Tests -// ============================================================================= - -test "TraceSampler: 100% keeps all" { - const config = TraceSamplingConfig{ - .percentage = 100.0, - .mode = null, - .sampling_precision = null, - .hash_seed = null, - .fail_closed = null, - }; - const sampler = TraceSampler.init(&config); - - // All trace IDs should be kept - const trace_id = [_]u8{ 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10 }; - const result = sampler.sample(&trace_id, ""); - - try testing.expect(result.keep); -} - -test "TraceSampler: 0% rejects all" { - const config = TraceSamplingConfig{ - .percentage = 0.0, - .mode = null, - .sampling_precision = null, - .hash_seed = null, - .fail_closed = null, - }; - const sampler = TraceSampler.init(&config); - - const trace_id = [_]u8{ 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10 }; - const result = sampler.sample(&trace_id, ""); - - try testing.expect(!result.keep); -} - -test "TraceSampler: null config keeps all" { - const sampler = TraceSampler.init(null); - - const trace_id = [_]u8{ 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10 }; - const result = sampler.sample(&trace_id, ""); - - try testing.expect(result.keep); -} - -test "TraceSampler: deterministic for same trace_id" { - const config = TraceSamplingConfig{ - .percentage = 50.0, - .mode = null, - .sampling_precision = null, - .hash_seed = null, - .fail_closed = null, - }; - const sampler = TraceSampler.init(&config); - - const trace_id = [_]u8{ 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10 }; - - const first_result = sampler.sample(&trace_id, ""); - for (0..100) |_| { - const result = sampler.sample(&trace_id, ""); - try testing.expectEqual(first_result.keep, result.keep); - } -} - -test "TraceSampler: hash_seed affects sampling" { - const config1 = TraceSamplingConfig{ - .percentage = 50.0, - .mode = .SAMPLING_MODE_HASH_SEED, - .sampling_precision = null, - .hash_seed = 0, - .fail_closed = null, - }; - const config2 = TraceSamplingConfig{ - .percentage = 50.0, - .mode = .SAMPLING_MODE_HASH_SEED, - .sampling_precision = null, - .hash_seed = 12345, - .fail_closed = null, - }; - - const sampler1 = TraceSampler.init(&config1); - const sampler2 = TraceSampler.init(&config2); - - // Different hash seeds may produce different results for some trace IDs - var different_count: u32 = 0; - for (0..100) |i| { - var trace_id = [_]u8{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; - trace_id[15] = @intCast(i); - - const r1 = sampler1.sample(&trace_id, ""); - const r2 = sampler2.sample(&trace_id, ""); - - if (r1.keep != r2.keep) different_count += 1; - } - - // With different seeds, we expect some results to differ - try testing.expect(different_count > 0); -} - -test "TraceSampler: approximate distribution for 50%" { - const config = TraceSamplingConfig{ - .percentage = 50.0, - .mode = null, - .sampling_precision = null, - .hash_seed = null, - 
.fail_closed = null, - }; - const sampler = TraceSampler.init(&config); - - var kept: u32 = 0; - const total: u32 = 10000; - - for (0..total) |i| { - var trace_id = [_]u8{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; - // Vary the last bytes - trace_id[14] = @intCast((i >> 8) & 0xff); - trace_id[15] = @intCast(i & 0xff); - - const result = sampler.sample(&trace_id, ""); - if (result.keep) kept += 1; - } - - const ratio = @as(f64, @floatFromInt(kept)) / @as(f64, @floatFromInt(total)); - try testing.expect(ratio > 0.45 and ratio < 0.55); -} - -test "TraceSampler: threshold calculation" { - // 100% = threshold 0 (keep all) - try testing.expectEqual(@as(u64, 0), TraceSampler.calculateThreshold(100.0)); - - // 0% = threshold MAX (keep none) - try testing.expectEqual(MAX_56BIT, TraceSampler.calculateThreshold(0.0)); - - // 50% = threshold is half of MAX - const half_threshold = TraceSampler.calculateThreshold(50.0); - const expected_half = MAX_56BIT / 2; - try testing.expect(half_threshold > expected_half - 1000 and half_threshold < expected_half + 1000); -} - -test "parseThresholdFromTracestate: empty" { - try testing.expectEqual(@as(?u64, null), parseThresholdFromTracestate("")); -} - -test "parseThresholdFromTracestate: valid ot threshold" { - // "ot=th:8" means threshold 0x80000000000000 (8 shifted to fill 56 bits) - const result = parseThresholdFromTracestate("ot=th:8"); - try testing.expect(result != null); - try testing.expectEqual(@as(u64, 0x80000000000000), result.?); -} - -test "parseThresholdFromTracestate: multiple entries" { - const result = parseThresholdFromTracestate("vendor1=val1,ot=th:4,vendor2=val2"); - try testing.expect(result != null); - try testing.expectEqual(@as(u64, 0x40000000000000), result.?); -} - -test "parseHexThreshold: single digit" { - try testing.expectEqual(@as(?u64, 0x10000000000000), parseHexThreshold("1")); - try testing.expectEqual(@as(?u64, 0x80000000000000), parseHexThreshold("8")); - try testing.expectEqual(@as(?u64, 0xf0000000000000), parseHexThreshold("f")); -} - -test "parseHexThreshold: multiple digits" { - try testing.expectEqual(@as(?u64, 0x12000000000000), parseHexThreshold("12")); - try testing.expectEqual(@as(?u64, 0x12340000000000), parseHexThreshold("1234")); -} - -test "updateTracestateInPlace: empty tracestate" { - var buf: [MAX_TRACESTATE_LEN]u8 = undefined; - const result = updateTracestateInPlace(&buf, "", "8"); - try testing.expect(result != null); - try testing.expectEqualStrings("ot=th:8", result.?); -} - -test "updateTracestateInPlace: existing entries preserved" { - var buf: [MAX_TRACESTATE_LEN]u8 = undefined; - const result = updateTracestateInPlace(&buf, "vendor1=val1,vendor2=val2", "8"); - try testing.expect(result != null); - try testing.expectEqualStrings("ot=th:8,vendor1=val1,vendor2=val2", result.?); -} - -test "updateTracestateInPlace: existing ot entry replaced" { - var buf: [MAX_TRACESTATE_LEN]u8 = undefined; - const result = updateTracestateInPlace(&buf, "ot=th:4,vendor1=val1", "8"); - try testing.expect(result != null); - try testing.expectEqualStrings("ot=th:8,vendor1=val1", result.?); -} - -test "updateTracestateInPlace: ot entry moved to front" { - var buf: [MAX_TRACESTATE_LEN]u8 = undefined; - const result = updateTracestateInPlace(&buf, "vendor1=val1,ot=th:4,vendor2=val2", "8"); - try testing.expect(result != null); - try testing.expectEqualStrings("ot=th:8,vendor1=val1,vendor2=val2", result.?); -} - -test "updateTracestateInPlace: buffer too small returns null" { - var buf: [5]u8 = undefined; // Too small to 
fit "ot=th:8" - const result = updateTracestateInPlace(&buf, "", "8"); - try testing.expect(result == null); -} - -test "thresholdHexFromPercentage: 50%" { - const hex = thresholdHexFromPercentage(50.0, 4); - // 50% means threshold = 2^55 = 0x80000000000000 - // Encoded with 4 digits precision (encodes top 4 nibbles) - // The encoding extracts nibbles from the threshold by shifting - try testing.expect(hex.len > 0); - try testing.expect(hex[0] == '8'); // First nibble is 8 -} - -test "thresholdHexFromPercentage: 100%" { - const hex = thresholdHexFromPercentage(100.0, 4); - // 100% means threshold = 0 - try testing.expectEqualStrings("0", hex); -} diff --git a/src/policy/types.zig b/src/policy/types.zig deleted file mode 100644 index 43ae5f0..0000000 --- a/src/policy/types.zig +++ /dev/null @@ -1,442 +0,0 @@ -const std = @import("std"); -const proto = @import("proto"); -const provider_http = @import("./provider_http.zig"); - -pub const Header = provider_http.Header; -pub const PolicyStage = proto.policy.PolicyStage; - -// ============================================================================= -// TelemetryType - Distinguishes between log and metric telemetry -// ============================================================================= - -/// Type of telemetry being evaluated -pub const TelemetryType = enum { - /// Log telemetry (OTLP logs, Datadog logs, etc.) - log, - /// Metric telemetry (Prometheus, OTLP metrics, etc.) - metric, - /// Trace telemetry (OTLP traces only - Datadog traces not supported) - trace, -}; - -// ============================================================================= -// Service and Provider Configuration -// ============================================================================= - -/// Service metadata for identifying this edge instance -pub const ServiceMetadata = struct { - /// Service name (e.g., "tero-edge") - name: []const u8 = "tero-edge", - /// Service namespace (e.g., "tero") - namespace: []const u8 = "tero", - /// Service version (e.g., "0.1.0", defaults to "latest") - version: []const u8 = "latest", - /// Service instance ID - generated at startup, not configurable - /// This field is set by the runtime, not from config - instance_id: []const u8 = "", - /// Supported policy stages for this service. - /// Different binaries support different stages (e.g., OTLP supports traces, Datadog does not). 
- supported_stages: []const PolicyStage = &.{}, -}; - -/// Provider type enumeration -pub const ProviderType = enum { - file, - http, -}; - -/// Configuration for a policy provider -pub const ProviderConfig = struct { - /// Unique identifier for this provider (used to track which policies came from where) - id: []const u8 = "", - type: ProviderType = .file, - // For file provider - path: ?[]const u8 = null, - // For http provider - url: ?[]const u8 = null, - poll_interval: ?u64 = null, // seconds - headers: []const Header = &.{}, // custom headers for http provider -}; - -// ============================================================================= -// Field Reference Types -// ============================================================================= - -const LogRemove = proto.policy.LogRemove; -const LogRedact = proto.policy.LogRedact; -const LogRename = proto.policy.LogRename; -const LogAdd = proto.policy.LogAdd; -const LogMatcher = proto.policy.LogMatcher; -const LogField = proto.policy.LogField; -const LogSampleKey = proto.policy.LogSampleKey; -const MetricMatcher = proto.policy.MetricMatcher; -const MetricField = proto.policy.MetricField; -const AttributePath = proto.policy.AttributePath; - -/// Reference to a field for accessor/mutator operations. -/// Attribute fields now use AttributePath for nested attribute access (v1.2.0). -pub const FieldRef = union(enum) { - log_field: LogField, - /// AttributePath for nested log attribute access (e.g., path: ["http", "method"]) - log_attribute: AttributePath, - /// AttributePath for nested resource attribute access - resource_attribute: AttributePath, - /// AttributePath for nested scope attribute access - scope_attribute: AttributePath, - - pub fn fromRemoveField(field: ?LogRemove.field_union) ?FieldRef { - const f = field orelse return null; - return switch (f) { - .log_field => |v| .{ .log_field = v }, - .log_attribute => |v| .{ .log_attribute = v }, - .resource_attribute => |v| .{ .resource_attribute = v }, - .scope_attribute => |v| .{ .scope_attribute = v }, - }; - } - - pub fn fromRedactField(field: ?LogRedact.field_union) ?FieldRef { - const f = field orelse return null; - return switch (f) { - .log_field => |v| .{ .log_field = v }, - .log_attribute => |v| .{ .log_attribute = v }, - .resource_attribute => |v| .{ .resource_attribute = v }, - .scope_attribute => |v| .{ .scope_attribute = v }, - }; - } - - pub fn fromRenameFrom(from: ?LogRename.from_union) ?FieldRef { - const f = from orelse return null; - return switch (f) { - .from_log_field => |v| .{ .log_field = v }, - .from_log_attribute => |v| .{ .log_attribute = v }, - .from_resource_attribute => |v| .{ .resource_attribute = v }, - .from_scope_attribute => |v| .{ .scope_attribute = v }, - }; - } - - pub fn fromAddField(field: ?LogAdd.field_union) ?FieldRef { - const f = field orelse return null; - return switch (f) { - .log_field => |v| .{ .log_field = v }, - .log_attribute => |v| .{ .log_attribute = v }, - .resource_attribute => |v| .{ .resource_attribute = v }, - .scope_attribute => |v| .{ .scope_attribute = v }, - }; - } - - pub fn fromMatcherField(field: ?LogMatcher.field_union) ?FieldRef { - const f = field orelse return null; - return switch (f) { - .log_field => |v| .{ .log_field = v }, - .log_attribute => |v| .{ .log_attribute = v }, - .resource_attribute => |v| .{ .resource_attribute = v }, - .scope_attribute => |v| .{ .scope_attribute = v }, - }; - } - - pub fn fromSampleKeyField(field: ?LogSampleKey.field_union) ?FieldRef { - const f = field orelse return null; - 
return switch (f) { - .log_field => |v| .{ .log_field = v }, - .log_attribute => |v| .{ .log_attribute = v }, - .resource_attribute => |v| .{ .resource_attribute = v }, - .scope_attribute => |v| .{ .scope_attribute = v }, - }; - } - - /// Check if this field ref requires a key (attribute-based fields) - pub fn isKeyed(self: FieldRef) bool { - return switch (self) { - .log_attribute, .resource_attribute, .scope_attribute => true, - .log_field => false, - }; - } - - /// Get the path for attribute-based fields, empty slice for log_field. - /// For backward compatibility, use getKey() which returns first segment as string. - pub fn getPath(self: FieldRef) []const []const u8 { - return switch (self) { - .log_attribute => |attr| attr.path.items, - .resource_attribute => |attr| attr.path.items, - .scope_attribute => |attr| attr.path.items, - .log_field => &.{}, - }; - } - - /// Get the key for attribute-based fields (first path segment), empty string for log_field. - /// For nested paths, this returns the first segment only - use getPath() for full path. - pub fn getKey(self: FieldRef) []const u8 { - const path = self.getPath(); - if (path.len > 0) return path[0]; - return ""; - } -}; - -// ============================================================================= -// Metric Field Reference Types -// ============================================================================= - -const MetricType = proto.policy.MetricType; -const AggregationTemporality = proto.policy.AggregationTemporality; - -/// Reference to a metric field for accessor/mutator operations. -/// Enum fields (metric_type, aggregation_temporality) are matched as strings via Hyperscan. -/// Attribute fields now use AttributePath for nested attribute access (v1.2.0). -pub const MetricFieldRef = union(enum) { - metric_field: MetricField, - /// AttributePath for nested datapoint attribute access - datapoint_attribute: AttributePath, - /// AttributePath for nested resource attribute access - resource_attribute: AttributePath, - /// AttributePath for nested scope attribute access - scope_attribute: AttributePath, - /// Match on metric type (gauge, sum, histogram, etc.) - /// The field accessor returns the type as a string, matched via regex. - metric_type: void, - /// Match on aggregation temporality (delta, cumulative) - /// The field accessor returns the temporality as a string, matched via regex. - aggregation_temporality: void, - - pub fn fromMatcherField(field: ?MetricMatcher.field_union) ?MetricFieldRef { - const f = field orelse return null; - return switch (f) { - .metric_field => |v| .{ .metric_field = v }, - .datapoint_attribute => |v| .{ .datapoint_attribute = v }, - .resource_attribute => |v| .{ .resource_attribute = v }, - .scope_attribute => |v| .{ .scope_attribute = v }, - .metric_type => .{ .metric_type = {} }, - .aggregation_temporality => .{ .aggregation_temporality = {} }, - }; - } - - /// Check if this field ref requires a key (attribute-based fields) - pub fn isKeyed(self: MetricFieldRef) bool { - return switch (self) { - .datapoint_attribute, .resource_attribute, .scope_attribute => true, - .metric_field, .metric_type, .aggregation_temporality => false, - }; - } - - /// Get the path for attribute-based fields, empty slice for simple fields. 
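To make the path/key split concrete: for a nested reference such as `attributes["http"]["method"]`, the full path is `["http", "method"]` while the legacy key is only the first segment. A minimal stand-in sketch (hypothetical; the real `AttributePath` is a generated protobuf message):

```zig
const std = @import("std");

/// Hypothetical stand-in for AttributePath-style first-segment access.
fn firstSegment(path: []const []const u8) []const u8 {
    return if (path.len > 0) path[0] else "";
}

test "illustrative: nested path vs. legacy key" {
    const path = [_][]const u8{ "http", "method" };
    try std.testing.expectEqualStrings("http", firstSegment(&path));
}
```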
- pub fn getPath(self: MetricFieldRef) []const []const u8 { - return switch (self) { - .datapoint_attribute => |attr| attr.path.items, - .resource_attribute => |attr| attr.path.items, - .scope_attribute => |attr| attr.path.items, - .metric_field, .metric_type, .aggregation_temporality => &.{}, - }; - } - - /// Get the key for attribute-based fields (first path segment), empty string for simple fields. - pub fn getKey(self: MetricFieldRef) []const u8 { - const path = self.getPath(); - if (path.len > 0) return path[0]; - return ""; - } -}; - -// ============================================================================= -// Log Field Accessor/Mutator Types -// ============================================================================= - -/// Log field accessor function type - returns the value for a given log field -/// Returns null if the field doesn't exist -pub const LogFieldAccessor = *const fn (ctx: *const anyopaque, field: FieldRef) ?[]const u8; - -/// Log field mutator function type - sets, removes, or renames a log field -/// Returns true if the operation succeeded -pub const LogFieldMutator = *const fn (ctx: *anyopaque, op: MutateOp) bool; - -// ============================================================================= -// Metric Field Accessor/Mutator Types -// ============================================================================= - -/// Metric field accessor function type - returns the value for a given metric field -/// Returns null if the field doesn't exist -pub const MetricFieldAccessor = *const fn (ctx: *const anyopaque, field: MetricFieldRef) ?[]const u8; - -/// Metric field mutator function type - sets, removes, or renames a metric field -/// Returns true if the operation succeeded -pub const MetricFieldMutator = *const fn (ctx: *anyopaque, op: MetricMutateOp) bool; - -/// Mutation operation for log field mutator -pub const MutateOp = union(enum) { - /// Remove a field entirely - remove: FieldRef, - /// Set a field to a value (upsert controls insert vs update behavior) - set: struct { - field: FieldRef, - value: []const u8, - upsert: bool, - }, - /// Rename a field (move value from one field to another) - rename: struct { - from: FieldRef, - to: []const u8, - upsert: bool, - }, -}; - -/// Mutation operation for metric field mutator -pub const MetricMutateOp = union(enum) { - /// Remove a field entirely - remove: MetricFieldRef, - /// Set a field to a value (upsert controls insert vs update behavior) - set: struct { - field: MetricFieldRef, - value: []const u8, - upsert: bool, - }, - /// Rename a field (move value from one field to another) - rename: struct { - from: MetricFieldRef, - to: []const u8, - upsert: bool, - }, -}; - -// ============================================================================= -// Trace Field Reference Types -// ============================================================================= - -const TraceMatcher = proto.policy.TraceMatcher; -const TraceField = proto.policy.TraceField; -const SpanKind = proto.policy.SpanKind; -const SpanStatusCode = proto.policy.SpanStatusCode; - -/// Reference to a trace/span field for accessor/mutator operations. -/// Supports all field types from TraceMatcher for comprehensive span matching. -/// Attribute fields now use AttributePath for nested attribute access (v1.2.0). -pub const TraceFieldRef = union(enum) { - /// Simple trace fields (name, trace_id, span_id, etc.) 
- trace_field: TraceField, - /// AttributePath for nested span attribute access - span_attribute: AttributePath, - /// AttributePath for nested resource attribute access - resource_attribute: AttributePath, - /// AttributePath for nested scope attribute access - scope_attribute: AttributePath, - /// Match on span kind (enum value) - span_kind: SpanKind, - /// Match on span status code (enum value) - span_status: SpanStatusCode, - /// Event name matcher (matches if span contains an event with this name) - event_name: []const u8, - /// AttributePath for nested event attribute access - event_attribute: AttributePath, - /// Link trace ID matcher (matches if span has a link to this trace) - link_trace_id: []const u8, - - pub fn fromMatcherField(field: ?TraceMatcher.field_union) ?TraceFieldRef { - const f = field orelse return null; - return switch (f) { - .trace_field => |v| .{ .trace_field = v }, - .span_attribute => |v| .{ .span_attribute = v }, - .resource_attribute => |v| .{ .resource_attribute = v }, - .scope_attribute => |v| .{ .scope_attribute = v }, - .span_kind => |v| .{ .span_kind = v }, - .span_status => |v| .{ .span_status = v }, - .event_name => |v| .{ .event_name = v }, - .event_attribute => |v| .{ .event_attribute = v }, - .link_trace_id => |v| .{ .link_trace_id = v }, - }; - } - - /// Check if this field ref requires a key (attribute-based fields, plus - /// event_name and link_trace_id, whose matcher value doubles as the key) - pub fn isKeyed(self: TraceFieldRef) bool { - return switch (self) { - .span_attribute, .resource_attribute, .scope_attribute, .event_attribute => true, - .event_name, .link_trace_id => true, - .trace_field, .span_kind, .span_status => false, - }; - } - - /// Get the path for attribute-based fields, empty slice for simple fields. - pub fn getPath(self: TraceFieldRef) []const []const u8 { - return switch (self) { - .span_attribute => |attr| attr.path.items, - .resource_attribute => |attr| attr.path.items, - .scope_attribute => |attr| attr.path.items, - .event_attribute => |attr| attr.path.items, - .event_name, .link_trace_id, .trace_field, .span_kind, .span_status => &.{}, - }; - } - - /// Get the key for attribute-based fields (first path segment), empty string for simple fields. - /// For event_name and link_trace_id, returns the value itself.
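One concrete case of the rule above (an illustrative check, not from the original file; the event name is hypothetical): for `event_name`, the matcher value itself serves as the key.

```zig
test "illustrative: event_name key is the value itself" {
    const ref = TraceFieldRef{ .event_name = "exception" };
    try std.testing.expectEqualStrings("exception", ref.getKey());
}
```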
- pub fn getKey(self: TraceFieldRef) []const u8 { - return switch (self) { - .span_attribute, .resource_attribute, .scope_attribute, .event_attribute => |attr| blk: { - const path = attr.path.items; - break :blk if (path.len > 0) path[0] else ""; - }, - .event_name => |k| k, - .link_trace_id => |k| k, - .trace_field, .span_kind, .span_status => "", - }; - } -}; - -// ============================================================================= -// Trace Field Accessor/Mutator Types -// ============================================================================= - -/// Trace field accessor function type - returns the value for a given trace/span field -/// Returns null if the field doesn't exist -pub const TraceFieldAccessor = *const fn (ctx: *const anyopaque, field: TraceFieldRef) ?[]const u8; - -/// Trace field mutator function type - sets, removes, or renames a trace/span field -/// Returns true if the operation succeeded -pub const TraceFieldMutator = *const fn (ctx: *anyopaque, op: TraceMutateOp) bool; - -/// Mutation operation for trace field mutator -pub const TraceMutateOp = union(enum) { - /// Remove a field entirely - remove: TraceFieldRef, - /// Set a field to a value (upsert controls insert vs update behavior) - set: struct { - field: TraceFieldRef, - value: []const u8, - upsert: bool, - }, - /// Rename a field (move value from one field to another) - rename: struct { - from: TraceFieldRef, - to: []const u8, - upsert: bool, - }, -}; - -// ============================================================================= -// Transform Result -// ============================================================================= - -/// Result of applying transforms to a log record. -/// Tracks both attempted and applied counts for each transform stage. -/// Used for reporting transform hit/miss statistics. 
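A small illustrative check of the counters' semantics (assumed usage, not from the original file): an operation counts as "attempted" whenever a transform targets a record, and "applied" only when the target field was actually present.

```zig
test "illustrative: transform hit/miss accounting" {
    // Two removes attempted; only one target field was present.
    const r = TransformResult{ .removes_attempted = 2, .removes_applied = 1 };
    try std.testing.expectEqual(@as(usize, 2), r.totalAttempted());
    try std.testing.expectEqual(@as(usize, 1), r.totalApplied());
}
```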
-pub const TransformResult = struct { - /// Number of remove operations attempted - removes_attempted: usize = 0, - /// Number of remove operations applied (hits) - removes_applied: usize = 0, - /// Number of redact operations attempted - redacts_attempted: usize = 0, - /// Number of redact operations applied (hits) - redacts_applied: usize = 0, - /// Number of rename operations attempted - renames_attempted: usize = 0, - /// Number of rename operations applied (hits) - renames_applied: usize = 0, - /// Number of add operations attempted - adds_attempted: usize = 0, - /// Number of add operations applied (hits) - adds_applied: usize = 0, - - pub fn totalApplied(self: TransformResult) usize { - return self.removes_applied + self.redacts_applied + self.renames_applied + self.adds_applied; - } - - pub fn totalAttempted(self: TransformResult) usize { - return self.removes_attempted + self.redacts_attempted + self.renames_attempted + self.adds_attempted; - } -}; diff --git a/src/prometheus/field_accessor.zig b/src/prometheus/field_accessor.zig index 91f5fb4..4f549b4 100644 --- a/src/prometheus/field_accessor.zig +++ b/src/prometheus/field_accessor.zig @@ -12,7 +12,7 @@ const std = @import("std"); const proto = @import("proto"); const line_parser = @import("line_parser.zig"); -const policy = @import("../policy/root.zig"); +const policy = @import("policy_zig"); const MetricFieldRef = policy.MetricFieldRef; const MetricField = proto.policy.MetricField; diff --git a/src/prometheus/streaming_filter.zig b/src/prometheus/streaming_filter.zig index ade10b7..11c3260 100644 --- a/src/prometheus/streaming_filter.zig +++ b/src/prometheus/streaming_filter.zig @@ -14,8 +14,8 @@ const std = @import("std"); const proto = @import("proto"); const line_parser = @import("line_parser.zig"); const field_accessor = @import("field_accessor.zig"); -const policy = @import("../policy/root.zig"); -const o11y = @import("../observability/root.zig"); +const policy = @import("policy_zig"); +const o11y = @import("o11y"); const AttributePath = proto.policy.AttributePath; diff --git a/src/prometheus_main.zig b/src/prometheus_main.zig index 6f7396e..6899808 100644 --- a/src/prometheus_main.zig +++ b/src/prometheus_main.zig @@ -27,7 +27,7 @@ const policy = edge.policy; const ProxyConfig = config_types.ProxyConfig; -const o11y = @import("observability/root.zig"); +const o11y = @import("o11y"); const EventBus = o11y.EventBus; const StdLogAdapter = o11y.StdLogAdapter; const Level = o11y.Level; diff --git a/src/proto/google/api.pb.zig b/src/proto/google/api.pb.zig deleted file mode 100644 index 738879d..0000000 --- a/src/proto/google/api.pb.zig +++ /dev/null @@ -1,526 +0,0 @@ -// Code generated by protoc-gen-zig -///! package google.api -const std = @import("std"); - -const protobuf = @import("protobuf"); -const fd = protobuf.fd; -/// import package google.protobuf -const google_protobuf = @import("protobuf.pb.zig"); - -/// Defines the HTTP configuration for an API service. It contains a list of -/// [HttpRule][google.api.HttpRule], each specifying the mapping of an RPC method -/// to one or more HTTP REST API methods. -pub const Http = struct { - rules: std.ArrayListUnmanaged(HttpRule) = .empty, - fully_decode_reserved_expansion: bool = false, - - pub const _desc_table = .{ - .rules = fd(1, .{ .repeated = .submessage }), - .fully_decode_reserved_expansion = fd(2, .{ .scalar = .bool }), - }; - - /// Encodes the message to the writer - /// The allocator is used to generate submessages internally. 
- /// Hence, an ArenaAllocator is a preferred choice if allocations are a bottleneck. - pub fn encode( - self: @This(), - writer: *std.Io.Writer, - allocator: std.mem.Allocator, - ) (std.Io.Writer.Error || std.mem.Allocator.Error)!void { - return protobuf.encode(writer, allocator, self); - } - - /// Decodes the message from the bytes read from the reader. - pub fn decode( - reader: *std.Io.Reader, - allocator: std.mem.Allocator, - ) (protobuf.DecodingError || std.Io.Reader.Error || std.mem.Allocator.Error)!@This() { - return protobuf.decode(@This(), reader, allocator); - } - - /// Deinitializes and frees the memory associated with the message. - pub fn deinit(self: *@This(), allocator: std.mem.Allocator) void { - return protobuf.deinit(allocator, self); - } - - /// Duplicates the message. - pub fn dupe(self: @This(), allocator: std.mem.Allocator) std.mem.Allocator.Error!@This() { - return protobuf.dupe(@This(), self, allocator); - } - - /// Decodes the message from the JSON string. - pub fn jsonDecode( - input: []const u8, - options: std.json.ParseOptions, - allocator: std.mem.Allocator, - ) !std.json.Parsed(@This()) { - return protobuf.json.decode(@This(), input, options, allocator); - } - - /// Encodes the message to a JSON string. - pub fn jsonEncode( - self: @This(), - options: std.json.Stringify.Options, - allocator: std.mem.Allocator, - ) ![]const u8 { - return protobuf.json.encode(self, options, allocator); - } - - /// This method is used by std.json - /// internally for deserialization. DO NOT RENAME! - pub fn jsonParse( - allocator: std.mem.Allocator, - source: anytype, - options: std.json.ParseOptions, - ) !@This() { - return protobuf.json.parse(@This(), allocator, source, options); - } - - /// This method is used by std.json - /// internally for serialization. DO NOT RENAME! - pub fn jsonStringify(self: *const @This(), jws: anytype) !void { - return protobuf.json.stringify(@This(), self, jws); - } -}; - -/// gRPC Transcoding -/// -/// gRPC Transcoding is a feature for mapping between a gRPC method and one or -/// more HTTP REST endpoints. It allows developers to build a single API service -/// that supports both gRPC APIs and REST APIs. Many systems, including [Google -/// APIs](https://github.com/googleapis/googleapis), -/// [Cloud Endpoints](https://cloud.google.com/endpoints), [gRPC -/// Gateway](https://github.com/grpc-ecosystem/grpc-gateway), -/// and [Envoy](https://github.com/envoyproxy/envoy) proxy support this feature -/// and use it for large scale production services. -/// -/// `HttpRule` defines the schema of the gRPC/REST mapping. The mapping specifies -/// how different portions of the gRPC request message are mapped to the URL -/// path, URL query parameters, and HTTP request body. It also controls how the -/// gRPC response message is mapped to the HTTP response body. `HttpRule` is -/// typically specified as an `google.api.http` annotation on the gRPC method. -/// -/// Each mapping specifies a URL path template and an HTTP method. The path -/// template may refer to one or more fields in the gRPC request message, as long -/// as each field is a non-repeated field with a primitive (non-message) type. -/// The path template controls how fields of the request message are mapped to -/// the URL path. 
-/// -/// Example: -/// -/// service Messaging { -/// rpc GetMessage(GetMessageRequest) returns (Message) { -/// option (google.api.http) = { -/// get: "/v1/{name=messages/*}" -/// }; -/// } -/// } -/// message GetMessageRequest { -/// string name = 1; // Mapped to URL path. -/// } -/// message Message { -/// string text = 1; // The resource content. -/// } -/// -/// This enables an HTTP REST to gRPC mapping as below: -/// -/// - HTTP: `GET /v1/messages/123456` -/// - gRPC: `GetMessage(name: "messages/123456")` -/// -/// Any fields in the request message which are not bound by the path template -/// automatically become HTTP query parameters if there is no HTTP request body. -/// For example: -/// -/// service Messaging { -/// rpc GetMessage(GetMessageRequest) returns (Message) { -/// option (google.api.http) = { -/// get:"/v1/messages/{message_id}" -/// }; -/// } -/// } -/// message GetMessageRequest { -/// message SubMessage { -/// string subfield = 1; -/// } -/// string message_id = 1; // Mapped to URL path. -/// int64 revision = 2; // Mapped to URL query parameter `revision`. -/// SubMessage sub = 3; // Mapped to URL query parameter `sub.subfield`. -/// } -/// -/// This enables a HTTP JSON to RPC mapping as below: -/// -/// - HTTP: `GET /v1/messages/123456?revision=2&sub.subfield=foo` -/// - gRPC: `GetMessage(message_id: "123456" revision: 2 sub: -/// SubMessage(subfield: "foo"))` -/// -/// Note that fields which are mapped to URL query parameters must have a -/// primitive type or a repeated primitive type or a non-repeated message type. -/// In the case of a repeated type, the parameter can be repeated in the URL -/// as `...?param=A&param=B`. In the case of a message type, each field of the -/// message is mapped to a separate parameter, such as -/// `...?foo.a=A&foo.b=B&foo.c=C`. -/// -/// For HTTP methods that allow a request body, the `body` field -/// specifies the mapping. Consider a REST update method on the -/// message resource collection: -/// -/// service Messaging { -/// rpc UpdateMessage(UpdateMessageRequest) returns (Message) { -/// option (google.api.http) = { -/// patch: "/v1/messages/{message_id}" -/// body: "message" -/// }; -/// } -/// } -/// message UpdateMessageRequest { -/// string message_id = 1; // mapped to the URL -/// Message message = 2; // mapped to the body -/// } -/// -/// The following HTTP JSON to RPC mapping is enabled, where the -/// representation of the JSON in the request body is determined by -/// protos JSON encoding: -/// -/// - HTTP: `PATCH /v1/messages/123456 { "text": "Hi!" }` -/// - gRPC: `UpdateMessage(message_id: "123456" message { text: "Hi!" })` -/// -/// The special name `*` can be used in the body mapping to define that -/// every field not bound by the path template should be mapped to the -/// request body. This enables the following alternative definition of -/// the update method: -/// -/// service Messaging { -/// rpc UpdateMessage(Message) returns (Message) { -/// option (google.api.http) = { -/// patch: "/v1/messages/{message_id}" -/// body: "*" -/// }; -/// } -/// } -/// message Message { -/// string message_id = 1; -/// string text = 2; -/// } -/// -/// -/// The following HTTP JSON to RPC mapping is enabled: -/// -/// - HTTP: `PATCH /v1/messages/123456 { "text": "Hi!" }` -/// - gRPC: `UpdateMessage(message_id: "123456" text: "Hi!")` -/// -/// Note that when using `*` in the body mapping, it is not possible to -/// have HTTP parameters, as all fields not bound by the path end in -/// the body.
This makes this option more rarely used in practice when -/// defining REST APIs. The common usage of `*` is in custom methods -/// which don't use the URL at all for transferring data. -/// -/// It is possible to define multiple HTTP methods for one RPC by using -/// the `additional_bindings` option. Example: -/// -/// service Messaging { -/// rpc GetMessage(GetMessageRequest) returns (Message) { -/// option (google.api.http) = { -/// get: "/v1/messages/{message_id}" -/// additional_bindings { -/// get: "/v1/users/{user_id}/messages/{message_id}" -/// } -/// }; -/// } -/// } -/// message GetMessageRequest { -/// string message_id = 1; -/// string user_id = 2; -/// } -/// -/// This enables the following two alternative HTTP JSON to RPC mappings: -/// -/// - HTTP: `GET /v1/messages/123456` -/// - gRPC: `GetMessage(message_id: "123456")` -/// -/// - HTTP: `GET /v1/users/me/messages/123456` -/// - gRPC: `GetMessage(user_id: "me" message_id: "123456")` -/// -/// Rules for HTTP mapping -/// -/// 1. Leaf request fields (recursive expansion nested messages in the request -/// message) are classified into three categories: -/// - Fields referred by the path template. They are passed via the URL path. -/// - Fields referred by the [HttpRule.body][google.api.HttpRule.body]. They -/// are passed via the HTTP -/// request body. -/// - All other fields are passed via the URL query parameters, and the -/// parameter name is the field path in the request message. A repeated -/// field can be represented as multiple query parameters under the same -/// name. -/// 2. If [HttpRule.body][google.api.HttpRule.body] is "*", there is no URL -/// query parameter, all fields -/// are passed via URL path and HTTP request body. -/// 3. If [HttpRule.body][google.api.HttpRule.body] is omitted, there is no HTTP -/// request body, all -/// fields are passed via URL path and URL query parameters. -/// -/// Path template syntax -/// -/// Template = "/" Segments [ Verb ] ; -/// Segments = Segment { "/" Segment } ; -/// Segment = "*" | "**" | LITERAL | Variable ; -/// Variable = "{" FieldPath [ "=" Segments ] "}" ; -/// FieldPath = IDENT { "." IDENT } ; -/// Verb = ":" LITERAL ; -/// -/// The syntax `*` matches a single URL path segment. The syntax `**` matches -/// zero or more URL path segments, which must be the last part of the URL path -/// except the `Verb`. -/// -/// The syntax `Variable` matches part of the URL path as specified by its -/// template. A variable template must not contain other variables. If a variable -/// matches a single path segment, its template may be omitted, e.g. `{var}` -/// is equivalent to `{var=*}`. -/// -/// The syntax `LITERAL` matches literal text in the URL path. If the `LITERAL` -/// contains any reserved character, such characters should be percent-encoded -/// before the matching. -/// -/// If a variable contains exactly one path segment, such as `"{var}"` or -/// `"{var=*}"`, when such a variable is expanded into a URL path on the client -/// side, all characters except `[-_.~0-9a-zA-Z]` are percent-encoded. The -/// server side does the reverse decoding. Such variables show up in the -/// [Discovery -/// Document](https://developers.google.com/discovery/v1/reference/apis) as -/// `{var}`. -/// -/// If a variable contains multiple path segments, such as `"{var=foo/*}"` -/// or `"{var=**}"`, when such a variable is expanded into a URL path on the -/// client side, all characters except `[-_.~/0-9a-zA-Z]` are percent-encoded. 
-/// The server side does the reverse decoding, except "%2F" and "%2f" are left -/// unchanged. Such variables show up in the -/// [Discovery -/// Document](https://developers.google.com/discovery/v1/reference/apis) as -/// `{+var}`. -/// -/// Using gRPC API Service Configuration -/// -/// gRPC API Service Configuration (service config) is a configuration language -/// for configuring a gRPC service to become a user-facing product. The -/// service config is simply the YAML representation of the `google.api.Service` -/// proto message. -/// -/// As an alternative to annotating your proto file, you can configure gRPC -/// transcoding in your service config YAML files. You do this by specifying a -/// `HttpRule` that maps the gRPC method to a REST endpoint, achieving the same -/// effect as the proto annotation. This can be particularly useful if you -/// have a proto that is reused in multiple services. Note that any transcoding -/// specified in the service config will override any matching transcoding -/// configuration in the proto. -/// -/// The following example selects a gRPC method and applies an `HttpRule` to it: -/// -/// http: -/// rules: -/// - selector: example.v1.Messaging.GetMessage -/// get: /v1/messages/{message_id}/{sub.subfield} -/// -/// Special notes -/// -/// When gRPC Transcoding is used to map a gRPC to JSON REST endpoints, the -/// proto to JSON conversion must follow the [proto3 -/// specification](https://developers.google.com/protocol-buffers/docs/proto3#json). -/// -/// While the single segment variable follows the semantics of -/// [RFC 6570](https://tools.ietf.org/html/rfc6570) Section 3.2.2 Simple String -/// Expansion, the multi segment variable **does not** follow RFC 6570 Section -/// 3.2.3 Reserved Expansion. The reason is that the Reserved Expansion -/// does not expand special characters like `?` and `#`, which would lead -/// to invalid URLs. As the result, gRPC Transcoding uses a custom encoding -/// for multi segment variables. -/// -/// The path variables **must not** refer to any repeated or mapped field, -/// because client libraries are not capable of handling such variable expansion. -/// -/// The path variables **must not** capture the leading "/" character. The reason -/// is that the most common use case "{var}" does not capture the leading "/" -/// character. For consistency, all path variables must share the same behavior. -/// -/// Repeated message fields must not be mapped to URL query parameters, because -/// no client library can support such complicated mapping. -/// -/// If an API needs to use a JSON array for request or response body, it can map -/// the request or response body to a repeated field. However, some gRPC -/// Transcoding implementations may not support this feature. 
-pub const HttpRule = struct { - selector: []const u8 = &.{}, - body: []const u8 = &.{}, - response_body: []const u8 = &.{}, - additional_bindings: std.ArrayListUnmanaged(HttpRule) = .empty, - pattern: ?pattern_union = null, - - pub const _pattern_case = enum { - get, - put, - post, - delete, - patch, - custom, - }; - pub const pattern_union = union(_pattern_case) { - get: []const u8, - put: []const u8, - post: []const u8, - delete: []const u8, - patch: []const u8, - custom: CustomHttpPattern, - pub const _desc_table = .{ - .get = fd(2, .{ .scalar = .string }), - .put = fd(3, .{ .scalar = .string }), - .post = fd(4, .{ .scalar = .string }), - .delete = fd(5, .{ .scalar = .string }), - .patch = fd(6, .{ .scalar = .string }), - .custom = fd(8, .submessage), - }; - }; - - pub const _desc_table = .{ - .selector = fd(1, .{ .scalar = .string }), - .body = fd(7, .{ .scalar = .string }), - .response_body = fd(12, .{ .scalar = .string }), - .additional_bindings = fd(11, .{ .repeated = .submessage }), - .pattern = fd(null, .{ .oneof = pattern_union }), - }; - - /// Encodes the message to the writer - /// The allocator is used to generate submessages internally. - /// Hence, an ArenaAllocator is a preferred choice if allocations are a bottleneck. - pub fn encode( - self: @This(), - writer: *std.Io.Writer, - allocator: std.mem.Allocator, - ) (std.Io.Writer.Error || std.mem.Allocator.Error)!void { - return protobuf.encode(writer, allocator, self); - } - - /// Decodes the message from the bytes read from the reader. - pub fn decode( - reader: *std.Io.Reader, - allocator: std.mem.Allocator, - ) (protobuf.DecodingError || std.Io.Reader.Error || std.mem.Allocator.Error)!@This() { - return protobuf.decode(@This(), reader, allocator); - } - - /// Deinitializes and frees the memory associated with the message. - pub fn deinit(self: *@This(), allocator: std.mem.Allocator) void { - return protobuf.deinit(allocator, self); - } - - /// Duplicates the message. - pub fn dupe(self: @This(), allocator: std.mem.Allocator) std.mem.Allocator.Error!@This() { - return protobuf.dupe(@This(), self, allocator); - } - - /// Decodes the message from the JSON string. - pub fn jsonDecode( - input: []const u8, - options: std.json.ParseOptions, - allocator: std.mem.Allocator, - ) !std.json.Parsed(@This()) { - return protobuf.json.decode(@This(), input, options, allocator); - } - - /// Encodes the message to a JSON string. - pub fn jsonEncode( - self: @This(), - options: std.json.Stringify.Options, - allocator: std.mem.Allocator, - ) ![]const u8 { - return protobuf.json.encode(self, options, allocator); - } - - /// This method is used by std.json - /// internally for deserialization. DO NOT RENAME! - pub fn jsonParse( - allocator: std.mem.Allocator, - source: anytype, - options: std.json.ParseOptions, - ) !@This() { - return protobuf.json.parse(@This(), allocator, source, options); - } - - /// This method is used by std.json - /// internally for serialization. DO NOT RENAME! - pub fn jsonStringify(self: *const @This(), jws: anytype) !void { - return protobuf.json.stringify(@This(), self, jws); - } -}; - -/// A custom pattern is used for defining custom HTTP verb. -pub const CustomHttpPattern = struct { - kind: []const u8 = &.{}, - path: []const u8 = &.{}, - - pub const _desc_table = .{ - .kind = fd(1, .{ .scalar = .string }), - .path = fd(2, .{ .scalar = .string }), - }; - - /// Encodes the message to the writer - /// The allocator is used to generate submessages internally. 
- /// Hence, an ArenaAllocator is a preferred choice if allocations are a bottleneck. - pub fn encode( - self: @This(), - writer: *std.Io.Writer, - allocator: std.mem.Allocator, - ) (std.Io.Writer.Error || std.mem.Allocator.Error)!void { - return protobuf.encode(writer, allocator, self); - } - - /// Decodes the message from the bytes read from the reader. - pub fn decode( - reader: *std.Io.Reader, - allocator: std.mem.Allocator, - ) (protobuf.DecodingError || std.Io.Reader.Error || std.mem.Allocator.Error)!@This() { - return protobuf.decode(@This(), reader, allocator); - } - - /// Deinitializes and frees the memory associated with the message. - pub fn deinit(self: *@This(), allocator: std.mem.Allocator) void { - return protobuf.deinit(allocator, self); - } - - /// Duplicates the message. - pub fn dupe(self: @This(), allocator: std.mem.Allocator) std.mem.Allocator.Error!@This() { - return protobuf.dupe(@This(), self, allocator); - } - - /// Decodes the message from the JSON string. - pub fn jsonDecode( - input: []const u8, - options: std.json.ParseOptions, - allocator: std.mem.Allocator, - ) !std.json.Parsed(@This()) { - return protobuf.json.decode(@This(), input, options, allocator); - } - - /// Encodes the message to a JSON string. - pub fn jsonEncode( - self: @This(), - options: std.json.Stringify.Options, - allocator: std.mem.Allocator, - ) ![]const u8 { - return protobuf.json.encode(self, options, allocator); - } - - /// This method is used by std.json - /// internally for deserialization. DO NOT RENAME! - pub fn jsonParse( - allocator: std.mem.Allocator, - source: anytype, - options: std.json.ParseOptions, - ) !@This() { - return protobuf.json.parse(@This(), allocator, source, options); - } - - /// This method is used by std.json - /// internally for serialization. DO NOT RENAME! - pub fn jsonStringify(self: *const @This(), jws: anytype) !void { - return protobuf.json.stringify(@This(), self, jws); - } -}; diff --git a/src/proto/google/protobuf.pb.zig b/src/proto/google/protobuf.pb.zig deleted file mode 100644 index d0e0e45..0000000 --- a/src/proto/google/protobuf.pb.zig +++ /dev/null @@ -1,2940 +0,0 @@ -// Code generated by protoc-gen-zig -///! package google.protobuf -const std = @import("std"); - -const protobuf = @import("protobuf"); -const fd = protobuf.fd; - -/// The full set of known editions. -pub const Edition = enum(i32) { - EDITION_UNKNOWN = 0, - EDITION_LEGACY = 900, - EDITION_PROTO2 = 998, - EDITION_PROTO3 = 999, - EDITION_2023 = 1000, - EDITION_2024 = 1001, - EDITION_1_TEST_ONLY = 1, - EDITION_2_TEST_ONLY = 2, - EDITION_99997_TEST_ONLY = 99997, - EDITION_99998_TEST_ONLY = 99998, - EDITION_99999_TEST_ONLY = 99999, - EDITION_MAX = 2147483647, - _, -}; - -/// Describes the 'visibility' of a symbol with respect to the proto import -/// system. Symbols can only be imported when the visibility rules do not prevent -/// it (ex: local symbols cannot be imported). Visibility modifiers can only set -/// on `message` and `enum` as they are the only types available to be referenced -/// from other files. -pub const SymbolVisibility = enum(i32) { - VISIBILITY_UNSET = 0, - VISIBILITY_LOCAL = 1, - VISIBILITY_EXPORT = 2, - _, -}; - -/// The protocol compiler can output a FileDescriptorSet containing the .proto -/// files it parses. 
-pub const FileDescriptorSet = struct { - file: std.ArrayListUnmanaged(FileDescriptorProto) = .empty, - - pub const _desc_table = .{ - .file = fd(1, .{ .repeated = .submessage }), - }; - - /// Encodes the message to the writer - /// The allocator is used to generate submessages internally. - /// Hence, an ArenaAllocator is a preferred choice if allocations are a bottleneck. - pub fn encode( - self: @This(), - writer: *std.Io.Writer, - allocator: std.mem.Allocator, - ) (std.Io.Writer.Error || std.mem.Allocator.Error)!void { - return protobuf.encode(writer, allocator, self); - } - - /// Decodes the message from the bytes read from the reader. - pub fn decode( - reader: *std.Io.Reader, - allocator: std.mem.Allocator, - ) (protobuf.DecodingError || std.Io.Reader.Error || std.mem.Allocator.Error)!@This() { - return protobuf.decode(@This(), reader, allocator); - } - - /// Deinitializes and frees the memory associated with the message. - pub fn deinit(self: *@This(), allocator: std.mem.Allocator) void { - return protobuf.deinit(allocator, self); - } - - /// Duplicates the message. - pub fn dupe(self: @This(), allocator: std.mem.Allocator) std.mem.Allocator.Error!@This() { - return protobuf.dupe(@This(), self, allocator); - } - - /// Decodes the message from the JSON string. - pub fn jsonDecode( - input: []const u8, - options: std.json.ParseOptions, - allocator: std.mem.Allocator, - ) !std.json.Parsed(@This()) { - return protobuf.json.decode(@This(), input, options, allocator); - } - - /// Encodes the message to a JSON string. - pub fn jsonEncode( - self: @This(), - options: std.json.Stringify.Options, - allocator: std.mem.Allocator, - ) ![]const u8 { - return protobuf.json.encode(self, options, allocator); - } - - /// This method is used by std.json - /// internally for deserialization. DO NOT RENAME! - pub fn jsonParse( - allocator: std.mem.Allocator, - source: anytype, - options: std.json.ParseOptions, - ) !@This() { - return protobuf.json.parse(@This(), allocator, source, options); - } - - /// This method is used by std.json - /// internally for serialization. DO NOT RENAME! - pub fn jsonStringify(self: *const @This(), jws: anytype) !void { - return protobuf.json.stringify(@This(), self, jws); - } -}; - -/// Describes a complete .proto file. 
-pub const FileDescriptorProto = struct { - name: ?[]const u8 = null, - package: ?[]const u8 = null, - dependency: std.ArrayListUnmanaged([]const u8) = .empty, - public_dependency: std.ArrayListUnmanaged(i32) = .empty, - weak_dependency: std.ArrayListUnmanaged(i32) = .empty, - option_dependency: std.ArrayListUnmanaged([]const u8) = .empty, - message_type: std.ArrayListUnmanaged(DescriptorProto) = .empty, - enum_type: std.ArrayListUnmanaged(EnumDescriptorProto) = .empty, - service: std.ArrayListUnmanaged(ServiceDescriptorProto) = .empty, - extension: std.ArrayListUnmanaged(FieldDescriptorProto) = .empty, - options: ?FileOptions = null, - source_code_info: ?SourceCodeInfo = null, - syntax: ?[]const u8 = null, - edition: ?Edition = null, - - pub const _desc_table = .{ - .name = fd(1, .{ .scalar = .string }), - .package = fd(2, .{ .scalar = .string }), - .dependency = fd(3, .{ .repeated = .{ .scalar = .string } }), - .public_dependency = fd(10, .{ .repeated = .{ .scalar = .int32 } }), - .weak_dependency = fd(11, .{ .repeated = .{ .scalar = .int32 } }), - .option_dependency = fd(15, .{ .repeated = .{ .scalar = .string } }), - .message_type = fd(4, .{ .repeated = .submessage }), - .enum_type = fd(5, .{ .repeated = .submessage }), - .service = fd(6, .{ .repeated = .submessage }), - .extension = fd(7, .{ .repeated = .submessage }), - .options = fd(8, .submessage), - .source_code_info = fd(9, .submessage), - .syntax = fd(12, .{ .scalar = .string }), - .edition = fd(14, .@"enum"), - }; - - /// Encodes the message to the writer - /// The allocator is used to generate submessages internally. - /// Hence, an ArenaAllocator is a preferred choice if allocations are a bottleneck. - pub fn encode( - self: @This(), - writer: *std.Io.Writer, - allocator: std.mem.Allocator, - ) (std.Io.Writer.Error || std.mem.Allocator.Error)!void { - return protobuf.encode(writer, allocator, self); - } - - /// Decodes the message from the bytes read from the reader. - pub fn decode( - reader: *std.Io.Reader, - allocator: std.mem.Allocator, - ) (protobuf.DecodingError || std.Io.Reader.Error || std.mem.Allocator.Error)!@This() { - return protobuf.decode(@This(), reader, allocator); - } - - /// Deinitializes and frees the memory associated with the message. - pub fn deinit(self: *@This(), allocator: std.mem.Allocator) void { - return protobuf.deinit(allocator, self); - } - - /// Duplicates the message. - pub fn dupe(self: @This(), allocator: std.mem.Allocator) std.mem.Allocator.Error!@This() { - return protobuf.dupe(@This(), self, allocator); - } - - /// Decodes the message from the JSON string. - pub fn jsonDecode( - input: []const u8, - options: std.json.ParseOptions, - allocator: std.mem.Allocator, - ) !std.json.Parsed(@This()) { - return protobuf.json.decode(@This(), input, options, allocator); - } - - /// Encodes the message to a JSON string. - pub fn jsonEncode( - self: @This(), - options: std.json.Stringify.Options, - allocator: std.mem.Allocator, - ) ![]const u8 { - return protobuf.json.encode(self, options, allocator); - } - - /// This method is used by std.json - /// internally for deserialization. DO NOT RENAME! - pub fn jsonParse( - allocator: std.mem.Allocator, - source: anytype, - options: std.json.ParseOptions, - ) !@This() { - return protobuf.json.parse(@This(), allocator, source, options); - } - - /// This method is used by std.json - /// internally for serialization. DO NOT RENAME! 
- pub fn jsonStringify(self: *const @This(), jws: anytype) !void { - return protobuf.json.stringify(@This(), self, jws); - } -}; - -/// Describes a message type. -pub const DescriptorProto = struct { - name: ?[]const u8 = null, - field: std.ArrayListUnmanaged(FieldDescriptorProto) = .empty, - extension: std.ArrayListUnmanaged(FieldDescriptorProto) = .empty, - nested_type: std.ArrayListUnmanaged(DescriptorProto) = .empty, - enum_type: std.ArrayListUnmanaged(EnumDescriptorProto) = .empty, - extension_range: std.ArrayListUnmanaged(DescriptorProto.ExtensionRange) = .empty, - oneof_decl: std.ArrayListUnmanaged(OneofDescriptorProto) = .empty, - options: ?MessageOptions = null, - reserved_range: std.ArrayListUnmanaged(DescriptorProto.ReservedRange) = .empty, - reserved_name: std.ArrayListUnmanaged([]const u8) = .empty, - visibility: ?SymbolVisibility = null, - - pub const _desc_table = .{ - .name = fd(1, .{ .scalar = .string }), - .field = fd(2, .{ .repeated = .submessage }), - .extension = fd(6, .{ .repeated = .submessage }), - .nested_type = fd(3, .{ .repeated = .submessage }), - .enum_type = fd(4, .{ .repeated = .submessage }), - .extension_range = fd(5, .{ .repeated = .submessage }), - .oneof_decl = fd(8, .{ .repeated = .submessage }), - .options = fd(7, .submessage), - .reserved_range = fd(9, .{ .repeated = .submessage }), - .reserved_name = fd(10, .{ .repeated = .{ .scalar = .string } }), - .visibility = fd(11, .@"enum"), - }; - - pub const ExtensionRange = struct { - start: ?i32 = null, - end: ?i32 = null, - options: ?ExtensionRangeOptions = null, - - pub const _desc_table = .{ - .start = fd(1, .{ .scalar = .int32 }), - .end = fd(2, .{ .scalar = .int32 }), - .options = fd(3, .submessage), - }; - - /// Encodes the message to the writer - /// The allocator is used to generate submessages internally. - /// Hence, an ArenaAllocator is a preferred choice if allocations are a bottleneck. - pub fn encode( - self: @This(), - writer: *std.Io.Writer, - allocator: std.mem.Allocator, - ) (std.Io.Writer.Error || std.mem.Allocator.Error)!void { - return protobuf.encode(writer, allocator, self); - } - - /// Decodes the message from the bytes read from the reader. - pub fn decode( - reader: *std.Io.Reader, - allocator: std.mem.Allocator, - ) (protobuf.DecodingError || std.Io.Reader.Error || std.mem.Allocator.Error)!@This() { - return protobuf.decode(@This(), reader, allocator); - } - - /// Deinitializes and frees the memory associated with the message. - pub fn deinit(self: *@This(), allocator: std.mem.Allocator) void { - return protobuf.deinit(allocator, self); - } - - /// Duplicates the message. - pub fn dupe(self: @This(), allocator: std.mem.Allocator) std.mem.Allocator.Error!@This() { - return protobuf.dupe(@This(), self, allocator); - } - - /// Decodes the message from the JSON string. - pub fn jsonDecode( - input: []const u8, - options: std.json.ParseOptions, - allocator: std.mem.Allocator, - ) !std.json.Parsed(@This()) { - return protobuf.json.decode(@This(), input, options, allocator); - } - - /// Encodes the message to a JSON string. - pub fn jsonEncode( - self: @This(), - options: std.json.Stringify.Options, - allocator: std.mem.Allocator, - ) ![]const u8 { - return protobuf.json.encode(self, options, allocator); - } - - /// This method is used by std.json - /// internally for deserialization. DO NOT RENAME! 
- pub fn jsonParse( - allocator: std.mem.Allocator, - source: anytype, - options: std.json.ParseOptions, - ) !@This() { - return protobuf.json.parse(@This(), allocator, source, options); - } - - /// This method is used by std.json - /// internally for serialization. DO NOT RENAME! - pub fn jsonStringify(self: *const @This(), jws: anytype) !void { - return protobuf.json.stringify(@This(), self, jws); - } - }; - - /// Range of reserved tag numbers. Reserved tag numbers may not be used by - /// fields or extension ranges in the same message. Reserved ranges may - /// not overlap. - pub const ReservedRange = struct { - start: ?i32 = null, - end: ?i32 = null, - - pub const _desc_table = .{ - .start = fd(1, .{ .scalar = .int32 }), - .end = fd(2, .{ .scalar = .int32 }), - }; - - /// Encodes the message to the writer - /// The allocator is used to generate submessages internally. - /// Hence, an ArenaAllocator is a preferred choice if allocations are a bottleneck. - pub fn encode( - self: @This(), - writer: *std.Io.Writer, - allocator: std.mem.Allocator, - ) (std.Io.Writer.Error || std.mem.Allocator.Error)!void { - return protobuf.encode(writer, allocator, self); - } - - /// Decodes the message from the bytes read from the reader. - pub fn decode( - reader: *std.Io.Reader, - allocator: std.mem.Allocator, - ) (protobuf.DecodingError || std.Io.Reader.Error || std.mem.Allocator.Error)!@This() { - return protobuf.decode(@This(), reader, allocator); - } - - /// Deinitializes and frees the memory associated with the message. - pub fn deinit(self: *@This(), allocator: std.mem.Allocator) void { - return protobuf.deinit(allocator, self); - } - - /// Duplicates the message. - pub fn dupe(self: @This(), allocator: std.mem.Allocator) std.mem.Allocator.Error!@This() { - return protobuf.dupe(@This(), self, allocator); - } - - /// Decodes the message from the JSON string. - pub fn jsonDecode( - input: []const u8, - options: std.json.ParseOptions, - allocator: std.mem.Allocator, - ) !std.json.Parsed(@This()) { - return protobuf.json.decode(@This(), input, options, allocator); - } - - /// Encodes the message to a JSON string. - pub fn jsonEncode( - self: @This(), - options: std.json.Stringify.Options, - allocator: std.mem.Allocator, - ) ![]const u8 { - return protobuf.json.encode(self, options, allocator); - } - - /// This method is used by std.json - /// internally for deserialization. DO NOT RENAME! - pub fn jsonParse( - allocator: std.mem.Allocator, - source: anytype, - options: std.json.ParseOptions, - ) !@This() { - return protobuf.json.parse(@This(), allocator, source, options); - } - - /// This method is used by std.json - /// internally for serialization. DO NOT RENAME! - pub fn jsonStringify(self: *const @This(), jws: anytype) !void { - return protobuf.json.stringify(@This(), self, jws); - } - }; - - /// Encodes the message to the writer - /// The allocator is used to generate submessages internally. - /// Hence, an ArenaAllocator is a preferred choice if allocations are a bottleneck. - pub fn encode( - self: @This(), - writer: *std.Io.Writer, - allocator: std.mem.Allocator, - ) (std.Io.Writer.Error || std.mem.Allocator.Error)!void { - return protobuf.encode(writer, allocator, self); - } - - /// Decodes the message from the bytes read from the reader. 
- pub fn decode( - reader: *std.Io.Reader, - allocator: std.mem.Allocator, - ) (protobuf.DecodingError || std.Io.Reader.Error || std.mem.Allocator.Error)!@This() { - return protobuf.decode(@This(), reader, allocator); - } - - /// Deinitializes and frees the memory associated with the message. - pub fn deinit(self: *@This(), allocator: std.mem.Allocator) void { - return protobuf.deinit(allocator, self); - } - - /// Duplicates the message. - pub fn dupe(self: @This(), allocator: std.mem.Allocator) std.mem.Allocator.Error!@This() { - return protobuf.dupe(@This(), self, allocator); - } - - /// Decodes the message from the JSON string. - pub fn jsonDecode( - input: []const u8, - options: std.json.ParseOptions, - allocator: std.mem.Allocator, - ) !std.json.Parsed(@This()) { - return protobuf.json.decode(@This(), input, options, allocator); - } - - /// Encodes the message to a JSON string. - pub fn jsonEncode( - self: @This(), - options: std.json.Stringify.Options, - allocator: std.mem.Allocator, - ) ![]const u8 { - return protobuf.json.encode(self, options, allocator); - } - - /// This method is used by std.json - /// internally for deserialization. DO NOT RENAME! - pub fn jsonParse( - allocator: std.mem.Allocator, - source: anytype, - options: std.json.ParseOptions, - ) !@This() { - return protobuf.json.parse(@This(), allocator, source, options); - } - - /// This method is used by std.json - /// internally for serialization. DO NOT RENAME! - pub fn jsonStringify(self: *const @This(), jws: anytype) !void { - return protobuf.json.stringify(@This(), self, jws); - } -}; - -pub const ExtensionRangeOptions = struct { - uninterpreted_option: std.ArrayListUnmanaged(UninterpretedOption) = .empty, - declaration: std.ArrayListUnmanaged(ExtensionRangeOptions.Declaration) = .empty, - features: ?FeatureSet = null, - verification: ?ExtensionRangeOptions.VerificationState = .UNVERIFIED, - - pub const _desc_table = .{ - .uninterpreted_option = fd(999, .{ .repeated = .submessage }), - .declaration = fd(2, .{ .repeated = .submessage }), - .features = fd(50, .submessage), - .verification = fd(3, .@"enum"), - }; - - /// The verification state of the extension range. - pub const VerificationState = enum(i32) { - DECLARATION = 0, - UNVERIFIED = 1, - _, - }; - - pub const Declaration = struct { - number: ?i32 = null, - full_name: ?[]const u8 = null, - type: ?[]const u8 = null, - reserved: ?bool = null, - repeated: ?bool = null, - - pub const _desc_table = .{ - .number = fd(1, .{ .scalar = .int32 }), - .full_name = fd(2, .{ .scalar = .string }), - .type = fd(3, .{ .scalar = .string }), - .reserved = fd(5, .{ .scalar = .bool }), - .repeated = fd(6, .{ .scalar = .bool }), - }; - - /// Encodes the message to the writer - /// The allocator is used to generate submessages internally. - /// Hence, an ArenaAllocator is a preferred choice if allocations are a bottleneck. - pub fn encode( - self: @This(), - writer: *std.Io.Writer, - allocator: std.mem.Allocator, - ) (std.Io.Writer.Error || std.mem.Allocator.Error)!void { - return protobuf.encode(writer, allocator, self); - } - - /// Decodes the message from the bytes read from the reader. - pub fn decode( - reader: *std.Io.Reader, - allocator: std.mem.Allocator, - ) (protobuf.DecodingError || std.Io.Reader.Error || std.mem.Allocator.Error)!@This() { - return protobuf.decode(@This(), reader, allocator); - } - - /// Deinitializes and frees the memory associated with the message. 
- pub fn deinit(self: *@This(), allocator: std.mem.Allocator) void { - return protobuf.deinit(allocator, self); - } - - /// Duplicates the message. - pub fn dupe(self: @This(), allocator: std.mem.Allocator) std.mem.Allocator.Error!@This() { - return protobuf.dupe(@This(), self, allocator); - } - - /// Decodes the message from the JSON string. - pub fn jsonDecode( - input: []const u8, - options: std.json.ParseOptions, - allocator: std.mem.Allocator, - ) !std.json.Parsed(@This()) { - return protobuf.json.decode(@This(), input, options, allocator); - } - - /// Encodes the message to a JSON string. - pub fn jsonEncode( - self: @This(), - options: std.json.Stringify.Options, - allocator: std.mem.Allocator, - ) ![]const u8 { - return protobuf.json.encode(self, options, allocator); - } - - /// This method is used by std.json - /// internally for deserialization. DO NOT RENAME! - pub fn jsonParse( - allocator: std.mem.Allocator, - source: anytype, - options: std.json.ParseOptions, - ) !@This() { - return protobuf.json.parse(@This(), allocator, source, options); - } - - /// This method is used by std.json - /// internally for serialization. DO NOT RENAME! - pub fn jsonStringify(self: *const @This(), jws: anytype) !void { - return protobuf.json.stringify(@This(), self, jws); - } - }; - - /// Encodes the message to the writer - /// The allocator is used to generate submessages internally. - /// Hence, an ArenaAllocator is a preferred choice if allocations are a bottleneck. - pub fn encode( - self: @This(), - writer: *std.Io.Writer, - allocator: std.mem.Allocator, - ) (std.Io.Writer.Error || std.mem.Allocator.Error)!void { - return protobuf.encode(writer, allocator, self); - } - - /// Decodes the message from the bytes read from the reader. - pub fn decode( - reader: *std.Io.Reader, - allocator: std.mem.Allocator, - ) (protobuf.DecodingError || std.Io.Reader.Error || std.mem.Allocator.Error)!@This() { - return protobuf.decode(@This(), reader, allocator); - } - - /// Deinitializes and frees the memory associated with the message. - pub fn deinit(self: *@This(), allocator: std.mem.Allocator) void { - return protobuf.deinit(allocator, self); - } - - /// Duplicates the message. - pub fn dupe(self: @This(), allocator: std.mem.Allocator) std.mem.Allocator.Error!@This() { - return protobuf.dupe(@This(), self, allocator); - } - - /// Decodes the message from the JSON string. - pub fn jsonDecode( - input: []const u8, - options: std.json.ParseOptions, - allocator: std.mem.Allocator, - ) !std.json.Parsed(@This()) { - return protobuf.json.decode(@This(), input, options, allocator); - } - - /// Encodes the message to a JSON string. - pub fn jsonEncode( - self: @This(), - options: std.json.Stringify.Options, - allocator: std.mem.Allocator, - ) ![]const u8 { - return protobuf.json.encode(self, options, allocator); - } - - /// This method is used by std.json - /// internally for deserialization. DO NOT RENAME! - pub fn jsonParse( - allocator: std.mem.Allocator, - source: anytype, - options: std.json.ParseOptions, - ) !@This() { - return protobuf.json.parse(@This(), allocator, source, options); - } - - /// This method is used by std.json - /// internally for serialization. DO NOT RENAME! - pub fn jsonStringify(self: *const @This(), jws: anytype) !void { - return protobuf.json.stringify(@This(), self, jws); - } -}; - -/// Describes a field within a message. 
-pub const FieldDescriptorProto = struct { - name: ?[]const u8 = null, - number: ?i32 = null, - label: ?FieldDescriptorProto.Label = null, - type: ?FieldDescriptorProto.Type = null, - type_name: ?[]const u8 = null, - extendee: ?[]const u8 = null, - default_value: ?[]const u8 = null, - oneof_index: ?i32 = null, - json_name: ?[]const u8 = null, - options: ?FieldOptions = null, - proto3_optional: ?bool = null, - - pub const _desc_table = .{ - .name = fd(1, .{ .scalar = .string }), - .number = fd(3, .{ .scalar = .int32 }), - .label = fd(4, .@"enum"), - .type = fd(5, .@"enum"), - .type_name = fd(6, .{ .scalar = .string }), - .extendee = fd(2, .{ .scalar = .string }), - .default_value = fd(7, .{ .scalar = .string }), - .oneof_index = fd(9, .{ .scalar = .int32 }), - .json_name = fd(10, .{ .scalar = .string }), - .options = fd(8, .submessage), - .proto3_optional = fd(17, .{ .scalar = .bool }), - }; - - pub const Type = enum(i32) { - TYPE_DOUBLE = 1, - TYPE_FLOAT = 2, - TYPE_INT64 = 3, - TYPE_UINT64 = 4, - TYPE_INT32 = 5, - TYPE_FIXED64 = 6, - TYPE_FIXED32 = 7, - TYPE_BOOL = 8, - TYPE_STRING = 9, - TYPE_GROUP = 10, - TYPE_MESSAGE = 11, - TYPE_BYTES = 12, - TYPE_UINT32 = 13, - TYPE_ENUM = 14, - TYPE_SFIXED32 = 15, - TYPE_SFIXED64 = 16, - TYPE_SINT32 = 17, - TYPE_SINT64 = 18, - _, - }; - - pub const Label = enum(i32) { - LABEL_OPTIONAL = 1, - LABEL_REPEATED = 3, - LABEL_REQUIRED = 2, - _, - }; - - /// Encodes the message to the writer - /// The allocator is used to generate submessages internally. - /// Hence, an ArenaAllocator is a preferred choice if allocations are a bottleneck. - pub fn encode( - self: @This(), - writer: *std.Io.Writer, - allocator: std.mem.Allocator, - ) (std.Io.Writer.Error || std.mem.Allocator.Error)!void { - return protobuf.encode(writer, allocator, self); - } - - /// Decodes the message from the bytes read from the reader. - pub fn decode( - reader: *std.Io.Reader, - allocator: std.mem.Allocator, - ) (protobuf.DecodingError || std.Io.Reader.Error || std.mem.Allocator.Error)!@This() { - return protobuf.decode(@This(), reader, allocator); - } - - /// Deinitializes and frees the memory associated with the message. - pub fn deinit(self: *@This(), allocator: std.mem.Allocator) void { - return protobuf.deinit(allocator, self); - } - - /// Duplicates the message. - pub fn dupe(self: @This(), allocator: std.mem.Allocator) std.mem.Allocator.Error!@This() { - return protobuf.dupe(@This(), self, allocator); - } - - /// Decodes the message from the JSON string. - pub fn jsonDecode( - input: []const u8, - options: std.json.ParseOptions, - allocator: std.mem.Allocator, - ) !std.json.Parsed(@This()) { - return protobuf.json.decode(@This(), input, options, allocator); - } - - /// Encodes the message to a JSON string. - pub fn jsonEncode( - self: @This(), - options: std.json.Stringify.Options, - allocator: std.mem.Allocator, - ) ![]const u8 { - return protobuf.json.encode(self, options, allocator); - } - - /// This method is used by std.json - /// internally for deserialization. DO NOT RENAME! - pub fn jsonParse( - allocator: std.mem.Allocator, - source: anytype, - options: std.json.ParseOptions, - ) !@This() { - return protobuf.json.parse(@This(), allocator, source, options); - } - - /// This method is used by std.json - /// internally for serialization. DO NOT RENAME! - pub fn jsonStringify(self: *const @This(), jws: anytype) !void { - return protobuf.json.stringify(@This(), self, jws); - } -}; - -/// Describes a oneof. 
-pub const OneofDescriptorProto = struct { - name: ?[]const u8 = null, - options: ?OneofOptions = null, - - pub const _desc_table = .{ - .name = fd(1, .{ .scalar = .string }), - .options = fd(2, .submessage), - }; - - /// Encodes the message to the writer - /// The allocator is used to generate submessages internally. - /// Hence, an ArenaAllocator is a preferred choice if allocations are a bottleneck. - pub fn encode( - self: @This(), - writer: *std.Io.Writer, - allocator: std.mem.Allocator, - ) (std.Io.Writer.Error || std.mem.Allocator.Error)!void { - return protobuf.encode(writer, allocator, self); - } - - /// Decodes the message from the bytes read from the reader. - pub fn decode( - reader: *std.Io.Reader, - allocator: std.mem.Allocator, - ) (protobuf.DecodingError || std.Io.Reader.Error || std.mem.Allocator.Error)!@This() { - return protobuf.decode(@This(), reader, allocator); - } - - /// Deinitializes and frees the memory associated with the message. - pub fn deinit(self: *@This(), allocator: std.mem.Allocator) void { - return protobuf.deinit(allocator, self); - } - - /// Duplicates the message. - pub fn dupe(self: @This(), allocator: std.mem.Allocator) std.mem.Allocator.Error!@This() { - return protobuf.dupe(@This(), self, allocator); - } - - /// Decodes the message from the JSON string. - pub fn jsonDecode( - input: []const u8, - options: std.json.ParseOptions, - allocator: std.mem.Allocator, - ) !std.json.Parsed(@This()) { - return protobuf.json.decode(@This(), input, options, allocator); - } - - /// Encodes the message to a JSON string. - pub fn jsonEncode( - self: @This(), - options: std.json.Stringify.Options, - allocator: std.mem.Allocator, - ) ![]const u8 { - return protobuf.json.encode(self, options, allocator); - } - - /// This method is used by std.json - /// internally for deserialization. DO NOT RENAME! - pub fn jsonParse( - allocator: std.mem.Allocator, - source: anytype, - options: std.json.ParseOptions, - ) !@This() { - return protobuf.json.parse(@This(), allocator, source, options); - } - - /// This method is used by std.json - /// internally for serialization. DO NOT RENAME! - pub fn jsonStringify(self: *const @This(), jws: anytype) !void { - return protobuf.json.stringify(@This(), self, jws); - } -}; - -/// Describes an enum type. -pub const EnumDescriptorProto = struct { - name: ?[]const u8 = null, - value: std.ArrayListUnmanaged(EnumValueDescriptorProto) = .empty, - options: ?EnumOptions = null, - reserved_range: std.ArrayListUnmanaged(EnumDescriptorProto.EnumReservedRange) = .empty, - reserved_name: std.ArrayListUnmanaged([]const u8) = .empty, - visibility: ?SymbolVisibility = null, - - pub const _desc_table = .{ - .name = fd(1, .{ .scalar = .string }), - .value = fd(2, .{ .repeated = .submessage }), - .options = fd(3, .submessage), - .reserved_range = fd(4, .{ .repeated = .submessage }), - .reserved_name = fd(5, .{ .repeated = .{ .scalar = .string } }), - .visibility = fd(6, .@"enum"), - }; - - /// Range of reserved numeric values. Reserved values may not be used by - /// entries in the same enum. Reserved ranges may not overlap. - /// - /// Note that this is distinct from DescriptorProto.ReservedRange in that it - /// is inclusive such that it can appropriately represent the entire int32 - /// domain. 
- pub const EnumReservedRange = struct { - start: ?i32 = null, - end: ?i32 = null, - - pub const _desc_table = .{ - .start = fd(1, .{ .scalar = .int32 }), - .end = fd(2, .{ .scalar = .int32 }), - }; - - /// Encodes the message to the writer - /// The allocator is used to generate submessages internally. - /// Hence, an ArenaAllocator is a preferred choice if allocations are a bottleneck. - pub fn encode( - self: @This(), - writer: *std.Io.Writer, - allocator: std.mem.Allocator, - ) (std.Io.Writer.Error || std.mem.Allocator.Error)!void { - return protobuf.encode(writer, allocator, self); - } - - /// Decodes the message from the bytes read from the reader. - pub fn decode( - reader: *std.Io.Reader, - allocator: std.mem.Allocator, - ) (protobuf.DecodingError || std.Io.Reader.Error || std.mem.Allocator.Error)!@This() { - return protobuf.decode(@This(), reader, allocator); - } - - /// Deinitializes and frees the memory associated with the message. - pub fn deinit(self: *@This(), allocator: std.mem.Allocator) void { - return protobuf.deinit(allocator, self); - } - - /// Duplicates the message. - pub fn dupe(self: @This(), allocator: std.mem.Allocator) std.mem.Allocator.Error!@This() { - return protobuf.dupe(@This(), self, allocator); - } - - /// Decodes the message from the JSON string. - pub fn jsonDecode( - input: []const u8, - options: std.json.ParseOptions, - allocator: std.mem.Allocator, - ) !std.json.Parsed(@This()) { - return protobuf.json.decode(@This(), input, options, allocator); - } - - /// Encodes the message to a JSON string. - pub fn jsonEncode( - self: @This(), - options: std.json.Stringify.Options, - allocator: std.mem.Allocator, - ) ![]const u8 { - return protobuf.json.encode(self, options, allocator); - } - - /// This method is used by std.json - /// internally for deserialization. DO NOT RENAME! - pub fn jsonParse( - allocator: std.mem.Allocator, - source: anytype, - options: std.json.ParseOptions, - ) !@This() { - return protobuf.json.parse(@This(), allocator, source, options); - } - - /// This method is used by std.json - /// internally for serialization. DO NOT RENAME! - pub fn jsonStringify(self: *const @This(), jws: anytype) !void { - return protobuf.json.stringify(@This(), self, jws); - } - }; - - /// Encodes the message to the writer - /// The allocator is used to generate submessages internally. - /// Hence, an ArenaAllocator is a preferred choice if allocations are a bottleneck. - pub fn encode( - self: @This(), - writer: *std.Io.Writer, - allocator: std.mem.Allocator, - ) (std.Io.Writer.Error || std.mem.Allocator.Error)!void { - return protobuf.encode(writer, allocator, self); - } - - /// Decodes the message from the bytes read from the reader. - pub fn decode( - reader: *std.Io.Reader, - allocator: std.mem.Allocator, - ) (protobuf.DecodingError || std.Io.Reader.Error || std.mem.Allocator.Error)!@This() { - return protobuf.decode(@This(), reader, allocator); - } - - /// Deinitializes and frees the memory associated with the message. - pub fn deinit(self: *@This(), allocator: std.mem.Allocator) void { - return protobuf.deinit(allocator, self); - } - - /// Duplicates the message. - pub fn dupe(self: @This(), allocator: std.mem.Allocator) std.mem.Allocator.Error!@This() { - return protobuf.dupe(@This(), self, allocator); - } - - /// Decodes the message from the JSON string. 
- pub fn jsonDecode( - input: []const u8, - options: std.json.ParseOptions, - allocator: std.mem.Allocator, - ) !std.json.Parsed(@This()) { - return protobuf.json.decode(@This(), input, options, allocator); - } - - /// Encodes the message to a JSON string. - pub fn jsonEncode( - self: @This(), - options: std.json.Stringify.Options, - allocator: std.mem.Allocator, - ) ![]const u8 { - return protobuf.json.encode(self, options, allocator); - } - - /// This method is used by std.json - /// internally for deserialization. DO NOT RENAME! - pub fn jsonParse( - allocator: std.mem.Allocator, - source: anytype, - options: std.json.ParseOptions, - ) !@This() { - return protobuf.json.parse(@This(), allocator, source, options); - } - - /// This method is used by std.json - /// internally for serialization. DO NOT RENAME! - pub fn jsonStringify(self: *const @This(), jws: anytype) !void { - return protobuf.json.stringify(@This(), self, jws); - } -}; - -/// Describes a value within an enum. -pub const EnumValueDescriptorProto = struct { - name: ?[]const u8 = null, - number: ?i32 = null, - options: ?EnumValueOptions = null, - - pub const _desc_table = .{ - .name = fd(1, .{ .scalar = .string }), - .number = fd(2, .{ .scalar = .int32 }), - .options = fd(3, .submessage), - }; - - /// Encodes the message to the writer - /// The allocator is used to generate submessages internally. - /// Hence, an ArenaAllocator is a preferred choice if allocations are a bottleneck. - pub fn encode( - self: @This(), - writer: *std.Io.Writer, - allocator: std.mem.Allocator, - ) (std.Io.Writer.Error || std.mem.Allocator.Error)!void { - return protobuf.encode(writer, allocator, self); - } - - /// Decodes the message from the bytes read from the reader. - pub fn decode( - reader: *std.Io.Reader, - allocator: std.mem.Allocator, - ) (protobuf.DecodingError || std.Io.Reader.Error || std.mem.Allocator.Error)!@This() { - return protobuf.decode(@This(), reader, allocator); - } - - /// Deinitializes and frees the memory associated with the message. - pub fn deinit(self: *@This(), allocator: std.mem.Allocator) void { - return protobuf.deinit(allocator, self); - } - - /// Duplicates the message. - pub fn dupe(self: @This(), allocator: std.mem.Allocator) std.mem.Allocator.Error!@This() { - return protobuf.dupe(@This(), self, allocator); - } - - /// Decodes the message from the JSON string. - pub fn jsonDecode( - input: []const u8, - options: std.json.ParseOptions, - allocator: std.mem.Allocator, - ) !std.json.Parsed(@This()) { - return protobuf.json.decode(@This(), input, options, allocator); - } - - /// Encodes the message to a JSON string. - pub fn jsonEncode( - self: @This(), - options: std.json.Stringify.Options, - allocator: std.mem.Allocator, - ) ![]const u8 { - return protobuf.json.encode(self, options, allocator); - } - - /// This method is used by std.json - /// internally for deserialization. DO NOT RENAME! - pub fn jsonParse( - allocator: std.mem.Allocator, - source: anytype, - options: std.json.ParseOptions, - ) !@This() { - return protobuf.json.parse(@This(), allocator, source, options); - } - - /// This method is used by std.json - /// internally for serialization. DO NOT RENAME! - pub fn jsonStringify(self: *const @This(), jws: anytype) !void { - return protobuf.json.stringify(@This(), self, jws); - } -}; - -/// Describes a service. 
-pub const ServiceDescriptorProto = struct { - name: ?[]const u8 = null, - method: std.ArrayListUnmanaged(MethodDescriptorProto) = .empty, - options: ?ServiceOptions = null, - - pub const _desc_table = .{ - .name = fd(1, .{ .scalar = .string }), - .method = fd(2, .{ .repeated = .submessage }), - .options = fd(3, .submessage), - }; - - /// Encodes the message to the writer - /// The allocator is used to generate submessages internally. - /// Hence, an ArenaAllocator is a preferred choice if allocations are a bottleneck. - pub fn encode( - self: @This(), - writer: *std.Io.Writer, - allocator: std.mem.Allocator, - ) (std.Io.Writer.Error || std.mem.Allocator.Error)!void { - return protobuf.encode(writer, allocator, self); - } - - /// Decodes the message from the bytes read from the reader. - pub fn decode( - reader: *std.Io.Reader, - allocator: std.mem.Allocator, - ) (protobuf.DecodingError || std.Io.Reader.Error || std.mem.Allocator.Error)!@This() { - return protobuf.decode(@This(), reader, allocator); - } - - /// Deinitializes and frees the memory associated with the message. - pub fn deinit(self: *@This(), allocator: std.mem.Allocator) void { - return protobuf.deinit(allocator, self); - } - - /// Duplicates the message. - pub fn dupe(self: @This(), allocator: std.mem.Allocator) std.mem.Allocator.Error!@This() { - return protobuf.dupe(@This(), self, allocator); - } - - /// Decodes the message from the JSON string. - pub fn jsonDecode( - input: []const u8, - options: std.json.ParseOptions, - allocator: std.mem.Allocator, - ) !std.json.Parsed(@This()) { - return protobuf.json.decode(@This(), input, options, allocator); - } - - /// Encodes the message to a JSON string. - pub fn jsonEncode( - self: @This(), - options: std.json.Stringify.Options, - allocator: std.mem.Allocator, - ) ![]const u8 { - return protobuf.json.encode(self, options, allocator); - } - - /// This method is used by std.json - /// internally for deserialization. DO NOT RENAME! - pub fn jsonParse( - allocator: std.mem.Allocator, - source: anytype, - options: std.json.ParseOptions, - ) !@This() { - return protobuf.json.parse(@This(), allocator, source, options); - } - - /// This method is used by std.json - /// internally for serialization. DO NOT RENAME! - pub fn jsonStringify(self: *const @This(), jws: anytype) !void { - return protobuf.json.stringify(@This(), self, jws); - } -}; - -/// Describes a method of a service. -pub const MethodDescriptorProto = struct { - name: ?[]const u8 = null, - input_type: ?[]const u8 = null, - output_type: ?[]const u8 = null, - options: ?MethodOptions = null, - client_streaming: ?bool = false, - server_streaming: ?bool = false, - - pub const _desc_table = .{ - .name = fd(1, .{ .scalar = .string }), - .input_type = fd(2, .{ .scalar = .string }), - .output_type = fd(3, .{ .scalar = .string }), - .options = fd(4, .submessage), - .client_streaming = fd(5, .{ .scalar = .bool }), - .server_streaming = fd(6, .{ .scalar = .bool }), - }; - - /// Encodes the message to the writer - /// The allocator is used to generate submessages internally. - /// Hence, an ArenaAllocator is a preferred choice if allocations are a bottleneck. - pub fn encode( - self: @This(), - writer: *std.Io.Writer, - allocator: std.mem.Allocator, - ) (std.Io.Writer.Error || std.mem.Allocator.Error)!void { - return protobuf.encode(writer, allocator, self); - } - - /// Decodes the message from the bytes read from the reader. 
- pub fn decode( - reader: *std.Io.Reader, - allocator: std.mem.Allocator, - ) (protobuf.DecodingError || std.Io.Reader.Error || std.mem.Allocator.Error)!@This() { - return protobuf.decode(@This(), reader, allocator); - } - - /// Deinitializes and frees the memory associated with the message. - pub fn deinit(self: *@This(), allocator: std.mem.Allocator) void { - return protobuf.deinit(allocator, self); - } - - /// Duplicates the message. - pub fn dupe(self: @This(), allocator: std.mem.Allocator) std.mem.Allocator.Error!@This() { - return protobuf.dupe(@This(), self, allocator); - } - - /// Decodes the message from the JSON string. - pub fn jsonDecode( - input: []const u8, - options: std.json.ParseOptions, - allocator: std.mem.Allocator, - ) !std.json.Parsed(@This()) { - return protobuf.json.decode(@This(), input, options, allocator); - } - - /// Encodes the message to a JSON string. - pub fn jsonEncode( - self: @This(), - options: std.json.Stringify.Options, - allocator: std.mem.Allocator, - ) ![]const u8 { - return protobuf.json.encode(self, options, allocator); - } - - /// This method is used by std.json - /// internally for deserialization. DO NOT RENAME! - pub fn jsonParse( - allocator: std.mem.Allocator, - source: anytype, - options: std.json.ParseOptions, - ) !@This() { - return protobuf.json.parse(@This(), allocator, source, options); - } - - /// This method is used by std.json - /// internally for serialization. DO NOT RENAME! - pub fn jsonStringify(self: *const @This(), jws: anytype) !void { - return protobuf.json.stringify(@This(), self, jws); - } -}; - -pub const FileOptions = struct { - java_package: ?[]const u8 = null, - java_outer_classname: ?[]const u8 = null, - java_multiple_files: ?bool = false, - java_generate_equals_and_hash: ?bool = null, - java_string_check_utf8: ?bool = false, - optimize_for: ?FileOptions.OptimizeMode = .SPEED, - go_package: ?[]const u8 = null, - cc_generic_services: ?bool = false, - java_generic_services: ?bool = false, - py_generic_services: ?bool = false, - deprecated: ?bool = false, - cc_enable_arenas: ?bool = true, - objc_class_prefix: ?[]const u8 = null, - csharp_namespace: ?[]const u8 = null, - swift_prefix: ?[]const u8 = null, - php_class_prefix: ?[]const u8 = null, - php_namespace: ?[]const u8 = null, - php_metadata_namespace: ?[]const u8 = null, - ruby_package: ?[]const u8 = null, - features: ?FeatureSet = null, - uninterpreted_option: std.ArrayListUnmanaged(UninterpretedOption) = .empty, - - pub const _desc_table = .{ - .java_package = fd(1, .{ .scalar = .string }), - .java_outer_classname = fd(8, .{ .scalar = .string }), - .java_multiple_files = fd(10, .{ .scalar = .bool }), - .java_generate_equals_and_hash = fd(20, .{ .scalar = .bool }), - .java_string_check_utf8 = fd(27, .{ .scalar = .bool }), - .optimize_for = fd(9, .@"enum"), - .go_package = fd(11, .{ .scalar = .string }), - .cc_generic_services = fd(16, .{ .scalar = .bool }), - .java_generic_services = fd(17, .{ .scalar = .bool }), - .py_generic_services = fd(18, .{ .scalar = .bool }), - .deprecated = fd(23, .{ .scalar = .bool }), - .cc_enable_arenas = fd(31, .{ .scalar = .bool }), - .objc_class_prefix = fd(36, .{ .scalar = .string }), - .csharp_namespace = fd(37, .{ .scalar = .string }), - .swift_prefix = fd(39, .{ .scalar = .string }), - .php_class_prefix = fd(40, .{ .scalar = .string }), - .php_namespace = fd(41, .{ .scalar = .string }), - .php_metadata_namespace = fd(44, .{ .scalar = .string }), - .ruby_package = fd(45, .{ .scalar = .string }), - .features = fd(50, .submessage), - 
.uninterpreted_option = fd(999, .{ .repeated = .submessage }), - }; - - /// Generated classes can be optimized for speed or code size. - pub const OptimizeMode = enum(i32) { - SPEED = 1, - CODE_SIZE = 2, - LITE_RUNTIME = 3, - _, - }; - - /// Encodes the message to the writer - /// The allocator is used to generate submessages internally. - /// Hence, an ArenaAllocator is a preferred choice if allocations are a bottleneck. - pub fn encode( - self: @This(), - writer: *std.Io.Writer, - allocator: std.mem.Allocator, - ) (std.Io.Writer.Error || std.mem.Allocator.Error)!void { - return protobuf.encode(writer, allocator, self); - } - - /// Decodes the message from the bytes read from the reader. - pub fn decode( - reader: *std.Io.Reader, - allocator: std.mem.Allocator, - ) (protobuf.DecodingError || std.Io.Reader.Error || std.mem.Allocator.Error)!@This() { - return protobuf.decode(@This(), reader, allocator); - } - - /// Deinitializes and frees the memory associated with the message. - pub fn deinit(self: *@This(), allocator: std.mem.Allocator) void { - return protobuf.deinit(allocator, self); - } - - /// Duplicates the message. - pub fn dupe(self: @This(), allocator: std.mem.Allocator) std.mem.Allocator.Error!@This() { - return protobuf.dupe(@This(), self, allocator); - } - - /// Decodes the message from the JSON string. - pub fn jsonDecode( - input: []const u8, - options: std.json.ParseOptions, - allocator: std.mem.Allocator, - ) !std.json.Parsed(@This()) { - return protobuf.json.decode(@This(), input, options, allocator); - } - - /// Encodes the message to a JSON string. - pub fn jsonEncode( - self: @This(), - options: std.json.Stringify.Options, - allocator: std.mem.Allocator, - ) ![]const u8 { - return protobuf.json.encode(self, options, allocator); - } - - /// This method is used by std.json - /// internally for deserialization. DO NOT RENAME! - pub fn jsonParse( - allocator: std.mem.Allocator, - source: anytype, - options: std.json.ParseOptions, - ) !@This() { - return protobuf.json.parse(@This(), allocator, source, options); - } - - /// This method is used by std.json - /// internally for serialization. DO NOT RENAME! - pub fn jsonStringify(self: *const @This(), jws: anytype) !void { - return protobuf.json.stringify(@This(), self, jws); - } -}; - -pub const MessageOptions = struct { - message_set_wire_format: ?bool = false, - no_standard_descriptor_accessor: ?bool = false, - deprecated: ?bool = false, - map_entry: ?bool = null, - deprecated_legacy_json_field_conflicts: ?bool = null, - features: ?FeatureSet = null, - uninterpreted_option: std.ArrayListUnmanaged(UninterpretedOption) = .empty, - - pub const _desc_table = .{ - .message_set_wire_format = fd(1, .{ .scalar = .bool }), - .no_standard_descriptor_accessor = fd(2, .{ .scalar = .bool }), - .deprecated = fd(3, .{ .scalar = .bool }), - .map_entry = fd(7, .{ .scalar = .bool }), - .deprecated_legacy_json_field_conflicts = fd(11, .{ .scalar = .bool }), - .features = fd(12, .submessage), - .uninterpreted_option = fd(999, .{ .repeated = .submessage }), - }; - - /// Encodes the message to the writer - /// The allocator is used to generate submessages internally. - /// Hence, an ArenaAllocator is a preferred choice if allocations are a bottleneck. - pub fn encode( - self: @This(), - writer: *std.Io.Writer, - allocator: std.mem.Allocator, - ) (std.Io.Writer.Error || std.mem.Allocator.Error)!void { - return protobuf.encode(writer, allocator, self); - } - - /// Decodes the message from the bytes read from the reader. 
- pub fn decode( - reader: *std.Io.Reader, - allocator: std.mem.Allocator, - ) (protobuf.DecodingError || std.Io.Reader.Error || std.mem.Allocator.Error)!@This() { - return protobuf.decode(@This(), reader, allocator); - } - - /// Deinitializes and frees the memory associated with the message. - pub fn deinit(self: *@This(), allocator: std.mem.Allocator) void { - return protobuf.deinit(allocator, self); - } - - /// Duplicates the message. - pub fn dupe(self: @This(), allocator: std.mem.Allocator) std.mem.Allocator.Error!@This() { - return protobuf.dupe(@This(), self, allocator); - } - - /// Decodes the message from the JSON string. - pub fn jsonDecode( - input: []const u8, - options: std.json.ParseOptions, - allocator: std.mem.Allocator, - ) !std.json.Parsed(@This()) { - return protobuf.json.decode(@This(), input, options, allocator); - } - - /// Encodes the message to a JSON string. - pub fn jsonEncode( - self: @This(), - options: std.json.Stringify.Options, - allocator: std.mem.Allocator, - ) ![]const u8 { - return protobuf.json.encode(self, options, allocator); - } - - /// This method is used by std.json - /// internally for deserialization. DO NOT RENAME! - pub fn jsonParse( - allocator: std.mem.Allocator, - source: anytype, - options: std.json.ParseOptions, - ) !@This() { - return protobuf.json.parse(@This(), allocator, source, options); - } - - /// This method is used by std.json - /// internally for serialization. DO NOT RENAME! - pub fn jsonStringify(self: *const @This(), jws: anytype) !void { - return protobuf.json.stringify(@This(), self, jws); - } -}; - -pub const FieldOptions = struct { - ctype: ?FieldOptions.CType = .STRING, - @"packed": ?bool = null, - jstype: ?FieldOptions.JSType = .JS_NORMAL, - lazy: ?bool = false, - unverified_lazy: ?bool = false, - deprecated: ?bool = false, - weak: ?bool = false, - debug_redact: ?bool = false, - retention: ?FieldOptions.OptionRetention = null, - targets: std.ArrayListUnmanaged(FieldOptions.OptionTargetType) = .empty, - edition_defaults: std.ArrayListUnmanaged(FieldOptions.EditionDefault) = .empty, - features: ?FeatureSet = null, - feature_support: ?FieldOptions.FeatureSupport = null, - uninterpreted_option: std.ArrayListUnmanaged(UninterpretedOption) = .empty, - - pub const _desc_table = .{ - .ctype = fd(1, .@"enum"), - .@"packed" = fd(2, .{ .scalar = .bool }), - .jstype = fd(6, .@"enum"), - .lazy = fd(5, .{ .scalar = .bool }), - .unverified_lazy = fd(15, .{ .scalar = .bool }), - .deprecated = fd(3, .{ .scalar = .bool }), - .weak = fd(10, .{ .scalar = .bool }), - .debug_redact = fd(16, .{ .scalar = .bool }), - .retention = fd(17, .@"enum"), - .targets = fd(19, .{ .repeated = .@"enum" }), - .edition_defaults = fd(20, .{ .repeated = .submessage }), - .features = fd(21, .submessage), - .feature_support = fd(22, .submessage), - .uninterpreted_option = fd(999, .{ .repeated = .submessage }), - }; - - pub const CType = enum(i32) { - STRING = 0, - CORD = 1, - STRING_PIECE = 2, - _, - }; - - pub const JSType = enum(i32) { - JS_NORMAL = 0, - JS_STRING = 1, - JS_NUMBER = 2, - _, - }; - - /// If set to RETENTION_SOURCE, the option will be omitted from the binary. - pub const OptionRetention = enum(i32) { - RETENTION_UNKNOWN = 0, - RETENTION_RUNTIME = 1, - RETENTION_SOURCE = 2, - _, - }; - - /// This indicates the types of entities that the field may apply to when used - /// as an option. If it is unset, then the field may be freely used as an - /// option on any kind of entity. 
- pub const OptionTargetType = enum(i32) { - TARGET_TYPE_UNKNOWN = 0, - TARGET_TYPE_FILE = 1, - TARGET_TYPE_EXTENSION_RANGE = 2, - TARGET_TYPE_MESSAGE = 3, - TARGET_TYPE_FIELD = 4, - TARGET_TYPE_ONEOF = 5, - TARGET_TYPE_ENUM = 6, - TARGET_TYPE_ENUM_ENTRY = 7, - TARGET_TYPE_SERVICE = 8, - TARGET_TYPE_METHOD = 9, - _, - }; - - pub const EditionDefault = struct { - edition: ?Edition = null, - value: ?[]const u8 = null, - - pub const _desc_table = .{ - .edition = fd(3, .@"enum"), - .value = fd(2, .{ .scalar = .string }), - }; - - /// Encodes the message to the writer - /// The allocator is used to generate submessages internally. - /// Hence, an ArenaAllocator is a preferred choice if allocations are a bottleneck. - pub fn encode( - self: @This(), - writer: *std.Io.Writer, - allocator: std.mem.Allocator, - ) (std.Io.Writer.Error || std.mem.Allocator.Error)!void { - return protobuf.encode(writer, allocator, self); - } - - /// Decodes the message from the bytes read from the reader. - pub fn decode( - reader: *std.Io.Reader, - allocator: std.mem.Allocator, - ) (protobuf.DecodingError || std.Io.Reader.Error || std.mem.Allocator.Error)!@This() { - return protobuf.decode(@This(), reader, allocator); - } - - /// Deinitializes and frees the memory associated with the message. - pub fn deinit(self: *@This(), allocator: std.mem.Allocator) void { - return protobuf.deinit(allocator, self); - } - - /// Duplicates the message. - pub fn dupe(self: @This(), allocator: std.mem.Allocator) std.mem.Allocator.Error!@This() { - return protobuf.dupe(@This(), self, allocator); - } - - /// Decodes the message from the JSON string. - pub fn jsonDecode( - input: []const u8, - options: std.json.ParseOptions, - allocator: std.mem.Allocator, - ) !std.json.Parsed(@This()) { - return protobuf.json.decode(@This(), input, options, allocator); - } - - /// Encodes the message to a JSON string. - pub fn jsonEncode( - self: @This(), - options: std.json.Stringify.Options, - allocator: std.mem.Allocator, - ) ![]const u8 { - return protobuf.json.encode(self, options, allocator); - } - - /// This method is used by std.json - /// internally for deserialization. DO NOT RENAME! - pub fn jsonParse( - allocator: std.mem.Allocator, - source: anytype, - options: std.json.ParseOptions, - ) !@This() { - return protobuf.json.parse(@This(), allocator, source, options); - } - - /// This method is used by std.json - /// internally for serialization. DO NOT RENAME! - pub fn jsonStringify(self: *const @This(), jws: anytype) !void { - return protobuf.json.stringify(@This(), self, jws); - } - }; - - /// Information about the support window of a feature. - pub const FeatureSupport = struct { - edition_introduced: ?Edition = null, - edition_deprecated: ?Edition = null, - deprecation_warning: ?[]const u8 = null, - edition_removed: ?Edition = null, - - pub const _desc_table = .{ - .edition_introduced = fd(1, .@"enum"), - .edition_deprecated = fd(2, .@"enum"), - .deprecation_warning = fd(3, .{ .scalar = .string }), - .edition_removed = fd(4, .@"enum"), - }; - - /// Encodes the message to the writer - /// The allocator is used to generate submessages internally. - /// Hence, an ArenaAllocator is a preferred choice if allocations are a bottleneck. - pub fn encode( - self: @This(), - writer: *std.Io.Writer, - allocator: std.mem.Allocator, - ) (std.Io.Writer.Error || std.mem.Allocator.Error)!void { - return protobuf.encode(writer, allocator, self); - } - - /// Decodes the message from the bytes read from the reader. 
- pub fn decode( - reader: *std.Io.Reader, - allocator: std.mem.Allocator, - ) (protobuf.DecodingError || std.Io.Reader.Error || std.mem.Allocator.Error)!@This() { - return protobuf.decode(@This(), reader, allocator); - } - - /// Deinitializes and frees the memory associated with the message. - pub fn deinit(self: *@This(), allocator: std.mem.Allocator) void { - return protobuf.deinit(allocator, self); - } - - /// Duplicates the message. - pub fn dupe(self: @This(), allocator: std.mem.Allocator) std.mem.Allocator.Error!@This() { - return protobuf.dupe(@This(), self, allocator); - } - - /// Decodes the message from the JSON string. - pub fn jsonDecode( - input: []const u8, - options: std.json.ParseOptions, - allocator: std.mem.Allocator, - ) !std.json.Parsed(@This()) { - return protobuf.json.decode(@This(), input, options, allocator); - } - - /// Encodes the message to a JSON string. - pub fn jsonEncode( - self: @This(), - options: std.json.Stringify.Options, - allocator: std.mem.Allocator, - ) ![]const u8 { - return protobuf.json.encode(self, options, allocator); - } - - /// This method is used by std.json - /// internally for deserialization. DO NOT RENAME! - pub fn jsonParse( - allocator: std.mem.Allocator, - source: anytype, - options: std.json.ParseOptions, - ) !@This() { - return protobuf.json.parse(@This(), allocator, source, options); - } - - /// This method is used by std.json - /// internally for serialization. DO NOT RENAME! - pub fn jsonStringify(self: *const @This(), jws: anytype) !void { - return protobuf.json.stringify(@This(), self, jws); - } - }; - - /// Encodes the message to the writer - /// The allocator is used to generate submessages internally. - /// Hence, an ArenaAllocator is a preferred choice if allocations are a bottleneck. - pub fn encode( - self: @This(), - writer: *std.Io.Writer, - allocator: std.mem.Allocator, - ) (std.Io.Writer.Error || std.mem.Allocator.Error)!void { - return protobuf.encode(writer, allocator, self); - } - - /// Decodes the message from the bytes read from the reader. - pub fn decode( - reader: *std.Io.Reader, - allocator: std.mem.Allocator, - ) (protobuf.DecodingError || std.Io.Reader.Error || std.mem.Allocator.Error)!@This() { - return protobuf.decode(@This(), reader, allocator); - } - - /// Deinitializes and frees the memory associated with the message. - pub fn deinit(self: *@This(), allocator: std.mem.Allocator) void { - return protobuf.deinit(allocator, self); - } - - /// Duplicates the message. - pub fn dupe(self: @This(), allocator: std.mem.Allocator) std.mem.Allocator.Error!@This() { - return protobuf.dupe(@This(), self, allocator); - } - - /// Decodes the message from the JSON string. - pub fn jsonDecode( - input: []const u8, - options: std.json.ParseOptions, - allocator: std.mem.Allocator, - ) !std.json.Parsed(@This()) { - return protobuf.json.decode(@This(), input, options, allocator); - } - - /// Encodes the message to a JSON string. - pub fn jsonEncode( - self: @This(), - options: std.json.Stringify.Options, - allocator: std.mem.Allocator, - ) ![]const u8 { - return protobuf.json.encode(self, options, allocator); - } - - /// This method is used by std.json - /// internally for deserialization. DO NOT RENAME! - pub fn jsonParse( - allocator: std.mem.Allocator, - source: anytype, - options: std.json.ParseOptions, - ) !@This() { - return protobuf.json.parse(@This(), allocator, source, options); - } - - /// This method is used by std.json - /// internally for serialization. DO NOT RENAME! 
- pub fn jsonStringify(self: *const @This(), jws: anytype) !void { - return protobuf.json.stringify(@This(), self, jws); - } -}; - -pub const OneofOptions = struct { - features: ?FeatureSet = null, - uninterpreted_option: std.ArrayListUnmanaged(UninterpretedOption) = .empty, - - pub const _desc_table = .{ - .features = fd(1, .submessage), - .uninterpreted_option = fd(999, .{ .repeated = .submessage }), - }; - - /// Encodes the message to the writer - /// The allocator is used to generate submessages internally. - /// Hence, an ArenaAllocator is a preferred choice if allocations are a bottleneck. - pub fn encode( - self: @This(), - writer: *std.Io.Writer, - allocator: std.mem.Allocator, - ) (std.Io.Writer.Error || std.mem.Allocator.Error)!void { - return protobuf.encode(writer, allocator, self); - } - - /// Decodes the message from the bytes read from the reader. - pub fn decode( - reader: *std.Io.Reader, - allocator: std.mem.Allocator, - ) (protobuf.DecodingError || std.Io.Reader.Error || std.mem.Allocator.Error)!@This() { - return protobuf.decode(@This(), reader, allocator); - } - - /// Deinitializes and frees the memory associated with the message. - pub fn deinit(self: *@This(), allocator: std.mem.Allocator) void { - return protobuf.deinit(allocator, self); - } - - /// Duplicates the message. - pub fn dupe(self: @This(), allocator: std.mem.Allocator) std.mem.Allocator.Error!@This() { - return protobuf.dupe(@This(), self, allocator); - } - - /// Decodes the message from the JSON string. - pub fn jsonDecode( - input: []const u8, - options: std.json.ParseOptions, - allocator: std.mem.Allocator, - ) !std.json.Parsed(@This()) { - return protobuf.json.decode(@This(), input, options, allocator); - } - - /// Encodes the message to a JSON string. - pub fn jsonEncode( - self: @This(), - options: std.json.Stringify.Options, - allocator: std.mem.Allocator, - ) ![]const u8 { - return protobuf.json.encode(self, options, allocator); - } - - /// This method is used by std.json - /// internally for deserialization. DO NOT RENAME! - pub fn jsonParse( - allocator: std.mem.Allocator, - source: anytype, - options: std.json.ParseOptions, - ) !@This() { - return protobuf.json.parse(@This(), allocator, source, options); - } - - /// This method is used by std.json - /// internally for serialization. DO NOT RENAME! - pub fn jsonStringify(self: *const @This(), jws: anytype) !void { - return protobuf.json.stringify(@This(), self, jws); - } -}; - -pub const EnumOptions = struct { - allow_alias: ?bool = null, - deprecated: ?bool = false, - deprecated_legacy_json_field_conflicts: ?bool = null, - features: ?FeatureSet = null, - uninterpreted_option: std.ArrayListUnmanaged(UninterpretedOption) = .empty, - - pub const _desc_table = .{ - .allow_alias = fd(2, .{ .scalar = .bool }), - .deprecated = fd(3, .{ .scalar = .bool }), - .deprecated_legacy_json_field_conflicts = fd(6, .{ .scalar = .bool }), - .features = fd(7, .submessage), - .uninterpreted_option = fd(999, .{ .repeated = .submessage }), - }; - - /// Encodes the message to the writer - /// The allocator is used to generate submessages internally. - /// Hence, an ArenaAllocator is a preferred choice if allocations are a bottleneck. - pub fn encode( - self: @This(), - writer: *std.Io.Writer, - allocator: std.mem.Allocator, - ) (std.Io.Writer.Error || std.mem.Allocator.Error)!void { - return protobuf.encode(writer, allocator, self); - } - - /// Decodes the message from the bytes read from the reader. 
- pub fn decode( - reader: *std.Io.Reader, - allocator: std.mem.Allocator, - ) (protobuf.DecodingError || std.Io.Reader.Error || std.mem.Allocator.Error)!@This() { - return protobuf.decode(@This(), reader, allocator); - } - - /// Deinitializes and frees the memory associated with the message. - pub fn deinit(self: *@This(), allocator: std.mem.Allocator) void { - return protobuf.deinit(allocator, self); - } - - /// Duplicates the message. - pub fn dupe(self: @This(), allocator: std.mem.Allocator) std.mem.Allocator.Error!@This() { - return protobuf.dupe(@This(), self, allocator); - } - - /// Decodes the message from the JSON string. - pub fn jsonDecode( - input: []const u8, - options: std.json.ParseOptions, - allocator: std.mem.Allocator, - ) !std.json.Parsed(@This()) { - return protobuf.json.decode(@This(), input, options, allocator); - } - - /// Encodes the message to a JSON string. - pub fn jsonEncode( - self: @This(), - options: std.json.Stringify.Options, - allocator: std.mem.Allocator, - ) ![]const u8 { - return protobuf.json.encode(self, options, allocator); - } - - /// This method is used by std.json - /// internally for deserialization. DO NOT RENAME! - pub fn jsonParse( - allocator: std.mem.Allocator, - source: anytype, - options: std.json.ParseOptions, - ) !@This() { - return protobuf.json.parse(@This(), allocator, source, options); - } - - /// This method is used by std.json - /// internally for serialization. DO NOT RENAME! - pub fn jsonStringify(self: *const @This(), jws: anytype) !void { - return protobuf.json.stringify(@This(), self, jws); - } -}; - -pub const EnumValueOptions = struct { - deprecated: ?bool = false, - features: ?FeatureSet = null, - debug_redact: ?bool = false, - feature_support: ?FieldOptions.FeatureSupport = null, - uninterpreted_option: std.ArrayListUnmanaged(UninterpretedOption) = .empty, - - pub const _desc_table = .{ - .deprecated = fd(1, .{ .scalar = .bool }), - .features = fd(2, .submessage), - .debug_redact = fd(3, .{ .scalar = .bool }), - .feature_support = fd(4, .submessage), - .uninterpreted_option = fd(999, .{ .repeated = .submessage }), - }; - - /// Encodes the message to the writer - /// The allocator is used to generate submessages internally. - /// Hence, an ArenaAllocator is a preferred choice if allocations are a bottleneck. - pub fn encode( - self: @This(), - writer: *std.Io.Writer, - allocator: std.mem.Allocator, - ) (std.Io.Writer.Error || std.mem.Allocator.Error)!void { - return protobuf.encode(writer, allocator, self); - } - - /// Decodes the message from the bytes read from the reader. - pub fn decode( - reader: *std.Io.Reader, - allocator: std.mem.Allocator, - ) (protobuf.DecodingError || std.Io.Reader.Error || std.mem.Allocator.Error)!@This() { - return protobuf.decode(@This(), reader, allocator); - } - - /// Deinitializes and frees the memory associated with the message. - pub fn deinit(self: *@This(), allocator: std.mem.Allocator) void { - return protobuf.deinit(allocator, self); - } - - /// Duplicates the message. - pub fn dupe(self: @This(), allocator: std.mem.Allocator) std.mem.Allocator.Error!@This() { - return protobuf.dupe(@This(), self, allocator); - } - - /// Decodes the message from the JSON string. - pub fn jsonDecode( - input: []const u8, - options: std.json.ParseOptions, - allocator: std.mem.Allocator, - ) !std.json.Parsed(@This()) { - return protobuf.json.decode(@This(), input, options, allocator); - } - - /// Encodes the message to a JSON string. 
- pub fn jsonEncode( - self: @This(), - options: std.json.Stringify.Options, - allocator: std.mem.Allocator, - ) ![]const u8 { - return protobuf.json.encode(self, options, allocator); - } - - /// This method is used by std.json - /// internally for deserialization. DO NOT RENAME! - pub fn jsonParse( - allocator: std.mem.Allocator, - source: anytype, - options: std.json.ParseOptions, - ) !@This() { - return protobuf.json.parse(@This(), allocator, source, options); - } - - /// This method is used by std.json - /// internally for serialization. DO NOT RENAME! - pub fn jsonStringify(self: *const @This(), jws: anytype) !void { - return protobuf.json.stringify(@This(), self, jws); - } -}; - -pub const ServiceOptions = struct { - features: ?FeatureSet = null, - deprecated: ?bool = false, - uninterpreted_option: std.ArrayListUnmanaged(UninterpretedOption) = .empty, - - pub const _desc_table = .{ - .features = fd(34, .submessage), - .deprecated = fd(33, .{ .scalar = .bool }), - .uninterpreted_option = fd(999, .{ .repeated = .submessage }), - }; - - /// Encodes the message to the writer - /// The allocator is used to generate submessages internally. - /// Hence, an ArenaAllocator is a preferred choice if allocations are a bottleneck. - pub fn encode( - self: @This(), - writer: *std.Io.Writer, - allocator: std.mem.Allocator, - ) (std.Io.Writer.Error || std.mem.Allocator.Error)!void { - return protobuf.encode(writer, allocator, self); - } - - /// Decodes the message from the bytes read from the reader. - pub fn decode( - reader: *std.Io.Reader, - allocator: std.mem.Allocator, - ) (protobuf.DecodingError || std.Io.Reader.Error || std.mem.Allocator.Error)!@This() { - return protobuf.decode(@This(), reader, allocator); - } - - /// Deinitializes and frees the memory associated with the message. - pub fn deinit(self: *@This(), allocator: std.mem.Allocator) void { - return protobuf.deinit(allocator, self); - } - - /// Duplicates the message. - pub fn dupe(self: @This(), allocator: std.mem.Allocator) std.mem.Allocator.Error!@This() { - return protobuf.dupe(@This(), self, allocator); - } - - /// Decodes the message from the JSON string. - pub fn jsonDecode( - input: []const u8, - options: std.json.ParseOptions, - allocator: std.mem.Allocator, - ) !std.json.Parsed(@This()) { - return protobuf.json.decode(@This(), input, options, allocator); - } - - /// Encodes the message to a JSON string. - pub fn jsonEncode( - self: @This(), - options: std.json.Stringify.Options, - allocator: std.mem.Allocator, - ) ![]const u8 { - return protobuf.json.encode(self, options, allocator); - } - - /// This method is used by std.json - /// internally for deserialization. DO NOT RENAME! - pub fn jsonParse( - allocator: std.mem.Allocator, - source: anytype, - options: std.json.ParseOptions, - ) !@This() { - return protobuf.json.parse(@This(), allocator, source, options); - } - - /// This method is used by std.json - /// internally for serialization. DO NOT RENAME! 
- pub fn jsonStringify(self: *const @This(), jws: anytype) !void { - return protobuf.json.stringify(@This(), self, jws); - } -}; - -pub const MethodOptions = struct { - deprecated: ?bool = false, - idempotency_level: ?MethodOptions.IdempotencyLevel = .IDEMPOTENCY_UNKNOWN, - features: ?FeatureSet = null, - uninterpreted_option: std.ArrayListUnmanaged(UninterpretedOption) = .empty, - - pub const _desc_table = .{ - .deprecated = fd(33, .{ .scalar = .bool }), - .idempotency_level = fd(34, .@"enum"), - .features = fd(35, .submessage), - .uninterpreted_option = fd(999, .{ .repeated = .submessage }), - }; - - /// Is this method side-effect-free (or safe in HTTP parlance), or idempotent, - /// or neither? HTTP based RPC implementation may choose GET verb for safe - /// methods, and PUT verb for idempotent methods instead of the default POST. - pub const IdempotencyLevel = enum(i32) { - IDEMPOTENCY_UNKNOWN = 0, - NO_SIDE_EFFECTS = 1, - IDEMPOTENT = 2, - _, - }; - - /// Encodes the message to the writer - /// The allocator is used to generate submessages internally. - /// Hence, an ArenaAllocator is a preferred choice if allocations are a bottleneck. - pub fn encode( - self: @This(), - writer: *std.Io.Writer, - allocator: std.mem.Allocator, - ) (std.Io.Writer.Error || std.mem.Allocator.Error)!void { - return protobuf.encode(writer, allocator, self); - } - - /// Decodes the message from the bytes read from the reader. - pub fn decode( - reader: *std.Io.Reader, - allocator: std.mem.Allocator, - ) (protobuf.DecodingError || std.Io.Reader.Error || std.mem.Allocator.Error)!@This() { - return protobuf.decode(@This(), reader, allocator); - } - - /// Deinitializes and frees the memory associated with the message. - pub fn deinit(self: *@This(), allocator: std.mem.Allocator) void { - return protobuf.deinit(allocator, self); - } - - /// Duplicates the message. - pub fn dupe(self: @This(), allocator: std.mem.Allocator) std.mem.Allocator.Error!@This() { - return protobuf.dupe(@This(), self, allocator); - } - - /// Decodes the message from the JSON string. - pub fn jsonDecode( - input: []const u8, - options: std.json.ParseOptions, - allocator: std.mem.Allocator, - ) !std.json.Parsed(@This()) { - return protobuf.json.decode(@This(), input, options, allocator); - } - - /// Encodes the message to a JSON string. - pub fn jsonEncode( - self: @This(), - options: std.json.Stringify.Options, - allocator: std.mem.Allocator, - ) ![]const u8 { - return protobuf.json.encode(self, options, allocator); - } - - /// This method is used by std.json - /// internally for deserialization. DO NOT RENAME! - pub fn jsonParse( - allocator: std.mem.Allocator, - source: anytype, - options: std.json.ParseOptions, - ) !@This() { - return protobuf.json.parse(@This(), allocator, source, options); - } - - /// This method is used by std.json - /// internally for serialization. DO NOT RENAME! - pub fn jsonStringify(self: *const @This(), jws: anytype) !void { - return protobuf.json.stringify(@This(), self, jws); - } -}; - -/// A message representing a option the parser does not recognize. This only -/// appears in options protos created by the compiler::Parser class. -/// DescriptorPool resolves these when building Descriptor objects. Therefore, -/// options protos in descriptor objects (e.g. returned by Descriptor::options(), -/// or produced by Descriptor::CopyTo()) will never have UninterpretedOptions -/// in them. 
-pub const UninterpretedOption = struct { - name: std.ArrayListUnmanaged(UninterpretedOption.NamePart) = .empty, - identifier_value: ?[]const u8 = null, - positive_int_value: ?u64 = null, - negative_int_value: ?i64 = null, - double_value: ?f64 = null, - string_value: ?[]const u8 = null, - aggregate_value: ?[]const u8 = null, - - pub const _desc_table = .{ - .name = fd(2, .{ .repeated = .submessage }), - .identifier_value = fd(3, .{ .scalar = .string }), - .positive_int_value = fd(4, .{ .scalar = .uint64 }), - .negative_int_value = fd(5, .{ .scalar = .int64 }), - .double_value = fd(6, .{ .scalar = .double }), - .string_value = fd(7, .{ .scalar = .bytes }), - .aggregate_value = fd(8, .{ .scalar = .string }), - }; - - /// The name of the uninterpreted option. Each string represents a segment in - /// a dot-separated name. is_extension is true iff a segment represents an - /// extension (denoted with parentheses in options specs in .proto files). - /// E.g.,{ ["foo", false], ["bar.baz", true], ["moo", false] } represents - /// "foo.(bar.baz).moo". - pub const NamePart = struct { - name_part: []const u8, - is_extension: bool, - - pub const _desc_table = .{ - .name_part = fd(1, .{ .scalar = .string }), - .is_extension = fd(2, .{ .scalar = .bool }), - }; - - /// Encodes the message to the writer - /// The allocator is used to generate submessages internally. - /// Hence, an ArenaAllocator is a preferred choice if allocations are a bottleneck. - pub fn encode( - self: @This(), - writer: *std.Io.Writer, - allocator: std.mem.Allocator, - ) (std.Io.Writer.Error || std.mem.Allocator.Error)!void { - return protobuf.encode(writer, allocator, self); - } - - /// Decodes the message from the bytes read from the reader. - pub fn decode( - reader: *std.Io.Reader, - allocator: std.mem.Allocator, - ) (protobuf.DecodingError || std.Io.Reader.Error || std.mem.Allocator.Error)!@This() { - return protobuf.decode(@This(), reader, allocator); - } - - /// Deinitializes and frees the memory associated with the message. - pub fn deinit(self: *@This(), allocator: std.mem.Allocator) void { - return protobuf.deinit(allocator, self); - } - - /// Duplicates the message. - pub fn dupe(self: @This(), allocator: std.mem.Allocator) std.mem.Allocator.Error!@This() { - return protobuf.dupe(@This(), self, allocator); - } - - /// Decodes the message from the JSON string. - pub fn jsonDecode( - input: []const u8, - options: std.json.ParseOptions, - allocator: std.mem.Allocator, - ) !std.json.Parsed(@This()) { - return protobuf.json.decode(@This(), input, options, allocator); - } - - /// Encodes the message to a JSON string. - pub fn jsonEncode( - self: @This(), - options: std.json.Stringify.Options, - allocator: std.mem.Allocator, - ) ![]const u8 { - return protobuf.json.encode(self, options, allocator); - } - - /// This method is used by std.json - /// internally for deserialization. DO NOT RENAME! - pub fn jsonParse( - allocator: std.mem.Allocator, - source: anytype, - options: std.json.ParseOptions, - ) !@This() { - return protobuf.json.parse(@This(), allocator, source, options); - } - - /// This method is used by std.json - /// internally for serialization. DO NOT RENAME! - pub fn jsonStringify(self: *const @This(), jws: anytype) !void { - return protobuf.json.stringify(@This(), self, jws); - } - }; - - /// Encodes the message to the writer - /// The allocator is used to generate submessages internally. - /// Hence, an ArenaAllocator is a preferred choice if allocations are a bottleneck. 
- pub fn encode( - self: @This(), - writer: *std.Io.Writer, - allocator: std.mem.Allocator, - ) (std.Io.Writer.Error || std.mem.Allocator.Error)!void { - return protobuf.encode(writer, allocator, self); - } - - /// Decodes the message from the bytes read from the reader. - pub fn decode( - reader: *std.Io.Reader, - allocator: std.mem.Allocator, - ) (protobuf.DecodingError || std.Io.Reader.Error || std.mem.Allocator.Error)!@This() { - return protobuf.decode(@This(), reader, allocator); - } - - /// Deinitializes and frees the memory associated with the message. - pub fn deinit(self: *@This(), allocator: std.mem.Allocator) void { - return protobuf.deinit(allocator, self); - } - - /// Duplicates the message. - pub fn dupe(self: @This(), allocator: std.mem.Allocator) std.mem.Allocator.Error!@This() { - return protobuf.dupe(@This(), self, allocator); - } - - /// Decodes the message from the JSON string. - pub fn jsonDecode( - input: []const u8, - options: std.json.ParseOptions, - allocator: std.mem.Allocator, - ) !std.json.Parsed(@This()) { - return protobuf.json.decode(@This(), input, options, allocator); - } - - /// Encodes the message to a JSON string. - pub fn jsonEncode( - self: @This(), - options: std.json.Stringify.Options, - allocator: std.mem.Allocator, - ) ![]const u8 { - return protobuf.json.encode(self, options, allocator); - } - - /// This method is used by std.json - /// internally for deserialization. DO NOT RENAME! - pub fn jsonParse( - allocator: std.mem.Allocator, - source: anytype, - options: std.json.ParseOptions, - ) !@This() { - return protobuf.json.parse(@This(), allocator, source, options); - } - - /// This method is used by std.json - /// internally for serialization. DO NOT RENAME! - pub fn jsonStringify(self: *const @This(), jws: anytype) !void { - return protobuf.json.stringify(@This(), self, jws); - } -}; - -/// TODO Enums in C++ gencode (and potentially other languages) are -/// not well scoped. This means that each of the feature enums below can clash -/// with each other. The short names we've chosen maximize call-site -/// readability, but leave us very open to this scenario. A future feature will -/// be designed and implemented to handle this, hopefully before we ever hit a -/// conflict here. 
-pub const FeatureSet = struct { - field_presence: ?FeatureSet.FieldPresence = null, - enum_type: ?FeatureSet.EnumType = null, - repeated_field_encoding: ?FeatureSet.RepeatedFieldEncoding = null, - utf8_validation: ?FeatureSet.Utf8Validation = null, - message_encoding: ?FeatureSet.MessageEncoding = null, - json_format: ?FeatureSet.JsonFormat = null, - enforce_naming_style: ?FeatureSet.EnforceNamingStyle = null, - default_symbol_visibility: ?FeatureSet.VisibilityFeature.DefaultSymbolVisibility = null, - - pub const _desc_table = .{ - .field_presence = fd(1, .@"enum"), - .enum_type = fd(2, .@"enum"), - .repeated_field_encoding = fd(3, .@"enum"), - .utf8_validation = fd(4, .@"enum"), - .message_encoding = fd(5, .@"enum"), - .json_format = fd(6, .@"enum"), - .enforce_naming_style = fd(7, .@"enum"), - .default_symbol_visibility = fd(8, .@"enum"), - }; - - pub const FieldPresence = enum(i32) { - FIELD_PRESENCE_UNKNOWN = 0, - EXPLICIT = 1, - IMPLICIT = 2, - LEGACY_REQUIRED = 3, - _, - }; - - pub const EnumType = enum(i32) { - ENUM_TYPE_UNKNOWN = 0, - OPEN = 1, - CLOSED = 2, - _, - }; - - pub const RepeatedFieldEncoding = enum(i32) { - REPEATED_FIELD_ENCODING_UNKNOWN = 0, - PACKED = 1, - EXPANDED = 2, - _, - }; - - pub const Utf8Validation = enum(i32) { - UTF8_VALIDATION_UNKNOWN = 0, - VERIFY = 2, - NONE = 3, - _, - }; - - pub const MessageEncoding = enum(i32) { - MESSAGE_ENCODING_UNKNOWN = 0, - LENGTH_PREFIXED = 1, - DELIMITED = 2, - _, - }; - - pub const JsonFormat = enum(i32) { - JSON_FORMAT_UNKNOWN = 0, - ALLOW = 1, - LEGACY_BEST_EFFORT = 2, - _, - }; - - pub const EnforceNamingStyle = enum(i32) { - ENFORCE_NAMING_STYLE_UNKNOWN = 0, - STYLE2024 = 1, - STYLE_LEGACY = 2, - _, - }; - - pub const VisibilityFeature = struct { - pub const _desc_table = .{}; - - pub const DefaultSymbolVisibility = enum(i32) { - DEFAULT_SYMBOL_VISIBILITY_UNKNOWN = 0, - EXPORT_ALL = 1, - EXPORT_TOP_LEVEL = 2, - LOCAL_ALL = 3, - STRICT = 4, - _, - }; - - /// Encodes the message to the writer - /// The allocator is used to generate submessages internally. - /// Hence, an ArenaAllocator is a preferred choice if allocations are a bottleneck. - pub fn encode( - self: @This(), - writer: *std.Io.Writer, - allocator: std.mem.Allocator, - ) (std.Io.Writer.Error || std.mem.Allocator.Error)!void { - return protobuf.encode(writer, allocator, self); - } - - /// Decodes the message from the bytes read from the reader. - pub fn decode( - reader: *std.Io.Reader, - allocator: std.mem.Allocator, - ) (protobuf.DecodingError || std.Io.Reader.Error || std.mem.Allocator.Error)!@This() { - return protobuf.decode(@This(), reader, allocator); - } - - /// Deinitializes and frees the memory associated with the message. - pub fn deinit(self: *@This(), allocator: std.mem.Allocator) void { - return protobuf.deinit(allocator, self); - } - - /// Duplicates the message. - pub fn dupe(self: @This(), allocator: std.mem.Allocator) std.mem.Allocator.Error!@This() { - return protobuf.dupe(@This(), self, allocator); - } - - /// Decodes the message from the JSON string. - pub fn jsonDecode( - input: []const u8, - options: std.json.ParseOptions, - allocator: std.mem.Allocator, - ) !std.json.Parsed(@This()) { - return protobuf.json.decode(@This(), input, options, allocator); - } - - /// Encodes the message to a JSON string. 
- pub fn jsonEncode( - self: @This(), - options: std.json.Stringify.Options, - allocator: std.mem.Allocator, - ) ![]const u8 { - return protobuf.json.encode(self, options, allocator); - } - - /// This method is used by std.json - /// internally for deserialization. DO NOT RENAME! - pub fn jsonParse( - allocator: std.mem.Allocator, - source: anytype, - options: std.json.ParseOptions, - ) !@This() { - return protobuf.json.parse(@This(), allocator, source, options); - } - - /// This method is used by std.json - /// internally for serialization. DO NOT RENAME! - pub fn jsonStringify(self: *const @This(), jws: anytype) !void { - return protobuf.json.stringify(@This(), self, jws); - } - }; - - /// Encodes the message to the writer - /// The allocator is used to generate submessages internally. - /// Hence, an ArenaAllocator is a preferred choice if allocations are a bottleneck. - pub fn encode( - self: @This(), - writer: *std.Io.Writer, - allocator: std.mem.Allocator, - ) (std.Io.Writer.Error || std.mem.Allocator.Error)!void { - return protobuf.encode(writer, allocator, self); - } - - /// Decodes the message from the bytes read from the reader. - pub fn decode( - reader: *std.Io.Reader, - allocator: std.mem.Allocator, - ) (protobuf.DecodingError || std.Io.Reader.Error || std.mem.Allocator.Error)!@This() { - return protobuf.decode(@This(), reader, allocator); - } - - /// Deinitializes and frees the memory associated with the message. - pub fn deinit(self: *@This(), allocator: std.mem.Allocator) void { - return protobuf.deinit(allocator, self); - } - - /// Duplicates the message. - pub fn dupe(self: @This(), allocator: std.mem.Allocator) std.mem.Allocator.Error!@This() { - return protobuf.dupe(@This(), self, allocator); - } - - /// Decodes the message from the JSON string. - pub fn jsonDecode( - input: []const u8, - options: std.json.ParseOptions, - allocator: std.mem.Allocator, - ) !std.json.Parsed(@This()) { - return protobuf.json.decode(@This(), input, options, allocator); - } - - /// Encodes the message to a JSON string. - pub fn jsonEncode( - self: @This(), - options: std.json.Stringify.Options, - allocator: std.mem.Allocator, - ) ![]const u8 { - return protobuf.json.encode(self, options, allocator); - } - - /// This method is used by std.json - /// internally for deserialization. DO NOT RENAME! - pub fn jsonParse( - allocator: std.mem.Allocator, - source: anytype, - options: std.json.ParseOptions, - ) !@This() { - return protobuf.json.parse(@This(), allocator, source, options); - } - - /// This method is used by std.json - /// internally for serialization. DO NOT RENAME! - pub fn jsonStringify(self: *const @This(), jws: anytype) !void { - return protobuf.json.stringify(@This(), self, jws); - } -}; - -/// A compiled specification for the defaults of a set of features. These -/// messages are generated from FeatureSet extensions and can be used to seed -/// feature resolution. The resolution with this object becomes a simple search -/// for the closest matching edition, followed by proto merges. -pub const FeatureSetDefaults = struct { - defaults: std.ArrayListUnmanaged(FeatureSetDefaults.FeatureSetEditionDefault) = .empty, - minimum_edition: ?Edition = null, - maximum_edition: ?Edition = null, - - pub const _desc_table = .{ - .defaults = fd(1, .{ .repeated = .submessage }), - .minimum_edition = fd(4, .@"enum"), - .maximum_edition = fd(5, .@"enum"), - }; - - /// A map from every known edition with a unique set of defaults to its - /// defaults. Not all editions may be contained here. 
For a given edition, - /// the defaults at the closest matching edition ordered at or before it should - /// be used. This field must be in strict ascending order by edition. - pub const FeatureSetEditionDefault = struct { - edition: ?Edition = null, - overridable_features: ?FeatureSet = null, - fixed_features: ?FeatureSet = null, - - pub const _desc_table = .{ - .edition = fd(3, .@"enum"), - .overridable_features = fd(4, .submessage), - .fixed_features = fd(5, .submessage), - }; - - /// Encodes the message to the writer - /// The allocator is used to generate submessages internally. - /// Hence, an ArenaAllocator is a preferred choice if allocations are a bottleneck. - pub fn encode( - self: @This(), - writer: *std.Io.Writer, - allocator: std.mem.Allocator, - ) (std.Io.Writer.Error || std.mem.Allocator.Error)!void { - return protobuf.encode(writer, allocator, self); - } - - /// Decodes the message from the bytes read from the reader. - pub fn decode( - reader: *std.Io.Reader, - allocator: std.mem.Allocator, - ) (protobuf.DecodingError || std.Io.Reader.Error || std.mem.Allocator.Error)!@This() { - return protobuf.decode(@This(), reader, allocator); - } - - /// Deinitializes and frees the memory associated with the message. - pub fn deinit(self: *@This(), allocator: std.mem.Allocator) void { - return protobuf.deinit(allocator, self); - } - - /// Duplicates the message. - pub fn dupe(self: @This(), allocator: std.mem.Allocator) std.mem.Allocator.Error!@This() { - return protobuf.dupe(@This(), self, allocator); - } - - /// Decodes the message from the JSON string. - pub fn jsonDecode( - input: []const u8, - options: std.json.ParseOptions, - allocator: std.mem.Allocator, - ) !std.json.Parsed(@This()) { - return protobuf.json.decode(@This(), input, options, allocator); - } - - /// Encodes the message to a JSON string. - pub fn jsonEncode( - self: @This(), - options: std.json.Stringify.Options, - allocator: std.mem.Allocator, - ) ![]const u8 { - return protobuf.json.encode(self, options, allocator); - } - - /// This method is used by std.json - /// internally for deserialization. DO NOT RENAME! - pub fn jsonParse( - allocator: std.mem.Allocator, - source: anytype, - options: std.json.ParseOptions, - ) !@This() { - return protobuf.json.parse(@This(), allocator, source, options); - } - - /// This method is used by std.json - /// internally for serialization. DO NOT RENAME! - pub fn jsonStringify(self: *const @This(), jws: anytype) !void { - return protobuf.json.stringify(@This(), self, jws); - } - }; - - /// Encodes the message to the writer - /// The allocator is used to generate submessages internally. - /// Hence, an ArenaAllocator is a preferred choice if allocations are a bottleneck. - pub fn encode( - self: @This(), - writer: *std.Io.Writer, - allocator: std.mem.Allocator, - ) (std.Io.Writer.Error || std.mem.Allocator.Error)!void { - return protobuf.encode(writer, allocator, self); - } - - /// Decodes the message from the bytes read from the reader. - pub fn decode( - reader: *std.Io.Reader, - allocator: std.mem.Allocator, - ) (protobuf.DecodingError || std.Io.Reader.Error || std.mem.Allocator.Error)!@This() { - return protobuf.decode(@This(), reader, allocator); - } - - /// Deinitializes and frees the memory associated with the message. - pub fn deinit(self: *@This(), allocator: std.mem.Allocator) void { - return protobuf.deinit(allocator, self); - } - - /// Duplicates the message. 
- pub fn dupe(self: @This(), allocator: std.mem.Allocator) std.mem.Allocator.Error!@This() { - return protobuf.dupe(@This(), self, allocator); - } - - /// Decodes the message from the JSON string. - pub fn jsonDecode( - input: []const u8, - options: std.json.ParseOptions, - allocator: std.mem.Allocator, - ) !std.json.Parsed(@This()) { - return protobuf.json.decode(@This(), input, options, allocator); - } - - /// Encodes the message to a JSON string. - pub fn jsonEncode( - self: @This(), - options: std.json.Stringify.Options, - allocator: std.mem.Allocator, - ) ![]const u8 { - return protobuf.json.encode(self, options, allocator); - } - - /// This method is used by std.json - /// internally for deserialization. DO NOT RENAME! - pub fn jsonParse( - allocator: std.mem.Allocator, - source: anytype, - options: std.json.ParseOptions, - ) !@This() { - return protobuf.json.parse(@This(), allocator, source, options); - } - - /// This method is used by std.json - /// internally for serialization. DO NOT RENAME! - pub fn jsonStringify(self: *const @This(), jws: anytype) !void { - return protobuf.json.stringify(@This(), self, jws); - } -}; - -/// Encapsulates information about the original source file from which a -/// FileDescriptorProto was generated. -pub const SourceCodeInfo = struct { - location: std.ArrayListUnmanaged(SourceCodeInfo.Location) = .empty, - - pub const _desc_table = .{ - .location = fd(1, .{ .repeated = .submessage }), - }; - - pub const Location = struct { - path: std.ArrayListUnmanaged(i32) = .empty, - span: std.ArrayListUnmanaged(i32) = .empty, - leading_comments: ?[]const u8 = null, - trailing_comments: ?[]const u8 = null, - leading_detached_comments: std.ArrayListUnmanaged([]const u8) = .empty, - - pub const _desc_table = .{ - .path = fd(1, .{ .packed_repeated = .{ .scalar = .int32 } }), - .span = fd(2, .{ .packed_repeated = .{ .scalar = .int32 } }), - .leading_comments = fd(3, .{ .scalar = .string }), - .trailing_comments = fd(4, .{ .scalar = .string }), - .leading_detached_comments = fd(6, .{ .repeated = .{ .scalar = .string } }), - }; - - /// Encodes the message to the writer - /// The allocator is used to generate submessages internally. - /// Hence, an ArenaAllocator is a preferred choice if allocations are a bottleneck. - pub fn encode( - self: @This(), - writer: *std.Io.Writer, - allocator: std.mem.Allocator, - ) (std.Io.Writer.Error || std.mem.Allocator.Error)!void { - return protobuf.encode(writer, allocator, self); - } - - /// Decodes the message from the bytes read from the reader. - pub fn decode( - reader: *std.Io.Reader, - allocator: std.mem.Allocator, - ) (protobuf.DecodingError || std.Io.Reader.Error || std.mem.Allocator.Error)!@This() { - return protobuf.decode(@This(), reader, allocator); - } - - /// Deinitializes and frees the memory associated with the message. - pub fn deinit(self: *@This(), allocator: std.mem.Allocator) void { - return protobuf.deinit(allocator, self); - } - - /// Duplicates the message. - pub fn dupe(self: @This(), allocator: std.mem.Allocator) std.mem.Allocator.Error!@This() { - return protobuf.dupe(@This(), self, allocator); - } - - /// Decodes the message from the JSON string. - pub fn jsonDecode( - input: []const u8, - options: std.json.ParseOptions, - allocator: std.mem.Allocator, - ) !std.json.Parsed(@This()) { - return protobuf.json.decode(@This(), input, options, allocator); - } - - /// Encodes the message to a JSON string. 
- pub fn jsonEncode( - self: @This(), - options: std.json.Stringify.Options, - allocator: std.mem.Allocator, - ) ![]const u8 { - return protobuf.json.encode(self, options, allocator); - } - - /// This method is used by std.json - /// internally for deserialization. DO NOT RENAME! - pub fn jsonParse( - allocator: std.mem.Allocator, - source: anytype, - options: std.json.ParseOptions, - ) !@This() { - return protobuf.json.parse(@This(), allocator, source, options); - } - - /// This method is used by std.json - /// internally for serialization. DO NOT RENAME! - pub fn jsonStringify(self: *const @This(), jws: anytype) !void { - return protobuf.json.stringify(@This(), self, jws); - } - }; - - /// Encodes the message to the writer - /// The allocator is used to generate submessages internally. - /// Hence, an ArenaAllocator is a preferred choice if allocations are a bottleneck. - pub fn encode( - self: @This(), - writer: *std.Io.Writer, - allocator: std.mem.Allocator, - ) (std.Io.Writer.Error || std.mem.Allocator.Error)!void { - return protobuf.encode(writer, allocator, self); - } - - /// Decodes the message from the bytes read from the reader. - pub fn decode( - reader: *std.Io.Reader, - allocator: std.mem.Allocator, - ) (protobuf.DecodingError || std.Io.Reader.Error || std.mem.Allocator.Error)!@This() { - return protobuf.decode(@This(), reader, allocator); - } - - /// Deinitializes and frees the memory associated with the message. - pub fn deinit(self: *@This(), allocator: std.mem.Allocator) void { - return protobuf.deinit(allocator, self); - } - - /// Duplicates the message. - pub fn dupe(self: @This(), allocator: std.mem.Allocator) std.mem.Allocator.Error!@This() { - return protobuf.dupe(@This(), self, allocator); - } - - /// Decodes the message from the JSON string. - pub fn jsonDecode( - input: []const u8, - options: std.json.ParseOptions, - allocator: std.mem.Allocator, - ) !std.json.Parsed(@This()) { - return protobuf.json.decode(@This(), input, options, allocator); - } - - /// Encodes the message to a JSON string. - pub fn jsonEncode( - self: @This(), - options: std.json.Stringify.Options, - allocator: std.mem.Allocator, - ) ![]const u8 { - return protobuf.json.encode(self, options, allocator); - } - - /// This method is used by std.json - /// internally for deserialization. DO NOT RENAME! - pub fn jsonParse( - allocator: std.mem.Allocator, - source: anytype, - options: std.json.ParseOptions, - ) !@This() { - return protobuf.json.parse(@This(), allocator, source, options); - } - - /// This method is used by std.json - /// internally for serialization. DO NOT RENAME! - pub fn jsonStringify(self: *const @This(), jws: anytype) !void { - return protobuf.json.stringify(@This(), self, jws); - } -}; - -/// Describes the relationship between generated code and its original source -/// file. A GeneratedCodeInfo message is associated with only one generated -/// source file, but may contain references to different source .proto files. 
-pub const GeneratedCodeInfo = struct { - annotation: std.ArrayListUnmanaged(GeneratedCodeInfo.Annotation) = .empty, - - pub const _desc_table = .{ - .annotation = fd(1, .{ .repeated = .submessage }), - }; - - pub const Annotation = struct { - path: std.ArrayListUnmanaged(i32) = .empty, - source_file: ?[]const u8 = null, - begin: ?i32 = null, - end: ?i32 = null, - semantic: ?GeneratedCodeInfo.Annotation.Semantic = null, - - pub const _desc_table = .{ - .path = fd(1, .{ .packed_repeated = .{ .scalar = .int32 } }), - .source_file = fd(2, .{ .scalar = .string }), - .begin = fd(3, .{ .scalar = .int32 }), - .end = fd(4, .{ .scalar = .int32 }), - .semantic = fd(5, .@"enum"), - }; - - /// Represents the identified object's effect on the element in the original - /// .proto file. - pub const Semantic = enum(i32) { - NONE = 0, - SET = 1, - ALIAS = 2, - _, - }; - - /// Encodes the message to the writer - /// The allocator is used to generate submessages internally. - /// Hence, an ArenaAllocator is a preferred choice if allocations are a bottleneck. - pub fn encode( - self: @This(), - writer: *std.Io.Writer, - allocator: std.mem.Allocator, - ) (std.Io.Writer.Error || std.mem.Allocator.Error)!void { - return protobuf.encode(writer, allocator, self); - } - - /// Decodes the message from the bytes read from the reader. - pub fn decode( - reader: *std.Io.Reader, - allocator: std.mem.Allocator, - ) (protobuf.DecodingError || std.Io.Reader.Error || std.mem.Allocator.Error)!@This() { - return protobuf.decode(@This(), reader, allocator); - } - - /// Deinitializes and frees the memory associated with the message. - pub fn deinit(self: *@This(), allocator: std.mem.Allocator) void { - return protobuf.deinit(allocator, self); - } - - /// Duplicates the message. - pub fn dupe(self: @This(), allocator: std.mem.Allocator) std.mem.Allocator.Error!@This() { - return protobuf.dupe(@This(), self, allocator); - } - - /// Decodes the message from the JSON string. - pub fn jsonDecode( - input: []const u8, - options: std.json.ParseOptions, - allocator: std.mem.Allocator, - ) !std.json.Parsed(@This()) { - return protobuf.json.decode(@This(), input, options, allocator); - } - - /// Encodes the message to a JSON string. - pub fn jsonEncode( - self: @This(), - options: std.json.Stringify.Options, - allocator: std.mem.Allocator, - ) ![]const u8 { - return protobuf.json.encode(self, options, allocator); - } - - /// This method is used by std.json - /// internally for deserialization. DO NOT RENAME! - pub fn jsonParse( - allocator: std.mem.Allocator, - source: anytype, - options: std.json.ParseOptions, - ) !@This() { - return protobuf.json.parse(@This(), allocator, source, options); - } - - /// This method is used by std.json - /// internally for serialization. DO NOT RENAME! - pub fn jsonStringify(self: *const @This(), jws: anytype) !void { - return protobuf.json.stringify(@This(), self, jws); - } - }; - - /// Encodes the message to the writer - /// The allocator is used to generate submessages internally. - /// Hence, an ArenaAllocator is a preferred choice if allocations are a bottleneck. - pub fn encode( - self: @This(), - writer: *std.Io.Writer, - allocator: std.mem.Allocator, - ) (std.Io.Writer.Error || std.mem.Allocator.Error)!void { - return protobuf.encode(writer, allocator, self); - } - - /// Decodes the message from the bytes read from the reader. 
- pub fn decode( - reader: *std.Io.Reader, - allocator: std.mem.Allocator, - ) (protobuf.DecodingError || std.Io.Reader.Error || std.mem.Allocator.Error)!@This() { - return protobuf.decode(@This(), reader, allocator); - } - - /// Deinitializes and frees the memory associated with the message. - pub fn deinit(self: *@This(), allocator: std.mem.Allocator) void { - return protobuf.deinit(allocator, self); - } - - /// Duplicates the message. - pub fn dupe(self: @This(), allocator: std.mem.Allocator) std.mem.Allocator.Error!@This() { - return protobuf.dupe(@This(), self, allocator); - } - - /// Decodes the message from the JSON string. - pub fn jsonDecode( - input: []const u8, - options: std.json.ParseOptions, - allocator: std.mem.Allocator, - ) !std.json.Parsed(@This()) { - return protobuf.json.decode(@This(), input, options, allocator); - } - - /// Encodes the message to a JSON string. - pub fn jsonEncode( - self: @This(), - options: std.json.Stringify.Options, - allocator: std.mem.Allocator, - ) ![]const u8 { - return protobuf.json.encode(self, options, allocator); - } - - /// This method is used by std.json - /// internally for deserialization. DO NOT RENAME! - pub fn jsonParse( - allocator: std.mem.Allocator, - source: anytype, - options: std.json.ParseOptions, - ) !@This() { - return protobuf.json.parse(@This(), allocator, source, options); - } - - /// This method is used by std.json - /// internally for serialization. DO NOT RENAME! - pub fn jsonStringify(self: *const @This(), jws: anytype) !void { - return protobuf.json.stringify(@This(), self, jws); - } -}; diff --git a/src/proto/opentelemetry/proto/common/v1.pb.zig b/src/proto/opentelemetry/proto/common/v1.pb.zig deleted file mode 100644 index 1be6156..0000000 --- a/src/proto/opentelemetry/proto/common/v1.pb.zig +++ /dev/null @@ -1,492 +0,0 @@ -// Code generated by protoc-gen-zig -///! package opentelemetry.proto.common.v1 -const std = @import("std"); - -const protobuf = @import("protobuf"); -const fd = protobuf.fd; - -/// Represents any type of attribute value. AnyValue may contain a -/// primitive value such as a string or integer or it may contain an arbitrary nested -/// object containing arrays, key-value lists and primitives. -pub const AnyValue = struct { - value: ?value_union = null, - - pub const _value_case = enum { - string_value, - bool_value, - int_value, - double_value, - array_value, - kvlist_value, - bytes_value, - }; - pub const value_union = union(_value_case) { - string_value: []const u8, - bool_value: bool, - int_value: i64, - double_value: f64, - array_value: ArrayValue, - kvlist_value: KeyValueList, - bytes_value: []const u8, - pub const _desc_table = .{ - .string_value = fd(1, .{ .scalar = .string }), - .bool_value = fd(2, .{ .scalar = .bool }), - .int_value = fd(3, .{ .scalar = .int64 }), - .double_value = fd(4, .{ .scalar = .double }), - .array_value = fd(5, .submessage), - .kvlist_value = fd(6, .submessage), - .bytes_value = fd(7, .{ .scalar = .bytes }), - }; - }; - - pub const _desc_table = .{ - .value = fd(null, .{ .oneof = value_union }), - }; - - /// Encodes the message to the writer - /// The allocator is used to generate submessages internally. - /// Hence, an ArenaAllocator is a preferred choice if allocations are a bottleneck. - pub fn encode( - self: @This(), - writer: *std.Io.Writer, - allocator: std.mem.Allocator, - ) (std.Io.Writer.Error || std.mem.Allocator.Error)!void { - return protobuf.encode(writer, allocator, self); - } - - /// Decodes the message from the bytes read from the reader. 
- pub fn decode( - reader: *std.Io.Reader, - allocator: std.mem.Allocator, - ) (protobuf.DecodingError || std.Io.Reader.Error || std.mem.Allocator.Error)!@This() { - return protobuf.decode(@This(), reader, allocator); - } - - /// Deinitializes and frees the memory associated with the message. - pub fn deinit(self: *@This(), allocator: std.mem.Allocator) void { - return protobuf.deinit(allocator, self); - } - - /// Duplicates the message. - pub fn dupe(self: @This(), allocator: std.mem.Allocator) std.mem.Allocator.Error!@This() { - return protobuf.dupe(@This(), self, allocator); - } - - /// Decodes the message from the JSON string. - pub fn jsonDecode( - input: []const u8, - options: std.json.ParseOptions, - allocator: std.mem.Allocator, - ) !std.json.Parsed(@This()) { - return protobuf.json.decode(@This(), input, options, allocator); - } - - /// Encodes the message to a JSON string. - pub fn jsonEncode( - self: @This(), - options: std.json.Stringify.Options, - allocator: std.mem.Allocator, - ) ![]const u8 { - return protobuf.json.encode(self, options, allocator); - } - - /// This method is used by std.json - /// internally for deserialization. DO NOT RENAME! - pub fn jsonParse( - allocator: std.mem.Allocator, - source: anytype, - options: std.json.ParseOptions, - ) !@This() { - return protobuf.json.parse(@This(), allocator, source, options); - } - - /// This method is used by std.json - /// internally for serialization. DO NOT RENAME! - pub fn jsonStringify(self: *const @This(), jws: anytype) !void { - return protobuf.json.stringify(@This(), self, jws); - } -}; - -/// ArrayValue is a list of AnyValue messages. We need ArrayValue as a message -/// since oneof in AnyValue does not allow repeated fields. -pub const ArrayValue = struct { - values: std.ArrayListUnmanaged(AnyValue) = .empty, - - pub const _desc_table = .{ - .values = fd(1, .{ .repeated = .submessage }), - }; - - /// Encodes the message to the writer - /// The allocator is used to generate submessages internally. - /// Hence, an ArenaAllocator is a preferred choice if allocations are a bottleneck. - pub fn encode( - self: @This(), - writer: *std.Io.Writer, - allocator: std.mem.Allocator, - ) (std.Io.Writer.Error || std.mem.Allocator.Error)!void { - return protobuf.encode(writer, allocator, self); - } - - /// Decodes the message from the bytes read from the reader. - pub fn decode( - reader: *std.Io.Reader, - allocator: std.mem.Allocator, - ) (protobuf.DecodingError || std.Io.Reader.Error || std.mem.Allocator.Error)!@This() { - return protobuf.decode(@This(), reader, allocator); - } - - /// Deinitializes and frees the memory associated with the message. - pub fn deinit(self: *@This(), allocator: std.mem.Allocator) void { - return protobuf.deinit(allocator, self); - } - - /// Duplicates the message. - pub fn dupe(self: @This(), allocator: std.mem.Allocator) std.mem.Allocator.Error!@This() { - return protobuf.dupe(@This(), self, allocator); - } - - /// Decodes the message from the JSON string. - pub fn jsonDecode( - input: []const u8, - options: std.json.ParseOptions, - allocator: std.mem.Allocator, - ) !std.json.Parsed(@This()) { - return protobuf.json.decode(@This(), input, options, allocator); - } - - /// Encodes the message to a JSON string. - pub fn jsonEncode( - self: @This(), - options: std.json.Stringify.Options, - allocator: std.mem.Allocator, - ) ![]const u8 { - return protobuf.json.encode(self, options, allocator); - } - - /// This method is used by std.json - /// internally for deserialization. DO NOT RENAME! 
- pub fn jsonParse( - allocator: std.mem.Allocator, - source: anytype, - options: std.json.ParseOptions, - ) !@This() { - return protobuf.json.parse(@This(), allocator, source, options); - } - - /// This method is used by std.json - /// internally for serialization. DO NOT RENAME! - pub fn jsonStringify(self: *const @This(), jws: anytype) !void { - return protobuf.json.stringify(@This(), self, jws); - } -}; - -/// KeyValueList is a list of KeyValue messages. We need KeyValueList as a message -/// since `oneof` in AnyValue does not allow repeated fields. Everywhere else where we need -/// a list of KeyValue messages (e.g. in Span) we use `repeated KeyValue` directly to -/// avoid unnecessary extra wrapping (which slows down the protocol). The 2 approaches -/// are semantically equivalent. -pub const KeyValueList = struct { - values: std.ArrayListUnmanaged(KeyValue) = .empty, - - pub const _desc_table = .{ - .values = fd(1, .{ .repeated = .submessage }), - }; - - /// Encodes the message to the writer - /// The allocator is used to generate submessages internally. - /// Hence, an ArenaAllocator is a preferred choice if allocations are a bottleneck. - pub fn encode( - self: @This(), - writer: *std.Io.Writer, - allocator: std.mem.Allocator, - ) (std.Io.Writer.Error || std.mem.Allocator.Error)!void { - return protobuf.encode(writer, allocator, self); - } - - /// Decodes the message from the bytes read from the reader. - pub fn decode( - reader: *std.Io.Reader, - allocator: std.mem.Allocator, - ) (protobuf.DecodingError || std.Io.Reader.Error || std.mem.Allocator.Error)!@This() { - return protobuf.decode(@This(), reader, allocator); - } - - /// Deinitializes and frees the memory associated with the message. - pub fn deinit(self: *@This(), allocator: std.mem.Allocator) void { - return protobuf.deinit(allocator, self); - } - - /// Duplicates the message. - pub fn dupe(self: @This(), allocator: std.mem.Allocator) std.mem.Allocator.Error!@This() { - return protobuf.dupe(@This(), self, allocator); - } - - /// Decodes the message from the JSON string. - pub fn jsonDecode( - input: []const u8, - options: std.json.ParseOptions, - allocator: std.mem.Allocator, - ) !std.json.Parsed(@This()) { - return protobuf.json.decode(@This(), input, options, allocator); - } - - /// Encodes the message to a JSON string. - pub fn jsonEncode( - self: @This(), - options: std.json.Stringify.Options, - allocator: std.mem.Allocator, - ) ![]const u8 { - return protobuf.json.encode(self, options, allocator); - } - - /// This method is used by std.json - /// internally for deserialization. DO NOT RENAME! - pub fn jsonParse( - allocator: std.mem.Allocator, - source: anytype, - options: std.json.ParseOptions, - ) !@This() { - return protobuf.json.parse(@This(), allocator, source, options); - } - - /// This method is used by std.json - /// internally for serialization. DO NOT RENAME! - pub fn jsonStringify(self: *const @This(), jws: anytype) !void { - return protobuf.json.stringify(@This(), self, jws); - } -}; - -/// Represents a key-value pair that is used to store Span attributes, Link -/// attributes, etc. -pub const KeyValue = struct { - key: []const u8 = &.{}, - value: ?AnyValue = null, - - pub const _desc_table = .{ - .key = fd(1, .{ .scalar = .string }), - .value = fd(2, .submessage), - }; - - /// Encodes the message to the writer - /// The allocator is used to generate submessages internally. - /// Hence, an ArenaAllocator is a preferred choice if allocations are a bottleneck. 
- pub fn encode( - self: @This(), - writer: *std.Io.Writer, - allocator: std.mem.Allocator, - ) (std.Io.Writer.Error || std.mem.Allocator.Error)!void { - return protobuf.encode(writer, allocator, self); - } - - /// Decodes the message from the bytes read from the reader. - pub fn decode( - reader: *std.Io.Reader, - allocator: std.mem.Allocator, - ) (protobuf.DecodingError || std.Io.Reader.Error || std.mem.Allocator.Error)!@This() { - return protobuf.decode(@This(), reader, allocator); - } - - /// Deinitializes and frees the memory associated with the message. - pub fn deinit(self: *@This(), allocator: std.mem.Allocator) void { - return protobuf.deinit(allocator, self); - } - - /// Duplicates the message. - pub fn dupe(self: @This(), allocator: std.mem.Allocator) std.mem.Allocator.Error!@This() { - return protobuf.dupe(@This(), self, allocator); - } - - /// Decodes the message from the JSON string. - pub fn jsonDecode( - input: []const u8, - options: std.json.ParseOptions, - allocator: std.mem.Allocator, - ) !std.json.Parsed(@This()) { - return protobuf.json.decode(@This(), input, options, allocator); - } - - /// Encodes the message to a JSON string. - pub fn jsonEncode( - self: @This(), - options: std.json.Stringify.Options, - allocator: std.mem.Allocator, - ) ![]const u8 { - return protobuf.json.encode(self, options, allocator); - } - - /// This method is used by std.json - /// internally for deserialization. DO NOT RENAME! - pub fn jsonParse( - allocator: std.mem.Allocator, - source: anytype, - options: std.json.ParseOptions, - ) !@This() { - return protobuf.json.parse(@This(), allocator, source, options); - } - - /// This method is used by std.json - /// internally for serialization. DO NOT RENAME! - pub fn jsonStringify(self: *const @This(), jws: anytype) !void { - return protobuf.json.stringify(@This(), self, jws); - } -}; - -/// InstrumentationScope is a message representing the instrumentation scope information -/// such as the fully qualified name and version. -pub const InstrumentationScope = struct { - name: []const u8 = &.{}, - version: []const u8 = &.{}, - attributes: std.ArrayListUnmanaged(KeyValue) = .empty, - dropped_attributes_count: u32 = 0, - - pub const _desc_table = .{ - .name = fd(1, .{ .scalar = .string }), - .version = fd(2, .{ .scalar = .string }), - .attributes = fd(3, .{ .repeated = .submessage }), - .dropped_attributes_count = fd(4, .{ .scalar = .uint32 }), - }; - - /// Encodes the message to the writer - /// The allocator is used to generate submessages internally. - /// Hence, an ArenaAllocator is a preferred choice if allocations are a bottleneck. - pub fn encode( - self: @This(), - writer: *std.Io.Writer, - allocator: std.mem.Allocator, - ) (std.Io.Writer.Error || std.mem.Allocator.Error)!void { - return protobuf.encode(writer, allocator, self); - } - - /// Decodes the message from the bytes read from the reader. - pub fn decode( - reader: *std.Io.Reader, - allocator: std.mem.Allocator, - ) (protobuf.DecodingError || std.Io.Reader.Error || std.mem.Allocator.Error)!@This() { - return protobuf.decode(@This(), reader, allocator); - } - - /// Deinitializes and frees the memory associated with the message. - pub fn deinit(self: *@This(), allocator: std.mem.Allocator) void { - return protobuf.deinit(allocator, self); - } - - /// Duplicates the message. - pub fn dupe(self: @This(), allocator: std.mem.Allocator) std.mem.Allocator.Error!@This() { - return protobuf.dupe(@This(), self, allocator); - } - - /// Decodes the message from the JSON string. 
- pub fn jsonDecode( - input: []const u8, - options: std.json.ParseOptions, - allocator: std.mem.Allocator, - ) !std.json.Parsed(@This()) { - return protobuf.json.decode(@This(), input, options, allocator); - } - - /// Encodes the message to a JSON string. - pub fn jsonEncode( - self: @This(), - options: std.json.Stringify.Options, - allocator: std.mem.Allocator, - ) ![]const u8 { - return protobuf.json.encode(self, options, allocator); - } - - /// This method is used by std.json - /// internally for deserialization. DO NOT RENAME! - pub fn jsonParse( - allocator: std.mem.Allocator, - source: anytype, - options: std.json.ParseOptions, - ) !@This() { - return protobuf.json.parse(@This(), allocator, source, options); - } - - /// This method is used by std.json - /// internally for serialization. DO NOT RENAME! - pub fn jsonStringify(self: *const @This(), jws: anytype) !void { - return protobuf.json.stringify(@This(), self, jws); - } -}; - -/// A reference to an Entity. -/// Entity represents an object of interest associated with produced telemetry: e.g spans, metrics, profiles, or logs. -/// -/// Status: [Development] -pub const EntityRef = struct { - schema_url: []const u8 = &.{}, - type: []const u8 = &.{}, - id_keys: std.ArrayListUnmanaged([]const u8) = .empty, - description_keys: std.ArrayListUnmanaged([]const u8) = .empty, - - pub const _desc_table = .{ - .schema_url = fd(1, .{ .scalar = .string }), - .type = fd(2, .{ .scalar = .string }), - .id_keys = fd(3, .{ .repeated = .{ .scalar = .string } }), - .description_keys = fd(4, .{ .repeated = .{ .scalar = .string } }), - }; - - /// Encodes the message to the writer - /// The allocator is used to generate submessages internally. - /// Hence, an ArenaAllocator is a preferred choice if allocations are a bottleneck. - pub fn encode( - self: @This(), - writer: *std.Io.Writer, - allocator: std.mem.Allocator, - ) (std.Io.Writer.Error || std.mem.Allocator.Error)!void { - return protobuf.encode(writer, allocator, self); - } - - /// Decodes the message from the bytes read from the reader. - pub fn decode( - reader: *std.Io.Reader, - allocator: std.mem.Allocator, - ) (protobuf.DecodingError || std.Io.Reader.Error || std.mem.Allocator.Error)!@This() { - return protobuf.decode(@This(), reader, allocator); - } - - /// Deinitializes and frees the memory associated with the message. - pub fn deinit(self: *@This(), allocator: std.mem.Allocator) void { - return protobuf.deinit(allocator, self); - } - - /// Duplicates the message. - pub fn dupe(self: @This(), allocator: std.mem.Allocator) std.mem.Allocator.Error!@This() { - return protobuf.dupe(@This(), self, allocator); - } - - /// Decodes the message from the JSON string. - pub fn jsonDecode( - input: []const u8, - options: std.json.ParseOptions, - allocator: std.mem.Allocator, - ) !std.json.Parsed(@This()) { - return protobuf.json.decode(@This(), input, options, allocator); - } - - /// Encodes the message to a JSON string. - pub fn jsonEncode( - self: @This(), - options: std.json.Stringify.Options, - allocator: std.mem.Allocator, - ) ![]const u8 { - return protobuf.json.encode(self, options, allocator); - } - - /// This method is used by std.json - /// internally for deserialization. DO NOT RENAME! - pub fn jsonParse( - allocator: std.mem.Allocator, - source: anytype, - options: std.json.ParseOptions, - ) !@This() { - return protobuf.json.parse(@This(), allocator, source, options); - } - - /// This method is used by std.json - /// internally for serialization. DO NOT RENAME! 
- pub fn jsonStringify(self: *const @This(), jws: anytype) !void { - return protobuf.json.stringify(@This(), self, jws); - } -}; diff --git a/src/proto/opentelemetry/proto/logs/v1.pb.zig b/src/proto/opentelemetry/proto/logs/v1.pb.zig deleted file mode 100644 index a5a1903..0000000 --- a/src/proto/opentelemetry/proto/logs/v1.pb.zig +++ /dev/null @@ -1,379 +0,0 @@ -// Code generated by protoc-gen-zig -///! package opentelemetry.proto.logs.v1 -const std = @import("std"); - -const protobuf = @import("protobuf"); -const fd = protobuf.fd; -/// import package opentelemetry.proto.common.v1 -const opentelemetry_proto_common_v1 = @import("../common/v1.pb.zig"); -/// import package opentelemetry.proto.resource.v1 -const opentelemetry_proto_resource_v1 = @import("../resource/v1.pb.zig"); - -/// Possible values for LogRecord.SeverityNumber. -pub const SeverityNumber = enum(i32) { - SEVERITY_NUMBER_UNSPECIFIED = 0, - SEVERITY_NUMBER_TRACE = 1, - SEVERITY_NUMBER_TRACE2 = 2, - SEVERITY_NUMBER_TRACE3 = 3, - SEVERITY_NUMBER_TRACE4 = 4, - SEVERITY_NUMBER_DEBUG = 5, - SEVERITY_NUMBER_DEBUG2 = 6, - SEVERITY_NUMBER_DEBUG3 = 7, - SEVERITY_NUMBER_DEBUG4 = 8, - SEVERITY_NUMBER_INFO = 9, - SEVERITY_NUMBER_INFO2 = 10, - SEVERITY_NUMBER_INFO3 = 11, - SEVERITY_NUMBER_INFO4 = 12, - SEVERITY_NUMBER_WARN = 13, - SEVERITY_NUMBER_WARN2 = 14, - SEVERITY_NUMBER_WARN3 = 15, - SEVERITY_NUMBER_WARN4 = 16, - SEVERITY_NUMBER_ERROR = 17, - SEVERITY_NUMBER_ERROR2 = 18, - SEVERITY_NUMBER_ERROR3 = 19, - SEVERITY_NUMBER_ERROR4 = 20, - SEVERITY_NUMBER_FATAL = 21, - SEVERITY_NUMBER_FATAL2 = 22, - SEVERITY_NUMBER_FATAL3 = 23, - SEVERITY_NUMBER_FATAL4 = 24, - _, -}; - -/// LogRecordFlags represents constants used to interpret the -/// LogRecord.flags field, which is protobuf 'fixed32' type and is to -/// be used as bit-fields. Each non-zero value defined in this enum is -/// a bit-mask. To extract the bit-field, for example, use an -/// expression like: -/// -/// (logRecord.flags & LOG_RECORD_FLAGS_TRACE_FLAGS_MASK) -pub const LogRecordFlags = enum(i32) { - LOG_RECORD_FLAGS_DO_NOT_USE = 0, - LOG_RECORD_FLAGS_TRACE_FLAGS_MASK = 255, - _, -}; - -/// LogsData represents the logs data that can be stored in a persistent storage, -/// OR can be embedded by other protocols that transfer OTLP logs data but do not -/// implement the OTLP protocol. -/// -/// The main difference between this message and collector protocol is that -/// in this message there will not be any "control" or "metadata" specific to -/// OTLP protocol. -/// -/// When new fields are added into this message, the OTLP request MUST be updated -/// as well. -pub const LogsData = struct { - resource_logs: std.ArrayListUnmanaged(ResourceLogs) = .empty, - - pub const _desc_table = .{ - .resource_logs = fd(1, .{ .repeated = .submessage }), - }; - - /// Encodes the message to the writer - /// The allocator is used to generate submessages internally. - /// Hence, an ArenaAllocator is a preferred choice if allocations are a bottleneck. - pub fn encode( - self: @This(), - writer: *std.Io.Writer, - allocator: std.mem.Allocator, - ) (std.Io.Writer.Error || std.mem.Allocator.Error)!void { - return protobuf.encode(writer, allocator, self); - } - - /// Decodes the message from the bytes read from the reader. 
- pub fn decode( - reader: *std.Io.Reader, - allocator: std.mem.Allocator, - ) (protobuf.DecodingError || std.Io.Reader.Error || std.mem.Allocator.Error)!@This() { - return protobuf.decode(@This(), reader, allocator); - } - - /// Deinitializes and frees the memory associated with the message. - pub fn deinit(self: *@This(), allocator: std.mem.Allocator) void { - return protobuf.deinit(allocator, self); - } - - /// Duplicates the message. - pub fn dupe(self: @This(), allocator: std.mem.Allocator) std.mem.Allocator.Error!@This() { - return protobuf.dupe(@This(), self, allocator); - } - - /// Decodes the message from the JSON string. - pub fn jsonDecode( - input: []const u8, - options: std.json.ParseOptions, - allocator: std.mem.Allocator, - ) !std.json.Parsed(@This()) { - return protobuf.json.decode(@This(), input, options, allocator); - } - - /// Encodes the message to a JSON string. - pub fn jsonEncode( - self: @This(), - options: std.json.Stringify.Options, - allocator: std.mem.Allocator, - ) ![]const u8 { - return protobuf.json.encode(self, options, allocator); - } - - /// This method is used by std.json - /// internally for deserialization. DO NOT RENAME! - pub fn jsonParse( - allocator: std.mem.Allocator, - source: anytype, - options: std.json.ParseOptions, - ) !@This() { - return protobuf.json.parse(@This(), allocator, source, options); - } - - /// This method is used by std.json - /// internally for serialization. DO NOT RENAME! - pub fn jsonStringify(self: *const @This(), jws: anytype) !void { - return protobuf.json.stringify(@This(), self, jws); - } -}; - -/// A collection of ScopeLogs from a Resource. -pub const ResourceLogs = struct { - resource: ?opentelemetry_proto_resource_v1.Resource = null, - scope_logs: std.ArrayListUnmanaged(ScopeLogs) = .empty, - schema_url: []const u8 = &.{}, - - pub const _desc_table = .{ - .resource = fd(1, .submessage), - .scope_logs = fd(2, .{ .repeated = .submessage }), - .schema_url = fd(3, .{ .scalar = .string }), - }; - - /// Encodes the message to the writer - /// The allocator is used to generate submessages internally. - /// Hence, an ArenaAllocator is a preferred choice if allocations are a bottleneck. - pub fn encode( - self: @This(), - writer: *std.Io.Writer, - allocator: std.mem.Allocator, - ) (std.Io.Writer.Error || std.mem.Allocator.Error)!void { - return protobuf.encode(writer, allocator, self); - } - - /// Decodes the message from the bytes read from the reader. - pub fn decode( - reader: *std.Io.Reader, - allocator: std.mem.Allocator, - ) (protobuf.DecodingError || std.Io.Reader.Error || std.mem.Allocator.Error)!@This() { - return protobuf.decode(@This(), reader, allocator); - } - - /// Deinitializes and frees the memory associated with the message. - pub fn deinit(self: *@This(), allocator: std.mem.Allocator) void { - return protobuf.deinit(allocator, self); - } - - /// Duplicates the message. - pub fn dupe(self: @This(), allocator: std.mem.Allocator) std.mem.Allocator.Error!@This() { - return protobuf.dupe(@This(), self, allocator); - } - - /// Decodes the message from the JSON string. - pub fn jsonDecode( - input: []const u8, - options: std.json.ParseOptions, - allocator: std.mem.Allocator, - ) !std.json.Parsed(@This()) { - return protobuf.json.decode(@This(), input, options, allocator); - } - - /// Encodes the message to a JSON string. 
- pub fn jsonEncode( - self: @This(), - options: std.json.Stringify.Options, - allocator: std.mem.Allocator, - ) ![]const u8 { - return protobuf.json.encode(self, options, allocator); - } - - /// This method is used by std.json - /// internally for deserialization. DO NOT RENAME! - pub fn jsonParse( - allocator: std.mem.Allocator, - source: anytype, - options: std.json.ParseOptions, - ) !@This() { - return protobuf.json.parse(@This(), allocator, source, options); - } - - /// This method is used by std.json - /// internally for serialization. DO NOT RENAME! - pub fn jsonStringify(self: *const @This(), jws: anytype) !void { - return protobuf.json.stringify(@This(), self, jws); - } -}; - -/// A collection of Logs produced by a Scope. -pub const ScopeLogs = struct { - scope: ?opentelemetry_proto_common_v1.InstrumentationScope = null, - log_records: std.ArrayListUnmanaged(LogRecord) = .empty, - schema_url: []const u8 = &.{}, - - pub const _desc_table = .{ - .scope = fd(1, .submessage), - .log_records = fd(2, .{ .repeated = .submessage }), - .schema_url = fd(3, .{ .scalar = .string }), - }; - - /// Encodes the message to the writer - /// The allocator is used to generate submessages internally. - /// Hence, an ArenaAllocator is a preferred choice if allocations are a bottleneck. - pub fn encode( - self: @This(), - writer: *std.Io.Writer, - allocator: std.mem.Allocator, - ) (std.Io.Writer.Error || std.mem.Allocator.Error)!void { - return protobuf.encode(writer, allocator, self); - } - - /// Decodes the message from the bytes read from the reader. - pub fn decode( - reader: *std.Io.Reader, - allocator: std.mem.Allocator, - ) (protobuf.DecodingError || std.Io.Reader.Error || std.mem.Allocator.Error)!@This() { - return protobuf.decode(@This(), reader, allocator); - } - - /// Deinitializes and frees the memory associated with the message. - pub fn deinit(self: *@This(), allocator: std.mem.Allocator) void { - return protobuf.deinit(allocator, self); - } - - /// Duplicates the message. - pub fn dupe(self: @This(), allocator: std.mem.Allocator) std.mem.Allocator.Error!@This() { - return protobuf.dupe(@This(), self, allocator); - } - - /// Decodes the message from the JSON string. - pub fn jsonDecode( - input: []const u8, - options: std.json.ParseOptions, - allocator: std.mem.Allocator, - ) !std.json.Parsed(@This()) { - return protobuf.json.decode(@This(), input, options, allocator); - } - - /// Encodes the message to a JSON string. - pub fn jsonEncode( - self: @This(), - options: std.json.Stringify.Options, - allocator: std.mem.Allocator, - ) ![]const u8 { - return protobuf.json.encode(self, options, allocator); - } - - /// This method is used by std.json - /// internally for deserialization. DO NOT RENAME! - pub fn jsonParse( - allocator: std.mem.Allocator, - source: anytype, - options: std.json.ParseOptions, - ) !@This() { - return protobuf.json.parse(@This(), allocator, source, options); - } - - /// This method is used by std.json - /// internally for serialization. DO NOT RENAME! 
- pub fn jsonStringify(self: *const @This(), jws: anytype) !void { - return protobuf.json.stringify(@This(), self, jws); - } -}; - -/// A log record according to OpenTelemetry Log Data Model: -/// https://github.com/open-telemetry/oteps/blob/main/text/logs/0097-log-data-model.md -pub const LogRecord = struct { - time_unix_nano: u64 = 0, - observed_time_unix_nano: u64 = 0, - severity_number: SeverityNumber = @enumFromInt(0), - severity_text: []const u8 = &.{}, - body: ?opentelemetry_proto_common_v1.AnyValue = null, - attributes: std.ArrayListUnmanaged(opentelemetry_proto_common_v1.KeyValue) = .empty, - dropped_attributes_count: u32 = 0, - flags: u32 = 0, - trace_id: []const u8 = &.{}, - span_id: []const u8 = &.{}, - event_name: []const u8 = &.{}, - - pub const _desc_table = .{ - .time_unix_nano = fd(1, .{ .scalar = .fixed64 }), - .observed_time_unix_nano = fd(11, .{ .scalar = .fixed64 }), - .severity_number = fd(2, .@"enum"), - .severity_text = fd(3, .{ .scalar = .string }), - .body = fd(5, .submessage), - .attributes = fd(6, .{ .repeated = .submessage }), - .dropped_attributes_count = fd(7, .{ .scalar = .uint32 }), - .flags = fd(8, .{ .scalar = .fixed32 }), - .trace_id = fd(9, .{ .scalar = .bytes }), - .span_id = fd(10, .{ .scalar = .bytes }), - .event_name = fd(12, .{ .scalar = .string }), - }; - - /// Encodes the message to the writer - /// The allocator is used to generate submessages internally. - /// Hence, an ArenaAllocator is a preferred choice if allocations are a bottleneck. - pub fn encode( - self: @This(), - writer: *std.Io.Writer, - allocator: std.mem.Allocator, - ) (std.Io.Writer.Error || std.mem.Allocator.Error)!void { - return protobuf.encode(writer, allocator, self); - } - - /// Decodes the message from the bytes read from the reader. - pub fn decode( - reader: *std.Io.Reader, - allocator: std.mem.Allocator, - ) (protobuf.DecodingError || std.Io.Reader.Error || std.mem.Allocator.Error)!@This() { - return protobuf.decode(@This(), reader, allocator); - } - - /// Deinitializes and frees the memory associated with the message. - pub fn deinit(self: *@This(), allocator: std.mem.Allocator) void { - return protobuf.deinit(allocator, self); - } - - /// Duplicates the message. - pub fn dupe(self: @This(), allocator: std.mem.Allocator) std.mem.Allocator.Error!@This() { - return protobuf.dupe(@This(), self, allocator); - } - - /// Decodes the message from the JSON string. - pub fn jsonDecode( - input: []const u8, - options: std.json.ParseOptions, - allocator: std.mem.Allocator, - ) !std.json.Parsed(@This()) { - return protobuf.json.decode(@This(), input, options, allocator); - } - - /// Encodes the message to a JSON string. - pub fn jsonEncode( - self: @This(), - options: std.json.Stringify.Options, - allocator: std.mem.Allocator, - ) ![]const u8 { - return protobuf.json.encode(self, options, allocator); - } - - /// This method is used by std.json - /// internally for deserialization. DO NOT RENAME! - pub fn jsonParse( - allocator: std.mem.Allocator, - source: anytype, - options: std.json.ParseOptions, - ) !@This() { - return protobuf.json.parse(@This(), allocator, source, options); - } - - /// This method is used by std.json - /// internally for serialization. DO NOT RENAME! 
- pub fn jsonStringify(self: *const @This(), jws: anytype) !void { - return protobuf.json.stringify(@This(), self, jws); - } -}; diff --git a/src/proto/opentelemetry/proto/metrics/v1.pb.zig b/src/proto/opentelemetry/proto/metrics/v1.pb.zig deleted file mode 100644 index 9d2563e..0000000 --- a/src/proto/opentelemetry/proto/metrics/v1.pb.zig +++ /dev/null @@ -1,1491 +0,0 @@ -// Code generated by protoc-gen-zig -///! package opentelemetry.proto.metrics.v1 -const std = @import("std"); - -const protobuf = @import("protobuf"); -const fd = protobuf.fd; -/// import package opentelemetry.proto.common.v1 -const opentelemetry_proto_common_v1 = @import("../common/v1.pb.zig"); -/// import package opentelemetry.proto.resource.v1 -const opentelemetry_proto_resource_v1 = @import("../resource/v1.pb.zig"); - -/// AggregationTemporality defines how a metric aggregator reports aggregated -/// values. It describes how those values relate to the time interval over -/// which they are aggregated. -pub const AggregationTemporality = enum(i32) { - AGGREGATION_TEMPORALITY_UNSPECIFIED = 0, - AGGREGATION_TEMPORALITY_DELTA = 1, - AGGREGATION_TEMPORALITY_CUMULATIVE = 2, - _, -}; - -/// DataPointFlags is defined as a protobuf 'uint32' type and is to be used as a -/// bit-field representing 32 distinct boolean flags. Each flag defined in this -/// enum is a bit-mask. To test the presence of a single flag in the flags of -/// a data point, for example, use an expression like: -/// -/// (point.flags & DATA_POINT_FLAGS_NO_RECORDED_VALUE_MASK) == DATA_POINT_FLAGS_NO_RECORDED_VALUE_MASK -pub const DataPointFlags = enum(i32) { - DATA_POINT_FLAGS_DO_NOT_USE = 0, - DATA_POINT_FLAGS_NO_RECORDED_VALUE_MASK = 1, - _, -}; - -/// MetricsData represents the metrics data that can be stored in a persistent -/// storage, OR can be embedded by other protocols that transfer OTLP metrics -/// data but do not implement the OTLP protocol. -/// -/// MetricsData -/// └─── ResourceMetrics -/// ├── Resource -/// ├── SchemaURL -/// └── ScopeMetrics -/// ├── Scope -/// ├── SchemaURL -/// └── Metric -/// ├── Name -/// ├── Description -/// ├── Unit -/// └── data -/// ├── Gauge -/// ├── Sum -/// ├── Histogram -/// ├── ExponentialHistogram -/// └── Summary -/// -/// The main difference between this message and collector protocol is that -/// in this message there will not be any "control" or "metadata" specific to -/// OTLP protocol. -/// -/// When new fields are added into this message, the OTLP request MUST be updated -/// as well. -pub const MetricsData = struct { - resource_metrics: std.ArrayListUnmanaged(ResourceMetrics) = .empty, - - pub const _desc_table = .{ - .resource_metrics = fd(1, .{ .repeated = .submessage }), - }; - - /// Encodes the message to the writer - /// The allocator is used to generate submessages internally. - /// Hence, an ArenaAllocator is a preferred choice if allocations are a bottleneck. - pub fn encode( - self: @This(), - writer: *std.Io.Writer, - allocator: std.mem.Allocator, - ) (std.Io.Writer.Error || std.mem.Allocator.Error)!void { - return protobuf.encode(writer, allocator, self); - } - - /// Decodes the message from the bytes read from the reader. - pub fn decode( - reader: *std.Io.Reader, - allocator: std.mem.Allocator, - ) (protobuf.DecodingError || std.Io.Reader.Error || std.mem.Allocator.Error)!@This() { - return protobuf.decode(@This(), reader, allocator); - } - - /// Deinitializes and frees the memory associated with the message. 
- pub fn deinit(self: *@This(), allocator: std.mem.Allocator) void { - return protobuf.deinit(allocator, self); - } - - /// Duplicates the message. - pub fn dupe(self: @This(), allocator: std.mem.Allocator) std.mem.Allocator.Error!@This() { - return protobuf.dupe(@This(), self, allocator); - } - - /// Decodes the message from the JSON string. - pub fn jsonDecode( - input: []const u8, - options: std.json.ParseOptions, - allocator: std.mem.Allocator, - ) !std.json.Parsed(@This()) { - return protobuf.json.decode(@This(), input, options, allocator); - } - - /// Encodes the message to a JSON string. - pub fn jsonEncode( - self: @This(), - options: std.json.Stringify.Options, - allocator: std.mem.Allocator, - ) ![]const u8 { - return protobuf.json.encode(self, options, allocator); - } - - /// This method is used by std.json - /// internally for deserialization. DO NOT RENAME! - pub fn jsonParse( - allocator: std.mem.Allocator, - source: anytype, - options: std.json.ParseOptions, - ) !@This() { - return protobuf.json.parse(@This(), allocator, source, options); - } - - /// This method is used by std.json - /// internally for serialization. DO NOT RENAME! - pub fn jsonStringify(self: *const @This(), jws: anytype) !void { - return protobuf.json.stringify(@This(), self, jws); - } -}; - -/// A collection of ScopeMetrics from a Resource. -pub const ResourceMetrics = struct { - resource: ?opentelemetry_proto_resource_v1.Resource = null, - scope_metrics: std.ArrayListUnmanaged(ScopeMetrics) = .empty, - schema_url: []const u8 = &.{}, - - pub const _desc_table = .{ - .resource = fd(1, .submessage), - .scope_metrics = fd(2, .{ .repeated = .submessage }), - .schema_url = fd(3, .{ .scalar = .string }), - }; - - /// Encodes the message to the writer - /// The allocator is used to generate submessages internally. - /// Hence, an ArenaAllocator is a preferred choice if allocations are a bottleneck. - pub fn encode( - self: @This(), - writer: *std.Io.Writer, - allocator: std.mem.Allocator, - ) (std.Io.Writer.Error || std.mem.Allocator.Error)!void { - return protobuf.encode(writer, allocator, self); - } - - /// Decodes the message from the bytes read from the reader. - pub fn decode( - reader: *std.Io.Reader, - allocator: std.mem.Allocator, - ) (protobuf.DecodingError || std.Io.Reader.Error || std.mem.Allocator.Error)!@This() { - return protobuf.decode(@This(), reader, allocator); - } - - /// Deinitializes and frees the memory associated with the message. - pub fn deinit(self: *@This(), allocator: std.mem.Allocator) void { - return protobuf.deinit(allocator, self); - } - - /// Duplicates the message. - pub fn dupe(self: @This(), allocator: std.mem.Allocator) std.mem.Allocator.Error!@This() { - return protobuf.dupe(@This(), self, allocator); - } - - /// Decodes the message from the JSON string. - pub fn jsonDecode( - input: []const u8, - options: std.json.ParseOptions, - allocator: std.mem.Allocator, - ) !std.json.Parsed(@This()) { - return protobuf.json.decode(@This(), input, options, allocator); - } - - /// Encodes the message to a JSON string. - pub fn jsonEncode( - self: @This(), - options: std.json.Stringify.Options, - allocator: std.mem.Allocator, - ) ![]const u8 { - return protobuf.json.encode(self, options, allocator); - } - - /// This method is used by std.json - /// internally for deserialization. DO NOT RENAME! 
-    pub fn jsonParse(
-        allocator: std.mem.Allocator,
-        source: anytype,
-        options: std.json.ParseOptions,
-    ) !@This() {
-        return protobuf.json.parse(@This(), allocator, source, options);
-    }
-
-    /// This method is used by std.json
-    /// internally for serialization. DO NOT RENAME!
-    pub fn jsonStringify(self: *const @This(), jws: anytype) !void {
-        return protobuf.json.stringify(@This(), self, jws);
-    }
-};
-
-/// A collection of Metrics produced by a Scope.
-pub const ScopeMetrics = struct {
-    scope: ?opentelemetry_proto_common_v1.InstrumentationScope = null,
-    metrics: std.ArrayListUnmanaged(Metric) = .empty,
-    schema_url: []const u8 = &.{},
-
-    pub const _desc_table = .{
-        .scope = fd(1, .submessage),
-        .metrics = fd(2, .{ .repeated = .submessage }),
-        .schema_url = fd(3, .{ .scalar = .string }),
-    };
-
-    /// Encodes the message to the writer
-    /// The allocator is used to generate submessages internally.
-    /// Hence, an ArenaAllocator is a preferred choice if allocations are a bottleneck.
-    pub fn encode(
-        self: @This(),
-        writer: *std.Io.Writer,
-        allocator: std.mem.Allocator,
-    ) (std.Io.Writer.Error || std.mem.Allocator.Error)!void {
-        return protobuf.encode(writer, allocator, self);
-    }
-
-    /// Decodes the message from the bytes read from the reader.
-    pub fn decode(
-        reader: *std.Io.Reader,
-        allocator: std.mem.Allocator,
-    ) (protobuf.DecodingError || std.Io.Reader.Error || std.mem.Allocator.Error)!@This() {
-        return protobuf.decode(@This(), reader, allocator);
-    }
-
-    /// Deinitializes and frees the memory associated with the message.
-    pub fn deinit(self: *@This(), allocator: std.mem.Allocator) void {
-        return protobuf.deinit(allocator, self);
-    }
-
-    /// Duplicates the message.
-    pub fn dupe(self: @This(), allocator: std.mem.Allocator) std.mem.Allocator.Error!@This() {
-        return protobuf.dupe(@This(), self, allocator);
-    }
-
-    /// Decodes the message from the JSON string.
-    pub fn jsonDecode(
-        input: []const u8,
-        options: std.json.ParseOptions,
-        allocator: std.mem.Allocator,
-    ) !std.json.Parsed(@This()) {
-        return protobuf.json.decode(@This(), input, options, allocator);
-    }
-
-    /// Encodes the message to a JSON string.
-    pub fn jsonEncode(
-        self: @This(),
-        options: std.json.Stringify.Options,
-        allocator: std.mem.Allocator,
-    ) ![]const u8 {
-        return protobuf.json.encode(self, options, allocator);
-    }
-
-    /// This method is used by std.json
-    /// internally for deserialization. DO NOT RENAME!
-    pub fn jsonParse(
-        allocator: std.mem.Allocator,
-        source: anytype,
-        options: std.json.ParseOptions,
-    ) !@This() {
-        return protobuf.json.parse(@This(), allocator, source, options);
-    }
-
-    /// This method is used by std.json
-    /// internally for serialization. DO NOT RENAME!
-    pub fn jsonStringify(self: *const @This(), jws: anytype) !void {
-        return protobuf.json.stringify(@This(), self, jws);
-    }
-};
-
-/// Defines a Metric which has one or more timeseries. The following is a
-/// brief summary of the Metric data model. For more details, see:
-///
-/// https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/data-model.md
-///
-/// The data model and relation between entities is shown in the
-/// diagram below. Here, "DataPoint" is the term used to refer to any
-/// one of the specific data point value types, and "points" is the term used
-/// to refer to any one of the lists of points contained in the Metric.
-///
-/// - Metric is composed of metadata and data.
-/// - Metadata part contains a name, description, unit.
-/// - Data is one of the possible types (Sum, Gauge, Histogram, Summary).
-/// - DataPoint contains timestamps, attributes, and one of the possible value type
-///   fields.
-///
-/// Metric
-///  +------------+
-///  |name        |
-///  |description |
-///  |unit        |     +------------------------------------+
-///  |data        |---> |Gauge, Sum, Histogram, Summary, ... |
-///  +------------+     +------------------------------------+
-///
-///    Data [One of Gauge, Sum, Histogram, Summary, ...]
-///  +-----------+
-///  |...        |  // Metadata about the Data.
-///  |points     |--+
-///  +-----------+  |
-///                 |      +---------------------------+
-///                 |      |DataPoint 1                |
-///                 v      |+------+------+   +------+ |
-///              +-----+   ||label |label |...|label | |
-///              |  1  |-->||value1|value2|...|valueN| |
-///              +-----+   |+------+------+   +------+ |
-///              |  .  |   |+-----+                    |
-///              |  .  |   ||value|                    |
-///              |  .  |   |+-----+                    |
-///              |  .  |   +---------------------------+
-///              |  .  |                   .
-///              |  .  |                   .
-///              |  .  |                   .
-///              |  .  |   +---------------------------+
-///              |  .  |   |DataPoint M                |
-///              +-----+   |+------+------+   +------+ |
-///              |  M  |-->||label |label |...|label | |
-///              +-----+   ||value1|value2|...|valueN| |
-///                        |+------+------+   +------+ |
-///                        |+-----+                    |
-///                        ||value|                    |
-///                        |+-----+                    |
-///                        +---------------------------+
-///
-/// Each distinct type of DataPoint represents the output of a specific
-/// aggregation function, the result of applying the DataPoint's
-/// associated function to one or more measurements.
-///
-/// All DataPoint types have three common fields:
-/// - Attributes includes key-value pairs associated with the data point
-/// - TimeUnixNano is required, set to the end time of the aggregation
-/// - StartTimeUnixNano is optional, but strongly encouraged for DataPoints
-///   having an AggregationTemporality field, as discussed below.
-///
-/// Both TimeUnixNano and StartTimeUnixNano values are expressed as
-/// UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970.
-///
-/// # TimeUnixNano
-///
-/// This field is required, having consistent interpretation across
-/// DataPoint types. TimeUnixNano is the moment corresponding to when
-/// the data point's aggregate value was captured.
-///
-/// Data points with the 0 value for TimeUnixNano SHOULD be rejected
-/// by consumers.
-///
-/// # StartTimeUnixNano
-///
-/// StartTimeUnixNano in general allows detecting when a sequence of
-/// observations is unbroken. This field indicates to consumers the
-/// start time for points with cumulative and delta
-/// AggregationTemporality, and it should be included whenever possible
-/// to support correct rate calculation. Although it may be omitted
-/// when the start time is truly unknown, setting StartTimeUnixNano is
-/// strongly encouraged.
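The `data` field of the `Metric` struct that follows is generated as a tagged Zig union (`data_union`), so consumers pattern-match on it to reach the concrete data-point lists. A minimal sketch of consuming the API this patch deletes; `countDataPoints` is a hypothetical helper, not part of the generated code:

    fn countDataPoints(metric: Metric) usize {
        // The oneof may be unset on the wire; treat that as zero points.
        const data = metric.data orelse return 0;
        return switch (data) {
            .gauge => |g| g.data_points.items.len,
            .sum => |s| s.data_points.items.len,
            .histogram => |h| h.data_points.items.len,
            .exponential_histogram => |eh| eh.data_points.items.len,
            .summary => |s| s.data_points.items.len,
        };
    }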
-pub const Metric = struct { - name: []const u8 = &.{}, - description: []const u8 = &.{}, - unit: []const u8 = &.{}, - metadata: std.ArrayListUnmanaged(opentelemetry_proto_common_v1.KeyValue) = .empty, - data: ?data_union = null, - - pub const _data_case = enum { - gauge, - sum, - histogram, - exponential_histogram, - summary, - }; - pub const data_union = union(_data_case) { - gauge: Gauge, - sum: Sum, - histogram: Histogram, - exponential_histogram: ExponentialHistogram, - summary: Summary, - pub const _desc_table = .{ - .gauge = fd(5, .submessage), - .sum = fd(7, .submessage), - .histogram = fd(9, .submessage), - .exponential_histogram = fd(10, .submessage), - .summary = fd(11, .submessage), - }; - }; - - pub const _desc_table = .{ - .name = fd(1, .{ .scalar = .string }), - .description = fd(2, .{ .scalar = .string }), - .unit = fd(3, .{ .scalar = .string }), - .metadata = fd(12, .{ .repeated = .submessage }), - .data = fd(null, .{ .oneof = data_union }), - }; - - /// Encodes the message to the writer - /// The allocator is used to generate submessages internally. - /// Hence, an ArenaAllocator is a preferred choice if allocations are a bottleneck. - pub fn encode( - self: @This(), - writer: *std.Io.Writer, - allocator: std.mem.Allocator, - ) (std.Io.Writer.Error || std.mem.Allocator.Error)!void { - return protobuf.encode(writer, allocator, self); - } - - /// Decodes the message from the bytes read from the reader. - pub fn decode( - reader: *std.Io.Reader, - allocator: std.mem.Allocator, - ) (protobuf.DecodingError || std.Io.Reader.Error || std.mem.Allocator.Error)!@This() { - return protobuf.decode(@This(), reader, allocator); - } - - /// Deinitializes and frees the memory associated with the message. - pub fn deinit(self: *@This(), allocator: std.mem.Allocator) void { - return protobuf.deinit(allocator, self); - } - - /// Duplicates the message. - pub fn dupe(self: @This(), allocator: std.mem.Allocator) std.mem.Allocator.Error!@This() { - return protobuf.dupe(@This(), self, allocator); - } - - /// Decodes the message from the JSON string. - pub fn jsonDecode( - input: []const u8, - options: std.json.ParseOptions, - allocator: std.mem.Allocator, - ) !std.json.Parsed(@This()) { - return protobuf.json.decode(@This(), input, options, allocator); - } - - /// Encodes the message to a JSON string. - pub fn jsonEncode( - self: @This(), - options: std.json.Stringify.Options, - allocator: std.mem.Allocator, - ) ![]const u8 { - return protobuf.json.encode(self, options, allocator); - } - - /// This method is used by std.json - /// internally for deserialization. DO NOT RENAME! - pub fn jsonParse( - allocator: std.mem.Allocator, - source: anytype, - options: std.json.ParseOptions, - ) !@This() { - return protobuf.json.parse(@This(), allocator, source, options); - } - - /// This method is used by std.json - /// internally for serialization. DO NOT RENAME! - pub fn jsonStringify(self: *const @This(), jws: anytype) !void { - return protobuf.json.stringify(@This(), self, jws); - } -}; - -/// Gauge represents the type of a scalar metric that always exports the -/// "current value" for every data point. It should be used for an "unknown" -/// aggregation. -/// -/// A Gauge does not support different aggregation temporalities. Given the -/// aggregation is unknown, points cannot be combined using the same -/// aggregation, regardless of aggregation temporalities. Therefore, -/// AggregationTemporality is not included. 
Consequently, this also means -/// "StartTimeUnixNano" is ignored for all data points. -pub const Gauge = struct { - data_points: std.ArrayListUnmanaged(NumberDataPoint) = .empty, - - pub const _desc_table = .{ - .data_points = fd(1, .{ .repeated = .submessage }), - }; - - /// Encodes the message to the writer - /// The allocator is used to generate submessages internally. - /// Hence, an ArenaAllocator is a preferred choice if allocations are a bottleneck. - pub fn encode( - self: @This(), - writer: *std.Io.Writer, - allocator: std.mem.Allocator, - ) (std.Io.Writer.Error || std.mem.Allocator.Error)!void { - return protobuf.encode(writer, allocator, self); - } - - /// Decodes the message from the bytes read from the reader. - pub fn decode( - reader: *std.Io.Reader, - allocator: std.mem.Allocator, - ) (protobuf.DecodingError || std.Io.Reader.Error || std.mem.Allocator.Error)!@This() { - return protobuf.decode(@This(), reader, allocator); - } - - /// Deinitializes and frees the memory associated with the message. - pub fn deinit(self: *@This(), allocator: std.mem.Allocator) void { - return protobuf.deinit(allocator, self); - } - - /// Duplicates the message. - pub fn dupe(self: @This(), allocator: std.mem.Allocator) std.mem.Allocator.Error!@This() { - return protobuf.dupe(@This(), self, allocator); - } - - /// Decodes the message from the JSON string. - pub fn jsonDecode( - input: []const u8, - options: std.json.ParseOptions, - allocator: std.mem.Allocator, - ) !std.json.Parsed(@This()) { - return protobuf.json.decode(@This(), input, options, allocator); - } - - /// Encodes the message to a JSON string. - pub fn jsonEncode( - self: @This(), - options: std.json.Stringify.Options, - allocator: std.mem.Allocator, - ) ![]const u8 { - return protobuf.json.encode(self, options, allocator); - } - - /// This method is used by std.json - /// internally for deserialization. DO NOT RENAME! - pub fn jsonParse( - allocator: std.mem.Allocator, - source: anytype, - options: std.json.ParseOptions, - ) !@This() { - return protobuf.json.parse(@This(), allocator, source, options); - } - - /// This method is used by std.json - /// internally for serialization. DO NOT RENAME! - pub fn jsonStringify(self: *const @This(), jws: anytype) !void { - return protobuf.json.stringify(@This(), self, jws); - } -}; - -/// Sum represents the type of a scalar metric that is calculated as a sum of all -/// reported measurements over a time interval. -pub const Sum = struct { - data_points: std.ArrayListUnmanaged(NumberDataPoint) = .empty, - aggregation_temporality: AggregationTemporality = @enumFromInt(0), - is_monotonic: bool = false, - - pub const _desc_table = .{ - .data_points = fd(1, .{ .repeated = .submessage }), - .aggregation_temporality = fd(2, .@"enum"), - .is_monotonic = fd(3, .{ .scalar = .bool }), - }; - - /// Encodes the message to the writer - /// The allocator is used to generate submessages internally. - /// Hence, an ArenaAllocator is a preferred choice if allocations are a bottleneck. - pub fn encode( - self: @This(), - writer: *std.Io.Writer, - allocator: std.mem.Allocator, - ) (std.Io.Writer.Error || std.mem.Allocator.Error)!void { - return protobuf.encode(writer, allocator, self); - } - - /// Decodes the message from the bytes read from the reader. 
- pub fn decode( - reader: *std.Io.Reader, - allocator: std.mem.Allocator, - ) (protobuf.DecodingError || std.Io.Reader.Error || std.mem.Allocator.Error)!@This() { - return protobuf.decode(@This(), reader, allocator); - } - - /// Deinitializes and frees the memory associated with the message. - pub fn deinit(self: *@This(), allocator: std.mem.Allocator) void { - return protobuf.deinit(allocator, self); - } - - /// Duplicates the message. - pub fn dupe(self: @This(), allocator: std.mem.Allocator) std.mem.Allocator.Error!@This() { - return protobuf.dupe(@This(), self, allocator); - } - - /// Decodes the message from the JSON string. - pub fn jsonDecode( - input: []const u8, - options: std.json.ParseOptions, - allocator: std.mem.Allocator, - ) !std.json.Parsed(@This()) { - return protobuf.json.decode(@This(), input, options, allocator); - } - - /// Encodes the message to a JSON string. - pub fn jsonEncode( - self: @This(), - options: std.json.Stringify.Options, - allocator: std.mem.Allocator, - ) ![]const u8 { - return protobuf.json.encode(self, options, allocator); - } - - /// This method is used by std.json - /// internally for deserialization. DO NOT RENAME! - pub fn jsonParse( - allocator: std.mem.Allocator, - source: anytype, - options: std.json.ParseOptions, - ) !@This() { - return protobuf.json.parse(@This(), allocator, source, options); - } - - /// This method is used by std.json - /// internally for serialization. DO NOT RENAME! - pub fn jsonStringify(self: *const @This(), jws: anytype) !void { - return protobuf.json.stringify(@This(), self, jws); - } -}; - -/// Histogram represents the type of a metric that is calculated by aggregating -/// as a Histogram of all reported measurements over a time interval. -pub const Histogram = struct { - data_points: std.ArrayListUnmanaged(HistogramDataPoint) = .empty, - aggregation_temporality: AggregationTemporality = @enumFromInt(0), - - pub const _desc_table = .{ - .data_points = fd(1, .{ .repeated = .submessage }), - .aggregation_temporality = fd(2, .@"enum"), - }; - - /// Encodes the message to the writer - /// The allocator is used to generate submessages internally. - /// Hence, an ArenaAllocator is a preferred choice if allocations are a bottleneck. - pub fn encode( - self: @This(), - writer: *std.Io.Writer, - allocator: std.mem.Allocator, - ) (std.Io.Writer.Error || std.mem.Allocator.Error)!void { - return protobuf.encode(writer, allocator, self); - } - - /// Decodes the message from the bytes read from the reader. - pub fn decode( - reader: *std.Io.Reader, - allocator: std.mem.Allocator, - ) (protobuf.DecodingError || std.Io.Reader.Error || std.mem.Allocator.Error)!@This() { - return protobuf.decode(@This(), reader, allocator); - } - - /// Deinitializes and frees the memory associated with the message. - pub fn deinit(self: *@This(), allocator: std.mem.Allocator) void { - return protobuf.deinit(allocator, self); - } - - /// Duplicates the message. - pub fn dupe(self: @This(), allocator: std.mem.Allocator) std.mem.Allocator.Error!@This() { - return protobuf.dupe(@This(), self, allocator); - } - - /// Decodes the message from the JSON string. - pub fn jsonDecode( - input: []const u8, - options: std.json.ParseOptions, - allocator: std.mem.Allocator, - ) !std.json.Parsed(@This()) { - return protobuf.json.decode(@This(), input, options, allocator); - } - - /// Encodes the message to a JSON string. 
-    pub fn jsonEncode(
-        self: @This(),
-        options: std.json.Stringify.Options,
-        allocator: std.mem.Allocator,
-    ) ![]const u8 {
-        return protobuf.json.encode(self, options, allocator);
-    }
-
-    /// This method is used by std.json
-    /// internally for deserialization. DO NOT RENAME!
-    pub fn jsonParse(
-        allocator: std.mem.Allocator,
-        source: anytype,
-        options: std.json.ParseOptions,
-    ) !@This() {
-        return protobuf.json.parse(@This(), allocator, source, options);
-    }
-
-    /// This method is used by std.json
-    /// internally for serialization. DO NOT RENAME!
-    pub fn jsonStringify(self: *const @This(), jws: anytype) !void {
-        return protobuf.json.stringify(@This(), self, jws);
-    }
-};
-
-/// ExponentialHistogram represents the type of a metric that is calculated by aggregating
-/// as an ExponentialHistogram of all reported double measurements over a time interval.
-pub const ExponentialHistogram = struct {
-    data_points: std.ArrayListUnmanaged(ExponentialHistogramDataPoint) = .empty,
-    aggregation_temporality: AggregationTemporality = @enumFromInt(0),
-
-    pub const _desc_table = .{
-        .data_points = fd(1, .{ .repeated = .submessage }),
-        .aggregation_temporality = fd(2, .@"enum"),
-    };
-
-    /// Encodes the message to the writer
-    /// The allocator is used to generate submessages internally.
-    /// Hence, an ArenaAllocator is a preferred choice if allocations are a bottleneck.
-    pub fn encode(
-        self: @This(),
-        writer: *std.Io.Writer,
-        allocator: std.mem.Allocator,
-    ) (std.Io.Writer.Error || std.mem.Allocator.Error)!void {
-        return protobuf.encode(writer, allocator, self);
-    }
-
-    /// Decodes the message from the bytes read from the reader.
-    pub fn decode(
-        reader: *std.Io.Reader,
-        allocator: std.mem.Allocator,
-    ) (protobuf.DecodingError || std.Io.Reader.Error || std.mem.Allocator.Error)!@This() {
-        return protobuf.decode(@This(), reader, allocator);
-    }
-
-    /// Deinitializes and frees the memory associated with the message.
-    pub fn deinit(self: *@This(), allocator: std.mem.Allocator) void {
-        return protobuf.deinit(allocator, self);
-    }
-
-    /// Duplicates the message.
-    pub fn dupe(self: @This(), allocator: std.mem.Allocator) std.mem.Allocator.Error!@This() {
-        return protobuf.dupe(@This(), self, allocator);
-    }
-
-    /// Decodes the message from the JSON string.
-    pub fn jsonDecode(
-        input: []const u8,
-        options: std.json.ParseOptions,
-        allocator: std.mem.Allocator,
-    ) !std.json.Parsed(@This()) {
-        return protobuf.json.decode(@This(), input, options, allocator);
-    }
-
-    /// Encodes the message to a JSON string.
-    pub fn jsonEncode(
-        self: @This(),
-        options: std.json.Stringify.Options,
-        allocator: std.mem.Allocator,
-    ) ![]const u8 {
-        return protobuf.json.encode(self, options, allocator);
-    }
-
-    /// This method is used by std.json
-    /// internally for deserialization. DO NOT RENAME!
-    pub fn jsonParse(
-        allocator: std.mem.Allocator,
-        source: anytype,
-        options: std.json.ParseOptions,
-    ) !@This() {
-        return protobuf.json.parse(@This(), allocator, source, options);
-    }
-
-    /// This method is used by std.json
-    /// internally for serialization. DO NOT RENAME!
- pub fn jsonStringify(self: *const @This(), jws: anytype) !void { - return protobuf.json.stringify(@This(), self, jws); - } -}; - -/// Summary metric data are used to convey quantile summaries, -/// a Prometheus (see: https://prometheus.io/docs/concepts/metric_types/#summary) -/// and OpenMetrics (see: https://github.com/prometheus/OpenMetrics/blob/4dbf6075567ab43296eed941037c12951faafb92/protos/prometheus.proto#L45) -/// data type. These data points cannot always be merged in a meaningful way. -/// While they can be useful in some applications, histogram data points are -/// recommended for new applications. -/// Summary metrics do not have an aggregation temporality field. This is -/// because the count and sum fields of a SummaryDataPoint are assumed to be -/// cumulative values. -pub const Summary = struct { - data_points: std.ArrayListUnmanaged(SummaryDataPoint) = .empty, - - pub const _desc_table = .{ - .data_points = fd(1, .{ .repeated = .submessage }), - }; - - /// Encodes the message to the writer - /// The allocator is used to generate submessages internally. - /// Hence, an ArenaAllocator is a preferred choice if allocations are a bottleneck. - pub fn encode( - self: @This(), - writer: *std.Io.Writer, - allocator: std.mem.Allocator, - ) (std.Io.Writer.Error || std.mem.Allocator.Error)!void { - return protobuf.encode(writer, allocator, self); - } - - /// Decodes the message from the bytes read from the reader. - pub fn decode( - reader: *std.Io.Reader, - allocator: std.mem.Allocator, - ) (protobuf.DecodingError || std.Io.Reader.Error || std.mem.Allocator.Error)!@This() { - return protobuf.decode(@This(), reader, allocator); - } - - /// Deinitializes and frees the memory associated with the message. - pub fn deinit(self: *@This(), allocator: std.mem.Allocator) void { - return protobuf.deinit(allocator, self); - } - - /// Duplicates the message. - pub fn dupe(self: @This(), allocator: std.mem.Allocator) std.mem.Allocator.Error!@This() { - return protobuf.dupe(@This(), self, allocator); - } - - /// Decodes the message from the JSON string. - pub fn jsonDecode( - input: []const u8, - options: std.json.ParseOptions, - allocator: std.mem.Allocator, - ) !std.json.Parsed(@This()) { - return protobuf.json.decode(@This(), input, options, allocator); - } - - /// Encodes the message to a JSON string. - pub fn jsonEncode( - self: @This(), - options: std.json.Stringify.Options, - allocator: std.mem.Allocator, - ) ![]const u8 { - return protobuf.json.encode(self, options, allocator); - } - - /// This method is used by std.json - /// internally for deserialization. DO NOT RENAME! - pub fn jsonParse( - allocator: std.mem.Allocator, - source: anytype, - options: std.json.ParseOptions, - ) !@This() { - return protobuf.json.parse(@This(), allocator, source, options); - } - - /// This method is used by std.json - /// internally for serialization. DO NOT RENAME! - pub fn jsonStringify(self: *const @This(), jws: anytype) !void { - return protobuf.json.stringify(@This(), self, jws); - } -}; - -/// NumberDataPoint is a single data point in a timeseries that describes the -/// time-varying scalar value of a metric. 
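The `NumberDataPoint` struct defined just below carries its scalar in a two-armed oneof (`as_double` / `as_int`). A minimal sketch of normalizing it for consumers that want a single numeric type; `valueAsF64` is a hypothetical helper, not part of the generated code:

    fn valueAsF64(point: NumberDataPoint) ?f64 {
        // An unset oneof means the point recorded no value.
        const v = point.value orelse return null;
        const out: f64 = switch (v) {
            .as_double => |d| d,
            .as_int => |i| @floatFromInt(i),
        };
        return out;
    }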
-pub const NumberDataPoint = struct { - attributes: std.ArrayListUnmanaged(opentelemetry_proto_common_v1.KeyValue) = .empty, - start_time_unix_nano: u64 = 0, - time_unix_nano: u64 = 0, - exemplars: std.ArrayListUnmanaged(Exemplar) = .empty, - flags: u32 = 0, - value: ?value_union = null, - - pub const _value_case = enum { - as_double, - as_int, - }; - pub const value_union = union(_value_case) { - as_double: f64, - as_int: i64, - pub const _desc_table = .{ - .as_double = fd(4, .{ .scalar = .double }), - .as_int = fd(6, .{ .scalar = .sfixed64 }), - }; - }; - - pub const _desc_table = .{ - .attributes = fd(7, .{ .repeated = .submessage }), - .start_time_unix_nano = fd(2, .{ .scalar = .fixed64 }), - .time_unix_nano = fd(3, .{ .scalar = .fixed64 }), - .exemplars = fd(5, .{ .repeated = .submessage }), - .flags = fd(8, .{ .scalar = .uint32 }), - .value = fd(null, .{ .oneof = value_union }), - }; - - /// Encodes the message to the writer - /// The allocator is used to generate submessages internally. - /// Hence, an ArenaAllocator is a preferred choice if allocations are a bottleneck. - pub fn encode( - self: @This(), - writer: *std.Io.Writer, - allocator: std.mem.Allocator, - ) (std.Io.Writer.Error || std.mem.Allocator.Error)!void { - return protobuf.encode(writer, allocator, self); - } - - /// Decodes the message from the bytes read from the reader. - pub fn decode( - reader: *std.Io.Reader, - allocator: std.mem.Allocator, - ) (protobuf.DecodingError || std.Io.Reader.Error || std.mem.Allocator.Error)!@This() { - return protobuf.decode(@This(), reader, allocator); - } - - /// Deinitializes and frees the memory associated with the message. - pub fn deinit(self: *@This(), allocator: std.mem.Allocator) void { - return protobuf.deinit(allocator, self); - } - - /// Duplicates the message. - pub fn dupe(self: @This(), allocator: std.mem.Allocator) std.mem.Allocator.Error!@This() { - return protobuf.dupe(@This(), self, allocator); - } - - /// Decodes the message from the JSON string. - pub fn jsonDecode( - input: []const u8, - options: std.json.ParseOptions, - allocator: std.mem.Allocator, - ) !std.json.Parsed(@This()) { - return protobuf.json.decode(@This(), input, options, allocator); - } - - /// Encodes the message to a JSON string. - pub fn jsonEncode( - self: @This(), - options: std.json.Stringify.Options, - allocator: std.mem.Allocator, - ) ![]const u8 { - return protobuf.json.encode(self, options, allocator); - } - - /// This method is used by std.json - /// internally for deserialization. DO NOT RENAME! - pub fn jsonParse( - allocator: std.mem.Allocator, - source: anytype, - options: std.json.ParseOptions, - ) !@This() { - return protobuf.json.parse(@This(), allocator, source, options); - } - - /// This method is used by std.json - /// internally for serialization. DO NOT RENAME! - pub fn jsonStringify(self: *const @This(), jws: anytype) !void { - return protobuf.json.stringify(@This(), self, jws); - } -}; - -/// HistogramDataPoint is a single data point in a timeseries that describes the -/// time-varying values of a Histogram. A Histogram contains summary statistics -/// for a population of values, it may optionally contain the distribution of -/// those values across a set of buckets. -/// -/// If the histogram contains the distribution of values, then both -/// "explicit_bounds" and "bucket counts" fields must be defined. 
-/// If the histogram does not contain the distribution of values, then both
-/// "explicit_bounds" and "bucket_counts" must be omitted and only "count" and
-/// "sum" are known.
-pub const HistogramDataPoint = struct {
-    attributes: std.ArrayListUnmanaged(opentelemetry_proto_common_v1.KeyValue) = .empty,
-    start_time_unix_nano: u64 = 0,
-    time_unix_nano: u64 = 0,
-    count: u64 = 0,
-    sum: ?f64 = null,
-    bucket_counts: std.ArrayListUnmanaged(u64) = .empty,
-    explicit_bounds: std.ArrayListUnmanaged(f64) = .empty,
-    exemplars: std.ArrayListUnmanaged(Exemplar) = .empty,
-    flags: u32 = 0,
-    min: ?f64 = null,
-    max: ?f64 = null,
-
-    pub const _desc_table = .{
-        .attributes = fd(9, .{ .repeated = .submessage }),
-        .start_time_unix_nano = fd(2, .{ .scalar = .fixed64 }),
-        .time_unix_nano = fd(3, .{ .scalar = .fixed64 }),
-        .count = fd(4, .{ .scalar = .fixed64 }),
-        .sum = fd(5, .{ .scalar = .double }),
-        .bucket_counts = fd(6, .{ .packed_repeated = .{ .scalar = .fixed64 } }),
-        .explicit_bounds = fd(7, .{ .packed_repeated = .{ .scalar = .double } }),
-        .exemplars = fd(8, .{ .repeated = .submessage }),
-        .flags = fd(10, .{ .scalar = .uint32 }),
-        .min = fd(11, .{ .scalar = .double }),
-        .max = fd(12, .{ .scalar = .double }),
-    };
-
-    /// Encodes the message to the writer
-    /// The allocator is used to generate submessages internally.
-    /// Hence, an ArenaAllocator is a preferred choice if allocations are a bottleneck.
-    pub fn encode(
-        self: @This(),
-        writer: *std.Io.Writer,
-        allocator: std.mem.Allocator,
-    ) (std.Io.Writer.Error || std.mem.Allocator.Error)!void {
-        return protobuf.encode(writer, allocator, self);
-    }
-
-    /// Decodes the message from the bytes read from the reader.
-    pub fn decode(
-        reader: *std.Io.Reader,
-        allocator: std.mem.Allocator,
-    ) (protobuf.DecodingError || std.Io.Reader.Error || std.mem.Allocator.Error)!@This() {
-        return protobuf.decode(@This(), reader, allocator);
-    }
-
-    /// Deinitializes and frees the memory associated with the message.
-    pub fn deinit(self: *@This(), allocator: std.mem.Allocator) void {
-        return protobuf.deinit(allocator, self);
-    }
-
-    /// Duplicates the message.
-    pub fn dupe(self: @This(), allocator: std.mem.Allocator) std.mem.Allocator.Error!@This() {
-        return protobuf.dupe(@This(), self, allocator);
-    }
-
-    /// Decodes the message from the JSON string.
-    pub fn jsonDecode(
-        input: []const u8,
-        options: std.json.ParseOptions,
-        allocator: std.mem.Allocator,
-    ) !std.json.Parsed(@This()) {
-        return protobuf.json.decode(@This(), input, options, allocator);
-    }
-
-    /// Encodes the message to a JSON string.
-    pub fn jsonEncode(
-        self: @This(),
-        options: std.json.Stringify.Options,
-        allocator: std.mem.Allocator,
-    ) ![]const u8 {
-        return protobuf.json.encode(self, options, allocator);
-    }
-
-    /// This method is used by std.json
-    /// internally for deserialization. DO NOT RENAME!
-    pub fn jsonParse(
-        allocator: std.mem.Allocator,
-        source: anytype,
-        options: std.json.ParseOptions,
-    ) !@This() {
-        return protobuf.json.parse(@This(), allocator, source, options);
-    }
-
-    /// This method is used by std.json
-    /// internally for serialization. DO NOT RENAME!
-    pub fn jsonStringify(self: *const @This(), jws: anytype) !void {
-        return protobuf.json.stringify(@This(), self, jws);
-    }
-};
-
-/// ExponentialHistogramDataPoint is a single data point in a timeseries that describes the
-/// time-varying values of an ExponentialHistogram of double values. An ExponentialHistogram contains
-/// summary statistics for a population of values, it may optionally contain the
-/// distribution of those values across a set of buckets.
-pub const ExponentialHistogramDataPoint = struct {
-    attributes: std.ArrayListUnmanaged(opentelemetry_proto_common_v1.KeyValue) = .empty,
-    start_time_unix_nano: u64 = 0,
-    time_unix_nano: u64 = 0,
-    count: u64 = 0,
-    sum: ?f64 = null,
-    scale: i32 = 0,
-    zero_count: u64 = 0,
-    positive: ?ExponentialHistogramDataPoint.Buckets = null,
-    negative: ?ExponentialHistogramDataPoint.Buckets = null,
-    flags: u32 = 0,
-    exemplars: std.ArrayListUnmanaged(Exemplar) = .empty,
-    min: ?f64 = null,
-    max: ?f64 = null,
-    zero_threshold: f64 = 0,
-
-    pub const _desc_table = .{
-        .attributes = fd(1, .{ .repeated = .submessage }),
-        .start_time_unix_nano = fd(2, .{ .scalar = .fixed64 }),
-        .time_unix_nano = fd(3, .{ .scalar = .fixed64 }),
-        .count = fd(4, .{ .scalar = .fixed64 }),
-        .sum = fd(5, .{ .scalar = .double }),
-        .scale = fd(6, .{ .scalar = .sint32 }),
-        .zero_count = fd(7, .{ .scalar = .fixed64 }),
-        .positive = fd(8, .submessage),
-        .negative = fd(9, .submessage),
-        .flags = fd(10, .{ .scalar = .uint32 }),
-        .exemplars = fd(11, .{ .repeated = .submessage }),
-        .min = fd(12, .{ .scalar = .double }),
-        .max = fd(13, .{ .scalar = .double }),
-        .zero_threshold = fd(14, .{ .scalar = .double }),
-    };
-
-    /// Buckets are a set of bucket counts, encoded in a contiguous array
-    /// of counts.
-    pub const Buckets = struct {
-        offset: i32 = 0,
-        bucket_counts: std.ArrayListUnmanaged(u64) = .empty,
-
-        pub const _desc_table = .{
-            .offset = fd(1, .{ .scalar = .sint32 }),
-            .bucket_counts = fd(2, .{ .packed_repeated = .{ .scalar = .uint64 } }),
-        };
-
-        /// Encodes the message to the writer
-        /// The allocator is used to generate submessages internally.
-        /// Hence, an ArenaAllocator is a preferred choice if allocations are a bottleneck.
-        pub fn encode(
-            self: @This(),
-            writer: *std.Io.Writer,
-            allocator: std.mem.Allocator,
-        ) (std.Io.Writer.Error || std.mem.Allocator.Error)!void {
-            return protobuf.encode(writer, allocator, self);
-        }
-
-        /// Decodes the message from the bytes read from the reader.
-        pub fn decode(
-            reader: *std.Io.Reader,
-            allocator: std.mem.Allocator,
-        ) (protobuf.DecodingError || std.Io.Reader.Error || std.mem.Allocator.Error)!@This() {
-            return protobuf.decode(@This(), reader, allocator);
-        }
-
-        /// Deinitializes and frees the memory associated with the message.
-        pub fn deinit(self: *@This(), allocator: std.mem.Allocator) void {
-            return protobuf.deinit(allocator, self);
-        }
-
-        /// Duplicates the message.
-        pub fn dupe(self: @This(), allocator: std.mem.Allocator) std.mem.Allocator.Error!@This() {
-            return protobuf.dupe(@This(), self, allocator);
-        }
-
-        /// Decodes the message from the JSON string.
-        pub fn jsonDecode(
-            input: []const u8,
-            options: std.json.ParseOptions,
-            allocator: std.mem.Allocator,
-        ) !std.json.Parsed(@This()) {
-            return protobuf.json.decode(@This(), input, options, allocator);
-        }
-
-        /// Encodes the message to a JSON string.
-        pub fn jsonEncode(
-            self: @This(),
-            options: std.json.Stringify.Options,
-            allocator: std.mem.Allocator,
-        ) ![]const u8 {
-            return protobuf.json.encode(self, options, allocator);
-        }
-
-        /// This method is used by std.json
-        /// internally for deserialization. DO NOT RENAME!
- pub fn jsonParse( - allocator: std.mem.Allocator, - source: anytype, - options: std.json.ParseOptions, - ) !@This() { - return protobuf.json.parse(@This(), allocator, source, options); - } - - /// This method is used by std.json - /// internally for serialization. DO NOT RENAME! - pub fn jsonStringify(self: *const @This(), jws: anytype) !void { - return protobuf.json.stringify(@This(), self, jws); - } - }; - - /// Encodes the message to the writer - /// The allocator is used to generate submessages internally. - /// Hence, an ArenaAllocator is a preferred choice if allocations are a bottleneck. - pub fn encode( - self: @This(), - writer: *std.Io.Writer, - allocator: std.mem.Allocator, - ) (std.Io.Writer.Error || std.mem.Allocator.Error)!void { - return protobuf.encode(writer, allocator, self); - } - - /// Decodes the message from the bytes read from the reader. - pub fn decode( - reader: *std.Io.Reader, - allocator: std.mem.Allocator, - ) (protobuf.DecodingError || std.Io.Reader.Error || std.mem.Allocator.Error)!@This() { - return protobuf.decode(@This(), reader, allocator); - } - - /// Deinitializes and frees the memory associated with the message. - pub fn deinit(self: *@This(), allocator: std.mem.Allocator) void { - return protobuf.deinit(allocator, self); - } - - /// Duplicates the message. - pub fn dupe(self: @This(), allocator: std.mem.Allocator) std.mem.Allocator.Error!@This() { - return protobuf.dupe(@This(), self, allocator); - } - - /// Decodes the message from the JSON string. - pub fn jsonDecode( - input: []const u8, - options: std.json.ParseOptions, - allocator: std.mem.Allocator, - ) !std.json.Parsed(@This()) { - return protobuf.json.decode(@This(), input, options, allocator); - } - - /// Encodes the message to a JSON string. - pub fn jsonEncode( - self: @This(), - options: std.json.Stringify.Options, - allocator: std.mem.Allocator, - ) ![]const u8 { - return protobuf.json.encode(self, options, allocator); - } - - /// This method is used by std.json - /// internally for deserialization. DO NOT RENAME! - pub fn jsonParse( - allocator: std.mem.Allocator, - source: anytype, - options: std.json.ParseOptions, - ) !@This() { - return protobuf.json.parse(@This(), allocator, source, options); - } - - /// This method is used by std.json - /// internally for serialization. DO NOT RENAME! - pub fn jsonStringify(self: *const @This(), jws: anytype) !void { - return protobuf.json.stringify(@This(), self, jws); - } -}; - -/// SummaryDataPoint is a single data point in a timeseries that describes the -/// time-varying values of a Summary metric. The count and sum fields represent -/// cumulative values. -pub const SummaryDataPoint = struct { - attributes: std.ArrayListUnmanaged(opentelemetry_proto_common_v1.KeyValue) = .empty, - start_time_unix_nano: u64 = 0, - time_unix_nano: u64 = 0, - count: u64 = 0, - sum: f64 = 0, - quantile_values: std.ArrayListUnmanaged(SummaryDataPoint.ValueAtQuantile) = .empty, - flags: u32 = 0, - - pub const _desc_table = .{ - .attributes = fd(7, .{ .repeated = .submessage }), - .start_time_unix_nano = fd(2, .{ .scalar = .fixed64 }), - .time_unix_nano = fd(3, .{ .scalar = .fixed64 }), - .count = fd(4, .{ .scalar = .fixed64 }), - .sum = fd(5, .{ .scalar = .double }), - .quantile_values = fd(6, .{ .repeated = .submessage }), - .flags = fd(8, .{ .scalar = .uint32 }), - }; - - /// Represents the value at a given quantile of a distribution. 
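The Buckets layout above (a signed offset plus a contiguous run of counts) is interpreted together with the data point's scale: per the OTLP exponential-histogram convention, positive bucket i covers (base^(offset+i), base^(offset+i+1)] with base = 2^(2^-scale). A small illustrative helper, not part of the removed code:

    const std = @import("std");

    /// Upper boundary of positive bucket `index` for a given `scale` and
    /// bucket-run `offset`, where base = 2^(2^-scale).
    fn bucketUpperBound(scale: i32, offset: i32, index: i32) f64 {
        const exponent = std.math.pow(f64, 2.0, -@as(f64, @floatFromInt(scale)));
        const base = std.math.pow(f64, 2.0, exponent);
        return std.math.pow(f64, base, @as(f64, @floatFromInt(offset + index + 1)));
    }

    test bucketUpperBound {
        // scale 0 gives base 2, so bucket 0 at offset 0 covers (1, 2].
        try std.testing.expectApproxEqAbs(2.0, bucketUpperBound(0, 0, 0), 1e-9);
    }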
- /// - /// To record Min and Max values following conventions are used: - /// - The 1.0 quantile is equivalent to the maximum value observed. - /// - The 0.0 quantile is equivalent to the minimum value observed. - /// - /// See the following issue for more context: - /// https://github.com/open-telemetry/opentelemetry-proto/issues/125 - pub const ValueAtQuantile = struct { - quantile: f64 = 0, - value: f64 = 0, - - pub const _desc_table = .{ - .quantile = fd(1, .{ .scalar = .double }), - .value = fd(2, .{ .scalar = .double }), - }; - - /// Encodes the message to the writer - /// The allocator is used to generate submessages internally. - /// Hence, an ArenaAllocator is a preferred choice if allocations are a bottleneck. - pub fn encode( - self: @This(), - writer: *std.Io.Writer, - allocator: std.mem.Allocator, - ) (std.Io.Writer.Error || std.mem.Allocator.Error)!void { - return protobuf.encode(writer, allocator, self); - } - - /// Decodes the message from the bytes read from the reader. - pub fn decode( - reader: *std.Io.Reader, - allocator: std.mem.Allocator, - ) (protobuf.DecodingError || std.Io.Reader.Error || std.mem.Allocator.Error)!@This() { - return protobuf.decode(@This(), reader, allocator); - } - - /// Deinitializes and frees the memory associated with the message. - pub fn deinit(self: *@This(), allocator: std.mem.Allocator) void { - return protobuf.deinit(allocator, self); - } - - /// Duplicates the message. - pub fn dupe(self: @This(), allocator: std.mem.Allocator) std.mem.Allocator.Error!@This() { - return protobuf.dupe(@This(), self, allocator); - } - - /// Decodes the message from the JSON string. - pub fn jsonDecode( - input: []const u8, - options: std.json.ParseOptions, - allocator: std.mem.Allocator, - ) !std.json.Parsed(@This()) { - return protobuf.json.decode(@This(), input, options, allocator); - } - - /// Encodes the message to a JSON string. - pub fn jsonEncode( - self: @This(), - options: std.json.Stringify.Options, - allocator: std.mem.Allocator, - ) ![]const u8 { - return protobuf.json.encode(self, options, allocator); - } - - /// This method is used by std.json - /// internally for deserialization. DO NOT RENAME! - pub fn jsonParse( - allocator: std.mem.Allocator, - source: anytype, - options: std.json.ParseOptions, - ) !@This() { - return protobuf.json.parse(@This(), allocator, source, options); - } - - /// This method is used by std.json - /// internally for serialization. DO NOT RENAME! - pub fn jsonStringify(self: *const @This(), jws: anytype) !void { - return protobuf.json.stringify(@This(), self, jws); - } - }; - - /// Encodes the message to the writer - /// The allocator is used to generate submessages internally. - /// Hence, an ArenaAllocator is a preferred choice if allocations are a bottleneck. - pub fn encode( - self: @This(), - writer: *std.Io.Writer, - allocator: std.mem.Allocator, - ) (std.Io.Writer.Error || std.mem.Allocator.Error)!void { - return protobuf.encode(writer, allocator, self); - } - - /// Decodes the message from the bytes read from the reader. - pub fn decode( - reader: *std.Io.Reader, - allocator: std.mem.Allocator, - ) (protobuf.DecodingError || std.Io.Reader.Error || std.mem.Allocator.Error)!@This() { - return protobuf.decode(@This(), reader, allocator); - } - - /// Deinitializes and frees the memory associated with the message. - pub fn deinit(self: *@This(), allocator: std.mem.Allocator) void { - return protobuf.deinit(allocator, self); - } - - /// Duplicates the message. 
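Reading that convention back out of a decoded SummaryDataPoint is a linear scan over quantile_values; a sketch against the removed API (import path is this tree's pre-patch layout):

    const metrics = @import("opentelemetry/proto/metrics/v1.pb.zig"); // pre-patch path

    /// Returns the recorded maximum, when the producer followed the
    /// 1.0-quantile convention described above.
    fn summaryMax(dp: metrics.SummaryDataPoint) ?f64 {
        for (dp.quantile_values.items) |q| {
            if (q.quantile == 1.0) return q.value;
        }
        return null;
    }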
- pub fn dupe(self: @This(), allocator: std.mem.Allocator) std.mem.Allocator.Error!@This() { - return protobuf.dupe(@This(), self, allocator); - } - - /// Decodes the message from the JSON string. - pub fn jsonDecode( - input: []const u8, - options: std.json.ParseOptions, - allocator: std.mem.Allocator, - ) !std.json.Parsed(@This()) { - return protobuf.json.decode(@This(), input, options, allocator); - } - - /// Encodes the message to a JSON string. - pub fn jsonEncode( - self: @This(), - options: std.json.Stringify.Options, - allocator: std.mem.Allocator, - ) ![]const u8 { - return protobuf.json.encode(self, options, allocator); - } - - /// This method is used by std.json - /// internally for deserialization. DO NOT RENAME! - pub fn jsonParse( - allocator: std.mem.Allocator, - source: anytype, - options: std.json.ParseOptions, - ) !@This() { - return protobuf.json.parse(@This(), allocator, source, options); - } - - /// This method is used by std.json - /// internally for serialization. DO NOT RENAME! - pub fn jsonStringify(self: *const @This(), jws: anytype) !void { - return protobuf.json.stringify(@This(), self, jws); - } -}; - -/// A representation of an exemplar, which is a sample input measurement. -/// Exemplars also hold information about the environment when the measurement -/// was recorded, for example the span and trace ID of the active span when the -/// exemplar was recorded. -pub const Exemplar = struct { - filtered_attributes: std.ArrayListUnmanaged(opentelemetry_proto_common_v1.KeyValue) = .empty, - time_unix_nano: u64 = 0, - span_id: []const u8 = &.{}, - trace_id: []const u8 = &.{}, - value: ?value_union = null, - - pub const _value_case = enum { - as_double, - as_int, - }; - pub const value_union = union(_value_case) { - as_double: f64, - as_int: i64, - pub const _desc_table = .{ - .as_double = fd(3, .{ .scalar = .double }), - .as_int = fd(6, .{ .scalar = .sfixed64 }), - }; - }; - - pub const _desc_table = .{ - .filtered_attributes = fd(7, .{ .repeated = .submessage }), - .time_unix_nano = fd(2, .{ .scalar = .fixed64 }), - .span_id = fd(4, .{ .scalar = .bytes }), - .trace_id = fd(5, .{ .scalar = .bytes }), - .value = fd(null, .{ .oneof = value_union }), - }; - - /// Encodes the message to the writer - /// The allocator is used to generate submessages internally. - /// Hence, an ArenaAllocator is a preferred choice if allocations are a bottleneck. - pub fn encode( - self: @This(), - writer: *std.Io.Writer, - allocator: std.mem.Allocator, - ) (std.Io.Writer.Error || std.mem.Allocator.Error)!void { - return protobuf.encode(writer, allocator, self); - } - - /// Decodes the message from the bytes read from the reader. - pub fn decode( - reader: *std.Io.Reader, - allocator: std.mem.Allocator, - ) (protobuf.DecodingError || std.Io.Reader.Error || std.mem.Allocator.Error)!@This() { - return protobuf.decode(@This(), reader, allocator); - } - - /// Deinitializes and frees the memory associated with the message. - pub fn deinit(self: *@This(), allocator: std.mem.Allocator) void { - return protobuf.deinit(allocator, self); - } - - /// Duplicates the message. - pub fn dupe(self: @This(), allocator: std.mem.Allocator) std.mem.Allocator.Error!@This() { - return protobuf.dupe(@This(), self, allocator); - } - - /// Decodes the message from the JSON string. 
- pub fn jsonDecode( - input: []const u8, - options: std.json.ParseOptions, - allocator: std.mem.Allocator, - ) !std.json.Parsed(@This()) { - return protobuf.json.decode(@This(), input, options, allocator); - } - - /// Encodes the message to a JSON string. - pub fn jsonEncode( - self: @This(), - options: std.json.Stringify.Options, - allocator: std.mem.Allocator, - ) ![]const u8 { - return protobuf.json.encode(self, options, allocator); - } - - /// This method is used by std.json - /// internally for deserialization. DO NOT RENAME! - pub fn jsonParse( - allocator: std.mem.Allocator, - source: anytype, - options: std.json.ParseOptions, - ) !@This() { - return protobuf.json.parse(@This(), allocator, source, options); - } - - /// This method is used by std.json - /// internally for serialization. DO NOT RENAME! - pub fn jsonStringify(self: *const @This(), jws: anytype) !void { - return protobuf.json.stringify(@This(), self, jws); - } -}; diff --git a/src/proto/opentelemetry/proto/resource/v1.pb.zig b/src/proto/opentelemetry/proto/resource/v1.pb.zig deleted file mode 100644 index e88b5df..0000000 --- a/src/proto/opentelemetry/proto/resource/v1.pb.zig +++ /dev/null @@ -1,84 +0,0 @@ -// Code generated by protoc-gen-zig -///! package opentelemetry.proto.resource.v1 -const std = @import("std"); - -const protobuf = @import("protobuf"); -const fd = protobuf.fd; -/// import package opentelemetry.proto.common.v1 -const opentelemetry_proto_common_v1 = @import("../common/v1.pb.zig"); - -/// Resource information. -pub const Resource = struct { - attributes: std.ArrayListUnmanaged(opentelemetry_proto_common_v1.KeyValue) = .empty, - dropped_attributes_count: u32 = 0, - entity_refs: std.ArrayListUnmanaged(opentelemetry_proto_common_v1.EntityRef) = .empty, - - pub const _desc_table = .{ - .attributes = fd(1, .{ .repeated = .submessage }), - .dropped_attributes_count = fd(2, .{ .scalar = .uint32 }), - .entity_refs = fd(3, .{ .repeated = .submessage }), - }; - - /// Encodes the message to the writer - /// The allocator is used to generate submessages internally. - /// Hence, an ArenaAllocator is a preferred choice if allocations are a bottleneck. - pub fn encode( - self: @This(), - writer: *std.Io.Writer, - allocator: std.mem.Allocator, - ) (std.Io.Writer.Error || std.mem.Allocator.Error)!void { - return protobuf.encode(writer, allocator, self); - } - - /// Decodes the message from the bytes read from the reader. - pub fn decode( - reader: *std.Io.Reader, - allocator: std.mem.Allocator, - ) (protobuf.DecodingError || std.Io.Reader.Error || std.mem.Allocator.Error)!@This() { - return protobuf.decode(@This(), reader, allocator); - } - - /// Deinitializes and frees the memory associated with the message. - pub fn deinit(self: *@This(), allocator: std.mem.Allocator) void { - return protobuf.deinit(allocator, self); - } - - /// Duplicates the message. - pub fn dupe(self: @This(), allocator: std.mem.Allocator) std.mem.Allocator.Error!@This() { - return protobuf.dupe(@This(), self, allocator); - } - - /// Decodes the message from the JSON string. - pub fn jsonDecode( - input: []const u8, - options: std.json.ParseOptions, - allocator: std.mem.Allocator, - ) !std.json.Parsed(@This()) { - return protobuf.json.decode(@This(), input, options, allocator); - } - - /// Encodes the message to a JSON string. 
- pub fn jsonEncode( - self: @This(), - options: std.json.Stringify.Options, - allocator: std.mem.Allocator, - ) ![]const u8 { - return protobuf.json.encode(self, options, allocator); - } - - /// This method is used by std.json - /// internally for deserialization. DO NOT RENAME! - pub fn jsonParse( - allocator: std.mem.Allocator, - source: anytype, - options: std.json.ParseOptions, - ) !@This() { - return protobuf.json.parse(@This(), allocator, source, options); - } - - /// This method is used by std.json - /// internally for serialization. DO NOT RENAME! - pub fn jsonStringify(self: *const @This(), jws: anytype) !void { - return protobuf.json.stringify(@This(), self, jws); - } -}; diff --git a/src/proto/opentelemetry/proto/trace/v1.pb.zig b/src/proto/opentelemetry/proto/trace/v1.pb.zig deleted file mode 100644 index e3e3431..0000000 --- a/src/proto/opentelemetry/proto/trace/v1.pb.zig +++ /dev/null @@ -1,519 +0,0 @@ -// Code generated by protoc-gen-zig -///! package opentelemetry.proto.trace.v1 -const std = @import("std"); - -const protobuf = @import("protobuf"); -const fd = protobuf.fd; -/// import package opentelemetry.proto.common.v1 -const opentelemetry_proto_common_v1 = @import("../common/v1.pb.zig"); -/// import package opentelemetry.proto.resource.v1 -const opentelemetry_proto_resource_v1 = @import("../resource/v1.pb.zig"); - -/// SpanFlags represents constants used to interpret the -/// Span.flags field, which is protobuf 'fixed32' type and is to -/// be used as bit-fields. Each non-zero value defined in this enum is -/// a bit-mask. -pub const SpanFlags = enum(i32) { - SPAN_FLAGS_DO_NOT_USE = 0, - SPAN_FLAGS_TRACE_FLAGS_MASK = 0x000000FF, - SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK = 0x00000100, - SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK = 0x00000200, - _, -}; - -/// TracesData represents the traces data that can be stored in a persistent storage, -/// OR can be embedded by other protocols that transfer OTLP traces data but do -/// not implement the OTLP protocol. 
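Decoding a serialized TracesData payload with the removed API was a matter of wrapping the bytes in a fixed reader (std.Io.Reader.fixed in Zig 0.15) and letting deinit() reclaim the decoded tree; a sketch:

    const std = @import("std");
    const trace = @import("opentelemetry/proto/trace/v1.pb.zig"); // pre-patch path

    /// Decode an OTLP trace payload and count the spans it carries.
    fn countSpans(gpa: std.mem.Allocator, bytes: []const u8) !usize {
        var reader: std.Io.Reader = .fixed(bytes);
        var data = try trace.TracesData.decode(&reader, gpa);
        defer data.deinit(gpa);

        var total: usize = 0;
        for (data.resource_spans.items) |rs| {
            for (rs.scope_spans.items) |ss| total += ss.spans.items.len;
        }
        return total;
    }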
-pub const TracesData = struct { - resource_spans: std.ArrayListUnmanaged(ResourceSpans) = .empty, - - pub const _desc_table = .{ - .resource_spans = fd(1, .{ .repeated = .submessage }), - }; - - pub fn encode( - self: @This(), - writer: *std.Io.Writer, - allocator: std.mem.Allocator, - ) (std.Io.Writer.Error || std.mem.Allocator.Error)!void { - return protobuf.encode(writer, allocator, self); - } - - pub fn decode( - reader: *std.Io.Reader, - allocator: std.mem.Allocator, - ) (protobuf.DecodingError || std.Io.Reader.Error || std.mem.Allocator.Error)!@This() { - return protobuf.decode(@This(), reader, allocator); - } - - pub fn deinit(self: *@This(), allocator: std.mem.Allocator) void { - return protobuf.deinit(allocator, self); - } - - pub fn dupe(self: @This(), allocator: std.mem.Allocator) std.mem.Allocator.Error!@This() { - return protobuf.dupe(@This(), self, allocator); - } - - pub fn jsonDecode( - input: []const u8, - options: std.json.ParseOptions, - allocator: std.mem.Allocator, - ) !std.json.Parsed(@This()) { - return protobuf.json.decode(@This(), input, options, allocator); - } - - pub fn jsonEncode( - self: @This(), - options: std.json.Stringify.Options, - allocator: std.mem.Allocator, - ) ![]const u8 { - return protobuf.json.encode(self, options, allocator); - } - - pub fn jsonParse( - allocator: std.mem.Allocator, - source: anytype, - options: std.json.ParseOptions, - ) !@This() { - return protobuf.json.parse(@This(), allocator, source, options); - } - - pub fn jsonStringify(self: *const @This(), jws: anytype) !void { - return protobuf.json.stringify(@This(), self, jws); - } -}; - -/// A collection of ScopeSpans from a Resource. -pub const ResourceSpans = struct { - resource: ?opentelemetry_proto_resource_v1.Resource = null, - scope_spans: std.ArrayListUnmanaged(ScopeSpans) = .empty, - schema_url: []const u8 = &.{}, - - pub const _desc_table = .{ - .resource = fd(1, .submessage), - .scope_spans = fd(2, .{ .repeated = .submessage }), - .schema_url = fd(3, .{ .scalar = .string }), - }; - - pub fn encode( - self: @This(), - writer: *std.Io.Writer, - allocator: std.mem.Allocator, - ) (std.Io.Writer.Error || std.mem.Allocator.Error)!void { - return protobuf.encode(writer, allocator, self); - } - - pub fn decode( - reader: *std.Io.Reader, - allocator: std.mem.Allocator, - ) (protobuf.DecodingError || std.Io.Reader.Error || std.mem.Allocator.Error)!@This() { - return protobuf.decode(@This(), reader, allocator); - } - - pub fn deinit(self: *@This(), allocator: std.mem.Allocator) void { - return protobuf.deinit(allocator, self); - } - - pub fn dupe(self: @This(), allocator: std.mem.Allocator) std.mem.Allocator.Error!@This() { - return protobuf.dupe(@This(), self, allocator); - } - - pub fn jsonDecode( - input: []const u8, - options: std.json.ParseOptions, - allocator: std.mem.Allocator, - ) !std.json.Parsed(@This()) { - return protobuf.json.decode(@This(), input, options, allocator); - } - - pub fn jsonEncode( - self: @This(), - options: std.json.Stringify.Options, - allocator: std.mem.Allocator, - ) ![]const u8 { - return protobuf.json.encode(self, options, allocator); - } - - pub fn jsonParse( - allocator: std.mem.Allocator, - source: anytype, - options: std.json.ParseOptions, - ) !@This() { - return protobuf.json.parse(@This(), allocator, source, options); - } - - pub fn jsonStringify(self: *const @This(), jws: anytype) !void { - return protobuf.json.stringify(@This(), self, jws); - } -}; - -/// A collection of Spans produced by an InstrumentationScope. 
-pub const ScopeSpans = struct { - scope: ?opentelemetry_proto_common_v1.InstrumentationScope = null, - spans: std.ArrayListUnmanaged(Span) = .empty, - schema_url: []const u8 = &.{}, - - pub const _desc_table = .{ - .scope = fd(1, .submessage), - .spans = fd(2, .{ .repeated = .submessage }), - .schema_url = fd(3, .{ .scalar = .string }), - }; - - pub fn encode( - self: @This(), - writer: *std.Io.Writer, - allocator: std.mem.Allocator, - ) (std.Io.Writer.Error || std.mem.Allocator.Error)!void { - return protobuf.encode(writer, allocator, self); - } - - pub fn decode( - reader: *std.Io.Reader, - allocator: std.mem.Allocator, - ) (protobuf.DecodingError || std.Io.Reader.Error || std.mem.Allocator.Error)!@This() { - return protobuf.decode(@This(), reader, allocator); - } - - pub fn deinit(self: *@This(), allocator: std.mem.Allocator) void { - return protobuf.deinit(allocator, self); - } - - pub fn dupe(self: @This(), allocator: std.mem.Allocator) std.mem.Allocator.Error!@This() { - return protobuf.dupe(@This(), self, allocator); - } - - pub fn jsonDecode( - input: []const u8, - options: std.json.ParseOptions, - allocator: std.mem.Allocator, - ) !std.json.Parsed(@This()) { - return protobuf.json.decode(@This(), input, options, allocator); - } - - pub fn jsonEncode( - self: @This(), - options: std.json.Stringify.Options, - allocator: std.mem.Allocator, - ) ![]const u8 { - return protobuf.json.encode(self, options, allocator); - } - - pub fn jsonParse( - allocator: std.mem.Allocator, - source: anytype, - options: std.json.ParseOptions, - ) !@This() { - return protobuf.json.parse(@This(), allocator, source, options); - } - - pub fn jsonStringify(self: *const @This(), jws: anytype) !void { - return protobuf.json.stringify(@This(), self, jws); - } -}; - -/// SpanKind is the type of span. -pub const SpanKind = enum(i32) { - SPAN_KIND_UNSPECIFIED = 0, - SPAN_KIND_INTERNAL = 1, - SPAN_KIND_SERVER = 2, - SPAN_KIND_CLIENT = 3, - SPAN_KIND_PRODUCER = 4, - SPAN_KIND_CONSUMER = 5, - _, -}; - -/// StatusCode for the Span. -pub const StatusCode = enum(i32) { - STATUS_CODE_UNSET = 0, - STATUS_CODE_OK = 1, - STATUS_CODE_ERROR = 2, - _, -}; - -/// The Status type defines a logical error model. 
-pub const Status = struct { - message: []const u8 = &.{}, - code: StatusCode = @enumFromInt(0), - - pub const _desc_table = .{ - .message = fd(2, .{ .scalar = .string }), - .code = fd(3, .@"enum"), - }; - - pub fn encode( - self: @This(), - writer: *std.Io.Writer, - allocator: std.mem.Allocator, - ) (std.Io.Writer.Error || std.mem.Allocator.Error)!void { - return protobuf.encode(writer, allocator, self); - } - - pub fn decode( - reader: *std.Io.Reader, - allocator: std.mem.Allocator, - ) (protobuf.DecodingError || std.Io.Reader.Error || std.mem.Allocator.Error)!@This() { - return protobuf.decode(@This(), reader, allocator); - } - - pub fn deinit(self: *@This(), allocator: std.mem.Allocator) void { - return protobuf.deinit(allocator, self); - } - - pub fn dupe(self: @This(), allocator: std.mem.Allocator) std.mem.Allocator.Error!@This() { - return protobuf.dupe(@This(), self, allocator); - } - - pub fn jsonDecode( - input: []const u8, - options: std.json.ParseOptions, - allocator: std.mem.Allocator, - ) !std.json.Parsed(@This()) { - return protobuf.json.decode(@This(), input, options, allocator); - } - - pub fn jsonEncode( - self: @This(), - options: std.json.Stringify.Options, - allocator: std.mem.Allocator, - ) ![]const u8 { - return protobuf.json.encode(self, options, allocator); - } - - pub fn jsonParse( - allocator: std.mem.Allocator, - source: anytype, - options: std.json.ParseOptions, - ) !@This() { - return protobuf.json.parse(@This(), allocator, source, options); - } - - pub fn jsonStringify(self: *const @This(), jws: anytype) !void { - return protobuf.json.stringify(@This(), self, jws); - } -}; - -/// Event is a time-stamped annotation of the span. -pub const Event = struct { - time_unix_nano: u64 = 0, - name: []const u8 = &.{}, - attributes: std.ArrayListUnmanaged(opentelemetry_proto_common_v1.KeyValue) = .empty, - dropped_attributes_count: u32 = 0, - - pub const _desc_table = .{ - .time_unix_nano = fd(1, .{ .scalar = .fixed64 }), - .name = fd(2, .{ .scalar = .string }), - .attributes = fd(3, .{ .repeated = .submessage }), - .dropped_attributes_count = fd(4, .{ .scalar = .uint32 }), - }; - - pub fn encode( - self: @This(), - writer: *std.Io.Writer, - allocator: std.mem.Allocator, - ) (std.Io.Writer.Error || std.mem.Allocator.Error)!void { - return protobuf.encode(writer, allocator, self); - } - - pub fn decode( - reader: *std.Io.Reader, - allocator: std.mem.Allocator, - ) (protobuf.DecodingError || std.Io.Reader.Error || std.mem.Allocator.Error)!@This() { - return protobuf.decode(@This(), reader, allocator); - } - - pub fn deinit(self: *@This(), allocator: std.mem.Allocator) void { - return protobuf.deinit(allocator, self); - } - - pub fn dupe(self: @This(), allocator: std.mem.Allocator) std.mem.Allocator.Error!@This() { - return protobuf.dupe(@This(), self, allocator); - } - - pub fn jsonDecode( - input: []const u8, - options: std.json.ParseOptions, - allocator: std.mem.Allocator, - ) !std.json.Parsed(@This()) { - return protobuf.json.decode(@This(), input, options, allocator); - } - - pub fn jsonEncode( - self: @This(), - options: std.json.Stringify.Options, - allocator: std.mem.Allocator, - ) ![]const u8 { - return protobuf.json.encode(self, options, allocator); - } - - pub fn jsonParse( - allocator: std.mem.Allocator, - source: anytype, - options: std.json.ParseOptions, - ) !@This() { - return protobuf.json.parse(@This(), allocator, source, options); - } - - pub fn jsonStringify(self: *const @This(), jws: anytype) !void { - return protobuf.json.stringify(@This(), self, jws); 
- } -}; - -/// Link is a pointer from the current span to another span. -pub const Link = struct { - trace_id: []const u8 = &.{}, - span_id: []const u8 = &.{}, - trace_state: []const u8 = &.{}, - attributes: std.ArrayListUnmanaged(opentelemetry_proto_common_v1.KeyValue) = .empty, - dropped_attributes_count: u32 = 0, - flags: u32 = 0, - - pub const _desc_table = .{ - .trace_id = fd(1, .{ .scalar = .bytes }), - .span_id = fd(2, .{ .scalar = .bytes }), - .trace_state = fd(3, .{ .scalar = .string }), - .attributes = fd(4, .{ .repeated = .submessage }), - .dropped_attributes_count = fd(5, .{ .scalar = .uint32 }), - .flags = fd(6, .{ .scalar = .fixed32 }), - }; - - pub fn encode( - self: @This(), - writer: *std.Io.Writer, - allocator: std.mem.Allocator, - ) (std.Io.Writer.Error || std.mem.Allocator.Error)!void { - return protobuf.encode(writer, allocator, self); - } - - pub fn decode( - reader: *std.Io.Reader, - allocator: std.mem.Allocator, - ) (protobuf.DecodingError || std.Io.Reader.Error || std.mem.Allocator.Error)!@This() { - return protobuf.decode(@This(), reader, allocator); - } - - pub fn deinit(self: *@This(), allocator: std.mem.Allocator) void { - return protobuf.deinit(allocator, self); - } - - pub fn dupe(self: @This(), allocator: std.mem.Allocator) std.mem.Allocator.Error!@This() { - return protobuf.dupe(@This(), self, allocator); - } - - pub fn jsonDecode( - input: []const u8, - options: std.json.ParseOptions, - allocator: std.mem.Allocator, - ) !std.json.Parsed(@This()) { - return protobuf.json.decode(@This(), input, options, allocator); - } - - pub fn jsonEncode( - self: @This(), - options: std.json.Stringify.Options, - allocator: std.mem.Allocator, - ) ![]const u8 { - return protobuf.json.encode(self, options, allocator); - } - - pub fn jsonParse( - allocator: std.mem.Allocator, - source: anytype, - options: std.json.ParseOptions, - ) !@This() { - return protobuf.json.parse(@This(), allocator, source, options); - } - - pub fn jsonStringify(self: *const @This(), jws: anytype) !void { - return protobuf.json.stringify(@This(), self, jws); - } -}; - -/// A Span represents a single operation performed by a single component of the system. 
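The SpanFlags masks declared at the top of this file apply to the fixed32 flags field on Span and Link as plain bit tests; a hedged sketch of the tri-state remote check that the two CONTEXT_* masks encode:

    const trace = @import("opentelemetry/proto/trace/v1.pb.zig"); // pre-patch path

    /// Returns null when the producer never recorded the bit
    /// (HAS_IS_REMOTE unset), otherwise the remote flag itself.
    fn isRemote(flags: u32) ?bool {
        const has: u32 = @intCast(@intFromEnum(trace.SpanFlags.SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK));
        const is_remote: u32 = @intCast(@intFromEnum(trace.SpanFlags.SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK));
        if (flags & has == 0) return null;
        return (flags & is_remote) != 0;
    }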
-pub const Span = struct { - trace_id: []const u8 = &.{}, - span_id: []const u8 = &.{}, - trace_state: []const u8 = &.{}, - parent_span_id: []const u8 = &.{}, - flags: u32 = 0, - name: []const u8 = &.{}, - kind: SpanKind = @enumFromInt(0), - start_time_unix_nano: u64 = 0, - end_time_unix_nano: u64 = 0, - attributes: std.ArrayListUnmanaged(opentelemetry_proto_common_v1.KeyValue) = .empty, - dropped_attributes_count: u32 = 0, - events: std.ArrayListUnmanaged(Event) = .empty, - dropped_events_count: u32 = 0, - links: std.ArrayListUnmanaged(Link) = .empty, - dropped_links_count: u32 = 0, - status: ?Status = null, - - pub const _desc_table = .{ - .trace_id = fd(1, .{ .scalar = .bytes }), - .span_id = fd(2, .{ .scalar = .bytes }), - .trace_state = fd(3, .{ .scalar = .string }), - .parent_span_id = fd(4, .{ .scalar = .bytes }), - .flags = fd(16, .{ .scalar = .fixed32 }), - .name = fd(5, .{ .scalar = .string }), - .kind = fd(6, .@"enum"), - .start_time_unix_nano = fd(7, .{ .scalar = .fixed64 }), - .end_time_unix_nano = fd(8, .{ .scalar = .fixed64 }), - .attributes = fd(9, .{ .repeated = .submessage }), - .dropped_attributes_count = fd(10, .{ .scalar = .uint32 }), - .events = fd(11, .{ .repeated = .submessage }), - .dropped_events_count = fd(12, .{ .scalar = .uint32 }), - .links = fd(13, .{ .repeated = .submessage }), - .dropped_links_count = fd(14, .{ .scalar = .uint32 }), - .status = fd(15, .submessage), - }; - - pub fn encode( - self: @This(), - writer: *std.Io.Writer, - allocator: std.mem.Allocator, - ) (std.Io.Writer.Error || std.mem.Allocator.Error)!void { - return protobuf.encode(writer, allocator, self); - } - - pub fn decode( - reader: *std.Io.Reader, - allocator: std.mem.Allocator, - ) (protobuf.DecodingError || std.Io.Reader.Error || std.mem.Allocator.Error)!@This() { - return protobuf.decode(@This(), reader, allocator); - } - - pub fn deinit(self: *@This(), allocator: std.mem.Allocator) void { - return protobuf.deinit(allocator, self); - } - - pub fn dupe(self: @This(), allocator: std.mem.Allocator) std.mem.Allocator.Error!@This() { - return protobuf.dupe(@This(), self, allocator); - } - - pub fn jsonDecode( - input: []const u8, - options: std.json.ParseOptions, - allocator: std.mem.Allocator, - ) !std.json.Parsed(@This()) { - return protobuf.json.decode(@This(), input, options, allocator); - } - - pub fn jsonEncode( - self: @This(), - options: std.json.Stringify.Options, - allocator: std.mem.Allocator, - ) ![]const u8 { - return protobuf.json.encode(self, options, allocator); - } - - pub fn jsonParse( - allocator: std.mem.Allocator, - source: anytype, - options: std.json.ParseOptions, - ) !@This() { - return protobuf.json.parse(@This(), allocator, source, options); - } - - pub fn jsonStringify(self: *const @This(), jws: anytype) !void { - return protobuf.json.stringify(@This(), self, jws); - } -}; diff --git a/src/proto/root.zig b/src/proto/root.zig deleted file mode 100644 index 8fa348c..0000000 --- a/src/proto/root.zig +++ /dev/null @@ -1,6 +0,0 @@ -pub const policy = @import("tero/policy/v1.pb.zig"); -pub const common = @import("opentelemetry/proto/common/v1.pb.zig"); -pub const logs = @import("opentelemetry/proto/logs/v1.pb.zig"); -pub const metrics = @import("opentelemetry/proto/metrics/v1.pb.zig"); -pub const trace = @import("opentelemetry/proto/trace/v1.pb.zig"); -pub const protobuf = @import("protobuf"); diff --git a/src/proto/tero/policy/v1.pb.zig b/src/proto/tero/policy/v1.pb.zig deleted file mode 100644 index 1cc5dec..0000000 --- a/src/proto/tero/policy/v1.pb.zig +++ 
/dev/null @@ -1,2011 +0,0 @@ -// Code generated by protoc-gen-zig -///! package tero.policy.v1 -const std = @import("std"); - -const protobuf = @import("protobuf"); -const fd = protobuf.fd; -/// import package google.api -const google_api = @import("../../google/api.pb.zig"); -/// import package opentelemetry.proto.common.v1 -const opentelemetry_proto_common_v1 = @import("../../opentelemetry/proto/common/v1.pb.zig"); - -/// AttributePath specifies how to access an attribute value. -/// -/// The path is represented as an array of string segments. Each segment represents -/// a key to traverse into nested maps. -/// -/// Example usage: -/// -/// For an attribute structure like: -/// Attributes: map[string]any{ -/// "http": map[string]any{ -/// "method": "POST", -/// "status_code": 200, -/// }, -/// "user_id": "u123", -/// } -/// -/// - To access "user_id": ["user_id"] -/// - To access http.method: ["http", "method"] -/// -/// YAML/JSON Unmarshaling: -/// -/// Implementations MUST accept both the canonical proto form and shorthand forms -/// for ergonomic policy authoring: -/// -/// Canonical (proto-native): -/// log_attribute: -/// path: ["http", "method"] -/// -/// Shorthand array (MUST be supported): -/// log_attribute: ["http", "method"] -/// -/// Shorthand string (MUST be supported for single-segment paths): -/// log_attribute: "user_id" # equivalent to ["user_id"] -/// -/// When marshaling, implementations SHOULD use the shorthand array form for -/// cleaner output. -pub const AttributePath = struct { - path: std.ArrayListUnmanaged([]const u8) = .empty, - - pub const _desc_table = .{ - .path = fd(1, .{ .repeated = .{ .scalar = .string } }), - }; - - /// Encodes the message to the writer - /// The allocator is used to generate submessages internally. - /// Hence, an ArenaAllocator is a preferred choice if allocations are a bottleneck. - pub fn encode( - self: @This(), - writer: *std.Io.Writer, - allocator: std.mem.Allocator, - ) (std.Io.Writer.Error || std.mem.Allocator.Error)!void { - return protobuf.encode(writer, allocator, self); - } - - /// Decodes the message from the bytes read from the reader. - pub fn decode( - reader: *std.Io.Reader, - allocator: std.mem.Allocator, - ) (protobuf.DecodingError || std.Io.Reader.Error || std.mem.Allocator.Error)!@This() { - return protobuf.decode(@This(), reader, allocator); - } - - /// Deinitializes and frees the memory associated with the message. - pub fn deinit(self: *@This(), allocator: std.mem.Allocator) void { - return protobuf.deinit(allocator, self); - } - - /// Duplicates the message. - /// Custom implementation for AttributePath because protobuf.dupe doesn't handle repeated string fields. - pub fn dupe(self: @This(), allocator: std.mem.Allocator) std.mem.Allocator.Error!@This() { - var result = AttributePath{}; - for (self.path.items) |segment| { - try result.path.append(allocator, try allocator.dupe(u8, segment)); - } - return result; - } - - /// Decodes the message from the JSON string. - pub fn jsonDecode( - input: []const u8, - options: std.json.ParseOptions, - allocator: std.mem.Allocator, - ) !std.json.Parsed(@This()) { - return protobuf.json.decode(@This(), input, options, allocator); - } - - /// Encodes the message to a JSON string. - pub fn jsonEncode( - self: @This(), - options: std.json.Stringify.Options, - allocator: std.mem.Allocator, - ) ![]const u8 { - return protobuf.json.encode(self, options, allocator); - } - - /// This method is used by std.json - /// internally for deserialization. DO NOT RENAME! 
- pub fn jsonParse( - allocator: std.mem.Allocator, - source: anytype, - options: std.json.ParseOptions, - ) !@This() { - return protobuf.json.parse(@This(), allocator, source, options); - } - - /// This method is used by std.json - /// internally for serialization. DO NOT RENAME! - pub fn jsonStringify(self: *const @This(), jws: anytype) !void { - return protobuf.json.stringify(@This(), self, jws); - } -}; - -/// LogField identifies simple log fields (non-keyed). -pub const LogField = enum(i32) { - LOG_FIELD_UNSPECIFIED = 0, - LOG_FIELD_BODY = 1, - LOG_FIELD_SEVERITY_TEXT = 2, - LOG_FIELD_TRACE_ID = 3, - LOG_FIELD_SPAN_ID = 4, - LOG_FIELD_EVENT_NAME = 5, - LOG_FIELD_RESOURCE_SCHEMA_URL = 10, - LOG_FIELD_SCOPE_SCHEMA_URL = 11, - _, -}; - -/// LogTarget defines matching and actions for logs. -pub const LogTarget = struct { - match: std.ArrayListUnmanaged(LogMatcher) = .empty, - keep: []const u8 = &.{}, - transform: ?LogTransform = null, - sample_key: ?LogSampleKey = null, - - pub const _desc_table = .{ - .match = fd(1, .{ .repeated = .submessage }), - .keep = fd(2, .{ .scalar = .string }), - .transform = fd(3, .submessage), - .sample_key = fd(4, .submessage), - }; - - /// Encodes the message to the writer - /// The allocator is used to generate submessages internally. - /// Hence, an ArenaAllocator is a preferred choice if allocations are a bottleneck. - pub fn encode( - self: @This(), - writer: *std.Io.Writer, - allocator: std.mem.Allocator, - ) (std.Io.Writer.Error || std.mem.Allocator.Error)!void { - return protobuf.encode(writer, allocator, self); - } - - /// Decodes the message from the bytes read from the reader. - pub fn decode( - reader: *std.Io.Reader, - allocator: std.mem.Allocator, - ) (protobuf.DecodingError || std.Io.Reader.Error || std.mem.Allocator.Error)!@This() { - return protobuf.decode(@This(), reader, allocator); - } - - /// Deinitializes and frees the memory associated with the message. - pub fn deinit(self: *@This(), allocator: std.mem.Allocator) void { - return protobuf.deinit(allocator, self); - } - - /// Duplicates the message. - pub fn dupe(self: @This(), allocator: std.mem.Allocator) std.mem.Allocator.Error!@This() { - return protobuf.dupe(@This(), self, allocator); - } - - /// Decodes the message from the JSON string. - pub fn jsonDecode( - input: []const u8, - options: std.json.ParseOptions, - allocator: std.mem.Allocator, - ) !std.json.Parsed(@This()) { - return protobuf.json.decode(@This(), input, options, allocator); - } - - /// Encodes the message to a JSON string. - pub fn jsonEncode( - self: @This(), - options: std.json.Stringify.Options, - allocator: std.mem.Allocator, - ) ![]const u8 { - return protobuf.json.encode(self, options, allocator); - } - - /// This method is used by std.json - /// internally for deserialization. DO NOT RENAME! - pub fn jsonParse( - allocator: std.mem.Allocator, - source: anytype, - options: std.json.ParseOptions, - ) !@This() { - return protobuf.json.parse(@This(), allocator, source, options); - } - - /// This method is used by std.json - /// internally for serialization. DO NOT RENAME! - pub fn jsonStringify(self: *const @This(), jws: anytype) !void { - return protobuf.json.stringify(@This(), self, jws); - } -}; - -/// LogSampleKey specifies which field to use as the sampling key for consistent -/// sampling decisions. 
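A sample key pins the sampling decision to one field; built directly in Zig rather than unmarshaled from YAML, a key over the "user_id" attribute from the AttributePath examples above looks like this (sketch against the removed module; the ownership behavior of deinit() over the nested path is an assumption):

    const std = @import("std");
    const pb = @import("tero/policy/v1.pb.zig"); // pre-patch path

    test "sample key over a log attribute" {
        const gpa = std.testing.allocator;
        // Canonical form of the shorthand: sample_key: { log_attribute: "user_id" }
        var user_path = pb.AttributePath{};
        try user_path.path.append(gpa, try gpa.dupe(u8, "user_id"));

        // The key takes ownership of the path via its field oneof, so a
        // single deinit() reclaims the nested segments too.
        var key = pb.LogSampleKey{ .field = .{ .log_attribute = user_path } };
        defer key.deinit(gpa);
    }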
-pub const LogSampleKey = struct { - field: ?field_union = null, - - pub const _field_case = enum { - log_field, - log_attribute, - resource_attribute, - scope_attribute, - }; - pub const field_union = union(_field_case) { - log_field: LogField, - log_attribute: AttributePath, - resource_attribute: AttributePath, - scope_attribute: AttributePath, - pub const _desc_table = .{ - .log_field = fd(1, .@"enum"), - .log_attribute = fd(2, .submessage), - .resource_attribute = fd(3, .submessage), - .scope_attribute = fd(4, .submessage), - }; - }; - - pub const _desc_table = .{ - .field = fd(null, .{ .oneof = field_union }), - }; - - /// Encodes the message to the writer - /// The allocator is used to generate submessages internally. - /// Hence, an ArenaAllocator is a preferred choice if allocations are a bottleneck. - pub fn encode( - self: @This(), - writer: *std.Io.Writer, - allocator: std.mem.Allocator, - ) (std.Io.Writer.Error || std.mem.Allocator.Error)!void { - return protobuf.encode(writer, allocator, self); - } - - /// Decodes the message from the bytes read from the reader. - pub fn decode( - reader: *std.Io.Reader, - allocator: std.mem.Allocator, - ) (protobuf.DecodingError || std.Io.Reader.Error || std.mem.Allocator.Error)!@This() { - return protobuf.decode(@This(), reader, allocator); - } - - /// Deinitializes and frees the memory associated with the message. - pub fn deinit(self: *@This(), allocator: std.mem.Allocator) void { - return protobuf.deinit(allocator, self); - } - - /// Duplicates the message. - pub fn dupe(self: @This(), allocator: std.mem.Allocator) std.mem.Allocator.Error!@This() { - return protobuf.dupe(@This(), self, allocator); - } - - /// Decodes the message from the JSON string. - pub fn jsonDecode( - input: []const u8, - options: std.json.ParseOptions, - allocator: std.mem.Allocator, - ) !std.json.Parsed(@This()) { - return protobuf.json.decode(@This(), input, options, allocator); - } - - /// Encodes the message to a JSON string. - pub fn jsonEncode( - self: @This(), - options: std.json.Stringify.Options, - allocator: std.mem.Allocator, - ) ![]const u8 { - return protobuf.json.encode(self, options, allocator); - } - - /// This method is used by std.json - /// internally for deserialization. DO NOT RENAME! - pub fn jsonParse( - allocator: std.mem.Allocator, - source: anytype, - options: std.json.ParseOptions, - ) !@This() { - return protobuf.json.parse(@This(), allocator, source, options); - } - - /// This method is used by std.json - /// internally for serialization. DO NOT RENAME! - pub fn jsonStringify(self: *const @This(), jws: anytype) !void { - return protobuf.json.stringify(@This(), self, jws); - } -}; - -/// LogMatcher provides a way to match against log telemetry data using known fields. -/// -/// IMPORTANT CONSTRAINTS: -/// - Multiple matchers are ANDed together: all matchers must match for the -/// overall match to succeed. -/// - The list of matchers should uniquely identify a specific pattern of telemetry -/// for that policy. Matchers should NOT be used as a catch-all; they should be -/// specific enough to target the intended telemetry precisely. -/// -/// All regex fields use RE2 syntax for consistency across implementations. 
-pub const LogMatcher = struct { - negate: bool = false, - case_insensitive: bool = false, - field: ?field_union = null, - match: ?match_union = null, - - pub const _field_case = enum { - log_field, - log_attribute, - resource_attribute, - scope_attribute, - }; - pub const field_union = union(_field_case) { - log_field: LogField, - log_attribute: AttributePath, - resource_attribute: AttributePath, - scope_attribute: AttributePath, - pub const _desc_table = .{ - .log_field = fd(1, .@"enum"), - .log_attribute = fd(2, .submessage), - .resource_attribute = fd(3, .submessage), - .scope_attribute = fd(4, .submessage), - }; - }; - - pub const _match_case = enum { - exact, - regex, - exists, - starts_with, - ends_with, - contains, - }; - pub const match_union = union(_match_case) { - exact: []const u8, - regex: []const u8, - exists: bool, - starts_with: []const u8, - ends_with: []const u8, - contains: []const u8, - pub const _desc_table = .{ - .exact = fd(10, .{ .scalar = .string }), - .regex = fd(11, .{ .scalar = .string }), - .exists = fd(12, .{ .scalar = .bool }), - .starts_with = fd(13, .{ .scalar = .string }), - .ends_with = fd(14, .{ .scalar = .string }), - .contains = fd(15, .{ .scalar = .string }), - }; - }; - - pub const _desc_table = .{ - .negate = fd(20, .{ .scalar = .bool }), - .case_insensitive = fd(21, .{ .scalar = .bool }), - .field = fd(null, .{ .oneof = field_union }), - .match = fd(null, .{ .oneof = match_union }), - }; - - /// Encodes the message to the writer - /// The allocator is used to generate submessages internally. - /// Hence, an ArenaAllocator is a preferred choice if allocations are a bottleneck. - pub fn encode( - self: @This(), - writer: *std.Io.Writer, - allocator: std.mem.Allocator, - ) (std.Io.Writer.Error || std.mem.Allocator.Error)!void { - return protobuf.encode(writer, allocator, self); - } - - /// Decodes the message from the bytes read from the reader. - pub fn decode( - reader: *std.Io.Reader, - allocator: std.mem.Allocator, - ) (protobuf.DecodingError || std.Io.Reader.Error || std.mem.Allocator.Error)!@This() { - return protobuf.decode(@This(), reader, allocator); - } - - /// Deinitializes and frees the memory associated with the message. - pub fn deinit(self: *@This(), allocator: std.mem.Allocator) void { - return protobuf.deinit(allocator, self); - } - - /// Duplicates the message. - pub fn dupe(self: @This(), allocator: std.mem.Allocator) std.mem.Allocator.Error!@This() { - return protobuf.dupe(@This(), self, allocator); - } - - /// Decodes the message from the JSON string. - pub fn jsonDecode( - input: []const u8, - options: std.json.ParseOptions, - allocator: std.mem.Allocator, - ) !std.json.Parsed(@This()) { - return protobuf.json.decode(@This(), input, options, allocator); - } - - /// Encodes the message to a JSON string. - pub fn jsonEncode( - self: @This(), - options: std.json.Stringify.Options, - allocator: std.mem.Allocator, - ) ![]const u8 { - return protobuf.json.encode(self, options, allocator); - } - - /// This method is used by std.json - /// internally for deserialization. DO NOT RENAME! - pub fn jsonParse( - allocator: std.mem.Allocator, - source: anytype, - options: std.json.ParseOptions, - ) !@This() { - return protobuf.json.parse(@This(), allocator, source, options); - } - - /// This method is used by std.json - /// internally for serialization. DO NOT RENAME! 
- pub fn jsonStringify(self: *const @This(), jws: anytype) !void { - return protobuf.json.stringify(@This(), self, jws); - } -}; - -/// LogTransform defines modifications to logs. -pub const LogTransform = struct { - remove: std.ArrayListUnmanaged(LogRemove) = .empty, - redact: std.ArrayListUnmanaged(LogRedact) = .empty, - rename: std.ArrayListUnmanaged(LogRename) = .empty, - add: std.ArrayListUnmanaged(LogAdd) = .empty, - - pub const _desc_table = .{ - .remove = fd(1, .{ .repeated = .submessage }), - .redact = fd(2, .{ .repeated = .submessage }), - .rename = fd(3, .{ .repeated = .submessage }), - .add = fd(4, .{ .repeated = .submessage }), - }; - - /// Encodes the message to the writer - /// The allocator is used to generate submessages internally. - /// Hence, an ArenaAllocator is a preferred choice if allocations are a bottleneck. - pub fn encode( - self: @This(), - writer: *std.Io.Writer, - allocator: std.mem.Allocator, - ) (std.Io.Writer.Error || std.mem.Allocator.Error)!void { - return protobuf.encode(writer, allocator, self); - } - - /// Decodes the message from the bytes read from the reader. - pub fn decode( - reader: *std.Io.Reader, - allocator: std.mem.Allocator, - ) (protobuf.DecodingError || std.Io.Reader.Error || std.mem.Allocator.Error)!@This() { - return protobuf.decode(@This(), reader, allocator); - } - - /// Deinitializes and frees the memory associated with the message. - pub fn deinit(self: *@This(), allocator: std.mem.Allocator) void { - return protobuf.deinit(allocator, self); - } - - /// Duplicates the message. - pub fn dupe(self: @This(), allocator: std.mem.Allocator) std.mem.Allocator.Error!@This() { - return protobuf.dupe(@This(), self, allocator); - } - - /// Decodes the message from the JSON string. - pub fn jsonDecode( - input: []const u8, - options: std.json.ParseOptions, - allocator: std.mem.Allocator, - ) !std.json.Parsed(@This()) { - return protobuf.json.decode(@This(), input, options, allocator); - } - - /// Encodes the message to a JSON string. - pub fn jsonEncode( - self: @This(), - options: std.json.Stringify.Options, - allocator: std.mem.Allocator, - ) ![]const u8 { - return protobuf.json.encode(self, options, allocator); - } - - /// This method is used by std.json - /// internally for deserialization. DO NOT RENAME! - pub fn jsonParse( - allocator: std.mem.Allocator, - source: anytype, - options: std.json.ParseOptions, - ) !@This() { - return protobuf.json.parse(@This(), allocator, source, options); - } - - /// This method is used by std.json - /// internally for serialization. DO NOT RENAME! - pub fn jsonStringify(self: *const @This(), jws: anytype) !void { - return protobuf.json.stringify(@This(), self, jws); - } -}; - -/// LogRemove removes a field. -pub const LogRemove = struct { - field: ?field_union = null, - - pub const _field_case = enum { - log_field, - log_attribute, - resource_attribute, - scope_attribute, - }; - pub const field_union = union(_field_case) { - log_field: LogField, - log_attribute: AttributePath, - resource_attribute: AttributePath, - scope_attribute: AttributePath, - pub const _desc_table = .{ - .log_field = fd(1, .@"enum"), - .log_attribute = fd(2, .submessage), - .resource_attribute = fd(3, .submessage), - .scope_attribute = fd(4, .submessage), - }; - }; - - pub const _desc_table = .{ - .field = fd(null, .{ .oneof = field_union }), - }; - - /// Encodes the message to the writer - /// The allocator is used to generate submessages internally. 
- /// Hence, an ArenaAllocator is a preferred choice if allocations are a bottleneck. - pub fn encode( - self: @This(), - writer: *std.Io.Writer, - allocator: std.mem.Allocator, - ) (std.Io.Writer.Error || std.mem.Allocator.Error)!void { - return protobuf.encode(writer, allocator, self); - } - - /// Decodes the message from the bytes read from the reader. - pub fn decode( - reader: *std.Io.Reader, - allocator: std.mem.Allocator, - ) (protobuf.DecodingError || std.Io.Reader.Error || std.mem.Allocator.Error)!@This() { - return protobuf.decode(@This(), reader, allocator); - } - - /// Deinitializes and frees the memory associated with the message. - pub fn deinit(self: *@This(), allocator: std.mem.Allocator) void { - return protobuf.deinit(allocator, self); - } - - /// Duplicates the message. - pub fn dupe(self: @This(), allocator: std.mem.Allocator) std.mem.Allocator.Error!@This() { - return protobuf.dupe(@This(), self, allocator); - } - - /// Decodes the message from the JSON string. - pub fn jsonDecode( - input: []const u8, - options: std.json.ParseOptions, - allocator: std.mem.Allocator, - ) !std.json.Parsed(@This()) { - return protobuf.json.decode(@This(), input, options, allocator); - } - - /// Encodes the message to a JSON string. - pub fn jsonEncode( - self: @This(), - options: std.json.Stringify.Options, - allocator: std.mem.Allocator, - ) ![]const u8 { - return protobuf.json.encode(self, options, allocator); - } - - /// This method is used by std.json - /// internally for deserialization. DO NOT RENAME! - pub fn jsonParse( - allocator: std.mem.Allocator, - source: anytype, - options: std.json.ParseOptions, - ) !@This() { - return protobuf.json.parse(@This(), allocator, source, options); - } - - /// This method is used by std.json - /// internally for serialization. DO NOT RENAME! - pub fn jsonStringify(self: *const @This(), jws: anytype) !void { - return protobuf.json.stringify(@This(), self, jws); - } -}; - -/// LogRedact masks a field value. -pub const LogRedact = struct { - replacement: []const u8 = &.{}, - field: ?field_union = null, - - pub const _field_case = enum { - log_field, - log_attribute, - resource_attribute, - scope_attribute, - }; - pub const field_union = union(_field_case) { - log_field: LogField, - log_attribute: AttributePath, - resource_attribute: AttributePath, - scope_attribute: AttributePath, - pub const _desc_table = .{ - .log_field = fd(1, .@"enum"), - .log_attribute = fd(2, .submessage), - .resource_attribute = fd(3, .submessage), - .scope_attribute = fd(4, .submessage), - }; - }; - - pub const _desc_table = .{ - .replacement = fd(10, .{ .scalar = .string }), - .field = fd(null, .{ .oneof = field_union }), - }; - - /// Encodes the message to the writer - /// The allocator is used to generate submessages internally. - /// Hence, an ArenaAllocator is a preferred choice if allocations are a bottleneck. - pub fn encode( - self: @This(), - writer: *std.Io.Writer, - allocator: std.mem.Allocator, - ) (std.Io.Writer.Error || std.mem.Allocator.Error)!void { - return protobuf.encode(writer, allocator, self); - } - - /// Decodes the message from the bytes read from the reader. - pub fn decode( - reader: *std.Io.Reader, - allocator: std.mem.Allocator, - ) (protobuf.DecodingError || std.Io.Reader.Error || std.mem.Allocator.Error)!@This() { - return protobuf.decode(@This(), reader, allocator); - } - - /// Deinitializes and frees the memory associated with the message. 
- pub fn deinit(self: *@This(), allocator: std.mem.Allocator) void { - return protobuf.deinit(allocator, self); - } - - /// Duplicates the message. - pub fn dupe(self: @This(), allocator: std.mem.Allocator) std.mem.Allocator.Error!@This() { - return protobuf.dupe(@This(), self, allocator); - } - - /// Decodes the message from the JSON string. - pub fn jsonDecode( - input: []const u8, - options: std.json.ParseOptions, - allocator: std.mem.Allocator, - ) !std.json.Parsed(@This()) { - return protobuf.json.decode(@This(), input, options, allocator); - } - - /// Encodes the message to a JSON string. - pub fn jsonEncode( - self: @This(), - options: std.json.Stringify.Options, - allocator: std.mem.Allocator, - ) ![]const u8 { - return protobuf.json.encode(self, options, allocator); - } - - /// This method is used by std.json - /// internally for deserialization. DO NOT RENAME! - pub fn jsonParse( - allocator: std.mem.Allocator, - source: anytype, - options: std.json.ParseOptions, - ) !@This() { - return protobuf.json.parse(@This(), allocator, source, options); - } - - /// This method is used by std.json - /// internally for serialization. DO NOT RENAME! - pub fn jsonStringify(self: *const @This(), jws: anytype) !void { - return protobuf.json.stringify(@This(), self, jws); - } -}; - -/// LogRename changes a field name. -pub const LogRename = struct { - to: []const u8 = &.{}, - upsert: bool = false, - from: ?from_union = null, - - pub const _from_case = enum { - from_log_field, - from_log_attribute, - from_resource_attribute, - from_scope_attribute, - }; - pub const from_union = union(_from_case) { - from_log_field: LogField, - from_log_attribute: AttributePath, - from_resource_attribute: AttributePath, - from_scope_attribute: AttributePath, - pub const _desc_table = .{ - .from_log_field = fd(1, .@"enum"), - .from_log_attribute = fd(2, .submessage), - .from_resource_attribute = fd(3, .submessage), - .from_scope_attribute = fd(4, .submessage), - }; - }; - - pub const _desc_table = .{ - .to = fd(10, .{ .scalar = .string }), - .upsert = fd(11, .{ .scalar = .bool }), - .from = fd(null, .{ .oneof = from_union }), - }; - - /// Encodes the message to the writer - /// The allocator is used to generate submessages internally. - /// Hence, an ArenaAllocator is a preferred choice if allocations are a bottleneck. - pub fn encode( - self: @This(), - writer: *std.Io.Writer, - allocator: std.mem.Allocator, - ) (std.Io.Writer.Error || std.mem.Allocator.Error)!void { - return protobuf.encode(writer, allocator, self); - } - - /// Decodes the message from the bytes read from the reader. - pub fn decode( - reader: *std.Io.Reader, - allocator: std.mem.Allocator, - ) (protobuf.DecodingError || std.Io.Reader.Error || std.mem.Allocator.Error)!@This() { - return protobuf.decode(@This(), reader, allocator); - } - - /// Deinitializes and frees the memory associated with the message. - pub fn deinit(self: *@This(), allocator: std.mem.Allocator) void { - return protobuf.deinit(allocator, self); - } - - /// Duplicates the message. - pub fn dupe(self: @This(), allocator: std.mem.Allocator) std.mem.Allocator.Error!@This() { - return protobuf.dupe(@This(), self, allocator); - } - - /// Decodes the message from the JSON string. - pub fn jsonDecode( - input: []const u8, - options: std.json.ParseOptions, - allocator: std.mem.Allocator, - ) !std.json.Parsed(@This()) { - return protobuf.json.decode(@This(), input, options, allocator); - } - - /// Encodes the message to a JSON string. 
- pub fn jsonEncode( - self: @This(), - options: std.json.Stringify.Options, - allocator: std.mem.Allocator, - ) ![]const u8 { - return protobuf.json.encode(self, options, allocator); - } - - /// This method is used by std.json - /// internally for deserialization. DO NOT RENAME! - pub fn jsonParse( - allocator: std.mem.Allocator, - source: anytype, - options: std.json.ParseOptions, - ) !@This() { - return protobuf.json.parse(@This(), allocator, source, options); - } - - /// This method is used by std.json - /// internally for serialization. DO NOT RENAME! - pub fn jsonStringify(self: *const @This(), jws: anytype) !void { - return protobuf.json.stringify(@This(), self, jws); - } -}; - -/// LogAdd inserts a field. -pub const LogAdd = struct { - value: []const u8 = &.{}, - upsert: bool = false, - field: ?field_union = null, - - pub const _field_case = enum { - log_field, - log_attribute, - resource_attribute, - scope_attribute, - }; - pub const field_union = union(_field_case) { - log_field: LogField, - log_attribute: AttributePath, - resource_attribute: AttributePath, - scope_attribute: AttributePath, - pub const _desc_table = .{ - .log_field = fd(1, .@"enum"), - .log_attribute = fd(2, .submessage), - .resource_attribute = fd(3, .submessage), - .scope_attribute = fd(4, .submessage), - }; - }; - - pub const _desc_table = .{ - .value = fd(10, .{ .scalar = .string }), - .upsert = fd(11, .{ .scalar = .bool }), - .field = fd(null, .{ .oneof = field_union }), - }; - - /// Encodes the message to the writer - /// The allocator is used to generate submessages internally. - /// Hence, an ArenaAllocator is a preferred choice if allocations are a bottleneck. - pub fn encode( - self: @This(), - writer: *std.Io.Writer, - allocator: std.mem.Allocator, - ) (std.Io.Writer.Error || std.mem.Allocator.Error)!void { - return protobuf.encode(writer, allocator, self); - } - - /// Decodes the message from the bytes read from the reader. - pub fn decode( - reader: *std.Io.Reader, - allocator: std.mem.Allocator, - ) (protobuf.DecodingError || std.Io.Reader.Error || std.mem.Allocator.Error)!@This() { - return protobuf.decode(@This(), reader, allocator); - } - - /// Deinitializes and frees the memory associated with the message. - pub fn deinit(self: *@This(), allocator: std.mem.Allocator) void { - return protobuf.deinit(allocator, self); - } - - /// Duplicates the message. - pub fn dupe(self: @This(), allocator: std.mem.Allocator) std.mem.Allocator.Error!@This() { - return protobuf.dupe(@This(), self, allocator); - } - - /// Decodes the message from the JSON string. - pub fn jsonDecode( - input: []const u8, - options: std.json.ParseOptions, - allocator: std.mem.Allocator, - ) !std.json.Parsed(@This()) { - return protobuf.json.decode(@This(), input, options, allocator); - } - - /// Encodes the message to a JSON string. - pub fn jsonEncode( - self: @This(), - options: std.json.Stringify.Options, - allocator: std.mem.Allocator, - ) ![]const u8 { - return protobuf.json.encode(self, options, allocator); - } - - /// This method is used by std.json - /// internally for deserialization. DO NOT RENAME! - pub fn jsonParse( - allocator: std.mem.Allocator, - source: anytype, - options: std.json.ParseOptions, - ) !@This() { - return protobuf.json.parse(@This(), allocator, source, options); - } - - /// This method is used by std.json - /// internally for serialization. DO NOT RENAME! 
- pub fn jsonStringify(self: *const @This(), jws: anytype) !void { - return protobuf.json.stringify(@This(), self, jws); - } -}; - -/// MetricField identifies simple metric fields (non-keyed). -pub const MetricField = enum(i32) { - METRIC_FIELD_UNSPECIFIED = 0, - METRIC_FIELD_NAME = 1, - METRIC_FIELD_DESCRIPTION = 2, - METRIC_FIELD_UNIT = 3, - METRIC_FIELD_RESOURCE_SCHEMA_URL = 10, - METRIC_FIELD_SCOPE_SCHEMA_URL = 11, - METRIC_FIELD_SCOPE_NAME = 12, - METRIC_FIELD_SCOPE_VERSION = 13, - _, -}; - -/// MetricType identifies the type of metric for matching. -pub const MetricType = enum(i32) { - METRIC_TYPE_UNSPECIFIED = 0, - METRIC_TYPE_GAUGE = 1, - METRIC_TYPE_SUM = 2, - METRIC_TYPE_HISTOGRAM = 3, - METRIC_TYPE_EXPONENTIAL_HISTOGRAM = 4, - METRIC_TYPE_SUMMARY = 5, - _, -}; - -/// AggregationTemporality defines how a metric aggregator reports aggregated values. -/// Mirrors opentelemetry.proto.metrics.v1.AggregationTemporality. -pub const AggregationTemporality = enum(i32) { - AGGREGATION_TEMPORALITY_UNSPECIFIED = 0, - AGGREGATION_TEMPORALITY_DELTA = 1, - AGGREGATION_TEMPORALITY_CUMULATIVE = 2, - _, -}; - -/// MetricTarget defines matching and actions for metrics. -pub const MetricTarget = struct { - match: std.ArrayListUnmanaged(MetricMatcher) = .empty, - keep: bool = false, - - pub const _desc_table = .{ - .match = fd(1, .{ .repeated = .submessage }), - .keep = fd(2, .{ .scalar = .bool }), - }; - - /// Encodes the message to the writer - /// The allocator is used to generate submessages internally. - /// Hence, an ArenaAllocator is a preferred choice if allocations are a bottleneck. - pub fn encode( - self: @This(), - writer: *std.Io.Writer, - allocator: std.mem.Allocator, - ) (std.Io.Writer.Error || std.mem.Allocator.Error)!void { - return protobuf.encode(writer, allocator, self); - } - - /// Decodes the message from the bytes read from the reader. - pub fn decode( - reader: *std.Io.Reader, - allocator: std.mem.Allocator, - ) (protobuf.DecodingError || std.Io.Reader.Error || std.mem.Allocator.Error)!@This() { - return protobuf.decode(@This(), reader, allocator); - } - - /// Deinitializes and frees the memory associated with the message. - pub fn deinit(self: *@This(), allocator: std.mem.Allocator) void { - return protobuf.deinit(allocator, self); - } - - /// Duplicates the message. - pub fn dupe(self: @This(), allocator: std.mem.Allocator) std.mem.Allocator.Error!@This() { - return protobuf.dupe(@This(), self, allocator); - } - - /// Decodes the message from the JSON string. - pub fn jsonDecode( - input: []const u8, - options: std.json.ParseOptions, - allocator: std.mem.Allocator, - ) !std.json.Parsed(@This()) { - return protobuf.json.decode(@This(), input, options, allocator); - } - - /// Encodes the message to a JSON string. - pub fn jsonEncode( - self: @This(), - options: std.json.Stringify.Options, - allocator: std.mem.Allocator, - ) ![]const u8 { - return protobuf.json.encode(self, options, allocator); - } - - /// This method is used by std.json - /// internally for deserialization. DO NOT RENAME! - pub fn jsonParse( - allocator: std.mem.Allocator, - source: anytype, - options: std.json.ParseOptions, - ) !@This() { - return protobuf.json.parse(@This(), allocator, source, options); - } - - /// This method is used by std.json - /// internally for serialization. DO NOT RENAME! 
- pub fn jsonStringify(self: *const @This(), jws: anytype) !void { - return protobuf.json.stringify(@This(), self, jws); - } -}; - -/// MetricMatcher provides a way to match against metric telemetry data using known fields. -/// -/// IMPORTANT CONSTRAINTS: -/// - Multiple matchers are ANDed together: all matchers must match for the -/// overall match to succeed. -/// - The list of matchers should uniquely identify a specific pattern of telemetry -/// for that policy. Matchers should NOT be used as a catch-all; they should be -/// specific enough to target the intended telemetry precisely. -/// -/// All regex fields use RE2 syntax for consistency across implementations. -pub const MetricMatcher = struct { - negate: bool = false, - case_insensitive: bool = false, - field: ?field_union = null, - match: ?match_union = null, - - pub const _field_case = enum { - metric_field, - datapoint_attribute, - resource_attribute, - scope_attribute, - metric_type, - aggregation_temporality, - }; - pub const field_union = union(_field_case) { - metric_field: MetricField, - datapoint_attribute: AttributePath, - resource_attribute: AttributePath, - scope_attribute: AttributePath, - metric_type: MetricType, - aggregation_temporality: AggregationTemporality, - pub const _desc_table = .{ - .metric_field = fd(1, .@"enum"), - .datapoint_attribute = fd(2, .submessage), - .resource_attribute = fd(3, .submessage), - .scope_attribute = fd(4, .submessage), - .metric_type = fd(5, .@"enum"), - .aggregation_temporality = fd(6, .@"enum"), - }; - }; - - pub const _match_case = enum { - exact, - regex, - exists, - starts_with, - ends_with, - contains, - }; - pub const match_union = union(_match_case) { - exact: []const u8, - regex: []const u8, - exists: bool, - starts_with: []const u8, - ends_with: []const u8, - contains: []const u8, - pub const _desc_table = .{ - .exact = fd(10, .{ .scalar = .string }), - .regex = fd(11, .{ .scalar = .string }), - .exists = fd(12, .{ .scalar = .bool }), - .starts_with = fd(13, .{ .scalar = .string }), - .ends_with = fd(14, .{ .scalar = .string }), - .contains = fd(15, .{ .scalar = .string }), - }; - }; - - pub const _desc_table = .{ - .negate = fd(20, .{ .scalar = .bool }), - .case_insensitive = fd(21, .{ .scalar = .bool }), - .field = fd(null, .{ .oneof = field_union }), - .match = fd(null, .{ .oneof = match_union }), - }; - - /// Encodes the message to the writer - /// The allocator is used to generate submessages internally. - /// Hence, an ArenaAllocator is a preferred choice if allocations are a bottleneck. - pub fn encode( - self: @This(), - writer: *std.Io.Writer, - allocator: std.mem.Allocator, - ) (std.Io.Writer.Error || std.mem.Allocator.Error)!void { - return protobuf.encode(writer, allocator, self); - } - - /// Decodes the message from the bytes read from the reader. - pub fn decode( - reader: *std.Io.Reader, - allocator: std.mem.Allocator, - ) (protobuf.DecodingError || std.Io.Reader.Error || std.mem.Allocator.Error)!@This() { - return protobuf.decode(@This(), reader, allocator); - } - - /// Deinitializes and frees the memory associated with the message. - pub fn deinit(self: *@This(), allocator: std.mem.Allocator) void { - return protobuf.deinit(allocator, self); - } - - /// Duplicates the message. - pub fn dupe(self: @This(), allocator: std.mem.Allocator) std.mem.Allocator.Error!@This() { - return protobuf.dupe(@This(), self, allocator); - } - - /// Decodes the message from the JSON string. 
- pub fn jsonDecode( - input: []const u8, - options: std.json.ParseOptions, - allocator: std.mem.Allocator, - ) !std.json.Parsed(@This()) { - return protobuf.json.decode(@This(), input, options, allocator); - } - - /// Encodes the message to a JSON string. - pub fn jsonEncode( - self: @This(), - options: std.json.Stringify.Options, - allocator: std.mem.Allocator, - ) ![]const u8 { - return protobuf.json.encode(self, options, allocator); - } - - /// This method is used by std.json - /// internally for deserialization. DO NOT RENAME! - pub fn jsonParse( - allocator: std.mem.Allocator, - source: anytype, - options: std.json.ParseOptions, - ) !@This() { - return protobuf.json.parse(@This(), allocator, source, options); - } - - /// This method is used by std.json - /// internally for serialization. DO NOT RENAME! - pub fn jsonStringify(self: *const @This(), jws: anytype) !void { - return protobuf.json.stringify(@This(), self, jws); - } -}; - -/// TraceField identifies simple span fields (non-keyed). -pub const TraceField = enum(i32) { - TRACE_FIELD_UNSPECIFIED = 0, - TRACE_FIELD_NAME = 1, - TRACE_FIELD_TRACE_ID = 2, - TRACE_FIELD_SPAN_ID = 3, - TRACE_FIELD_PARENT_SPAN_ID = 4, - TRACE_FIELD_TRACE_STATE = 5, - TRACE_FIELD_RESOURCE_SCHEMA_URL = 10, - TRACE_FIELD_SCOPE_SCHEMA_URL = 11, - TRACE_FIELD_SCOPE_NAME = 12, - TRACE_FIELD_SCOPE_VERSION = 13, - _, -}; - -/// SpanKind identifies the type of span for matching. -/// Mirrors opentelemetry.proto.trace.v1.Span.SpanKind. -pub const SpanKind = enum(i32) { - SPAN_KIND_UNSPECIFIED = 0, - SPAN_KIND_INTERNAL = 1, - SPAN_KIND_SERVER = 2, - SPAN_KIND_CLIENT = 3, - SPAN_KIND_PRODUCER = 4, - SPAN_KIND_CONSUMER = 5, - _, -}; - -/// SpanStatusCode identifies the span status for matching. -/// Mirrors opentelemetry.proto.trace.v1.Status.StatusCode. -pub const SpanStatusCode = enum(i32) { - SPAN_STATUS_CODE_UNSPECIFIED = 0, - SPAN_STATUS_CODE_OK = 1, - SPAN_STATUS_CODE_ERROR = 2, - _, -}; - -/// SamplingMode determines how the sampling decision is made. -pub const SamplingMode = enum(i32) { - SAMPLING_MODE_UNSPECIFIED = 0, - SAMPLING_MODE_HASH_SEED = 1, - SAMPLING_MODE_PROPORTIONAL = 2, - SAMPLING_MODE_EQUALIZING = 3, - _, -}; - -/// TraceTarget defines matching and sampling actions for traces/spans. -pub const TraceTarget = struct { - match: std.ArrayListUnmanaged(TraceMatcher) = .empty, - keep: ?TraceSamplingConfig = null, - - pub const _desc_table = .{ - .match = fd(1, .{ .repeated = .submessage }), - .keep = fd(2, .submessage), - }; - - /// Encodes the message to the writer - /// The allocator is used to generate submessages internally. - /// Hence, an ArenaAllocator is a preferred choice if allocations are a bottleneck. - pub fn encode( - self: @This(), - writer: *std.Io.Writer, - allocator: std.mem.Allocator, - ) (std.Io.Writer.Error || std.mem.Allocator.Error)!void { - return protobuf.encode(writer, allocator, self); - } - - /// Decodes the message from the bytes read from the reader. - pub fn decode( - reader: *std.Io.Reader, - allocator: std.mem.Allocator, - ) (protobuf.DecodingError || std.Io.Reader.Error || std.mem.Allocator.Error)!@This() { - return protobuf.decode(@This(), reader, allocator); - } - - /// Deinitializes and frees the memory associated with the message. - pub fn deinit(self: *@This(), allocator: std.mem.Allocator) void { - return protobuf.deinit(allocator, self); - } - - /// Duplicates the message. 
- pub fn dupe(self: @This(), allocator: std.mem.Allocator) std.mem.Allocator.Error!@This() { - return protobuf.dupe(@This(), self, allocator); - } - - /// Decodes the message from the JSON string. - pub fn jsonDecode( - input: []const u8, - options: std.json.ParseOptions, - allocator: std.mem.Allocator, - ) !std.json.Parsed(@This()) { - return protobuf.json.decode(@This(), input, options, allocator); - } - - /// Encodes the message to a JSON string. - pub fn jsonEncode( - self: @This(), - options: std.json.Stringify.Options, - allocator: std.mem.Allocator, - ) ![]const u8 { - return protobuf.json.encode(self, options, allocator); - } - - /// This method is used by std.json - /// internally for deserialization. DO NOT RENAME! - pub fn jsonParse( - allocator: std.mem.Allocator, - source: anytype, - options: std.json.ParseOptions, - ) !@This() { - return protobuf.json.parse(@This(), allocator, source, options); - } - - /// This method is used by std.json - /// internally for serialization. DO NOT RENAME! - pub fn jsonStringify(self: *const @This(), jws: anytype) !void { - return protobuf.json.stringify(@This(), self, jws); - } -}; - -/// TraceMatcher provides a way to match against trace/span telemetry data using known fields. -/// -/// IMPORTANT CONSTRAINTS: -/// - Multiple matchers are ANDed together: all matchers must match for the -/// overall match to succeed. -/// - The list of matchers should uniquely identify a specific pattern of telemetry -/// for that policy. Matchers should NOT be used as a catch-all; they should be -/// specific enough to target the intended telemetry precisely. -/// -/// All regex fields use RE2 syntax for consistency across implementations. -pub const TraceMatcher = struct { - negate: bool = false, - case_insensitive: bool = false, - field: ?field_union = null, - match: ?match_union = null, - - pub const _field_case = enum { - trace_field, - span_attribute, - resource_attribute, - scope_attribute, - span_kind, - span_status, - event_name, - event_attribute, - link_trace_id, - }; - pub const field_union = union(_field_case) { - trace_field: TraceField, - span_attribute: AttributePath, - resource_attribute: AttributePath, - scope_attribute: AttributePath, - span_kind: SpanKind, - span_status: SpanStatusCode, - event_name: []const u8, - event_attribute: AttributePath, - link_trace_id: []const u8, - pub const _desc_table = .{ - .trace_field = fd(1, .@"enum"), - .span_attribute = fd(2, .submessage), - .resource_attribute = fd(3, .submessage), - .scope_attribute = fd(4, .submessage), - .span_kind = fd(5, .@"enum"), - .span_status = fd(6, .@"enum"), - .event_name = fd(7, .{ .scalar = .string }), - .event_attribute = fd(8, .submessage), - .link_trace_id = fd(9, .{ .scalar = .string }), - }; - }; - - pub const _match_case = enum { - exact, - regex, - exists, - starts_with, - ends_with, - contains, - }; - pub const match_union = union(_match_case) { - exact: []const u8, - regex: []const u8, - exists: bool, - starts_with: []const u8, - ends_with: []const u8, - contains: []const u8, - pub const _desc_table = .{ - .exact = fd(10, .{ .scalar = .string }), - .regex = fd(11, .{ .scalar = .string }), - .exists = fd(12, .{ .scalar = .bool }), - .starts_with = fd(13, .{ .scalar = .string }), - .ends_with = fd(14, .{ .scalar = .string }), - .contains = fd(15, .{ .scalar = .string }), - }; - }; - - pub const _desc_table = .{ - .negate = fd(20, .{ .scalar = .bool }), - .case_insensitive = fd(21, .{ .scalar = .bool }), - .field = fd(null, .{ .oneof = field_union }), - .match = 
fd(null, .{ .oneof = match_union }), - }; - - /// Encodes the message to the writer - /// The allocator is used to generate submessages internally. - /// Hence, an ArenaAllocator is a preferred choice if allocations are a bottleneck. - pub fn encode( - self: @This(), - writer: *std.Io.Writer, - allocator: std.mem.Allocator, - ) (std.Io.Writer.Error || std.mem.Allocator.Error)!void { - return protobuf.encode(writer, allocator, self); - } - - /// Decodes the message from the bytes read from the reader. - pub fn decode( - reader: *std.Io.Reader, - allocator: std.mem.Allocator, - ) (protobuf.DecodingError || std.Io.Reader.Error || std.mem.Allocator.Error)!@This() { - return protobuf.decode(@This(), reader, allocator); - } - - /// Deinitializes and frees the memory associated with the message. - pub fn deinit(self: *@This(), allocator: std.mem.Allocator) void { - return protobuf.deinit(allocator, self); - } - - /// Duplicates the message. - pub fn dupe(self: @This(), allocator: std.mem.Allocator) std.mem.Allocator.Error!@This() { - return protobuf.dupe(@This(), self, allocator); - } - - /// Decodes the message from the JSON string. - pub fn jsonDecode( - input: []const u8, - options: std.json.ParseOptions, - allocator: std.mem.Allocator, - ) !std.json.Parsed(@This()) { - return protobuf.json.decode(@This(), input, options, allocator); - } - - /// Encodes the message to a JSON string. - pub fn jsonEncode( - self: @This(), - options: std.json.Stringify.Options, - allocator: std.mem.Allocator, - ) ![]const u8 { - return protobuf.json.encode(self, options, allocator); - } - - /// This method is used by std.json - /// internally for deserialization. DO NOT RENAME! - pub fn jsonParse( - allocator: std.mem.Allocator, - source: anytype, - options: std.json.ParseOptions, - ) !@This() { - return protobuf.json.parse(@This(), allocator, source, options); - } - - /// This method is used by std.json - /// internally for serialization. DO NOT RENAME! - pub fn jsonStringify(self: *const @This(), jws: anytype) !void { - return protobuf.json.stringify(@This(), self, jws); - } -}; - -/// TraceSamplingConfig configures probabilistic sampling for traces. -/// -/// This configuration follows the OpenTelemetry probability sampling specification: -/// https://opentelemetry.io/docs/specs/otel/trace/tracestate-probability-sampling/ -/// -/// Implementations MUST follow tracestate standards to allow multi-stage sampling: -/// https://opentelemetry.io/docs/specs/otel/trace/tracestate-handling/#sampling-threshold-value-th -/// -/// The sampling decision is based on comparing a 56-bit randomness value (R) against -/// a rejection threshold (T). If R >= T, the span is kept; otherwise it is dropped. -/// The threshold is derived from the configured percentage: -/// T = (1 - percentage/100) * 2^56 -pub const TraceSamplingConfig = struct { - percentage: f32 = 0, - mode: ?SamplingMode = null, - sampling_precision: ?u32 = null, - hash_seed: ?u32 = null, - fail_closed: ?bool = null, - - pub const _desc_table = .{ - .percentage = fd(1, .{ .scalar = .float }), - .mode = fd(2, .@"enum"), - .sampling_precision = fd(3, .{ .scalar = .uint32 }), - .hash_seed = fd(4, .{ .scalar = .uint32 }), - .fail_closed = fd(5, .{ .scalar = .bool }), - }; - - /// Encodes the message to the writer - /// The allocator is used to generate submessages internally. - /// Hence, an ArenaAllocator is a preferred choice if allocations are a bottleneck. 
- pub fn encode( - self: @This(), - writer: *std.Io.Writer, - allocator: std.mem.Allocator, - ) (std.Io.Writer.Error || std.mem.Allocator.Error)!void { - return protobuf.encode(writer, allocator, self); - } - - /// Decodes the message from the bytes read from the reader. - pub fn decode( - reader: *std.Io.Reader, - allocator: std.mem.Allocator, - ) (protobuf.DecodingError || std.Io.Reader.Error || std.mem.Allocator.Error)!@This() { - return protobuf.decode(@This(), reader, allocator); - } - - /// Deinitializes and frees the memory associated with the message. - pub fn deinit(self: *@This(), allocator: std.mem.Allocator) void { - return protobuf.deinit(allocator, self); - } - - /// Duplicates the message. - pub fn dupe(self: @This(), allocator: std.mem.Allocator) std.mem.Allocator.Error!@This() { - return protobuf.dupe(@This(), self, allocator); - } - - /// Decodes the message from the JSON string. - pub fn jsonDecode( - input: []const u8, - options: std.json.ParseOptions, - allocator: std.mem.Allocator, - ) !std.json.Parsed(@This()) { - return protobuf.json.decode(@This(), input, options, allocator); - } - - /// Encodes the message to a JSON string. - pub fn jsonEncode( - self: @This(), - options: std.json.Stringify.Options, - allocator: std.mem.Allocator, - ) ![]const u8 { - return protobuf.json.encode(self, options, allocator); - } - - /// This method is used by std.json - /// internally for deserialization. DO NOT RENAME! - pub fn jsonParse( - allocator: std.mem.Allocator, - source: anytype, - options: std.json.ParseOptions, - ) !@This() { - return protobuf.json.parse(@This(), allocator, source, options); - } - - /// This method is used by std.json - /// internally for serialization. DO NOT RENAME! - pub fn jsonStringify(self: *const @This(), jws: anytype) !void { - return protobuf.json.stringify(@This(), self, jws); - } -}; - -/// PolicyStage identifies the execution stage for a policy. -pub const PolicyStage = enum(i32) { - POLICY_STAGE_UNSPECIFIED = 0, - POLICY_STAGE_LOG_FILTER = 1, - POLICY_STAGE_LOG_TRANSFORM = 2, - POLICY_STAGE_METRIC_FILTER = 3, - POLICY_STAGE_TRACE_SAMPLING = 4, - _, -}; - -pub const SyncType = enum(i32) { - SYNC_TYPE_UNSPECIFIED = 0, - SYNC_TYPE_FULL = 1, - _, -}; - -/// Policy represents a complete telemetry policy definition. 
-/// Policies are designed to be: -/// - Implementation Agnostic: Works in SDK, Collector, or any component -/// - Standalone: No need to understand pipeline configuration -/// - Dynamic: Can be updated post-instantiation -/// - Idempotent: Safe to apply to multiple components -/// - Fail-Open: Does not interfere with telemetry on failure -pub const Policy = struct { - id: []const u8 = &.{}, - name: []const u8 = &.{}, - description: []const u8 = &.{}, - enabled: bool = false, - created_at_unix_nano: u64 = 0, - modified_at_unix_nano: u64 = 0, - labels: std.ArrayListUnmanaged(opentelemetry_proto_common_v1.KeyValue) = .empty, - target: ?target_union = null, - - pub const _target_case = enum { - log, - metric, - trace, - }; - pub const target_union = union(_target_case) { - log: LogTarget, - metric: MetricTarget, - trace: TraceTarget, - pub const _desc_table = .{ - .log = fd(10, .submessage), - .metric = fd(11, .submessage), - .trace = fd(12, .submessage), - }; - }; - - pub const _desc_table = .{ - .id = fd(1, .{ .scalar = .string }), - .name = fd(2, .{ .scalar = .string }), - .description = fd(3, .{ .scalar = .string }), - .enabled = fd(4, .{ .scalar = .bool }), - .created_at_unix_nano = fd(5, .{ .scalar = .fixed64 }), - .modified_at_unix_nano = fd(6, .{ .scalar = .fixed64 }), - .labels = fd(7, .{ .repeated = .submessage }), - .target = fd(null, .{ .oneof = target_union }), - }; - - /// Encodes the message to the writer - /// The allocator is used to generate submessages internally. - /// Hence, an ArenaAllocator is a preferred choice if allocations are a bottleneck. - pub fn encode( - self: @This(), - writer: *std.Io.Writer, - allocator: std.mem.Allocator, - ) (std.Io.Writer.Error || std.mem.Allocator.Error)!void { - return protobuf.encode(writer, allocator, self); - } - - /// Decodes the message from the bytes read from the reader. - pub fn decode( - reader: *std.Io.Reader, - allocator: std.mem.Allocator, - ) (protobuf.DecodingError || std.Io.Reader.Error || std.mem.Allocator.Error)!@This() { - return protobuf.decode(@This(), reader, allocator); - } - - /// Deinitializes and frees the memory associated with the message. - pub fn deinit(self: *@This(), allocator: std.mem.Allocator) void { - return protobuf.deinit(allocator, self); - } - - /// Duplicates the message. - pub fn dupe(self: @This(), allocator: std.mem.Allocator) std.mem.Allocator.Error!@This() { - return protobuf.dupe(@This(), self, allocator); - } - - /// Decodes the message from the JSON string. - pub fn jsonDecode( - input: []const u8, - options: std.json.ParseOptions, - allocator: std.mem.Allocator, - ) !std.json.Parsed(@This()) { - return protobuf.json.decode(@This(), input, options, allocator); - } - - /// Encodes the message to a JSON string. - pub fn jsonEncode( - self: @This(), - options: std.json.Stringify.Options, - allocator: std.mem.Allocator, - ) ![]const u8 { - return protobuf.json.encode(self, options, allocator); - } - - /// This method is used by std.json - /// internally for deserialization. DO NOT RENAME! - pub fn jsonParse( - allocator: std.mem.Allocator, - source: anytype, - options: std.json.ParseOptions, - ) !@This() { - return protobuf.json.parse(@This(), allocator, source, options); - } - - /// This method is used by std.json - /// internally for serialization. DO NOT RENAME! - pub fn jsonStringify(self: *const @This(), jws: anytype) !void { - return protobuf.json.stringify(@This(), self, jws); - } -}; - -/// ClientMetadata contains information about the client requesting policies. 
-pub const ClientMetadata = struct { - supported_policy_stages: std.ArrayListUnmanaged(PolicyStage) = .empty, - labels: std.ArrayListUnmanaged(opentelemetry_proto_common_v1.KeyValue) = .empty, - resource_attributes: std.ArrayListUnmanaged(opentelemetry_proto_common_v1.KeyValue) = .empty, - - pub const _desc_table = .{ - .supported_policy_stages = fd(1, .{ .repeated = .@"enum" }), - .labels = fd(2, .{ .repeated = .submessage }), - .resource_attributes = fd(3, .{ .repeated = .submessage }), - }; - - /// Encodes the message to the writer - /// The allocator is used to generate submessages internally. - /// Hence, an ArenaAllocator is a preferred choice if allocations are a bottleneck. - pub fn encode( - self: @This(), - writer: *std.Io.Writer, - allocator: std.mem.Allocator, - ) (std.Io.Writer.Error || std.mem.Allocator.Error)!void { - return protobuf.encode(writer, allocator, self); - } - - /// Decodes the message from the bytes read from the reader. - pub fn decode( - reader: *std.Io.Reader, - allocator: std.mem.Allocator, - ) (protobuf.DecodingError || std.Io.Reader.Error || std.mem.Allocator.Error)!@This() { - return protobuf.decode(@This(), reader, allocator); - } - - /// Deinitializes and frees the memory associated with the message. - pub fn deinit(self: *@This(), allocator: std.mem.Allocator) void { - return protobuf.deinit(allocator, self); - } - - /// Duplicates the message. - pub fn dupe(self: @This(), allocator: std.mem.Allocator) std.mem.Allocator.Error!@This() { - return protobuf.dupe(@This(), self, allocator); - } - - /// Decodes the message from the JSON string. - pub fn jsonDecode( - input: []const u8, - options: std.json.ParseOptions, - allocator: std.mem.Allocator, - ) !std.json.Parsed(@This()) { - return protobuf.json.decode(@This(), input, options, allocator); - } - - /// Encodes the message to a JSON string. - pub fn jsonEncode( - self: @This(), - options: std.json.Stringify.Options, - allocator: std.mem.Allocator, - ) ![]const u8 { - return protobuf.json.encode(self, options, allocator); - } - - /// This method is used by std.json - /// internally for deserialization. DO NOT RENAME! - pub fn jsonParse( - allocator: std.mem.Allocator, - source: anytype, - options: std.json.ParseOptions, - ) !@This() { - return protobuf.json.parse(@This(), allocator, source, options); - } - - /// This method is used by std.json - /// internally for serialization. DO NOT RENAME! - pub fn jsonStringify(self: *const @This(), jws: anytype) !void { - return protobuf.json.stringify(@This(), self, jws); - } -}; - -/// TransformStageStatus reports hits and misses for a single transform stage. -pub const TransformStageStatus = struct { - hits: i64 = 0, - misses: i64 = 0, - - pub const _desc_table = .{ - .hits = fd(1, .{ .scalar = .int64 }), - .misses = fd(2, .{ .scalar = .int64 }), - }; - - /// Encodes the message to the writer - /// The allocator is used to generate submessages internally. - /// Hence, an ArenaAllocator is a preferred choice if allocations are a bottleneck. - pub fn encode( - self: @This(), - writer: *std.Io.Writer, - allocator: std.mem.Allocator, - ) (std.Io.Writer.Error || std.mem.Allocator.Error)!void { - return protobuf.encode(writer, allocator, self); - } - - /// Decodes the message from the bytes read from the reader. 
- pub fn decode( - reader: *std.Io.Reader, - allocator: std.mem.Allocator, - ) (protobuf.DecodingError || std.Io.Reader.Error || std.mem.Allocator.Error)!@This() { - return protobuf.decode(@This(), reader, allocator); - } - - /// Deinitializes and frees the memory associated with the message. - pub fn deinit(self: *@This(), allocator: std.mem.Allocator) void { - return protobuf.deinit(allocator, self); - } - - /// Duplicates the message. - pub fn dupe(self: @This(), allocator: std.mem.Allocator) std.mem.Allocator.Error!@This() { - return protobuf.dupe(@This(), self, allocator); - } - - /// Decodes the message from the JSON string. - pub fn jsonDecode( - input: []const u8, - options: std.json.ParseOptions, - allocator: std.mem.Allocator, - ) !std.json.Parsed(@This()) { - return protobuf.json.decode(@This(), input, options, allocator); - } - - /// Encodes the message to a JSON string. - pub fn jsonEncode( - self: @This(), - options: std.json.Stringify.Options, - allocator: std.mem.Allocator, - ) ![]const u8 { - return protobuf.json.encode(self, options, allocator); - } - - /// This method is used by std.json - /// internally for deserialization. DO NOT RENAME! - pub fn jsonParse( - allocator: std.mem.Allocator, - source: anytype, - options: std.json.ParseOptions, - ) !@This() { - return protobuf.json.parse(@This(), allocator, source, options); - } - - /// This method is used by std.json - /// internally for serialization. DO NOT RENAME! - pub fn jsonStringify(self: *const @This(), jws: anytype) !void { - return protobuf.json.stringify(@This(), self, jws); - } -}; - -/// PolicySyncStatus reports the status of an individual policy during sync. -/// Used to communicate policy execution metrics and errors back to the provider. -pub const PolicySyncStatus = struct { - id: []const u8 = &.{}, - match_hits: i64 = 0, - match_misses: i64 = 0, - errors: std.ArrayListUnmanaged([]const u8) = .empty, - remove: ?TransformStageStatus = null, - redact: ?TransformStageStatus = null, - rename: ?TransformStageStatus = null, - add: ?TransformStageStatus = null, - - pub const _desc_table = .{ - .id = fd(1, .{ .scalar = .string }), - .match_hits = fd(2, .{ .scalar = .int64 }), - .match_misses = fd(3, .{ .scalar = .int64 }), - .errors = fd(4, .{ .repeated = .{ .scalar = .string } }), - .remove = fd(10, .submessage), - .redact = fd(11, .submessage), - .rename = fd(12, .submessage), - .add = fd(13, .submessage), - }; - - /// Encodes the message to the writer - /// The allocator is used to generate submessages internally. - /// Hence, an ArenaAllocator is a preferred choice if allocations are a bottleneck. - pub fn encode( - self: @This(), - writer: *std.Io.Writer, - allocator: std.mem.Allocator, - ) (std.Io.Writer.Error || std.mem.Allocator.Error)!void { - return protobuf.encode(writer, allocator, self); - } - - /// Decodes the message from the bytes read from the reader. - pub fn decode( - reader: *std.Io.Reader, - allocator: std.mem.Allocator, - ) (protobuf.DecodingError || std.Io.Reader.Error || std.mem.Allocator.Error)!@This() { - return protobuf.decode(@This(), reader, allocator); - } - - /// Deinitializes and frees the memory associated with the message. - pub fn deinit(self: *@This(), allocator: std.mem.Allocator) void { - return protobuf.deinit(allocator, self); - } - - /// Duplicates the message. - pub fn dupe(self: @This(), allocator: std.mem.Allocator) std.mem.Allocator.Error!@This() { - return protobuf.dupe(@This(), self, allocator); - } - - /// Decodes the message from the JSON string. 
- pub fn jsonDecode( - input: []const u8, - options: std.json.ParseOptions, - allocator: std.mem.Allocator, - ) !std.json.Parsed(@This()) { - return protobuf.json.decode(@This(), input, options, allocator); - } - - /// Encodes the message to a JSON string. - pub fn jsonEncode( - self: @This(), - options: std.json.Stringify.Options, - allocator: std.mem.Allocator, - ) ![]const u8 { - return protobuf.json.encode(self, options, allocator); - } - - /// This method is used by std.json - /// internally for deserialization. DO NOT RENAME! - pub fn jsonParse( - allocator: std.mem.Allocator, - source: anytype, - options: std.json.ParseOptions, - ) !@This() { - return protobuf.json.parse(@This(), allocator, source, options); - } - - /// This method is used by std.json - /// internally for serialization. DO NOT RENAME! - pub fn jsonStringify(self: *const @This(), jws: anytype) !void { - return protobuf.json.stringify(@This(), self, jws); - } -}; - -/// SyncRequest is sent by clients to request policy updates. -pub const SyncRequest = struct { - client_metadata: ?ClientMetadata = null, - full_sync: bool = false, - last_sync_timestamp_unix_nano: u64 = 0, - last_successful_hash: []const u8 = &.{}, - policy_statuses: std.ArrayListUnmanaged(PolicySyncStatus) = .empty, - - pub const _desc_table = .{ - .client_metadata = fd(1, .submessage), - .full_sync = fd(2, .{ .scalar = .bool }), - .last_sync_timestamp_unix_nano = fd(3, .{ .scalar = .fixed64 }), - .last_successful_hash = fd(4, .{ .scalar = .string }), - .policy_statuses = fd(5, .{ .repeated = .submessage }), - }; - - /// Encodes the message to the writer - /// The allocator is used to generate submessages internally. - /// Hence, an ArenaAllocator is a preferred choice if allocations are a bottleneck. - pub fn encode( - self: @This(), - writer: *std.Io.Writer, - allocator: std.mem.Allocator, - ) (std.Io.Writer.Error || std.mem.Allocator.Error)!void { - return protobuf.encode(writer, allocator, self); - } - - /// Decodes the message from the bytes read from the reader. - pub fn decode( - reader: *std.Io.Reader, - allocator: std.mem.Allocator, - ) (protobuf.DecodingError || std.Io.Reader.Error || std.mem.Allocator.Error)!@This() { - return protobuf.decode(@This(), reader, allocator); - } - - /// Deinitializes and frees the memory associated with the message. - pub fn deinit(self: *@This(), allocator: std.mem.Allocator) void { - return protobuf.deinit(allocator, self); - } - - /// Duplicates the message. - pub fn dupe(self: @This(), allocator: std.mem.Allocator) std.mem.Allocator.Error!@This() { - return protobuf.dupe(@This(), self, allocator); - } - - /// Decodes the message from the JSON string. - pub fn jsonDecode( - input: []const u8, - options: std.json.ParseOptions, - allocator: std.mem.Allocator, - ) !std.json.Parsed(@This()) { - return protobuf.json.decode(@This(), input, options, allocator); - } - - /// Encodes the message to a JSON string. - pub fn jsonEncode( - self: @This(), - options: std.json.Stringify.Options, - allocator: std.mem.Allocator, - ) ![]const u8 { - return protobuf.json.encode(self, options, allocator); - } - - /// This method is used by std.json - /// internally for deserialization. DO NOT RENAME! - pub fn jsonParse( - allocator: std.mem.Allocator, - source: anytype, - options: std.json.ParseOptions, - ) !@This() { - return protobuf.json.parse(@This(), allocator, source, options); - } - - /// This method is used by std.json - /// internally for serialization. DO NOT RENAME! 
- pub fn jsonStringify(self: *const @This(), jws: anytype) !void { - return protobuf.json.stringify(@This(), self, jws); - } -}; - -/// SyncResponse contains policy updates for the client. -pub const SyncResponse = struct { - policies: std.ArrayListUnmanaged(Policy) = .empty, - hash: []const u8 = &.{}, - sync_timestamp_unix_nano: u64 = 0, - recommended_sync_interval_seconds: u32 = 0, - sync_type: SyncType = @enumFromInt(0), - error_message: []const u8 = &.{}, - - pub const _desc_table = .{ - .policies = fd(1, .{ .repeated = .submessage }), - .hash = fd(2, .{ .scalar = .string }), - .sync_timestamp_unix_nano = fd(3, .{ .scalar = .fixed64 }), - .recommended_sync_interval_seconds = fd(4, .{ .scalar = .uint32 }), - .sync_type = fd(5, .@"enum"), - .error_message = fd(6, .{ .scalar = .string }), - }; - - /// Encodes the message to the writer - /// The allocator is used to generate submessages internally. - /// Hence, an ArenaAllocator is a preferred choice if allocations are a bottleneck. - pub fn encode( - self: @This(), - writer: *std.Io.Writer, - allocator: std.mem.Allocator, - ) (std.Io.Writer.Error || std.mem.Allocator.Error)!void { - return protobuf.encode(writer, allocator, self); - } - - /// Decodes the message from the bytes read from the reader. - pub fn decode( - reader: *std.Io.Reader, - allocator: std.mem.Allocator, - ) (protobuf.DecodingError || std.Io.Reader.Error || std.mem.Allocator.Error)!@This() { - return protobuf.decode(@This(), reader, allocator); - } - - /// Deinitializes and frees the memory associated with the message. - pub fn deinit(self: *@This(), allocator: std.mem.Allocator) void { - return protobuf.deinit(allocator, self); - } - - /// Duplicates the message. - pub fn dupe(self: @This(), allocator: std.mem.Allocator) std.mem.Allocator.Error!@This() { - return protobuf.dupe(@This(), self, allocator); - } - - /// Decodes the message from the JSON string. - pub fn jsonDecode( - input: []const u8, - options: std.json.ParseOptions, - allocator: std.mem.Allocator, - ) !std.json.Parsed(@This()) { - return protobuf.json.decode(@This(), input, options, allocator); - } - - /// Encodes the message to a JSON string. - pub fn jsonEncode( - self: @This(), - options: std.json.Stringify.Options, - allocator: std.mem.Allocator, - ) ![]const u8 { - return protobuf.json.encode(self, options, allocator); - } - - /// This method is used by std.json - /// internally for deserialization. DO NOT RENAME! - pub fn jsonParse( - allocator: std.mem.Allocator, - source: anytype, - options: std.json.ParseOptions, - ) !@This() { - return protobuf.json.parse(@This(), allocator, source, options); - } - - /// This method is used by std.json - /// internally for serialization. DO NOT RENAME! - pub fn jsonStringify(self: *const @This(), jws: anytype) !void { - return protobuf.json.stringify(@This(), self, jws); - } -}; diff --git a/src/proxy/server.zig b/src/proxy/server.zig index 3c85d6f..ec4fcdc 100644 --- a/src/proxy/server.zig +++ b/src/proxy/server.zig @@ -4,7 +4,7 @@ const compress = @import("compress.zig"); const proxy_module = @import("../modules/proxy_module.zig"); const router_mod = @import("router.zig"); const upstream_client = @import("upstream_client.zig"); -const o11y = @import("../observability/root.zig"); +const o11y = @import("o11y"); const EventBus = o11y.EventBus; const ModuleId = proxy_module.ModuleId; diff --git a/src/root.zig b/src/root.zig index 46b5c76..346f8d3 100644 --- a/src/root.zig +++ b/src/root.zig @@ -1,12 +1,13 @@ //! 
By convention, root.zig is the root source file when making a library. const std = @import("std"); +const policy_zig = @import("policy_zig"); // ============================================================================= // Public module exports for distributions // ============================================================================= -// Policy package (centralized policy management) -pub const policy = @import("policy/root.zig"); +// Policy package (from policy-zig external dependency) +pub const policy = policy_zig; // Config modules (non-policy configuration) pub const config_types = @import("config/types.zig"); @@ -25,10 +26,6 @@ pub const prometheus_module = @import("modules/prometheus_module.zig"); // Prometheus module pub const prometheus = @import("prometheus/root.zig"); -// Hyperscan/Vectorscan bindings -pub const hyperscan = @import("hyperscan/hyperscan.zig"); -pub const matcher_index = @import("policy/matcher_index.zig"); - // ============================================================================= // Distribution entry points // ============================================================================= @@ -49,16 +46,13 @@ pub const lambda = @import("lambda/root.zig"); pub const zonfig = @import("zonfig/root.zig"); pub fn bufferedPrint() !void { - // Stdout is for the actual output of your application, for example if you - // are implementing gzip, then only the compressed bytes should be sent to - // stdout, not any debugging messages. var stdout_buffer: [1024]u8 = undefined; var stdout_writer = std.fs.File.stdout().writer(&stdout_buffer); const stdout = &stdout_writer.interface; try stdout.print("Run `zig build test` to run the tests.\n", .{}); - try stdout.flush(); // Don't forget to flush! + try stdout.flush(); } pub fn add(a: i32, b: i32) i32 { @@ -70,25 +64,18 @@ test "basic add functionality" { } // Import modules to include their tests -// Note: jsonpath.zig is tested separately via jsonpath_tests in build.zig -// because it requires C++ linkage that would conflict with the exe build test { _ = @import("config/types.zig"); - _ = @import("policy/root.zig"); - _ = @import("policy/policy_engine.zig"); - _ = @import("policy/matcher_index.zig"); _ = @import("modules/proxy_module.zig"); _ = @import("proxy/compress.zig"); _ = @import("proxy/router.zig"); _ = @import("proxy/upstream_client.zig"); - // Note: proxy/server.zig requires httpz which is only available in exe build _ = @import("modules/passthrough_module.zig"); _ = @import("modules/datadog_module.zig"); _ = @import("modules/datadog_logs_v2.zig"); _ = @import("modules/otlp_module.zig"); _ = @import("modules/otlp_logs.zig"); _ = @import("modules/health_module.zig"); - _ = @import("hyperscan/hyperscan.zig"); _ = @import("prometheus/root.zig"); _ = @import("modules/prometheus_module.zig"); _ = @import("lambda/root.zig"); From eec52c736752077cbe86742afa8dd7e73143c9a2 Mon Sep 17 00:00:00 2001 From: jaronoff97 Date: Thu, 12 Feb 2026 13:38:56 -0500 Subject: [PATCH 2/2] fix url --- build.zig.zon | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/build.zig.zon b/build.zig.zon index 7573e10..c4401bf 100644 --- a/build.zig.zon +++ b/build.zig.zon @@ -41,7 +41,7 @@ .hash = "zimdjson-0.1.1-89pgxT5VBgBMxsGpAd90DI0e-DzHGKiRVlquliE5FivH", }, .policy_zig = .{ - .url = "git+https://github.com/usetero/policy-zig?ref=v0.1.3", + .url = "git+https://github.com/usetero/policy-zig?ref=v0.1.3#3367463db1fdb322d66e7a64aa5d8b00111dc521", .hash = "policy_zig-0.1.3-5_dp3l37FgCw1ZfsRdVHkOJONEbu01hNh1xJD5jl_TeR", }, },
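Note on the two changes above: the `#3367463…` fragment appended to the `build.zig.zon` URL pins the fetch to an exact commit, so the stored `policy_zig-0.1.3-…` hash cannot drift even if the `v0.1.3` ref is later moved. The matching `build.zig` wiring is part of the first patch's diffstat but not shown in this excerpt; the following is a minimal sketch of how such a pinned dependency is typically consumed, assuming the package exports a module named `policy_zig` (the module and root-module names here are illustrative, not taken from this repository):

```zig
// build.zig — sketch only; assumes policy-zig exports a module
// named "policy_zig" matching the build.zig.zon dependency key.
const std = @import("std");

pub fn build(b: *std.Build) void {
    const target = b.standardTargetOptions(.{});
    const optimize = b.standardOptimizeOption(.{});

    // Resolve the package pinned in build.zig.zon by its declared name.
    const policy_dep = b.dependency("policy_zig", .{
        .target = target,
        .optimize = optimize,
    });

    // Root module of this library ("agent" is a placeholder name);
    // src/root.zig re-exports the package as `pub const policy = policy_zig;`.
    const root_mod = b.addModule("agent", .{
        .root_source_file = b.path("src/root.zig"),
        .target = target,
        .optimize = optimize,
    });

    // Make `@import("policy_zig")` inside src/root.zig resolve to the
    // external package's exported module.
    root_mod.addImport("policy_zig", policy_dep.module("policy_zig"));
}
```

With this wiring, the `@import("policy_zig")` added to src/root.zig in the first patch resolves to the external package, replacing the deleted in-tree `src/policy/` and generated `src/proto/` sources.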