diff --git a/Dockerfile b/Dockerfile index 9e0d1027f4..ca7b4be646 100644 --- a/Dockerfile +++ b/Dockerfile @@ -601,7 +601,7 @@ ARG OPENJDK_MAJOR_VERSION RUN set -x; apt-get update && \ DEBIAN_FRONTEND=noninteractive apt-get -y install git systemd \ autoconf libtool libcurl4-openssl-dev libltdl-dev libssl-dev libyajl-dev \ - build-essential cmake bison flex file systemd-dev \ + build-essential cmake bison flex file systemd-dev libsystemd-dev \ devscripts cdbs pkg-config zip COPY --from=openjdk-install /usr/local/java-${OPENJDK_MAJOR_VERSION}-openjdk/ /usr/local/java-${OPENJDK_MAJOR_VERSION}-openjdk ENV JAVA_HOME /usr/local/java-${OPENJDK_MAJOR_VERSION}-openjdk/ @@ -1221,6 +1221,106 @@ COPY --from=plucky-build /tmp/google-cloud-ops-agent.tgz /google-cloud-ops-agent COPY --from=plucky-build /google-cloud-ops-agent*.deb / COPY --from=plucky-build /google-cloud-ops-agent-plugin*.tar.gz / +# ====================================== +# Build Ops Agent for ubuntu-questing +# ====================================== + +FROM ubuntu:questing AS questing-build-base +ARG OPENJDK_MAJOR_VERSION + +RUN set -x; apt-get update && \ + DEBIAN_FRONTEND=noninteractive apt-get -y install git systemd \ + autoconf libtool libcurl4-openssl-dev libltdl-dev libssl-dev libyajl-dev \ + build-essential cmake bison flex file systemd-dev debhelper libsystemd-dev tzdata \ + devscripts cdbs pkg-config openjdk-${OPENJDK_MAJOR_VERSION}-jdk zip + +SHELL ["/bin/bash", "-c"] + +# Install golang +ARG TARGETARCH +ARG GO_VERSION +ADD https://go.dev/dl/go${GO_VERSION}.linux-${TARGETARCH}.tar.gz /tmp/go${GO_VERSION}.tar.gz +RUN set -xe; \ + tar -xf /tmp/go${GO_VERSION}.tar.gz -C /usr/local +ENV PATH="${PATH}:/usr/local/go/bin" + + +FROM questing-build-base AS questing-build-otel +WORKDIR /work +# Download golang deps +COPY ./submodules/opentelemetry-operations-collector/go.mod ./submodules/opentelemetry-operations-collector/go.sum submodules/opentelemetry-operations-collector/ +RUN cd submodules/opentelemetry-operations-collector && go mod download + +COPY ./submodules/opentelemetry-java-contrib submodules/opentelemetry-java-contrib +# Install gradle. The first invocation of gradlew does this +RUN cd submodules/opentelemetry-java-contrib && ./gradlew --no-daemon -Djdk.lang.Process.launchMechanism=vfork tasks +COPY ./submodules/opentelemetry-operations-collector submodules/opentelemetry-operations-collector +COPY ./builds/otel.sh . +RUN \ + unset OTEL_TRACES_EXPORTER && \ + unset OTEL_EXPORTER_OTLP_TRACES_ENDPOINT && \ + unset OTEL_EXPORTER_OTLP_TRACES_PROTOCOL && \ + ./otel.sh /work/cache/ + +FROM questing-build-base AS questing-build-fluent-bit +WORKDIR /work +COPY ./submodules/fluent-bit submodules/fluent-bit +COPY ./builds/fluent_bit.sh . +RUN ./fluent_bit.sh /work/cache/ + + +FROM questing-build-base AS questing-build-systemd +WORKDIR /work +COPY ./systemd systemd +COPY ./builds/systemd.sh . +RUN ./systemd.sh /work/cache/ + + +FROM questing-build-base AS questing-build-golang-base +WORKDIR /work +COPY go.mod go.sum ./ +# Fetch dependencies +RUN go mod download +COPY confgenerator confgenerator +COPY apps apps +COPY internal internal + + +FROM questing-build-golang-base AS questing-build-wrapper +WORKDIR /work +COPY cmd/agent_wrapper cmd/agent_wrapper +COPY ./builds/agent_wrapper.sh . +RUN ./agent_wrapper.sh /work/cache/ + + +FROM questing-build-golang-base AS questing-build +WORKDIR /work +COPY . /work + +# Run the build script once to build the ops agent engine to a cache +RUN mkdir -p /tmp/cache_run/golang && cp -r . 
/tmp/cache_run/golang +WORKDIR /tmp/cache_run/golang +RUN ./pkg/deb/build.sh &> /dev/null || true +WORKDIR /work + +COPY ./confgenerator/default-config.yaml /work/cache/etc/google-cloud-ops-agent/config.yaml +COPY --from=questing-build-otel /work/cache /work/cache +COPY --from=questing-build-fluent-bit /work/cache /work/cache +COPY --from=questing-build-systemd /work/cache /work/cache +COPY --from=questing-build-wrapper /work/cache /work/cache +RUN ./pkg/deb/build.sh + +COPY cmd/ops_agent_uap_plugin cmd/ops_agent_uap_plugin +COPY ./builds/ops_agent_plugin.sh . +RUN ./ops_agent_plugin.sh /work/cache/ +RUN ./pkg/plugin/build.sh /work/cache questing + + +FROM scratch AS questing +COPY --from=questing-build /tmp/google-cloud-ops-agent.tgz /google-cloud-ops-agent-ubuntu-questing.tgz +COPY --from=questing-build /google-cloud-ops-agent*.deb / +COPY --from=questing-build /google-cloud-ops-agent-plugin*.tar.gz / + FROM scratch COPY --from=centos8 /* / COPY --from=rockylinux9 /* / @@ -1232,4 +1332,5 @@ COPY --from=sles12 /* / COPY --from=sles15 /* / COPY --from=jammy /* / COPY --from=noble /* / -COPY --from=plucky /* / \ No newline at end of file +COPY --from=plucky /* / +COPY --from=questing /* / \ No newline at end of file diff --git a/VERSION b/VERSION index 3162ed84db..17e89adaa4 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -PKG_VERSION="2.61.0" +PKG_VERSION="2.62.0" diff --git a/apps/active_directory_ds.go b/apps/active_directory_ds.go index c1ebb4bd5b..26bd1a005f 100644 --- a/apps/active_directory_ds.go +++ b/apps/active_directory_ds.go @@ -32,8 +32,8 @@ func (r MetricsReceiverActiveDirectoryDS) Type() string { return "active_directory_ds" } -func (r MetricsReceiverActiveDirectoryDS) Pipelines(_ context.Context) ([]otel.ReceiverPipeline, error) { - return []otel.ReceiverPipeline{{ +func (r MetricsReceiverActiveDirectoryDS) Pipelines(ctx context.Context) ([]otel.ReceiverPipeline, error) { + return []otel.ReceiverPipeline{confgenerator.ConvertToOtlpExporter(otel.ReceiverPipeline{ Receiver: otel.Component{ Type: "active_directory_ds", Config: map[string]interface{}{ @@ -51,7 +51,7 @@ func (r MetricsReceiverActiveDirectoryDS) Pipelines(_ context.Context) ([]otel.R ), otel.MetricsRemoveServiceAttributes(), }}, - }}, nil + }, ctx)}, nil } func init() { diff --git a/apps/aerospike.go b/apps/aerospike.go index 3a24a89dad..5e8f31f8a3 100644 --- a/apps/aerospike.go +++ b/apps/aerospike.go @@ -51,7 +51,7 @@ var ( ) // Pipelines is the OTEL pipelines created from MetricsReceiverAerospike -func (r MetricsReceiverAerospike) Pipelines(_ context.Context) ([]otel.ReceiverPipeline, error) { +func (r MetricsReceiverAerospike) Pipelines(ctx context.Context) ([]otel.ReceiverPipeline, error) { if r.Endpoint == "" { r.Endpoint = defaultAerospikeEndpoint } @@ -76,7 +76,7 @@ func (r MetricsReceiverAerospike) Pipelines(_ context.Context) ([]otel.ReceiverP endpoint = r.Endpoint } - return []otel.ReceiverPipeline{{ + return []otel.ReceiverPipeline{confgenerator.ConvertToOtlpExporter(otel.ReceiverPipeline{ Receiver: otel.Component{ Type: "aerospike", Config: map[string]interface{}{ @@ -101,7 +101,7 @@ func (r MetricsReceiverAerospike) Pipelines(_ context.Context) ([]otel.ReceiverP ), otel.MetricsRemoveServiceAttributes(), }}, - }}, nil + }, ctx)}, nil } func init() { diff --git a/apps/apache.go b/apps/apache.go index c0b2f904f6..4983576a8c 100644 --- a/apps/apache.go +++ b/apps/apache.go @@ -35,11 +35,12 @@ func (r MetricsReceiverApache) Type() string { return "apache" } -func (r MetricsReceiverApache) Pipelines(_ 
context.Context) ([]otel.ReceiverPipeline, error) { +func (r MetricsReceiverApache) Pipelines(ctx context.Context) ([]otel.ReceiverPipeline, error) { if r.ServerStatusURL == "" { r.ServerStatusURL = defaultServerStatusURL } - return []otel.ReceiverPipeline{{ + + return []otel.ReceiverPipeline{confgenerator.ConvertToOtlpExporter(otel.ReceiverPipeline{ Receiver: otel.Component{ Type: "apache", Config: map[string]interface{}{ @@ -64,7 +65,7 @@ func (r MetricsReceiverApache) Pipelines(_ context.Context) ([]otel.ReceiverPipe ), otel.MetricsRemoveServiceAttributes(), }}, - }}, nil + }, ctx)}, nil } func init() { diff --git a/apps/couchbase.go b/apps/couchbase.go index 95a5c5a534..95f9721198 100644 --- a/apps/couchbase.go +++ b/apps/couchbase.go @@ -43,7 +43,7 @@ func (r MetricsReceiverCouchbase) Type() string { } // Pipelines will construct the prometheus receiver configuration -func (r MetricsReceiverCouchbase) Pipelines(_ context.Context) ([]otel.ReceiverPipeline, error) { +func (r MetricsReceiverCouchbase) Pipelines(ctx context.Context) ([]otel.ReceiverPipeline, error) { targets := []string{r.Endpoint} if r.Endpoint == "" { targets = []string{defaultCouchbaseEndpoint} @@ -75,7 +75,8 @@ func (r MetricsReceiverCouchbase) Pipelines(_ context.Context) ([]otel.ReceiverP }, }, } - return []otel.ReceiverPipeline{{ + + return []otel.ReceiverPipeline{confgenerator.ConvertToOtlpExporter(otel.ReceiverPipeline{ Receiver: otel.Component{ Type: "prometheus", Config: config, @@ -169,7 +170,7 @@ func (r MetricsReceiverCouchbase) Pipelines(_ context.Context) ([]otel.ReceiverP ), otel.MetricsRemoveServiceAttributes(), }}, - }}, nil + }, ctx)}, nil } type couchbaseMetric struct { diff --git a/apps/couchdb.go b/apps/couchdb.go index cb3a786fa8..a7ba365983 100644 --- a/apps/couchdb.go +++ b/apps/couchdb.go @@ -39,11 +39,12 @@ func (MetricsReceiverCouchdb) Type() string { return "couchdb" } -func (r MetricsReceiverCouchdb) Pipelines(_ context.Context) ([]otel.ReceiverPipeline, error) { +func (r MetricsReceiverCouchdb) Pipelines(ctx context.Context) ([]otel.ReceiverPipeline, error) { if r.Endpoint == "" { r.Endpoint = defaultCouchdbEndpoint } - return []otel.ReceiverPipeline{{ + + return []otel.ReceiverPipeline{confgenerator.ConvertToOtlpExporter(otel.ReceiverPipeline{ Receiver: otel.Component{ Type: "couchdb", Config: map[string]interface{}{ @@ -64,7 +65,7 @@ func (r MetricsReceiverCouchdb) Pipelines(_ context.Context) ([]otel.ReceiverPip ), otel.MetricsRemoveServiceAttributes(), }}, - }}, nil + }, ctx)}, nil } func init() { diff --git a/apps/dcgm.go b/apps/dcgm.go index 15a9b562ca..7fd426d8b4 100644 --- a/apps/dcgm.go +++ b/apps/dcgm.go @@ -36,7 +36,7 @@ func (r MetricsReceiverDcgm) Type() string { return "dcgm" } -func (r MetricsReceiverDcgm) Pipelines(_ context.Context) ([]otel.ReceiverPipeline, error) { +func (r MetricsReceiverDcgm) Pipelines(ctx context.Context) ([]otel.ReceiverPipeline, error) { if r.Endpoint == "" { r.Endpoint = defaultDcgmEndpoint } @@ -128,7 +128,7 @@ func (r MetricsReceiverDcgm) Pipelines(_ context.Context) ([]otel.ReceiverPipeli } } - return []otel.ReceiverPipeline{{ + return []otel.ReceiverPipeline{confgenerator.ConvertToOtlpExporter(otel.ReceiverPipeline{ Receiver: otel.Component{ Type: "dcgm", Config: map[string]interface{}{ @@ -203,8 +203,9 @@ func (r MetricsReceiverDcgm) Pipelines(_ context.Context) ([]otel.ReceiverPipeli otel.SetScopeName("agent.googleapis.com/"+r.Type()), otel.SetScopeVersion("1.0"), ), - }}, - }}, nil + }, + }, + }, ctx)}, nil } func init() { diff --git 
a/apps/elasticsearch.go b/apps/elasticsearch.go index a3ea9fd89d..4b248ef4c6 100644 --- a/apps/elasticsearch.go +++ b/apps/elasticsearch.go @@ -44,7 +44,7 @@ func (r MetricsReceiverElasticsearch) Type() string { return "elasticsearch" } -func (r MetricsReceiverElasticsearch) Pipelines(_ context.Context) ([]otel.ReceiverPipeline, error) { +func (r MetricsReceiverElasticsearch) Pipelines(ctx context.Context) ([]otel.ReceiverPipeline, error) { if r.Endpoint == "" { r.Endpoint = defaultElasticsearchEndpoint } @@ -72,7 +72,7 @@ func (r MetricsReceiverElasticsearch) Pipelines(_ context.Context) ([]otel.Recei "metrics": metricsConfig, } - return []otel.ReceiverPipeline{{ + return []otel.ReceiverPipeline{confgenerator.ConvertToOtlpExporter(otel.ReceiverPipeline{ Receiver: otel.Component{ Type: "elasticsearch", Config: cfg, @@ -94,7 +94,7 @@ func (r MetricsReceiverElasticsearch) Pipelines(_ context.Context) ([]otel.Recei ), otel.MetricsRemoveServiceAttributes(), }}, - }}, nil + }, ctx)}, nil } func (r MetricsReceiverElasticsearch) skipJVMMetricsConfig(metricsConfig map[string]interface{}) { diff --git a/apps/flink.go b/apps/flink.go index edae9557b9..0121a7c147 100644 --- a/apps/flink.go +++ b/apps/flink.go @@ -33,11 +33,12 @@ func (MetricsReceiverFlink) Type() string { const defaultFlinkEndpoint = "http://localhost:8081" -func (r MetricsReceiverFlink) Pipelines(_ context.Context) ([]otel.ReceiverPipeline, error) { +func (r MetricsReceiverFlink) Pipelines(ctx context.Context) ([]otel.ReceiverPipeline, error) { if r.Endpoint == "" { r.Endpoint = defaultFlinkEndpoint } - return []otel.ReceiverPipeline{{ + + return []otel.ReceiverPipeline{confgenerator.ConvertToOtlpExporter(otel.ReceiverPipeline{ Receiver: otel.Component{ Type: "flinkmetrics", Config: map[string]interface{}{ @@ -66,7 +67,7 @@ func (r MetricsReceiverFlink) Pipelines(_ context.Context) ([]otel.ReceiverPipel ), otel.MetricsRemoveServiceAttributes(), }}, - }}, nil + }, ctx)}, nil } func init() { diff --git a/apps/iis.go b/apps/iis.go index cd8e454dd3..c8f9107d8b 100644 --- a/apps/iis.go +++ b/apps/iis.go @@ -36,9 +36,9 @@ func (r MetricsReceiverIis) Type() string { return "iis" } -func (r MetricsReceiverIis) Pipelines(_ context.Context) ([]otel.ReceiverPipeline, error) { +func (r MetricsReceiverIis) Pipelines(ctx context.Context) ([]otel.ReceiverPipeline, error) { if r.ReceiverVersion == "2" { - return []otel.ReceiverPipeline{{ + return []otel.ReceiverPipeline{confgenerator.ConvertToOtlpExporter(otel.ReceiverPipeline{ Receiver: otel.Component{ Type: "iis", Config: map[string]interface{}{ @@ -72,11 +72,11 @@ func (r MetricsReceiverIis) Pipelines(_ context.Context) ([]otel.ReceiverPipelin ), otel.NormalizeSums(), }}, - }}, nil + }, ctx)}, nil } // Return version 1 if version is anything other than 2 - return []otel.ReceiverPipeline{{ + return []otel.ReceiverPipeline{confgenerator.ConvertToOtlpExporter(otel.ReceiverPipeline{ Receiver: otel.Component{ Type: "windowsperfcounters", Config: map[string]interface{}{ @@ -144,7 +144,7 @@ func (r MetricsReceiverIis) Pipelines(_ context.Context) ([]otel.ReceiverPipelin otel.SetScopeVersion("1.0"), ), }}, - }}, nil + }, ctx)}, nil } func init() { diff --git a/apps/memcached.go b/apps/memcached.go index fda7f45269..6c68db5174 100644 --- a/apps/memcached.go +++ b/apps/memcached.go @@ -35,12 +35,12 @@ func (r MetricsReceiverMemcached) Type() string { return "memcached" } -func (r MetricsReceiverMemcached) Pipelines(_ context.Context) ([]otel.ReceiverPipeline, error) { +func (r MetricsReceiverMemcached) 
Pipelines(ctx context.Context) ([]otel.ReceiverPipeline, error) { if r.Endpoint == "" { r.Endpoint = defaultMemcachedTCPEndpoint } - return []otel.ReceiverPipeline{{ + return []otel.ReceiverPipeline{confgenerator.ConvertToOtlpExporter(otel.ReceiverPipeline{ Receiver: otel.Component{ Type: "memcached", Config: map[string]interface{}{ @@ -65,7 +65,7 @@ func (r MetricsReceiverMemcached) Pipelines(_ context.Context) ([]otel.ReceiverP ), otel.MetricsRemoveServiceAttributes(), }}, - }}, nil + }, ctx)}, nil } func init() { diff --git a/apps/mongodb.go b/apps/mongodb.go index a0992b5b4a..04b70a0bc8 100644 --- a/apps/mongodb.go +++ b/apps/mongodb.go @@ -46,7 +46,7 @@ func (r MetricsReceiverMongoDB) Type() string { return "mongodb" } -func (r MetricsReceiverMongoDB) Pipelines(_ context.Context) ([]otel.ReceiverPipeline, error) { +func (r MetricsReceiverMongoDB) Pipelines(ctx context.Context) ([]otel.ReceiverPipeline, error) { transport := "tcp" if r.Endpoint == "" { r.Endpoint = defaultMongodbEndpoint @@ -72,7 +72,7 @@ func (r MetricsReceiverMongoDB) Pipelines(_ context.Context) ([]otel.ReceiverPip config["tls"] = r.TLSConfig(false) } - return []otel.ReceiverPipeline{{ + return []otel.ReceiverPipeline{confgenerator.ConvertToOtlpExporter(otel.ReceiverPipeline{ Receiver: otel.Component{ Type: r.Type(), Config: config, @@ -88,7 +88,7 @@ func (r MetricsReceiverMongoDB) Pipelines(_ context.Context) ([]otel.ReceiverPip ), otel.MetricsRemoveServiceAttributes(), }}, - }}, nil + }, ctx)}, nil } func init() { diff --git a/apps/mssql.go b/apps/mssql.go index 3d6cbe5aea..74e50eb52c 100644 --- a/apps/mssql.go +++ b/apps/mssql.go @@ -34,9 +34,9 @@ func (MetricsReceiverMssql) Type() string { return "mssql" } -func (m MetricsReceiverMssql) Pipelines(_ context.Context) ([]otel.ReceiverPipeline, error) { +func (m MetricsReceiverMssql) Pipelines(ctx context.Context) ([]otel.ReceiverPipeline, error) { if m.ReceiverVersion == "2" { - return []otel.ReceiverPipeline{{ + return []otel.ReceiverPipeline{confgenerator.ConvertToOtlpExporter(otel.ReceiverPipeline{ Receiver: otel.Component{ Type: "sqlserver", Config: map[string]interface{}{ @@ -59,10 +59,10 @@ func (m MetricsReceiverMssql) Pipelines(_ context.Context) ([]otel.ReceiverPipel otel.MetricsRemoveServiceAttributes(), otel.NormalizeSums(), }}, - }}, nil + }, ctx)}, nil } - return []otel.ReceiverPipeline{{ + return []otel.ReceiverPipeline{confgenerator.ConvertToOtlpExporter(otel.ReceiverPipeline{ Receiver: otel.Component{ Type: "windowsperfcounters", Config: map[string]interface{}{ @@ -108,7 +108,7 @@ func (m MetricsReceiverMssql) Pipelines(_ context.Context) ([]otel.ReceiverPipel otel.SetScopeVersion("1.0"), ), }}, - }}, nil + }, ctx)}, nil } func init() { diff --git a/apps/mysql.go b/apps/mysql.go index 86b378d3e3..676beec307 100644 --- a/apps/mysql.go +++ b/apps/mysql.go @@ -41,7 +41,7 @@ func (r MetricsReceiverMySql) Type() string { return "mysql" } -func (r MetricsReceiverMySql) Pipelines(_ context.Context) ([]otel.ReceiverPipeline, error) { +func (r MetricsReceiverMySql) Pipelines(ctx context.Context) ([]otel.ReceiverPipeline, error) { transport := "tcp" if r.Endpoint == "" { transport = "unix" @@ -55,7 +55,7 @@ func (r MetricsReceiverMySql) Pipelines(_ context.Context) ([]otel.ReceiverPipel } return []otel.ReceiverPipeline{ - { + confgenerator.ConvertToOtlpExporter(otel.ReceiverPipeline{ Receiver: otel.Component{ Type: "mysql", Config: map[string]interface{}{ @@ -125,8 +125,7 @@ func (r MetricsReceiverMySql) Pipelines(_ context.Context) ([]otel.ReceiverPipel ), 
otel.MetricsRemoveServiceAttributes(), }}, - }, - }, nil + }, ctx)}, nil } func init() { diff --git a/apps/nginx.go b/apps/nginx.go index 3de0f8ff78..8c95204aac 100644 --- a/apps/nginx.go +++ b/apps/nginx.go @@ -35,11 +35,12 @@ func (r MetricsReceiverNginx) Type() string { return "nginx" } -func (r MetricsReceiverNginx) Pipelines(_ context.Context) ([]otel.ReceiverPipeline, error) { +func (r MetricsReceiverNginx) Pipelines(ctx context.Context) ([]otel.ReceiverPipeline, error) { if r.StubStatusURL == "" { r.StubStatusURL = defaultStubStatusURL } - return []otel.ReceiverPipeline{{ + + return []otel.ReceiverPipeline{confgenerator.ConvertToOtlpExporter(otel.ReceiverPipeline{ Receiver: otel.Component{ Type: "nginx", Config: map[string]interface{}{ @@ -57,8 +58,7 @@ func (r MetricsReceiverNginx) Pipelines(_ context.Context) ([]otel.ReceiverPipel otel.SetScopeVersion("1.0"), ), otel.MetricsRemoveServiceAttributes(), - }}, - }}, nil + }}}, ctx)}, nil } func init() { diff --git a/apps/oracledb.go b/apps/oracledb.go index 135972fc82..bcb1261048 100644 --- a/apps/oracledb.go +++ b/apps/oracledb.go @@ -23,7 +23,6 @@ import ( "strings" "github.com/GoogleCloudPlatform/ops-agent/confgenerator" - "github.com/GoogleCloudPlatform/ops-agent/confgenerator/fluentbit" "github.com/GoogleCloudPlatform/ops-agent/confgenerator/otel" "github.com/GoogleCloudPlatform/ops-agent/internal/secret" ) @@ -50,7 +49,7 @@ func (r MetricsReceiverOracleDB) Type() string { return "oracledb" } -func (r MetricsReceiverOracleDB) Pipelines(_ context.Context) ([]otel.ReceiverPipeline, error) { +func (r MetricsReceiverOracleDB) Pipelines(ctx context.Context) ([]otel.ReceiverPipeline, error) { endpoint := r.Endpoint if r.Endpoint == "" { endpoint = defaultOracleDBEndpoint @@ -99,7 +98,8 @@ func (r MetricsReceiverOracleDB) Pipelines(_ context.Context) ([]otel.ReceiverPi "datasource": datasource, "queries": sqlReceiverQueriesConfig(oracleQueries), } - return []otel.ReceiverPipeline{{ + + return []otel.ReceiverPipeline{confgenerator.ConvertToOtlpExporter(otel.ReceiverPipeline{ Receiver: otel.Component{ Type: "sqlquery", Config: config, @@ -125,7 +125,7 @@ func (r MetricsReceiverOracleDB) Pipelines(_ context.Context) ([]otel.ReceiverPi ), otel.MetricsRemoveServiceAttributes(), }}, - }}, nil + }, ctx)}, nil } var oracleQueries = []sqlReceiverQuery{ @@ -769,64 +769,62 @@ func init() { confgenerator.MetricsReceiverTypes.RegisterType(func() confgenerator.MetricsReceiver { return &MetricsReceiverOracleDB{} }) } -type LoggingProcessorOracleDBAlert struct { - confgenerator.ConfigComponent `yaml:",inline"` +type LoggingProcessorMacroOracleDBAlert struct { } -func (lr LoggingProcessorOracleDBAlert) Type() string { +func (lr LoggingProcessorMacroOracleDBAlert) Type() string { return "oracledb_alert" } -func (lr LoggingProcessorOracleDBAlert) Components(ctx context.Context, tag string, uid string) []fluentbit.Component { - components := confgenerator.LoggingProcessorParseMultilineRegex{ - LoggingProcessorParseRegexComplex: confgenerator.LoggingProcessorParseRegexComplex{ - Parsers: []confgenerator.RegexParser{ - { - // Sample log: 2021-12-21T10:19:47.339827-05:00 - // Thread 1 opened at log sequence 1 - // Current log# 1 seq# 1 mem# 0: /u01/oracle/oradata/DB19C/redo01.log - // Successful open of redo thread 1 - Regex: `^(?\d+-\d+-\d+T\d+:\d+:\d+.\d+(?:[-+]\d+:\d+|Z))\n(?[\s\S]+)`, - Parser: confgenerator.ParserShared{ - TimeKey: "timestamp", - TimeFormat: "%Y-%m-%dT%H:%M:%S.%L%z", +func (lr LoggingProcessorMacroOracleDBAlert) Expand(ctx 
context.Context) []confgenerator.InternalLoggingProcessor { + severityVal := "ALERT" + return []confgenerator.InternalLoggingProcessor{ + confgenerator.LoggingProcessorParseMultilineRegex{ + LoggingProcessorParseRegexComplex: confgenerator.LoggingProcessorParseRegexComplex{ + Parsers: []confgenerator.RegexParser{ + { + // Sample log: 2021-12-21T10:19:47.339827-05:00 + // Thread 1 opened at log sequence 1 + // Current log# 1 seq# 1 mem# 0: /u01/oracle/oradata/DB19C/redo01.log + // Successful open of redo thread 1 + Regex: `^(?\d+-\d+-\d+T\d+:\d+:\d+.\d+(?:[-+]\d+:\d+|Z))\n(?[\s\S]+)`, + Parser: confgenerator.ParserShared{ + TimeKey: "timestamp", + TimeFormat: "%Y-%m-%dT%H:%M:%S.%L%z", + }, }, }, }, - }, - Rules: []confgenerator.MultilineRule{ - { - StateName: "start_state", - NextState: "cont", - Regex: `^\d+-\d+-\d+T\d+:\d+:\d+.\d+(?:[-+]\d+:\d+|Z)`, - }, - { - StateName: "cont", - NextState: "cont", - Regex: `^(?!\d+-\d+-\d+T\d+:\d+:\d+.\d+(?:[-+]\d+:\d+|Z)).*$`, + Rules: []confgenerator.MultilineRule{ + { + StateName: "start_state", + NextState: "cont", + Regex: `^\d+-\d+-\d+T\d+:\d+:\d+.\d+(?:[-+]\d+:\d+|Z)`, + }, + { + StateName: "cont", + NextState: "cont", + Regex: `^(?!\d+-\d+-\d+T\d+:\d+:\d+.\d+(?:[-+]\d+:\d+|Z)).*$`, + }, }, }, - }.Components(ctx, tag, uid) - - severityVal := "ALERT" - components = append(components, confgenerator.LoggingProcessorModifyFields{ Fields: map[string]*confgenerator.ModifyField{ "severity": {StaticValue: &severityVal}, InstrumentationSourceLabel: instrumentationSourceValue(lr.Type()), }, - }.Components(ctx, tag, uid)...) - return components + }, + } } -type LoggingReceiverOracleDBAlert struct { - LoggingProcessorOracleDBAlert `yaml:",inline"` - ReceiverMixin confgenerator.LoggingReceiverFilesMixin `yaml:",inline" validate:"structonly"` - OracleHome string `yaml:"oracle_home,omitempty" validate:"required_without=IncludePaths,excluded_with=IncludePaths"` - IncludePaths []string `yaml:"include_paths,omitempty" validate:"required_without=OracleHome,excluded_with=OracleHome"` +type LoggingReceiverMacroOracleDBAlert struct { + LoggingProcessorMacroOracleDBAlert `yaml:",inline"` + ReceiverMixin confgenerator.LoggingReceiverFilesMixin `yaml:",inline" validate:"structonly"` + OracleHome string `yaml:"oracle_home,omitempty" validate:"required_without=IncludePaths,excluded_with=IncludePaths"` + IncludePaths []string `yaml:"include_paths,omitempty" validate:"required_without=OracleHome,excluded_with=OracleHome"` } -func (lr LoggingReceiverOracleDBAlert) Components(ctx context.Context, tag string) []fluentbit.Component { +func (lr LoggingReceiverMacroOracleDBAlert) Expand(ctx context.Context) (confgenerator.InternalLoggingReceiver, []confgenerator.InternalLoggingProcessor) { if len(lr.OracleHome) > 0 { lr.IncludePaths = []string{ path.Join(lr.OracleHome, "/diag/rdbms/*/*/trace/alert_*.log"), @@ -835,106 +833,101 @@ func (lr LoggingReceiverOracleDBAlert) Components(ctx context.Context, tag strin lr.ReceiverMixin.IncludePaths = lr.IncludePaths - c := lr.ReceiverMixin.Components(ctx, tag) - c = append(c, lr.LoggingProcessorOracleDBAlert.Components(ctx, tag, lr.Type())...) 
- return c + return &lr.ReceiverMixin, lr.LoggingProcessorMacroOracleDBAlert.Expand(ctx) } -type LoggingProcessorOracleDBAudit struct { - confgenerator.ConfigComponent `yaml:",inline"` +type LoggingProcessorMacroOracleDBAudit struct { } -func (lr LoggingProcessorOracleDBAudit) Type() string { +func (lr LoggingProcessorMacroOracleDBAudit) Type() string { return "oracledb_audit" } -func (lr LoggingProcessorOracleDBAudit) Components(ctx context.Context, tag string, uid string) []fluentbit.Component { - components := confgenerator.LoggingProcessorParseMultilineRegex{ - LoggingProcessorParseRegexComplex: confgenerator.LoggingProcessorParseRegexComplex{ - Parsers: []confgenerator.RegexParser{ - { - // Sample log: Wed Sep 14 16:18:03 2022 +00:00 - // LENGTH : '623' - // ACTION :[373] 'select distinct 'ALTER SYSTEM KILL SESSION ''' || stat.sid || ',' || - // sess.serial# || - // decode(substr(inst.version, 1, 4), - // '12.1', ''' immediate ', ''' force timeout 0 ') || - // '-- process 73841' - // FROM SYS.V$mystat stat, v$session sess, v$instance inst - // where stat.sid=sess.sid - // union all - // select '/' from dual' - // DATABASE USER:[1] '/' - // PRIVILEGE :[6] 'SYSDBA' - // CLIENT USER:[6] 'oracle' - // CLIENT TERMINAL:[5] 'pts/1' - // STATUS:[1] '0' - // DBID:[10] '1643176521' - // SESSIONID:[10] '4294967295' - // USERHOST:[7] 'oradb19' - // CLIENT ADDRESS:[0] '' - // ACTION NUMBER:[1] '3' - Regex: `^(?\w+\s+\w+\s+\d+\s+\d+:\d+:\d+\s+\d+\s+(?:[-+]\d+:\d+|Z))\n` + - `LENGTH\s*:(?:\[\d*\])?\s*'(?.*)'\n` + - `ACTION\s*:(?:\[\d*\])?\s*'(?[\s\S]*)'\n` + - `DATABASE USER\s*:(?:\[\d*\])?\s*'(?.*)'\n` + - `PRIVILEGE\s*:(?:\[\d*\])?\s*'(?.*)'\n` + - `CLIENT USER\s*:(?:\[\d*\])?\s*'(?.*)'\n` + - `CLIENT TERMINAL\s*:(?:\[\d*\])?\s*'(?.*)'\n` + - `STATUS\s*:(?:\[\d*\])?\s*'(?.*)'\n` + - `DBID\s*:(?:\[\d*\])?\s*'(?.*)'\n` + - `SESSIONID\s*:(?:\[\d*\])?\s*'(?.*)'\n` + - `USERHOST\s*:(?:\[\d*\])?\s*'(?.*)'\n` + - `CLIENT ADDRESS\s*:(?:\[\d*\])?\s*'(?.*)'\n` + - `ACTION NUMBER\s*:(?:\[\d*\])?\s*'(?.*)'\n?`, - Parser: confgenerator.ParserShared{ - TimeKey: "timestamp", - TimeFormat: "%a %b %d %H:%M:%S %Y %z", - Types: map[string]string{ - "length": "int", - "action_number": "int", - "dbid": "int", - "sessionid": "int", - "status": "int", +func (lr LoggingProcessorMacroOracleDBAudit) Expand(ctx context.Context) []confgenerator.InternalLoggingProcessor { + severityVal := "INFO" + return []confgenerator.InternalLoggingProcessor{ + confgenerator.LoggingProcessorParseMultilineRegex{ + LoggingProcessorParseRegexComplex: confgenerator.LoggingProcessorParseRegexComplex{ + Parsers: []confgenerator.RegexParser{ + { + // Sample log: Wed Sep 14 16:18:03 2022 +00:00 + // LENGTH : '623' + // ACTION :[373] 'select distinct 'ALTER SYSTEM KILL SESSION ''' || stat.sid || ',' || + // sess.serial# || + // decode(substr(inst.version, 1, 4), + // '12.1', ''' immediate ', ''' force timeout 0 ') || + // '-- process 73841' + // FROM SYS.V$mystat stat, v$session sess, v$instance inst + // where stat.sid=sess.sid + // union all + // select '/' from dual' + // DATABASE USER:[1] '/' + // PRIVILEGE :[6] 'SYSDBA' + // CLIENT USER:[6] 'oracle' + // CLIENT TERMINAL:[5] 'pts/1' + // STATUS:[1] '0' + // DBID:[10] '1643176521' + // SESSIONID:[10] '4294967295' + // USERHOST:[7] 'oradb19' + // CLIENT ADDRESS:[0] '' + // ACTION NUMBER:[1] '3' + Regex: `^(?\w+\s+\w+\s+\d+\s+\d+:\d+:\d+\s+\d+\s+(?:[-+]\d+:\d+|Z))\n` + + `LENGTH\s*:(?:\[\d*\])?\s*'(?.*)'\n` + + `ACTION\s*:(?:\[\d*\])?\s*'(?[\s\S]*)'\n` + + `DATABASE 
USER\s*:(?:\[\d*\])?\s*'(?.*)'\n` + + `PRIVILEGE\s*:(?:\[\d*\])?\s*'(?.*)'\n` + + `CLIENT USER\s*:(?:\[\d*\])?\s*'(?.*)'\n` + + `CLIENT TERMINAL\s*:(?:\[\d*\])?\s*'(?.*)'\n` + + `STATUS\s*:(?:\[\d*\])?\s*'(?.*)'\n` + + `DBID\s*:(?:\[\d*\])?\s*'(?.*)'\n` + + `SESSIONID\s*:(?:\[\d*\])?\s*'(?.*)'\n` + + `USERHOST\s*:(?:\[\d*\])?\s*'(?.*)'\n` + + `CLIENT ADDRESS\s*:(?:\[\d*\])?\s*'(?.*)'\n` + + `ACTION NUMBER\s*:(?:\[\d*\])?\s*'(?.*)'\n?`, + Parser: confgenerator.ParserShared{ + TimeKey: "timestamp", + TimeFormat: "%a %b %d %H:%M:%S %Y %z", + Types: map[string]string{ + "length": "int", + "action_number": "int", + "dbid": "int", + "sessionid": "int", + "status": "int", + }, }, }, }, }, - }, - Rules: []confgenerator.MultilineRule{ - { - StateName: "start_state", - NextState: "cont", - Regex: `^\w+ \w+ {1,2}\d+ {1,2}\d+:\d+:\d+ \d+ (?:[-+]\d+:\d+|Z)`, - }, - { - StateName: "cont", - NextState: "cont", - Regex: `^(?!\w+ \w+ {1,2}\d+ {1,2}\d+:\d+:\d+ \d+ (?:[-+]\d+:\d+|Z)).*$`, + Rules: []confgenerator.MultilineRule{ + { + StateName: "start_state", + NextState: "cont", + Regex: `^\w+ \w+ {1,2}\d+ {1,2}\d+:\d+:\d+ \d+ (?:[-+]\d+:\d+|Z)`, + }, + { + StateName: "cont", + NextState: "cont", + Regex: `^(?!\w+ \w+ {1,2}\d+ {1,2}\d+:\d+:\d+ \d+ (?:[-+]\d+:\d+|Z)).*$`, + }, }, }, - }.Components(ctx, tag, uid) - - severityVal := "INFO" - - components = append(components, confgenerator.LoggingProcessorModifyFields{ Fields: map[string]*confgenerator.ModifyField{ "severity": {StaticValue: &severityVal}, InstrumentationSourceLabel: instrumentationSourceValue(lr.Type()), }, - }.Components(ctx, tag, uid)...) - return components + }, + } } -type LoggingReceiverOracleDBAudit struct { - LoggingProcessorOracleDBAudit `yaml:",inline"` - ReceiverMixin confgenerator.LoggingReceiverFilesMixin `yaml:",inline" validate:"structonly"` - OracleHome string `yaml:"oracle_home,omitempty" validate:"required_without=IncludePaths,excluded_with=IncludePaths"` - IncludePaths []string `yaml:"include_paths,omitempty" validate:"required_without=OracleHome,excluded_with=OracleHome"` +type LoggingReceiverMacroOracleDBAudit struct { + LoggingProcessorMacroOracleDBAudit `yaml:",inline"` + ReceiverMixin confgenerator.LoggingReceiverFilesMixin `yaml:",inline" validate:"structonly"` + OracleHome string `yaml:"oracle_home,omitempty" validate:"required_without=IncludePaths,excluded_with=IncludePaths"` + IncludePaths []string `yaml:"include_paths,omitempty" validate:"required_without=OracleHome,excluded_with=OracleHome"` } -func (lr LoggingReceiverOracleDBAudit) Components(ctx context.Context, tag string) []fluentbit.Component { +func (lr LoggingReceiverMacroOracleDBAudit) Expand(ctx context.Context) (confgenerator.InternalLoggingReceiver, []confgenerator.InternalLoggingProcessor) { if len(lr.OracleHome) > 0 { lr.IncludePaths = []string{ path.Join(lr.OracleHome, "/admin/*/adump/*.aud"), @@ -943,14 +936,17 @@ func (lr LoggingReceiverOracleDBAudit) Components(ctx context.Context, tag strin lr.ReceiverMixin.IncludePaths = lr.IncludePaths - c := lr.ReceiverMixin.Components(ctx, tag) - c = append(c, lr.LoggingProcessorOracleDBAudit.Components(ctx, tag, lr.Type())...) 
- return c + return &lr.ReceiverMixin, lr.LoggingProcessorMacroOracleDBAudit.Expand(ctx) } func init() { - confgenerator.LoggingReceiverTypes.RegisterType(func() confgenerator.LoggingReceiver { return &LoggingReceiverOracleDBAlert{} }) - confgenerator.LoggingProcessorTypes.RegisterType(func() confgenerator.LoggingProcessor { return &LoggingProcessorOracleDBAlert{} }) - confgenerator.LoggingReceiverTypes.RegisterType(func() confgenerator.LoggingReceiver { return &LoggingReceiverOracleDBAudit{} }) - confgenerator.LoggingProcessorTypes.RegisterType(func() confgenerator.LoggingProcessor { return &LoggingProcessorOracleDBAudit{} }) + confgenerator.RegisterLoggingProcessorMacro[LoggingProcessorMacroOracleDBAlert]() + confgenerator.RegisterLoggingReceiverMacro(func() LoggingReceiverMacroOracleDBAlert { + return LoggingReceiverMacroOracleDBAlert{} + }) + confgenerator.RegisterLoggingProcessorMacro[LoggingProcessorMacroOracleDBAudit]() + confgenerator.RegisterLoggingReceiverMacro(func() LoggingReceiverMacroOracleDBAudit { + return LoggingReceiverMacroOracleDBAudit{} + }) + } diff --git a/apps/postgresql.go b/apps/postgresql.go index 9b96d899d2..febc9c65a4 100644 --- a/apps/postgresql.go +++ b/apps/postgresql.go @@ -44,7 +44,7 @@ func (r MetricsReceiverPostgresql) Type() string { return "postgresql" } -func (r MetricsReceiverPostgresql) Pipelines(_ context.Context) ([]otel.ReceiverPipeline, error) { +func (r MetricsReceiverPostgresql) Pipelines(ctx context.Context) ([]otel.ReceiverPipeline, error) { transport := "tcp" if r.Endpoint == "" { transport = "unix" @@ -75,7 +75,7 @@ func (r MetricsReceiverPostgresql) Pipelines(_ context.Context) ([]otel.Receiver cfg["tls"] = r.TLSConfig(true) } - return []otel.ReceiverPipeline{{ + return []otel.ReceiverPipeline{confgenerator.ConvertToOtlpExporter(otel.ReceiverPipeline{ Receiver: otel.Component{ Type: "postgresql", Config: cfg, @@ -102,7 +102,7 @@ func (r MetricsReceiverPostgresql) Pipelines(_ context.Context) ([]otel.Receiver ), otel.MetricsRemoveServiceAttributes(), }}, - }}, nil + }, ctx)}, nil } func init() { diff --git a/apps/rabbitmq.go b/apps/rabbitmq.go index b04a77881d..f937b531c0 100644 --- a/apps/rabbitmq.go +++ b/apps/rabbitmq.go @@ -123,7 +123,7 @@ func (r MetricsReceiverRabbitmq) Type() string { return "rabbitmq" } -func (r MetricsReceiverRabbitmq) Pipelines(_ context.Context) ([]otel.ReceiverPipeline, error) { +func (r MetricsReceiverRabbitmq) Pipelines(ctx context.Context) ([]otel.ReceiverPipeline, error) { if r.Endpoint == "" { r.Endpoint = defaultRabbitmqTCPEndpoint } @@ -136,7 +136,7 @@ func (r MetricsReceiverRabbitmq) Pipelines(_ context.Context) ([]otel.ReceiverPi "tls": r.TLSConfig(true), } - return []otel.ReceiverPipeline{{ + return []otel.ReceiverPipeline{confgenerator.ConvertToOtlpExporter(otel.ReceiverPipeline{ Receiver: otel.Component{ Type: "rabbitmq", Config: cfg, @@ -155,7 +155,7 @@ func (r MetricsReceiverRabbitmq) Pipelines(_ context.Context) ([]otel.ReceiverPi ), otel.MetricsRemoveServiceAttributes(), }}, - }}, nil + }, ctx)}, nil } func init() { diff --git a/apps/redis.go b/apps/redis.go index 720da4bcd5..1205d61fc7 100644 --- a/apps/redis.go +++ b/apps/redis.go @@ -39,7 +39,7 @@ func (r MetricsReceiverRedis) Type() string { return "redis" } -func (r MetricsReceiverRedis) Pipelines(_ context.Context) ([]otel.ReceiverPipeline, error) { +func (r MetricsReceiverRedis) Pipelines(ctx context.Context) ([]otel.ReceiverPipeline, error) { if r.Address == "" { r.Address = defaultRedisEndpoint } @@ -51,7 +51,7 @@ func (r 
MetricsReceiverRedis) Pipelines(_ context.Context) ([]otel.ReceiverPipel transport = "tcp" } - return []otel.ReceiverPipeline{{ + return []otel.ReceiverPipeline{confgenerator.ConvertToOtlpExporter(otel.ReceiverPipeline{ Receiver: otel.Component{ Type: "redis", Config: map[string]interface{}{ @@ -79,7 +79,7 @@ func (r MetricsReceiverRedis) Pipelines(_ context.Context) ([]otel.ReceiverPipel ), otel.MetricsRemoveServiceAttributes(), }}, - }}, nil + }, ctx)}, nil } func init() { diff --git a/apps/saphana.go b/apps/saphana.go index bf5515e8e1..373eae1943 100644 --- a/apps/saphana.go +++ b/apps/saphana.go @@ -137,12 +137,12 @@ func (s MetricsReceiverSapHana) Type() string { return "saphana" } -func (s MetricsReceiverSapHana) Pipelines(_ context.Context) ([]otel.ReceiverPipeline, error) { +func (s MetricsReceiverSapHana) Pipelines(ctx context.Context) ([]otel.ReceiverPipeline, error) { if s.Endpoint == "" { s.Endpoint = defaultSapHanaEndpoint } - return []otel.ReceiverPipeline{{ + return []otel.ReceiverPipeline{confgenerator.ConvertToOtlpExporter(otel.ReceiverPipeline{ Receiver: otel.Component{ Type: "saphana", Config: map[string]interface{}{ @@ -170,7 +170,7 @@ func (s MetricsReceiverSapHana) Pipelines(_ context.Context) ([]otel.ReceiverPip ), otel.MetricsRemoveServiceAttributes(), }}, - }}, nil + }, ctx)}, nil } func init() { diff --git a/apps/varnish.go b/apps/varnish.go index 3c92e92ba0..fd5c562e43 100644 --- a/apps/varnish.go +++ b/apps/varnish.go @@ -32,8 +32,8 @@ func (MetricsReceiverVarnish) Type() string { return "varnish" } -func (r MetricsReceiverVarnish) Pipelines(_ context.Context) ([]otel.ReceiverPipeline, error) { - return []otel.ReceiverPipeline{{ +func (r MetricsReceiverVarnish) Pipelines(ctx context.Context) ([]otel.ReceiverPipeline, error) { + return []otel.ReceiverPipeline{confgenerator.ConvertToOtlpExporter(otel.ReceiverPipeline{ Receiver: otel.Component{ Type: "varnish", Config: map[string]interface{}{ @@ -53,7 +53,7 @@ func (r MetricsReceiverVarnish) Pipelines(_ context.Context) ([]otel.ReceiverPip ), otel.MetricsRemoveServiceAttributes(), }}, - }}, nil + }, ctx)}, nil } func init() { diff --git a/apps/vault.go b/apps/vault.go index 60ce399863..5c31ed3333 100644 --- a/apps/vault.go +++ b/apps/vault.go @@ -19,7 +19,6 @@ import ( "fmt" "github.com/GoogleCloudPlatform/ops-agent/confgenerator" - "github.com/GoogleCloudPlatform/ops-agent/confgenerator/fluentbit" "github.com/GoogleCloudPlatform/ops-agent/confgenerator/otel" "github.com/GoogleCloudPlatform/ops-agent/internal/secret" ) @@ -75,7 +74,7 @@ func (r MetricsReceiverVault) Type() string { return "vault" } -func (r MetricsReceiverVault) Pipelines(_ context.Context) ([]otel.ReceiverPipeline, error) { +func (r MetricsReceiverVault) Pipelines(ctx context.Context) ([]otel.ReceiverPipeline, error) { if r.Endpoint == "" { r.Endpoint = defaultVaultEndpoint } @@ -126,7 +125,7 @@ func (r MetricsReceiverVault) Pipelines(_ context.Context) ([]otel.ReceiverPipel queries = append(queries, metricRenewRevokeTransforms...) queries = append(queries, metricDetailTransforms...) 
- return []otel.ReceiverPipeline{{ + return []otel.ReceiverPipeline{confgenerator.ConvertToOtlpExporter(otel.ReceiverPipeline{ Receiver: otel.Component{ Type: "prometheus", Config: map[string]interface{}{ @@ -180,7 +179,7 @@ func (r MetricsReceiverVault) Pipelines(_ context.Context) ([]otel.ReceiverPipel ), otel.MetricsRemoveServiceAttributes(), }}, - }}, nil + }, ctx)}, nil } type metricTransformer struct { @@ -313,63 +312,56 @@ func init() { confgenerator.MetricsReceiverTypes.RegisterType(func() confgenerator.MetricsReceiver { return &MetricsReceiverVault{} }) } -type LoggingProcessorVaultJson struct { - confgenerator.ConfigComponent `yaml:",inline"` -} +type LoggingProcessorMacroVaultJson struct{} -func (LoggingProcessorVaultJson) Type() string { +func (LoggingProcessorMacroVaultJson) Type() string { return "vault_audit" } -func (p LoggingProcessorVaultJson) Components(ctx context.Context, tag, uid string) []fluentbit.Component { - c := []fluentbit.Component{} - - // sample log line: - // {"time":"2022-06-07T20:34:34.392078404Z","type":"request","auth":{"token_type":"default"},"request":{"id":"aa005196-0280-381d-ebeb-1a083bdf5675","operation":"update","namespace":{"id":"root"},"path":"sys/audit/test"}} - jsonParser := &confgenerator.LoggingProcessorParseJson{ - ParserShared: confgenerator.ParserShared{ - TimeKey: "time", - TimeFormat: "%Y-%m-%dT%H:%M:%S.%L%z", +func (p LoggingProcessorMacroVaultJson) Expand(ctx context.Context) []confgenerator.InternalLoggingProcessor { + return []confgenerator.InternalLoggingProcessor{ + confgenerator.LoggingProcessorParseMultilineRegex{ + Rules: []confgenerator.MultilineRule{ + { + StateName: "start_state", + NextState: "cont", + Regex: `^{.*`, + }, + { + StateName: "cont", + NextState: "cont", + Regex: `^(?!{.*)`, + }, + }, }, - } - - c = append(c, confgenerator.LoggingProcessorModifyFields{ Fields: map[string]*confgenerator.ModifyField{ InstrumentationSourceLabel: instrumentationSourceValue(p.Type()), }, - }.Components(ctx, tag, uid)..., - ) - c = append(c, jsonParser.Components(ctx, tag, uid)...) 
- return c + }, + // sample log line: + // {"time":"2022-06-07T20:34:34.392078404Z","type":"request","auth":{"token_type":"default"},"request":{"id":"aa005196-0280-381d-ebeb-1a083bdf5675","operation":"update","namespace":{"id":"root"},"path":"sys/audit/test"}} + confgenerator.LoggingProcessorParseJson{ + ParserShared: confgenerator.ParserShared{ + TimeKey: "time", + TimeFormat: "%Y-%m-%dT%H:%M:%S.%L%z", + }, + }, + } } -type LoggingReceiverVaultAuditJson struct { - LoggingProcessorVaultJson `yaml:",inline"` - ReceiverMixin confgenerator.LoggingReceiverFilesMixin `yaml:",inline"` - IncludePaths []string `yaml:"include_paths,omitempty" validate:"required"` +type LoggingReceiverMacroVaultAuditJson struct { + LoggingProcessorMacroVaultJson `yaml:",inline"` + ReceiverMixin confgenerator.LoggingReceiverFilesMixin `yaml:",inline"` + IncludePaths []string `yaml:"include_paths,omitempty" validate:"required"` } -func (r LoggingReceiverVaultAuditJson) Components(ctx context.Context, tag string) []fluentbit.Component { +func (r LoggingReceiverMacroVaultAuditJson) Expand(ctx context.Context) (confgenerator.InternalLoggingReceiver, []confgenerator.InternalLoggingProcessor) { r.ReceiverMixin.IncludePaths = r.IncludePaths - - r.ReceiverMixin.MultilineRules = []confgenerator.MultilineRule{ - { - StateName: "start_state", - NextState: "cont", - Regex: `^{.*`, - }, - { - StateName: "cont", - NextState: "cont", - Regex: `^(?!{.*)`, - }, - } - - c := r.ReceiverMixin.Components(ctx, tag) - return append(c, r.LoggingProcessorVaultJson.Components(ctx, tag, r.LoggingProcessorVaultJson.Type())...) + return &r.ReceiverMixin, r.LoggingProcessorMacroVaultJson.Expand(ctx) } func init() { - confgenerator.LoggingReceiverTypes.RegisterType(func() confgenerator.LoggingReceiver { return &LoggingReceiverVaultAuditJson{} }) + confgenerator.RegisterLoggingReceiverMacro(func() LoggingReceiverMacroVaultAuditJson { return LoggingReceiverMacroVaultAuditJson{} }) + confgenerator.RegisterLoggingProcessorMacro[LoggingProcessorMacroVaultJson]() } diff --git a/apps/zookeeper.go b/apps/zookeeper.go index 38a4ee682e..8f8f9d9412 100644 --- a/apps/zookeeper.go +++ b/apps/zookeeper.go @@ -39,12 +39,12 @@ func (MetricsReceiverZookeeper) Type() string { return "zookeeper" } -func (r MetricsReceiverZookeeper) Pipelines(_ context.Context) ([]otel.ReceiverPipeline, error) { +func (r MetricsReceiverZookeeper) Pipelines(ctx context.Context) ([]otel.ReceiverPipeline, error) { if r.Endpoint == "" { r.Endpoint = defaultZookeeperEndpoint } - return []otel.ReceiverPipeline{{ + return []otel.ReceiverPipeline{confgenerator.ConvertToOtlpExporter(otel.ReceiverPipeline{ Receiver: otel.Component{ Type: "zookeeper", Config: map[string]interface{}{ @@ -63,7 +63,7 @@ func (r MetricsReceiverZookeeper) Pipelines(_ context.Context) ([]otel.ReceiverP ), otel.MetricsRemoveServiceAttributes(), }}, - }}, nil + }, ctx)}, nil } type LoggingProcessorMacroZookeeperGeneral struct{} diff --git a/builds/fluent_bit.sh b/builds/fluent_bit.sh index 09970980a8..56263357cc 100755 --- a/builds/fluent_bit.sh +++ b/builds/fluent_bit.sh @@ -27,6 +27,7 @@ cmake .. 
-DCMAKE_INSTALL_PREFIX=$fluent_bit_dir \ -DFLB_HTTP_SERVER=ON -DFLB_DEBUG=OFF -DCMAKE_BUILD_TYPE=RelWithDebInfo \ -DWITHOUT_HEADERS=ON -DFLB_SHARED_LIB=OFF -DFLB_STREAM_PROCESSOR=OFF \ -DFLB_KAFKA=OFF \ + -DFLB_WASM=OFF \ -DFLB_CONFIG_YAML=OFF \ -DFLB_IN_SYSTEMD=On \ -DFLB_MSGPACK_TO_JSON_INIT_BUFFER_SIZE=1.5 -DFLB_MSGPACK_TO_JSON_REALLOC_BUFFER_SIZE=.10 \ diff --git a/cloudbuild/new-distro-detector/run.sh b/cloudbuild/new-distro-detector/run.sh index 423f3b24a3..407c793296 100644 --- a/cloudbuild/new-distro-detector/run.sh +++ b/cloudbuild/new-distro-detector/run.sh @@ -23,7 +23,6 @@ set -o pipefail PATTERNS_FILE="irrelevant_family_patterns.txt" echo '^cos- ^fedora- -^rhel- ^rocky-linux-[0-9]+-optimized-gcp ^sql- ^ubuntu-pro- diff --git a/cmd/ops_agent_uap_plugin/plugin.go b/cmd/ops_agent_uap_plugin/plugin.go index 387b54c2e3..39a241372f 100644 --- a/cmd/ops_agent_uap_plugin/plugin.go +++ b/cmd/ops_agent_uap_plugin/plugin.go @@ -16,8 +16,10 @@ package main import ( "context" + "errors" "flag" "fmt" + "log" "net" "os" "os/exec" @@ -45,18 +47,92 @@ var ( // implementations. type RunCommandFunc func(cmd *exec.Cmd) (string, error) +// RunSubAgentCommandFunc defines a function type that starts a subagent. If one subagent execution exits, other subagents are also terminated via context cancellation. This abstraction is introduced +// primarily to facilitate testing by allowing the injection of mock +// implementations. +type RunSubAgentCommandFunc func(ctx context.Context, cancel CancelContextAndSetPluginErrorFunc, cmd *exec.Cmd, runCommand RunCommandFunc, wg *sync.WaitGroup) + +// CancelContextAndSetPluginErrorFunc defines a function type that stops the Ops Agent from running and records the latest error that occurred. +// This abstraction is introduced primarily to facilitate testing by allowing the injection of mock implementations. +type CancelContextAndSetPluginErrorFunc func(err *OpsAgentPluginError) + +type OpsAgentPluginError struct { + Message string + ShouldRestart bool +} + // PluginServer implements the plugin RPC server interface. type OpsAgentPluginServer struct { pb.UnimplementedGuestAgentPluginServer server *grpc.Server - // mu protects the cancel field. - mu sync.Mutex - cancel context.CancelFunc + // mu protects the cancel and pluginError fields. + mu sync.Mutex + cancel context.CancelFunc + pluginError *OpsAgentPluginError runCommand RunCommandFunc } +// Stop is the stop hook and implements any cleanup if required. +// Stop may be called if the plugin revision is being changed. +// For example, if plugins want to stop some task they were performing or remove some +// state before exiting, it can be done on this request. +func (ps *OpsAgentPluginServer) Stop(ctx context.Context, msg *pb.StopRequest) (*pb.StopResponse, error) { + ps.mu.Lock() + defer ps.mu.Unlock() + ps.pluginError = nil + if ps.cancel == nil { + log.Printf("The Ops Agent plugin is stopped already, skipping the current request") + return &pb.StopResponse{}, nil + } + log.Printf("Received a Stop request: %s. Stopping the Ops Agent", msg) + ps.cancel() + ps.cancel = nil + return &pb.StopResponse{}, nil +} + +// GetStatus is the health check the agent performs to make sure the plugin process +// is alive. If the request fails, the process is considered dead and relaunched. Plugins +// can share any additional information to report it to the service. For example, if +// plugins detect some non-fatal errors leaving them unable to offer some features, +// these can be reported in the status which is sent back to the service by the agent. 
+func (ps *OpsAgentPluginServer) GetStatus(ctx context.Context, msg *pb.GetStatusRequest) (*pb.Status, error) { + log.Println("Received a GetStatus request") + ps.mu.Lock() + defer ps.mu.Unlock() + if ps.cancel != nil { + log.Println("The Ops Agent plugin is running") + if ps.pluginError != nil { + return &pb.Status{Code: 0, Results: []string{fmt.Sprintf("The Ops Agent Plugin is running ok, but has error: %s", ps.pluginError.Message)}}, nil + } + return &pb.Status{Code: 0, Results: []string{"The Ops Agent Plugin is running ok."}}, nil + } + + if ps.pluginError != nil { + log.Printf("The Ops Agent plugin is not running, last error: %s", ps.pluginError.Message) + if ps.pluginError.ShouldRestart { + return nil, errors.New(ps.pluginError.Message) + } + return &pb.Status{Code: 1, Results: []string{fmt.Sprintf("The Ops Agent Plugin is not running: %s", ps.pluginError.Message)}}, nil + } + return &pb.Status{Code: 1, Results: []string{"The Ops Agent Plugin is not running."}}, nil +} + +// cancelAndSetPluginError terminates the current attempt at running the Ops Agent and records the latest error that occurred. +func (ps *OpsAgentPluginServer) cancelAndSetPluginError(e *OpsAgentPluginError) { + ps.mu.Lock() + defer ps.mu.Unlock() + if ps.cancel != nil { + ps.cancel() + ps.cancel = nil + } + if e != nil { + ps.pluginError = e + log.Print(e.Message) + } +} + func init() { flag.StringVar(&protocol, "protocol", "", "protocol to use uds/tcp") flag.StringVar(&address, "address", "", "address to start server listening on") @@ -98,6 +174,27 @@ func main() { } } +func runSubAgentCommand(ctx context.Context, cancelAndSetError CancelContextAndSetPluginErrorFunc, cmd *exec.Cmd, runCommand RunCommandFunc, wg *sync.WaitGroup) { + defer wg.Done() + if cmd == nil { + return + } + + output, err := runCommand(cmd) + var pluginErr *OpsAgentPluginError + if err != nil { + // The command exiting with errors might be due to context cancellation, e.g. Stop has been called. 
+ if ctx.Err() != context.Canceled { + fullErr := fmt.Sprintf("command: %s exited with errors, not restarting.\nCommand output: %s\n Command error:%s", cmd.Args, string(output), err) + log.Print(fullErr) + pluginErr = &OpsAgentPluginError{Message: fullErr, ShouldRestart: true} + } + } else { + log.Printf("command: %s %s exited successfully.\nCommand output: %s", cmd.Path, cmd.Args, string(output)) + } + cancelAndSetError(pluginErr) +} + func writeCustomConfigToFile(req *pb.StartRequest, configPath string) error { customConfig := []byte{} switch req.GetServiceConfig().(type) { diff --git a/cmd/ops_agent_uap_plugin/plugin_test.go b/cmd/ops_agent_uap_plugin/plugin_test.go index 36d1bb3736..a6e0e038e8 100644 --- a/cmd/ops_agent_uap_plugin/plugin_test.go +++ b/cmd/ops_agent_uap_plugin/plugin_test.go @@ -19,6 +19,7 @@ import ( "os" "path/filepath" "testing" + "time" "buf.build/go/protoyaml" // Import the protoyaml-go package pb "github.com/GoogleCloudPlatform/google-guest-agent/pkg/proto/plugin_comm" @@ -35,6 +36,96 @@ func customLogPathByOsType(ctx context.Context) string { } return `C:\mylog` } + +func TestStop(t *testing.T) { + t.Parallel() + cases := []struct { + name string + cancel context.CancelFunc + opsAgentPluginError *OpsAgentPluginError + }{ + { + name: "Plugin already stopped", + cancel: nil, + }, + { + name: "Plugin running", + cancel: func() {}, // Non-nil function + opsAgentPluginError: &OpsAgentPluginError{Message: "error", ShouldRestart: false}, + }, + } + + for _, tc := range cases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + ps := &OpsAgentPluginServer{cancel: tc.cancel, pluginError: tc.opsAgentPluginError} + _, err := ps.Stop(context.Background(), &pb.StopRequest{}) + if err != nil { + t.Errorf("got error from Stop(): %v, wanted nil", err) + } + + if ps.cancel != nil { + t.Error("got non-nil cancel function after calling Stop(), want nil") + } + if ps.pluginError != nil { + t.Error("got non-nil pluginError after calling Stop(), want nil") + } + }) + } +} + +func TestGetStatus(t *testing.T) { + t.Parallel() + cases := []struct { + name string + wantRPCError bool + + pluginServer *OpsAgentPluginServer + wantStatusCode int32 + }{ + { + name: "Plugin not running and has error that requires restart", + pluginServer: &OpsAgentPluginServer{cancel: nil, pluginError: &OpsAgentPluginError{Message: "error", ShouldRestart: true}}, + wantRPCError: true, + }, + { + name: "Plugin not running and has error that does not require restart", + pluginServer: &OpsAgentPluginServer{cancel: nil, pluginError: &OpsAgentPluginError{Message: "error", ShouldRestart: false}}, + wantStatusCode: 1, + }, + { + name: "Plugin not running and has no error", + wantStatusCode: 1, + pluginServer: &OpsAgentPluginServer{}, + }, + { + name: "Plugin running", + pluginServer: &OpsAgentPluginServer{cancel: func() {}, pluginError: nil}, + wantStatusCode: 0, + }, + { + name: "Plugin running and has error that does not require restart", + pluginServer: &OpsAgentPluginServer{cancel: func() {}, pluginError: &OpsAgentPluginError{Message: "error", ShouldRestart: false}}, + wantStatusCode: 0, + }, + } + + for _, tc := range cases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + status, err := tc.pluginServer.GetStatus(context.Background(), &pb.GetStatusRequest{}) + + if (err != nil) != tc.wantRPCError { + t.Errorf("got error from GetStatus: %v, wanted error: %v", err, tc.wantRPCError) + } + if !tc.wantRPCError && (status.Code != tc.wantStatusCode) { + t.Errorf("Got status code %d from 
GetStatus(), wanted %d", status.Code, tc.wantStatusCode) + } + }) + } +} func TestWriteCustomConfigToFile(t *testing.T) { yamlConfig := fmt.Sprintf(`logging: receivers: @@ -149,3 +240,24 @@ func TestWriteCustomConfigToFile_receivedEmptyCustomConfig(t *testing.T) { }) } } + +// TestHelperProcess isn't a real test. It's used as a helper process to mock +// command executions. +func TestHelperProcess(t *testing.T) { + if os.Getenv("GO_WANT_HELPER_PROCESS") != "1" { + // Skip this test if it's not invoked explicitly as a helper + // process. return allows the next tests to continue running. + return + } + switch { + case os.Getenv("GO_HELPER_FAILURE") == "1": + os.Exit(1) + case os.Getenv("GO_HELPER_KILL_BY_SIGNALS") == "1": + time.Sleep(1 * time.Minute) + case os.Getenv("GO_HELPER_LONG_RUNNING") == "1": + time.Sleep(2 * time.Minute) + default: + // A "successful" mock execution exits with a successful (zero) exit code. + os.Exit(0) + } +} diff --git a/cmd/ops_agent_uap_plugin/service_linux.go b/cmd/ops_agent_uap_plugin/service_linux.go index 43fcf1890f..bdf08935aa 100644 --- a/cmd/ops_agent_uap_plugin/service_linux.go +++ b/cmd/ops_agent_uap_plugin/service_linux.go @@ -52,11 +52,6 @@ var ( AgentSystemdServiceNames = []string{"google-cloud-ops-agent.service", "stackdriver-agent.service", "google-fluentd.service"} ) -// RunSubAgentCommandFunc defines a function type that starts a subagent. If one subagent execution exited, other sugagents are also terminated via context cancellation. This abstraction is introduced -// primarily to facilitate testing by allowing the injection of mock -// implementations. -type RunSubAgentCommandFunc func(ctx context.Context, cancel context.CancelFunc, cmd *exec.Cmd, runCommand RunCommandFunc, wg *sync.WaitGroup) - // Start starts the plugin and initiates the plugin functionality. // Until plugin receives Start request plugin is expected to be not functioning // and just listening on the address handed off waiting for the request. @@ -67,26 +62,24 @@ func (ps *OpsAgentPluginServer) Start(ctx context.Context, msg *pb.StartRequest) ps.mu.Unlock() return &pb.StartResponse{}, nil } - log.Printf("Received a Start request: %s. Starting the Ops Agent", msg) + log.Printf("Received a Start request: %s. 
Starting the Ops Agent", msg) pContext, cancel := context.WithCancel(context.Background()) ps.cancel = cancel ps.mu.Unlock() pluginInstallPath, err := os.Executable() if err != nil { - ps.Stop(ctx, &pb.StopRequest{Cleanup: false}) - log.Printf("Start() failed, because it cannot determine the plugin install location: %s", err) + ps.cancelAndSetPluginError(&OpsAgentPluginError{Message: fmt.Sprintf("Start() failed, because it cannot determine the plugin install location: %s", err), ShouldRestart: false}) return &pb.StartResponse{}, nil } pluginInstallPath, err = filepath.EvalSymlinks(pluginInstallPath) if err != nil { - ps.Stop(ctx, &pb.StopRequest{Cleanup: false}) - log.Printf("Start() failed, because it cannot determine the plugin install location: %s", err) + ps.cancelAndSetPluginError(&OpsAgentPluginError{Message: fmt.Sprintf("Start() failed, because it cannot determine the plugin install location: %s", err), ShouldRestart: false}) return &pb.StartResponse{}, nil } - pluginInstallDir := filepath.Dir(pluginInstallPath) + pluginInstallDir := filepath.Dir(pluginInstallPath) pluginStateDir := msg.GetConfig().GetStateDirectoryPath() if pluginStateDir == "" { pluginStateDir = DefaultPluginStateDirectory @@ -95,76 +88,32 @@ func (ps *OpsAgentPluginServer) Start(ctx context.Context, msg *pb.StartRequest) // Find existing ops agent installation, and conflicting legacy agent installation. foundConflictingInstallations, err := findPreExistentAgents(pContext, ps.runCommand, AgentSystemdServiceNames) if foundConflictingInstallations || err != nil { - ps.Stop(ctx, &pb.StopRequest{Cleanup: false}) - log.Printf("Start() failed: %s", err) + ps.cancelAndSetPluginError(&OpsAgentPluginError{Message: fmt.Sprintf("Start() failed, because it detected agent installations unmanaged by the VM Extension Manager: %s", err), ShouldRestart: false}) return &pb.StartResponse{}, nil } // Receive config from the Start request and write it to the Ops Agent config file. 
if err := writeCustomConfigToFile(msg, OpsAgentConfigLocationLinux); err != nil { - log.Printf("Start() failed: %s", err) - ps.Stop(ctx, &pb.StopRequest{Cleanup: false}) - log.Printf("Start() failed to write the custom Ops Agent config to file: %s", err) + ps.cancelAndSetPluginError(&OpsAgentPluginError{Message: fmt.Sprintf("Start() failed to write the custom Ops Agent config to file: %s", err), ShouldRestart: false}) return &pb.StartResponse{}, nil } // Ops Agent config validation if err := validateOpsAgentConfig(pContext, pluginInstallDir, pluginStateDir, ps.runCommand); err != nil { - log.Printf("Start() failed: %s", err) - ps.Stop(ctx, &pb.StopRequest{Cleanup: false}) - log.Printf("Start() failed to validate the Ops Agent config: %s", err) + ps.cancelAndSetPluginError(&OpsAgentPluginError{Message: fmt.Sprintf("Start() failed to validate the custom Ops Agent config: %s", err), ShouldRestart: false}) return &pb.StartResponse{}, nil } // Subagent config generation if err := generateSubagentConfigs(pContext, ps.runCommand, pluginInstallDir, pluginStateDir); err != nil { - log.Printf("Start() failed: %s", err) - ps.Stop(ctx, &pb.StopRequest{Cleanup: false}) - log.Printf("Start() failed to generate subagent configs: %s", err) + ps.cancelAndSetPluginError(&OpsAgentPluginError{Message: fmt.Sprintf("Start() failed to generate subagent configs: %s", err), ShouldRestart: false}) return &pb.StartResponse{}, nil } // the subagent startups - cancelFunc := func() { - ps.Stop(ctx, &pb.StopRequest{Cleanup: false}) - } - go runSubagents(pContext, cancelFunc, pluginInstallDir, pluginStateDir, runSubAgentCommand, ps.runCommand) + go runSubagents(pContext, ps.cancelAndSetPluginError, pluginInstallDir, pluginStateDir, runSubAgentCommand, ps.runCommand) return &pb.StartResponse{}, nil } -// Stop is the stop hook and implements any cleanup if required. -// Stop maybe called if plugin revision is being changed. -// For e.g. if plugins want to stop some task it was performing or remove some -// state before exiting it can be done on this request. -func (ps *OpsAgentPluginServer) Stop(ctx context.Context, msg *pb.StopRequest) (*pb.StopResponse, error) { - ps.mu.Lock() - defer ps.mu.Unlock() - if ps.cancel == nil { - log.Printf("The Ops Agent plugin is stopped already, skipping the current request") - return &pb.StopResponse{}, nil - } - log.Printf("Received a Stop request: %s. Stopping the Ops Agent", msg) - ps.cancel() - ps.cancel = nil - return &pb.StopResponse{}, nil -} - -// GetStatus is the health check agent would perform to make sure plugin process -// is alive. If request fails process is considered dead and relaunched. Plugins -// can share any additional information to report it to the service. For e.g. if -// plugins detect some non-fatal errors causing it unable to offer some features -// it can reported in status which is sent back to the service by agent. -func (ps *OpsAgentPluginServer) GetStatus(ctx context.Context, msg *pb.GetStatusRequest) (*pb.Status, error) { - log.Println("Received a GetStatus request") - ps.mu.Lock() - defer ps.mu.Unlock() - if ps.cancel == nil { - log.Println("The Ops Agent plugin is not running") - return &pb.Status{Code: 1, Results: []string{"The Ops Agent Plugin is not running."}}, nil - } - log.Println("The Ops Agent plugin is running") - return &pb.Status{Code: 0, Results: []string{"The Ops Agent Plugin is running ok."}}, nil -} - // runSubagents starts up otel and fluent bit subagents in separate goroutines. 
// All child goroutines create a new context derived from the same parent context. // This ensures that crashes in one goroutine don't affect other goroutines. @@ -174,12 +123,11 @@ func (ps *OpsAgentPluginServer) GetStatus(ctx context.Context, msg *pb.GetStatus // // ctx: the parent context that all child goroutines share. // -// cancel: the cancel function for the parent context. By calling this function, the parent context is canceled, -// and GetStatus() returns a non-healthy status, signaling UAP to re-trigger Start(). -func runSubagents(ctx context.Context, cancel context.CancelFunc, pluginInstallDirectory string, pluginStateDirectory string, runSubAgentCommand RunSubAgentCommandFunc, runCommand RunCommandFunc) { +// cancelAndSetError: should be called by subagents from within goroutines. It cancels the parent context, and collects the runtime errors from subagents and records them. The recorded errors are surfaced to users via GetStatus(). +func runSubagents(ctx context.Context, cancelAndSetError CancelContextAndSetPluginErrorFunc, pluginInstallDirectory string, pluginStateDirectory string, runSubAgentCommand RunSubAgentCommandFunc, runCommand RunCommandFunc) { // Register signal handler and implements its callback. - sigHandler(ctx, func(_ os.Signal) { - cancel() + sigHandler(ctx, func(s os.Signal) { + cancelAndSetError(&OpsAgentPluginError{Message: fmt.Sprintf("Received signal: %s, stopping the Ops Agent", s.String()), ShouldRestart: true}) }) var wg sync.WaitGroup @@ -191,7 +139,7 @@ func runSubagents(ctx context.Context, cancel context.CancelFunc, pluginInstallD "--feature-gates=receiver.prometheusreceiver.RemoveStartTimeAdjustment", ) wg.Add(1) - go runSubAgentCommand(ctx, cancel, runOtelCmd, runCommand, &wg) + go runSubAgentCommand(ctx, cancelAndSetError, runOtelCmd, runCommand, &wg) // Starting FluentBit runFluentBitCmd := exec.CommandContext(ctx, @@ -204,32 +152,11 @@ func runSubagents(ctx context.Context, cancel context.CancelFunc, pluginInstallD "--storage_path", path.Join(pluginStateDirectory, FluentBitStateDiectory, "buffers"), ) wg.Add(1) - go runSubAgentCommand(ctx, cancel, runFluentBitCmd, runCommand, &wg) + go runSubAgentCommand(ctx, cancelAndSetError, runFluentBitCmd, runCommand, &wg) wg.Wait() } -func runSubAgentCommand(ctx context.Context, cancel context.CancelFunc, cmd *exec.Cmd, runCommand RunCommandFunc, wg *sync.WaitGroup) { - defer wg.Done() - if cmd == nil { - return - } - if ctx.Err() != nil { - // context has been cancelled - log.Printf("cannot execute command: %s, because the context has been cancelled", cmd.Args) - return - } - - output, err := runCommand(cmd) - if err != nil { - log.Printf("command: %s exited with errors, not restarting.\nCommand output: %s\n Command error:%s", cmd.Args, string(output), err) - } else { - log.Printf("command: %s %s exited successfully.\nCommand output: %s", cmd.Path, cmd.Args, string(output)) - } - cancel() // cancels the parent context which also stops other Ops Agent sub-binaries from running. - return -} - // sigHandler handles SIGTERM, SIGINT etc signals. The function provided in the // cancel argument handles internal framework termination and the plugin // interface notification of the "exiting" state.
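Note: `OpsAgentPluginError`, `CancelContextAndSetPluginErrorFunc`, and `ps.cancelAndSetPluginError` are used throughout this change but their definitions are not part of these hunks. Below is a minimal sketch of what they might look like, based only on the field names and call sites visible in this diff; everything beyond those names is an assumption, not the actual shared implementation.

```go
// Hypothetical sketch only; the real definitions live in shared plugin code
// that is not shown in this diff.
package sketch

import (
	"context"
	"log"
	"sync"
)

// OpsAgentPluginError records why the agent stopped and whether UAP should
// restart it (fields Message and ShouldRestart appear in this diff's call sites).
type OpsAgentPluginError struct {
	Message       string
	ShouldRestart bool
}

// CancelContextAndSetPluginErrorFunc is the callback type passed to
// runSubagents and runSubAgentCommand in this change.
type CancelContextAndSetPluginErrorFunc func(*OpsAgentPluginError)

// OpsAgentPluginServer fields mirror those exercised by the tests below
// (mu, cancel, pluginError); any other fields of the real type are omitted.
type OpsAgentPluginServer struct {
	mu          sync.Mutex
	cancel      context.CancelFunc
	pluginError *OpsAgentPluginError
}

// cancelAndSetPluginError cancels the plugin's parent context (stopping all
// subagents) and, if an error is provided, records it so a later GetStatus()
// call can surface it.
func (ps *OpsAgentPluginServer) cancelAndSetPluginError(err *OpsAgentPluginError) {
	ps.mu.Lock()
	defer ps.mu.Unlock()
	if err != nil {
		log.Print(err.Message)
		ps.pluginError = err
	}
	if ps.cancel != nil {
		ps.cancel()
		ps.cancel = nil
	}
}
```

Recording the error before cancelling is what lets the health check report both that the plugin stopped and whether UAP should re-trigger Start(), which the previous plain `cancel()` callback could not express.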
diff --git a/cmd/ops_agent_uap_plugin/service_linux_test.go b/cmd/ops_agent_uap_plugin/service_linux_test.go index c7eb3a373d..5bddbe8e18 100644 --- a/cmd/ops_agent_uap_plugin/service_linux_test.go +++ b/cmd/ops_agent_uap_plugin/service_linux_test.go @@ -165,10 +165,11 @@ func Test_generateSubagentConfigs(t *testing.T) { func TestStart(t *testing.T) { t.Parallel() cases := []struct { - name string - cancel context.CancelFunc - mockRunCommandFunc RunCommandFunc - wantCancelNil bool + name string + cancel context.CancelFunc + mockRunCommandFunc RunCommandFunc + wantCancelNil bool + wantOpsAgentPluginError bool }{ { name: "Happy path: plugin not already started, Start() exits successfully", @@ -190,7 +191,8 @@ func TestStart(t *testing.T) { mockRunCommandFunc: func(cmd *exec.Cmd) (string, error) { return "", fmt.Errorf("error") }, - wantCancelNil: true, + wantCancelNil: true, + wantOpsAgentPluginError: true, }, } @@ -203,106 +205,59 @@ func TestStart(t *testing.T) { if (ps.cancel == nil) != tc.wantCancelNil { t.Errorf("%v: Start() got cancel function: %v, want cancel function to be reset to nil: %v", tc.name, ps.cancel, tc.wantCancelNil) } - }) - } -} -func TestStop(t *testing.T) { - t.Parallel() - cases := []struct { - name string - cancel context.CancelFunc - }{ - { - name: "PluginAlreadyStopped", - cancel: nil, - }, - { - name: "PluginRunning", - cancel: func() {}, // Non-nil function - - }, - } - - for _, tc := range cases { - tc := tc - t.Run(tc.name, func(t *testing.T) { - t.Parallel() - ps := &OpsAgentPluginServer{cancel: tc.cancel} - _, err := ps.Stop(context.Background(), &pb.StopRequest{}) - if err != nil { - t.Errorf("got error from Stop(): %v, wanted nil", err) - } - - if ps.cancel != nil { - t.Error("got non-nil cancel function after calling Stop(), want nil") - } - }) - } -} - -func TestGetStatus(t *testing.T) { - t.Parallel() - cases := []struct { - name string - cancel context.CancelFunc - wantStatusCode int32 - }{ - { - name: "PluginNotRunning", - cancel: nil, - wantStatusCode: 1, - }, - { - name: "PluginRunning", - cancel: func() {}, // Non-nil function - wantStatusCode: 0, - }, - } - - for _, tc := range cases { - tc := tc - t.Run(tc.name, func(t *testing.T) { - t.Parallel() - ps := &OpsAgentPluginServer{cancel: tc.cancel} - status, err := ps.GetStatus(context.Background(), &pb.GetStatusRequest{}) - if err != nil { - t.Errorf("got error from GetStatus: %v, wanted nil", err) - } - gotStatusCode := status.Code - if gotStatusCode != tc.wantStatusCode { - t.Errorf("Got status code %d from GetStatus(), wanted %d", gotStatusCode, tc.wantStatusCode) + if (ps.pluginError != nil) != tc.wantOpsAgentPluginError { + t.Errorf("%v: Start() got pluginError: %v, want pluginError to be set: %v", tc.name, ps.pluginError, tc.wantOpsAgentPluginError) } - }) } } -func Test_runSubAgentCommand_CancelContextWhenCmdExitsWithErrors(t *testing.T) { +func Test_runSubAgentCommand_CancelContextAndSetPluginErrorWhenCmdExitsWithErrors(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) cmd := exec.CommandContext(ctx, os.Args[0], "-test.run=TestHelperProcess") cmd.Env = []string{"GO_WANT_HELPER_PROCESS=1", "GO_HELPER_FAILURE=1"} var wg sync.WaitGroup wg.Add(1) - runSubAgentCommand(ctx, cancel, cmd, runCommand, &wg) - if ctx.Err() == nil { + + pluginServer := &OpsAgentPluginServer{} + pluginServer.cancel = cancel + + runSubAgentCommand(ctx, pluginServer.cancelAndSetPluginError, cmd, runCommand, &wg) + if ctx.Err() != context.Canceled { t.Error("runSubAgentCommand() did not cancel context 
but should") } + if pluginServer.pluginError == nil { + t.Error("runSubAgentCommand() did not set pluginError but should") + } + if !pluginServer.pluginError.ShouldRestart { + t.Error("runSubAgentCommand() set pluginError.ShouldRestart to false, want true") + } } func Test_runSubAgentCommand_CancelContextWhenCmdExitsSuccessfully(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) + pluginServer := &OpsAgentPluginServer{} + pluginServer.cancel = cancel + cmd := exec.CommandContext(ctx, os.Args[0], "-test.run=TestHelperProcess") cmd.Env = []string{"GO_WANT_HELPER_PROCESS=1"} var wg sync.WaitGroup wg.Add(1) - runSubAgentCommand(ctx, cancel, cmd, runCommand, &wg) - if ctx.Err() == nil { + + runSubAgentCommand(ctx, pluginServer.cancelAndSetPluginError, cmd, runCommand, &wg) + if ctx.Err() != context.Canceled { t.Error("runSubAgentCommand() did not cancel context but should") } + if pluginServer.pluginError != nil { + t.Errorf("runSubAgentCommand() set pluginError: %v, want nil", pluginServer.pluginError) + } } func Test_runSubAgentCommand_CancelContextWhenCmdTerminatedBySignals(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) + pluginServer := &OpsAgentPluginServer{} + pluginServer.cancel = cancel + cmd := exec.CommandContext(ctx, os.Args[0], "-test.run=TestHelperProcess") cmd.Env = []string{"GO_WANT_HELPER_PROCESS=1", "GO_HELPER_KILL_BY_SIGNALS=1"} var wg sync.WaitGroup @@ -316,40 +271,51 @@ func Test_runSubAgentCommand_CancelContextWhenCmdTerminatedBySignals(t *testing. err := cmd.Wait() return "", err } - runSubAgentCommand(ctx, cancel, cmd, mockRunCommandFunc, &wg) - if ctx.Err() == nil { + + runSubAgentCommand(ctx, pluginServer.cancelAndSetPluginError, cmd, mockRunCommandFunc, &wg) + if ctx.Err() != context.Canceled { + t.Error("runSubAgentCommand() didn't cancel the context but should") + } + if pluginServer.pluginError == nil { + t.Errorf("runSubAgentCommand() did not set pluginError but should") + } + if !pluginServer.pluginError.ShouldRestart { + t.Error("runSubAgentCommand() set pluginError.ShouldRestart to false, want true") + } +} + +func Test_runSubAgentCommand_WhenCmdExitsBecauseCtxIsCancelled(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + pluginServer := &OpsAgentPluginServer{} + pluginServer.cancel = cancel + + cmd := exec.CommandContext(ctx, os.Args[0], "-test.run=TestHelperProcess") + cmd.Env = []string{"GO_WANT_HELPER_PROCESS=1"} + var wg sync.WaitGroup + wg.Add(1) + + runSubAgentCommand(ctx, pluginServer.cancelAndSetPluginError, cmd, runCommand, &wg) + time.Sleep(3 * time.Second) + cancel() + + if ctx.Err() != context.Canceled { t.Error("runSubAgentCommand() didn't cancel the context but should") } + if pluginServer.pluginError != nil { + t.Errorf("runSubAgentCommand() set pluginError %v, want nil", pluginServer.pluginError) + } } func Test_runSubagents_TerminatesWhenSpawnedGoRoutinesReturn(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) + pluginServer := &OpsAgentPluginServer{} + pluginServer.cancel = cancel + mockCmd := exec.CommandContext(ctx, os.Args[0], "-test.run=TestHelperProcess") mockCmd.Env = []string{"GO_WANT_HELPER_PROCESS=1", "GO_HELPER_FAILURE=1"} - mockRestartCommandFunc := func(ctx context.Context, cancel context.CancelFunc, _ *exec.Cmd, runCommand RunCommandFunc, wg *sync.WaitGroup) { + mockRunSubAgentCmd := func(ctx context.Context, cancel CancelContextAndSetPluginErrorFunc, _ *exec.Cmd, runCommand RunCommandFunc, wg *sync.WaitGroup) { runSubAgentCommand(ctx, 
cancel, mockCmd, runCommand, wg) } - cancel() // child go routines return immediately, because the parent context has been cancelled. - // the test times out and fails if runSubagents does not returns - runSubagents(ctx, cancel, "", "", mockRestartCommandFunc, runCommand) -} - -// TestHelperProcess isn't a real test. It's used as a helper process to mock -// command executions. -func TestHelperProcess(t *testing.T) { - if os.Getenv("GO_WANT_HELPER_PROCESS") != "1" { - // Skip this test if it's not invoked explicitly as a helper - // process. return allows the next tests to continue running. - return - } - switch { - case os.Getenv("GO_HELPER_FAILURE") == "1": - os.Exit(1) - case os.Getenv("GO_HELPER_KILL_BY_SIGNALS") == "1": - time.Sleep(1 * time.Minute) - default: - // A "successful" mock execution exits with a successful (zero) exit code. - os.Exit(0) - } + runSubagents(ctx, pluginServer.cancelAndSetPluginError, "", "", mockRunSubAgentCmd, runCommand) } diff --git a/cmd/ops_agent_uap_plugin/service_windows.go b/cmd/ops_agent_uap_plugin/service_windows.go index 19c2d58565..6271cdf996 100644 --- a/cmd/ops_agent_uap_plugin/service_windows.go +++ b/cmd/ops_agent_uap_plugin/service_windows.go @@ -60,11 +60,6 @@ var ( OpsAgentConfigLocationWindows = filepath.Join("C:", "Program Files/Google/Cloud Operations/Ops Agent/config/config.yaml") ) -// RunSubAgentCommandFunc defines a function type that starts a subagent. If one subagent execution exited, other sugagents are also terminated via context cancellation. This abstraction is introduced -// primarily to facilitate testing by allowing the injection of mock -// implementations. -type RunSubAgentCommandFunc func(ctx context.Context, cancel context.CancelFunc, cmd *exec.Cmd, runCommand RunCommandFunc, wg *sync.WaitGroup) - // Start starts the plugin and initiates the plugin functionality. // Until plugin receives Start request plugin is expected to be not functioning // and just listening on the address handed off waiting for the request. @@ -75,8 +70,8 @@ func (ps *OpsAgentPluginServer) Start(ctx context.Context, msg *pb.StartRequest) ps.mu.Unlock() return &pb.StartResponse{}, nil } - log.Printf("Received a Start request: %s. Starting the Ops Agent", msg) + log.Printf("Received a Start request: %s. Starting the Ops Agent", msg) pContext, cancel := context.WithCancel(context.Background()) ps.cancel = cancel ps.mu.Unlock() @@ -84,16 +79,20 @@ func (ps *OpsAgentPluginServer) Start(ctx context.Context, msg *pb.StartRequest) // Detect conflicting installations. preInstalledAgents, err := findPreExistentAgents(&windowsServiceManager{}, AgentWindowsServiceName) if len(preInstalledAgents) != 0 || err != nil { - ps.Stop(ctx, &pb.StopRequest{Cleanup: false}) - log.Printf("Start() failed: %s", err) + ps.cancelAndSetPluginError(&OpsAgentPluginError{ + Message: fmt.Sprintf("Start() failed, because it detected agent installations unmanaged by the VM Extension Manager: %v %s", preInstalledAgents, err), + ShouldRestart: false, + }) return &pb.StartResponse{}, nil } // Calculate plugin install and state dirs. 
pluginInstallDir, err := osext.ExecutableFolder() if err != nil { - ps.Stop(ctx, &pb.StopRequest{Cleanup: false}) - log.Printf("Start() failed, because it cannot determine the plugin install location: %s", err) + ps.cancelAndSetPluginError(&OpsAgentPluginError{ + Message: fmt.Sprintf("Start() failed, because it cannot determine the plugin install location: %s", err), + ShouldRestart: false, + }) return &pb.StartResponse{}, nil } @@ -106,24 +105,30 @@ func (ps *OpsAgentPluginServer) Start(ctx context.Context, msg *pb.StartRequest) // Create a windows Event logger. This is used to log generated subagent configs, and health check results. windowsEventLogger, err := createWindowsEventLogger() if err != nil { - ps.Stop(ctx, &pb.StopRequest{Cleanup: false}) - log.Printf("Start() failed, because it failed to create Windows event logger: %s", err) + ps.cancelAndSetPluginError(&OpsAgentPluginError{ + Message: fmt.Sprintf("Start() failed, because it failed to create Windows event logger: %s", err), + ShouldRestart: false, + }) return &pb.StartResponse{}, nil } // Receive config from the Start request and write it to the Ops Agent config file. if err := writeCustomConfigToFile(msg, OpsAgentConfigLocationWindows); err != nil { - ps.Stop(ctx, &pb.StopRequest{Cleanup: false}) windowsEventLogger.Close() - log.Printf("Start() failed, because it failed to write the custom Ops Agent config to file: %s", err) + ps.cancelAndSetPluginError(&OpsAgentPluginError{ + Message: fmt.Sprintf("Start() failed to write the custom Ops Agent config to file: %s", err), + ShouldRestart: false, + }) return &pb.StartResponse{}, nil } // Subagents config validation and generation. if err := generateSubAgentConfigs(ctx, OpsAgentConfigLocationWindows, pluginStateDir, windowsEventLogger); err != nil { - ps.Stop(ctx, &pb.StopRequest{Cleanup: false}) windowsEventLogger.Close() - log.Printf("Start() failed at the subagent config validation and generation step: %s", err) + ps.cancelAndSetPluginError(&OpsAgentPluginError{ + Message: fmt.Sprintf("Start() failed to validate the custom Ops Agent config, and generate sub-agents config: %s", err), + ShouldRestart: false, + }) return &pb.StartResponse{}, nil } @@ -134,56 +139,23 @@ func (ps *OpsAgentPluginServer) Start(ctx context.Context, msg *pb.StartRequest) // Create a Windows Job object and stores its handle, to ensure that all child processes are killed when the parent process exits. _, err = createWindowsJobHandle() if err != nil { - ps.Stop(ctx, &pb.StopRequest{Cleanup: false}) windowsEventLogger.Close() - log.Printf("Start() failed, because it failed to create a Windows Job object: %s", err) + ps.cancelAndSetPluginError(&OpsAgentPluginError{ + Message: fmt.Sprintf("Start() failed, because it failed to create a Windows Job object: %s", err), + ShouldRestart: false, + }) return &pb.StartResponse{}, nil } - cancelFunc := func() { - ps.Stop(ctx, &pb.StopRequest{Cleanup: false}) + cancelAndSetPluginErr := func(e *OpsAgentPluginError) { + ps.cancelAndSetPluginError(e) windowsEventLogger.Close() } - go runSubagents(pContext, cancelFunc, pluginInstallDir, pluginStateDir, runSubAgentCommand, ps.runCommand) - + go runSubagents(pContext, cancelAndSetPluginErr, pluginInstallDir, pluginStateDir, runSubAgentCommand, ps.runCommand) return &pb.StartResponse{}, nil } -// Stop is the stop hook and implements any cleanup if required. -// Stop maybe called if plugin revision is being changed. -// For e.g. 
if plugins want to stop some task it was performing or remove some -// state before exiting it can be done on this request. -func (ps *OpsAgentPluginServer) Stop(ctx context.Context, msg *pb.StopRequest) (*pb.StopResponse, error) { - ps.mu.Lock() - defer ps.mu.Unlock() - if ps.cancel == nil { - log.Printf("The Ops Agent plugin is stopped already, skipping the current request") - return &pb.StopResponse{}, nil - } - log.Printf("Received a Stop request: %s. Stopping the Ops Agent", msg) - ps.cancel() - ps.cancel = nil - return &pb.StopResponse{}, nil -} - -// GetStatus is the health check agent would perform to make sure plugin process -// is alive. If request fails process is considered dead and relaunched. Plugins -// can share any additional information to report it to the service. For e.g. if -// plugins detect some non-fatal errors causing it unable to offer some features -// it can reported in status which is sent back to the service by agent. -func (ps *OpsAgentPluginServer) GetStatus(ctx context.Context, msg *pb.GetStatusRequest) (*pb.Status, error) { - log.Println("Received a GetStatus request") - ps.mu.Lock() - defer ps.mu.Unlock() - if ps.cancel == nil { - log.Println("The Ops Agent plugin is not running") - return &pb.Status{Code: 1, Results: []string{"The Ops Agent Plugin is not running."}}, nil - } - log.Println("The Ops Agent plugin is running") - return &pb.Status{Code: 0, Results: []string{"The Ops Agent Plugin is running ok."}}, nil -} - // serviceManager is an interface to abstract the Windows service manager. This is used to facilitate testing. type serviceManager interface { Connect() (serviceManagerConnection, error) @@ -362,7 +334,7 @@ func createWindowsJobHandle() (windows.Handle, error) { // // cancel: the cancel function for the parent context. By calling this function, the parent context is canceled, // and GetStatus() returns a non-healthy status, signaling UAP to re-trigger Start(). 
-func runSubagents(ctx context.Context, cancel context.CancelFunc, pluginInstallDirectory string, pluginStateDirectory string, runSubAgentCommand RunSubAgentCommandFunc, runCommand RunCommandFunc) { +func runSubagents(ctx context.Context, cancelAndSetError CancelContextAndSetPluginErrorFunc, pluginInstallDirectory string, pluginStateDirectory string, runSubAgentCommand RunSubAgentCommandFunc, runCommand RunCommandFunc) { var wg sync.WaitGroup @@ -373,7 +345,7 @@ func runSubagents(ctx context.Context, cancel context.CancelFunc, pluginInstallD "--feature-gates=receiver.prometheusreceiver.RemoveStartTimeAdjustment", ) wg.Add(1) - go runSubAgentCommand(ctx, cancel, runOtelCmd, runCommand, &wg) + go runSubAgentCommand(ctx, cancelAndSetError, runOtelCmd, runCommand, &wg) // Starting Fluentbit runFluentBitCmd := exec.CommandContext(ctx, @@ -386,7 +358,7 @@ func runSubagents(ctx context.Context, cancel context.CancelFunc, pluginInstallD "--storage_path", path.Join(pluginStateDirectory, "run/buffers"), ) wg.Add(1) - go runSubAgentCommand(ctx, cancel, runFluentBitCmd, runCommand, &wg) + go runSubAgentCommand(ctx, cancelAndSetError, runFluentBitCmd, runCommand, &wg) wg.Wait() } @@ -402,25 +374,3 @@ func runCommand(cmd *exec.Cmd) (string, error) { } return string(out), err } - -func runSubAgentCommand(ctx context.Context, cancel context.CancelFunc, cmd *exec.Cmd, runCommand RunCommandFunc, wg *sync.WaitGroup) { - - defer wg.Done() - if cmd == nil { - return - } - if ctx.Err() != nil { - // context has been cancelled - log.Printf("cannot execute command: %s, because the context has been cancelled", cmd.Args) - return - } - - output, err := runCommand(cmd) - if err != nil { - log.Printf("command: %s exited with errors, not restarting.\nCommand output: %s\n Command error:%s", cmd.Args, string(output), err) - } else { - log.Printf("command: %s %s exited successfully.\nCommand output: %s", cmd.Path, cmd.Args, string(output)) - } - cancel() // cancels the parent context which also stops other Ops Agent sub-binaries from running. 
- return -} diff --git a/cmd/ops_agent_uap_plugin/service_windows_test.go b/cmd/ops_agent_uap_plugin/service_windows_test.go index 23dcdde945..23fff22593 100644 --- a/cmd/ops_agent_uap_plugin/service_windows_test.go +++ b/cmd/ops_agent_uap_plugin/service_windows_test.go @@ -267,89 +267,26 @@ func TestStart(t *testing.T) { } } -func TestStop(t *testing.T) { - cases := []struct { - name string - cancel context.CancelFunc - }{ - { - name: "PluginAlreadyStopped", - cancel: nil, - }, - { - name: "PluginRunning", - cancel: func() {}, // Non-nil function - - }, - } - - for _, tc := range cases { - tc := tc - t.Run(tc.name, func(t *testing.T) { - t.Parallel() - ps := &OpsAgentPluginServer{cancel: tc.cancel} - _, err := ps.Stop(context.Background(), &pb.StopRequest{}) - if err != nil { - t.Errorf("got error from Stop(): %v, wanted nil", err) - } - - if ps.cancel != nil { - t.Error("got non-nil cancel function after calling Stop(), want nil") - } - }) - } -} - -func TestGetStatus(t *testing.T) { - cases := []struct { - name string - cancel context.CancelFunc - wantStatusCode int32 - }{ - { - name: "PluginNotRunning", - cancel: nil, - wantStatusCode: 1, - }, - { - name: "PluginRunning", - cancel: func() {}, // Non-nil function - wantStatusCode: 0, - }, - } - - for _, tc := range cases { - tc := tc - t.Run(tc.name, func(t *testing.T) { - t.Parallel() - ps := &OpsAgentPluginServer{cancel: tc.cancel} - status, err := ps.GetStatus(context.Background(), &pb.GetStatusRequest{}) - if err != nil { - t.Errorf("got error from GetStatus: %v, wanted nil", err) - } - gotStatusCode := status.Code - if gotStatusCode != tc.wantStatusCode { - t.Errorf("Got status code %d from GetStatus(), wanted %d", gotStatusCode, tc.wantStatusCode) - } - - }) - } -} - func runCommandSuccessfully(_ *exec.Cmd) (string, error) { return "success", nil } func Test_runSubAgentCommand_CancelContextWhenCmdExitsSuccessfully(t *testing.T) { t.Parallel() ctx, cancel := context.WithCancel(context.Background()) + pluginServer := &OpsAgentPluginServer{} + pluginServer.cancel = cancel cmd := exec.CommandContext(ctx, "fake-command") var wg sync.WaitGroup wg.Add(1) - runSubAgentCommand(ctx, cancel, cmd, runCommandSuccessfully, &wg) - if ctx.Err() == nil { + + runSubAgentCommand(ctx, pluginServer.cancelAndSetPluginError, cmd, runCommandSuccessfully, &wg) + if ctx.Err() != context.Canceled { t.Error("runSubAgentCommand() did not cancel context but should") } + if pluginServer.pluginError != nil { + t.Errorf("runSubAgentCommand() set pluginError: %v, want nil", pluginServer.pluginError) + } } func runCommandAndFailed(_ *exec.Cmd) (string, error) { @@ -358,14 +295,49 @@ func runCommandAndFailed(_ *exec.Cmd) (string, error) { func Test_runSubAgentCommand_CancelContextWhenCmdExitsWithErrors(t *testing.T) { t.Parallel() ctx, cancel := context.WithCancel(context.Background()) + pluginServer := &OpsAgentPluginServer{} + pluginServer.cancel = cancel cmd := exec.CommandContext(ctx, "fake-command") var wg sync.WaitGroup wg.Add(1) - runSubAgentCommand(ctx, cancel, cmd, runCommandAndFailed, &wg) - if ctx.Err() == nil { + runSubAgentCommand(ctx, pluginServer.cancelAndSetPluginError, cmd, runCommandAndFailed, &wg) + if ctx.Err() != context.Canceled { t.Error("runSubAgentCommand() did not cancel context but should") } + if pluginServer.pluginError == nil { + t.Errorf("runSubAgentCommand() did not set pluginError but should") + } + if !pluginServer.pluginError.ShouldRestart { + t.Error("runSubAgentCommand() set pluginError.ShouldRestart to false, want true") + } +} 
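The replacement body of `runSubAgentCommand` is not shown in this diff; the tests above only pin down its observable behavior. The following is a hedged sketch consistent with those assertions (reusing the hypothetical types from the sketch after the service_linux.go hunk), not the actual new implementation.

```go
// Sketch of behavior inferred from the tests: always cancel the shared context
// when a subagent exits, but only record a restartable plugin error when the
// exit was not caused by the context already being cancelled.
package sketch

import (
	"context"
	"fmt"
	"os/exec"
	"sync"
)

// RunCommandFunc matches the signature of runCommand in these files.
type RunCommandFunc func(cmd *exec.Cmd) (string, error)

func runSubAgentCommand(ctx context.Context, cancelAndSetError CancelContextAndSetPluginErrorFunc, cmd *exec.Cmd, runCommand RunCommandFunc, wg *sync.WaitGroup) {
	defer wg.Done()
	if cmd == nil {
		return
	}
	output, err := runCommand(cmd)
	if ctx.Err() != nil {
		// The subagent exited because the shared context was already cancelled
		// (e.g. another subagent stopped first): no new plugin error is recorded.
		cancelAndSetError(nil)
		return
	}
	if err != nil {
		// Unexpected failure or kill signal: stop the other subagents and ask
		// UAP to restart the plugin.
		cancelAndSetError(&OpsAgentPluginError{
			Message:       fmt.Sprintf("command %v exited with error: %v; output: %s", cmd.Args, err, output),
			ShouldRestart: true,
		})
		return
	}
	// A clean exit still cancels the context so the other subagents stop too,
	// but no error is recorded.
	cancelAndSetError(nil)
}
```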
+ +func Test_runSubAgentCommand_WhenCmdExitsBecauseCtxIsCancelled(t *testing.T) { + t.Parallel() + ctx, cancel := context.WithCancel(context.Background()) + pluginServer := &OpsAgentPluginServer{} + pluginServer.cancel = cancel + cmd := exec.CommandContext(ctx, "fake-command") + mockRunCommand := func(cmd *exec.Cmd) (string, error) { + time.Sleep(10 * time.Second) + return runCommand(cmd) + } + + var wg sync.WaitGroup + wg.Add(1) + go func() { + time.Sleep(2 * time.Second) + cancel() + }() + runSubAgentCommand(ctx, pluginServer.cancelAndSetPluginError, cmd, mockRunCommand, &wg) + + if ctx.Err() != context.Canceled { + t.Error("runSubAgentCommand() didn't cancel the context but should") + } + if pluginServer.pluginError != nil { + t.Errorf("runSubAgentCommand() set pluginError %v, want nil", pluginServer.pluginError) + } } func Test_runCommand(t *testing.T) { @@ -389,22 +361,3 @@ func Test_runCommandFailure(t *testing.T) { t.Error("runCommand got nil error, want exec failure") } } - -// TestHelperProcess isn't a real test. It's used as a helper process to mock -// command executions. -func TestHelperProcess(t *testing.T) { - if os.Getenv("GO_WANT_HELPER_PROCESS") != "1" { - // Skip this test if it's not invoked explicitly as a helper - // process. return allows the next tests to continue running. - return - } - switch { - case os.Getenv("GO_HELPER_FAILURE") == "1": - os.Exit(1) - case os.Getenv("GO_HELPER_KILL_BY_SIGNALS") == "1": - time.Sleep(1 * time.Minute) - default: - // A "successful" mock execution exits with a successful (zero) exit code. - os.Exit(0) - } -} diff --git a/confgenerator/confgenerator.go b/confgenerator/confgenerator.go index 77d77b77aa..a6a8ebfd30 100644 --- a/confgenerator/confgenerator.go +++ b/confgenerator/confgenerator.go @@ -57,6 +57,22 @@ func googleCloudExporter(userAgent string, instrumentationLabels bool, serviceRe } } +func ConvertToOtlpExporter(receiver otel.ReceiverPipeline, ctx context.Context) otel.ReceiverPipeline { + expOtlpExporter := experimentsFromContext(ctx)["otlp_exporter"] + resource, _ := platform.FromContext(ctx).GetResource() + if !expOtlpExporter { + return receiver + } + _, err := receiver.ExporterTypes["metrics"] + if !err { + return receiver + } + receiver.ExporterTypes["metrics"] = otel.OTLP + + receiver.Processors["metrics"] = append(receiver.Processors["metrics"], otel.GCPProjectID(resource.ProjectName())) + return receiver +} + func otlpExporter(userAgent string) otel.Component { return otel.Component{ Type: "otlphttp", @@ -132,7 +148,7 @@ func (uc *UnifiedConfig) GenerateOtelConfig(ctx context.Context, outDir string) otel.GMP: googleManagedPrometheusExporter(userAgent), otel.OTLP: otlpExporter(userAgent), }, - }.Generate(ctx) + }.Generate(ctx, exp_otlp_exporter) if err != nil { return "", err } diff --git a/confgenerator/config.go b/confgenerator/config.go index cacea53d30..68412782e7 100644 --- a/confgenerator/config.go +++ b/confgenerator/config.go @@ -702,7 +702,7 @@ func (m MetricsReceiverSharedJVM) ConfigurePipelines(targetSystem string, proces if err != nil { return nil, fmt.Errorf("failed to discover the location of the JMX metrics exporter: %w", err) } - + ctx := context.Background() config := map[string]interface{}{ "target_system": targetSystem, "collection_interval": m.CollectionIntervalString(), @@ -723,13 +723,13 @@ func (m MetricsReceiverSharedJVM) ConfigurePipelines(targetSystem string, proces config["password"] = secretPassword } - return []otel.ReceiverPipeline{{ + return 
[]otel.ReceiverPipeline{ConvertToOtlpExporter(otel.ReceiverPipeline{ Receiver: otel.Component{ Type: "jmx", Config: config, }, Processors: map[string][]otel.Component{"metrics": processors}, - }}, nil + }, ctx)}, nil } type MetricsReceiverSharedCollectJVM struct { diff --git a/confgenerator/filter/internal/ast/ast.go b/confgenerator/filter/internal/ast/ast.go index 937dcc4d6c..db8943dd2d 100644 --- a/confgenerator/filter/internal/ast/ast.go +++ b/confgenerator/filter/internal/ast/ast.go @@ -498,23 +498,23 @@ func (r Restriction) OTTLExpression() (ottl.Value, error) { return nil, fmt.Errorf("unimplemented operator: %s", r.Operator) case ":": // substring match, case insensitive - expr = ottl.IsMatch(lhs, fmt.Sprintf(`(?i)%s`, regexp.QuoteMeta(r.RHS))) + expr = ottl.IsMatchRubyRegex(lhs, fmt.Sprintf(`(?i)%s`, regexp.QuoteMeta(r.RHS))) case "=~", "!~": // regex match, case sensitive - if _, err := regexp.Compile(r.RHS); err != nil { - return nil, fmt.Errorf("unsupported regex %q: %w", r.RHS, err) - } - - expr = ottl.IsMatch(lhs, r.RHS) - // TODO: Support Ruby regex syntax + // TODO: b/436898109 - Enable regex validity config checks when Ruby Regex library + // is added to the Ops Agent build. This requires "CGO_ENABLED=1". + // if _, err := regexp.Compile(r.RHS); err != nil { + // return nil, fmt.Errorf("unsupported regex %q: %w", r.RHS, err) + //} + expr = ottl.IsMatchRubyRegex(lhs, r.RHS) if r.Operator == "!~" { expr = ottl.Not(expr) } case "=", "!=": // equality, case insensitive - expr = ottl.IsMatch(lhs, fmt.Sprintf(`(?i)^%s$`, regexp.QuoteMeta(r.RHS))) + expr = ottl.IsMatchRubyRegex(lhs, fmt.Sprintf(`(?i)^%s$`, regexp.QuoteMeta(r.RHS))) if r.Operator == "!=" { expr = ottl.Not(expr) } diff --git a/confgenerator/logging_macros.go b/confgenerator/logging_macros.go index 3c645291be..d0c3a2dd41 100644 --- a/confgenerator/logging_macros.go +++ b/confgenerator/logging_macros.go @@ -62,14 +62,14 @@ func (cr loggingReceiverMacroAdapter[LRM]) Components(ctx context.Context, tag s func (cr loggingReceiverMacroAdapter[LRM]) Pipelines(ctx context.Context) ([]otel.ReceiverPipeline, error) { receiver, processors := cr.Expand(ctx) - if r, ok := any(receiver).(OTelReceiver); ok { + if r, ok := any(receiver).(InternalOTelReceiver); ok { rps, err := r.Pipelines(ctx) if err != nil { return nil, err } for _, pipeline := range rps { for _, p := range processors { - if p, ok := p.(OTelProcessor); ok { + if p, ok := p.(InternalOTelProcessor); ok { c, err := p.Processors(ctx) if err != nil { return nil, err @@ -124,7 +124,7 @@ func (cp loggingProcessorMacroAdapter[LPM]) Components(ctx context.Context, tag func (cp loggingProcessorMacroAdapter[LPM]) Processors(ctx context.Context) ([]otel.Component, error) { var processors []otel.Component for _, lp := range cp.Expand(ctx) { - if p, ok := any(lp).(OTelProcessor); ok { + if p, ok := any(lp).(InternalOTelProcessor); ok { c, err := p.Processors(ctx) if err != nil { return nil, err diff --git a/confgenerator/logging_modify_fields.go b/confgenerator/logging_modify_fields.go index c984a35b20..941a1ca9ef 100644 --- a/confgenerator/logging_modify_fields.go +++ b/confgenerator/logging_modify_fields.go @@ -407,8 +407,7 @@ func (p LoggingProcessorModifyFields) statements(_ context.Context) (ottl.Statem case "float": statements = statements.Append(value.Set(ottl.ToFloat(value))) case "YesNoBoolean": - // TODO - return nil, fmt.Errorf("YesNoBoolean unsupported") + statements = statements.Append(value.SetToYesNoBoolean(value)) } if field.CustomConvertFunc != nil { diff --git 
a/confgenerator/logging_processors.go b/confgenerator/logging_processors.go index e0fa81429d..820119928f 100644 --- a/confgenerator/logging_processors.go +++ b/confgenerator/logging_processors.go @@ -18,6 +18,7 @@ import ( "context" "fmt" "sort" + "strings" "github.com/GoogleCloudPlatform/ops-agent/confgenerator/filter" "github.com/GoogleCloudPlatform/ops-agent/confgenerator/fluentbit" @@ -155,7 +156,9 @@ func (p ParserShared) TimestampStatements() (ottl.Statements, error) { func (p ParserShared) TypesStatements() (ottl.Statements, error) { var out ottl.Statements - for field, fieldType := range p.Types { + // Sort map keys to always get the same statements order and error message. + for _, field := range GetSortedKeys(p.Types) { + fieldType := p.Types[field] m, err := filter.NewMemberLegacy(field) if err != nil { return nil, err @@ -329,7 +332,11 @@ func (p LoggingProcessorParseRegex) Processors(ctx context.Context) ([]otel.Comp cachedParsedRegex := ottl.LValue{"cache", "__parsed_regex"} statements := ottl.NewStatements( - cachedParsedRegex.SetIf(ottl.ExtractPatternsRubyRegex(fromAccessor, p.Regex), fromAccessor.IsPresent()), + // Set `OmitEmptyValues : true` to have the same behaviour as fluent-bit `parse_regex` with `Skip_Empty_Values: true`. + cachedParsedRegex.SetIf(ottl.ExtractPatternsRubyRegex(fromAccessor, p.Regex, true), ottl.And( + fromAccessor.IsPresent(), + ottl.IsMatchRubyRegex(fromAccessor, p.Regex), + )), fromAccessor.DeleteIf(cachedParsedRegex.IsPresent()), ottl.LValue{"body"}.MergeMapsIf(cachedParsedRegex, "upsert", cachedParsedRegex.IsPresent()), cachedParsedRegex.Delete(), @@ -385,6 +392,23 @@ func (p LoggingProcessorParseRegexComplex) Components(ctx context.Context, tag, return components } +func (p LoggingProcessorParseRegexComplex) Processors(ctx context.Context) ([]otel.Component, error) { + processors := []otel.Component{} + for _, parserConfig := range p.Parsers { + parseRegex := LoggingProcessorParseRegex{ + ParserShared: parserConfig.Parser, + Regex: parserConfig.Regex, + Field: p.Field, + } + parseRegexProcessors, err := parseRegex.Processors(ctx) + if err != nil { + return nil, err + } + processors = append(processors, parseRegexProcessors...) + } + return processors, nil +} + type MultilineRule = fluentbit.MultilineRule // A LoggingProcessorParseMultilineRegex applies a set of regex rules to the specified lines, storing the named capture groups as keys in the log record. @@ -433,6 +457,55 @@ func (p LoggingProcessorParseMultilineRegex) Components(ctx context.Context, tag ) } +func (p LoggingProcessorParseMultilineRegex) Processors(ctx context.Context) ([]otel.Component, error) { + var exprParts []string + for _, r := range p.Rules { + // The current "recombine" operator multiline support only supports setting a "start_state" ("is_first_entry"). + // TODO: b/459877163 - Update implementation when opentelemetry supports "state-machine" multiline parsing. + if r.StateName == "start_state" { + exprParts = append(exprParts, fmt.Sprintf("body.message matches %q", r.Regex)) + } + } + isFirstEntryExpr := strings.Join(exprParts, " or ") + + logsTransform := []otel.Component{ + { + Type: "logstransform", + Config: map[string]any{ + "operators": []map[string]any{ + { + "type": "add", + "field": "attributes.__source_identifier", + "value": `EXPR(attributes["agent.googleapis.com/log_file_path"] ?? "")`, + }, + { + "type": "recombine", + "combine_field": "body.message", + "is_first_entry": isFirstEntryExpr, + // Take the timestamp and other attributes from the first entry. 
+ "overwrite_with": "oldest", + // Use the log file path to disambiguate if present. + "source_identifier": `attributes.__source_identifier`, + // Set time interval (same as fluent-bit "flush_timeout") to wait for secondary logs to be appended. + "force_flush_period": "1000ms", + }, + { + "type": "remove", + "field": "attributes.__source_identifier", + }, + }, + }, + }, + } + + parseRegexComplexComponents, err := p.LoggingProcessorParseRegexComplex.Processors(ctx) + if err != nil { + return nil, err + } + + return append(logsTransform, parseRegexComplexComponents...), nil +} + func init() { LoggingProcessorTypes.RegisterType(func() LoggingProcessor { return &LoggingProcessorParseRegex{} }) } diff --git a/confgenerator/logging_receivers.go b/confgenerator/logging_receivers.go index a8185da7cd..a0d8310a0d 100644 --- a/confgenerator/logging_receivers.go +++ b/confgenerator/logging_receivers.go @@ -200,18 +200,19 @@ func (r LoggingReceiverFilesMixin) Components(ctx context.Context, tag string) [ func (r LoggingReceiverFilesMixin) Pipelines(ctx context.Context) ([]otel.ReceiverPipeline, error) { operators := []map[string]any{} receiver_config := map[string]any{ - "include": r.IncludePaths, - "exclude": r.ExcludePaths, - "start_at": "beginning", - "include_file_name": false, + "include": r.IncludePaths, + "exclude": r.ExcludePaths, + "start_at": "beginning", + "include_file_name": false, + "preserve_leading_whitespaces": true, + "preserve_trailing_whitespaces": true, } if i := r.WildcardRefreshInterval; i != nil { receiver_config["poll_interval"] = i.String() } // TODO: Configure `storage` to store file checkpoints - // TODO: Configure multiline rules if len(r.MultilineRules) > 0 { - return nil, fmt.Errorf("multiline rules are not supported in otel") + return nil, fmt.Errorf("setting multiline rules in otel filelog receiver is not supported") } // TODO: Support BufferInMemory // OTel parses the log to `body` by default; put it in a `message` field to match fluent-bit's behavior. 
diff --git a/confgenerator/otel/modular.go b/confgenerator/otel/modular.go index c0cd79f851..212b62b35d 100644 --- a/confgenerator/otel/modular.go +++ b/confgenerator/otel/modular.go @@ -134,7 +134,7 @@ type ModularConfig struct { // processors: [filter/mypipe_1, metrics_filter/mypipe_2, resourcedetection/_global_0] // extensions: [googleclientauth] // exporters: [googlecloud] -func (c ModularConfig) Generate(ctx context.Context) (string, error) { +func (c ModularConfig) Generate(ctx context.Context, expOtlpExporter bool) (string, error) { pl := platform.FromContext(ctx) receivers := map[string]interface{}{} processors := map[string]interface{}{} @@ -251,6 +251,12 @@ func (c ModularConfig) Generate(ctx context.Context) (string, error) { if name, ok := resourceDetectionProcessorNames[rdm]; ok { processorNames = append(processorNames, name) processors[name] = resourceDetectionProcessors[rdm].Config + // b/459468648 + if expOtlpExporter { + copyProcessor := CopyHostIDToInstanceID() + processorNames = append(processorNames, copyProcessor.name("_global_0")) + processors[copyProcessor.name("_global_0")] = copyProcessor.Config + } } exporterType := receiverPipeline.ExporterTypes[pipeline.Type] if _, ok := exporterNames[exporterType]; !ok { diff --git a/confgenerator/otel/ottl/ottl.go b/confgenerator/otel/ottl/ottl.go index c0486da1ee..1d29252093 100644 --- a/confgenerator/otel/ottl/ottl.go +++ b/confgenerator/otel/ottl/ottl.go @@ -168,8 +168,8 @@ func ParseJSON(a Value) Value { return valuef(`ParseJSON(%s)`, a) } -func ExtractPatternsRubyRegex(a Value, pattern string) Value { - return valuef(`ExtractPatternsRubyRegex(%s, %q)`, a, pattern) +func ExtractPatternsRubyRegex(a Value, pattern string, omitEmptyValues bool) Value { + return valuef(`ExtractPatternsRubyRegex(%s, %q, %v)`, a, pattern, omitEmptyValues) } func ConvertCase(a Value, toCase string) Value { @@ -188,6 +188,10 @@ func IsMatch(target Value, pattern string) Value { return valuef(`IsMatch(%s, %q)`, target, pattern) } +func IsMatchRubyRegex(target Value, pattern string) Value { + return valuef(`IsMatchRubyRegex(%s, %q)`, target, pattern) +} + func Equals(a, b Value) Value { return valuef(`%s == %s`, a, b) } @@ -241,6 +245,17 @@ func (a LValue) SetToBool(b Value) Statements { return out } +func (a LValue) SetToYesNoBoolean(b Value) Statements { + cache := LValue{"cache", "__yes_no_bool"} + out := Statements{ + statementf(`set(%s, true) where (%s and %s == "Yes")`, cache, IsNotNil(b), b), + statementf(`set(%s, false) where (%s and %s != "Yes")`, cache, IsNotNil(b), b), + } + out = out.Append(a.SetIf(cache, cache.IsPresent())) + out = out.Append(cache.Delete()) + return out +} + // Delete removes a (potentially nested) key from its parent maps, if that key exists. 
func (a LValue) Delete() Statements { parent := a[:len(a)-1] diff --git a/confgenerator/otel/processors.go b/confgenerator/otel/processors.go index fa50aa44e8..b6cb6bc4b5 100644 --- a/confgenerator/otel/processors.go +++ b/confgenerator/otel/processors.go @@ -291,6 +291,23 @@ func MetricsRemoveServiceAttributes() Component { } } +func CopyHostIDToInstanceID() Component { + return Component{ + Type: "transform", + Config: map[string]any{ + "metric_statements": []map[string]any{ + { + "context": "resource", + "error_mode": "silent", + "statements": []string{ + `set(attributes["instance_id"], attributes["host.id"])`, + }, + }, + }, + }, + } +} + // TransformQueryContext is a type wrapper for the context of a query expression within the transoform processor type TransformQueryContext string diff --git a/confgenerator/testdata/feature/golden.csv b/confgenerator/testdata/feature/golden.csv index b0b8416221..60f65a5b2a 100644 --- a/confgenerator/testdata/feature/golden.csv +++ b/confgenerator/testdata/feature/golden.csv @@ -8,8 +8,6 @@ App,Field,Override, *apps.LoggingProcessorCouchbaseHTTPAccess,confgenerator.LoggingReceiverFilesMixin.RecordLogFilePath, *apps.LoggingProcessorCouchbaseHTTPAccess,confgenerator.LoggingReceiverFilesMixin.WildcardRefreshInterval, *apps.LoggingProcessorIisAccess,confgenerator.ConfigComponent.Type, -*apps.LoggingProcessorOracleDBAlert,confgenerator.ConfigComponent.Type, -*apps.LoggingProcessorOracleDBAudit,confgenerator.ConfigComponent.Type, *apps.LoggingReceiverActiveDirectoryDS,confgenerator.ConfigComponent.Type, *apps.LoggingReceiverCouchbase,confgenerator.ConfigComponent.Type, *apps.LoggingReceiverCouchbase,confgenerator.LoggingReceiverFilesMixin.BufferInMemory, @@ -23,18 +21,6 @@ App,Field,Override, *apps.LoggingReceiverMongodb,confgenerator.LoggingReceiverFilesMixin.BufferInMemory, *apps.LoggingReceiverMongodb,confgenerator.LoggingReceiverFilesMixin.RecordLogFilePath, *apps.LoggingReceiverMongodb,confgenerator.LoggingReceiverFilesMixin.WildcardRefreshInterval, -*apps.LoggingReceiverOracleDBAlert,apps.LoggingProcessorOracleDBAlert.confgenerator.ConfigComponent.Type, -*apps.LoggingReceiverOracleDBAlert,confgenerator.LoggingReceiverFilesMixin.BufferInMemory, -*apps.LoggingReceiverOracleDBAlert,confgenerator.LoggingReceiverFilesMixin.RecordLogFilePath, -*apps.LoggingReceiverOracleDBAlert,confgenerator.LoggingReceiverFilesMixin.WildcardRefreshInterval, -*apps.LoggingReceiverOracleDBAudit,apps.LoggingProcessorOracleDBAudit.confgenerator.ConfigComponent.Type, -*apps.LoggingReceiverOracleDBAudit,confgenerator.LoggingReceiverFilesMixin.BufferInMemory, -*apps.LoggingReceiverOracleDBAudit,confgenerator.LoggingReceiverFilesMixin.RecordLogFilePath, -*apps.LoggingReceiverOracleDBAudit,confgenerator.LoggingReceiverFilesMixin.WildcardRefreshInterval, -*apps.LoggingReceiverVaultAuditJson,apps.LoggingProcessorVaultJson.confgenerator.ConfigComponent.Type, -*apps.LoggingReceiverVaultAuditJson,confgenerator.LoggingReceiverFilesMixin.BufferInMemory, -*apps.LoggingReceiverVaultAuditJson,confgenerator.LoggingReceiverFilesMixin.RecordLogFilePath, -*apps.LoggingReceiverVaultAuditJson,confgenerator.LoggingReceiverFilesMixin.WildcardRefreshInterval, *apps.MetricsProcessorExcludeMetrics,confgenerator.ConfigComponent.Type, *apps.MetricsReceiverActiveDirectoryDS,confgenerator.ConfigComponent.Type, *apps.MetricsReceiverActivemq,confgenerator.ConfigComponent.Type, @@ -152,6 +138,8 @@ App,Field,Override, 
*confgenerator.loggingProcessorMacroAdapter[github.com/GoogleCloudPlatform/ops-agent/apps.LoggingProcessorMacroMysqlSlow],confgenerator.ConfigComponent.Type, *confgenerator.loggingProcessorMacroAdapter[github.com/GoogleCloudPlatform/ops-agent/apps.LoggingProcessorMacroNginxAccess],confgenerator.ConfigComponent.Type, *confgenerator.loggingProcessorMacroAdapter[github.com/GoogleCloudPlatform/ops-agent/apps.LoggingProcessorMacroNginxError],confgenerator.ConfigComponent.Type, +*confgenerator.loggingProcessorMacroAdapter[github.com/GoogleCloudPlatform/ops-agent/apps.LoggingProcessorMacroOracleDBAlert],confgenerator.ConfigComponent.Type, +*confgenerator.loggingProcessorMacroAdapter[github.com/GoogleCloudPlatform/ops-agent/apps.LoggingProcessorMacroOracleDBAudit],confgenerator.ConfigComponent.Type, *confgenerator.loggingProcessorMacroAdapter[github.com/GoogleCloudPlatform/ops-agent/apps.LoggingProcessorMacroPostgresql],confgenerator.ConfigComponent.Type, *confgenerator.loggingProcessorMacroAdapter[github.com/GoogleCloudPlatform/ops-agent/apps.LoggingProcessorMacroRabbitmq],confgenerator.ConfigComponent.Type, *confgenerator.loggingProcessorMacroAdapter[github.com/GoogleCloudPlatform/ops-agent/apps.LoggingProcessorMacroRedis],confgenerator.ConfigComponent.Type, @@ -161,6 +149,7 @@ App,Field,Override, *confgenerator.loggingProcessorMacroAdapter[github.com/GoogleCloudPlatform/ops-agent/apps.LoggingProcessorMacroTomcatAccess],confgenerator.ConfigComponent.Type, *confgenerator.loggingProcessorMacroAdapter[github.com/GoogleCloudPlatform/ops-agent/apps.LoggingProcessorMacroTomcatSystem],confgenerator.ConfigComponent.Type, *confgenerator.loggingProcessorMacroAdapter[github.com/GoogleCloudPlatform/ops-agent/apps.LoggingProcessorMacroVarnish],confgenerator.ConfigComponent.Type, +*confgenerator.loggingProcessorMacroAdapter[github.com/GoogleCloudPlatform/ops-agent/apps.LoggingProcessorMacroVaultJson],confgenerator.ConfigComponent.Type, *confgenerator.loggingProcessorMacroAdapter[github.com/GoogleCloudPlatform/ops-agent/apps.LoggingProcessorMacroWildflySystem],confgenerator.ConfigComponent.Type, *confgenerator.loggingProcessorMacroAdapter[github.com/GoogleCloudPlatform/ops-agent/apps.LoggingProcessorMacroZookeeperGeneral],confgenerator.ConfigComponent.Type, *confgenerator.loggingReceiverMacroAdapter[*github.com/GoogleCloudPlatform/ops-agent/confgenerator.loggingFilesProcessorMacroAdapter[github.com/GoogleCloudPlatform/ops-agent/apps.LoggingProcessorMacroApacheAccess]],ReceiverMacro, @@ -219,3 +208,15 @@ App,Field,Override, *confgenerator.loggingReceiverMacroAdapter[*github.com/GoogleCloudPlatform/ops-agent/confgenerator.loggingFilesProcessorMacroAdapter[github.com/GoogleCloudPlatform/ops-agent/apps.LoggingProcessorMacroWildflySystem]],confgenerator.ConfigComponent.Type, *confgenerator.loggingReceiverMacroAdapter[*github.com/GoogleCloudPlatform/ops-agent/confgenerator.loggingFilesProcessorMacroAdapter[github.com/GoogleCloudPlatform/ops-agent/apps.LoggingProcessorMacroZookeeperGeneral]],ReceiverMacro, *confgenerator.loggingReceiverMacroAdapter[*github.com/GoogleCloudPlatform/ops-agent/confgenerator.loggingFilesProcessorMacroAdapter[github.com/GoogleCloudPlatform/ops-agent/apps.LoggingProcessorMacroZookeeperGeneral]],confgenerator.ConfigComponent.Type, +*confgenerator.loggingReceiverMacroAdapter[github.com/GoogleCloudPlatform/ops-agent/apps.LoggingReceiverMacroOracleDBAlert],apps.LoggingReceiverMacroOracleDBAlert.confgenerator.LoggingReceiverFilesMixin.BufferInMemory, 
+*confgenerator.loggingReceiverMacroAdapter[github.com/GoogleCloudPlatform/ops-agent/apps.LoggingReceiverMacroOracleDBAlert],apps.LoggingReceiverMacroOracleDBAlert.confgenerator.LoggingReceiverFilesMixin.RecordLogFilePath, +*confgenerator.loggingReceiverMacroAdapter[github.com/GoogleCloudPlatform/ops-agent/apps.LoggingReceiverMacroOracleDBAlert],apps.LoggingReceiverMacroOracleDBAlert.confgenerator.LoggingReceiverFilesMixin.WildcardRefreshInterval, +*confgenerator.loggingReceiverMacroAdapter[github.com/GoogleCloudPlatform/ops-agent/apps.LoggingReceiverMacroOracleDBAlert],confgenerator.ConfigComponent.Type, +*confgenerator.loggingReceiverMacroAdapter[github.com/GoogleCloudPlatform/ops-agent/apps.LoggingReceiverMacroOracleDBAudit],apps.LoggingReceiverMacroOracleDBAudit.confgenerator.LoggingReceiverFilesMixin.BufferInMemory, +*confgenerator.loggingReceiverMacroAdapter[github.com/GoogleCloudPlatform/ops-agent/apps.LoggingReceiverMacroOracleDBAudit],apps.LoggingReceiverMacroOracleDBAudit.confgenerator.LoggingReceiverFilesMixin.RecordLogFilePath, +*confgenerator.loggingReceiverMacroAdapter[github.com/GoogleCloudPlatform/ops-agent/apps.LoggingReceiverMacroOracleDBAudit],apps.LoggingReceiverMacroOracleDBAudit.confgenerator.LoggingReceiverFilesMixin.WildcardRefreshInterval, +*confgenerator.loggingReceiverMacroAdapter[github.com/GoogleCloudPlatform/ops-agent/apps.LoggingReceiverMacroOracleDBAudit],confgenerator.ConfigComponent.Type, +*confgenerator.loggingReceiverMacroAdapter[github.com/GoogleCloudPlatform/ops-agent/apps.LoggingReceiverMacroVaultAuditJson],apps.LoggingReceiverMacroVaultAuditJson.confgenerator.LoggingReceiverFilesMixin.BufferInMemory, +*confgenerator.loggingReceiverMacroAdapter[github.com/GoogleCloudPlatform/ops-agent/apps.LoggingReceiverMacroVaultAuditJson],apps.LoggingReceiverMacroVaultAuditJson.confgenerator.LoggingReceiverFilesMixin.RecordLogFilePath, +*confgenerator.loggingReceiverMacroAdapter[github.com/GoogleCloudPlatform/ops-agent/apps.LoggingReceiverMacroVaultAuditJson],apps.LoggingReceiverMacroVaultAuditJson.confgenerator.LoggingReceiverFilesMixin.WildcardRefreshInterval, +*confgenerator.loggingReceiverMacroAdapter[github.com/GoogleCloudPlatform/ops-agent/apps.LoggingReceiverMacroVaultAuditJson],confgenerator.ConfigComponent.Type, diff --git a/confgenerator/testdata/goldens/invalid-logging-otel-processor_modify_fields_ruby_regex/golden/linux-gpu/error b/confgenerator/testdata/goldens/invalid-logging-otel-processor_modify_fields_ruby_regex/golden/linux-gpu/error deleted file mode 100644 index c5a6bfcb3f..0000000000 --- a/confgenerator/testdata/goldens/invalid-logging-otel-processor_modify_fields_ruby_regex/golden/linux-gpu/error +++ /dev/null @@ -1 +0,0 @@ -processor "simple" has invalid configuration: failed to parse omit_if condition "jsonPayload.omitted =~ \"(?!foo)bar\"": unsupported regex "(?!foo)bar": error parsing regexp: invalid or unsupported Perl syntax: `(?!` \ No newline at end of file diff --git a/confgenerator/testdata/goldens/invalid-logging-otel-processor_modify_fields_ruby_regex/golden/linux/error b/confgenerator/testdata/goldens/invalid-logging-otel-processor_modify_fields_ruby_regex/golden/linux/error deleted file mode 100644 index c5a6bfcb3f..0000000000 --- a/confgenerator/testdata/goldens/invalid-logging-otel-processor_modify_fields_ruby_regex/golden/linux/error +++ /dev/null @@ -1 +0,0 @@ -processor "simple" has invalid configuration: failed to parse omit_if condition "jsonPayload.omitted =~ \"(?!foo)bar\"": unsupported regex "(?!foo)bar": error parsing 
regexp: invalid or unsupported Perl syntax: `(?!` \ No newline at end of file diff --git a/confgenerator/testdata/goldens/invalid-logging-otel-processor_modify_fields_ruby_regex/golden/windows-2012/error b/confgenerator/testdata/goldens/invalid-logging-otel-processor_modify_fields_ruby_regex/golden/windows-2012/error deleted file mode 100644 index c5a6bfcb3f..0000000000 --- a/confgenerator/testdata/goldens/invalid-logging-otel-processor_modify_fields_ruby_regex/golden/windows-2012/error +++ /dev/null @@ -1 +0,0 @@ -processor "simple" has invalid configuration: failed to parse omit_if condition "jsonPayload.omitted =~ \"(?!foo)bar\"": unsupported regex "(?!foo)bar": error parsing regexp: invalid or unsupported Perl syntax: `(?!` \ No newline at end of file diff --git a/confgenerator/testdata/goldens/invalid-logging-otel-processor_modify_fields_ruby_regex/golden/windows/error b/confgenerator/testdata/goldens/invalid-logging-otel-processor_modify_fields_ruby_regex/golden/windows/error deleted file mode 100644 index c5a6bfcb3f..0000000000 --- a/confgenerator/testdata/goldens/invalid-logging-otel-processor_modify_fields_ruby_regex/golden/windows/error +++ /dev/null @@ -1 +0,0 @@ -processor "simple" has invalid configuration: failed to parse omit_if condition "jsonPayload.omitted =~ \"(?!foo)bar\"": unsupported regex "(?!foo)bar": error parsing regexp: invalid or unsupported Perl syntax: `(?!` \ No newline at end of file diff --git a/confgenerator/testdata/goldens/invalid-logging-otel-unsupported_processor/golden/linux-gpu/error b/confgenerator/testdata/goldens/invalid-logging-otel-unsupported_processor/golden/linux-gpu/error index 4234c581d9..b1d3076381 100644 --- a/confgenerator/testdata/goldens/invalid-logging-otel-unsupported_processor/golden/linux-gpu/error +++ b/confgenerator/testdata/goldens/invalid-logging-otel-unsupported_processor/golden/linux-gpu/error @@ -1 +1 @@ -logging processor with type "unsupported" is not supported. Supported logging processor types: [apache_access, apache_error, cassandra_debug, cassandra_gc, cassandra_system, couchdb, elasticsearch_gc, elasticsearch_json, exclude_logs, flink, hadoop, hbase_system, jetty_access, kafka, modify_fields, mysql_error, mysql_general, mysql_slow, nginx_access, nginx_error, oracledb_alert, oracledb_audit, parse_json, parse_multiline, parse_regex, postgresql_general, rabbitmq, redis, saphana, solr_system, tomcat_access, tomcat_system, varnish, wildfly_system, zookeeper_general]. \ No newline at end of file +logging processor with type "unsupported" is not supported. Supported logging processor types: [apache_access, apache_error, cassandra_debug, cassandra_gc, cassandra_system, couchdb, elasticsearch_gc, elasticsearch_json, exclude_logs, flink, hadoop, hbase_system, jetty_access, kafka, modify_fields, mysql_error, mysql_general, mysql_slow, nginx_access, nginx_error, oracledb_alert, oracledb_audit, parse_json, parse_multiline, parse_regex, postgresql_general, rabbitmq, redis, saphana, solr_system, tomcat_access, tomcat_system, varnish, vault_audit, wildfly_system, zookeeper_general]. 
\ No newline at end of file diff --git a/confgenerator/testdata/goldens/invalid-logging-otel-unsupported_processor/golden/linux/error b/confgenerator/testdata/goldens/invalid-logging-otel-unsupported_processor/golden/linux/error index 4234c581d9..b1d3076381 100644 --- a/confgenerator/testdata/goldens/invalid-logging-otel-unsupported_processor/golden/linux/error +++ b/confgenerator/testdata/goldens/invalid-logging-otel-unsupported_processor/golden/linux/error @@ -1 +1 @@ -logging processor with type "unsupported" is not supported. Supported logging processor types: [apache_access, apache_error, cassandra_debug, cassandra_gc, cassandra_system, couchdb, elasticsearch_gc, elasticsearch_json, exclude_logs, flink, hadoop, hbase_system, jetty_access, kafka, modify_fields, mysql_error, mysql_general, mysql_slow, nginx_access, nginx_error, oracledb_alert, oracledb_audit, parse_json, parse_multiline, parse_regex, postgresql_general, rabbitmq, redis, saphana, solr_system, tomcat_access, tomcat_system, varnish, wildfly_system, zookeeper_general]. \ No newline at end of file +logging processor with type "unsupported" is not supported. Supported logging processor types: [apache_access, apache_error, cassandra_debug, cassandra_gc, cassandra_system, couchdb, elasticsearch_gc, elasticsearch_json, exclude_logs, flink, hadoop, hbase_system, jetty_access, kafka, modify_fields, mysql_error, mysql_general, mysql_slow, nginx_access, nginx_error, oracledb_alert, oracledb_audit, parse_json, parse_multiline, parse_regex, postgresql_general, rabbitmq, redis, saphana, solr_system, tomcat_access, tomcat_system, varnish, vault_audit, wildfly_system, zookeeper_general]. \ No newline at end of file diff --git a/confgenerator/testdata/goldens/invalid-logging-otel-unsupported_processor/golden/windows-2012/error b/confgenerator/testdata/goldens/invalid-logging-otel-unsupported_processor/golden/windows-2012/error index 2b0b761c84..81aa4d2801 100644 --- a/confgenerator/testdata/goldens/invalid-logging-otel-unsupported_processor/golden/windows-2012/error +++ b/confgenerator/testdata/goldens/invalid-logging-otel-unsupported_processor/golden/windows-2012/error @@ -1 +1 @@ -logging processor with type "unsupported" is not supported. Supported logging processor types: [apache_access, apache_error, cassandra_debug, cassandra_gc, cassandra_system, couchdb, elasticsearch_gc, elasticsearch_json, exclude_logs, flink, hadoop, hbase_system, iis_access, jetty_access, kafka, modify_fields, mysql_error, mysql_general, mysql_slow, nginx_access, nginx_error, oracledb_alert, oracledb_audit, parse_json, parse_multiline, parse_regex, postgresql_general, rabbitmq, redis, saphana, solr_system, tomcat_access, tomcat_system, varnish, wildfly_system, zookeeper_general]. \ No newline at end of file +logging processor with type "unsupported" is not supported. Supported logging processor types: [apache_access, apache_error, cassandra_debug, cassandra_gc, cassandra_system, couchdb, elasticsearch_gc, elasticsearch_json, exclude_logs, flink, hadoop, hbase_system, iis_access, jetty_access, kafka, modify_fields, mysql_error, mysql_general, mysql_slow, nginx_access, nginx_error, oracledb_alert, oracledb_audit, parse_json, parse_multiline, parse_regex, postgresql_general, rabbitmq, redis, saphana, solr_system, tomcat_access, tomcat_system, varnish, vault_audit, wildfly_system, zookeeper_general]. 
\ No newline at end of file diff --git a/confgenerator/testdata/goldens/invalid-logging-otel-unsupported_processor/golden/windows/error b/confgenerator/testdata/goldens/invalid-logging-otel-unsupported_processor/golden/windows/error index 2b0b761c84..81aa4d2801 100644 --- a/confgenerator/testdata/goldens/invalid-logging-otel-unsupported_processor/golden/windows/error +++ b/confgenerator/testdata/goldens/invalid-logging-otel-unsupported_processor/golden/windows/error @@ -1 +1 @@ -logging processor with type "unsupported" is not supported. Supported logging processor types: [apache_access, apache_error, cassandra_debug, cassandra_gc, cassandra_system, couchdb, elasticsearch_gc, elasticsearch_json, exclude_logs, flink, hadoop, hbase_system, iis_access, jetty_access, kafka, modify_fields, mysql_error, mysql_general, mysql_slow, nginx_access, nginx_error, oracledb_alert, oracledb_audit, parse_json, parse_multiline, parse_regex, postgresql_general, rabbitmq, redis, saphana, solr_system, tomcat_access, tomcat_system, varnish, wildfly_system, zookeeper_general]. \ No newline at end of file +logging processor with type "unsupported" is not supported. Supported logging processor types: [apache_access, apache_error, cassandra_debug, cassandra_gc, cassandra_system, couchdb, elasticsearch_gc, elasticsearch_json, exclude_logs, flink, hadoop, hbase_system, iis_access, jetty_access, kafka, modify_fields, mysql_error, mysql_general, mysql_slow, nginx_access, nginx_error, oracledb_alert, oracledb_audit, parse_json, parse_multiline, parse_regex, postgresql_general, rabbitmq, redis, saphana, solr_system, tomcat_access, tomcat_system, varnish, vault_audit, wildfly_system, zookeeper_general]. \ No newline at end of file diff --git a/confgenerator/testdata/goldens/invalid-logging-processor_unsupported_type/golden/linux-gpu/error b/confgenerator/testdata/goldens/invalid-logging-processor_unsupported_type/golden/linux-gpu/error index 41783f233b..8ed152dbea 100644 --- a/confgenerator/testdata/goldens/invalid-logging-processor_unsupported_type/golden/linux-gpu/error +++ b/confgenerator/testdata/goldens/invalid-logging-processor_unsupported_type/golden/linux-gpu/error @@ -1 +1 @@ -logging processor with type "unsupported_type" is not supported. Supported logging processor types: [apache_access, apache_error, cassandra_debug, cassandra_gc, cassandra_system, couchdb, elasticsearch_gc, elasticsearch_json, exclude_logs, flink, hadoop, hbase_system, jetty_access, kafka, modify_fields, mysql_error, mysql_general, mysql_slow, nginx_access, nginx_error, oracledb_alert, oracledb_audit, parse_json, parse_multiline, parse_regex, postgresql_general, rabbitmq, redis, saphana, solr_system, tomcat_access, tomcat_system, varnish, wildfly_system, zookeeper_general]. \ No newline at end of file +logging processor with type "unsupported_type" is not supported. Supported logging processor types: [apache_access, apache_error, cassandra_debug, cassandra_gc, cassandra_system, couchdb, elasticsearch_gc, elasticsearch_json, exclude_logs, flink, hadoop, hbase_system, jetty_access, kafka, modify_fields, mysql_error, mysql_general, mysql_slow, nginx_access, nginx_error, oracledb_alert, oracledb_audit, parse_json, parse_multiline, parse_regex, postgresql_general, rabbitmq, redis, saphana, solr_system, tomcat_access, tomcat_system, varnish, vault_audit, wildfly_system, zookeeper_general]. 
\ No newline at end of file diff --git a/confgenerator/testdata/goldens/invalid-logging-processor_unsupported_type/golden/linux/error b/confgenerator/testdata/goldens/invalid-logging-processor_unsupported_type/golden/linux/error index 41783f233b..8ed152dbea 100644 --- a/confgenerator/testdata/goldens/invalid-logging-processor_unsupported_type/golden/linux/error +++ b/confgenerator/testdata/goldens/invalid-logging-processor_unsupported_type/golden/linux/error @@ -1 +1 @@ -logging processor with type "unsupported_type" is not supported. Supported logging processor types: [apache_access, apache_error, cassandra_debug, cassandra_gc, cassandra_system, couchdb, elasticsearch_gc, elasticsearch_json, exclude_logs, flink, hadoop, hbase_system, jetty_access, kafka, modify_fields, mysql_error, mysql_general, mysql_slow, nginx_access, nginx_error, oracledb_alert, oracledb_audit, parse_json, parse_multiline, parse_regex, postgresql_general, rabbitmq, redis, saphana, solr_system, tomcat_access, tomcat_system, varnish, wildfly_system, zookeeper_general]. \ No newline at end of file +logging processor with type "unsupported_type" is not supported. Supported logging processor types: [apache_access, apache_error, cassandra_debug, cassandra_gc, cassandra_system, couchdb, elasticsearch_gc, elasticsearch_json, exclude_logs, flink, hadoop, hbase_system, jetty_access, kafka, modify_fields, mysql_error, mysql_general, mysql_slow, nginx_access, nginx_error, oracledb_alert, oracledb_audit, parse_json, parse_multiline, parse_regex, postgresql_general, rabbitmq, redis, saphana, solr_system, tomcat_access, tomcat_system, varnish, vault_audit, wildfly_system, zookeeper_general]. \ No newline at end of file diff --git a/confgenerator/testdata/goldens/invalid-logging-processor_unsupported_type/golden/windows-2012/error b/confgenerator/testdata/goldens/invalid-logging-processor_unsupported_type/golden/windows-2012/error index 7c76f2b1ee..fa19c4e4fd 100644 --- a/confgenerator/testdata/goldens/invalid-logging-processor_unsupported_type/golden/windows-2012/error +++ b/confgenerator/testdata/goldens/invalid-logging-processor_unsupported_type/golden/windows-2012/error @@ -1 +1 @@ -logging processor with type "unsupported_type" is not supported. Supported logging processor types: [apache_access, apache_error, cassandra_debug, cassandra_gc, cassandra_system, couchdb, elasticsearch_gc, elasticsearch_json, exclude_logs, flink, hadoop, hbase_system, iis_access, jetty_access, kafka, modify_fields, mysql_error, mysql_general, mysql_slow, nginx_access, nginx_error, oracledb_alert, oracledb_audit, parse_json, parse_multiline, parse_regex, postgresql_general, rabbitmq, redis, saphana, solr_system, tomcat_access, tomcat_system, varnish, wildfly_system, zookeeper_general]. \ No newline at end of file +logging processor with type "unsupported_type" is not supported. Supported logging processor types: [apache_access, apache_error, cassandra_debug, cassandra_gc, cassandra_system, couchdb, elasticsearch_gc, elasticsearch_json, exclude_logs, flink, hadoop, hbase_system, iis_access, jetty_access, kafka, modify_fields, mysql_error, mysql_general, mysql_slow, nginx_access, nginx_error, oracledb_alert, oracledb_audit, parse_json, parse_multiline, parse_regex, postgresql_general, rabbitmq, redis, saphana, solr_system, tomcat_access, tomcat_system, varnish, vault_audit, wildfly_system, zookeeper_general]. 
\ No newline at end of file diff --git a/confgenerator/testdata/goldens/invalid-logging-processor_unsupported_type/golden/windows/error b/confgenerator/testdata/goldens/invalid-logging-processor_unsupported_type/golden/windows/error index 7c76f2b1ee..fa19c4e4fd 100644 --- a/confgenerator/testdata/goldens/invalid-logging-processor_unsupported_type/golden/windows/error +++ b/confgenerator/testdata/goldens/invalid-logging-processor_unsupported_type/golden/windows/error @@ -1 +1 @@ -logging processor with type "unsupported_type" is not supported. Supported logging processor types: [apache_access, apache_error, cassandra_debug, cassandra_gc, cassandra_system, couchdb, elasticsearch_gc, elasticsearch_json, exclude_logs, flink, hadoop, hbase_system, iis_access, jetty_access, kafka, modify_fields, mysql_error, mysql_general, mysql_slow, nginx_access, nginx_error, oracledb_alert, oracledb_audit, parse_json, parse_multiline, parse_regex, postgresql_general, rabbitmq, redis, saphana, solr_system, tomcat_access, tomcat_system, varnish, wildfly_system, zookeeper_general]. \ No newline at end of file +logging processor with type "unsupported_type" is not supported. Supported logging processor types: [apache_access, apache_error, cassandra_debug, cassandra_gc, cassandra_system, couchdb, elasticsearch_gc, elasticsearch_json, exclude_logs, flink, hadoop, hbase_system, iis_access, jetty_access, kafka, modify_fields, mysql_error, mysql_general, mysql_slow, nginx_access, nginx_error, oracledb_alert, oracledb_audit, parse_json, parse_multiline, parse_regex, postgresql_general, rabbitmq, redis, saphana, solr_system, tomcat_access, tomcat_system, varnish, vault_audit, wildfly_system, zookeeper_general]. \ No newline at end of file diff --git a/confgenerator/testdata/goldens/invalid-logging-receiver_oracledb_alert_location_missing/golden/linux-gpu/error b/confgenerator/testdata/goldens/invalid-logging-receiver_oracledb_alert_location_missing/golden/linux-gpu/error index 8898f9bba8..8f2a7108d0 100644 --- a/confgenerator/testdata/goldens/invalid-logging-receiver_oracledb_alert_location_missing/golden/linux-gpu/error +++ b/confgenerator/testdata/goldens/invalid-logging-receiver_oracledb_alert_location_missing/golden/linux-gpu/error @@ -1,8 +1 @@ -[17:19] Key: 'LoggingReceiverOracleDBAlert.include_paths' Error:Field validation for 'include_paths' failed on the 'required_without' tag,Key: 'LoggingReceiverOracleDBAlert.oracle_home' Error:Field validation for 'oracle_home' failed on the 'required_without' tag - 15 | logging: - 16 | receivers: -> 17 | oracledb_alert: - ^ - 18 | type: oracledb_alert - 19 | service: - 20 | pipelines: \ No newline at end of file +Key: 'LoggingReceiverMacroOracleDBAlert.include_paths' Error:Field validation for 'include_paths' failed on the 'required_without' tag,Key: 'LoggingReceiverMacroOracleDBAlert.oracle_home' Error:Field validation for 'oracle_home' failed on the 'required_without' tag \ No newline at end of file diff --git a/confgenerator/testdata/goldens/invalid-logging-receiver_oracledb_alert_location_missing/golden/linux/error b/confgenerator/testdata/goldens/invalid-logging-receiver_oracledb_alert_location_missing/golden/linux/error index 8898f9bba8..8f2a7108d0 100644 --- a/confgenerator/testdata/goldens/invalid-logging-receiver_oracledb_alert_location_missing/golden/linux/error +++ b/confgenerator/testdata/goldens/invalid-logging-receiver_oracledb_alert_location_missing/golden/linux/error @@ -1,8 +1 @@ -[17:19] Key: 'LoggingReceiverOracleDBAlert.include_paths' Error:Field 
validation for 'include_paths' failed on the 'required_without' tag,Key: 'LoggingReceiverOracleDBAlert.oracle_home' Error:Field validation for 'oracle_home' failed on the 'required_without' tag - 15 | logging: - 16 | receivers: -> 17 | oracledb_alert: - ^ - 18 | type: oracledb_alert - 19 | service: - 20 | pipelines: \ No newline at end of file +Key: 'LoggingReceiverMacroOracleDBAlert.include_paths' Error:Field validation for 'include_paths' failed on the 'required_without' tag,Key: 'LoggingReceiverMacroOracleDBAlert.oracle_home' Error:Field validation for 'oracle_home' failed on the 'required_without' tag \ No newline at end of file diff --git a/confgenerator/testdata/goldens/invalid-logging-receiver_oracledb_alert_location_missing/golden/windows-2012/error b/confgenerator/testdata/goldens/invalid-logging-receiver_oracledb_alert_location_missing/golden/windows-2012/error index 8898f9bba8..8f2a7108d0 100644 --- a/confgenerator/testdata/goldens/invalid-logging-receiver_oracledb_alert_location_missing/golden/windows-2012/error +++ b/confgenerator/testdata/goldens/invalid-logging-receiver_oracledb_alert_location_missing/golden/windows-2012/error @@ -1,8 +1 @@ -[17:19] Key: 'LoggingReceiverOracleDBAlert.include_paths' Error:Field validation for 'include_paths' failed on the 'required_without' tag,Key: 'LoggingReceiverOracleDBAlert.oracle_home' Error:Field validation for 'oracle_home' failed on the 'required_without' tag - 15 | logging: - 16 | receivers: -> 17 | oracledb_alert: - ^ - 18 | type: oracledb_alert - 19 | service: - 20 | pipelines: \ No newline at end of file +Key: 'LoggingReceiverMacroOracleDBAlert.include_paths' Error:Field validation for 'include_paths' failed on the 'required_without' tag,Key: 'LoggingReceiverMacroOracleDBAlert.oracle_home' Error:Field validation for 'oracle_home' failed on the 'required_without' tag \ No newline at end of file diff --git a/confgenerator/testdata/goldens/invalid-logging-receiver_oracledb_alert_location_missing/golden/windows/error b/confgenerator/testdata/goldens/invalid-logging-receiver_oracledb_alert_location_missing/golden/windows/error index 8898f9bba8..8f2a7108d0 100644 --- a/confgenerator/testdata/goldens/invalid-logging-receiver_oracledb_alert_location_missing/golden/windows/error +++ b/confgenerator/testdata/goldens/invalid-logging-receiver_oracledb_alert_location_missing/golden/windows/error @@ -1,8 +1 @@ -[17:19] Key: 'LoggingReceiverOracleDBAlert.include_paths' Error:Field validation for 'include_paths' failed on the 'required_without' tag,Key: 'LoggingReceiverOracleDBAlert.oracle_home' Error:Field validation for 'oracle_home' failed on the 'required_without' tag - 15 | logging: - 16 | receivers: -> 17 | oracledb_alert: - ^ - 18 | type: oracledb_alert - 19 | service: - 20 | pipelines: \ No newline at end of file +Key: 'LoggingReceiverMacroOracleDBAlert.include_paths' Error:Field validation for 'include_paths' failed on the 'required_without' tag,Key: 'LoggingReceiverMacroOracleDBAlert.oracle_home' Error:Field validation for 'oracle_home' failed on the 'required_without' tag \ No newline at end of file diff --git a/confgenerator/testdata/goldens/invalid-logging-receiver_oracledb_audit_location_missing/golden/linux-gpu/error b/confgenerator/testdata/goldens/invalid-logging-receiver_oracledb_audit_location_missing/golden/linux-gpu/error index 7581efda55..c66eae441f 100644 --- a/confgenerator/testdata/goldens/invalid-logging-receiver_oracledb_audit_location_missing/golden/linux-gpu/error +++ 
b/confgenerator/testdata/goldens/invalid-logging-receiver_oracledb_audit_location_missing/golden/linux-gpu/error @@ -1,8 +1 @@ -[17:19] Key: 'LoggingReceiverOracleDBAudit.include_paths' Error:Field validation for 'include_paths' failed on the 'required_without' tag,Key: 'LoggingReceiverOracleDBAudit.oracle_home' Error:Field validation for 'oracle_home' failed on the 'required_without' tag - 15 | logging: - 16 | receivers: -> 17 | oracledb_audit: - ^ - 18 | type: oracledb_audit - 19 | service: - 20 | pipelines: \ No newline at end of file +Key: 'LoggingReceiverMacroOracleDBAudit.include_paths' Error:Field validation for 'include_paths' failed on the 'required_without' tag,Key: 'LoggingReceiverMacroOracleDBAudit.oracle_home' Error:Field validation for 'oracle_home' failed on the 'required_without' tag \ No newline at end of file diff --git a/confgenerator/testdata/goldens/invalid-logging-receiver_oracledb_audit_location_missing/golden/linux/error b/confgenerator/testdata/goldens/invalid-logging-receiver_oracledb_audit_location_missing/golden/linux/error index 7581efda55..c66eae441f 100644 --- a/confgenerator/testdata/goldens/invalid-logging-receiver_oracledb_audit_location_missing/golden/linux/error +++ b/confgenerator/testdata/goldens/invalid-logging-receiver_oracledb_audit_location_missing/golden/linux/error @@ -1,8 +1 @@ -[17:19] Key: 'LoggingReceiverOracleDBAudit.include_paths' Error:Field validation for 'include_paths' failed on the 'required_without' tag,Key: 'LoggingReceiverOracleDBAudit.oracle_home' Error:Field validation for 'oracle_home' failed on the 'required_without' tag - 15 | logging: - 16 | receivers: -> 17 | oracledb_audit: - ^ - 18 | type: oracledb_audit - 19 | service: - 20 | pipelines: \ No newline at end of file +Key: 'LoggingReceiverMacroOracleDBAudit.include_paths' Error:Field validation for 'include_paths' failed on the 'required_without' tag,Key: 'LoggingReceiverMacroOracleDBAudit.oracle_home' Error:Field validation for 'oracle_home' failed on the 'required_without' tag \ No newline at end of file diff --git a/confgenerator/testdata/goldens/invalid-logging-receiver_oracledb_audit_location_missing/golden/windows-2012/error b/confgenerator/testdata/goldens/invalid-logging-receiver_oracledb_audit_location_missing/golden/windows-2012/error index 7581efda55..c66eae441f 100644 --- a/confgenerator/testdata/goldens/invalid-logging-receiver_oracledb_audit_location_missing/golden/windows-2012/error +++ b/confgenerator/testdata/goldens/invalid-logging-receiver_oracledb_audit_location_missing/golden/windows-2012/error @@ -1,8 +1 @@ -[17:19] Key: 'LoggingReceiverOracleDBAudit.include_paths' Error:Field validation for 'include_paths' failed on the 'required_without' tag,Key: 'LoggingReceiverOracleDBAudit.oracle_home' Error:Field validation for 'oracle_home' failed on the 'required_without' tag - 15 | logging: - 16 | receivers: -> 17 | oracledb_audit: - ^ - 18 | type: oracledb_audit - 19 | service: - 20 | pipelines: \ No newline at end of file +Key: 'LoggingReceiverMacroOracleDBAudit.include_paths' Error:Field validation for 'include_paths' failed on the 'required_without' tag,Key: 'LoggingReceiverMacroOracleDBAudit.oracle_home' Error:Field validation for 'oracle_home' failed on the 'required_without' tag \ No newline at end of file diff --git a/confgenerator/testdata/goldens/invalid-logging-receiver_oracledb_audit_location_missing/golden/windows/error b/confgenerator/testdata/goldens/invalid-logging-receiver_oracledb_audit_location_missing/golden/windows/error index 
7581efda55..c66eae441f 100644 --- a/confgenerator/testdata/goldens/invalid-logging-receiver_oracledb_audit_location_missing/golden/windows/error +++ b/confgenerator/testdata/goldens/invalid-logging-receiver_oracledb_audit_location_missing/golden/windows/error @@ -1,8 +1 @@ -[17:19] Key: 'LoggingReceiverOracleDBAudit.include_paths' Error:Field validation for 'include_paths' failed on the 'required_without' tag,Key: 'LoggingReceiverOracleDBAudit.oracle_home' Error:Field validation for 'oracle_home' failed on the 'required_without' tag - 15 | logging: - 16 | receivers: -> 17 | oracledb_audit: - ^ - 18 | type: oracledb_audit - 19 | service: - 20 | pipelines: \ No newline at end of file +Key: 'LoggingReceiverMacroOracleDBAudit.include_paths' Error:Field validation for 'include_paths' failed on the 'required_without' tag,Key: 'LoggingReceiverMacroOracleDBAudit.oracle_home' Error:Field validation for 'oracle_home' failed on the 'required_without' tag \ No newline at end of file diff --git a/confgenerator/testdata/goldens/invalid-logging-receiver_vault_missing_required_include_path/golden/linux-gpu/error b/confgenerator/testdata/goldens/invalid-logging-receiver_vault_missing_required_include_path/golden/linux-gpu/error index e252b0d5d1..7107dfae58 100644 --- a/confgenerator/testdata/goldens/invalid-logging-receiver_vault_missing_required_include_path/golden/linux-gpu/error +++ b/confgenerator/testdata/goldens/invalid-logging-receiver_vault_missing_required_include_path/golden/linux-gpu/error @@ -1,8 +1 @@ -[17:16] "include_paths" is a required field - 15 | logging: - 16 | receivers: -> 17 | vault_audit: - ^ - 18 | type: vault_audit - 19 | service: - 20 | pipelines: \ No newline at end of file +"include_paths" is a required field \ No newline at end of file diff --git a/confgenerator/testdata/goldens/invalid-logging-receiver_vault_missing_required_include_path/golden/linux/error b/confgenerator/testdata/goldens/invalid-logging-receiver_vault_missing_required_include_path/golden/linux/error index e252b0d5d1..7107dfae58 100644 --- a/confgenerator/testdata/goldens/invalid-logging-receiver_vault_missing_required_include_path/golden/linux/error +++ b/confgenerator/testdata/goldens/invalid-logging-receiver_vault_missing_required_include_path/golden/linux/error @@ -1,8 +1 @@ -[17:16] "include_paths" is a required field - 15 | logging: - 16 | receivers: -> 17 | vault_audit: - ^ - 18 | type: vault_audit - 19 | service: - 20 | pipelines: \ No newline at end of file +"include_paths" is a required field \ No newline at end of file diff --git a/confgenerator/testdata/goldens/invalid-logging-receiver_vault_missing_required_include_path/golden/windows-2012/error b/confgenerator/testdata/goldens/invalid-logging-receiver_vault_missing_required_include_path/golden/windows-2012/error index e252b0d5d1..7107dfae58 100644 --- a/confgenerator/testdata/goldens/invalid-logging-receiver_vault_missing_required_include_path/golden/windows-2012/error +++ b/confgenerator/testdata/goldens/invalid-logging-receiver_vault_missing_required_include_path/golden/windows-2012/error @@ -1,8 +1 @@ -[17:16] "include_paths" is a required field - 15 | logging: - 16 | receivers: -> 17 | vault_audit: - ^ - 18 | type: vault_audit - 19 | service: - 20 | pipelines: \ No newline at end of file +"include_paths" is a required field \ No newline at end of file diff --git a/confgenerator/testdata/goldens/invalid-logging-receiver_vault_missing_required_include_path/golden/windows/error 
b/confgenerator/testdata/goldens/invalid-logging-receiver_vault_missing_required_include_path/golden/windows/error index e252b0d5d1..7107dfae58 100644 --- a/confgenerator/testdata/goldens/invalid-logging-receiver_vault_missing_required_include_path/golden/windows/error +++ b/confgenerator/testdata/goldens/invalid-logging-receiver_vault_missing_required_include_path/golden/windows/error @@ -1,8 +1 @@ -[17:16] "include_paths" is a required field - 15 | logging: - 16 | receivers: -> 17 | vault_audit: - ^ - 18 | type: vault_audit - 19 | service: - 20 | pipelines: \ No newline at end of file +"include_paths" is a required field \ No newline at end of file diff --git a/confgenerator/testdata/goldens/logging-otel-processor_exclude_logs/EXPERIMENTAL_FEATURES b/confgenerator/testdata/goldens/logging-otel-processor_exclude_logs/EXPERIMENTAL_FEATURES new file mode 100644 index 0000000000..d934c8d47b --- /dev/null +++ b/confgenerator/testdata/goldens/logging-otel-processor_exclude_logs/EXPERIMENTAL_FEATURES @@ -0,0 +1 @@ +otel_logging diff --git a/confgenerator/testdata/goldens/invalid-logging-otel-processor_modify_fields_ruby_regex/golden/linux-gpu/0f15dbe303dc7122d43443c9a4c31632.lua b/confgenerator/testdata/goldens/logging-otel-processor_exclude_logs/golden/linux-gpu/0f15dbe303dc7122d43443c9a4c31632.lua similarity index 100% rename from confgenerator/testdata/goldens/invalid-logging-otel-processor_modify_fields_ruby_regex/golden/linux-gpu/0f15dbe303dc7122d43443c9a4c31632.lua rename to confgenerator/testdata/goldens/logging-otel-processor_exclude_logs/golden/linux-gpu/0f15dbe303dc7122d43443c9a4c31632.lua diff --git a/confgenerator/testdata/goldens/invalid-logging-otel-processor_modify_fields_ruby_regex/golden/linux-gpu/4d6012ff003886818fb9b9285b4af962.lua b/confgenerator/testdata/goldens/logging-otel-processor_exclude_logs/golden/linux-gpu/4d6012ff003886818fb9b9285b4af962.lua similarity index 100% rename from confgenerator/testdata/goldens/invalid-logging-otel-processor_modify_fields_ruby_regex/golden/linux-gpu/4d6012ff003886818fb9b9285b4af962.lua rename to confgenerator/testdata/goldens/logging-otel-processor_exclude_logs/golden/linux-gpu/4d6012ff003886818fb9b9285b4af962.lua diff --git a/confgenerator/testdata/goldens/invalid-logging-otel-processor_modify_fields_ruby_regex/golden/linux-gpu/5fc5f42c16c9e1ab8292e3d42f74f3be.lua b/confgenerator/testdata/goldens/logging-otel-processor_exclude_logs/golden/linux-gpu/5fc5f42c16c9e1ab8292e3d42f74f3be.lua similarity index 100% rename from confgenerator/testdata/goldens/invalid-logging-otel-processor_modify_fields_ruby_regex/golden/linux-gpu/5fc5f42c16c9e1ab8292e3d42f74f3be.lua rename to confgenerator/testdata/goldens/logging-otel-processor_exclude_logs/golden/linux-gpu/5fc5f42c16c9e1ab8292e3d42f74f3be.lua diff --git a/confgenerator/testdata/goldens/invalid-logging-otel-processor_modify_fields_ruby_regex/golden/linux-gpu/b4a0dead382dce7b4fe011d3f59fdb6d.lua b/confgenerator/testdata/goldens/logging-otel-processor_exclude_logs/golden/linux-gpu/b4a0dead382dce7b4fe011d3f59fdb6d.lua similarity index 100% rename from confgenerator/testdata/goldens/invalid-logging-otel-processor_modify_fields_ruby_regex/golden/linux-gpu/b4a0dead382dce7b4fe011d3f59fdb6d.lua rename to confgenerator/testdata/goldens/logging-otel-processor_exclude_logs/golden/linux-gpu/b4a0dead382dce7b4fe011d3f59fdb6d.lua diff --git a/confgenerator/testdata/goldens/logging-otel-processor_exclude_logs/golden/linux-gpu/enabled_receivers_otlp.json 
b/confgenerator/testdata/goldens/logging-otel-processor_exclude_logs/golden/linux-gpu/enabled_receivers_otlp.json new file mode 100644 index 0000000000..b936b3a79c --- /dev/null +++ b/confgenerator/testdata/goldens/logging-otel-processor_exclude_logs/golden/linux-gpu/enabled_receivers_otlp.json @@ -0,0 +1 @@ +{"resourceMetrics":[{"resource":{"attributes":[{"key":"k","value":{"stringValue":"v"}}]},"scopeMetrics":[{"scope":{},"metrics":[{"name":"agent.googleapis.com/agent/ops_agent/enabled_receivers","gauge":{"dataPoints":[{"attributes":[{"key":"telemetry_type","value":{"stringValue":"metrics"}},{"key":"receiver_type","value":{"stringValue":"hostmetrics"}}],"asInt":"1"},{"attributes":[{"key":"telemetry_type","value":{"stringValue":"logs"}},{"key":"receiver_type","value":{"stringValue":"files"}}],"asInt":"2"}]}}]}]}]} \ No newline at end of file diff --git a/confgenerator/testdata/goldens/logging-otel-processor_exclude_logs/golden/linux-gpu/feature_tracking_otlp.json b/confgenerator/testdata/goldens/logging-otel-processor_exclude_logs/golden/linux-gpu/feature_tracking_otlp.json new file mode 100644 index 0000000000..88d146765c --- /dev/null +++ b/confgenerator/testdata/goldens/logging-otel-processor_exclude_logs/golden/linux-gpu/feature_tracking_otlp.json @@ -0,0 +1 @@ +{"resourceMetrics":[{"resource":{"attributes":[{"key":"k","value":{"stringValue":"v"}}]},"scopeMetrics":[{"scope":{},"metrics":[{"name":"agent.googleapis.com/agent/internal/ops/feature_tracking","gauge":{"dataPoints":[{"attributes":[{"key":"module","value":{"stringValue":"logging"}},{"key":"feature","value":{"stringValue":"service:pipelines"}},{"key":"key","value":{"stringValue":"default_pipeline_overridden"}},{"key":"value","value":{"stringValue":"false"}}],"asInt":"1"},{"attributes":[{"key":"module","value":{"stringValue":"metrics"}},{"key":"feature","value":{"stringValue":"service:pipelines"}},{"key":"key","value":{"stringValue":"default_pipeline_overridden"}},{"key":"value","value":{"stringValue":"false"}}],"asInt":"1"},{"attributes":[{"key":"module","value":{"stringValue":"global"}},{"key":"feature","value":{"stringValue":"default:self_log"}},{"key":"key","value":{"stringValue":"default_self_log_file_collection"}},{"key":"value","value":{"stringValue":"true"}}],"asInt":"1"},{"attributes":[{"key":"module","value":{"stringValue":"logging"}},{"key":"feature","value":{"stringValue":"service:otel_logging"}},{"key":"key","value":{"stringValue":"otel_logging_supported_config"}},{"key":"value","value":{"stringValue":"true"}}],"asInt":"1"},{"attributes":[{"key":"module","value":{"stringValue":"logging"}},{"key":"feature","value":{"stringValue":"receivers:files"}},{"key":"key","value":{"stringValue":"[0].enabled"}},{"key":"value","value":{"stringValue":"true"}}],"asInt":"1"},{"attributes":[{"key":"module","value":{"stringValue":"logging"}},{"key":"feature","value":{"stringValue":"receivers:files"}},{"key":"key","value":{"stringValue":"[0].include_paths.__length"}},{"key":"value","value":{"stringValue":"1"}}],"asInt":"1"},{"attributes":[{"key":"module","value":{"stringValue":"logging"}},{"key":"feature","value":{"stringValue":"processors:exclude_logs"}},{"key":"key","value":{"stringValue":"[0].enabled"}},{"key":"value","value":{"stringValue":"true"}}],"asInt":"1"},{"attributes":[{"key":"module","value":{"stringValue":"logging"}},{"key":"feature","value":{"stringValue":"processors:exclude_logs"}},{"key":"key","value":{"stringValue":"[0].match_any.__length"}},{"key":"value","value":{"stringValue":"1"}}],"asInt":"1"},{"attributes":[{"ke
y":"module","value":{"stringValue":"logging"}},{"key":"feature","value":{"stringValue":"processors:exclude_logs"}},{"key":"key","value":{"stringValue":"[1].enabled"}},{"key":"value","value":{"stringValue":"true"}}],"asInt":"1"},{"attributes":[{"key":"module","value":{"stringValue":"logging"}},{"key":"feature","value":{"stringValue":"processors:exclude_logs"}},{"key":"key","value":{"stringValue":"[1].match_any.__length"}},{"key":"value","value":{"stringValue":"1"}}],"asInt":"1"},{"attributes":[{"key":"module","value":{"stringValue":"logging"}},{"key":"feature","value":{"stringValue":"processors:exclude_logs"}},{"key":"key","value":{"stringValue":"[2].enabled"}},{"key":"value","value":{"stringValue":"true"}}],"asInt":"1"},{"attributes":[{"key":"module","value":{"stringValue":"logging"}},{"key":"feature","value":{"stringValue":"processors:exclude_logs"}},{"key":"key","value":{"stringValue":"[2].match_any.__length"}},{"key":"value","value":{"stringValue":"2"}}],"asInt":"1"},{"attributes":[{"key":"module","value":{"stringValue":"logging"}},{"key":"feature","value":{"stringValue":"processors:exclude_logs"}},{"key":"key","value":{"stringValue":"[3].enabled"}},{"key":"value","value":{"stringValue":"true"}}],"asInt":"1"},{"attributes":[{"key":"module","value":{"stringValue":"logging"}},{"key":"feature","value":{"stringValue":"processors:exclude_logs"}},{"key":"key","value":{"stringValue":"[3].match_any.__length"}},{"key":"value","value":{"stringValue":"1"}}],"asInt":"1"},{"attributes":[{"key":"module","value":{"stringValue":"logging"}},{"key":"feature","value":{"stringValue":"processors:exclude_logs"}},{"key":"key","value":{"stringValue":"[4].enabled"}},{"key":"value","value":{"stringValue":"true"}}],"asInt":"1"},{"attributes":[{"key":"module","value":{"stringValue":"logging"}},{"key":"feature","value":{"stringValue":"processors:exclude_logs"}},{"key":"key","value":{"stringValue":"[4].match_any.__length"}},{"key":"value","value":{"stringValue":"1"}}],"asInt":"1"},{"attributes":[{"key":"module","value":{"stringValue":"logging"}},{"key":"feature","value":{"stringValue":"processors:exclude_logs"}},{"key":"key","value":{"stringValue":"[5].enabled"}},{"key":"value","value":{"stringValue":"true"}}],"asInt":"1"},{"attributes":[{"key":"module","value":{"stringValue":"logging"}},{"key":"feature","value":{"stringValue":"processors:exclude_logs"}},{"key":"key","value":{"stringValue":"[5].match_any.__length"}},{"key":"value","value":{"stringValue":"1"}}],"asInt":"1"},{"attributes":[{"key":"module","value":{"stringValue":"logging"}},{"key":"feature","value":{"stringValue":"processors:exclude_logs"}},{"key":"key","value":{"stringValue":"[6].enabled"}},{"key":"value","value":{"stringValue":"true"}}],"asInt":"1"},{"attributes":[{"key":"module","value":{"stringValue":"logging"}},{"key":"feature","value":{"stringValue":"processors:exclude_logs"}},{"key":"key","value":{"stringValue":"[6].match_any.__length"}},{"key":"value","value":{"stringValue":"1"}}],"asInt":"1"},{"attributes":[{"key":"module","value":{"stringValue":"logging"}},{"key":"feature","value":{"stringValue":"processors:exclude_logs"}},{"key":"key","value":{"stringValue":"[7].enabled"}},{"key":"value","value":{"stringValue":"true"}}],"asInt":"1"},{"attributes":[{"key":"module","value":{"stringValue":"logging"}},{"key":"feature","value":{"stringValue":"processors:exclude_logs"}},{"key":"key","value":{"stringValue":"[7].match_any.__length"}},{"key":"value","value":{"stringValue":"1"}}],"asInt":"1"},{"attributes":[{"key":"module","value":{"stringValue":"loggi
ng"}},{"key":"feature","value":{"stringValue":"processors:exclude_logs"}},{"key":"key","value":{"stringValue":"[8].enabled"}},{"key":"value","value":{"stringValue":"true"}}],"asInt":"1"},{"attributes":[{"key":"module","value":{"stringValue":"logging"}},{"key":"feature","value":{"stringValue":"processors:exclude_logs"}},{"key":"key","value":{"stringValue":"[8].match_any.__length"}},{"key":"value","value":{"stringValue":"1"}}],"asInt":"1"},{"attributes":[{"key":"module","value":{"stringValue":"logging"}},{"key":"feature","value":{"stringValue":"processors:exclude_logs"}},{"key":"key","value":{"stringValue":"[9].enabled"}},{"key":"value","value":{"stringValue":"true"}}],"asInt":"1"},{"attributes":[{"key":"module","value":{"stringValue":"logging"}},{"key":"feature","value":{"stringValue":"processors:exclude_logs"}},{"key":"key","value":{"stringValue":"[9].match_any.__length"}},{"key":"value","value":{"stringValue":"1"}}],"asInt":"1"},{"attributes":[{"key":"module","value":{"stringValue":"logging"}},{"key":"feature","value":{"stringValue":"processors:exclude_logs"}},{"key":"key","value":{"stringValue":"[10].enabled"}},{"key":"value","value":{"stringValue":"true"}}],"asInt":"1"},{"attributes":[{"key":"module","value":{"stringValue":"logging"}},{"key":"feature","value":{"stringValue":"processors:exclude_logs"}},{"key":"key","value":{"stringValue":"[10].match_any.__length"}},{"key":"value","value":{"stringValue":"1"}}],"asInt":"1"},{"attributes":[{"key":"module","value":{"stringValue":"logging"}},{"key":"feature","value":{"stringValue":"processors:exclude_logs"}},{"key":"key","value":{"stringValue":"[11].enabled"}},{"key":"value","value":{"stringValue":"true"}}],"asInt":"1"},{"attributes":[{"key":"module","value":{"stringValue":"logging"}},{"key":"feature","value":{"stringValue":"processors:exclude_logs"}},{"key":"key","value":{"stringValue":"[11].match_any.__length"}},{"key":"value","value":{"stringValue":"1"}}],"asInt":"1"},{"attributes":[{"key":"module","value":{"stringValue":"logging"}},{"key":"feature","value":{"stringValue":"processors:exclude_logs"}},{"key":"key","value":{"stringValue":"[12].enabled"}},{"key":"value","value":{"stringValue":"true"}}],"asInt":"1"},{"attributes":[{"key":"module","value":{"stringValue":"logging"}},{"key":"feature","value":{"stringValue":"processors:exclude_logs"}},{"key":"key","value":{"stringValue":"[12].match_any.__length"}},{"key":"value","value":{"stringValue":"1"}}],"asInt":"1"},{"attributes":[{"key":"module","value":{"stringValue":"logging"}},{"key":"feature","value":{"stringValue":"processors:exclude_logs"}},{"key":"key","value":{"stringValue":"[13].enabled"}},{"key":"value","value":{"stringValue":"true"}}],"asInt":"1"},{"attributes":[{"key":"module","value":{"stringValue":"logging"}},{"key":"feature","value":{"stringValue":"processors:exclude_logs"}},{"key":"key","value":{"stringValue":"[13].match_any.__length"}},{"key":"value","value":{"stringValue":"1"}}],"asInt":"1"},{"attributes":[{"key":"module","value":{"stringValue":"logging"}},{"key":"feature","value":{"stringValue":"processors:exclude_logs"}},{"key":"key","value":{"stringValue":"[14].enabled"}},{"key":"value","value":{"stringValue":"true"}}],"asInt":"1"},{"attributes":[{"key":"module","value":{"stringValue":"logging"}},{"key":"feature","value":{"stringValue":"processors:exclude_logs"}},{"key":"key","value":{"stringValue":"[14].match_any.__length"}},{"key":"value","value":{"stringValue":"1"}}],"asInt":"1"},{"attributes":[{"key":"module","value":{"stringValue":"logging"}},{"key":"feature","value":
{"stringValue":"processors:exclude_logs"}},{"key":"key","value":{"stringValue":"[15].enabled"}},{"key":"value","value":{"stringValue":"true"}}],"asInt":"1"},{"attributes":[{"key":"module","value":{"stringValue":"logging"}},{"key":"feature","value":{"stringValue":"processors:exclude_logs"}},{"key":"key","value":{"stringValue":"[15].match_any.__length"}},{"key":"value","value":{"stringValue":"1"}}],"asInt":"1"},{"attributes":[{"key":"module","value":{"stringValue":"logging"}},{"key":"feature","value":{"stringValue":"processors:exclude_logs"}},{"key":"key","value":{"stringValue":"[16].enabled"}},{"key":"value","value":{"stringValue":"true"}}],"asInt":"1"},{"attributes":[{"key":"module","value":{"stringValue":"logging"}},{"key":"feature","value":{"stringValue":"processors:exclude_logs"}},{"key":"key","value":{"stringValue":"[16].match_any.__length"}},{"key":"value","value":{"stringValue":"1"}}],"asInt":"1"},{"attributes":[{"key":"module","value":{"stringValue":"logging"}},{"key":"feature","value":{"stringValue":"processors:exclude_logs"}},{"key":"key","value":{"stringValue":"[17].enabled"}},{"key":"value","value":{"stringValue":"true"}}],"asInt":"1"},{"attributes":[{"key":"module","value":{"stringValue":"logging"}},{"key":"feature","value":{"stringValue":"processors:exclude_logs"}},{"key":"key","value":{"stringValue":"[17].match_any.__length"}},{"key":"value","value":{"stringValue":"1"}}],"asInt":"1"},{"attributes":[{"key":"module","value":{"stringValue":"logging"}},{"key":"feature","value":{"stringValue":"processors:exclude_logs"}},{"key":"key","value":{"stringValue":"[18].enabled"}},{"key":"value","value":{"stringValue":"true"}}],"asInt":"1"},{"attributes":[{"key":"module","value":{"stringValue":"logging"}},{"key":"feature","value":{"stringValue":"processors:exclude_logs"}},{"key":"key","value":{"stringValue":"[18].match_any.__length"}},{"key":"value","value":{"stringValue":"1"}}],"asInt":"1"},{"attributes":[{"key":"module","value":{"stringValue":"logging"}},{"key":"feature","value":{"stringValue":"processors:exclude_logs"}},{"key":"key","value":{"stringValue":"[19].enabled"}},{"key":"value","value":{"stringValue":"true"}}],"asInt":"1"},{"attributes":[{"key":"module","value":{"stringValue":"logging"}},{"key":"feature","value":{"stringValue":"processors:exclude_logs"}},{"key":"key","value":{"stringValue":"[19].match_any.__length"}},{"key":"value","value":{"stringValue":"1"}}],"asInt":"1"},{"attributes":[{"key":"module","value":{"stringValue":"logging"}},{"key":"feature","value":{"stringValue":"processors:exclude_logs"}},{"key":"key","value":{"stringValue":"[20].enabled"}},{"key":"value","value":{"stringValue":"true"}}],"asInt":"1"},{"attributes":[{"key":"module","value":{"stringValue":"logging"}},{"key":"feature","value":{"stringValue":"processors:exclude_logs"}},{"key":"key","value":{"stringValue":"[20].match_any.__length"}},{"key":"value","value":{"stringValue":"1"}}],"asInt":"1"},{"attributes":[{"key":"module","value":{"stringValue":"logging"}},{"key":"feature","value":{"stringValue":"processors:exclude_logs"}},{"key":"key","value":{"stringValue":"[21].enabled"}},{"key":"value","value":{"stringValue":"true"}}],"asInt":"1"},{"attributes":[{"key":"module","value":{"stringValue":"logging"}},{"key":"feature","value":{"stringValue":"processors:exclude_logs"}},{"key":"key","value":{"stringValue":"[21].match_any.__length"}},{"key":"value","value":{"stringValue":"1"}}],"asInt":"1"},{"attributes":[{"key":"module","value":{"stringValue":"logging"}},{"key":"feature","value":{"stringValue":"processors:
exclude_logs"}},{"key":"key","value":{"stringValue":"[22].enabled"}},{"key":"value","value":{"stringValue":"true"}}],"asInt":"1"},{"attributes":[{"key":"module","value":{"stringValue":"logging"}},{"key":"feature","value":{"stringValue":"processors:exclude_logs"}},{"key":"key","value":{"stringValue":"[22].match_any.__length"}},{"key":"value","value":{"stringValue":"1"}}],"asInt":"1"}]}}]}]}]} \ No newline at end of file diff --git a/confgenerator/testdata/goldens/logging-otel-processor_exclude_logs/golden/linux-gpu/features.yaml b/confgenerator/testdata/goldens/logging-otel-processor_exclude_logs/golden/linux-gpu/features.yaml new file mode 100644 index 0000000000..94f56f78c5 --- /dev/null +++ b/confgenerator/testdata/goldens/logging-otel-processor_exclude_logs/golden/linux-gpu/features.yaml @@ -0,0 +1,208 @@ +- module: logging + feature: service:pipelines + key: default_pipeline_overridden + value: "false" +- module: metrics + feature: service:pipelines + key: default_pipeline_overridden + value: "false" +- module: global + feature: default:self_log + key: default_self_log_file_collection + value: "true" +- module: logging + feature: service:otel_logging + key: otel_logging_supported_config + value: "true" +- module: logging + feature: receivers:files + key: "[0].enabled" + value: "true" +- module: logging + feature: receivers:files + key: "[0].include_paths.__length" + value: "1" +- module: logging + feature: processors:exclude_logs + key: "[0].enabled" + value: "true" +- module: logging + feature: processors:exclude_logs + key: "[0].match_any.__length" + value: "1" +- module: logging + feature: processors:exclude_logs + key: "[1].enabled" + value: "true" +- module: logging + feature: processors:exclude_logs + key: "[1].match_any.__length" + value: "1" +- module: logging + feature: processors:exclude_logs + key: "[2].enabled" + value: "true" +- module: logging + feature: processors:exclude_logs + key: "[2].match_any.__length" + value: "2" +- module: logging + feature: processors:exclude_logs + key: "[3].enabled" + value: "true" +- module: logging + feature: processors:exclude_logs + key: "[3].match_any.__length" + value: "1" +- module: logging + feature: processors:exclude_logs + key: "[4].enabled" + value: "true" +- module: logging + feature: processors:exclude_logs + key: "[4].match_any.__length" + value: "1" +- module: logging + feature: processors:exclude_logs + key: "[5].enabled" + value: "true" +- module: logging + feature: processors:exclude_logs + key: "[5].match_any.__length" + value: "1" +- module: logging + feature: processors:exclude_logs + key: "[6].enabled" + value: "true" +- module: logging + feature: processors:exclude_logs + key: "[6].match_any.__length" + value: "1" +- module: logging + feature: processors:exclude_logs + key: "[7].enabled" + value: "true" +- module: logging + feature: processors:exclude_logs + key: "[7].match_any.__length" + value: "1" +- module: logging + feature: processors:exclude_logs + key: "[8].enabled" + value: "true" +- module: logging + feature: processors:exclude_logs + key: "[8].match_any.__length" + value: "1" +- module: logging + feature: processors:exclude_logs + key: "[9].enabled" + value: "true" +- module: logging + feature: processors:exclude_logs + key: "[9].match_any.__length" + value: "1" +- module: logging + feature: processors:exclude_logs + key: "[10].enabled" + value: "true" +- module: logging + feature: processors:exclude_logs + key: "[10].match_any.__length" + value: "1" +- module: logging + feature: processors:exclude_logs + 
key: "[11].enabled" + value: "true" +- module: logging + feature: processors:exclude_logs + key: "[11].match_any.__length" + value: "1" +- module: logging + feature: processors:exclude_logs + key: "[12].enabled" + value: "true" +- module: logging + feature: processors:exclude_logs + key: "[12].match_any.__length" + value: "1" +- module: logging + feature: processors:exclude_logs + key: "[13].enabled" + value: "true" +- module: logging + feature: processors:exclude_logs + key: "[13].match_any.__length" + value: "1" +- module: logging + feature: processors:exclude_logs + key: "[14].enabled" + value: "true" +- module: logging + feature: processors:exclude_logs + key: "[14].match_any.__length" + value: "1" +- module: logging + feature: processors:exclude_logs + key: "[15].enabled" + value: "true" +- module: logging + feature: processors:exclude_logs + key: "[15].match_any.__length" + value: "1" +- module: logging + feature: processors:exclude_logs + key: "[16].enabled" + value: "true" +- module: logging + feature: processors:exclude_logs + key: "[16].match_any.__length" + value: "1" +- module: logging + feature: processors:exclude_logs + key: "[17].enabled" + value: "true" +- module: logging + feature: processors:exclude_logs + key: "[17].match_any.__length" + value: "1" +- module: logging + feature: processors:exclude_logs + key: "[18].enabled" + value: "true" +- module: logging + feature: processors:exclude_logs + key: "[18].match_any.__length" + value: "1" +- module: logging + feature: processors:exclude_logs + key: "[19].enabled" + value: "true" +- module: logging + feature: processors:exclude_logs + key: "[19].match_any.__length" + value: "1" +- module: logging + feature: processors:exclude_logs + key: "[20].enabled" + value: "true" +- module: logging + feature: processors:exclude_logs + key: "[20].match_any.__length" + value: "1" +- module: logging + feature: processors:exclude_logs + key: "[21].enabled" + value: "true" +- module: logging + feature: processors:exclude_logs + key: "[21].match_any.__length" + value: "1" +- module: logging + feature: processors:exclude_logs + key: "[22].enabled" + value: "true" +- module: logging + feature: processors:exclude_logs + key: "[22].match_any.__length" + value: "1" diff --git a/confgenerator/testdata/goldens/invalid-logging-otel-processor_modify_fields_ruby_regex/golden/linux-gpu/fluent_bit_main.conf b/confgenerator/testdata/goldens/logging-otel-processor_exclude_logs/golden/linux-gpu/fluent_bit_main.conf similarity index 100% rename from confgenerator/testdata/goldens/invalid-logging-otel-processor_modify_fields_ruby_regex/golden/linux-gpu/fluent_bit_main.conf rename to confgenerator/testdata/goldens/logging-otel-processor_exclude_logs/golden/linux-gpu/fluent_bit_main.conf diff --git a/confgenerator/testdata/goldens/logging-otel-processor_exclude_logs/golden/linux-gpu/fluent_bit_parser.conf b/confgenerator/testdata/goldens/logging-otel-processor_exclude_logs/golden/linux-gpu/fluent_bit_parser.conf new file mode 100644 index 0000000000..e6c4f531ea --- /dev/null +++ b/confgenerator/testdata/goldens/logging-otel-processor_exclude_logs/golden/linux-gpu/fluent_bit_parser.conf @@ -0,0 +1,13 @@ +[PARSER] + Format regex + Name ops-agent-fluent-bit.fluent-bit-self-log-regex-parsing + Regex (?\[[ ]*(?