From f23479d05533c1b4116f5ff1a30f4aea5831e041 Mon Sep 17 00:00:00 2001 From: Christos Markou Date: Wed, 5 Feb 2025 16:38:18 +0200 Subject: [PATCH 01/14] [chore][pkg/stanza] remove code for stable filelog.container.removeOriginalTimeField fg (#37645) `filelog.container.removeOriginalTimeField` was moved to `stable` in `v0.118.0`. We can remove it's implementation entirely and cleanup the package. Fixes https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/33389. Signed-off-by: ChrsMark --- pkg/stanza/operator/parser/container/config.go | 17 +++-------------- 1 file changed, 3 insertions(+), 14 deletions(-) diff --git a/pkg/stanza/operator/parser/container/config.go b/pkg/stanza/operator/parser/container/config.go index e2263aafb182..1eeb08f3685b 100644 --- a/pkg/stanza/operator/parser/container/config.go +++ b/pkg/stanza/operator/parser/container/config.go @@ -8,7 +8,6 @@ import ( "sync" "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/collector/featuregate" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/entry" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/errors" @@ -19,19 +18,9 @@ import ( ) const ( - operatorType = "container" - recombineSourceIdentifier = attrs.LogFilePath - recombineIsLastEntry = "attributes.logtag == 'F'" - removeOriginalTimeFieldFeatureFlag = "filelog.container.removeOriginalTimeField" -) - -var _ = featuregate.GlobalRegistry().MustRegister( - removeOriginalTimeFieldFeatureFlag, - featuregate.StageStable, - featuregate.WithRegisterDescription("When enabled, deletes the original `time` field from the Log Attributes. 
Time is parsed to Timestamp field, which should be used instead."), - featuregate.WithRegisterReferenceURL("https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/33389"), - featuregate.WithRegisterFromVersion("v0.105.0"), - featuregate.WithRegisterToVersion("v0.118.0"), + operatorType = "container" + recombineSourceIdentifier = attrs.LogFilePath + recombineIsLastEntry = "attributes.logtag == 'F'" ) func init() { From 6d870481315f3b94bec5e9169414e7d84089508c Mon Sep 17 00:00:00 2001 From: odubajDT <93584209+odubajDT@users.noreply.github.com> Date: Wed, 5 Feb 2025 15:38:26 +0100 Subject: [PATCH 02/14] [receiver/hostmetrics] remove `receiver.hostmetrics.normalizeProcessCPUUtilization` feature gate (#37641) #### Description #### Link to tracking issue Fixes #34763 Signed-off-by: odubajDT --- .chloggen/hostmetricsreceiver-feature.yaml | 27 +++++++++++++++++++ .../ucal/cpu_utilization_calculator.go | 12 --------- 2 files changed, 27 insertions(+), 12 deletions(-) create mode 100644 .chloggen/hostmetricsreceiver-feature.yaml diff --git a/.chloggen/hostmetricsreceiver-feature.yaml b/.chloggen/hostmetricsreceiver-feature.yaml new file mode 100644 index 000000000000..faf2e3ea10f0 --- /dev/null +++ b/.chloggen/hostmetricsreceiver-feature.yaml @@ -0,0 +1,27 @@ +# Use this changelog template to create an entry for release notes. + +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: breaking + +# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver) +component: receiver/hostmetrics + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: "Remove receiver.hostmetrics.normalizeProcessCPUUtilization feature gate" + +# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists. 
+issues: [34763] + +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. +subtext: + +# If your change doesn't affect end users or the exported elements of any package, +# you should instead start your pull request title with [chore] or use the "Skip Changelog" label. +# Optional: The change log or logs in which this entry should be included. +# e.g. '[user]' or '[user, api]' +# Include 'user' if the change is relevant to end users. +# Include 'api' if there is a change to a library API. +# Default: '[user]' +change_logs: [] diff --git a/receiver/hostmetricsreceiver/internal/scraper/processscraper/ucal/cpu_utilization_calculator.go b/receiver/hostmetricsreceiver/internal/scraper/processscraper/ucal/cpu_utilization_calculator.go index 946ca96dafb1..0de9e0541f2b 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/processscraper/ucal/cpu_utilization_calculator.go +++ b/receiver/hostmetricsreceiver/internal/scraper/processscraper/ucal/cpu_utilization_calculator.go @@ -8,21 +8,9 @@ import ( "time" "github.com/shirou/gopsutil/v4/cpu" - "go.opentelemetry.io/collector/featuregate" "go.opentelemetry.io/collector/pdata/pcommon" ) -func init() { - _ = featuregate.GlobalRegistry().MustRegister( - "receiver.hostmetrics.normalizeProcessCPUUtilization", - featuregate.StageStable, - featuregate.WithRegisterDescription("When enabled, normalizes the process.cpu.utilization metric onto the interval [0-1] by dividing the value by the number of logical processors."), - featuregate.WithRegisterFromVersion("v0.97.0"), - featuregate.WithRegisterToVersion("v0.112.0"), - featuregate.WithRegisterReferenceURL("https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/31368"), - ) -} - // CPUUtilization stores the utilization percents [0-1] for the different cpu states type CPUUtilization struct { User 
float64 From 1ff2f3fc575d1f5bde8759ac738f50c4ddf161d8 Mon Sep 17 00:00:00 2001 From: ms-hujia <48512395+ms-hujia@users.noreply.github.com> Date: Wed, 5 Feb 2025 23:14:06 +0800 Subject: [PATCH 03/14] [bearertokenauthextension] Load token lazily for gRPC AUTH to fix token refresh issue (#36749) #### Description For the gRPC client AUTH part of bearertokenauthextension, there's a token refresh issue which causes all telemetry rejected by server after a period of time. The old token is always used even if the token is refreshed in filesystem. The root cause is the token value is cached during the `PerRPCCredentials()` instead of retrieving the latest. This PR aims to fix it by loading the token lazily in `GetRequestMetadata()`. #### Link to tracking issue N/A. #### Testing After I built a new docker image with this fix and tested in my environment, the token refresh issue didn't happen again after the first token expired. #### Documentation N/A. --- ...nauthextension-fix-grpc-token-refresh.yaml | 27 ++++++++++++ .../bearertokenauth.go | 6 +-- .../bearertokenauth_test.go | 42 ++++++++++++++++--- 3 files changed, 67 insertions(+), 8 deletions(-) create mode 100644 .chloggen/bearertokenauthextension-fix-grpc-token-refresh.yaml diff --git a/.chloggen/bearertokenauthextension-fix-grpc-token-refresh.yaml b/.chloggen/bearertokenauthextension-fix-grpc-token-refresh.yaml new file mode 100644 index 000000000000..45dfd929d294 --- /dev/null +++ b/.chloggen/bearertokenauthextension-fix-grpc-token-refresh.yaml @@ -0,0 +1,27 @@ +# Use this changelog template to create an entry for release notes. + +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: bug_fix + +# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver) +component: bearertokenauthextension + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). 
+note: Load token lazily for gRPC AUTH to fix token refresh issue + +# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists. +issues: [36749] + +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. +subtext: + +# If your change doesn't affect end users or the exported elements of any package, +# you should instead start your pull request title with [chore] or use the "Skip Changelog" label. +# Optional: The change log or logs in which this entry should be included. +# e.g. '[user]' or '[user, api]' +# Include 'user' if the change is relevant to end users. +# Include 'api' if there is a change to a library API. +# Default: '[user]' +change_logs: [] diff --git a/extension/bearertokenauthextension/bearertokenauth.go b/extension/bearertokenauthextension/bearertokenauth.go index e7a8ad3e4212..28ecbf965e08 100644 --- a/extension/bearertokenauthextension/bearertokenauth.go +++ b/extension/bearertokenauthextension/bearertokenauth.go @@ -23,12 +23,12 @@ var _ credentials.PerRPCCredentials = (*PerRPCAuth)(nil) // PerRPCAuth is a gRPC credentials.PerRPCCredentials implementation that returns an 'authorization' header. type PerRPCAuth struct { - metadata map[string]string + auth *BearerTokenAuth } // GetRequestMetadata returns the request metadata to be used with the RPC. func (c *PerRPCAuth) GetRequestMetadata(context.Context, ...string) (map[string]string, error) { - return c.metadata, nil + return map[string]string{"authorization": c.auth.authorizationValue()}, nil } // RequireTransportSecurity always returns true for this implementation. Passing bearer tokens in plain-text connections is a bad idea. 
@@ -171,7 +171,7 @@ func (b *BearerTokenAuth) Shutdown(_ context.Context) error { // PerRPCCredentials returns PerRPCAuth an implementation of credentials.PerRPCCredentials that func (b *BearerTokenAuth) PerRPCCredentials() (credentials.PerRPCCredentials, error) { return &PerRPCAuth{ - metadata: map[string]string{"authorization": b.authorizationValue()}, + auth: b, }, nil } diff --git a/extension/bearertokenauthextension/bearertokenauth_test.go b/extension/bearertokenauthextension/bearertokenauth_test.go index a6257a8511bd..b454d9580ad4 100644 --- a/extension/bearertokenauthextension/bearertokenauth_test.go +++ b/extension/bearertokenauthextension/bearertokenauth_test.go @@ -18,15 +18,19 @@ import ( ) func TestPerRPCAuth(t *testing.T) { - metadata := map[string]string{ - "authorization": "Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9...", - } + cfg := createDefaultConfig().(*Config) + cfg.BearerToken = "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9..." // test meta data is properly - perRPCAuth := &PerRPCAuth{metadata: metadata} + bauth := newBearerTokenAuth(cfg, nil) + assert.NotNil(t, bauth) + perRPCAuth := &PerRPCAuth{auth: bauth} md, err := perRPCAuth.GetRequestMetadata(context.Background()) assert.NoError(t, err) - assert.Equal(t, md, metadata) + expectedMetadata := map[string]string{ + "authorization": "Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9...", + } + assert.Equal(t, expectedMetadata, md) // always true ok := perRPCAuth.RequireTransportSecurity() @@ -202,6 +206,34 @@ func TestBearerTokenFileContentUpdate(t *testing.T) { assert.Equal(t, authHeaderValue, fmt.Sprintf("%s %s", scheme, string(token))) } +func TestBearerTokenUpdateForGrpc(t *testing.T) { + // prepare + cfg := createDefaultConfig().(*Config) + cfg.BearerToken = "1234" + + bauth := newBearerTokenAuth(cfg, zaptest.NewLogger(t)) + assert.NotNil(t, bauth) + + perRPCAuth, err := bauth.PerRPCCredentials() + assert.NoError(t, err) + + ctx := context.Background() + assert.NoError(t, bauth.Start(ctx, 
componenttest.NewNopHost())) + + // initial token, OK + md, err := perRPCAuth.GetRequestMetadata(context.Background()) + assert.NoError(t, err) + assert.Equal(t, map[string]string{"authorization": "Bearer " + "1234"}, md) + + // update the token + bauth.setAuthorizationValue("5678") + md, err = perRPCAuth.GetRequestMetadata(context.Background()) + assert.NoError(t, err) + assert.Equal(t, map[string]string{"authorization": "Bearer " + "5678"}, md) + + assert.NoError(t, bauth.Shutdown(context.Background())) +} + func TestBearerServerAuthenticateWithScheme(t *testing.T) { const token = "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9..." // #nosec cfg := createDefaultConfig().(*Config) From 10a547a96296f438f649e2ab8fcea005acb7fed7 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Wed, 5 Feb 2025 10:14:46 -0500 Subject: [PATCH 04/14] chore(deps): update lycheeverse/lychee-action digest to f613c4a (#37692) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This PR contains the following updates: | Package | Type | Update | Change | |---|---|---|---| | lycheeverse/lychee-action | action | digest | `f796c8b` -> `f613c4a` | --- > [!WARNING] > Some dependencies could not be looked up. Check the Dependency Dashboard for more information. --- ### Configuration 📅 **Schedule**: Branch creation - "on tuesday" (UTC), Automerge - At any time (no schedule defined). 🚦 **Automerge**: Disabled by config. Please merge this manually once you are satisfied. ♻ **Rebasing**: Whenever PR becomes conflicted, or you tick the rebase/retry checkbox. 🔕 **Ignore**: Close this PR and you won't be reminded about this update again. --- - [ ] If you want to rebase/retry this PR, check this box --- This PR was generated by [Mend Renovate](https://mend.io/renovate/). View the [repository job log](https://developer.mend.io/github/open-telemetry/opentelemetry-collector-contrib). 
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> Co-authored-by: Yang Song --- .github/workflows/changelog.yml | 2 +- .github/workflows/check-links.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/changelog.yml b/.github/workflows/changelog.yml index f63252da003a..80ab0e520aa9 100644 --- a/.github/workflows/changelog.yml +++ b/.github/workflows/changelog.yml @@ -87,7 +87,7 @@ jobs: - name: Link Checker if: ${{ !contains(github.event.pull_request.labels.*.name, 'dependencies') && !contains(github.event.pull_request.labels.*.name, 'Skip Changelog') && !contains(github.event.pull_request.title, '[chore]')}} id: lychee - uses: lycheeverse/lychee-action@f796c8b7d468feb9b8c0a46da3fac0af6874d374 + uses: lycheeverse/lychee-action@f613c4a64e50d792e0b31ec34bbcbba12263c6a6 with: args: "--verbose --no-progress ./changelog_preview.md --config .github/lychee.toml" failIfEmpty: false diff --git a/.github/workflows/check-links.yaml b/.github/workflows/check-links.yaml index 17aa274c1369..88ee6a871fbc 100644 --- a/.github/workflows/check-links.yaml +++ b/.github/workflows/check-links.yaml @@ -42,7 +42,7 @@ jobs: - name: Link Checker id: lychee - uses: lycheeverse/lychee-action@f796c8b7d468feb9b8c0a46da3fac0af6874d374 + uses: lycheeverse/lychee-action@f613c4a64e50d792e0b31ec34bbcbba12263c6a6 with: args: "--verbose --no-progress ${{needs.changedfiles.outputs.md_files}} ${{needs.changedfiles.outputs.yaml_files}} --config .github/lychee.toml" failIfEmpty: false From 3cc420cb5558f3b7176e8b356be55d22332a7271 Mon Sep 17 00:00:00 2001 From: Ankit Patel <8731662+ankitpatel96@users.noreply.github.com> Date: Wed, 5 Feb 2025 12:07:51 -0500 Subject: [PATCH 05/14] [chore] Schema Processor Revamp [Part 3] - Translation (#37403) #### Description This is a slice of changes from https://github.com/open-telemetry/opentelemetry-collector-contrib/pull/35248 This PR details Translation which defines the complete abstraction of 
schema translation file. It manages Revisions to move between two versions of a schema #### Testing Unit tests --- processor/schemaprocessor/go.mod | 10 + processor/schemaprocessor/go.sum | 4 + .../internal/translation/testdata/1.6.1 | 7 + .../internal/translation/testdata/1.9.0 | 16 + .../testdata/complex_changeset.yml | 83 +++ .../internal/translation/translation.go | 333 +++++++++++ .../translation/translation_helpers_test.go | 370 ++++++++++++ .../internal/translation/translation_noop.go | 40 ++ .../translation/translation_race_test.go | 108 ++++ .../internal/translation/translation_test.go | 555 ++++++++++++++++++ .../internal/translation/version.go | 20 + 11 files changed, 1546 insertions(+) create mode 100644 processor/schemaprocessor/internal/translation/testdata/1.6.1 create mode 100644 processor/schemaprocessor/internal/translation/testdata/1.9.0 create mode 100644 processor/schemaprocessor/internal/translation/testdata/complex_changeset.yml create mode 100644 processor/schemaprocessor/internal/translation/translation.go create mode 100644 processor/schemaprocessor/internal/translation/translation_helpers_test.go create mode 100644 processor/schemaprocessor/internal/translation/translation_noop.go create mode 100644 processor/schemaprocessor/internal/translation/translation_race_test.go create mode 100644 processor/schemaprocessor/internal/translation/translation_test.go diff --git a/processor/schemaprocessor/go.mod b/processor/schemaprocessor/go.mod index f99a90571b6f..9c7dfac2a207 100644 --- a/processor/schemaprocessor/go.mod +++ b/processor/schemaprocessor/go.mod @@ -4,6 +4,7 @@ go 1.22.0 require ( github.com/google/go-cmp v0.6.0 + github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.118.0 github.com/stretchr/testify v1.10.0 go.opentelemetry.io/collector/component v0.119.0 go.opentelemetry.io/collector/component/componenttest v0.119.0 @@ -21,6 +22,8 @@ require ( ) require ( + github.com/Masterminds/semver/v3 v3.3.1 // indirect + 
github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect github.com/fsnotify/fsnotify v1.8.0 // indirect @@ -39,6 +42,7 @@ require ( github.com/mitchellh/reflectwalk v1.0.2 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.119.0 // indirect github.com/pierrec/lz4/v4 v4.1.22 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/rs/cors v1.11.1 // indirect @@ -77,3 +81,9 @@ retract ( v0.76.1 v0.65.0 ) + +replace github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil => ../../pkg/pdatautil + +replace github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest => ../../pkg/pdatatest + +replace github.com/open-telemetry/opentelemetry-collector-contrib/pkg/golden => ../../pkg/golden diff --git a/processor/schemaprocessor/go.sum b/processor/schemaprocessor/go.sum index 00d0fadcebb9..b4c5ae22126c 100644 --- a/processor/schemaprocessor/go.sum +++ b/processor/schemaprocessor/go.sum @@ -1,3 +1,7 @@ +github.com/Masterminds/semver/v3 v3.3.1 h1:QtNSWtVZ3nBfk8mAOu/B6v7FMJ+NHTIgUPi7rj+4nv4= +github.com/Masterminds/semver/v3 v3.3.1/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= diff --git a/processor/schemaprocessor/internal/translation/testdata/1.6.1 b/processor/schemaprocessor/internal/translation/testdata/1.6.1 new file mode 100644 index 
000000000000..d64ad41dc893 --- /dev/null +++ b/processor/schemaprocessor/internal/translation/testdata/1.6.1 @@ -0,0 +1,7 @@ +file_format: 1.0.0 +schema_url: https://opentelemetry.io/schemas/1.6.1 +versions: + 1.6.1: + 1.5.0: + 1.4.0: + 1.0.0: diff --git a/processor/schemaprocessor/internal/translation/testdata/1.9.0 b/processor/schemaprocessor/internal/translation/testdata/1.9.0 new file mode 100644 index 000000000000..fa2e5de525d0 --- /dev/null +++ b/processor/schemaprocessor/internal/translation/testdata/1.9.0 @@ -0,0 +1,16 @@ +file_format: 1.0.0 +schema_url: https://opentelemetry.io/schemas/1.9.0 +versions: + 1.9.0: + 1.8.0: + spans: + changes: + - rename_attributes: + attribute_map: + db.cassandra.keyspace: db.name + db.hbase.namespace: db.name + 1.7.0: + 1.6.1: + 1.5.0: + 1.4.0: + 1.0.0: diff --git a/processor/schemaprocessor/internal/translation/testdata/complex_changeset.yml b/processor/schemaprocessor/internal/translation/testdata/complex_changeset.yml new file mode 100644 index 000000000000..c5caf6b07f59 --- /dev/null +++ b/processor/schemaprocessor/internal/translation/testdata/complex_changeset.yml @@ -0,0 +1,83 @@ +--- +file_format: 1.0.0 +schema_url: https://example.com/1.7.0 +versions: + 1.7.0: + logs: + changes: + - rename_attributes: + attribute_map: + process.stacktrace: application.stacktrace + 1.5.0: + metrics: + changes: + - rename_metrics: + container.restart: container.restart.total + 1.4.0: # The time we cared about privacy within spans and span events + spans: + changes: + - rename_attributes: + attribute_map: + user.operation: privacy.user.operation + span_events: + changes: + - rename_attributes: + attribute_map: + net.user.ip: privacy.net.user.ip + 1.2.0: + all: + changes: + - rename_attributes: + attribute_map: + test.suite: test.name + 1.1.0: + all: + changes: + - rename_attributes: + attribute_map: + test-suite: test.suite + resources: + changes: + - rename_attributes: + attribute_map: + resource-description: resource.description + 
spans: + changes: + - rename_attributes: + attribute_map: + operation: user.operation + - rename_attributes: + attribute_map: + operation.failure: operation.failed.reason + apply_to_spans: + - HTTP GET + - HTTP POST + span_events: + changes: + - rename_events: + name_map: + stacktrace: stack_trace + - rename_attributes: + attribute_map: + net.peer.ip : net.user.ip + - rename_attributes: + attribute_map: + proxy-addr: proxy.addr + apply_to_events: + - proxy.dial + metrics: + changes: + - rename_metrics: + container.respawn: container.restart + - rename_attributes: + attribute_map: + container-exit-code: container.exit.status + apply_to_metrics: + - container.stop + - container.restart + logs: + changes: + - rename_attributes: + attribute_map: + go.stacktrace: process.stacktrace + 1.0.0: diff --git a/processor/schemaprocessor/internal/translation/translation.go b/processor/schemaprocessor/internal/translation/translation.go new file mode 100644 index 000000000000..71aeabeab625 --- /dev/null +++ b/processor/schemaprocessor/internal/translation/translation.go @@ -0,0 +1,333 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package translation // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/schemaprocessor/internal/translation" + +import ( + "io" + "sort" + + "go.opentelemetry.io/collector/pdata/plog" + "go.opentelemetry.io/collector/pdata/pmetric" + "go.opentelemetry.io/collector/pdata/ptrace" + encoder "go.opentelemetry.io/otel/schema/v1.0" + "go.opentelemetry.io/otel/schema/v1.0/ast" + "go.uber.org/multierr" + "go.uber.org/zap" + + "github.com/open-telemetry/opentelemetry-collector-contrib/processor/schemaprocessor/internal/alias" +) + +// Translation defines the complete abstraction of schema translation file +// that is defined as part of the https://opentelemetry.io/docs/reference/specification/schemas/file_format_v1.0.0/ +// Each instance of Translation is "Target Aware", meaning that given a 
schemaURL as an input +// it will convert from the given input, to the configured target. +// +// Note: as an optimisation, once a Translation is returned from the manager, +// +// there is no checking the incoming signals if the schema family is a match. +type Translation interface { + // SupportedVersion checks to see if the provided version is defined as part + // of this translation since it is useful to know if the translation is missing + // updates. + SupportedVersion(v *Version) bool + + // ApplyAllResourceChanges will modify the resource part of the incoming signals + // This applies to all telemetry types and should be applied there + ApplyAllResourceChanges(in alias.Resource, inSchemaURL string) error + + // ApplyScopeSpanChanges will modify all spans and span events within the incoming signals + ApplyScopeSpanChanges(in ptrace.ScopeSpans, inSchemaURL string) error + + // ApplyScopeLogChanges will modify all logs within the incoming signal + ApplyScopeLogChanges(in plog.ScopeLogs, inSchemaURL string) error + + // ApplyScopeMetricChanges will update all metrics including + // histograms, exponential histograms, summaries, sum and gauges + ApplyScopeMetricChanges(in pmetric.ScopeMetrics, inSchemaURL string) error +} + +type translator struct { + targetSchemaURL string + target *Version + indexes map[Version]int // map from version to index in revisions containing the pertinent Version + revisions []RevisionV1 + + log *zap.Logger +} + +type iterator func() (r RevisionV1, more bool) + +var ( + _ sort.Interface = (*translator)(nil) + _ Translation = (*translator)(nil) +) + +func (t *translator) loadTranslation(content *ast.Schema) error { + var errs error + t.log.Debug("Updating translation") + for v, def := range content.Versions { + version, err := NewVersion(string(v)) + if err != nil { + errs = multierr.Append(errs, err) + continue + } + _, exist := t.indexes[*version] + if exist { + continue + } + t.log.Debug("Creating new entry", + 
zap.Stringer("version", version), + ) + t.indexes[*version], t.revisions = len(t.revisions), append(t.revisions, + *NewRevision(version, def), + ) + } + sort.Sort(t) + + t.log.Debug("Finished update") + return errs +} + +func newTranslatorFromSchema(log *zap.Logger, targetSchemaURL string, schemaFileSchema *ast.Schema) (*translator, error) { + _, target, err := GetFamilyAndVersion(targetSchemaURL) + if err != nil { + return nil, err + } + t := &translator{ + targetSchemaURL: targetSchemaURL, + target: target, + log: log, + indexes: map[Version]int{}, + } + + if err := t.loadTranslation(schemaFileSchema); err != nil { + return nil, err + } + return t, nil +} + +func newTranslatorFromReader(log *zap.Logger, targetSchemaURL string, content io.Reader) (*translator, error) { + schemaFileSchema, err := encoder.Parse(content) + if err != nil { + return nil, err + } + var t *translator + if t, err = newTranslatorFromSchema(log, targetSchemaURL, schemaFileSchema); err != nil { + return nil, err + } + return t, nil +} + +func (t *translator) Len() int { + return len(t.revisions) +} + +func (t *translator) Less(i, j int) bool { + return t.revisions[i].Version().LessThan(t.revisions[j].Version()) +} + +func (t *translator) Swap(i, j int) { + a, b := t.revisions[i].Version(), t.revisions[j].Version() + t.indexes[*a], t.indexes[*b] = j, i + t.revisions[i], t.revisions[j] = t.revisions[j], t.revisions[i] +} + +func (t *translator) SupportedVersion(v *Version) bool { + _, ok := t.indexes[*v] + return ok +} + +func (t *translator) ApplyAllResourceChanges(resource alias.Resource, inSchemaURL string) error { + _, ver, err := GetFamilyAndVersion(inSchemaURL) + if err != nil { + return err + } + it, status := t.iterator(ver) + for rev, more := it(); more; rev, more = it() { + switch status { + case Update: + err = rev.all.Apply(resource.Resource()) + if err != nil { + return err + } + err = rev.resources.Apply(resource.Resource()) + if err != nil { + return err + } + case Revert: + err 
= rev.resources.Rollback(resource.Resource()) + if err != nil { + return err + } + err = rev.all.Rollback(resource.Resource()) + if err != nil { + return err + } + } + } + resource.SetSchemaUrl(t.targetSchemaURL) + return nil +} + +func (t *translator) ApplyScopeLogChanges(scopeLogs plog.ScopeLogs, inSchemaURL string) error { + _, ver, err := GetFamilyAndVersion(inSchemaURL) + if err != nil { + return err + } + it, status := t.iterator(ver) + if status == NoChange { + return nil + } + for rev, more := it(); more; rev, more = it() { + for l := 0; l < scopeLogs.LogRecords().Len(); l++ { + log := scopeLogs.LogRecords().At(l) + switch status { + case Update: + err = rev.all.Apply(log) + if err != nil { + return err + } + err = rev.logs.Apply(log) + if err != nil { + return err + } + case Revert: + err = rev.logs.Rollback(log) + if err != nil { + return err + } + err = rev.all.Rollback(log) + if err != nil { + return err + } + } + } + } + scopeLogs.SetSchemaUrl(t.targetSchemaURL) + return nil +} + +func (t *translator) ApplyScopeSpanChanges(scopeSpans ptrace.ScopeSpans, inSchemaURL string) error { + _, ver, err := GetFamilyAndVersion(inSchemaURL) + if err != nil { + return err + } + it, status := t.iterator(ver) + for rev, more := it(); more; rev, more = it() { + for i := 0; i < scopeSpans.Spans().Len(); i++ { + span := scopeSpans.Spans().At(i) + switch status { + case Update: + err = rev.all.Apply(span) + if err != nil { + return err + } + err = rev.spans.Apply(span) + if err != nil { + return err + } + for e := 0; e < span.Events().Len(); e++ { + event := span.Events().At(e) + err = rev.all.Apply(event) + if err != nil { + return err + } + } + if err = rev.spanEvents.Apply(span); err != nil { + return err + } + case Revert: + if err = rev.spanEvents.Rollback(span); err != nil { + return err + } + for e := 0; e < span.Events().Len(); e++ { + event := span.Events().At(e) + err = rev.all.Rollback(event) + if err != nil { + return err + } + } + err = 
rev.spans.Rollback(span) + if err != nil { + return err + } + err = rev.all.Rollback(span) + if err != nil { + return err + } + } + } + scopeSpans.SetSchemaUrl(t.targetSchemaURL) + } + return nil +} + +func (t *translator) ApplyScopeMetricChanges(scopeMetrics pmetric.ScopeMetrics, inSchemaURL string) error { + _, ver, err := GetFamilyAndVersion(inSchemaURL) + if err != nil { + return err + } + it, status := t.iterator(ver) + for rev, more := it(); more; rev, more = it() { + for i := 0; i < scopeMetrics.Metrics().Len(); i++ { + metric := scopeMetrics.Metrics().At(i) + switch status { + case Update: + if err = rev.all.Apply(metric); err != nil { + return err + } + if err = rev.metrics.Apply(metric); err != nil { + return err + } + case Revert: + if err = rev.metrics.Rollback(metric); err != nil { + return err + } + if err = rev.all.Rollback(metric); err != nil { + return err + } + } + } + } + scopeMetrics.SetSchemaUrl(t.targetSchemaURL) + return nil +} + +// iterator abstracts the logic to perform the migrations of "From Version to Version". +// The return values an iterator type and translation status that +// should be compared against Revert, Update, NoChange +// to determine what should be applied. +// In the event that the ChangeSet has not yet been created (it is possible on a cold start) +// then the iterator will wait til the update has been made +// +// Note: Once an iterator has been made, the passed context MUST cancel or run to completion +// +// in order for the read lock to be released if either Revert or Upgrade has been returned. 
+func (t *translator) iterator(from *Version) (iterator, int) { + status := from.Compare(t.target) + if status == NoChange || !t.SupportedVersion(from) { + return func() (r RevisionV1, more bool) { return RevisionV1{}, false }, NoChange + } + it, stop := t.indexes[*from], t.indexes[*t.target] + if status == Update { + // In the event of an update, the iterator needs to also run that version + // for the signal to be the correct version. + stop++ + + // we need to not run the starting version to start with, that's already been done! + it++ + } + return func() (RevisionV1, bool) { + // Performs a bounds check and if it has reached stop + if it < 0 || it == len(t.revisions) || it == stop { + return RevisionV1{}, false + } + + r := t.revisions[it] + // The iterator value needs to move the opposite direction of what + // status is defined as so subtracting it to progress the iterator. + it -= status + return r, true + }, status +} diff --git a/processor/schemaprocessor/internal/translation/translation_helpers_test.go b/processor/schemaprocessor/internal/translation/translation_helpers_test.go new file mode 100644 index 000000000000..3f7d1cceaa36 --- /dev/null +++ b/processor/schemaprocessor/internal/translation/translation_helpers_test.go @@ -0,0 +1,370 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package translation + +import ( + "bytes" + "embed" + "fmt" + "io" + "path" + "testing" + + "github.com/stretchr/testify/assert" + "go.opentelemetry.io/collector/pdata/plog" + "go.opentelemetry.io/collector/pdata/pmetric" + "go.opentelemetry.io/collector/pdata/ptrace" +) + +const ( + TranslationVersion190 = "1.9.0" + TranslationVersion161 = "1.6.1" + + prefix = "testdata" +) + +//go:embed testdata +var testdataFiles embed.FS + +func LoadTranslationVersion(tb testing.TB, name string) io.Reader { + tb.Helper() + + f, err := testdataFiles.Open(path.Join(prefix, name)) + if !assert.NoError(tb, err, "Must not error when trying to open file") 
{ + return bytes.NewBuffer(nil) + } + tb.Cleanup(func() { + assert.NoError(tb, f.Close(), "Must not have issues trying to close static file") + }) + return f +} + +func NewExampleLogs(tb testing.TB, at Version) plog.Logs { + tb.Helper() + + schemaURL := fmt.Sprint("https://example.com/", at.String()) + + logs := plog.NewLogs() + + for i := 0; i < 10; i++ { + log := logs.ResourceLogs().AppendEmpty() + log.SetSchemaUrl(schemaURL) + + sl := log.ScopeLogs().AppendEmpty() + sl.SetSchemaUrl(schemaURL) + switch at { + case Version{1, 7, 0}: + log.Resource().Attributes().PutStr("test.name", tb.Name()) + + l := sl.LogRecords().AppendEmpty() + l.Attributes().PutStr("application.stacktrace", "func main() { panic('boom') }") + l.SetSeverityText("ERROR") + l.Body().SetStr("bad program") + case Version{1, 5, 0}, Version{1, 4, 0}: + // No changes to log during this versions + fallthrough + case Version{1, 2, 0}: + log.Resource().Attributes().PutStr("test.name", tb.Name()) + + l := sl.LogRecords().AppendEmpty() + l.Attributes().PutStr("process.stacktrace", "func main() { panic('boom') }") + l.SetSeverityText("ERROR") + l.Body().SetStr("bad program") + case Version{1, 1, 0}: + log.Resource().Attributes().PutStr("test.suite", tb.Name()) + + l := sl.LogRecords().AppendEmpty() + l.Attributes().PutStr("process.stacktrace", "func main() { panic('boom') }") + l.SetSeverityText("ERROR") + l.Body().SetStr("bad program") + case Version{1, 0, 0}: + log.Resource().Attributes().PutStr("test-suite", tb.Name()) + + l := sl.LogRecords().AppendEmpty() + l.Attributes().PutStr("go.stacktrace", "func main() { panic('boom') }") + l.SetSeverityText("ERROR") + l.Body().SetStr("bad program") + default: + tb.Log("Unknown log version provided", at.String()) + tb.FailNow() + } + } + + return logs +} + +func NewExampleMetrics(tb testing.TB, at Version) pmetric.Metrics { + tb.Helper() + + schemaURL := fmt.Sprint("https://example.com/", at.String()) + + metrics := pmetric.NewMetrics() + for i := 0; i < 10; i++ 
{ + metric := metrics.ResourceMetrics().AppendEmpty() + metric.SetSchemaUrl(schemaURL) + + sMetric := metric.ScopeMetrics().AppendEmpty() + sMetric.SetSchemaUrl(schemaURL) + + for j := 0; j < 5; j++ { + switch at { + case Version{1, 7, 0}, Version{1, 5, 0}: + metric.Resource().Attributes().PutStr("test.name", tb.Name()) + + m := sMetric.Metrics().AppendEmpty() + m.SetName("container.restart.total") + m.SetEmptyHistogram() + hist := m.Histogram().DataPoints().AppendEmpty() + hist.Attributes().PutInt("container.exit.status", 124) + + m = sMetric.Metrics().AppendEmpty() + m.SetName("container.restart.total") + m.SetEmptyExponentialHistogram() + ehist := m.ExponentialHistogram().DataPoints().AppendEmpty() + ehist.Attributes().PutInt("container.exit.status", 124) + + m = sMetric.Metrics().AppendEmpty() + m.SetName("container.restart.total") + m.SetEmptySum() + sum := m.Sum().DataPoints().AppendEmpty() + sum.Attributes().PutInt("container.exit.status", 124) + + m = sMetric.Metrics().AppendEmpty() + m.SetName("container.restart.total") + m.SetEmptySummary() + summary := m.Summary().DataPoints().AppendEmpty() + summary.Attributes().PutInt("container.exit.status", 124) + + m = sMetric.Metrics().AppendEmpty() + m.SetName("container.restart.total") + m.SetEmptyGauge() + gauge := m.Gauge().DataPoints().AppendEmpty() + gauge.Attributes().PutInt("container.exit.status", 124) + + case Version{1, 4, 0}, Version{1, 2, 0}: + metric.Resource().Attributes().PutStr("test.name", tb.Name()) + + m := sMetric.Metrics().AppendEmpty() + m.SetName("container.restart") + m.SetEmptyHistogram() + hist := m.Histogram().DataPoints().AppendEmpty() + hist.Attributes().PutInt("container.exit.status", 124) + + m = sMetric.Metrics().AppendEmpty() + m.SetName("container.restart") + m.SetEmptyExponentialHistogram() + ehist := m.ExponentialHistogram().DataPoints().AppendEmpty() + ehist.Attributes().PutInt("container.exit.status", 124) + + m = sMetric.Metrics().AppendEmpty() + 
m.SetName("container.restart") + m.SetEmptySum() + sum := m.Sum().DataPoints().AppendEmpty() + sum.Attributes().PutInt("container.exit.status", 124) + + m = sMetric.Metrics().AppendEmpty() + m.SetName("container.restart") + m.SetEmptySummary() + summary := m.Summary().DataPoints().AppendEmpty() + summary.Attributes().PutInt("container.exit.status", 124) + + m = sMetric.Metrics().AppendEmpty() + m.SetName("container.restart") + m.SetEmptyGauge() + gauge := m.Gauge().DataPoints().AppendEmpty() + gauge.Attributes().PutInt("container.exit.status", 124) + case Version{1, 1, 0}: + metric.Resource().Attributes().PutStr("test.suite", tb.Name()) + + m := sMetric.Metrics().AppendEmpty() + m.SetName("container.restart") + m.SetEmptyHistogram() + hist := m.Histogram().DataPoints().AppendEmpty() + hist.Attributes().PutInt("container.exit.status", 124) + + m = sMetric.Metrics().AppendEmpty() + m.SetName("container.restart") + m.SetEmptyExponentialHistogram() + ehist := m.ExponentialHistogram().DataPoints().AppendEmpty() + ehist.Attributes().PutInt("container.exit.status", 124) + + m = sMetric.Metrics().AppendEmpty() + m.SetName("container.restart") + m.SetEmptySum() + sum := m.Sum().DataPoints().AppendEmpty() + sum.Attributes().PutInt("container.exit.status", 124) + + m = sMetric.Metrics().AppendEmpty() + m.SetName("container.restart") + m.SetEmptySummary() + summary := m.Summary().DataPoints().AppendEmpty() + summary.Attributes().PutInt("container.exit.status", 124) + + m = sMetric.Metrics().AppendEmpty() + m.SetName("container.restart") + m.SetEmptyGauge() + gauge := m.Gauge().DataPoints().AppendEmpty() + gauge.Attributes().PutInt("container.exit.status", 124) + case Version{1, 0, 0}: + metric.Resource().Attributes().PutStr("test-suite", tb.Name()) + + m := sMetric.Metrics().AppendEmpty() + m.SetName("container.respawn") + m.SetEmptyHistogram() + hist := m.Histogram().DataPoints().AppendEmpty() + hist.Attributes().PutInt("container-exit-code", 124) + + m = 
sMetric.Metrics().AppendEmpty() + m.SetName("container.respawn") + m.SetEmptyExponentialHistogram() + ehist := m.ExponentialHistogram().DataPoints().AppendEmpty() + ehist.Attributes().PutInt("container-exit-code", 124) + + m = sMetric.Metrics().AppendEmpty() + m.SetName("container.respawn") + m.SetEmptySum() + sum := m.Sum().DataPoints().AppendEmpty() + sum.Attributes().PutInt("container-exit-code", 124) + + m = sMetric.Metrics().AppendEmpty() + m.SetName("container.respawn") + m.SetEmptySummary() + summary := m.Summary().DataPoints().AppendEmpty() + summary.Attributes().PutInt("container-exit-code", 124) + + m = sMetric.Metrics().AppendEmpty() + m.SetName("container.respawn") + m.SetEmptyGauge() + gauge := m.Gauge().DataPoints().AppendEmpty() + gauge.Attributes().PutInt("container-exit-code", 124) + default: + tb.Log("Unknown metric version provided", at.String()) + tb.FailNow() + } + } + } + return metrics +} + +func NewExampleSpans(tb testing.TB, at Version) ptrace.Traces { + tb.Helper() + + schemaURL := fmt.Sprint("https://example.com/", at.String()) + traces := ptrace.NewTraces() + + for i := 0; i < 10; i++ { + traces := traces.ResourceSpans().AppendEmpty() + traces.SetSchemaUrl(schemaURL) + + spans := traces.ScopeSpans().AppendEmpty() + spans.SetSchemaUrl(schemaURL) + switch at { + case Version{1, 7, 0}, Version{1, 5, 0}, Version{1, 4, 0}: + traces.Resource().Attributes().PutStr("test.name", tb.Name()) + + span := spans.Spans().AppendEmpty() + span.SetName("POST /user/login") + span.Attributes().PutStr("privacy.user.operation", "password encryption") + + span = spans.Spans().AppendEmpty() + span.SetName("HTTP GET") + span.Attributes().PutStr("privacy.user.operation", "password encryption") + span.Attributes().PutStr("operation.failed.reason", "password too short") + + span = spans.Spans().AppendEmpty() + span.SetName("HTTP GET1") + span.Attributes().PutStr("privacy.user.operation", "password encryption") + span.Attributes().PutStr("operation.failure", 
"password too short") + + event := span.Events().AppendEmpty() + event.SetName("stack_trace") + event.Attributes().PutStr("privacy.net.user.ip", "127.0.0.1") + + event = span.Events().AppendEmpty() + event.SetName("proxy.dial") + event.Attributes().PutStr("proxy.addr", "127.0.0.1:5000") + event.Attributes().PutStr("privacy.net.user.ip", "127.0.0.1") + case Version{1, 2, 0}: + traces.Resource().Attributes().PutStr("test.name", tb.Name()) + + span := spans.Spans().AppendEmpty() + span.SetName("POST /user/login") + span.Attributes().PutStr("user.operation", "password encryption") + + span = spans.Spans().AppendEmpty() + span.SetName("HTTP GET") + span.Attributes().PutStr("user.operation", "password encryption") + span.Attributes().PutStr("operation.failed.reason", "password too short") + + span = spans.Spans().AppendEmpty() + span.SetName("HTTP GET1") + span.Attributes().PutStr("user.operation", "password encryption") + span.Attributes().PutStr("operation.failure", "password too short") + + event := span.Events().AppendEmpty() + event.SetName("stack_trace") + event.Attributes().PutStr("net.user.ip", "127.0.0.1") + + event = span.Events().AppendEmpty() + event.SetName("proxy.dial") + event.Attributes().PutStr("proxy.addr", "127.0.0.1:5000") + event.Attributes().PutStr("net.user.ip", "127.0.0.1") + case Version{1, 1, 0}: + traces.Resource().Attributes().PutStr("test.suite", tb.Name()) + + span := spans.Spans().AppendEmpty() + span.SetName("POST /user/login") + span.Attributes().PutStr("user.operation", "password encryption") + + span = spans.Spans().AppendEmpty() + span.SetName("HTTP GET") + span.Attributes().PutStr("user.operation", "password encryption") + span.Attributes().PutStr("operation.failed.reason", "password too short") + + span = spans.Spans().AppendEmpty() + span.SetName("HTTP GET1") + span.Attributes().PutStr("user.operation", "password encryption") + span.Attributes().PutStr("operation.failure", "password too short") + + event := 
span.Events().AppendEmpty() + event.SetName("stack_trace") + event.Attributes().PutStr("net.user.ip", "127.0.0.1") + + event = span.Events().AppendEmpty() + event.SetName("proxy.dial") + event.Attributes().PutStr("proxy.addr", "127.0.0.1:5000") + event.Attributes().PutStr("net.user.ip", "127.0.0.1") + case Version{1, 0, 0}: + traces.Resource().Attributes().PutStr("test-suite", tb.Name()) + + span := spans.Spans().AppendEmpty() + span.SetName("POST /user/login") + span.Attributes().PutStr("operation", "password encryption") + + span = spans.Spans().AppendEmpty() + span.SetName("HTTP GET") + span.Attributes().PutStr("operation", "password encryption") + span.Attributes().PutStr("operation.failure", "password too short") + + span = spans.Spans().AppendEmpty() + span.SetName("HTTP GET1") + span.Attributes().PutStr("operation", "password encryption") + span.Attributes().PutStr("operation.failure", "password too short") + + event := span.Events().AppendEmpty() + event.SetName("stacktrace") + event.Attributes().PutStr("net.peer.ip", "127.0.0.1") + + event = span.Events().AppendEmpty() + event.SetName("proxy.dial") + event.Attributes().PutStr("proxy-addr", "127.0.0.1:5000") + event.Attributes().PutStr("net.peer.ip", "127.0.0.1") + default: + tb.Log("Unknown trace version provided", at.String()) + tb.FailNow() + } + } + + return traces +} diff --git a/processor/schemaprocessor/internal/translation/translation_noop.go b/processor/schemaprocessor/internal/translation/translation_noop.go new file mode 100644 index 000000000000..0ce105fa2d36 --- /dev/null +++ b/processor/schemaprocessor/internal/translation/translation_noop.go @@ -0,0 +1,40 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package translation // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/schemaprocessor/internal/translation" + +import ( + "go.opentelemetry.io/collector/pdata/plog" + "go.opentelemetry.io/collector/pdata/pmetric" + 
"go.opentelemetry.io/collector/pdata/ptrace"
+
+	"github.com/open-telemetry/opentelemetry-collector-contrib/processor/schemaprocessor/internal/alias"
+)
+
+// nopTranslation is a Translation that performs no action,
+// forcing the no-op processing path.
+// It reduces the need for branching at call sites by
+// keeping the same logic path for every translation.
+type nopTranslation struct{}
+
+var _ Translation = (*nopTranslation)(nil)
+
+func (nopTranslation) SupportedVersion(_ *Version) bool {
+	return false
+}
+
+func (nopTranslation) ApplyAllResourceChanges(_ alias.Resource, _ string) error {
+	return nil
+}
+
+func (nopTranslation) ApplyScopeSpanChanges(_ ptrace.ScopeSpans, _ string) error {
+	return nil
+}
+
+func (nopTranslation) ApplyScopeLogChanges(_ plog.ScopeLogs, _ string) error {
+	return nil
+}
+
+func (nopTranslation) ApplyScopeMetricChanges(_ pmetric.ScopeMetrics, _ string) error {
+	return nil
+}
diff --git a/processor/schemaprocessor/internal/translation/translation_race_test.go b/processor/schemaprocessor/internal/translation/translation_race_test.go
new file mode 100644
index 000000000000..bb971441e7ed
--- /dev/null
+++ b/processor/schemaprocessor/internal/translation/translation_race_test.go
@@ -0,0 +1,108 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package translation
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/require"
+	"go.uber.org/zap"
+
+	"github.com/open-telemetry/opentelemetry-collector-contrib/processor/schemaprocessor/internal/fixture"
+)
+
+func TestRaceTranslationSpanChanges(t *testing.T) {
+	t.Parallel()
+
+	tn, err := newTranslatorFromReader(
+		zap.NewNop(),
+		"https://example.com/1.7.0",
+		LoadTranslationVersion(t, "complex_changeset.yml"),
+	)
+	require.NoError(t, err, "Must not error when creating translator")
+
+	fixture.ParallelRaceCompute(t, 10, func() error {
+		for i := 0; i < 10; i++ {
+			v := &Version{1, 0, 0}
+			spans := NewExampleSpans(t, *v)
+			for i := 0; i < 
spans.ResourceSpans().Len(); i++ { + rSpan := spans.ResourceSpans().At(i) + if err := tn.ApplyAllResourceChanges(rSpan, rSpan.SchemaUrl()); err != nil { + return err + } + for j := 0; j < rSpan.ScopeSpans().Len(); j++ { + span := rSpan.ScopeSpans().At(j) + if err := tn.ApplyScopeSpanChanges(span, span.SchemaUrl()); err != nil { + return err + } + } + } + } + return nil + }) +} + +func TestRaceTranslationMetricChanges(t *testing.T) { + t.Parallel() + + tn, err := newTranslatorFromReader( + zap.NewNop(), + "https://example.com/1.7.0", + LoadTranslationVersion(t, "complex_changeset.yml"), + ) + require.NoError(t, err, "Must not error when creating translator") + + fixture.ParallelRaceCompute(t, 10, func() error { + for i := 0; i < 10; i++ { + spans := NewExampleSpans(t, Version{1, 0, 0}) + for i := 0; i < spans.ResourceSpans().Len(); i++ { + rSpan := spans.ResourceSpans().At(i) + err := tn.ApplyAllResourceChanges(rSpan, rSpan.SchemaUrl()) + if err != nil { + return err + } + for j := 0; j < rSpan.ScopeSpans().Len(); j++ { + span := rSpan.ScopeSpans().At(j) + err := tn.ApplyScopeSpanChanges(span, span.SchemaUrl()) + if err != nil { + return err + } + } + } + } + return nil + }) +} + +func TestRaceTranslationLogChanges(t *testing.T) { + t.Parallel() + + tn, err := newTranslatorFromReader( + zap.NewNop(), + "https://example.com/1.7.0", + LoadTranslationVersion(t, "complex_changeset.yml"), + ) + require.NoError(t, err, "Must not error when creating translator") + + fixture.ParallelRaceCompute(t, 10, func() error { + for i := 0; i < 10; i++ { + metrics := NewExampleMetrics(t, Version{1, 0, 0}) + for i := 0; i < metrics.ResourceMetrics().Len(); i++ { + rMetrics := metrics.ResourceMetrics().At(i) + err := tn.ApplyAllResourceChanges(rMetrics, rMetrics.SchemaUrl()) + if err != nil { + return err + } + for j := 0; j < rMetrics.ScopeMetrics().Len(); j++ { + metric := rMetrics.ScopeMetrics().At(j) + err := tn.ApplyScopeMetricChanges(metric, metric.SchemaUrl()) + if err != nil { + 
return err + } + } + } + } + return nil + }) +} diff --git a/processor/schemaprocessor/internal/translation/translation_test.go b/processor/schemaprocessor/internal/translation/translation_test.go new file mode 100644 index 000000000000..5dcc9f1c5638 --- /dev/null +++ b/processor/schemaprocessor/internal/translation/translation_test.go @@ -0,0 +1,555 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package translation + +import ( + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.opentelemetry.io/collector/pdata/plog" + "go.opentelemetry.io/collector/pdata/pmetric" + "go.opentelemetry.io/collector/pdata/ptrace" + "go.uber.org/zap" + "go.uber.org/zap/zaptest" + + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest/pmetrictest" +) + +func TestTranslationSupportedVersion(t *testing.T) { + t.Parallel() + + tn, err := newTranslatorFromReader( + zaptest.NewLogger(t), + "https://opentelemetry.io/schemas/1.9.0", + LoadTranslationVersion(t, TranslationVersion190), + ) + require.NoError(t, err, "Must not error when creating translator") + + tests := []struct { + scenario string + version *Version + supported bool + }{ + { + scenario: "Known supported version", + version: &Version{1, 0, 0}, + supported: true, + }, + { + scenario: "Unsupported version", + version: &Version{1, 33, 7}, + supported: false, + }, + } + + for _, tc := range tests { + t.Run(tc.scenario, func(t *testing.T) { + assert.Equal( + t, + tc.supported, + tn.SupportedVersion(tc.version), + "Must match the expected supported version", + ) + }) + } +} + +func TestTranslationIteratorExact(t *testing.T) { + t.Parallel() + + tests := []struct { + scenario string + target string + income string + status int + versions []Version + }{ + { + scenario: "No update", + target: "https://opentelemetry.io/schemas/1.9.0", + income: "https://opentelemetry.io/schemas/1.9.0", + status: 
NoChange, + versions: []Version{}, + }, + { + scenario: "Update", + target: "https://opentelemetry.io/schemas/1.9.0", + income: "https://opentelemetry.io/schemas/1.6.1", + status: Update, + versions: []Version{ + {1, 7, 0}, + {1, 8, 0}, + {1, 9, 0}, + }, + }, + { + scenario: "Revert", + target: "https://opentelemetry.io/schemas/1.6.1", + income: "https://opentelemetry.io/schemas/1.9.0", + status: Revert, + versions: []Version{ + {1, 9, 0}, + {1, 8, 0}, + {1, 7, 0}, + }, + }, + { + scenario: "Unsupported / Unknown version", + target: "https://opentelemetry.io/schemas/1.6.1", + income: "https://opentelemetry.io/schemas/2.4.0", + status: NoChange, + versions: []Version{}, + }, + } + + for _, tc := range tests { + t.Run(tc.scenario, func(t *testing.T) { + tn, err := newTranslatorFromReader(zaptest.NewLogger(t), tc.target, LoadTranslationVersion(t, TranslationVersion190)) + require.NoError(t, err, "Must have no error when creating translator") + + _, inVersion, err := GetFamilyAndVersion(tc.income) + require.NoError(t, err, "Must not error when parsing inVersion from schemaURL") + + it, status := tn.iterator(inVersion) + assert.Equal(t, tc.status, status, "Must match the expected status") + iterationVersions := make([]Version, 0) + for rev, more := it(); more; rev, more = it() { + iterationVersions = append(iterationVersions, *rev.Version()) + } + assert.EqualValues(t, tc.versions, iterationVersions) + }) + } +} + +func TestTranslationIterator(t *testing.T) { + tn, err := newTranslatorFromReader(zaptest.NewLogger(t), "https://opentelemetry.io/schemas/1.9.0", LoadTranslationVersion(t, TranslationVersion190)) + require.NoError(t, err, "Must have no error when creating translator") + + ver := &Version{1, 0, 0} + it, status := tn.iterator(ver) + assert.Equal(t, Update, status, "Must provide an update status") + + for rev, more := it(); more; rev, more = it() { + ver = rev.Version() + } + assert.EqualValues(t, &Version{1, 9, 0}, ver, "Must match the expected version number") 
+} + +func TestTranslationSpanChanges(t *testing.T) { + t.Parallel() + + tests := []struct { + scenario string + target Version + income Version + }{ + { + scenario: "No update", + target: Version{1, 1, 0}, + income: Version{1, 1, 0}, + }, + { + scenario: "Upgrade 1.0.0 -> 1.1.0", + income: Version{1, 0, 0}, + target: Version{1, 1, 0}, + }, + { + scenario: "Upgrade 1.0.0 -> 1.2.0", + income: Version{1, 0, 0}, + target: Version{1, 2, 0}, + }, + { + scenario: "Upgrade 1.0.0 -> 1.4.0", + income: Version{1, 0, 0}, + target: Version{1, 4, 0}, + }, + { + scenario: "Upgrade 1.0.0 -> 1.5.0", + income: Version{1, 0, 0}, + target: Version{1, 5, 0}, + }, + { + scenario: "Upgrade 1.0.0 -> 1.7.0", + income: Version{1, 0, 0}, + target: Version{1, 7, 0}, + }, + { + scenario: "Downgrade to original version", + target: Version{1, 0, 0}, + income: Version{1, 7, 0}, + }, + } + + for _, tc := range tests { + t.Run(tc.scenario, func(t *testing.T) { + tn, err := newTranslatorFromReader( + zaptest.NewLogger(t), + joinSchemaFamilyAndVersion("https://example.com/", &tc.target), + LoadTranslationVersion(t, "complex_changeset.yml"), + ) + require.NoError(t, err, "Must not error creating translator") + + inSchemaURL := joinSchemaFamilyAndVersion("https://example.com/", &tc.income) + spans := NewExampleSpans(t, tc.income) + for i := 0; i < spans.ResourceSpans().Len(); i++ { + rSpan := spans.ResourceSpans().At(i) + err := tn.ApplyAllResourceChanges(rSpan, inSchemaURL) + require.NoError(t, err, "Must not error when applying resource changes") + for j := 0; j < rSpan.ScopeSpans().Len(); j++ { + span := rSpan.ScopeSpans().At(j) + err = tn.ApplyScopeSpanChanges(span, inSchemaURL) + require.NoError(t, err, "Must not error when applying scope span changes") + } + } + expect := NewExampleSpans(t, tc.target) + if diff := cmp.Diff(expect, spans, cmp.AllowUnexported(ptrace.Traces{})); diff != "" { + t.Errorf("Span mismatch (-want +got):\n%s", diff) + } + assert.EqualValues(t, expect, spans, "Must match 
the expected values") + }) + } +} + +func TestTranslationLogChanges(t *testing.T) { + t.Parallel() + + tests := []struct { + scenario string + target Version + income Version + }{ + { + scenario: "No update", + target: Version{1, 1, 0}, + income: Version{1, 1, 0}, + }, + { + scenario: "Upgrade 1.0.0 -> 1.1.0", + income: Version{1, 0, 0}, + target: Version{1, 1, 0}, + }, + { + scenario: "Upgrade 1.0.0 -> 1.2.0", + income: Version{1, 0, 0}, + target: Version{1, 2, 0}, + }, + { + scenario: "Upgrade 1.0.0 -> 1.4.0", + income: Version{1, 0, 0}, + target: Version{1, 4, 0}, + }, + { + scenario: "Upgrade 1.0.0 -> 1.5.0", + income: Version{1, 0, 0}, + target: Version{1, 5, 0}, + }, + { + scenario: "Upgrade 1.0.0 -> 1.7.0", + income: Version{1, 0, 0}, + target: Version{1, 7, 0}, + }, + { + scenario: "Downgrade to original version", + target: Version{1, 0, 0}, + income: Version{1, 7, 0}, + }, + } + + for _, tc := range tests { + t.Run(tc.scenario, func(t *testing.T) { + tn, err := newTranslatorFromReader( + zaptest.NewLogger(t), + joinSchemaFamilyAndVersion("https://example.com/", &tc.target), + LoadTranslationVersion(t, "complex_changeset.yml"), + ) + require.NoError(t, err, "Must not error creating translator") + + inSchemaURL := joinSchemaFamilyAndVersion("https://example.com/", &tc.income) + logs := NewExampleLogs(t, tc.income) + for i := 0; i < logs.ResourceLogs().Len(); i++ { + rLogs := logs.ResourceLogs().At(i) + err = tn.ApplyAllResourceChanges(rLogs, inSchemaURL) + require.NoError(t, err, "Must not error when applying resource changes") + for j := 0; j < rLogs.ScopeLogs().Len(); j++ { + log := rLogs.ScopeLogs().At(j) + err = tn.ApplyScopeLogChanges(log, inSchemaURL) + require.NoError(t, err, "Must not error when applying scope log changes") + } + } + expect := NewExampleLogs(t, tc.target) + assert.EqualValues(t, expect, logs, "Must match the expected values") + }) + } +} + +func TestTranslationMetricChanges(t *testing.T) { + t.Parallel() + + tests := []struct { + 
scenario string + target Version + income Version + }{ + { + scenario: "No update", + target: Version{1, 1, 0}, + income: Version{1, 1, 0}, + }, + { + scenario: "Upgrade 1.0.0 -> 1.1.0", + income: Version{1, 0, 0}, + target: Version{1, 1, 0}, + }, + { + scenario: "Upgrade 1.0.0 -> 1.2.0", + income: Version{1, 0, 0}, + target: Version{1, 2, 0}, + }, + { + scenario: "Upgrade 1.0.0 -> 1.4.0", + income: Version{1, 0, 0}, + target: Version{1, 4, 0}, + }, + { + scenario: "Upgrade 1.0.0 -> 1.5.0", + income: Version{1, 0, 0}, + target: Version{1, 5, 0}, + }, + { + scenario: "Upgrade 1.0.0 -> 1.7.0", + income: Version{1, 0, 0}, + target: Version{1, 7, 0}, + }, + { + scenario: "Downgrade to original version", + target: Version{1, 0, 0}, + income: Version{1, 7, 0}, + }, + } + + for _, tc := range tests { + t.Run(tc.scenario, func(t *testing.T) { + tn, err := newTranslatorFromReader( + zaptest.NewLogger(t), + joinSchemaFamilyAndVersion("https://example.com/", &tc.target), + LoadTranslationVersion(t, "complex_changeset.yml"), + ) + require.NoError(t, err, "Must not error creating translator") + + inSchemaURL := joinSchemaFamilyAndVersion("https://example.com/", &tc.income) + metrics := NewExampleMetrics(t, tc.income) + for i := 0; i < metrics.ResourceMetrics().Len(); i++ { + rMetrics := metrics.ResourceMetrics().At(i) + err = tn.ApplyAllResourceChanges(rMetrics, inSchemaURL) + require.NoError(t, err, "Must not error when applying resource changes") + for j := 0; j < rMetrics.ScopeMetrics().Len(); j++ { + metric := rMetrics.ScopeMetrics().At(j) + err := tn.ApplyScopeMetricChanges(metric, inSchemaURL) + require.NoError(t, err, "Must not error when applying scope metric changes") + } + } + expect := NewExampleMetrics(t, tc.target) + assert.EqualValues(t, expect, metrics, "Must match the expected values") + }) + } +} + +func TestTranslationEquvialance_Logs(t *testing.T) { + t.Parallel() + + a, b := NewExampleLogs(t, Version{1, 0, 0}), NewExampleLogs(t, Version{1, 7, 0}) + + tn, err 
:= newTranslatorFromReader( + zaptest.NewLogger(t), + "https://example.com/1.4.0", + LoadTranslationVersion(t, "complex_changeset.yml"), + ) + require.NoError(t, err, "Must not error creating translator") + + for _, logs := range []plog.Logs{a, b} { + for i := 0; i < logs.ResourceLogs().Len(); i++ { + rLogs := logs.ResourceLogs().At(i) + err = tn.ApplyAllResourceChanges(rLogs, rLogs.SchemaUrl()) + require.NoError(t, err, "Must not error when applying resource changes") + for j := 0; j < rLogs.ScopeLogs().Len(); j++ { + log := rLogs.ScopeLogs().At(j) + err = tn.ApplyScopeLogChanges(log, log.SchemaUrl()) + require.NoError(t, err, "Must not error when applying scope log changes") + } + } + } + expect := NewExampleLogs(t, Version{1, 4, 0}) + assert.EqualValues(t, expect, a, "Must match the expected value when upgrading versions") + assert.EqualValues(t, expect, b, "Must match the expected value when reverting versions") +} + +func TestTranslationEquvialance_Metrics(t *testing.T) { + t.Parallel() + + a, b := NewExampleMetrics(t, Version{1, 0, 0}), NewExampleMetrics(t, Version{1, 7, 0}) + + tn, err := newTranslatorFromReader( + zaptest.NewLogger(t), + "https://example.com/1.4.0", + LoadTranslationVersion(t, "complex_changeset.yml"), + ) + require.NoError(t, err, "Must not error creating translator") + + for _, metrics := range []pmetric.Metrics{a, b} { + for i := 0; i < metrics.ResourceMetrics().Len(); i++ { + rMetrics := metrics.ResourceMetrics().At(i) + err = tn.ApplyAllResourceChanges(rMetrics, rMetrics.SchemaUrl()) + require.NoError(t, err, "Must not error when applying resource changes") + for j := 0; j < rMetrics.ScopeMetrics().Len(); j++ { + metric := rMetrics.ScopeMetrics().At(j) + err = tn.ApplyScopeMetricChanges(metric, metric.SchemaUrl()) + require.NoError(t, err, "Must not error when applying scope metric changes") + } + } + } + expect := NewExampleMetrics(t, Version{1, 4, 0}) + assert.NoError(t, pmetrictest.CompareMetrics(expect, a), "Must match the expected 
value when upgrading versions") + assert.NoError(t, pmetrictest.CompareMetrics(expect, b), "Must match the expected value when reverting versions") +} + +func TestTranslationEquvialance_Traces(t *testing.T) { + t.Parallel() + + a, b := NewExampleSpans(t, Version{1, 0, 0}), NewExampleSpans(t, Version{1, 7, 0}) + + tn, err := newTranslatorFromReader( + zaptest.NewLogger(t), + "https://example.com/1.4.0", + LoadTranslationVersion(t, "complex_changeset.yml"), + ) + require.NoError(t, err, "Must not error creating translator") + + for _, traces := range []ptrace.Traces{a, b} { + for i := 0; i < traces.ResourceSpans().Len(); i++ { + rSpans := traces.ResourceSpans().At(i) + err = tn.ApplyAllResourceChanges(rSpans, rSpans.SchemaUrl()) + require.NoError(t, err, "Must not error when applying resource changes") + for j := 0; j < rSpans.ScopeSpans().Len(); j++ { + spans := rSpans.ScopeSpans().At(j) + err = tn.ApplyScopeSpanChanges(spans, spans.SchemaUrl()) + require.NoError(t, err, "Must not error when applying scope span changes") + } + } + } + expect := NewExampleSpans(t, Version{1, 4, 0}) + assert.EqualValues(t, expect, a, "Must match the expected value when upgrading versions") + assert.EqualValues(t, expect, b, "Must match the expected value when reverting versions") +} + +func BenchmarkCreatingTranslation(b *testing.B) { + log := zap.NewNop() + + b.ResetTimer() + b.ReportAllocs() + + for i := 0; i < b.N; i++ { + tn, err := newTranslatorFromReader( + log, + "https://opentelemetry.io/schemas/1.9.0", + LoadTranslationVersion(b, TranslationVersion190), + ) + assert.NoError(b, err, "Must not error when creating translator") + assert.NotNil(b, tn) + } +} + +func BenchmarkUpgradingMetrics(b *testing.B) { + tn, err := newTranslatorFromReader( + zap.NewNop(), + "https://example.com/1.7.0", + LoadTranslationVersion(b, "complex_changeset.yml"), + ) + require.NoError(b, err, "Must not error creating translator") + + metrics := NewExampleMetrics(b, Version{1, 0, 0}) + + 
b.ReportAllocs() + b.ResetTimer() + + for i := 0; i < b.N; i++ { + b.StopTimer() + m := pmetric.NewMetrics() + metrics.CopyTo(m) + b.StartTimer() + for i := 0; i < m.ResourceMetrics().Len(); i++ { + rMetrics := m.ResourceMetrics().At(i) + err = tn.ApplyAllResourceChanges(rMetrics, rMetrics.SchemaUrl()) + require.NoError(b, err, "Must not error when applying resource changes") + for j := 0; j < rMetrics.ScopeMetrics().Len(); j++ { + metric := rMetrics.ScopeMetrics().At(j) + err = tn.ApplyScopeMetricChanges(metric, metric.SchemaUrl()) + require.NoError(b, err, "Must not error when applying scope metric changes") + } + } + } +} + +func BenchmarkUpgradingTraces(b *testing.B) { + tn, err := newTranslatorFromReader( + zap.NewNop(), + "https://example.com/1.7.0", + LoadTranslationVersion(b, "complex_changeset.yml"), + ) + require.NoError(b, err, "Must not error creating translator") + + traces := NewExampleSpans(b, Version{1, 0, 0}) + + b.ReportAllocs() + b.ResetTimer() + + for i := 0; i < b.N; i++ { + b.StopTimer() + t := ptrace.NewTraces() + traces.CopyTo(t) + b.StartTimer() + for i := 0; i < t.ResourceSpans().Len(); i++ { + rSpans := t.ResourceSpans().At(i) + err = tn.ApplyAllResourceChanges(rSpans, rSpans.SchemaUrl()) + require.NoError(b, err, "Must not error when applying resource changes") + for j := 0; j < rSpans.ScopeSpans().Len(); j++ { + spans := rSpans.ScopeSpans().At(j) + err = tn.ApplyScopeSpanChanges(spans, spans.SchemaUrl()) + require.NoError(b, err, "Must not error when applying scope span changes") + } + } + } +} + +func BenchmarkUpgradingLogs(b *testing.B) { + tn, err := newTranslatorFromReader( + zap.NewNop(), + "https://example.com/1.7.0", + LoadTranslationVersion(b, "complex_changeset.yml"), + ) + require.NoError(b, err, "Must not error creating translator") + + logs := NewExampleLogs(b, Version{1, 0, 0}) + + b.ReportAllocs() + b.ResetTimer() + + for i := 0; i < b.N; i++ { + b.StopTimer() + l := plog.NewLogs() + logs.CopyTo(l) + b.StartTimer() + for i 
:= 0; i < l.ResourceLogs().Len(); i++ { + rLogs := l.ResourceLogs().At(i) + err = tn.ApplyAllResourceChanges(rLogs, rLogs.SchemaUrl()) + require.NoError(b, err, "Must not error when applying resource changes") + for j := 0; j < rLogs.ScopeLogs().Len(); j++ { + log := rLogs.ScopeLogs().At(j) + err = tn.ApplyScopeLogChanges(log, log.SchemaUrl()) + require.NoError(b, err, "Must not error when applying scope log changes") + } + } + } +} diff --git a/processor/schemaprocessor/internal/translation/version.go b/processor/schemaprocessor/internal/translation/version.go index f2cbc258a235..f9d1888140ce 100644 --- a/processor/schemaprocessor/internal/translation/version.go +++ b/processor/schemaprocessor/internal/translation/version.go @@ -19,6 +19,17 @@ var ( ErrInvalidVersion = errors.New("invalid schema version") ) +// The following status values define whether the transformer +// has to revert changes or update changes to the signal being modified +// These values match the Version.Compare out when performing: +// +// from.Compare(to) // ie: (1.0.0).Compare(1.2.1) +const ( + Update int = -1 // From is less than To + NoChange int = 0 // From equals To + Revert int = 1 // From is greater than To +) + // Version is a machine readable version of the string // schema identifier that can assist in making indexing easier type Version struct { @@ -61,6 +72,15 @@ func GetFamilyAndVersion(schemaURL string) (family string, version *Version, err return u.String(), version, err } +func joinSchemaFamilyAndVersion(family string, version *Version) string { //nolint: unparam + u, err := url.Parse(family) + if err != nil { + return "" + } + u.Path = path.Join(u.Path, version.String()) + return u.String() +} + // NewVersion converts a near semver like string (ie 1.4.0) into // a schema identifier that is comparable for a machine. 
// The expected string format can be matched by the following regex: From 5bc5a09b18fcf50ff8801166122eda43af547b35 Mon Sep 17 00:00:00 2001 From: Daniel Jaglowski Date: Wed, 5 Feb 2025 11:30:55 -0600 Subject: [PATCH 06/14] [connector/routing] Remove match_once parameter (#37708) This follows the deprecation plan which is itself removed in this PR. --- .chloggen/routing-deprecations-v120.yaml | 27 ++++++++ connector/routingconnector/README.md | 84 ++---------------------- connector/routingconnector/config.go | 3 - connector/routingconnector/logs.go | 5 -- connector/routingconnector/metrics.go | 5 -- connector/routingconnector/traces.go | 5 -- 6 files changed, 31 insertions(+), 98 deletions(-) create mode 100644 .chloggen/routing-deprecations-v120.yaml diff --git a/.chloggen/routing-deprecations-v120.yaml b/.chloggen/routing-deprecations-v120.yaml new file mode 100644 index 000000000000..899619c80650 --- /dev/null +++ b/.chloggen/routing-deprecations-v120.yaml @@ -0,0 +1,27 @@ +# Use this changelog template to create an entry for release notes. + +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: breaking + +# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver) +component: routingconnector + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: Remove `match_once` configuration parameter. + +# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists. +issues: [36824] + +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. 
+subtext: + +# If your change doesn't affect end users or the exported elements of any package, +# you should instead start your pull request title with [chore] or use the "Skip Changelog" label. +# Optional: The change log or logs in which this entry should be included. +# e.g. '[user]' or '[user, api]' +# Include 'user' if the change is relevant to end users. +# Include 'api' if there is a change to a library API. +# Default: '[user]' +change_logs: [] diff --git a/connector/routingconnector/README.md b/connector/routingconnector/README.md index 1d7e312bbb3d..225a36c33b7f 100644 --- a/connector/routingconnector/README.md +++ b/connector/routingconnector/README.md @@ -26,21 +26,6 @@ Routes logs, metrics or traces based on resource attributes to specific pipelines using [OpenTelemetry Transformation Language (OTTL)](../../pkg/ottl/README.md) statements as routing conditions. -## Notice - -The `match_once` field is deprecated as of `v0.116.0`. The deprecation schedule is planned as follows: - -- `v0.116.0`: The field is deprecated. If `false` is used, a warning will be logged. -- `v0.117.0`: The default value will change from `false` to `true`. If `false` is used, an error will be logged. -- `v0.118.0`: The field will be disconnected from behavior of the connector. -- `v0.120.0`: The field will be removed. - -### Migration - -It is recommended to set `match_once: true` until `v0.117.0` and then remove all usage of the field before `v0.120.0`. - -For detailed guidance on how to migrate configuration from `match_once: false` to `match_once: true`, see [Config Migration](#config-migration). - ## Configuration If you are not already familiar with connectors, you may find it helpful to first visit the [Connectors README]. @@ -54,11 +39,9 @@ The following settings are available: - `table.pipelines (required)`: the list of pipelines to use when the routing condition is met. 
- `default_pipelines (optional)`: contains the list of pipelines to use when a record does not meet any of specified conditions. - `error_mode (optional)`: determines how errors returned from OTTL statements are handled. Valid values are `propagate`, `ignore` and `silent`. If `ignore` or `silent` is used and a statement's condition has an error then the payload will be routed to the default pipelines. When `silent` is used the error is not logged. If not supplied, `propagate` is used. -- `match_once (optional, default: false)`: determines whether the connector matches multiple statements or not. If enabled, the payload will be routed to the first pipeline in the `table` whose routing condition is met. May only be `false` when used with `resource` context. ### Limitations -- The `match_once` setting is only supported when using the `resource` context. If any routes use `span`, `metric`, `datapoint`, `log` or `request` context, `match_once` must be set to `true`. - The `request` context requires use of the `condition` setting, and relies on a very limited grammar. Conditions must be in the form of `request["key"] == "value"` or `request["key"] != "value"`. (In the future, this grammar may be expanded to support more complex conditions.) 
### Supported [OTTL] functions @@ -77,57 +60,6 @@ The full list of settings exposed for this connector are documented [here](./con ## Examples -Route traces based on an attribute: - -```yaml -receivers: - otlp: - -exporters: - jaeger: - endpoint: localhost:14250 - jaeger/acme: - endpoint: localhost:24250 - jaeger/ecorp: - endpoint: localhost:34250 - -connectors: - routing: - default_pipelines: [traces/jaeger] - error_mode: ignore - match_once: false - table: - - statement: route() where attributes["X-Tenant"] == "acme" - pipelines: [traces/jaeger-acme] - - statement: delete_key(attributes, "X-Tenant") where IsMatch(attributes["X-Tenant"], ".*corp") - pipelines: [traces/jaeger-ecorp] - - routing/match_once: - default_pipelines: [traces/jaeger] - error_mode: ignore - match_once: true - table: - - statement: route() where attributes["X-Tenant"] == "acme" - pipelines: [traces/jaeger-acme] - - statement: route() where attributes["X-Tenant"] == ".*acme" - pipelines: [traces/jaeger-ecorp] - -service: - pipelines: - traces/in: - receivers: [otlp] - exporters: [routing] - traces/jaeger: - receivers: [routing] - exporters: [jaeger] - traces/jaeger-acme: - receivers: [routing] - exporters: [jaeger/acme] - traces/jaeger-ecorp: - receivers: [routing] - exporters: [jaeger/ecorp] -``` - Route logs based on tenant: ```yaml @@ -144,7 +76,6 @@ exporters: connectors: routing: - match_once: true default_pipelines: [logs/other] table: - context: request @@ -186,7 +117,6 @@ exporters: connectors: routing: - match_once: true default_pipelines: [logs/other] table: - context: log @@ -228,7 +158,6 @@ exporters: connectors: routing: - match_once: true table: - context: log condition: severity_number < SEVERITY_NUMBER_ERROR @@ -272,7 +201,6 @@ exporters: connectors: routing: - match_once: true table: - context: log condition: severity_number < SEVERITY_NUMBER_ERROR @@ -300,9 +228,11 @@ service: exporters: [file/ecorp] ``` -## Config Migration +## `match_once` + +The `match_once` field was 
deprecated as of `v0.116.0` and removed in `v0.120.0`. -The following examples demonstrate some strategies for migrating a configuration to `match_once: true`. +The following examples demonstrate some strategies for migrating a configuration from `match_once`. ### Example without `default_pipelines` @@ -336,14 +266,12 @@ result in each receiving an independent handle to the data. The same data can th ```yaml routing/env: - match_once: true table: - condition: attributes["env"] == "prod" pipelines: [ logs/prod ] - condition: attributes["env"] == "dev" pipelines: [ logs/dev ] routing/region: - match_once: true table: - condition: attributes["region"] == "east" pipelines: [ logs/east ] @@ -391,7 +319,6 @@ If the number of routes are limited, you may be able to articulate a route for e ```yaml routing: - match_once: true default_pipelines: [ logs/default ] table: - condition: attributes["env"] == "prod" and attributes["region"] == "east" @@ -420,7 +347,6 @@ in the first and second layers must be kept in sync. ```yaml # First layer separates logs that match no routes routing: - match_once: true default_pipelines: [ logs/default ] table: # all routes forward to second layer - condition: attributes["env"] == "prod" @@ -434,14 +360,12 @@ routing: # Second layer routes logs based on environment and region routing/env: - match_once: true table: - condition: attributes["env"] == "prod" pipelines: [ logs/prod ] - condition: attributes["env"] == "dev" pipelines: [ logs/dev ] routing/region: - match_once: true table: - condition: attributes["region"] == "east" pipelines: [ logs/east ] diff --git a/connector/routingconnector/config.go b/connector/routingconnector/config.go index 3546f5b1ea21..058d4f57dfea 100644 --- a/connector/routingconnector/config.go +++ b/connector/routingconnector/config.go @@ -22,9 +22,6 @@ var ( // Config defines configuration for the Routing processor. type Config struct { - // MatchOnce determines whether the connector matches multiple statements. 
- // Unused. Deprecated in v0.116.0. Will be removed in v0.120.0. - MatchOnce *bool `mapstructure:"match_once"` // ErrorMode determines how the processor reacts to errors that occur while processing an OTTL // condition. // Valid values are `ignore` and `propagate`. diff --git a/connector/routingconnector/logs.go b/connector/routingconnector/logs.go index f38518426707..f32c5adfac64 100644 --- a/connector/routingconnector/logs.go +++ b/connector/routingconnector/logs.go @@ -34,11 +34,6 @@ func newLogsConnector( logs consumer.Logs, ) (*logsConnector, error) { cfg := config.(*Config) - - if cfg.MatchOnce != nil { - set.Logger.Error("The 'match_once' field has been deprecated and no longer has any effect. It will be removed in v0.120.0.") - } - lr, ok := logs.(connector.LogsRouterAndConsumer) if !ok { return nil, errUnexpectedConsumer diff --git a/connector/routingconnector/metrics.go b/connector/routingconnector/metrics.go index 744beb1f692c..9b52250f94ea 100644 --- a/connector/routingconnector/metrics.go +++ b/connector/routingconnector/metrics.go @@ -35,11 +35,6 @@ func newMetricsConnector( metrics consumer.Metrics, ) (*metricsConnector, error) { cfg := config.(*Config) - - if cfg.MatchOnce != nil { - set.Logger.Error("The 'match_once' field has been deprecated and no longer has any effect. It will be removed in v0.120.0.") - } - mr, ok := metrics.(connector.MetricsRouterAndConsumer) if !ok { return nil, errUnexpectedConsumer diff --git a/connector/routingconnector/traces.go b/connector/routingconnector/traces.go index 5d2cffd6ced2..1e4a1d29779a 100644 --- a/connector/routingconnector/traces.go +++ b/connector/routingconnector/traces.go @@ -34,11 +34,6 @@ func newTracesConnector( traces consumer.Traces, ) (*tracesConnector, error) { cfg := config.(*Config) - - if cfg.MatchOnce != nil { - set.Logger.Error("The 'match_once' field has been deprecated and no longer has any effect. 
It will be removed in v0.120.0.") - } - tr, ok := traces.(connector.TracesRouterAndConsumer) if !ok { return nil, errUnexpectedConsumer From 54cdd33856c4b69f740d9dfec5f8c8deaaff5c4d Mon Sep 17 00:00:00 2001 From: Bogdan Drutu Date: Wed, 5 Feb 2025 09:31:14 -0800 Subject: [PATCH 07/14] [chore] filelogreceiver remove dead code related to maxworkers (#37709) Signed-off-by: Bogdan Drutu --- pkg/stanza/adapter/benchmark_test.go | 17 +++++------------ pkg/stanza/adapter/config.go | 1 - 2 files changed, 5 insertions(+), 13 deletions(-) diff --git a/pkg/stanza/adapter/benchmark_test.go b/pkg/stanza/adapter/benchmark_test.go index b8641adf2d19..1d80b5a7b623 100644 --- a/pkg/stanza/adapter/benchmark_test.go +++ b/pkg/stanza/adapter/benchmark_test.go @@ -46,7 +46,6 @@ func TestEndToEnd(t *testing.T) { } type benchCase struct { - workerCount int maxBatchSize uint flushInterval time.Duration } @@ -55,7 +54,6 @@ func (bc benchCase) run(b *testing.B) { for i := 0; i < b.N; i++ { f := NewFactory(BenchReceiverType{}, component.StabilityLevelUndefined) cfg := f.CreateDefaultConfig().(*BenchConfig) - cfg.BaseConfig.numWorkers = bc.workerCount cfg.BaseConfig.maxBatchSize = bc.maxBatchSize cfg.BaseConfig.flushInterval = bc.flushInterval cfg.BenchOpConfig.NumEntries = numEntries @@ -89,21 +87,16 @@ func BenchmarkEndToEnd(b *testing.B) { // These values may have meaningful performance implications, so benchmarks // should cover a variety of values in order to highlight impacts. 
var ( - // converter - workerCounts = []int{1, 2, 4, 8, 16} - // emitter maxBatchSizes = []uint{1, 10, 100, 1000, 10_000} flushIntervals = []time.Duration{10 * time.Millisecond, 100 * time.Millisecond} ) - for _, wc := range workerCounts { - for _, bs := range maxBatchSizes { - for _, fi := range flushIntervals { - name := fmt.Sprintf("workerCount=%d,maxBatchSize=%d,flushInterval=%s", wc, bs, fi) - bc := benchCase{workerCount: wc, maxBatchSize: bs, flushInterval: fi} - b.Run(name, bc.run) - } + for _, bs := range maxBatchSizes { + for _, fi := range flushIntervals { + name := fmt.Sprintf("maxBatchSize=%d,flushInterval=%s", bs, fi) + bc := benchCase{maxBatchSize: bs, flushInterval: fi} + b.Run(name, bc.run) } } } diff --git a/pkg/stanza/adapter/config.go b/pkg/stanza/adapter/config.go index a80e6c053365..9ef9da2394d9 100644 --- a/pkg/stanza/adapter/config.go +++ b/pkg/stanza/adapter/config.go @@ -19,7 +19,6 @@ type BaseConfig struct { RetryOnFailure consumerretry.Config `mapstructure:"retry_on_failure"` // currently not configurable by users, but available for benchmarking - numWorkers int maxBatchSize uint flushInterval time.Duration } From 926b222b96effc966f05d70236c033f5db57ac0b Mon Sep 17 00:00:00 2001 From: Pablo Baeyens Date: Wed, 5 Feb 2025 18:59:17 +0100 Subject: [PATCH 08/14] [chore][checkapi] Add support for fuzzing (#37714) #### Description Allowlists fuzzing targets --- cmd/checkapi/main.go | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/cmd/checkapi/main.go b/cmd/checkapi/main.go index ac8f43dbc266..3972f0df88b6 100644 --- a/cmd/checkapi/main.go +++ b/cmd/checkapi/main.go @@ -83,6 +83,12 @@ func run(folder string, allowlistFilePath string) error { return nil } +func isTestFunction(fnName string) bool { + return strings.HasPrefix(fnName, "Test") || + strings.HasPrefix(fnName, "Benchmark") || + strings.HasPrefix(fnName, "Fuzz") +} + func handleFile(f *ast.File, result *api) { for _, d := range f.Decls { if str, isStr := 
d.(*ast.GenDecl); isStr { @@ -107,7 +113,7 @@ func handleFile(f *ast.File, result *api) { } exported := false receiver := "" - if fn.Recv.NumFields() == 0 && !strings.HasPrefix(fn.Name.String(), "Test") && !strings.HasPrefix(fn.Name.String(), "Benchmark") { + if fn.Recv.NumFields() == 0 && !isTestFunction(fn.Name.String()) { exported = true } if fn.Recv.NumFields() > 0 { From 399ec3463a6ca473019695c12c5278cd28c8548b Mon Sep 17 00:00:00 2001 From: Chao Weng <19381524+sincejune@users.noreply.github.com> Date: Thu, 6 Feb 2025 02:36:35 +0800 Subject: [PATCH 09/14] [chore] Fix make oteltestbedcol (#37705) #### Description An immediate fix for `make oteltestbedcol`. cc @mx-psi #### Link to tracking issue Fixes https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/37627 #### Testing n/a #### Documentation n/a --- cmd/oteltestbedcol/builder-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/oteltestbedcol/builder-config.yaml b/cmd/oteltestbedcol/builder-config.yaml index 8fec9416ae54..ba9513ec40fa 100644 --- a/cmd/oteltestbedcol/builder-config.yaml +++ b/cmd/oteltestbedcol/builder-config.yaml @@ -17,7 +17,7 @@ extensions: exporters: - gomod: go.opentelemetry.io/collector/exporter/debugexporter v0.118.1-0.20250123125445-24f88da7b583 - - gomod: go.opentelemetry.io/collector/exporter/otlpexporter v0.118.1-0.20250123125445-24f88da7b583 + - gomod: go.opentelemetry.io/collector/exporter/otlpexporter v0.119.0 - gomod: go.opentelemetry.io/collector/exporter/otlphttpexporter v0.118.1-0.20250123125445-24f88da7b583 - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/exporter/carbonexporter v0.119.0 - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/exporter/opencensusexporter v0.119.0 From 32635f2b96de66679edf9e6b791c83e5a19efc78 Mon Sep 17 00:00:00 2001 From: Joseph Sirianni Date: Wed, 5 Feb 2025 13:37:03 -0500 Subject: [PATCH 10/14] [receiver/awscontainerinsight] Fix TLS insecure skip verify parameter 
in readme (#37696) #### Description The Kafka receiver's readme is incorrect. TLS `insecure` should be `insecure_skip_verify`. See the following source code - https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/internal/kafka/authentication.go#L24 - https://github.com/open-telemetry/opentelemetry-collector/blob/main/config/configtls/configtls.go#L100 --- receiver/kafkareceiver/README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/receiver/kafkareceiver/README.md b/receiver/kafkareceiver/README.md index 49f3c4c0ce31..98549bcabe5b 100644 --- a/receiver/kafkareceiver/README.md +++ b/receiver/kafkareceiver/README.md @@ -66,8 +66,8 @@ The following settings can be optionally configured: only be used if `insecure` is set to false. - `key_file`: path to the TLS key to use for TLS required connections. Should only be used if `insecure` is set to false. - - `insecure` (default = false): Disable verifying the server's certificate - chain and host name (`InsecureSkipVerify` in the tls config) + - `insecure_skip_verify` (default = false): Disable verifying the server's certificate + chain and host name (`InsecureSkipVerify` in the [tls config](https://github.com/open-telemetry/opentelemetry-collector/blob/main/config/configtls/configtls.go#L100)) - `server_name_override`: ServerName indicates the name of the server requested by the client in order to support virtual hosting. - `kerberos` From f3420122a4f4cd76e295e3f912d6f1cc69087d3d Mon Sep 17 00:00:00 2001 From: Samiur Arif Date: Wed, 5 Feb 2025 10:38:35 -0800 Subject: [PATCH 11/14] =?UTF-8?q?added=20new=20metrics=20for=20vcenter=20r?= =?UTF-8?q?eceiver=20VM=20performance=20related=20met=E2=80=A6=20(#37489)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit #### Description This PR adds the following VM performance metrics for vcenter. 1. `vcenter.vm.cpu.time` 2. `vcenter.vm.network.multicast.packet.rate` 3. 
`vcenter.vm.network.broadcast.packet.rate` More information on these metrics can be found [here](https://docs.vmware.com/en/vRealize-Operations/8.10/com.vmware.vcom.metrics.doc/GUID-41603CD6-453B-4E26-A237-34E733BAB00C.html). and also succinctly described [here](https://www.servicenow.com/docs/bundle/xanadu-it-operations-management/page/product/agent-client-collector/reference/vsphere-metrics.html) #37488 #### Testing The metrics were scraped from a test vCenter environment, and golden test files were updated accordingly to reflect the addition of the metric. #### Documentation Documentation was updated according to the metadata.yaml evidence1 Screenshot 2025-01-29 at 10 48 33 AM Screenshot 2025-01-29 at 10 53 54 AM Screenshot 2025-01-29 at 10 54 11 AM Signed-off-by: Samiur Arif --- ...er-virtual-machine-preformace-metrics.yaml | 27 + receiver/vcenterreceiver/documentation.md | 51 ++ .../internal/metadata/generated_config.go | 148 ++--- .../metadata/generated_config_test.go | 278 ++++----- .../internal/metadata/generated_metrics.go | 508 +++++++++++----- .../metadata/generated_metrics_test.go | 63 ++ .../internal/metadata/testdata/config.yaml | 12 + .../responses/vm-performance-counters.xml | 358 +++++++++++ receiver/vcenterreceiver/metadata.yaml | 31 + receiver/vcenterreceiver/metrics.go | 31 + receiver/vcenterreceiver/scraper_test.go | 3 + .../metrics/expected-all-enabled.yaml | 555 ++++++++++++++++++ 12 files changed, 1712 insertions(+), 353 deletions(-) create mode 100644 .chloggen/add-more-vecenter-receiver-virtual-machine-preformace-metrics.yaml diff --git a/.chloggen/add-more-vecenter-receiver-virtual-machine-preformace-metrics.yaml b/.chloggen/add-more-vecenter-receiver-virtual-machine-preformace-metrics.yaml new file mode 100644 index 000000000000..683db5a9b1fa --- /dev/null +++ b/.chloggen/add-more-vecenter-receiver-virtual-machine-preformace-metrics.yaml @@ -0,0 +1,27 @@ +# Use this changelog template to create an entry for release notes. 
+ +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: enhancement + +# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver) +component: vcenterreceiver + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: Adds three more vCenter virtual machine performance metrics + +# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists. +issues: [37488] + +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. +subtext: + +# If your change doesn't affect end users or the exported elements of any package, +# you should instead start your pull request title with [chore] or use the "Skip Changelog" label. +# Optional: The change log or logs in which this entry should be included. +# e.g. '[user]' or '[user, api]' +# Include 'user' if the change is relevant to end users. +# Include 'api' if there is a change to a library API. +# Default: '[user]' +change_logs: [user] diff --git a/receiver/vcenterreceiver/documentation.md b/receiver/vcenterreceiver/documentation.md index 5657f5ed3531..03a7320ff031 100644 --- a/receiver/vcenterreceiver/documentation.md +++ b/receiver/vcenterreceiver/documentation.md @@ -820,6 +820,23 @@ Total memory capacity of the host system. | ---- | ----------- | ---------- | ----------------------- | --------- | | MiBy | Sum | Double | Cumulative | false | +### vcenter.vm.cpu.time + +CPU time spent in idle, ready or wait state. + +As measured over the most recent 20s interval. 
+ +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| % | Gauge | Double | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| cpu_state | CPU time spent in idle, ready or wait state. | Str: ``idle``, ``ready``, ``wait`` | +| object | The object on the virtual machine or host that is being reported on. | Any Str | + ### vcenter.vm.memory.granted The amount of memory that is granted to a VM. @@ -828,6 +845,40 @@ The amount of memory that is granted to a VM. | ---- | ----------- | ---------- | ----------------------- | --------- | | MiBy | Sum | Int | Cumulative | false | +### vcenter.vm.network.broadcast.packet.rate + +The rate of broadcast packets transmitted or received by each vNIC (virtual network interface controller) on the virtual machine. + +As measured over the most recent 20s interval. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| {packets/s} | Gauge | Double | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| direction | The direction of network throughput. | Str: ``transmitted``, ``received`` | +| object | The object on the virtual machine or host that is being reported on. | Any Str | + +### vcenter.vm.network.multicast.packet.rate + +The rate of multicast packets transmitted or received by each vNIC (virtual network interface controller) on the virtual machine. + +As measured over the most recent 20s interval. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| {packets/s} | Gauge | Double | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| direction | The direction of network throughput. | Str: ``transmitted``, ``received`` | +| object | The object on the virtual machine or host that is being reported on. 
| Any Str | + ## Resource Attributes | Name | Description | Values | Enabled | diff --git a/receiver/vcenterreceiver/internal/metadata/generated_config.go b/receiver/vcenterreceiver/internal/metadata/generated_config.go index dd875d38847a..6e3c44034bc3 100644 --- a/receiver/vcenterreceiver/internal/metadata/generated_config.go +++ b/receiver/vcenterreceiver/internal/metadata/generated_config.go @@ -28,74 +28,77 @@ func (ms *MetricConfig) Unmarshal(parser *confmap.Conf) error { // MetricsConfig provides config for vcenter metrics. type MetricsConfig struct { - VcenterClusterCPUEffective MetricConfig `mapstructure:"vcenter.cluster.cpu.effective"` - VcenterClusterCPULimit MetricConfig `mapstructure:"vcenter.cluster.cpu.limit"` - VcenterClusterHostCount MetricConfig `mapstructure:"vcenter.cluster.host.count"` - VcenterClusterMemoryEffective MetricConfig `mapstructure:"vcenter.cluster.memory.effective"` - VcenterClusterMemoryLimit MetricConfig `mapstructure:"vcenter.cluster.memory.limit"` - VcenterClusterVMCount MetricConfig `mapstructure:"vcenter.cluster.vm.count"` - VcenterClusterVMTemplateCount MetricConfig `mapstructure:"vcenter.cluster.vm_template.count"` - VcenterClusterVsanCongestions MetricConfig `mapstructure:"vcenter.cluster.vsan.congestions"` - VcenterClusterVsanLatencyAvg MetricConfig `mapstructure:"vcenter.cluster.vsan.latency.avg"` - VcenterClusterVsanOperations MetricConfig `mapstructure:"vcenter.cluster.vsan.operations"` - VcenterClusterVsanThroughput MetricConfig `mapstructure:"vcenter.cluster.vsan.throughput"` - VcenterDatacenterClusterCount MetricConfig `mapstructure:"vcenter.datacenter.cluster.count"` - VcenterDatacenterCPULimit MetricConfig `mapstructure:"vcenter.datacenter.cpu.limit"` - VcenterDatacenterDatastoreCount MetricConfig `mapstructure:"vcenter.datacenter.datastore.count"` - VcenterDatacenterDiskSpace MetricConfig `mapstructure:"vcenter.datacenter.disk.space"` - VcenterDatacenterHostCount MetricConfig 
`mapstructure:"vcenter.datacenter.host.count"` - VcenterDatacenterMemoryLimit MetricConfig `mapstructure:"vcenter.datacenter.memory.limit"` - VcenterDatacenterVMCount MetricConfig `mapstructure:"vcenter.datacenter.vm.count"` - VcenterDatastoreDiskUsage MetricConfig `mapstructure:"vcenter.datastore.disk.usage"` - VcenterDatastoreDiskUtilization MetricConfig `mapstructure:"vcenter.datastore.disk.utilization"` - VcenterHostCPUCapacity MetricConfig `mapstructure:"vcenter.host.cpu.capacity"` - VcenterHostCPUReserved MetricConfig `mapstructure:"vcenter.host.cpu.reserved"` - VcenterHostCPUUsage MetricConfig `mapstructure:"vcenter.host.cpu.usage"` - VcenterHostCPUUtilization MetricConfig `mapstructure:"vcenter.host.cpu.utilization"` - VcenterHostDiskLatencyAvg MetricConfig `mapstructure:"vcenter.host.disk.latency.avg"` - VcenterHostDiskLatencyMax MetricConfig `mapstructure:"vcenter.host.disk.latency.max"` - VcenterHostDiskThroughput MetricConfig `mapstructure:"vcenter.host.disk.throughput"` - VcenterHostMemoryCapacity MetricConfig `mapstructure:"vcenter.host.memory.capacity"` - VcenterHostMemoryUsage MetricConfig `mapstructure:"vcenter.host.memory.usage"` - VcenterHostMemoryUtilization MetricConfig `mapstructure:"vcenter.host.memory.utilization"` - VcenterHostNetworkPacketDropRate MetricConfig `mapstructure:"vcenter.host.network.packet.drop.rate"` - VcenterHostNetworkPacketErrorRate MetricConfig `mapstructure:"vcenter.host.network.packet.error.rate"` - VcenterHostNetworkPacketRate MetricConfig `mapstructure:"vcenter.host.network.packet.rate"` - VcenterHostNetworkThroughput MetricConfig `mapstructure:"vcenter.host.network.throughput"` - VcenterHostNetworkUsage MetricConfig `mapstructure:"vcenter.host.network.usage"` - VcenterHostVsanCacheHitRate MetricConfig `mapstructure:"vcenter.host.vsan.cache.hit_rate"` - VcenterHostVsanCongestions MetricConfig `mapstructure:"vcenter.host.vsan.congestions"` - VcenterHostVsanLatencyAvg MetricConfig 
`mapstructure:"vcenter.host.vsan.latency.avg"` - VcenterHostVsanOperations MetricConfig `mapstructure:"vcenter.host.vsan.operations"` - VcenterHostVsanThroughput MetricConfig `mapstructure:"vcenter.host.vsan.throughput"` - VcenterResourcePoolCPUShares MetricConfig `mapstructure:"vcenter.resource_pool.cpu.shares"` - VcenterResourcePoolCPUUsage MetricConfig `mapstructure:"vcenter.resource_pool.cpu.usage"` - VcenterResourcePoolMemoryBallooned MetricConfig `mapstructure:"vcenter.resource_pool.memory.ballooned"` - VcenterResourcePoolMemoryGranted MetricConfig `mapstructure:"vcenter.resource_pool.memory.granted"` - VcenterResourcePoolMemoryShares MetricConfig `mapstructure:"vcenter.resource_pool.memory.shares"` - VcenterResourcePoolMemorySwapped MetricConfig `mapstructure:"vcenter.resource_pool.memory.swapped"` - VcenterResourcePoolMemoryUsage MetricConfig `mapstructure:"vcenter.resource_pool.memory.usage"` - VcenterVMCPUReadiness MetricConfig `mapstructure:"vcenter.vm.cpu.readiness"` - VcenterVMCPUUsage MetricConfig `mapstructure:"vcenter.vm.cpu.usage"` - VcenterVMCPUUtilization MetricConfig `mapstructure:"vcenter.vm.cpu.utilization"` - VcenterVMDiskLatencyAvg MetricConfig `mapstructure:"vcenter.vm.disk.latency.avg"` - VcenterVMDiskLatencyMax MetricConfig `mapstructure:"vcenter.vm.disk.latency.max"` - VcenterVMDiskThroughput MetricConfig `mapstructure:"vcenter.vm.disk.throughput"` - VcenterVMDiskUsage MetricConfig `mapstructure:"vcenter.vm.disk.usage"` - VcenterVMDiskUtilization MetricConfig `mapstructure:"vcenter.vm.disk.utilization"` - VcenterVMMemoryBallooned MetricConfig `mapstructure:"vcenter.vm.memory.ballooned"` - VcenterVMMemoryGranted MetricConfig `mapstructure:"vcenter.vm.memory.granted"` - VcenterVMMemorySwapped MetricConfig `mapstructure:"vcenter.vm.memory.swapped"` - VcenterVMMemorySwappedSsd MetricConfig `mapstructure:"vcenter.vm.memory.swapped_ssd"` - VcenterVMMemoryUsage MetricConfig `mapstructure:"vcenter.vm.memory.usage"` - VcenterVMMemoryUtilization 
MetricConfig `mapstructure:"vcenter.vm.memory.utilization"` - VcenterVMNetworkPacketDropRate MetricConfig `mapstructure:"vcenter.vm.network.packet.drop.rate"` - VcenterVMNetworkPacketRate MetricConfig `mapstructure:"vcenter.vm.network.packet.rate"` - VcenterVMNetworkThroughput MetricConfig `mapstructure:"vcenter.vm.network.throughput"` - VcenterVMNetworkUsage MetricConfig `mapstructure:"vcenter.vm.network.usage"` - VcenterVMVsanLatencyAvg MetricConfig `mapstructure:"vcenter.vm.vsan.latency.avg"` - VcenterVMVsanOperations MetricConfig `mapstructure:"vcenter.vm.vsan.operations"` - VcenterVMVsanThroughput MetricConfig `mapstructure:"vcenter.vm.vsan.throughput"` + VcenterClusterCPUEffective MetricConfig `mapstructure:"vcenter.cluster.cpu.effective"` + VcenterClusterCPULimit MetricConfig `mapstructure:"vcenter.cluster.cpu.limit"` + VcenterClusterHostCount MetricConfig `mapstructure:"vcenter.cluster.host.count"` + VcenterClusterMemoryEffective MetricConfig `mapstructure:"vcenter.cluster.memory.effective"` + VcenterClusterMemoryLimit MetricConfig `mapstructure:"vcenter.cluster.memory.limit"` + VcenterClusterVMCount MetricConfig `mapstructure:"vcenter.cluster.vm.count"` + VcenterClusterVMTemplateCount MetricConfig `mapstructure:"vcenter.cluster.vm_template.count"` + VcenterClusterVsanCongestions MetricConfig `mapstructure:"vcenter.cluster.vsan.congestions"` + VcenterClusterVsanLatencyAvg MetricConfig `mapstructure:"vcenter.cluster.vsan.latency.avg"` + VcenterClusterVsanOperations MetricConfig `mapstructure:"vcenter.cluster.vsan.operations"` + VcenterClusterVsanThroughput MetricConfig `mapstructure:"vcenter.cluster.vsan.throughput"` + VcenterDatacenterClusterCount MetricConfig `mapstructure:"vcenter.datacenter.cluster.count"` + VcenterDatacenterCPULimit MetricConfig `mapstructure:"vcenter.datacenter.cpu.limit"` + VcenterDatacenterDatastoreCount MetricConfig `mapstructure:"vcenter.datacenter.datastore.count"` + VcenterDatacenterDiskSpace MetricConfig 
`mapstructure:"vcenter.datacenter.disk.space"` + VcenterDatacenterHostCount MetricConfig `mapstructure:"vcenter.datacenter.host.count"` + VcenterDatacenterMemoryLimit MetricConfig `mapstructure:"vcenter.datacenter.memory.limit"` + VcenterDatacenterVMCount MetricConfig `mapstructure:"vcenter.datacenter.vm.count"` + VcenterDatastoreDiskUsage MetricConfig `mapstructure:"vcenter.datastore.disk.usage"` + VcenterDatastoreDiskUtilization MetricConfig `mapstructure:"vcenter.datastore.disk.utilization"` + VcenterHostCPUCapacity MetricConfig `mapstructure:"vcenter.host.cpu.capacity"` + VcenterHostCPUReserved MetricConfig `mapstructure:"vcenter.host.cpu.reserved"` + VcenterHostCPUUsage MetricConfig `mapstructure:"vcenter.host.cpu.usage"` + VcenterHostCPUUtilization MetricConfig `mapstructure:"vcenter.host.cpu.utilization"` + VcenterHostDiskLatencyAvg MetricConfig `mapstructure:"vcenter.host.disk.latency.avg"` + VcenterHostDiskLatencyMax MetricConfig `mapstructure:"vcenter.host.disk.latency.max"` + VcenterHostDiskThroughput MetricConfig `mapstructure:"vcenter.host.disk.throughput"` + VcenterHostMemoryCapacity MetricConfig `mapstructure:"vcenter.host.memory.capacity"` + VcenterHostMemoryUsage MetricConfig `mapstructure:"vcenter.host.memory.usage"` + VcenterHostMemoryUtilization MetricConfig `mapstructure:"vcenter.host.memory.utilization"` + VcenterHostNetworkPacketDropRate MetricConfig `mapstructure:"vcenter.host.network.packet.drop.rate"` + VcenterHostNetworkPacketErrorRate MetricConfig `mapstructure:"vcenter.host.network.packet.error.rate"` + VcenterHostNetworkPacketRate MetricConfig `mapstructure:"vcenter.host.network.packet.rate"` + VcenterHostNetworkThroughput MetricConfig `mapstructure:"vcenter.host.network.throughput"` + VcenterHostNetworkUsage MetricConfig `mapstructure:"vcenter.host.network.usage"` + VcenterHostVsanCacheHitRate MetricConfig `mapstructure:"vcenter.host.vsan.cache.hit_rate"` + VcenterHostVsanCongestions MetricConfig 
`mapstructure:"vcenter.host.vsan.congestions"` + VcenterHostVsanLatencyAvg MetricConfig `mapstructure:"vcenter.host.vsan.latency.avg"` + VcenterHostVsanOperations MetricConfig `mapstructure:"vcenter.host.vsan.operations"` + VcenterHostVsanThroughput MetricConfig `mapstructure:"vcenter.host.vsan.throughput"` + VcenterResourcePoolCPUShares MetricConfig `mapstructure:"vcenter.resource_pool.cpu.shares"` + VcenterResourcePoolCPUUsage MetricConfig `mapstructure:"vcenter.resource_pool.cpu.usage"` + VcenterResourcePoolMemoryBallooned MetricConfig `mapstructure:"vcenter.resource_pool.memory.ballooned"` + VcenterResourcePoolMemoryGranted MetricConfig `mapstructure:"vcenter.resource_pool.memory.granted"` + VcenterResourcePoolMemoryShares MetricConfig `mapstructure:"vcenter.resource_pool.memory.shares"` + VcenterResourcePoolMemorySwapped MetricConfig `mapstructure:"vcenter.resource_pool.memory.swapped"` + VcenterResourcePoolMemoryUsage MetricConfig `mapstructure:"vcenter.resource_pool.memory.usage"` + VcenterVMCPUReadiness MetricConfig `mapstructure:"vcenter.vm.cpu.readiness"` + VcenterVMCPUTime MetricConfig `mapstructure:"vcenter.vm.cpu.time"` + VcenterVMCPUUsage MetricConfig `mapstructure:"vcenter.vm.cpu.usage"` + VcenterVMCPUUtilization MetricConfig `mapstructure:"vcenter.vm.cpu.utilization"` + VcenterVMDiskLatencyAvg MetricConfig `mapstructure:"vcenter.vm.disk.latency.avg"` + VcenterVMDiskLatencyMax MetricConfig `mapstructure:"vcenter.vm.disk.latency.max"` + VcenterVMDiskThroughput MetricConfig `mapstructure:"vcenter.vm.disk.throughput"` + VcenterVMDiskUsage MetricConfig `mapstructure:"vcenter.vm.disk.usage"` + VcenterVMDiskUtilization MetricConfig `mapstructure:"vcenter.vm.disk.utilization"` + VcenterVMMemoryBallooned MetricConfig `mapstructure:"vcenter.vm.memory.ballooned"` + VcenterVMMemoryGranted MetricConfig `mapstructure:"vcenter.vm.memory.granted"` + VcenterVMMemorySwapped MetricConfig `mapstructure:"vcenter.vm.memory.swapped"` + VcenterVMMemorySwappedSsd 
MetricConfig `mapstructure:"vcenter.vm.memory.swapped_ssd"` + VcenterVMMemoryUsage MetricConfig `mapstructure:"vcenter.vm.memory.usage"` + VcenterVMMemoryUtilization MetricConfig `mapstructure:"vcenter.vm.memory.utilization"` + VcenterVMNetworkBroadcastPacketRate MetricConfig `mapstructure:"vcenter.vm.network.broadcast.packet.rate"` + VcenterVMNetworkMulticastPacketRate MetricConfig `mapstructure:"vcenter.vm.network.multicast.packet.rate"` + VcenterVMNetworkPacketDropRate MetricConfig `mapstructure:"vcenter.vm.network.packet.drop.rate"` + VcenterVMNetworkPacketRate MetricConfig `mapstructure:"vcenter.vm.network.packet.rate"` + VcenterVMNetworkThroughput MetricConfig `mapstructure:"vcenter.vm.network.throughput"` + VcenterVMNetworkUsage MetricConfig `mapstructure:"vcenter.vm.network.usage"` + VcenterVMVsanLatencyAvg MetricConfig `mapstructure:"vcenter.vm.vsan.latency.avg"` + VcenterVMVsanOperations MetricConfig `mapstructure:"vcenter.vm.vsan.operations"` + VcenterVMVsanThroughput MetricConfig `mapstructure:"vcenter.vm.vsan.throughput"` } func DefaultMetricsConfig() MetricsConfig { @@ -244,6 +247,9 @@ func DefaultMetricsConfig() MetricsConfig { VcenterVMCPUReadiness: MetricConfig{ Enabled: true, }, + VcenterVMCPUTime: MetricConfig{ + Enabled: false, + }, VcenterVMCPUUsage: MetricConfig{ Enabled: true, }, @@ -283,6 +289,12 @@ func DefaultMetricsConfig() MetricsConfig { VcenterVMMemoryUtilization: MetricConfig{ Enabled: true, }, + VcenterVMNetworkBroadcastPacketRate: MetricConfig{ + Enabled: false, + }, + VcenterVMNetworkMulticastPacketRate: MetricConfig{ + Enabled: false, + }, VcenterVMNetworkPacketDropRate: MetricConfig{ Enabled: true, }, diff --git a/receiver/vcenterreceiver/internal/metadata/generated_config_test.go b/receiver/vcenterreceiver/internal/metadata/generated_config_test.go index 66cf8d162b10..29efab844d01 100644 --- a/receiver/vcenterreceiver/internal/metadata/generated_config_test.go +++ 
b/receiver/vcenterreceiver/internal/metadata/generated_config_test.go @@ -25,74 +25,77 @@ func TestMetricsBuilderConfig(t *testing.T) { name: "all_set", want: MetricsBuilderConfig{ Metrics: MetricsConfig{ - VcenterClusterCPUEffective: MetricConfig{Enabled: true}, - VcenterClusterCPULimit: MetricConfig{Enabled: true}, - VcenterClusterHostCount: MetricConfig{Enabled: true}, - VcenterClusterMemoryEffective: MetricConfig{Enabled: true}, - VcenterClusterMemoryLimit: MetricConfig{Enabled: true}, - VcenterClusterVMCount: MetricConfig{Enabled: true}, - VcenterClusterVMTemplateCount: MetricConfig{Enabled: true}, - VcenterClusterVsanCongestions: MetricConfig{Enabled: true}, - VcenterClusterVsanLatencyAvg: MetricConfig{Enabled: true}, - VcenterClusterVsanOperations: MetricConfig{Enabled: true}, - VcenterClusterVsanThroughput: MetricConfig{Enabled: true}, - VcenterDatacenterClusterCount: MetricConfig{Enabled: true}, - VcenterDatacenterCPULimit: MetricConfig{Enabled: true}, - VcenterDatacenterDatastoreCount: MetricConfig{Enabled: true}, - VcenterDatacenterDiskSpace: MetricConfig{Enabled: true}, - VcenterDatacenterHostCount: MetricConfig{Enabled: true}, - VcenterDatacenterMemoryLimit: MetricConfig{Enabled: true}, - VcenterDatacenterVMCount: MetricConfig{Enabled: true}, - VcenterDatastoreDiskUsage: MetricConfig{Enabled: true}, - VcenterDatastoreDiskUtilization: MetricConfig{Enabled: true}, - VcenterHostCPUCapacity: MetricConfig{Enabled: true}, - VcenterHostCPUReserved: MetricConfig{Enabled: true}, - VcenterHostCPUUsage: MetricConfig{Enabled: true}, - VcenterHostCPUUtilization: MetricConfig{Enabled: true}, - VcenterHostDiskLatencyAvg: MetricConfig{Enabled: true}, - VcenterHostDiskLatencyMax: MetricConfig{Enabled: true}, - VcenterHostDiskThroughput: MetricConfig{Enabled: true}, - VcenterHostMemoryCapacity: MetricConfig{Enabled: true}, - VcenterHostMemoryUsage: MetricConfig{Enabled: true}, - VcenterHostMemoryUtilization: MetricConfig{Enabled: true}, - 
VcenterHostNetworkPacketDropRate: MetricConfig{Enabled: true}, - VcenterHostNetworkPacketErrorRate: MetricConfig{Enabled: true}, - VcenterHostNetworkPacketRate: MetricConfig{Enabled: true}, - VcenterHostNetworkThroughput: MetricConfig{Enabled: true}, - VcenterHostNetworkUsage: MetricConfig{Enabled: true}, - VcenterHostVsanCacheHitRate: MetricConfig{Enabled: true}, - VcenterHostVsanCongestions: MetricConfig{Enabled: true}, - VcenterHostVsanLatencyAvg: MetricConfig{Enabled: true}, - VcenterHostVsanOperations: MetricConfig{Enabled: true}, - VcenterHostVsanThroughput: MetricConfig{Enabled: true}, - VcenterResourcePoolCPUShares: MetricConfig{Enabled: true}, - VcenterResourcePoolCPUUsage: MetricConfig{Enabled: true}, - VcenterResourcePoolMemoryBallooned: MetricConfig{Enabled: true}, - VcenterResourcePoolMemoryGranted: MetricConfig{Enabled: true}, - VcenterResourcePoolMemoryShares: MetricConfig{Enabled: true}, - VcenterResourcePoolMemorySwapped: MetricConfig{Enabled: true}, - VcenterResourcePoolMemoryUsage: MetricConfig{Enabled: true}, - VcenterVMCPUReadiness: MetricConfig{Enabled: true}, - VcenterVMCPUUsage: MetricConfig{Enabled: true}, - VcenterVMCPUUtilization: MetricConfig{Enabled: true}, - VcenterVMDiskLatencyAvg: MetricConfig{Enabled: true}, - VcenterVMDiskLatencyMax: MetricConfig{Enabled: true}, - VcenterVMDiskThroughput: MetricConfig{Enabled: true}, - VcenterVMDiskUsage: MetricConfig{Enabled: true}, - VcenterVMDiskUtilization: MetricConfig{Enabled: true}, - VcenterVMMemoryBallooned: MetricConfig{Enabled: true}, - VcenterVMMemoryGranted: MetricConfig{Enabled: true}, - VcenterVMMemorySwapped: MetricConfig{Enabled: true}, - VcenterVMMemorySwappedSsd: MetricConfig{Enabled: true}, - VcenterVMMemoryUsage: MetricConfig{Enabled: true}, - VcenterVMMemoryUtilization: MetricConfig{Enabled: true}, - VcenterVMNetworkPacketDropRate: MetricConfig{Enabled: true}, - VcenterVMNetworkPacketRate: MetricConfig{Enabled: true}, - VcenterVMNetworkThroughput: MetricConfig{Enabled: true}, 
- VcenterVMNetworkUsage: MetricConfig{Enabled: true}, - VcenterVMVsanLatencyAvg: MetricConfig{Enabled: true}, - VcenterVMVsanOperations: MetricConfig{Enabled: true}, - VcenterVMVsanThroughput: MetricConfig{Enabled: true}, + VcenterClusterCPUEffective: MetricConfig{Enabled: true}, + VcenterClusterCPULimit: MetricConfig{Enabled: true}, + VcenterClusterHostCount: MetricConfig{Enabled: true}, + VcenterClusterMemoryEffective: MetricConfig{Enabled: true}, + VcenterClusterMemoryLimit: MetricConfig{Enabled: true}, + VcenterClusterVMCount: MetricConfig{Enabled: true}, + VcenterClusterVMTemplateCount: MetricConfig{Enabled: true}, + VcenterClusterVsanCongestions: MetricConfig{Enabled: true}, + VcenterClusterVsanLatencyAvg: MetricConfig{Enabled: true}, + VcenterClusterVsanOperations: MetricConfig{Enabled: true}, + VcenterClusterVsanThroughput: MetricConfig{Enabled: true}, + VcenterDatacenterClusterCount: MetricConfig{Enabled: true}, + VcenterDatacenterCPULimit: MetricConfig{Enabled: true}, + VcenterDatacenterDatastoreCount: MetricConfig{Enabled: true}, + VcenterDatacenterDiskSpace: MetricConfig{Enabled: true}, + VcenterDatacenterHostCount: MetricConfig{Enabled: true}, + VcenterDatacenterMemoryLimit: MetricConfig{Enabled: true}, + VcenterDatacenterVMCount: MetricConfig{Enabled: true}, + VcenterDatastoreDiskUsage: MetricConfig{Enabled: true}, + VcenterDatastoreDiskUtilization: MetricConfig{Enabled: true}, + VcenterHostCPUCapacity: MetricConfig{Enabled: true}, + VcenterHostCPUReserved: MetricConfig{Enabled: true}, + VcenterHostCPUUsage: MetricConfig{Enabled: true}, + VcenterHostCPUUtilization: MetricConfig{Enabled: true}, + VcenterHostDiskLatencyAvg: MetricConfig{Enabled: true}, + VcenterHostDiskLatencyMax: MetricConfig{Enabled: true}, + VcenterHostDiskThroughput: MetricConfig{Enabled: true}, + VcenterHostMemoryCapacity: MetricConfig{Enabled: true}, + VcenterHostMemoryUsage: MetricConfig{Enabled: true}, + VcenterHostMemoryUtilization: MetricConfig{Enabled: true}, + 
VcenterHostNetworkPacketDropRate: MetricConfig{Enabled: true}, + VcenterHostNetworkPacketErrorRate: MetricConfig{Enabled: true}, + VcenterHostNetworkPacketRate: MetricConfig{Enabled: true}, + VcenterHostNetworkThroughput: MetricConfig{Enabled: true}, + VcenterHostNetworkUsage: MetricConfig{Enabled: true}, + VcenterHostVsanCacheHitRate: MetricConfig{Enabled: true}, + VcenterHostVsanCongestions: MetricConfig{Enabled: true}, + VcenterHostVsanLatencyAvg: MetricConfig{Enabled: true}, + VcenterHostVsanOperations: MetricConfig{Enabled: true}, + VcenterHostVsanThroughput: MetricConfig{Enabled: true}, + VcenterResourcePoolCPUShares: MetricConfig{Enabled: true}, + VcenterResourcePoolCPUUsage: MetricConfig{Enabled: true}, + VcenterResourcePoolMemoryBallooned: MetricConfig{Enabled: true}, + VcenterResourcePoolMemoryGranted: MetricConfig{Enabled: true}, + VcenterResourcePoolMemoryShares: MetricConfig{Enabled: true}, + VcenterResourcePoolMemorySwapped: MetricConfig{Enabled: true}, + VcenterResourcePoolMemoryUsage: MetricConfig{Enabled: true}, + VcenterVMCPUReadiness: MetricConfig{Enabled: true}, + VcenterVMCPUTime: MetricConfig{Enabled: true}, + VcenterVMCPUUsage: MetricConfig{Enabled: true}, + VcenterVMCPUUtilization: MetricConfig{Enabled: true}, + VcenterVMDiskLatencyAvg: MetricConfig{Enabled: true}, + VcenterVMDiskLatencyMax: MetricConfig{Enabled: true}, + VcenterVMDiskThroughput: MetricConfig{Enabled: true}, + VcenterVMDiskUsage: MetricConfig{Enabled: true}, + VcenterVMDiskUtilization: MetricConfig{Enabled: true}, + VcenterVMMemoryBallooned: MetricConfig{Enabled: true}, + VcenterVMMemoryGranted: MetricConfig{Enabled: true}, + VcenterVMMemorySwapped: MetricConfig{Enabled: true}, + VcenterVMMemorySwappedSsd: MetricConfig{Enabled: true}, + VcenterVMMemoryUsage: MetricConfig{Enabled: true}, + VcenterVMMemoryUtilization: MetricConfig{Enabled: true}, + VcenterVMNetworkBroadcastPacketRate: MetricConfig{Enabled: true}, + VcenterVMNetworkMulticastPacketRate: MetricConfig{Enabled: 
true}, + VcenterVMNetworkPacketDropRate: MetricConfig{Enabled: true}, + VcenterVMNetworkPacketRate: MetricConfig{Enabled: true}, + VcenterVMNetworkThroughput: MetricConfig{Enabled: true}, + VcenterVMNetworkUsage: MetricConfig{Enabled: true}, + VcenterVMVsanLatencyAvg: MetricConfig{Enabled: true}, + VcenterVMVsanOperations: MetricConfig{Enabled: true}, + VcenterVMVsanThroughput: MetricConfig{Enabled: true}, }, ResourceAttributes: ResourceAttributesConfig{ VcenterClusterName: ResourceAttributeConfig{Enabled: true}, @@ -114,74 +117,77 @@ func TestMetricsBuilderConfig(t *testing.T) { name: "none_set", want: MetricsBuilderConfig{ Metrics: MetricsConfig{ - VcenterClusterCPUEffective: MetricConfig{Enabled: false}, - VcenterClusterCPULimit: MetricConfig{Enabled: false}, - VcenterClusterHostCount: MetricConfig{Enabled: false}, - VcenterClusterMemoryEffective: MetricConfig{Enabled: false}, - VcenterClusterMemoryLimit: MetricConfig{Enabled: false}, - VcenterClusterVMCount: MetricConfig{Enabled: false}, - VcenterClusterVMTemplateCount: MetricConfig{Enabled: false}, - VcenterClusterVsanCongestions: MetricConfig{Enabled: false}, - VcenterClusterVsanLatencyAvg: MetricConfig{Enabled: false}, - VcenterClusterVsanOperations: MetricConfig{Enabled: false}, - VcenterClusterVsanThroughput: MetricConfig{Enabled: false}, - VcenterDatacenterClusterCount: MetricConfig{Enabled: false}, - VcenterDatacenterCPULimit: MetricConfig{Enabled: false}, - VcenterDatacenterDatastoreCount: MetricConfig{Enabled: false}, - VcenterDatacenterDiskSpace: MetricConfig{Enabled: false}, - VcenterDatacenterHostCount: MetricConfig{Enabled: false}, - VcenterDatacenterMemoryLimit: MetricConfig{Enabled: false}, - VcenterDatacenterVMCount: MetricConfig{Enabled: false}, - VcenterDatastoreDiskUsage: MetricConfig{Enabled: false}, - VcenterDatastoreDiskUtilization: MetricConfig{Enabled: false}, - VcenterHostCPUCapacity: MetricConfig{Enabled: false}, - VcenterHostCPUReserved: MetricConfig{Enabled: false}, - 
VcenterHostCPUUsage: MetricConfig{Enabled: false}, - VcenterHostCPUUtilization: MetricConfig{Enabled: false}, - VcenterHostDiskLatencyAvg: MetricConfig{Enabled: false}, - VcenterHostDiskLatencyMax: MetricConfig{Enabled: false}, - VcenterHostDiskThroughput: MetricConfig{Enabled: false}, - VcenterHostMemoryCapacity: MetricConfig{Enabled: false}, - VcenterHostMemoryUsage: MetricConfig{Enabled: false}, - VcenterHostMemoryUtilization: MetricConfig{Enabled: false}, - VcenterHostNetworkPacketDropRate: MetricConfig{Enabled: false}, - VcenterHostNetworkPacketErrorRate: MetricConfig{Enabled: false}, - VcenterHostNetworkPacketRate: MetricConfig{Enabled: false}, - VcenterHostNetworkThroughput: MetricConfig{Enabled: false}, - VcenterHostNetworkUsage: MetricConfig{Enabled: false}, - VcenterHostVsanCacheHitRate: MetricConfig{Enabled: false}, - VcenterHostVsanCongestions: MetricConfig{Enabled: false}, - VcenterHostVsanLatencyAvg: MetricConfig{Enabled: false}, - VcenterHostVsanOperations: MetricConfig{Enabled: false}, - VcenterHostVsanThroughput: MetricConfig{Enabled: false}, - VcenterResourcePoolCPUShares: MetricConfig{Enabled: false}, - VcenterResourcePoolCPUUsage: MetricConfig{Enabled: false}, - VcenterResourcePoolMemoryBallooned: MetricConfig{Enabled: false}, - VcenterResourcePoolMemoryGranted: MetricConfig{Enabled: false}, - VcenterResourcePoolMemoryShares: MetricConfig{Enabled: false}, - VcenterResourcePoolMemorySwapped: MetricConfig{Enabled: false}, - VcenterResourcePoolMemoryUsage: MetricConfig{Enabled: false}, - VcenterVMCPUReadiness: MetricConfig{Enabled: false}, - VcenterVMCPUUsage: MetricConfig{Enabled: false}, - VcenterVMCPUUtilization: MetricConfig{Enabled: false}, - VcenterVMDiskLatencyAvg: MetricConfig{Enabled: false}, - VcenterVMDiskLatencyMax: MetricConfig{Enabled: false}, - VcenterVMDiskThroughput: MetricConfig{Enabled: false}, - VcenterVMDiskUsage: MetricConfig{Enabled: false}, - VcenterVMDiskUtilization: MetricConfig{Enabled: false}, - VcenterVMMemoryBallooned: 
MetricConfig{Enabled: false}, - VcenterVMMemoryGranted: MetricConfig{Enabled: false}, - VcenterVMMemorySwapped: MetricConfig{Enabled: false}, - VcenterVMMemorySwappedSsd: MetricConfig{Enabled: false}, - VcenterVMMemoryUsage: MetricConfig{Enabled: false}, - VcenterVMMemoryUtilization: MetricConfig{Enabled: false}, - VcenterVMNetworkPacketDropRate: MetricConfig{Enabled: false}, - VcenterVMNetworkPacketRate: MetricConfig{Enabled: false}, - VcenterVMNetworkThroughput: MetricConfig{Enabled: false}, - VcenterVMNetworkUsage: MetricConfig{Enabled: false}, - VcenterVMVsanLatencyAvg: MetricConfig{Enabled: false}, - VcenterVMVsanOperations: MetricConfig{Enabled: false}, - VcenterVMVsanThroughput: MetricConfig{Enabled: false}, + VcenterClusterCPUEffective: MetricConfig{Enabled: false}, + VcenterClusterCPULimit: MetricConfig{Enabled: false}, + VcenterClusterHostCount: MetricConfig{Enabled: false}, + VcenterClusterMemoryEffective: MetricConfig{Enabled: false}, + VcenterClusterMemoryLimit: MetricConfig{Enabled: false}, + VcenterClusterVMCount: MetricConfig{Enabled: false}, + VcenterClusterVMTemplateCount: MetricConfig{Enabled: false}, + VcenterClusterVsanCongestions: MetricConfig{Enabled: false}, + VcenterClusterVsanLatencyAvg: MetricConfig{Enabled: false}, + VcenterClusterVsanOperations: MetricConfig{Enabled: false}, + VcenterClusterVsanThroughput: MetricConfig{Enabled: false}, + VcenterDatacenterClusterCount: MetricConfig{Enabled: false}, + VcenterDatacenterCPULimit: MetricConfig{Enabled: false}, + VcenterDatacenterDatastoreCount: MetricConfig{Enabled: false}, + VcenterDatacenterDiskSpace: MetricConfig{Enabled: false}, + VcenterDatacenterHostCount: MetricConfig{Enabled: false}, + VcenterDatacenterMemoryLimit: MetricConfig{Enabled: false}, + VcenterDatacenterVMCount: MetricConfig{Enabled: false}, + VcenterDatastoreDiskUsage: MetricConfig{Enabled: false}, + VcenterDatastoreDiskUtilization: MetricConfig{Enabled: false}, + VcenterHostCPUCapacity: MetricConfig{Enabled: false}, + 
VcenterHostCPUReserved: MetricConfig{Enabled: false}, + VcenterHostCPUUsage: MetricConfig{Enabled: false}, + VcenterHostCPUUtilization: MetricConfig{Enabled: false}, + VcenterHostDiskLatencyAvg: MetricConfig{Enabled: false}, + VcenterHostDiskLatencyMax: MetricConfig{Enabled: false}, + VcenterHostDiskThroughput: MetricConfig{Enabled: false}, + VcenterHostMemoryCapacity: MetricConfig{Enabled: false}, + VcenterHostMemoryUsage: MetricConfig{Enabled: false}, + VcenterHostMemoryUtilization: MetricConfig{Enabled: false}, + VcenterHostNetworkPacketDropRate: MetricConfig{Enabled: false}, + VcenterHostNetworkPacketErrorRate: MetricConfig{Enabled: false}, + VcenterHostNetworkPacketRate: MetricConfig{Enabled: false}, + VcenterHostNetworkThroughput: MetricConfig{Enabled: false}, + VcenterHostNetworkUsage: MetricConfig{Enabled: false}, + VcenterHostVsanCacheHitRate: MetricConfig{Enabled: false}, + VcenterHostVsanCongestions: MetricConfig{Enabled: false}, + VcenterHostVsanLatencyAvg: MetricConfig{Enabled: false}, + VcenterHostVsanOperations: MetricConfig{Enabled: false}, + VcenterHostVsanThroughput: MetricConfig{Enabled: false}, + VcenterResourcePoolCPUShares: MetricConfig{Enabled: false}, + VcenterResourcePoolCPUUsage: MetricConfig{Enabled: false}, + VcenterResourcePoolMemoryBallooned: MetricConfig{Enabled: false}, + VcenterResourcePoolMemoryGranted: MetricConfig{Enabled: false}, + VcenterResourcePoolMemoryShares: MetricConfig{Enabled: false}, + VcenterResourcePoolMemorySwapped: MetricConfig{Enabled: false}, + VcenterResourcePoolMemoryUsage: MetricConfig{Enabled: false}, + VcenterVMCPUReadiness: MetricConfig{Enabled: false}, + VcenterVMCPUTime: MetricConfig{Enabled: false}, + VcenterVMCPUUsage: MetricConfig{Enabled: false}, + VcenterVMCPUUtilization: MetricConfig{Enabled: false}, + VcenterVMDiskLatencyAvg: MetricConfig{Enabled: false}, + VcenterVMDiskLatencyMax: MetricConfig{Enabled: false}, + VcenterVMDiskThroughput: MetricConfig{Enabled: false}, + VcenterVMDiskUsage: 
MetricConfig{Enabled: false}, + VcenterVMDiskUtilization: MetricConfig{Enabled: false}, + VcenterVMMemoryBallooned: MetricConfig{Enabled: false}, + VcenterVMMemoryGranted: MetricConfig{Enabled: false}, + VcenterVMMemorySwapped: MetricConfig{Enabled: false}, + VcenterVMMemorySwappedSsd: MetricConfig{Enabled: false}, + VcenterVMMemoryUsage: MetricConfig{Enabled: false}, + VcenterVMMemoryUtilization: MetricConfig{Enabled: false}, + VcenterVMNetworkBroadcastPacketRate: MetricConfig{Enabled: false}, + VcenterVMNetworkMulticastPacketRate: MetricConfig{Enabled: false}, + VcenterVMNetworkPacketDropRate: MetricConfig{Enabled: false}, + VcenterVMNetworkPacketRate: MetricConfig{Enabled: false}, + VcenterVMNetworkThroughput: MetricConfig{Enabled: false}, + VcenterVMNetworkUsage: MetricConfig{Enabled: false}, + VcenterVMVsanLatencyAvg: MetricConfig{Enabled: false}, + VcenterVMVsanOperations: MetricConfig{Enabled: false}, + VcenterVMVsanThroughput: MetricConfig{Enabled: false}, }, ResourceAttributes: ResourceAttributesConfig{ VcenterClusterName: ResourceAttributeConfig{Enabled: false}, diff --git a/receiver/vcenterreceiver/internal/metadata/generated_metrics.go b/receiver/vcenterreceiver/internal/metadata/generated_metrics.go index 3a31d62572d8..daf778442292 100644 --- a/receiver/vcenterreceiver/internal/metadata/generated_metrics.go +++ b/receiver/vcenterreceiver/internal/metadata/generated_metrics.go @@ -38,6 +38,36 @@ var MapAttributeCPUReservationType = map[string]AttributeCPUReservationType{ "used": AttributeCPUReservationTypeUsed, } +// AttributeCPUState specifies the value cpu_state attribute. +type AttributeCPUState int + +const ( + _ AttributeCPUState = iota + AttributeCPUStateIdle + AttributeCPUStateReady + AttributeCPUStateWait +) + +// String returns the string representation of the AttributeCPUState. 
+func (av AttributeCPUState) String() string { + switch av { + case AttributeCPUStateIdle: + return "idle" + case AttributeCPUStateReady: + return "ready" + case AttributeCPUStateWait: + return "wait" + } + return "" +} + +// MapAttributeCPUState is a helper map of string to AttributeCPUState attribute value. +var MapAttributeCPUState = map[string]AttributeCPUState{ + "idle": AttributeCPUStateIdle, + "ready": AttributeCPUStateReady, + "wait": AttributeCPUStateWait, +} + // AttributeDiskDirection specifies the value disk_direction attribute. type AttributeDiskDirection int @@ -2850,6 +2880,58 @@ func newMetricVcenterVMCPUReadiness(cfg MetricConfig) metricVcenterVMCPUReadines return m } +type metricVcenterVMCPUTime struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills vcenter.vm.cpu.time metric with initial data. +func (m *metricVcenterVMCPUTime) init() { + m.data.SetName("vcenter.vm.cpu.time") + m.data.SetDescription("CPU time spent in idle, ready or wait state.") + m.data.SetUnit("%") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricVcenterVMCPUTime) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, cpuStateAttributeValue string, objectNameAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetDoubleValue(val) + dp.Attributes().PutStr("cpu_state", cpuStateAttributeValue) + dp.Attributes().PutStr("object", objectNameAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. 
+func (m *metricVcenterVMCPUTime) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricVcenterVMCPUTime) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricVcenterVMCPUTime(cfg MetricConfig) metricVcenterVMCPUTime { + m := metricVcenterVMCPUTime{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + type metricVcenterVMCPUUsage struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. @@ -3512,6 +3594,110 @@ func newMetricVcenterVMMemoryUtilization(cfg MetricConfig) metricVcenterVMMemory return m } +type metricVcenterVMNetworkBroadcastPacketRate struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills vcenter.vm.network.broadcast.packet.rate metric with initial data. 
+func (m *metricVcenterVMNetworkBroadcastPacketRate) init() { + m.data.SetName("vcenter.vm.network.broadcast.packet.rate") + m.data.SetDescription("The rate of broadcast packets transmitted or received by each vNIC (virtual network interface controller) on the virtual machine.") + m.data.SetUnit("{packets/s}") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricVcenterVMNetworkBroadcastPacketRate) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, throughputDirectionAttributeValue string, objectNameAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetDoubleValue(val) + dp.Attributes().PutStr("direction", throughputDirectionAttributeValue) + dp.Attributes().PutStr("object", objectNameAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricVcenterVMNetworkBroadcastPacketRate) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricVcenterVMNetworkBroadcastPacketRate) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricVcenterVMNetworkBroadcastPacketRate(cfg MetricConfig) metricVcenterVMNetworkBroadcastPacketRate { + m := metricVcenterVMNetworkBroadcastPacketRate{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricVcenterVMNetworkMulticastPacketRate struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. 
+ capacity int // max observed number of data points added to the metric. +} + +// init fills vcenter.vm.network.multicast.packet.rate metric with initial data. +func (m *metricVcenterVMNetworkMulticastPacketRate) init() { + m.data.SetName("vcenter.vm.network.multicast.packet.rate") + m.data.SetDescription("The rate of multicast packets transmitted or received by each vNIC (virtual network interface controller) on the virtual machine.") + m.data.SetUnit("{packets/s}") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricVcenterVMNetworkMulticastPacketRate) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, throughputDirectionAttributeValue string, objectNameAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetDoubleValue(val) + dp.Attributes().PutStr("direction", throughputDirectionAttributeValue) + dp.Attributes().PutStr("object", objectNameAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricVcenterVMNetworkMulticastPacketRate) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
+func (m *metricVcenterVMNetworkMulticastPacketRate) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricVcenterVMNetworkMulticastPacketRate(cfg MetricConfig) metricVcenterVMNetworkMulticastPacketRate { + m := metricVcenterVMNetworkMulticastPacketRate{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + type metricVcenterVMNetworkPacketDropRate struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. @@ -3879,81 +4065,84 @@ func newMetricVcenterVMVsanThroughput(cfg MetricConfig) metricVcenterVMVsanThrou // MetricsBuilder provides an interface for scrapers to report metrics while taking care of all the transformations // required to produce metric representation defined in metadata and user config. type MetricsBuilder struct { - config MetricsBuilderConfig // config of the metrics builder. - startTime pcommon.Timestamp // start time that will be applied to all recorded data points. - metricsCapacity int // maximum observed number of metrics per resource. - metricsBuffer pmetric.Metrics // accumulates metrics data before emitting. - buildInfo component.BuildInfo // contains version information. 
- resourceAttributeIncludeFilter map[string]filter.Filter - resourceAttributeExcludeFilter map[string]filter.Filter - metricVcenterClusterCPUEffective metricVcenterClusterCPUEffective - metricVcenterClusterCPULimit metricVcenterClusterCPULimit - metricVcenterClusterHostCount metricVcenterClusterHostCount - metricVcenterClusterMemoryEffective metricVcenterClusterMemoryEffective - metricVcenterClusterMemoryLimit metricVcenterClusterMemoryLimit - metricVcenterClusterVMCount metricVcenterClusterVMCount - metricVcenterClusterVMTemplateCount metricVcenterClusterVMTemplateCount - metricVcenterClusterVsanCongestions metricVcenterClusterVsanCongestions - metricVcenterClusterVsanLatencyAvg metricVcenterClusterVsanLatencyAvg - metricVcenterClusterVsanOperations metricVcenterClusterVsanOperations - metricVcenterClusterVsanThroughput metricVcenterClusterVsanThroughput - metricVcenterDatacenterClusterCount metricVcenterDatacenterClusterCount - metricVcenterDatacenterCPULimit metricVcenterDatacenterCPULimit - metricVcenterDatacenterDatastoreCount metricVcenterDatacenterDatastoreCount - metricVcenterDatacenterDiskSpace metricVcenterDatacenterDiskSpace - metricVcenterDatacenterHostCount metricVcenterDatacenterHostCount - metricVcenterDatacenterMemoryLimit metricVcenterDatacenterMemoryLimit - metricVcenterDatacenterVMCount metricVcenterDatacenterVMCount - metricVcenterDatastoreDiskUsage metricVcenterDatastoreDiskUsage - metricVcenterDatastoreDiskUtilization metricVcenterDatastoreDiskUtilization - metricVcenterHostCPUCapacity metricVcenterHostCPUCapacity - metricVcenterHostCPUReserved metricVcenterHostCPUReserved - metricVcenterHostCPUUsage metricVcenterHostCPUUsage - metricVcenterHostCPUUtilization metricVcenterHostCPUUtilization - metricVcenterHostDiskLatencyAvg metricVcenterHostDiskLatencyAvg - metricVcenterHostDiskLatencyMax metricVcenterHostDiskLatencyMax - metricVcenterHostDiskThroughput metricVcenterHostDiskThroughput - metricVcenterHostMemoryCapacity 
metricVcenterHostMemoryCapacity - metricVcenterHostMemoryUsage metricVcenterHostMemoryUsage - metricVcenterHostMemoryUtilization metricVcenterHostMemoryUtilization - metricVcenterHostNetworkPacketDropRate metricVcenterHostNetworkPacketDropRate - metricVcenterHostNetworkPacketErrorRate metricVcenterHostNetworkPacketErrorRate - metricVcenterHostNetworkPacketRate metricVcenterHostNetworkPacketRate - metricVcenterHostNetworkThroughput metricVcenterHostNetworkThroughput - metricVcenterHostNetworkUsage metricVcenterHostNetworkUsage - metricVcenterHostVsanCacheHitRate metricVcenterHostVsanCacheHitRate - metricVcenterHostVsanCongestions metricVcenterHostVsanCongestions - metricVcenterHostVsanLatencyAvg metricVcenterHostVsanLatencyAvg - metricVcenterHostVsanOperations metricVcenterHostVsanOperations - metricVcenterHostVsanThroughput metricVcenterHostVsanThroughput - metricVcenterResourcePoolCPUShares metricVcenterResourcePoolCPUShares - metricVcenterResourcePoolCPUUsage metricVcenterResourcePoolCPUUsage - metricVcenterResourcePoolMemoryBallooned metricVcenterResourcePoolMemoryBallooned - metricVcenterResourcePoolMemoryGranted metricVcenterResourcePoolMemoryGranted - metricVcenterResourcePoolMemoryShares metricVcenterResourcePoolMemoryShares - metricVcenterResourcePoolMemorySwapped metricVcenterResourcePoolMemorySwapped - metricVcenterResourcePoolMemoryUsage metricVcenterResourcePoolMemoryUsage - metricVcenterVMCPUReadiness metricVcenterVMCPUReadiness - metricVcenterVMCPUUsage metricVcenterVMCPUUsage - metricVcenterVMCPUUtilization metricVcenterVMCPUUtilization - metricVcenterVMDiskLatencyAvg metricVcenterVMDiskLatencyAvg - metricVcenterVMDiskLatencyMax metricVcenterVMDiskLatencyMax - metricVcenterVMDiskThroughput metricVcenterVMDiskThroughput - metricVcenterVMDiskUsage metricVcenterVMDiskUsage - metricVcenterVMDiskUtilization metricVcenterVMDiskUtilization - metricVcenterVMMemoryBallooned metricVcenterVMMemoryBallooned - metricVcenterVMMemoryGranted 
metricVcenterVMMemoryGranted - metricVcenterVMMemorySwapped metricVcenterVMMemorySwapped - metricVcenterVMMemorySwappedSsd metricVcenterVMMemorySwappedSsd - metricVcenterVMMemoryUsage metricVcenterVMMemoryUsage - metricVcenterVMMemoryUtilization metricVcenterVMMemoryUtilization - metricVcenterVMNetworkPacketDropRate metricVcenterVMNetworkPacketDropRate - metricVcenterVMNetworkPacketRate metricVcenterVMNetworkPacketRate - metricVcenterVMNetworkThroughput metricVcenterVMNetworkThroughput - metricVcenterVMNetworkUsage metricVcenterVMNetworkUsage - metricVcenterVMVsanLatencyAvg metricVcenterVMVsanLatencyAvg - metricVcenterVMVsanOperations metricVcenterVMVsanOperations - metricVcenterVMVsanThroughput metricVcenterVMVsanThroughput + config MetricsBuilderConfig // config of the metrics builder. + startTime pcommon.Timestamp // start time that will be applied to all recorded data points. + metricsCapacity int // maximum observed number of metrics per resource. + metricsBuffer pmetric.Metrics // accumulates metrics data before emitting. + buildInfo component.BuildInfo // contains version information. 
+ resourceAttributeIncludeFilter map[string]filter.Filter + resourceAttributeExcludeFilter map[string]filter.Filter + metricVcenterClusterCPUEffective metricVcenterClusterCPUEffective + metricVcenterClusterCPULimit metricVcenterClusterCPULimit + metricVcenterClusterHostCount metricVcenterClusterHostCount + metricVcenterClusterMemoryEffective metricVcenterClusterMemoryEffective + metricVcenterClusterMemoryLimit metricVcenterClusterMemoryLimit + metricVcenterClusterVMCount metricVcenterClusterVMCount + metricVcenterClusterVMTemplateCount metricVcenterClusterVMTemplateCount + metricVcenterClusterVsanCongestions metricVcenterClusterVsanCongestions + metricVcenterClusterVsanLatencyAvg metricVcenterClusterVsanLatencyAvg + metricVcenterClusterVsanOperations metricVcenterClusterVsanOperations + metricVcenterClusterVsanThroughput metricVcenterClusterVsanThroughput + metricVcenterDatacenterClusterCount metricVcenterDatacenterClusterCount + metricVcenterDatacenterCPULimit metricVcenterDatacenterCPULimit + metricVcenterDatacenterDatastoreCount metricVcenterDatacenterDatastoreCount + metricVcenterDatacenterDiskSpace metricVcenterDatacenterDiskSpace + metricVcenterDatacenterHostCount metricVcenterDatacenterHostCount + metricVcenterDatacenterMemoryLimit metricVcenterDatacenterMemoryLimit + metricVcenterDatacenterVMCount metricVcenterDatacenterVMCount + metricVcenterDatastoreDiskUsage metricVcenterDatastoreDiskUsage + metricVcenterDatastoreDiskUtilization metricVcenterDatastoreDiskUtilization + metricVcenterHostCPUCapacity metricVcenterHostCPUCapacity + metricVcenterHostCPUReserved metricVcenterHostCPUReserved + metricVcenterHostCPUUsage metricVcenterHostCPUUsage + metricVcenterHostCPUUtilization metricVcenterHostCPUUtilization + metricVcenterHostDiskLatencyAvg metricVcenterHostDiskLatencyAvg + metricVcenterHostDiskLatencyMax metricVcenterHostDiskLatencyMax + metricVcenterHostDiskThroughput metricVcenterHostDiskThroughput + metricVcenterHostMemoryCapacity 
metricVcenterHostMemoryCapacity + metricVcenterHostMemoryUsage metricVcenterHostMemoryUsage + metricVcenterHostMemoryUtilization metricVcenterHostMemoryUtilization + metricVcenterHostNetworkPacketDropRate metricVcenterHostNetworkPacketDropRate + metricVcenterHostNetworkPacketErrorRate metricVcenterHostNetworkPacketErrorRate + metricVcenterHostNetworkPacketRate metricVcenterHostNetworkPacketRate + metricVcenterHostNetworkThroughput metricVcenterHostNetworkThroughput + metricVcenterHostNetworkUsage metricVcenterHostNetworkUsage + metricVcenterHostVsanCacheHitRate metricVcenterHostVsanCacheHitRate + metricVcenterHostVsanCongestions metricVcenterHostVsanCongestions + metricVcenterHostVsanLatencyAvg metricVcenterHostVsanLatencyAvg + metricVcenterHostVsanOperations metricVcenterHostVsanOperations + metricVcenterHostVsanThroughput metricVcenterHostVsanThroughput + metricVcenterResourcePoolCPUShares metricVcenterResourcePoolCPUShares + metricVcenterResourcePoolCPUUsage metricVcenterResourcePoolCPUUsage + metricVcenterResourcePoolMemoryBallooned metricVcenterResourcePoolMemoryBallooned + metricVcenterResourcePoolMemoryGranted metricVcenterResourcePoolMemoryGranted + metricVcenterResourcePoolMemoryShares metricVcenterResourcePoolMemoryShares + metricVcenterResourcePoolMemorySwapped metricVcenterResourcePoolMemorySwapped + metricVcenterResourcePoolMemoryUsage metricVcenterResourcePoolMemoryUsage + metricVcenterVMCPUReadiness metricVcenterVMCPUReadiness + metricVcenterVMCPUTime metricVcenterVMCPUTime + metricVcenterVMCPUUsage metricVcenterVMCPUUsage + metricVcenterVMCPUUtilization metricVcenterVMCPUUtilization + metricVcenterVMDiskLatencyAvg metricVcenterVMDiskLatencyAvg + metricVcenterVMDiskLatencyMax metricVcenterVMDiskLatencyMax + metricVcenterVMDiskThroughput metricVcenterVMDiskThroughput + metricVcenterVMDiskUsage metricVcenterVMDiskUsage + metricVcenterVMDiskUtilization metricVcenterVMDiskUtilization + metricVcenterVMMemoryBallooned metricVcenterVMMemoryBallooned + 
metricVcenterVMMemoryGranted metricVcenterVMMemoryGranted + metricVcenterVMMemorySwapped metricVcenterVMMemorySwapped + metricVcenterVMMemorySwappedSsd metricVcenterVMMemorySwappedSsd + metricVcenterVMMemoryUsage metricVcenterVMMemoryUsage + metricVcenterVMMemoryUtilization metricVcenterVMMemoryUtilization + metricVcenterVMNetworkBroadcastPacketRate metricVcenterVMNetworkBroadcastPacketRate + metricVcenterVMNetworkMulticastPacketRate metricVcenterVMNetworkMulticastPacketRate + metricVcenterVMNetworkPacketDropRate metricVcenterVMNetworkPacketDropRate + metricVcenterVMNetworkPacketRate metricVcenterVMNetworkPacketRate + metricVcenterVMNetworkThroughput metricVcenterVMNetworkThroughput + metricVcenterVMNetworkUsage metricVcenterVMNetworkUsage + metricVcenterVMVsanLatencyAvg metricVcenterVMVsanLatencyAvg + metricVcenterVMVsanOperations metricVcenterVMVsanOperations + metricVcenterVMVsanThroughput metricVcenterVMVsanThroughput } // MetricBuilderOption applies changes to default metrics builder. 
@@ -3975,80 +4164,83 @@ func WithStartTime(startTime pcommon.Timestamp) MetricBuilderOption { } func NewMetricsBuilder(mbc MetricsBuilderConfig, settings receiver.Settings, options ...MetricBuilderOption) *MetricsBuilder { mb := &MetricsBuilder{ - config: mbc, - startTime: pcommon.NewTimestampFromTime(time.Now()), - metricsBuffer: pmetric.NewMetrics(), - buildInfo: settings.BuildInfo, - metricVcenterClusterCPUEffective: newMetricVcenterClusterCPUEffective(mbc.Metrics.VcenterClusterCPUEffective), - metricVcenterClusterCPULimit: newMetricVcenterClusterCPULimit(mbc.Metrics.VcenterClusterCPULimit), - metricVcenterClusterHostCount: newMetricVcenterClusterHostCount(mbc.Metrics.VcenterClusterHostCount), - metricVcenterClusterMemoryEffective: newMetricVcenterClusterMemoryEffective(mbc.Metrics.VcenterClusterMemoryEffective), - metricVcenterClusterMemoryLimit: newMetricVcenterClusterMemoryLimit(mbc.Metrics.VcenterClusterMemoryLimit), - metricVcenterClusterVMCount: newMetricVcenterClusterVMCount(mbc.Metrics.VcenterClusterVMCount), - metricVcenterClusterVMTemplateCount: newMetricVcenterClusterVMTemplateCount(mbc.Metrics.VcenterClusterVMTemplateCount), - metricVcenterClusterVsanCongestions: newMetricVcenterClusterVsanCongestions(mbc.Metrics.VcenterClusterVsanCongestions), - metricVcenterClusterVsanLatencyAvg: newMetricVcenterClusterVsanLatencyAvg(mbc.Metrics.VcenterClusterVsanLatencyAvg), - metricVcenterClusterVsanOperations: newMetricVcenterClusterVsanOperations(mbc.Metrics.VcenterClusterVsanOperations), - metricVcenterClusterVsanThroughput: newMetricVcenterClusterVsanThroughput(mbc.Metrics.VcenterClusterVsanThroughput), - metricVcenterDatacenterClusterCount: newMetricVcenterDatacenterClusterCount(mbc.Metrics.VcenterDatacenterClusterCount), - metricVcenterDatacenterCPULimit: newMetricVcenterDatacenterCPULimit(mbc.Metrics.VcenterDatacenterCPULimit), - metricVcenterDatacenterDatastoreCount: newMetricVcenterDatacenterDatastoreCount(mbc.Metrics.VcenterDatacenterDatastoreCount), - 
metricVcenterDatacenterDiskSpace: newMetricVcenterDatacenterDiskSpace(mbc.Metrics.VcenterDatacenterDiskSpace), - metricVcenterDatacenterHostCount: newMetricVcenterDatacenterHostCount(mbc.Metrics.VcenterDatacenterHostCount), - metricVcenterDatacenterMemoryLimit: newMetricVcenterDatacenterMemoryLimit(mbc.Metrics.VcenterDatacenterMemoryLimit), - metricVcenterDatacenterVMCount: newMetricVcenterDatacenterVMCount(mbc.Metrics.VcenterDatacenterVMCount), - metricVcenterDatastoreDiskUsage: newMetricVcenterDatastoreDiskUsage(mbc.Metrics.VcenterDatastoreDiskUsage), - metricVcenterDatastoreDiskUtilization: newMetricVcenterDatastoreDiskUtilization(mbc.Metrics.VcenterDatastoreDiskUtilization), - metricVcenterHostCPUCapacity: newMetricVcenterHostCPUCapacity(mbc.Metrics.VcenterHostCPUCapacity), - metricVcenterHostCPUReserved: newMetricVcenterHostCPUReserved(mbc.Metrics.VcenterHostCPUReserved), - metricVcenterHostCPUUsage: newMetricVcenterHostCPUUsage(mbc.Metrics.VcenterHostCPUUsage), - metricVcenterHostCPUUtilization: newMetricVcenterHostCPUUtilization(mbc.Metrics.VcenterHostCPUUtilization), - metricVcenterHostDiskLatencyAvg: newMetricVcenterHostDiskLatencyAvg(mbc.Metrics.VcenterHostDiskLatencyAvg), - metricVcenterHostDiskLatencyMax: newMetricVcenterHostDiskLatencyMax(mbc.Metrics.VcenterHostDiskLatencyMax), - metricVcenterHostDiskThroughput: newMetricVcenterHostDiskThroughput(mbc.Metrics.VcenterHostDiskThroughput), - metricVcenterHostMemoryCapacity: newMetricVcenterHostMemoryCapacity(mbc.Metrics.VcenterHostMemoryCapacity), - metricVcenterHostMemoryUsage: newMetricVcenterHostMemoryUsage(mbc.Metrics.VcenterHostMemoryUsage), - metricVcenterHostMemoryUtilization: newMetricVcenterHostMemoryUtilization(mbc.Metrics.VcenterHostMemoryUtilization), - metricVcenterHostNetworkPacketDropRate: newMetricVcenterHostNetworkPacketDropRate(mbc.Metrics.VcenterHostNetworkPacketDropRate), - metricVcenterHostNetworkPacketErrorRate: 
newMetricVcenterHostNetworkPacketErrorRate(mbc.Metrics.VcenterHostNetworkPacketErrorRate), - metricVcenterHostNetworkPacketRate: newMetricVcenterHostNetworkPacketRate(mbc.Metrics.VcenterHostNetworkPacketRate), - metricVcenterHostNetworkThroughput: newMetricVcenterHostNetworkThroughput(mbc.Metrics.VcenterHostNetworkThroughput), - metricVcenterHostNetworkUsage: newMetricVcenterHostNetworkUsage(mbc.Metrics.VcenterHostNetworkUsage), - metricVcenterHostVsanCacheHitRate: newMetricVcenterHostVsanCacheHitRate(mbc.Metrics.VcenterHostVsanCacheHitRate), - metricVcenterHostVsanCongestions: newMetricVcenterHostVsanCongestions(mbc.Metrics.VcenterHostVsanCongestions), - metricVcenterHostVsanLatencyAvg: newMetricVcenterHostVsanLatencyAvg(mbc.Metrics.VcenterHostVsanLatencyAvg), - metricVcenterHostVsanOperations: newMetricVcenterHostVsanOperations(mbc.Metrics.VcenterHostVsanOperations), - metricVcenterHostVsanThroughput: newMetricVcenterHostVsanThroughput(mbc.Metrics.VcenterHostVsanThroughput), - metricVcenterResourcePoolCPUShares: newMetricVcenterResourcePoolCPUShares(mbc.Metrics.VcenterResourcePoolCPUShares), - metricVcenterResourcePoolCPUUsage: newMetricVcenterResourcePoolCPUUsage(mbc.Metrics.VcenterResourcePoolCPUUsage), - metricVcenterResourcePoolMemoryBallooned: newMetricVcenterResourcePoolMemoryBallooned(mbc.Metrics.VcenterResourcePoolMemoryBallooned), - metricVcenterResourcePoolMemoryGranted: newMetricVcenterResourcePoolMemoryGranted(mbc.Metrics.VcenterResourcePoolMemoryGranted), - metricVcenterResourcePoolMemoryShares: newMetricVcenterResourcePoolMemoryShares(mbc.Metrics.VcenterResourcePoolMemoryShares), - metricVcenterResourcePoolMemorySwapped: newMetricVcenterResourcePoolMemorySwapped(mbc.Metrics.VcenterResourcePoolMemorySwapped), - metricVcenterResourcePoolMemoryUsage: newMetricVcenterResourcePoolMemoryUsage(mbc.Metrics.VcenterResourcePoolMemoryUsage), - metricVcenterVMCPUReadiness: newMetricVcenterVMCPUReadiness(mbc.Metrics.VcenterVMCPUReadiness), - 
metricVcenterVMCPUUsage: newMetricVcenterVMCPUUsage(mbc.Metrics.VcenterVMCPUUsage), - metricVcenterVMCPUUtilization: newMetricVcenterVMCPUUtilization(mbc.Metrics.VcenterVMCPUUtilization), - metricVcenterVMDiskLatencyAvg: newMetricVcenterVMDiskLatencyAvg(mbc.Metrics.VcenterVMDiskLatencyAvg), - metricVcenterVMDiskLatencyMax: newMetricVcenterVMDiskLatencyMax(mbc.Metrics.VcenterVMDiskLatencyMax), - metricVcenterVMDiskThroughput: newMetricVcenterVMDiskThroughput(mbc.Metrics.VcenterVMDiskThroughput), - metricVcenterVMDiskUsage: newMetricVcenterVMDiskUsage(mbc.Metrics.VcenterVMDiskUsage), - metricVcenterVMDiskUtilization: newMetricVcenterVMDiskUtilization(mbc.Metrics.VcenterVMDiskUtilization), - metricVcenterVMMemoryBallooned: newMetricVcenterVMMemoryBallooned(mbc.Metrics.VcenterVMMemoryBallooned), - metricVcenterVMMemoryGranted: newMetricVcenterVMMemoryGranted(mbc.Metrics.VcenterVMMemoryGranted), - metricVcenterVMMemorySwapped: newMetricVcenterVMMemorySwapped(mbc.Metrics.VcenterVMMemorySwapped), - metricVcenterVMMemorySwappedSsd: newMetricVcenterVMMemorySwappedSsd(mbc.Metrics.VcenterVMMemorySwappedSsd), - metricVcenterVMMemoryUsage: newMetricVcenterVMMemoryUsage(mbc.Metrics.VcenterVMMemoryUsage), - metricVcenterVMMemoryUtilization: newMetricVcenterVMMemoryUtilization(mbc.Metrics.VcenterVMMemoryUtilization), - metricVcenterVMNetworkPacketDropRate: newMetricVcenterVMNetworkPacketDropRate(mbc.Metrics.VcenterVMNetworkPacketDropRate), - metricVcenterVMNetworkPacketRate: newMetricVcenterVMNetworkPacketRate(mbc.Metrics.VcenterVMNetworkPacketRate), - metricVcenterVMNetworkThroughput: newMetricVcenterVMNetworkThroughput(mbc.Metrics.VcenterVMNetworkThroughput), - metricVcenterVMNetworkUsage: newMetricVcenterVMNetworkUsage(mbc.Metrics.VcenterVMNetworkUsage), - metricVcenterVMVsanLatencyAvg: newMetricVcenterVMVsanLatencyAvg(mbc.Metrics.VcenterVMVsanLatencyAvg), - metricVcenterVMVsanOperations: newMetricVcenterVMVsanOperations(mbc.Metrics.VcenterVMVsanOperations), - 
metricVcenterVMVsanThroughput: newMetricVcenterVMVsanThroughput(mbc.Metrics.VcenterVMVsanThroughput), - resourceAttributeIncludeFilter: make(map[string]filter.Filter), - resourceAttributeExcludeFilter: make(map[string]filter.Filter), + config: mbc, + startTime: pcommon.NewTimestampFromTime(time.Now()), + metricsBuffer: pmetric.NewMetrics(), + buildInfo: settings.BuildInfo, + metricVcenterClusterCPUEffective: newMetricVcenterClusterCPUEffective(mbc.Metrics.VcenterClusterCPUEffective), + metricVcenterClusterCPULimit: newMetricVcenterClusterCPULimit(mbc.Metrics.VcenterClusterCPULimit), + metricVcenterClusterHostCount: newMetricVcenterClusterHostCount(mbc.Metrics.VcenterClusterHostCount), + metricVcenterClusterMemoryEffective: newMetricVcenterClusterMemoryEffective(mbc.Metrics.VcenterClusterMemoryEffective), + metricVcenterClusterMemoryLimit: newMetricVcenterClusterMemoryLimit(mbc.Metrics.VcenterClusterMemoryLimit), + metricVcenterClusterVMCount: newMetricVcenterClusterVMCount(mbc.Metrics.VcenterClusterVMCount), + metricVcenterClusterVMTemplateCount: newMetricVcenterClusterVMTemplateCount(mbc.Metrics.VcenterClusterVMTemplateCount), + metricVcenterClusterVsanCongestions: newMetricVcenterClusterVsanCongestions(mbc.Metrics.VcenterClusterVsanCongestions), + metricVcenterClusterVsanLatencyAvg: newMetricVcenterClusterVsanLatencyAvg(mbc.Metrics.VcenterClusterVsanLatencyAvg), + metricVcenterClusterVsanOperations: newMetricVcenterClusterVsanOperations(mbc.Metrics.VcenterClusterVsanOperations), + metricVcenterClusterVsanThroughput: newMetricVcenterClusterVsanThroughput(mbc.Metrics.VcenterClusterVsanThroughput), + metricVcenterDatacenterClusterCount: newMetricVcenterDatacenterClusterCount(mbc.Metrics.VcenterDatacenterClusterCount), + metricVcenterDatacenterCPULimit: newMetricVcenterDatacenterCPULimit(mbc.Metrics.VcenterDatacenterCPULimit), + metricVcenterDatacenterDatastoreCount: newMetricVcenterDatacenterDatastoreCount(mbc.Metrics.VcenterDatacenterDatastoreCount), + 
metricVcenterDatacenterDiskSpace: newMetricVcenterDatacenterDiskSpace(mbc.Metrics.VcenterDatacenterDiskSpace), + metricVcenterDatacenterHostCount: newMetricVcenterDatacenterHostCount(mbc.Metrics.VcenterDatacenterHostCount), + metricVcenterDatacenterMemoryLimit: newMetricVcenterDatacenterMemoryLimit(mbc.Metrics.VcenterDatacenterMemoryLimit), + metricVcenterDatacenterVMCount: newMetricVcenterDatacenterVMCount(mbc.Metrics.VcenterDatacenterVMCount), + metricVcenterDatastoreDiskUsage: newMetricVcenterDatastoreDiskUsage(mbc.Metrics.VcenterDatastoreDiskUsage), + metricVcenterDatastoreDiskUtilization: newMetricVcenterDatastoreDiskUtilization(mbc.Metrics.VcenterDatastoreDiskUtilization), + metricVcenterHostCPUCapacity: newMetricVcenterHostCPUCapacity(mbc.Metrics.VcenterHostCPUCapacity), + metricVcenterHostCPUReserved: newMetricVcenterHostCPUReserved(mbc.Metrics.VcenterHostCPUReserved), + metricVcenterHostCPUUsage: newMetricVcenterHostCPUUsage(mbc.Metrics.VcenterHostCPUUsage), + metricVcenterHostCPUUtilization: newMetricVcenterHostCPUUtilization(mbc.Metrics.VcenterHostCPUUtilization), + metricVcenterHostDiskLatencyAvg: newMetricVcenterHostDiskLatencyAvg(mbc.Metrics.VcenterHostDiskLatencyAvg), + metricVcenterHostDiskLatencyMax: newMetricVcenterHostDiskLatencyMax(mbc.Metrics.VcenterHostDiskLatencyMax), + metricVcenterHostDiskThroughput: newMetricVcenterHostDiskThroughput(mbc.Metrics.VcenterHostDiskThroughput), + metricVcenterHostMemoryCapacity: newMetricVcenterHostMemoryCapacity(mbc.Metrics.VcenterHostMemoryCapacity), + metricVcenterHostMemoryUsage: newMetricVcenterHostMemoryUsage(mbc.Metrics.VcenterHostMemoryUsage), + metricVcenterHostMemoryUtilization: newMetricVcenterHostMemoryUtilization(mbc.Metrics.VcenterHostMemoryUtilization), + metricVcenterHostNetworkPacketDropRate: newMetricVcenterHostNetworkPacketDropRate(mbc.Metrics.VcenterHostNetworkPacketDropRate), + metricVcenterHostNetworkPacketErrorRate: 
newMetricVcenterHostNetworkPacketErrorRate(mbc.Metrics.VcenterHostNetworkPacketErrorRate), + metricVcenterHostNetworkPacketRate: newMetricVcenterHostNetworkPacketRate(mbc.Metrics.VcenterHostNetworkPacketRate), + metricVcenterHostNetworkThroughput: newMetricVcenterHostNetworkThroughput(mbc.Metrics.VcenterHostNetworkThroughput), + metricVcenterHostNetworkUsage: newMetricVcenterHostNetworkUsage(mbc.Metrics.VcenterHostNetworkUsage), + metricVcenterHostVsanCacheHitRate: newMetricVcenterHostVsanCacheHitRate(mbc.Metrics.VcenterHostVsanCacheHitRate), + metricVcenterHostVsanCongestions: newMetricVcenterHostVsanCongestions(mbc.Metrics.VcenterHostVsanCongestions), + metricVcenterHostVsanLatencyAvg: newMetricVcenterHostVsanLatencyAvg(mbc.Metrics.VcenterHostVsanLatencyAvg), + metricVcenterHostVsanOperations: newMetricVcenterHostVsanOperations(mbc.Metrics.VcenterHostVsanOperations), + metricVcenterHostVsanThroughput: newMetricVcenterHostVsanThroughput(mbc.Metrics.VcenterHostVsanThroughput), + metricVcenterResourcePoolCPUShares: newMetricVcenterResourcePoolCPUShares(mbc.Metrics.VcenterResourcePoolCPUShares), + metricVcenterResourcePoolCPUUsage: newMetricVcenterResourcePoolCPUUsage(mbc.Metrics.VcenterResourcePoolCPUUsage), + metricVcenterResourcePoolMemoryBallooned: newMetricVcenterResourcePoolMemoryBallooned(mbc.Metrics.VcenterResourcePoolMemoryBallooned), + metricVcenterResourcePoolMemoryGranted: newMetricVcenterResourcePoolMemoryGranted(mbc.Metrics.VcenterResourcePoolMemoryGranted), + metricVcenterResourcePoolMemoryShares: newMetricVcenterResourcePoolMemoryShares(mbc.Metrics.VcenterResourcePoolMemoryShares), + metricVcenterResourcePoolMemorySwapped: newMetricVcenterResourcePoolMemorySwapped(mbc.Metrics.VcenterResourcePoolMemorySwapped), + metricVcenterResourcePoolMemoryUsage: newMetricVcenterResourcePoolMemoryUsage(mbc.Metrics.VcenterResourcePoolMemoryUsage), + metricVcenterVMCPUReadiness: newMetricVcenterVMCPUReadiness(mbc.Metrics.VcenterVMCPUReadiness), + 
metricVcenterVMCPUTime: newMetricVcenterVMCPUTime(mbc.Metrics.VcenterVMCPUTime), + metricVcenterVMCPUUsage: newMetricVcenterVMCPUUsage(mbc.Metrics.VcenterVMCPUUsage), + metricVcenterVMCPUUtilization: newMetricVcenterVMCPUUtilization(mbc.Metrics.VcenterVMCPUUtilization), + metricVcenterVMDiskLatencyAvg: newMetricVcenterVMDiskLatencyAvg(mbc.Metrics.VcenterVMDiskLatencyAvg), + metricVcenterVMDiskLatencyMax: newMetricVcenterVMDiskLatencyMax(mbc.Metrics.VcenterVMDiskLatencyMax), + metricVcenterVMDiskThroughput: newMetricVcenterVMDiskThroughput(mbc.Metrics.VcenterVMDiskThroughput), + metricVcenterVMDiskUsage: newMetricVcenterVMDiskUsage(mbc.Metrics.VcenterVMDiskUsage), + metricVcenterVMDiskUtilization: newMetricVcenterVMDiskUtilization(mbc.Metrics.VcenterVMDiskUtilization), + metricVcenterVMMemoryBallooned: newMetricVcenterVMMemoryBallooned(mbc.Metrics.VcenterVMMemoryBallooned), + metricVcenterVMMemoryGranted: newMetricVcenterVMMemoryGranted(mbc.Metrics.VcenterVMMemoryGranted), + metricVcenterVMMemorySwapped: newMetricVcenterVMMemorySwapped(mbc.Metrics.VcenterVMMemorySwapped), + metricVcenterVMMemorySwappedSsd: newMetricVcenterVMMemorySwappedSsd(mbc.Metrics.VcenterVMMemorySwappedSsd), + metricVcenterVMMemoryUsage: newMetricVcenterVMMemoryUsage(mbc.Metrics.VcenterVMMemoryUsage), + metricVcenterVMMemoryUtilization: newMetricVcenterVMMemoryUtilization(mbc.Metrics.VcenterVMMemoryUtilization), + metricVcenterVMNetworkBroadcastPacketRate: newMetricVcenterVMNetworkBroadcastPacketRate(mbc.Metrics.VcenterVMNetworkBroadcastPacketRate), + metricVcenterVMNetworkMulticastPacketRate: newMetricVcenterVMNetworkMulticastPacketRate(mbc.Metrics.VcenterVMNetworkMulticastPacketRate), + metricVcenterVMNetworkPacketDropRate: newMetricVcenterVMNetworkPacketDropRate(mbc.Metrics.VcenterVMNetworkPacketDropRate), + metricVcenterVMNetworkPacketRate: newMetricVcenterVMNetworkPacketRate(mbc.Metrics.VcenterVMNetworkPacketRate), + metricVcenterVMNetworkThroughput: 
newMetricVcenterVMNetworkThroughput(mbc.Metrics.VcenterVMNetworkThroughput), + metricVcenterVMNetworkUsage: newMetricVcenterVMNetworkUsage(mbc.Metrics.VcenterVMNetworkUsage), + metricVcenterVMVsanLatencyAvg: newMetricVcenterVMVsanLatencyAvg(mbc.Metrics.VcenterVMVsanLatencyAvg), + metricVcenterVMVsanOperations: newMetricVcenterVMVsanOperations(mbc.Metrics.VcenterVMVsanOperations), + metricVcenterVMVsanThroughput: newMetricVcenterVMVsanThroughput(mbc.Metrics.VcenterVMVsanThroughput), + resourceAttributeIncludeFilter: make(map[string]filter.Filter), + resourceAttributeExcludeFilter: make(map[string]filter.Filter), } if mbc.ResourceAttributes.VcenterClusterName.MetricsInclude != nil { mb.resourceAttributeIncludeFilter["vcenter.cluster.name"] = filter.CreateFilter(mbc.ResourceAttributes.VcenterClusterName.MetricsInclude) @@ -4239,6 +4431,7 @@ func (mb *MetricsBuilder) EmitForResource(options ...ResourceMetricsOption) { mb.metricVcenterResourcePoolMemorySwapped.emit(ils.Metrics()) mb.metricVcenterResourcePoolMemoryUsage.emit(ils.Metrics()) mb.metricVcenterVMCPUReadiness.emit(ils.Metrics()) + mb.metricVcenterVMCPUTime.emit(ils.Metrics()) mb.metricVcenterVMCPUUsage.emit(ils.Metrics()) mb.metricVcenterVMCPUUtilization.emit(ils.Metrics()) mb.metricVcenterVMDiskLatencyAvg.emit(ils.Metrics()) @@ -4252,6 +4445,8 @@ func (mb *MetricsBuilder) EmitForResource(options ...ResourceMetricsOption) { mb.metricVcenterVMMemorySwappedSsd.emit(ils.Metrics()) mb.metricVcenterVMMemoryUsage.emit(ils.Metrics()) mb.metricVcenterVMMemoryUtilization.emit(ils.Metrics()) + mb.metricVcenterVMNetworkBroadcastPacketRate.emit(ils.Metrics()) + mb.metricVcenterVMNetworkMulticastPacketRate.emit(ils.Metrics()) mb.metricVcenterVMNetworkPacketDropRate.emit(ils.Metrics()) mb.metricVcenterVMNetworkPacketRate.emit(ils.Metrics()) mb.metricVcenterVMNetworkThroughput.emit(ils.Metrics()) @@ -4530,6 +4725,11 @@ func (mb *MetricsBuilder) RecordVcenterVMCPUReadinessDataPoint(ts pcommon.Timest 
mb.metricVcenterVMCPUReadiness.recordDataPoint(mb.startTime, ts, val) } +// RecordVcenterVMCPUTimeDataPoint adds a data point to vcenter.vm.cpu.time metric. +func (mb *MetricsBuilder) RecordVcenterVMCPUTimeDataPoint(ts pcommon.Timestamp, val float64, cpuStateAttributeValue AttributeCPUState, objectNameAttributeValue string) { + mb.metricVcenterVMCPUTime.recordDataPoint(mb.startTime, ts, val, cpuStateAttributeValue.String(), objectNameAttributeValue) +} + // RecordVcenterVMCPUUsageDataPoint adds a data point to vcenter.vm.cpu.usage metric. func (mb *MetricsBuilder) RecordVcenterVMCPUUsageDataPoint(ts pcommon.Timestamp, val int64) { mb.metricVcenterVMCPUUsage.recordDataPoint(mb.startTime, ts, val) @@ -4595,6 +4795,16 @@ func (mb *MetricsBuilder) RecordVcenterVMMemoryUtilizationDataPoint(ts pcommon.T mb.metricVcenterVMMemoryUtilization.recordDataPoint(mb.startTime, ts, val) } +// RecordVcenterVMNetworkBroadcastPacketRateDataPoint adds a data point to vcenter.vm.network.broadcast.packet.rate metric. +func (mb *MetricsBuilder) RecordVcenterVMNetworkBroadcastPacketRateDataPoint(ts pcommon.Timestamp, val float64, throughputDirectionAttributeValue AttributeThroughputDirection, objectNameAttributeValue string) { + mb.metricVcenterVMNetworkBroadcastPacketRate.recordDataPoint(mb.startTime, ts, val, throughputDirectionAttributeValue.String(), objectNameAttributeValue) +} + +// RecordVcenterVMNetworkMulticastPacketRateDataPoint adds a data point to vcenter.vm.network.multicast.packet.rate metric. +func (mb *MetricsBuilder) RecordVcenterVMNetworkMulticastPacketRateDataPoint(ts pcommon.Timestamp, val float64, throughputDirectionAttributeValue AttributeThroughputDirection, objectNameAttributeValue string) { + mb.metricVcenterVMNetworkMulticastPacketRate.recordDataPoint(mb.startTime, ts, val, throughputDirectionAttributeValue.String(), objectNameAttributeValue) +} + // RecordVcenterVMNetworkPacketDropRateDataPoint adds a data point to vcenter.vm.network.packet.drop.rate metric. 
func (mb *MetricsBuilder) RecordVcenterVMNetworkPacketDropRateDataPoint(ts pcommon.Timestamp, val float64, throughputDirectionAttributeValue AttributeThroughputDirection, objectNameAttributeValue string) { mb.metricVcenterVMNetworkPacketDropRate.recordDataPoint(mb.startTime, ts, val, throughputDirectionAttributeValue.String(), objectNameAttributeValue) diff --git a/receiver/vcenterreceiver/internal/metadata/generated_metrics_test.go b/receiver/vcenterreceiver/internal/metadata/generated_metrics_test.go index f286196dda59..73588e8633c3 100644 --- a/receiver/vcenterreceiver/internal/metadata/generated_metrics_test.go +++ b/receiver/vcenterreceiver/internal/metadata/generated_metrics_test.go @@ -259,6 +259,9 @@ func TestMetricsBuilder(t *testing.T) { allMetricsCount++ mb.RecordVcenterVMCPUReadinessDataPoint(ts, 1) + allMetricsCount++ + mb.RecordVcenterVMCPUTimeDataPoint(ts, 1, AttributeCPUStateIdle, "object_name-val") + defaultMetricsCount++ allMetricsCount++ mb.RecordVcenterVMCPUUsageDataPoint(ts, 1) @@ -310,6 +313,12 @@ func TestMetricsBuilder(t *testing.T) { allMetricsCount++ mb.RecordVcenterVMMemoryUtilizationDataPoint(ts, 1) + allMetricsCount++ + mb.RecordVcenterVMNetworkBroadcastPacketRateDataPoint(ts, 1, AttributeThroughputDirectionTransmitted, "object_name-val") + + allMetricsCount++ + mb.RecordVcenterVMNetworkMulticastPacketRateDataPoint(ts, 1, AttributeThroughputDirectionTransmitted, "object_name-val") + defaultMetricsCount++ allMetricsCount++ mb.RecordVcenterVMNetworkPacketDropRateDataPoint(ts, 1, AttributeThroughputDirectionTransmitted, "object_name-val") @@ -1105,6 +1114,24 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, ts, dp.Timestamp()) assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) assert.Equal(t, int64(1), dp.IntValue()) + case "vcenter.vm.cpu.time": + assert.False(t, validatedMetrics["vcenter.vm.cpu.time"], "Found a duplicate in the metrics slice: vcenter.vm.cpu.time") + validatedMetrics["vcenter.vm.cpu.time"] = 
true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "CPU time spent in idle, ready or wait state.", ms.At(i).Description()) + assert.Equal(t, "%", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType()) + assert.InDelta(t, float64(1), dp.DoubleValue(), 0.01) + attrVal, ok := dp.Attributes().Get("cpu_state") + assert.True(t, ok) + assert.EqualValues(t, "idle", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("object") + assert.True(t, ok) + assert.EqualValues(t, "object_name-val", attrVal.Str()) case "vcenter.vm.cpu.usage": assert.False(t, validatedMetrics["vcenter.vm.cpu.usage"], "Found a duplicate in the metrics slice: vcenter.vm.cpu.usage") validatedMetrics["vcenter.vm.cpu.usage"] = true @@ -1296,6 +1323,42 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, ts, dp.Timestamp()) assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType()) assert.InDelta(t, float64(1), dp.DoubleValue(), 0.01) + case "vcenter.vm.network.broadcast.packet.rate": + assert.False(t, validatedMetrics["vcenter.vm.network.broadcast.packet.rate"], "Found a duplicate in the metrics slice: vcenter.vm.network.broadcast.packet.rate") + validatedMetrics["vcenter.vm.network.broadcast.packet.rate"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "The rate of broadcast packets transmitted or received by each vNIC (virtual network interface controller) on the virtual machine.", ms.At(i).Description()) + assert.Equal(t, "{packets/s}", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, 
dp.ValueType()) + assert.InDelta(t, float64(1), dp.DoubleValue(), 0.01) + attrVal, ok := dp.Attributes().Get("direction") + assert.True(t, ok) + assert.EqualValues(t, "transmitted", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("object") + assert.True(t, ok) + assert.EqualValues(t, "object_name-val", attrVal.Str()) + case "vcenter.vm.network.multicast.packet.rate": + assert.False(t, validatedMetrics["vcenter.vm.network.multicast.packet.rate"], "Found a duplicate in the metrics slice: vcenter.vm.network.multicast.packet.rate") + validatedMetrics["vcenter.vm.network.multicast.packet.rate"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "The rate of multicast packets transmitted or received by each vNIC (virtual network interface controller) on the virtual machine.", ms.At(i).Description()) + assert.Equal(t, "{packets/s}", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType()) + assert.InDelta(t, float64(1), dp.DoubleValue(), 0.01) + attrVal, ok := dp.Attributes().Get("direction") + assert.True(t, ok) + assert.EqualValues(t, "transmitted", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("object") + assert.True(t, ok) + assert.EqualValues(t, "object_name-val", attrVal.Str()) case "vcenter.vm.network.packet.drop.rate": assert.False(t, validatedMetrics["vcenter.vm.network.packet.drop.rate"], "Found a duplicate in the metrics slice: vcenter.vm.network.packet.drop.rate") validatedMetrics["vcenter.vm.network.packet.drop.rate"] = true diff --git a/receiver/vcenterreceiver/internal/metadata/testdata/config.yaml b/receiver/vcenterreceiver/internal/metadata/testdata/config.yaml index ce2aae9f092d..130db2582ef9 100644 --- a/receiver/vcenterreceiver/internal/metadata/testdata/config.yaml +++ 
b/receiver/vcenterreceiver/internal/metadata/testdata/config.yaml @@ -97,6 +97,8 @@ all_set: enabled: true vcenter.vm.cpu.readiness: enabled: true + vcenter.vm.cpu.time: + enabled: true vcenter.vm.cpu.usage: enabled: true vcenter.vm.cpu.utilization: @@ -123,6 +125,10 @@ all_set: enabled: true vcenter.vm.memory.utilization: enabled: true + vcenter.vm.network.broadcast.packet.rate: + enabled: true + vcenter.vm.network.multicast.packet.rate: + enabled: true vcenter.vm.network.packet.drop.rate: enabled: true vcenter.vm.network.packet.rate: @@ -260,6 +266,8 @@ none_set: enabled: false vcenter.vm.cpu.readiness: enabled: false + vcenter.vm.cpu.time: + enabled: false vcenter.vm.cpu.usage: enabled: false vcenter.vm.cpu.utilization: @@ -286,6 +294,10 @@ none_set: enabled: false vcenter.vm.memory.utilization: enabled: false + vcenter.vm.network.broadcast.packet.rate: + enabled: false + vcenter.vm.network.multicast.packet.rate: + enabled: false vcenter.vm.network.packet.drop.rate: enabled: false vcenter.vm.network.packet.rate: diff --git a/receiver/vcenterreceiver/internal/mockserver/responses/vm-performance-counters.xml b/receiver/vcenterreceiver/internal/mockserver/responses/vm-performance-counters.xml index 59f89bf27dab..efad590c7f80 100644 --- a/receiver/vcenterreceiver/internal/mockserver/responses/vm-performance-counters.xml +++ b/receiver/vcenterreceiver/internal/mockserver/responses/vm-performance-counters.xml @@ -78,6 +78,34 @@ 20 + + + 533 + + + 70 + + + + 534 + + + 90 + + + + 535 + + + 90 + + + + 536 + + + 90 + 143 @@ -190,6 +218,20 @@ 20 + + + 533 + vmnic1 + + 70 + + + + 535 + vmnic1 + + 90 + 143 @@ -225,6 +267,20 @@ 20 + + + 533 + vmnic0 + + 70 + + + + 535 + vmnic0 + + 90 + 143 @@ -246,6 +302,20 @@ 20 + + + 533 + vmnic3 + + 70 + + + + 535 + vmnic3 + + 90 + 146 @@ -260,6 +330,20 @@ 20 + + + 533 + vmnic2 + + 70 + + + + 535 + vmnic2 + + 90 + 532 @@ -323,6 +407,20 @@ 20 + + + 533 + 4000 + + 70 + + + + 535 + 4000 + + 90 + 147 @@ -351,6 +449,27 @@ 899 + + + 13 + 4000 + 
+ 2 + + + + 11 + 4000 + + 3 + + + + 12 + 4000 + + 5 + vm-6004 @@ -428,6 +547,34 @@ 20 + + + 533 + + + 70 + + + + 534 + + + 70 + + + + 535 + + + 20 + + + + 536 + + + 70 + 143 @@ -540,6 +687,20 @@ 20 + + + 533 + vmnic1 + + 70 + + + + 535 + vmnic1 + + 90 + 143 @@ -575,6 +736,20 @@ 20 + + + 533 + vmnic0 + + 70 + + + + 535 + vmnic0 + + 90 + 143 @@ -596,6 +771,20 @@ 20 + + + 533 + vmnic3 + + 70 + + + + 535 + vmnic3 + + 90 + 146 @@ -610,6 +799,21 @@ 20 + + + 533 + vmnic2 + + 70 + + + + 535 + vmnic2 + + 90 + + 532 @@ -687,6 +891,20 @@ 20 + + + 533 + 4000 + + 70 + + + + 535 + 4000 + + 90 + 532 @@ -701,6 +919,27 @@ 899 + + + 13 + 4000 + + 2 + + + + 11 + 4000 + + 3 + + + + 12 + 4000 + + 5 + vm-6005 @@ -778,6 +1017,20 @@ 20 + + + 533 + + + 70 + + + + 535 + + + 90 + 143 @@ -890,6 +1143,20 @@ 20 + + + 533 + vmnic1 + + 70 + + + + 535 + vmnic1 + + 90 + 143 @@ -925,6 +1192,20 @@ 20 + + + 533 + vmnic0 + + 70 + + + + 535 + vmnic0 + + 90 + 143 @@ -946,6 +1227,20 @@ 20 + + + 533 + vmnic3 + + 70 + + + + 535 + vmnic3 + + 90 + 146 @@ -960,6 +1255,20 @@ 20 + + + 533 + vmnic2 + + 70 + + + + 535 + vmnic2 + + 90 + 532 @@ -1030,6 +1339,34 @@ 20 + + + 533 + 4000 + + 70 + + + + 534 + 4000 + + 70 + + + + 535 + 4000 + + 90 + + + + 536 + 4000 + + 70 + 530 @@ -1051,6 +1388,27 @@ 899 + + + 13 + 4000 + + 2 + + + + 11 + 4000 + + 3 + + + + 12 + 4000 + + 5 + diff --git a/receiver/vcenterreceiver/metadata.yaml b/receiver/vcenterreceiver/metadata.yaml index 761b36c3bcf0..b9840c16e307 100644 --- a/receiver/vcenterreceiver/metadata.yaml +++ b/receiver/vcenterreceiver/metadata.yaml @@ -106,6 +106,13 @@ attributes: - "off" - "standby" - "unknown" + cpu_state: + description: CPU time spent in idle, ready or idle state. + type: string + enum: + - "idle" + - "ready" + - "wait" disk_direction: name_override: direction description: The direction of disk latency. 
@@ -568,6 +575,14 @@ metrics: value_type: int aggregation_temporality: cumulative attributes: [] + vcenter.vm.cpu.time: + enabled: false + description: CPU time spent in idle, ready or wait state. + unit: "%" + gauge: + value_type: double + attributes: [cpu_state, object_name] + extended_documentation: As measured over the most recent 20s interval. vcenter.vm.memory.ballooned: enabled: true description: The amount of memory that is ballooned due to virtualization. @@ -662,6 +677,22 @@ metrics: aggregation_temporality: cumulative attributes: [throughput_direction, object_name] extended_documentation: As measured over the most recent 20s interval. + vcenter.vm.network.broadcast.packet.rate: + enabled: false + description: The rate of broadcast packets transmitted or received by each vNIC (virtual network interface controller) on the virtual machine. + unit: "{packets/s}" + gauge: + value_type: double + attributes: [throughput_direction, object_name] + extended_documentation: As measured over the most recent 20s interval. + vcenter.vm.network.multicast.packet.rate: + enabled: false + description: The rate of multicast packets transmitted or received by each vNIC (virtual network interface controller) on the virtual machine. + unit: "{packets/s}" + gauge: + value_type: double + attributes: [throughput_direction, object_name] + extended_documentation: As measured over the most recent 20s interval. vcenter.vm.network.packet.rate: enabled: true description: The rate of packets transmitted or received by each vNIC (virtual network interface controller) on the virtual machine. 
diff --git a/receiver/vcenterreceiver/metrics.go b/receiver/vcenterreceiver/metrics.go index 199da2fa8317..f0091d28e774 100644 --- a/receiver/vcenterreceiver/metrics.go +++ b/receiver/vcenterreceiver/metrics.go @@ -406,6 +406,11 @@ var vmPerfMetricList = []string{ "net.bytesRx.average", "net.bytesTx.average", "net.usage.average", + "net.broadcastRx.summation", + "net.broadcastTx.summation", + "net.multicastRx.summation", + "net.multicastTx.summation", + // disk metrics "disk.totalWriteLatency.average", "disk.totalReadLatency.average", @@ -414,6 +419,11 @@ var vmPerfMetricList = []string{ "virtualDisk.totalReadLatency.average", "virtualDisk.read.average", "virtualDisk.write.average", + + // cpu metrics + "cpu.idle.summation", + "cpu.wait.summation", + "cpu.ready.summation", } // recordVMPerformanceMetrics records performance metrics for a vSphere Virtual Machine @@ -460,6 +470,27 @@ func (v *vcenterMetricScraper) recordVMPerformanceMetrics(entityMetric *performa case "net.droppedRx.summation": rxRate := float64(nestedValue) / 20 v.mb.RecordVcenterVMNetworkPacketDropRateDataPoint(pcommon.NewTimestampFromTime(si.Timestamp), rxRate, metadata.AttributeThroughputDirectionReceived, val.Instance) + case "net.multicastRx.summation": + rxRate := float64(nestedValue) / 20 + v.mb.RecordVcenterVMNetworkMulticastPacketRateDataPoint(pcommon.NewTimestampFromTime(si.Timestamp), rxRate, metadata.AttributeThroughputDirectionReceived, val.Instance) + case "net.multicastTx.summation": + txRate := float64(nestedValue) / 20 + v.mb.RecordVcenterVMNetworkMulticastPacketRateDataPoint(pcommon.NewTimestampFromTime(si.Timestamp), txRate, metadata.AttributeThroughputDirectionTransmitted, val.Instance) + case "cpu.idle.summation": + idleTime := float64(nestedValue) / float64(si.Interval) * 10 + v.mb.RecordVcenterVMCPUTimeDataPoint(pcommon.NewTimestampFromTime(si.Timestamp), idleTime, metadata.AttributeCPUStateIdle, val.Instance) + case "cpu.wait.summation": + waitTime := float64(nestedValue) / 
float64(si.Interval) * 10 + v.mb.RecordVcenterVMCPUTimeDataPoint(pcommon.NewTimestampFromTime(si.Timestamp), waitTime, metadata.AttributeCPUStateWait, val.Instance) + case "cpu.ready.summation": + readyTime := float64(nestedValue) / float64(si.Interval) * 10 + v.mb.RecordVcenterVMCPUTimeDataPoint(pcommon.NewTimestampFromTime(si.Timestamp), readyTime, metadata.AttributeCPUStateReady, val.Instance) + case "net.broadcastRx.summation": + rxRate := float64(nestedValue) / 20 + v.mb.RecordVcenterVMNetworkBroadcastPacketRateDataPoint(pcommon.NewTimestampFromTime(si.Timestamp), rxRate, metadata.AttributeThroughputDirectionReceived, val.Instance) + case "net.broadcastTx.summation": + txRate := float64(nestedValue) / 20 + v.mb.RecordVcenterVMNetworkBroadcastPacketRateDataPoint(pcommon.NewTimestampFromTime(si.Timestamp), txRate, metadata.AttributeThroughputDirectionTransmitted, val.Instance) } } } diff --git a/receiver/vcenterreceiver/scraper_test.go b/receiver/vcenterreceiver/scraper_test.go index c30d3e29641d..46421a171d25 100644 --- a/receiver/vcenterreceiver/scraper_test.go +++ b/receiver/vcenterreceiver/scraper_test.go @@ -42,6 +42,9 @@ func TestScrapeConfigsEnabled(t *testing.T) { optConfigs := metadata.DefaultMetricsBuilderConfig() optConfigs.Metrics.VcenterHostMemoryCapacity.Enabled = true optConfigs.Metrics.VcenterVMMemoryGranted.Enabled = true + optConfigs.Metrics.VcenterVMNetworkBroadcastPacketRate.Enabled = true + optConfigs.Metrics.VcenterVMNetworkMulticastPacketRate.Enabled = true + optConfigs.Metrics.VcenterVMCPUTime.Enabled = true setResourcePoolMemoryUsageAttrFeatureGate(t, true) cfg := &Config{ diff --git a/receiver/vcenterreceiver/testdata/metrics/expected-all-enabled.yaml b/receiver/vcenterreceiver/testdata/metrics/expected-all-enabled.yaml index a850c3d2669b..7abc1db106be 100644 --- a/receiver/vcenterreceiver/testdata/metrics/expected-all-enabled.yaml +++ b/receiver/vcenterreceiver/testdata/metrics/expected-all-enabled.yaml @@ -2167,6 +2167,41 @@ 
resourceMetrics: timeUnixNano: "2000000" name: vcenter.vm.cpu.readiness unit: '%' + - description: CPU time spent in idle, ready or wait state. + gauge: + dataPoints: + - asDouble: "1.0" + attributes: + - key: cpu_state + value: + stringValue: "idle" + - key: object + value: + stringValue: "4000" + startTimeUnixNano: "2000000" + timeUnixNano: "1000000" + - asDouble: "2.5" + attributes: + - key: cpu_state + value: + stringValue: "ready" + - key: object + value: + stringValue: "4000" + startTimeUnixNano: "2000000" + timeUnixNano: "1000000" + - asDouble: "1.5" + attributes: + - key: cpu_state + value: + stringValue: "wait" + - key: object + value: + stringValue: "4000" + startTimeUnixNano: "2000000" + timeUnixNano: "1000000" + name: vcenter.vm.cpu.time + unit: "%" - description: The amount of CPU used by the VM. name: vcenter.vm.cpu.usage sum: @@ -2353,6 +2388,156 @@ resourceMetrics: timeUnixNano: "2000000" name: vcenter.vm.memory.utilization unit: '%' + - description: The rate of broadcast packets transmitted or received by each vNIC (virtual network interface controller) on the virtual machine. 
+ gauge: + dataPoints: + - asDouble: 3.5 + attributes: + - key: direction + value: + stringValue: transmitted + - key: object + value: + stringValue: "4000" + startTimeUnixNano: "2000000" + timeUnixNano: "1000000" + - asDouble: 3.5 + attributes: + - key: direction + value: + stringValue: received + - key: object + value: + stringValue: "" + startTimeUnixNano: "2000000" + timeUnixNano: "1000000" + - asDouble: 3.5 + attributes: + - key: direction + value: + stringValue: received + - key: object + value: + stringValue: "4000" + startTimeUnixNano: "2000000" + timeUnixNano: "1000000" + - asDouble: 3.5 + attributes: + - key: direction + value: + stringValue: received + - key: object + value: + stringValue: vmnic0 + startTimeUnixNano: "2000000" + timeUnixNano: "1000000" + - asDouble: 3.5 + attributes: + - key: direction + value: + stringValue: received + - key: object + value: + stringValue: vmnic1 + startTimeUnixNano: "2000000" + timeUnixNano: "1000000" + - asDouble: 3.5 + attributes: + - key: direction + value: + stringValue: received + - key: object + value: + stringValue: vmnic2 + startTimeUnixNano: "2000000" + timeUnixNano: "1000000" + - asDouble: 3.5 + attributes: + - key: direction + value: + stringValue: received + - key: object + value: + stringValue: vmnic3 + startTimeUnixNano: "2000000" + timeUnixNano: "1000000" + name: vcenter.vm.network.broadcast.packet.rate + unit: '{packets/s}' + - description: The rate of multicast packets transmitted or received by each vNIC (virtual network interface controller) on the virtual machine. 
+ gauge: + dataPoints: + - asDouble: 3.5 + attributes: + - key: direction + value: + stringValue: transmitted + - key: object + value: + stringValue: "4000" + startTimeUnixNano: "2000000" + timeUnixNano: "1000000" + - asDouble: 4.5 + attributes: + - key: direction + value: + stringValue: received + - key: object + value: + stringValue: "" + startTimeUnixNano: "2000000" + timeUnixNano: "1000000" + - asDouble: 4.5 + attributes: + - key: direction + value: + stringValue: received + - key: object + value: + stringValue: "4000" + startTimeUnixNano: "2000000" + timeUnixNano: "1000000" + - asDouble: 4.5 + attributes: + - key: direction + value: + stringValue: received + - key: object + value: + stringValue: vmnic0 + startTimeUnixNano: "2000000" + timeUnixNano: "1000000" + - asDouble: 4.5 + attributes: + - key: direction + value: + stringValue: received + - key: object + value: + stringValue: vmnic1 + startTimeUnixNano: "2000000" + timeUnixNano: "1000000" + - asDouble: 4.5 + attributes: + - key: direction + value: + stringValue: received + - key: object + value: + stringValue: vmnic2 + startTimeUnixNano: "2000000" + timeUnixNano: "1000000" + - asDouble: 4.5 + attributes: + - key: direction + value: + stringValue: received + - key: object + value: + stringValue: vmnic3 + startTimeUnixNano: "2000000" + timeUnixNano: "1000000" + name: vcenter.vm.network.multicast.packet.rate + unit: '{packets/s}' - description: The rate of transmitted or received packets dropped by each vNIC (virtual network interface controller) on the virtual machine. gauge: dataPoints: @@ -2813,6 +2998,41 @@ resourceMetrics: timeUnixNano: "2000000" name: vcenter.vm.cpu.readiness unit: '%' + - description: CPU time spent in idle, ready or wait state. 
+ gauge: + dataPoints: + - asDouble: "1.0" + attributes: + - key: cpu_state + value: + stringValue: "idle" + - key: object + value: + stringValue: "4000" + startTimeUnixNano: "2000000" + timeUnixNano: "1000000" + - asDouble: "2.5" + attributes: + - key: cpu_state + value: + stringValue: "ready" + - key: object + value: + stringValue: "4000" + startTimeUnixNano: "2000000" + timeUnixNano: "1000000" + - asDouble: "1.5" + attributes: + - key: cpu_state + value: + stringValue: "wait" + - key: object + value: + stringValue: "4000" + startTimeUnixNano: "2000000" + timeUnixNano: "1000000" + name: vcenter.vm.cpu.time + unit: "%" - description: The amount of CPU used by the VM. name: vcenter.vm.cpu.usage sum: @@ -2999,6 +3219,156 @@ resourceMetrics: timeUnixNano: "2000000" name: vcenter.vm.memory.utilization unit: '%' + - description: The rate of broadcast packets transmitted or received by each vNIC (virtual network interface controller) on the virtual machine. + gauge: + dataPoints: + - asDouble: 4.5 + attributes: + - key: direction + value: + stringValue: transmitted + - key: object + value: + stringValue: "" + startTimeUnixNano: "2000000" + timeUnixNano: "1000000" + - asDouble: 3.5 + attributes: + - key: direction + value: + stringValue: received + - key: object + value: + stringValue: "" + startTimeUnixNano: "2000000" + timeUnixNano: "1000000" + - asDouble: 3.5 + attributes: + - key: direction + value: + stringValue: received + - key: object + value: + stringValue: "4000" + startTimeUnixNano: "2000000" + timeUnixNano: "1000000" + - asDouble: 3.5 + attributes: + - key: direction + value: + stringValue: received + - key: object + value: + stringValue: vmnic0 + startTimeUnixNano: "2000000" + timeUnixNano: "1000000" + - asDouble: 3.5 + attributes: + - key: direction + value: + stringValue: received + - key: object + value: + stringValue: vmnic1 + startTimeUnixNano: "2000000" + timeUnixNano: "1000000" + - asDouble: 3.5 + attributes: + - key: direction + value: + stringValue: 
received + - key: object + value: + stringValue: vmnic2 + startTimeUnixNano: "2000000" + timeUnixNano: "1000000" + - asDouble: 3.5 + attributes: + - key: direction + value: + stringValue: received + - key: object + value: + stringValue: vmnic3 + startTimeUnixNano: "2000000" + timeUnixNano: "1000000" + name: vcenter.vm.network.broadcast.packet.rate + unit: '{packets/s}' + - description: The rate of multicast packets transmitted or received by each vNIC (virtual network interface controller) on the virtual machine. + gauge: + dataPoints: + - asDouble: 4.5 + attributes: + - key: direction + value: + stringValue: transmitted + - key: object + value: + stringValue: "" + startTimeUnixNano: "2000000" + timeUnixNano: "1000000" + - asDouble: 4.5 + attributes: + - key: direction + value: + stringValue: received + - key: object + value: + stringValue: "" + startTimeUnixNano: "2000000" + timeUnixNano: "1000000" + - asDouble: 4.5 + attributes: + - key: direction + value: + stringValue: received + - key: object + value: + stringValue: "4000" + startTimeUnixNano: "2000000" + timeUnixNano: "1000000" + - asDouble: 4.5 + attributes: + - key: direction + value: + stringValue: received + - key: object + value: + stringValue: vmnic0 + startTimeUnixNano: "2000000" + timeUnixNano: "1000000" + - asDouble: 4.5 + attributes: + - key: direction + value: + stringValue: received + - key: object + value: + stringValue: vmnic1 + startTimeUnixNano: "2000000" + timeUnixNano: "1000000" + - asDouble: 4.5 + attributes: + - key: direction + value: + stringValue: received + - key: object + value: + stringValue: vmnic2 + startTimeUnixNano: "2000000" + timeUnixNano: "1000000" + - asDouble: 4.5 + attributes: + - key: direction + value: + stringValue: received + - key: object + value: + stringValue: vmnic3 + startTimeUnixNano: "2000000" + timeUnixNano: "1000000" + name: vcenter.vm.network.multicast.packet.rate + unit: '{packets/s}' - description: The rate of transmitted or received packets dropped by each 
vNIC (virtual network interface controller) on the virtual machine. gauge: dataPoints: @@ -3558,6 +3928,41 @@ resourceMetrics: timeUnixNano: "2000000" name: vcenter.vm.cpu.readiness unit: '%' + - description: CPU time spent in idle, ready or wait state. + gauge: + dataPoints: + - asDouble: "1.0" + attributes: + - key: cpu_state + value: + stringValue: "idle" + - key: object + value: + stringValue: "4000" + startTimeUnixNano: "2000000" + timeUnixNano: "1000000" + - asDouble: "2.5" + attributes: + - key: cpu_state + value: + stringValue: "ready" + - key: object + value: + stringValue: "4000" + startTimeUnixNano: "2000000" + timeUnixNano: "1000000" + - asDouble: "1.50" + attributes: + - key: cpu_state + value: + stringValue: "wait" + - key: object + value: + stringValue: "4000" + startTimeUnixNano: "2000000" + timeUnixNano: "1000000" + name: vcenter.vm.cpu.time + unit: "%" - description: The amount of CPU used by the VM. name: vcenter.vm.cpu.usage sum: @@ -3744,6 +4149,156 @@ resourceMetrics: timeUnixNano: "2000000" name: vcenter.vm.memory.utilization unit: '%' + - description: The rate of broadcast packets transmitted or received by each vNIC (virtual network interface controller) on the virtual machine. 
+ gauge: + dataPoints: + - asDouble: 3.5 + attributes: + - key: direction + value: + stringValue: transmitted + - key: object + value: + stringValue: "" + startTimeUnixNano: "2000000" + timeUnixNano: "1000000" + - asDouble: 3.5 + attributes: + - key: direction + value: + stringValue: received + - key: object + value: + stringValue: "" + startTimeUnixNano: "2000000" + timeUnixNano: "1000000" + - asDouble: 3.5 + attributes: + - key: direction + value: + stringValue: received + - key: object + value: + stringValue: "4000" + startTimeUnixNano: "2000000" + timeUnixNano: "1000000" + - asDouble: 3.5 + attributes: + - key: direction + value: + stringValue: received + - key: object + value: + stringValue: vmnic0 + startTimeUnixNano: "2000000" + timeUnixNano: "1000000" + - asDouble: 3.5 + attributes: + - key: direction + value: + stringValue: received + - key: object + value: + stringValue: vmnic1 + startTimeUnixNano: "2000000" + timeUnixNano: "1000000" + - asDouble: 3.5 + attributes: + - key: direction + value: + stringValue: received + - key: object + value: + stringValue: vmnic2 + startTimeUnixNano: "2000000" + timeUnixNano: "1000000" + - asDouble: 3.5 + attributes: + - key: direction + value: + stringValue: received + - key: object + value: + stringValue: vmnic3 + startTimeUnixNano: "2000000" + timeUnixNano: "1000000" + name: vcenter.vm.network.broadcast.packet.rate + unit: '{packets/s}' + - description: The rate of multicast packets transmitted or received by each vNIC (virtual network interface controller) on the virtual machine. 
+ gauge: + dataPoints: + - asDouble: 3.5 + attributes: + - key: direction + value: + stringValue: transmitted + - key: object + value: + stringValue: "" + startTimeUnixNano: "2000000" + timeUnixNano: "1000000" + - asDouble: 1.0 + attributes: + - key: direction + value: + stringValue: received + - key: object + value: + stringValue: "" + startTimeUnixNano: "2000000" + timeUnixNano: "1000000" + - asDouble: 4.5 + attributes: + - key: direction + value: + stringValue: received + - key: object + value: + stringValue: "4000" + startTimeUnixNano: "2000000" + timeUnixNano: "1000000" + - asDouble: 4.5 + attributes: + - key: direction + value: + stringValue: received + - key: object + value: + stringValue: vmnic0 + startTimeUnixNano: "2000000" + timeUnixNano: "1000000" + - asDouble: 4.5 + attributes: + - key: direction + value: + stringValue: received + - key: object + value: + stringValue: vmnic1 + startTimeUnixNano: "2000000" + timeUnixNano: "1000000" + - asDouble: 4.5 + attributes: + - key: direction + value: + stringValue: received + - key: object + value: + stringValue: vmnic2 + startTimeUnixNano: "2000000" + timeUnixNano: "1000000" + - asDouble: 4.5 + attributes: + - key: direction + value: + stringValue: received + - key: object + value: + stringValue: vmnic3 + startTimeUnixNano: "2000000" + timeUnixNano: "1000000" + name: vcenter.vm.network.multicast.packet.rate + unit: '{packets/s}' - description: The rate of transmitted or received packets dropped by each vNIC (virtual network interface controller) on the virtual machine. gauge: dataPoints: From 95d50af4ba964708bddbf1685925241e636b68d5 Mon Sep 17 00:00:00 2001 From: Phil Gore Date: Wed, 5 Feb 2025 12:13:22 -0700 Subject: [PATCH 12/14] [cmd/telemetrygen] Export telemetrygen functions for testing (#37044) #### Description This PR addresses #36984 in order to open up telemetrygen to be used by golang tests and generate varying amounts of telemetry in code to support use cases which don't rely on external tools. 
The following major changes were made to clear the footprint for how this API can be used: 1. Moved `metrics`, `traces`, and `logs` under `pkg` instead of internal. 2. Unexported the `run` function for each test suite so the entrypoint is only the `Start` function 3. Moved config defaults from pflags into a `SetDefaults` function which is exported alongside `NewConfig` for changed packages in order for users to get the same sane defaults as if run from the command line. 4. Adjusted the pflags to use the existing default values rather than duplicate their own removing default config duplication 5. Added E2E tests for Metrics and Logs and adjusted Traces to use the new API so it stays consistent. 6. Adjusted log and metric exporter instantiation slightly due to a race condition with an `err` variable which was only caught by the new E2E tests. #### Testing Added to the E2E tests to ensure consistent API specs. --- .chloggen/telemetrygen-public-api.yaml | 27 ++++++ cmd/telemetrygen/config.go | 14 +-- cmd/telemetrygen/internal/common/config.go | 49 +++++++---- cmd/telemetrygen/internal/e2etest/go.mod | 7 ++ cmd/telemetrygen/internal/e2etest/go.sum | 14 +++ .../internal/e2etest/logs_test.go | 51 +++++++++++ .../internal/e2etest/metrics_test.go | 51 +++++++++++ .../e2etest/{e2e_test.go => traces_test.go} | 31 +++---- cmd/telemetrygen/internal/logs/config.go | 58 ------------ cmd/telemetrygen/internal/traces/config.go | 51 ----------- cmd/telemetrygen/pkg/logs/config.go | 78 ++++++++++++++++ .../{internal => pkg}/logs/exporter.go | 0 .../{internal => pkg}/logs/logs.go | 86 +++++++++++------- .../{internal => pkg}/logs/package_test.go | 0 .../{internal => pkg}/logs/worker.go | 15 +--- .../{internal => pkg}/logs/worker_test.go | 18 ++-- .../{internal => pkg}/metrics/config.go | 36 ++++++-- .../{internal => pkg}/metrics/exporter.go | 0 .../{internal => pkg}/metrics/metrics.go | 88 ++++++++++++------- .../{internal => pkg}/metrics/metrics_test.go | 0 
.../metrics/metrics_types.go | 18 ++-- .../{internal => pkg}/metrics/package_test.go | 0 .../{internal => pkg}/metrics/worker.go | 23 ++--- .../{internal => pkg}/metrics/worker_test.go | 50 +++++------ cmd/telemetrygen/pkg/traces/config.go | 73 +++++++++++++++ .../{internal => pkg}/traces/exporter.go | 0 .../{internal => pkg}/traces/exporter_test.go | 0 .../{internal => pkg}/traces/package_test.go | 0 .../{internal => pkg}/traces/traces.go | 8 +- .../{internal => pkg}/traces/worker.go | 0 .../{internal => pkg}/traces/worker_test.go | 24 ++--- 31 files changed, 554 insertions(+), 316 deletions(-) create mode 100644 .chloggen/telemetrygen-public-api.yaml create mode 100644 cmd/telemetrygen/internal/e2etest/logs_test.go create mode 100644 cmd/telemetrygen/internal/e2etest/metrics_test.go rename cmd/telemetrygen/internal/e2etest/{e2e_test.go => traces_test.go} (68%) delete mode 100644 cmd/telemetrygen/internal/logs/config.go delete mode 100644 cmd/telemetrygen/internal/traces/config.go create mode 100644 cmd/telemetrygen/pkg/logs/config.go rename cmd/telemetrygen/{internal => pkg}/logs/exporter.go (100%) rename cmd/telemetrygen/{internal => pkg}/logs/logs.go (70%) rename cmd/telemetrygen/{internal => pkg}/logs/package_test.go (100%) rename cmd/telemetrygen/{internal => pkg}/logs/worker.go (84%) rename cmd/telemetrygen/{internal => pkg}/logs/worker_test.go (93%) rename cmd/telemetrygen/{internal => pkg}/metrics/config.go (55%) rename cmd/telemetrygen/{internal => pkg}/metrics/exporter.go (100%) rename cmd/telemetrygen/{internal => pkg}/metrics/metrics.go (62%) rename cmd/telemetrygen/{internal => pkg}/metrics/metrics_test.go (100%) rename cmd/telemetrygen/{internal => pkg}/metrics/metrics_types.go (59%) rename cmd/telemetrygen/{internal => pkg}/metrics/package_test.go (100%) rename cmd/telemetrygen/{internal => pkg}/metrics/worker.go (88%) rename cmd/telemetrygen/{internal => pkg}/metrics/worker_test.go (88%) create mode 100644 cmd/telemetrygen/pkg/traces/config.go 
rename cmd/telemetrygen/{internal => pkg}/traces/exporter.go (100%) rename cmd/telemetrygen/{internal => pkg}/traces/exporter_test.go (100%) rename cmd/telemetrygen/{internal => pkg}/traces/package_test.go (100%) rename cmd/telemetrygen/{internal => pkg}/traces/traces.go (97%) rename cmd/telemetrygen/{internal => pkg}/traces/worker.go (100%) rename cmd/telemetrygen/{internal => pkg}/traces/worker_test.go (94%) diff --git a/.chloggen/telemetrygen-public-api.yaml b/.chloggen/telemetrygen-public-api.yaml new file mode 100644 index 000000000000..c68c46db5ea9 --- /dev/null +++ b/.chloggen/telemetrygen-public-api.yaml @@ -0,0 +1,27 @@ +# Use this changelog template to create an entry for release notes. + +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: enhancement + +# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver) +component: telemetrygen + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: Exported the API for telemetrygen for test uses. Additionally added new E2E tests and fixed race condition + +# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists. +issues: [36984] + +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. +subtext: + +# If your change doesn't affect end users or the exported elements of any package, +# you should instead start your pull request title with [chore] or use the "Skip Changelog" label. +# Optional: The change log or logs in which this entry should be included. +# e.g. '[user]' or '[user, api]' +# Include 'user' if the change is relevant to end users. +# Include 'api' if there is a change to a library API. 
+# Default: '[user]' +change_logs: [api] diff --git a/cmd/telemetrygen/config.go b/cmd/telemetrygen/config.go index 80aaaaa1e5d4..d29fbe2ec992 100644 --- a/cmd/telemetrygen/config.go +++ b/cmd/telemetrygen/config.go @@ -12,10 +12,10 @@ import ( "github.com/spf13/cobra" - "github.com/open-telemetry/opentelemetry-collector-contrib/cmd/telemetrygen/internal/logs" "github.com/open-telemetry/opentelemetry-collector-contrib/cmd/telemetrygen/internal/metadata" - "github.com/open-telemetry/opentelemetry-collector-contrib/cmd/telemetrygen/internal/metrics" - "github.com/open-telemetry/opentelemetry-collector-contrib/cmd/telemetrygen/internal/traces" + "github.com/open-telemetry/opentelemetry-collector-contrib/cmd/telemetrygen/pkg/logs" + "github.com/open-telemetry/opentelemetry-collector-contrib/cmd/telemetrygen/pkg/metrics" + "github.com/open-telemetry/opentelemetry-collector-contrib/cmd/telemetrygen/pkg/traces" ) var ( @@ -64,13 +64,13 @@ var logsCmd = &cobra.Command{ func init() { rootCmd.AddCommand(tracesCmd, metricsCmd, logsCmd) - tracesCfg = new(traces.Config) + tracesCfg = traces.NewConfig() tracesCfg.Flags(tracesCmd.Flags()) - metricsCfg = new(metrics.Config) + metricsCfg = metrics.NewConfig() metricsCfg.Flags(metricsCmd.Flags()) - logsCfg = new(logs.Config) + logsCfg = logs.NewConfig() logsCfg.Flags(logsCmd.Flags()) // Disabling completion command for end user @@ -81,7 +81,7 @@ func init() { // Execute tries to run the input command func Execute() { if err := rootCmd.Execute(); err != nil { - // TODO: Uncomment the line below when using Run instead of RunE in the xxxCmd functions + // TODO: Uncomment the line below when using run instead of RunE in the xxxCmd functions // fmt.Fprintln(os.Stderr, err) os.Exit(1) } diff --git a/cmd/telemetrygen/internal/common/config.go b/cmd/telemetrygen/internal/common/config.go index c6592b6fa582..44ca9aa936d8 100644 --- a/cmd/telemetrygen/internal/common/config.go +++ b/cmd/telemetrygen/internal/common/config.go @@ -148,40 
+148,59 @@ func (c *Config) GetHeaders() map[string]string { // CommonFlags registers common config flags. func (c *Config) CommonFlags(fs *pflag.FlagSet) { - fs.IntVar(&c.WorkerCount, "workers", 1, "Number of workers (goroutines) to run") - fs.Float64Var(&c.Rate, "rate", 0, "Approximately how many metrics/spans/logs per second each worker should generate. Zero means no throttling.") - fs.DurationVar(&c.TotalDuration, "duration", 0, "For how long to run the test") - fs.DurationVar(&c.ReportingInterval, "interval", 1*time.Second, "Reporting interval") + fs.IntVar(&c.WorkerCount, "workers", c.WorkerCount, "Number of workers (goroutines) to run") + fs.Float64Var(&c.Rate, "rate", c.Rate, "Approximately how many metrics/spans/logs per second each worker should generate. Zero means no throttling.") + fs.DurationVar(&c.TotalDuration, "duration", c.TotalDuration, "For how long to run the test") + fs.DurationVar(&c.ReportingInterval, "interval", c.ReportingInterval, "Reporting interval") - fs.StringVar(&c.CustomEndpoint, "otlp-endpoint", "", "Destination endpoint for exporting logs, metrics and traces") - fs.BoolVar(&c.Insecure, "otlp-insecure", false, "Whether to enable client transport security for the exporter's grpc or http connection") - fs.BoolVar(&c.InsecureSkipVerify, "otlp-insecure-skip-verify", false, "Whether a client verifies the server's certificate chain and host name") - fs.BoolVar(&c.UseHTTP, "otlp-http", false, "Whether to use HTTP exporter rather than a gRPC one") + fs.StringVar(&c.CustomEndpoint, "otlp-endpoint", c.CustomEndpoint, "Destination endpoint for exporting logs, metrics and traces") + fs.BoolVar(&c.Insecure, "otlp-insecure", c.Insecure, "Whether to enable client transport security for the exporter's grpc or http connection") + fs.BoolVar(&c.InsecureSkipVerify, "otlp-insecure-skip-verify", c.InsecureSkipVerify, "Whether a client verifies the server's certificate chain and host name") + fs.BoolVar(&c.UseHTTP, "otlp-http", c.UseHTTP, "Whether to 
use HTTP exporter rather than a gRPC one") // custom headers - c.Headers = make(KeyValue) fs.Var(&c.Headers, "otlp-header", "Custom header to be passed along with each OTLP request. The value is expected in the format key=\"value\". "+ "Note you may need to escape the quotes when using the tool from a cli. "+ `Flag may be repeated to set multiple headers (e.g --otlp-header key1=\"value1\" --otlp-header key2=\"value2\")`) // custom resource attributes - c.ResourceAttributes = make(KeyValue) fs.Var(&c.ResourceAttributes, "otlp-attributes", "Custom resource attributes to use. The value is expected in the format key=\"value\". "+ "You can use key=true or key=false. to set boolean attribute."+ "Note you may need to escape the quotes when using the tool from a cli. "+ `Flag may be repeated to set multiple attributes (e.g --otlp-attributes key1=\"value1\" --otlp-attributes key2=\"value2\" --telemetry-attributes key3=true)`) - c.TelemetryAttributes = make(KeyValue) fs.Var(&c.TelemetryAttributes, "telemetry-attributes", "Custom telemetry attributes to use. The value is expected in the format key=\"value\". "+ "You can use key=true or key=false. to set boolean attribute."+ "Note you may need to escape the quotes when using the tool from a cli. 
"+ `Flag may be repeated to set multiple attributes (e.g --telemetry-attributes key1=\"value1\" --telemetry-attributes key2=\"value2\" --telemetry-attributes key3=true)`) // TLS CA configuration - fs.StringVar(&c.CaFile, "ca-cert", "", "Trusted Certificate Authority to verify server certificate") + fs.StringVar(&c.CaFile, "ca-cert", c.CaFile, "Trusted Certificate Authority to verify server certificate") // mTLS configuration - fs.BoolVar(&c.ClientAuth.Enabled, "mtls", false, "Whether to require client authentication for mTLS") - fs.StringVar(&c.ClientAuth.ClientCertFile, "client-cert", "", "Client certificate file") - fs.StringVar(&c.ClientAuth.ClientKeyFile, "client-key", "", "Client private key file") + fs.BoolVar(&c.ClientAuth.Enabled, "mtls", c.ClientAuth.Enabled, "Whether to require client authentication for mTLS") + fs.StringVar(&c.ClientAuth.ClientCertFile, "client-cert", c.ClientAuth.ClientCertFile, "Client certificate file") + fs.StringVar(&c.ClientAuth.ClientKeyFile, "client-key", c.ClientAuth.ClientKeyFile, "Client private key file") +} + +// SetDefaults is here to mirror the defaults for flags above, +// This allows for us to have a single place to change the defaults +// while exposing the API for use. 
+func (c *Config) SetDefaults() { + c.WorkerCount = 1 + c.Rate = 0 + c.TotalDuration = 0 + c.ReportingInterval = 1 * time.Second + c.CustomEndpoint = "" + c.Insecure = false + c.InsecureSkipVerify = false + c.UseHTTP = false + c.HTTPPath = "" + c.Headers = make(KeyValue) + c.ResourceAttributes = make(KeyValue) + c.TelemetryAttributes = make(KeyValue) + c.CaFile = "" + c.ClientAuth.Enabled = false + c.ClientAuth.ClientCertFile = "" + c.ClientAuth.ClientKeyFile = "" } diff --git a/cmd/telemetrygen/internal/e2etest/go.mod b/cmd/telemetrygen/internal/e2etest/go.mod index 0583717d93c4..c15474a39490 100644 --- a/cmd/telemetrygen/internal/e2etest/go.mod +++ b/cmd/telemetrygen/internal/e2etest/go.mod @@ -65,14 +65,21 @@ require ( go.opentelemetry.io/collector/pipeline v0.119.0 // indirect go.opentelemetry.io/collector/receiver v0.119.0 // indirect go.opentelemetry.io/collector/receiver/xreceiver v0.119.0 // indirect + go.opentelemetry.io/collector/semconv v0.119.0 // indirect go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.59.0 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.59.0 // indirect go.opentelemetry.io/otel v1.34.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.10.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.10.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.34.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.34.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.34.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.34.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.34.0 // indirect + go.opentelemetry.io/otel/log v0.10.0 // indirect go.opentelemetry.io/otel/metric v1.34.0 // indirect go.opentelemetry.io/otel/sdk v1.34.0 // indirect + go.opentelemetry.io/otel/sdk/log v0.10.0 // indirect 
go.opentelemetry.io/otel/sdk/metric v1.34.0 // indirect go.opentelemetry.io/otel/trace v1.34.0 // indirect go.opentelemetry.io/proto/otlp v1.5.0 // indirect diff --git a/cmd/telemetrygen/internal/e2etest/go.sum b/cmd/telemetrygen/internal/e2etest/go.sum index 440a48cb41d1..531f614caf68 100644 --- a/cmd/telemetrygen/internal/e2etest/go.sum +++ b/cmd/telemetrygen/internal/e2etest/go.sum @@ -136,22 +136,36 @@ go.opentelemetry.io/collector/receiver/receivertest v0.119.0 h1:thZkyftPCNit/m2b go.opentelemetry.io/collector/receiver/receivertest v0.119.0/go.mod h1:DZM70vofnquGkQiTfT5ZSFZlohxANl9XOrVq9h5IKnc= go.opentelemetry.io/collector/receiver/xreceiver v0.119.0 h1:ZcTO+h+r9TyR1XgMhA7FTSTV9RF+z/IDPrcRIg1l56U= go.opentelemetry.io/collector/receiver/xreceiver v0.119.0/go.mod h1:AkoWhnYFMygK7Tlzez398ti20NqydX8wxPVWU86+baE= +go.opentelemetry.io/collector/semconv v0.119.0 h1:xo+V3a7hnK0I6fxAWCXT8BIT1PCBYd4emolhoKSDUlI= +go.opentelemetry.io/collector/semconv v0.119.0/go.mod h1:N6XE8Q0JKgBN2fAhkUQtqK9LT7rEGR6+Wu/Rtbal1iI= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.59.0 h1:rgMkmiGfix9vFJDcDi1PK8WEQP4FLQwLDfhp5ZLpFeE= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.59.0/go.mod h1:ijPqXp5P6IRRByFVVg9DY8P5HkxkHE5ARIa+86aXPf4= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.59.0 h1:CV7UdSGJt/Ao6Gp4CXckLxVRRsRgDHoI8XjbL3PDl8s= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.59.0/go.mod h1:FRmFuRJfag1IZ2dPkHnEoSFVgTVPUd2qf5Vi69hLb8I= go.opentelemetry.io/otel v1.34.0 h1:zRLXxLCgL1WyKsPVrgbSdMN4c0FMkDAskSTQP+0hdUY= go.opentelemetry.io/otel v1.34.0/go.mod h1:OWFPOQ+h4G8xpyjgqo4SxJYdDQ/qmRH+wivy7zzx9oI= +go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.10.0 h1:5dTKu4I5Dn4P2hxyW3l3jTaZx9ACgg0ECos1eAVrheY= +go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.10.0/go.mod h1:P5HcUI8obLrCCmM3sbVBohZFH34iszk/+CPWuakZWL8= 
+go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.10.0 h1:q/heq5Zh8xV1+7GoMGJpTxM2Lhq5+bFxB29tshuRuw0= +go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.10.0/go.mod h1:leO2CSTg0Y+LyvmR7Wm4pUxE8KAmaM2GCVx7O+RATLA= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.34.0 h1:ajl4QczuJVA2TU9W9AGw++86Xga/RKt//16z/yxPgdk= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.34.0/go.mod h1:Vn3/rlOJ3ntf/Q3zAI0V5lDnTbHGaUsNUeF6nZmm7pA= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.34.0 h1:opwv08VbCZ8iecIWs+McMdHRcAXzjAeda3uG2kI/hcA= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.34.0/go.mod h1:oOP3ABpW7vFHulLpE8aYtNBodrHhMTrvfxUXGvqm7Ac= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.34.0 h1:OeNbIYk/2C15ckl7glBlOBp5+WlYsOElzTNmiPW/x60= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.34.0/go.mod h1:7Bept48yIeqxP2OZ9/AqIpYS94h2or0aB4FypJTc8ZM= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.34.0 h1:tgJ0uaNS4c98WRNUEx5U3aDlrDOI5Rs+1Vifcw4DJ8U= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.34.0/go.mod h1:U7HYyW0zt/a9x5J1Kjs+r1f/d4ZHnYFclhYY2+YbeoE= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.34.0 h1:BEj3SPM81McUZHYjRS5pEgNgnmzGJ5tRpU5krWnV8Bs= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.34.0/go.mod h1:9cKLGBDzI/F3NoHLQGm4ZrYdIHsvGt6ej6hUowxY0J4= +go.opentelemetry.io/otel/log v0.10.0 h1:1CXmspaRITvFcjA4kyVszuG4HjA61fPDxMb7q3BuyF0= +go.opentelemetry.io/otel/log v0.10.0/go.mod h1:PbVdm9bXKku/gL0oFfUF4wwsQsOPlpo4VEqjvxih+FM= go.opentelemetry.io/otel/metric v1.34.0 h1:+eTR3U0MyfWjRDhmFMxe2SsW64QrZ84AOhvqS7Y+PoQ= go.opentelemetry.io/otel/metric v1.34.0/go.mod h1:CEDrp0fy2D0MvkXE+dPV7cMi8tWZwX3dmaIhwPOaqHE= go.opentelemetry.io/otel/sdk v1.34.0 h1:95zS4k/2GOy069d321O8jWgYsW3MzVV+KuSPKp7Wr1A= go.opentelemetry.io/otel/sdk v1.34.0/go.mod h1:0e/pNiaMAqaykJGKbi+tSjWfNNHMTxoC9qANsCzbyxU= 
+go.opentelemetry.io/otel/sdk/log v0.10.0 h1:lR4teQGWfeDVGoute6l0Ou+RpFqQ9vaPdrNJlST0bvw= +go.opentelemetry.io/otel/sdk/log v0.10.0/go.mod h1:A+V1UTWREhWAittaQEG4bYm4gAZa6xnvVu+xKrIRkzo= go.opentelemetry.io/otel/sdk/metric v1.34.0 h1:5CeK9ujjbFVL5c1PhLuStg1wxA7vQv7ce1EK0Gyvahk= go.opentelemetry.io/otel/sdk/metric v1.34.0/go.mod h1:jQ/r8Ze28zRKoNRdkjCZxfs6YvBTG1+YIqyFVFYec5w= go.opentelemetry.io/otel/trace v1.34.0 h1:+ouXS2V8Rd4hp4580a8q23bg0azF2nI8cqLYnC8mh/k= diff --git a/cmd/telemetrygen/internal/e2etest/logs_test.go b/cmd/telemetrygen/internal/e2etest/logs_test.go new file mode 100644 index 000000000000..ffbefb45177e --- /dev/null +++ b/cmd/telemetrygen/internal/e2etest/logs_test.go @@ -0,0 +1,51 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package e2etest + +import ( + "context" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.opentelemetry.io/collector/component/componenttest" + "go.opentelemetry.io/collector/consumer/consumertest" + "go.opentelemetry.io/collector/receiver/otlpreceiver" + "go.opentelemetry.io/collector/receiver/receivertest" + + "github.com/open-telemetry/opentelemetry-collector-contrib/cmd/telemetrygen/pkg/logs" + "github.com/open-telemetry/opentelemetry-collector-contrib/internal/common/testutil" +) + +func TestGenerateLogs(t *testing.T) { + f := otlpreceiver.NewFactory() + sink := &consumertest.LogsSink{} + rCfg := f.CreateDefaultConfig() + endpoint := testutil.GetAvailableLocalAddress(t) + rCfg.(*otlpreceiver.Config).GRPC.NetAddr.Endpoint = endpoint + r, err := f.CreateLogs(context.Background(), receivertest.NewNopSettings(), rCfg, sink) + require.NoError(t, err) + err = r.Start(context.Background(), componenttest.NewNopHost()) + require.NoError(t, err) + defer func() { + require.NoError(t, r.Shutdown(context.Background())) + }() + cfg := logs.NewConfig() + cfg.WorkerCount = 10 + cfg.Rate = 10 + cfg.TotalDuration = 10 * time.Second + 
cfg.ReportingInterval = 10 + cfg.CustomEndpoint = endpoint + cfg.Insecure = true + cfg.SkipSettingGRPCLogger = true + cfg.NumLogs = 6000 + go func() { + err = logs.Start(cfg) + assert.NoError(t, err) + }() + require.Eventually(t, func() bool { + return len(sink.AllLogs()) > 0 + }, 10*time.Second, 100*time.Millisecond) +} diff --git a/cmd/telemetrygen/internal/e2etest/metrics_test.go b/cmd/telemetrygen/internal/e2etest/metrics_test.go new file mode 100644 index 000000000000..73e1c6102543 --- /dev/null +++ b/cmd/telemetrygen/internal/e2etest/metrics_test.go @@ -0,0 +1,51 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package e2etest + +import ( + "context" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.opentelemetry.io/collector/component/componenttest" + "go.opentelemetry.io/collector/consumer/consumertest" + "go.opentelemetry.io/collector/receiver/otlpreceiver" + "go.opentelemetry.io/collector/receiver/receivertest" + + "github.com/open-telemetry/opentelemetry-collector-contrib/cmd/telemetrygen/pkg/metrics" + "github.com/open-telemetry/opentelemetry-collector-contrib/internal/common/testutil" +) + +func TestGenerateMetrics(t *testing.T) { + f := otlpreceiver.NewFactory() + sink := &consumertest.MetricsSink{} + rCfg := f.CreateDefaultConfig() + endpoint := testutil.GetAvailableLocalAddress(t) + rCfg.(*otlpreceiver.Config).GRPC.NetAddr.Endpoint = endpoint + r, err := f.CreateMetrics(context.Background(), receivertest.NewNopSettings(), rCfg, sink) + require.NoError(t, err) + err = r.Start(context.Background(), componenttest.NewNopHost()) + require.NoError(t, err) + defer func() { + require.NoError(t, r.Shutdown(context.Background())) + }() + cfg := metrics.NewConfig() + cfg.WorkerCount = 10 + cfg.Rate = 10 + cfg.TotalDuration = 10 * time.Second + cfg.ReportingInterval = 10 + cfg.CustomEndpoint = endpoint + cfg.Insecure = true + cfg.SkipSettingGRPCLogger = true + 
cfg.NumMetrics = 6000 + go func() { + err = metrics.Start(cfg) + assert.NoError(t, err) + }() + require.Eventually(t, func() bool { + return len(sink.AllMetrics()) > 0 + }, 10*time.Second, 100*time.Millisecond) +} diff --git a/cmd/telemetrygen/internal/e2etest/e2e_test.go b/cmd/telemetrygen/internal/e2etest/traces_test.go similarity index 68% rename from cmd/telemetrygen/internal/e2etest/e2e_test.go rename to cmd/telemetrygen/internal/e2etest/traces_test.go index 0d76261a9350..4b7e18deefba 100644 --- a/cmd/telemetrygen/internal/e2etest/e2e_test.go +++ b/cmd/telemetrygen/internal/e2etest/traces_test.go @@ -15,8 +15,7 @@ import ( "go.opentelemetry.io/collector/receiver/otlpreceiver" "go.opentelemetry.io/collector/receiver/receivertest" - "github.com/open-telemetry/opentelemetry-collector-contrib/cmd/telemetrygen/internal/common" - "github.com/open-telemetry/opentelemetry-collector-contrib/cmd/telemetrygen/internal/traces" + "github.com/open-telemetry/opentelemetry-collector-contrib/cmd/telemetrygen/pkg/traces" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/common/testutil" ) @@ -33,25 +32,15 @@ func TestGenerateTraces(t *testing.T) { defer func() { require.NoError(t, r.Shutdown(context.Background())) }() - cfg := &traces.Config{ - Config: common.Config{ - WorkerCount: 10, - Rate: 10, - TotalDuration: 10 * time.Second, - ReportingInterval: 10, - CustomEndpoint: endpoint, - Insecure: true, - UseHTTP: false, - Headers: nil, - ResourceAttributes: nil, - SkipSettingGRPCLogger: true, - }, - NumTraces: 6000, - ServiceName: "foo", - StatusCode: "0", - LoadSize: 0, - Batch: true, - } + cfg := traces.NewConfig() + cfg.WorkerCount = 10 + cfg.Rate = 10 + cfg.TotalDuration = 10 * time.Second + cfg.ReportingInterval = 10 + cfg.CustomEndpoint = endpoint + cfg.Insecure = true + cfg.SkipSettingGRPCLogger = true + cfg.NumTraces = 6000 go func() { err = traces.Start(cfg) assert.NoError(t, err) diff --git a/cmd/telemetrygen/internal/logs/config.go 
b/cmd/telemetrygen/internal/logs/config.go deleted file mode 100644 index dd17973bb66d..000000000000 --- a/cmd/telemetrygen/internal/logs/config.go +++ /dev/null @@ -1,58 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package logs - -import ( - "fmt" - - "github.com/spf13/pflag" - - "github.com/open-telemetry/opentelemetry-collector-contrib/cmd/telemetrygen/internal/common" -) - -// Config describes the test scenario. -type Config struct { - common.Config - NumLogs int - Body string - SeverityText string - SeverityNumber int32 - TraceID string - SpanID string -} - -// Flags registers config flags. -func (c *Config) Flags(fs *pflag.FlagSet) { - c.CommonFlags(fs) - - fs.StringVar(&c.HTTPPath, "otlp-http-url-path", "/v1/logs", "Which URL path to write to") - - fs.IntVar(&c.NumLogs, "logs", 1, "Number of logs to generate in each worker (ignored if duration is provided)") - fs.StringVar(&c.Body, "body", "the message", "Body of the log") - fs.StringVar(&c.SeverityText, "severity-text", "Info", "Severity text of the log") - fs.Int32Var(&c.SeverityNumber, "severity-number", 9, "Severity number of the log, range from 1 to 24 (inclusive)") - fs.StringVar(&c.TraceID, "trace-id", "", "TraceID of the log") - fs.StringVar(&c.SpanID, "span-id", "", "SpanID of the log") -} - -// Validate validates the test scenario parameters. 
-func (c *Config) Validate() error { - if c.TotalDuration <= 0 && c.NumLogs <= 0 { - return fmt.Errorf("either `logs` or `duration` must be greater than 0") - } - - if c.TraceID != "" { - if err := common.ValidateTraceID(c.TraceID); err != nil { - return err - } - } - - if c.SpanID != "" { - if err := common.ValidateSpanID(c.SpanID); err != nil { - return err - } - } - - return nil -} diff --git a/cmd/telemetrygen/internal/traces/config.go b/cmd/telemetrygen/internal/traces/config.go deleted file mode 100644 index 5bb63dffbd9a..000000000000 --- a/cmd/telemetrygen/internal/traces/config.go +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package traces - -import ( - "fmt" - "time" - - "github.com/spf13/pflag" - - "github.com/open-telemetry/opentelemetry-collector-contrib/cmd/telemetrygen/internal/common" -) - -// Config describes the test scenario. -type Config struct { - common.Config - NumTraces int - NumChildSpans int - PropagateContext bool - ServiceName string - StatusCode string - Batch bool - LoadSize int - - SpanDuration time.Duration -} - -// Flags registers config flags. 
-func (c *Config) Flags(fs *pflag.FlagSet) { - c.CommonFlags(fs) - - fs.StringVar(&c.HTTPPath, "otlp-http-url-path", "/v1/traces", "Which URL path to write to") - - fs.IntVar(&c.NumTraces, "traces", 1, "Number of traces to generate in each worker (ignored if duration is provided)") - fs.IntVar(&c.NumChildSpans, "child-spans", 1, "Number of child spans to generate for each trace") - fs.BoolVar(&c.PropagateContext, "marshal", false, "Whether to marshal trace context via HTTP headers") - fs.StringVar(&c.ServiceName, "service", "telemetrygen", "Service name to use") - fs.StringVar(&c.StatusCode, "status-code", "0", "Status code to use for the spans, one of (Unset, Error, Ok) or the equivalent integer (0,1,2)") - fs.BoolVar(&c.Batch, "batch", true, "Whether to batch traces") - fs.IntVar(&c.LoadSize, "size", 0, "Desired minimum size in MB of string data for each trace generated. This can be used to test traces with large payloads, i.e. when testing the OTLP receiver endpoint max receive size.") - fs.DurationVar(&c.SpanDuration, "span-duration", 123*time.Microsecond, "The duration of each generated span.") -} - -// Validate validates the test scenario parameters. -func (c *Config) Validate() error { - if c.TotalDuration <= 0 && c.NumTraces <= 0 { - return fmt.Errorf("either `traces` or `duration` must be greater than 0") - } - return nil -} diff --git a/cmd/telemetrygen/pkg/logs/config.go b/cmd/telemetrygen/pkg/logs/config.go new file mode 100644 index 000000000000..a624a9d16eb0 --- /dev/null +++ b/cmd/telemetrygen/pkg/logs/config.go @@ -0,0 +1,78 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package logs + +import ( + "fmt" + + "github.com/spf13/pflag" + + "github.com/open-telemetry/opentelemetry-collector-contrib/cmd/telemetrygen/internal/common" +) + +// Config describes the test scenario. 
+type Config struct { + common.Config + NumLogs int + Body string + SeverityText string + SeverityNumber int32 + TraceID string + SpanID string +} + +func NewConfig() *Config { + cfg := &Config{} + cfg.SetDefaults() + return cfg +} + +// Flags registers config flags. +func (c *Config) Flags(fs *pflag.FlagSet) { + c.CommonFlags(fs) + + fs.StringVar(&c.HTTPPath, "otlp-http-url-path", c.HTTPPath, "Which URL path to write to") + + fs.IntVar(&c.NumLogs, "logs", c.NumLogs, "Number of logs to generate in each worker (ignored if duration is provided)") + fs.StringVar(&c.Body, "body", c.Body, "Body of the log") + fs.StringVar(&c.SeverityText, "severity-text", c.SeverityText, "Severity text of the log") + fs.Int32Var(&c.SeverityNumber, "severity-number", c.SeverityNumber, "Severity number of the log, range from 1 to 24 (inclusive)") + fs.StringVar(&c.TraceID, "trace-id", c.TraceID, "TraceID of the log") + fs.StringVar(&c.SpanID, "span-id", c.SpanID, "SpanID of the log") +} + +// SetDefaults sets the default values for the configuration +// This is called before parsing the command line flags and when +// calling NewConfig() +func (c *Config) SetDefaults() { + c.Config.SetDefaults() + c.HTTPPath = "/v1/logs" + c.NumLogs = 1 + c.Body = "the message" + c.SeverityText = "Info" + c.SeverityNumber = 9 + c.TraceID = "" + c.SpanID = "" +} + +// Validate validates the test scenario parameters. 
+func (c *Config) Validate() error { + if c.TotalDuration <= 0 && c.NumLogs <= 0 { + return fmt.Errorf("either `logs` or `duration` must be greater than 0") + } + + if c.TraceID != "" { + if err := common.ValidateTraceID(c.TraceID); err != nil { + return err + } + } + + if c.SpanID != "" { + if err := common.ValidateSpanID(c.SpanID); err != nil { + return err + } + } + + return nil +} diff --git a/cmd/telemetrygen/internal/logs/exporter.go b/cmd/telemetrygen/pkg/logs/exporter.go similarity index 100% rename from cmd/telemetrygen/internal/logs/exporter.go rename to cmd/telemetrygen/pkg/logs/exporter.go diff --git a/cmd/telemetrygen/internal/logs/logs.go b/cmd/telemetrygen/pkg/logs/logs.go similarity index 70% rename from cmd/telemetrygen/internal/logs/logs.go rename to cmd/telemetrygen/pkg/logs/logs.go index ff428fc93e1b..b96ffc41948f 100644 --- a/cmd/telemetrygen/internal/logs/logs.go +++ b/cmd/telemetrygen/pkg/logs/logs.go @@ -29,45 +29,18 @@ func Start(cfg *Config) error { if err != nil { return err } - expFunc := func() (sdklog.Exporter, error) { - var exp sdklog.Exporter - if cfg.UseHTTP { - var exporterOpts []otlploghttp.Option - - logger.Info("starting HTTP exporter") - exporterOpts, err = httpExporterOptions(cfg) - if err != nil { - return nil, err - } - exp, err = otlploghttp.New(context.Background(), exporterOpts...) - if err != nil { - return nil, fmt.Errorf("failed to obtain OTLP HTTP exporter: %w", err) - } - } else { - var exporterOpts []otlploggrpc.Option - logger.Info("starting gRPC exporter") - exporterOpts, err = grpcExporterOptions(cfg) - if err != nil { - return nil, err - } - exp, err = otlploggrpc.New(context.Background(), exporterOpts...) 
- if err != nil { - return nil, fmt.Errorf("failed to obtain OTLP gRPC exporter: %w", err) - } - } - return exp, err - } + logger.Info("starting the logs generator with configuration", zap.Any("config", cfg)) - if err = Run(cfg, expFunc, logger); err != nil { + if err = run(cfg, exporterFactory(cfg, logger), logger); err != nil { return err } return nil } -// Run executes the test scenario. -func Run(c *Config, exp func() (sdklog.Exporter, error), logger *zap.Logger) error { +// run executes the test scenario. +func run(c *Config, expF exporterFunc, logger *zap.Logger) error { if err := c.Validate(); err != nil { return err } @@ -111,7 +84,17 @@ func Run(c *Config, exp func() (sdklog.Exporter, error), logger *zap.Logger) err traceID: c.TraceID, spanID: c.SpanID, } - + exp, err := expF() + if err != nil { + w.logger.Error("failed to create the exporter", zap.Error(err)) + return err + } + defer func() { + w.logger.Info("stopping the exporter") + if tempError := exp.Shutdown(context.Background()); tempError != nil { + w.logger.Error("failed to stop the exporter", zap.Error(tempError)) + } + }() go w.simulateLogs(res, exp, c.GetTelemetryAttributes()) } if c.TotalDuration > 0 { @@ -122,6 +105,45 @@ func Run(c *Config, exp func() (sdklog.Exporter, error), logger *zap.Logger) err return nil } +type exporterFunc func() (sdklog.Exporter, error) + +func exporterFactory(cfg *Config, logger *zap.Logger) exporterFunc { + return func() (sdklog.Exporter, error) { + return createExporter(cfg, logger) + } +} + +func createExporter(cfg *Config, logger *zap.Logger) (sdklog.Exporter, error) { + var exp sdklog.Exporter + var err error + if cfg.UseHTTP { + var exporterOpts []otlploghttp.Option + + logger.Info("starting HTTP exporter") + exporterOpts, err = httpExporterOptions(cfg) + if err != nil { + return nil, err + } + exp, err = otlploghttp.New(context.Background(), exporterOpts...) 
+ if err != nil { + return nil, fmt.Errorf("failed to obtain OTLP HTTP exporter: %w", err) + } + } else { + var exporterOpts []otlploggrpc.Option + + logger.Info("starting gRPC exporter") + exporterOpts, err = grpcExporterOptions(cfg) + if err != nil { + return nil, err + } + exp, err = otlploggrpc.New(context.Background(), exporterOpts...) + if err != nil { + return nil, fmt.Errorf("failed to obtain OTLP gRPC exporter: %w", err) + } + } + return exp, err +} + func parseSeverity(severityText string, severityNumber int32) (string, log.Severity, error) { sn := log.Severity(severityNumber) if sn < log.SeverityTrace1 || sn > log.SeverityFatal4 { diff --git a/cmd/telemetrygen/internal/logs/package_test.go b/cmd/telemetrygen/pkg/logs/package_test.go similarity index 100% rename from cmd/telemetrygen/internal/logs/package_test.go rename to cmd/telemetrygen/pkg/logs/package_test.go diff --git a/cmd/telemetrygen/internal/logs/worker.go b/cmd/telemetrygen/pkg/logs/worker.go similarity index 84% rename from cmd/telemetrygen/internal/logs/worker.go rename to cmd/telemetrygen/pkg/logs/worker.go index 26de676e8efe..a449f7912742 100644 --- a/cmd/telemetrygen/internal/logs/worker.go +++ b/cmd/telemetrygen/pkg/logs/worker.go @@ -35,23 +35,10 @@ type worker struct { spanID string // spanID string } -func (w worker) simulateLogs(res *resource.Resource, exporterFunc func() (sdklog.Exporter, error), telemetryAttributes []attribute.KeyValue) { +func (w worker) simulateLogs(res *resource.Resource, exporter sdklog.Exporter, telemetryAttributes []attribute.KeyValue) { limiter := rate.NewLimiter(w.limitPerSecond, 1) var i int64 - exporter, err := exporterFunc() - if err != nil { - w.logger.Error("failed to create the exporter", zap.Error(err)) - return - } - - defer func() { - w.logger.Info("stopping the exporter") - if tempError := exporter.Shutdown(context.Background()); tempError != nil { - w.logger.Error("failed to stop the exporter", zap.Error(tempError)) - } - }() - for 
w.running.Load() { var tid trace.TraceID var sid trace.SpanID diff --git a/cmd/telemetrygen/internal/logs/worker_test.go b/cmd/telemetrygen/pkg/logs/worker_test.go similarity index 93% rename from cmd/telemetrygen/internal/logs/worker_test.go rename to cmd/telemetrygen/pkg/logs/worker_test.go index 2d1095afe326..1ba828323edc 100644 --- a/cmd/telemetrygen/internal/logs/worker_test.go +++ b/cmd/telemetrygen/pkg/logs/worker_test.go @@ -58,7 +58,7 @@ func TestFixedNumberOfLogs(t *testing.T) { // test logger, _ := zap.NewDevelopment() - require.NoError(t, Run(cfg, expFunc, logger)) + require.NoError(t, run(cfg, expFunc, logger)) time.Sleep(1 * time.Second) @@ -82,7 +82,7 @@ func TestRateOfLogs(t *testing.T) { } // test - require.NoError(t, Run(cfg, expFunc, zap.NewNop())) + require.NoError(t, run(cfg, expFunc, zap.NewNop())) // verify // the minimum acceptable number of logs for the rate of 10/sec for half a second @@ -107,7 +107,7 @@ func TestUnthrottled(t *testing.T) { // test logger, _ := zap.NewDevelopment() - require.NoError(t, Run(cfg, expFunc, logger)) + require.NoError(t, run(cfg, expFunc, logger)) assert.Greater(t, len(m.logs), 100, "there should have been more than 100 logs, had %d", len(m.logs)) } @@ -129,7 +129,7 @@ func TestCustomBody(t *testing.T) { // test logger, _ := zap.NewDevelopment() - require.NoError(t, Run(cfg, expFunc, logger)) + require.NoError(t, run(cfg, expFunc, logger)) assert.Equal(t, "custom body", m.logs[0].Body().AsString()) } @@ -144,7 +144,7 @@ func TestLogsWithNoTelemetryAttributes(t *testing.T) { // test logger, _ := zap.NewDevelopment() - require.NoError(t, Run(cfg, expFunc, logger)) + require.NoError(t, run(cfg, expFunc, logger)) time.Sleep(1 * time.Second) @@ -166,7 +166,7 @@ func TestLogsWithOneTelemetryAttributes(t *testing.T) { // test logger, _ := zap.NewDevelopment() - require.NoError(t, Run(cfg, expFunc, logger)) + require.NoError(t, run(cfg, expFunc, logger)) time.Sleep(1 * time.Second) @@ -195,7 +195,7 @@ func 
TestLogsWithMultipleTelemetryAttributes(t *testing.T) { // test logger, _ := zap.NewDevelopment() - require.NoError(t, Run(cfg, expFunc, logger)) + require.NoError(t, run(cfg, expFunc, logger)) time.Sleep(1 * time.Second) @@ -219,7 +219,7 @@ func TestLogsWithTraceIDAndSpanID(t *testing.T) { // test logger, _ := zap.NewDevelopment() - require.NoError(t, Run(cfg, expFunc, logger)) + require.NoError(t, run(cfg, expFunc, logger)) // verify require.Len(t, m.logs, qty) @@ -276,7 +276,7 @@ func TestValidate(t *testing.T) { return m, nil } logger, _ := zap.NewDevelopment() - require.EqualError(t, Run(tt.cfg, expFunc, logger), tt.wantErrMessage) + require.EqualError(t, run(tt.cfg, expFunc, logger), tt.wantErrMessage) }) } } diff --git a/cmd/telemetrygen/internal/metrics/config.go b/cmd/telemetrygen/pkg/metrics/config.go similarity index 55% rename from cmd/telemetrygen/internal/metrics/config.go rename to cmd/telemetrygen/pkg/metrics/config.go index 16c1306a9071..3ccd66251a8f 100644 --- a/cmd/telemetrygen/internal/metrics/config.go +++ b/cmd/telemetrygen/pkg/metrics/config.go @@ -16,26 +16,44 @@ type Config struct { common.Config NumMetrics int MetricName string - MetricType metricType + MetricType MetricType SpanID string TraceID string } +// NewConfig creates a new Config with default values. +func NewConfig() *Config { + cfg := &Config{} + cfg.SetDefaults() + return cfg +} + // Flags registers config flags. func (c *Config) Flags(fs *pflag.FlagSet) { - // Use Gauge as default metric type. - c.MetricName = "gen" - c.MetricType = metricTypeGauge - c.CommonFlags(fs) - fs.StringVar(&c.HTTPPath, "otlp-http-url-path", "/v1/metrics", "Which URL path to write to") + fs.StringVar(&c.HTTPPath, "otlp-http-url-path", c.HTTPPath, "Which URL path to write to") fs.Var(&c.MetricType, "metric-type", "Metric type enum. 
must be one of 'Gauge' or 'Sum'") - fs.IntVar(&c.NumMetrics, "metrics", 1, "Number of metrics to generate in each worker (ignored if duration is provided)") + fs.IntVar(&c.NumMetrics, "metrics", c.NumMetrics, "Number of metrics to generate in each worker (ignored if duration is provided)") - fs.StringVar(&c.TraceID, "trace-id", "", "TraceID to use as exemplar") - fs.StringVar(&c.SpanID, "span-id", "", "SpanID to use as exemplar") + fs.StringVar(&c.TraceID, "trace-id", c.TraceID, "TraceID to use as exemplar") + fs.StringVar(&c.SpanID, "span-id", c.SpanID, "SpanID to use as exemplar") +} + +// SetDefaults sets the default values for the configuration +// This is called before parsing the command line flags and when +// calling NewConfig() +func (c *Config) SetDefaults() { + c.Config.SetDefaults() + c.HTTPPath = "/v1/metrics" + c.NumMetrics = 1 + + // Use Gauge as default metric type. + c.MetricType = MetricTypeGauge + c.MetricName = "gen" + c.TraceID = "" + c.SpanID = "" } // Validate validates the test scenario parameters. 
diff --git a/cmd/telemetrygen/internal/metrics/exporter.go b/cmd/telemetrygen/pkg/metrics/exporter.go similarity index 100% rename from cmd/telemetrygen/internal/metrics/exporter.go rename to cmd/telemetrygen/pkg/metrics/exporter.go diff --git a/cmd/telemetrygen/internal/metrics/metrics.go b/cmd/telemetrygen/pkg/metrics/metrics.go similarity index 62% rename from cmd/telemetrygen/internal/metrics/metrics.go rename to cmd/telemetrygen/pkg/metrics/metrics.go index ec05571412e5..bfbc9ab529e9 100644 --- a/cmd/telemetrygen/internal/metrics/metrics.go +++ b/cmd/telemetrygen/pkg/metrics/metrics.go @@ -29,47 +29,18 @@ func Start(cfg *Config) error { if err != nil { return err } - logger.Info("starting the metrics generator with configuration", zap.Any("config", cfg)) - - expFunc := func() (sdkmetric.Exporter, error) { - var exp sdkmetric.Exporter - if cfg.UseHTTP { - var exporterOpts []otlpmetrichttp.Option - logger.Info("starting HTTP exporter") - exporterOpts, err = httpExporterOptions(cfg) - if err != nil { - return nil, err - } - exp, err = otlpmetrichttp.New(context.Background(), exporterOpts...) - if err != nil { - return nil, fmt.Errorf("failed to obtain OTLP HTTP exporter: %w", err) - } - } else { - var exporterOpts []otlpmetricgrpc.Option - - logger.Info("starting gRPC exporter") - exporterOpts, err = grpcExporterOptions(cfg) - if err != nil { - return nil, err - } - exp, err = otlpmetricgrpc.New(context.Background(), exporterOpts...) - if err != nil { - return nil, fmt.Errorf("failed to obtain OTLP gRPC exporter: %w", err) - } - } - return exp, err - } + logger.Info("starting the metrics generator with configuration", zap.Any("config", cfg)) - if err = Run(cfg, expFunc, logger); err != nil { + if err = run(cfg, exporterFactory(cfg, logger), logger); err != nil { return err } return nil } -// Run executes the test scenario. -func Run(c *Config, exp func() (sdkmetric.Exporter, error), logger *zap.Logger) error { +// run executes the test scenario. 
+func run(c *Config, expF exporterFunc, logger *zap.Logger) error { if err := c.Validate(); err != nil { return err } @@ -106,6 +77,18 @@ func Run(c *Config, exp func() (sdkmetric.Exporter, error), logger *zap.Logger) logger: logger.With(zap.Int("worker", i)), index: i, } + exp, err := expF() + if err != nil { + w.logger.Error("failed to create the exporter", zap.Error(err)) + return err + } + + defer func() { + w.logger.Info("stopping the exporter") + if tempError := exp.Shutdown(context.Background()); tempError != nil { + w.logger.Error("failed to stop the exporter", zap.Error(tempError)) + } + }() go w.simulateMetrics(res, exp, c.GetTelemetryAttributes()) } @@ -117,6 +100,45 @@ func Run(c *Config, exp func() (sdkmetric.Exporter, error), logger *zap.Logger) return nil } +type exporterFunc func() (sdkmetric.Exporter, error) + +func exporterFactory(cfg *Config, logger *zap.Logger) exporterFunc { + return func() (sdkmetric.Exporter, error) { + return createExporter(cfg, logger) + } +} + +func createExporter(cfg *Config, logger *zap.Logger) (sdkmetric.Exporter, error) { + var exp sdkmetric.Exporter + var err error + if cfg.UseHTTP { + var exporterOpts []otlpmetrichttp.Option + + logger.Info("starting HTTP exporter") + exporterOpts, err = httpExporterOptions(cfg) + if err != nil { + return nil, err + } + exp, err = otlpmetrichttp.New(context.Background(), exporterOpts...) + if err != nil { + return nil, fmt.Errorf("failed to obtain OTLP HTTP exporter: %w", err) + } + } else { + var exporterOpts []otlpmetricgrpc.Option + + logger.Info("starting gRPC exporter") + exporterOpts, err = grpcExporterOptions(cfg) + if err != nil { + return nil, err + } + exp, err = otlpmetricgrpc.New(context.Background(), exporterOpts...) 
+ if err != nil { + return nil, fmt.Errorf("failed to obtain OTLP gRPC exporter: %w", err) + } + } + return exp, err +} + func exemplarsFromConfig(c *Config) []metricdata.Exemplar[int64] { if c.TraceID != "" || c.SpanID != "" { var exemplars []metricdata.Exemplar[int64] diff --git a/cmd/telemetrygen/internal/metrics/metrics_test.go b/cmd/telemetrygen/pkg/metrics/metrics_test.go similarity index 100% rename from cmd/telemetrygen/internal/metrics/metrics_test.go rename to cmd/telemetrygen/pkg/metrics/metrics_test.go diff --git a/cmd/telemetrygen/internal/metrics/metrics_types.go b/cmd/telemetrygen/pkg/metrics/metrics_types.go similarity index 59% rename from cmd/telemetrygen/internal/metrics/metrics_types.go rename to cmd/telemetrygen/pkg/metrics/metrics_types.go index e07b26720acb..af2d3db623ae 100644 --- a/cmd/telemetrygen/internal/metrics/metrics_types.go +++ b/cmd/telemetrygen/pkg/metrics/metrics_types.go @@ -7,24 +7,24 @@ import ( "errors" ) -type metricType string +type MetricType string const ( - metricTypeGauge = "Gauge" - metricTypeSum = "Sum" - metricTypeHistogram = "Histogram" + MetricTypeGauge MetricType = "Gauge" + MetricTypeSum MetricType = "Sum" + MetricTypeHistogram MetricType = "Histogram" ) // String is used both by fmt.Print and by Cobra in help text -func (e *metricType) String() string { +func (e *MetricType) String() string { return string(*e) } // Set must have pointer receiver so it doesn't change the value of a copy -func (e *metricType) Set(v string) error { +func (e *MetricType) Set(v string) error { switch v { case "Gauge", "Sum", "Histogram": - *e = metricType(v) + *e = MetricType(v) return nil default: return errors.New(`must be one of "Gauge", "Sum", "Histogram"`) @@ -32,6 +32,6 @@ func (e *metricType) Set(v string) error { } // Type is only used in help text -func (e *metricType) Type() string { - return "metricType" +func (e *MetricType) Type() string { + return "MetricType" } diff --git 
a/cmd/telemetrygen/internal/metrics/package_test.go b/cmd/telemetrygen/pkg/metrics/package_test.go similarity index 100% rename from cmd/telemetrygen/internal/metrics/package_test.go rename to cmd/telemetrygen/pkg/metrics/package_test.go diff --git a/cmd/telemetrygen/internal/metrics/worker.go b/cmd/telemetrygen/pkg/metrics/worker.go similarity index 88% rename from cmd/telemetrygen/internal/metrics/worker.go rename to cmd/telemetrygen/pkg/metrics/worker.go index 02a0c7ded490..2483670e55f9 100644 --- a/cmd/telemetrygen/internal/metrics/worker.go +++ b/cmd/telemetrygen/pkg/metrics/worker.go @@ -20,7 +20,7 @@ import ( type worker struct { running *atomic.Bool // pointer to shared flag that indicates it's time to stop the test metricName string // name of metric to generate - metricType metricType // type of metric to generate + metricType MetricType // type of metric to generate exemplars []metricdata.Exemplar[int64] // exemplars to attach to the metric numMetrics int // how many metrics the worker has to generate (only when duration==0) totalDuration time.Duration // how long to run the test for (overrides `numMetrics`) @@ -76,28 +76,15 @@ var histogramBucketSamples = []struct { }, } -func (w worker) simulateMetrics(res *resource.Resource, exporterFunc func() (sdkmetric.Exporter, error), signalAttrs []attribute.KeyValue) { +func (w worker) simulateMetrics(res *resource.Resource, exporter sdkmetric.Exporter, signalAttrs []attribute.KeyValue) { limiter := rate.NewLimiter(w.limitPerSecond, 1) - exporter, err := exporterFunc() - if err != nil { - w.logger.Error("failed to create the exporter", zap.Error(err)) - return - } - - defer func() { - w.logger.Info("stopping the exporter") - if tempError := exporter.Shutdown(context.Background()); tempError != nil { - w.logger.Error("failed to stop the exporter", zap.Error(tempError)) - } - }() - var i int64 for w.running.Load() { var metrics []metricdata.Metrics switch w.metricType { - case metricTypeGauge: + case 
MetricTypeGauge: metrics = append(metrics, metricdata.Metrics{ Name: w.metricName, Data: metricdata.Gauge[int64]{ @@ -111,7 +98,7 @@ func (w worker) simulateMetrics(res *resource.Resource, exporterFunc func() (sdk }, }, }) - case metricTypeSum: + case MetricTypeSum: metrics = append(metrics, metricdata.Metrics{ Name: w.metricName, Data: metricdata.Sum[int64]{ @@ -128,7 +115,7 @@ func (w worker) simulateMetrics(res *resource.Resource, exporterFunc func() (sdk }, }, }) - case metricTypeHistogram: + case MetricTypeHistogram: iteration := uint64(i) % 10 sum := histogramBucketSamples[iteration].sum bucketCounts := histogramBucketSamples[iteration].bucketCounts diff --git a/cmd/telemetrygen/internal/metrics/worker_test.go b/cmd/telemetrygen/pkg/metrics/worker_test.go similarity index 88% rename from cmd/telemetrygen/internal/metrics/worker_test.go rename to cmd/telemetrygen/pkg/metrics/worker_test.go index 7574ac203757..fe83abdcc81f 100644 --- a/cmd/telemetrygen/internal/metrics/worker_test.go +++ b/cmd/telemetrygen/pkg/metrics/worker_test.go @@ -57,7 +57,7 @@ func TestFixedNumberOfMetrics(t *testing.T) { WorkerCount: 1, }, NumMetrics: 5, - MetricType: metricTypeSum, + MetricType: MetricTypeSum, } m := &mockExporter{} expFunc := func() (sdkmetric.Exporter, error) { @@ -66,7 +66,7 @@ func TestFixedNumberOfMetrics(t *testing.T) { // act logger, _ := zap.NewDevelopment() - require.NoError(t, Run(cfg, expFunc, logger)) + require.NoError(t, run(cfg, expFunc, logger)) time.Sleep(1 * time.Second) // assert @@ -81,7 +81,7 @@ func TestRateOfMetrics(t *testing.T) { TotalDuration: time.Second / 2, WorkerCount: 1, }, - MetricType: metricTypeSum, + MetricType: MetricTypeSum, } m := &mockExporter{} expFunc := func() (sdkmetric.Exporter, error) { @@ -89,7 +89,7 @@ func TestRateOfMetrics(t *testing.T) { } // act - require.NoError(t, Run(cfg, expFunc, zap.NewNop())) + require.NoError(t, run(cfg, expFunc, zap.NewNop())) // assert // the minimum acceptable number of metrics for the rate of 
10/sec for half a second @@ -105,7 +105,7 @@ func TestUnthrottled(t *testing.T) { TotalDuration: 1 * time.Second, WorkerCount: 1, }, - MetricType: metricTypeSum, + MetricType: MetricTypeSum, } m := &mockExporter{} expFunc := func() (sdkmetric.Exporter, error) { @@ -114,7 +114,7 @@ func TestUnthrottled(t *testing.T) { // act logger, _ := zap.NewDevelopment() - require.NoError(t, Run(cfg, expFunc, logger)) + require.NoError(t, run(cfg, expFunc, logger)) // assert assert.Greater(t, len(m.rms), 100, "there should have been more than 100 metrics, had %d", len(m.rms)) @@ -123,7 +123,7 @@ func TestUnthrottled(t *testing.T) { func TestSumNoTelemetryAttrs(t *testing.T) { // arrange qty := 2 - cfg := configWithNoAttributes(metricTypeSum, qty) + cfg := configWithNoAttributes(MetricTypeSum, qty) m := &mockExporter{} expFunc := func() (sdkmetric.Exporter, error) { return m, nil @@ -131,7 +131,7 @@ func TestSumNoTelemetryAttrs(t *testing.T) { // act logger, _ := zap.NewDevelopment() - require.NoError(t, Run(cfg, expFunc, logger)) + require.NoError(t, run(cfg, expFunc, logger)) time.Sleep(1 * time.Second) @@ -151,7 +151,7 @@ func TestSumNoTelemetryAttrs(t *testing.T) { func TestGaugeNoTelemetryAttrs(t *testing.T) { // arrange qty := 2 - cfg := configWithNoAttributes(metricTypeGauge, qty) + cfg := configWithNoAttributes(MetricTypeGauge, qty) m := &mockExporter{} expFunc := func() (sdkmetric.Exporter, error) { return m, nil @@ -159,7 +159,7 @@ func TestGaugeNoTelemetryAttrs(t *testing.T) { // act logger, _ := zap.NewDevelopment() - require.NoError(t, Run(cfg, expFunc, logger)) + require.NoError(t, run(cfg, expFunc, logger)) time.Sleep(1 * time.Second) @@ -179,7 +179,7 @@ func TestGaugeNoTelemetryAttrs(t *testing.T) { func TestSumSingleTelemetryAttr(t *testing.T) { // arrange qty := 2 - cfg := configWithOneAttribute(metricTypeSum, qty) + cfg := configWithOneAttribute(MetricTypeSum, qty) m := &mockExporter{} expFunc := func() (sdkmetric.Exporter, error) { return m, nil @@ -187,7 
+187,7 @@ func TestSumSingleTelemetryAttr(t *testing.T) { // act logger, _ := zap.NewDevelopment() - require.NoError(t, Run(cfg, expFunc, logger)) + require.NoError(t, run(cfg, expFunc, logger)) time.Sleep(1 * time.Second) @@ -209,7 +209,7 @@ func TestSumSingleTelemetryAttr(t *testing.T) { func TestGaugeSingleTelemetryAttr(t *testing.T) { // arrange qty := 2 - cfg := configWithOneAttribute(metricTypeGauge, qty) + cfg := configWithOneAttribute(MetricTypeGauge, qty) m := &mockExporter{} expFunc := func() (sdkmetric.Exporter, error) { return m, nil @@ -217,7 +217,7 @@ func TestGaugeSingleTelemetryAttr(t *testing.T) { // act logger, _ := zap.NewDevelopment() - require.NoError(t, Run(cfg, expFunc, logger)) + require.NoError(t, run(cfg, expFunc, logger)) time.Sleep(1 * time.Second) @@ -239,7 +239,7 @@ func TestGaugeSingleTelemetryAttr(t *testing.T) { func TestSumMultipleTelemetryAttr(t *testing.T) { // arrange qty := 2 - cfg := configWithMultipleAttributes(metricTypeSum, qty) + cfg := configWithMultipleAttributes(MetricTypeSum, qty) m := &mockExporter{} expFunc := func() (sdkmetric.Exporter, error) { return m, nil @@ -247,7 +247,7 @@ func TestSumMultipleTelemetryAttr(t *testing.T) { // act logger, _ := zap.NewDevelopment() - require.NoError(t, Run(cfg, expFunc, logger)) + require.NoError(t, run(cfg, expFunc, logger)) time.Sleep(1 * time.Second) @@ -271,7 +271,7 @@ func TestSumMultipleTelemetryAttr(t *testing.T) { func TestGaugeMultipleTelemetryAttr(t *testing.T) { // arrange qty := 2 - cfg := configWithMultipleAttributes(metricTypeGauge, qty) + cfg := configWithMultipleAttributes(MetricTypeGauge, qty) m := &mockExporter{} expFunc := func() (sdkmetric.Exporter, error) { return m, nil @@ -279,7 +279,7 @@ func TestGaugeMultipleTelemetryAttr(t *testing.T) { // act logger, _ := zap.NewDevelopment() - require.NoError(t, Run(cfg, expFunc, logger)) + require.NoError(t, run(cfg, expFunc, logger)) time.Sleep(1 * time.Second) @@ -312,7 +312,7 @@ func TestValidate(t *testing.T) { 
Config: common.Config{ WorkerCount: 1, }, - MetricType: metricTypeSum, + MetricType: MetricTypeSum, TraceID: "123", }, wantErrMessage: "either `metrics` or `duration` must be greater than 0", @@ -324,7 +324,7 @@ func TestValidate(t *testing.T) { WorkerCount: 1, }, NumMetrics: 5, - MetricType: metricTypeSum, + MetricType: MetricTypeSum, TraceID: "123", }, wantErrMessage: "TraceID must be a 32 character hex string, like: 'ae87dadd90e9935a4bc9660628efd569'", @@ -336,7 +336,7 @@ func TestValidate(t *testing.T) { WorkerCount: 1, }, NumMetrics: 5, - MetricType: metricTypeSum, + MetricType: MetricTypeSum, TraceID: "ae87dadd90e9935a4bc9660628efd569", SpanID: "123", }, @@ -350,12 +350,12 @@ func TestValidate(t *testing.T) { return m, nil } logger, _ := zap.NewDevelopment() - require.EqualError(t, Run(tt.cfg, expFunc, logger), tt.wantErrMessage) + require.EqualError(t, run(tt.cfg, expFunc, logger), tt.wantErrMessage) }) } } -func configWithNoAttributes(metric metricType, qty int) *Config { +func configWithNoAttributes(metric MetricType, qty int) *Config { return &Config{ Config: common.Config{ WorkerCount: 1, @@ -367,7 +367,7 @@ func configWithNoAttributes(metric metricType, qty int) *Config { } } -func configWithOneAttribute(metric metricType, qty int) *Config { +func configWithOneAttribute(metric MetricType, qty int) *Config { return &Config{ Config: common.Config{ WorkerCount: 1, @@ -379,7 +379,7 @@ func configWithOneAttribute(metric metricType, qty int) *Config { } } -func configWithMultipleAttributes(metric metricType, qty int) *Config { +func configWithMultipleAttributes(metric MetricType, qty int) *Config { kvs := common.KeyValue{telemetryAttrKeyOne: telemetryAttrValueOne, telemetryAttrKeyTwo: telemetryAttrValueTwo} return &Config{ Config: common.Config{ diff --git a/cmd/telemetrygen/pkg/traces/config.go b/cmd/telemetrygen/pkg/traces/config.go new file mode 100644 index 000000000000..5ff431132911 --- /dev/null +++ b/cmd/telemetrygen/pkg/traces/config.go @@ -0,0 +1,73 
@@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package traces + +import ( + "fmt" + "time" + + "github.com/spf13/pflag" + + "github.com/open-telemetry/opentelemetry-collector-contrib/cmd/telemetrygen/internal/common" +) + +// Config describes the test scenario. +type Config struct { + common.Config + NumTraces int + NumChildSpans int + PropagateContext bool + ServiceName string + StatusCode string + Batch bool + LoadSize int + + SpanDuration time.Duration +} + +func NewConfig() *Config { + cfg := &Config{} + cfg.SetDefaults() + return cfg +} + +// Flags registers config flags. +func (c *Config) Flags(fs *pflag.FlagSet) { + c.CommonFlags(fs) + + fs.StringVar(&c.HTTPPath, "otlp-http-url-path", c.HTTPPath, "Which URL path to write to") + + fs.IntVar(&c.NumTraces, "traces", c.NumTraces, "Number of traces to generate in each worker (ignored if duration is provided)") + fs.IntVar(&c.NumChildSpans, "child-spans", c.NumChildSpans, "Number of child spans to generate for each trace") + fs.BoolVar(&c.PropagateContext, "marshal", c.PropagateContext, "Whether to marshal trace context via HTTP headers") + fs.StringVar(&c.ServiceName, "service", c.ServiceName, "Service name to use") + fs.StringVar(&c.StatusCode, "status-code", c.StatusCode, "Status code to use for the spans, one of (Unset, Error, Ok) or the equivalent integer (0,1,2)") + fs.BoolVar(&c.Batch, "batch", c.Batch, "Whether to batch traces") + fs.IntVar(&c.LoadSize, "size", c.LoadSize, "Desired minimum size in MB of string data for each trace generated. This can be used to test traces with large payloads, i.e. 
when testing the OTLP receiver endpoint max receive size.") + fs.DurationVar(&c.SpanDuration, "span-duration", c.SpanDuration, "The duration of each generated span.") +} + +// SetDefaults sets the default values for the configuration +// This is called before parsing the command line flags and when +// calling NewConfig() +func (c *Config) SetDefaults() { + c.Config.SetDefaults() + c.HTTPPath = "/v1/traces" + c.NumTraces = 1 + c.NumChildSpans = 1 + c.PropagateContext = false + c.ServiceName = "telemetrygen" + c.StatusCode = "0" + c.Batch = true + c.LoadSize = 0 + c.SpanDuration = 123 * time.Microsecond +} + +// Validate validates the test scenario parameters. +func (c *Config) Validate() error { + if c.TotalDuration <= 0 && c.NumTraces <= 0 { + return fmt.Errorf("either `traces` or `duration` must be greater than 0") + } + return nil +} diff --git a/cmd/telemetrygen/internal/traces/exporter.go b/cmd/telemetrygen/pkg/traces/exporter.go similarity index 100% rename from cmd/telemetrygen/internal/traces/exporter.go rename to cmd/telemetrygen/pkg/traces/exporter.go diff --git a/cmd/telemetrygen/internal/traces/exporter_test.go b/cmd/telemetrygen/pkg/traces/exporter_test.go similarity index 100% rename from cmd/telemetrygen/internal/traces/exporter_test.go rename to cmd/telemetrygen/pkg/traces/exporter_test.go diff --git a/cmd/telemetrygen/internal/traces/package_test.go b/cmd/telemetrygen/pkg/traces/package_test.go similarity index 100% rename from cmd/telemetrygen/internal/traces/package_test.go rename to cmd/telemetrygen/pkg/traces/package_test.go diff --git a/cmd/telemetrygen/internal/traces/traces.go b/cmd/telemetrygen/pkg/traces/traces.go similarity index 97% rename from cmd/telemetrygen/internal/traces/traces.go rename to cmd/telemetrygen/pkg/traces/traces.go index 29c0aacc5d27..1a281e5faa53 100644 --- a/cmd/telemetrygen/internal/traces/traces.go +++ b/cmd/telemetrygen/pkg/traces/traces.go @@ -72,6 +72,7 @@ func Start(cfg *Config) error { ssp = 
sdktrace.NewBatchSpanProcessor(exp, sdktrace.WithBatchTimeout(time.Second)) defer func() { logger.Info("stop the batch span processor") + if tempError := ssp.Shutdown(context.Background()); tempError != nil { logger.Error("failed to stop the batch span processor", zap.Error(tempError)) } @@ -90,9 +91,10 @@ func Start(cfg *Config) error { if cfg.Batch { tracerProvider.RegisterSpanProcessor(ssp) } + otel.SetTracerProvider(tracerProvider) - if err = Run(cfg, logger); err != nil { + if err = run(cfg, logger); err != nil { logger.Error("failed to execute the test scenario.", zap.Error(err)) return err } @@ -100,8 +102,8 @@ func Start(cfg *Config) error { return nil } -// Run executes the test scenario. -func Run(c *Config, logger *zap.Logger) error { +// run executes the test scenario. +func run(c *Config, logger *zap.Logger) error { if err := c.Validate(); err != nil { return err } diff --git a/cmd/telemetrygen/internal/traces/worker.go b/cmd/telemetrygen/pkg/traces/worker.go similarity index 100% rename from cmd/telemetrygen/internal/traces/worker.go rename to cmd/telemetrygen/pkg/traces/worker.go diff --git a/cmd/telemetrygen/internal/traces/worker_test.go b/cmd/telemetrygen/pkg/traces/worker_test.go similarity index 94% rename from cmd/telemetrygen/internal/traces/worker_test.go rename to cmd/telemetrygen/pkg/traces/worker_test.go index 8c85b90cb305..9fc8894611b8 100644 --- a/cmd/telemetrygen/internal/traces/worker_test.go +++ b/cmd/telemetrygen/pkg/traces/worker_test.go @@ -44,7 +44,7 @@ func TestFixedNumberOfTraces(t *testing.T) { } // test - require.NoError(t, Run(cfg, zap.NewNop())) + require.NoError(t, run(cfg, zap.NewNop())) // verify assert.Len(t, syncer.spans, 2) // each trace has two spans @@ -69,7 +69,7 @@ func TestNumberOfSpans(t *testing.T) { expectedNumSpans := cfg.NumChildSpans + 1 // each trace has 1 + NumChildSpans spans // test - require.NoError(t, Run(cfg, zap.NewNop())) + require.NoError(t, run(cfg, zap.NewNop())) // verify assert.Len(t, 
syncer.spans, expectedNumSpans) @@ -96,7 +96,7 @@ func TestRateOfSpans(t *testing.T) { require.Empty(t, syncer.spans) // test - require.NoError(t, Run(cfg, zap.NewNop())) + require.NoError(t, run(cfg, zap.NewNop())) // verify // the minimum acceptable number of spans for the rate of 10/sec for half a second @@ -128,7 +128,7 @@ func TestSpanDuration(t *testing.T) { require.Empty(t, syncer.spans) // test - require.NoError(t, Run(cfg, zap.NewNop())) + require.NoError(t, run(cfg, zap.NewNop())) for _, span := range syncer.spans { startTime, endTime := span.StartTime(), span.EndTime() @@ -157,7 +157,7 @@ func TestUnthrottled(t *testing.T) { require.Empty(t, syncer.spans) // test - require.NoError(t, Run(cfg, zap.NewNop())) + require.NoError(t, run(cfg, zap.NewNop())) // verify // the minimum acceptable number of spans -- the real number should be > 10k, but CI env might be slower @@ -181,7 +181,7 @@ func TestSpanKind(t *testing.T) { } // test - require.NoError(t, Run(cfg, zap.NewNop())) + require.NoError(t, run(cfg, zap.NewNop())) // verify that the default Span Kind is being overridden for _, span := range syncer.spans { @@ -232,13 +232,13 @@ func TestSpanStatuses(t *testing.T) { // test the program given input, including erroneous inputs if tt.validInput { - require.NoError(t, Run(cfg, zap.NewNop())) + require.NoError(t, run(cfg, zap.NewNop())) // verify that the default the span status is set as expected for _, span := range syncer.spans { assert.Equalf(t, span.Status().Code, tt.spanStatus, "span status: %v and expected status %v", span.Status().Code, tt.spanStatus) } } else { - require.Error(t, Run(cfg, zap.NewNop())) + require.Error(t, run(cfg, zap.NewNop())) } }) } @@ -256,7 +256,7 @@ func TestSpansWithNoAttrs(t *testing.T) { cfg := configWithNoAttributes(2, "") // test - require.NoError(t, Run(cfg, zap.NewNop())) + require.NoError(t, run(cfg, zap.NewNop())) // verify assert.Len(t, syncer.spans, 4) // each trace has two spans @@ -278,7 +278,7 @@ func 
TestSpansWithOneAttrs(t *testing.T) { cfg := configWithOneAttribute(2, "") // test - require.NoError(t, Run(cfg, zap.NewNop())) + require.NoError(t, run(cfg, zap.NewNop())) // verify assert.Len(t, syncer.spans, 4) // each trace has two spans @@ -300,7 +300,7 @@ func TestSpansWithMultipleAttrs(t *testing.T) { cfg := configWithMultipleAttributes(2, "") // test - require.NoError(t, Run(cfg, zap.NewNop())) + require.NoError(t, run(cfg, zap.NewNop())) // verify assert.Len(t, syncer.spans, 4) // each trace has two spans @@ -335,7 +335,7 @@ func TestValidate(t *testing.T) { tracerProvider.RegisterSpanProcessor(sp) otel.SetTracerProvider(tracerProvider) logger, _ := zap.NewDevelopment() - require.EqualError(t, Run(tt.cfg, logger), tt.wantErrMessage) + require.EqualError(t, run(tt.cfg, logger), tt.wantErrMessage) }) } } From 86430c50f11fe1d1dee33ee9d84b1d7579f77778 Mon Sep 17 00:00:00 2001 From: Arthur Silva Sens Date: Wed, 5 Feb 2025 16:58:24 -0300 Subject: [PATCH 13/14] Skip TestExportWithWALEnabled due to flakiness (#37719) Fixes #37715 cc @dashpole @pjanotti Signed-off-by: Arthur Silva Sens --- exporter/prometheusremotewriteexporter/wal_test.go | 1 + 1 file changed, 1 insertion(+) diff --git a/exporter/prometheusremotewriteexporter/wal_test.go b/exporter/prometheusremotewriteexporter/wal_test.go index db4c41c91c1c..dbdf62906939 100644 --- a/exporter/prometheusremotewriteexporter/wal_test.go +++ b/exporter/prometheusremotewriteexporter/wal_test.go @@ -160,6 +160,7 @@ func TestWAL_persist(t *testing.T) { } func TestExportWithWALEnabled(t *testing.T) { + t.Skip("skipping test, see https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/37715") cfg := &Config{ WAL: &WALConfig{ Directory: t.TempDir(), From 6638b83fad429c9636ac3e40d00c2b3acafebdc1 Mon Sep 17 00:00:00 2001 From: Antoine Toulme Date: Wed, 5 Feb 2025 12:06:59 -0800 Subject: [PATCH 14/14] [chore] add pod.container test (#37634) Add code coverage of pod.container rule type. 
--- receiver/receivercreator/fixtures_test.go | 3 ++- receiver/receivercreator/rules_test.go | 1 + 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/receiver/receivercreator/fixtures_test.go b/receiver/receivercreator/fixtures_test.go index 964ce437bf9e..e8f086b5e2fc 100644 --- a/receiver/receivercreator/fixtures_test.go +++ b/receiver/receivercreator/fixtures_test.go @@ -82,7 +82,8 @@ var podContainerEndpointWithHints = observer.Endpoint{ ID: "namespace/pod-2-UID/redis(6379)", Target: "1.2.3.4:6379", Details: &observer.PodContainer{ - Name: "redis", Pod: observer.Pod{ + Image: "redis", + Name: "redis", Pod: observer.Pod{ Name: "pod-2", Namespace: "default", UID: "pod-2-UID", diff --git a/receiver/receivercreator/rules_test.go b/receiver/receivercreator/rules_test.go index 4ecd0c8f85f8..315b1f39da52 100644 --- a/receiver/receivercreator/rules_test.go +++ b/receiver/receivercreator/rules_test.go @@ -33,6 +33,7 @@ func Test_ruleEval(t *testing.T) { {"basic container", args{`type == "container" && labels["region"] == "east-1"`, containerEndpoint}, true, false}, {"basic k8s.node", args{`type == "k8s.node" && kubelet_endpoint_port == 10250`, k8sNodeEndpoint}, true, false}, {"relocated type builtin", args{`type == "k8s.node" && typeOf("some string") == "string"`, k8sNodeEndpoint}, true, false}, + {"pod container", args{`type == "pod.container" and container_image matches "redis"`, podContainerEndpointWithHints}, true, false}, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) {