From 42aeaf81fb41849351b662bca9c1c9f439e6229d Mon Sep 17 00:00:00 2001 From: Tim Chan Date: Wed, 18 Dec 2024 17:45:31 -0800 Subject: [PATCH 01/19] Added queries_per_second Telegraf metric in mongodbreceiver --- receiver/mongodbreceiver/README.md | 2 +- receiver/mongodbreceiver/documentation.md | 8 ++ .../generated_component_test.go | 12 +- .../mongodbreceiver/generated_package_test.go | 3 +- .../internal/metadata/generated_config.go | 4 + .../metadata/generated_config_test.go | 2 + .../internal/metadata/generated_metrics.go | 111 +++++++++++++++--- .../metadata/generated_metrics_test.go | 84 +++++++------ .../metadata/generated_resource_test.go | 12 +- .../internal/metadata/testdata/config.yaml | 4 + receiver/mongodbreceiver/metadata.yaml | 8 ++ receiver/mongodbreceiver/metrics.go | 32 +++++ receiver/mongodbreceiver/scraper.go | 22 ++-- .../testdata/integration/expected.4_0.yaml | 9 ++ .../testdata/integration/expected.4_4lpu.yaml | 9 ++ .../testdata/integration/expected.5_0.yaml | 9 ++ .../testdata/scraper/expected.yaml | 9 ++ .../testdata/scraper/partial_scrape.yaml | 9 ++ 18 files changed, 272 insertions(+), 77 deletions(-) diff --git a/receiver/mongodbreceiver/README.md b/receiver/mongodbreceiver/README.md index 8ce2e4e0c8887..41389dfa9ad93 100644 --- a/receiver/mongodbreceiver/README.md +++ b/receiver/mongodbreceiver/README.md @@ -8,7 +8,7 @@ | Issues | [![Open issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector-contrib?query=is%3Aissue%20is%3Aopen%20label%3Areceiver%2Fmongodb%20&label=open&color=orange&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aopen+is%3Aissue+label%3Areceiver%2Fmongodb) [![Closed issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector-contrib?query=is%3Aissue%20is%3Aclosed%20label%3Areceiver%2Fmongodb%20&label=closed&color=blue&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aclosed+is%3Aissue+label%3Areceiver%2Fmongodb) | | [Code Owners](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/CONTRIBUTING.md#becoming-a-code-owner) | [@djaglowski](https://www.github.com/djaglowski), [@schmikei](https://www.github.com/schmikei) \| Seeking more code owners! | -[beta]: https://github.com/open-telemetry/opentelemetry-collector#beta +[beta]: https://github.com/open-telemetry/opentelemetry-collector/blob/main/docs/component-stability.md#beta [contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib diff --git a/receiver/mongodbreceiver/documentation.md b/receiver/mongodbreceiver/documentation.md index 1a605c560614d..15f242023dd0d 100644 --- a/receiver/mongodbreceiver/documentation.md +++ b/receiver/mongodbreceiver/documentation.md @@ -214,6 +214,14 @@ The total time spent performing operations. | ---- | ----------- | ------ | | operation | The MongoDB operation being counted. | Str: ``insert``, ``query``, ``update``, ``delete``, ``getmore``, ``command`` | +### mongodb.queries_per_sec + +The number of queries executed per second. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| {query}/s | Gauge | Double | + ### mongodb.session.count The total number of active sessions. 
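The new gauge is not read directly from `serverStatus`; it is derived by differencing the cumulative `opcounters.query` counter between two consecutive scrapes, as `metrics.go` later in this patch does. A minimal, self-contained sketch of that delta-over-interval computation (illustrative names only, not the receiver's actual API):

```go
// Hypothetical illustration of the rate math this patch applies to
// MongoDB's cumulative opcounters; names here are not the receiver's own.
package main

import (
	"fmt"
	"time"
)

// rateState remembers the previous cumulative counter sample so a
// per-second rate can be derived from two consecutive scrapes.
type rateState struct {
	prevCount int64
	prevTime  time.Time
	primed    bool // no rate can be emitted until two samples exist
}

// observe records a cumulative counter reading and, once primed, returns
// the per-second rate over the elapsed interval.
func (r *rateState) observe(count int64, now time.Time) (float64, bool) {
	defer func() { r.prevCount, r.prevTime, r.primed = count, now, true }()
	if !r.primed {
		return 0, false
	}
	elapsed := now.Sub(r.prevTime).Seconds()
	delta := count - r.prevCount
	// Skip the interval on a counter reset (e.g. mongod restart) or a
	// non-positive elapsed time rather than emit a nonsense rate.
	if elapsed <= 0 || delta < 0 {
		return 0, false
	}
	return float64(delta) / elapsed, true
}

func main() {
	var r rateState
	t0 := time.Now()
	r.observe(1000, t0) // first scrape only primes the state
	rate, ok := r.observe(1600, t0.Add(time.Minute))
	fmt.Println(rate, ok) // 10 queries/s, true
}
```

Because the first scrape has no previous sample, no rate can be emitted until the second scrape; the scraper's `prevCounts`/`prevTimestamp` fields added below play exactly this priming role.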
diff --git a/receiver/mongodbreceiver/generated_component_test.go b/receiver/mongodbreceiver/generated_component_test.go index 2ab097eb039cd..4ed787b08c022 100644 --- a/receiver/mongodbreceiver/generated_component_test.go +++ b/receiver/mongodbreceiver/generated_component_test.go @@ -46,21 +46,21 @@ func TestComponentLifecycle(t *testing.T) { require.NoError(t, err) require.NoError(t, sub.Unmarshal(&cfg)) - for _, test := range tests { - t.Run(test.name+"-shutdown", func(t *testing.T) { - c, err := test.createFn(context.Background(), receivertest.NewNopSettings(), cfg) + for _, tt := range tests { + t.Run(tt.name+"-shutdown", func(t *testing.T) { + c, err := tt.createFn(context.Background(), receivertest.NewNopSettings(), cfg) require.NoError(t, err) err = c.Shutdown(context.Background()) require.NoError(t, err) }) - t.Run(test.name+"-lifecycle", func(t *testing.T) { - firstRcvr, err := test.createFn(context.Background(), receivertest.NewNopSettings(), cfg) + t.Run(tt.name+"-lifecycle", func(t *testing.T) { + firstRcvr, err := tt.createFn(context.Background(), receivertest.NewNopSettings(), cfg) require.NoError(t, err) host := componenttest.NewNopHost() require.NoError(t, err) require.NoError(t, firstRcvr.Start(context.Background(), host)) require.NoError(t, firstRcvr.Shutdown(context.Background())) - secondRcvr, err := test.createFn(context.Background(), receivertest.NewNopSettings(), cfg) + secondRcvr, err := tt.createFn(context.Background(), receivertest.NewNopSettings(), cfg) require.NoError(t, err) require.NoError(t, secondRcvr.Start(context.Background(), host)) require.NoError(t, secondRcvr.Shutdown(context.Background())) diff --git a/receiver/mongodbreceiver/generated_package_test.go b/receiver/mongodbreceiver/generated_package_test.go index 17e9f23be856d..080891042403b 100644 --- a/receiver/mongodbreceiver/generated_package_test.go +++ b/receiver/mongodbreceiver/generated_package_test.go @@ -3,9 +3,8 @@ package mongodbreceiver import ( - "testing" - "go.uber.org/goleak" + "testing" ) func TestMain(m *testing.M) { diff --git a/receiver/mongodbreceiver/internal/metadata/generated_config.go b/receiver/mongodbreceiver/internal/metadata/generated_config.go index ab438c54658fd..3557cdb80cc8f 100644 --- a/receiver/mongodbreceiver/internal/metadata/generated_config.go +++ b/receiver/mongodbreceiver/internal/metadata/generated_config.go @@ -55,6 +55,7 @@ type MetricsConfig struct { MongodbOperationLatencyTime MetricConfig `mapstructure:"mongodb.operation.latency.time"` MongodbOperationReplCount MetricConfig `mapstructure:"mongodb.operation.repl.count"` MongodbOperationTime MetricConfig `mapstructure:"mongodb.operation.time"` + MongodbQueriesPerSec MetricConfig `mapstructure:"mongodb.queries_per_sec"` MongodbSessionCount MetricConfig `mapstructure:"mongodb.session.count"` MongodbStorageSize MetricConfig `mapstructure:"mongodb.storage.size"` MongodbUptime MetricConfig `mapstructure:"mongodb.uptime"` @@ -143,6 +144,9 @@ func DefaultMetricsConfig() MetricsConfig { MongodbOperationTime: MetricConfig{ Enabled: true, }, + MongodbQueriesPerSec: MetricConfig{ + Enabled: true, + }, MongodbSessionCount: MetricConfig{ Enabled: true, }, diff --git a/receiver/mongodbreceiver/internal/metadata/generated_config_test.go b/receiver/mongodbreceiver/internal/metadata/generated_config_test.go index 1d809884496a5..dc5ff3e08ce35 100644 --- a/receiver/mongodbreceiver/internal/metadata/generated_config_test.go +++ b/receiver/mongodbreceiver/internal/metadata/generated_config_test.go @@ -52,6 +52,7 @@ func 
TestMetricsBuilderConfig(t *testing.T) { MongodbOperationLatencyTime: MetricConfig{Enabled: true}, MongodbOperationReplCount: MetricConfig{Enabled: true}, MongodbOperationTime: MetricConfig{Enabled: true}, + MongodbQueriesPerSec: MetricConfig{Enabled: true}, MongodbSessionCount: MetricConfig{Enabled: true}, MongodbStorageSize: MetricConfig{Enabled: true}, MongodbUptime: MetricConfig{Enabled: true}, @@ -94,6 +95,7 @@ func TestMetricsBuilderConfig(t *testing.T) { MongodbOperationLatencyTime: MetricConfig{Enabled: false}, MongodbOperationReplCount: MetricConfig{Enabled: false}, MongodbOperationTime: MetricConfig{Enabled: false}, + MongodbQueriesPerSec: MetricConfig{Enabled: false}, MongodbSessionCount: MetricConfig{Enabled: false}, MongodbStorageSize: MetricConfig{Enabled: false}, MongodbUptime: MetricConfig{Enabled: false}, diff --git a/receiver/mongodbreceiver/internal/metadata/generated_metrics.go b/receiver/mongodbreceiver/internal/metadata/generated_metrics.go index 12fca83208961..73d5423cbeb21 100644 --- a/receiver/mongodbreceiver/internal/metadata/generated_metrics.go +++ b/receiver/mongodbreceiver/internal/metadata/generated_metrics.go @@ -1653,6 +1653,55 @@ func newMetricMongodbOperationTime(cfg MetricConfig) metricMongodbOperationTime return m } +type metricMongodbQueriesPerSec struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills mongodb.queries_per_sec metric with initial data. +func (m *metricMongodbQueriesPerSec) init() { + m.data.SetName("mongodb.queries_per_sec") + m.data.SetDescription("The number of queries executed per second.") + m.data.SetUnit("{query}/s") + m.data.SetEmptyGauge() +} + +func (m *metricMongodbQueriesPerSec) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetDoubleValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricMongodbQueriesPerSec) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricMongodbQueriesPerSec) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricMongodbQueriesPerSec(cfg MetricConfig) metricMongodbQueriesPerSec { + m := metricMongodbQueriesPerSec{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + type metricMongodbSessionCount struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. @@ -1843,22 +1892,31 @@ type MetricsBuilder struct { metricMongodbOperationLatencyTime metricMongodbOperationLatencyTime metricMongodbOperationReplCount metricMongodbOperationReplCount metricMongodbOperationTime metricMongodbOperationTime + metricMongodbQueriesPerSec metricMongodbQueriesPerSec metricMongodbSessionCount metricMongodbSessionCount metricMongodbStorageSize metricMongodbStorageSize metricMongodbUptime metricMongodbUptime } -// metricBuilderOption applies changes to default metrics builder. 
-type metricBuilderOption func(*MetricsBuilder) +// MetricBuilderOption applies changes to default metrics builder. +type MetricBuilderOption interface { + apply(*MetricsBuilder) +} + +type metricBuilderOptionFunc func(mb *MetricsBuilder) + +func (mbof metricBuilderOptionFunc) apply(mb *MetricsBuilder) { + mbof(mb) +} // WithStartTime sets startTime on the metrics builder. -func WithStartTime(startTime pcommon.Timestamp) metricBuilderOption { - return func(mb *MetricsBuilder) { +func WithStartTime(startTime pcommon.Timestamp) MetricBuilderOption { + return metricBuilderOptionFunc(func(mb *MetricsBuilder) { mb.startTime = startTime - } + }) } -func NewMetricsBuilder(mbc MetricsBuilderConfig, settings receiver.Settings, options ...metricBuilderOption) *MetricsBuilder { +func NewMetricsBuilder(mbc MetricsBuilderConfig, settings receiver.Settings, options ...MetricBuilderOption) *MetricsBuilder { mb := &MetricsBuilder{ config: mbc, startTime: pcommon.NewTimestampFromTime(time.Now()), @@ -1891,6 +1949,7 @@ func NewMetricsBuilder(mbc MetricsBuilderConfig, settings receiver.Settings, opt metricMongodbOperationLatencyTime: newMetricMongodbOperationLatencyTime(mbc.Metrics.MongodbOperationLatencyTime), metricMongodbOperationReplCount: newMetricMongodbOperationReplCount(mbc.Metrics.MongodbOperationReplCount), metricMongodbOperationTime: newMetricMongodbOperationTime(mbc.Metrics.MongodbOperationTime), + metricMongodbQueriesPerSec: newMetricMongodbQueriesPerSec(mbc.Metrics.MongodbQueriesPerSec), metricMongodbSessionCount: newMetricMongodbSessionCount(mbc.Metrics.MongodbSessionCount), metricMongodbStorageSize: newMetricMongodbStorageSize(mbc.Metrics.MongodbStorageSize), metricMongodbUptime: newMetricMongodbUptime(mbc.Metrics.MongodbUptime), @@ -1917,7 +1976,7 @@ func NewMetricsBuilder(mbc MetricsBuilderConfig, settings receiver.Settings, opt } for _, op := range options { - op(mb) + op.apply(mb) } return mb } @@ -1935,20 +1994,28 @@ func (mb *MetricsBuilder) updateCapacity(rm pmetric.ResourceMetrics) { } // ResourceMetricsOption applies changes to provided resource metrics. -type ResourceMetricsOption func(pmetric.ResourceMetrics) +type ResourceMetricsOption interface { + apply(pmetric.ResourceMetrics) +} + +type resourceMetricsOptionFunc func(pmetric.ResourceMetrics) + +func (rmof resourceMetricsOptionFunc) apply(rm pmetric.ResourceMetrics) { + rmof(rm) +} // WithResource sets the provided resource on the emitted ResourceMetrics. // It's recommended to use ResourceBuilder to create the resource. func WithResource(res pcommon.Resource) ResourceMetricsOption { - return func(rm pmetric.ResourceMetrics) { + return resourceMetricsOptionFunc(func(rm pmetric.ResourceMetrics) { res.CopyTo(rm.Resource()) - } + }) } // WithStartTimeOverride overrides start time for all the resource metrics data points. // This option should be only used if different start time has to be set on metrics coming from different resources. 
func WithStartTimeOverride(start pcommon.Timestamp) ResourceMetricsOption { - return func(rm pmetric.ResourceMetrics) { + return resourceMetricsOptionFunc(func(rm pmetric.ResourceMetrics) { var dps pmetric.NumberDataPointSlice metrics := rm.ScopeMetrics().At(0).Metrics() for i := 0; i < metrics.Len(); i++ { @@ -1962,7 +2029,7 @@ func WithStartTimeOverride(start pcommon.Timestamp) ResourceMetricsOption { dps.At(j).SetStartTimestamp(start) } } - } + }) } // EmitForResource saves all the generated metrics under a new resource and updates the internal state to be ready for @@ -1970,7 +2037,7 @@ func WithStartTimeOverride(start pcommon.Timestamp) ResourceMetricsOption { // needs to emit metrics from several resources. Otherwise calling this function is not required, // just `Emit` function can be called instead. // Resource attributes should be provided as ResourceMetricsOption arguments. -func (mb *MetricsBuilder) EmitForResource(rmo ...ResourceMetricsOption) { +func (mb *MetricsBuilder) EmitForResource(options ...ResourceMetricsOption) { rm := pmetric.NewResourceMetrics() ils := rm.ScopeMetrics().AppendEmpty() ils.Scope().SetName("github.com/open-telemetry/opentelemetry-collector-contrib/receiver/mongodbreceiver") @@ -2003,12 +2070,13 @@ func (mb *MetricsBuilder) EmitForResource(rmo ...ResourceMetricsOption) { mb.metricMongodbOperationLatencyTime.emit(ils.Metrics()) mb.metricMongodbOperationReplCount.emit(ils.Metrics()) mb.metricMongodbOperationTime.emit(ils.Metrics()) + mb.metricMongodbQueriesPerSec.emit(ils.Metrics()) mb.metricMongodbSessionCount.emit(ils.Metrics()) mb.metricMongodbStorageSize.emit(ils.Metrics()) mb.metricMongodbUptime.emit(ils.Metrics()) - for _, op := range rmo { - op(rm) + for _, op := range options { + op.apply(rm) } for attr, filter := range mb.resourceAttributeIncludeFilter { if val, ok := rm.Resource().Attributes().Get(attr); ok && !filter.Matches(val.AsString()) { @@ -2030,8 +2098,8 @@ func (mb *MetricsBuilder) EmitForResource(rmo ...ResourceMetricsOption) { // Emit returns all the metrics accumulated by the metrics builder and updates the internal state to be ready for // recording another set of metrics. This function will be responsible for applying all the transformations required to // produce metric representation defined in metadata and user config, e.g. delta or cumulative. -func (mb *MetricsBuilder) Emit(rmo ...ResourceMetricsOption) pmetric.Metrics { - mb.EmitForResource(rmo...) +func (mb *MetricsBuilder) Emit(options ...ResourceMetricsOption) pmetric.Metrics { + mb.EmitForResource(options...) metrics := mb.metricsBuffer mb.metricsBuffer = pmetric.NewMetrics() return metrics @@ -2172,6 +2240,11 @@ func (mb *MetricsBuilder) RecordMongodbOperationTimeDataPoint(ts pcommon.Timesta mb.metricMongodbOperationTime.recordDataPoint(mb.startTime, ts, val, operationAttributeValue.String()) } +// RecordMongodbQueriesPerSecDataPoint adds a data point to mongodb.queries_per_sec metric. +func (mb *MetricsBuilder) RecordMongodbQueriesPerSecDataPoint(ts pcommon.Timestamp, val float64) { + mb.metricMongodbQueriesPerSec.recordDataPoint(mb.startTime, ts, val) +} + // RecordMongodbSessionCountDataPoint adds a data point to mongodb.session.count metric. 
func (mb *MetricsBuilder) RecordMongodbSessionCountDataPoint(ts pcommon.Timestamp, val int64) { mb.metricMongodbSessionCount.recordDataPoint(mb.startTime, ts, val) @@ -2189,9 +2262,9 @@ func (mb *MetricsBuilder) RecordMongodbUptimeDataPoint(ts pcommon.Timestamp, val // Reset resets metrics builder to its initial state. It should be used when external metrics source is restarted, // and metrics builder should update its startTime and reset it's internal state accordingly. -func (mb *MetricsBuilder) Reset(options ...metricBuilderOption) { +func (mb *MetricsBuilder) Reset(options ...MetricBuilderOption) { mb.startTime = pcommon.NewTimestampFromTime(time.Now()) for _, op := range options { - op(mb) + op.apply(mb) } } diff --git a/receiver/mongodbreceiver/internal/metadata/generated_metrics_test.go b/receiver/mongodbreceiver/internal/metadata/generated_metrics_test.go index 0ae51af87c7ff..5508e67e867b5 100644 --- a/receiver/mongodbreceiver/internal/metadata/generated_metrics_test.go +++ b/receiver/mongodbreceiver/internal/metadata/generated_metrics_test.go @@ -52,14 +52,14 @@ func TestMetricsBuilder(t *testing.T) { expectEmpty: true, }, } - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { start := pcommon.Timestamp(1_000_000_000) ts := pcommon.Timestamp(1_000_001_000) observedZapCore, observedLogs := observer.New(zap.WarnLevel) settings := receivertest.NewNopSettings() settings.Logger = zap.New(observedZapCore) - mb := NewMetricsBuilder(loadMetricsBuilderConfig(t, test.name), settings, WithStartTime(start)) + mb := NewMetricsBuilder(loadMetricsBuilderConfig(t, tt.name), settings, WithStartTime(start)) expectedWarnings := 0 @@ -169,6 +169,10 @@ func TestMetricsBuilder(t *testing.T) { allMetricsCount++ mb.RecordMongodbOperationTimeDataPoint(ts, 1, AttributeOperationInsert) + defaultMetricsCount++ + allMetricsCount++ + mb.RecordMongodbQueriesPerSecDataPoint(ts, 1) + defaultMetricsCount++ allMetricsCount++ mb.RecordMongodbSessionCountDataPoint(ts, 1) @@ -187,7 +191,7 @@ func TestMetricsBuilder(t *testing.T) { res := rb.Emit() metrics := mb.Emit(WithResource(res)) - if test.expectEmpty { + if tt.expectEmpty { assert.Equal(t, 0, metrics.ResourceMetrics().Len()) return } @@ -197,10 +201,10 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, res, rm.Resource()) assert.Equal(t, 1, rm.ScopeMetrics().Len()) ms := rm.ScopeMetrics().At(0).Metrics() - if test.metricsSet == testDataSetDefault { + if tt.metricsSet == testDataSetDefault { assert.Equal(t, defaultMetricsCount, ms.Len()) } - if test.metricsSet == testDataSetAll { + if tt.metricsSet == testDataSetAll { assert.Equal(t, allMetricsCount, ms.Len()) } validatedMetrics := make(map[string]bool) @@ -213,7 +217,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) assert.Equal(t, "The number of cache operations of the instance.", ms.At(i).Description()) assert.Equal(t, "{operations}", ms.At(i).Unit()) - assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.True(t, ms.At(i).Sum().IsMonotonic()) assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) dp := ms.At(i).Sum().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) @@ -230,7 +234,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) assert.Equal(t, "The number of collections.", ms.At(i).Description()) assert.Equal(t, "{collections}", ms.At(i).Unit()) - 
assert.Equal(t, false, ms.At(i).Sum().IsMonotonic()) + assert.False(t, ms.At(i).Sum().IsMonotonic()) assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) dp := ms.At(i).Sum().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) @@ -244,7 +248,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) assert.Equal(t, "The number of connections.", ms.At(i).Description()) assert.Equal(t, "{connections}", ms.At(i).Unit()) - assert.Equal(t, false, ms.At(i).Sum().IsMonotonic()) + assert.False(t, ms.At(i).Sum().IsMonotonic()) assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) dp := ms.At(i).Sum().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) @@ -261,7 +265,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) assert.Equal(t, "The number of open cursors maintained for clients.", ms.At(i).Description()) assert.Equal(t, "{cursors}", ms.At(i).Unit()) - assert.Equal(t, false, ms.At(i).Sum().IsMonotonic()) + assert.False(t, ms.At(i).Sum().IsMonotonic()) assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) dp := ms.At(i).Sum().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) @@ -275,7 +279,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) assert.Equal(t, "The number of cursors that have timed out.", ms.At(i).Description()) assert.Equal(t, "{cursors}", ms.At(i).Unit()) - assert.Equal(t, false, ms.At(i).Sum().IsMonotonic()) + assert.False(t, ms.At(i).Sum().IsMonotonic()) assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) dp := ms.At(i).Sum().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) @@ -289,7 +293,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) assert.Equal(t, "The size of the collection. 
Data compression does not affect this value.", ms.At(i).Description()) assert.Equal(t, "By", ms.At(i).Unit()) - assert.Equal(t, false, ms.At(i).Sum().IsMonotonic()) + assert.False(t, ms.At(i).Sum().IsMonotonic()) assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) dp := ms.At(i).Sum().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) @@ -303,7 +307,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) assert.Equal(t, "The number of existing databases.", ms.At(i).Description()) assert.Equal(t, "{databases}", ms.At(i).Unit()) - assert.Equal(t, false, ms.At(i).Sum().IsMonotonic()) + assert.False(t, ms.At(i).Sum().IsMonotonic()) assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) dp := ms.At(i).Sum().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) @@ -317,7 +321,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) assert.Equal(t, "The number of document operations executed.", ms.At(i).Description()) assert.Equal(t, "{documents}", ms.At(i).Unit()) - assert.Equal(t, false, ms.At(i).Sum().IsMonotonic()) + assert.False(t, ms.At(i).Sum().IsMonotonic()) assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) dp := ms.At(i).Sum().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) @@ -334,7 +338,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) assert.Equal(t, "The number of extents.", ms.At(i).Description()) assert.Equal(t, "{extents}", ms.At(i).Unit()) - assert.Equal(t, false, ms.At(i).Sum().IsMonotonic()) + assert.False(t, ms.At(i).Sum().IsMonotonic()) assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) dp := ms.At(i).Sum().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) @@ -348,7 +352,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) assert.Equal(t, "The time the global lock has been held.", ms.At(i).Description()) assert.Equal(t, "ms", ms.At(i).Unit()) - assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.True(t, ms.At(i).Sum().IsMonotonic()) assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) dp := ms.At(i).Sum().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) @@ -374,7 +378,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) assert.Equal(t, "The number of times an index has been accessed.", ms.At(i).Description()) assert.Equal(t, "{accesses}", ms.At(i).Unit()) - assert.Equal(t, false, ms.At(i).Sum().IsMonotonic()) + assert.False(t, ms.At(i).Sum().IsMonotonic()) assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) dp := ms.At(i).Sum().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) @@ -391,7 +395,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) assert.Equal(t, "The number of indexes.", ms.At(i).Description()) assert.Equal(t, "{indexes}", ms.At(i).Unit()) - assert.Equal(t, false, ms.At(i).Sum().IsMonotonic()) + assert.False(t, ms.At(i).Sum().IsMonotonic()) assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) dp := ms.At(i).Sum().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) @@ -405,7 +409,7 
@@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) assert.Equal(t, "Sum of the space allocated to all indexes in the database, including free index space.", ms.At(i).Description()) assert.Equal(t, "By", ms.At(i).Unit()) - assert.Equal(t, false, ms.At(i).Sum().IsMonotonic()) + assert.False(t, ms.At(i).Sum().IsMonotonic()) assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) dp := ms.At(i).Sum().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) @@ -419,7 +423,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) assert.Equal(t, "Number of times the lock was acquired in the specified mode.", ms.At(i).Description()) assert.Equal(t, "{count}", ms.At(i).Unit()) - assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.True(t, ms.At(i).Sum().IsMonotonic()) assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) dp := ms.At(i).Sum().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) @@ -439,7 +443,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) assert.Equal(t, "Cumulative wait time for the lock acquisitions.", ms.At(i).Description()) assert.Equal(t, "microseconds", ms.At(i).Unit()) - assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.True(t, ms.At(i).Sum().IsMonotonic()) assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) dp := ms.At(i).Sum().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) @@ -459,7 +463,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) assert.Equal(t, "Number of times the lock acquisitions encountered waits because the locks were held in a conflicting mode.", ms.At(i).Description()) assert.Equal(t, "{count}", ms.At(i).Unit()) - assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.True(t, ms.At(i).Sum().IsMonotonic()) assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) dp := ms.At(i).Sum().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) @@ -479,7 +483,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) assert.Equal(t, "Number of times the lock acquisitions encountered deadlocks.", ms.At(i).Description()) assert.Equal(t, "{count}", ms.At(i).Unit()) - assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.True(t, ms.At(i).Sum().IsMonotonic()) assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) dp := ms.At(i).Sum().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) @@ -499,7 +503,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) assert.Equal(t, "The amount of memory used.", ms.At(i).Description()) assert.Equal(t, "By", ms.At(i).Unit()) - assert.Equal(t, false, ms.At(i).Sum().IsMonotonic()) + assert.False(t, ms.At(i).Sum().IsMonotonic()) assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) dp := ms.At(i).Sum().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) @@ -516,7 +520,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) assert.Equal(t, "The number of bytes received.", ms.At(i).Description()) assert.Equal(t, "By", ms.At(i).Unit()) - assert.Equal(t, false, 
ms.At(i).Sum().IsMonotonic()) + assert.False(t, ms.At(i).Sum().IsMonotonic()) assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) dp := ms.At(i).Sum().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) @@ -530,7 +534,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) assert.Equal(t, "The number of by transmitted.", ms.At(i).Description()) assert.Equal(t, "By", ms.At(i).Unit()) - assert.Equal(t, false, ms.At(i).Sum().IsMonotonic()) + assert.False(t, ms.At(i).Sum().IsMonotonic()) assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) dp := ms.At(i).Sum().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) @@ -544,7 +548,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) assert.Equal(t, "The number of requests received by the server.", ms.At(i).Description()) assert.Equal(t, "{requests}", ms.At(i).Unit()) - assert.Equal(t, false, ms.At(i).Sum().IsMonotonic()) + assert.False(t, ms.At(i).Sum().IsMonotonic()) assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) dp := ms.At(i).Sum().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) @@ -558,7 +562,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) assert.Equal(t, "The number of objects.", ms.At(i).Description()) assert.Equal(t, "{objects}", ms.At(i).Unit()) - assert.Equal(t, false, ms.At(i).Sum().IsMonotonic()) + assert.False(t, ms.At(i).Sum().IsMonotonic()) assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) dp := ms.At(i).Sum().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) @@ -572,7 +576,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) assert.Equal(t, "The number of operations executed.", ms.At(i).Description()) assert.Equal(t, "{operations}", ms.At(i).Unit()) - assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.True(t, ms.At(i).Sum().IsMonotonic()) assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) dp := ms.At(i).Sum().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) @@ -604,7 +608,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) assert.Equal(t, "The number of replicated operations executed.", ms.At(i).Description()) assert.Equal(t, "{operations}", ms.At(i).Unit()) - assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.True(t, ms.At(i).Sum().IsMonotonic()) assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) dp := ms.At(i).Sum().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) @@ -621,7 +625,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) assert.Equal(t, "The total time spent performing operations.", ms.At(i).Description()) assert.Equal(t, "ms", ms.At(i).Unit()) - assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.True(t, ms.At(i).Sum().IsMonotonic()) assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) dp := ms.At(i).Sum().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) @@ -631,6 +635,18 @@ func TestMetricsBuilder(t *testing.T) { attrVal, ok := dp.Attributes().Get("operation") assert.True(t, ok) 
assert.EqualValues(t, "insert", attrVal.Str()) + case "mongodb.queries_per_sec": + assert.False(t, validatedMetrics["mongodb.queries_per_sec"], "Found a duplicate in the metrics slice: mongodb.queries_per_sec") + validatedMetrics["mongodb.queries_per_sec"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "The number of queries executed per second.", ms.At(i).Description()) + assert.Equal(t, "{query}/s", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType()) + assert.InDelta(t, float64(1), dp.DoubleValue(), 0.01) case "mongodb.session.count": assert.False(t, validatedMetrics["mongodb.session.count"], "Found a duplicate in the metrics slice: mongodb.session.count") validatedMetrics["mongodb.session.count"] = true @@ -638,7 +654,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) assert.Equal(t, "The total number of active sessions.", ms.At(i).Description()) assert.Equal(t, "{sessions}", ms.At(i).Unit()) - assert.Equal(t, false, ms.At(i).Sum().IsMonotonic()) + assert.False(t, ms.At(i).Sum().IsMonotonic()) assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) dp := ms.At(i).Sum().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) @@ -652,7 +668,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) assert.Equal(t, "The total amount of storage allocated to this collection.", ms.At(i).Description()) assert.Equal(t, "By", ms.At(i).Unit()) - assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.True(t, ms.At(i).Sum().IsMonotonic()) assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) dp := ms.At(i).Sum().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) @@ -666,7 +682,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) assert.Equal(t, "The amount of time that the server has been running.", ms.At(i).Description()) assert.Equal(t, "ms", ms.At(i).Unit()) - assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.True(t, ms.At(i).Sum().IsMonotonic()) assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) dp := ms.At(i).Sum().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) diff --git a/receiver/mongodbreceiver/internal/metadata/generated_resource_test.go b/receiver/mongodbreceiver/internal/metadata/generated_resource_test.go index f089e8995c610..dbc2fd1e745d7 100644 --- a/receiver/mongodbreceiver/internal/metadata/generated_resource_test.go +++ b/receiver/mongodbreceiver/internal/metadata/generated_resource_test.go @@ -9,9 +9,9 @@ import ( ) func TestResourceBuilder(t *testing.T) { - for _, test := range []string{"default", "all_set", "none_set"} { - t.Run(test, func(t *testing.T) { - cfg := loadResourceAttributesConfig(t, test) + for _, tt := range []string{"default", "all_set", "none_set"} { + t.Run(tt, func(t *testing.T) { + cfg := loadResourceAttributesConfig(t, tt) rb := NewResourceBuilder(cfg) rb.SetDatabase("database-val") rb.SetServerAddress("server.address-val") @@ -20,7 +20,7 @@ func TestResourceBuilder(t *testing.T) { res := rb.Emit() assert.Equal(t, 0, rb.Emit().Attributes().Len()) // Second call should return empty 
Resource
-			switch test {
+			switch tt {
 			case "default":
 				assert.Equal(t, 2, res.Attributes().Len())
 			case "all_set":
@@ -29,7 +29,7 @@ func TestResourceBuilder(t *testing.T) {
 				assert.Equal(t, 0, res.Attributes().Len())
 				return
 			default:
-				assert.Failf(t, "unexpected test case: %s", test)
+				assert.Failf(t, "unexpected test case: %s", tt)
 			}
 
 			val, ok := res.Attributes().Get("database")
@@ -43,7 +43,7 @@ func TestResourceBuilder(t *testing.T) {
 				assert.EqualValues(t, "server.address-val", val.Str())
 			}
 			val, ok = res.Attributes().Get("server.port")
-			assert.Equal(t, test == "all_set", ok)
+			assert.Equal(t, tt == "all_set", ok)
 			if ok {
 				assert.EqualValues(t, 11, val.Int())
 			}
diff --git a/receiver/mongodbreceiver/internal/metadata/testdata/config.yaml b/receiver/mongodbreceiver/internal/metadata/testdata/config.yaml
index 5f97ca6b40815..6b08c571e5e93 100644
--- a/receiver/mongodbreceiver/internal/metadata/testdata/config.yaml
+++ b/receiver/mongodbreceiver/internal/metadata/testdata/config.yaml
@@ -55,6 +55,8 @@ all_set:
       enabled: true
     mongodb.operation.time:
       enabled: true
+    mongodb.queries_per_sec:
+      enabled: true
     mongodb.session.count:
       enabled: true
     mongodb.storage.size:
@@ -124,6 +126,8 @@ none_set:
       enabled: false
     mongodb.operation.time:
       enabled: false
+    mongodb.queries_per_sec:
+      enabled: false
     mongodb.session.count:
       enabled: false
     mongodb.storage.size:
diff --git a/receiver/mongodbreceiver/metadata.yaml b/receiver/mongodbreceiver/metadata.yaml
index 815b6ea222228..0c02dbf90c676 100644
--- a/receiver/mongodbreceiver/metadata.yaml
+++ b/receiver/mongodbreceiver/metadata.yaml
@@ -358,6 +358,12 @@ metrics:
       monotonic: true
      aggregation_temporality: cumulative
       attributes: [ ]
+  mongodb.queries_per_sec:
+    description: The number of queries executed per second.
+    unit: "{query}/s"
+    enabled: true
+    gauge:
+      value_type: double
 
 tests:
   config:
diff --git a/receiver/mongodbreceiver/metrics.go b/receiver/mongodbreceiver/metrics.go
index 83f80ff17ca32..f0ca891fe6e0b 100644
--- a/receiver/mongodbreceiver/metrics.go
+++ b/receiver/mongodbreceiver/metrics.go
@@ -220,6 +220,9 @@ func (s *mongodbScraper) recordLatencyTime(now pcommon.Timestamp, doc bson.M, errs *scrapererror.ScrapeErrors) {
 
 // Admin Stats
 func (s *mongodbScraper) recordOperations(now pcommon.Timestamp, doc bson.M, errs *scrapererror.ScrapeErrors) {
+	// Collect operation counts first
+	currentCounts := make(map[string]int64)
+
 	for operationVal, operation := range metadata.MapAttributeOperation {
 		metricPath := []string{"opcounters", operationVal}
 		metricName := "mongodb.operation.count"
@@ -228,8 +231,17 @@ func (s *mongodbScraper) recordOperations(now pcommon.Timestamp, doc bson.M, errs *scrapererror.ScrapeErrors) {
 			errs.AddPartial(1, fmt.Errorf(collectMetricWithAttributes, metricName, operationVal, err))
 			continue
 		}
+
+		// Record the raw count
 		s.mb.RecordMongodbOperationCountDataPoint(now, val, operation)
+
+		currentCounts[operationVal] = val
+		s.recordOperationPerSecond(now, operationVal, val)
 	}
+
+	// Store current counts for next iteration
+	s.prevCounts = currentCounts
+	s.prevTimestamp = now
 }
 
 func (s *mongodbScraper) recordOperationsRepl(now pcommon.Timestamp, doc bson.M, errs *scrapererror.ScrapeErrors) {
@@ -245,6 +257,23 @@ func (s *mongodbScraper) recordOperationsRepl(now pcommon.Timestamp, doc bson.M, errs *scrapererror.ScrapeErrors) {
 	}
 }
 
+func (s *mongodbScraper) recordOperationPerSecond(now pcommon.Timestamp, operationVal string, currentCount int64) {
+	if s.prevTimestamp > 0 {
+		timeDelta := float64(now-s.prevTimestamp) / 1e9
+		if timeDelta > 0 {
+			if prevCount, exists := s.prevCounts[operationVal]; exists {
+				delta := currentCount - prevCount
+				queriesPerSec := float64(delta) / timeDelta
+
+				switch operationVal {
+				case "query":
+					s.mb.RecordMongodbQueriesPerSecDataPoint(now, queriesPerSec)
+				}
+			}
+		}
+	}
+}
+
 func (s *mongodbScraper) recordCacheOperations(now pcommon.Timestamp, doc bson.M, errs *scrapererror.ScrapeErrors) {
 	storageEngine, err := dig(doc, []string{"storageEngine", "name"})
 	if err != nil {
diff --git a/receiver/mongodbreceiver/scraper.go b/receiver/mongodbreceiver/scraper.go
index 28d8b5080ce4e..f52519bfc87d8 100644
--- a/receiver/mongodbreceiver/scraper.go
+++ b/receiver/mongodbreceiver/scraper.go
@@ -37,19 +37,23 @@ var (
 )
 
 type mongodbScraper struct {
-	logger       *zap.Logger
-	config       *Config
-	client       client
-	mongoVersion *version.Version
-	mb           *metadata.MetricsBuilder
+	logger        *zap.Logger
+	config        *Config
+	client        client
+	mongoVersion  *version.Version
+	mb            *metadata.MetricsBuilder
+	prevTimestamp pcommon.Timestamp
+	prevCounts    map[string]int64
 }
 
 func newMongodbScraper(settings receiver.Settings, config *Config) *mongodbScraper {
 	return &mongodbScraper{
-		logger:       settings.Logger,
-		config:       config,
-		mb:           metadata.NewMetricsBuilder(config.MetricsBuilderConfig, settings),
-		mongoVersion: unknownVersion(),
+		logger:        settings.Logger,
+		config:        config,
+		mb:            metadata.NewMetricsBuilder(config.MetricsBuilderConfig, settings),
+		mongoVersion:  unknownVersion(),
+		prevTimestamp: pcommon.Timestamp(0),
+		prevCounts:    make(map[string]int64),
 	}
 }
 
diff --git a/receiver/mongodbreceiver/testdata/integration/expected.4_0.yaml b/receiver/mongodbreceiver/testdata/integration/expected.4_0.yaml
index 7e2bbd364a1d6..126bf4836318e 100644
--- a/receiver/mongodbreceiver/testdata/integration/expected.4_0.yaml
+++ b/receiver/mongodbreceiver/testdata/integration/expected.4_0.yaml
@@ -137,6 +137,14 @@ resourceMetrics:
                   timeUnixNano: "1682363210513475000"
               isMonotonic: true
             unit: '{operations}'
+          - description: The number of queries executed per second.
+            name: mongodb.queries_per_sec
+            gauge:
+              dataPoints:
+                - asDouble: 100
+                  startTimeUnixNano: "1000000"
+                  timeUnixNano: "2000000"
+            unit: '{query}/s'
           - description: The total time spent performing operations.
             name: mongodb.operation.time
             sum:
diff --git a/receiver/mongodbreceiver/testdata/integration/expected.4_4lpu.yaml b/receiver/mongodbreceiver/testdata/integration/expected.4_4lpu.yaml
index 98fb6055110e9..df704e02a73e8 100644
--- a/receiver/mongodbreceiver/testdata/integration/expected.4_4lpu.yaml
+++ b/receiver/mongodbreceiver/testdata/integration/expected.4_4lpu.yaml
@@ -137,6 +137,14 @@ resourceMetrics:
                   timeUnixNano: "1682363222253814000"
               isMonotonic: true
             unit: '{operations}'
+          - description: The number of queries executed per second.
+            name: mongodb.queries_per_sec
+            gauge:
+              dataPoints:
+                - asDouble: 100
+                  startTimeUnixNano: "1000000"
+                  timeUnixNano: "2000000"
+            unit: '{query}/s'
           - description: The total time spent performing operations.
            name: mongodb.operation.time
             sum:
diff --git a/receiver/mongodbreceiver/testdata/integration/expected.5_0.yaml b/receiver/mongodbreceiver/testdata/integration/expected.5_0.yaml
index 054c25999b599..18c49fb942ef0 100644
--- a/receiver/mongodbreceiver/testdata/integration/expected.5_0.yaml
+++ b/receiver/mongodbreceiver/testdata/integration/expected.5_0.yaml
@@ -137,6 +137,14 @@ resourceMetrics:
                   timeUnixNano: "1682363210542990000"
               isMonotonic: true
             unit: '{operations}'
+          - description: The number of queries executed per second.
+            name: mongodb.queries_per_sec
+            gauge:
+              dataPoints:
+                - asDouble: 100
+                  startTimeUnixNano: "1000000"
+                  timeUnixNano: "2000000"
+            unit: '{query}/s'
           - description: The total time spent performing operations.
             name: mongodb.operation.time
             sum:
diff --git a/receiver/mongodbreceiver/testdata/scraper/expected.yaml b/receiver/mongodbreceiver/testdata/scraper/expected.yaml
index 4a22e2a46351f..0c415f02a940e 100644
--- a/receiver/mongodbreceiver/testdata/scraper/expected.yaml
+++ b/receiver/mongodbreceiver/testdata/scraper/expected.yaml
@@ -148,6 +148,14 @@ resourceMetrics:
                   timeUnixNano: "2000000"
               isMonotonic: true
             unit: '{operations}'
+          - description: The number of queries executed per second.
+            name: mongodb.queries_per_sec
+            gauge:
+              dataPoints:
+                - asDouble: 100
+                  startTimeUnixNano: "1000000"
+                  timeUnixNano: "2000000"
+            unit: '{query}/s'
           - description: The latency of operations.
             gauge:
              dataPoints:
diff --git a/receiver/mongodbreceiver/testdata/scraper/partial_scrape.yaml b/receiver/mongodbreceiver/testdata/scraper/partial_scrape.yaml
index ab033da4869dc..4272c815efaea 100644
--- a/receiver/mongodbreceiver/testdata/scraper/partial_scrape.yaml
+++ b/receiver/mongodbreceiver/testdata/scraper/partial_scrape.yaml
@@ -148,6 +148,14 @@ resourceMetrics:
                   timeUnixNano: "2000000"
               isMonotonic: true
             unit: '{operations}'
+          - description: The number of queries executed per second.
+            name: mongodb.queries_per_sec
+            gauge:
+              dataPoints:
+                - asDouble: 100
+                  startTimeUnixNano: "1000000"
+                  timeUnixNano: "2000000"
+            unit: '{query}/s'
           - description: The latency of operations.
             gauge:
               dataPoints:

From 9d09c391e681e86dc311b1884ba5f64e6d0ea40b Mon Sep 17 00:00:00 2001
From: Tim Chan
Date: Thu, 19 Dec 2024 16:49:26 -0800
Subject: [PATCH 02/19] Added rest of operation metrics

---
 receiver/mongodbreceiver/documentation.md     |  40 +++
 .../internal/metadata/generated_config.go     |  20 ++
 .../metadata/generated_config_test.go         |  10 +
 .../internal/metadata/generated_metrics.go    | 285 ++++++++++++++++++
 .../metadata/generated_metrics_test.go        |  80 +++++
 .../internal/metadata/testdata/config.yaml    |  20 ++
 receiver/mongodbreceiver/metadata.yaml        |  44 ++-
 receiver/mongodbreceiver/metrics.go           |  12 +-
 .../testdata/integration/expected.4_0.yaml    |  47 ++-
 .../testdata/integration/expected.4_4lpu.yaml |  47 ++-
 .../testdata/integration/expected.5_0.yaml    |  47 ++-
 .../testdata/scraper/expected.yaml            |  45 +++
 .../testdata/scraper/partial_scrape.yaml      |  47 ++-
 13 files changed, 737 insertions(+), 7 deletions(-)

diff --git a/receiver/mongodbreceiver/documentation.md b/receiver/mongodbreceiver/documentation.md
index 1a605c560614d..d4cbe0fb6fdf7 100644
--- a/receiver/mongodbreceiver/documentation.md
+++ b/receiver/mongodbreceiver/documentation.md
@@ -34,6 +34,14 @@ The number of collections.
 | ---- | ----------- | ---------- | ----------------------- | --------- |
 | {collections} | Sum | Int | Cumulative | false |
 
+### mongodb.commands_per_sec
+
+The number of commands executed per second.
+
+| Unit | Metric Type | Value Type |
+| ---- | ----------- | ---------- |
+| {command}/s | Gauge | Double |
+
 ### mongodb.connection.count
 
 The number of connections.
@@ -80,6 +88,14 @@ The number of existing databases.
 | ---- | ----------- | ---------- | ----------------------- | --------- |
 | {databases} | Sum | Int | Cumulative | false |
 
+### mongodb.deletes_per_sec
+
+The number of deletes executed per second.
+
+| Unit | Metric Type | Value Type |
+| ---- | ----------- | ---------- |
+| {delete}/s | Gauge | Double |
+
 ### mongodb.document.operation.count
 
 The number of document operations executed.
@@ -102,6 +118,14 @@ The number of extents.
 | ---- | ----------- | ---------- | ----------------------- | --------- |
 | {extents} | Sum | Int | Cumulative | false |
 
+### mongodb.getmores_per_sec
+
+The number of getmores executed per second.
+
+| Unit | Metric Type | Value Type |
+| ---- | ----------- | ---------- |
+| {getmore}/s | Gauge | Double |
+
 ### mongodb.global_lock.time
 
 The time the global lock has been held.
@@ -140,6 +164,14 @@ Sum of the space allocated to all indexes in the database, including free index space.
 | ---- | ----------- | ---------- | ----------------------- | --------- |
 | By | Sum | Int | Cumulative | false |
 
+### mongodb.inserts_per_sec
+
+The number of inserts executed per second.
+
+| Unit | Metric Type | Value Type |
+| ---- | ----------- | ---------- |
+| {insert}/s | Gauge | Double |
+
 ### mongodb.memory.usage
 
 The amount of memory used.
@@ -240,6 +272,14 @@ If collection data is compressed it reflects the compressed size.
 | ---- | ----------- | ---------- | ----------------------- | --------- |
 | By | Sum | Int | Cumulative | true |
 
+### mongodb.updates_per_sec
+
+The number of updates executed per second.
+
+| Unit | Metric Type | Value Type |
+| ---- | ----------- | ---------- |
+| {update}/s | Gauge | Double |
+
 ## Optional Metrics
 
 The following metrics are not emitted by default. Each of them can be enabled by applying the following configuration:
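The enable/disable switch is uniform across all metrics in this receiver. A representative collector configuration toggling one of the new per-second gauges (an assumed example following the shape of the `testdata/config.yaml` entries in this patch, not canonical documentation; the endpoint is illustrative):

```yaml
receivers:
  mongodb:
    hosts:
      - endpoint: localhost:27017   # assumed endpoint for illustration
    metrics:
      # The per-second gauges default to enabled; any of them can be switched off:
      mongodb.queries_per_sec:
        enabled: false
      # Optional metrics such as mongodb.uptime are off by default:
      mongodb.uptime:
        enabled: true
```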
Each of them can be enabled by applying the following configuration: diff --git a/receiver/mongodbreceiver/internal/metadata/generated_config.go b/receiver/mongodbreceiver/internal/metadata/generated_config.go index 3557cdb80cc8f..c86bf2c342200 100644 --- a/receiver/mongodbreceiver/internal/metadata/generated_config.go +++ b/receiver/mongodbreceiver/internal/metadata/generated_config.go @@ -30,18 +30,22 @@ func (ms *MetricConfig) Unmarshal(parser *confmap.Conf) error { type MetricsConfig struct { MongodbCacheOperations MetricConfig `mapstructure:"mongodb.cache.operations"` MongodbCollectionCount MetricConfig `mapstructure:"mongodb.collection.count"` + MongodbCommandsPerSec MetricConfig `mapstructure:"mongodb.commands_per_sec"` MongodbConnectionCount MetricConfig `mapstructure:"mongodb.connection.count"` MongodbCursorCount MetricConfig `mapstructure:"mongodb.cursor.count"` MongodbCursorTimeoutCount MetricConfig `mapstructure:"mongodb.cursor.timeout.count"` MongodbDataSize MetricConfig `mapstructure:"mongodb.data.size"` MongodbDatabaseCount MetricConfig `mapstructure:"mongodb.database.count"` + MongodbDeletesPerSec MetricConfig `mapstructure:"mongodb.deletes_per_sec"` MongodbDocumentOperationCount MetricConfig `mapstructure:"mongodb.document.operation.count"` MongodbExtentCount MetricConfig `mapstructure:"mongodb.extent.count"` + MongodbGetmoresPerSec MetricConfig `mapstructure:"mongodb.getmores_per_sec"` MongodbGlobalLockTime MetricConfig `mapstructure:"mongodb.global_lock.time"` MongodbHealth MetricConfig `mapstructure:"mongodb.health"` MongodbIndexAccessCount MetricConfig `mapstructure:"mongodb.index.access.count"` MongodbIndexCount MetricConfig `mapstructure:"mongodb.index.count"` MongodbIndexSize MetricConfig `mapstructure:"mongodb.index.size"` + MongodbInsertsPerSec MetricConfig `mapstructure:"mongodb.inserts_per_sec"` MongodbLockAcquireCount MetricConfig `mapstructure:"mongodb.lock.acquire.count"` MongodbLockAcquireTime MetricConfig `mapstructure:"mongodb.lock.acquire.time"` MongodbLockAcquireWaitCount MetricConfig `mapstructure:"mongodb.lock.acquire.wait_count"` @@ -58,6 +62,7 @@ type MetricsConfig struct { MongodbQueriesPerSec MetricConfig `mapstructure:"mongodb.queries_per_sec"` MongodbSessionCount MetricConfig `mapstructure:"mongodb.session.count"` MongodbStorageSize MetricConfig `mapstructure:"mongodb.storage.size"` + MongodbUpdatesPerSec MetricConfig `mapstructure:"mongodb.updates_per_sec"` MongodbUptime MetricConfig `mapstructure:"mongodb.uptime"` } @@ -69,6 +74,9 @@ func DefaultMetricsConfig() MetricsConfig { MongodbCollectionCount: MetricConfig{ Enabled: true, }, + MongodbCommandsPerSec: MetricConfig{ + Enabled: true, + }, MongodbConnectionCount: MetricConfig{ Enabled: true, }, @@ -84,12 +92,18 @@ func DefaultMetricsConfig() MetricsConfig { MongodbDatabaseCount: MetricConfig{ Enabled: true, }, + MongodbDeletesPerSec: MetricConfig{ + Enabled: true, + }, MongodbDocumentOperationCount: MetricConfig{ Enabled: true, }, MongodbExtentCount: MetricConfig{ Enabled: true, }, + MongodbGetmoresPerSec: MetricConfig{ + Enabled: true, + }, MongodbGlobalLockTime: MetricConfig{ Enabled: true, }, @@ -105,6 +119,9 @@ func DefaultMetricsConfig() MetricsConfig { MongodbIndexSize: MetricConfig{ Enabled: true, }, + MongodbInsertsPerSec: MetricConfig{ + Enabled: true, + }, MongodbLockAcquireCount: MetricConfig{ Enabled: false, }, @@ -153,6 +170,9 @@ func DefaultMetricsConfig() MetricsConfig { MongodbStorageSize: MetricConfig{ Enabled: true, }, + MongodbUpdatesPerSec: MetricConfig{ + Enabled: true, + 
}, MongodbUptime: MetricConfig{ Enabled: false, }, diff --git a/receiver/mongodbreceiver/internal/metadata/generated_config_test.go b/receiver/mongodbreceiver/internal/metadata/generated_config_test.go index dc5ff3e08ce35..866c5b94d27e6 100644 --- a/receiver/mongodbreceiver/internal/metadata/generated_config_test.go +++ b/receiver/mongodbreceiver/internal/metadata/generated_config_test.go @@ -27,18 +27,22 @@ func TestMetricsBuilderConfig(t *testing.T) { Metrics: MetricsConfig{ MongodbCacheOperations: MetricConfig{Enabled: true}, MongodbCollectionCount: MetricConfig{Enabled: true}, + MongodbCommandsPerSec: MetricConfig{Enabled: true}, MongodbConnectionCount: MetricConfig{Enabled: true}, MongodbCursorCount: MetricConfig{Enabled: true}, MongodbCursorTimeoutCount: MetricConfig{Enabled: true}, MongodbDataSize: MetricConfig{Enabled: true}, MongodbDatabaseCount: MetricConfig{Enabled: true}, + MongodbDeletesPerSec: MetricConfig{Enabled: true}, MongodbDocumentOperationCount: MetricConfig{Enabled: true}, MongodbExtentCount: MetricConfig{Enabled: true}, + MongodbGetmoresPerSec: MetricConfig{Enabled: true}, MongodbGlobalLockTime: MetricConfig{Enabled: true}, MongodbHealth: MetricConfig{Enabled: true}, MongodbIndexAccessCount: MetricConfig{Enabled: true}, MongodbIndexCount: MetricConfig{Enabled: true}, MongodbIndexSize: MetricConfig{Enabled: true}, + MongodbInsertsPerSec: MetricConfig{Enabled: true}, MongodbLockAcquireCount: MetricConfig{Enabled: true}, MongodbLockAcquireTime: MetricConfig{Enabled: true}, MongodbLockAcquireWaitCount: MetricConfig{Enabled: true}, @@ -55,6 +59,7 @@ func TestMetricsBuilderConfig(t *testing.T) { MongodbQueriesPerSec: MetricConfig{Enabled: true}, MongodbSessionCount: MetricConfig{Enabled: true}, MongodbStorageSize: MetricConfig{Enabled: true}, + MongodbUpdatesPerSec: MetricConfig{Enabled: true}, MongodbUptime: MetricConfig{Enabled: true}, }, ResourceAttributes: ResourceAttributesConfig{ @@ -70,18 +75,22 @@ func TestMetricsBuilderConfig(t *testing.T) { Metrics: MetricsConfig{ MongodbCacheOperations: MetricConfig{Enabled: false}, MongodbCollectionCount: MetricConfig{Enabled: false}, + MongodbCommandsPerSec: MetricConfig{Enabled: false}, MongodbConnectionCount: MetricConfig{Enabled: false}, MongodbCursorCount: MetricConfig{Enabled: false}, MongodbCursorTimeoutCount: MetricConfig{Enabled: false}, MongodbDataSize: MetricConfig{Enabled: false}, MongodbDatabaseCount: MetricConfig{Enabled: false}, + MongodbDeletesPerSec: MetricConfig{Enabled: false}, MongodbDocumentOperationCount: MetricConfig{Enabled: false}, MongodbExtentCount: MetricConfig{Enabled: false}, + MongodbGetmoresPerSec: MetricConfig{Enabled: false}, MongodbGlobalLockTime: MetricConfig{Enabled: false}, MongodbHealth: MetricConfig{Enabled: false}, MongodbIndexAccessCount: MetricConfig{Enabled: false}, MongodbIndexCount: MetricConfig{Enabled: false}, MongodbIndexSize: MetricConfig{Enabled: false}, + MongodbInsertsPerSec: MetricConfig{Enabled: false}, MongodbLockAcquireCount: MetricConfig{Enabled: false}, MongodbLockAcquireTime: MetricConfig{Enabled: false}, MongodbLockAcquireWaitCount: MetricConfig{Enabled: false}, @@ -98,6 +107,7 @@ func TestMetricsBuilderConfig(t *testing.T) { MongodbQueriesPerSec: MetricConfig{Enabled: false}, MongodbSessionCount: MetricConfig{Enabled: false}, MongodbStorageSize: MetricConfig{Enabled: false}, + MongodbUpdatesPerSec: MetricConfig{Enabled: false}, MongodbUptime: MetricConfig{Enabled: false}, }, ResourceAttributes: ResourceAttributesConfig{ diff --git 
a/receiver/mongodbreceiver/internal/metadata/generated_metrics.go b/receiver/mongodbreceiver/internal/metadata/generated_metrics.go
index 73d5423cbeb21..f2c41e8d8330c 100644
--- a/receiver/mongodbreceiver/internal/metadata/generated_metrics.go
+++ b/receiver/mongodbreceiver/internal/metadata/generated_metrics.go
@@ -354,6 +354,55 @@ func newMetricMongodbCollectionCount(cfg MetricConfig) metricMongodbCollectionCount {
 	return m
 }
 
+type metricMongodbCommandsPerSec struct {
+	data     pmetric.Metric // data buffer for generated metric.
+	config   MetricConfig   // metric config provided by user.
+	capacity int            // max observed number of data points added to the metric.
+}
+
+// init fills mongodb.commands_per_sec metric with initial data.
+func (m *metricMongodbCommandsPerSec) init() {
+	m.data.SetName("mongodb.commands_per_sec")
+	m.data.SetDescription("The number of commands executed per second.")
+	m.data.SetUnit("{command}/s")
+	m.data.SetEmptyGauge()
+}
+
+func (m *metricMongodbCommandsPerSec) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64) {
+	if !m.config.Enabled {
+		return
+	}
+	dp := m.data.Gauge().DataPoints().AppendEmpty()
+	dp.SetStartTimestamp(start)
+	dp.SetTimestamp(ts)
+	dp.SetDoubleValue(val)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricMongodbCommandsPerSec) updateCapacity() {
+	if m.data.Gauge().DataPoints().Len() > m.capacity {
+		m.capacity = m.data.Gauge().DataPoints().Len()
+	}
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricMongodbCommandsPerSec) emit(metrics pmetric.MetricSlice) {
+	if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
+		m.updateCapacity()
+		m.data.MoveTo(metrics.AppendEmpty())
+		m.init()
+	}
+}
+
+func newMetricMongodbCommandsPerSec(cfg MetricConfig) metricMongodbCommandsPerSec {
+	m := metricMongodbCommandsPerSec{config: cfg}
+	if cfg.Enabled {
+		m.data = pmetric.NewMetric()
+		m.init()
+	}
+	return m
+}
+
 type metricMongodbConnectionCount struct {
 	data     pmetric.Metric // data buffer for generated metric.
 	config   MetricConfig   // metric config provided by user.
@@ -611,6 +660,55 @@ func newMetricMongodbDatabaseCount(cfg MetricConfig) metricMongodbDatabaseCount {
 	return m
 }
 
+type metricMongodbDeletesPerSec struct {
+	data     pmetric.Metric // data buffer for generated metric.
+	config   MetricConfig   // metric config provided by user.
+	capacity int            // max observed number of data points added to the metric.
+}
+
+// init fills mongodb.deletes_per_sec metric with initial data.
+func (m *metricMongodbDeletesPerSec) init() {
+	m.data.SetName("mongodb.deletes_per_sec")
+	m.data.SetDescription("The number of deletes executed per second.")
+	m.data.SetUnit("{delete}/s")
+	m.data.SetEmptyGauge()
+}
+
+func (m *metricMongodbDeletesPerSec) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64) {
+	if !m.config.Enabled {
+		return
+	}
+	dp := m.data.Gauge().DataPoints().AppendEmpty()
+	dp.SetStartTimestamp(start)
+	dp.SetTimestamp(ts)
+	dp.SetDoubleValue(val)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricMongodbDeletesPerSec) updateCapacity() {
+	if m.data.Gauge().DataPoints().Len() > m.capacity {
+		m.capacity = m.data.Gauge().DataPoints().Len()
+	}
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricMongodbDeletesPerSec) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricMongodbDeletesPerSec(cfg MetricConfig) metricMongodbDeletesPerSec { + m := metricMongodbDeletesPerSec{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + type metricMongodbDocumentOperationCount struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. @@ -715,6 +813,55 @@ func newMetricMongodbExtentCount(cfg MetricConfig) metricMongodbExtentCount { return m } +type metricMongodbGetmoresPerSec struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills mongodb.getmores_per_sec metric with initial data. +func (m *metricMongodbGetmoresPerSec) init() { + m.data.SetName("mongodb.getmores_per_sec") + m.data.SetDescription("The number of queries executed per second.") + m.data.SetUnit("{getmore}/s") + m.data.SetEmptyGauge() +} + +func (m *metricMongodbGetmoresPerSec) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetDoubleValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricMongodbGetmoresPerSec) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricMongodbGetmoresPerSec) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricMongodbGetmoresPerSec(cfg MetricConfig) metricMongodbGetmoresPerSec { + m := metricMongodbGetmoresPerSec{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + type metricMongodbGlobalLockTime struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. @@ -970,6 +1117,55 @@ func newMetricMongodbIndexSize(cfg MetricConfig) metricMongodbIndexSize { return m } +type metricMongodbInsertsPerSec struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills mongodb.inserts_per_sec metric with initial data. +func (m *metricMongodbInsertsPerSec) init() { + m.data.SetName("mongodb.inserts_per_sec") + m.data.SetDescription("The number of queries executed per second.") + m.data.SetUnit("{insert}/s") + m.data.SetEmptyGauge() +} + +func (m *metricMongodbInsertsPerSec) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetDoubleValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. 
+func (m *metricMongodbInsertsPerSec) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricMongodbInsertsPerSec) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricMongodbInsertsPerSec(cfg MetricConfig) metricMongodbInsertsPerSec { + m := metricMongodbInsertsPerSec{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + type metricMongodbLockAcquireCount struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. @@ -1804,6 +2000,55 @@ func newMetricMongodbStorageSize(cfg MetricConfig) metricMongodbStorageSize { return m } +type metricMongodbUpdatesPerSec struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills mongodb.updates_per_sec metric with initial data. +func (m *metricMongodbUpdatesPerSec) init() { + m.data.SetName("mongodb.updates_per_sec") + m.data.SetDescription("The number of queries executed per second.") + m.data.SetUnit("{update}/s") + m.data.SetEmptyGauge() +} + +func (m *metricMongodbUpdatesPerSec) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetDoubleValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricMongodbUpdatesPerSec) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricMongodbUpdatesPerSec) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricMongodbUpdatesPerSec(cfg MetricConfig) metricMongodbUpdatesPerSec { + m := metricMongodbUpdatesPerSec{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + type metricMongodbUptime struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. 
@@ -1867,18 +2112,22 @@ type MetricsBuilder struct { resourceAttributeExcludeFilter map[string]filter.Filter metricMongodbCacheOperations metricMongodbCacheOperations metricMongodbCollectionCount metricMongodbCollectionCount + metricMongodbCommandsPerSec metricMongodbCommandsPerSec metricMongodbConnectionCount metricMongodbConnectionCount metricMongodbCursorCount metricMongodbCursorCount metricMongodbCursorTimeoutCount metricMongodbCursorTimeoutCount metricMongodbDataSize metricMongodbDataSize metricMongodbDatabaseCount metricMongodbDatabaseCount + metricMongodbDeletesPerSec metricMongodbDeletesPerSec metricMongodbDocumentOperationCount metricMongodbDocumentOperationCount metricMongodbExtentCount metricMongodbExtentCount + metricMongodbGetmoresPerSec metricMongodbGetmoresPerSec metricMongodbGlobalLockTime metricMongodbGlobalLockTime metricMongodbHealth metricMongodbHealth metricMongodbIndexAccessCount metricMongodbIndexAccessCount metricMongodbIndexCount metricMongodbIndexCount metricMongodbIndexSize metricMongodbIndexSize + metricMongodbInsertsPerSec metricMongodbInsertsPerSec metricMongodbLockAcquireCount metricMongodbLockAcquireCount metricMongodbLockAcquireTime metricMongodbLockAcquireTime metricMongodbLockAcquireWaitCount metricMongodbLockAcquireWaitCount @@ -1895,6 +2144,7 @@ type MetricsBuilder struct { metricMongodbQueriesPerSec metricMongodbQueriesPerSec metricMongodbSessionCount metricMongodbSessionCount metricMongodbStorageSize metricMongodbStorageSize + metricMongodbUpdatesPerSec metricMongodbUpdatesPerSec metricMongodbUptime metricMongodbUptime } @@ -1924,18 +2174,22 @@ func NewMetricsBuilder(mbc MetricsBuilderConfig, settings receiver.Settings, opt buildInfo: settings.BuildInfo, metricMongodbCacheOperations: newMetricMongodbCacheOperations(mbc.Metrics.MongodbCacheOperations), metricMongodbCollectionCount: newMetricMongodbCollectionCount(mbc.Metrics.MongodbCollectionCount), + metricMongodbCommandsPerSec: newMetricMongodbCommandsPerSec(mbc.Metrics.MongodbCommandsPerSec), metricMongodbConnectionCount: newMetricMongodbConnectionCount(mbc.Metrics.MongodbConnectionCount), metricMongodbCursorCount: newMetricMongodbCursorCount(mbc.Metrics.MongodbCursorCount), metricMongodbCursorTimeoutCount: newMetricMongodbCursorTimeoutCount(mbc.Metrics.MongodbCursorTimeoutCount), metricMongodbDataSize: newMetricMongodbDataSize(mbc.Metrics.MongodbDataSize), metricMongodbDatabaseCount: newMetricMongodbDatabaseCount(mbc.Metrics.MongodbDatabaseCount), + metricMongodbDeletesPerSec: newMetricMongodbDeletesPerSec(mbc.Metrics.MongodbDeletesPerSec), metricMongodbDocumentOperationCount: newMetricMongodbDocumentOperationCount(mbc.Metrics.MongodbDocumentOperationCount), metricMongodbExtentCount: newMetricMongodbExtentCount(mbc.Metrics.MongodbExtentCount), + metricMongodbGetmoresPerSec: newMetricMongodbGetmoresPerSec(mbc.Metrics.MongodbGetmoresPerSec), metricMongodbGlobalLockTime: newMetricMongodbGlobalLockTime(mbc.Metrics.MongodbGlobalLockTime), metricMongodbHealth: newMetricMongodbHealth(mbc.Metrics.MongodbHealth), metricMongodbIndexAccessCount: newMetricMongodbIndexAccessCount(mbc.Metrics.MongodbIndexAccessCount), metricMongodbIndexCount: newMetricMongodbIndexCount(mbc.Metrics.MongodbIndexCount), metricMongodbIndexSize: newMetricMongodbIndexSize(mbc.Metrics.MongodbIndexSize), + metricMongodbInsertsPerSec: newMetricMongodbInsertsPerSec(mbc.Metrics.MongodbInsertsPerSec), metricMongodbLockAcquireCount: newMetricMongodbLockAcquireCount(mbc.Metrics.MongodbLockAcquireCount), metricMongodbLockAcquireTime: 
newMetricMongodbLockAcquireTime(mbc.Metrics.MongodbLockAcquireTime), metricMongodbLockAcquireWaitCount: newMetricMongodbLockAcquireWaitCount(mbc.Metrics.MongodbLockAcquireWaitCount), @@ -1952,6 +2206,7 @@ func NewMetricsBuilder(mbc MetricsBuilderConfig, settings receiver.Settings, opt metricMongodbQueriesPerSec: newMetricMongodbQueriesPerSec(mbc.Metrics.MongodbQueriesPerSec), metricMongodbSessionCount: newMetricMongodbSessionCount(mbc.Metrics.MongodbSessionCount), metricMongodbStorageSize: newMetricMongodbStorageSize(mbc.Metrics.MongodbStorageSize), + metricMongodbUpdatesPerSec: newMetricMongodbUpdatesPerSec(mbc.Metrics.MongodbUpdatesPerSec), metricMongodbUptime: newMetricMongodbUptime(mbc.Metrics.MongodbUptime), resourceAttributeIncludeFilter: make(map[string]filter.Filter), resourceAttributeExcludeFilter: make(map[string]filter.Filter), @@ -2045,18 +2300,22 @@ func (mb *MetricsBuilder) EmitForResource(options ...ResourceMetricsOption) { ils.Metrics().EnsureCapacity(mb.metricsCapacity) mb.metricMongodbCacheOperations.emit(ils.Metrics()) mb.metricMongodbCollectionCount.emit(ils.Metrics()) + mb.metricMongodbCommandsPerSec.emit(ils.Metrics()) mb.metricMongodbConnectionCount.emit(ils.Metrics()) mb.metricMongodbCursorCount.emit(ils.Metrics()) mb.metricMongodbCursorTimeoutCount.emit(ils.Metrics()) mb.metricMongodbDataSize.emit(ils.Metrics()) mb.metricMongodbDatabaseCount.emit(ils.Metrics()) + mb.metricMongodbDeletesPerSec.emit(ils.Metrics()) mb.metricMongodbDocumentOperationCount.emit(ils.Metrics()) mb.metricMongodbExtentCount.emit(ils.Metrics()) + mb.metricMongodbGetmoresPerSec.emit(ils.Metrics()) mb.metricMongodbGlobalLockTime.emit(ils.Metrics()) mb.metricMongodbHealth.emit(ils.Metrics()) mb.metricMongodbIndexAccessCount.emit(ils.Metrics()) mb.metricMongodbIndexCount.emit(ils.Metrics()) mb.metricMongodbIndexSize.emit(ils.Metrics()) + mb.metricMongodbInsertsPerSec.emit(ils.Metrics()) mb.metricMongodbLockAcquireCount.emit(ils.Metrics()) mb.metricMongodbLockAcquireTime.emit(ils.Metrics()) mb.metricMongodbLockAcquireWaitCount.emit(ils.Metrics()) @@ -2073,6 +2332,7 @@ func (mb *MetricsBuilder) EmitForResource(options ...ResourceMetricsOption) { mb.metricMongodbQueriesPerSec.emit(ils.Metrics()) mb.metricMongodbSessionCount.emit(ils.Metrics()) mb.metricMongodbStorageSize.emit(ils.Metrics()) + mb.metricMongodbUpdatesPerSec.emit(ils.Metrics()) mb.metricMongodbUptime.emit(ils.Metrics()) for _, op := range options { @@ -2115,6 +2375,11 @@ func (mb *MetricsBuilder) RecordMongodbCollectionCountDataPoint(ts pcommon.Times mb.metricMongodbCollectionCount.recordDataPoint(mb.startTime, ts, val) } +// RecordMongodbCommandsPerSecDataPoint adds a data point to mongodb.commands_per_sec metric. +func (mb *MetricsBuilder) RecordMongodbCommandsPerSecDataPoint(ts pcommon.Timestamp, val float64) { + mb.metricMongodbCommandsPerSec.recordDataPoint(mb.startTime, ts, val) +} + // RecordMongodbConnectionCountDataPoint adds a data point to mongodb.connection.count metric. func (mb *MetricsBuilder) RecordMongodbConnectionCountDataPoint(ts pcommon.Timestamp, val int64, connectionTypeAttributeValue AttributeConnectionType) { mb.metricMongodbConnectionCount.recordDataPoint(mb.startTime, ts, val, connectionTypeAttributeValue.String()) @@ -2140,6 +2405,11 @@ func (mb *MetricsBuilder) RecordMongodbDatabaseCountDataPoint(ts pcommon.Timesta mb.metricMongodbDatabaseCount.recordDataPoint(mb.startTime, ts, val) } +// RecordMongodbDeletesPerSecDataPoint adds a data point to mongodb.deletes_per_sec metric. 
+func (mb *MetricsBuilder) RecordMongodbDeletesPerSecDataPoint(ts pcommon.Timestamp, val float64) { + mb.metricMongodbDeletesPerSec.recordDataPoint(mb.startTime, ts, val) +} + // RecordMongodbDocumentOperationCountDataPoint adds a data point to mongodb.document.operation.count metric. func (mb *MetricsBuilder) RecordMongodbDocumentOperationCountDataPoint(ts pcommon.Timestamp, val int64, operationAttributeValue AttributeOperation) { mb.metricMongodbDocumentOperationCount.recordDataPoint(mb.startTime, ts, val, operationAttributeValue.String()) @@ -2150,6 +2420,11 @@ func (mb *MetricsBuilder) RecordMongodbExtentCountDataPoint(ts pcommon.Timestamp mb.metricMongodbExtentCount.recordDataPoint(mb.startTime, ts, val) } +// RecordMongodbGetmoresPerSecDataPoint adds a data point to mongodb.getmores_per_sec metric. +func (mb *MetricsBuilder) RecordMongodbGetmoresPerSecDataPoint(ts pcommon.Timestamp, val float64) { + mb.metricMongodbGetmoresPerSec.recordDataPoint(mb.startTime, ts, val) +} + // RecordMongodbGlobalLockTimeDataPoint adds a data point to mongodb.global_lock.time metric. func (mb *MetricsBuilder) RecordMongodbGlobalLockTimeDataPoint(ts pcommon.Timestamp, val int64) { mb.metricMongodbGlobalLockTime.recordDataPoint(mb.startTime, ts, val) @@ -2175,6 +2450,11 @@ func (mb *MetricsBuilder) RecordMongodbIndexSizeDataPoint(ts pcommon.Timestamp, mb.metricMongodbIndexSize.recordDataPoint(mb.startTime, ts, val) } +// RecordMongodbInsertsPerSecDataPoint adds a data point to mongodb.inserts_per_sec metric. +func (mb *MetricsBuilder) RecordMongodbInsertsPerSecDataPoint(ts pcommon.Timestamp, val float64) { + mb.metricMongodbInsertsPerSec.recordDataPoint(mb.startTime, ts, val) +} + // RecordMongodbLockAcquireCountDataPoint adds a data point to mongodb.lock.acquire.count metric. func (mb *MetricsBuilder) RecordMongodbLockAcquireCountDataPoint(ts pcommon.Timestamp, val int64, lockTypeAttributeValue AttributeLockType, lockModeAttributeValue AttributeLockMode) { mb.metricMongodbLockAcquireCount.recordDataPoint(mb.startTime, ts, val, lockTypeAttributeValue.String(), lockModeAttributeValue.String()) @@ -2255,6 +2535,11 @@ func (mb *MetricsBuilder) RecordMongodbStorageSizeDataPoint(ts pcommon.Timestamp mb.metricMongodbStorageSize.recordDataPoint(mb.startTime, ts, val) } +// RecordMongodbUpdatesPerSecDataPoint adds a data point to mongodb.updates_per_sec metric. +func (mb *MetricsBuilder) RecordMongodbUpdatesPerSecDataPoint(ts pcommon.Timestamp, val float64) { + mb.metricMongodbUpdatesPerSec.recordDataPoint(mb.startTime, ts, val) +} + // RecordMongodbUptimeDataPoint adds a data point to mongodb.uptime metric. 
func (mb *MetricsBuilder) RecordMongodbUptimeDataPoint(ts pcommon.Timestamp, val int64) { mb.metricMongodbUptime.recordDataPoint(mb.startTime, ts, val) diff --git a/receiver/mongodbreceiver/internal/metadata/generated_metrics_test.go b/receiver/mongodbreceiver/internal/metadata/generated_metrics_test.go index 5508e67e867b5..81b88931ae645 100644 --- a/receiver/mongodbreceiver/internal/metadata/generated_metrics_test.go +++ b/receiver/mongodbreceiver/internal/metadata/generated_metrics_test.go @@ -76,6 +76,10 @@ func TestMetricsBuilder(t *testing.T) { allMetricsCount++ mb.RecordMongodbCollectionCountDataPoint(ts, 1) + defaultMetricsCount++ + allMetricsCount++ + mb.RecordMongodbCommandsPerSecDataPoint(ts, 1) + defaultMetricsCount++ allMetricsCount++ mb.RecordMongodbConnectionCountDataPoint(ts, 1, AttributeConnectionTypeActive) @@ -96,6 +100,10 @@ func TestMetricsBuilder(t *testing.T) { allMetricsCount++ mb.RecordMongodbDatabaseCountDataPoint(ts, 1) + defaultMetricsCount++ + allMetricsCount++ + mb.RecordMongodbDeletesPerSecDataPoint(ts, 1) + defaultMetricsCount++ allMetricsCount++ mb.RecordMongodbDocumentOperationCountDataPoint(ts, 1, AttributeOperationInsert) @@ -104,6 +112,10 @@ func TestMetricsBuilder(t *testing.T) { allMetricsCount++ mb.RecordMongodbExtentCountDataPoint(ts, 1) + defaultMetricsCount++ + allMetricsCount++ + mb.RecordMongodbGetmoresPerSecDataPoint(ts, 1) + defaultMetricsCount++ allMetricsCount++ mb.RecordMongodbGlobalLockTimeDataPoint(ts, 1) @@ -123,6 +135,10 @@ func TestMetricsBuilder(t *testing.T) { allMetricsCount++ mb.RecordMongodbIndexSizeDataPoint(ts, 1) + defaultMetricsCount++ + allMetricsCount++ + mb.RecordMongodbInsertsPerSecDataPoint(ts, 1) + allMetricsCount++ mb.RecordMongodbLockAcquireCountDataPoint(ts, 1, AttributeLockTypeParallelBatchWriteMode, AttributeLockModeShared) @@ -181,6 +197,10 @@ func TestMetricsBuilder(t *testing.T) { allMetricsCount++ mb.RecordMongodbStorageSizeDataPoint(ts, 1) + defaultMetricsCount++ + allMetricsCount++ + mb.RecordMongodbUpdatesPerSecDataPoint(ts, 1) + allMetricsCount++ mb.RecordMongodbUptimeDataPoint(ts, 1) @@ -241,6 +261,18 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, ts, dp.Timestamp()) assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) assert.Equal(t, int64(1), dp.IntValue()) + case "mongodb.commands_per_sec": + assert.False(t, validatedMetrics["mongodb.commands_per_sec"], "Found a duplicate in the metrics slice: mongodb.commands_per_sec") + validatedMetrics["mongodb.commands_per_sec"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "The number of queries executed per second.", ms.At(i).Description()) + assert.Equal(t, "{command}/s", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType()) + assert.InDelta(t, float64(1), dp.DoubleValue(), 0.01) case "mongodb.connection.count": assert.False(t, validatedMetrics["mongodb.connection.count"], "Found a duplicate in the metrics slice: mongodb.connection.count") validatedMetrics["mongodb.connection.count"] = true @@ -314,6 +346,18 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, ts, dp.Timestamp()) assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) assert.Equal(t, int64(1), dp.IntValue()) + case "mongodb.deletes_per_sec": + assert.False(t, 
validatedMetrics["mongodb.deletes_per_sec"], "Found a duplicate in the metrics slice: mongodb.deletes_per_sec") + validatedMetrics["mongodb.deletes_per_sec"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "The number of queries executed per second.", ms.At(i).Description()) + assert.Equal(t, "{delete}/s", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType()) + assert.InDelta(t, float64(1), dp.DoubleValue(), 0.01) case "mongodb.document.operation.count": assert.False(t, validatedMetrics["mongodb.document.operation.count"], "Found a duplicate in the metrics slice: mongodb.document.operation.count") validatedMetrics["mongodb.document.operation.count"] = true @@ -345,6 +389,18 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, ts, dp.Timestamp()) assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) assert.Equal(t, int64(1), dp.IntValue()) + case "mongodb.getmores_per_sec": + assert.False(t, validatedMetrics["mongodb.getmores_per_sec"], "Found a duplicate in the metrics slice: mongodb.getmores_per_sec") + validatedMetrics["mongodb.getmores_per_sec"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "The number of queries executed per second.", ms.At(i).Description()) + assert.Equal(t, "{getmore}/s", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType()) + assert.InDelta(t, float64(1), dp.DoubleValue(), 0.01) case "mongodb.global_lock.time": assert.False(t, validatedMetrics["mongodb.global_lock.time"], "Found a duplicate in the metrics slice: mongodb.global_lock.time") validatedMetrics["mongodb.global_lock.time"] = true @@ -416,6 +472,18 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, ts, dp.Timestamp()) assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) assert.Equal(t, int64(1), dp.IntValue()) + case "mongodb.inserts_per_sec": + assert.False(t, validatedMetrics["mongodb.inserts_per_sec"], "Found a duplicate in the metrics slice: mongodb.inserts_per_sec") + validatedMetrics["mongodb.inserts_per_sec"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "The number of queries executed per second.", ms.At(i).Description()) + assert.Equal(t, "{insert}/s", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType()) + assert.InDelta(t, float64(1), dp.DoubleValue(), 0.01) case "mongodb.lock.acquire.count": assert.False(t, validatedMetrics["mongodb.lock.acquire.count"], "Found a duplicate in the metrics slice: mongodb.lock.acquire.count") validatedMetrics["mongodb.lock.acquire.count"] = true @@ -675,6 +743,18 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, ts, dp.Timestamp()) assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) assert.Equal(t, int64(1), dp.IntValue()) + case "mongodb.updates_per_sec": + assert.False(t, validatedMetrics["mongodb.updates_per_sec"], "Found a 
duplicate in the metrics slice: mongodb.updates_per_sec") + validatedMetrics["mongodb.updates_per_sec"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "The number of queries executed per second.", ms.At(i).Description()) + assert.Equal(t, "{update}/s", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType()) + assert.InDelta(t, float64(1), dp.DoubleValue(), 0.01) case "mongodb.uptime": assert.False(t, validatedMetrics["mongodb.uptime"], "Found a duplicate in the metrics slice: mongodb.uptime") validatedMetrics["mongodb.uptime"] = true diff --git a/receiver/mongodbreceiver/internal/metadata/testdata/config.yaml b/receiver/mongodbreceiver/internal/metadata/testdata/config.yaml index 6b08c571e5e93..ef02b2c01b698 100644 --- a/receiver/mongodbreceiver/internal/metadata/testdata/config.yaml +++ b/receiver/mongodbreceiver/internal/metadata/testdata/config.yaml @@ -5,6 +5,8 @@ all_set: enabled: true mongodb.collection.count: enabled: true + mongodb.commands_per_sec: + enabled: true mongodb.connection.count: enabled: true mongodb.cursor.count: @@ -15,10 +17,14 @@ all_set: enabled: true mongodb.database.count: enabled: true + mongodb.deletes_per_sec: + enabled: true mongodb.document.operation.count: enabled: true mongodb.extent.count: enabled: true + mongodb.getmores_per_sec: + enabled: true mongodb.global_lock.time: enabled: true mongodb.health: @@ -29,6 +35,8 @@ all_set: enabled: true mongodb.index.size: enabled: true + mongodb.inserts_per_sec: + enabled: true mongodb.lock.acquire.count: enabled: true mongodb.lock.acquire.time: @@ -61,6 +69,8 @@ all_set: enabled: true mongodb.storage.size: enabled: true + mongodb.updates_per_sec: + enabled: true mongodb.uptime: enabled: true resource_attributes: @@ -76,6 +86,8 @@ none_set: enabled: false mongodb.collection.count: enabled: false + mongodb.commands_per_sec: + enabled: false mongodb.connection.count: enabled: false mongodb.cursor.count: @@ -86,10 +98,14 @@ none_set: enabled: false mongodb.database.count: enabled: false + mongodb.deletes_per_sec: + enabled: false mongodb.document.operation.count: enabled: false mongodb.extent.count: enabled: false + mongodb.getmores_per_sec: + enabled: false mongodb.global_lock.time: enabled: false mongodb.health: @@ -100,6 +116,8 @@ none_set: enabled: false mongodb.index.size: enabled: false + mongodb.inserts_per_sec: + enabled: false mongodb.lock.acquire.count: enabled: false mongodb.lock.acquire.time: @@ -132,6 +150,8 @@ none_set: enabled: false mongodb.storage.size: enabled: false + mongodb.updates_per_sec: + enabled: false mongodb.uptime: enabled: false resource_attributes: diff --git a/receiver/mongodbreceiver/metadata.yaml b/receiver/mongodbreceiver/metadata.yaml index 0c02dbf90c676..6d8d6c2ac304a 100644 --- a/receiver/mongodbreceiver/metadata.yaml +++ b/receiver/mongodbreceiver/metadata.yaml @@ -364,8 +364,48 @@ metrics: enabled: true gauge: value_type: double - aggregation_temporality: delta # For rate, delta temporality is appropriate - monotonic: false # Allow values to fluctuate + aggregation_temporality: delta + monotonic: false + mongodb.inserts_per_sec: + description: The number of insertions executed per second. 
+ unit: "{insert}/s" + enabled: true + gauge: + value_type: double + aggregation_temporality: delta + monotonic: false + mongodb.commands_per_sec: + description: The number of commands executed per second. + unit: "{command}/s" + enabled: true + gauge: + value_type: double + aggregation_temporality: delta + monotonic: false + mongodb.getmores_per_sec: + description: The number of getmores executed per second. + unit: "{getmore}/s" + enabled: true + gauge: + value_type: double + aggregation_temporality: delta + monotonic: false + mongodb.deletes_per_sec: + description: The number of deletes executed per second. + unit: "{delete}/s" + enabled: true + gauge: + value_type: double + aggregation_temporality: delta + monotonic: false + mongodb.updates_per_sec: + description: The number of updates executed per second. + unit: "{update}/s" + enabled: true + gauge: + value_type: double + aggregation_temporality: delta + monotonic: false tests: config: diff --git a/receiver/mongodbreceiver/metrics.go b/receiver/mongodbreceiver/metrics.go index f0ca891fe6e0b..86c0cb6b2fcd3 100644 --- a/receiver/mongodbreceiver/metrics.go +++ b/receiver/mongodbreceiver/metrics.go @@ -233,13 +233,13 @@ func (s *mongodbScraper) recordOperations(now pcommon.Timestamp, doc bson.M, err continue } - // Record the raw count s.mb.RecordMongodbOperationCountDataPoint(now, val, operation) currentCounts[operationVal] = val s.recordOperationPerSecond(now, operationVal, val) } + // For telegraf metrics to get QPS for opcounters // Store current counts for next iteration s.prevCounts = currentCounts s.prevTimestamp = now @@ -269,6 +269,16 @@ func (s *mongodbScraper) recordOperationPerSecond(now pcommon.Timestamp, operati switch operationVal { case "query": s.mb.RecordMongodbQueriesPerSecDataPoint(now, queriesPerSec) + case "insert": + s.mb.RecordMongodbInsertsPerSecDataPoint(now, queriesPerSec) + case "command": + s.mb.RecordMongodbCommandsPerSecDataPoint(now, queriesPerSec) + case "getmore": + s.mb.RecordMongodbGetmoresPerSecDataPoint(now, queriesPerSec) + case "delete": + s.mb.RecordMongodbDeletesPerSecDataPoint(now, queriesPerSec) + case "update": + s.mb.RecordMongodbUpdatesPerSecDataPoint(now, queriesPerSec) default: fmt.Printf("Unhandled operation: %s\n", operationVal) } diff --git a/receiver/mongodbreceiver/testdata/integration/expected.4_0.yaml b/receiver/mongodbreceiver/testdata/integration/expected.4_0.yaml index 126bf4836318e..c07177631a18c 100644 --- a/receiver/mongodbreceiver/testdata/integration/expected.4_0.yaml +++ b/receiver/mongodbreceiver/testdata/integration/expected.4_0.yaml @@ -145,7 +145,52 @@ resourceMetrics: - asInt: "100" startTimeUnixNano: "1000000" timeUnixNano: "2000000" - unit: '{query}/s}' + unit: '{query}/s}' + - description: The total number of insertions per second. + name: mongodb.inserts_per_sec + gauge: + aggregationTemporality: 2 + dataPoints: + - asInt: "100" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: '{insert}/s}' + - description: The total number of commands per second. + name: mongodb.commands_per_sec + gauge: + aggregationTemporality: 2 + dataPoints: + - asInt: "100" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: '{command}/s}' + - description: The total number of getmores per second. + name: mongodb.getmores_per_sec + gauge: + aggregationTemporality: 2 + dataPoints: + - asInt: "100" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: '{getmore}/s}' + - description: The total number of deletes per second. 
+ name: mongodb.deletes_per_sec + gauge: + aggregationTemporality: 2 + dataPoints: + - asInt: "100" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: '{delete}/s' + - description: The number of updates executed per second. + name: mongodb.updates_per_sec + gauge: + aggregationTemporality: 2 + dataPoints: + - asInt: "100" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: '{update}/s' - description: The total time spent performing operations. name: mongodb.operation.time sum: diff --git a/receiver/mongodbreceiver/testdata/integration/expected.4_4lpu.yaml b/receiver/mongodbreceiver/testdata/integration/expected.4_4lpu.yaml index df704e02a73e8..129ab977a1d28 100644 --- a/receiver/mongodbreceiver/testdata/integration/expected.4_4lpu.yaml +++ b/receiver/mongodbreceiver/testdata/integration/expected.4_4lpu.yaml @@ -145,7 +145,52 @@ resourceMetrics: - asInt: "100" startTimeUnixNano: "1000000" timeUnixNano: "2000000" - unit: '{query}/s}' + unit: '{query}/s' + - description: The number of insertions executed per second. + name: mongodb.inserts_per_sec + gauge: + aggregationTemporality: 2 + dataPoints: + - asInt: "100" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: '{insert}/s' + - description: The number of commands executed per second. + name: mongodb.commands_per_sec + gauge: + aggregationTemporality: 2 + dataPoints: + - asInt: "100" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: '{command}/s' + - description: The number of getmores executed per second. + name: mongodb.getmores_per_sec + gauge: + aggregationTemporality: 2 + dataPoints: + - asInt: "100" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: '{getmore}/s' + - description: The number of deletes executed per second. + name: mongodb.deletes_per_sec + gauge: + aggregationTemporality: 2 + dataPoints: + - asInt: "100" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: '{delete}/s' + - description: The number of updates executed per second. + name: mongodb.updates_per_sec + gauge: + aggregationTemporality: 2 + dataPoints: + - asInt: "100" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: '{update}/s' - description: The total time spent performing operations. name: mongodb.operation.time sum: diff --git a/receiver/mongodbreceiver/testdata/integration/expected.5_0.yaml b/receiver/mongodbreceiver/testdata/integration/expected.5_0.yaml index 18c49fb942ef0..6c72066ad593b 100644 --- a/receiver/mongodbreceiver/testdata/integration/expected.5_0.yaml +++ b/receiver/mongodbreceiver/testdata/integration/expected.5_0.yaml @@ -145,7 +145,52 @@ resourceMetrics: - asInt: "100" startTimeUnixNano: "1000000" timeUnixNano: "2000000" - unit: '{query}/s}' + unit: '{query}/s' + - description: The number of insertions executed per second. + name: mongodb.inserts_per_sec + gauge: + aggregationTemporality: 2 + dataPoints: + - asInt: "100" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: '{insert}/s' + - description: The number of commands executed per second. + name: mongodb.commands_per_sec + gauge: + aggregationTemporality: 2 + dataPoints: + - asInt: "100" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: '{command}/s' + - description: The number of getmores executed per second. + name: mongodb.getmores_per_sec + gauge: + aggregationTemporality: 2 + dataPoints: + - asInt: "100" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: '{getmore}/s' + - description: The number of deletes executed per second.
+ name: mongodb.deletes_per_sec + gauge: + aggregationTemporality: 2 + dataPoints: + - asInt: "100" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: '{delete}/s' + - description: The number of updates executed per second. + name: mongodb.updates_per_sec + gauge: + aggregationTemporality: 2 + dataPoints: + - asInt: "100" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: '{update}/s' - description: The total time spent performing operations. name: mongodb.operation.time sum: diff --git a/receiver/mongodbreceiver/testdata/scraper/expected.yaml b/receiver/mongodbreceiver/testdata/scraper/expected.yaml index 0c415f02a940e..8019d00cd8279 100644 --- a/receiver/mongodbreceiver/testdata/scraper/expected.yaml +++ b/receiver/mongodbreceiver/testdata/scraper/expected.yaml @@ -157,6 +157,51 @@ resourceMetrics: startTimeUnixNano: "1000000" timeUnixNano: "2000000" unit: '{query}/s}' + - description: The number of insertions executed per second. + name: mongodb.inserts_per_sec + gauge: + aggregationTemporality: 2 + dataPoints: + - asInt: "100" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: '{insert}/s' + - description: The number of commands executed per second. + name: mongodb.commands_per_sec + gauge: + aggregationTemporality: 2 + dataPoints: + - asInt: "100" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: '{command}/s' + - description: The number of getmores executed per second. + name: mongodb.getmores_per_sec + gauge: + aggregationTemporality: 2 + dataPoints: + - asInt: "100" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: '{getmore}/s' + - description: The number of deletes executed per second. + name: mongodb.deletes_per_sec + gauge: + aggregationTemporality: 2 + dataPoints: + - asInt: "100" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: '{delete}/s' + - description: The number of updates executed per second. + name: mongodb.updates_per_sec + gauge: + aggregationTemporality: 2 + dataPoints: + - asInt: "100" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: '{update}/s' - description: The latency of operations. gauge: dataPoints: diff --git a/receiver/mongodbreceiver/testdata/scraper/partial_scrape.yaml b/receiver/mongodbreceiver/testdata/scraper/partial_scrape.yaml index 4272c815efaea..b792d4c0c2360 100644 --- a/receiver/mongodbreceiver/testdata/scraper/partial_scrape.yaml +++ b/receiver/mongodbreceiver/testdata/scraper/partial_scrape.yaml @@ -156,7 +156,52 @@ resourceMetrics: - asInt: "100" startTimeUnixNano: "1000000" timeUnixNano: "2000000" - unit: '{query}/s}' + unit: '{query}/s' + - description: The number of insertions executed per second. + name: mongodb.inserts_per_sec + gauge: + aggregationTemporality: 2 + dataPoints: + - asInt: "100" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: '{insert}/s' + - description: The number of commands executed per second. + name: mongodb.commands_per_sec + gauge: + aggregationTemporality: 2 + dataPoints: + - asInt: "100" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: '{command}/s' + - description: The number of getmores executed per second. + name: mongodb.getmores_per_sec + gauge: + aggregationTemporality: 2 + dataPoints: + - asInt: "100" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: '{getmore}/s' + - description: The number of deletes executed per second.
+ name: mongodb.deletes_per_sec + gauge: + aggregationTemporality: 2 + dataPoints: + - asInt: "100" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: '{delete}/s' + - description: The number of updates executed per second. + name: mongodb.updates_per_sec + gauge: + aggregationTemporality: 2 + dataPoints: + - asInt: "100" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: '{update}/s' - description: The latency of operations. gauge: dataPoints: From 802a27c9e64eec191196caa6b034a6ce17936cdb Mon Sep 17 00:00:00 2001 From: Tim Chan Date: Fri, 27 Dec 2024 12:21:28 -0800 Subject: [PATCH 03/19] Added replica metrics and routing logic for replica/secondary instances --- receiver/mongodbreceiver/client.go | 5 +- receiver/mongodbreceiver/client_test.go | 8 + receiver/mongodbreceiver/config.go | 24 +- receiver/mongodbreceiver/config_test.go | 4 +- receiver/mongodbreceiver/documentation.md | 86 ++++- .../internal/metadata/generated_config.go | 26 +- .../metadata/generated_config_test.go | 12 + .../internal/metadata/generated_metrics.go | 352 +++++++++++++++++- .../metadata/generated_metrics_test.go | 107 +++++- .../internal/metadata/testdata/config.yaml | 24 ++ receiver/mongodbreceiver/metadata.yaml | 50 ++- receiver/mongodbreceiver/metrics.go | 98 ++++- receiver/mongodbreceiver/scraper.go | 197 +++++++++- receiver/mongodbreceiver/scraper_test.go | 18 + .../testdata/integration/expected.4_0.yaml | 56 ++- .../testdata/integration/expected.4_4lpu.yaml | 56 ++- .../testdata/integration/expected.5_0.yaml | 56 ++- .../testdata/scraper/expected.yaml | 56 ++- .../testdata/scraper/partial_scrape.yaml | 56 ++- 19 files changed, 1233 insertions(+), 58 deletions(-) diff --git a/receiver/mongodbreceiver/client.go b/receiver/mongodbreceiver/client.go index 1cf92a5a2c792..a192d83246377 100644 --- a/receiver/mongodbreceiver/client.go +++ b/receiver/mongodbreceiver/client.go @@ -26,6 +26,7 @@ type client interface { DBStats(ctx context.Context, DBName string) (bson.M, error) TopStats(ctx context.Context) (bson.M, error) IndexStats(ctx context.Context, DBName, collectionName string) ([]bson.M, error) + RunCommand(ctx context.Context, db string, command bson.M) (bson.M, error) } // mongodbClient is a mongodb metric scraper client @@ -37,8 +38,8 @@ type mongodbClient struct { // newClient creates a new client to connect and query mongo for the // mongodbreceiver -func newClient(ctx context.Context, config *Config, logger *zap.Logger) (client, error) { - driver, err := mongo.Connect(ctx, config.ClientOptions()) +var newClient = func(ctx context.Context, config *Config, logger *zap.Logger, secondary bool) (client, error) { + driver, err := mongo.Connect(ctx, config.ClientOptions(secondary)) if err != nil { return nil, err } diff --git a/receiver/mongodbreceiver/client_test.go b/receiver/mongodbreceiver/client_test.go index a2808b3f13327..4bc9a7ba0d1f7 100644 --- a/receiver/mongodbreceiver/client_test.go +++ b/receiver/mongodbreceiver/client_test.go @@ -68,6 +68,14 @@ func (fc *fakeClient) IndexStats(ctx context.Context, dbName, collectionName str return args.Get(0).([]bson.M), args.Error(1) } +func (fc *fakeClient) RunCommand(ctx context.Context, db string, command bson.M) (bson.M, error) { + args := fc.Called(ctx, db, command) + if args.Get(0) == nil { + return nil, args.Error(1) + } + return args.Get(0).(bson.M), args.Error(1) +} + func TestListDatabaseNames(t *testing.T) { mont := mtest.New(t, mtest.NewOptions().ClientType(mtest.Mock)) diff --git a/receiver/mongodbreceiver/config.go
b/receiver/mongodbreceiver/config.go index 9c2c622c1b4a3..782e3bbe57b5b 100644 --- a/receiver/mongodbreceiver/config.go +++ b/receiver/mongodbreceiver/config.go @@ -11,6 +11,7 @@ import ( "time" "go.mongodb.org/mongo-driver/mongo/options" + "go.mongodb.org/mongo-driver/mongo/readpref" "go.opentelemetry.io/collector/config/confignet" "go.opentelemetry.io/collector/config/configopaque" "go.opentelemetry.io/collector/config/configtls" @@ -58,7 +59,28 @@ func (c *Config) Validate() error { return err } -func (c *Config) ClientOptions() *options.ClientOptions { +func (c *Config) ClientOptions(secondary bool) *options.ClientOptions { + if secondary { + // For secondary nodes, create a direct connection + clientOptions := options.Client(). + SetHosts(c.hostlist()). + SetDirect(true). + SetReadPreference(readpref.SecondaryPreferred()) + + if c.Timeout > 0 { + clientOptions.SetConnectTimeout(c.Timeout) + } + + if c.Username != "" && c.Password != "" { + clientOptions.SetAuth(options.Credential{ + Username: c.Username, + Password: string(c.Password), + }) + } + + return clientOptions + } + clientOptions := options.Client() connString := fmt.Sprintf("mongodb://%s", strings.Join(c.hostlist(), ",")) clientOptions.ApplyURI(connString) diff --git a/receiver/mongodbreceiver/config_test.go b/receiver/mongodbreceiver/config_test.go index 011283d5c4761..2e171926ec415 100644 --- a/receiver/mongodbreceiver/config_test.go +++ b/receiver/mongodbreceiver/config_test.go @@ -165,7 +165,7 @@ func TestOptions(t *testing.T) { ReplicaSet: "rs-1", } - clientOptions := cfg.ClientOptions() + clientOptions := cfg.ClientOptions(false) require.Equal(t, clientOptions.Auth.Username, cfg.Username) require.Equal(t, clientOptions.ConnectTimeout.Milliseconds(), @@ -191,7 +191,7 @@ func TestOptionsTLS(t *testing.T) { }, }, } - opts := cfg.ClientOptions() + opts := cfg.ClientOptions(false) require.NotNil(t, opts.TLSConfig) } diff --git a/receiver/mongodbreceiver/documentation.md b/receiver/mongodbreceiver/documentation.md index d4cbe0fb6fdf7..5fe2b9f5af97b 100644 --- a/receiver/mongodbreceiver/documentation.md +++ b/receiver/mongodbreceiver/documentation.md @@ -36,7 +36,7 @@ The number of collections. ### mongodb.commands_per_sec -The number of queries executed per second. +The number of commands executed per second. | Unit | Metric Type | Value Type | | ---- | ----------- | ---------- | @@ -90,7 +90,7 @@ The number of existing databases. ### mongodb.deletes_per_sec -The number of queries executed per second. +The number of deletes executed per second. | Unit | Metric Type | Value Type | | ---- | ----------- | ---------- | @@ -120,7 +120,7 @@ The number of extents. ### mongodb.getmores_per_sec -The number of queries executed per second. +The number of getmores executed per second. | Unit | Metric Type | Value Type | | ---- | ----------- | ---------- | @@ -166,7 +166,7 @@ Sum of the space allocated to all indexes in the database, including free index ### mongodb.inserts_per_sec -The number of queries executed per second. +The number of insertions executed per second. | Unit | Metric Type | Value Type | | ---- | ----------- | ---------- | @@ -232,6 +232,20 @@ The number of operations executed. | ---- | ----------- | ------ | | operation | The MongoDB operation being counted. | Str: ``insert``, ``query``, ``update``, ``delete``, ``getmore``, ``command`` | +### mongodb.operation.repl.count + +The number of replicated operations executed. 
+ +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| {operations} | Sum | Int | Cumulative | true | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| operation | The MongoDB operation being counted. | Str: ``insert``, ``query``, ``update``, ``delete``, ``getmore``, ``command`` | + ### mongodb.operation.time The total time spent performing operations. @@ -254,6 +268,54 @@ The number of queries executed per second. | ---- | ----------- | ---------- | | {query}/s | Gauge | Double | +### mongodb.repl_commands_per_sec + +The number of replicated commands executed per second. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| {command}/s | Gauge | Double | + +### mongodb.repl_deletes_per_sec + +The number of replicated deletes executed per second. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| {delete}/s | Gauge | Double | + +### mongodb.repl_getmores_per_sec + +The number of replicated getmores executed per second. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| {getmore}/s | Gauge | Double | + +### mongodb.repl_inserts_per_sec + +The number of replicated insertions executed per second. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| {insert}/s | Gauge | Double | + +### mongodb.repl_queries_per_sec + +The number of replicated queries executed per second. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| {query}/s | Gauge | Double | + +### mongodb.repl_updates_per_sec + +The number of replicated updates executed per second. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| {update}/s | Gauge | Double | + ### mongodb.session.count The total number of active sessions. @@ -274,7 +336,7 @@ If collection data is compressed it reflects the compressed size. ### mongodb.updates_per_sec -The number of queries executed per second. +The number of updates executed per second. | Unit | Metric Type | Value Type | | ---- | ----------- | ---------- | @@ -374,20 +436,6 @@ The latency of operations. | ---- | ----------- | ------ | | operation | The MongoDB operation with regards to latency | Str: ``read``, ``write``, ``command`` | -### mongodb.operation.repl.count - -The number of replicated operations executed. - -| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | -| ---- | ----------- | ---------- | ----------------------- | --------- | -| {operations} | Sum | Int | Cumulative | true | - -#### Attributes - -| Name | Description | Values | -| ---- | ----------- | ------ | -| operation | The MongoDB operation being counted. | Str: ``insert``, ``query``, ``update``, ``delete``, ``getmore``, ``command`` | - ### mongodb.uptime The amount of time that the server has been running. 
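Reviewer note: the metrics.go hunk in PATCH 01 calls s.recordOperationPerSecond(now, operationVal, val) and stores prevCounts/prevTimestamp, but the body of that function is not visible in this excerpt. The sketch below shows the presumed rate math, assuming the opcounters (and their opcountersRepl counterparts) are cumulative counters sampled once per scrape; ratePerSec and both timestamp parameters are illustrative names, not code from this series.

```go
package main

import (
	"fmt"
	"time"

	"go.opentelemetry.io/collector/pdata/pcommon"
)

// ratePerSec derives a Telegraf-style per-second rate from two cumulative
// counter readings taken at consecutive scrapes. ok is false on the first
// scrape (no previous sample yet) and after a counter reset or restart,
// in which case no data point should be recorded.
func ratePerSec(prev, curr int64, prevTS, nowTS pcommon.Timestamp) (rate float64, ok bool) {
	if nowTS <= prevTS || curr < prev {
		return 0, false
	}
	elapsed := time.Duration(nowTS - prevTS).Seconds()
	return float64(curr-prev) / elapsed, true
}

func main() {
	// Two scrapes 10s apart observing the "query" opcounter at 100 and then
	// 250 yield a rate of 15 queries per second.
	t0 := pcommon.NewTimestampFromTime(time.Now())
	t1 := pcommon.NewTimestampFromTime(time.Now().Add(10 * time.Second))
	if r, ok := ratePerSec(100, 250, t0, t1); ok {
		fmt.Printf("%.1f {query}/s\n", r)
	}
}
```

Because each *_per_sec metric is a gauge, only the latest computed rate is carried, matching the single data point per metric in the golden files above.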
diff --git a/receiver/mongodbreceiver/internal/metadata/generated_config.go b/receiver/mongodbreceiver/internal/metadata/generated_config.go index c86bf2c342200..e066dad0fc6e4 100644 --- a/receiver/mongodbreceiver/internal/metadata/generated_config.go +++ b/receiver/mongodbreceiver/internal/metadata/generated_config.go @@ -60,6 +60,12 @@ type MetricsConfig struct { MongodbOperationReplCount MetricConfig `mapstructure:"mongodb.operation.repl.count"` MongodbOperationTime MetricConfig `mapstructure:"mongodb.operation.time"` MongodbQueriesPerSec MetricConfig `mapstructure:"mongodb.queries_per_sec"` + MongodbReplCommandsPerSec MetricConfig `mapstructure:"mongodb.repl_commands_per_sec"` + MongodbReplDeletesPerSec MetricConfig `mapstructure:"mongodb.repl_deletes_per_sec"` + MongodbReplGetmoresPerSec MetricConfig `mapstructure:"mongodb.repl_getmores_per_sec"` + MongodbReplInsertsPerSec MetricConfig `mapstructure:"mongodb.repl_inserts_per_sec"` + MongodbReplQueriesPerSec MetricConfig `mapstructure:"mongodb.repl_queries_per_sec"` + MongodbReplUpdatesPerSec MetricConfig `mapstructure:"mongodb.repl_updates_per_sec"` MongodbSessionCount MetricConfig `mapstructure:"mongodb.session.count"` MongodbStorageSize MetricConfig `mapstructure:"mongodb.storage.size"` MongodbUpdatesPerSec MetricConfig `mapstructure:"mongodb.updates_per_sec"` @@ -156,7 +162,7 @@ func DefaultMetricsConfig() MetricsConfig { Enabled: false, }, MongodbOperationReplCount: MetricConfig{ - Enabled: false, + Enabled: true, }, MongodbOperationTime: MetricConfig{ Enabled: true, @@ -164,6 +170,24 @@ func DefaultMetricsConfig() MetricsConfig { MongodbQueriesPerSec: MetricConfig{ Enabled: true, }, + MongodbReplCommandsPerSec: MetricConfig{ + Enabled: true, + }, + MongodbReplDeletesPerSec: MetricConfig{ + Enabled: true, + }, + MongodbReplGetmoresPerSec: MetricConfig{ + Enabled: true, + }, + MongodbReplInsertsPerSec: MetricConfig{ + Enabled: true, + }, + MongodbReplQueriesPerSec: MetricConfig{ + Enabled: true, + }, + MongodbReplUpdatesPerSec: MetricConfig{ + Enabled: true, + }, MongodbSessionCount: MetricConfig{ Enabled: true, }, diff --git a/receiver/mongodbreceiver/internal/metadata/generated_config_test.go b/receiver/mongodbreceiver/internal/metadata/generated_config_test.go index 866c5b94d27e6..62eaa8e39a17a 100644 --- a/receiver/mongodbreceiver/internal/metadata/generated_config_test.go +++ b/receiver/mongodbreceiver/internal/metadata/generated_config_test.go @@ -57,6 +57,12 @@ func TestMetricsBuilderConfig(t *testing.T) { MongodbOperationReplCount: MetricConfig{Enabled: true}, MongodbOperationTime: MetricConfig{Enabled: true}, MongodbQueriesPerSec: MetricConfig{Enabled: true}, + MongodbReplCommandsPerSec: MetricConfig{Enabled: true}, + MongodbReplDeletesPerSec: MetricConfig{Enabled: true}, + MongodbReplGetmoresPerSec: MetricConfig{Enabled: true}, + MongodbReplInsertsPerSec: MetricConfig{Enabled: true}, + MongodbReplQueriesPerSec: MetricConfig{Enabled: true}, + MongodbReplUpdatesPerSec: MetricConfig{Enabled: true}, MongodbSessionCount: MetricConfig{Enabled: true}, MongodbStorageSize: MetricConfig{Enabled: true}, MongodbUpdatesPerSec: MetricConfig{Enabled: true}, @@ -105,6 +111,12 @@ func TestMetricsBuilderConfig(t *testing.T) { MongodbOperationReplCount: MetricConfig{Enabled: false}, MongodbOperationTime: MetricConfig{Enabled: false}, MongodbQueriesPerSec: MetricConfig{Enabled: false}, + MongodbReplCommandsPerSec: MetricConfig{Enabled: false}, + MongodbReplDeletesPerSec: MetricConfig{Enabled: false}, + MongodbReplGetmoresPerSec: 
MetricConfig{Enabled: false}, + MongodbReplInsertsPerSec: MetricConfig{Enabled: false}, + MongodbReplQueriesPerSec: MetricConfig{Enabled: false}, + MongodbReplUpdatesPerSec: MetricConfig{Enabled: false}, MongodbSessionCount: MetricConfig{Enabled: false}, MongodbStorageSize: MetricConfig{Enabled: false}, MongodbUpdatesPerSec: MetricConfig{Enabled: false}, diff --git a/receiver/mongodbreceiver/internal/metadata/generated_metrics.go b/receiver/mongodbreceiver/internal/metadata/generated_metrics.go index f2c41e8d8330c..a58a753d283c9 100644 --- a/receiver/mongodbreceiver/internal/metadata/generated_metrics.go +++ b/receiver/mongodbreceiver/internal/metadata/generated_metrics.go @@ -363,7 +363,7 @@ type metricMongodbCommandsPerSec struct { // init fills mongodb.commands_per_sec metric with initial data. func (m *metricMongodbCommandsPerSec) init() { m.data.SetName("mongodb.commands_per_sec") - m.data.SetDescription("The number of queries executed per second.") + m.data.SetDescription("The number of commands executed per second.") m.data.SetUnit("{command}/s") m.data.SetEmptyGauge() } @@ -669,7 +669,7 @@ type metricMongodbDeletesPerSec struct { // init fills mongodb.deletes_per_sec metric with initial data. func (m *metricMongodbDeletesPerSec) init() { m.data.SetName("mongodb.deletes_per_sec") - m.data.SetDescription("The number of queries executed per second.") + m.data.SetDescription("The number of deletes executed per second.") m.data.SetUnit("{delete}/s") m.data.SetEmptyGauge() } @@ -822,7 +822,7 @@ type metricMongodbGetmoresPerSec struct { // init fills mongodb.getmores_per_sec metric with initial data. func (m *metricMongodbGetmoresPerSec) init() { m.data.SetName("mongodb.getmores_per_sec") - m.data.SetDescription("The number of queries executed per second.") + m.data.SetDescription("The number of getmores executed per second.") m.data.SetUnit("{getmore}/s") m.data.SetEmptyGauge() } @@ -1126,7 +1126,7 @@ type metricMongodbInsertsPerSec struct { // init fills mongodb.inserts_per_sec metric with initial data. func (m *metricMongodbInsertsPerSec) init() { m.data.SetName("mongodb.inserts_per_sec") - m.data.SetDescription("The number of queries executed per second.") + m.data.SetDescription("The number of insertions executed per second.") m.data.SetUnit("{insert}/s") m.data.SetEmptyGauge() } @@ -1898,6 +1898,300 @@ func newMetricMongodbQueriesPerSec(cfg MetricConfig) metricMongodbQueriesPerSec return m } +type metricMongodbReplCommandsPerSec struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills mongodb.repl_commands_per_sec metric with initial data. +func (m *metricMongodbReplCommandsPerSec) init() { + m.data.SetName("mongodb.repl_commands_per_sec") + m.data.SetDescription("The number of replicated commands executed per second.") + m.data.SetUnit("{command}/s") + m.data.SetEmptyGauge() +} + +func (m *metricMongodbReplCommandsPerSec) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetDoubleValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. 
+func (m *metricMongodbReplCommandsPerSec) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricMongodbReplCommandsPerSec) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricMongodbReplCommandsPerSec(cfg MetricConfig) metricMongodbReplCommandsPerSec { + m := metricMongodbReplCommandsPerSec{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricMongodbReplDeletesPerSec struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills mongodb.repl_deletes_per_sec metric with initial data. +func (m *metricMongodbReplDeletesPerSec) init() { + m.data.SetName("mongodb.repl_deletes_per_sec") + m.data.SetDescription("The number of replicated deletes executed per second.") + m.data.SetUnit("{delete}/s") + m.data.SetEmptyGauge() +} + +func (m *metricMongodbReplDeletesPerSec) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetDoubleValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricMongodbReplDeletesPerSec) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricMongodbReplDeletesPerSec) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricMongodbReplDeletesPerSec(cfg MetricConfig) metricMongodbReplDeletesPerSec { + m := metricMongodbReplDeletesPerSec{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricMongodbReplGetmoresPerSec struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills mongodb.repl_getmores_per_sec metric with initial data. +func (m *metricMongodbReplGetmoresPerSec) init() { + m.data.SetName("mongodb.repl_getmores_per_sec") + m.data.SetDescription("The number of replicated getmores executed per second.") + m.data.SetUnit("{getmore}/s") + m.data.SetEmptyGauge() +} + +func (m *metricMongodbReplGetmoresPerSec) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetDoubleValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. 
+func (m *metricMongodbReplGetmoresPerSec) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricMongodbReplGetmoresPerSec) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricMongodbReplGetmoresPerSec(cfg MetricConfig) metricMongodbReplGetmoresPerSec { + m := metricMongodbReplGetmoresPerSec{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricMongodbReplInsertsPerSec struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills mongodb.repl_inserts_per_sec metric with initial data. +func (m *metricMongodbReplInsertsPerSec) init() { + m.data.SetName("mongodb.repl_inserts_per_sec") + m.data.SetDescription("The number of replicated insertions executed per second.") + m.data.SetUnit("{insert}/s") + m.data.SetEmptyGauge() +} + +func (m *metricMongodbReplInsertsPerSec) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetDoubleValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricMongodbReplInsertsPerSec) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricMongodbReplInsertsPerSec) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricMongodbReplInsertsPerSec(cfg MetricConfig) metricMongodbReplInsertsPerSec { + m := metricMongodbReplInsertsPerSec{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricMongodbReplQueriesPerSec struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills mongodb.repl_queries_per_sec metric with initial data. +func (m *metricMongodbReplQueriesPerSec) init() { + m.data.SetName("mongodb.repl_queries_per_sec") + m.data.SetDescription("The number of replicated queries executed per second.") + m.data.SetUnit("{query}/s") + m.data.SetEmptyGauge() +} + +func (m *metricMongodbReplQueriesPerSec) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetDoubleValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. 
+func (m *metricMongodbReplQueriesPerSec) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricMongodbReplQueriesPerSec) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricMongodbReplQueriesPerSec(cfg MetricConfig) metricMongodbReplQueriesPerSec { + m := metricMongodbReplQueriesPerSec{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricMongodbReplUpdatesPerSec struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills mongodb.repl_updates_per_sec metric with initial data. +func (m *metricMongodbReplUpdatesPerSec) init() { + m.data.SetName("mongodb.repl_updates_per_sec") + m.data.SetDescription("The number of replicated updates executed per second.") + m.data.SetUnit("{update}/s") + m.data.SetEmptyGauge() +} + +func (m *metricMongodbReplUpdatesPerSec) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetDoubleValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricMongodbReplUpdatesPerSec) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricMongodbReplUpdatesPerSec) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricMongodbReplUpdatesPerSec(cfg MetricConfig) metricMongodbReplUpdatesPerSec { + m := metricMongodbReplUpdatesPerSec{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + type metricMongodbSessionCount struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. @@ -2009,7 +2303,7 @@ type metricMongodbUpdatesPerSec struct { // init fills mongodb.updates_per_sec metric with initial data. 
func (m *metricMongodbUpdatesPerSec) init() { m.data.SetName("mongodb.updates_per_sec") - m.data.SetDescription("The number of queries executed per second.") + m.data.SetDescription("The number of updates executed per second.") m.data.SetUnit("{update}/s") m.data.SetEmptyGauge() } @@ -2142,6 +2436,12 @@ type MetricsBuilder struct { metricMongodbOperationReplCount metricMongodbOperationReplCount metricMongodbOperationTime metricMongodbOperationTime metricMongodbQueriesPerSec metricMongodbQueriesPerSec + metricMongodbReplCommandsPerSec metricMongodbReplCommandsPerSec + metricMongodbReplDeletesPerSec metricMongodbReplDeletesPerSec + metricMongodbReplGetmoresPerSec metricMongodbReplGetmoresPerSec + metricMongodbReplInsertsPerSec metricMongodbReplInsertsPerSec + metricMongodbReplQueriesPerSec metricMongodbReplQueriesPerSec + metricMongodbReplUpdatesPerSec metricMongodbReplUpdatesPerSec metricMongodbSessionCount metricMongodbSessionCount metricMongodbStorageSize metricMongodbStorageSize metricMongodbUpdatesPerSec metricMongodbUpdatesPerSec @@ -2204,6 +2504,12 @@ func NewMetricsBuilder(mbc MetricsBuilderConfig, settings receiver.Settings, opt metricMongodbOperationReplCount: newMetricMongodbOperationReplCount(mbc.Metrics.MongodbOperationReplCount), metricMongodbOperationTime: newMetricMongodbOperationTime(mbc.Metrics.MongodbOperationTime), metricMongodbQueriesPerSec: newMetricMongodbQueriesPerSec(mbc.Metrics.MongodbQueriesPerSec), + metricMongodbReplCommandsPerSec: newMetricMongodbReplCommandsPerSec(mbc.Metrics.MongodbReplCommandsPerSec), + metricMongodbReplDeletesPerSec: newMetricMongodbReplDeletesPerSec(mbc.Metrics.MongodbReplDeletesPerSec), + metricMongodbReplGetmoresPerSec: newMetricMongodbReplGetmoresPerSec(mbc.Metrics.MongodbReplGetmoresPerSec), + metricMongodbReplInsertsPerSec: newMetricMongodbReplInsertsPerSec(mbc.Metrics.MongodbReplInsertsPerSec), + metricMongodbReplQueriesPerSec: newMetricMongodbReplQueriesPerSec(mbc.Metrics.MongodbReplQueriesPerSec), + metricMongodbReplUpdatesPerSec: newMetricMongodbReplUpdatesPerSec(mbc.Metrics.MongodbReplUpdatesPerSec), metricMongodbSessionCount: newMetricMongodbSessionCount(mbc.Metrics.MongodbSessionCount), metricMongodbStorageSize: newMetricMongodbStorageSize(mbc.Metrics.MongodbStorageSize), metricMongodbUpdatesPerSec: newMetricMongodbUpdatesPerSec(mbc.Metrics.MongodbUpdatesPerSec), @@ -2330,6 +2636,12 @@ func (mb *MetricsBuilder) EmitForResource(options ...ResourceMetricsOption) { mb.metricMongodbOperationReplCount.emit(ils.Metrics()) mb.metricMongodbOperationTime.emit(ils.Metrics()) mb.metricMongodbQueriesPerSec.emit(ils.Metrics()) + mb.metricMongodbReplCommandsPerSec.emit(ils.Metrics()) + mb.metricMongodbReplDeletesPerSec.emit(ils.Metrics()) + mb.metricMongodbReplGetmoresPerSec.emit(ils.Metrics()) + mb.metricMongodbReplInsertsPerSec.emit(ils.Metrics()) + mb.metricMongodbReplQueriesPerSec.emit(ils.Metrics()) + mb.metricMongodbReplUpdatesPerSec.emit(ils.Metrics()) mb.metricMongodbSessionCount.emit(ils.Metrics()) mb.metricMongodbStorageSize.emit(ils.Metrics()) mb.metricMongodbUpdatesPerSec.emit(ils.Metrics()) @@ -2525,6 +2837,36 @@ func (mb *MetricsBuilder) RecordMongodbQueriesPerSecDataPoint(ts pcommon.Timesta mb.metricMongodbQueriesPerSec.recordDataPoint(mb.startTime, ts, val) } +// RecordMongodbReplCommandsPerSecDataPoint adds a data point to mongodb.repl_commands_per_sec metric. 
+func (mb *MetricsBuilder) RecordMongodbReplCommandsPerSecDataPoint(ts pcommon.Timestamp, val float64) { + mb.metricMongodbReplCommandsPerSec.recordDataPoint(mb.startTime, ts, val) +} + +// RecordMongodbReplDeletesPerSecDataPoint adds a data point to mongodb.repl_deletes_per_sec metric. +func (mb *MetricsBuilder) RecordMongodbReplDeletesPerSecDataPoint(ts pcommon.Timestamp, val float64) { + mb.metricMongodbReplDeletesPerSec.recordDataPoint(mb.startTime, ts, val) +} + +// RecordMongodbReplGetmoresPerSecDataPoint adds a data point to mongodb.repl_getmores_per_sec metric. +func (mb *MetricsBuilder) RecordMongodbReplGetmoresPerSecDataPoint(ts pcommon.Timestamp, val float64) { + mb.metricMongodbReplGetmoresPerSec.recordDataPoint(mb.startTime, ts, val) +} + +// RecordMongodbReplInsertsPerSecDataPoint adds a data point to mongodb.repl_inserts_per_sec metric. +func (mb *MetricsBuilder) RecordMongodbReplInsertsPerSecDataPoint(ts pcommon.Timestamp, val float64) { + mb.metricMongodbReplInsertsPerSec.recordDataPoint(mb.startTime, ts, val) +} + +// RecordMongodbReplQueriesPerSecDataPoint adds a data point to mongodb.repl_queries_per_sec metric. +func (mb *MetricsBuilder) RecordMongodbReplQueriesPerSecDataPoint(ts pcommon.Timestamp, val float64) { + mb.metricMongodbReplQueriesPerSec.recordDataPoint(mb.startTime, ts, val) +} + +// RecordMongodbReplUpdatesPerSecDataPoint adds a data point to mongodb.repl_updates_per_sec metric. +func (mb *MetricsBuilder) RecordMongodbReplUpdatesPerSecDataPoint(ts pcommon.Timestamp, val float64) { + mb.metricMongodbReplUpdatesPerSec.recordDataPoint(mb.startTime, ts, val) +} + // RecordMongodbSessionCountDataPoint adds a data point to mongodb.session.count metric. func (mb *MetricsBuilder) RecordMongodbSessionCountDataPoint(ts pcommon.Timestamp, val int64) { mb.metricMongodbSessionCount.recordDataPoint(mb.startTime, ts, val) diff --git a/receiver/mongodbreceiver/internal/metadata/generated_metrics_test.go b/receiver/mongodbreceiver/internal/metadata/generated_metrics_test.go index 81b88931ae645..746ea5814298e 100644 --- a/receiver/mongodbreceiver/internal/metadata/generated_metrics_test.go +++ b/receiver/mongodbreceiver/internal/metadata/generated_metrics_test.go @@ -178,6 +178,7 @@ func TestMetricsBuilder(t *testing.T) { allMetricsCount++ mb.RecordMongodbOperationLatencyTimeDataPoint(ts, 1, AttributeOperationLatencyRead) + defaultMetricsCount++ allMetricsCount++ mb.RecordMongodbOperationReplCountDataPoint(ts, 1, AttributeOperationInsert) @@ -189,6 +190,30 @@ func TestMetricsBuilder(t *testing.T) { allMetricsCount++ mb.RecordMongodbQueriesPerSecDataPoint(ts, 1) + defaultMetricsCount++ + allMetricsCount++ + mb.RecordMongodbReplCommandsPerSecDataPoint(ts, 1) + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordMongodbReplDeletesPerSecDataPoint(ts, 1) + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordMongodbReplGetmoresPerSecDataPoint(ts, 1) + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordMongodbReplInsertsPerSecDataPoint(ts, 1) + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordMongodbReplQueriesPerSecDataPoint(ts, 1) + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordMongodbReplUpdatesPerSecDataPoint(ts, 1) + defaultMetricsCount++ allMetricsCount++ mb.RecordMongodbSessionCountDataPoint(ts, 1) @@ -266,7 +291,7 @@ func TestMetricsBuilder(t *testing.T) { validatedMetrics["mongodb.commands_per_sec"] = true assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "The number 
of queries executed per second.", ms.At(i).Description()) + assert.Equal(t, "The number of commands executed per second.", ms.At(i).Description()) assert.Equal(t, "{command}/s", ms.At(i).Unit()) dp := ms.At(i).Gauge().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) @@ -351,7 +376,7 @@ func TestMetricsBuilder(t *testing.T) { validatedMetrics["mongodb.deletes_per_sec"] = true assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "The number of queries executed per second.", ms.At(i).Description()) + assert.Equal(t, "The number of deletes executed per second.", ms.At(i).Description()) assert.Equal(t, "{delete}/s", ms.At(i).Unit()) dp := ms.At(i).Gauge().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) @@ -394,7 +419,7 @@ func TestMetricsBuilder(t *testing.T) { validatedMetrics["mongodb.getmores_per_sec"] = true assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "The number of queries executed per second.", ms.At(i).Description()) + assert.Equal(t, "The number of getmores executed per second.", ms.At(i).Description()) assert.Equal(t, "{getmore}/s", ms.At(i).Unit()) dp := ms.At(i).Gauge().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) @@ -477,7 +502,7 @@ func TestMetricsBuilder(t *testing.T) { validatedMetrics["mongodb.inserts_per_sec"] = true assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "The number of queries executed per second.", ms.At(i).Description()) + assert.Equal(t, "The number of insertions executed per second.", ms.At(i).Description()) assert.Equal(t, "{insert}/s", ms.At(i).Unit()) dp := ms.At(i).Gauge().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) @@ -715,6 +740,78 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, ts, dp.Timestamp()) assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType()) assert.InDelta(t, float64(1), dp.DoubleValue(), 0.01) + case "mongodb.repl_commands_per_sec": + assert.False(t, validatedMetrics["mongodb.repl_commands_per_sec"], "Found a duplicate in the metrics slice: mongodb.repl_commands_per_sec") + validatedMetrics["mongodb.repl_commands_per_sec"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "The number of replicated commands executed per second.", ms.At(i).Description()) + assert.Equal(t, "{command}/s", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType()) + assert.InDelta(t, float64(1), dp.DoubleValue(), 0.01) + case "mongodb.repl_deletes_per_sec": + assert.False(t, validatedMetrics["mongodb.repl_deletes_per_sec"], "Found a duplicate in the metrics slice: mongodb.repl_deletes_per_sec") + validatedMetrics["mongodb.repl_deletes_per_sec"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "The number of replicated deletes executed per second.", ms.At(i).Description()) + assert.Equal(t, "{delete}/s", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, 
pmetric.NumberDataPointValueTypeDouble, dp.ValueType()) + assert.InDelta(t, float64(1), dp.DoubleValue(), 0.01) + case "mongodb.repl_getmores_per_sec": + assert.False(t, validatedMetrics["mongodb.repl_getmores_per_sec"], "Found a duplicate in the metrics slice: mongodb.repl_getmores_per_sec") + validatedMetrics["mongodb.repl_getmores_per_sec"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "The number of replicated getmores executed per second.", ms.At(i).Description()) + assert.Equal(t, "{getmore}/s", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType()) + assert.InDelta(t, float64(1), dp.DoubleValue(), 0.01) + case "mongodb.repl_inserts_per_sec": + assert.False(t, validatedMetrics["mongodb.repl_inserts_per_sec"], "Found a duplicate in the metrics slice: mongodb.repl_inserts_per_sec") + validatedMetrics["mongodb.repl_inserts_per_sec"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "The number of replicated insertions executed per second.", ms.At(i).Description()) + assert.Equal(t, "{insert}/s", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType()) + assert.InDelta(t, float64(1), dp.DoubleValue(), 0.01) + case "mongodb.repl_queries_per_sec": + assert.False(t, validatedMetrics["mongodb.repl_queries_per_sec"], "Found a duplicate in the metrics slice: mongodb.repl_queries_per_sec") + validatedMetrics["mongodb.repl_queries_per_sec"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "The number of replicated queries executed per second.", ms.At(i).Description()) + assert.Equal(t, "{query}/s", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType()) + assert.InDelta(t, float64(1), dp.DoubleValue(), 0.01) + case "mongodb.repl_updates_per_sec": + assert.False(t, validatedMetrics["mongodb.repl_updates_per_sec"], "Found a duplicate in the metrics slice: mongodb.repl_updates_per_sec") + validatedMetrics["mongodb.repl_updates_per_sec"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "The number of replicated updates executed per second.", ms.At(i).Description()) + assert.Equal(t, "{update}/s", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType()) + assert.InDelta(t, float64(1), dp.DoubleValue(), 0.01) case "mongodb.session.count": assert.False(t, validatedMetrics["mongodb.session.count"], "Found a duplicate in the metrics slice: mongodb.session.count") validatedMetrics["mongodb.session.count"] = true @@ -748,7 +845,7 @@ func TestMetricsBuilder(t *testing.T) { validatedMetrics["mongodb.updates_per_sec"] = true assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) assert.Equal(t, 1, 
ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "The number of queries executed per second.", ms.At(i).Description()) + assert.Equal(t, "The number of updates executed per second.", ms.At(i).Description()) assert.Equal(t, "{update}/s", ms.At(i).Unit()) dp := ms.At(i).Gauge().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) diff --git a/receiver/mongodbreceiver/internal/metadata/testdata/config.yaml b/receiver/mongodbreceiver/internal/metadata/testdata/config.yaml index ef02b2c01b698..1eca8c5c371fe 100644 --- a/receiver/mongodbreceiver/internal/metadata/testdata/config.yaml +++ b/receiver/mongodbreceiver/internal/metadata/testdata/config.yaml @@ -65,6 +65,18 @@ all_set: enabled: true mongodb.queries_per_sec: enabled: true + mongodb.repl_commands_per_sec: + enabled: true + mongodb.repl_deletes_per_sec: + enabled: true + mongodb.repl_getmores_per_sec: + enabled: true + mongodb.repl_inserts_per_sec: + enabled: true + mongodb.repl_queries_per_sec: + enabled: true + mongodb.repl_updates_per_sec: + enabled: true mongodb.session.count: enabled: true mongodb.storage.size: @@ -146,6 +158,18 @@ none_set: enabled: false mongodb.queries_per_sec: enabled: false + mongodb.repl_commands_per_sec: + enabled: false + mongodb.repl_deletes_per_sec: + enabled: false + mongodb.repl_getmores_per_sec: + enabled: false + mongodb.repl_inserts_per_sec: + enabled: false + mongodb.repl_queries_per_sec: + enabled: false + mongodb.repl_updates_per_sec: + enabled: false mongodb.session.count: enabled: false mongodb.storage.size: diff --git a/receiver/mongodbreceiver/metadata.yaml b/receiver/mongodbreceiver/metadata.yaml index 6d8d6c2ac304a..a112ec8ed662d 100644 --- a/receiver/mongodbreceiver/metadata.yaml +++ b/receiver/mongodbreceiver/metadata.yaml @@ -197,7 +197,7 @@ metrics: mongodb.operation.repl.count: description: The number of replicated operations executed. unit: "{operations}" - enabled: false + enabled: true sum: aggregation_temporality: cumulative value_type: int @@ -406,6 +406,54 @@ metrics: value_type: double aggregation_temporality: delta monotonic: false + mongodb.repl_queries_per_sec: + description: The number of replicated queries executed per second. + unit: "{query}/s" + enabled: true + gauge: + value_type: double + aggregation_temporality: delta + monotonic: false + mongodb.repl_inserts_per_sec: + description: The number of replicated insertions executed per second. + unit: "{insert}/s" + enabled: true + gauge: + value_type: double + aggregation_temporality: delta + monotonic: false + mongodb.repl_commands_per_sec: + description: The number of replicated commands executed per second. + unit: "{command}/s" + enabled: true + gauge: + value_type: double + aggregation_temporality: delta + monotonic: false + mongodb.repl_getmores_per_sec: + description: The number of replicated getmores executed per second. + unit: "{getmore}/s" + enabled: true + gauge: + value_type: double + aggregation_temporality: delta + monotonic: false + mongodb.repl_deletes_per_sec: + description: The number of replicated deletes executed per second. + unit: "{delete}/s" + enabled: true + gauge: + value_type: double + aggregation_temporality: delta + monotonic: false + mongodb.repl_updates_per_sec: + description: The number of replicated updates executed per second. 
+ unit: "{update}/s" + enabled: true + gauge: + value_type: double + aggregation_temporality: delta + monotonic: false tests: config: diff --git a/receiver/mongodbreceiver/metrics.go b/receiver/mongodbreceiver/metrics.go index 86c0cb6b2fcd3..dcabb553d9eef 100644 --- a/receiver/mongodbreceiver/metrics.go +++ b/receiver/mongodbreceiver/metrics.go @@ -4,6 +4,7 @@ package mongodbreceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/mongodbreceiver" import ( + "context" "errors" "fmt" "reflect" @@ -12,6 +13,7 @@ import ( "go.mongodb.org/mongo-driver/bson" "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/receiver/scrapererror" + "go.uber.org/zap" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/mongodbreceiver/internal/metadata" ) @@ -220,7 +222,6 @@ func (s *mongodbScraper) recordLatencyTime(now pcommon.Timestamp, doc bson.M, er // Admin Stats func (s *mongodbScraper) recordOperations(now pcommon.Timestamp, doc bson.M, errs *scrapererror.ScrapeErrors) { - // Collect operation counts first currentCounts := make(map[string]int64) fmt.Println("SCRAPER COUNTS @@@@@@@@@@@@@@@@@@@@: ", s.prevCounts) @@ -246,15 +247,108 @@ func (s *mongodbScraper) recordOperations(now pcommon.Timestamp, doc bson.M, err } func (s *mongodbScraper) recordOperationsRepl(now pcommon.Timestamp, doc bson.M, errs *scrapererror.ScrapeErrors) { + var replDoc bson.M = doc // Default to primary doc + var highestInsertCount int64 = -1 + + fmt.Println("IN OPERATION REPL SECONDARY CLIENTS@@@@@@@@@@@@@ :", len(s.secondaryClients)) + + if len(s.secondaryClients) > 0 { + ctx := context.Background() + for _, secondaryClient := range s.secondaryClients { + status, err := secondaryClient.ServerStatus(ctx, "admin") + if err != nil { + s.logger.Debug("Failed to get secondary server status", zap.Error(err)) + continue + } + + // Debug full server status + s.logger.Debug("Full secondary server status", + zap.Any("host", status["host"]), + zap.Any("stateStr", status["stateStr"]), + zap.Any("ismaster", status["ismaster"]), + zap.Any("secondary", status["secondary"])) + + if opcountersRepl, ok := status["opcountersRepl"].(bson.M); ok { + s.logger.Debug("Got replication metrics", + zap.Any("raw", opcountersRepl), + zap.Any("host", status["host"])) + + if insertCount, ok := opcountersRepl["insert"].(int64); ok { + s.logger.Debug("Comparing insert counts", + zap.Int64("current_highest", highestInsertCount), + zap.Int64("this_node", insertCount), + zap.Any("host", status["host"])) + + if insertCount > highestInsertCount { + highestInsertCount = insertCount + replDoc = status + s.logger.Debug("Using these replication metrics", + zap.Int64("insert_count", insertCount), + zap.Any("host", status["host"])) + } + } + } + } + } + + // Rest of the existing recordOperationsRepl logic using replDoc + currentCounts := make(map[string]int64) + fmt.Println("IN OPERATION REPL SCRAPER PREV COUNTS@@@@@@@@@@@@@ :", s.prevReplCounts) for operationVal, operation := range metadata.MapAttributeOperation { metricPath := []string{"opcountersRepl", operationVal} metricName := "mongodb.operation.repl.count" - val, err := collectMetric(doc, metricPath) + val, err := collectMetric(replDoc, metricPath) if err != nil { + s.logger.Debug("Failed to collect metric", + zap.String("operation", operationVal), + zap.Error(err)) errs.AddPartial(1, fmt.Errorf(collectMetricWithAttributes, metricName, operationVal, err)) continue } + + s.logger.Debug("Collected repl metric", + 
zap.String("operation", operationVal), + zap.Int64("value", val)) + s.mb.RecordMongodbOperationReplCountDataPoint(now, val, operation) + + currentCounts[operationVal] = val + s.recordReplOperationPerSecond(now, operationVal, val) + } + s.logger.Debug("Updated repl counts", + zap.Any("previous", s.prevReplCounts), + zap.Any("current", currentCounts)) + + s.prevReplCounts = currentCounts + s.prevReplTimestamp = now +} + +func (s *mongodbScraper) recordReplOperationPerSecond(now pcommon.Timestamp, operationVal string, currentCount int64) { + if s.prevReplTimestamp > 0 { + timeDelta := float64(now-s.prevReplTimestamp) / 1e9 + if timeDelta > 0 { + if prevReplCount, exists := s.prevReplCounts[operationVal]; exists { + delta := currentCount - prevReplCount + queriesPerSec := float64(delta) / timeDelta + + switch operationVal { + case "query": + s.mb.RecordMongodbReplQueriesPerSecDataPoint(now, queriesPerSec) + case "insert": + s.mb.RecordMongodbReplInsertsPerSecDataPoint(now, queriesPerSec) + case "command": + s.mb.RecordMongodbReplCommandsPerSecDataPoint(now, queriesPerSec) + case "getmore": + s.mb.RecordMongodbReplGetmoresPerSecDataPoint(now, queriesPerSec) + case "delete": + s.mb.RecordMongodbReplDeletesPerSecDataPoint(now, queriesPerSec) + case "update": + s.mb.RecordMongodbReplUpdatesPerSecDataPoint(now, queriesPerSec) + default: + fmt.Printf("Unhandled repl operation: %s\n", operationVal) + } + } + } } } diff --git a/receiver/mongodbreceiver/scraper.go b/receiver/mongodbreceiver/scraper.go index f52519bfc87d8..a259f75cb4129 100644 --- a/receiver/mongodbreceiver/scraper.go +++ b/receiver/mongodbreceiver/scraper.go @@ -13,7 +13,9 @@ import ( "github.com/hashicorp/go-version" "go.mongodb.org/mongo-driver/bson" + "go.mongodb.org/mongo-driver/bson/primitive" "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/config/confignet" "go.opentelemetry.io/collector/featuregate" "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/pmetric" @@ -37,38 +39,120 @@ var ( ) type mongodbScraper struct { - logger *zap.Logger - config *Config - client client - mongoVersion *version.Version - mb *metadata.MetricsBuilder - prevTimestamp pcommon.Timestamp - prevCounts map[string]int64 + logger *zap.Logger + config *Config + client client + secondaryClients []client + mongoVersion *version.Version + mb *metadata.MetricsBuilder + prevTimestamp pcommon.Timestamp + prevReplTimestamp pcommon.Timestamp + prevCounts map[string]int64 + prevReplCounts map[string]int64 } func newMongodbScraper(settings receiver.Settings, config *Config) *mongodbScraper { return &mongodbScraper{ - logger: settings.Logger, - config: config, - mb: metadata.NewMetricsBuilder(config.MetricsBuilderConfig, settings), - mongoVersion: unknownVersion(), - prevTimestamp: pcommon.Timestamp(0), - prevCounts: make(map[string]int64), + logger: settings.Logger, + config: config, + mb: metadata.NewMetricsBuilder(config.MetricsBuilderConfig, settings), + mongoVersion: unknownVersion(), + prevTimestamp: pcommon.Timestamp(0), + prevReplTimestamp: pcommon.Timestamp(0), + prevCounts: make(map[string]int64), + prevReplCounts: make(map[string]int64), } } func (s *mongodbScraper) start(ctx context.Context, _ component.Host) error { - c, err := newClient(ctx, s.config, s.logger) + c, err := newClient(ctx, s.config, s.logger, false) if err != nil { return fmt.Errorf("create mongo client: %w", err) } s.client = c + s.logger.Debug("Primary client connected") + + // Find and connect to secondaries + 
+	secondaries, err := s.findSecondaryHosts(ctx)
+	if err != nil {
+		s.logger.Warn("failed to find secondary hosts", zap.Error(err))
+		return nil
+	}
+
+	s.logger.Debug("Found secondary hosts", zap.Strings("secondaries", secondaries))
+	for _, secondary := range secondaries {
+		secondaryConfig := *s.config // Copy primary config
+		secondaryConfig.Hosts = []confignet.TCPAddrConfig{
+			{
+				Endpoint: secondary,
+			},
+		}
+
+		s.logger.Debug("Attempting to connect to secondary", zap.String("host", secondary))
+		client, err := newClient(ctx, &secondaryConfig, s.logger, true)
+		if err != nil {
+			s.logger.Warn("failed to connect to secondary", zap.String("host", secondary), zap.Error(err))
+			continue
+		}
+		s.secondaryClients = append(s.secondaryClients, client)
+		s.logger.Info("Successfully connected to secondary", zap.String("host", secondary))
+	}
+
+	s.logger.Debug("Connected to secondaries", zap.Int("count", len(s.secondaryClients)))
 	return nil
 }
 
 func (s *mongodbScraper) shutdown(ctx context.Context) error {
+	var errs []error
+
 	if s.client != nil {
-		return s.client.Disconnect(ctx)
+		if err := s.client.Disconnect(ctx); err != nil {
+			errs = append(errs, err)
+		}
+	}
+
+	for _, client := range s.secondaryClients {
+		if err := client.Disconnect(ctx); err != nil {
+			errs = append(errs, err)
+		}
+	}
+
+	if len(errs) > 0 {
+		return fmt.Errorf("multiple disconnect errors: %v", errs)
 	}
 	return nil
 }
@@ -235,3 +319,86 @@ func serverAddressAndPort(serverStatus bson.M) (string, int64, error) {
 		return "", 0, fmt.Errorf("unexpected host format: %s", host)
 	}
 }
+
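+// findSecondaryHosts runs replSetGetStatus against the primary connection and
+// returns the "name" field of every member reported in state SECONDARY.
+// Arbiters and members in any other state are skipped.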
zap.String("host", host)) + +// if state == "SECONDARY" { +// s.logger.Info("Found secondary host", zap.String("host", host)) +// hosts = append(hosts, host) +// } +// } +// } +// } +// } + +// s.logger.Debug("Found secondary hosts", zap.Strings("hosts", hosts)) +// return hosts, nil +// } + +func (s *mongodbScraper) findSecondaryHosts(ctx context.Context) ([]string, error) { + result, err := s.client.RunCommand(ctx, "admin", bson.M{"replSetGetStatus": 1}) + if err != nil { + s.logger.Error("Failed to get replica set status", zap.Error(err)) + return nil, fmt.Errorf("failed to get replica set status: %w", err) + } + + members, ok := result["members"].(primitive.A) + if !ok { + return nil, fmt.Errorf("invalid members format") + } + + var hosts []string + for _, member := range members { + m, ok := member.(bson.M) + if !ok { + continue + } + + state, ok := m["stateStr"].(string) + if !ok { + continue + } + + name, ok := m["name"].(string) + if !ok { + continue + } + + // Only add actual secondaries, not arbiters or other states + if state == "SECONDARY" { + s.logger.Debug("Found secondary", + zap.String("host", name), + zap.String("state", state)) + hosts = append(hosts, name) + } + } + + if len(hosts) == 0 { + s.logger.Warn("No secondary hosts found in replica set") + } + + return hosts, nil +} diff --git a/receiver/mongodbreceiver/scraper_test.go b/receiver/mongodbreceiver/scraper_test.go index 859a63a1aaef6..7a25f54a35378 100644 --- a/receiver/mongodbreceiver/scraper_test.go +++ b/receiver/mongodbreceiver/scraper_test.go @@ -40,11 +40,29 @@ func TestScraperLifecycle(t *testing.T) { f := NewFactory() cfg := f.CreateDefaultConfig().(*Config) + mc := &fakeClient{} + // Mock the replica set status command to return an empty set + mc.On("RunCommand", mock.Anything, "admin", bson.M{"replSetGetStatus": 1}).Return(bson.M{ + "ok": 1, + "members": []interface{}{}, + }, nil) + mc.On("Disconnect", mock.Anything).Return(nil) + scraper := newMongodbScraper(receivertest.NewNopSettings(), cfg) + // Save original and replace with test version + originalNewClient := newClient + newClient = func(ctx context.Context, cfg *Config, logger *zap.Logger, secondary bool) (client, error) { + return mc, nil + } + defer func() { + newClient = originalNewClient + }() + require.NoError(t, scraper.start(context.Background(), componenttest.NewNopHost())) require.NoError(t, scraper.shutdown(context.Background())) require.Less(t, time.Since(now), 200*time.Millisecond, "component start and stop should be very fast") + mc.AssertExpectations(t) } var ( diff --git a/receiver/mongodbreceiver/testdata/integration/expected.4_0.yaml b/receiver/mongodbreceiver/testdata/integration/expected.4_0.yaml index c07177631a18c..2cee52c2b0139 100644 --- a/receiver/mongodbreceiver/testdata/integration/expected.4_0.yaml +++ b/receiver/mongodbreceiver/testdata/integration/expected.4_0.yaml @@ -190,7 +190,61 @@ resourceMetrics: - asInt: "100" startTimeUnixNano: "1000000" timeUnixNano: "2000000" - unit: '{update}/s}' + unit: '{update}/s}' + - description: The total number replicated of queries per second. + name: mongodb.repl_queries_per_sec + gauge: + aggregationTemporality: 2 + dataPoints: + - asInt: "100" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: '{query}/s}' + - description: The total number replicated of insertions per second. 
diff --git a/receiver/mongodbreceiver/testdata/integration/expected.4_0.yaml b/receiver/mongodbreceiver/testdata/integration/expected.4_0.yaml
index c07177631a18c..2cee52c2b0139 100644
--- a/receiver/mongodbreceiver/testdata/integration/expected.4_0.yaml
+++ b/receiver/mongodbreceiver/testdata/integration/expected.4_0.yaml
@@ -190,7 +190,61 @@ resourceMetrics:
       - asInt: "100"
         startTimeUnixNano: "1000000"
         timeUnixNano: "2000000"
-      unit: '{update}/s}'
+      unit: '{update}/s'
+    - description: The number of replicated queries executed per second.
+      name: mongodb.repl_queries_per_sec
+      gauge:
+        aggregationTemporality: 2
+        dataPoints:
+        - asInt: "100"
+          startTimeUnixNano: "1000000"
+          timeUnixNano: "2000000"
+      unit: '{query}/s'
+    - description: The number of replicated insertions executed per second.
+      name: mongodb.repl_inserts_per_sec
+      gauge:
+        aggregationTemporality: 2
+        dataPoints:
+        - asInt: "100"
+          startTimeUnixNano: "1000000"
+          timeUnixNano: "2000000"
+      unit: '{insert}/s'
+    - description: The number of replicated commands executed per second.
+      name: mongodb.repl_commands_per_sec
+      gauge:
+        aggregationTemporality: 2
+        dataPoints:
+        - asInt: "100"
+          startTimeUnixNano: "1000000"
+          timeUnixNano: "2000000"
+      unit: '{command}/s'
+    - description: The number of replicated getmores executed per second.
+      name: mongodb.repl_getmores_per_sec
+      gauge:
+        aggregationTemporality: 2
+        dataPoints:
+        - asInt: "100"
+          startTimeUnixNano: "1000000"
+          timeUnixNano: "2000000"
+      unit: '{getmore}/s'
+    - description: The number of replicated deletes executed per second.
+      name: mongodb.repl_deletes_per_sec
+      gauge:
+        aggregationTemporality: 2
+        dataPoints:
+        - asInt: "100"
+          startTimeUnixNano: "1000000"
+          timeUnixNano: "2000000"
+      unit: '{delete}/s'
+    - description: The number of replicated updates executed per second.
+      name: mongodb.repl_updates_per_sec
+      gauge:
+        aggregationTemporality: 2
+        dataPoints:
+        - asInt: "100"
+          startTimeUnixNano: "1000000"
+          timeUnixNano: "2000000"
+      unit: '{update}/s'
     - description: The total time spent performing operations.
       name: mongodb.operation.time
       sum:
diff --git a/receiver/mongodbreceiver/testdata/integration/expected.4_4lpu.yaml b/receiver/mongodbreceiver/testdata/integration/expected.4_4lpu.yaml
index 129ab977a1d28..62d2874a5d478 100644
--- a/receiver/mongodbreceiver/testdata/integration/expected.4_4lpu.yaml
+++ b/receiver/mongodbreceiver/testdata/integration/expected.4_4lpu.yaml
@@ -190,7 +190,61 @@ resourceMetrics:
       - asInt: "100"
         startTimeUnixNano: "1000000"
         timeUnixNano: "2000000"
-      unit: '{update}/s}'
+      unit: '{update}/s'
+    - description: The number of replicated queries executed per second.
+      name: mongodb.repl_queries_per_sec
+      gauge:
+        aggregationTemporality: 2
+        dataPoints:
+        - asInt: "100"
+          startTimeUnixNano: "1000000"
+          timeUnixNano: "2000000"
+      unit: '{query}/s'
+    - description: The number of replicated insertions executed per second.
+      name: mongodb.repl_inserts_per_sec
+      gauge:
+        aggregationTemporality: 2
+        dataPoints:
+        - asInt: "100"
+          startTimeUnixNano: "1000000"
+          timeUnixNano: "2000000"
+      unit: '{insert}/s'
+    - description: The number of replicated commands executed per second.
+      name: mongodb.repl_commands_per_sec
+      gauge:
+        aggregationTemporality: 2
+        dataPoints:
+        - asInt: "100"
+          startTimeUnixNano: "1000000"
+          timeUnixNano: "2000000"
+      unit: '{command}/s'
+    - description: The number of replicated getmores executed per second.
+      name: mongodb.repl_getmores_per_sec
+      gauge:
+        aggregationTemporality: 2
+        dataPoints:
+        - asInt: "100"
+          startTimeUnixNano: "1000000"
+          timeUnixNano: "2000000"
+      unit: '{getmore}/s'
+    - description: The number of replicated deletes executed per second.
+      name: mongodb.repl_deletes_per_sec
+      gauge:
+        aggregationTemporality: 2
+        dataPoints:
+        - asInt: "100"
+          startTimeUnixNano: "1000000"
+          timeUnixNano: "2000000"
+      unit: '{delete}/s'
+    - description: The number of replicated updates executed per second.
+      name: mongodb.repl_updates_per_sec
+      gauge:
+        aggregationTemporality: 2
+        dataPoints:
+        - asInt: "100"
+          startTimeUnixNano: "1000000"
+          timeUnixNano: "2000000"
+      unit: '{update}/s'
     - description: The total time spent performing operations.
       name: mongodb.operation.time
       sum:
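Since all six opcountersRepl rate metrics default to enabled in metadata.yaml, deployments that do not run replica sets may want to switch them off per metric. An illustrative collector configuration (the endpoint and interval are placeholders, not part of this patch):

receivers:
  mongodb:
    hosts:
      - endpoint: localhost:27017
    collection_interval: 60s
    metrics:
      mongodb.repl_queries_per_sec:
        enabled: false
      mongodb.repl_inserts_per_sec:
        enabled: false
      mongodb.repl_commands_per_sec:
        enabled: false
      mongodb.repl_getmores_per_sec:
        enabled: false
      mongodb.repl_deletes_per_sec:
        enabled: false
      mongodb.repl_updates_per_sec:
        enabled: false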
diff --git a/receiver/mongodbreceiver/testdata/integration/expected.5_0.yaml b/receiver/mongodbreceiver/testdata/integration/expected.5_0.yaml
index 6c72066ad593b..d54b720902501 100644
--- a/receiver/mongodbreceiver/testdata/integration/expected.5_0.yaml
+++ b/receiver/mongodbreceiver/testdata/integration/expected.5_0.yaml
@@ -190,7 +190,61 @@ resourceMetrics:
       - asInt: "100"
         startTimeUnixNano: "1000000"
         timeUnixNano: "2000000"
-      unit: '{update}/s}'
+      unit: '{update}/s'
+    - description: The number of replicated queries executed per second.
+      name: mongodb.repl_queries_per_sec
+      gauge:
+        aggregationTemporality: 2
+        dataPoints:
+        - asInt: "100"
+          startTimeUnixNano: "1000000"
+          timeUnixNano: "2000000"
+      unit: '{query}/s'
+    - description: The number of replicated insertions executed per second.
+      name: mongodb.repl_inserts_per_sec
+      gauge:
+        aggregationTemporality: 2
+        dataPoints:
+        - asInt: "100"
+          startTimeUnixNano: "1000000"
+          timeUnixNano: "2000000"
+      unit: '{insert}/s'
+    - description: The number of replicated commands executed per second.
+      name: mongodb.repl_commands_per_sec
+      gauge:
+        aggregationTemporality: 2
+        dataPoints:
+        - asInt: "100"
+          startTimeUnixNano: "1000000"
+          timeUnixNano: "2000000"
+      unit: '{command}/s'
+    - description: The number of replicated getmores executed per second.
+      name: mongodb.repl_getmores_per_sec
+      gauge:
+        aggregationTemporality: 2
+        dataPoints:
+        - asInt: "100"
+          startTimeUnixNano: "1000000"
+          timeUnixNano: "2000000"
+      unit: '{getmore}/s'
+    - description: The number of replicated deletes executed per second.
+      name: mongodb.repl_deletes_per_sec
+      gauge:
+        aggregationTemporality: 2
+        dataPoints:
+        - asInt: "100"
+          startTimeUnixNano: "1000000"
+          timeUnixNano: "2000000"
+      unit: '{delete}/s'
+    - description: The number of replicated updates executed per second.
+      name: mongodb.repl_updates_per_sec
+      gauge:
+        aggregationTemporality: 2
+        dataPoints:
+        - asInt: "100"
+          startTimeUnixNano: "1000000"
+          timeUnixNano: "2000000"
+      unit: '{update}/s'
     - description: The total time spent performing operations.
       name: mongodb.operation.time
       sum:
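For reference, the per-second values in these fixtures correspond to the delta computation in recordReplOperationPerSecond: each scrape emits (currentCount - previousCount) / elapsedSeconds. For example, if opcountersRepl.insert grows from 1000 to 1600 across two scrapes taken 60 s apart, the recorded mongodb.repl_inserts_per_sec value is (1600 - 1000) / 60 = 10.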
diff --git a/receiver/mongodbreceiver/testdata/scraper/expected.yaml b/receiver/mongodbreceiver/testdata/scraper/expected.yaml
index 8019d00cd8279..8e50e374cab68 100644
--- a/receiver/mongodbreceiver/testdata/scraper/expected.yaml
+++ b/receiver/mongodbreceiver/testdata/scraper/expected.yaml
@@ -201,7 +201,61 @@ resourceMetrics:
       - asInt: "100"
         startTimeUnixNano: "1000000"
         timeUnixNano: "2000000"
-      unit: '{update}/s}'
+      unit: '{update}/s'
+    - description: The number of replicated queries executed per second.
+      name: mongodb.repl_queries_per_sec
+      gauge:
+        aggregationTemporality: 2
+        dataPoints:
+        - asInt: "100"
+          startTimeUnixNano: "1000000"
+          timeUnixNano: "2000000"
+      unit: '{query}/s'
+    - description: The number of replicated insertions executed per second.
+      name: mongodb.repl_inserts_per_sec
+      gauge:
+        aggregationTemporality: 2
+        dataPoints:
+        - asInt: "100"
+          startTimeUnixNano: "1000000"
+          timeUnixNano: "2000000"
+      unit: '{insert}/s'
+    - description: The number of replicated commands executed per second.
+      name: mongodb.repl_commands_per_sec
+      gauge:
+        aggregationTemporality: 2
+        dataPoints:
+        - asInt: "100"
+          startTimeUnixNano: "1000000"
+          timeUnixNano: "2000000"
+      unit: '{command}/s'
+    - description: The number of replicated getmores executed per second.
+      name: mongodb.repl_getmores_per_sec
+      gauge:
+        aggregationTemporality: 2
+        dataPoints:
+        - asInt: "100"
+          startTimeUnixNano: "1000000"
+          timeUnixNano: "2000000"
+      unit: '{getmore}/s'
+    - description: The number of replicated deletes executed per second.
+      name: mongodb.repl_deletes_per_sec
+      gauge:
+        aggregationTemporality: 2
+        dataPoints:
+        - asInt: "100"
+          startTimeUnixNano: "1000000"
+          timeUnixNano: "2000000"
+      unit: '{delete}/s'
+    - description: The number of replicated updates executed per second.
+      name: mongodb.repl_updates_per_sec
+      gauge:
+        aggregationTemporality: 2
+        dataPoints:
+        - asInt: "100"
+          startTimeUnixNano: "1000000"
+          timeUnixNano: "2000000"
+      unit: '{update}/s'
     - description: The latency of operations.
       gauge:
         dataPoints:
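These scraper fixtures are compared against live scraper output in scraper_test.go; the comparison typically follows the contrib golden-file pattern, roughly as sketched below (illustrative only, assuming the pkg/golden and pkg/pdatatest/pmetrictest helpers used across this repository and the scraper's scrape method):

	expected, err := golden.ReadMetrics(filepath.Join("testdata", "scraper", "expected.yaml"))
	require.NoError(t, err)

	actual, err := scraper.scrape(context.Background())
	require.NoError(t, err)

	require.NoError(t, pmetrictest.CompareMetrics(expected, actual,
		pmetrictest.IgnoreStartTimestamp(),
		pmetrictest.IgnoreTimestamp(),
		pmetrictest.IgnoreMetricsOrder(),
		pmetrictest.IgnoreMetricDataPointsOrder()))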
gauge: dataPoints: From 60313de478d16369102597a175d252404231acd4 Mon Sep 17 00:00:00 2001 From: Tim Chan Date: Tue, 7 Jan 2025 14:38:55 -0800 Subject: [PATCH 04/19] Added active read/write metrics and removed debug statements --- receiver/mongodbreceiver/documentation.md | 16 ++ .../internal/metadata/generated_config.go | 8 + .../metadata/generated_config_test.go | 4 + .../internal/metadata/generated_metrics.go | 118 ++++++++++ .../metadata/generated_metrics_test.go | 36 +++ .../internal/metadata/testdata/config.yaml | 8 + receiver/mongodbreceiver/metadata.yaml | 20 +- receiver/mongodbreceiver/metrics.go | 59 ++--- receiver/mongodbreceiver/scraper.go | 44 +--- receiver/mongodbreceiver/scraper_test.go | 7 +- receiver/mongodbreceiver/testdata/admin.json | 6 +- .../testdata/integration/expected.4_0.yaml | 110 +-------- .../testdata/integration/expected.4_4lpu.yaml | 110 +-------- .../testdata/integration/expected.5_0.yaml | 110 +-------- .../testdata/scraper/expected.yaml | 218 ++++++++++-------- .../testdata/scraper/partial_scrape.yaml | 111 +-------- 16 files changed, 375 insertions(+), 610 deletions(-) diff --git a/receiver/mongodbreceiver/documentation.md b/receiver/mongodbreceiver/documentation.md index 5fe2b9f5af97b..5240833844101 100644 --- a/receiver/mongodbreceiver/documentation.md +++ b/receiver/mongodbreceiver/documentation.md @@ -12,6 +12,22 @@ metrics: enabled: false ``` +### mongodb.active.reads + +The number of read operations currently being processed. + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| {reads} | Sum | Int | Cumulative | false | + +### mongodb.active.writes + +The number of write operations currently being processed. + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| {writes} | Sum | Int | Cumulative | false | + ### mongodb.cache.operations The number of cache operations of the instance. diff --git a/receiver/mongodbreceiver/internal/metadata/generated_config.go b/receiver/mongodbreceiver/internal/metadata/generated_config.go index e066dad0fc6e4..82e01cbf2dcd9 100644 --- a/receiver/mongodbreceiver/internal/metadata/generated_config.go +++ b/receiver/mongodbreceiver/internal/metadata/generated_config.go @@ -28,6 +28,8 @@ func (ms *MetricConfig) Unmarshal(parser *confmap.Conf) error { // MetricsConfig provides config for mongodb metrics. 
type MetricsConfig struct { + MongodbActiveReads MetricConfig `mapstructure:"mongodb.active.reads"` + MongodbActiveWrites MetricConfig `mapstructure:"mongodb.active.writes"` MongodbCacheOperations MetricConfig `mapstructure:"mongodb.cache.operations"` MongodbCollectionCount MetricConfig `mapstructure:"mongodb.collection.count"` MongodbCommandsPerSec MetricConfig `mapstructure:"mongodb.commands_per_sec"` @@ -74,6 +76,12 @@ type MetricsConfig struct { func DefaultMetricsConfig() MetricsConfig { return MetricsConfig{ + MongodbActiveReads: MetricConfig{ + Enabled: true, + }, + MongodbActiveWrites: MetricConfig{ + Enabled: true, + }, MongodbCacheOperations: MetricConfig{ Enabled: true, }, diff --git a/receiver/mongodbreceiver/internal/metadata/generated_config_test.go b/receiver/mongodbreceiver/internal/metadata/generated_config_test.go index 62eaa8e39a17a..9776cad94b54c 100644 --- a/receiver/mongodbreceiver/internal/metadata/generated_config_test.go +++ b/receiver/mongodbreceiver/internal/metadata/generated_config_test.go @@ -25,6 +25,8 @@ func TestMetricsBuilderConfig(t *testing.T) { name: "all_set", want: MetricsBuilderConfig{ Metrics: MetricsConfig{ + MongodbActiveReads: MetricConfig{Enabled: true}, + MongodbActiveWrites: MetricConfig{Enabled: true}, MongodbCacheOperations: MetricConfig{Enabled: true}, MongodbCollectionCount: MetricConfig{Enabled: true}, MongodbCommandsPerSec: MetricConfig{Enabled: true}, @@ -79,6 +81,8 @@ func TestMetricsBuilderConfig(t *testing.T) { name: "none_set", want: MetricsBuilderConfig{ Metrics: MetricsConfig{ + MongodbActiveReads: MetricConfig{Enabled: false}, + MongodbActiveWrites: MetricConfig{Enabled: false}, MongodbCacheOperations: MetricConfig{Enabled: false}, MongodbCollectionCount: MetricConfig{Enabled: false}, MongodbCommandsPerSec: MetricConfig{Enabled: false}, diff --git a/receiver/mongodbreceiver/internal/metadata/generated_metrics.go b/receiver/mongodbreceiver/internal/metadata/generated_metrics.go index a58a753d283c9..880bb0aaf8a6a 100644 --- a/receiver/mongodbreceiver/internal/metadata/generated_metrics.go +++ b/receiver/mongodbreceiver/internal/metadata/generated_metrics.go @@ -250,6 +250,108 @@ var MapAttributeType = map[string]AttributeType{ "miss": AttributeTypeMiss, } +type metricMongodbActiveReads struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills mongodb.active.reads metric with initial data. +func (m *metricMongodbActiveReads) init() { + m.data.SetName("mongodb.active.reads") + m.data.SetDescription("The number of read operations currently being processed.") + m.data.SetUnit("{reads}") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(false) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) +} + +func (m *metricMongodbActiveReads) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricMongodbActiveReads) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
+func (m *metricMongodbActiveReads) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricMongodbActiveReads(cfg MetricConfig) metricMongodbActiveReads { + m := metricMongodbActiveReads{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricMongodbActiveWrites struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills mongodb.active.writes metric with initial data. +func (m *metricMongodbActiveWrites) init() { + m.data.SetName("mongodb.active.writes") + m.data.SetDescription("The number of write operations currently being processed.") + m.data.SetUnit("{writes}") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(false) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) +} + +func (m *metricMongodbActiveWrites) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricMongodbActiveWrites) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricMongodbActiveWrites) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricMongodbActiveWrites(cfg MetricConfig) metricMongodbActiveWrites { + m := metricMongodbActiveWrites{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + type metricMongodbCacheOperations struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. @@ -2404,6 +2506,8 @@ type MetricsBuilder struct { buildInfo component.BuildInfo // contains version information. 
resourceAttributeIncludeFilter map[string]filter.Filter resourceAttributeExcludeFilter map[string]filter.Filter + metricMongodbActiveReads metricMongodbActiveReads + metricMongodbActiveWrites metricMongodbActiveWrites metricMongodbCacheOperations metricMongodbCacheOperations metricMongodbCollectionCount metricMongodbCollectionCount metricMongodbCommandsPerSec metricMongodbCommandsPerSec @@ -2472,6 +2576,8 @@ func NewMetricsBuilder(mbc MetricsBuilderConfig, settings receiver.Settings, opt startTime: pcommon.NewTimestampFromTime(time.Now()), metricsBuffer: pmetric.NewMetrics(), buildInfo: settings.BuildInfo, + metricMongodbActiveReads: newMetricMongodbActiveReads(mbc.Metrics.MongodbActiveReads), + metricMongodbActiveWrites: newMetricMongodbActiveWrites(mbc.Metrics.MongodbActiveWrites), metricMongodbCacheOperations: newMetricMongodbCacheOperations(mbc.Metrics.MongodbCacheOperations), metricMongodbCollectionCount: newMetricMongodbCollectionCount(mbc.Metrics.MongodbCollectionCount), metricMongodbCommandsPerSec: newMetricMongodbCommandsPerSec(mbc.Metrics.MongodbCommandsPerSec), @@ -2604,6 +2710,8 @@ func (mb *MetricsBuilder) EmitForResource(options ...ResourceMetricsOption) { ils.Scope().SetName("github.com/open-telemetry/opentelemetry-collector-contrib/receiver/mongodbreceiver") ils.Scope().SetVersion(mb.buildInfo.Version) ils.Metrics().EnsureCapacity(mb.metricsCapacity) + mb.metricMongodbActiveReads.emit(ils.Metrics()) + mb.metricMongodbActiveWrites.emit(ils.Metrics()) mb.metricMongodbCacheOperations.emit(ils.Metrics()) mb.metricMongodbCollectionCount.emit(ils.Metrics()) mb.metricMongodbCommandsPerSec.emit(ils.Metrics()) @@ -2677,6 +2785,16 @@ func (mb *MetricsBuilder) Emit(options ...ResourceMetricsOption) pmetric.Metrics return metrics } +// RecordMongodbActiveReadsDataPoint adds a data point to mongodb.active.reads metric. +func (mb *MetricsBuilder) RecordMongodbActiveReadsDataPoint(ts pcommon.Timestamp, val int64) { + mb.metricMongodbActiveReads.recordDataPoint(mb.startTime, ts, val) +} + +// RecordMongodbActiveWritesDataPoint adds a data point to mongodb.active.writes metric. +func (mb *MetricsBuilder) RecordMongodbActiveWritesDataPoint(ts pcommon.Timestamp, val int64) { + mb.metricMongodbActiveWrites.recordDataPoint(mb.startTime, ts, val) +} + // RecordMongodbCacheOperationsDataPoint adds a data point to mongodb.cache.operations metric. 
func (mb *MetricsBuilder) RecordMongodbCacheOperationsDataPoint(ts pcommon.Timestamp, val int64, typeAttributeValue AttributeType) { mb.metricMongodbCacheOperations.recordDataPoint(mb.startTime, ts, val, typeAttributeValue.String()) diff --git a/receiver/mongodbreceiver/internal/metadata/generated_metrics_test.go b/receiver/mongodbreceiver/internal/metadata/generated_metrics_test.go index 746ea5814298e..742a585a05a22 100644 --- a/receiver/mongodbreceiver/internal/metadata/generated_metrics_test.go +++ b/receiver/mongodbreceiver/internal/metadata/generated_metrics_test.go @@ -68,6 +68,14 @@ func TestMetricsBuilder(t *testing.T) { defaultMetricsCount := 0 allMetricsCount := 0 + defaultMetricsCount++ + allMetricsCount++ + mb.RecordMongodbActiveReadsDataPoint(ts, 1) + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordMongodbActiveWritesDataPoint(ts, 1) + defaultMetricsCount++ allMetricsCount++ mb.RecordMongodbCacheOperationsDataPoint(ts, 1, AttributeTypeHit) @@ -255,6 +263,34 @@ func TestMetricsBuilder(t *testing.T) { validatedMetrics := make(map[string]bool) for i := 0; i < ms.Len(); i++ { switch ms.At(i).Name() { + case "mongodb.active.reads": + assert.False(t, validatedMetrics["mongodb.active.reads"], "Found a duplicate in the metrics slice: mongodb.active.reads") + validatedMetrics["mongodb.active.reads"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "The number of read operations currently being processed.", ms.At(i).Description()) + assert.Equal(t, "{reads}", ms.At(i).Unit()) + assert.False(t, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case "mongodb.active.writes": + assert.False(t, validatedMetrics["mongodb.active.writes"], "Found a duplicate in the metrics slice: mongodb.active.writes") + validatedMetrics["mongodb.active.writes"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "The number of write operations currently being processed.", ms.At(i).Description()) + assert.Equal(t, "{writes}", ms.At(i).Unit()) + assert.False(t, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) case "mongodb.cache.operations": assert.False(t, validatedMetrics["mongodb.cache.operations"], "Found a duplicate in the metrics slice: mongodb.cache.operations") validatedMetrics["mongodb.cache.operations"] = true diff --git a/receiver/mongodbreceiver/internal/metadata/testdata/config.yaml b/receiver/mongodbreceiver/internal/metadata/testdata/config.yaml index 1eca8c5c371fe..e2e73672c13f9 100644 --- a/receiver/mongodbreceiver/internal/metadata/testdata/config.yaml +++ b/receiver/mongodbreceiver/internal/metadata/testdata/config.yaml @@ -1,6 +1,10 @@ default: all_set: metrics: + mongodb.active.reads: + enabled: true + mongodb.active.writes: + enabled: true mongodb.cache.operations: enabled: 
true mongodb.collection.count: @@ -94,6 +98,10 @@ all_set: enabled: true none_set: metrics: + mongodb.active.reads: + enabled: false + mongodb.active.writes: + enabled: false mongodb.cache.operations: enabled: false mongodb.collection.count: diff --git a/receiver/mongodbreceiver/metadata.yaml b/receiver/mongodbreceiver/metadata.yaml index a112ec8ed662d..5741daa73ec32 100644 --- a/receiver/mongodbreceiver/metadata.yaml +++ b/receiver/mongodbreceiver/metadata.yaml @@ -453,7 +453,25 @@ metrics: gauge: value_type: double aggregation_temporality: delta - monotonic: false + monotonic: false + mongodb.active.writes: + description: The number of write operations currently being processed. + unit: "{writes}" + enabled: true + sum: + value_type: int + aggregation_temporality: cumulative + monotonic: false + attributes: [] + mongodb.active.reads: + description: The number of read operations currently being processed. + unit: "{reads}" + enabled: true + sum: + value_type: int + aggregation_temporality: cumulative + monotonic: false + attributes: [] tests: config: diff --git a/receiver/mongodbreceiver/metrics.go b/receiver/mongodbreceiver/metrics.go index dcabb553d9eef..e1d801ff4bcd8 100644 --- a/receiver/mongodbreceiver/metrics.go +++ b/receiver/mongodbreceiver/metrics.go @@ -223,7 +223,6 @@ func (s *mongodbScraper) recordLatencyTime(now pcommon.Timestamp, doc bson.M, er // Admin Stats func (s *mongodbScraper) recordOperations(now pcommon.Timestamp, doc bson.M, errs *scrapererror.ScrapeErrors) { currentCounts := make(map[string]int64) - fmt.Println("SCRAPER COUNTS @@@@@@@@@@@@@@@@@@@@: ", s.prevCounts) for operationVal, operation := range metadata.MapAttributeOperation { metricPath := []string{"opcounters", operationVal} @@ -247,11 +246,9 @@ func (s *mongodbScraper) recordOperations(now pcommon.Timestamp, doc bson.M, err } func (s *mongodbScraper) recordOperationsRepl(now pcommon.Timestamp, doc bson.M, errs *scrapererror.ScrapeErrors) { - var replDoc bson.M = doc // Default to primary doc + var replDoc bson.M = doc var highestInsertCount int64 = -1 - fmt.Println("IN OPERATION REPL SECONDARY CLIENTS@@@@@@@@@@@@@ :", len(s.secondaryClients)) - if len(s.secondaryClients) > 0 { ctx := context.Background() for _, secondaryClient := range s.secondaryClients { @@ -261,63 +258,31 @@ func (s *mongodbScraper) recordOperationsRepl(now pcommon.Timestamp, doc bson.M, continue } - // Debug full server status - s.logger.Debug("Full secondary server status", - zap.Any("host", status["host"]), - zap.Any("stateStr", status["stateStr"]), - zap.Any("ismaster", status["ismaster"]), - zap.Any("secondary", status["secondary"])) - if opcountersRepl, ok := status["opcountersRepl"].(bson.M); ok { - s.logger.Debug("Got replication metrics", - zap.Any("raw", opcountersRepl), - zap.Any("host", status["host"])) - if insertCount, ok := opcountersRepl["insert"].(int64); ok { - s.logger.Debug("Comparing insert counts", - zap.Int64("current_highest", highestInsertCount), - zap.Int64("this_node", insertCount), - zap.Any("host", status["host"])) - if insertCount > highestInsertCount { highestInsertCount = insertCount replDoc = status - s.logger.Debug("Using these replication metrics", - zap.Int64("insert_count", insertCount), - zap.Any("host", status["host"])) } } } } } - // Rest of the existing recordOperationsRepl logic using replDoc currentCounts := make(map[string]int64) - fmt.Println("IN OPERATION REPL SCRAPER PREV COUNTS@@@@@@@@@@@@@ :", s.prevReplCounts) for operationVal, operation := range metadata.MapAttributeOperation { 
metricPath := []string{"opcountersRepl", operationVal} metricName := "mongodb.operation.repl.count" val, err := collectMetric(replDoc, metricPath) if err != nil { - s.logger.Debug("Failed to collect metric", - zap.String("operation", operationVal), - zap.Error(err)) errs.AddPartial(1, fmt.Errorf(collectMetricWithAttributes, metricName, operationVal, err)) continue } - - s.logger.Debug("Collected repl metric", - zap.String("operation", operationVal), - zap.Int64("value", val)) - s.mb.RecordMongodbOperationReplCountDataPoint(now, val, operation) currentCounts[operationVal] = val s.recordReplOperationPerSecond(now, operationVal, val) } - s.logger.Debug("Updated repl counts", - zap.Any("previous", s.prevReplCounts), - zap.Any("current", currentCounts)) s.prevReplCounts = currentCounts s.prevReplTimestamp = now @@ -381,6 +346,28 @@ func (s *mongodbScraper) recordOperationPerSecond(now pcommon.Timestamp, operati } } +func (s *mongodbScraper) recordActiveWrites(now pcommon.Timestamp, doc bson.M, errs *scrapererror.ScrapeErrors) { + metricPath := []string{"globalLock", "activeClients", "writers"} + metricName := "mongodb.active.writes" + val, err := collectMetric(doc, metricPath) + if err != nil { + errs.AddPartial(1, fmt.Errorf(collectMetricError, metricName, err)) + return + } + s.mb.RecordMongodbActiveWritesDataPoint(now, val) +} + +func (s *mongodbScraper) recordActiveReads(now pcommon.Timestamp, doc bson.M, errs *scrapererror.ScrapeErrors) { + metricPath := []string{"globalLock", "activeClients", "readers"} + metricName := "mongodb.active.reads" + val, err := collectMetric(doc, metricPath) + if err != nil { + errs.AddPartial(1, fmt.Errorf(collectMetricError, metricName, err)) + return + } + s.mb.RecordMongodbActiveReadsDataPoint(now, val) +} + func (s *mongodbScraper) recordCacheOperations(now pcommon.Timestamp, doc bson.M, errs *scrapererror.ScrapeErrors) { storageEngine, err := dig(doc, []string{"storageEngine", "name"}) if err != nil { diff --git a/receiver/mongodbreceiver/scraper.go b/receiver/mongodbreceiver/scraper.go index a259f75cb4129..4633d98342341 100644 --- a/receiver/mongodbreceiver/scraper.go +++ b/receiver/mongodbreceiver/scraper.go @@ -70,72 +70,32 @@ func (s *mongodbScraper) start(ctx context.Context, _ component.Host) error { return fmt.Errorf("create mongo client: %w", err) } s.client = c - s.logger.Debug("Primary client connected") - // Find and connect to secondaries secondaries, err := s.findSecondaryHosts(ctx) if err != nil { s.logger.Warn("failed to find secondary hosts", zap.Error(err)) return nil } - s.logger.Debug("Found secondary hosts", zap.Strings("secondaries", secondaries)) for _, secondary := range secondaries { - secondaryConfig := *s.config // Copy primary config + secondaryConfig := *s.config secondaryConfig.Hosts = []confignet.TCPAddrConfig{ { Endpoint: secondary, }, } - s.logger.Debug("Attempting to connect to secondary", zap.String("host", secondary)) client, err := newClient(ctx, &secondaryConfig, s.logger, true) if err != nil { s.logger.Warn("failed to connect to secondary", zap.String("host", secondary), zap.Error(err)) continue } s.secondaryClients = append(s.secondaryClients, client) - s.logger.Info("Successfully connected to secondary", zap.String("host", secondary)) } - s.logger.Debug("Connected to secondaries", zap.Int("count", len(s.secondaryClients))) return nil } -// func (s *mongodbScraper) start(ctx context.Context, _ component.Host) error { -// c, err := newClient(ctx, s.config, s.logger) -// if err != nil { -// return fmt.Errorf("create 
mongo client: %w", err) -// } -// s.client = c - -// // Find and connect to secondaries -// secondaries, err := s.findSecondaryHosts(ctx) -// if err != nil { -// s.logger.Warn("failed to find secondary hosts", zap.Error(err)) -// return nil -// } - -// for _, secondary := range secondaries { -// secondaryConfig := *s.config // Copy primary config -// // Convert string address to TCPAddrConfig -// secondaryConfig.Hosts = []confignet.TCPAddrConfig{ -// { -// Endpoint: secondary, -// }, -// } - -// client, err := newClient(ctx, &secondaryConfig, s.logger) -// if err != nil { -// s.logger.Warn("failed to connect to secondary", zap.String("host", secondary), zap.Error(err)) -// continue -// } -// s.secondaryClients = append(s.secondaryClients, client) -// } - -// return nil -// } - func (s *mongodbScraper) shutdown(ctx context.Context) error { var errs []error @@ -294,6 +254,8 @@ func (s *mongodbScraper) recordAdminStats(now pcommon.Timestamp, document bson.M s.recordLatencyTime(now, document, errs) s.recordUptime(now, document, errs) s.recordHealth(now, document, errs) + s.recordActiveWrites(now, document, errs) + s.recordActiveReads(now, document, errs) } func (s *mongodbScraper) recordIndexStats(now pcommon.Timestamp, indexStats []bson.M, databaseName string, collectionName string, errs *scrapererror.ScrapeErrors) { diff --git a/receiver/mongodbreceiver/scraper_test.go b/receiver/mongodbreceiver/scraper_test.go index 7a25f54a35378..5467014c88661 100644 --- a/receiver/mongodbreceiver/scraper_test.go +++ b/receiver/mongodbreceiver/scraper_test.go @@ -112,6 +112,8 @@ var ( "failed to collect metric mongodb.operation.repl.count with attribute(s) update: could not find key for metric", "failed to collect metric mongodb.health: could not find key for metric", "failed to collect metric mongodb.uptime: could not find key for metric", + "failed to collect metric mongodb.active.reads: could not find key for metric", + "failed to collect metric mongodb.active.writes: could not find key for metric", }, "; ")) errAllClientFailedFetch = errors.New( strings.Join( @@ -342,8 +344,9 @@ func TestScraperScrape(t *testing.T) { expectedMetrics := tc.expectedMetricGen(t) require.NoError(t, pmetrictest.CompareMetrics(expectedMetrics, actualMetrics, - pmetrictest.IgnoreResourceMetricsOrder(), - pmetrictest.IgnoreMetricDataPointsOrder(), pmetrictest.IgnoreStartTimestamp(), pmetrictest.IgnoreTimestamp())) + pmetrictest.IgnoreResourceMetricsOrder(), pmetrictest.IgnoreMetricDataPointsOrder(), + pmetrictest.IgnoreStartTimestamp(), pmetrictest.IgnoreTimestamp(), + pmetrictest.IgnoreMetricsOrder())) }) } } diff --git a/receiver/mongodbreceiver/testdata/admin.json b/receiver/mongodbreceiver/testdata/admin.json index 4d573d293c98b..64f7a6c75fc7d 100644 --- a/receiver/mongodbreceiver/testdata/admin.json +++ b/receiver/mongodbreceiver/testdata/admin.json @@ -125,13 +125,13 @@ "globalLock": { "activeClients": { "readers": { - "$numberInt": "0" + "$numberInt": "1" }, "total": { - "$numberInt": "16" + "$numberInt": "3" }, "writers": { - "$numberInt": "0" + "$numberInt": "2" } }, "currentQueue": { diff --git a/receiver/mongodbreceiver/testdata/integration/expected.4_0.yaml b/receiver/mongodbreceiver/testdata/integration/expected.4_0.yaml index 2cee52c2b0139..300e60e9b6317 100644 --- a/receiver/mongodbreceiver/testdata/integration/expected.4_0.yaml +++ b/receiver/mongodbreceiver/testdata/integration/expected.4_0.yaml @@ -136,115 +136,7 @@ resourceMetrics: startTimeUnixNano: "1682363190502544000" timeUnixNano: "1682363210513475000" 
isMonotonic: true - unit: '{operations}' - - description: The total number of queries per second. - name: mongodb.queries_per_sec - gauge: - aggregationTemporality: 2 - dataPoints: - - asInt: "100" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - unit: '{query}/s}' - - description: The total number of insertions per second. - name: mongodb.inserts_per_sec - gauge: - aggregationTemporality: 2 - dataPoints: - - asInt: "100" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - unit: '{insert}/s}' - - description: The total number of commands per second. - name: mongodb.commands_per_sec - gauge: - aggregationTemporality: 2 - dataPoints: - - asInt: "100" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - unit: '{command}/s}' - - description: The total number of getmores per second. - name: mongodb.getmores_per_sec - gauge: - aggregationTemporality: 2 - dataPoints: - - asInt: "100" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - unit: '{getmore}/s}' - - description: The total number of deletes per second. - name: mongodb.deletes_per_sec - gauge: - aggregationTemporality: 2 - dataPoints: - - asInt: "100" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - unit: '{delete}/s}' - - description: The total number of updates per second. - name: mongodb.updates_per_sec - gauge: - aggregationTemporality: 2 - dataPoints: - - asInt: "100" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - unit: '{update}/s}' - - description: The total number replicated of queries per second. - name: mongodb.repl_queries_per_sec - gauge: - aggregationTemporality: 2 - dataPoints: - - asInt: "100" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - unit: '{query}/s}' - - description: The total number replicated of insertions per second. - name: mongodb.repl_inserts_per_sec - gauge: - aggregationTemporality: 2 - dataPoints: - - asInt: "100" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - unit: '{insert}/s}' - - description: The total number of replicated commands per second. - name: mongodb.repl_commands_per_sec - gauge: - aggregationTemporality: 2 - dataPoints: - - asInt: "100" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - unit: '{command}/s}' - - description: The total number of replicated getmores per second. - name: mongodb.repl_getmores_per_sec - gauge: - aggregationTemporality: 2 - dataPoints: - - asInt: "100" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - unit: '{getmore}/s}' - - description: The total number of replicated deletes per second. - name: mongodb.repl_deletes_per_sec - gauge: - aggregationTemporality: 2 - dataPoints: - - asInt: "100" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - unit: '{delete}/s}' - - description: The total number of replicated updates per second. - name: mongodb.repl_updates_per_sec - gauge: - aggregationTemporality: 2 - dataPoints: - - asInt: "100" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - unit: '{update}/s}' + unit: '{operations}' - description: The total time spent performing operations. 
name: mongodb.operation.time sum: diff --git a/receiver/mongodbreceiver/testdata/integration/expected.4_4lpu.yaml b/receiver/mongodbreceiver/testdata/integration/expected.4_4lpu.yaml index 62d2874a5d478..a0e27d3c01ab0 100644 --- a/receiver/mongodbreceiver/testdata/integration/expected.4_4lpu.yaml +++ b/receiver/mongodbreceiver/testdata/integration/expected.4_4lpu.yaml @@ -136,115 +136,7 @@ resourceMetrics: startTimeUnixNano: "1682363202250964000" timeUnixNano: "1682363222253814000" isMonotonic: true - unit: '{operations}' - - description: The total number of queries per second. - name: mongodb.queries_per_sec - gauge: - aggregationTemporality: 2 - dataPoints: - - asInt: "100" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - unit: '{query}/s}' - - description: The total number of insertions per second. - name: mongodb.inserts_per_sec - gauge: - aggregationTemporality: 2 - dataPoints: - - asInt: "100" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - unit: '{insert}/s}' - - description: The total number of commands per second. - name: mongodb.commands_per_sec - gauge: - aggregationTemporality: 2 - dataPoints: - - asInt: "100" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - unit: '{command}/s}' - - description: The total number of getmores per second. - name: mongodb.getmores_per_sec - gauge: - aggregationTemporality: 2 - dataPoints: - - asInt: "100" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - unit: '{getmore}/s}' - - description: The total number of deletes per second. - name: mongodb.deletes_per_sec - gauge: - aggregationTemporality: 2 - dataPoints: - - asInt: "100" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - unit: '{delete}/s}' - - description: The total number of updates per second. - name: mongodb.updates_per_sec - gauge: - aggregationTemporality: 2 - dataPoints: - - asInt: "100" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - unit: '{update}/s}' - - description: The total number replicated of queries per second. - name: mongodb.repl_queries_per_sec - gauge: - aggregationTemporality: 2 - dataPoints: - - asInt: "100" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - unit: '{query}/s}' - - description: The total number replicated of insertions per second. - name: mongodb.repl_inserts_per_sec - gauge: - aggregationTemporality: 2 - dataPoints: - - asInt: "100" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - unit: '{insert}/s}' - - description: The total number of replicated commands per second. - name: mongodb.repl_commands_per_sec - gauge: - aggregationTemporality: 2 - dataPoints: - - asInt: "100" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - unit: '{command}/s}' - - description: The total number of replicated getmores per second. - name: mongodb.repl_getmores_per_sec - gauge: - aggregationTemporality: 2 - dataPoints: - - asInt: "100" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - unit: '{getmore}/s}' - - description: The total number of replicated deletes per second. - name: mongodb.repl_deletes_per_sec - gauge: - aggregationTemporality: 2 - dataPoints: - - asInt: "100" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - unit: '{delete}/s}' - - description: The total number of replicated updates per second. - name: mongodb.repl_updates_per_sec - gauge: - aggregationTemporality: 2 - dataPoints: - - asInt: "100" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - unit: '{update}/s}' + unit: '{operations}' - description: The total time spent performing operations. 
name: mongodb.operation.time sum: diff --git a/receiver/mongodbreceiver/testdata/integration/expected.5_0.yaml b/receiver/mongodbreceiver/testdata/integration/expected.5_0.yaml index d54b720902501..b1695d769df08 100644 --- a/receiver/mongodbreceiver/testdata/integration/expected.5_0.yaml +++ b/receiver/mongodbreceiver/testdata/integration/expected.5_0.yaml @@ -136,115 +136,7 @@ resourceMetrics: startTimeUnixNano: "1682363190539043000" timeUnixNano: "1682363210542990000" isMonotonic: true - unit: '{operations}' - - description: The total number of queries per second. - name: mongodb.queries_per_sec - gauge: - aggregationTemporality: 2 - dataPoints: - - asInt: "100" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - unit: '{query}/s}' - - description: The total number of insertions per second. - name: mongodb.inserts_per_sec - gauge: - aggregationTemporality: 2 - dataPoints: - - asInt: "100" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - unit: '{insert}/s}' - - description: The total number of commands per second. - name: mongodb.commands_per_sec - gauge: - aggregationTemporality: 2 - dataPoints: - - asInt: "100" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - unit: '{command}/s}' - - description: The total number of getmores per second. - name: mongodb.getmores_per_sec - gauge: - aggregationTemporality: 2 - dataPoints: - - asInt: "100" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - unit: '{getmore}/s}' - - description: The total number of deletes per second. - name: mongodb.deletes_per_sec - gauge: - aggregationTemporality: 2 - dataPoints: - - asInt: "100" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - unit: '{delete}/s}' - - description: The total number of updates per second. - name: mongodb.updates_per_sec - gauge: - aggregationTemporality: 2 - dataPoints: - - asInt: "100" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - unit: '{update}/s}' - - description: The total number replicated of queries per second. - name: mongodb.repl_queries_per_sec - gauge: - aggregationTemporality: 2 - dataPoints: - - asInt: "100" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - unit: '{query}/s}' - - description: The total number replicated of insertions per second. - name: mongodb.repl_inserts_per_sec - gauge: - aggregationTemporality: 2 - dataPoints: - - asInt: "100" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - unit: '{insert}/s}' - - description: The total number of replicated commands per second. - name: mongodb.repl_commands_per_sec - gauge: - aggregationTemporality: 2 - dataPoints: - - asInt: "100" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - unit: '{command}/s}' - - description: The total number of replicated getmores per second. - name: mongodb.repl_getmores_per_sec - gauge: - aggregationTemporality: 2 - dataPoints: - - asInt: "100" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - unit: '{getmore}/s}' - - description: The total number of replicated deletes per second. - name: mongodb.repl_deletes_per_sec - gauge: - aggregationTemporality: 2 - dataPoints: - - asInt: "100" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - unit: '{delete}/s}' - - description: The total number of replicated updates per second. - name: mongodb.repl_updates_per_sec - gauge: - aggregationTemporality: 2 - dataPoints: - - asInt: "100" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - unit: '{update}/s}' + unit: '{operations}' - description: The total time spent performing operations. 
name: mongodb.operation.time sum: diff --git a/receiver/mongodbreceiver/testdata/scraper/expected.yaml b/receiver/mongodbreceiver/testdata/scraper/expected.yaml index 8e50e374cab68..f975db0998e8c 100644 --- a/receiver/mongodbreceiver/testdata/scraper/expected.yaml +++ b/receiver/mongodbreceiver/testdata/scraper/expected.yaml @@ -148,114 +148,132 @@ resourceMetrics: timeUnixNano: "2000000" isMonotonic: true unit: '{operations}' - - description: The total number of queries per second. - name: mongodb.queries_per_sec - gauge: - aggregationTemporality: 2 - dataPoints: - - asInt: "100" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - unit: '{query}/s}' - - description: The total number of insertions per second. - name: mongodb.inserts_per_sec - gauge: - aggregationTemporality: 2 - dataPoints: - - asInt: "100" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - unit: '{insert}/s}' - - description: The total number of commands per second. - name: mongodb.commands_per_sec - gauge: - aggregationTemporality: 2 - dataPoints: - - asInt: "100" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - unit: '{command}/s}' - - description: The total number of getmores per second. - name: mongodb.getmores_per_sec - gauge: - aggregationTemporality: 2 - dataPoints: - - asInt: "100" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - unit: '{getmore}/s}' - - description: The total number of deletes per second. - name: mongodb.deletes_per_sec - gauge: - aggregationTemporality: 2 - dataPoints: - - asInt: "100" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - unit: '{delete}/s}' - - description: The total number of updates per second. - name: mongodb.updates_per_sec - gauge: - aggregationTemporality: 2 - dataPoints: - - asInt: "100" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - unit: '{update}/s}' - - description: The total number replicated of queries per second. - name: mongodb.repl_queries_per_sec - gauge: - aggregationTemporality: 2 - dataPoints: - - asInt: "100" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - unit: '{query}/s}' - - description: The total number replicated of insertions per second. - name: mongodb.repl_inserts_per_sec - gauge: - aggregationTemporality: 2 - dataPoints: - - asInt: "100" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - unit: '{insert}/s}' - - description: The total number of replicated commands per second. - name: mongodb.repl_commands_per_sec - gauge: - aggregationTemporality: 2 - dataPoints: - - asInt: "100" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - unit: '{command}/s}' - - description: The total number of replicated getmores per second. - name: mongodb.repl_getmores_per_sec - gauge: - aggregationTemporality: 2 + # - description: The total number of queries per second. + # name: mongodb.queries_per_sec + # gauge: + # aggregationTemporality: 2 + # dataPoints: + # - asInt: "100" + # startTimeUnixNano: "1000000" + # timeUnixNano: "2000000" + # unit: '{query}/s}' + # - description: The total number of insertions per second. + # name: mongodb.inserts_per_sec + # gauge: + # aggregationTemporality: 2 + # dataPoints: + # - asInt: "100" + # startTimeUnixNano: "1000000" + # timeUnixNano: "2000000" + # unit: '{insert}/s}' + # - description: The total number of commands per second. 
+ # name: mongodb.commands_per_sec + # gauge: + # aggregationTemporality: 2 + # dataPoints: + # - asInt: "100" + # startTimeUnixNano: "1000000" + # timeUnixNano: "2000000" + # unit: '{command}/s}' + # - description: The total number of getmores per second. + # name: mongodb.getmores_per_sec + # gauge: + # aggregationTemporality: 2 + # dataPoints: + # - asInt: "100" + # startTimeUnixNano: "1000000" + # timeUnixNano: "2000000" + # unit: '{getmore}/s}' + # - description: The total number of deletes per second. + # name: mongodb.deletes_per_sec + # gauge: + # aggregationTemporality: 2 + # dataPoints: + # - asInt: "100" + # startTimeUnixNano: "1000000" + # timeUnixNano: "2000000" + # unit: '{delete}/s}' + # - description: The total number of updates per second. + # name: mongodb.updates_per_sec + # gauge: + # aggregationTemporality: 2 + # dataPoints: + # - asInt: "100" + # startTimeUnixNano: "1000000" + # timeUnixNano: "2000000" + # unit: '{update}/s}' + # - description: The total number replicated of queries per second. + # name: mongodb.repl_queries_per_sec + # gauge: + # aggregationTemporality: 2 + # dataPoints: + # - asInt: "100" + # startTimeUnixNano: "1000000" + # timeUnixNano: "2000000" + # unit: '{query}/s}' + # - description: The total number replicated of insertions per second. + # name: mongodb.repl_inserts_per_sec + # gauge: + # aggregationTemporality: 2 + # dataPoints: + # - asInt: "100" + # startTimeUnixNano: "1000000" + # timeUnixNano: "2000000" + # unit: '{insert}/s}' + # - description: The total number of replicated commands per second. + # name: mongodb.repl_commands_per_sec + # gauge: + # aggregationTemporality: 2 + # dataPoints: + # - asInt: "100" + # startTimeUnixNano: "1000000" + # timeUnixNano: "2000000" + # unit: '{command}/s}' + # - description: The total number of replicated getmores per second. + # name: mongodb.repl_getmores_per_sec + # gauge: + # aggregationTemporality: 2 + # dataPoints: + # - asInt: "100" + # startTimeUnixNano: "1000000" + # timeUnixNano: "2000000" + # unit: '{getmore}/s}' + # - description: The total number of replicated deletes per second. + # name: mongodb.repl_deletes_per_sec + # gauge: + # aggregationTemporality: 2 + # dataPoints: + # - asInt: "100" + # startTimeUnixNano: "1000000" + # timeUnixNano: "2000000" + # unit: '{delete}/s}' + # - description: The total number of replicated updates per second. + # name: mongodb.repl_updates_per_sec + # gauge: + # aggregationTemporality: 2 + # dataPoints: + # - asInt: "100" + # startTimeUnixNano: "1000000" + # timeUnixNano: "2000000" + # unit: '{update}/s}' + - name: mongodb.active.reads + description: The number of read operations currently being processed. + sum: dataPoints: - - asInt: "100" + - asInt: "1" startTimeUnixNano: "1000000" timeUnixNano: "2000000" - unit: '{getmore}/s}' - - description: The total number of replicated deletes per second. - name: mongodb.repl_deletes_per_sec - gauge: aggregationTemporality: 2 + unit: '{reads}' + - name: mongodb.active.writes + description: The number of write operations currently being processed. + sum: dataPoints: - - asInt: "100" + - asInt: "2" startTimeUnixNano: "1000000" timeUnixNano: "2000000" - unit: '{delete}/s}' - - description: The total number of replicated updates per second. - name: mongodb.repl_updates_per_sec - gauge: aggregationTemporality: 2 - dataPoints: - - asInt: "100" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - unit: '{update}/s}' + unit: '{writes}' - description: The latency of operations. 
gauge: dataPoints: diff --git a/receiver/mongodbreceiver/testdata/scraper/partial_scrape.yaml b/receiver/mongodbreceiver/testdata/scraper/partial_scrape.yaml index f4b4ee22c040d..fd325aa3a1a0c 100644 --- a/receiver/mongodbreceiver/testdata/scraper/partial_scrape.yaml +++ b/receiver/mongodbreceiver/testdata/scraper/partial_scrape.yaml @@ -148,114 +148,25 @@ resourceMetrics: timeUnixNano: "2000000" isMonotonic: true unit: '{operations}' - - description: The total number of queries per second. - name: mongodb.queries_per_sec - gauge: - aggregationTemporality: 2 - dataPoints: - - asInt: "100" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - unit: '{query}/s}' - - description: The total number of insertions per second. - name: mongodb.inserts_per_sec - gauge: - aggregationTemporality: 2 - dataPoints: - - asInt: "100" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - unit: '{insert}/s}' - - description: The total number of commands per second. - name: mongodb.commands_per_sec - gauge: - aggregationTemporality: 2 - dataPoints: - - asInt: "100" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - unit: '{command}/s}' - - description: The total number of getmores per second. - name: mongodb.getmores_per_sec - gauge: - aggregationTemporality: 2 - dataPoints: - - asInt: "100" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - unit: '{getmore}/s}' - - description: The total number of deletes per second. - name: mongodb.deletes_per_sec - gauge: - aggregationTemporality: 2 - dataPoints: - - asInt: "100" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - unit: '{delete}/s}' - - description: The total number of updates per second. - name: mongodb.updates_per_sec - gauge: - aggregationTemporality: 2 - dataPoints: - - asInt: "100" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - unit: '{update}/s}' - - description: The total number replicated of queries per second. - name: mongodb.repl_queries_per_sec - gauge: - aggregationTemporality: 2 - dataPoints: - - asInt: "100" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - unit: '{query}/s}' - - description: The total number replicated of insertions per second. - name: mongodb.repl_inserts_per_sec - gauge: - aggregationTemporality: 2 - dataPoints: - - asInt: "100" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - unit: '{insert}/s}' - - description: The total number of replicated commands per second. - name: mongodb.repl_commands_per_sec - gauge: - aggregationTemporality: 2 - dataPoints: - - asInt: "100" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - unit: '{command}/s}' - - description: The total number of replicated getmores per second. - name: mongodb.repl_getmores_per_sec - gauge: - aggregationTemporality: 2 + + - name: mongodb.active.reads + description: The number of read operations currently being processed. + sum: dataPoints: - - asInt: "100" + - asInt: "1" startTimeUnixNano: "1000000" timeUnixNano: "2000000" - unit: '{getmore}/s}' - - description: The total number of replicated deletes per second. - name: mongodb.repl_deletes_per_sec - gauge: aggregationTemporality: 2 + unit: '{reads}' + - name: mongodb.active.writes + description: The number of write operations currently being processed. + sum: dataPoints: - - asInt: "100" + - asInt: "2" startTimeUnixNano: "1000000" timeUnixNano: "2000000" - unit: '{delete}/s}' - - description: The total number of replicated updates per second. 
- name: mongodb.repl_updates_per_sec - gauge: aggregationTemporality: 2 - dataPoints: - - asInt: "100" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - unit: '{update}/s}' + unit: '{writes}' - description: The latency of operations. gauge: dataPoints: From d1eb7d4113a71c9488cf1a828ed352c299a421ab Mon Sep 17 00:00:00 2001 From: Tim Chan Date: Wed, 8 Jan 2025 00:41:41 -0800 Subject: [PATCH 05/19] fixed flushes per sec metric --- receiver/mongodbreceiver/documentation.md | 8 ++ .../internal/metadata/generated_config.go | 4 + .../metadata/generated_config_test.go | 2 + .../internal/metadata/generated_metrics.go | 57 +++++++++++++ .../metadata/generated_metrics_test.go | 16 ++++ .../internal/metadata/testdata/config.yaml | 4 + receiver/mongodbreceiver/metadata.yaml | 8 ++ receiver/mongodbreceiver/metrics.go | 24 ++++++ receiver/mongodbreceiver/scraper.go | 79 ++++++------------- receiver/mongodbreceiver/scraper_test.go | 1 + receiver/mongodbreceiver/testdata/admin.json | 5 ++ 11 files changed, 152 insertions(+), 56 deletions(-) diff --git a/receiver/mongodbreceiver/documentation.md b/receiver/mongodbreceiver/documentation.md index 5240833844101..440ad2b6ad985 100644 --- a/receiver/mongodbreceiver/documentation.md +++ b/receiver/mongodbreceiver/documentation.md @@ -134,6 +134,14 @@ The number of extents. | ---- | ----------- | ---------- | ----------------------- | --------- | | {extents} | Sum | Int | Cumulative | false | +### mongodb.flushes_per_sec + +The number of flushes executed per second. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| {flush}/s | Gauge | Double | + ### mongodb.getmores_per_sec The number of getmores executed per second. diff --git a/receiver/mongodbreceiver/internal/metadata/generated_config.go b/receiver/mongodbreceiver/internal/metadata/generated_config.go index 82e01cbf2dcd9..60754566d73f9 100644 --- a/receiver/mongodbreceiver/internal/metadata/generated_config.go +++ b/receiver/mongodbreceiver/internal/metadata/generated_config.go @@ -41,6 +41,7 @@ type MetricsConfig struct { MongodbDeletesPerSec MetricConfig `mapstructure:"mongodb.deletes_per_sec"` MongodbDocumentOperationCount MetricConfig `mapstructure:"mongodb.document.operation.count"` MongodbExtentCount MetricConfig `mapstructure:"mongodb.extent.count"` + MongodbFlushesPerSec MetricConfig `mapstructure:"mongodb.flushes_per_sec"` MongodbGetmoresPerSec MetricConfig `mapstructure:"mongodb.getmores_per_sec"` MongodbGlobalLockTime MetricConfig `mapstructure:"mongodb.global_lock.time"` MongodbHealth MetricConfig `mapstructure:"mongodb.health"` @@ -115,6 +116,9 @@ func DefaultMetricsConfig() MetricsConfig { MongodbExtentCount: MetricConfig{ Enabled: true, }, + MongodbFlushesPerSec: MetricConfig{ + Enabled: true, + }, MongodbGetmoresPerSec: MetricConfig{ Enabled: true, }, diff --git a/receiver/mongodbreceiver/internal/metadata/generated_config_test.go b/receiver/mongodbreceiver/internal/metadata/generated_config_test.go index 9776cad94b54c..d527783e5903f 100644 --- a/receiver/mongodbreceiver/internal/metadata/generated_config_test.go +++ b/receiver/mongodbreceiver/internal/metadata/generated_config_test.go @@ -38,6 +38,7 @@ func TestMetricsBuilderConfig(t *testing.T) { MongodbDeletesPerSec: MetricConfig{Enabled: true}, MongodbDocumentOperationCount: MetricConfig{Enabled: true}, MongodbExtentCount: MetricConfig{Enabled: true}, + MongodbFlushesPerSec: MetricConfig{Enabled: true}, MongodbGetmoresPerSec: MetricConfig{Enabled: true}, MongodbGlobalLockTime: MetricConfig{Enabled: 
true}, MongodbHealth: MetricConfig{Enabled: true}, @@ -94,6 +95,7 @@ func TestMetricsBuilderConfig(t *testing.T) { MongodbDeletesPerSec: MetricConfig{Enabled: false}, MongodbDocumentOperationCount: MetricConfig{Enabled: false}, MongodbExtentCount: MetricConfig{Enabled: false}, + MongodbFlushesPerSec: MetricConfig{Enabled: false}, MongodbGetmoresPerSec: MetricConfig{Enabled: false}, MongodbGlobalLockTime: MetricConfig{Enabled: false}, MongodbHealth: MetricConfig{Enabled: false}, diff --git a/receiver/mongodbreceiver/internal/metadata/generated_metrics.go b/receiver/mongodbreceiver/internal/metadata/generated_metrics.go index 880bb0aaf8a6a..07fb05cccaa5e 100644 --- a/receiver/mongodbreceiver/internal/metadata/generated_metrics.go +++ b/receiver/mongodbreceiver/internal/metadata/generated_metrics.go @@ -915,6 +915,55 @@ func newMetricMongodbExtentCount(cfg MetricConfig) metricMongodbExtentCount { return m } +type metricMongodbFlushesPerSec struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills mongodb.flushes_per_sec metric with initial data. +func (m *metricMongodbFlushesPerSec) init() { + m.data.SetName("mongodb.flushes_per_sec") + m.data.SetDescription("The number of flushes executed per second.") + m.data.SetUnit("{flush}/s") + m.data.SetEmptyGauge() +} + +func (m *metricMongodbFlushesPerSec) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetDoubleValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricMongodbFlushesPerSec) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricMongodbFlushesPerSec) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricMongodbFlushesPerSec(cfg MetricConfig) metricMongodbFlushesPerSec { + m := metricMongodbFlushesPerSec{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + type metricMongodbGetmoresPerSec struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. 
@@ -2519,6 +2568,7 @@ type MetricsBuilder struct { metricMongodbDeletesPerSec metricMongodbDeletesPerSec metricMongodbDocumentOperationCount metricMongodbDocumentOperationCount metricMongodbExtentCount metricMongodbExtentCount + metricMongodbFlushesPerSec metricMongodbFlushesPerSec metricMongodbGetmoresPerSec metricMongodbGetmoresPerSec metricMongodbGlobalLockTime metricMongodbGlobalLockTime metricMongodbHealth metricMongodbHealth @@ -2589,6 +2639,7 @@ func NewMetricsBuilder(mbc MetricsBuilderConfig, settings receiver.Settings, opt metricMongodbDeletesPerSec: newMetricMongodbDeletesPerSec(mbc.Metrics.MongodbDeletesPerSec), metricMongodbDocumentOperationCount: newMetricMongodbDocumentOperationCount(mbc.Metrics.MongodbDocumentOperationCount), metricMongodbExtentCount: newMetricMongodbExtentCount(mbc.Metrics.MongodbExtentCount), + metricMongodbFlushesPerSec: newMetricMongodbFlushesPerSec(mbc.Metrics.MongodbFlushesPerSec), metricMongodbGetmoresPerSec: newMetricMongodbGetmoresPerSec(mbc.Metrics.MongodbGetmoresPerSec), metricMongodbGlobalLockTime: newMetricMongodbGlobalLockTime(mbc.Metrics.MongodbGlobalLockTime), metricMongodbHealth: newMetricMongodbHealth(mbc.Metrics.MongodbHealth), @@ -2723,6 +2774,7 @@ func (mb *MetricsBuilder) EmitForResource(options ...ResourceMetricsOption) { mb.metricMongodbDeletesPerSec.emit(ils.Metrics()) mb.metricMongodbDocumentOperationCount.emit(ils.Metrics()) mb.metricMongodbExtentCount.emit(ils.Metrics()) + mb.metricMongodbFlushesPerSec.emit(ils.Metrics()) mb.metricMongodbGetmoresPerSec.emit(ils.Metrics()) mb.metricMongodbGlobalLockTime.emit(ils.Metrics()) mb.metricMongodbHealth.emit(ils.Metrics()) @@ -2850,6 +2902,11 @@ func (mb *MetricsBuilder) RecordMongodbExtentCountDataPoint(ts pcommon.Timestamp mb.metricMongodbExtentCount.recordDataPoint(mb.startTime, ts, val) } +// RecordMongodbFlushesPerSecDataPoint adds a data point to mongodb.flushes_per_sec metric. +func (mb *MetricsBuilder) RecordMongodbFlushesPerSecDataPoint(ts pcommon.Timestamp, val float64) { + mb.metricMongodbFlushesPerSec.recordDataPoint(mb.startTime, ts, val) +} + // RecordMongodbGetmoresPerSecDataPoint adds a data point to mongodb.getmores_per_sec metric. 
func (mb *MetricsBuilder) RecordMongodbGetmoresPerSecDataPoint(ts pcommon.Timestamp, val float64) { mb.metricMongodbGetmoresPerSec.recordDataPoint(mb.startTime, ts, val) diff --git a/receiver/mongodbreceiver/internal/metadata/generated_metrics_test.go b/receiver/mongodbreceiver/internal/metadata/generated_metrics_test.go index 742a585a05a22..d753e63ba597b 100644 --- a/receiver/mongodbreceiver/internal/metadata/generated_metrics_test.go +++ b/receiver/mongodbreceiver/internal/metadata/generated_metrics_test.go @@ -120,6 +120,10 @@ func TestMetricsBuilder(t *testing.T) { allMetricsCount++ mb.RecordMongodbExtentCountDataPoint(ts, 1) + defaultMetricsCount++ + allMetricsCount++ + mb.RecordMongodbFlushesPerSecDataPoint(ts, 1) + defaultMetricsCount++ allMetricsCount++ mb.RecordMongodbGetmoresPerSecDataPoint(ts, 1) @@ -450,6 +454,18 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, ts, dp.Timestamp()) assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) assert.Equal(t, int64(1), dp.IntValue()) + case "mongodb.flushes_per_sec": + assert.False(t, validatedMetrics["mongodb.flushes_per_sec"], "Found a duplicate in the metrics slice: mongodb.flushes_per_sec") + validatedMetrics["mongodb.flushes_per_sec"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "The number of flushes executed per second.", ms.At(i).Description()) + assert.Equal(t, "{flush}/s", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType()) + assert.InDelta(t, float64(1), dp.DoubleValue(), 0.01) case "mongodb.getmores_per_sec": assert.False(t, validatedMetrics["mongodb.getmores_per_sec"], "Found a duplicate in the metrics slice: mongodb.getmores_per_sec") validatedMetrics["mongodb.getmores_per_sec"] = true diff --git a/receiver/mongodbreceiver/internal/metadata/testdata/config.yaml b/receiver/mongodbreceiver/internal/metadata/testdata/config.yaml index e2e73672c13f9..e9f47c38039ba 100644 --- a/receiver/mongodbreceiver/internal/metadata/testdata/config.yaml +++ b/receiver/mongodbreceiver/internal/metadata/testdata/config.yaml @@ -27,6 +27,8 @@ all_set: enabled: true mongodb.extent.count: enabled: true + mongodb.flushes_per_sec: + enabled: true mongodb.getmores_per_sec: enabled: true mongodb.global_lock.time: @@ -124,6 +126,8 @@ none_set: enabled: false mongodb.extent.count: enabled: false + mongodb.flushes_per_sec: + enabled: false mongodb.getmores_per_sec: enabled: false mongodb.global_lock.time: diff --git a/receiver/mongodbreceiver/metadata.yaml b/receiver/mongodbreceiver/metadata.yaml index 5741daa73ec32..c4fefdd5cbdb5 100644 --- a/receiver/mongodbreceiver/metadata.yaml +++ b/receiver/mongodbreceiver/metadata.yaml @@ -406,6 +406,14 @@ metrics: value_type: double aggregation_temporality: delta monotonic: false + mongodb.flushes_per_sec: + description: The number of flushes executed per second. + unit: "{flush}/s" + enabled: true + gauge: + value_type: double + aggregation_temporality: delta + monotonic: false mongodb.repl_queries_per_sec: description: The number of replicated queries executed per second. 
unit: "{query}/s" diff --git a/receiver/mongodbreceiver/metrics.go b/receiver/mongodbreceiver/metrics.go index e1d801ff4bcd8..bcf9be33a7d9f 100644 --- a/receiver/mongodbreceiver/metrics.go +++ b/receiver/mongodbreceiver/metrics.go @@ -288,6 +288,30 @@ func (s *mongodbScraper) recordOperationsRepl(now pcommon.Timestamp, doc bson.M, s.prevReplTimestamp = now } +func (s *mongodbScraper) recordFlushesPerSecond(now pcommon.Timestamp, doc bson.M, errs *scrapererror.ScrapeErrors) { + metricPath := []string{"wiredTiger", "checkpoint", "total succeed number of checkpoints"} + metricName := "mongodb.flushes_per_sec" + currentFlushes, err := collectMetric(doc, metricPath) + if err != nil { + errs.AddPartial(1, fmt.Errorf(collectMetricError, metricName, err)) + return + } + + if s.prevFlushTimestamp > 0 { + timeDelta := float64(now-s.prevFlushTimestamp) / 1e9 + if timeDelta > 0 { + if prevFlushCount := s.prevFlushCount; true { + delta := currentFlushes - prevFlushCount + flushesPerSec := float64(delta) / timeDelta + s.mb.RecordMongodbFlushesPerSecDataPoint(now, flushesPerSec) + } + } + } + + s.prevFlushCount = currentFlushes + s.prevFlushTimestamp = now +} + func (s *mongodbScraper) recordReplOperationPerSecond(now pcommon.Timestamp, operationVal string, currentCount int64) { if s.prevReplTimestamp > 0 { timeDelta := float64(now-s.prevReplTimestamp) / 1e9 diff --git a/receiver/mongodbreceiver/scraper.go b/receiver/mongodbreceiver/scraper.go index 4633d98342341..270da1a67954b 100644 --- a/receiver/mongodbreceiver/scraper.go +++ b/receiver/mongodbreceiver/scraper.go @@ -39,28 +39,32 @@ var ( ) type mongodbScraper struct { - logger *zap.Logger - config *Config - client client - secondaryClients []client - mongoVersion *version.Version - mb *metadata.MetricsBuilder - prevTimestamp pcommon.Timestamp - prevReplTimestamp pcommon.Timestamp - prevCounts map[string]int64 - prevReplCounts map[string]int64 + logger *zap.Logger + config *Config + client client + secondaryClients []client + mongoVersion *version.Version + mb *metadata.MetricsBuilder + prevTimestamp pcommon.Timestamp + prevReplTimestamp pcommon.Timestamp + prevFlushTimestamp pcommon.Timestamp + prevCounts map[string]int64 + prevReplCounts map[string]int64 + prevFlushCount int64 } func newMongodbScraper(settings receiver.Settings, config *Config) *mongodbScraper { return &mongodbScraper{ - logger: settings.Logger, - config: config, - mb: metadata.NewMetricsBuilder(config.MetricsBuilderConfig, settings), - mongoVersion: unknownVersion(), - prevTimestamp: pcommon.Timestamp(0), - prevReplTimestamp: pcommon.Timestamp(0), - prevCounts: make(map[string]int64), - prevReplCounts: make(map[string]int64), + logger: settings.Logger, + config: config, + mb: metadata.NewMetricsBuilder(config.MetricsBuilderConfig, settings), + mongoVersion: unknownVersion(), + prevTimestamp: pcommon.Timestamp(0), + prevReplTimestamp: pcommon.Timestamp(0), + prevFlushTimestamp: pcommon.Timestamp(0), + prevCounts: make(map[string]int64), + prevReplCounts: make(map[string]int64), + prevFlushCount: 0, } } @@ -256,6 +260,7 @@ func (s *mongodbScraper) recordAdminStats(now pcommon.Timestamp, document bson.M s.recordHealth(now, document, errs) s.recordActiveWrites(now, document, errs) s.recordActiveReads(now, document, errs) + s.recordFlushesPerSecond(now, document, errs) } func (s *mongodbScraper) recordIndexStats(now pcommon.Timestamp, indexStats []bson.M, databaseName string, collectionName string, errs *scrapererror.ScrapeErrors) { @@ -282,44 +287,6 @@ func 
serverAddressAndPort(serverStatus bson.M) (string, int64, error) { } } -// func (s *mongodbScraper) findSecondaryHosts(ctx context.Context) ([]string, error) { -// s.logger.Debug("Attempting to find secondary hosts") -// result, err := s.client.RunCommand(ctx, "admin", bson.M{"replSetGetStatus": 1}) -// if err != nil { -// s.logger.Error("Failed to get replica set status", zap.Error(err)) -// return nil, fmt.Errorf("failed to get replica set status: %w", err) -// } - -// s.logger.Debug("Received replSetGetStatus response", zap.Any("result", result)) -// s.logger.Debug("LOOKING INTO MEMBERS", zap.Any("members", result["members"])) - -// var hosts []string -// if members, ok := result["members"].([]interface{}); ok { -// for _, member := range members { -// s.logger.Debug("Processing member", zap.Any("member", member)) - -// if m, ok := member.(bson.M); ok { -// state, stateOk := m["stateStr"].(string) -// host, hostOk := m["name"].(string) // Changed from "host" to "name" - -// if stateOk && hostOk { -// s.logger.Debug("Found member", -// zap.String("state", state), -// zap.String("host", host)) - -// if state == "SECONDARY" { -// s.logger.Info("Found secondary host", zap.String("host", host)) -// hosts = append(hosts, host) -// } -// } -// } -// } -// } - -// s.logger.Debug("Found secondary hosts", zap.Strings("hosts", hosts)) -// return hosts, nil -// } - func (s *mongodbScraper) findSecondaryHosts(ctx context.Context) ([]string, error) { result, err := s.client.RunCommand(ctx, "admin", bson.M{"replSetGetStatus": 1}) if err != nil { diff --git a/receiver/mongodbreceiver/scraper_test.go b/receiver/mongodbreceiver/scraper_test.go index 5467014c88661..32199c49e3379 100644 --- a/receiver/mongodbreceiver/scraper_test.go +++ b/receiver/mongodbreceiver/scraper_test.go @@ -114,6 +114,7 @@ var ( "failed to collect metric mongodb.uptime: could not find key for metric", "failed to collect metric mongodb.active.reads: could not find key for metric", "failed to collect metric mongodb.active.writes: could not find key for metric", + "failed to collect metric mongodb.flushes_per_sec: could not find key for metric", }, "; ")) errAllClientFailedFetch = errors.New( strings.Join( diff --git a/receiver/mongodbreceiver/testdata/admin.json b/receiver/mongodbreceiver/testdata/admin.json index 64f7a6c75fc7d..a51a152fe6c9d 100644 --- a/receiver/mongodbreceiver/testdata/admin.json +++ b/receiver/mongodbreceiver/testdata/admin.json @@ -398,6 +398,11 @@ "open session count": { "$numberInt": "19" } + }, + "checkpoint": { + "total succeed number of checkpoints": { + "$numberInt": "42" + } } }, "version": "4.0.25" From 9c33d36818368addbd7b181228d1666d51697488 Mon Sep 17 00:00:00 2001 From: Tim Chan Date: Wed, 8 Jan 2025 14:16:10 -0800 Subject: [PATCH 06/19] Wrapped up mongodb telegraf metrics --- .chloggen/chan-tim_mongodbMetrics.yaml | 27 ++ receiver/mongodbreceiver/documentation.md | 32 +++ .../internal/metadata/generated_config.go | 16 ++ .../metadata/generated_config_test.go | 8 + .../internal/metadata/generated_metrics.go | 246 +++++++++++++++++- .../metadata/generated_metrics_test.go | 68 ++++- .../internal/metadata/testdata/config.yaml | 16 ++ receiver/mongodbreceiver/metadata.yaml | 32 +++ receiver/mongodbreceiver/metrics.go | 54 ++++ receiver/mongodbreceiver/scraper.go | 3 + receiver/mongodbreceiver/scraper_test.go | 3 + receiver/mongodbreceiver/testdata/admin.json | 3 +- .../testdata/scraper/expected.yaml | 128 ++------- .../testdata/scraper/partial_scrape.yaml | 22 +- 14 files changed, 535 insertions(+), 123 
deletions(-)
 create mode 100644 .chloggen/chan-tim_mongodbMetrics.yaml
diff --git a/.chloggen/chan-tim_mongodbMetrics.yaml b/.chloggen/chan-tim_mongodbMetrics.yaml
new file mode 100644
index 0000000000000..e9452318800b3
--- /dev/null
+++ b/.chloggen/chan-tim_mongodbMetrics.yaml
@@ -0,0 +1,27 @@
+# Use this changelog template to create an entry for release notes.
+
+# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix'
+change_type: enhancement
+
+# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver)
+component: mongodbreceiver
+
+# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`).
+note: Added new MongoDB metrics to achieve parity with Telegraf
+
+# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists.
+issues: [37227]
+
+# (Optional) One or more lines of additional information to render under the primary note.
+# These lines will be padded with 2 spaces and then inserted directly into the document.
+# Use pipe (|) for multiline entries.
+subtext:
+
+# If your change doesn't affect end users or the exported elements of any package,
+# you should instead start your pull request title with [chore] or use the "Skip Changelog" label.
+# Optional: The change log or logs in which this entry should be included.
+# e.g. '[user]' or '[user, api]'
+# Include 'user' if the change is relevant to end users.
+# Include 'api' if there is a change to a library API.
+# Default: '[user]'
+change_logs: []
\ No newline at end of file
diff --git a/receiver/mongodbreceiver/documentation.md b/receiver/mongodbreceiver/documentation.md
index 440ad2b6ad985..c8b698cb7cfb1 100644
--- a/receiver/mongodbreceiver/documentation.md
+++ b/receiver/mongodbreceiver/documentation.md
@@ -28,6 +28,14 @@ The number of write operations currently being processed.
 | ---- | ----------- | ---------- | ----------------------- | --------- |
 | {writes} | Sum | Int | Cumulative | false |
+### mongodb.cache.dirty.percent
+
+The percentage of WiredTiger cache that is dirty.
+
+| Unit | Metric Type | Value Type |
+| ---- | ----------- | ---------- |
+| 1 | Gauge | Double |
+
 ### mongodb.cache.operations
 
 The number of cache operations of the instance.
@@ -42,6 +50,14 @@ The number of cache operations of the instance.
 | ---- | ----------- | ------ |
 | type | The result of a cache request. | Str: ``hit``, ``miss`` |
+### mongodb.cache.used.percent
+
+The percentage of WiredTiger cache in use.
+
+| Unit | Metric Type | Value Type |
+| ---- | ----------- | ---------- |
+| 1 | Gauge | Double |
+
 ### mongodb.collection.count
 
 The number of collections.
@@ -284,6 +300,14 @@ The total time spent performing operations.
 | ---- | ----------- | ------ |
 | operation | The MongoDB operation being counted. | Str: ``insert``, ``query``, ``update``, ``delete``, ``getmore``, ``command`` |
+### mongodb.page_faults
+
+The number of page faults.
+
+| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic |
+| ---- | ----------- | ---------- | ----------------------- | --------- |
+| {faults} | Sum | Int | Cumulative | true |
+
 ### mongodb.queries_per_sec
 
 The number of queries executed per second.
@@ -366,6 +390,14 @@ The number of updates executed per second.
 | ---- | ----------- | ---------- |
 | {update}/s | Gauge | Double |
+### mongodb.wtcache.bytes.read
+
+The number of bytes read into the WiredTiger cache. 
+ +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| By | Sum | Int | Cumulative | true | + ## Optional Metrics The following metrics are not emitted by default. Each of them can be enabled by applying the following configuration: diff --git a/receiver/mongodbreceiver/internal/metadata/generated_config.go b/receiver/mongodbreceiver/internal/metadata/generated_config.go index 60754566d73f9..b43a3dcfefa74 100644 --- a/receiver/mongodbreceiver/internal/metadata/generated_config.go +++ b/receiver/mongodbreceiver/internal/metadata/generated_config.go @@ -30,7 +30,9 @@ func (ms *MetricConfig) Unmarshal(parser *confmap.Conf) error { type MetricsConfig struct { MongodbActiveReads MetricConfig `mapstructure:"mongodb.active.reads"` MongodbActiveWrites MetricConfig `mapstructure:"mongodb.active.writes"` + MongodbCacheDirtyPercent MetricConfig `mapstructure:"mongodb.cache.dirty.percent"` MongodbCacheOperations MetricConfig `mapstructure:"mongodb.cache.operations"` + MongodbCacheUsedPercent MetricConfig `mapstructure:"mongodb.cache.used.percent"` MongodbCollectionCount MetricConfig `mapstructure:"mongodb.collection.count"` MongodbCommandsPerSec MetricConfig `mapstructure:"mongodb.commands_per_sec"` MongodbConnectionCount MetricConfig `mapstructure:"mongodb.connection.count"` @@ -62,6 +64,7 @@ type MetricsConfig struct { MongodbOperationLatencyTime MetricConfig `mapstructure:"mongodb.operation.latency.time"` MongodbOperationReplCount MetricConfig `mapstructure:"mongodb.operation.repl.count"` MongodbOperationTime MetricConfig `mapstructure:"mongodb.operation.time"` + MongodbPageFaults MetricConfig `mapstructure:"mongodb.page_faults"` MongodbQueriesPerSec MetricConfig `mapstructure:"mongodb.queries_per_sec"` MongodbReplCommandsPerSec MetricConfig `mapstructure:"mongodb.repl_commands_per_sec"` MongodbReplDeletesPerSec MetricConfig `mapstructure:"mongodb.repl_deletes_per_sec"` @@ -73,6 +76,7 @@ type MetricsConfig struct { MongodbStorageSize MetricConfig `mapstructure:"mongodb.storage.size"` MongodbUpdatesPerSec MetricConfig `mapstructure:"mongodb.updates_per_sec"` MongodbUptime MetricConfig `mapstructure:"mongodb.uptime"` + MongodbWtcacheBytesRead MetricConfig `mapstructure:"mongodb.wtcache.bytes.read"` } func DefaultMetricsConfig() MetricsConfig { @@ -83,9 +87,15 @@ func DefaultMetricsConfig() MetricsConfig { MongodbActiveWrites: MetricConfig{ Enabled: true, }, + MongodbCacheDirtyPercent: MetricConfig{ + Enabled: true, + }, MongodbCacheOperations: MetricConfig{ Enabled: true, }, + MongodbCacheUsedPercent: MetricConfig{ + Enabled: true, + }, MongodbCollectionCount: MetricConfig{ Enabled: true, }, @@ -179,6 +189,9 @@ func DefaultMetricsConfig() MetricsConfig { MongodbOperationTime: MetricConfig{ Enabled: true, }, + MongodbPageFaults: MetricConfig{ + Enabled: true, + }, MongodbQueriesPerSec: MetricConfig{ Enabled: true, }, @@ -212,6 +225,9 @@ func DefaultMetricsConfig() MetricsConfig { MongodbUptime: MetricConfig{ Enabled: false, }, + MongodbWtcacheBytesRead: MetricConfig{ + Enabled: true, + }, } } diff --git a/receiver/mongodbreceiver/internal/metadata/generated_config_test.go b/receiver/mongodbreceiver/internal/metadata/generated_config_test.go index d527783e5903f..31ec4f99a34a2 100644 --- a/receiver/mongodbreceiver/internal/metadata/generated_config_test.go +++ b/receiver/mongodbreceiver/internal/metadata/generated_config_test.go @@ -27,7 +27,9 @@ func TestMetricsBuilderConfig(t *testing.T) { Metrics: 
MetricsConfig{ MongodbActiveReads: MetricConfig{Enabled: true}, MongodbActiveWrites: MetricConfig{Enabled: true}, + MongodbCacheDirtyPercent: MetricConfig{Enabled: true}, MongodbCacheOperations: MetricConfig{Enabled: true}, + MongodbCacheUsedPercent: MetricConfig{Enabled: true}, MongodbCollectionCount: MetricConfig{Enabled: true}, MongodbCommandsPerSec: MetricConfig{Enabled: true}, MongodbConnectionCount: MetricConfig{Enabled: true}, @@ -59,6 +61,7 @@ func TestMetricsBuilderConfig(t *testing.T) { MongodbOperationLatencyTime: MetricConfig{Enabled: true}, MongodbOperationReplCount: MetricConfig{Enabled: true}, MongodbOperationTime: MetricConfig{Enabled: true}, + MongodbPageFaults: MetricConfig{Enabled: true}, MongodbQueriesPerSec: MetricConfig{Enabled: true}, MongodbReplCommandsPerSec: MetricConfig{Enabled: true}, MongodbReplDeletesPerSec: MetricConfig{Enabled: true}, @@ -70,6 +73,7 @@ func TestMetricsBuilderConfig(t *testing.T) { MongodbStorageSize: MetricConfig{Enabled: true}, MongodbUpdatesPerSec: MetricConfig{Enabled: true}, MongodbUptime: MetricConfig{Enabled: true}, + MongodbWtcacheBytesRead: MetricConfig{Enabled: true}, }, ResourceAttributes: ResourceAttributesConfig{ Database: ResourceAttributeConfig{Enabled: true}, @@ -84,7 +88,9 @@ func TestMetricsBuilderConfig(t *testing.T) { Metrics: MetricsConfig{ MongodbActiveReads: MetricConfig{Enabled: false}, MongodbActiveWrites: MetricConfig{Enabled: false}, + MongodbCacheDirtyPercent: MetricConfig{Enabled: false}, MongodbCacheOperations: MetricConfig{Enabled: false}, + MongodbCacheUsedPercent: MetricConfig{Enabled: false}, MongodbCollectionCount: MetricConfig{Enabled: false}, MongodbCommandsPerSec: MetricConfig{Enabled: false}, MongodbConnectionCount: MetricConfig{Enabled: false}, @@ -116,6 +122,7 @@ func TestMetricsBuilderConfig(t *testing.T) { MongodbOperationLatencyTime: MetricConfig{Enabled: false}, MongodbOperationReplCount: MetricConfig{Enabled: false}, MongodbOperationTime: MetricConfig{Enabled: false}, + MongodbPageFaults: MetricConfig{Enabled: false}, MongodbQueriesPerSec: MetricConfig{Enabled: false}, MongodbReplCommandsPerSec: MetricConfig{Enabled: false}, MongodbReplDeletesPerSec: MetricConfig{Enabled: false}, @@ -127,6 +134,7 @@ func TestMetricsBuilderConfig(t *testing.T) { MongodbStorageSize: MetricConfig{Enabled: false}, MongodbUpdatesPerSec: MetricConfig{Enabled: false}, MongodbUptime: MetricConfig{Enabled: false}, + MongodbWtcacheBytesRead: MetricConfig{Enabled: false}, }, ResourceAttributes: ResourceAttributesConfig{ Database: ResourceAttributeConfig{Enabled: false}, diff --git a/receiver/mongodbreceiver/internal/metadata/generated_metrics.go b/receiver/mongodbreceiver/internal/metadata/generated_metrics.go index 07fb05cccaa5e..9028f5fb8eddf 100644 --- a/receiver/mongodbreceiver/internal/metadata/generated_metrics.go +++ b/receiver/mongodbreceiver/internal/metadata/generated_metrics.go @@ -12,7 +12,7 @@ import ( "go.opentelemetry.io/collector/receiver" ) -// AttributeConnectionType specifies the a value connection_type attribute. +// AttributeConnectionType specifies the value connection_type attribute. type AttributeConnectionType int const ( @@ -42,7 +42,7 @@ var MapAttributeConnectionType = map[string]AttributeConnectionType{ "current": AttributeConnectionTypeCurrent, } -// AttributeLockMode specifies the a value lock_mode attribute. +// AttributeLockMode specifies the value lock_mode attribute. 
type AttributeLockMode int const ( @@ -76,7 +76,7 @@ var MapAttributeLockMode = map[string]AttributeLockMode{ "intent_exclusive": AttributeLockModeIntentExclusive, } -// AttributeLockType specifies the a value lock_type attribute. +// AttributeLockType specifies the value lock_type attribute. type AttributeLockType int const ( @@ -126,7 +126,7 @@ var MapAttributeLockType = map[string]AttributeLockType{ "oplog": AttributeLockTypeOplog, } -// AttributeMemoryType specifies the a value memory_type attribute. +// AttributeMemoryType specifies the value memory_type attribute. type AttributeMemoryType int const ( @@ -152,7 +152,7 @@ var MapAttributeMemoryType = map[string]AttributeMemoryType{ "virtual": AttributeMemoryTypeVirtual, } -// AttributeOperation specifies the a value operation attribute. +// AttributeOperation specifies the value operation attribute. type AttributeOperation int const ( @@ -194,7 +194,7 @@ var MapAttributeOperation = map[string]AttributeOperation{ "command": AttributeOperationCommand, } -// AttributeOperationLatency specifies the a value operation_latency attribute. +// AttributeOperationLatency specifies the value operation_latency attribute. type AttributeOperationLatency int const ( @@ -224,7 +224,7 @@ var MapAttributeOperationLatency = map[string]AttributeOperationLatency{ "command": AttributeOperationLatencyCommand, } -// AttributeType specifies the a value type attribute. +// AttributeType specifies the value type attribute. type AttributeType int const ( @@ -352,6 +352,55 @@ func newMetricMongodbActiveWrites(cfg MetricConfig) metricMongodbActiveWrites { return m } +type metricMongodbCacheDirtyPercent struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills mongodb.cache.dirty.percent metric with initial data. +func (m *metricMongodbCacheDirtyPercent) init() { + m.data.SetName("mongodb.cache.dirty.percent") + m.data.SetDescription("The percentage of WiredTiger cache that is dirty.") + m.data.SetUnit("1") + m.data.SetEmptyGauge() +} + +func (m *metricMongodbCacheDirtyPercent) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetDoubleValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricMongodbCacheDirtyPercent) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricMongodbCacheDirtyPercent) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricMongodbCacheDirtyPercent(cfg MetricConfig) metricMongodbCacheDirtyPercent { + m := metricMongodbCacheDirtyPercent{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + type metricMongodbCacheOperations struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. 
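Every generated metric type in this file follows the same lifecycle: init sets the name, unit, and data type; recordDataPoint appends a point only when the metric is enabled; emit moves the accumulated points into the output slice and re-initializes the buffer. A minimal sketch of how a scraper could exercise the recorders this patch adds is below. It is illustrative only: DefaultMetricsBuilderConfig follows the usual mdatagen convention rather than appearing in this diff, and the sample values are made up, not real serverStatus output.

```go
package metadata_test // hypothetical test file; the internal metadata package is importable only within this module

import (
	"testing"
	"time"

	"go.opentelemetry.io/collector/pdata/pcommon"
	"go.opentelemetry.io/collector/receiver/receivertest"

	"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/mongodbreceiver/internal/metadata"
)

func TestNewRecordersSketch(t *testing.T) {
	// Build a MetricsBuilder from the generated defaults; per-metric
	// enabled flags come from generated_config.go.
	mb := metadata.NewMetricsBuilder(metadata.DefaultMetricsBuilderConfig(), receivertest.NewNopSettings())
	now := pcommon.NewTimestampFromTime(time.Now())

	// Sample values only. The dirty percentage mirrors the computation
	// added later in this patch:
	// tracked dirty bytes / maximum bytes configured * 100.
	mb.RecordMongodbCacheDirtyPercentDataPoint(now, float64(512)/float64(4096)*100)
	mb.RecordMongodbCacheUsedPercentDataPoint(now, float64(3072)/float64(4096)*100)
	mb.RecordMongodbPageFaultsDataPoint(now, 1)
	mb.RecordMongodbWtcacheBytesReadDataPoint(now, 10)

	// Emit moves the accumulated data points into a pmetric.Metrics value
	// and resets each metric buffer for the next scrape.
	metrics := mb.Emit()
	t.Logf("emitted %d metrics", metrics.MetricCount()) // 4 while all four default to enabled
}
```

Note that a later commit in this series flips several of these metrics to disabled by default; because each recordDataPoint returns early when its metric is disabled, the calls above silently become no-ops unless the metrics are re-enabled in the receiver configuration.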
@@ -405,6 +454,55 @@ func newMetricMongodbCacheOperations(cfg MetricConfig) metricMongodbCacheOperati return m } +type metricMongodbCacheUsedPercent struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills mongodb.cache.used.percent metric with initial data. +func (m *metricMongodbCacheUsedPercent) init() { + m.data.SetName("mongodb.cache.used.percent") + m.data.SetDescription("The percentage of WiredTiger cache in use.") + m.data.SetUnit("1") + m.data.SetEmptyGauge() +} + +func (m *metricMongodbCacheUsedPercent) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetDoubleValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricMongodbCacheUsedPercent) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricMongodbCacheUsedPercent) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricMongodbCacheUsedPercent(cfg MetricConfig) metricMongodbCacheUsedPercent { + m := metricMongodbCacheUsedPercent{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + type metricMongodbCollectionCount struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. @@ -2000,6 +2098,57 @@ func newMetricMongodbOperationTime(cfg MetricConfig) metricMongodbOperationTime return m } +type metricMongodbPageFaults struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills mongodb.page_faults metric with initial data. +func (m *metricMongodbPageFaults) init() { + m.data.SetName("mongodb.page_faults") + m.data.SetDescription("The number of page faults.") + m.data.SetUnit("{faults}") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) +} + +func (m *metricMongodbPageFaults) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricMongodbPageFaults) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
+func (m *metricMongodbPageFaults) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricMongodbPageFaults(cfg MetricConfig) metricMongodbPageFaults { + m := metricMongodbPageFaults{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + type metricMongodbQueriesPerSec struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. @@ -2545,6 +2694,57 @@ func newMetricMongodbUptime(cfg MetricConfig) metricMongodbUptime { return m } +type metricMongodbWtcacheBytesRead struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills mongodb.wtcache.bytes.read metric with initial data. +func (m *metricMongodbWtcacheBytesRead) init() { + m.data.SetName("mongodb.wtcache.bytes.read") + m.data.SetDescription("The number of bytes read into the WiredTiger cache.") + m.data.SetUnit("By") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) +} + +func (m *metricMongodbWtcacheBytesRead) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricMongodbWtcacheBytesRead) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricMongodbWtcacheBytesRead) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricMongodbWtcacheBytesRead(cfg MetricConfig) metricMongodbWtcacheBytesRead { + m := metricMongodbWtcacheBytesRead{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + // MetricsBuilder provides an interface for scrapers to report metrics while taking care of all the transformations // required to produce metric representation defined in metadata and user config. 
type MetricsBuilder struct { @@ -2557,7 +2757,9 @@ type MetricsBuilder struct { resourceAttributeExcludeFilter map[string]filter.Filter metricMongodbActiveReads metricMongodbActiveReads metricMongodbActiveWrites metricMongodbActiveWrites + metricMongodbCacheDirtyPercent metricMongodbCacheDirtyPercent metricMongodbCacheOperations metricMongodbCacheOperations + metricMongodbCacheUsedPercent metricMongodbCacheUsedPercent metricMongodbCollectionCount metricMongodbCollectionCount metricMongodbCommandsPerSec metricMongodbCommandsPerSec metricMongodbConnectionCount metricMongodbConnectionCount @@ -2589,6 +2791,7 @@ type MetricsBuilder struct { metricMongodbOperationLatencyTime metricMongodbOperationLatencyTime metricMongodbOperationReplCount metricMongodbOperationReplCount metricMongodbOperationTime metricMongodbOperationTime + metricMongodbPageFaults metricMongodbPageFaults metricMongodbQueriesPerSec metricMongodbQueriesPerSec metricMongodbReplCommandsPerSec metricMongodbReplCommandsPerSec metricMongodbReplDeletesPerSec metricMongodbReplDeletesPerSec @@ -2600,6 +2803,7 @@ type MetricsBuilder struct { metricMongodbStorageSize metricMongodbStorageSize metricMongodbUpdatesPerSec metricMongodbUpdatesPerSec metricMongodbUptime metricMongodbUptime + metricMongodbWtcacheBytesRead metricMongodbWtcacheBytesRead } // MetricBuilderOption applies changes to default metrics builder. @@ -2628,7 +2832,9 @@ func NewMetricsBuilder(mbc MetricsBuilderConfig, settings receiver.Settings, opt buildInfo: settings.BuildInfo, metricMongodbActiveReads: newMetricMongodbActiveReads(mbc.Metrics.MongodbActiveReads), metricMongodbActiveWrites: newMetricMongodbActiveWrites(mbc.Metrics.MongodbActiveWrites), + metricMongodbCacheDirtyPercent: newMetricMongodbCacheDirtyPercent(mbc.Metrics.MongodbCacheDirtyPercent), metricMongodbCacheOperations: newMetricMongodbCacheOperations(mbc.Metrics.MongodbCacheOperations), + metricMongodbCacheUsedPercent: newMetricMongodbCacheUsedPercent(mbc.Metrics.MongodbCacheUsedPercent), metricMongodbCollectionCount: newMetricMongodbCollectionCount(mbc.Metrics.MongodbCollectionCount), metricMongodbCommandsPerSec: newMetricMongodbCommandsPerSec(mbc.Metrics.MongodbCommandsPerSec), metricMongodbConnectionCount: newMetricMongodbConnectionCount(mbc.Metrics.MongodbConnectionCount), @@ -2660,6 +2866,7 @@ func NewMetricsBuilder(mbc MetricsBuilderConfig, settings receiver.Settings, opt metricMongodbOperationLatencyTime: newMetricMongodbOperationLatencyTime(mbc.Metrics.MongodbOperationLatencyTime), metricMongodbOperationReplCount: newMetricMongodbOperationReplCount(mbc.Metrics.MongodbOperationReplCount), metricMongodbOperationTime: newMetricMongodbOperationTime(mbc.Metrics.MongodbOperationTime), + metricMongodbPageFaults: newMetricMongodbPageFaults(mbc.Metrics.MongodbPageFaults), metricMongodbQueriesPerSec: newMetricMongodbQueriesPerSec(mbc.Metrics.MongodbQueriesPerSec), metricMongodbReplCommandsPerSec: newMetricMongodbReplCommandsPerSec(mbc.Metrics.MongodbReplCommandsPerSec), metricMongodbReplDeletesPerSec: newMetricMongodbReplDeletesPerSec(mbc.Metrics.MongodbReplDeletesPerSec), @@ -2671,6 +2878,7 @@ func NewMetricsBuilder(mbc MetricsBuilderConfig, settings receiver.Settings, opt metricMongodbStorageSize: newMetricMongodbStorageSize(mbc.Metrics.MongodbStorageSize), metricMongodbUpdatesPerSec: newMetricMongodbUpdatesPerSec(mbc.Metrics.MongodbUpdatesPerSec), metricMongodbUptime: newMetricMongodbUptime(mbc.Metrics.MongodbUptime), + metricMongodbWtcacheBytesRead: 
newMetricMongodbWtcacheBytesRead(mbc.Metrics.MongodbWtcacheBytesRead), resourceAttributeIncludeFilter: make(map[string]filter.Filter), resourceAttributeExcludeFilter: make(map[string]filter.Filter), } @@ -2763,7 +2971,9 @@ func (mb *MetricsBuilder) EmitForResource(options ...ResourceMetricsOption) { ils.Metrics().EnsureCapacity(mb.metricsCapacity) mb.metricMongodbActiveReads.emit(ils.Metrics()) mb.metricMongodbActiveWrites.emit(ils.Metrics()) + mb.metricMongodbCacheDirtyPercent.emit(ils.Metrics()) mb.metricMongodbCacheOperations.emit(ils.Metrics()) + mb.metricMongodbCacheUsedPercent.emit(ils.Metrics()) mb.metricMongodbCollectionCount.emit(ils.Metrics()) mb.metricMongodbCommandsPerSec.emit(ils.Metrics()) mb.metricMongodbConnectionCount.emit(ils.Metrics()) @@ -2795,6 +3005,7 @@ func (mb *MetricsBuilder) EmitForResource(options ...ResourceMetricsOption) { mb.metricMongodbOperationLatencyTime.emit(ils.Metrics()) mb.metricMongodbOperationReplCount.emit(ils.Metrics()) mb.metricMongodbOperationTime.emit(ils.Metrics()) + mb.metricMongodbPageFaults.emit(ils.Metrics()) mb.metricMongodbQueriesPerSec.emit(ils.Metrics()) mb.metricMongodbReplCommandsPerSec.emit(ils.Metrics()) mb.metricMongodbReplDeletesPerSec.emit(ils.Metrics()) @@ -2806,6 +3017,7 @@ func (mb *MetricsBuilder) EmitForResource(options ...ResourceMetricsOption) { mb.metricMongodbStorageSize.emit(ils.Metrics()) mb.metricMongodbUpdatesPerSec.emit(ils.Metrics()) mb.metricMongodbUptime.emit(ils.Metrics()) + mb.metricMongodbWtcacheBytesRead.emit(ils.Metrics()) for _, op := range options { op.apply(rm) @@ -2847,11 +3059,21 @@ func (mb *MetricsBuilder) RecordMongodbActiveWritesDataPoint(ts pcommon.Timestam mb.metricMongodbActiveWrites.recordDataPoint(mb.startTime, ts, val) } +// RecordMongodbCacheDirtyPercentDataPoint adds a data point to mongodb.cache.dirty.percent metric. +func (mb *MetricsBuilder) RecordMongodbCacheDirtyPercentDataPoint(ts pcommon.Timestamp, val float64) { + mb.metricMongodbCacheDirtyPercent.recordDataPoint(mb.startTime, ts, val) +} + // RecordMongodbCacheOperationsDataPoint adds a data point to mongodb.cache.operations metric. func (mb *MetricsBuilder) RecordMongodbCacheOperationsDataPoint(ts pcommon.Timestamp, val int64, typeAttributeValue AttributeType) { mb.metricMongodbCacheOperations.recordDataPoint(mb.startTime, ts, val, typeAttributeValue.String()) } +// RecordMongodbCacheUsedPercentDataPoint adds a data point to mongodb.cache.used.percent metric. +func (mb *MetricsBuilder) RecordMongodbCacheUsedPercentDataPoint(ts pcommon.Timestamp, val float64) { + mb.metricMongodbCacheUsedPercent.recordDataPoint(mb.startTime, ts, val) +} + // RecordMongodbCollectionCountDataPoint adds a data point to mongodb.collection.count metric. func (mb *MetricsBuilder) RecordMongodbCollectionCountDataPoint(ts pcommon.Timestamp, val int64) { mb.metricMongodbCollectionCount.recordDataPoint(mb.startTime, ts, val) @@ -3007,6 +3229,11 @@ func (mb *MetricsBuilder) RecordMongodbOperationTimeDataPoint(ts pcommon.Timesta mb.metricMongodbOperationTime.recordDataPoint(mb.startTime, ts, val, operationAttributeValue.String()) } +// RecordMongodbPageFaultsDataPoint adds a data point to mongodb.page_faults metric. +func (mb *MetricsBuilder) RecordMongodbPageFaultsDataPoint(ts pcommon.Timestamp, val int64) { + mb.metricMongodbPageFaults.recordDataPoint(mb.startTime, ts, val) +} + // RecordMongodbQueriesPerSecDataPoint adds a data point to mongodb.queries_per_sec metric. 
func (mb *MetricsBuilder) RecordMongodbQueriesPerSecDataPoint(ts pcommon.Timestamp, val float64) { mb.metricMongodbQueriesPerSec.recordDataPoint(mb.startTime, ts, val) @@ -3062,6 +3289,11 @@ func (mb *MetricsBuilder) RecordMongodbUptimeDataPoint(ts pcommon.Timestamp, val mb.metricMongodbUptime.recordDataPoint(mb.startTime, ts, val) } +// RecordMongodbWtcacheBytesReadDataPoint adds a data point to mongodb.wtcache.bytes.read metric. +func (mb *MetricsBuilder) RecordMongodbWtcacheBytesReadDataPoint(ts pcommon.Timestamp, val int64) { + mb.metricMongodbWtcacheBytesRead.recordDataPoint(mb.startTime, ts, val) +} + // Reset resets metrics builder to its initial state. It should be used when external metrics source is restarted, // and metrics builder should update its startTime and reset it's internal state accordingly. func (mb *MetricsBuilder) Reset(options ...MetricBuilderOption) { diff --git a/receiver/mongodbreceiver/internal/metadata/generated_metrics_test.go b/receiver/mongodbreceiver/internal/metadata/generated_metrics_test.go index d753e63ba597b..d95f21181e905 100644 --- a/receiver/mongodbreceiver/internal/metadata/generated_metrics_test.go +++ b/receiver/mongodbreceiver/internal/metadata/generated_metrics_test.go @@ -76,10 +76,18 @@ func TestMetricsBuilder(t *testing.T) { allMetricsCount++ mb.RecordMongodbActiveWritesDataPoint(ts, 1) + defaultMetricsCount++ + allMetricsCount++ + mb.RecordMongodbCacheDirtyPercentDataPoint(ts, 1) + defaultMetricsCount++ allMetricsCount++ mb.RecordMongodbCacheOperationsDataPoint(ts, 1, AttributeTypeHit) + defaultMetricsCount++ + allMetricsCount++ + mb.RecordMongodbCacheUsedPercentDataPoint(ts, 1) + defaultMetricsCount++ allMetricsCount++ mb.RecordMongodbCollectionCountDataPoint(ts, 1) @@ -198,6 +206,10 @@ func TestMetricsBuilder(t *testing.T) { allMetricsCount++ mb.RecordMongodbOperationTimeDataPoint(ts, 1, AttributeOperationInsert) + defaultMetricsCount++ + allMetricsCount++ + mb.RecordMongodbPageFaultsDataPoint(ts, 1) + defaultMetricsCount++ allMetricsCount++ mb.RecordMongodbQueriesPerSecDataPoint(ts, 1) @@ -241,6 +253,10 @@ func TestMetricsBuilder(t *testing.T) { allMetricsCount++ mb.RecordMongodbUptimeDataPoint(ts, 1) + defaultMetricsCount++ + allMetricsCount++ + mb.RecordMongodbWtcacheBytesReadDataPoint(ts, 1) + rb := mb.NewResourceBuilder() rb.SetDatabase("database-val") rb.SetServerAddress("server.address-val") @@ -295,6 +311,18 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, ts, dp.Timestamp()) assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) assert.Equal(t, int64(1), dp.IntValue()) + case "mongodb.cache.dirty.percent": + assert.False(t, validatedMetrics["mongodb.cache.dirty.percent"], "Found a duplicate in the metrics slice: mongodb.cache.dirty.percent") + validatedMetrics["mongodb.cache.dirty.percent"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "The percentage of WiredTiger cache that is dirty.", ms.At(i).Description()) + assert.Equal(t, "1", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType()) + assert.InDelta(t, float64(1), dp.DoubleValue(), 0.01) case "mongodb.cache.operations": assert.False(t, validatedMetrics["mongodb.cache.operations"], "Found a duplicate in the metrics slice: mongodb.cache.operations") validatedMetrics["mongodb.cache.operations"] = 
true @@ -312,6 +340,18 @@ func TestMetricsBuilder(t *testing.T) { attrVal, ok := dp.Attributes().Get("type") assert.True(t, ok) assert.EqualValues(t, "hit", attrVal.Str()) + case "mongodb.cache.used.percent": + assert.False(t, validatedMetrics["mongodb.cache.used.percent"], "Found a duplicate in the metrics slice: mongodb.cache.used.percent") + validatedMetrics["mongodb.cache.used.percent"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "The percentage of WiredTiger cache in use.", ms.At(i).Description()) + assert.Equal(t, "1", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType()) + assert.InDelta(t, float64(1), dp.DoubleValue(), 0.01) case "mongodb.collection.count": assert.False(t, validatedMetrics["mongodb.collection.count"], "Found a duplicate in the metrics slice: mongodb.collection.count") validatedMetrics["mongodb.collection.count"] = true @@ -780,6 +820,20 @@ func TestMetricsBuilder(t *testing.T) { attrVal, ok := dp.Attributes().Get("operation") assert.True(t, ok) assert.EqualValues(t, "insert", attrVal.Str()) + case "mongodb.page_faults": + assert.False(t, validatedMetrics["mongodb.page_faults"], "Found a duplicate in the metrics slice: mongodb.page_faults") + validatedMetrics["mongodb.page_faults"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "The number of page faults.", ms.At(i).Description()) + assert.Equal(t, "{faults}", ms.At(i).Unit()) + assert.True(t, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) case "mongodb.queries_per_sec": assert.False(t, validatedMetrics["mongodb.queries_per_sec"], "Found a duplicate in the metrics slice: mongodb.queries_per_sec") validatedMetrics["mongodb.queries_per_sec"] = true @@ -912,12 +966,14 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, "The amount of time that the server has been running.", ms.At(i).Description()) assert.Equal(t, "ms", ms.At(i).Unit()) assert.True(t, ms.At(i).Sum().IsMonotonic()) - assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) - dp := ms.At(i).Sum().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) + case "mongodb.wtcache.bytes.read": + assert.False(t, validatedMetrics["mongodb.wtcache.bytes.read"], "Found a duplicate in the metrics slice: mongodb.wtcache.bytes.read") + validatedMetrics["mongodb.wtcache.bytes.read"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "The number of bytes read into the WiredTiger cache.", ms.At(i).Description()) + assert.Equal(t, "By", ms.At(i).Unit()) + assert.True(t, ms.At(i).Sum().IsMonotonic()) } } }) diff --git a/receiver/mongodbreceiver/internal/metadata/testdata/config.yaml 
b/receiver/mongodbreceiver/internal/metadata/testdata/config.yaml index e9f47c38039ba..75316c9a30e22 100644 --- a/receiver/mongodbreceiver/internal/metadata/testdata/config.yaml +++ b/receiver/mongodbreceiver/internal/metadata/testdata/config.yaml @@ -5,8 +5,12 @@ all_set: enabled: true mongodb.active.writes: enabled: true + mongodb.cache.dirty.percent: + enabled: true mongodb.cache.operations: enabled: true + mongodb.cache.used.percent: + enabled: true mongodb.collection.count: enabled: true mongodb.commands_per_sec: @@ -69,6 +73,8 @@ all_set: enabled: true mongodb.operation.time: enabled: true + mongodb.page_faults: + enabled: true mongodb.queries_per_sec: enabled: true mongodb.repl_commands_per_sec: @@ -91,6 +97,8 @@ all_set: enabled: true mongodb.uptime: enabled: true + mongodb.wtcache.bytes.read: + enabled: true resource_attributes: database: enabled: true @@ -104,8 +112,12 @@ none_set: enabled: false mongodb.active.writes: enabled: false + mongodb.cache.dirty.percent: + enabled: false mongodb.cache.operations: enabled: false + mongodb.cache.used.percent: + enabled: false mongodb.collection.count: enabled: false mongodb.commands_per_sec: @@ -168,6 +180,8 @@ none_set: enabled: false mongodb.operation.time: enabled: false + mongodb.page_faults: + enabled: false mongodb.queries_per_sec: enabled: false mongodb.repl_commands_per_sec: @@ -190,6 +204,8 @@ none_set: enabled: false mongodb.uptime: enabled: false + mongodb.wtcache.bytes.read: + enabled: false resource_attributes: database: enabled: false diff --git a/receiver/mongodbreceiver/metadata.yaml b/receiver/mongodbreceiver/metadata.yaml index c4fefdd5cbdb5..0e6b8626bf246 100644 --- a/receiver/mongodbreceiver/metadata.yaml +++ b/receiver/mongodbreceiver/metadata.yaml @@ -480,6 +480,38 @@ metrics: aggregation_temporality: cumulative monotonic: false attributes: [] + mongodb.wtcache.bytes.read: + description: The number of bytes read into the WiredTiger cache. + unit: "By" + enabled: true + sum: + value_type: int + aggregation_temporality: cumulative + monotonic: true + attributes: [] + mongodb.cache.dirty.percent: + description: The percentage of WiredTiger cache that is dirty. + unit: "1" + enabled: true + gauge: + value_type: double + attributes: [] + mongodb.cache.used.percent: + description: The percentage of WiredTiger cache in use. + unit: "1" + enabled: true + gauge: + value_type: double + attributes: [] + mongodb.page_faults: + description: The number of page faults. 
+ unit: "{faults}" + enabled: true + sum: + value_type: int + aggregation_temporality: cumulative + monotonic: true + attributes: [] tests: config: diff --git a/receiver/mongodbreceiver/metrics.go b/receiver/mongodbreceiver/metrics.go index bcf9be33a7d9f..5a0b0a16afba3 100644 --- a/receiver/mongodbreceiver/metrics.go +++ b/receiver/mongodbreceiver/metrics.go @@ -392,6 +392,57 @@ func (s *mongodbScraper) recordActiveReads(now pcommon.Timestamp, doc bson.M, er s.mb.RecordMongodbActiveReadsDataPoint(now, val) } +func (s *mongodbScraper) recordWTCacheBytes(now pcommon.Timestamp, doc bson.M, errs *scrapererror.ScrapeErrors) { + metricPath := []string{"wiredTiger", "cache", "bytes read into cache"} + metricName := "mongodb.wtcache.bytes.read" + val, err := collectMetric(doc, metricPath) + if err != nil { + errs.AddPartial(1, fmt.Errorf(collectMetricError, metricName, err)) + return + } + s.mb.RecordMongodbWtcacheBytesReadDataPoint(now, val) +} + +func (s *mongodbScraper) recordCachePercentages(now pcommon.Timestamp, doc bson.M, errs *scrapererror.ScrapeErrors) { + wt, ok := doc["wiredTiger"].(bson.M) + if !ok { + errs.AddPartial(2, errors.New("failed to find wiredTiger metrics")) + return + } + + cache, ok := wt["cache"].(bson.M) + if !ok { + errs.AddPartial(2, errors.New("failed to find cache metrics")) + return + } + + // Calculate dirty percentage + trackedDirtyBytes, err1 := collectMetric(cache, []string{"tracked dirty bytes in the cache"}) + maxBytes, err2 := collectMetric(cache, []string{"maximum bytes configured"}) + if err1 == nil && err2 == nil && maxBytes > 0 { + dirtyPercent := float64(trackedDirtyBytes) / float64(maxBytes) * 100 + s.mb.RecordMongodbCacheDirtyPercentDataPoint(now, dirtyPercent) + } + + // Calculate used percentage + bytesInUse, err3 := collectMetric(cache, []string{"bytes currently in the cache"}) + if err3 == nil && maxBytes > 0 { + usedPercent := float64(bytesInUse) / float64(maxBytes) * 100 + s.mb.RecordMongodbCacheUsedPercentDataPoint(now, usedPercent) + } +} + +func (s *mongodbScraper) recordPageFaults(now pcommon.Timestamp, doc bson.M, errs *scrapererror.ScrapeErrors) { + metricPath := []string{"extra_info", "page_faults"} + metricName := "mongodb.page_faults" + val, err := collectMetric(doc, metricPath) + if err != nil { + errs.AddPartial(1, fmt.Errorf(collectMetricError, metricName, err)) + return + } + s.mb.RecordMongodbPageFaultsDataPoint(now, val) +} + func (s *mongodbScraper) recordCacheOperations(now pcommon.Timestamp, doc bson.M, errs *scrapererror.ScrapeErrors) { storageEngine, err := dig(doc, []string{"storageEngine", "name"}) if err != nil { @@ -710,6 +761,9 @@ func collectMetric(document bson.M, path []string) (int64, error) { } func dig(document bson.M, path []string) (any, error) { + if len(path) == 0 { + return nil, errKeyNotFound + } curItem, remainingPath := path[0], path[1:] value := document[curItem] if value == nil { diff --git a/receiver/mongodbreceiver/scraper.go b/receiver/mongodbreceiver/scraper.go index 270da1a67954b..465b39f7d535b 100644 --- a/receiver/mongodbreceiver/scraper.go +++ b/receiver/mongodbreceiver/scraper.go @@ -261,6 +261,9 @@ func (s *mongodbScraper) recordAdminStats(now pcommon.Timestamp, document bson.M s.recordActiveWrites(now, document, errs) s.recordActiveReads(now, document, errs) s.recordFlushesPerSecond(now, document, errs) + s.recordWTCacheBytes(now, document, errs) + s.recordCachePercentages(now, document, errs) + s.recordPageFaults(now, document, errs) } func (s *mongodbScraper) recordIndexStats(now 
pcommon.Timestamp, indexStats []bson.M, databaseName string, collectionName string, errs *scrapererror.ScrapeErrors) { diff --git a/receiver/mongodbreceiver/scraper_test.go b/receiver/mongodbreceiver/scraper_test.go index 32199c49e3379..d8441c70489f8 100644 --- a/receiver/mongodbreceiver/scraper_test.go +++ b/receiver/mongodbreceiver/scraper_test.go @@ -115,6 +115,9 @@ var ( "failed to collect metric mongodb.active.reads: could not find key for metric", "failed to collect metric mongodb.active.writes: could not find key for metric", "failed to collect metric mongodb.flushes_per_sec: could not find key for metric", + "failed to collect metric mongodb.page_faults: could not find key for metric", + "failed to collect metric mongodb.wtcache.bytes.read: could not find key for metric", + "failed to find wiredTiger metrics", }, "; ")) errAllClientFailedFetch = errors.New( strings.Join( diff --git a/receiver/mongodbreceiver/testdata/admin.json b/receiver/mongodbreceiver/testdata/admin.json index a51a152fe6c9d..872b3d0cced71 100644 --- a/receiver/mongodbreceiver/testdata/admin.json +++ b/receiver/mongodbreceiver/testdata/admin.json @@ -392,7 +392,8 @@ "wiredTiger": { "cache": { "pages read into cache": 14, - "pages requested from the cache": 215 + "pages requested from the cache": 215, + "bytes read into cache": 10 }, "session": { "open session count": { diff --git a/receiver/mongodbreceiver/testdata/scraper/expected.yaml b/receiver/mongodbreceiver/testdata/scraper/expected.yaml index f975db0998e8c..d44a95bd54794 100644 --- a/receiver/mongodbreceiver/testdata/scraper/expected.yaml +++ b/receiver/mongodbreceiver/testdata/scraper/expected.yaml @@ -148,114 +148,6 @@ resourceMetrics: timeUnixNano: "2000000" isMonotonic: true unit: '{operations}' - # - description: The total number of queries per second. - # name: mongodb.queries_per_sec - # gauge: - # aggregationTemporality: 2 - # dataPoints: - # - asInt: "100" - # startTimeUnixNano: "1000000" - # timeUnixNano: "2000000" - # unit: '{query}/s}' - # - description: The total number of insertions per second. - # name: mongodb.inserts_per_sec - # gauge: - # aggregationTemporality: 2 - # dataPoints: - # - asInt: "100" - # startTimeUnixNano: "1000000" - # timeUnixNano: "2000000" - # unit: '{insert}/s}' - # - description: The total number of commands per second. - # name: mongodb.commands_per_sec - # gauge: - # aggregationTemporality: 2 - # dataPoints: - # - asInt: "100" - # startTimeUnixNano: "1000000" - # timeUnixNano: "2000000" - # unit: '{command}/s}' - # - description: The total number of getmores per second. - # name: mongodb.getmores_per_sec - # gauge: - # aggregationTemporality: 2 - # dataPoints: - # - asInt: "100" - # startTimeUnixNano: "1000000" - # timeUnixNano: "2000000" - # unit: '{getmore}/s}' - # - description: The total number of deletes per second. - # name: mongodb.deletes_per_sec - # gauge: - # aggregationTemporality: 2 - # dataPoints: - # - asInt: "100" - # startTimeUnixNano: "1000000" - # timeUnixNano: "2000000" - # unit: '{delete}/s}' - # - description: The total number of updates per second. - # name: mongodb.updates_per_sec - # gauge: - # aggregationTemporality: 2 - # dataPoints: - # - asInt: "100" - # startTimeUnixNano: "1000000" - # timeUnixNano: "2000000" - # unit: '{update}/s}' - # - description: The total number replicated of queries per second. 
- # name: mongodb.repl_queries_per_sec - # gauge: - # aggregationTemporality: 2 - # dataPoints: - # - asInt: "100" - # startTimeUnixNano: "1000000" - # timeUnixNano: "2000000" - # unit: '{query}/s}' - # - description: The total number replicated of insertions per second. - # name: mongodb.repl_inserts_per_sec - # gauge: - # aggregationTemporality: 2 - # dataPoints: - # - asInt: "100" - # startTimeUnixNano: "1000000" - # timeUnixNano: "2000000" - # unit: '{insert}/s}' - # - description: The total number of replicated commands per second. - # name: mongodb.repl_commands_per_sec - # gauge: - # aggregationTemporality: 2 - # dataPoints: - # - asInt: "100" - # startTimeUnixNano: "1000000" - # timeUnixNano: "2000000" - # unit: '{command}/s}' - # - description: The total number of replicated getmores per second. - # name: mongodb.repl_getmores_per_sec - # gauge: - # aggregationTemporality: 2 - # dataPoints: - # - asInt: "100" - # startTimeUnixNano: "1000000" - # timeUnixNano: "2000000" - # unit: '{getmore}/s}' - # - description: The total number of replicated deletes per second. - # name: mongodb.repl_deletes_per_sec - # gauge: - # aggregationTemporality: 2 - # dataPoints: - # - asInt: "100" - # startTimeUnixNano: "1000000" - # timeUnixNano: "2000000" - # unit: '{delete}/s}' - # - description: The total number of replicated updates per second. - # name: mongodb.repl_updates_per_sec - # gauge: - # aggregationTemporality: 2 - # dataPoints: - # - asInt: "100" - # startTimeUnixNano: "1000000" - # timeUnixNano: "2000000" - # unit: '{update}/s}' - name: mongodb.active.reads description: The number of read operations currently being processed. sum: @@ -274,6 +166,26 @@ resourceMetrics: timeUnixNano: "2000000" aggregationTemporality: 2 unit: '{writes}' + - description: The number of bytes read into the WiredTiger cache. + name: mongodb.wtcache.bytes.read + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "10" # Value from test fixture + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: 'By' + - description: The number of page faults. + name: mongodb.page_faults + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" # Value from test fixture (extra_info.page_faults) + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{faults}' - description: The latency of operations. gauge: dataPoints: diff --git a/receiver/mongodbreceiver/testdata/scraper/partial_scrape.yaml b/receiver/mongodbreceiver/testdata/scraper/partial_scrape.yaml index fd325aa3a1a0c..879caace423a8 100644 --- a/receiver/mongodbreceiver/testdata/scraper/partial_scrape.yaml +++ b/receiver/mongodbreceiver/testdata/scraper/partial_scrape.yaml @@ -166,7 +166,27 @@ resourceMetrics: startTimeUnixNano: "1000000" timeUnixNano: "2000000" aggregationTemporality: 2 - unit: '{writes}' + unit: '{writes}' + - description: The number of bytes read into the WiredTiger cache. + name: mongodb.wtcache.bytes.read + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "10" # Value from test fixture + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: 'By' + - description: The number of page faults. + name: mongodb.page_faults + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" # Value from test fixture (extra_info.page_faults) + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{faults}' - description: The latency of operations. 
gauge: dataPoints: From 60368d103f2760fdc190b855574eabf348c9aefc Mon Sep 17 00:00:00 2001 From: Tim Chan Date: Wed, 15 Jan 2025 15:23:33 -0800 Subject: [PATCH 07/19] fixed build error --- receiver/mongodbreceiver/metrics.go | 2 +- receiver/mongodbreceiver/scraper.go | 4 ++-- receiver/mongodbreceiver/scraper_test.go | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/receiver/mongodbreceiver/metrics.go b/receiver/mongodbreceiver/metrics.go index 5a0b0a16afba3..abe8c0df13704 100644 --- a/receiver/mongodbreceiver/metrics.go +++ b/receiver/mongodbreceiver/metrics.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/go-version" "go.mongodb.org/mongo-driver/bson" "go.opentelemetry.io/collector/pdata/pcommon" - "go.opentelemetry.io/collector/receiver/scrapererror" + "go.opentelemetry.io/collector/scraper/scrapererror" "go.uber.org/zap" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/mongodbreceiver/internal/metadata" diff --git a/receiver/mongodbreceiver/scraper.go b/receiver/mongodbreceiver/scraper.go index cb69ef34c4304..4b2920844324f 100644 --- a/receiver/mongodbreceiver/scraper.go +++ b/receiver/mongodbreceiver/scraper.go @@ -89,12 +89,12 @@ func (s *mongodbScraper) start(ctx context.Context, _ component.Host) error { }, } - client, err := newClient(ctx, &secondaryConfig, s.logger, true) + secondaryClient, err := newClient(ctx, &secondaryConfig, s.logger, true) if err != nil { s.logger.Warn("failed to connect to secondary", zap.String("host", secondary), zap.Error(err)) continue } - s.secondaryClients = append(s.secondaryClients, client) + s.secondaryClients = append(s.secondaryClients, secondaryClient) } return nil diff --git a/receiver/mongodbreceiver/scraper_test.go b/receiver/mongodbreceiver/scraper_test.go index 9f51eeb642935..43406898d76c8 100644 --- a/receiver/mongodbreceiver/scraper_test.go +++ b/receiver/mongodbreceiver/scraper_test.go @@ -44,14 +44,14 @@ func TestScraperLifecycle(t *testing.T) { // Mock the replica set status command to return an empty set mc.On("RunCommand", mock.Anything, "admin", bson.M{"replSetGetStatus": 1}).Return(bson.M{ "ok": 1, - "members": []interface{}{}, + "members": []any{}, }, nil) mc.On("Disconnect", mock.Anything).Return(nil) scraper := newMongodbScraper(receivertest.NewNopSettings(), cfg) // Save original and replace with test version originalNewClient := newClient - newClient = func(ctx context.Context, cfg *Config, logger *zap.Logger, secondary bool) (client, error) { + newClient = func(_ context.Context, _ *Config, _ *zap.Logger, _ bool) (client, error) { return mc, nil } defer func() { From d558ff75e8ffebaa2aac390250295c78f5e1c947 Mon Sep 17 00:00:00 2001 From: Tim Chan Date: Wed, 15 Jan 2025 15:43:12 -0800 Subject: [PATCH 08/19] fixed make generate error --- receiver/mongodbreceiver/generated_package_test.go | 3 ++- .../internal/metadata/generated_metrics_test.go | 8 +++++++- 2 files changed, 9 insertions(+), 2 deletions(-) diff --git a/receiver/mongodbreceiver/generated_package_test.go b/receiver/mongodbreceiver/generated_package_test.go index 080891042403b..17e9f23be856d 100644 --- a/receiver/mongodbreceiver/generated_package_test.go +++ b/receiver/mongodbreceiver/generated_package_test.go @@ -3,8 +3,9 @@ package mongodbreceiver import ( - "go.uber.org/goleak" "testing" + + "go.uber.org/goleak" ) func TestMain(m *testing.M) { diff --git a/receiver/mongodbreceiver/internal/metadata/generated_metrics_test.go 
b/receiver/mongodbreceiver/internal/metadata/generated_metrics_test.go index de1636da2d07a..96cb53ec7aae2 100644 --- a/receiver/mongodbreceiver/internal/metadata/generated_metrics_test.go +++ b/receiver/mongodbreceiver/internal/metadata/generated_metrics_test.go @@ -980,7 +980,13 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, "The number of bytes read into the WiredTiger cache.", ms.At(i).Description()) assert.Equal(t, "By", ms.At(i).Unit()) assert.True(t, ms.At(i).Sum().IsMonotonic()) - } + assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + } } }) } From cbe7cfdd9b917191b2c841e6af5ce6eb5c888e83 Mon Sep 17 00:00:00 2001 From: Tim Chan Date: Thu, 16 Jan 2025 12:33:47 -0800 Subject: [PATCH 09/19] disable metrics, users can enable if they want to see the new metrics --- receiver/mongodbreceiver/documentation.md | 298 +++++++++--------- .../mongodbreceiver/generated_package_test.go | 3 +- .../internal/metadata/generated_config.go | 40 +-- .../metadata/generated_config_test.go | 10 +- .../internal/metadata/generated_metrics.go | 14 +- .../metadata/generated_metrics_test.go | 20 -- receiver/mongodbreceiver/metadata.yaml | 40 +-- receiver/mongodbreceiver/scraper_test.go | 23 +- .../testdata/scraper/expected.yaml | 38 --- .../testdata/scraper/partial_scrape.yaml | 41 +-- 10 files changed, 206 insertions(+), 321 deletions(-) diff --git a/receiver/mongodbreceiver/documentation.md b/receiver/mongodbreceiver/documentation.md index c8b698cb7cfb1..0def02eb9305b 100644 --- a/receiver/mongodbreceiver/documentation.md +++ b/receiver/mongodbreceiver/documentation.md @@ -12,30 +12,6 @@ metrics: enabled: false ``` -### mongodb.active.reads - -The number of read operations currently being processed. - -| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | -| ---- | ----------- | ---------- | ----------------------- | --------- | -| {reads} | Sum | Int | Cumulative | false | - -### mongodb.active.writes - -The number of write operations currently being processed. - -| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | -| ---- | ----------- | ---------- | ----------------------- | --------- | -| {writes} | Sum | Int | Cumulative | false | - -### mongodb.cache.dirty.percent - -The percentage of WiredTiger cache that is dirty. - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| 1 | Gauge | Double | - ### mongodb.cache.operations The number of cache operations of the instance. @@ -50,14 +26,6 @@ The number of cache operations of the instance. | ---- | ----------- | ------ | | type | The result of a cache request. | Str: ``hit``, ``miss`` | -### mongodb.cache.used.percent - -The percentage of WiredTiger cache in use. - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| 1 | Gauge | Double | - ### mongodb.collection.count The number of collections. @@ -66,14 +34,6 @@ The number of collections. | ---- | ----------- | ---------- | ----------------------- | --------- | | {collections} | Sum | Int | Cumulative | false | -### mongodb.commands_per_sec - -The number of commands executed per second. 
- -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| {command}/s | Gauge | Double | - ### mongodb.connection.count The number of connections. @@ -120,14 +80,6 @@ The number of existing databases. | ---- | ----------- | ---------- | ----------------------- | --------- | | {databases} | Sum | Int | Cumulative | false | -### mongodb.deletes_per_sec - -The number of deletes executed per second. - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| {delete}/s | Gauge | Double | - ### mongodb.document.operation.count The number of document operations executed. @@ -150,22 +102,6 @@ The number of extents. | ---- | ----------- | ---------- | ----------------------- | --------- | | {extents} | Sum | Int | Cumulative | false | -### mongodb.flushes_per_sec - -The number of flushes executed per second. - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| {flush}/s | Gauge | Double | - -### mongodb.getmores_per_sec - -The number of getmores executed per second. - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| {getmore}/s | Gauge | Double | - ### mongodb.global_lock.time The time the global lock has been held. @@ -204,14 +140,6 @@ Sum of the space allocated to all indexes in the database, including free index | ---- | ----------- | ---------- | ----------------------- | --------- | | By | Sum | Int | Cumulative | false | -### mongodb.inserts_per_sec - -The number of insertions executed per second. - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| {insert}/s | Gauge | Double | - ### mongodb.memory.usage The amount of memory used. @@ -272,13 +200,13 @@ The number of operations executed. | ---- | ----------- | ------ | | operation | The MongoDB operation being counted. | Str: ``insert``, ``query``, ``update``, ``delete``, ``getmore``, ``command`` | -### mongodb.operation.repl.count +### mongodb.operation.time -The number of replicated operations executed. +The total time spent performing operations. | Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | | ---- | ----------- | ---------- | ----------------------- | --------- | -| {operations} | Sum | Int | Cumulative | true | +| ms | Sum | Int | Cumulative | true | #### Attributes @@ -286,127 +214,97 @@ The number of replicated operations executed. | ---- | ----------- | ------ | | operation | The MongoDB operation being counted. | Str: ``insert``, ``query``, ``update``, ``delete``, ``getmore``, ``command`` | -### mongodb.operation.time +### mongodb.session.count -The total time spent performing operations. +The total number of active sessions. | Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | | ---- | ----------- | ---------- | ----------------------- | --------- | -| ms | Sum | Int | Cumulative | true | - -#### Attributes +| {sessions} | Sum | Int | Cumulative | false | -| Name | Description | Values | -| ---- | ----------- | ------ | -| operation | The MongoDB operation being counted. | Str: ``insert``, ``query``, ``update``, ``delete``, ``getmore``, ``command`` | +### mongodb.storage.size -### mongodb.page_faults +The total amount of storage allocated to this collection. -The number of page faults. +If collection data is compressed it reflects the compressed size. 
| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | | ---- | ----------- | ---------- | ----------------------- | --------- | -| {faults} | Sum | Int | Cumulative | true | +| By | Sum | Int | Cumulative | true | -### mongodb.queries_per_sec +## Optional Metrics -The number of queries executed per second. +The following metrics are not emitted by default. Each of them can be enabled by applying the following configuration: -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| {query}/s | Gauge | Double | +```yaml +metrics: + : + enabled: true +``` -### mongodb.repl_commands_per_sec +### mongodb.active.reads -The number of replicated commands executed per second. +The number of read operations currently being processed. -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| {command}/s | Gauge | Double | +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| {reads} | Sum | Int | Cumulative | false | -### mongodb.repl_deletes_per_sec +### mongodb.active.writes -The number of replicated deletes executed per second. +The number of write operations currently being processed. -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| {delete}/s | Gauge | Double | +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| {writes} | Sum | Int | Cumulative | false | -### mongodb.repl_getmores_per_sec +### mongodb.cache.dirty.percent -The number of replicated getmores executed per second. +The percentage of WiredTiger cache that is dirty. | Unit | Metric Type | Value Type | | ---- | ----------- | ---------- | -| {getmore}/s | Gauge | Double | +| 1 | Gauge | Double | -### mongodb.repl_inserts_per_sec +### mongodb.cache.used.percent -The number of replicated insertions executed per second. +The percentage of WiredTiger cache in use. | Unit | Metric Type | Value Type | | ---- | ----------- | ---------- | -| {insert}/s | Gauge | Double | +| 1 | Gauge | Double | -### mongodb.repl_queries_per_sec +### mongodb.commands_per_sec -The number of replicated queries executed per second. +The number of commands executed per second. | Unit | Metric Type | Value Type | | ---- | ----------- | ---------- | -| {query}/s | Gauge | Double | +| {command}/s | Gauge | Double | -### mongodb.repl_updates_per_sec +### mongodb.deletes_per_sec -The number of replicated updates executed per second. +The number of deletes executed per second. | Unit | Metric Type | Value Type | | ---- | ----------- | ---------- | -| {update}/s | Gauge | Double | - -### mongodb.session.count - -The total number of active sessions. - -| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | -| ---- | ----------- | ---------- | ----------------------- | --------- | -| {sessions} | Sum | Int | Cumulative | false | - -### mongodb.storage.size - -The total amount of storage allocated to this collection. - -If collection data is compressed it reflects the compressed size. - -| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | -| ---- | ----------- | ---------- | ----------------------- | --------- | -| By | Sum | Int | Cumulative | true | +| {delete}/s | Gauge | Double | -### mongodb.updates_per_sec +### mongodb.flushes_per_sec -The number of updates executed per second. +The number of flushes executed per second. 
| Unit | Metric Type | Value Type | | ---- | ----------- | ---------- | -| {update}/s | Gauge | Double | - -### mongodb.wtcache.bytes.read - -The number of bytes read into the WiredTiger cache. - -| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | -| ---- | ----------- | ---------- | ----------------------- | --------- | -| By | Sum | Int | Cumulative | true | +| {flush}/s | Gauge | Double | -## Optional Metrics +### mongodb.getmores_per_sec -The following metrics are not emitted by default. Each of them can be enabled by applying the following configuration: +The number of getmores executed per second. -```yaml -metrics: - : - enabled: true -``` +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| {getmore}/s | Gauge | Double | ### mongodb.health @@ -418,6 +316,14 @@ A value of '1' indicates healthy. A value of '0' indicates unhealthy. | ---- | ----------- | ---------- | | 1 | Gauge | Int | +### mongodb.inserts_per_sec + +The number of insertions executed per second. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| {insert}/s | Gauge | Double | + ### mongodb.lock.acquire.count Number of times the lock was acquired in the specified mode. @@ -492,6 +398,92 @@ The latency of operations. | ---- | ----------- | ------ | | operation | The MongoDB operation with regards to latency | Str: ``read``, ``write``, ``command`` | +### mongodb.operation.repl.count + +The number of replicated operations executed. + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| {operations} | Sum | Int | Cumulative | true | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| operation | The MongoDB operation being counted. | Str: ``insert``, ``query``, ``update``, ``delete``, ``getmore``, ``command`` | + +### mongodb.page_faults + +The number of page faults. + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| {faults} | Sum | Int | Cumulative | true | + +### mongodb.queries_per_sec + +The number of queries executed per second. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| {query}/s | Gauge | Double | + +### mongodb.repl_commands_per_sec + +The number of replicated commands executed per second. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| {command}/s | Gauge | Double | + +### mongodb.repl_deletes_per_sec + +The number of replicated deletes executed per second. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| {delete}/s | Gauge | Double | + +### mongodb.repl_getmores_per_sec + +The number of replicated getmores executed per second. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| {getmore}/s | Gauge | Double | + +### mongodb.repl_inserts_per_sec + +The number of replicated insertions executed per second. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| {insert}/s | Gauge | Double | + +### mongodb.repl_queries_per_sec + +The number of replicated queries executed per second. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| {query}/s | Gauge | Double | + +### mongodb.repl_updates_per_sec + +The number of replicated updates executed per second. 
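
> [Editor's note] All of the `*_per_sec` gauges grouped in these documentation hunks share one derivation: the scraper samples MongoDB's cumulative `opcounters`/`opcountersRepl` counters and divides the delta by the elapsed scrape interval. A minimal sketch of that computation, assuming nanosecond timestamps as in `pcommon.Timestamp` and a cached previous sample (the names here are illustrative, not the receiver's own):

```go
package main

import "fmt"

// ratePerSecond sketches the delta-over-elapsed-time logic behind the
// *_per_sec gauges. prevCount and prevTS come from the previous scrape;
// timestamps are nanoseconds since the Unix epoch. ok is false on the first
// scrape, when no rate can be computed yet.
func ratePerSecond(count, prevCount int64, ts, prevTS uint64) (rate float64, ok bool) {
	if prevTS == 0 || ts <= prevTS {
		return 0, false
	}
	elapsed := float64(ts-prevTS) / 1e9 // ns -> s
	return float64(count-prevCount) / elapsed, true
}

func main() {
	// Two samples 20s apart, with 500 additional queries in between.
	if r, ok := ratePerSecond(10500, 10000, 40e9, 20e9); ok {
		fmt.Printf("%.1f {query}/s\n", r) // 25.0 {query}/s
	}
}
```
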
+ +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| {update}/s | Gauge | Double | + +### mongodb.updates_per_sec + +The number of updates executed per second. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| {update}/s | Gauge | Double | + ### mongodb.uptime The amount of time that the server has been running. @@ -500,6 +492,14 @@ The amount of time that the server has been running. | ---- | ----------- | ---------- | ----------------------- | --------- | | ms | Sum | Int | Cumulative | true | +### mongodb.wtcache.bytes.read + +The number of bytes read into the WiredTiger cache. + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| By | Sum | Int | Cumulative | true | + ## Resource Attributes | Name | Description | Values | Enabled | diff --git a/receiver/mongodbreceiver/generated_package_test.go b/receiver/mongodbreceiver/generated_package_test.go index 17e9f23be856d..080891042403b 100644 --- a/receiver/mongodbreceiver/generated_package_test.go +++ b/receiver/mongodbreceiver/generated_package_test.go @@ -3,9 +3,8 @@ package mongodbreceiver import ( - "testing" - "go.uber.org/goleak" + "testing" ) func TestMain(m *testing.M) { diff --git a/receiver/mongodbreceiver/internal/metadata/generated_config.go b/receiver/mongodbreceiver/internal/metadata/generated_config.go index b43a3dcfefa74..cd328ef117f27 100644 --- a/receiver/mongodbreceiver/internal/metadata/generated_config.go +++ b/receiver/mongodbreceiver/internal/metadata/generated_config.go @@ -82,25 +82,25 @@ type MetricsConfig struct { func DefaultMetricsConfig() MetricsConfig { return MetricsConfig{ MongodbActiveReads: MetricConfig{ - Enabled: true, + Enabled: false, }, MongodbActiveWrites: MetricConfig{ - Enabled: true, + Enabled: false, }, MongodbCacheDirtyPercent: MetricConfig{ - Enabled: true, + Enabled: false, }, MongodbCacheOperations: MetricConfig{ Enabled: true, }, MongodbCacheUsedPercent: MetricConfig{ - Enabled: true, + Enabled: false, }, MongodbCollectionCount: MetricConfig{ Enabled: true, }, MongodbCommandsPerSec: MetricConfig{ - Enabled: true, + Enabled: false, }, MongodbConnectionCount: MetricConfig{ Enabled: true, @@ -118,7 +118,7 @@ func DefaultMetricsConfig() MetricsConfig { Enabled: true, }, MongodbDeletesPerSec: MetricConfig{ - Enabled: true, + Enabled: false, }, MongodbDocumentOperationCount: MetricConfig{ Enabled: true, @@ -127,10 +127,10 @@ func DefaultMetricsConfig() MetricsConfig { Enabled: true, }, MongodbFlushesPerSec: MetricConfig{ - Enabled: true, + Enabled: false, }, MongodbGetmoresPerSec: MetricConfig{ - Enabled: true, + Enabled: false, }, MongodbGlobalLockTime: MetricConfig{ Enabled: true, @@ -148,7 +148,7 @@ func DefaultMetricsConfig() MetricsConfig { Enabled: true, }, MongodbInsertsPerSec: MetricConfig{ - Enabled: true, + Enabled: false, }, MongodbLockAcquireCount: MetricConfig{ Enabled: false, @@ -184,34 +184,34 @@ func DefaultMetricsConfig() MetricsConfig { Enabled: false, }, MongodbOperationReplCount: MetricConfig{ - Enabled: true, + Enabled: false, }, MongodbOperationTime: MetricConfig{ Enabled: true, }, MongodbPageFaults: MetricConfig{ - Enabled: true, + Enabled: false, }, MongodbQueriesPerSec: MetricConfig{ - Enabled: true, + Enabled: false, }, MongodbReplCommandsPerSec: MetricConfig{ - Enabled: true, + Enabled: false, }, MongodbReplDeletesPerSec: MetricConfig{ - Enabled: true, + Enabled: false, }, MongodbReplGetmoresPerSec: MetricConfig{ - 
Enabled: true, + Enabled: false, }, MongodbReplInsertsPerSec: MetricConfig{ - Enabled: true, + Enabled: false, }, MongodbReplQueriesPerSec: MetricConfig{ - Enabled: true, + Enabled: false, }, MongodbReplUpdatesPerSec: MetricConfig{ - Enabled: true, + Enabled: false, }, MongodbSessionCount: MetricConfig{ Enabled: true, @@ -220,13 +220,13 @@ func DefaultMetricsConfig() MetricsConfig { Enabled: true, }, MongodbUpdatesPerSec: MetricConfig{ - Enabled: true, + Enabled: false, }, MongodbUptime: MetricConfig{ Enabled: false, }, MongodbWtcacheBytesRead: MetricConfig{ - Enabled: true, + Enabled: false, }, } } diff --git a/receiver/mongodbreceiver/internal/metadata/generated_config_test.go b/receiver/mongodbreceiver/internal/metadata/generated_config_test.go index 05edb3210af4c..31ec4f99a34a2 100644 --- a/receiver/mongodbreceiver/internal/metadata/generated_config_test.go +++ b/receiver/mongodbreceiver/internal/metadata/generated_config_test.go @@ -147,8 +147,9 @@ func TestMetricsBuilderConfig(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { cfg := loadMetricsBuilderConfig(t, tt.name) - diff := cmp.Diff(tt.want, cfg, cmpopts.IgnoreUnexported(MetricConfig{}, ResourceAttributeConfig{})) - require.Emptyf(t, diff, "Config mismatch (-expected +actual):\n%s", diff) + if diff := cmp.Diff(tt.want, cfg, cmpopts.IgnoreUnexported(MetricConfig{}, ResourceAttributeConfig{})); diff != "" { + t.Errorf("Config mismatch (-expected +actual):\n%s", diff) + } }) } } @@ -192,8 +193,9 @@ func TestResourceAttributesConfig(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { cfg := loadResourceAttributesConfig(t, tt.name) - diff := cmp.Diff(tt.want, cfg, cmpopts.IgnoreUnexported(ResourceAttributeConfig{})) - require.Emptyf(t, diff, "Config mismatch (-expected +actual):\n%s", diff) + if diff := cmp.Diff(tt.want, cfg, cmpopts.IgnoreUnexported(ResourceAttributeConfig{})); diff != "" { + t.Errorf("Config mismatch (-expected +actual):\n%s", diff) + } }) } } diff --git a/receiver/mongodbreceiver/internal/metadata/generated_metrics.go b/receiver/mongodbreceiver/internal/metadata/generated_metrics.go index 9028f5fb8eddf..c5f0fb86722d2 100644 --- a/receiver/mongodbreceiver/internal/metadata/generated_metrics.go +++ b/receiver/mongodbreceiver/internal/metadata/generated_metrics.go @@ -12,7 +12,7 @@ import ( "go.opentelemetry.io/collector/receiver" ) -// AttributeConnectionType specifies the value connection_type attribute. +// AttributeConnectionType specifies the a value connection_type attribute. type AttributeConnectionType int const ( @@ -42,7 +42,7 @@ var MapAttributeConnectionType = map[string]AttributeConnectionType{ "current": AttributeConnectionTypeCurrent, } -// AttributeLockMode specifies the value lock_mode attribute. +// AttributeLockMode specifies the a value lock_mode attribute. type AttributeLockMode int const ( @@ -76,7 +76,7 @@ var MapAttributeLockMode = map[string]AttributeLockMode{ "intent_exclusive": AttributeLockModeIntentExclusive, } -// AttributeLockType specifies the value lock_type attribute. +// AttributeLockType specifies the a value lock_type attribute. type AttributeLockType int const ( @@ -126,7 +126,7 @@ var MapAttributeLockType = map[string]AttributeLockType{ "oplog": AttributeLockTypeOplog, } -// AttributeMemoryType specifies the value memory_type attribute. +// AttributeMemoryType specifies the a value memory_type attribute. 
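
> [Editor's note] The `DefaultMetricsConfig` flips above are what make these gauges opt-in: users re-enable them per metric through the receiver's `metrics:` configuration, which unmarshals onto the generated structs. A trimmed, hypothetical sketch of that override (only one field shown; the real `MetricsConfig` carries every metric):

```go
package main

import "fmt"

// MetricConfig and MetricsConfig mirror the generated types in
// internal/metadata/generated_config.go, trimmed to a single field.
type MetricConfig struct {
	Enabled bool `mapstructure:"enabled"`
}

type MetricsConfig struct {
	MongodbQueriesPerSec MetricConfig `mapstructure:"mongodb.queries_per_sec"`
}

func main() {
	// Start from the (now disabled) generated default, then apply a user
	// override, as confmap unmarshaling would when the collector config sets
	// mongodb.queries_per_sec::enabled: true.
	cfg := MetricsConfig{MongodbQueriesPerSec: MetricConfig{Enabled: false}}
	cfg.MongodbQueriesPerSec.Enabled = true
	fmt.Println("mongodb.queries_per_sec enabled:", cfg.MongodbQueriesPerSec.Enabled)
}
```
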
type AttributeMemoryType int const ( @@ -152,7 +152,7 @@ var MapAttributeMemoryType = map[string]AttributeMemoryType{ "virtual": AttributeMemoryTypeVirtual, } -// AttributeOperation specifies the value operation attribute. +// AttributeOperation specifies the a value operation attribute. type AttributeOperation int const ( @@ -194,7 +194,7 @@ var MapAttributeOperation = map[string]AttributeOperation{ "command": AttributeOperationCommand, } -// AttributeOperationLatency specifies the value operation_latency attribute. +// AttributeOperationLatency specifies the a value operation_latency attribute. type AttributeOperationLatency int const ( @@ -224,7 +224,7 @@ var MapAttributeOperationLatency = map[string]AttributeOperationLatency{ "command": AttributeOperationLatencyCommand, } -// AttributeType specifies the value type attribute. +// AttributeType specifies the a value type attribute. type AttributeType int const ( diff --git a/receiver/mongodbreceiver/internal/metadata/generated_metrics_test.go b/receiver/mongodbreceiver/internal/metadata/generated_metrics_test.go index 96cb53ec7aae2..b2fc309a9a933 100644 --- a/receiver/mongodbreceiver/internal/metadata/generated_metrics_test.go +++ b/receiver/mongodbreceiver/internal/metadata/generated_metrics_test.go @@ -68,15 +68,12 @@ func TestMetricsBuilder(t *testing.T) { defaultMetricsCount := 0 allMetricsCount := 0 - defaultMetricsCount++ allMetricsCount++ mb.RecordMongodbActiveReadsDataPoint(ts, 1) - defaultMetricsCount++ allMetricsCount++ mb.RecordMongodbActiveWritesDataPoint(ts, 1) - defaultMetricsCount++ allMetricsCount++ mb.RecordMongodbCacheDirtyPercentDataPoint(ts, 1) @@ -84,7 +81,6 @@ func TestMetricsBuilder(t *testing.T) { allMetricsCount++ mb.RecordMongodbCacheOperationsDataPoint(ts, 1, AttributeTypeHit) - defaultMetricsCount++ allMetricsCount++ mb.RecordMongodbCacheUsedPercentDataPoint(ts, 1) @@ -92,7 +88,6 @@ func TestMetricsBuilder(t *testing.T) { allMetricsCount++ mb.RecordMongodbCollectionCountDataPoint(ts, 1) - defaultMetricsCount++ allMetricsCount++ mb.RecordMongodbCommandsPerSecDataPoint(ts, 1) @@ -116,7 +111,6 @@ func TestMetricsBuilder(t *testing.T) { allMetricsCount++ mb.RecordMongodbDatabaseCountDataPoint(ts, 1) - defaultMetricsCount++ allMetricsCount++ mb.RecordMongodbDeletesPerSecDataPoint(ts, 1) @@ -128,11 +122,9 @@ func TestMetricsBuilder(t *testing.T) { allMetricsCount++ mb.RecordMongodbExtentCountDataPoint(ts, 1) - defaultMetricsCount++ allMetricsCount++ mb.RecordMongodbFlushesPerSecDataPoint(ts, 1) - defaultMetricsCount++ allMetricsCount++ mb.RecordMongodbGetmoresPerSecDataPoint(ts, 1) @@ -155,7 +147,6 @@ func TestMetricsBuilder(t *testing.T) { allMetricsCount++ mb.RecordMongodbIndexSizeDataPoint(ts, 1) - defaultMetricsCount++ allMetricsCount++ mb.RecordMongodbInsertsPerSecDataPoint(ts, 1) @@ -198,7 +189,6 @@ func TestMetricsBuilder(t *testing.T) { allMetricsCount++ mb.RecordMongodbOperationLatencyTimeDataPoint(ts, 1, AttributeOperationLatencyRead) - defaultMetricsCount++ allMetricsCount++ mb.RecordMongodbOperationReplCountDataPoint(ts, 1, AttributeOperationInsert) @@ -206,35 +196,27 @@ func TestMetricsBuilder(t *testing.T) { allMetricsCount++ mb.RecordMongodbOperationTimeDataPoint(ts, 1, AttributeOperationInsert) - defaultMetricsCount++ allMetricsCount++ mb.RecordMongodbPageFaultsDataPoint(ts, 1) - defaultMetricsCount++ allMetricsCount++ mb.RecordMongodbQueriesPerSecDataPoint(ts, 1) - defaultMetricsCount++ allMetricsCount++ mb.RecordMongodbReplCommandsPerSecDataPoint(ts, 1) - defaultMetricsCount++ allMetricsCount++ 
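
> [Editor's note] For context on the `defaultMetricsCount++` deletions in this generated test: every recorded metric increments `allMetricsCount`, while only default-enabled metrics also increment `defaultMetricsCount`, so demoting a metric to optional removes exactly one increment per metric. A minimal sketch of the idiom:

```go
package main

import "fmt"

func main() {
	defaultMetricsCount, allMetricsCount := 0, 0

	// Optional after this patch: counted only in the all-metrics mode.
	allMetricsCount++ // e.g. mb.RecordMongodbQueriesPerSecDataPoint(ts, 1)

	// Still enabled by default: counted in both modes.
	defaultMetricsCount++
	allMetricsCount++ // e.g. mb.RecordMongodbSessionCountDataPoint(ts, 1)

	fmt.Println(defaultMetricsCount, allMetricsCount) // 1 2
}
```
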
mb.RecordMongodbReplDeletesPerSecDataPoint(ts, 1) - defaultMetricsCount++ allMetricsCount++ mb.RecordMongodbReplGetmoresPerSecDataPoint(ts, 1) - defaultMetricsCount++ allMetricsCount++ mb.RecordMongodbReplInsertsPerSecDataPoint(ts, 1) - defaultMetricsCount++ allMetricsCount++ mb.RecordMongodbReplQueriesPerSecDataPoint(ts, 1) - defaultMetricsCount++ allMetricsCount++ mb.RecordMongodbReplUpdatesPerSecDataPoint(ts, 1) @@ -246,14 +228,12 @@ func TestMetricsBuilder(t *testing.T) { allMetricsCount++ mb.RecordMongodbStorageSizeDataPoint(ts, 1) - defaultMetricsCount++ allMetricsCount++ mb.RecordMongodbUpdatesPerSecDataPoint(ts, 1) allMetricsCount++ mb.RecordMongodbUptimeDataPoint(ts, 1) - defaultMetricsCount++ allMetricsCount++ mb.RecordMongodbWtcacheBytesReadDataPoint(ts, 1) diff --git a/receiver/mongodbreceiver/metadata.yaml b/receiver/mongodbreceiver/metadata.yaml index fe07151ee7607..dd0ce4a1225b0 100644 --- a/receiver/mongodbreceiver/metadata.yaml +++ b/receiver/mongodbreceiver/metadata.yaml @@ -197,7 +197,7 @@ metrics: mongodb.operation.repl.count: description: The number of replicated operations executed. unit: "{operations}" - enabled: true + enabled: false sum: aggregation_temporality: cumulative value_type: int @@ -361,7 +361,7 @@ metrics: mongodb.queries_per_sec: description: The number of queries executed per second. unit: "{query}/s" - enabled: true + enabled: false gauge: value_type: double aggregation_temporality: delta @@ -369,7 +369,7 @@ metrics: mongodb.inserts_per_sec: description: The number of insertions executed per second. unit: "{insert}/s" - enabled: true + enabled: false gauge: value_type: double aggregation_temporality: delta @@ -377,7 +377,7 @@ metrics: mongodb.commands_per_sec: description: The number of commands executed per second. unit: "{command}/s" - enabled: true + enabled: false gauge: value_type: double aggregation_temporality: delta @@ -385,7 +385,7 @@ metrics: mongodb.getmores_per_sec: description: The number of getmores executed per second. unit: "{getmore}/s" - enabled: true + enabled: false gauge: value_type: double aggregation_temporality: delta @@ -393,7 +393,7 @@ metrics: mongodb.deletes_per_sec: description: The number of deletes executed per second. unit: "{delete}/s" - enabled: true + enabled: false gauge: value_type: double aggregation_temporality: delta @@ -401,7 +401,7 @@ metrics: mongodb.updates_per_sec: description: The number of updates executed per second. unit: "{update}/s" - enabled: true + enabled: false gauge: value_type: double aggregation_temporality: delta @@ -409,7 +409,7 @@ metrics: mongodb.flushes_per_sec: description: The number of flushes executed per second. unit: "{flush}/s" - enabled: true + enabled: false gauge: value_type: double aggregation_temporality: delta @@ -417,7 +417,7 @@ metrics: mongodb.repl_queries_per_sec: description: The number of replicated queries executed per second. unit: "{query}/s" - enabled: true + enabled: false gauge: value_type: double aggregation_temporality: delta @@ -425,7 +425,7 @@ metrics: mongodb.repl_inserts_per_sec: description: The number of replicated insertions executed per second. unit: "{insert}/s" - enabled: true + enabled: false gauge: value_type: double aggregation_temporality: delta @@ -433,7 +433,7 @@ metrics: mongodb.repl_commands_per_sec: description: The number of replicated commands executed per second. 
unit: "{command}/s" - enabled: true + enabled: false gauge: value_type: double aggregation_temporality: delta @@ -441,7 +441,7 @@ metrics: mongodb.repl_getmores_per_sec: description: The number of replicated getmores executed per second. unit: "{getmore}/s" - enabled: true + enabled: false gauge: value_type: double aggregation_temporality: delta @@ -449,7 +449,7 @@ metrics: mongodb.repl_deletes_per_sec: description: The number of replicated deletes executed per second. unit: "{delete}/s" - enabled: true + enabled: false gauge: value_type: double aggregation_temporality: delta @@ -457,7 +457,7 @@ metrics: mongodb.repl_updates_per_sec: description: The number of replicated updates executed per second. unit: "{update}/s" - enabled: true + enabled: false gauge: value_type: double aggregation_temporality: delta @@ -465,7 +465,7 @@ metrics: mongodb.active.writes: description: The number of write operations currently being processed. unit: "{writes}" - enabled: true + enabled: false sum: value_type: int aggregation_temporality: cumulative @@ -474,7 +474,7 @@ metrics: mongodb.active.reads: description: The number of read operations currently being processed. unit: "{reads}" - enabled: true + enabled: false sum: value_type: int aggregation_temporality: cumulative @@ -483,7 +483,7 @@ metrics: mongodb.wtcache.bytes.read: description: The number of bytes read into the WiredTiger cache. unit: "By" - enabled: true + enabled: false sum: value_type: int aggregation_temporality: cumulative @@ -492,21 +492,21 @@ metrics: mongodb.cache.dirty.percent: description: The percentage of WiredTiger cache that is dirty. unit: "1" - enabled: true + enabled: false gauge: value_type: double attributes: [] mongodb.cache.used.percent: description: The percentage of WiredTiger cache in use. unit: "1" - enabled: true + enabled: false gauge: value_type: double attributes: [] mongodb.page_faults: description: The number of page faults. 
unit: "{faults}" - enabled: true + enabled: false sum: value_type: int aggregation_temporality: cumulative diff --git a/receiver/mongodbreceiver/scraper_test.go b/receiver/mongodbreceiver/scraper_test.go index 43406898d76c8..9effd0448b0fa 100644 --- a/receiver/mongodbreceiver/scraper_test.go +++ b/receiver/mongodbreceiver/scraper_test.go @@ -40,29 +40,11 @@ func TestScraperLifecycle(t *testing.T) { f := NewFactory() cfg := f.CreateDefaultConfig().(*Config) - mc := &fakeClient{} - // Mock the replica set status command to return an empty set - mc.On("RunCommand", mock.Anything, "admin", bson.M{"replSetGetStatus": 1}).Return(bson.M{ - "ok": 1, - "members": []any{}, - }, nil) - mc.On("Disconnect", mock.Anything).Return(nil) - scraper := newMongodbScraper(receivertest.NewNopSettings(), cfg) - // Save original and replace with test version - originalNewClient := newClient - newClient = func(_ context.Context, _ *Config, _ *zap.Logger, _ bool) (client, error) { - return mc, nil - } - defer func() { - newClient = originalNewClient - }() - require.NoError(t, scraper.start(context.Background(), componenttest.NewNopHost())) require.NoError(t, scraper.shutdown(context.Background())) require.Less(t, time.Since(now), 200*time.Millisecond, "component start and stop should be very fast") - mc.AssertExpectations(t) } var ( @@ -348,9 +330,8 @@ func TestScraperScrape(t *testing.T) { expectedMetrics := tc.expectedMetricGen(t) require.NoError(t, pmetrictest.CompareMetrics(expectedMetrics, actualMetrics, - pmetrictest.IgnoreResourceMetricsOrder(), pmetrictest.IgnoreMetricDataPointsOrder(), - pmetrictest.IgnoreStartTimestamp(), pmetrictest.IgnoreTimestamp(), - pmetrictest.IgnoreMetricsOrder())) + pmetrictest.IgnoreResourceMetricsOrder(), + pmetrictest.IgnoreMetricDataPointsOrder(), pmetrictest.IgnoreStartTimestamp(), pmetrictest.IgnoreTimestamp())) }) } } diff --git a/receiver/mongodbreceiver/testdata/scraper/expected.yaml b/receiver/mongodbreceiver/testdata/scraper/expected.yaml index d44a95bd54794..4a22e2a46351f 100644 --- a/receiver/mongodbreceiver/testdata/scraper/expected.yaml +++ b/receiver/mongodbreceiver/testdata/scraper/expected.yaml @@ -148,44 +148,6 @@ resourceMetrics: timeUnixNano: "2000000" isMonotonic: true unit: '{operations}' - - name: mongodb.active.reads - description: The number of read operations currently being processed. - sum: - dataPoints: - - asInt: "1" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - aggregationTemporality: 2 - unit: '{reads}' - - name: mongodb.active.writes - description: The number of write operations currently being processed. - sum: - dataPoints: - - asInt: "2" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - aggregationTemporality: 2 - unit: '{writes}' - - description: The number of bytes read into the WiredTiger cache. - name: mongodb.wtcache.bytes.read - sum: - aggregationTemporality: 2 - dataPoints: - - asInt: "10" # Value from test fixture - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - isMonotonic: true - unit: 'By' - - description: The number of page faults. - name: mongodb.page_faults - sum: - aggregationTemporality: 2 - dataPoints: - - asInt: "0" # Value from test fixture (extra_info.page_faults) - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - isMonotonic: true - unit: '{faults}' - description: The latency of operations. 
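
> [Editor's note] The `scraper_test.go` hunk above tightens the `pmetrictest` comparison options, dropping `IgnoreMetricsOrder`. A self-contained sketch of how those options are used; the empty payloads stand in for the golden `expected.yaml` fixtures:

```go
package main

import (
	"fmt"

	"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest/pmetrictest"
	"go.opentelemetry.io/collector/pdata/pmetric"
)

func main() {
	expected := pmetric.NewMetrics()
	actual := pmetric.NewMetrics()

	// Mask only the dimensions that legitimately vary between scrapes
	// (orderings and timestamps), so fixture drift in metric order is
	// still caught by the comparison.
	err := pmetrictest.CompareMetrics(expected, actual,
		pmetrictest.IgnoreResourceMetricsOrder(),
		pmetrictest.IgnoreMetricDataPointsOrder(),
		pmetrictest.IgnoreStartTimestamp(),
		pmetrictest.IgnoreTimestamp(),
	)
	fmt.Println(err) // <nil>
}
```
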
gauge: dataPoints: diff --git a/receiver/mongodbreceiver/testdata/scraper/partial_scrape.yaml b/receiver/mongodbreceiver/testdata/scraper/partial_scrape.yaml index 879caace423a8..6b3cb474afca1 100644 --- a/receiver/mongodbreceiver/testdata/scraper/partial_scrape.yaml +++ b/receiver/mongodbreceiver/testdata/scraper/partial_scrape.yaml @@ -148,45 +148,6 @@ resourceMetrics: timeUnixNano: "2000000" isMonotonic: true unit: '{operations}' - - - name: mongodb.active.reads - description: The number of read operations currently being processed. - sum: - dataPoints: - - asInt: "1" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - aggregationTemporality: 2 - unit: '{reads}' - - name: mongodb.active.writes - description: The number of write operations currently being processed. - sum: - dataPoints: - - asInt: "2" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - aggregationTemporality: 2 - unit: '{writes}' - - description: The number of bytes read into the WiredTiger cache. - name: mongodb.wtcache.bytes.read - sum: - aggregationTemporality: 2 - dataPoints: - - asInt: "10" # Value from test fixture - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - isMonotonic: true - unit: 'By' - - description: The number of page faults. - name: mongodb.page_faults - sum: - aggregationTemporality: 2 - dataPoints: - - asInt: "0" # Value from test fixture (extra_info.page_faults) - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - isMonotonic: true - unit: '{faults}' - description: The latency of operations. gauge: dataPoints: @@ -283,4 +244,4 @@ resourceMetrics: unit: ms scope: name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/mongodbreceiver - version: latest + version: latest \ No newline at end of file From 2f06b9e6859cd941524e39e9f23508a9c749a1df Mon Sep 17 00:00:00 2001 From: Tim Chan Date: Thu, 16 Jan 2025 14:19:43 -0800 Subject: [PATCH 10/19] fixed lifecycle test taking too long --- receiver/mongodbreceiver/generated_package_test.go | 3 ++- .../internal/metadata/generated_config_test.go | 10 ++++------ .../internal/metadata/generated_metrics.go | 14 +++++++------- receiver/mongodbreceiver/scraper.go | 5 +++++ receiver/mongodbreceiver/scraper_test.go | 7 +++++++ 5 files changed, 25 insertions(+), 14 deletions(-) diff --git a/receiver/mongodbreceiver/generated_package_test.go b/receiver/mongodbreceiver/generated_package_test.go index 080891042403b..17e9f23be856d 100644 --- a/receiver/mongodbreceiver/generated_package_test.go +++ b/receiver/mongodbreceiver/generated_package_test.go @@ -3,8 +3,9 @@ package mongodbreceiver import ( - "go.uber.org/goleak" "testing" + + "go.uber.org/goleak" ) func TestMain(m *testing.M) { diff --git a/receiver/mongodbreceiver/internal/metadata/generated_config_test.go b/receiver/mongodbreceiver/internal/metadata/generated_config_test.go index 31ec4f99a34a2..05edb3210af4c 100644 --- a/receiver/mongodbreceiver/internal/metadata/generated_config_test.go +++ b/receiver/mongodbreceiver/internal/metadata/generated_config_test.go @@ -147,9 +147,8 @@ func TestMetricsBuilderConfig(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { cfg := loadMetricsBuilderConfig(t, tt.name) - if diff := cmp.Diff(tt.want, cfg, cmpopts.IgnoreUnexported(MetricConfig{}, ResourceAttributeConfig{})); diff != "" { - t.Errorf("Config mismatch (-expected +actual):\n%s", diff) - } + diff := cmp.Diff(tt.want, cfg, cmpopts.IgnoreUnexported(MetricConfig{}, ResourceAttributeConfig{})) + require.Emptyf(t, diff, "Config 
mismatch (-expected +actual):\n%s", diff) }) } } @@ -193,9 +192,8 @@ func TestResourceAttributesConfig(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { cfg := loadResourceAttributesConfig(t, tt.name) - if diff := cmp.Diff(tt.want, cfg, cmpopts.IgnoreUnexported(ResourceAttributeConfig{})); diff != "" { - t.Errorf("Config mismatch (-expected +actual):\n%s", diff) - } + diff := cmp.Diff(tt.want, cfg, cmpopts.IgnoreUnexported(ResourceAttributeConfig{})) + require.Emptyf(t, diff, "Config mismatch (-expected +actual):\n%s", diff) }) } } diff --git a/receiver/mongodbreceiver/internal/metadata/generated_metrics.go b/receiver/mongodbreceiver/internal/metadata/generated_metrics.go index c5f0fb86722d2..9028f5fb8eddf 100644 --- a/receiver/mongodbreceiver/internal/metadata/generated_metrics.go +++ b/receiver/mongodbreceiver/internal/metadata/generated_metrics.go @@ -12,7 +12,7 @@ import ( "go.opentelemetry.io/collector/receiver" ) -// AttributeConnectionType specifies the a value connection_type attribute. +// AttributeConnectionType specifies the value connection_type attribute. type AttributeConnectionType int const ( @@ -42,7 +42,7 @@ var MapAttributeConnectionType = map[string]AttributeConnectionType{ "current": AttributeConnectionTypeCurrent, } -// AttributeLockMode specifies the a value lock_mode attribute. +// AttributeLockMode specifies the value lock_mode attribute. type AttributeLockMode int const ( @@ -76,7 +76,7 @@ var MapAttributeLockMode = map[string]AttributeLockMode{ "intent_exclusive": AttributeLockModeIntentExclusive, } -// AttributeLockType specifies the a value lock_type attribute. +// AttributeLockType specifies the value lock_type attribute. type AttributeLockType int const ( @@ -126,7 +126,7 @@ var MapAttributeLockType = map[string]AttributeLockType{ "oplog": AttributeLockTypeOplog, } -// AttributeMemoryType specifies the a value memory_type attribute. +// AttributeMemoryType specifies the value memory_type attribute. type AttributeMemoryType int const ( @@ -152,7 +152,7 @@ var MapAttributeMemoryType = map[string]AttributeMemoryType{ "virtual": AttributeMemoryTypeVirtual, } -// AttributeOperation specifies the a value operation attribute. +// AttributeOperation specifies the value operation attribute. type AttributeOperation int const ( @@ -194,7 +194,7 @@ var MapAttributeOperation = map[string]AttributeOperation{ "command": AttributeOperationCommand, } -// AttributeOperationLatency specifies the a value operation_latency attribute. +// AttributeOperationLatency specifies the value operation_latency attribute. type AttributeOperationLatency int const ( @@ -224,7 +224,7 @@ var MapAttributeOperationLatency = map[string]AttributeOperationLatency{ "command": AttributeOperationLatencyCommand, } -// AttributeType specifies the a value type attribute. +// AttributeType specifies the value type attribute. 
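
> [Editor's note] The `MapAttribute*` tables restored in this hunk are the parsing side of the generated enums: the scraper looks raw strings from `serverStatus` up in the map and treats a missing key as a partial-scrape error rather than panicking. A minimal standalone sketch of the pattern:

```go
package main

import "fmt"

// AttributeType mirrors the generated enum pattern: an int-backed enum plus a
// map for parsing raw strings returned by MongoDB.
type AttributeType int

const (
	_ AttributeType = iota
	AttributeTypeHit
	AttributeTypeMiss
)

var MapAttributeType = map[string]AttributeType{
	"hit":  AttributeTypeHit,
	"miss": AttributeTypeMiss,
}

func main() {
	if v, ok := MapAttributeType["hit"]; ok {
		fmt.Println("parsed attribute:", v) // parsed attribute: 1
	}
	if _, ok := MapAttributeType["bogus"]; !ok {
		fmt.Println("unknown attribute value: bogus") // caller records an error
	}
}
```
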
type AttributeType int const ( diff --git a/receiver/mongodbreceiver/scraper.go b/receiver/mongodbreceiver/scraper.go index 4b2920844324f..9df79b9774caa 100644 --- a/receiver/mongodbreceiver/scraper.go +++ b/receiver/mongodbreceiver/scraper.go @@ -75,6 +75,11 @@ func (s *mongodbScraper) start(ctx context.Context, _ component.Host) error { } s.client = c + // Skip secondary host discovery if direct connection is enabled + if s.config.DirectConnection { + return nil + } + secondaries, err := s.findSecondaryHosts(ctx) if err != nil { s.logger.Warn("failed to find secondary hosts", zap.Error(err)) diff --git a/receiver/mongodbreceiver/scraper_test.go b/receiver/mongodbreceiver/scraper_test.go index 9effd0448b0fa..a978cf2b1eeb3 100644 --- a/receiver/mongodbreceiver/scraper_test.go +++ b/receiver/mongodbreceiver/scraper_test.go @@ -40,6 +40,13 @@ func TestScraperLifecycle(t *testing.T) { f := NewFactory() cfg := f.CreateDefaultConfig().(*Config) + /* NOTE: + setting direct connection to true because originally, the scraper tests only ONE mongodb instance. + added in routing logic to detect multiple mongodb instances which takes longer than 2 milliseconds. + since this test is testing for lifecycle (start and shutting down ONE instance). + */ + cfg.DirectConnection = true + scraper := newMongodbScraper(receivertest.NewNopSettings(), cfg) require.NoError(t, scraper.start(context.Background(), componenttest.NewNopHost())) require.NoError(t, scraper.shutdown(context.Background())) From 4531088f247e39819fb407d0e320e80adca5fc6b Mon Sep 17 00:00:00 2001 From: Tim Chan Date: Mon, 27 Jan 2025 11:20:59 -0800 Subject: [PATCH 11/19] fixed small comments on PR --- receiver/mongodbreceiver/client_test.go | 11 ++++++++++- receiver/mongodbreceiver/scraper.go | 6 +----- .../testdata/integration/expected.4_4lpu.yaml | 2 +- 3 files changed, 12 insertions(+), 7 deletions(-) diff --git a/receiver/mongodbreceiver/client_test.go b/receiver/mongodbreceiver/client_test.go index 443615c306e1b..2e7487391a94a 100644 --- a/receiver/mongodbreceiver/client_test.go +++ b/receiver/mongodbreceiver/client_test.go @@ -74,7 +74,16 @@ func (fc *fakeClient) RunCommand(ctx context.Context, db string, command bson.M) if args.Get(0) == nil { return nil, args.Error(1) } - return args.Get(0).(bson.M), args.Error(1) + + result, ok := args.Get(0).(bson.M) + if !ok { + err := errors.New("mock returned invalid type") + zap.L().Error("type assertion failed", + zap.String("expected", "bson.M")) + return nil, err + } + + return result, args.Error(1) } func TestListDatabaseNames(t *testing.T) { diff --git a/receiver/mongodbreceiver/scraper.go b/receiver/mongodbreceiver/scraper.go index 9df79b9774caa..c4a59934b2ab0 100644 --- a/receiver/mongodbreceiver/scraper.go +++ b/receiver/mongodbreceiver/scraper.go @@ -304,7 +304,7 @@ func (s *mongodbScraper) findSecondaryHosts(ctx context.Context) ([]string, erro members, ok := result["members"].(primitive.A) if !ok { - return nil, fmt.Errorf("invalid members format") + return nil, fmt.Errorf("invalid members format: expected type primitive.A but got %T, value: %v", result["members"], result["members"]) } var hosts []string @@ -333,9 +333,5 @@ func (s *mongodbScraper) findSecondaryHosts(ctx context.Context) ([]string, erro } } - if len(hosts) == 0 { - s.logger.Warn("No secondary hosts found in replica set") - } - return hosts, nil } diff --git a/receiver/mongodbreceiver/testdata/integration/expected.4_4lpu.yaml b/receiver/mongodbreceiver/testdata/integration/expected.4_4lpu.yaml index 
a0e27d3c01ab0..98fb6055110e9 100644 --- a/receiver/mongodbreceiver/testdata/integration/expected.4_4lpu.yaml +++ b/receiver/mongodbreceiver/testdata/integration/expected.4_4lpu.yaml @@ -136,7 +136,7 @@ resourceMetrics: startTimeUnixNano: "1682363202250964000" timeUnixNano: "1682363222253814000" isMonotonic: true - unit: '{operations}' + unit: '{operations}' - description: The total time spent performing operations. name: mongodb.operation.time sum: From abdd742ba5cac2fedc1321eda39870a3303ecbd7 Mon Sep 17 00:00:00 2001 From: Tim Chan Date: Mon, 27 Jan 2025 11:22:34 -0800 Subject: [PATCH 12/19] fixed empty spaces --- receiver/mongodbreceiver/testdata/integration/expected.4_0.yaml | 2 +- receiver/mongodbreceiver/testdata/integration/expected.5_0.yaml | 2 +- receiver/mongodbreceiver/testdata/scraper/partial_scrape.yaml | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/receiver/mongodbreceiver/testdata/integration/expected.4_0.yaml b/receiver/mongodbreceiver/testdata/integration/expected.4_0.yaml index 300e60e9b6317..7e2bbd364a1d6 100644 --- a/receiver/mongodbreceiver/testdata/integration/expected.4_0.yaml +++ b/receiver/mongodbreceiver/testdata/integration/expected.4_0.yaml @@ -136,7 +136,7 @@ resourceMetrics: startTimeUnixNano: "1682363190502544000" timeUnixNano: "1682363210513475000" isMonotonic: true - unit: '{operations}' + unit: '{operations}' - description: The total time spent performing operations. name: mongodb.operation.time sum: diff --git a/receiver/mongodbreceiver/testdata/integration/expected.5_0.yaml b/receiver/mongodbreceiver/testdata/integration/expected.5_0.yaml index b1695d769df08..054c25999b599 100644 --- a/receiver/mongodbreceiver/testdata/integration/expected.5_0.yaml +++ b/receiver/mongodbreceiver/testdata/integration/expected.5_0.yaml @@ -136,7 +136,7 @@ resourceMetrics: startTimeUnixNano: "1682363190539043000" timeUnixNano: "1682363210542990000" isMonotonic: true - unit: '{operations}' + unit: '{operations}' - description: The total time spent performing operations. 
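
> [Editor's note] Patch 11's `client_test.go` change above swaps a bare type assertion in the testify mock for a checked one, so a mis-wired expectation yields an error instead of a panic. A runnable sketch of the hardened pattern (the wrong return type is deliberate):

```go
package main

import (
	"context"
	"errors"
	"fmt"

	"github.com/stretchr/testify/mock"
	"go.mongodb.org/mongo-driver/bson"
)

type fakeClient struct{ mock.Mock }

// RunCommand checks the type assertion on the mocked return value instead of
// letting a wrong Return(...) argument panic the test.
func (fc *fakeClient) RunCommand(ctx context.Context, db string, command bson.M) (bson.M, error) {
	args := fc.Called(ctx, db, command)
	if args.Get(0) == nil {
		return nil, args.Error(1)
	}
	result, ok := args.Get(0).(bson.M)
	if !ok {
		return nil, errors.New("mock returned invalid type: expected bson.M")
	}
	return result, args.Error(1)
}

func main() {
	fc := &fakeClient{}
	fc.On("RunCommand", mock.Anything, "admin", mock.Anything).
		Return("not-a-bson-document", nil) // deliberately the wrong type
	_, err := fc.RunCommand(context.Background(), "admin", bson.M{"ping": 1})
	fmt.Println(err) // mock returned invalid type: expected bson.M
}
```
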
name: mongodb.operation.time sum: diff --git a/receiver/mongodbreceiver/testdata/scraper/partial_scrape.yaml b/receiver/mongodbreceiver/testdata/scraper/partial_scrape.yaml index 6b3cb474afca1..ab033da4869dc 100644 --- a/receiver/mongodbreceiver/testdata/scraper/partial_scrape.yaml +++ b/receiver/mongodbreceiver/testdata/scraper/partial_scrape.yaml @@ -244,4 +244,4 @@ resourceMetrics: unit: ms scope: name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/mongodbreceiver - version: latest \ No newline at end of file + version: latest From 36b0bb544d2981799812e1da896a35d684c8f6d3 Mon Sep 17 00:00:00 2001 From: Tim Chan Date: Mon, 27 Jan 2025 14:18:03 -0800 Subject: [PATCH 13/19] removed replica metrics and routing logic into a new PR --- receiver/mongodbreceiver/client.go | 5 +- receiver/mongodbreceiver/config.go | 24 +----- receiver/mongodbreceiver/config_test.go | 4 +- receiver/mongodbreceiver/metadata.yaml | 48 ------------ receiver/mongodbreceiver/metrics.go | 63 +--------------- receiver/mongodbreceiver/scraper.go | 94 +----------------------- receiver/mongodbreceiver/scraper_test.go | 7 -- 7 files changed, 8 insertions(+), 237 deletions(-) diff --git a/receiver/mongodbreceiver/client.go b/receiver/mongodbreceiver/client.go index a192d83246377..1cf92a5a2c792 100644 --- a/receiver/mongodbreceiver/client.go +++ b/receiver/mongodbreceiver/client.go @@ -26,7 +26,6 @@ type client interface { DBStats(ctx context.Context, DBName string) (bson.M, error) TopStats(ctx context.Context) (bson.M, error) IndexStats(ctx context.Context, DBName, collectionName string) ([]bson.M, error) - RunCommand(ctx context.Context, db string, command bson.M) (bson.M, error) } // mongodbClient is a mongodb metric scraper client @@ -38,8 +37,8 @@ type mongodbClient struct { // newClient creates a new client to connect and query mongo for the // mongodbreceiver -var newClient = func(ctx context.Context, config *Config, logger *zap.Logger, secondary bool) (client, error) { - driver, err := mongo.Connect(ctx, config.ClientOptions(secondary)) +func newClient(ctx context.Context, config *Config, logger *zap.Logger) (client, error) { + driver, err := mongo.Connect(ctx, config.ClientOptions()) if err != nil { return nil, err } diff --git a/receiver/mongodbreceiver/config.go b/receiver/mongodbreceiver/config.go index 75fca410961f2..4d89797d324e8 100644 --- a/receiver/mongodbreceiver/config.go +++ b/receiver/mongodbreceiver/config.go @@ -11,7 +11,6 @@ import ( "time" "go.mongodb.org/mongo-driver/mongo/options" - "go.mongodb.org/mongo-driver/mongo/readpref" "go.opentelemetry.io/collector/config/confignet" "go.opentelemetry.io/collector/config/configopaque" "go.opentelemetry.io/collector/config/configtls" @@ -60,28 +59,7 @@ func (c *Config) Validate() error { return err } -func (c *Config) ClientOptions(secondary bool) *options.ClientOptions { - if secondary { - // For secondary nodes, create a direct connection - clientOptions := options.Client(). - SetHosts(c.hostlist()). - SetDirect(true). 
- SetReadPreference(readpref.SecondaryPreferred()) - - if c.Timeout > 0 { - clientOptions.SetConnectTimeout(c.Timeout) - } - - if c.Username != "" && c.Password != "" { - clientOptions.SetAuth(options.Credential{ - Username: c.Username, - Password: string(c.Password), - }) - } - - return clientOptions - } - +func (c *Config) ClientOptions() *options.ClientOptions { clientOptions := options.Client() connString := "mongodb://" + strings.Join(c.hostlist(), ",") clientOptions.ApplyURI(connString) diff --git a/receiver/mongodbreceiver/config_test.go b/receiver/mongodbreceiver/config_test.go index b3d45d553b457..21f69c1140928 100644 --- a/receiver/mongodbreceiver/config_test.go +++ b/receiver/mongodbreceiver/config_test.go @@ -165,7 +165,7 @@ func TestOptions(t *testing.T) { ReplicaSet: "rs-1", } - clientOptions := cfg.ClientOptions(false) + clientOptions := cfg.ClientOptions() require.Equal(t, clientOptions.Auth.Username, cfg.Username) require.Equal(t, clientOptions.ConnectTimeout.Milliseconds(), @@ -191,7 +191,7 @@ func TestOptionsTLS(t *testing.T) { }, }, } - opts := cfg.ClientOptions(false) + opts := cfg.ClientOptions() require.NotNil(t, opts.TLSConfig) } diff --git a/receiver/mongodbreceiver/metadata.yaml b/receiver/mongodbreceiver/metadata.yaml index dd0ce4a1225b0..7949fec2ef3d5 100644 --- a/receiver/mongodbreceiver/metadata.yaml +++ b/receiver/mongodbreceiver/metadata.yaml @@ -410,54 +410,6 @@ metrics: description: The number of flushes executed per second. unit: "{flush}/s" enabled: false - gauge: - value_type: double - aggregation_temporality: delta - monotonic: false - mongodb.repl_queries_per_sec: - description: The number of replicated queries executed per second. - unit: "{query}/s" - enabled: false - gauge: - value_type: double - aggregation_temporality: delta - monotonic: false - mongodb.repl_inserts_per_sec: - description: The number of replicated insertions executed per second. - unit: "{insert}/s" - enabled: false - gauge: - value_type: double - aggregation_temporality: delta - monotonic: false - mongodb.repl_commands_per_sec: - description: The number of replicated commands executed per second. - unit: "{command}/s" - enabled: false - gauge: - value_type: double - aggregation_temporality: delta - monotonic: false - mongodb.repl_getmores_per_sec: - description: The number of replicated getmores executed per second. - unit: "{getmore}/s" - enabled: false - gauge: - value_type: double - aggregation_temporality: delta - monotonic: false - mongodb.repl_deletes_per_sec: - description: The number of replicated deletes executed per second. - unit: "{delete}/s" - enabled: false - gauge: - value_type: double - aggregation_temporality: delta - monotonic: false - mongodb.repl_updates_per_sec: - description: The number of replicated updates executed per second. 
- unit: "{update}/s" - enabled: false gauge: value_type: double aggregation_temporality: delta diff --git a/receiver/mongodbreceiver/metrics.go b/receiver/mongodbreceiver/metrics.go index abe8c0df13704..dfdee23b11954 100644 --- a/receiver/mongodbreceiver/metrics.go +++ b/receiver/mongodbreceiver/metrics.go @@ -4,7 +4,6 @@ package mongodbreceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/mongodbreceiver" import ( - "context" "errors" "fmt" "reflect" @@ -13,7 +12,6 @@ import ( "go.mongodb.org/mongo-driver/bson" "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/scraper/scrapererror" - "go.uber.org/zap" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/mongodbreceiver/internal/metadata" ) @@ -246,46 +244,16 @@ func (s *mongodbScraper) recordOperations(now pcommon.Timestamp, doc bson.M, err } func (s *mongodbScraper) recordOperationsRepl(now pcommon.Timestamp, doc bson.M, errs *scrapererror.ScrapeErrors) { - var replDoc bson.M = doc - var highestInsertCount int64 = -1 - - if len(s.secondaryClients) > 0 { - ctx := context.Background() - for _, secondaryClient := range s.secondaryClients { - status, err := secondaryClient.ServerStatus(ctx, "admin") - if err != nil { - s.logger.Debug("Failed to get secondary server status", zap.Error(err)) - continue - } - - if opcountersRepl, ok := status["opcountersRepl"].(bson.M); ok { - if insertCount, ok := opcountersRepl["insert"].(int64); ok { - if insertCount > highestInsertCount { - highestInsertCount = insertCount - replDoc = status - } - } - } - } - } - - currentCounts := make(map[string]int64) for operationVal, operation := range metadata.MapAttributeOperation { metricPath := []string{"opcountersRepl", operationVal} metricName := "mongodb.operation.repl.count" - val, err := collectMetric(replDoc, metricPath) + val, err := collectMetric(doc, metricPath) if err != nil { errs.AddPartial(1, fmt.Errorf(collectMetricWithAttributes, metricName, operationVal, err)) continue } s.mb.RecordMongodbOperationReplCountDataPoint(now, val, operation) - - currentCounts[operationVal] = val - s.recordReplOperationPerSecond(now, operationVal, val) } - - s.prevReplCounts = currentCounts - s.prevReplTimestamp = now } func (s *mongodbScraper) recordFlushesPerSecond(now pcommon.Timestamp, doc bson.M, errs *scrapererror.ScrapeErrors) { @@ -312,35 +280,6 @@ func (s *mongodbScraper) recordFlushesPerSecond(now pcommon.Timestamp, doc bson. 
s.prevFlushTimestamp = now } -func (s *mongodbScraper) recordReplOperationPerSecond(now pcommon.Timestamp, operationVal string, currentCount int64) { - if s.prevReplTimestamp > 0 { - timeDelta := float64(now-s.prevReplTimestamp) / 1e9 - if timeDelta > 0 { - if prevReplCount, exists := s.prevReplCounts[operationVal]; exists { - delta := currentCount - prevReplCount - queriesPerSec := float64(delta) / timeDelta - - switch operationVal { - case "query": - s.mb.RecordMongodbReplQueriesPerSecDataPoint(now, queriesPerSec) - case "insert": - s.mb.RecordMongodbReplInsertsPerSecDataPoint(now, queriesPerSec) - case "command": - s.mb.RecordMongodbReplCommandsPerSecDataPoint(now, queriesPerSec) - case "getmore": - s.mb.RecordMongodbReplGetmoresPerSecDataPoint(now, queriesPerSec) - case "delete": - s.mb.RecordMongodbReplDeletesPerSecDataPoint(now, queriesPerSec) - case "update": - s.mb.RecordMongodbReplUpdatesPerSecDataPoint(now, queriesPerSec) - default: - fmt.Printf("Unhandled repl operation: %s\n", operationVal) - } - } - } - } -} - func (s *mongodbScraper) recordOperationPerSecond(now pcommon.Timestamp, operationVal string, currentCount int64) { if s.prevTimestamp > 0 { timeDelta := float64(now-s.prevTimestamp) / 1e9 diff --git a/receiver/mongodbreceiver/scraper.go b/receiver/mongodbreceiver/scraper.go index c4a59934b2ab0..d9fcc79ed4812 100644 --- a/receiver/mongodbreceiver/scraper.go +++ b/receiver/mongodbreceiver/scraper.go @@ -13,9 +13,7 @@ import ( "github.com/hashicorp/go-version" "go.mongodb.org/mongo-driver/bson" - "go.mongodb.org/mongo-driver/bson/primitive" "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/collector/config/confignet" "go.opentelemetry.io/collector/featuregate" "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/pmetric" @@ -42,14 +40,11 @@ type mongodbScraper struct { logger *zap.Logger config *Config client client - secondaryClients []client mongoVersion *version.Version mb *metadata.MetricsBuilder prevTimestamp pcommon.Timestamp - prevReplTimestamp pcommon.Timestamp prevFlushTimestamp pcommon.Timestamp prevCounts map[string]int64 - prevReplCounts map[string]int64 prevFlushCount int64 } @@ -60,68 +55,24 @@ func newMongodbScraper(settings receiver.Settings, config *Config) *mongodbScrap mb: metadata.NewMetricsBuilder(config.MetricsBuilderConfig, settings), mongoVersion: unknownVersion(), prevTimestamp: pcommon.Timestamp(0), - prevReplTimestamp: pcommon.Timestamp(0), prevFlushTimestamp: pcommon.Timestamp(0), prevCounts: make(map[string]int64), - prevReplCounts: make(map[string]int64), prevFlushCount: 0, } } func (s *mongodbScraper) start(ctx context.Context, _ component.Host) error { - c, err := newClient(ctx, s.config, s.logger, false) + c, err := newClient(ctx, s.config, s.logger) if err != nil { return fmt.Errorf("create mongo client: %w", err) } s.client = c - - // Skip secondary host discovery if direct connection is enabled - if s.config.DirectConnection { - return nil - } - - secondaries, err := s.findSecondaryHosts(ctx) - if err != nil { - s.logger.Warn("failed to find secondary hosts", zap.Error(err)) - return nil - } - - for _, secondary := range secondaries { - secondaryConfig := *s.config - secondaryConfig.Hosts = []confignet.TCPAddrConfig{ - { - Endpoint: secondary, - }, - } - - secondaryClient, err := newClient(ctx, &secondaryConfig, s.logger, true) - if err != nil { - s.logger.Warn("failed to connect to secondary", zap.String("host", secondary), zap.Error(err)) - continue - } - s.secondaryClients 
= append(s.secondaryClients, secondaryClient) - } - return nil } func (s *mongodbScraper) shutdown(ctx context.Context) error { - var errs []error - if s.client != nil { - if err := s.client.Disconnect(ctx); err != nil { - errs = append(errs, err) - } - } - - for _, client := range s.secondaryClients { - if err := client.Disconnect(ctx); err != nil { - errs = append(errs, err) - } - } - - if len(errs) > 0 { - return fmt.Errorf("multiple disconnect errors: %v", errs) + return s.client.Disconnect(ctx) } return nil } @@ -294,44 +245,3 @@ func serverAddressAndPort(serverStatus bson.M) (string, int64, error) { return "", 0, fmt.Errorf("unexpected host format: %s", host) } } - -func (s *mongodbScraper) findSecondaryHosts(ctx context.Context) ([]string, error) { - result, err := s.client.RunCommand(ctx, "admin", bson.M{"replSetGetStatus": 1}) - if err != nil { - s.logger.Error("Failed to get replica set status", zap.Error(err)) - return nil, fmt.Errorf("failed to get replica set status: %w", err) - } - - members, ok := result["members"].(primitive.A) - if !ok { - return nil, fmt.Errorf("invalid members format: expected type primitive.A but got %T, value: %v", result["members"], result["members"]) - } - - var hosts []string - for _, member := range members { - m, ok := member.(bson.M) - if !ok { - continue - } - - state, ok := m["stateStr"].(string) - if !ok { - continue - } - - name, ok := m["name"].(string) - if !ok { - continue - } - - // Only add actual secondaries, not arbiters or other states - if state == "SECONDARY" { - s.logger.Debug("Found secondary", - zap.String("host", name), - zap.String("state", state)) - hosts = append(hosts, name) - } - } - - return hosts, nil -} diff --git a/receiver/mongodbreceiver/scraper_test.go b/receiver/mongodbreceiver/scraper_test.go index a978cf2b1eeb3..9effd0448b0fa 100644 --- a/receiver/mongodbreceiver/scraper_test.go +++ b/receiver/mongodbreceiver/scraper_test.go @@ -40,13 +40,6 @@ func TestScraperLifecycle(t *testing.T) { f := NewFactory() cfg := f.CreateDefaultConfig().(*Config) - /* NOTE: - setting direct connection to true because originally, the scraper tests only ONE mongodb instance. - added in routing logic to detect multiple mongodb instances which takes longer than 2 milliseconds. - since this test is testing for lifecycle (start and shutting down ONE instance). - */ - cfg.DirectConnection = true - scraper := newMongodbScraper(receivertest.NewNopSettings(), cfg) require.NoError(t, scraper.start(context.Background(), componenttest.NewNopHost())) require.NoError(t, scraper.shutdown(context.Background())) From 591d125037a7b404662f206d3cb056b2c50edeb2 Mon Sep 17 00:00:00 2001 From: Tim Chan Date: Mon, 27 Jan 2025 14:18:46 -0800 Subject: [PATCH 14/19] removed repl metrics metadata --- receiver/mongodbreceiver/documentation.md | 48 --- .../mongodbreceiver/generated_package_test.go | 3 +- .../internal/metadata/generated_config.go | 24 -- .../metadata/generated_config_test.go | 22 +- .../internal/metadata/generated_metrics.go | 356 +----------------- .../metadata/generated_metrics_test.go | 90 ----- .../internal/metadata/testdata/config.yaml | 24 -- 7 files changed, 14 insertions(+), 553 deletions(-) diff --git a/receiver/mongodbreceiver/documentation.md b/receiver/mongodbreceiver/documentation.md index 0def02eb9305b..7f8074478a988 100644 --- a/receiver/mongodbreceiver/documentation.md +++ b/receiver/mongodbreceiver/documentation.md @@ -428,54 +428,6 @@ The number of queries executed per second. 
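
> [Editor's note] For reference, the secondary-discovery logic being moved out to a separate PR above hinges on two BSON details: `replSetGetStatus` returns `members` as a BSON array (`primitive.A`), and only members whose `stateStr` is `SECONDARY` are usable read targets. A condensed, standalone sketch of that walk:

```go
package main

import (
	"fmt"

	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/bson/primitive"
)

// secondaryHosts walks a replSetGetStatus result, skipping malformed members
// and non-SECONDARY states (PRIMARY, ARBITER, and so on).
func secondaryHosts(status bson.M) ([]string, error) {
	members, ok := status["members"].(primitive.A)
	if !ok {
		return nil, fmt.Errorf("invalid members format: got %T", status["members"])
	}
	var hosts []string
	for _, member := range members {
		m, ok := member.(bson.M)
		if !ok {
			continue
		}
		if state, _ := m["stateStr"].(string); state == "SECONDARY" {
			if name, _ := m["name"].(string); name != "" {
				hosts = append(hosts, name)
			}
		}
	}
	return hosts, nil
}

func main() {
	status := bson.M{"members": primitive.A{
		bson.M{"name": "rs0-0:27017", "stateStr": "PRIMARY"},
		bson.M{"name": "rs0-1:27017", "stateStr": "SECONDARY"},
	}}
	hosts, err := secondaryHosts(status)
	fmt.Println(hosts, err) // [rs0-1:27017] <nil>
}
```
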
| ---- | ----------- | ---------- | | {query}/s | Gauge | Double | -### mongodb.repl_commands_per_sec - -The number of replicated commands executed per second. - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| {command}/s | Gauge | Double | - -### mongodb.repl_deletes_per_sec - -The number of replicated deletes executed per second. - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| {delete}/s | Gauge | Double | - -### mongodb.repl_getmores_per_sec - -The number of replicated getmores executed per second. - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| {getmore}/s | Gauge | Double | - -### mongodb.repl_inserts_per_sec - -The number of replicated insertions executed per second. - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| {insert}/s | Gauge | Double | - -### mongodb.repl_queries_per_sec - -The number of replicated queries executed per second. - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| {query}/s | Gauge | Double | - -### mongodb.repl_updates_per_sec - -The number of replicated updates executed per second. - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| {update}/s | Gauge | Double | - ### mongodb.updates_per_sec The number of updates executed per second. diff --git a/receiver/mongodbreceiver/generated_package_test.go b/receiver/mongodbreceiver/generated_package_test.go index 17e9f23be856d..080891042403b 100644 --- a/receiver/mongodbreceiver/generated_package_test.go +++ b/receiver/mongodbreceiver/generated_package_test.go @@ -3,9 +3,8 @@ package mongodbreceiver import ( - "testing" - "go.uber.org/goleak" + "testing" ) func TestMain(m *testing.M) { diff --git a/receiver/mongodbreceiver/internal/metadata/generated_config.go b/receiver/mongodbreceiver/internal/metadata/generated_config.go index cd328ef117f27..cdbb351702e50 100644 --- a/receiver/mongodbreceiver/internal/metadata/generated_config.go +++ b/receiver/mongodbreceiver/internal/metadata/generated_config.go @@ -66,12 +66,6 @@ type MetricsConfig struct { MongodbOperationTime MetricConfig `mapstructure:"mongodb.operation.time"` MongodbPageFaults MetricConfig `mapstructure:"mongodb.page_faults"` MongodbQueriesPerSec MetricConfig `mapstructure:"mongodb.queries_per_sec"` - MongodbReplCommandsPerSec MetricConfig `mapstructure:"mongodb.repl_commands_per_sec"` - MongodbReplDeletesPerSec MetricConfig `mapstructure:"mongodb.repl_deletes_per_sec"` - MongodbReplGetmoresPerSec MetricConfig `mapstructure:"mongodb.repl_getmores_per_sec"` - MongodbReplInsertsPerSec MetricConfig `mapstructure:"mongodb.repl_inserts_per_sec"` - MongodbReplQueriesPerSec MetricConfig `mapstructure:"mongodb.repl_queries_per_sec"` - MongodbReplUpdatesPerSec MetricConfig `mapstructure:"mongodb.repl_updates_per_sec"` MongodbSessionCount MetricConfig `mapstructure:"mongodb.session.count"` MongodbStorageSize MetricConfig `mapstructure:"mongodb.storage.size"` MongodbUpdatesPerSec MetricConfig `mapstructure:"mongodb.updates_per_sec"` @@ -195,24 +189,6 @@ func DefaultMetricsConfig() MetricsConfig { MongodbQueriesPerSec: MetricConfig{ Enabled: false, }, - MongodbReplCommandsPerSec: MetricConfig{ - Enabled: false, - }, - MongodbReplDeletesPerSec: MetricConfig{ - Enabled: false, - }, - MongodbReplGetmoresPerSec: MetricConfig{ - Enabled: false, - }, - MongodbReplInsertsPerSec: MetricConfig{ - Enabled: false, - }, - MongodbReplQueriesPerSec: MetricConfig{ - Enabled: false, - }, - 
MongodbReplUpdatesPerSec: MetricConfig{ - Enabled: false, - }, MongodbSessionCount: MetricConfig{ Enabled: true, }, diff --git a/receiver/mongodbreceiver/internal/metadata/generated_config_test.go b/receiver/mongodbreceiver/internal/metadata/generated_config_test.go index 05edb3210af4c..092e44c2f5fae 100644 --- a/receiver/mongodbreceiver/internal/metadata/generated_config_test.go +++ b/receiver/mongodbreceiver/internal/metadata/generated_config_test.go @@ -63,12 +63,6 @@ func TestMetricsBuilderConfig(t *testing.T) { MongodbOperationTime: MetricConfig{Enabled: true}, MongodbPageFaults: MetricConfig{Enabled: true}, MongodbQueriesPerSec: MetricConfig{Enabled: true}, - MongodbReplCommandsPerSec: MetricConfig{Enabled: true}, - MongodbReplDeletesPerSec: MetricConfig{Enabled: true}, - MongodbReplGetmoresPerSec: MetricConfig{Enabled: true}, - MongodbReplInsertsPerSec: MetricConfig{Enabled: true}, - MongodbReplQueriesPerSec: MetricConfig{Enabled: true}, - MongodbReplUpdatesPerSec: MetricConfig{Enabled: true}, MongodbSessionCount: MetricConfig{Enabled: true}, MongodbStorageSize: MetricConfig{Enabled: true}, MongodbUpdatesPerSec: MetricConfig{Enabled: true}, @@ -124,12 +118,6 @@ func TestMetricsBuilderConfig(t *testing.T) { MongodbOperationTime: MetricConfig{Enabled: false}, MongodbPageFaults: MetricConfig{Enabled: false}, MongodbQueriesPerSec: MetricConfig{Enabled: false}, - MongodbReplCommandsPerSec: MetricConfig{Enabled: false}, - MongodbReplDeletesPerSec: MetricConfig{Enabled: false}, - MongodbReplGetmoresPerSec: MetricConfig{Enabled: false}, - MongodbReplInsertsPerSec: MetricConfig{Enabled: false}, - MongodbReplQueriesPerSec: MetricConfig{Enabled: false}, - MongodbReplUpdatesPerSec: MetricConfig{Enabled: false}, MongodbSessionCount: MetricConfig{Enabled: false}, MongodbStorageSize: MetricConfig{Enabled: false}, MongodbUpdatesPerSec: MetricConfig{Enabled: false}, @@ -147,8 +135,9 @@ func TestMetricsBuilderConfig(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { cfg := loadMetricsBuilderConfig(t, tt.name) - diff := cmp.Diff(tt.want, cfg, cmpopts.IgnoreUnexported(MetricConfig{}, ResourceAttributeConfig{})) - require.Emptyf(t, diff, "Config mismatch (-expected +actual):\n%s", diff) + if diff := cmp.Diff(tt.want, cfg, cmpopts.IgnoreUnexported(MetricConfig{}, ResourceAttributeConfig{})); diff != "" { + t.Errorf("Config mismatch (-expected +actual):\n%s", diff) + } }) } } @@ -192,8 +181,9 @@ func TestResourceAttributesConfig(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { cfg := loadResourceAttributesConfig(t, tt.name) - diff := cmp.Diff(tt.want, cfg, cmpopts.IgnoreUnexported(ResourceAttributeConfig{})) - require.Emptyf(t, diff, "Config mismatch (-expected +actual):\n%s", diff) + if diff := cmp.Diff(tt.want, cfg, cmpopts.IgnoreUnexported(ResourceAttributeConfig{})); diff != "" { + t.Errorf("Config mismatch (-expected +actual):\n%s", diff) + } }) } } diff --git a/receiver/mongodbreceiver/internal/metadata/generated_metrics.go b/receiver/mongodbreceiver/internal/metadata/generated_metrics.go index 9028f5fb8eddf..fdcfb6d72ce13 100644 --- a/receiver/mongodbreceiver/internal/metadata/generated_metrics.go +++ b/receiver/mongodbreceiver/internal/metadata/generated_metrics.go @@ -12,7 +12,7 @@ import ( "go.opentelemetry.io/collector/receiver" ) -// AttributeConnectionType specifies the value connection_type attribute. +// AttributeConnectionType specifies the a value connection_type attribute. 
type AttributeConnectionType int const ( @@ -42,7 +42,7 @@ var MapAttributeConnectionType = map[string]AttributeConnectionType{ "current": AttributeConnectionTypeCurrent, } -// AttributeLockMode specifies the value lock_mode attribute. +// AttributeLockMode specifies the a value lock_mode attribute. type AttributeLockMode int const ( @@ -76,7 +76,7 @@ var MapAttributeLockMode = map[string]AttributeLockMode{ "intent_exclusive": AttributeLockModeIntentExclusive, } -// AttributeLockType specifies the value lock_type attribute. +// AttributeLockType specifies the a value lock_type attribute. type AttributeLockType int const ( @@ -126,7 +126,7 @@ var MapAttributeLockType = map[string]AttributeLockType{ "oplog": AttributeLockTypeOplog, } -// AttributeMemoryType specifies the value memory_type attribute. +// AttributeMemoryType specifies the a value memory_type attribute. type AttributeMemoryType int const ( @@ -152,7 +152,7 @@ var MapAttributeMemoryType = map[string]AttributeMemoryType{ "virtual": AttributeMemoryTypeVirtual, } -// AttributeOperation specifies the value operation attribute. +// AttributeOperation specifies the a value operation attribute. type AttributeOperation int const ( @@ -194,7 +194,7 @@ var MapAttributeOperation = map[string]AttributeOperation{ "command": AttributeOperationCommand, } -// AttributeOperationLatency specifies the value operation_latency attribute. +// AttributeOperationLatency specifies the a value operation_latency attribute. type AttributeOperationLatency int const ( @@ -224,7 +224,7 @@ var MapAttributeOperationLatency = map[string]AttributeOperationLatency{ "command": AttributeOperationLatencyCommand, } -// AttributeType specifies the value type attribute. +// AttributeType specifies the a value type attribute. type AttributeType int const ( @@ -2198,300 +2198,6 @@ func newMetricMongodbQueriesPerSec(cfg MetricConfig) metricMongodbQueriesPerSec return m } -type metricMongodbReplCommandsPerSec struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills mongodb.repl_commands_per_sec metric with initial data. -func (m *metricMongodbReplCommandsPerSec) init() { - m.data.SetName("mongodb.repl_commands_per_sec") - m.data.SetDescription("The number of replicated commands executed per second.") - m.data.SetUnit("{command}/s") - m.data.SetEmptyGauge() -} - -func (m *metricMongodbReplCommandsPerSec) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetDoubleValue(val) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricMongodbReplCommandsPerSec) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
-func (m *metricMongodbReplCommandsPerSec) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricMongodbReplCommandsPerSec(cfg MetricConfig) metricMongodbReplCommandsPerSec { - m := metricMongodbReplCommandsPerSec{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricMongodbReplDeletesPerSec struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills mongodb.repl_deletes_per_sec metric with initial data. -func (m *metricMongodbReplDeletesPerSec) init() { - m.data.SetName("mongodb.repl_deletes_per_sec") - m.data.SetDescription("The number of replicated deletes executed per second.") - m.data.SetUnit("{delete}/s") - m.data.SetEmptyGauge() -} - -func (m *metricMongodbReplDeletesPerSec) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetDoubleValue(val) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricMongodbReplDeletesPerSec) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricMongodbReplDeletesPerSec) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricMongodbReplDeletesPerSec(cfg MetricConfig) metricMongodbReplDeletesPerSec { - m := metricMongodbReplDeletesPerSec{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricMongodbReplGetmoresPerSec struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills mongodb.repl_getmores_per_sec metric with initial data. -func (m *metricMongodbReplGetmoresPerSec) init() { - m.data.SetName("mongodb.repl_getmores_per_sec") - m.data.SetDescription("The number of replicated getmores executed per second.") - m.data.SetUnit("{getmore}/s") - m.data.SetEmptyGauge() -} - -func (m *metricMongodbReplGetmoresPerSec) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetDoubleValue(val) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricMongodbReplGetmoresPerSec) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
-func (m *metricMongodbReplGetmoresPerSec) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricMongodbReplGetmoresPerSec(cfg MetricConfig) metricMongodbReplGetmoresPerSec { - m := metricMongodbReplGetmoresPerSec{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricMongodbReplInsertsPerSec struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills mongodb.repl_inserts_per_sec metric with initial data. -func (m *metricMongodbReplInsertsPerSec) init() { - m.data.SetName("mongodb.repl_inserts_per_sec") - m.data.SetDescription("The number of replicated insertions executed per second.") - m.data.SetUnit("{insert}/s") - m.data.SetEmptyGauge() -} - -func (m *metricMongodbReplInsertsPerSec) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetDoubleValue(val) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricMongodbReplInsertsPerSec) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricMongodbReplInsertsPerSec) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricMongodbReplInsertsPerSec(cfg MetricConfig) metricMongodbReplInsertsPerSec { - m := metricMongodbReplInsertsPerSec{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricMongodbReplQueriesPerSec struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills mongodb.repl_queries_per_sec metric with initial data. -func (m *metricMongodbReplQueriesPerSec) init() { - m.data.SetName("mongodb.repl_queries_per_sec") - m.data.SetDescription("The number of replicated queries executed per second.") - m.data.SetUnit("{query}/s") - m.data.SetEmptyGauge() -} - -func (m *metricMongodbReplQueriesPerSec) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetDoubleValue(val) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricMongodbReplQueriesPerSec) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
-func (m *metricMongodbReplQueriesPerSec) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricMongodbReplQueriesPerSec(cfg MetricConfig) metricMongodbReplQueriesPerSec { - m := metricMongodbReplQueriesPerSec{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - -type metricMongodbReplUpdatesPerSec struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills mongodb.repl_updates_per_sec metric with initial data. -func (m *metricMongodbReplUpdatesPerSec) init() { - m.data.SetName("mongodb.repl_updates_per_sec") - m.data.SetDescription("The number of replicated updates executed per second.") - m.data.SetUnit("{update}/s") - m.data.SetEmptyGauge() -} - -func (m *metricMongodbReplUpdatesPerSec) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetDoubleValue(val) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricMongodbReplUpdatesPerSec) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricMongodbReplUpdatesPerSec) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricMongodbReplUpdatesPerSec(cfg MetricConfig) metricMongodbReplUpdatesPerSec { - m := metricMongodbReplUpdatesPerSec{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - type metricMongodbSessionCount struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. 
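All six repl metric types removed above share the boilerplate that mdatagen stamps out per metric: `init` names an empty double gauge, `recordDataPoint` buffers observations while the metric is enabled, and `emit` moves the buffer into the outgoing slice and re-initializes. A condensed, hypothetical sketch of that lifecycle (`rateMetric` is not a real type in this package, and the `updateCapacity` bookkeeping is elided):

```go
package example

import (
	"go.opentelemetry.io/collector/pdata/pcommon"
	"go.opentelemetry.io/collector/pdata/pmetric"
)

// rateMetric condenses the generated metricMongodb*PerSec pattern.
type rateMetric struct {
	name, desc, unit string
	enabled          bool
	data             pmetric.Metric
}

func newRateMetric(name, desc, unit string, enabled bool) *rateMetric {
	m := &rateMetric{name: name, desc: desc, unit: unit, enabled: enabled}
	if enabled {
		m.init()
	}
	return m
}

// init allocates a fresh, empty double gauge carrying the metric identity.
func (m *rateMetric) init() {
	m.data = pmetric.NewMetric()
	m.data.SetName(m.name)
	m.data.SetDescription(m.desc)
	m.data.SetUnit(m.unit)
	m.data.SetEmptyGauge()
}

// recordDataPoint buffers one observation; disabled metrics drop it silently.
func (m *rateMetric) recordDataPoint(start, ts pcommon.Timestamp, val float64) {
	if !m.enabled {
		return
	}
	dp := m.data.Gauge().DataPoints().AppendEmpty()
	dp.SetStartTimestamp(start)
	dp.SetTimestamp(ts)
	dp.SetDoubleValue(val)
}

// emit moves the buffered gauge into the outgoing slice, then re-initializes
// so the next scrape starts from an empty buffer.
func (m *rateMetric) emit(metrics pmetric.MetricSlice) {
	if !m.enabled || m.data.Gauge().DataPoints().Len() == 0 {
		return
	}
	m.data.MoveTo(metrics.AppendEmpty())
	m.init()
}
```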
@@ -2793,12 +2499,6 @@ type MetricsBuilder struct { metricMongodbOperationTime metricMongodbOperationTime metricMongodbPageFaults metricMongodbPageFaults metricMongodbQueriesPerSec metricMongodbQueriesPerSec - metricMongodbReplCommandsPerSec metricMongodbReplCommandsPerSec - metricMongodbReplDeletesPerSec metricMongodbReplDeletesPerSec - metricMongodbReplGetmoresPerSec metricMongodbReplGetmoresPerSec - metricMongodbReplInsertsPerSec metricMongodbReplInsertsPerSec - metricMongodbReplQueriesPerSec metricMongodbReplQueriesPerSec - metricMongodbReplUpdatesPerSec metricMongodbReplUpdatesPerSec metricMongodbSessionCount metricMongodbSessionCount metricMongodbStorageSize metricMongodbStorageSize metricMongodbUpdatesPerSec metricMongodbUpdatesPerSec @@ -2868,12 +2568,6 @@ func NewMetricsBuilder(mbc MetricsBuilderConfig, settings receiver.Settings, opt metricMongodbOperationTime: newMetricMongodbOperationTime(mbc.Metrics.MongodbOperationTime), metricMongodbPageFaults: newMetricMongodbPageFaults(mbc.Metrics.MongodbPageFaults), metricMongodbQueriesPerSec: newMetricMongodbQueriesPerSec(mbc.Metrics.MongodbQueriesPerSec), - metricMongodbReplCommandsPerSec: newMetricMongodbReplCommandsPerSec(mbc.Metrics.MongodbReplCommandsPerSec), - metricMongodbReplDeletesPerSec: newMetricMongodbReplDeletesPerSec(mbc.Metrics.MongodbReplDeletesPerSec), - metricMongodbReplGetmoresPerSec: newMetricMongodbReplGetmoresPerSec(mbc.Metrics.MongodbReplGetmoresPerSec), - metricMongodbReplInsertsPerSec: newMetricMongodbReplInsertsPerSec(mbc.Metrics.MongodbReplInsertsPerSec), - metricMongodbReplQueriesPerSec: newMetricMongodbReplQueriesPerSec(mbc.Metrics.MongodbReplQueriesPerSec), - metricMongodbReplUpdatesPerSec: newMetricMongodbReplUpdatesPerSec(mbc.Metrics.MongodbReplUpdatesPerSec), metricMongodbSessionCount: newMetricMongodbSessionCount(mbc.Metrics.MongodbSessionCount), metricMongodbStorageSize: newMetricMongodbStorageSize(mbc.Metrics.MongodbStorageSize), metricMongodbUpdatesPerSec: newMetricMongodbUpdatesPerSec(mbc.Metrics.MongodbUpdatesPerSec), @@ -3007,12 +2701,6 @@ func (mb *MetricsBuilder) EmitForResource(options ...ResourceMetricsOption) { mb.metricMongodbOperationTime.emit(ils.Metrics()) mb.metricMongodbPageFaults.emit(ils.Metrics()) mb.metricMongodbQueriesPerSec.emit(ils.Metrics()) - mb.metricMongodbReplCommandsPerSec.emit(ils.Metrics()) - mb.metricMongodbReplDeletesPerSec.emit(ils.Metrics()) - mb.metricMongodbReplGetmoresPerSec.emit(ils.Metrics()) - mb.metricMongodbReplInsertsPerSec.emit(ils.Metrics()) - mb.metricMongodbReplQueriesPerSec.emit(ils.Metrics()) - mb.metricMongodbReplUpdatesPerSec.emit(ils.Metrics()) mb.metricMongodbSessionCount.emit(ils.Metrics()) mb.metricMongodbStorageSize.emit(ils.Metrics()) mb.metricMongodbUpdatesPerSec.emit(ils.Metrics()) @@ -3239,36 +2927,6 @@ func (mb *MetricsBuilder) RecordMongodbQueriesPerSecDataPoint(ts pcommon.Timesta mb.metricMongodbQueriesPerSec.recordDataPoint(mb.startTime, ts, val) } -// RecordMongodbReplCommandsPerSecDataPoint adds a data point to mongodb.repl_commands_per_sec metric. -func (mb *MetricsBuilder) RecordMongodbReplCommandsPerSecDataPoint(ts pcommon.Timestamp, val float64) { - mb.metricMongodbReplCommandsPerSec.recordDataPoint(mb.startTime, ts, val) -} - -// RecordMongodbReplDeletesPerSecDataPoint adds a data point to mongodb.repl_deletes_per_sec metric. 
-func (mb *MetricsBuilder) RecordMongodbReplDeletesPerSecDataPoint(ts pcommon.Timestamp, val float64) { - mb.metricMongodbReplDeletesPerSec.recordDataPoint(mb.startTime, ts, val) -} - -// RecordMongodbReplGetmoresPerSecDataPoint adds a data point to mongodb.repl_getmores_per_sec metric. -func (mb *MetricsBuilder) RecordMongodbReplGetmoresPerSecDataPoint(ts pcommon.Timestamp, val float64) { - mb.metricMongodbReplGetmoresPerSec.recordDataPoint(mb.startTime, ts, val) -} - -// RecordMongodbReplInsertsPerSecDataPoint adds a data point to mongodb.repl_inserts_per_sec metric. -func (mb *MetricsBuilder) RecordMongodbReplInsertsPerSecDataPoint(ts pcommon.Timestamp, val float64) { - mb.metricMongodbReplInsertsPerSec.recordDataPoint(mb.startTime, ts, val) -} - -// RecordMongodbReplQueriesPerSecDataPoint adds a data point to mongodb.repl_queries_per_sec metric. -func (mb *MetricsBuilder) RecordMongodbReplQueriesPerSecDataPoint(ts pcommon.Timestamp, val float64) { - mb.metricMongodbReplQueriesPerSec.recordDataPoint(mb.startTime, ts, val) -} - -// RecordMongodbReplUpdatesPerSecDataPoint adds a data point to mongodb.repl_updates_per_sec metric. -func (mb *MetricsBuilder) RecordMongodbReplUpdatesPerSecDataPoint(ts pcommon.Timestamp, val float64) { - mb.metricMongodbReplUpdatesPerSec.recordDataPoint(mb.startTime, ts, val) -} - // RecordMongodbSessionCountDataPoint adds a data point to mongodb.session.count metric. func (mb *MetricsBuilder) RecordMongodbSessionCountDataPoint(ts pcommon.Timestamp, val int64) { mb.metricMongodbSessionCount.recordDataPoint(mb.startTime, ts, val) diff --git a/receiver/mongodbreceiver/internal/metadata/generated_metrics_test.go b/receiver/mongodbreceiver/internal/metadata/generated_metrics_test.go index b2fc309a9a933..123f5522ee487 100644 --- a/receiver/mongodbreceiver/internal/metadata/generated_metrics_test.go +++ b/receiver/mongodbreceiver/internal/metadata/generated_metrics_test.go @@ -202,24 +202,6 @@ func TestMetricsBuilder(t *testing.T) { allMetricsCount++ mb.RecordMongodbQueriesPerSecDataPoint(ts, 1) - allMetricsCount++ - mb.RecordMongodbReplCommandsPerSecDataPoint(ts, 1) - - allMetricsCount++ - mb.RecordMongodbReplDeletesPerSecDataPoint(ts, 1) - - allMetricsCount++ - mb.RecordMongodbReplGetmoresPerSecDataPoint(ts, 1) - - allMetricsCount++ - mb.RecordMongodbReplInsertsPerSecDataPoint(ts, 1) - - allMetricsCount++ - mb.RecordMongodbReplQueriesPerSecDataPoint(ts, 1) - - allMetricsCount++ - mb.RecordMongodbReplUpdatesPerSecDataPoint(ts, 1) - defaultMetricsCount++ allMetricsCount++ mb.RecordMongodbSessionCountDataPoint(ts, 1) @@ -826,78 +808,6 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, ts, dp.Timestamp()) assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType()) assert.InDelta(t, float64(1), dp.DoubleValue(), 0.01) - case "mongodb.repl_commands_per_sec": - assert.False(t, validatedMetrics["mongodb.repl_commands_per_sec"], "Found a duplicate in the metrics slice: mongodb.repl_commands_per_sec") - validatedMetrics["mongodb.repl_commands_per_sec"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "The number of replicated commands executed per second.", ms.At(i).Description()) - assert.Equal(t, "{command}/s", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType()) - assert.InDelta(t, float64(1), 
dp.DoubleValue(), 0.01) - case "mongodb.repl_deletes_per_sec": - assert.False(t, validatedMetrics["mongodb.repl_deletes_per_sec"], "Found a duplicate in the metrics slice: mongodb.repl_deletes_per_sec") - validatedMetrics["mongodb.repl_deletes_per_sec"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "The number of replicated deletes executed per second.", ms.At(i).Description()) - assert.Equal(t, "{delete}/s", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType()) - assert.InDelta(t, float64(1), dp.DoubleValue(), 0.01) - case "mongodb.repl_getmores_per_sec": - assert.False(t, validatedMetrics["mongodb.repl_getmores_per_sec"], "Found a duplicate in the metrics slice: mongodb.repl_getmores_per_sec") - validatedMetrics["mongodb.repl_getmores_per_sec"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "The number of replicated getmores executed per second.", ms.At(i).Description()) - assert.Equal(t, "{getmore}/s", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType()) - assert.InDelta(t, float64(1), dp.DoubleValue(), 0.01) - case "mongodb.repl_inserts_per_sec": - assert.False(t, validatedMetrics["mongodb.repl_inserts_per_sec"], "Found a duplicate in the metrics slice: mongodb.repl_inserts_per_sec") - validatedMetrics["mongodb.repl_inserts_per_sec"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "The number of replicated insertions executed per second.", ms.At(i).Description()) - assert.Equal(t, "{insert}/s", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType()) - assert.InDelta(t, float64(1), dp.DoubleValue(), 0.01) - case "mongodb.repl_queries_per_sec": - assert.False(t, validatedMetrics["mongodb.repl_queries_per_sec"], "Found a duplicate in the metrics slice: mongodb.repl_queries_per_sec") - validatedMetrics["mongodb.repl_queries_per_sec"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "The number of replicated queries executed per second.", ms.At(i).Description()) - assert.Equal(t, "{query}/s", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType()) - assert.InDelta(t, float64(1), dp.DoubleValue(), 0.01) - case "mongodb.repl_updates_per_sec": - assert.False(t, validatedMetrics["mongodb.repl_updates_per_sec"], "Found a duplicate in the metrics slice: mongodb.repl_updates_per_sec") - validatedMetrics["mongodb.repl_updates_per_sec"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "The number of replicated updates executed per second.", ms.At(i).Description()) - assert.Equal(t, 
"{update}/s", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType()) - assert.InDelta(t, float64(1), dp.DoubleValue(), 0.01) case "mongodb.session.count": assert.False(t, validatedMetrics["mongodb.session.count"], "Found a duplicate in the metrics slice: mongodb.session.count") validatedMetrics["mongodb.session.count"] = true diff --git a/receiver/mongodbreceiver/internal/metadata/testdata/config.yaml b/receiver/mongodbreceiver/internal/metadata/testdata/config.yaml index 75316c9a30e22..ab1e61d82f257 100644 --- a/receiver/mongodbreceiver/internal/metadata/testdata/config.yaml +++ b/receiver/mongodbreceiver/internal/metadata/testdata/config.yaml @@ -77,18 +77,6 @@ all_set: enabled: true mongodb.queries_per_sec: enabled: true - mongodb.repl_commands_per_sec: - enabled: true - mongodb.repl_deletes_per_sec: - enabled: true - mongodb.repl_getmores_per_sec: - enabled: true - mongodb.repl_inserts_per_sec: - enabled: true - mongodb.repl_queries_per_sec: - enabled: true - mongodb.repl_updates_per_sec: - enabled: true mongodb.session.count: enabled: true mongodb.storage.size: @@ -184,18 +172,6 @@ none_set: enabled: false mongodb.queries_per_sec: enabled: false - mongodb.repl_commands_per_sec: - enabled: false - mongodb.repl_deletes_per_sec: - enabled: false - mongodb.repl_getmores_per_sec: - enabled: false - mongodb.repl_inserts_per_sec: - enabled: false - mongodb.repl_queries_per_sec: - enabled: false - mongodb.repl_updates_per_sec: - enabled: false mongodb.session.count: enabled: false mongodb.storage.size: From aae695d93b992d0fd8ae4ba67578c2e26dfbca6f Mon Sep 17 00:00:00 2001 From: Tim Chan Date: Mon, 27 Jan 2025 17:28:02 -0800 Subject: [PATCH 15/19] fixed make generate error --- receiver/mongodbreceiver/generated_package_test.go | 3 ++- .../internal/metadata/generated_config_test.go | 10 ++++------ .../internal/metadata/generated_metrics.go | 14 +++++++------- 3 files changed, 13 insertions(+), 14 deletions(-) diff --git a/receiver/mongodbreceiver/generated_package_test.go b/receiver/mongodbreceiver/generated_package_test.go index 080891042403b..17e9f23be856d 100644 --- a/receiver/mongodbreceiver/generated_package_test.go +++ b/receiver/mongodbreceiver/generated_package_test.go @@ -3,8 +3,9 @@ package mongodbreceiver import ( - "go.uber.org/goleak" "testing" + + "go.uber.org/goleak" ) func TestMain(m *testing.M) { diff --git a/receiver/mongodbreceiver/internal/metadata/generated_config_test.go b/receiver/mongodbreceiver/internal/metadata/generated_config_test.go index 092e44c2f5fae..23de07f334a0f 100644 --- a/receiver/mongodbreceiver/internal/metadata/generated_config_test.go +++ b/receiver/mongodbreceiver/internal/metadata/generated_config_test.go @@ -135,9 +135,8 @@ func TestMetricsBuilderConfig(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { cfg := loadMetricsBuilderConfig(t, tt.name) - if diff := cmp.Diff(tt.want, cfg, cmpopts.IgnoreUnexported(MetricConfig{}, ResourceAttributeConfig{})); diff != "" { - t.Errorf("Config mismatch (-expected +actual):\n%s", diff) - } + diff := cmp.Diff(tt.want, cfg, cmpopts.IgnoreUnexported(MetricConfig{}, ResourceAttributeConfig{})) + require.Emptyf(t, diff, "Config mismatch (-expected +actual):\n%s", diff) }) } } @@ -181,9 +180,8 @@ func TestResourceAttributesConfig(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { cfg := 
loadResourceAttributesConfig(t, tt.name) - if diff := cmp.Diff(tt.want, cfg, cmpopts.IgnoreUnexported(ResourceAttributeConfig{})); diff != "" { - t.Errorf("Config mismatch (-expected +actual):\n%s", diff) - } + diff := cmp.Diff(tt.want, cfg, cmpopts.IgnoreUnexported(ResourceAttributeConfig{})) + require.Emptyf(t, diff, "Config mismatch (-expected +actual):\n%s", diff) }) } } diff --git a/receiver/mongodbreceiver/internal/metadata/generated_metrics.go b/receiver/mongodbreceiver/internal/metadata/generated_metrics.go index fdcfb6d72ce13..daac3dea31022 100644 --- a/receiver/mongodbreceiver/internal/metadata/generated_metrics.go +++ b/receiver/mongodbreceiver/internal/metadata/generated_metrics.go @@ -12,7 +12,7 @@ import ( "go.opentelemetry.io/collector/receiver" ) -// AttributeConnectionType specifies the a value connection_type attribute. +// AttributeConnectionType specifies the value connection_type attribute. type AttributeConnectionType int const ( @@ -42,7 +42,7 @@ var MapAttributeConnectionType = map[string]AttributeConnectionType{ "current": AttributeConnectionTypeCurrent, } -// AttributeLockMode specifies the a value lock_mode attribute. +// AttributeLockMode specifies the value lock_mode attribute. type AttributeLockMode int const ( @@ -76,7 +76,7 @@ var MapAttributeLockMode = map[string]AttributeLockMode{ "intent_exclusive": AttributeLockModeIntentExclusive, } -// AttributeLockType specifies the a value lock_type attribute. +// AttributeLockType specifies the value lock_type attribute. type AttributeLockType int const ( @@ -126,7 +126,7 @@ var MapAttributeLockType = map[string]AttributeLockType{ "oplog": AttributeLockTypeOplog, } -// AttributeMemoryType specifies the a value memory_type attribute. +// AttributeMemoryType specifies the value memory_type attribute. type AttributeMemoryType int const ( @@ -152,7 +152,7 @@ var MapAttributeMemoryType = map[string]AttributeMemoryType{ "virtual": AttributeMemoryTypeVirtual, } -// AttributeOperation specifies the a value operation attribute. +// AttributeOperation specifies the value operation attribute. type AttributeOperation int const ( @@ -194,7 +194,7 @@ var MapAttributeOperation = map[string]AttributeOperation{ "command": AttributeOperationCommand, } -// AttributeOperationLatency specifies the a value operation_latency attribute. +// AttributeOperationLatency specifies the value operation_latency attribute. type AttributeOperationLatency int const ( @@ -224,7 +224,7 @@ var MapAttributeOperationLatency = map[string]AttributeOperationLatency{ "command": AttributeOperationLatencyCommand, } -// AttributeType specifies the a value type attribute. +// AttributeType specifies the value type attribute. 
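The import churn in generated_package_test.go (here and in the earlier patch) only shuffles the grouping around the goleak-based leak check; the function body itself never changes. A sketch of the whole generated file, assuming the usual mdatagen output:

```go
package mongodbreceiver

import (
	"testing"

	"go.uber.org/goleak"
)

// TestMain fails the package's tests if any goroutines are still running
// when they finish, catching leaked clients or scraper workers.
func TestMain(m *testing.M) {
	goleak.VerifyTestMain(m)
}
```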
type AttributeType int const ( From e66d4e964daf50fab4fb007ac8fec11aa2797f4c Mon Sep 17 00:00:00 2001 From: Tim Chan Date: Tue, 25 Feb 2025 17:19:14 -0800 Subject: [PATCH 16/19] fixed comments --- receiver/mongodbreceiver/documentation.md | 30 +- .../mongodbreceiver/generated_package_test.go | 3 +- .../internal/metadata/generated_config.go | 36 +- .../metadata/generated_config_test.go | 42 ++- .../internal/metadata/generated_metrics.go | 339 ++++++------------ .../metadata/generated_metrics_test.go | 86 ++--- .../internal/metadata/testdata/config.yaml | 36 +- receiver/mongodbreceiver/metadata.yaml | 28 +- receiver/mongodbreceiver/metrics.go | 45 +-- receiver/mongodbreceiver/scraper.go | 1 - receiver/mongodbreceiver/scraper_test.go | 3 +- 11 files changed, 213 insertions(+), 436 deletions(-) diff --git a/receiver/mongodbreceiver/documentation.md b/receiver/mongodbreceiver/documentation.md index 7f8074478a988..9dddb6172ea6c 100644 --- a/receiver/mongodbreceiver/documentation.md +++ b/receiver/mongodbreceiver/documentation.md @@ -258,23 +258,7 @@ The number of write operations currently being processed. | ---- | ----------- | ---------- | ----------------------- | --------- | | {writes} | Sum | Int | Cumulative | false | -### mongodb.cache.dirty.percent - -The percentage of WiredTiger cache that is dirty. - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| 1 | Gauge | Double | - -### mongodb.cache.used.percent - -The percentage of WiredTiger cache in use. - -| Unit | Metric Type | Value Type | -| ---- | ----------- | ---------- | -| 1 | Gauge | Double | - -### mongodb.commands_per_sec +### mongodb.commands.rate The number of commands executed per second. @@ -282,7 +266,7 @@ The number of commands executed per second. | ---- | ----------- | ---------- | | {command}/s | Gauge | Double | -### mongodb.deletes_per_sec +### mongodb.deletes.rate The number of deletes executed per second. @@ -290,7 +274,7 @@ The number of deletes executed per second. | ---- | ----------- | ---------- | | {delete}/s | Gauge | Double | -### mongodb.flushes_per_sec +### mongodb.flushes.rate The number of flushes executed per second. @@ -298,7 +282,7 @@ The number of flushes executed per second. | ---- | ----------- | ---------- | | {flush}/s | Gauge | Double | -### mongodb.getmores_per_sec +### mongodb.getmores.rate The number of getmores executed per second. @@ -316,7 +300,7 @@ A value of '1' indicates healthy. A value of '0' indicates unhealthy. | ---- | ----------- | ---------- | | 1 | Gauge | Int | -### mongodb.inserts_per_sec +### mongodb.inserts.rate The number of insertions executed per second. @@ -420,7 +404,7 @@ The number of page faults. | ---- | ----------- | ---------- | ----------------------- | --------- | | {faults} | Sum | Int | Cumulative | true | -### mongodb.queries_per_sec +### mongodb.queries.rate The number of queries executed per second. @@ -428,7 +412,7 @@ The number of queries executed per second. | ---- | ----------- | ---------- | | {query}/s | Gauge | Double | -### mongodb.updates_per_sec +### mongodb.updates.rate The number of updates executed per second. 
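Because every renamed rate gauge defaults to `Enabled: false` (see DefaultMetricsConfig below), deployments must opt in explicitly. A minimal collector configuration sketch using the new `.rate` names, assuming the receiver's usual `hosts` and `collection_interval` settings:

```yaml
receivers:
  mongodb:
    hosts:
      - endpoint: localhost:27017
    collection_interval: 60s
    metrics:
      mongodb.commands.rate:
        enabled: true
      mongodb.queries.rate:
        enabled: true
      mongodb.updates.rate:
        enabled: true
```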
diff --git a/receiver/mongodbreceiver/generated_package_test.go b/receiver/mongodbreceiver/generated_package_test.go index 17e9f23be856d..080891042403b 100644 --- a/receiver/mongodbreceiver/generated_package_test.go +++ b/receiver/mongodbreceiver/generated_package_test.go @@ -3,9 +3,8 @@ package mongodbreceiver import ( - "testing" - "go.uber.org/goleak" + "testing" ) func TestMain(m *testing.M) { diff --git a/receiver/mongodbreceiver/internal/metadata/generated_config.go b/receiver/mongodbreceiver/internal/metadata/generated_config.go index cdbb351702e50..e2d58b9b857c1 100644 --- a/receiver/mongodbreceiver/internal/metadata/generated_config.go +++ b/receiver/mongodbreceiver/internal/metadata/generated_config.go @@ -30,27 +30,25 @@ func (ms *MetricConfig) Unmarshal(parser *confmap.Conf) error { type MetricsConfig struct { MongodbActiveReads MetricConfig `mapstructure:"mongodb.active.reads"` MongodbActiveWrites MetricConfig `mapstructure:"mongodb.active.writes"` - MongodbCacheDirtyPercent MetricConfig `mapstructure:"mongodb.cache.dirty.percent"` MongodbCacheOperations MetricConfig `mapstructure:"mongodb.cache.operations"` - MongodbCacheUsedPercent MetricConfig `mapstructure:"mongodb.cache.used.percent"` MongodbCollectionCount MetricConfig `mapstructure:"mongodb.collection.count"` - MongodbCommandsPerSec MetricConfig `mapstructure:"mongodb.commands_per_sec"` + MongodbCommandsRate MetricConfig `mapstructure:"mongodb.commands.rate"` MongodbConnectionCount MetricConfig `mapstructure:"mongodb.connection.count"` MongodbCursorCount MetricConfig `mapstructure:"mongodb.cursor.count"` MongodbCursorTimeoutCount MetricConfig `mapstructure:"mongodb.cursor.timeout.count"` MongodbDataSize MetricConfig `mapstructure:"mongodb.data.size"` MongodbDatabaseCount MetricConfig `mapstructure:"mongodb.database.count"` - MongodbDeletesPerSec MetricConfig `mapstructure:"mongodb.deletes_per_sec"` + MongodbDeletesRate MetricConfig `mapstructure:"mongodb.deletes.rate"` MongodbDocumentOperationCount MetricConfig `mapstructure:"mongodb.document.operation.count"` MongodbExtentCount MetricConfig `mapstructure:"mongodb.extent.count"` - MongodbFlushesPerSec MetricConfig `mapstructure:"mongodb.flushes_per_sec"` - MongodbGetmoresPerSec MetricConfig `mapstructure:"mongodb.getmores_per_sec"` + MongodbFlushesRate MetricConfig `mapstructure:"mongodb.flushes.rate"` + MongodbGetmoresRate MetricConfig `mapstructure:"mongodb.getmores.rate"` MongodbGlobalLockTime MetricConfig `mapstructure:"mongodb.global_lock.time"` MongodbHealth MetricConfig `mapstructure:"mongodb.health"` MongodbIndexAccessCount MetricConfig `mapstructure:"mongodb.index.access.count"` MongodbIndexCount MetricConfig `mapstructure:"mongodb.index.count"` MongodbIndexSize MetricConfig `mapstructure:"mongodb.index.size"` - MongodbInsertsPerSec MetricConfig `mapstructure:"mongodb.inserts_per_sec"` + MongodbInsertsRate MetricConfig `mapstructure:"mongodb.inserts.rate"` MongodbLockAcquireCount MetricConfig `mapstructure:"mongodb.lock.acquire.count"` MongodbLockAcquireTime MetricConfig `mapstructure:"mongodb.lock.acquire.time"` MongodbLockAcquireWaitCount MetricConfig `mapstructure:"mongodb.lock.acquire.wait_count"` @@ -65,10 +63,10 @@ type MetricsConfig struct { MongodbOperationReplCount MetricConfig `mapstructure:"mongodb.operation.repl.count"` MongodbOperationTime MetricConfig `mapstructure:"mongodb.operation.time"` MongodbPageFaults MetricConfig `mapstructure:"mongodb.page_faults"` - MongodbQueriesPerSec MetricConfig `mapstructure:"mongodb.queries_per_sec"` + 
MongodbQueriesRate MetricConfig `mapstructure:"mongodb.queries.rate"` MongodbSessionCount MetricConfig `mapstructure:"mongodb.session.count"` MongodbStorageSize MetricConfig `mapstructure:"mongodb.storage.size"` - MongodbUpdatesPerSec MetricConfig `mapstructure:"mongodb.updates_per_sec"` + MongodbUpdatesRate MetricConfig `mapstructure:"mongodb.updates.rate"` MongodbUptime MetricConfig `mapstructure:"mongodb.uptime"` MongodbWtcacheBytesRead MetricConfig `mapstructure:"mongodb.wtcache.bytes.read"` } @@ -81,19 +79,13 @@ func DefaultMetricsConfig() MetricsConfig { MongodbActiveWrites: MetricConfig{ Enabled: false, }, - MongodbCacheDirtyPercent: MetricConfig{ - Enabled: false, - }, MongodbCacheOperations: MetricConfig{ Enabled: true, }, - MongodbCacheUsedPercent: MetricConfig{ - Enabled: false, - }, MongodbCollectionCount: MetricConfig{ Enabled: true, }, - MongodbCommandsPerSec: MetricConfig{ + MongodbCommandsRate: MetricConfig{ Enabled: false, }, MongodbConnectionCount: MetricConfig{ @@ -111,7 +103,7 @@ func DefaultMetricsConfig() MetricsConfig { MongodbDatabaseCount: MetricConfig{ Enabled: true, }, - MongodbDeletesPerSec: MetricConfig{ + MongodbDeletesRate: MetricConfig{ Enabled: false, }, MongodbDocumentOperationCount: MetricConfig{ @@ -120,10 +112,10 @@ func DefaultMetricsConfig() MetricsConfig { MongodbExtentCount: MetricConfig{ Enabled: true, }, - MongodbFlushesPerSec: MetricConfig{ + MongodbFlushesRate: MetricConfig{ Enabled: false, }, - MongodbGetmoresPerSec: MetricConfig{ + MongodbGetmoresRate: MetricConfig{ Enabled: false, }, MongodbGlobalLockTime: MetricConfig{ @@ -141,7 +133,7 @@ func DefaultMetricsConfig() MetricsConfig { MongodbIndexSize: MetricConfig{ Enabled: true, }, - MongodbInsertsPerSec: MetricConfig{ + MongodbInsertsRate: MetricConfig{ Enabled: false, }, MongodbLockAcquireCount: MetricConfig{ @@ -186,7 +178,7 @@ func DefaultMetricsConfig() MetricsConfig { MongodbPageFaults: MetricConfig{ Enabled: false, }, - MongodbQueriesPerSec: MetricConfig{ + MongodbQueriesRate: MetricConfig{ Enabled: false, }, MongodbSessionCount: MetricConfig{ @@ -195,7 +187,7 @@ func DefaultMetricsConfig() MetricsConfig { MongodbStorageSize: MetricConfig{ Enabled: true, }, - MongodbUpdatesPerSec: MetricConfig{ + MongodbUpdatesRate: MetricConfig{ Enabled: false, }, MongodbUptime: MetricConfig{ diff --git a/receiver/mongodbreceiver/internal/metadata/generated_config_test.go b/receiver/mongodbreceiver/internal/metadata/generated_config_test.go index 23de07f334a0f..213f4b0f22d6e 100644 --- a/receiver/mongodbreceiver/internal/metadata/generated_config_test.go +++ b/receiver/mongodbreceiver/internal/metadata/generated_config_test.go @@ -27,27 +27,25 @@ func TestMetricsBuilderConfig(t *testing.T) { Metrics: MetricsConfig{ MongodbActiveReads: MetricConfig{Enabled: true}, MongodbActiveWrites: MetricConfig{Enabled: true}, - MongodbCacheDirtyPercent: MetricConfig{Enabled: true}, MongodbCacheOperations: MetricConfig{Enabled: true}, - MongodbCacheUsedPercent: MetricConfig{Enabled: true}, MongodbCollectionCount: MetricConfig{Enabled: true}, - MongodbCommandsPerSec: MetricConfig{Enabled: true}, + MongodbCommandsRate: MetricConfig{Enabled: true}, MongodbConnectionCount: MetricConfig{Enabled: true}, MongodbCursorCount: MetricConfig{Enabled: true}, MongodbCursorTimeoutCount: MetricConfig{Enabled: true}, MongodbDataSize: MetricConfig{Enabled: true}, MongodbDatabaseCount: MetricConfig{Enabled: true}, - MongodbDeletesPerSec: MetricConfig{Enabled: true}, + MongodbDeletesRate: MetricConfig{Enabled: true}, 
MongodbDocumentOperationCount: MetricConfig{Enabled: true}, MongodbExtentCount: MetricConfig{Enabled: true}, - MongodbFlushesPerSec: MetricConfig{Enabled: true}, - MongodbGetmoresPerSec: MetricConfig{Enabled: true}, + MongodbFlushesRate: MetricConfig{Enabled: true}, + MongodbGetmoresRate: MetricConfig{Enabled: true}, MongodbGlobalLockTime: MetricConfig{Enabled: true}, MongodbHealth: MetricConfig{Enabled: true}, MongodbIndexAccessCount: MetricConfig{Enabled: true}, MongodbIndexCount: MetricConfig{Enabled: true}, MongodbIndexSize: MetricConfig{Enabled: true}, - MongodbInsertsPerSec: MetricConfig{Enabled: true}, + MongodbInsertsRate: MetricConfig{Enabled: true}, MongodbLockAcquireCount: MetricConfig{Enabled: true}, MongodbLockAcquireTime: MetricConfig{Enabled: true}, MongodbLockAcquireWaitCount: MetricConfig{Enabled: true}, @@ -62,10 +60,10 @@ func TestMetricsBuilderConfig(t *testing.T) { MongodbOperationReplCount: MetricConfig{Enabled: true}, MongodbOperationTime: MetricConfig{Enabled: true}, MongodbPageFaults: MetricConfig{Enabled: true}, - MongodbQueriesPerSec: MetricConfig{Enabled: true}, + MongodbQueriesRate: MetricConfig{Enabled: true}, MongodbSessionCount: MetricConfig{Enabled: true}, MongodbStorageSize: MetricConfig{Enabled: true}, - MongodbUpdatesPerSec: MetricConfig{Enabled: true}, + MongodbUpdatesRate: MetricConfig{Enabled: true}, MongodbUptime: MetricConfig{Enabled: true}, MongodbWtcacheBytesRead: MetricConfig{Enabled: true}, }, @@ -82,27 +80,25 @@ func TestMetricsBuilderConfig(t *testing.T) { Metrics: MetricsConfig{ MongodbActiveReads: MetricConfig{Enabled: false}, MongodbActiveWrites: MetricConfig{Enabled: false}, - MongodbCacheDirtyPercent: MetricConfig{Enabled: false}, MongodbCacheOperations: MetricConfig{Enabled: false}, - MongodbCacheUsedPercent: MetricConfig{Enabled: false}, MongodbCollectionCount: MetricConfig{Enabled: false}, - MongodbCommandsPerSec: MetricConfig{Enabled: false}, + MongodbCommandsRate: MetricConfig{Enabled: false}, MongodbConnectionCount: MetricConfig{Enabled: false}, MongodbCursorCount: MetricConfig{Enabled: false}, MongodbCursorTimeoutCount: MetricConfig{Enabled: false}, MongodbDataSize: MetricConfig{Enabled: false}, MongodbDatabaseCount: MetricConfig{Enabled: false}, - MongodbDeletesPerSec: MetricConfig{Enabled: false}, + MongodbDeletesRate: MetricConfig{Enabled: false}, MongodbDocumentOperationCount: MetricConfig{Enabled: false}, MongodbExtentCount: MetricConfig{Enabled: false}, - MongodbFlushesPerSec: MetricConfig{Enabled: false}, - MongodbGetmoresPerSec: MetricConfig{Enabled: false}, + MongodbFlushesRate: MetricConfig{Enabled: false}, + MongodbGetmoresRate: MetricConfig{Enabled: false}, MongodbGlobalLockTime: MetricConfig{Enabled: false}, MongodbHealth: MetricConfig{Enabled: false}, MongodbIndexAccessCount: MetricConfig{Enabled: false}, MongodbIndexCount: MetricConfig{Enabled: false}, MongodbIndexSize: MetricConfig{Enabled: false}, - MongodbInsertsPerSec: MetricConfig{Enabled: false}, + MongodbInsertsRate: MetricConfig{Enabled: false}, MongodbLockAcquireCount: MetricConfig{Enabled: false}, MongodbLockAcquireTime: MetricConfig{Enabled: false}, MongodbLockAcquireWaitCount: MetricConfig{Enabled: false}, @@ -117,10 +113,10 @@ func TestMetricsBuilderConfig(t *testing.T) { MongodbOperationReplCount: MetricConfig{Enabled: false}, MongodbOperationTime: MetricConfig{Enabled: false}, MongodbPageFaults: MetricConfig{Enabled: false}, - MongodbQueriesPerSec: MetricConfig{Enabled: false}, + MongodbQueriesRate: MetricConfig{Enabled: false}, MongodbSessionCount: 
MetricConfig{Enabled: false}, MongodbStorageSize: MetricConfig{Enabled: false}, - MongodbUpdatesPerSec: MetricConfig{Enabled: false}, + MongodbUpdatesRate: MetricConfig{Enabled: false}, MongodbUptime: MetricConfig{Enabled: false}, MongodbWtcacheBytesRead: MetricConfig{Enabled: false}, }, @@ -135,8 +131,9 @@ func TestMetricsBuilderConfig(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { cfg := loadMetricsBuilderConfig(t, tt.name) - diff := cmp.Diff(tt.want, cfg, cmpopts.IgnoreUnexported(MetricConfig{}, ResourceAttributeConfig{})) - require.Emptyf(t, diff, "Config mismatch (-expected +actual):\n%s", diff) + if diff := cmp.Diff(tt.want, cfg, cmpopts.IgnoreUnexported(MetricConfig{}, ResourceAttributeConfig{})); diff != "" { + t.Errorf("Config mismatch (-expected +actual):\n%s", diff) + } }) } } @@ -180,8 +177,9 @@ func TestResourceAttributesConfig(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { cfg := loadResourceAttributesConfig(t, tt.name) - diff := cmp.Diff(tt.want, cfg, cmpopts.IgnoreUnexported(ResourceAttributeConfig{})) - require.Emptyf(t, diff, "Config mismatch (-expected +actual):\n%s", diff) + if diff := cmp.Diff(tt.want, cfg, cmpopts.IgnoreUnexported(ResourceAttributeConfig{})); diff != "" { + t.Errorf("Config mismatch (-expected +actual):\n%s", diff) + } }) } } diff --git a/receiver/mongodbreceiver/internal/metadata/generated_metrics.go b/receiver/mongodbreceiver/internal/metadata/generated_metrics.go index 227209f45b8dd..48ac8f39ee49d 100644 --- a/receiver/mongodbreceiver/internal/metadata/generated_metrics.go +++ b/receiver/mongodbreceiver/internal/metadata/generated_metrics.go @@ -12,7 +12,7 @@ import ( "go.opentelemetry.io/collector/receiver" ) -// AttributeConnectionType specifies the value connection_type attribute. +// AttributeConnectionType specifies the a value connection_type attribute. type AttributeConnectionType int const ( @@ -42,7 +42,7 @@ var MapAttributeConnectionType = map[string]AttributeConnectionType{ "current": AttributeConnectionTypeCurrent, } -// AttributeLockMode specifies the value lock_mode attribute. +// AttributeLockMode specifies the a value lock_mode attribute. type AttributeLockMode int const ( @@ -76,7 +76,7 @@ var MapAttributeLockMode = map[string]AttributeLockMode{ "intent_exclusive": AttributeLockModeIntentExclusive, } -// AttributeLockType specifies the value lock_type attribute. +// AttributeLockType specifies the a value lock_type attribute. type AttributeLockType int const ( @@ -126,7 +126,7 @@ var MapAttributeLockType = map[string]AttributeLockType{ "oplog": AttributeLockTypeOplog, } -// AttributeMemoryType specifies the value memory_type attribute. +// AttributeMemoryType specifies the a value memory_type attribute. type AttributeMemoryType int const ( @@ -152,7 +152,7 @@ var MapAttributeMemoryType = map[string]AttributeMemoryType{ "virtual": AttributeMemoryTypeVirtual, } -// AttributeOperation specifies the value operation attribute. +// AttributeOperation specifies the a value operation attribute. type AttributeOperation int const ( @@ -194,7 +194,7 @@ var MapAttributeOperation = map[string]AttributeOperation{ "command": AttributeOperationCommand, } -// AttributeOperationLatency specifies the value operation_latency attribute. +// AttributeOperationLatency specifies the a value operation_latency attribute. 
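These rate gauges are derived values: the receiver samples MongoDB's cumulative `serverStatus` opcounters and divides the delta by the elapsed time between scrapes. The actual calculation lives in metrics.go, which this excerpt does not show, so the following is only a hypothetical sketch of that Telegraf-style computation (`rateCalculator` and `update` are illustrative names):

```go
package example

import "time"

// rateCalculator derives a per-second rate from successive samples of a
// cumulative counter such as serverStatus.opcounters.query.
type rateCalculator struct {
	prevValue int64
	prevTime  time.Time
	primed    bool
}

// update stores the new sample and returns the per-second rate since the
// previous one. The boolean is false until two samples exist, when the
// counter resets (e.g. a mongod restart), or when no time has elapsed.
func (r *rateCalculator) update(now time.Time, value int64) (float64, bool) {
	prevValue, prevTime, primed := r.prevValue, r.prevTime, r.primed
	r.prevValue, r.prevTime, r.primed = value, now, true
	if !primed || value < prevValue || !now.After(prevTime) {
		return 0, false
	}
	return float64(value-prevValue) / now.Sub(prevTime).Seconds(), true
}
```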
type AttributeOperationLatency int const ( @@ -224,7 +224,7 @@ var MapAttributeOperationLatency = map[string]AttributeOperationLatency{ "command": AttributeOperationLatencyCommand, } -// AttributeType specifies the value type attribute. +// AttributeType specifies the a value type attribute. type AttributeType int const ( @@ -352,55 +352,6 @@ func newMetricMongodbActiveWrites(cfg MetricConfig) metricMongodbActiveWrites { return m } -type metricMongodbCacheDirtyPercent struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills mongodb.cache.dirty.percent metric with initial data. -func (m *metricMongodbCacheDirtyPercent) init() { - m.data.SetName("mongodb.cache.dirty.percent") - m.data.SetDescription("The percentage of WiredTiger cache that is dirty.") - m.data.SetUnit("1") - m.data.SetEmptyGauge() -} - -func (m *metricMongodbCacheDirtyPercent) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetDoubleValue(val) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricMongodbCacheDirtyPercent) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricMongodbCacheDirtyPercent) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricMongodbCacheDirtyPercent(cfg MetricConfig) metricMongodbCacheDirtyPercent { - m := metricMongodbCacheDirtyPercent{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - type metricMongodbCacheOperations struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. @@ -454,55 +405,6 @@ func newMetricMongodbCacheOperations(cfg MetricConfig) metricMongodbCacheOperati return m } -type metricMongodbCacheUsedPercent struct { - data pmetric.Metric // data buffer for generated metric. - config MetricConfig // metric config provided by user. - capacity int // max observed number of data points added to the metric. -} - -// init fills mongodb.cache.used.percent metric with initial data. -func (m *metricMongodbCacheUsedPercent) init() { - m.data.SetName("mongodb.cache.used.percent") - m.data.SetDescription("The percentage of WiredTiger cache in use.") - m.data.SetUnit("1") - m.data.SetEmptyGauge() -} - -func (m *metricMongodbCacheUsedPercent) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64) { - if !m.config.Enabled { - return - } - dp := m.data.Gauge().DataPoints().AppendEmpty() - dp.SetStartTimestamp(start) - dp.SetTimestamp(ts) - dp.SetDoubleValue(val) -} - -// updateCapacity saves max length of data point slices that will be used for the slice capacity. 
-func (m *metricMongodbCacheUsedPercent) updateCapacity() { - if m.data.Gauge().DataPoints().Len() > m.capacity { - m.capacity = m.data.Gauge().DataPoints().Len() - } -} - -// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricMongodbCacheUsedPercent) emit(metrics pmetric.MetricSlice) { - if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { - m.updateCapacity() - m.data.MoveTo(metrics.AppendEmpty()) - m.init() - } -} - -func newMetricMongodbCacheUsedPercent(cfg MetricConfig) metricMongodbCacheUsedPercent { - m := metricMongodbCacheUsedPercent{config: cfg} - if cfg.Enabled { - m.data = pmetric.NewMetric() - m.init() - } - return m -} - type metricMongodbCollectionCount struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. @@ -554,21 +456,21 @@ func newMetricMongodbCollectionCount(cfg MetricConfig) metricMongodbCollectionCo return m } -type metricMongodbCommandsPerSec struct { +type metricMongodbCommandsRate struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. capacity int // max observed number of data points added to the metric. } -// init fills mongodb.commands_per_sec metric with initial data. -func (m *metricMongodbCommandsPerSec) init() { - m.data.SetName("mongodb.commands_per_sec") +// init fills mongodb.commands.rate metric with initial data. +func (m *metricMongodbCommandsRate) init() { + m.data.SetName("mongodb.commands.rate") m.data.SetDescription("The number of commands executed per second.") m.data.SetUnit("{command}/s") m.data.SetEmptyGauge() } -func (m *metricMongodbCommandsPerSec) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64) { +func (m *metricMongodbCommandsRate) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64) { if !m.config.Enabled { return } @@ -579,14 +481,14 @@ func (m *metricMongodbCommandsPerSec) recordDataPoint(start pcommon.Timestamp, t } // updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricMongodbCommandsPerSec) updateCapacity() { +func (m *metricMongodbCommandsRate) updateCapacity() { if m.data.Gauge().DataPoints().Len() > m.capacity { m.capacity = m.data.Gauge().DataPoints().Len() } } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricMongodbCommandsPerSec) emit(metrics pmetric.MetricSlice) { +func (m *metricMongodbCommandsRate) emit(metrics pmetric.MetricSlice) { if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -594,8 +496,8 @@ func (m *metricMongodbCommandsPerSec) emit(metrics pmetric.MetricSlice) { } } -func newMetricMongodbCommandsPerSec(cfg MetricConfig) metricMongodbCommandsPerSec { - m := metricMongodbCommandsPerSec{config: cfg} +func newMetricMongodbCommandsRate(cfg MetricConfig) metricMongodbCommandsRate { + m := metricMongodbCommandsRate{config: cfg} if cfg.Enabled { m.data = pmetric.NewMetric() m.init() @@ -860,21 +762,21 @@ func newMetricMongodbDatabaseCount(cfg MetricConfig) metricMongodbDatabaseCount return m } -type metricMongodbDeletesPerSec struct { +type metricMongodbDeletesRate struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. 
capacity int // max observed number of data points added to the metric. } -// init fills mongodb.deletes_per_sec metric with initial data. -func (m *metricMongodbDeletesPerSec) init() { - m.data.SetName("mongodb.deletes_per_sec") +// init fills mongodb.deletes.rate metric with initial data. +func (m *metricMongodbDeletesRate) init() { + m.data.SetName("mongodb.deletes.rate") m.data.SetDescription("The number of deletes executed per second.") m.data.SetUnit("{delete}/s") m.data.SetEmptyGauge() } -func (m *metricMongodbDeletesPerSec) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64) { +func (m *metricMongodbDeletesRate) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64) { if !m.config.Enabled { return } @@ -885,14 +787,14 @@ func (m *metricMongodbDeletesPerSec) recordDataPoint(start pcommon.Timestamp, ts } // updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricMongodbDeletesPerSec) updateCapacity() { +func (m *metricMongodbDeletesRate) updateCapacity() { if m.data.Gauge().DataPoints().Len() > m.capacity { m.capacity = m.data.Gauge().DataPoints().Len() } } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricMongodbDeletesPerSec) emit(metrics pmetric.MetricSlice) { +func (m *metricMongodbDeletesRate) emit(metrics pmetric.MetricSlice) { if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -900,8 +802,8 @@ func (m *metricMongodbDeletesPerSec) emit(metrics pmetric.MetricSlice) { } } -func newMetricMongodbDeletesPerSec(cfg MetricConfig) metricMongodbDeletesPerSec { - m := metricMongodbDeletesPerSec{config: cfg} +func newMetricMongodbDeletesRate(cfg MetricConfig) metricMongodbDeletesRate { + m := metricMongodbDeletesRate{config: cfg} if cfg.Enabled { m.data = pmetric.NewMetric() m.init() @@ -1013,21 +915,21 @@ func newMetricMongodbExtentCount(cfg MetricConfig) metricMongodbExtentCount { return m } -type metricMongodbFlushesPerSec struct { +type metricMongodbFlushesRate struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. capacity int // max observed number of data points added to the metric. } -// init fills mongodb.flushes_per_sec metric with initial data. -func (m *metricMongodbFlushesPerSec) init() { - m.data.SetName("mongodb.flushes_per_sec") +// init fills mongodb.flushes.rate metric with initial data. +func (m *metricMongodbFlushesRate) init() { + m.data.SetName("mongodb.flushes.rate") m.data.SetDescription("The number of flushes executed per second.") m.data.SetUnit("{flush}/s") m.data.SetEmptyGauge() } -func (m *metricMongodbFlushesPerSec) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64) { +func (m *metricMongodbFlushesRate) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64) { if !m.config.Enabled { return } @@ -1038,14 +940,14 @@ func (m *metricMongodbFlushesPerSec) recordDataPoint(start pcommon.Timestamp, ts } // updateCapacity saves max length of data point slices that will be used for the slice capacity. 
-func (m *metricMongodbFlushesPerSec) updateCapacity() { +func (m *metricMongodbFlushesRate) updateCapacity() { if m.data.Gauge().DataPoints().Len() > m.capacity { m.capacity = m.data.Gauge().DataPoints().Len() } } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricMongodbFlushesPerSec) emit(metrics pmetric.MetricSlice) { +func (m *metricMongodbFlushesRate) emit(metrics pmetric.MetricSlice) { if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -1053,8 +955,8 @@ func (m *metricMongodbFlushesPerSec) emit(metrics pmetric.MetricSlice) { } } -func newMetricMongodbFlushesPerSec(cfg MetricConfig) metricMongodbFlushesPerSec { - m := metricMongodbFlushesPerSec{config: cfg} +func newMetricMongodbFlushesRate(cfg MetricConfig) metricMongodbFlushesRate { + m := metricMongodbFlushesRate{config: cfg} if cfg.Enabled { m.data = pmetric.NewMetric() m.init() @@ -1062,21 +964,21 @@ func newMetricMongodbFlushesPerSec(cfg MetricConfig) metricMongodbFlushesPerSec return m } -type metricMongodbGetmoresPerSec struct { +type metricMongodbGetmoresRate struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. capacity int // max observed number of data points added to the metric. } -// init fills mongodb.getmores_per_sec metric with initial data. -func (m *metricMongodbGetmoresPerSec) init() { - m.data.SetName("mongodb.getmores_per_sec") +// init fills mongodb.getmores.rate metric with initial data. +func (m *metricMongodbGetmoresRate) init() { + m.data.SetName("mongodb.getmores.rate") m.data.SetDescription("The number of getmores executed per second.") m.data.SetUnit("{getmore}/s") m.data.SetEmptyGauge() } -func (m *metricMongodbGetmoresPerSec) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64) { +func (m *metricMongodbGetmoresRate) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64) { if !m.config.Enabled { return } @@ -1087,14 +989,14 @@ func (m *metricMongodbGetmoresPerSec) recordDataPoint(start pcommon.Timestamp, t } // updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricMongodbGetmoresPerSec) updateCapacity() { +func (m *metricMongodbGetmoresRate) updateCapacity() { if m.data.Gauge().DataPoints().Len() > m.capacity { m.capacity = m.data.Gauge().DataPoints().Len() } } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricMongodbGetmoresPerSec) emit(metrics pmetric.MetricSlice) { +func (m *metricMongodbGetmoresRate) emit(metrics pmetric.MetricSlice) { if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -1102,8 +1004,8 @@ func (m *metricMongodbGetmoresPerSec) emit(metrics pmetric.MetricSlice) { } } -func newMetricMongodbGetmoresPerSec(cfg MetricConfig) metricMongodbGetmoresPerSec { - m := metricMongodbGetmoresPerSec{config: cfg} +func newMetricMongodbGetmoresRate(cfg MetricConfig) metricMongodbGetmoresRate { + m := metricMongodbGetmoresRate{config: cfg} if cfg.Enabled { m.data = pmetric.NewMetric() m.init() @@ -1366,21 +1268,21 @@ func newMetricMongodbIndexSize(cfg MetricConfig) metricMongodbIndexSize { return m } -type metricMongodbInsertsPerSec struct { +type metricMongodbInsertsRate struct { data pmetric.Metric // data buffer for generated metric. 
config MetricConfig // metric config provided by user. capacity int // max observed number of data points added to the metric. } -// init fills mongodb.inserts_per_sec metric with initial data. -func (m *metricMongodbInsertsPerSec) init() { - m.data.SetName("mongodb.inserts_per_sec") +// init fills mongodb.inserts.rate metric with initial data. +func (m *metricMongodbInsertsRate) init() { + m.data.SetName("mongodb.inserts.rate") m.data.SetDescription("The number of insertions executed per second.") m.data.SetUnit("{insert}/s") m.data.SetEmptyGauge() } -func (m *metricMongodbInsertsPerSec) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64) { +func (m *metricMongodbInsertsRate) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64) { if !m.config.Enabled { return } @@ -1391,14 +1293,14 @@ func (m *metricMongodbInsertsPerSec) recordDataPoint(start pcommon.Timestamp, ts } // updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricMongodbInsertsPerSec) updateCapacity() { +func (m *metricMongodbInsertsRate) updateCapacity() { if m.data.Gauge().DataPoints().Len() > m.capacity { m.capacity = m.data.Gauge().DataPoints().Len() } } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricMongodbInsertsPerSec) emit(metrics pmetric.MetricSlice) { +func (m *metricMongodbInsertsRate) emit(metrics pmetric.MetricSlice) { if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -1406,8 +1308,8 @@ func (m *metricMongodbInsertsPerSec) emit(metrics pmetric.MetricSlice) { } } -func newMetricMongodbInsertsPerSec(cfg MetricConfig) metricMongodbInsertsPerSec { - m := metricMongodbInsertsPerSec{config: cfg} +func newMetricMongodbInsertsRate(cfg MetricConfig) metricMongodbInsertsRate { + m := metricMongodbInsertsRate{config: cfg} if cfg.Enabled { m.data = pmetric.NewMetric() m.init() @@ -2149,21 +2051,21 @@ func newMetricMongodbPageFaults(cfg MetricConfig) metricMongodbPageFaults { return m } -type metricMongodbQueriesPerSec struct { +type metricMongodbQueriesRate struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. capacity int // max observed number of data points added to the metric. } -// init fills mongodb.queries_per_sec metric with initial data. -func (m *metricMongodbQueriesPerSec) init() { - m.data.SetName("mongodb.queries_per_sec") +// init fills mongodb.queries.rate metric with initial data. +func (m *metricMongodbQueriesRate) init() { + m.data.SetName("mongodb.queries.rate") m.data.SetDescription("The number of queries executed per second.") m.data.SetUnit("{query}/s") m.data.SetEmptyGauge() } -func (m *metricMongodbQueriesPerSec) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64) { +func (m *metricMongodbQueriesRate) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64) { if !m.config.Enabled { return } @@ -2174,14 +2076,14 @@ func (m *metricMongodbQueriesPerSec) recordDataPoint(start pcommon.Timestamp, ts } // updateCapacity saves max length of data point slices that will be used for the slice capacity. 
-func (m *metricMongodbQueriesPerSec) updateCapacity() { +func (m *metricMongodbQueriesRate) updateCapacity() { if m.data.Gauge().DataPoints().Len() > m.capacity { m.capacity = m.data.Gauge().DataPoints().Len() } } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricMongodbQueriesPerSec) emit(metrics pmetric.MetricSlice) { +func (m *metricMongodbQueriesRate) emit(metrics pmetric.MetricSlice) { if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -2189,8 +2091,8 @@ func (m *metricMongodbQueriesPerSec) emit(metrics pmetric.MetricSlice) { } } -func newMetricMongodbQueriesPerSec(cfg MetricConfig) metricMongodbQueriesPerSec { - m := metricMongodbQueriesPerSec{config: cfg} +func newMetricMongodbQueriesRate(cfg MetricConfig) metricMongodbQueriesRate { + m := metricMongodbQueriesRate{config: cfg} if cfg.Enabled { m.data = pmetric.NewMetric() m.init() @@ -2300,21 +2202,21 @@ func newMetricMongodbStorageSize(cfg MetricConfig) metricMongodbStorageSize { return m } -type metricMongodbUpdatesPerSec struct { +type metricMongodbUpdatesRate struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. capacity int // max observed number of data points added to the metric. } -// init fills mongodb.updates_per_sec metric with initial data. -func (m *metricMongodbUpdatesPerSec) init() { - m.data.SetName("mongodb.updates_per_sec") +// init fills mongodb.updates.rate metric with initial data. +func (m *metricMongodbUpdatesRate) init() { + m.data.SetName("mongodb.updates.rate") m.data.SetDescription("The number of updates executed per second.") m.data.SetUnit("{update}/s") m.data.SetEmptyGauge() } -func (m *metricMongodbUpdatesPerSec) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64) { +func (m *metricMongodbUpdatesRate) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64) { if !m.config.Enabled { return } @@ -2325,14 +2227,14 @@ func (m *metricMongodbUpdatesPerSec) recordDataPoint(start pcommon.Timestamp, ts } // updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricMongodbUpdatesPerSec) updateCapacity() { +func (m *metricMongodbUpdatesRate) updateCapacity() { if m.data.Gauge().DataPoints().Len() > m.capacity { m.capacity = m.data.Gauge().DataPoints().Len() } } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
-func (m *metricMongodbUpdatesPerSec) emit(metrics pmetric.MetricSlice) { +func (m *metricMongodbUpdatesRate) emit(metrics pmetric.MetricSlice) { if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -2340,8 +2242,8 @@ func (m *metricMongodbUpdatesPerSec) emit(metrics pmetric.MetricSlice) { } } -func newMetricMongodbUpdatesPerSec(cfg MetricConfig) metricMongodbUpdatesPerSec { - m := metricMongodbUpdatesPerSec{config: cfg} +func newMetricMongodbUpdatesRate(cfg MetricConfig) metricMongodbUpdatesRate { + m := metricMongodbUpdatesRate{config: cfg} if cfg.Enabled { m.data = pmetric.NewMetric() m.init() @@ -2463,27 +2365,25 @@ type MetricsBuilder struct { resourceAttributeExcludeFilter map[string]filter.Filter metricMongodbActiveReads metricMongodbActiveReads metricMongodbActiveWrites metricMongodbActiveWrites - metricMongodbCacheDirtyPercent metricMongodbCacheDirtyPercent metricMongodbCacheOperations metricMongodbCacheOperations - metricMongodbCacheUsedPercent metricMongodbCacheUsedPercent metricMongodbCollectionCount metricMongodbCollectionCount - metricMongodbCommandsPerSec metricMongodbCommandsPerSec + metricMongodbCommandsRate metricMongodbCommandsRate metricMongodbConnectionCount metricMongodbConnectionCount metricMongodbCursorCount metricMongodbCursorCount metricMongodbCursorTimeoutCount metricMongodbCursorTimeoutCount metricMongodbDataSize metricMongodbDataSize metricMongodbDatabaseCount metricMongodbDatabaseCount - metricMongodbDeletesPerSec metricMongodbDeletesPerSec + metricMongodbDeletesRate metricMongodbDeletesRate metricMongodbDocumentOperationCount metricMongodbDocumentOperationCount metricMongodbExtentCount metricMongodbExtentCount - metricMongodbFlushesPerSec metricMongodbFlushesPerSec - metricMongodbGetmoresPerSec metricMongodbGetmoresPerSec + metricMongodbFlushesRate metricMongodbFlushesRate + metricMongodbGetmoresRate metricMongodbGetmoresRate metricMongodbGlobalLockTime metricMongodbGlobalLockTime metricMongodbHealth metricMongodbHealth metricMongodbIndexAccessCount metricMongodbIndexAccessCount metricMongodbIndexCount metricMongodbIndexCount metricMongodbIndexSize metricMongodbIndexSize - metricMongodbInsertsPerSec metricMongodbInsertsPerSec + metricMongodbInsertsRate metricMongodbInsertsRate metricMongodbLockAcquireCount metricMongodbLockAcquireCount metricMongodbLockAcquireTime metricMongodbLockAcquireTime metricMongodbLockAcquireWaitCount metricMongodbLockAcquireWaitCount @@ -2498,10 +2398,10 @@ type MetricsBuilder struct { metricMongodbOperationReplCount metricMongodbOperationReplCount metricMongodbOperationTime metricMongodbOperationTime metricMongodbPageFaults metricMongodbPageFaults - metricMongodbQueriesPerSec metricMongodbQueriesPerSec + metricMongodbQueriesRate metricMongodbQueriesRate metricMongodbSessionCount metricMongodbSessionCount metricMongodbStorageSize metricMongodbStorageSize - metricMongodbUpdatesPerSec metricMongodbUpdatesPerSec + metricMongodbUpdatesRate metricMongodbUpdatesRate metricMongodbUptime metricMongodbUptime metricMongodbWtcacheBytesRead metricMongodbWtcacheBytesRead } @@ -2523,6 +2423,7 @@ func WithStartTime(startTime pcommon.Timestamp) MetricBuilderOption { mb.startTime = startTime }) } + func NewMetricsBuilder(mbc MetricsBuilderConfig, settings receiver.Settings, options ...MetricBuilderOption) *MetricsBuilder { mb := &MetricsBuilder{ config: mbc, @@ -2531,27 +2432,25 @@ func NewMetricsBuilder(mbc MetricsBuilderConfig, settings receiver.Settings, opt buildInfo: 
settings.BuildInfo, metricMongodbActiveReads: newMetricMongodbActiveReads(mbc.Metrics.MongodbActiveReads), metricMongodbActiveWrites: newMetricMongodbActiveWrites(mbc.Metrics.MongodbActiveWrites), - metricMongodbCacheDirtyPercent: newMetricMongodbCacheDirtyPercent(mbc.Metrics.MongodbCacheDirtyPercent), metricMongodbCacheOperations: newMetricMongodbCacheOperations(mbc.Metrics.MongodbCacheOperations), - metricMongodbCacheUsedPercent: newMetricMongodbCacheUsedPercent(mbc.Metrics.MongodbCacheUsedPercent), metricMongodbCollectionCount: newMetricMongodbCollectionCount(mbc.Metrics.MongodbCollectionCount), - metricMongodbCommandsPerSec: newMetricMongodbCommandsPerSec(mbc.Metrics.MongodbCommandsPerSec), + metricMongodbCommandsRate: newMetricMongodbCommandsRate(mbc.Metrics.MongodbCommandsRate), metricMongodbConnectionCount: newMetricMongodbConnectionCount(mbc.Metrics.MongodbConnectionCount), metricMongodbCursorCount: newMetricMongodbCursorCount(mbc.Metrics.MongodbCursorCount), metricMongodbCursorTimeoutCount: newMetricMongodbCursorTimeoutCount(mbc.Metrics.MongodbCursorTimeoutCount), metricMongodbDataSize: newMetricMongodbDataSize(mbc.Metrics.MongodbDataSize), metricMongodbDatabaseCount: newMetricMongodbDatabaseCount(mbc.Metrics.MongodbDatabaseCount), - metricMongodbDeletesPerSec: newMetricMongodbDeletesPerSec(mbc.Metrics.MongodbDeletesPerSec), + metricMongodbDeletesRate: newMetricMongodbDeletesRate(mbc.Metrics.MongodbDeletesRate), metricMongodbDocumentOperationCount: newMetricMongodbDocumentOperationCount(mbc.Metrics.MongodbDocumentOperationCount), metricMongodbExtentCount: newMetricMongodbExtentCount(mbc.Metrics.MongodbExtentCount), - metricMongodbFlushesPerSec: newMetricMongodbFlushesPerSec(mbc.Metrics.MongodbFlushesPerSec), - metricMongodbGetmoresPerSec: newMetricMongodbGetmoresPerSec(mbc.Metrics.MongodbGetmoresPerSec), + metricMongodbFlushesRate: newMetricMongodbFlushesRate(mbc.Metrics.MongodbFlushesRate), + metricMongodbGetmoresRate: newMetricMongodbGetmoresRate(mbc.Metrics.MongodbGetmoresRate), metricMongodbGlobalLockTime: newMetricMongodbGlobalLockTime(mbc.Metrics.MongodbGlobalLockTime), metricMongodbHealth: newMetricMongodbHealth(mbc.Metrics.MongodbHealth), metricMongodbIndexAccessCount: newMetricMongodbIndexAccessCount(mbc.Metrics.MongodbIndexAccessCount), metricMongodbIndexCount: newMetricMongodbIndexCount(mbc.Metrics.MongodbIndexCount), metricMongodbIndexSize: newMetricMongodbIndexSize(mbc.Metrics.MongodbIndexSize), - metricMongodbInsertsPerSec: newMetricMongodbInsertsPerSec(mbc.Metrics.MongodbInsertsPerSec), + metricMongodbInsertsRate: newMetricMongodbInsertsRate(mbc.Metrics.MongodbInsertsRate), metricMongodbLockAcquireCount: newMetricMongodbLockAcquireCount(mbc.Metrics.MongodbLockAcquireCount), metricMongodbLockAcquireTime: newMetricMongodbLockAcquireTime(mbc.Metrics.MongodbLockAcquireTime), metricMongodbLockAcquireWaitCount: newMetricMongodbLockAcquireWaitCount(mbc.Metrics.MongodbLockAcquireWaitCount), @@ -2566,10 +2465,10 @@ func NewMetricsBuilder(mbc MetricsBuilderConfig, settings receiver.Settings, opt metricMongodbOperationReplCount: newMetricMongodbOperationReplCount(mbc.Metrics.MongodbOperationReplCount), metricMongodbOperationTime: newMetricMongodbOperationTime(mbc.Metrics.MongodbOperationTime), metricMongodbPageFaults: newMetricMongodbPageFaults(mbc.Metrics.MongodbPageFaults), - metricMongodbQueriesPerSec: newMetricMongodbQueriesPerSec(mbc.Metrics.MongodbQueriesPerSec), + metricMongodbQueriesRate: newMetricMongodbQueriesRate(mbc.Metrics.MongodbQueriesRate), 
metricMongodbSessionCount: newMetricMongodbSessionCount(mbc.Metrics.MongodbSessionCount), metricMongodbStorageSize: newMetricMongodbStorageSize(mbc.Metrics.MongodbStorageSize), - metricMongodbUpdatesPerSec: newMetricMongodbUpdatesPerSec(mbc.Metrics.MongodbUpdatesPerSec), + metricMongodbUpdatesRate: newMetricMongodbUpdatesRate(mbc.Metrics.MongodbUpdatesRate), metricMongodbUptime: newMetricMongodbUptime(mbc.Metrics.MongodbUptime), metricMongodbWtcacheBytesRead: newMetricMongodbWtcacheBytesRead(mbc.Metrics.MongodbWtcacheBytesRead), resourceAttributeIncludeFilter: make(map[string]filter.Filter), @@ -2664,27 +2563,25 @@ func (mb *MetricsBuilder) EmitForResource(options ...ResourceMetricsOption) { ils.Metrics().EnsureCapacity(mb.metricsCapacity) mb.metricMongodbActiveReads.emit(ils.Metrics()) mb.metricMongodbActiveWrites.emit(ils.Metrics()) - mb.metricMongodbCacheDirtyPercent.emit(ils.Metrics()) mb.metricMongodbCacheOperations.emit(ils.Metrics()) - mb.metricMongodbCacheUsedPercent.emit(ils.Metrics()) mb.metricMongodbCollectionCount.emit(ils.Metrics()) - mb.metricMongodbCommandsPerSec.emit(ils.Metrics()) + mb.metricMongodbCommandsRate.emit(ils.Metrics()) mb.metricMongodbConnectionCount.emit(ils.Metrics()) mb.metricMongodbCursorCount.emit(ils.Metrics()) mb.metricMongodbCursorTimeoutCount.emit(ils.Metrics()) mb.metricMongodbDataSize.emit(ils.Metrics()) mb.metricMongodbDatabaseCount.emit(ils.Metrics()) - mb.metricMongodbDeletesPerSec.emit(ils.Metrics()) + mb.metricMongodbDeletesRate.emit(ils.Metrics()) mb.metricMongodbDocumentOperationCount.emit(ils.Metrics()) mb.metricMongodbExtentCount.emit(ils.Metrics()) - mb.metricMongodbFlushesPerSec.emit(ils.Metrics()) - mb.metricMongodbGetmoresPerSec.emit(ils.Metrics()) + mb.metricMongodbFlushesRate.emit(ils.Metrics()) + mb.metricMongodbGetmoresRate.emit(ils.Metrics()) mb.metricMongodbGlobalLockTime.emit(ils.Metrics()) mb.metricMongodbHealth.emit(ils.Metrics()) mb.metricMongodbIndexAccessCount.emit(ils.Metrics()) mb.metricMongodbIndexCount.emit(ils.Metrics()) mb.metricMongodbIndexSize.emit(ils.Metrics()) - mb.metricMongodbInsertsPerSec.emit(ils.Metrics()) + mb.metricMongodbInsertsRate.emit(ils.Metrics()) mb.metricMongodbLockAcquireCount.emit(ils.Metrics()) mb.metricMongodbLockAcquireTime.emit(ils.Metrics()) mb.metricMongodbLockAcquireWaitCount.emit(ils.Metrics()) @@ -2699,10 +2596,10 @@ func (mb *MetricsBuilder) EmitForResource(options ...ResourceMetricsOption) { mb.metricMongodbOperationReplCount.emit(ils.Metrics()) mb.metricMongodbOperationTime.emit(ils.Metrics()) mb.metricMongodbPageFaults.emit(ils.Metrics()) - mb.metricMongodbQueriesPerSec.emit(ils.Metrics()) + mb.metricMongodbQueriesRate.emit(ils.Metrics()) mb.metricMongodbSessionCount.emit(ils.Metrics()) mb.metricMongodbStorageSize.emit(ils.Metrics()) - mb.metricMongodbUpdatesPerSec.emit(ils.Metrics()) + mb.metricMongodbUpdatesRate.emit(ils.Metrics()) mb.metricMongodbUptime.emit(ils.Metrics()) mb.metricMongodbWtcacheBytesRead.emit(ils.Metrics()) @@ -2746,29 +2643,19 @@ func (mb *MetricsBuilder) RecordMongodbActiveWritesDataPoint(ts pcommon.Timestam mb.metricMongodbActiveWrites.recordDataPoint(mb.startTime, ts, val) } -// RecordMongodbCacheDirtyPercentDataPoint adds a data point to mongodb.cache.dirty.percent metric. -func (mb *MetricsBuilder) RecordMongodbCacheDirtyPercentDataPoint(ts pcommon.Timestamp, val float64) { - mb.metricMongodbCacheDirtyPercent.recordDataPoint(mb.startTime, ts, val) -} - // RecordMongodbCacheOperationsDataPoint adds a data point to mongodb.cache.operations metric. 
func (mb *MetricsBuilder) RecordMongodbCacheOperationsDataPoint(ts pcommon.Timestamp, val int64, typeAttributeValue AttributeType) { mb.metricMongodbCacheOperations.recordDataPoint(mb.startTime, ts, val, typeAttributeValue.String()) } -// RecordMongodbCacheUsedPercentDataPoint adds a data point to mongodb.cache.used.percent metric. -func (mb *MetricsBuilder) RecordMongodbCacheUsedPercentDataPoint(ts pcommon.Timestamp, val float64) { - mb.metricMongodbCacheUsedPercent.recordDataPoint(mb.startTime, ts, val) -} - // RecordMongodbCollectionCountDataPoint adds a data point to mongodb.collection.count metric. func (mb *MetricsBuilder) RecordMongodbCollectionCountDataPoint(ts pcommon.Timestamp, val int64) { mb.metricMongodbCollectionCount.recordDataPoint(mb.startTime, ts, val) } -// RecordMongodbCommandsPerSecDataPoint adds a data point to mongodb.commands_per_sec metric. -func (mb *MetricsBuilder) RecordMongodbCommandsPerSecDataPoint(ts pcommon.Timestamp, val float64) { - mb.metricMongodbCommandsPerSec.recordDataPoint(mb.startTime, ts, val) +// RecordMongodbCommandsRateDataPoint adds a data point to mongodb.commands.rate metric. +func (mb *MetricsBuilder) RecordMongodbCommandsRateDataPoint(ts pcommon.Timestamp, val float64) { + mb.metricMongodbCommandsRate.recordDataPoint(mb.startTime, ts, val) } // RecordMongodbConnectionCountDataPoint adds a data point to mongodb.connection.count metric. @@ -2796,9 +2683,9 @@ func (mb *MetricsBuilder) RecordMongodbDatabaseCountDataPoint(ts pcommon.Timesta mb.metricMongodbDatabaseCount.recordDataPoint(mb.startTime, ts, val) } -// RecordMongodbDeletesPerSecDataPoint adds a data point to mongodb.deletes_per_sec metric. -func (mb *MetricsBuilder) RecordMongodbDeletesPerSecDataPoint(ts pcommon.Timestamp, val float64) { - mb.metricMongodbDeletesPerSec.recordDataPoint(mb.startTime, ts, val) +// RecordMongodbDeletesRateDataPoint adds a data point to mongodb.deletes.rate metric. +func (mb *MetricsBuilder) RecordMongodbDeletesRateDataPoint(ts pcommon.Timestamp, val float64) { + mb.metricMongodbDeletesRate.recordDataPoint(mb.startTime, ts, val) } // RecordMongodbDocumentOperationCountDataPoint adds a data point to mongodb.document.operation.count metric. @@ -2811,14 +2698,14 @@ func (mb *MetricsBuilder) RecordMongodbExtentCountDataPoint(ts pcommon.Timestamp mb.metricMongodbExtentCount.recordDataPoint(mb.startTime, ts, val) } -// RecordMongodbFlushesPerSecDataPoint adds a data point to mongodb.flushes_per_sec metric. -func (mb *MetricsBuilder) RecordMongodbFlushesPerSecDataPoint(ts pcommon.Timestamp, val float64) { - mb.metricMongodbFlushesPerSec.recordDataPoint(mb.startTime, ts, val) +// RecordMongodbFlushesRateDataPoint adds a data point to mongodb.flushes.rate metric. +func (mb *MetricsBuilder) RecordMongodbFlushesRateDataPoint(ts pcommon.Timestamp, val float64) { + mb.metricMongodbFlushesRate.recordDataPoint(mb.startTime, ts, val) } -// RecordMongodbGetmoresPerSecDataPoint adds a data point to mongodb.getmores_per_sec metric. -func (mb *MetricsBuilder) RecordMongodbGetmoresPerSecDataPoint(ts pcommon.Timestamp, val float64) { - mb.metricMongodbGetmoresPerSec.recordDataPoint(mb.startTime, ts, val) +// RecordMongodbGetmoresRateDataPoint adds a data point to mongodb.getmores.rate metric. +func (mb *MetricsBuilder) RecordMongodbGetmoresRateDataPoint(ts pcommon.Timestamp, val float64) { + mb.metricMongodbGetmoresRate.recordDataPoint(mb.startTime, ts, val) } // RecordMongodbGlobalLockTimeDataPoint adds a data point to mongodb.global_lock.time metric. 
@@ -2846,9 +2733,9 @@ func (mb *MetricsBuilder) RecordMongodbIndexSizeDataPoint(ts pcommon.Timestamp, mb.metricMongodbIndexSize.recordDataPoint(mb.startTime, ts, val) } -// RecordMongodbInsertsPerSecDataPoint adds a data point to mongodb.inserts_per_sec metric. -func (mb *MetricsBuilder) RecordMongodbInsertsPerSecDataPoint(ts pcommon.Timestamp, val float64) { - mb.metricMongodbInsertsPerSec.recordDataPoint(mb.startTime, ts, val) +// RecordMongodbInsertsRateDataPoint adds a data point to mongodb.inserts.rate metric. +func (mb *MetricsBuilder) RecordMongodbInsertsRateDataPoint(ts pcommon.Timestamp, val float64) { + mb.metricMongodbInsertsRate.recordDataPoint(mb.startTime, ts, val) } // RecordMongodbLockAcquireCountDataPoint adds a data point to mongodb.lock.acquire.count metric. @@ -2921,9 +2808,9 @@ func (mb *MetricsBuilder) RecordMongodbPageFaultsDataPoint(ts pcommon.Timestamp, mb.metricMongodbPageFaults.recordDataPoint(mb.startTime, ts, val) } -// RecordMongodbQueriesPerSecDataPoint adds a data point to mongodb.queries_per_sec metric. -func (mb *MetricsBuilder) RecordMongodbQueriesPerSecDataPoint(ts pcommon.Timestamp, val float64) { - mb.metricMongodbQueriesPerSec.recordDataPoint(mb.startTime, ts, val) +// RecordMongodbQueriesRateDataPoint adds a data point to mongodb.queries.rate metric. +func (mb *MetricsBuilder) RecordMongodbQueriesRateDataPoint(ts pcommon.Timestamp, val float64) { + mb.metricMongodbQueriesRate.recordDataPoint(mb.startTime, ts, val) } // RecordMongodbSessionCountDataPoint adds a data point to mongodb.session.count metric. @@ -2936,9 +2823,9 @@ func (mb *MetricsBuilder) RecordMongodbStorageSizeDataPoint(ts pcommon.Timestamp mb.metricMongodbStorageSize.recordDataPoint(mb.startTime, ts, val) } -// RecordMongodbUpdatesPerSecDataPoint adds a data point to mongodb.updates_per_sec metric. -func (mb *MetricsBuilder) RecordMongodbUpdatesPerSecDataPoint(ts pcommon.Timestamp, val float64) { - mb.metricMongodbUpdatesPerSec.recordDataPoint(mb.startTime, ts, val) +// RecordMongodbUpdatesRateDataPoint adds a data point to mongodb.updates.rate metric. +func (mb *MetricsBuilder) RecordMongodbUpdatesRateDataPoint(ts pcommon.Timestamp, val float64) { + mb.metricMongodbUpdatesRate.recordDataPoint(mb.startTime, ts, val) } // RecordMongodbUptimeDataPoint adds a data point to mongodb.uptime metric. 
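The `*_per_sec` to `*.rate` renames above touch only metric names and identifiers; the values remain gauges that the scraper derives by diffing a cumulative counter between consecutive scrapes and dividing by the elapsed time, the same pattern visible in the `recordFlushesPerSecond` hunk of `metrics.go` further down. A minimal, self-contained Go sketch of that rate computation follows; the names here are hypothetical (the receiver's real state lives on `mongodbScraper`, e.g. `prevFlushCount`), so treat it as an illustration of the technique rather than the generated code:

    package main

    import (
        "fmt"
        "time"
    )

    // rateTracker keeps the previous cumulative counter and scrape time so a
    // per-second rate can be derived on the next scrape (hypothetical type).
    type rateTracker struct {
        prevValue int64     // cumulative counter from the previous scrape
        prevTime  time.Time // when the previous scrape happened
    }

    // rate returns (current - previous) / elapsed seconds and stores the new
    // observation for the following scrape.
    func (r *rateTracker) rate(current int64, now time.Time) float64 {
        elapsed := now.Sub(r.prevTime).Seconds()
        if elapsed <= 0 {
            return 0 // guard: a repeated timestamp would otherwise divide by zero
        }
        perSec := float64(current-r.prevValue) / elapsed
        r.prevValue, r.prevTime = current, now
        return perSec
    }

    func main() {
        t := rateTracker{prevValue: 100, prevTime: time.Now().Add(-10 * time.Second)}
        // 150 cumulative queries observed 10s after the previous 100: 5.00/s.
        fmt.Printf("queries/s: %.2f\n", t.rate(150, time.Now()))
    }

Note the guard on a non-positive elapsed interval: these gauges are only meaningful from the second scrape onward, once a previous observation exists to diff against.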
diff --git a/receiver/mongodbreceiver/internal/metadata/generated_metrics_test.go b/receiver/mongodbreceiver/internal/metadata/generated_metrics_test.go index 123f5522ee487..f9a2b39f6de0b 100644 --- a/receiver/mongodbreceiver/internal/metadata/generated_metrics_test.go +++ b/receiver/mongodbreceiver/internal/metadata/generated_metrics_test.go @@ -74,22 +74,16 @@ func TestMetricsBuilder(t *testing.T) { allMetricsCount++ mb.RecordMongodbActiveWritesDataPoint(ts, 1) - allMetricsCount++ - mb.RecordMongodbCacheDirtyPercentDataPoint(ts, 1) - defaultMetricsCount++ allMetricsCount++ mb.RecordMongodbCacheOperationsDataPoint(ts, 1, AttributeTypeHit) - allMetricsCount++ - mb.RecordMongodbCacheUsedPercentDataPoint(ts, 1) - defaultMetricsCount++ allMetricsCount++ mb.RecordMongodbCollectionCountDataPoint(ts, 1) allMetricsCount++ - mb.RecordMongodbCommandsPerSecDataPoint(ts, 1) + mb.RecordMongodbCommandsRateDataPoint(ts, 1) defaultMetricsCount++ allMetricsCount++ @@ -112,7 +106,7 @@ func TestMetricsBuilder(t *testing.T) { mb.RecordMongodbDatabaseCountDataPoint(ts, 1) allMetricsCount++ - mb.RecordMongodbDeletesPerSecDataPoint(ts, 1) + mb.RecordMongodbDeletesRateDataPoint(ts, 1) defaultMetricsCount++ allMetricsCount++ @@ -123,10 +117,10 @@ func TestMetricsBuilder(t *testing.T) { mb.RecordMongodbExtentCountDataPoint(ts, 1) allMetricsCount++ - mb.RecordMongodbFlushesPerSecDataPoint(ts, 1) + mb.RecordMongodbFlushesRateDataPoint(ts, 1) allMetricsCount++ - mb.RecordMongodbGetmoresPerSecDataPoint(ts, 1) + mb.RecordMongodbGetmoresRateDataPoint(ts, 1) defaultMetricsCount++ allMetricsCount++ @@ -148,7 +142,7 @@ func TestMetricsBuilder(t *testing.T) { mb.RecordMongodbIndexSizeDataPoint(ts, 1) allMetricsCount++ - mb.RecordMongodbInsertsPerSecDataPoint(ts, 1) + mb.RecordMongodbInsertsRateDataPoint(ts, 1) allMetricsCount++ mb.RecordMongodbLockAcquireCountDataPoint(ts, 1, AttributeLockTypeParallelBatchWriteMode, AttributeLockModeShared) @@ -200,7 +194,7 @@ func TestMetricsBuilder(t *testing.T) { mb.RecordMongodbPageFaultsDataPoint(ts, 1) allMetricsCount++ - mb.RecordMongodbQueriesPerSecDataPoint(ts, 1) + mb.RecordMongodbQueriesRateDataPoint(ts, 1) defaultMetricsCount++ allMetricsCount++ @@ -211,7 +205,7 @@ func TestMetricsBuilder(t *testing.T) { mb.RecordMongodbStorageSizeDataPoint(ts, 1) allMetricsCount++ - mb.RecordMongodbUpdatesPerSecDataPoint(ts, 1) + mb.RecordMongodbUpdatesRateDataPoint(ts, 1) allMetricsCount++ mb.RecordMongodbUptimeDataPoint(ts, 1) @@ -273,18 +267,6 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, ts, dp.Timestamp()) assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) assert.Equal(t, int64(1), dp.IntValue()) - case "mongodb.cache.dirty.percent": - assert.False(t, validatedMetrics["mongodb.cache.dirty.percent"], "Found a duplicate in the metrics slice: mongodb.cache.dirty.percent") - validatedMetrics["mongodb.cache.dirty.percent"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "The percentage of WiredTiger cache that is dirty.", ms.At(i).Description()) - assert.Equal(t, "1", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType()) - assert.InDelta(t, float64(1), dp.DoubleValue(), 0.01) case "mongodb.cache.operations": assert.False(t, validatedMetrics["mongodb.cache.operations"], "Found a duplicate in the metrics slice: 
mongodb.cache.operations") validatedMetrics["mongodb.cache.operations"] = true @@ -302,18 +284,6 @@ func TestMetricsBuilder(t *testing.T) { attrVal, ok := dp.Attributes().Get("type") assert.True(t, ok) assert.EqualValues(t, "hit", attrVal.Str()) - case "mongodb.cache.used.percent": - assert.False(t, validatedMetrics["mongodb.cache.used.percent"], "Found a duplicate in the metrics slice: mongodb.cache.used.percent") - validatedMetrics["mongodb.cache.used.percent"] = true - assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) - assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "The percentage of WiredTiger cache in use.", ms.At(i).Description()) - assert.Equal(t, "1", ms.At(i).Unit()) - dp := ms.At(i).Gauge().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType()) - assert.InDelta(t, float64(1), dp.DoubleValue(), 0.01) case "mongodb.collection.count": assert.False(t, validatedMetrics["mongodb.collection.count"], "Found a duplicate in the metrics slice: mongodb.collection.count") validatedMetrics["mongodb.collection.count"] = true @@ -328,9 +298,9 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, ts, dp.Timestamp()) assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) assert.Equal(t, int64(1), dp.IntValue()) - case "mongodb.commands_per_sec": - assert.False(t, validatedMetrics["mongodb.commands_per_sec"], "Found a duplicate in the metrics slice: mongodb.commands_per_sec") - validatedMetrics["mongodb.commands_per_sec"] = true + case "mongodb.commands.rate": + assert.False(t, validatedMetrics["mongodb.commands.rate"], "Found a duplicate in the metrics slice: mongodb.commands.rate") + validatedMetrics["mongodb.commands.rate"] = true assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) assert.Equal(t, "The number of commands executed per second.", ms.At(i).Description()) @@ -413,9 +383,9 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, ts, dp.Timestamp()) assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) assert.Equal(t, int64(1), dp.IntValue()) - case "mongodb.deletes_per_sec": - assert.False(t, validatedMetrics["mongodb.deletes_per_sec"], "Found a duplicate in the metrics slice: mongodb.deletes_per_sec") - validatedMetrics["mongodb.deletes_per_sec"] = true + case "mongodb.deletes.rate": + assert.False(t, validatedMetrics["mongodb.deletes.rate"], "Found a duplicate in the metrics slice: mongodb.deletes.rate") + validatedMetrics["mongodb.deletes.rate"] = true assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) assert.Equal(t, "The number of deletes executed per second.", ms.At(i).Description()) @@ -456,9 +426,9 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, ts, dp.Timestamp()) assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) assert.Equal(t, int64(1), dp.IntValue()) - case "mongodb.flushes_per_sec": - assert.False(t, validatedMetrics["mongodb.flushes_per_sec"], "Found a duplicate in the metrics slice: mongodb.flushes_per_sec") - validatedMetrics["mongodb.flushes_per_sec"] = true + case "mongodb.flushes.rate": + assert.False(t, validatedMetrics["mongodb.flushes.rate"], "Found a duplicate in the metrics slice: mongodb.flushes.rate") + validatedMetrics["mongodb.flushes.rate"] = true assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) 
assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) assert.Equal(t, "The number of flushes executed per second.", ms.At(i).Description()) @@ -468,9 +438,9 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, ts, dp.Timestamp()) assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType()) assert.InDelta(t, float64(1), dp.DoubleValue(), 0.01) - case "mongodb.getmores_per_sec": - assert.False(t, validatedMetrics["mongodb.getmores_per_sec"], "Found a duplicate in the metrics slice: mongodb.getmores_per_sec") - validatedMetrics["mongodb.getmores_per_sec"] = true + case "mongodb.getmores.rate": + assert.False(t, validatedMetrics["mongodb.getmores.rate"], "Found a duplicate in the metrics slice: mongodb.getmores.rate") + validatedMetrics["mongodb.getmores.rate"] = true assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) assert.Equal(t, "The number of getmores executed per second.", ms.At(i).Description()) @@ -551,9 +521,9 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, ts, dp.Timestamp()) assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) assert.Equal(t, int64(1), dp.IntValue()) - case "mongodb.inserts_per_sec": - assert.False(t, validatedMetrics["mongodb.inserts_per_sec"], "Found a duplicate in the metrics slice: mongodb.inserts_per_sec") - validatedMetrics["mongodb.inserts_per_sec"] = true + case "mongodb.inserts.rate": + assert.False(t, validatedMetrics["mongodb.inserts.rate"], "Found a duplicate in the metrics slice: mongodb.inserts.rate") + validatedMetrics["mongodb.inserts.rate"] = true assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) assert.Equal(t, "The number of insertions executed per second.", ms.At(i).Description()) @@ -796,9 +766,9 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, ts, dp.Timestamp()) assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) assert.Equal(t, int64(1), dp.IntValue()) - case "mongodb.queries_per_sec": - assert.False(t, validatedMetrics["mongodb.queries_per_sec"], "Found a duplicate in the metrics slice: mongodb.queries_per_sec") - validatedMetrics["mongodb.queries_per_sec"] = true + case "mongodb.queries.rate": + assert.False(t, validatedMetrics["mongodb.queries.rate"], "Found a duplicate in the metrics slice: mongodb.queries.rate") + validatedMetrics["mongodb.queries.rate"] = true assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) assert.Equal(t, "The number of queries executed per second.", ms.At(i).Description()) @@ -836,9 +806,9 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, ts, dp.Timestamp()) assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) assert.Equal(t, int64(1), dp.IntValue()) - case "mongodb.updates_per_sec": - assert.False(t, validatedMetrics["mongodb.updates_per_sec"], "Found a duplicate in the metrics slice: mongodb.updates_per_sec") - validatedMetrics["mongodb.updates_per_sec"] = true + case "mongodb.updates.rate": + assert.False(t, validatedMetrics["mongodb.updates.rate"], "Found a duplicate in the metrics slice: mongodb.updates.rate") + validatedMetrics["mongodb.updates.rate"] = true assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) assert.Equal(t, "The number of updates executed per second.", ms.At(i).Description()) diff --git 
a/receiver/mongodbreceiver/internal/metadata/testdata/config.yaml b/receiver/mongodbreceiver/internal/metadata/testdata/config.yaml index ab1e61d82f257..633c89e9ec22c 100644 --- a/receiver/mongodbreceiver/internal/metadata/testdata/config.yaml +++ b/receiver/mongodbreceiver/internal/metadata/testdata/config.yaml @@ -5,15 +5,11 @@ all_set: enabled: true mongodb.active.writes: enabled: true - mongodb.cache.dirty.percent: - enabled: true mongodb.cache.operations: enabled: true - mongodb.cache.used.percent: - enabled: true mongodb.collection.count: enabled: true - mongodb.commands_per_sec: + mongodb.commands.rate: enabled: true mongodb.connection.count: enabled: true @@ -25,15 +21,15 @@ all_set: enabled: true mongodb.database.count: enabled: true - mongodb.deletes_per_sec: + mongodb.deletes.rate: enabled: true mongodb.document.operation.count: enabled: true mongodb.extent.count: enabled: true - mongodb.flushes_per_sec: + mongodb.flushes.rate: enabled: true - mongodb.getmores_per_sec: + mongodb.getmores.rate: enabled: true mongodb.global_lock.time: enabled: true @@ -45,7 +41,7 @@ all_set: enabled: true mongodb.index.size: enabled: true - mongodb.inserts_per_sec: + mongodb.inserts.rate: enabled: true mongodb.lock.acquire.count: enabled: true @@ -75,13 +71,13 @@ all_set: enabled: true mongodb.page_faults: enabled: true - mongodb.queries_per_sec: + mongodb.queries.rate: enabled: true mongodb.session.count: enabled: true mongodb.storage.size: enabled: true - mongodb.updates_per_sec: + mongodb.updates.rate: enabled: true mongodb.uptime: enabled: true @@ -100,15 +96,11 @@ none_set: enabled: false mongodb.active.writes: enabled: false - mongodb.cache.dirty.percent: - enabled: false mongodb.cache.operations: enabled: false - mongodb.cache.used.percent: - enabled: false mongodb.collection.count: enabled: false - mongodb.commands_per_sec: + mongodb.commands.rate: enabled: false mongodb.connection.count: enabled: false @@ -120,15 +112,15 @@ none_set: enabled: false mongodb.database.count: enabled: false - mongodb.deletes_per_sec: + mongodb.deletes.rate: enabled: false mongodb.document.operation.count: enabled: false mongodb.extent.count: enabled: false - mongodb.flushes_per_sec: + mongodb.flushes.rate: enabled: false - mongodb.getmores_per_sec: + mongodb.getmores.rate: enabled: false mongodb.global_lock.time: enabled: false @@ -140,7 +132,7 @@ none_set: enabled: false mongodb.index.size: enabled: false - mongodb.inserts_per_sec: + mongodb.inserts.rate: enabled: false mongodb.lock.acquire.count: enabled: false @@ -170,13 +162,13 @@ none_set: enabled: false mongodb.page_faults: enabled: false - mongodb.queries_per_sec: + mongodb.queries.rate: enabled: false mongodb.session.count: enabled: false mongodb.storage.size: enabled: false - mongodb.updates_per_sec: + mongodb.updates.rate: enabled: false mongodb.uptime: enabled: false diff --git a/receiver/mongodbreceiver/metadata.yaml b/receiver/mongodbreceiver/metadata.yaml index 7949fec2ef3d5..8c90c5545fc9e 100644 --- a/receiver/mongodbreceiver/metadata.yaml +++ b/receiver/mongodbreceiver/metadata.yaml @@ -358,7 +358,7 @@ metrics: monotonic: true aggregation_temporality: cumulative attributes: [ ] - mongodb.queries_per_sec: + mongodb.queries.rate: description: The number of queries executed per second. unit: "{query}/s" enabled: false @@ -366,7 +366,7 @@ metrics: value_type: double aggregation_temporality: delta monotonic: false - mongodb.inserts_per_sec: + mongodb.inserts.rate: description: The number of insertions executed per second. 
unit: "{insert}/s" enabled: false @@ -374,7 +374,7 @@ metrics: value_type: double aggregation_temporality: delta monotonic: false - mongodb.commands_per_sec: + mongodb.commands.rate: description: The number of commands executed per second. unit: "{command}/s" enabled: false @@ -382,7 +382,7 @@ metrics: value_type: double aggregation_temporality: delta monotonic: false - mongodb.getmores_per_sec: + mongodb.getmores.rate: description: The number of getmores executed per second. unit: "{getmore}/s" enabled: false @@ -390,7 +390,7 @@ metrics: value_type: double aggregation_temporality: delta monotonic: false - mongodb.deletes_per_sec: + mongodb.deletes.rate: description: The number of deletes executed per second. unit: "{delete}/s" enabled: false @@ -398,7 +398,7 @@ metrics: value_type: double aggregation_temporality: delta monotonic: false - mongodb.updates_per_sec: + mongodb.updates.rate: description: The number of updates executed per second. unit: "{update}/s" enabled: false @@ -406,7 +406,7 @@ metrics: value_type: double aggregation_temporality: delta monotonic: false - mongodb.flushes_per_sec: + mongodb.flushes.rate: description: The number of flushes executed per second. unit: "{flush}/s" enabled: false @@ -441,20 +441,6 @@ metrics: aggregation_temporality: cumulative monotonic: true attributes: [] - mongodb.cache.dirty.percent: - description: The percentage of WiredTiger cache that is dirty. - unit: "1" - enabled: false - gauge: - value_type: double - attributes: [] - mongodb.cache.used.percent: - description: The percentage of WiredTiger cache in use. - unit: "1" - enabled: false - gauge: - value_type: double - attributes: [] mongodb.page_faults: description: The number of page faults. unit: "{faults}" diff --git a/receiver/mongodbreceiver/metrics.go b/receiver/mongodbreceiver/metrics.go index dfdee23b11954..8e63163c45e4a 100644 --- a/receiver/mongodbreceiver/metrics.go +++ b/receiver/mongodbreceiver/metrics.go @@ -258,7 +258,7 @@ func (s *mongodbScraper) recordOperationsRepl(now pcommon.Timestamp, doc bson.M, func (s *mongodbScraper) recordFlushesPerSecond(now pcommon.Timestamp, doc bson.M, errs *scrapererror.ScrapeErrors) { metricPath := []string{"wiredTiger", "checkpoint", "total succeed number of checkpoints"} - metricName := "mongodb.flushes_per_sec" + metricName := "mongodb.flushes.rate" currentFlushes, err := collectMetric(doc, metricPath) if err != nil { errs.AddPartial(1, fmt.Errorf(collectMetricError, metricName, err)) @@ -271,7 +271,7 @@ func (s *mongodbScraper) recordFlushesPerSecond(now pcommon.Timestamp, doc bson. 
if prevFlushCount := s.prevFlushCount; true { delta := currentFlushes - prevFlushCount flushesPerSec := float64(delta) / timeDelta - s.mb.RecordMongodbFlushesPerSecDataPoint(now, flushesPerSec) + s.mb.RecordMongodbFlushesRateDataPoint(now, flushesPerSec) } } } @@ -290,17 +290,17 @@ func (s *mongodbScraper) recordOperationPerSecond(now pcommon.Timestamp, operati switch operationVal { case "query": - s.mb.RecordMongodbQueriesPerSecDataPoint(now, queriesPerSec) + s.mb.RecordMongodbQueriesRateDataPoint(now, queriesPerSec) case "insert": - s.mb.RecordMongodbInsertsPerSecDataPoint(now, queriesPerSec) + s.mb.RecordMongodbInsertsRateDataPoint(now, queriesPerSec) case "command": - s.mb.RecordMongodbCommandsPerSecDataPoint(now, queriesPerSec) + s.mb.RecordMongodbCommandsRateDataPoint(now, queriesPerSec) case "getmore": - s.mb.RecordMongodbGetmoresPerSecDataPoint(now, queriesPerSec) + s.mb.RecordMongodbGetmoresRateDataPoint(now, queriesPerSec) case "delete": - s.mb.RecordMongodbDeletesPerSecDataPoint(now, queriesPerSec) + s.mb.RecordMongodbDeletesRateDataPoint(now, queriesPerSec) case "update": - s.mb.RecordMongodbUpdatesPerSecDataPoint(now, queriesPerSec) + s.mb.RecordMongodbUpdatesRateDataPoint(now, queriesPerSec) default: fmt.Printf("Unhandled operation: %s\n", operationVal) } @@ -342,35 +342,6 @@ func (s *mongodbScraper) recordWTCacheBytes(now pcommon.Timestamp, doc bson.M, e s.mb.RecordMongodbWtcacheBytesReadDataPoint(now, val) } -func (s *mongodbScraper) recordCachePercentages(now pcommon.Timestamp, doc bson.M, errs *scrapererror.ScrapeErrors) { - wt, ok := doc["wiredTiger"].(bson.M) - if !ok { - errs.AddPartial(2, errors.New("failed to find wiredTiger metrics")) - return - } - - cache, ok := wt["cache"].(bson.M) - if !ok { - errs.AddPartial(2, errors.New("failed to find cache metrics")) - return - } - - // Calculate dirty percentage - trackedDirtyBytes, err1 := collectMetric(cache, []string{"tracked dirty bytes in the cache"}) - maxBytes, err2 := collectMetric(cache, []string{"maximum bytes configured"}) - if err1 == nil && err2 == nil && maxBytes > 0 { - dirtyPercent := float64(trackedDirtyBytes) / float64(maxBytes) * 100 - s.mb.RecordMongodbCacheDirtyPercentDataPoint(now, dirtyPercent) - } - - // Calculate used percentage - bytesInUse, err3 := collectMetric(cache, []string{"bytes currently in the cache"}) - if err3 == nil && maxBytes > 0 { - usedPercent := float64(bytesInUse) / float64(maxBytes) * 100 - s.mb.RecordMongodbCacheUsedPercentDataPoint(now, usedPercent) - } -} - func (s *mongodbScraper) recordPageFaults(now pcommon.Timestamp, doc bson.M, errs *scrapererror.ScrapeErrors) { metricPath := []string{"extra_info", "page_faults"} metricName := "mongodb.page_faults" diff --git a/receiver/mongodbreceiver/scraper.go b/receiver/mongodbreceiver/scraper.go index d9fcc79ed4812..1486717748198 100644 --- a/receiver/mongodbreceiver/scraper.go +++ b/receiver/mongodbreceiver/scraper.go @@ -218,7 +218,6 @@ func (s *mongodbScraper) recordAdminStats(now pcommon.Timestamp, document bson.M s.recordActiveReads(now, document, errs) s.recordFlushesPerSecond(now, document, errs) s.recordWTCacheBytes(now, document, errs) - s.recordCachePercentages(now, document, errs) s.recordPageFaults(now, document, errs) } diff --git a/receiver/mongodbreceiver/scraper_test.go b/receiver/mongodbreceiver/scraper_test.go index 9effd0448b0fa..bc5b019da5dac 100644 --- a/receiver/mongodbreceiver/scraper_test.go +++ b/receiver/mongodbreceiver/scraper_test.go @@ -96,10 +96,9 @@ var ( "failed to collect metric mongodb.uptime: could 
not find key for metric", "failed to collect metric mongodb.active.reads: could not find key for metric", "failed to collect metric mongodb.active.writes: could not find key for metric", - "failed to collect metric mongodb.flushes_per_sec: could not find key for metric", + "failed to collect metric mongodb.flushes.rate: could not find key for metric", "failed to collect metric mongodb.page_faults: could not find key for metric", "failed to collect metric mongodb.wtcache.bytes.read: could not find key for metric", - "failed to find wiredTiger metrics", }, "; ")) errAllClientFailedFetch = errors.New( strings.Join( From ab9fee4e333db6c32a38a354192b7bb128f696e2 Mon Sep 17 00:00:00 2001 From: Tim Chan Date: Tue, 25 Feb 2025 17:28:08 -0800 Subject: [PATCH 17/19] fixed lint --- .chloggen/chan-tim_mongodbMetrics.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.chloggen/chan-tim_mongodbMetrics.yaml b/.chloggen/chan-tim_mongodbMetrics.yaml index e9452318800b3..e749ed4761f52 100644 --- a/.chloggen/chan-tim_mongodbMetrics.yaml +++ b/.chloggen/chan-tim_mongodbMetrics.yaml @@ -7,7 +7,7 @@ change_type: enhancement component: mongodbreceiver # A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). -note: Added new mongodb metrics to acheive parity with Telegraf +note: Added new mongodb metrics to achieve parity with Telegraf # Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists. issues: [37227] From 4b7896b92f13398c1a4c3087335f2c553a541bb9 Mon Sep 17 00:00:00 2001 From: Tim Chan Date: Tue, 25 Feb 2025 17:42:19 -0800 Subject: [PATCH 18/19] fixed lint --- .../mongodbreceiver/generated_package_test.go | 3 ++- .../internal/metadata/generated_config_test.go | 10 ++++------ .../internal/metadata/generated_metrics.go | 15 +++++++-------- 3 files changed, 13 insertions(+), 15 deletions(-) diff --git a/receiver/mongodbreceiver/generated_package_test.go b/receiver/mongodbreceiver/generated_package_test.go index 080891042403b..17e9f23be856d 100644 --- a/receiver/mongodbreceiver/generated_package_test.go +++ b/receiver/mongodbreceiver/generated_package_test.go @@ -3,8 +3,9 @@ package mongodbreceiver import ( - "go.uber.org/goleak" "testing" + + "go.uber.org/goleak" ) func TestMain(m *testing.M) { diff --git a/receiver/mongodbreceiver/internal/metadata/generated_config_test.go b/receiver/mongodbreceiver/internal/metadata/generated_config_test.go index 213f4b0f22d6e..1ed31c0117fa7 100644 --- a/receiver/mongodbreceiver/internal/metadata/generated_config_test.go +++ b/receiver/mongodbreceiver/internal/metadata/generated_config_test.go @@ -131,9 +131,8 @@ func TestMetricsBuilderConfig(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { cfg := loadMetricsBuilderConfig(t, tt.name) - if diff := cmp.Diff(tt.want, cfg, cmpopts.IgnoreUnexported(MetricConfig{}, ResourceAttributeConfig{})); diff != "" { - t.Errorf("Config mismatch (-expected +actual):\n%s", diff) - } + diff := cmp.Diff(tt.want, cfg, cmpopts.IgnoreUnexported(MetricConfig{}, ResourceAttributeConfig{})) + require.Emptyf(t, diff, "Config mismatch (-expected +actual):\n%s", diff) }) } } @@ -177,9 +176,8 @@ func TestResourceAttributesConfig(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { cfg := loadResourceAttributesConfig(t, tt.name) - if diff := cmp.Diff(tt.want, cfg, cmpopts.IgnoreUnexported(ResourceAttributeConfig{})); diff != "" { - t.Errorf("Config mismatch (-expected 
+actual):\n%s", diff) - } + diff := cmp.Diff(tt.want, cfg, cmpopts.IgnoreUnexported(ResourceAttributeConfig{})) + require.Emptyf(t, diff, "Config mismatch (-expected +actual):\n%s", diff) }) } } diff --git a/receiver/mongodbreceiver/internal/metadata/generated_metrics.go b/receiver/mongodbreceiver/internal/metadata/generated_metrics.go index 48ac8f39ee49d..a82b51a8fd021 100644 --- a/receiver/mongodbreceiver/internal/metadata/generated_metrics.go +++ b/receiver/mongodbreceiver/internal/metadata/generated_metrics.go @@ -12,7 +12,7 @@ import ( "go.opentelemetry.io/collector/receiver" ) -// AttributeConnectionType specifies the a value connection_type attribute. +// AttributeConnectionType specifies the value connection_type attribute. type AttributeConnectionType int const ( @@ -42,7 +42,7 @@ var MapAttributeConnectionType = map[string]AttributeConnectionType{ "current": AttributeConnectionTypeCurrent, } -// AttributeLockMode specifies the a value lock_mode attribute. +// AttributeLockMode specifies the value lock_mode attribute. type AttributeLockMode int const ( @@ -76,7 +76,7 @@ var MapAttributeLockMode = map[string]AttributeLockMode{ "intent_exclusive": AttributeLockModeIntentExclusive, } -// AttributeLockType specifies the a value lock_type attribute. +// AttributeLockType specifies the value lock_type attribute. type AttributeLockType int const ( @@ -126,7 +126,7 @@ var MapAttributeLockType = map[string]AttributeLockType{ "oplog": AttributeLockTypeOplog, } -// AttributeMemoryType specifies the a value memory_type attribute. +// AttributeMemoryType specifies the value memory_type attribute. type AttributeMemoryType int const ( @@ -152,7 +152,7 @@ var MapAttributeMemoryType = map[string]AttributeMemoryType{ "virtual": AttributeMemoryTypeVirtual, } -// AttributeOperation specifies the a value operation attribute. +// AttributeOperation specifies the value operation attribute. type AttributeOperation int const ( @@ -194,7 +194,7 @@ var MapAttributeOperation = map[string]AttributeOperation{ "command": AttributeOperationCommand, } -// AttributeOperationLatency specifies the a value operation_latency attribute. +// AttributeOperationLatency specifies the value operation_latency attribute. type AttributeOperationLatency int const ( @@ -224,7 +224,7 @@ var MapAttributeOperationLatency = map[string]AttributeOperationLatency{ "command": AttributeOperationLatencyCommand, } -// AttributeType specifies the a value type attribute. +// AttributeType specifies the value type attribute. 
type AttributeType int const ( @@ -2423,7 +2423,6 @@ func WithStartTime(startTime pcommon.Timestamp) MetricBuilderOption { mb.startTime = startTime }) } - func NewMetricsBuilder(mbc MetricsBuilderConfig, settings receiver.Settings, options ...MetricBuilderOption) *MetricsBuilder { mb := &MetricsBuilder{ config: mbc, From 671cad6f7d34c72793ffadb05bfcdbea54a78bc0 Mon Sep 17 00:00:00 2001 From: Tim Chan Date: Thu, 6 Mar 2025 13:59:08 -0800 Subject: [PATCH 19/19] fixed make generate --- .../mongodbreceiver/generated_component_test.go | 12 +++++++----- .../mongodbreceiver/generated_package_test.go | 3 ++- .../internal/metadata/generated_config_test.go | 10 ++++------ .../internal/metadata/generated_metrics.go | 17 ++++++++--------- .../internal/metadata/generated_metrics_test.go | 2 +- 5 files changed, 22 insertions(+), 22 deletions(-) diff --git a/receiver/mongodbreceiver/generated_component_test.go b/receiver/mongodbreceiver/generated_component_test.go index d842fa1a21932..36822dcbcc2c2 100644 --- a/receiver/mongodbreceiver/generated_component_test.go +++ b/receiver/mongodbreceiver/generated_component_test.go @@ -15,8 +15,10 @@ import ( "go.opentelemetry.io/collector/receiver/receivertest" ) +var typ = component.MustNewType("mongodb") + func TestComponentFactoryType(t *testing.T) { - require.Equal(t, "mongodb", NewFactory().Type().String()) + require.Equal(t, typ, NewFactory().Type()) } func TestComponentConfigStruct(t *testing.T) { @@ -27,8 +29,8 @@ func TestComponentLifecycle(t *testing.T) { factory := NewFactory() tests := []struct { - name string createFn func(ctx context.Context, set receiver.Settings, cfg component.Config) (component.Component, error) + name string }{ { @@ -48,19 +50,19 @@ func TestComponentLifecycle(t *testing.T) { for _, tt := range tests { t.Run(tt.name+"-shutdown", func(t *testing.T) { - c, err := tt.createFn(context.Background(), receivertest.NewNopSettings(), cfg) + c, err := tt.createFn(context.Background(), receivertest.NewNopSettings(typ), cfg) require.NoError(t, err) err = c.Shutdown(context.Background()) require.NoError(t, err) }) t.Run(tt.name+"-lifecycle", func(t *testing.T) { - firstRcvr, err := tt.createFn(context.Background(), receivertest.NewNopSettings(), cfg) + firstRcvr, err := tt.createFn(context.Background(), receivertest.NewNopSettings(typ), cfg) require.NoError(t, err) host := componenttest.NewNopHost() require.NoError(t, err) require.NoError(t, firstRcvr.Start(context.Background(), host)) require.NoError(t, firstRcvr.Shutdown(context.Background())) - secondRcvr, err := tt.createFn(context.Background(), receivertest.NewNopSettings(), cfg) + secondRcvr, err := tt.createFn(context.Background(), receivertest.NewNopSettings(typ), cfg) require.NoError(t, err) require.NoError(t, secondRcvr.Start(context.Background(), host)) require.NoError(t, secondRcvr.Shutdown(context.Background())) diff --git a/receiver/mongodbreceiver/generated_package_test.go b/receiver/mongodbreceiver/generated_package_test.go index 080891042403b..17e9f23be856d 100644 --- a/receiver/mongodbreceiver/generated_package_test.go +++ b/receiver/mongodbreceiver/generated_package_test.go @@ -3,8 +3,9 @@ package mongodbreceiver import ( - "go.uber.org/goleak" "testing" + + "go.uber.org/goleak" ) func TestMain(m *testing.M) { diff --git a/receiver/mongodbreceiver/internal/metadata/generated_config_test.go b/receiver/mongodbreceiver/internal/metadata/generated_config_test.go index fa29b03c03438..1ca27d9e775cf 100644 --- a/receiver/mongodbreceiver/internal/metadata/generated_config_test.go 
+++ b/receiver/mongodbreceiver/internal/metadata/generated_config_test.go @@ -143,9 +143,8 @@ func TestMetricsBuilderConfig(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { cfg := loadMetricsBuilderConfig(t, tt.name) - if diff := cmp.Diff(tt.want, cfg, cmpopts.IgnoreUnexported(MetricConfig{}, ResourceAttributeConfig{})); diff != "" { - t.Errorf("Config mismatch (-expected +actual):\n%s", diff) - } + diff := cmp.Diff(tt.want, cfg, cmpopts.IgnoreUnexported(MetricConfig{}, ResourceAttributeConfig{})) + require.Emptyf(t, diff, "Config mismatch (-expected +actual):\n%s", diff) }) } } @@ -189,9 +188,8 @@ func TestResourceAttributesConfig(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { cfg := loadResourceAttributesConfig(t, tt.name) - if diff := cmp.Diff(tt.want, cfg, cmpopts.IgnoreUnexported(ResourceAttributeConfig{})); diff != "" { - t.Errorf("Config mismatch (-expected +actual):\n%s", diff) - } + diff := cmp.Diff(tt.want, cfg, cmpopts.IgnoreUnexported(ResourceAttributeConfig{})) + require.Emptyf(t, diff, "Config mismatch (-expected +actual):\n%s", diff) }) } } diff --git a/receiver/mongodbreceiver/internal/metadata/generated_metrics.go b/receiver/mongodbreceiver/internal/metadata/generated_metrics.go index 9ec218b7e9baf..6ed6d87275722 100644 --- a/receiver/mongodbreceiver/internal/metadata/generated_metrics.go +++ b/receiver/mongodbreceiver/internal/metadata/generated_metrics.go @@ -12,7 +12,7 @@ import ( "go.opentelemetry.io/collector/receiver" ) -// AttributeConnectionType specifies the a value connection_type attribute. +// AttributeConnectionType specifies the value connection_type attribute. type AttributeConnectionType int const ( @@ -42,7 +42,7 @@ var MapAttributeConnectionType = map[string]AttributeConnectionType{ "current": AttributeConnectionTypeCurrent, } -// AttributeLockMode specifies the a value lock_mode attribute. +// AttributeLockMode specifies the value lock_mode attribute. type AttributeLockMode int const ( @@ -76,7 +76,7 @@ var MapAttributeLockMode = map[string]AttributeLockMode{ "intent_exclusive": AttributeLockModeIntentExclusive, } -// AttributeLockType specifies the a value lock_type attribute. +// AttributeLockType specifies the value lock_type attribute. type AttributeLockType int const ( @@ -126,7 +126,7 @@ var MapAttributeLockType = map[string]AttributeLockType{ "oplog": AttributeLockTypeOplog, } -// AttributeMemoryType specifies the a value memory_type attribute. +// AttributeMemoryType specifies the value memory_type attribute. type AttributeMemoryType int const ( @@ -152,7 +152,7 @@ var MapAttributeMemoryType = map[string]AttributeMemoryType{ "virtual": AttributeMemoryTypeVirtual, } -// AttributeOperation specifies the a value operation attribute. +// AttributeOperation specifies the value operation attribute. type AttributeOperation int const ( @@ -194,7 +194,7 @@ var MapAttributeOperation = map[string]AttributeOperation{ "command": AttributeOperationCommand, } -// AttributeOperationLatency specifies the a value operation_latency attribute. +// AttributeOperationLatency specifies the value operation_latency attribute. type AttributeOperationLatency int const ( @@ -224,7 +224,7 @@ var MapAttributeOperationLatency = map[string]AttributeOperationLatency{ "command": AttributeOperationLatencyCommand, } -// AttributeType specifies the a value type attribute. +// AttributeType specifies the value type attribute. 
type AttributeType int const ( @@ -2723,7 +2723,6 @@ func WithStartTime(startTime pcommon.Timestamp) MetricBuilderOption { mb.startTime = startTime }) } - func NewMetricsBuilder(mbc MetricsBuilderConfig, settings receiver.Settings, options ...MetricBuilderOption) *MetricsBuilder { mb := &MetricsBuilder{ config: mbc, @@ -2864,7 +2863,7 @@ func WithStartTimeOverride(start pcommon.Timestamp) ResourceMetricsOption { func (mb *MetricsBuilder) EmitForResource(options ...ResourceMetricsOption) { rm := pmetric.NewResourceMetrics() ils := rm.ScopeMetrics().AppendEmpty() - ils.Scope().SetName("github.com/open-telemetry/opentelemetry-collector-contrib/receiver/mongodbreceiver") + ils.Scope().SetName(ScopeName) ils.Scope().SetVersion(mb.buildInfo.Version) ils.Metrics().EnsureCapacity(mb.metricsCapacity) mb.metricMongodbActiveReads.emit(ils.Metrics()) diff --git a/receiver/mongodbreceiver/internal/metadata/generated_metrics_test.go b/receiver/mongodbreceiver/internal/metadata/generated_metrics_test.go index b51ab2a51f878..1e382d2c883e2 100644 --- a/receiver/mongodbreceiver/internal/metadata/generated_metrics_test.go +++ b/receiver/mongodbreceiver/internal/metadata/generated_metrics_test.go @@ -57,7 +57,7 @@ func TestMetricsBuilder(t *testing.T) { start := pcommon.Timestamp(1_000_000_000) ts := pcommon.Timestamp(1_000_001_000) observedZapCore, observedLogs := observer.New(zap.WarnLevel) - settings := receivertest.NewNopSettings() + settings := receivertest.NewNopSettings(receivertest.NopType) settings.Logger = zap.New(observedZapCore) mb := NewMetricsBuilder(loadMetricsBuilderConfig(t, tt.name), settings, WithStartTime(start))
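Taken together, these hunks rename the user-facing mapstructure keys from `mongodb.*_per_sec` to `mongodb.*.rate` while leaving them `enabled: false` by default in `metadata.yaml`, so anything that opted into the old names must be updated. A hedged sketch of flipping the renamed options through the generated config: it assumes the usual mdatagen `DefaultMetricsBuilderConfig` helper (not shown in this patch) and only compiles from inside the receiver's own module, since the `metadata` package is internal:

    package main

    import (
        "fmt"

        // Internal to the mongodbreceiver module; shown for illustration only.
        "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/mongodbreceiver/internal/metadata"
    )

    func main() {
        // Assumed mdatagen helper: returns the defaults from metadata.yaml,
        // where the rate gauges start out disabled.
        mbc := metadata.DefaultMetricsBuilderConfig()

        // Opt in to the renamed per-second gauges.
        mbc.Metrics.MongodbQueriesRate.Enabled = true
        mbc.Metrics.MongodbInsertsRate.Enabled = true
        mbc.Metrics.MongodbCommandsRate.Enabled = true

        fmt.Println(mbc.Metrics.MongodbQueriesRate.Enabled) // true
    }

The collector-YAML equivalent would set `mongodb.queries.rate: {enabled: true}` under the receiver's `metrics` section, mirroring the `all_set` entries in the testdata `config.yaml` above.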