Skip to content

[chore] Change QueueBatch implementation to use the final config #12705

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 1 commit into from
Mar 24, 2025
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
25 changes: 25 additions & 0 deletions .chloggen/fix-start-order.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,25 @@
# Use this changelog template to create an entry for release notes.

# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix'
change_type: bug_fix

# The name of the component, or a single word describing the area of concern, (e.g. otlpreceiver)
component: exporterhelper

# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`).
note: Fix the startup order of the queue and batch components.

# One or more tracking issues or pull requests related to the change
issues: [12705]

# (Optional) One or more lines of additional information to render under the primary note.
# These lines will be padded with 2 spaces and then inserted directly into the document.
# Use pipe (|) for multiline entries.
subtext:

# Optional: The change log or logs in which this entry should be included.
# e.g. '[user]' or '[user, api]'
# Include 'user' if the change is relevant to end users.
# Include 'api' if there is a change to a library API.
# Default: '[user]'
change_logs: [user]
22 changes: 21 additions & 1 deletion exporter/exporterhelper/internal/queue_sender.go
Original file line number Diff line number Diff line change
Expand Up @@ -40,5 +40,25 @@ func NewQueueSender(
return nil
}

return queuebatch.NewQueueBatch(qSet, qCfg, bCfg, exportFunc)
return queuebatch.NewQueueBatch(qSet, newQueueBatchConfig(qCfg, bCfg), exportFunc)
}

// newQueueBatchConfig translates the legacy exporterqueue.Config and
// exporterbatcher.Config pair into the unified queuebatch.Config.
// When the legacy queue is disabled, WaitForResult is set so callers
// block on the export result instead of enqueueing.
func newQueueBatchConfig(qCfg exporterqueue.Config, bCfg exporterbatcher.Config) queuebatch.Config {
	cfg := queuebatch.Config{
		Enabled:         true,
		WaitForResult:   !qCfg.Enabled,
		Sizer:           exporterbatcher.SizerTypeRequests,
		QueueSize:       qCfg.QueueSize,
		NumConsumers:    qCfg.NumConsumers,
		BlockOnOverflow: qCfg.Blocking,
		StorageID:       qCfg.StorageID,
	}
	if !bCfg.Enabled {
		// Batching disabled: leave cfg.Batch nil so batching is skipped downstream.
		return cfg
	}
	cfg.Batch = &queuebatch.BatchConfig{
		FlushTimeout: bCfg.FlushTimeout,
		MinSize:      bCfg.MinSize,
		MaxSize:      bCfg.MaxSize,
	}
	return cfg
}
2 changes: 1 addition & 1 deletion exporter/exporterhelper/internal/queuebatch/config.go
Original file line number Diff line number Diff line change
Expand Up @@ -44,7 +44,7 @@ type Config struct {

// Batch configures how requests are consumed from the queue and batched together during consumption.
// TODO: This will be changed to Optional when available.
BatchConfig *BatchConfig `mapstructure:"batch"`
Batch *BatchConfig `mapstructure:"batch"`
}

// Validate checks if the Config is valid
Expand Down
15 changes: 6 additions & 9 deletions exporter/exporterhelper/internal/queuebatch/default_batcher.go
Original file line number Diff line number Diff line change
Expand Up @@ -24,7 +24,7 @@ type batch struct {

// defaultBatcher continuously batches incoming requests and flushes them asynchronously when the minimum size limit is met or on timeout.
type defaultBatcher struct {
batchCfg exporterbatcher.Config
batchCfg BatchConfig
workerPool chan struct{}
consumeFunc sender.SendFunc[request.Request]
stopWG sync.WaitGroup
Expand All @@ -34,10 +34,7 @@ type defaultBatcher struct {
shutdownCh chan struct{}
}

func newDefaultBatcher(batchCfg exporterbatcher.Config,
consumeFunc sender.SendFunc[request.Request],
maxWorkers int,
) *defaultBatcher {
func newDefaultBatcher(batchCfg BatchConfig, consumeFunc sender.SendFunc[request.Request], maxWorkers int) *defaultBatcher {
// TODO: Determine what is the right behavior for this in combination with async queue.
var workerPool chan struct{}
if maxWorkers != 0 {
Expand All @@ -56,7 +53,7 @@ func newDefaultBatcher(batchCfg exporterbatcher.Config,
}

func (qb *defaultBatcher) resetTimer() {
if qb.batchCfg.FlushTimeout != 0 {
if qb.batchCfg.FlushTimeout > 0 {
qb.timer.Reset(qb.batchCfg.FlushTimeout)
}
}
Expand All @@ -65,7 +62,7 @@ func (qb *defaultBatcher) Consume(ctx context.Context, req request.Request, done
qb.currentBatchMu.Lock()

if qb.currentBatch == nil {
reqList, mergeSplitErr := req.MergeSplit(ctx, qb.batchCfg.SizeConfig, nil)
reqList, mergeSplitErr := req.MergeSplit(ctx, qb.batchCfg.MaxSize, exporterbatcher.SizerTypeItems, nil)
if mergeSplitErr != nil || len(reqList) == 0 {
done.OnDone(mergeSplitErr)
qb.currentBatchMu.Unlock()
Expand Down Expand Up @@ -99,7 +96,7 @@ func (qb *defaultBatcher) Consume(ctx context.Context, req request.Request, done
return
}

reqList, mergeSplitErr := qb.currentBatch.req.MergeSplit(ctx, qb.batchCfg.SizeConfig, req)
reqList, mergeSplitErr := qb.currentBatch.req.MergeSplit(ctx, qb.batchCfg.MaxSize, exporterbatcher.SizerTypeItems, req)
// If failed to merge signal all Done callbacks from current batch as well as the current request and reset the current batch.
if mergeSplitErr != nil || len(reqList) == 0 {
done.OnDone(mergeSplitErr)
Expand Down Expand Up @@ -174,7 +171,7 @@ func (qb *defaultBatcher) startTimeBasedFlushingGoroutine() {

// Start starts the goroutine that reads from the queue and flushes asynchronously.
func (qb *defaultBatcher) Start(_ context.Context, _ component.Host) error {
if qb.batchCfg.FlushTimeout != 0 {
if qb.batchCfg.FlushTimeout > 0 {
qb.timer = time.NewTimer(qb.batchCfg.FlushTimeout)
qb.startTimeBasedFlushingGoroutine()
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,6 @@ import (
"github.com/stretchr/testify/require"

"go.opentelemetry.io/collector/component/componenttest"
"go.opentelemetry.io/collector/exporter/exporterbatcher"
"go.opentelemetry.io/collector/exporter/exporterhelper/internal/requesttest"
)

Expand All @@ -35,12 +34,9 @@ func TestDefaultBatcher_NoSplit_MinThresholdZero_TimeoutDisabled(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
cfg := exporterbatcher.NewDefaultConfig()
cfg.Enabled = true
cfg.FlushTimeout = 0
cfg.SizeConfig = exporterbatcher.SizeConfig{
Sizer: exporterbatcher.SizerTypeItems,
MinSize: 0,
cfg := BatchConfig{
FlushTimeout: 0,
MinSize: 0,
}

sink := requesttest.NewSink()
Expand Down Expand Up @@ -85,12 +81,9 @@ func TestDefaultBatcher_NoSplit_TimeoutDisabled(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
cfg := exporterbatcher.NewDefaultConfig()
cfg.Enabled = true
cfg.FlushTimeout = 0
cfg.SizeConfig = exporterbatcher.SizeConfig{
Sizer: exporterbatcher.SizerTypeItems,
MinSize: 10,
cfg := BatchConfig{
FlushTimeout: 0,
MinSize: 10,
}

sink := requesttest.NewSink()
Expand Down Expand Up @@ -150,12 +143,9 @@ func TestDefaultBatcher_NoSplit_WithTimeout(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
cfg := exporterbatcher.NewDefaultConfig()
cfg.Enabled = true
cfg.FlushTimeout = 50 * time.Millisecond
cfg.SizeConfig = exporterbatcher.SizeConfig{
Sizer: exporterbatcher.SizerTypeItems,
MinSize: 100,
cfg := BatchConfig{
FlushTimeout: 50 * time.Millisecond,
MinSize: 100,
}

sink := requesttest.NewSink()
Expand Down Expand Up @@ -205,13 +195,10 @@ func TestDefaultBatcher_Split_TimeoutDisabled(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
cfg := exporterbatcher.NewDefaultConfig()
cfg.Enabled = true
cfg.FlushTimeout = 0
cfg.SizeConfig = exporterbatcher.SizeConfig{
Sizer: exporterbatcher.SizerTypeItems,
MinSize: 100,
MaxSize: 100,
cfg := BatchConfig{
FlushTimeout: 0,
MinSize: 100,
MaxSize: 100,
}

sink := requesttest.NewSink()
Expand Down Expand Up @@ -256,12 +243,13 @@ func TestDefaultBatcher_Split_TimeoutDisabled(t *testing.T) {
}

func TestDefaultBatcher_Shutdown(t *testing.T) {
batchCfg := exporterbatcher.NewDefaultConfig()
batchCfg.MinSize = 10
batchCfg.FlushTimeout = 100 * time.Second
cfg := BatchConfig{
FlushTimeout: 100 * time.Second,
MinSize: 10,
}

sink := requesttest.NewSink()
ba := newDefaultBatcher(batchCfg, sink.Export, 2)
ba := newDefaultBatcher(cfg, sink.Export, 2)
require.NoError(t, ba.Start(context.Background(), componenttest.NewNopHost()))

done := newFakeDone()
Expand All @@ -282,12 +270,14 @@ func TestDefaultBatcher_Shutdown(t *testing.T) {
}

func TestDefaultBatcher_MergeError(t *testing.T) {
batchCfg := exporterbatcher.NewDefaultConfig()
batchCfg.MinSize = 5
batchCfg.MaxSize = 7
cfg := BatchConfig{
FlushTimeout: 200 * time.Second,
MinSize: 5,
MaxSize: 7,
}

sink := requesttest.NewSink()
ba := newDefaultBatcher(batchCfg, sink.Export, 2)
ba := newDefaultBatcher(cfg, sink.Export, 2)

require.NoError(t, ba.Start(context.Background(), componenttest.NewNopHost()))
t.Cleanup(func() {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,6 @@ import (

"go.opentelemetry.io/collector/component"
"go.opentelemetry.io/collector/exporter/exporterhelper/internal/experr"
"go.opentelemetry.io/collector/exporter/exporterqueue"
"go.opentelemetry.io/collector/extension/xextension/storage"
"go.opentelemetry.io/collector/pipeline"
)
Expand Down Expand Up @@ -50,7 +49,7 @@ type persistentQueueSettings[T any] struct {
blocking bool
signal pipeline.Signal
storageID component.ID
encoding exporterqueue.Encoding[T]
encoding Encoding[T]
id component.ID
telemetry component.TelemetrySettings
}
Expand Down
8 changes: 8 additions & 0 deletions exporter/exporterhelper/internal/queuebatch/queue.go
Original file line number Diff line number Diff line change
Expand Up @@ -10,6 +10,14 @@ import (
"go.opentelemetry.io/collector/component"
)

// Encoding defines how a request of type T is serialized to and
// deserialized from bytes (e.g. by the persistent queue, which stores
// encoded requests via its settings' encoding).
type Encoding[T any] interface {
	// Marshal is a function that can marshal a request into bytes.
	Marshal(T) ([]byte, error)

	// Unmarshal is a function that can unmarshal bytes into a request.
	Unmarshal([]byte) (T, error)
}

// ErrQueueIsFull is the error returned when an item is offered to the Queue and the queue is full and setup to
// not block.
// Experimental: This API is at the early stage of development and may change without backward compatibility
Expand Down
44 changes: 21 additions & 23 deletions exporter/exporterhelper/internal/queuebatch/queue_batch.go
Original file line number Diff line number Diff line change
Expand Up @@ -11,7 +11,6 @@ import (
"go.opentelemetry.io/collector/exporter/exporterbatcher"
"go.opentelemetry.io/collector/exporter/exporterhelper/internal/request"
"go.opentelemetry.io/collector/exporter/exporterhelper/internal/sender"
"go.opentelemetry.io/collector/exporter/exporterqueue"
"go.opentelemetry.io/collector/pipeline"
)

Expand All @@ -20,7 +19,7 @@ type Settings[K any] struct {
Signal pipeline.Signal
ID component.ID
Telemetry component.TelemetrySettings
Encoding exporterqueue.Encoding[K]
Encoding Encoding[K]
Sizers map[exporterbatcher.SizerType]Sizer[K]
}

Expand All @@ -31,20 +30,17 @@ type QueueBatch struct {

func NewQueueBatch(
qSet Settings[request.Request],
qCfg exporterqueue.Config,
bCfg exporterbatcher.Config,
cfg Config,
next sender.SendFunc[request.Request],
) (*QueueBatch, error) {
var b Batcher[request.Request]
switch bCfg.Enabled {
case false:
switch {
case cfg.Batch == nil:
b = newDisabledBatcher[request.Request](next)
default:
b = newDefaultBatcher(bCfg, next, qCfg.NumConsumers)
}
// TODO: https://github.com/open-telemetry/opentelemetry-collector/issues/12244
if bCfg.Enabled {
qCfg.NumConsumers = 1
// TODO: https://github.com/open-telemetry/opentelemetry-collector/issues/12244
cfg.NumConsumers = 1
b = newDefaultBatcher(*cfg.Batch, next, cfg.NumConsumers)
}

sizer, ok := qSet.Sizers[exporterbatcher.SizerTypeRequests]
Expand All @@ -54,25 +50,25 @@ func NewQueueBatch(

var q Queue[request.Request]
switch {
case !qCfg.Enabled:
case cfg.WaitForResult:
q = newDisabledQueue(b.Consume)
case qCfg.StorageID != nil:
case cfg.StorageID != nil:
q = newAsyncQueue(newPersistentQueue[request.Request](persistentQueueSettings[request.Request]{
sizer: sizer,
capacity: int64(qCfg.QueueSize),
blocking: qCfg.Blocking,
capacity: int64(cfg.QueueSize),
blocking: cfg.BlockOnOverflow,
signal: qSet.Signal,
storageID: *qCfg.StorageID,
storageID: *cfg.StorageID,
encoding: qSet.Encoding,
id: qSet.ID,
telemetry: qSet.Telemetry,
}), qCfg.NumConsumers, b.Consume)
}), cfg.NumConsumers, b.Consume)
default:
q = newAsyncQueue(newMemoryQueue[request.Request](memoryQueueSettings[request.Request]{
sizer: sizer,
capacity: int64(qCfg.QueueSize),
blocking: qCfg.Blocking,
}), qCfg.NumConsumers, b.Consume)
capacity: int64(cfg.QueueSize),
blocking: cfg.BlockOnOverflow,
}), cfg.NumConsumers, b.Consume)
}

oq, err := newObsQueue(qSet, q)
Expand All @@ -85,11 +81,13 @@ func NewQueueBatch(

// Start is invoked during service startup.
func (qs *QueueBatch) Start(ctx context.Context, host component.Host) error {
if err := qs.queue.Start(ctx, host); err != nil {
if err := qs.batcher.Start(ctx, host); err != nil {
return err
}

return qs.batcher.Start(ctx, host)
if err := qs.queue.Start(ctx, host); err != nil {
return errors.Join(err, qs.batcher.Shutdown(ctx))
}
return nil
}

// Shutdown is invoked during service shutdown.
Expand Down
Loading
Loading