@@ -7,24 +7,29 @@ import (
 	"errors"
 	"math"
 	"sort"
+	"sync/atomic"
 
 	"github.com/prometheus/prometheus/prompb"
 )
 
 type batchTimeSeriesState struct {
 	// Track batch sizes sent to avoid over allocating huge buffers.
 	// This helps in the case where large batches are sent to avoid allocating too much unused memory
-	nextTimeSeriesBufferSize     int
-	nextMetricMetadataBufferSize int
-	nextRequestBufferSize        int
+	nextTimeSeriesBufferSize     atomic.Int64
+	nextMetricMetadataBufferSize atomic.Int64
+	nextRequestBufferSize        atomic.Int64
 }
 
-func newBatchTimeSericesState() batchTimeSeriesState {
-	return batchTimeSeriesState{
-		nextTimeSeriesBufferSize:     math.MaxInt,
-		nextMetricMetadataBufferSize: math.MaxInt,
-		nextRequestBufferSize:        0,
+func newBatchTimeSericesState() *batchTimeSeriesState {
+	state := &batchTimeSeriesState{
+		nextTimeSeriesBufferSize:     atomic.Int64{},
+		nextMetricMetadataBufferSize: atomic.Int64{},
+		nextRequestBufferSize:        atomic.Int64{},
 	}
+	state.nextTimeSeriesBufferSize.Store(math.MaxInt64)
+	state.nextMetricMetadataBufferSize.Store(math.MaxInt64)
+	state.nextRequestBufferSize.Store(0)
+	return state
 }
 
 // batchTimeSeries splits series into multiple batch write requests.
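The constructor change is the subtle part of this hunk: sync/atomic.Int64 values must not be copied after first use (go vet's copylocks check flags such copies), so once the struct holds atomics it is built in place, seeded with Store, and handed out as *batchTimeSeriesState rather than by value. Below is a minimal, self-contained sketch of the same Load/Store pattern; hintState and newHintState are hypothetical names for illustration, not code from this PR.

package main

import (
	"fmt"
	"math"
	"sync/atomic"
)

// hintState stands in for batchTimeSeriesState: atomic size hints that
// concurrent callers can read (Load) and update (Store) without a mutex.
type hintState struct {
	nextBufferSize atomic.Int64
}

// Build the struct first, seed the atomic with Store, and return a pointer,
// because atomic values must not be copied after first use.
func newHintState() *hintState {
	s := &hintState{}
	s.nextBufferSize.Store(math.MaxInt64) // "no hint yet" sentinel
	return s
}

func main() {
	s := newHintState()

	// Reader side: clamp the hint to the input size, as batchTimeSeries does.
	fmt.Println(min(s.nextBufferSize.Load(), int64(25))) // prints 25

	// Writer side: remember twice the last batch size for the next call.
	s.nextBufferSize.Store(int64(max(10, 2*17)))
	fmt.Println(s.nextBufferSize.Load()) // prints 34
}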
@@ -34,22 +39,22 @@ func batchTimeSeries(tsMap map[string]*prompb.TimeSeries, maxBatchByteSize int,
 	}
 
 	// Allocate a buffer size of at least 10, or twice the last # of requests we sent
-	requests := make([]*prompb.WriteRequest, 0, max(10, state.nextRequestBufferSize))
+	requests := make([]*prompb.WriteRequest, 0, max(10, state.nextRequestBufferSize.Load()))
 
 	// Allocate a time series buffer 2x the last time series batch size or the length of the input if smaller
-	tsArray := make([]prompb.TimeSeries, 0, min(state.nextTimeSeriesBufferSize, len(tsMap)))
+	tsArray := make([]prompb.TimeSeries, 0, min(state.nextTimeSeriesBufferSize.Load(), int64(len(tsMap))))
 	sizeOfCurrentBatch := 0
 
 	i := 0
 	for _, v := range tsMap {
 		sizeOfSeries := v.Size()
 
 		if sizeOfCurrentBatch+sizeOfSeries >= maxBatchByteSize {
-			state.nextTimeSeriesBufferSize = max(10, 2*len(tsArray))
+			state.nextTimeSeriesBufferSize.Store(int64(max(10, 2*len(tsArray))))
 			wrapped := convertTimeseriesToRequest(tsArray)
 			requests = append(requests, wrapped)
 
-			tsArray = make([]prompb.TimeSeries, 0, min(state.nextTimeSeriesBufferSize, len(tsMap)-i))
+			tsArray = make([]prompb.TimeSeries, 0, min(state.nextTimeSeriesBufferSize.Load(), int64(len(tsMap)-i)))
 			sizeOfCurrentBatch = 0
 		}
 
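For intuition about the doubling heuristic above (illustrative numbers, not taken from this PR): with a maxBatchByteSize of 1 MB and series that serialize to roughly 2 KB each, the first batch flushes at about 500 series, so the stored hint becomes max(10, 2*500) = 1000; the next tsArray is then allocated with capacity min(1000, remaining input), which avoids repeated append-driven reallocations without pinning an oversized buffer across calls.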
@@ -64,18 +69,18 @@ func batchTimeSeries(tsMap map[string]*prompb.TimeSeries, maxBatchByteSize int,
 	}
 
 	// Allocate a metric metadata buffer 2x the last metric metadata batch size or the length of the input if smaller
-	mArray := make([]prompb.MetricMetadata, 0, min(state.nextMetricMetadataBufferSize, len(m)))
+	mArray := make([]prompb.MetricMetadata, 0, min(state.nextMetricMetadataBufferSize.Load(), int64(len(m))))
 	sizeOfCurrentBatch = 0
 	i = 0
 	for _, v := range m {
 		sizeOfM := v.Size()
 
 		if sizeOfCurrentBatch+sizeOfM >= maxBatchByteSize {
-			state.nextMetricMetadataBufferSize = max(10, 2*len(mArray))
+			state.nextMetricMetadataBufferSize.Store(int64(max(10, 2*len(mArray))))
 			wrapped := convertMetadataToRequest(mArray)
 			requests = append(requests, wrapped)
 
-			mArray = make([]prompb.MetricMetadata, 0, min(state.nextMetricMetadataBufferSize, len(m)-i))
+			mArray = make([]prompb.MetricMetadata, 0, min(state.nextMetricMetadataBufferSize.Load(), int64(len(m)-i)))
 			sizeOfCurrentBatch = 0
 		}
 
@@ -89,7 +94,7 @@ func batchTimeSeries(tsMap map[string]*prompb.TimeSeries, maxBatchByteSize int,
 		requests = append(requests, wrapped)
 	}
 
-	state.nextRequestBufferSize = 2 * len(requests)
+	state.nextRequestBufferSize.Store(int64(2 * len(requests)))
 	return requests, nil
 }
 
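Taken together, the hints are now treated as shared state: the constructor hands out a pointer and every read and write goes through atomic Load and Store, so if the exporter ever invokes this batching code from more than one goroutine with the same state, the race detector (go test -race) should no longer flag these fields, something the plain int fields could not guarantee. The cost is the handful of int64 conversions visible above, where slice lengths (int) meet the atomic hints (int64).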