@@ -28,7 +28,6 @@ const (
 	readIndexKey                = "ri"
 	writeIndexKey               = "wi"
 	currentlyDispatchedItemsKey = "di"
-	queueSizeKey                = "si"
 
 	// queueMetadataKey is the new single key for all queue metadata.
 	// TODO: Enable when https://github.com/open-telemetry/opentelemetry-collector/issues/12890 is done
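With `queueSizeKey = "si"` gone, only the read index, write index, and currently-dispatched items are persisted; there is no longer a dedicated size snapshot in storage. As a minimal, self-contained sketch of how a uint64 index round-trips through a byte-oriented store (the real `itemIndexToBytes`/`bytesToItemIndex` helpers are outside this hunk, so the little-endian encoding here is an assumption, not the collector's confirmed format):

```go
package main

import (
	"encoding/binary"
	"errors"
	"fmt"
)

// indexToBytes encodes a queue index as the raw bytes a key/value store expects.
// Assumption: 8-byte little-endian; the collector's actual helpers may differ.
func indexToBytes(i uint64) []byte {
	buf := make([]byte, 8)
	binary.LittleEndian.PutUint64(buf, i)
	return buf
}

func bytesToIndex(b []byte) (uint64, error) {
	if len(b) != 8 {
		return 0, errors.New("invalid index payload")
	}
	return binary.LittleEndian.Uint64(b), nil
}

func main() {
	store := map[string][]byte{}         // stand-in for a storage client
	store["wi"] = indexToBytes(42)       // persist the write index
	wi, err := bytesToIndex(store["wi"]) // restore it on startup
	fmt.Println(wi, err)                 // 42 <nil>
}
```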
@@ -88,9 +87,6 @@ type persistentQueue[T any] struct {
 	logger *zap.Logger
 	client storage.Client
 
-	// isRequestSized indicates whether the queue is sized by the number of requests.
-	isRequestSized bool
-
 	// mu guards everything declared below.
 	mu              sync.Mutex
 	hasMoreElements *sync.Cond
@@ -102,11 +98,9 @@ type persistentQueue[T any] struct {
 
 // newPersistentQueue creates a new queue backed by file storage; name and signal must be a unique combination that identifies the queue storage
 func newPersistentQueue[T any](set persistentQueueSettings[T]) readableQueue[T] {
-	_, isRequestSized := set.sizer.(request.RequestsSizer[T])
 	pq := &persistentQueue[T]{
-		set:            set,
-		logger:         set.telemetry.Logger,
-		isRequestSized: isRequestSized,
+		set:    set,
+		logger: set.telemetry.Logger,
 	}
 	pq.hasMoreElements = sync.NewCond(&pq.mu)
 	pq.hasMoreSpace = newCond(&pq.mu)
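The constructor no longer needs to know whether the queue is request-sized, so the type assertion against `request.RequestsSizer[T]` is dropped along with the `isRequestSized` field. For reference, a self-contained sketch of that detection pattern, using hypothetical local types rather than the collector's real sizer interfaces:

```go
package main

import "fmt"

// Sizer reports the size contribution of an element. RequestsSizer is a
// hypothetical "count requests" flavor, mirroring the removed check.
type Sizer[T any] interface{ Sizeof(T) int64 }

type RequestsSizer[T any] struct{}

func (RequestsSizer[T]) Sizeof(T) int64 { return 1 } // every request counts as 1

type bytesSizer struct{}

func (bytesSizer) Sizeof(s string) int64 { return int64(len(s)) }

func main() {
	var s Sizer[string] = RequestsSizer[string]{}
	_, isRequestSized := s.(RequestsSizer[string]) // the pattern the constructor dropped
	fmt.Println(isRequestSized)                    // true

	s = bytesSizer{}
	_, isRequestSized = s.(RequestsSizer[string])
	fmt.Println(isRequestSized) // false
}
```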
@@ -165,33 +159,8 @@ func (pq *persistentQueue[T]) initPersistentContiguousStorage(ctx context.Context) {
 		pq.metadata.WriteIndex = 0
 	}
 
-	queueSize := pq.metadata.WriteIndex - pq.metadata.ReadIndex
-
-	// If the queue is sized by the number of requests, no need to read the queue size from storage.
-	if queueSize > 0 && !pq.isRequestSized {
-		if restoredQueueSize, err := pq.restoreQueueSizeFromStorage(ctx); err == nil {
-			queueSize = restoredQueueSize
-		}
-	}
 	//nolint:gosec
-	pq.metadata.QueueSize = int64(queueSize)
-}
-
-// restoreQueueSizeFromStorage restores the queue size from storage.
-func (pq *persistentQueue[T]) restoreQueueSizeFromStorage(ctx context.Context) (uint64, error) {
-	val, err := pq.client.Get(ctx, queueSizeKey)
-	if err != nil {
-		if errors.Is(err, errValueNotSet) {
-			pq.logger.Warn("Cannot read the queue size snapshot from storage. "+
-				"The reported queue size will be inaccurate until the initial queue is drained. "+
-				"It's expected when the items sized queue enabled for the first time", zap.Error(err))
-		} else {
-			pq.logger.Error("Failed to read the queue size snapshot from storage. "+
-				"The reported queue size will be inaccurate until the initial queue is drained.", zap.Error(err))
-		}
-		return 0, err
-	}
-	return bytesToItemIndex(val)
+	pq.metadata.QueueSize = int64(pq.metadata.WriteIndex - pq.metadata.ReadIndex)
 }
 
 func (pq *persistentQueue[T]) Shutdown(ctx context.Context) error {
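The whole snapshot/restore path collapses into one line: on startup the queue size is simply the distance between the persisted write and read indexes. That is exact when every request counts as one unit, and an approximation for item- or byte-sized queues until the old backlog drains, which appears to be the trade-off this change accepts. A minimal sketch of the recovery arithmetic, including the unsigned-to-signed conversion that the `//nolint:gosec` annotation covers:

```go
package main

import "fmt"

// recoveredQueueSize mirrors the new initialization logic: the backlog is the
// number of not-yet-read elements, derived purely from the two indexes.
func recoveredQueueSize(writeIndex, readIndex uint64) int64 {
	// writeIndex >= readIndex is a queue invariant; the uint64 -> int64
	// conversion is what gosec would flag, hence the //nolint in the real code.
	return int64(writeIndex - readIndex)
}

func main() {
	fmt.Println(recoveredQueueSize(160, 153)) // 7 elements still queued
	fmt.Println(recoveredQueueSize(10, 10))   // 0, queue fully drained
}
```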
@@ -202,24 +171,10 @@ func (pq *persistentQueue[T]) Shutdown(ctx context.Context) error {
 
 	pq.mu.Lock()
 	defer pq.mu.Unlock()
-	backupErr := pq.backupQueueSize(ctx)
 	// Mark this queue as stopped, so consumer don't start any more work.
 	pq.stopped = true
 	pq.hasMoreElements.Broadcast()
-	return errors.Join(backupErr, pq.unrefClient(ctx))
-}
-
-// backupQueueSize writes the current queue size to storage. The value is used to recover the queue size
-// in case if the collector is killed.
-func (pq *persistentQueue[T]) backupQueueSize(ctx context.Context) error {
-	// No need to write the queue size if the queue is sized by the number of requests.
-	// That information is already stored as difference between read and write indexes.
-	if pq.isRequestSized {
-		return nil
-	}
-
-	//nolint:gosec
-	return pq.client.Set(ctx, queueSizeKey, itemIndexToBytes(uint64(pq.metadata.QueueSize)))
+	return pq.unrefClient(ctx)
 }
 
 // unrefClient unrefs the client, and closes if no more references. Callers MUST hold the mutex.
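Shutdown no longer writes a size snapshot; it only flips `stopped` under the mutex, broadcasts on `hasMoreElements` so blocked readers wake up and observe the flag, and releases the storage client. A self-contained sketch of that stop-flag-plus-broadcast pattern (a simplified model, not the collector's full implementation):

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

type queue struct {
	mu      sync.Mutex
	cond    *sync.Cond // plays the role of hasMoreElements
	items   []string
	stopped bool
}

// read blocks until an item arrives or the queue is stopped.
func (q *queue) read() (string, bool) {
	q.mu.Lock()
	defer q.mu.Unlock()
	for len(q.items) == 0 && !q.stopped {
		q.cond.Wait()
	}
	if len(q.items) == 0 {
		return "", false // woken by shutdown, nothing left to do
	}
	item := q.items[0]
	q.items = q.items[1:]
	return item, true
}

// shutdown marks the queue stopped and wakes every blocked reader.
func (q *queue) shutdown() {
	q.mu.Lock()
	defer q.mu.Unlock()
	q.stopped = true
	q.cond.Broadcast()
}

func main() {
	q := &queue{}
	q.cond = sync.NewCond(&q.mu)
	go func() {
		time.Sleep(50 * time.Millisecond)
		q.shutdown()
	}()
	_, ok := q.read()
	fmt.Println("read ok:", ok) // false: unblocked by shutdown, not by data
}
```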
@@ -271,14 +226,6 @@ func (pq *persistentQueue[T]) putInternal(ctx context.Context, req T) error {
 	pq.metadata.QueueSize += reqSize
 	pq.hasMoreElements.Signal()
 
-	// Back up the queue size to storage every 10 writes. The stored value is used to recover the queue size
-	// in case if the collector is killed. The recovered queue size is allowed to be inaccurate.
-	if (pq.metadata.WriteIndex % 10) == 5 {
-		if err := pq.backupQueueSize(ctx); err != nil {
-			pq.logger.Error("Error writing queue size to storage", zap.Error(err))
-		}
-	}
-
 	return nil
 }
 
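For context on what is removed here: the old write path persisted the size snapshot only when `WriteIndex % 10 == 5`, while the read path (removed in a later hunk below) did so when `ReadIndex % 10 == 0`, staggering the two so a roughly accurate value survived a crash without a storage write on every operation. A tiny sketch just to make that cadence visible:

```go
package main

import "fmt"

func main() {
	// Which indexes would have triggered a size snapshot in the removed code.
	for i := uint64(0); i < 12; i++ {
		writeBackup := i%10 == 5 // old putInternal condition
		readBackup := i%10 == 0  // old onDone condition
		fmt.Printf("index %2d: backup on write=%v, backup on read=%v\n", i, writeBackup, readBackup)
	}
}
```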
@@ -298,13 +245,14 @@ func (pq *persistentQueue[T]) Read(ctx context.Context) (context.Context, T, Done, bool) {
 		// Ensure the used size and the channel size are in sync.
 		if pq.metadata.ReadIndex == pq.metadata.WriteIndex {
 			pq.metadata.QueueSize = 0
-			pq.hasMoreSpace.Signal()
 		}
 		if consumed {
 			id := indexDonePool.Get().(*indexDone)
 			id.reset(index, pq.set.sizer.Sizeof(req), pq)
 			return reqCtx, req, id, true
 		}
+		// More space available, data was dropped.
+		pq.hasMoreSpace.Signal()
 	}
 
 	// TODO: Need to change the Queue interface to return an error to allow distinguish between shutdown and context canceled.
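`hasMoreSpace` is now signaled whenever an element leaves the queue, including the case where the stored request could not be consumed and was dropped, rather than only when the read and write indexes meet. A self-contained sketch of why that matters for a bounded queue: a producer waiting for capacity must be woken when any slot frees up, dropped or consumed alike (a simplified model, not the collector's code):

```go
package main

import (
	"fmt"
	"sync"
)

type bounded struct {
	mu           sync.Mutex
	hasMoreSpace *sync.Cond
	size         int64
	capacity     int64
}

// put blocks while the queue is full; it relies on readers signaling
// hasMoreSpace every time an element is removed, for any reason.
func (b *bounded) put() {
	b.mu.Lock()
	defer b.mu.Unlock()
	for b.size >= b.capacity {
		b.hasMoreSpace.Wait()
	}
	b.size++
}

// drop models an element removed without being consumed (for example, it
// failed to deserialize); it still frees a slot, so it still signals.
func (b *bounded) drop() {
	b.mu.Lock()
	defer b.mu.Unlock()
	b.size--
	b.hasMoreSpace.Signal()
}

func main() {
	b := &bounded{capacity: 1, size: 1}
	b.hasMoreSpace = sync.NewCond(&b.mu)
	done := make(chan struct{})
	go func() { b.put(); close(done) }() // blocks until drop frees a slot
	b.drop()
	<-done
	fmt.Println("size:", b.size) // 1: the waiting producer took the freed slot
}
```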
@@ -369,7 +317,6 @@ func (pq *persistentQueue[T]) onDone(index uint64, elSize int64, consumeErr error) {
 	if pq.metadata.QueueSize < 0 {
 		pq.metadata.QueueSize = 0
 	}
-	pq.hasMoreSpace.Signal()
 
 	if experr.IsShutdownErr(consumeErr) {
 		// The queue is shutting down, don't mark the item as dispatched, so it's picked up again after restart.
@@ -381,13 +328,8 @@ func (pq *persistentQueue[T]) onDone(index uint64, elSize int64, consumeErr error) {
 		pq.logger.Error("Error deleting item from queue", zap.Error(err))
 	}
 
-	// Back up the queue size to storage on every 10 reads. The stored value is used to recover the queue size
-	// in case if the collector is killed. The recovered queue size is allowed to be inaccurate.
-	if (pq.metadata.ReadIndex % 10) == 0 {
-		if qsErr := pq.backupQueueSize(context.Background()); qsErr != nil {
-			pq.logger.Error("Error writing queue size to storage", zap.Error(qsErr))
-		}
-	}
+	// More space available after data are removed from the storage.
+	pq.hasMoreSpace.Signal()
 }
 
 // retrieveAndEnqueueNotDispatchedReqs gets the items for which sending was not finished, cleans the storage