@@ -4,13 +4,14 @@ import { nextTick } from "node:process";
 
 import { create, protoInt64, toJson } from "@bufbuild/protobuf";
 import { type Duration, DurationSchema, type Timestamp, timestampFromDate, timestampMs } from "@bufbuild/protobuf/wkt";
+import { abortable } from "@ydbjs/abortable";
 import { StatusIds_StatusCode } from "@ydbjs/api/operation";
 import { Codec, type OffsetsRange, OffsetsRangeSchema, type StreamReadMessage_CommitOffsetRequest_PartitionCommitOffset, StreamReadMessage_CommitOffsetRequest_PartitionCommitOffsetSchema, type StreamReadMessage_FromClient, StreamReadMessage_FromClientSchema, type StreamReadMessage_FromServer, StreamReadMessage_FromServerSchema, type StreamReadMessage_InitRequest_TopicReadSettings, StreamReadMessage_InitRequest_TopicReadSettingsSchema, type StreamReadMessage_ReadResponse, TopicServiceDefinition, TransactionIdentitySchema, UpdateOffsetsInTransactionRequestSchema } from "@ydbjs/api/topic";
 import type { Driver } from "@ydbjs/core";
+import { loggers } from "@ydbjs/debug";
 import { YDBError } from "@ydbjs/error";
 import { type RetryConfig, retry } from "@ydbjs/retry";
 import { backoff, combine, jitter } from "@ydbjs/retry/strategy";
-import debug from "debug";
 import type { StringValue } from "ms";
 import ms from "ms";
 
@@ -20,7 +21,7 @@ import { TopicMessage } from "./message.js";
 import { TopicPartitionSession } from "./partition-session.js";
 import type { TX } from "./tx.js";
 
-const dbg = debug('ydbjs').extend('topic').extend('reader')
+let dbg = loggers.topic.extend('reader')
 
 type FromClientEmitterMap = {
 	"message": [StreamReadMessage_FromClient]
@@ -197,23 +198,23 @@ export class TopicReader implements Disposable {
 
 		let dbgrpc = dbg.extend('grpc')
 		this.#fromClientEmitter.on('message', (msg) => {
-			dbgrpc('%s %o', msg.$typeName, toJson(StreamReadMessage_FromClientSchema, msg))
+			dbgrpc.log('%s %o', msg.$typeName, toJson(StreamReadMessage_FromClientSchema, msg))
 		})
 
 		// Log all messages from server.
 		this.#fromServerEmitter.on('message', (msg) => {
-			dbgrpc('%s %o', msg.$typeName, toJson(StreamReadMessage_FromServerSchema, msg))
+			dbgrpc.log('%s %o', msg.$typeName, toJson(StreamReadMessage_FromServerSchema, msg))
 		})
 
 		// Handle messages from server.
 		this.#fromServerEmitter.on('message', async (message) => {
 			if (this.#disposed) {
-				dbg('error: receive "%s" after dispose', message.serverMessage.value?.$typeName)
+				dbg.log('error: receive "%s" after dispose', message.serverMessage.value?.$typeName)
 				return
 			}
 
 			if (message.serverMessage.case === 'initResponse') {
-				dbg(`read session identifier: %s`, message.serverMessage.value.sessionId);
+				dbg.log('read session identifier: %s', message.serverMessage.value.sessionId)
 
 				this.#readMore(this.#freeBufferSize)
 			}
@@ -222,7 +223,7 @@ export class TopicReader implements Disposable {
 				assert.ok(message.serverMessage.value.partitionSession, 'startPartitionSessionRequest must have partitionSession');
 				assert.ok(message.serverMessage.value.partitionOffsets, 'startPartitionSessionRequest must have partitionOffsets');
 
-				dbg('receive partition with id %s', message.serverMessage.value.partitionSession.partitionId);
+				dbg.log('receive partition with id %s', message.serverMessage.value.partitionSession.partitionId);
 
 				// Create a new partition session.
 				let partitionSession: TopicPartitionSession = new TopicPartitionSession(
@@ -244,7 +245,7 @@ export class TopicReader implements Disposable {
 				let partitionOffsets = message.serverMessage.value.partitionOffsets;
 
 				let response = await this.#options.onPartitionSessionStart(partitionSession, committedOffset, partitionOffsets).catch((error) => {
-					dbg('error: onPartitionSessionStart error: %O', error);
+					dbg.log('error: onPartitionSessionStart error: %O', error);
 					this.#fromClientEmitter.emit('error', error);
 
 					return undefined;
@@ -273,22 +274,22 @@ export class TopicReader implements Disposable {
 
 				let partitionSession = this.#partitionSessions.get(message.serverMessage.value.partitionSessionId);
 				if (!partitionSession) {
-					dbg('error: stopPartitionSessionRequest for unknown partitionSessionId=%s', message.serverMessage.value.partitionSessionId);
+					dbg.log('error: stopPartitionSessionRequest for unknown partitionSessionId=%s', message.serverMessage.value.partitionSessionId);
 					return;
 				}
 
 				if (this.#options.onPartitionSessionStop) {
 					let committedOffset = message.serverMessage.value.committedOffset || 0n;
 
 					await this.#options.onPartitionSessionStop(partitionSession, committedOffset).catch((err) => {
-						dbg('error: onPartitionSessionStop error: %O', err);
+						dbg.log('error: onPartitionSessionStop error: %O', err);
 						this.#fromClientEmitter.emit('error', err);
 					});
 				}
 
 				// If graceful stop is not requested, we can stop the partition session immediately.
 				if (!message.serverMessage.value.graceful) {
-					dbg('stop partition session %s without graceful stop', partitionSession.partitionSessionId);
+					dbg.log('stop partition session %s without graceful stop', partitionSession.partitionSessionId);
 					partitionSession.stop();
 
 					// Remove all messages from the buffer that belong to this partition session.
@@ -354,7 +355,7 @@ export class TopicReader implements Disposable {
 
 				let partitionSession = this.#partitionSessions.get(message.serverMessage.value.partitionSessionId);
 				if (!partitionSession) {
-					dbg('error: endPartitionSession for unknown partitionSessionId=%s', message.serverMessage.value.partitionSessionId);
+					dbg.log('error: endPartitionSession for unknown partitionSessionId=%s', message.serverMessage.value.partitionSessionId);
 					return;
 				}
 
@@ -368,7 +369,7 @@ export class TopicReader implements Disposable {
 				for (let part of message.serverMessage.value.partitionsCommittedOffsets) {
 					let partitionSession = this.#partitionSessions.get(part.partitionSessionId);
 					if (!partitionSession) {
-						dbg('error: commitOffsetResponse for unknown partitionSessionId=%s', part.partitionSessionId);
+						dbg.log('error: commitOffsetResponse for unknown partitionSessionId=%s', part.partitionSessionId);
 						continue;
 					}
 
@@ -427,32 +428,32 @@ export class TopicReader implements Disposable {
 			budget: Infinity,
 			strategy: combine(jitter(50), backoff(50, 5000)),
 			retry(error) {
-				dbg('retrying stream read due to %O', error);
+				dbg.log('retrying stream read due to %O', error);
 				return true;
 			},
 		}
 
 		try {
 			// TODO: handle user errors (for example tx errors). Ex: use abort signal
-			await retry(retryConfig, async () => {
+			await retry(retryConfig, async (signal) => {
 				using outgoing = new AsyncEventEmitter<StreamReadMessage_FromClient>(this.#fromClientEmitter, 'message')
 
-				dbg('connecting to the stream with consumer %s', this.#options.consumer);
+				dbg.log('connecting to the stream with consumer %s', this.#options.consumer);
 
 				let stream = this.#driver
 					.createClient(TopicServiceDefinition)
 					.streamRead(outgoing, { signal });
 
 				// If we have buffered messages, we need to clear them before connecting to the stream.
 				if (this.#buffer.length) {
-					dbg('has %d messages in the buffer before connecting to the stream, clearing it', this.#buffer.length);
+					dbg.log('has %d messages in the buffer before connecting to the stream, clearing it', this.#buffer.length);
 					this.#buffer.length = 0; // Clear the buffer before connecting to the stream
 					this.#freeBufferSize = this.#maxBufferSize; // Reset free buffer size
 				}
 
 				// Stop all partition sessions before connecting to the stream
 				if (this.#partitionSessions.size) {
-					dbg('has %d partition sessions before connecting to the stream, stopping them', this.#partitionSessions.size);
+					dbg.log('has %d partition sessions before connecting to the stream, stopping them', this.#partitionSessions.size);
 
 					for (let partitionSession of this.#partitionSessions.values()) {
 						partitionSession.stop();
@@ -463,7 +464,7 @@ export class TopicReader implements Disposable {
 
 				// If we have pending commits, we need to reject and drop them before connecting to the stream.
 				if (this.#pendingCommits.size) {
-					dbg('has pending commits, before connecting to the stream, rejecting them');
+					dbg.log('has pending commits, before connecting to the stream, rejecting them');
 
 					for (let [partitionSessionId, pendingCommits] of this.#pendingCommits) {
 						for (let commit of pendingCommits) {
@@ -492,16 +493,19 @@ export class TopicReader implements Disposable {
 
 					if (event.status !== StatusIds_StatusCode.SUCCESS) {
 						let error = new YDBError(event.status, event.issues)
-						dbg('received error from server: %s', error.message);
+						dbg.log('received error from server: %s', error.message);
 						throw error;
 					}
 
 					this.#fromServerEmitter.emit('message', event);
 				}
 			});
 		} catch (error) {
-			dbg('error: %O', error);
+			if (error instanceof Error && error.name === 'AbortError') {
+				return
+			}
 
+			dbg.log('error: %O', error);
 			this.#fromServerEmitter.emit('error', error);
 		} finally {
 			this.#fromServerEmitter.emit('end');
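
Note on the retry/abort change above: the retry callback now receives the per-attempt abort `signal` and forwards it to `streamRead`, and an `AbortError` that escapes the retry loop is treated as a normal shutdown rather than re-emitted as a reader error. A minimal sketch of that pattern, assuming `retry` from `@ydbjs/retry` hands an `AbortSignal` to its callback as the call above suggests; `runStream` is a hypothetical stand-in for the stream-consuming body.

```ts
import { retry, type RetryConfig } from "@ydbjs/retry"

// Sketch of the abort-aware retry loop used above. The signal presumably
// comes from the retry helper; aborting it cancels the in-flight gRPC read.
async function readLoop(
	retryConfig: RetryConfig,
	runStream: (signal: AbortSignal) => Promise<void>, // hypothetical stand-in for streamRead consumption
): Promise<void> {
	try {
		await retry(retryConfig, async (signal) => {
			await runStream(signal)
		})
	} catch (error) {
		// Cancellation is an expected way to stop reading, not a failure to report.
		if (error instanceof Error && error.name === 'AbortError') {
			return
		}
		throw error
	}
}
```
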
@@ -583,11 +587,11 @@ export class TopicReader implements Disposable {
 	 */
 	#readMore(bytes: bigint): void {
 		if (this.#disposed) {
-			dbg('error: readMore called after dispose');
+			dbg.log('error: readMore called after dispose');
 			return;
 		}
 
-		dbg('request read next %d bytes', bytes);
+		dbg.log('request read next %d bytes', bytes);
 		this.#fromClientEmitter.emit("message", create(StreamReadMessage_FromClientSchema, {
 			clientMessage: {
 				case: 'readRequest',
@@ -643,17 +647,22 @@ export class TopicReader implements Disposable {
 			throw new Error('Read aborted', { cause: signal.reason });
 		}
 
+		let ready = false;
 		let active = true;
 		let messageHandler = (message: StreamReadMessage_FromServer) => {
 			if (signal.aborted) {
 				return;
 			}
 
+			if (message.serverMessage.case === 'initResponse' && message.status === StatusIds_StatusCode.SUCCESS) {
+				ready = true;
+			}
+
 			if (message.serverMessage.case != 'readResponse') {
 				return;
 			}
 
-			dbg('reader received %d bytes', message.serverMessage.value.bytesSize);
+			dbg.log('reader received %d bytes', message.serverMessage.value.bytesSize);
 
 			this.#buffer.push(message.serverMessage.value);
 			this.#freeBufferSize -= message.serverMessage.value.bytesSize;
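
Note: `ready` is set once the server acknowledges the init request, but nothing in this commit reads it yet; together with the TODO added in the next hunk it looks like groundwork for distinguishing a `waitMs` timeout that fires before the read session is initialized. A hypothetical guard along those lines (not part of this commit):

```ts
// Hypothetical follow-up, not in this commit: only treat a waitMs timeout as
// "no messages available" once a successful initResponse has been observed.
function shouldStopWaiting(ready: boolean, waitTimedOut: boolean, aborted: boolean): boolean {
	return aborted || (ready && waitTimedOut)
}
```
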
@@ -724,6 +733,7 @@ export class TopicReader implements Disposable {
 			let waiter = Promise.withResolvers()
 			this.#fromServerEmitter.once('message', waiter.resolve)
 
+			// TODO: process cases then waitMs aborted earlier when read session is ready
 			await Promise.race([
 				waiter.promise,
 				once(signal, 'abort'),
@@ -787,12 +797,12 @@ export class TopicReader implements Disposable {
 
 			let partitionSession = this.#partitionSessions.get(pd.partitionSessionId);
 			if (!partitionSession) {
-				dbg('error: readResponse for unknown partitionSessionId=%s', pd.partitionSessionId);
+				dbg.log('error: readResponse for unknown partitionSessionId=%s', pd.partitionSessionId);
 				continue;
 			}
 
 			if (partitionSession.isStopped) {
-				dbg('error: readResponse for stopped partitionSessionId=%s', pd.partitionSessionId);
+				dbg.log('error: readResponse for stopped partitionSessionId=%s', pd.partitionSessionId);
 				continue;
 			}
 
@@ -809,16 +819,16 @@ export class TopicReader implements Disposable {
 					let payload = msg.data;
 					if (batch.codec !== Codec.UNSPECIFIED) {
 						if (!this.#codecs.has(batch.codec)) {
-							dbg('error: codec %s is not supported', batch.codec);
+							dbg.log('error: codec %s is not supported', batch.codec);
 							throw new Error(`Codec ${batch.codec} is not supported`);
 						}
 
 						// Decompress the message data using the provided decompress function
 						try {
 							// eslint-disable-next-line no-await-in-loop
-							payload = await this.#codecs.get(batch.codec)!.decompress(msg.data);
+							payload = this.#codecs.get(batch.codec)!.decompress(msg.data);
 						} catch (error) {
-							dbg('error: decompression failed for message with codec %s: %O', batch.codec, error);
+							dbg.log('error: decompression failed for message with codec %s: %O', batch.codec, error);
 
 							throw new Error(`Decompression failed for message with codec ${batch.codec}`, { cause: error });
 						}
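
Note on the codec change above: the `await` in front of `decompress` is dropped while the `no-await-in-loop` suppression stays in place. The new call site therefore assumes a synchronous codec along the lines of the sketch below (inferred from this diff, not from the actual `@ydbjs/topic` codec type); if `decompress` can still return a `Promise`, `payload` would now hold a pending promise instead of bytes, and this spot deserves a second look.

```ts
// Codec shape inferred from the updated call site above; the real interface
// may still be async, in which case the dropped await changes behaviour.
interface SyncTopicCodec {
	decompress(data: Uint8Array): Uint8Array
}
```
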
@@ -864,10 +874,10 @@ export class TopicReader implements Disposable {
 				}
 			}
 
-			dbg('return %d messages, buffer size is %d bytes, free buffer size is %d bytes', messages.length, this.#maxBufferSize - this.#freeBufferSize, this.#freeBufferSize);
+			dbg.log('return %d messages, buffer size is %d bytes, free buffer size is %d bytes', messages.length, this.#maxBufferSize - this.#freeBufferSize, this.#freeBufferSize);
 
 			if (releasableBufferBytes > 0n) {
-				dbg('releasing %d bytes from buffer', releasableBufferBytes);
+				dbg.log('releasing %d bytes from buffer', releasableBufferBytes);
 				this.#freeBufferSize += releasableBufferBytes;
 				this.#readMore(releasableBufferBytes);
 			}
@@ -1119,7 +1129,7 @@ export class TopicReader implements Disposable {
 
 		// Convert our optimized Map structure into the API's expected format
 		for (let [partitionSessionId, partOffsets] of offsets.entries()) {
-			dbg('committing offsets for partition session %s: %o', partitionSessionId, partOffsets);
+			dbg.log('committing offsets for partition session %s: %o', partitionSessionId, partOffsets);
 
 			commitOffsets.push(create(StreamReadMessage_CommitOffsetRequest_PartitionCommitOffsetSchema, {
 				partitionSessionId,
@@ -1168,7 +1178,7 @@ export class TopicReader implements Disposable {
 			return; // Already disposed, nothing to do
 		}
 		this.#disposed = true;
-		dbg('disposing TopicReader for consumer %s', this.#options.consumer);
+		dbg.log('disposing TopicReader for consumer %s', this.#options.consumer);
 
 		this.#buffer.length = 0 // Clear the buffer to release memory
 		this.#freeBufferSize = this.#maxBufferSize; // Reset free buffer size to max buffer size