@@ -4,6 +4,7 @@ use std::error::Error;
 use std::fmt::{Debug, Display};
 use std::io::Write;
 use std::mem;
+use std::sync::Arc;

 use flate2::write::GzEncoder;
 use flate2::Compression;
@@ -14,19 +15,26 @@ use hstreamdb_pb::{
     HStreamRecordHeader, ListShardsRequest, Shard,
 };
 use prost::Message;
+use tokio::sync::oneshot;
 use tokio::task::JoinHandle;
 use tonic::transport::Channel;

 use crate::channel_provider::Channels;
 use crate::common::{self, PartitionKey, Record, ShardId};
 use crate::utils::{self, clear_shard_buffer, lookup_shard, partition_key_to_shard_id};

+type ResultVec = Vec<oneshot::Sender<Result<String, Arc<common::Error>>>>;
+
 #[derive(Debug)]
-pub(crate) struct Request(pub(crate) Record);
+pub(crate) struct Request(
+    pub(crate) Record,
+    pub(crate) oneshot::Sender<Result<String, Arc<common::Error>>>,
+);

 pub struct Producer {
     tasks: Vec<JoinHandle<()>>,
     shard_buffer: HashMap<ShardId, Vec<Record>>,
+    shard_buffer_result: HashMap<ShardId, ResultVec>,
     shard_buffer_state: HashMap<ShardId, BufferState>,
     shard_urls: HashMap<ShardId, String>,
     request_receiver: tokio::sync::mpsc::UnboundedReceiver<Request>,
@@ -84,6 +92,7 @@ impl Producer {
         let producer = Producer {
             tasks: Vec::new(),
             shard_buffer: HashMap::new(),
+            shard_buffer_result: HashMap::new(),
             shard_buffer_state: HashMap::new(),
             shard_urls: HashMap::new(),
             request_receiver,
@@ -98,7 +107,7 @@ impl Producer {
     }

     pub async fn start(&mut self) {
-        while let Some(Request(record)) = self.request_receiver.recv().await {
+        while let Some(Request(record, result_sender)) = self.request_receiver.recv().await {
             let partition_key = record.partition_key.clone();
             match partition_key_to_shard_id(&self.shards, partition_key.clone()) {
                 Err(err) => {
@@ -130,15 +139,25 @@ impl Producer {
                             buffer_state.modify(&record);
                             self.shard_buffer_state.insert(shard_id, buffer_state);
                             self.shard_buffer.insert(shard_id, vec![record]);
+                            self.shard_buffer_result
+                                .insert(shard_id, vec![result_sender]);
                         }
                         Some(buffer) => {
+                            self.shard_buffer_result
+                                .get_mut(&shard_id)
+                                .unwrap()
+                                .push(result_sender);
                             let buffer_state =
                                 self.shard_buffer_state.get_mut(&shard_id).unwrap();
                             buffer_state.modify(&record);
                             buffer.push(record);
                             if buffer_state.check(&self.flush_settings) {
                                 let buffer =
                                     clear_shard_buffer(&mut self.shard_buffer, shard_id);
+                                let results = clear_shard_buffer(
+                                    &mut self.shard_buffer_result,
+                                    shard_id,
+                                );
                                 self.shard_buffer_state.insert(shard_id, default());
                                 let task = tokio::spawn(flush_(
                                     self.channels.clone(),
@@ -147,6 +166,7 @@ impl Producer {
                                     shard_url,
                                     self.compression_type,
                                     buffer,
+                                    results,
                                 ));
                                 self.tasks.push(task);
                             }
@@ -160,6 +180,7 @@ impl Producer {

         let mut shard_buffer = mem::take(&mut self.shard_buffer);
         for (shard_id, buffer) in shard_buffer.iter_mut() {
+            let results = self.shard_buffer_result.get_mut(shard_id).unwrap();
             let shard_url = self.shard_urls.get(shard_id);
             let shard_url_is_none = shard_url.is_none();
             match lookup_shard(
@@ -184,6 +205,7 @@ impl Producer {
                         shard_url,
                         self.compression_type,
                         mem::take(buffer),
+                        mem::take(results),
                     ));
                     self.tasks.push(task);
                 }
@@ -206,25 +228,43 @@ async fn flush(
     shard_url: String,
     compression_type: CompressionType,
     buffer: Vec<Record>,
+    results: ResultVec,
 ) -> Result<(), String> {
-    if !buffer.is_empty() {
+    if buffer.is_empty() {
+        Ok(())
+    } else {
         let channel = channels
             .channel_at(shard_url.clone())
             .await
             .map_err(|err| format!("producer connect error: url = {shard_url}, {err}"))?;
-        append(
+        match append(
             channel,
             stream_name,
             shard_id,
             compression_type,
             buffer.to_vec(),
         )
         .await
-        .map_err(|err| format!("producer append error: url = {shard_url}, {err}"))
-        .map(|x| log::debug!("append succeed: len = {}", x.len()))?;
-        Ok(())
-    } else {
-        Ok(())
+        {
+            Err(err) => {
+                let err = Arc::new(err);
+                for sender in results.into_iter() {
+                    sender.send(Err(err.clone())).unwrap_or_else(|err| {
+                        log::error!("return append result error: err = {}", err.unwrap_err())
+                    })
+                }
+                Err(format!("producer append error: url = {shard_url}, {err}"))
+            }
+            Ok(append_result) => {
+                log::debug!("append succeed: len = {}", append_result.len());
+                for (result, sender) in append_result.into_iter().zip(results) {
+                    sender.send(Ok(result)).unwrap_or_else(|err| {
+                        log::error!("return append result error: ok = {}", err.unwrap())
+                    })
+                }
+                Ok(())
+            }
+        }
     }
 }

@@ -235,6 +275,7 @@ async fn flush_(
     shard_url: String,
     compression_type: CompressionType,
     buffer: Vec<Record>,
+    results: ResultVec,
 ) {
     flush(
         channels,
@@ -243,6 +284,7 @@ async fn flush_(
         shard_url,
         compression_type,
         buffer,
+        results,
     )
     .await
     .unwrap_or_else(|err| log::error!("{err}"))
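
For context, a minimal sketch of the caller side of the new Request contract. Only Request, Record, ResultVec, and common::Error come from the diff above; the helper name append_one and the mpsc sender handle are illustrative assumptions, since the client-facing wrapper is not part of this change.

use std::sync::Arc;

use tokio::sync::{mpsc, oneshot};

// Hypothetical in-crate helper: enqueue one record and wait for its
// append result. `request_sender` is assumed to be the mpsc half paired
// with `Producer::request_receiver`; it is not part of this diff.
async fn append_one(
    request_sender: &mpsc::UnboundedSender<Request>,
    record: Record,
) -> Result<String, Arc<common::Error>> {
    // One oneshot channel per record; the Producer keeps the sender in
    // `shard_buffer_result` and completes it from `flush`.
    let (result_sender, result_receiver) = oneshot::channel();
    request_sender
        .send(Request(record, result_sender))
        .expect("producer task has shut down");
    // Resolves once the record's batch is flushed: Ok(record_id) on a
    // successful append, Err(shared error) if the whole batch failed.
    result_receiver
        .await
        .expect("producer dropped the result sender")
}

Wrapping the append error in Arc lets a single failed batch fan the same error out to every pending sender without requiring common::Error to be Clone.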