3232import com.google.firebase.firestore.model.MutableDocument;
3333import com.google.firebase.firestore.model.ResourcePath;
3434import com.google.firebase.firestore.model.SnapshotVersion;
35- import com.google.firebase.firestore.util.BackgroundQueue;
36- import com.google.firebase.firestore.util.Executors;
3735import com.google.firebase.firestore.util.Function;
3836import com.google.protobuf.InvalidProtocolBufferException;
3937import com.google.protobuf.MessageLite;
4745import java.util.Objects;
4846import java.util.Set;
4947import java.util.concurrent.ConcurrentHashMap;
50- import java.util.concurrent.Executor;
5148import javax.annotation.Nonnull;
5249import javax.annotation.Nullable;
5350
@@ -170,13 +167,9 @@ public Map<DocumentKey, MutableDocument> getAll(Iterable<DocumentKey> documentKe
170167 bindVars ,
171168 ") ORDER BY path" );
172169
173- BackgroundQueue backgroundQueue = new BackgroundQueue ();
174170 while (longQuery .hasMoreSubqueries ()) {
175- longQuery
176- .performNextSubquery ()
177- .forEach (row -> processRowInBackground (backgroundQueue , results , row , /*filter*/ null ));
171+ longQuery .performNextSubquery ().forEach (row -> processRow (results , row , /*filter*/ null ));
178172 }
179- backgroundQueue .drain ();
180173
181174 // Backfill any rows with null "document_type" discovered by processRow().
182175 documentTypeBackfiller .backfill (db );
@@ -266,18 +259,16 @@ private Map<DocumentKey, MutableDocument> getAll(
266259 }
267260 bindVars [i ] = count ;
268261
269- BackgroundQueue backgroundQueue = new BackgroundQueue ();
270262 Map <DocumentKey , MutableDocument > results = new HashMap <>();
271263 db .query (sql .toString ())
272264 .binding (bindVars )
273265 .forEach (
274266 row -> {
275- processRowInBackground ( backgroundQueue , results , row , filter );
267+ processRow ( results , row , filter );
276268 if (context != null ) {
277269 context .incrementDocumentReadCount ();
278270 }
279271 });
280- backgroundQueue .drain ();
281272
282273 // Backfill any null "document_type" columns discovered by processRow().
283274 documentTypeBackfiller .backfill (db );
@@ -297,8 +288,7 @@ private Map<DocumentKey, MutableDocument> getAll(
297288 collections , offset , count , /*tryFilterDocumentType*/ null , filter , /*context*/ null );
298289 }
299290
300- private void processRowInBackground (
301- BackgroundQueue backgroundQueue ,
291+ private void processRow (
302292 Map <DocumentKey , MutableDocument > results ,
303293 Cursor row ,
304294 @ Nullable Function <MutableDocument , Boolean > filter ) {
@@ -308,22 +298,15 @@ private void processRowInBackground(
308298 boolean documentTypeIsNull = row .isNull (3 );
309299 String path = row .getString (4 );
310300
311- // Since scheduling background tasks incurs overhead, we only dispatch to a
312- // background thread if there are still some documents remaining.
313- Executor executor = row .isLast () ? Executors .DIRECT_EXECUTOR : backgroundQueue ;
314- executor .execute (
315- () -> {
316- MutableDocument document =
317- decodeMaybeDocument (rawDocument , readTimeSeconds , readTimeNanos );
318- if (documentTypeIsNull ) {
319- documentTypeBackfiller .enqueue (path , readTimeSeconds , readTimeNanos , document );
320- }
321- if (filter == null || filter .apply (document )) {
322- synchronized (results ) {
323- results .put (document .getKey (), document );
324- }
325- }
326- });
301+ MutableDocument document = decodeMaybeDocument (rawDocument , readTimeSeconds , readTimeNanos );
302+ if (documentTypeIsNull ) {
303+ documentTypeBackfiller .enqueue (path , readTimeSeconds , readTimeNanos , document );
304+ }
305+ if (filter == null || filter .apply (document )) {
306+ synchronized (results ) {
307+ results .put (document .getKey (), document );
308+ }
309+ }
327310 }
328311
329312 @ Override
0 commit comments