package com.google.firebase.firestore.local;

import static com.google.firebase.firestore.util.Assert.fail;
+import static com.google.firebase.firestore.util.Assert.hardAssert;

+import android.database.Cursor;
import androidx.annotation.Nullable;
import com.google.firebase.firestore.auth.User;
import com.google.firebase.firestore.model.DocumentKey;
import com.google.firebase.firestore.model.ResourcePath;
import com.google.firebase.firestore.model.mutation.Mutation;
import com.google.firebase.firestore.model.mutation.Overlay;
+import com.google.firebase.firestore.util.BackgroundQueue;
+import com.google.firebase.firestore.util.Executors;
import com.google.firestore.v1.Write;
import com.google.protobuf.InvalidProtocolBufferException;
+import java.util.ArrayList;
+import java.util.Arrays;
import java.util.HashMap;
+import java.util.List;
import java.util.Map;
+import java.util.SortedSet;
+import java.util.concurrent.Executor;

public class SQLiteDocumentOverlayCache implements DocumentOverlayCache {
  private final SQLitePersistence db;
@@ -47,7 +56,54 @@ public Overlay getOverlay(DocumentKey key) {
4756 "SELECT overlay_mutation, largest_batch_id FROM document_overlays "
4857 + "WHERE uid = ? AND collection_path = ? AND document_id = ?" )
4958 .binding (uid , collectionPath , documentId )
50- .firstValue (this ::decodeOverlay );
59+ .firstValue (row -> this .decodeOverlay (row .getBlob (0 ), row .getInt (1 )));
60+ }
61+
62+ @ Override
63+ public Map <DocumentKey , Overlay > getOverlays (SortedSet <DocumentKey > keys ) {
64+ hardAssert (keys .comparator () == null , "getOverlays() requires natural order" );
65+ Map <DocumentKey , Overlay > result = new HashMap <>();
66+
67+ BackgroundQueue backgroundQueue = new BackgroundQueue ();
68+ ResourcePath currentCollection = ResourcePath .EMPTY ;
69+ List <Object > accumulatedDocumentIds = new ArrayList <>();
70+ for (DocumentKey key : keys ) {
71+ if (!currentCollection .equals (key .getCollectionPath ())) {
72+ processSingleCollection (result , backgroundQueue , currentCollection , accumulatedDocumentIds );
73+ currentCollection = key .getCollectionPath ();
74+ accumulatedDocumentIds .clear ();
75+ }
76+ accumulatedDocumentIds .add (key .getDocumentId ());
77+ }
78+
79+ processSingleCollection (result , backgroundQueue , currentCollection , accumulatedDocumentIds );
80+ backgroundQueue .drain ();
81+ return result ;
82+ }
83+
84+ /** Reads the overlays for the documents in a single collection. */
85+ private void processSingleCollection (
86+ Map <DocumentKey , Overlay > result ,
87+ BackgroundQueue backgroundQueue ,
88+ ResourcePath collectionPath ,
89+ List <Object > documentIds ) {
90+ if (documentIds .isEmpty ()) {
91+ return ;
92+ }
93+
94+ SQLitePersistence .LongQuery longQuery =
95+ new SQLitePersistence .LongQuery (
96+ db ,
97+ "SELECT overlay_mutation, largest_batch_id FROM document_overlays "
98+ + "WHERE uid = ? AND collection_path = ? AND document_id IN (" ,
99+ Arrays .asList (uid , EncodedPath .encode (collectionPath )),
100+ documentIds ,
101+ ")" );
102+ while (longQuery .hasMoreSubqueries ()) {
103+ longQuery
104+ .performNextSubquery ()
105+ .forEach (row -> processOverlaysInBackground (backgroundQueue , result , row ));
106+ }
51107 }
52108
53109 private void saveOverlay (int largestBatchId , DocumentKey key , @ Nullable Mutation mutation ) {
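For context on the `SQLitePersistence.LongQuery` usage in the hunk above: SQLite caps the number of bind arguments a single statement may carry, so a large `document_id IN (...)` list has to be split across several smaller subqueries. Because `getOverlays(SortedSet<DocumentKey>)` iterates keys in natural order, all document IDs of one collection arrive contiguously and can be flushed through one such chunked query per collection. The sketch below illustrates only the chunking idea with plain JDK types; `ChunkedInQuery`, `buildSubqueries`, and the 900-argument budget are hypothetical stand-ins for illustration, not the SDK's API.

import java.util.ArrayList;
import java.util.List;

/**
 * Illustrative sketch only: splits a long list of document IDs into several
 * "... IN (?, ?, ...)" subqueries so that no single statement exceeds the
 * SQLite bind-argument limit (commonly 999 by default). Names are hypothetical.
 */
public class ChunkedInQuery {
  // Leave headroom below the bind-argument limit for the fixed arguments (uid, collection path).
  private static final int MAX_ARGS_PER_QUERY = 900;

  static List<String> buildSubqueries(
      String head, List<String> fixedArgs, List<String> ids, String tail) {
    List<String> subqueries = new ArrayList<>();
    int idsPerQuery = MAX_ARGS_PER_QUERY - fixedArgs.size();
    for (int start = 0; start < ids.size(); start += idsPerQuery) {
      int end = Math.min(start + idsPerQuery, ids.size());
      StringBuilder sql = new StringBuilder(head);
      for (int i = start; i < end; i++) {
        if (i > start) sql.append(", ");
        sql.append('?'); // one placeholder per document ID in this chunk
      }
      sql.append(tail);
      subqueries.add(sql.toString());
      // A real implementation would execute each subquery here with
      // fixedArgs + ids.subList(start, end) as its bind arguments.
    }
    return subqueries;
  }

  public static void main(String[] args) {
    List<String> ids = new ArrayList<>();
    for (int i = 0; i < 2000; i++) ids.add("doc" + i);
    List<String> queries =
        buildSubqueries(
            "SELECT overlay_mutation, largest_batch_id FROM document_overlays "
                + "WHERE uid = ? AND collection_path = ? AND document_id IN (",
            List.of("uid", "users"),
            ids,
            ")");
    System.out.println(queries.size() + " subqueries needed for " + ids.size() + " documents");
  }
}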
@@ -83,49 +139,48 @@ public void removeOverlaysForBatchId(int batchId) {

  @Override
  public Map<DocumentKey, Overlay> getOverlays(ResourcePath collection, int sinceBatchId) {
-    String collectionPath = EncodedPath.encode(collection);
-
    Map<DocumentKey, Overlay> result = new HashMap<>();
+    BackgroundQueue backgroundQueue = new BackgroundQueue();
    db.query(
            "SELECT overlay_mutation, largest_batch_id FROM document_overlays "
                + "WHERE uid = ? AND collection_path = ? AND largest_batch_id > ?")
-        .binding(uid, collectionPath, sinceBatchId)
-        .forEach(
-            row -> {
-              Overlay overlay = decodeOverlay(row);
-              result.put(overlay.getKey(), overlay);
-            });
-
+        .binding(uid, EncodedPath.encode(collection), sinceBatchId)
+        .forEach(row -> processOverlaysInBackground(backgroundQueue, result, row));
+    backgroundQueue.drain();
    return result;
  }

  @Override
  public Map<DocumentKey, Overlay> getOverlays(
      String collectionGroup, int sinceBatchId, int count) {
    Map<DocumentKey, Overlay> result = new HashMap<>();
-    Overlay[] lastOverlay = new Overlay[] {null};
+    String[] lastCollectionPath = new String[1];
+    String[] lastDocumentPath = new String[1];
+    int[] lastLargestBatchId = new int[1];

+    BackgroundQueue backgroundQueue = new BackgroundQueue();
    db.query(
-            "SELECT overlay_mutation, largest_batch_id FROM document_overlays "
+            "SELECT overlay_mutation, largest_batch_id, collection_path, document_id "
+                + " FROM document_overlays "
                + "WHERE uid = ? AND collection_group = ? AND largest_batch_id > ? "
                + "ORDER BY largest_batch_id, collection_path, document_id LIMIT ?")
        .binding(uid, collectionGroup, sinceBatchId, count)
        .forEach(
            row -> {
-              lastOverlay[0] = decodeOverlay(row);
-              result.put(lastOverlay[0].getKey(), lastOverlay[0]);
+              lastLargestBatchId[0] = row.getInt(1);
+              lastCollectionPath[0] = row.getString(2);
+              lastDocumentPath[0] = row.getString(3);
+              processOverlaysInBackground(backgroundQueue, result, row);
            });

-    if (lastOverlay[0] == null) {
+    if (lastCollectionPath[0] == null) {
      return result;
    }

    // This function should not return partial batch overlays, even if the number of overlays in the
    // result set exceeds the given `count` argument. Since the `LIMIT` in the above query might
    // result in a partial batch, the following query appends any remaining overlays for the last
    // batch.
-    DocumentKey key = lastOverlay[0].getKey();
-    String encodedCollectionPath = EncodedPath.encode(key.getCollectionPath());
    db.query(
            "SELECT overlay_mutation, largest_batch_id FROM document_overlays "
                + "WHERE uid = ? AND collection_group = ? "
@@ -134,23 +189,35 @@ public Map<DocumentKey, Overlay> getOverlays(
        .binding(
            uid,
            collectionGroup,
-            encodedCollectionPath,
-            encodedCollectionPath,
-            key.getDocumentId(),
-            lastOverlay[0].getLargestBatchId())
-        .forEach(
-            row -> {
-              Overlay overlay = decodeOverlay(row);
-              result.put(overlay.getKey(), overlay);
-            });
-
+            lastCollectionPath[0],
+            lastCollectionPath[0],
+            lastDocumentPath[0],
+            lastLargestBatchId[0])
+        .forEach(row -> processOverlaysInBackground(backgroundQueue, result, row));
+    backgroundQueue.drain();
    return result;
  }

-  private Overlay decodeOverlay(android.database.Cursor row) {
+  private void processOverlaysInBackground(
+      BackgroundQueue backgroundQueue, Map<DocumentKey, Overlay> results, Cursor row) {
+    byte[] rawMutation = row.getBlob(0);
+    int largestBatchId = row.getInt(1);
+
+    // Since scheduling background tasks incurs overhead, we only dispatch to a
+    // background thread if there are still some documents remaining.
+    Executor executor = row.isLast() ? Executors.DIRECT_EXECUTOR : backgroundQueue;
+    executor.execute(
+        () -> {
+          Overlay overlay = decodeOverlay(rawMutation, largestBatchId);
+          synchronized (results) {
+            results.put(overlay.getKey(), overlay);
+          }
+        });
+  }
+
+  private Overlay decodeOverlay(byte[] rawMutation, int largestBatchId) {
    try {
-      Write write = Write.parseFrom(row.getBlob(0));
-      int largestBatchId = row.getInt(1);
+      Write write = Write.parseFrom(rawMutation);
      Mutation mutation = serializer.decodeMutation(write);
      return Overlay.create(largestBatchId, mutation);
    } catch (InvalidProtocolBufferException e) {
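For context on `processOverlaysInBackground` and `BackgroundQueue.drain()` in the hunk above: rows are read on the calling thread, the protobuf decoding is handed off to a background executor, results are merged into a shared map under a lock, and the caller blocks until every pending decode has finished before returning. The sketch below reproduces that shape with plain JDK executors only; `BackgroundDecodeSketch`, `decodeAll`, and the fake `decode` helper are hypothetical names for illustration, not part of the SDK.

import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

/**
 * Illustrative sketch only: decode rows on a thread pool, merge results into a
 * shared map, and "drain" (wait for all pending decodes) before returning.
 * Mirrors the shape of processOverlaysInBackground()/drain() above, but uses
 * plain JDK executors instead of the SDK's BackgroundQueue.
 */
public class BackgroundDecodeSketch {
  // A real implementation would shut this pool down when the cache is closed.
  private final ExecutorService pool = Executors.newFixedThreadPool(4);

  /** Stand-in "decode": in the real cache this is a protobuf parse plus Overlay.create(). */
  private static String decode(byte[] raw) {
    return new String(raw, java.nio.charset.StandardCharsets.UTF_8);
  }

  public Map<Integer, String> decodeAll(List<byte[]> rows) throws Exception {
    // The diff synchronizes on the map explicitly; a synchronized wrapper achieves the same here.
    Map<Integer, String> results = Collections.synchronizedMap(new HashMap<>());
    List<Future<?>> pending = new ArrayList<>();

    for (int i = 0; i < rows.size(); i++) {
      final int key = i;
      final byte[] raw = rows.get(i);
      boolean isLast = (i == rows.size() - 1);
      if (isLast) {
        // Scheduling has overhead; run the final row inline on the caller,
        // like the DIRECT_EXECUTOR branch in processOverlaysInBackground().
        results.put(key, decode(raw));
      } else {
        pending.add(pool.submit(() -> { results.put(key, decode(raw)); }));
      }
    }

    // "Drain": block until every background decode has finished.
    for (Future<?> f : pending) {
      f.get();
    }
    return results;
  }
}

Running the last row inline mirrors the `row.isLast() ? Executors.DIRECT_EXECUTOR : backgroundQueue` branch: once no more rows remain, paying the scheduling overhead for one final task buys nothing.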