diff --git a/google-cloud-firestore/src/test/java/com/google/cloud/firestore/it/ITSystemTest.java b/google-cloud-firestore/src/test/java/com/google/cloud/firestore/it/ITSystemTest.java
index 0281f58a9..7cdb0d892 100644
--- a/google-cloud-firestore/src/test/java/com/google/cloud/firestore/it/ITSystemTest.java
+++ b/google-cloud-firestore/src/test/java/com/google/cloud/firestore/it/ITSystemTest.java
@@ -25,6 +25,7 @@
 import static com.google.cloud.firestore.LocalFirestoreHelper.UPDATE_SINGLE_FIELD_OBJECT;
 import static com.google.cloud.firestore.LocalFirestoreHelper.fullPath;
 import static com.google.cloud.firestore.LocalFirestoreHelper.map;
+import static com.google.cloud.firestore.it.TestHelper.getLargestDocContent;
 import static com.google.cloud.firestore.it.TestHelper.isRunningAgainstFirestoreEmulator;
 import static com.google.common.truth.Truth.assertThat;
 import static java.util.Arrays.asList;
@@ -2311,4 +2312,133 @@ public void testEnforcesTimeouts() {
         FirestoreException.class,
         () -> collection.document().listCollections().iterator().hasNext());
   }
+
+  @Test
+  public void testCanCRUDAndQueryLargeDocuments() throws Exception {
+    // Note: do not use 'randomColl' because its format contains the
+    // test name, and that makes it difficult to correctly calculate the
+    // largest number of bytes we can put in the document.
+    CollectionReference collRef = firestore.collection(LocalFirestoreHelper.autoId());
+    DocumentReference docRef = collRef.document();
+    Map<String, Object> data = getLargestDocContent();
+
+    // Set
+    docRef.set(data).get();
+
+    // Get
+    DocumentSnapshot snapshot = docRef.get().get();
+    assertEquals(data, snapshot.getData());
+
+    // Update
+    Map<String, Object> newData = getLargestDocContent();
+    docRef.update(newData).get();
+    snapshot = docRef.get().get();
+    assertEquals(newData, snapshot.getData());
+
+    // Query
+    QuerySnapshot querySnapshot = collRef.get().get();
+    assertEquals(querySnapshot.size(), 1);
+    assertEquals(newData, querySnapshot.getDocuments().get(0).getData());
+
+    // Delete
+    docRef.delete().get();
+    snapshot = docRef.get().get();
+    assertFalse(snapshot.exists());
+  }
+
+  @Test
+  public void testCanCRUDLargeDocumentsInsideTransaction() throws Exception {
+    // Note: do not use 'randomColl' because its format contains the
+    // test name, and that makes it difficult to correctly calculate the
+    // largest number of bytes we can put in the document.
+    CollectionReference collRef = firestore.collection(LocalFirestoreHelper.autoId());
+    DocumentReference docRef1 = collRef.document();
+    DocumentReference docRef2 = collRef.document();
+    DocumentReference docRef3 = collRef.document();
+    Map<String, Object> data = getLargestDocContent();
+    Map<String, Object> newData = getLargestDocContent();
+    docRef1.set(data).get();
+    docRef3.set(data).get();
+
+    collRef
+        .getFirestore()
+        .runTransaction(
+            transaction -> {
+              // Get and update
+              DocumentSnapshot snapshot = transaction.get(docRef1).get();
+              assertEquals(data, snapshot.getData());
+              transaction.update(docRef1, newData);
+
+              // Set
+              transaction.set(docRef2, data);
+
+              // Delete
+              transaction.delete(docRef3);
+              return null;
+            })
+        .get();
+
+    DocumentSnapshot snapshot = docRef1.get().get();
+    assertEquals(newData, snapshot.getData());
+
+    snapshot = docRef2.get().get();
+    assertEquals(data, snapshot.getData());
+
+    snapshot = docRef3.get().get();
+    assertFalse(snapshot.exists());
+  }
+
+  @Test
+  public void listenToLargeQuerySnapshot() throws Exception {
+    // Note: do not use 'randomColl' because its format contains the
+    // test name, and that makes it difficult to correctly calculate the
+    // largest number of bytes we can put in the document.
+    CollectionReference collRef = firestore.collection(LocalFirestoreHelper.autoId());
+    DocumentReference docRef = collRef.document();
+    Map<String, Object> data = getLargestDocContent();
+    docRef.set(data).get();
+
+    CountDownLatch latch = new CountDownLatch(1);
+    List<QuerySnapshot> querySnapshots = new ArrayList<>();
+    ListenerRegistration registration =
+        collRef.addSnapshotListener(
+            (value, error) -> {
+              querySnapshots.add(value);
+              latch.countDown();
+            });
+
+    latch.await();
+    registration.remove();
+
+    assertEquals(querySnapshots.size(), 1);
+    assertEquals(querySnapshots.get(0).getDocuments().size(), 1);
+    assertEquals(data, querySnapshots.get(0).getDocuments().get(0).getData());
+  }
+
+  @Test
+  public void listenToLargeDocumentSnapshot() throws Exception {
+    // Note: do not use 'randomColl' because its format contains the
+    // test name, and that makes it difficult to correctly calculate the
+    // largest number of bytes we can put in the document.
+    CollectionReference collRef = firestore.collection(LocalFirestoreHelper.autoId());
+    DocumentReference docRef = collRef.document();
+    Map<String, Object> data = getLargestDocContent();
+    docRef.set(data).get();
+
+    CountDownLatch latch = new CountDownLatch(1);
+    List<DocumentSnapshot> documentSnapshots = new ArrayList<>();
+
+    ListenerRegistration registration =
+        docRef.addSnapshotListener(
+            (value, error) -> {
+              documentSnapshots.add(value);
+              latch.countDown();
+            });
+
+    latch.await();
+    registration.remove();
+
+    assertEquals(documentSnapshots.size(), 1);
+    assertEquals(data, documentSnapshots.get(0).getData());
+  }
 }
diff --git a/google-cloud-firestore/src/test/java/com/google/cloud/firestore/it/TestHelper.java b/google-cloud-firestore/src/test/java/com/google/cloud/firestore/it/TestHelper.java
index 54cfd86f0..e3411a7af 100644
--- a/google-cloud-firestore/src/test/java/com/google/cloud/firestore/it/TestHelper.java
+++ b/google-cloud-firestore/src/test/java/com/google/cloud/firestore/it/TestHelper.java
@@ -17,7 +17,11 @@
 package com.google.cloud.firestore.it;
 
 import com.google.api.core.ApiFuture;
+import com.google.cloud.firestore.Blob;
 import com.google.cloud.firestore.Firestore;
+import java.util.Collections;
+import java.util.Map;
+import java.util.Random;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
 import java.util.concurrent.atomic.AtomicBoolean;
@@ -55,4 +59,20 @@ public static void await(ApiFuture future) throws InterruptedException {
 
     executor.shutdown();
   }
+
+  /**
+   * Returns document contents with a single Blob field sized so that the document holds the
+   * largest number of bytes allowed to be stored in a Firestore document.
+   */
+  public static Map<String, Object> getLargestDocContent() {
+    int MAX_BYTES_PER_FIELD_VALUE = 1048487;
+    // Subtract 8 for '__name__', 20 for its value, and 4 for 'blob'.
+    int numBytesToUse = MAX_BYTES_PER_FIELD_VALUE - 8 - 20 - 4;
+
+    byte[] bytes = new byte[numBytesToUse];
+    // Fill the byte array with random values
+    Random random = new Random();
+    random.nextBytes(bytes);
+    return Collections.singletonMap("blob", Blob.fromBytes(bytes));
+  }
 }