From c1bcf7a0a5e22b56c05d5256269ba0452ee11ce2 Mon Sep 17 00:00:00 2001
From: Adam Binford
Date: Fri, 3 Oct 2025 14:11:17 +0000
Subject: [PATCH] Use zipWithIndex instead of list indexing for
 maxBytesPerTrigger

---
 .../sql/execution/streaming/runtime/FileStreamSource.scala | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/runtime/FileStreamSource.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/runtime/FileStreamSource.scala
index d5503f1c247da..9847bd9d76448 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/runtime/FileStreamSource.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/runtime/FileStreamSource.scala
@@ -149,8 +149,7 @@ class FileStreamSource(
     var rSize = BigInt(0)
     val lFiles = ArrayBuffer[NewFileEntry]()
     val rFiles = ArrayBuffer[NewFileEntry]()
-    for (i <- files.indices) {
-      val file = files(i)
+    files.zipWithIndex.foreach { case (file, i) =>
       val newSize = lSize + file.size
       if (i == 0 || rFiles.isEmpty && newSize <= Long.MaxValue && newSize <= maxSize) {
         lSize += file.size
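
For context, here is a minimal standalone sketch of the idiom this patch adopts (the names Entry and the sample data are illustrative, not taken from FileStreamSource). Indexed access via files(i) is O(i) on a linked Seq such as List, so an index-based loop over n elements can degrade to O(n^2) total, whereas zipWithIndex pairs each element with its position in a single traversal:

    object ZipWithIndexSketch {
      // Hypothetical stand-in for NewFileEntry: just a path and a size.
      final case class Entry(path: String, size: Long)

      def main(args: Array[String]): Unit = {
        // A List, where positional lookup is linear in the index.
        val files: Seq[Entry] = List(Entry("a", 10L), Entry("b", 20L), Entry("c", 30L))

        // Before: files(i) re-walks the list on every iteration, O(n^2) overall.
        for (i <- files.indices) {
          val file = files(i)
          println(s"$i -> ${file.path}")
        }

        // After: one O(n) traversal that still exposes the index where needed.
        files.zipWithIndex.foreach { case (file, i) =>
          println(s"$i -> ${file.path}")
        }
      }
    }

The index is still needed in the patched code only for the i == 0 check, which zipWithIndex preserves without paying for random access.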