diff --git a/common/pom.xml b/common/pom.xml
index 069f023896..d14d0e2dd2 100644
--- a/common/pom.xml
+++ b/common/pom.xml
@@ -63,6 +63,10 @@ under the License.
       <groupId>org.apache.arrow</groupId>
       <artifactId>arrow-c-data</artifactId>
     </dependency>
+    <dependency>
+      <groupId>org.scala-lang.modules</groupId>
+      <artifactId>scala-collection-compat_${scala.binary.version}</artifactId>
+    </dependency>
     <dependency>
       <groupId>junit</groupId>
       <artifactId>junit</artifactId>
diff --git a/common/src/main/java/org/apache/comet/parquet/NativeBatchReader.java b/common/src/main/java/org/apache/comet/parquet/NativeBatchReader.java
index 7595242c34..fbef0e2586 100644
--- a/common/src/main/java/org/apache/comet/parquet/NativeBatchReader.java
+++ b/common/src/main/java/org/apache/comet/parquet/NativeBatchReader.java
@@ -30,7 +30,6 @@
 import java.util.stream.Collectors;
 
 import scala.Option;
-import scala.collection.JavaConverters;
 import scala.collection.Seq;
 import scala.collection.mutable.Buffer;
 
@@ -81,6 +80,8 @@
 import org.apache.comet.vector.CometVector;
 import org.apache.comet.vector.NativeUtil;
 
+import static scala.jdk.javaapi.CollectionConverters.*;
+
 /**
  * A vectorized Parquet reader that reads a Parquet file in a batched fashion.
  *
@@ -255,7 +256,7 @@ public void init() throws Throwable {
     ParquetReadOptions readOptions = builder.build();
 
     Map<String, String> objectStoreOptions =
-        JavaConverters.mapAsJavaMap(NativeConfig.extractObjectStoreOptions(conf, file.pathUri()));
+        asJava(NativeConfig.extractObjectStoreOptions(conf, file.pathUri()));
 
     // TODO: enable off-heap buffer when they are ready
     ReadOptions cometReadOptions = ReadOptions.builder(conf).build();
@@ -306,7 +307,7 @@ public void init() throws Throwable {
     List<Type> fields = requestedSchema.getFields();
     List<Type> fileFields = fileSchema.getFields();
     ParquetColumn[] parquetFields =
-        JavaConverters.seqAsJavaList(parquetColumn.children()).toArray(new ParquetColumn[0]);
+        asJava(parquetColumn.children()).toArray(new ParquetColumn[0]);
     int numColumns = fields.size();
     if (partitionSchema != null) numColumns += partitionSchema.size();
     columnReaders = new AbstractColumnReader[numColumns];
@@ -618,14 +619,14 @@ private DataType getSparkArrayTypeByFieldId(
   }
 
   private void checkParquetType(ParquetColumn column) throws IOException {
-    String[] path = JavaConverters.seqAsJavaList(column.path()).toArray(new String[0]);
+    String[] path = asJava(column.path()).toArray(new String[0]);
     if (containsPath(fileSchema, path)) {
       if (column.isPrimitive()) {
         ColumnDescriptor desc = column.descriptor().get();
         ColumnDescriptor fd = fileSchema.getColumnDescription(desc.getPath());
         TypeUtil.checkParquetType(fd, column.sparkType());
       } else {
-        for (ParquetColumn childColumn : JavaConverters.seqAsJavaList(column.children())) {
+        for (ParquetColumn childColumn : asJava(column.children())) {
          checkColumn(childColumn);
         }
       }
@@ -645,7 +646,7 @@ private void checkParquetType(ParquetColumn column) throws IOException {
    * file schema, or whether it conforms to the type of the file schema.
    */
   private void checkColumn(ParquetColumn column) throws IOException {
-    String[] path = JavaConverters.seqAsJavaList(column.path()).toArray(new String[0]);
+    String[] path = asJava(column.path()).toArray(new String[0]);
     if (containsPath(fileSchema, path)) {
       if (column.isPrimitive()) {
         ColumnDescriptor desc = column.descriptor().get();
@@ -654,7 +655,7 @@ private void checkColumn(ParquetColumn column) throws IOException {
         throw new UnsupportedOperationException("Schema evolution not supported.");
       }
     } else {
-      for (ParquetColumn childColumn : JavaConverters.seqAsJavaList(column.children())) {
+      for (ParquetColumn childColumn : asJava(column.children())) {
         checkColumn(childColumn);
       }
     }
@@ -805,7 +806,7 @@ public void close() throws IOException {
 
   @SuppressWarnings("deprecation")
   private int loadNextBatch() throws Throwable {
-    for (ParquetColumn childColumn : JavaConverters.seqAsJavaList(parquetColumn.children())) {
+    for (ParquetColumn childColumn : asJava(parquetColumn.children())) {
       checkParquetType(childColumn);
     }
 
diff --git a/common/src/main/java/org/apache/comet/parquet/NativeColumnReader.java b/common/src/main/java/org/apache/comet/parquet/NativeColumnReader.java
index 88447c1473..b170ae5830 100644
--- a/common/src/main/java/org/apache/comet/parquet/NativeColumnReader.java
+++ b/common/src/main/java/org/apache/comet/parquet/NativeColumnReader.java
@@ -33,6 +33,8 @@
 import org.apache.comet.CometSchemaImporter;
 import org.apache.comet.vector.*;
 
+import static scala.jdk.javaapi.CollectionConverters.*;
+
 // TODO: extend ColumnReader instead of AbstractColumnReader to reduce code duplication
 public class NativeColumnReader extends AbstractColumnReader {
   protected static final Logger LOG = LoggerFactory.getLogger(NativeColumnReader.class);
@@ -145,9 +147,7 @@ public CometDecodedVector loadVector() {
     ArrowSchema[] schemas = {schema};
 
     CometDecodedVector cometVector =
-        (CometDecodedVector)
-            scala.collection.JavaConverters.seqAsJavaList(nativeUtil.importVector(arrays, schemas))
-                .get(0);
+        (CometDecodedVector) asJava(nativeUtil.importVector(arrays, schemas)).get(0);
 
     // Update whether the current vector contains any null values. This is used in the following
     // batch(s) to determine whether we can skip loading the native vector.
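Note: the Java readers above now statically import `scala.jdk.javaapi.CollectionConverters`, whose overloaded `asJava`/`asScala` methods replace the deprecated `scala.collection.JavaConverters` wrappers and are resolved purely by the argument type. A minimal, self-contained sketch of that usage pattern (hypothetical class and data, not part of this change; on Scala 2.13 the converters ship with scala-library, on 2.12 they come from the scala-collection-compat dependency added above):

```java
import java.util.Arrays;
import java.util.List;

import scala.collection.Seq;
import scala.collection.mutable.Buffer;

import static scala.jdk.javaapi.CollectionConverters.*;

// Hypothetical demo class, not part of the Comet sources.
public class ConvertersDemo {
  public static void main(String[] args) {
    List<String> javaList = Arrays.asList("a", "b", "c");

    // Java -> Scala: asScala(java.util.List) wraps the list in a Scala Buffer view.
    Buffer<String> buffer = asScala(javaList);
    Seq<String> seq = buffer.toSeq();

    // Scala -> Java: asJava(scala.collection.Seq) wraps the Seq as a java.util.List view.
    List<String> roundTrip = asJava(seq);
    System.out.println(roundTrip); // prints [a, b, c]
  }
}
```

Like the old `JavaConverters` methods, these converters return wrapper views rather than copies, so the call sites in this patch keep their original cost characteristics.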
diff --git a/common/src/main/scala/org/apache/spark/sql/comet/execution/arrow/ArrowWriters.scala b/common/src/main/scala/org/apache/spark/sql/comet/execution/arrow/ArrowWriters.scala
index f12ab7c4f1..342441ce28 100644
--- a/common/src/main/scala/org/apache/spark/sql/comet/execution/arrow/ArrowWriters.scala
+++ b/common/src/main/scala/org/apache/spark/sql/comet/execution/arrow/ArrowWriters.scala
@@ -19,7 +19,7 @@
 
 package org.apache.spark.sql.comet.execution.arrow
 
-import scala.collection.JavaConverters._
+import scala.jdk.CollectionConverters._
 
 import org.apache.arrow.vector._
 import org.apache.arrow.vector.complex._
diff --git a/common/src/main/scala/org/apache/spark/sql/comet/parquet/CometParquetReadSupport.scala b/common/src/main/scala/org/apache/spark/sql/comet/parquet/CometParquetReadSupport.scala
index 0c2a55b282..edaee563b3 100644
--- a/common/src/main/scala/org/apache/spark/sql/comet/parquet/CometParquetReadSupport.scala
+++ b/common/src/main/scala/org/apache/spark/sql/comet/parquet/CometParquetReadSupport.scala
@@ -21,7 +21,7 @@ package org.apache.spark.sql.comet.parquet
 
 import java.util.{Locale, UUID}
 
-import scala.collection.JavaConverters._
+import scala.jdk.CollectionConverters._
 
 import org.apache.parquet.schema._
 import org.apache.parquet.schema.LogicalTypeAnnotation.ListLogicalTypeAnnotation
diff --git a/common/src/main/scala/org/apache/spark/sql/comet/util/Utils.scala b/common/src/main/scala/org/apache/spark/sql/comet/util/Utils.scala
index a72208db27..daab5da363 100644
--- a/common/src/main/scala/org/apache/spark/sql/comet/util/Utils.scala
+++ b/common/src/main/scala/org/apache/spark/sql/comet/util/Utils.scala
@@ -23,7 +23,7 @@ import java.io.{DataInputStream, DataOutputStream, File}
 import java.nio.ByteBuffer
 import java.nio.channels.Channels
 
-import scala.collection.JavaConverters._
+import scala.jdk.CollectionConverters._
 
 import org.apache.arrow.c.CDataDictionaryProvider
 import org.apache.arrow.vector.{BigIntVector, BitVector, DateDayVector, DecimalVector, FieldVector, FixedSizeBinaryVector, Float4Vector, Float8Vector, IntVector, SmallIntVector, TimeStampMicroTZVector, TimeStampMicroVector, TinyIntVector, ValueVector, VarBinaryVector, VarCharVector, VectorSchemaRoot}
diff --git a/common/src/test/java/org/apache/comet/parquet/TestColumnReader.java b/common/src/test/java/org/apache/comet/parquet/TestColumnReader.java
index d4e748a9b6..32accae8e5 100644
--- a/common/src/test/java/org/apache/comet/parquet/TestColumnReader.java
+++ b/common/src/test/java/org/apache/comet/parquet/TestColumnReader.java
@@ -26,8 +26,6 @@
 import java.util.concurrent.ThreadLocalRandom;
 import java.util.function.BiFunction;
 
-import scala.collection.JavaConverters;
-
 import org.junit.Test;
 
 import org.apache.arrow.memory.BufferAllocator;
@@ -46,6 +44,7 @@
 import static org.apache.spark.sql.types.DataTypes.*;
 import static org.junit.Assert.*;
+import static scala.jdk.javaapi.CollectionConverters.*;
 
 @SuppressWarnings("unchecked")
 public class TestColumnReader {
@@ -97,7 +96,7 @@ public void testConstantVectors() {
       StructField field = StructField.apply("f", type, false, null);
 
       List values = Collections.singletonList(VALUES.get(i));
-      InternalRow row = GenericInternalRow.apply(JavaConverters.asScalaBuffer(values).toSeq());
+      InternalRow row = GenericInternalRow.apply(asScala(values).toSeq());
       ConstantColumnReader reader = new ConstantColumnReader(field, BATCH_SIZE, row, 0, true);
       reader.readBatch(BATCH_SIZE);
       CometVector vector = reader.currentBatch();
diff --git a/dev/ensure-jars-have-correct-contents.sh b/dev/ensure-jars-have-correct-contents.sh
index 23d0be3231..f698fe78fc 100755
--- a/dev/ensure-jars-have-correct-contents.sh
+++ b/dev/ensure-jars-have-correct-contents.sh
@@ -94,6 +94,12 @@ allowed_expr+="|^org/apache/spark/CometPlugin.class$"
 allowed_expr+="|^org/apache/spark/CometDriverPlugin.*$"
 allowed_expr+="|^org/apache/spark/CometTaskMemoryManager.class$"
 allowed_expr+="|^org/apache/spark/CometTaskMemoryManager.*$"
+allowed_expr+="|^scala-collection-compat.properties$"
+allowed_expr+="|^scala/$"
+allowed_expr+="|^scala/annotation/"
+allowed_expr+="|^scala/collection/"
+allowed_expr+="|^scala/jdk/"
+allowed_expr+="|^scala/util/"
 allowed_expr+=")"
 
 declare -i bad_artifacts=0
diff --git a/fuzz-testing/pom.xml b/fuzz-testing/pom.xml
index 5e5221f1f1..728b32b82a 100644
--- a/fuzz-testing/pom.xml
+++ b/fuzz-testing/pom.xml
@@ -50,11 +50,23 @@ under the License.
       <groupId>org.apache.spark</groupId>
       <artifactId>spark-sql_${scala.binary.version}</artifactId>
       <scope>provided</scope>
+      <exclusions>
+        <exclusion>
+          <groupId>org.scala-lang.modules</groupId>
+          <artifactId>scala-collection-compat_${scala.binary.version}</artifactId>
+        </exclusion>
+      </exclusions>
     </dependency>
     <dependency>
       <groupId>org.apache.datafusion</groupId>
       <artifactId>comet-spark-spark${spark.version.short}_${scala.binary.version}</artifactId>
       <version>${project.version}</version>
+      <exclusions>
+        <exclusion>
+          <groupId>org.scala-lang.modules</groupId>
+          <artifactId>scala-collection-compat_${scala.binary.version}</artifactId>
+        </exclusion>
+      </exclusions>
     </dependency>
     <dependency>
       <groupId>org.rogach</groupId>
diff --git a/pom.xml b/pom.xml
index b659ae4e13..cdf1b4e1d2 100644
--- a/pom.xml
+++ b/pom.xml
@@ -258,6 +258,11 @@ under the License.
         <artifactId>scala-library</artifactId>
         <version>${scala.version}</version>
       </dependency>
+      <dependency>
+        <groupId>org.scala-lang.modules</groupId>
+        <artifactId>scala-collection-compat_${scala.binary.version}</artifactId>
+        <version>2.12.0</version>
+      </dependency>
       <dependency>
         <groupId>com.google.protobuf</groupId>
         <artifactId>protobuf-java</artifactId>
diff --git a/spark/pom.xml b/spark/pom.xml
index b71cfabff1..77e2d09c6c 100644
--- a/spark/pom.xml
+++ b/spark/pom.xml
@@ -220,6 +220,7 @@ under the License.
               <include>com.google.protobuf:protobuf-java</include>
               <include>com.google.guava:guava</include>
+              <include>org.scala-lang.modules:scala-collection-compat_${scala.binary.version}</include>
             </includes>
           </artifactSet>
diff --git a/spark/src/main/java/org/apache/spark/sql/comet/execution/shuffle/CometUnsafeShuffleWriter.java b/spark/src/main/java/org/apache/spark/sql/comet/execution/shuffle/CometUnsafeShuffleWriter.java
index 552a189ab4..a845e743d4 100644
--- a/spark/src/main/java/org/apache/spark/sql/comet/execution/shuffle/CometUnsafeShuffleWriter.java
+++ b/spark/src/main/java/org/apache/spark/sql/comet/execution/shuffle/CometUnsafeShuffleWriter.java
@@ -33,7 +33,6 @@
 
 import scala.Option;
 import scala.Product2;
-import scala.collection.JavaConverters;
 import scala.reflect.ClassTag;
 import scala.reflect.ClassTag$;
 
@@ -81,6 +80,8 @@
 import org.apache.comet.CometConf;
 import org.apache.comet.Native;
 
+import static scala.jdk.javaapi.CollectionConverters.*;
+
 /**
  * This is based on Spark {@link UnsafeShuffleWriter}, as a writer to write shuffling rows into
  * Arrow format after sorting rows based on the partition ID.
@@ -201,7 +202,7 @@ public long getPeakMemoryUsedBytes() {
   /** This convenience method should only be called in test code. */
   @VisibleForTesting
   public void write(Iterator<Product2<K, V>> records) throws IOException {
-    write(JavaConverters.asScalaIteratorConverter(records).asScala());
+    write(asScala(records));
   }
 
   @Override
diff --git a/spark/src/main/scala/org/apache/comet/parquet/CometParquetFileFormat.scala b/spark/src/main/scala/org/apache/comet/parquet/CometParquetFileFormat.scala
index 5c2de2a6b3..3f1d8743f4 100644
--- a/spark/src/main/scala/org/apache/comet/parquet/CometParquetFileFormat.scala
+++ b/spark/src/main/scala/org/apache/comet/parquet/CometParquetFileFormat.scala
@@ -19,7 +19,7 @@
 
 package org.apache.comet.parquet
 
-import scala.collection.JavaConverters
+import scala.jdk.CollectionConverters._
 
 import org.apache.hadoop.conf.Configuration
 import org.apache.parquet.filter2.predicate.FilterApi
@@ -164,7 +164,7 @@ class CometParquetFileFormat(scanImpl: String)
           datetimeRebaseSpec.mode == CORRECTED,
           partitionSchema,
           file.partitionValues,
-          JavaConverters.mapAsJavaMap(metrics))
+          metrics.asJava)
         try {
           batchReader.init()
         } catch {
@@ -198,7 +198,7 @@ class CometParquetFileFormat(scanImpl: String)
           datetimeRebaseSpec.mode == CORRECTED,
           partitionSchema,
           file.partitionValues,
-          JavaConverters.mapAsJavaMap(metrics))
+          metrics.asJava)
         try {
           batchReader.init()
         } catch {
diff --git a/spark/src/main/scala/org/apache/comet/parquet/CometParquetPartitionReaderFactory.scala b/spark/src/main/scala/org/apache/comet/parquet/CometParquetPartitionReaderFactory.scala
index 69cffdd15d..495054fc81 100644
--- a/spark/src/main/scala/org/apache/comet/parquet/CometParquetPartitionReaderFactory.scala
+++ b/spark/src/main/scala/org/apache/comet/parquet/CometParquetPartitionReaderFactory.scala
@@ -19,8 +19,8 @@
 
 package org.apache.comet.parquet
 
-import scala.collection.JavaConverters
 import scala.collection.mutable
+import scala.jdk.CollectionConverters._
 
 import org.apache.parquet.filter2.predicate.{FilterApi, FilterPredicate}
 import org.apache.parquet.hadoop.ParquetInputFormat
@@ -139,7 +139,7 @@ case class CometParquetPartitionReaderFactory(
         datetimeRebaseSpec.mode == CORRECTED,
         partitionSchema,
         file.partitionValues,
-        JavaConverters.mapAsJavaMap(metrics))
+        metrics.asJava)
       val taskContext = Option(TaskContext.get)
       taskContext.foreach(_.addTaskCompletionListener[Unit](_ => cometReader.close()))
       return cometReader
diff --git a/spark/src/main/scala/org/apache/comet/parquet/CometParquetScan.scala b/spark/src/main/scala/org/apache/comet/parquet/CometParquetScan.scala
index b04d6ebc1d..3cb01c08ea 100644
--- a/spark/src/main/scala/org/apache/comet/parquet/CometParquetScan.scala
+++ b/spark/src/main/scala/org/apache/comet/parquet/CometParquetScan.scala
@@ -19,7 +19,7 @@
 
 package org.apache.comet.parquet
 
-import scala.collection.JavaConverters.mapAsScalaMapConverter
+import scala.jdk.CollectionConverters._
 
 import org.apache.hadoop.conf.Configuration
 import org.apache.spark.sql.SparkSession
diff --git a/spark/src/main/scala/org/apache/comet/parquet/ParquetFilters.scala b/spark/src/main/scala/org/apache/comet/parquet/ParquetFilters.scala
index 65157014db..dbc3e17f83 100644
--- a/spark/src/main/scala/org/apache/comet/parquet/ParquetFilters.scala
+++ b/spark/src/main/scala/org/apache/comet/parquet/ParquetFilters.scala
@@ -26,8 +26,7 @@ import java.sql.{Date, Timestamp}
 import java.time.{Duration, Instant, LocalDate, Period}
 import java.util.Locale
 
-import scala.collection.JavaConverters._
-import scala.collection.JavaConverters.asScalaBufferConverter
+import scala.jdk.CollectionConverters._
 
 import org.apache.parquet.column.statistics.{Statistics => ParquetStatistics}
 import org.apache.parquet.filter2.predicate._
diff --git a/spark/src/main/scala/org/apache/comet/rules/CometScanRule.scala b/spark/src/main/scala/org/apache/comet/rules/CometScanRule.scala
index cee2764b0b..cbca7304d2 100644
--- a/spark/src/main/scala/org/apache/comet/rules/CometScanRule.scala
+++ b/spark/src/main/scala/org/apache/comet/rules/CometScanRule.scala
@@ -21,8 +21,9 @@ package org.apache.comet.rules
 
 import java.net.URI
 
-import scala.collection.{mutable, JavaConverters}
+import scala.collection.mutable
 import scala.collection.mutable.ListBuffer
+import scala.jdk.CollectionConverters._
 
 import org.apache.hadoop.conf.Configuration
 import org.apache.spark.internal.Logging
@@ -443,7 +444,7 @@ object CometScanRule extends Logging {
           // previously validated
           case _ =>
             try {
-              val objectStoreOptions = JavaConverters.mapAsJavaMap(objectStoreConfigMap)
+              val objectStoreOptions = objectStoreConfigMap.asJava
               Native.validateObjectStoreConfig(filePath, objectStoreOptions)
             } catch {
               case e: CometNativeException =>
diff --git a/spark/src/main/scala/org/apache/comet/serde/CometProject.scala b/spark/src/main/scala/org/apache/comet/serde/CometProject.scala
index ad48ef27f8..651aa8fefd 100644
--- a/spark/src/main/scala/org/apache/comet/serde/CometProject.scala
+++ b/spark/src/main/scala/org/apache/comet/serde/CometProject.scala
@@ -19,7 +19,7 @@
 
 package org.apache.comet.serde
 
-import scala.collection.JavaConverters._
+import scala.jdk.CollectionConverters._
 
 import org.apache.spark.sql.execution.ProjectExec
 
diff --git a/spark/src/main/scala/org/apache/comet/serde/CometSort.scala b/spark/src/main/scala/org/apache/comet/serde/CometSort.scala
index 5229c76011..2dec25c0dd 100644
--- a/spark/src/main/scala/org/apache/comet/serde/CometSort.scala
+++ b/spark/src/main/scala/org/apache/comet/serde/CometSort.scala
@@ -19,7 +19,7 @@
 
 package org.apache.comet.serde
 
-import scala.collection.JavaConverters._
+import scala.jdk.CollectionConverters._
 
 import org.apache.spark.sql.execution.SortExec
 
diff --git a/spark/src/main/scala/org/apache/comet/serde/QueryPlanSerde.scala b/spark/src/main/scala/org/apache/comet/serde/QueryPlanSerde.scala
index 1742912d86..0f3b1bf1fd 100644
--- a/spark/src/main/scala/org/apache/comet/serde/QueryPlanSerde.scala
+++ b/spark/src/main/scala/org/apache/comet/serde/QueryPlanSerde.scala
@@ -19,8 +19,8 @@
 
 package org.apache.comet.serde
 
-import scala.collection.JavaConverters._
 import scala.collection.mutable.ListBuffer
+import scala.jdk.CollectionConverters._
 
 import org.apache.spark.internal.Logging
 import org.apache.spark.sql.catalyst.expressions._
diff --git a/spark/src/main/scala/org/apache/comet/serde/aggregates.scala b/spark/src/main/scala/org/apache/comet/serde/aggregates.scala
index 51c8951289..d6e129a3c7 100644
--- a/spark/src/main/scala/org/apache/comet/serde/aggregates.scala
+++ b/spark/src/main/scala/org/apache/comet/serde/aggregates.scala
@@ -19,7 +19,7 @@
 
 package org.apache.comet.serde
 
-import scala.collection.JavaConverters.asJavaIterableConverter
+import scala.jdk.CollectionConverters._
 
 import org.apache.spark.sql.catalyst.expressions.{Attribute, EvalMode}
 import org.apache.spark.sql.catalyst.expressions.aggregate.{AggregateExpression, Average, BitAndAgg, BitOrAgg, BitXorAgg, BloomFilterAggregate, CentralMomentAgg, Corr, Count, Covariance, CovPopulation, CovSample, First, Last, Max, Min, StddevPop, StddevSamp, Sum, VariancePop, VarianceSamp}
diff --git a/spark/src/main/scala/org/apache/comet/serde/conditional.scala b/spark/src/main/scala/org/apache/comet/serde/conditional.scala
index e4f76c101e..617043524b 100644
--- a/spark/src/main/scala/org/apache/comet/serde/conditional.scala
+++ b/spark/src/main/scala/org/apache/comet/serde/conditional.scala
@@ -19,7 +19,7 @@
 
 package org.apache.comet.serde
 
-import scala.collection.JavaConverters._
+import scala.jdk.CollectionConverters._
 
 import org.apache.spark.sql.catalyst.expressions.{Attribute, CaseWhen, Coalesce, Expression, If, IsNotNull}
 
diff --git a/spark/src/main/scala/org/apache/comet/serde/predicates.scala b/spark/src/main/scala/org/apache/comet/serde/predicates.scala
index f4e746c276..00a1fb6fa1 100644
--- a/spark/src/main/scala/org/apache/comet/serde/predicates.scala
+++ b/spark/src/main/scala/org/apache/comet/serde/predicates.scala
@@ -19,7 +19,7 @@
 
 package org.apache.comet.serde
 
-import scala.collection.JavaConverters._
+import scala.jdk.CollectionConverters._
 
 import org.apache.spark.sql.catalyst.expressions.{And, Attribute, EqualNullSafe, EqualTo, Expression, GreaterThan, GreaterThanOrEqual, In, InSet, IsNaN, IsNotNull, IsNull, LessThan, LessThanOrEqual, Literal, Not, Or}
 import org.apache.spark.sql.types.BooleanType
diff --git a/spark/src/main/scala/org/apache/comet/serde/structs.scala b/spark/src/main/scala/org/apache/comet/serde/structs.scala
index 1c25d87bb9..208b2e1262 100644
--- a/spark/src/main/scala/org/apache/comet/serde/structs.scala
+++ b/spark/src/main/scala/org/apache/comet/serde/structs.scala
@@ -19,7 +19,7 @@
 
 package org.apache.comet.serde
 
-import scala.collection.JavaConverters._
+import scala.jdk.CollectionConverters._
 
 import org.apache.spark.sql.catalyst.expressions.{Attribute, CreateNamedStruct, GetArrayStructFields, GetStructField, StructsToJson}
 import org.apache.spark.sql.types.{ArrayType, DataType, DataTypes, MapType, StructType}
diff --git a/spark/src/main/scala/org/apache/spark/sql/comet/CometColumnarToRowExec.scala b/spark/src/main/scala/org/apache/spark/sql/comet/CometColumnarToRowExec.scala
index 95e03ca69c..6d0a31236f 100644
--- a/spark/src/main/scala/org/apache/spark/sql/comet/CometColumnarToRowExec.scala
+++ b/spark/src/main/scala/org/apache/spark/sql/comet/CometColumnarToRowExec.scala
@@ -22,8 +22,8 @@ package org.apache.spark.sql.comet
 import java.util.UUID
 import java.util.concurrent.{Future, TimeoutException, TimeUnit}
 
-import scala.collection.JavaConverters._
 import scala.concurrent.Promise
+import scala.jdk.CollectionConverters._
 import scala.util.control.NonFatal
 
 import org.apache.spark.{broadcast, SparkException}
diff --git a/spark/src/main/scala/org/apache/spark/sql/comet/CometExecUtils.scala b/spark/src/main/scala/org/apache/spark/sql/comet/CometExecUtils.scala
index 63cf323a8c..fd97fe3fa2 100644
--- a/spark/src/main/scala/org/apache/spark/sql/comet/CometExecUtils.scala
+++ b/spark/src/main/scala/org/apache/spark/sql/comet/CometExecUtils.scala
@@ -19,7 +19,7 @@
 
 package org.apache.spark.sql.comet
 
-import scala.collection.JavaConverters.asJavaIterableConverter
+import scala.jdk.CollectionConverters._
 import scala.reflect.ClassTag
 
 import org.apache.spark.{Partition, SparkContext, TaskContext}
diff --git a/spark/src/main/scala/org/apache/spark/sql/comet/CometMetricNode.scala b/spark/src/main/scala/org/apache/spark/sql/comet/CometMetricNode.scala
index 6f8a533625..70027c2c40 100644
--- a/spark/src/main/scala/org/apache/spark/sql/comet/CometMetricNode.scala
+++ b/spark/src/main/scala/org/apache/spark/sql/comet/CometMetricNode.scala
@@ -19,7 +19,7 @@
 
 package org.apache.spark.sql.comet
 
-import scala.collection.JavaConverters._
+import scala.jdk.CollectionConverters._
 
 import org.apache.spark.SparkContext
 import org.apache.spark.internal.Logging
diff --git a/spark/src/main/scala/org/apache/spark/sql/comet/execution/shuffle/CometNativeShuffleWriter.scala b/spark/src/main/scala/org/apache/spark/sql/comet/execution/shuffle/CometNativeShuffleWriter.scala
index 5d772be403..018c9f7c10 100644
--- a/spark/src/main/scala/org/apache/spark/sql/comet/execution/shuffle/CometNativeShuffleWriter.scala
+++ b/spark/src/main/scala/org/apache/spark/sql/comet/execution/shuffle/CometNativeShuffleWriter.scala
@@ -22,7 +22,7 @@ package org.apache.spark.sql.comet.execution.shuffle
 import java.nio.{ByteBuffer, ByteOrder}
 import java.nio.file.{Files, Paths}
 
-import scala.collection.JavaConverters.asJavaIterableConverter
+import scala.jdk.CollectionConverters._
 
 import org.apache.spark.{SparkEnv, TaskContext}
 import org.apache.spark.internal.Logging
diff --git a/spark/src/main/scala/org/apache/spark/sql/comet/execution/shuffle/CometShuffleManager.scala b/spark/src/main/scala/org/apache/spark/sql/comet/execution/shuffle/CometShuffleManager.scala
index 1142c6af17..d733c00fe4 100644
--- a/spark/src/main/scala/org/apache/spark/sql/comet/execution/shuffle/CometShuffleManager.scala
+++ b/spark/src/main/scala/org/apache/spark/sql/comet/execution/shuffle/CometShuffleManager.scala
@@ -22,7 +22,7 @@ package org.apache.spark.sql.comet.execution.shuffle
 import java.util.Collections
 import java.util.concurrent.ConcurrentHashMap
 
-import scala.collection.JavaConverters._
+import scala.jdk.CollectionConverters._
 
 import org.apache.spark.ShuffleDependency
 import org.apache.spark.SparkConf
diff --git a/spark/src/test/scala/org/apache/comet/WithHdfsCluster.scala b/spark/src/test/scala/org/apache/comet/WithHdfsCluster.scala
index 49124d63e5..9f20644e7e 100644
--- a/spark/src/test/scala/org/apache/comet/WithHdfsCluster.scala
+++ b/spark/src/test/scala/org/apache/comet/WithHdfsCluster.scala
@@ -24,7 +24,7 @@ import java.net.InetAddress
 import java.nio.file.Files
 import java.util.UUID
 
-import scala.collection.JavaConverters._
+import scala.jdk.CollectionConverters._
 
 import org.apache.commons.io.FileUtils
 import org.apache.hadoop.conf.Configuration
diff --git a/spark/src/test/scala/org/apache/spark/sql/CometTPCDSQueryTestSuite.scala b/spark/src/test/scala/org/apache/spark/sql/CometTPCDSQueryTestSuite.scala
index 0c75fabe31..56f5bcfdee 100644
--- a/spark/src/test/scala/org/apache/spark/sql/CometTPCDSQueryTestSuite.scala
+++ b/spark/src/test/scala/org/apache/spark/sql/CometTPCDSQueryTestSuite.scala
@@ -22,7 +22,7 @@ package org.apache.spark.sql
 import java.io.File
 import java.nio.file.{Files, Paths}
 
-import scala.collection.JavaConverters._
+import scala.jdk.CollectionConverters._
 
 import org.apache.spark.{SparkConf, SparkContext}
 import org.apache.spark.sql.catalyst.util.{fileToString, resourceToString, stringToFile}
diff --git a/spark/src/test/scala/org/apache/spark/sql/CometTPCHQuerySuite.scala b/spark/src/test/scala/org/apache/spark/sql/CometTPCHQuerySuite.scala
index aee85bf8e7..e158af6335 100644
--- a/spark/src/test/scala/org/apache/spark/sql/CometTPCHQuerySuite.scala
+++ b/spark/src/test/scala/org/apache/spark/sql/CometTPCHQuerySuite.scala
@@ -22,7 +22,7 @@ package org.apache.spark.sql
 import java.io.File
 import java.nio.file.{Files, Paths}
 
-import scala.collection.JavaConverters._
+import scala.jdk.CollectionConverters._
 
 import org.apache.spark.{SparkConf, SparkContext}
 import org.apache.spark.internal.config.{MEMORY_OFFHEAP_ENABLED, MEMORY_OFFHEAP_SIZE}
diff --git a/spark/src/test/scala/org/apache/spark/sql/benchmark/CometReadBenchmark.scala b/spark/src/test/scala/org/apache/spark/sql/benchmark/CometReadBenchmark.scala
index fcfbf6074f..02b9ca5dce 100644
--- a/spark/src/test/scala/org/apache/spark/sql/benchmark/CometReadBenchmark.scala
+++ b/spark/src/test/scala/org/apache/spark/sql/benchmark/CometReadBenchmark.scala
@@ -21,7 +21,7 @@ package org.apache.spark.sql.benchmark
 
 import java.io.File
 
-import scala.collection.JavaConverters._
+import scala.jdk.CollectionConverters._
 import scala.util.Random
 
 import org.apache.hadoop.fs.Path
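For the Scala sources in this change the migration is purely an import swap: `scala.jdk.CollectionConverters._` supplies the same `.asJava`/`.asScala` extension syntax as the deprecated `scala.collection.JavaConverters._` (backed by scala-collection-compat on Scala 2.12), so call sites such as `metrics.asJava` stay untouched. One Java call site above, `CometUnsafeShuffleWriter.write`, converts a `java.util.Iterator` rather than a collection; a minimal, hypothetical sketch of that converter flavour (class and variable names are illustrative, not from this PR):

```java
import java.util.Arrays;
import java.util.Iterator;

import static scala.jdk.javaapi.CollectionConverters.asScala;

// Hypothetical demo class, not part of the Comet sources.
public class IteratorConversionDemo {
  public static void main(String[] args) {
    Iterator<String> javaIterator = Arrays.asList("x", "y", "z").iterator();

    // asScala(java.util.Iterator) wraps the Java iterator without copying;
    // advancing the Scala side advances the underlying Java iterator too.
    scala.collection.Iterator<String> scalaIterator = asScala(javaIterator);

    while (scalaIterator.hasNext()) {
      System.out.println(scalaIterator.next());
    }
  }
}
```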