@@ -105,7 +105,50 @@ object MimaExcludes {
    ProblemFilters.exclude[InheritedNewAbstractMethodProblem]("org.apache.spark.ml.param.shared.HasValidationIndicatorCol.validationIndicatorCol"),

    // [SPARK-23042] Use OneHotEncoderModel to encode labels in MultilayerPerceptronClassifier
-    ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.ml.classification.LabelConverter")
+    ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.ml.classification.LabelConverter"),
+
+    // [SPARK-21842][MESOS] Support Kerberos ticket renewal and creation in Mesos
+    ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.deploy.SparkHadoopUtil.getDateOfNextUpdate"),
+
+    // [SPARK-23366] Improve hot reading path in ReadAheadInputStream
+    ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.io.ReadAheadInputStream.this"),
+
+    // [SPARK-22941][CORE] Do not exit JVM when submit fails with in-process launcher.
+    ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.deploy.SparkSubmit.addJarToClasspath"),
+    ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.deploy.SparkSubmit.mergeFileLists"),
+    ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.deploy.SparkSubmit.prepareSubmitEnvironment$default$2"),
+
+    // Data Source V2 API changes
+    // TODO: they are unstable APIs and should not be tracked by mima.
+    ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.sources.v2.ReadSupportWithSchema"),
+    ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.sql.sources.v2.reader.SupportsScanColumnarBatch.createDataReaderFactories"),
+    ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.sql.sources.v2.reader.SupportsScanColumnarBatch.createBatchDataReaderFactories"),
+    ProblemFilters.exclude[ReversedMissingMethodProblem]("org.apache.spark.sql.sources.v2.reader.SupportsScanColumnarBatch.planBatchInputPartitions"),
+    ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.sources.v2.reader.SupportsScanUnsafeRow"),
+    ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.sql.sources.v2.reader.DataSourceReader.createDataReaderFactories"),
+    ProblemFilters.exclude[ReversedMissingMethodProblem]("org.apache.spark.sql.sources.v2.reader.DataSourceReader.planInputPartitions"),
+    ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.sources.v2.reader.SupportsPushDownCatalystFilters"),
+    ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.sources.v2.reader.DataReader"),
+    ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.sql.sources.v2.reader.SupportsReportStatistics.getStatistics"),
+    ProblemFilters.exclude[ReversedMissingMethodProblem]("org.apache.spark.sql.sources.v2.reader.SupportsReportStatistics.estimateStatistics"),
+    ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.sources.v2.reader.DataReaderFactory"),
+    ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.sources.v2.reader.streaming.ContinuousDataReader"),
+    ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.sources.v2.writer.SupportsWriteInternalRow"),
+    ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.sql.sources.v2.writer.DataWriterFactory.createDataWriter"),
+    ProblemFilters.exclude[ReversedMissingMethodProblem]("org.apache.spark.sql.sources.v2.writer.DataWriterFactory.createDataWriter"),
+
+    // Changes to HasRawPredictionCol.
+    ProblemFilters.exclude[InheritedNewAbstractMethodProblem]("org.apache.spark.ml.param.shared.HasRawPredictionCol.rawPredictionCol"),
+    ProblemFilters.exclude[InheritedNewAbstractMethodProblem]("org.apache.spark.ml.param.shared.HasRawPredictionCol.org$apache$spark$ml$param$shared$HasRawPredictionCol$_setter_$rawPredictionCol_="),
+    ProblemFilters.exclude[InheritedNewAbstractMethodProblem]("org.apache.spark.ml.param.shared.HasRawPredictionCol.getRawPredictionCol"),
+
+    // [SPARK-15526][ML][FOLLOWUP] Make JPMML provided scope to avoid including unshaded JARs
+    (problem: Problem) => problem match {
+      case MissingClassProblem(cls) =>
+        !cls.fullName.startsWith("org.spark_project.jpmml") &&
+          !cls.fullName.startsWith("org.spark_project.dmg.pmml")
+      case _ => true
+    }
  )

  // Exclude rules for 2.3.x