diff --git a/auron-spark-tests/common/src/test/scala/org/apache/spark/sql/SparkQueryTestsBase.scala b/auron-spark-tests/common/src/test/scala/org/apache/spark/sql/SparkQueryTestsBase.scala
index 6c6d839a2..8498edcde 100644
--- a/auron-spark-tests/common/src/test/scala/org/apache/spark/sql/SparkQueryTestsBase.scala
+++ b/auron-spark-tests/common/src/test/scala/org/apache/spark/sql/SparkQueryTestsBase.scala
@@ -162,7 +162,7 @@ object AuronQueryTestUtil extends Assertions {
private def genError(
expectedAnswer: Seq[Row],
sparkAnswer: Seq[Row],
- isSorted: Boolean = false): String = {
+ isSorted: Boolean): String = {
val getRowType: Option[Row] => String = row =>
row
.map(row =>
diff --git a/auron-spark-tests/spark33/src/test/scala/org/apache/spark/sql/AuronDataFrameSuite.scala b/auron-spark-tests/spark33/src/test/scala/org/apache/spark/sql/AuronDataFrameSuite.scala
index 74e27698e..b5374551d 100644
--- a/auron-spark-tests/spark33/src/test/scala/org/apache/spark/sql/AuronDataFrameSuite.scala
+++ b/auron-spark-tests/spark33/src/test/scala/org/apache/spark/sql/AuronDataFrameSuite.scala
@@ -186,22 +186,22 @@ class AuronDataFrameSuite extends DataFrameSuite with SparkQueryTestsBase {
val join = df.join(df, "id")
checkAnswer(join, df)
val shuffleCount = collect(join.queryExecution.executedPlan) {
- case e: NativeShuffleExchangeExec =>
+ case _: NativeShuffleExchangeExec =>
true
}.size
assert(shuffleCount === 1, s"Expected 1 shuffle exchange, got $shuffleCount")
- assert(collect(join.queryExecution.executedPlan) { case e: ReusedExchangeExec =>
+ assert(collect(join.queryExecution.executedPlan) { case _: ReusedExchangeExec =>
true
}.size === 1)
val broadcasted = broadcast(join)
val join2 = join.join(broadcasted, "id").join(broadcasted, "id")
checkAnswer(join2, df)
val shuffleCount2 = collect(join2.queryExecution.executedPlan) {
- case e: NativeShuffleExchangeExec =>
+ case _: NativeShuffleExchangeExec =>
true
}.size
assert(shuffleCount2 == 1, s"Expected 1 shuffle exchange in join2, got $shuffleCount2")
- assert(collect(join2.queryExecution.executedPlan) { case e: ReusedExchangeExec =>
+ assert(collect(join2.queryExecution.executedPlan) { case _: ReusedExchangeExec =>
true
}.size == 4)
}
diff --git a/auron-spark-ui/src/main/scala/org/apache/spark/sql/execution/ui/AuronSQLAppStatusListener.scala b/auron-spark-ui/src/main/scala/org/apache/spark/sql/execution/ui/AuronSQLAppStatusListener.scala
index 0da16d4fd..651234ac5 100644
--- a/auron-spark-ui/src/main/scala/org/apache/spark/sql/execution/ui/AuronSQLAppStatusListener.scala
+++ b/auron-spark-ui/src/main/scala/org/apache/spark/sql/execution/ui/AuronSQLAppStatusListener.scala
@@ -16,6 +16,8 @@
*/
package org.apache.spark.sql.execution.ui
+import scala.annotation.nowarn
+
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.internal.Logging
import org.apache.spark.scheduler.{SparkListener, SparkListenerEvent}
@@ -23,6 +25,7 @@ import org.apache.spark.status.ElementTrackingStore
import org.apache.auron.spark.ui.AuronBuildInfoEvent
+@nowarn("cat=unused") // constructor param `conf` is currently unread; kept for API stability — drop this @nowarn once conf is used
class AuronSQLAppStatusListener(conf: SparkConf, kvstore: ElementTrackingStore)
extends SparkListener
with Logging {
diff --git a/pom.xml b/pom.xml
index f59ee507d..405b34742 100644
--- a/pom.xml
+++ b/pom.xml
@@ -379,7 +379,10 @@
${javaVersion}
${scalaLongVersion}
+ -deprecation
+ -feature
-Ywarn-unused
+ -Xfatal-warnings
@@ -927,11 +930,25 @@
scala-maven-plugin
+ -deprecation
+ -feature
-Ywarn-unused
-Ymacro-annotations
+ -Xfatal-warnings
+
+ -Wconf:cat=deprecation:wv,any:e
+ -Wconf:cat=other-nullary-override:s
+ -Wconf:msg=^(?=.*?method|value|type|object|trait|inheritance)(?=.*?deprecated)(?=.*?since 2.13).+$:s
+ -Wconf:msg=Auto-application to \`\(\)\` is deprecated:s
+ -Wconf:msg=object JavaConverters in package collection is deprecated:s
+ -Wconf:cat=unchecked&msg=outer reference:s
+ -Wconf:cat=unchecked&msg=eliminated by erasure:s
+ -Wconf:cat=unused-nowarn:s
+ -Wconf:msg=early initializers are deprecated:s
+ -Wconf:cat=other-match-analysis:s
diff --git a/spark-extension-shims-spark/src/main/scala/org/apache/spark/sql/auron/InterceptedValidateSparkPlan.scala b/spark-extension-shims-spark/src/main/scala/org/apache/spark/sql/auron/InterceptedValidateSparkPlan.scala
index 95aefa65f..f90e4865a 100644
--- a/spark-extension-shims-spark/src/main/scala/org/apache/spark/sql/auron/InterceptedValidateSparkPlan.scala
+++ b/spark-extension-shims-spark/src/main/scala/org/apache/spark/sql/auron/InterceptedValidateSparkPlan.scala
@@ -16,6 +16,8 @@
*/
package org.apache.spark.sql.auron
+import scala.annotation.nowarn
+
import org.apache.spark.internal.Logging
import org.apache.spark.sql.execution.SparkPlan
@@ -71,6 +73,7 @@ object InterceptedValidateSparkPlan extends Logging {
}
}
+ @nowarn("cat=unused") // `plan` is intentionally unread: this Spark 3.0/3.1 stub only throws UnsupportedOperationException
@sparkver("3.0 / 3.1")
def validate(plan: SparkPlan): Unit = {
throw new UnsupportedOperationException("validate is not supported in spark 3.0.3 or 3.1.3")
diff --git a/spark-extension-shims-spark/src/main/scala/org/apache/spark/sql/auron/ShimsImpl.scala b/spark-extension-shims-spark/src/main/scala/org/apache/spark/sql/auron/ShimsImpl.scala
index 839f5655d..c3a1861de 100644
--- a/spark-extension-shims-spark/src/main/scala/org/apache/spark/sql/auron/ShimsImpl.scala
+++ b/spark-extension-shims-spark/src/main/scala/org/apache/spark/sql/auron/ShimsImpl.scala
@@ -19,6 +19,7 @@ package org.apache.spark.sql.auron
import java.io.File
import java.util.UUID
+import scala.annotation.nowarn
import scala.collection.mutable
import org.apache.commons.lang3.reflect.FieldUtils
@@ -284,6 +285,7 @@ class ShimsImpl extends Shims with Logging {
child: SparkPlan): NativeGenerateBase =
NativeGenerateExec(generator, requiredChildOutput, outer, generatorOutput, child)
+ @sparkver("3.4 / 3.5")
private def effectiveLimit(rawLimit: Int): Int =
if (rawLimit == -1) Int.MaxValue else rawLimit
@@ -989,6 +991,7 @@ class ShimsImpl extends Shims with Logging {
}
}
+ @nowarn("cat=unused") // Some params temporarily unused
@sparkver("3.4 / 3.5")
private def convertPromotePrecision(
e: Expression,
@@ -1021,6 +1024,7 @@ class ShimsImpl extends Shims with Logging {
}
}
+ @nowarn("cat=unused") // Some params temporarily unused
@sparkver("3.0 / 3.1 / 3.2")
private def convertBloomFilterAgg(agg: AggregateFunction): Option[pb.PhysicalAggExprNode] = None
@@ -1047,6 +1051,7 @@ class ShimsImpl extends Shims with Logging {
}
}
+ @nowarn("cat=unused") // Some params temporarily unused
@sparkver("3.0 / 3.1 / 3.2")
private def convertBloomFilterMightContain(
e: Expression,
diff --git a/spark-extension-shims-spark/src/main/scala/org/apache/spark/sql/execution/auron/shuffle/AuronBlockStoreShuffleReader.scala b/spark-extension-shims-spark/src/main/scala/org/apache/spark/sql/execution/auron/shuffle/AuronBlockStoreShuffleReader.scala
index e08d8490e..14ebba484 100644
--- a/spark-extension-shims-spark/src/main/scala/org/apache/spark/sql/execution/auron/shuffle/AuronBlockStoreShuffleReader.scala
+++ b/spark-extension-shims-spark/src/main/scala/org/apache/spark/sql/execution/auron/shuffle/AuronBlockStoreShuffleReader.scala
@@ -37,6 +37,9 @@ class AuronBlockStoreShuffleReader[K, C](
extends AuronBlockStoreShuffleReaderBase[K, C](handle, context)
with Logging {
+ // Touch mapOutputTracker to suppress -Xfatal-warnings (used in Spark 3.2+, unused in 3.0/3.1)
+ private val _ = mapOutputTracker
+
override def readBlocks(): Iterator[InputStream] = {
@sparkver("3.2 / 3.3 / 3.4 / 3.5")
def fetchIterator = new ShuffleBlockFetcherIterator(
diff --git a/spark-extension-shims-spark/src/main/scala/org/apache/spark/sql/execution/auron/shuffle/AuronRssShuffleManagerBase.scala b/spark-extension-shims-spark/src/main/scala/org/apache/spark/sql/execution/auron/shuffle/AuronRssShuffleManagerBase.scala
index 98863bf6a..af92b0e8b 100644
--- a/spark-extension-shims-spark/src/main/scala/org/apache/spark/sql/execution/auron/shuffle/AuronRssShuffleManagerBase.scala
+++ b/spark-extension-shims-spark/src/main/scala/org/apache/spark/sql/execution/auron/shuffle/AuronRssShuffleManagerBase.scala
@@ -16,6 +16,8 @@
*/
package org.apache.spark.sql.execution.auron.shuffle
+import scala.annotation.nowarn
+
import org.apache.spark.{ShuffleDependency, SparkConf, TaskContext}
import org.apache.spark.internal.Logging
import org.apache.spark.shuffle._
@@ -23,6 +25,7 @@ import org.apache.spark.sql.execution.auron.shuffle.AuronShuffleDependency.isArr
import org.apache.auron.sparkver
+@nowarn("cat=unused") // _conf temporarily unused
abstract class AuronRssShuffleManagerBase(_conf: SparkConf) extends ShuffleManager with Logging {
override def registerShuffle[K, V, C](
shuffleId: Int,
diff --git a/spark-extension-shims-spark/src/main/scala/org/apache/spark/sql/execution/joins/auron/plan/NativeShuffledHashJoinExecProvider.scala b/spark-extension-shims-spark/src/main/scala/org/apache/spark/sql/execution/joins/auron/plan/NativeShuffledHashJoinExecProvider.scala
index 0236dd260..5f763eeca 100644
--- a/spark-extension-shims-spark/src/main/scala/org/apache/spark/sql/execution/joins/auron/plan/NativeShuffledHashJoinExecProvider.scala
+++ b/spark-extension-shims-spark/src/main/scala/org/apache/spark/sql/execution/joins/auron/plan/NativeShuffledHashJoinExecProvider.scala
@@ -16,6 +16,8 @@
*/
package org.apache.spark.sql.execution.joins.auron.plan
+import scala.annotation.nowarn
+
import org.apache.spark.sql.auron.join.JoinBuildSides.JoinBuildSide
import org.apache.spark.sql.catalyst.expressions.Expression
import org.apache.spark.sql.catalyst.plans.JoinType
@@ -80,6 +82,7 @@ case object NativeShuffledHashJoinExecProvider {
NativeShuffledHashJoinExec(left, right, leftKeys, rightKeys, joinType, buildSide, isSkewJoin)
}
+ @nowarn("cat=unused") // Some params temporarily unused
@sparkver("3.1")
def provide(
left: SparkPlan,
@@ -127,6 +130,7 @@ case object NativeShuffledHashJoinExecProvider {
NativeShuffledHashJoinExec(left, right, leftKeys, rightKeys, joinType, buildSide)
}
+ @nowarn("cat=unused") // Some params temporarily unused
@sparkver("3.0")
def provide(
left: SparkPlan,
diff --git a/spark-extension-shims-spark/src/test/scala/org/apache/auron/AuronFunctionSuite.scala b/spark-extension-shims-spark/src/test/scala/org/apache/auron/AuronFunctionSuite.scala
index c7e2f6a9f..ae1d24320 100644
--- a/spark-extension-shims-spark/src/test/scala/org/apache/auron/AuronFunctionSuite.scala
+++ b/spark-extension-shims-spark/src/test/scala/org/apache/auron/AuronFunctionSuite.scala
@@ -278,8 +278,6 @@ class AuronFunctionSuite extends AuronQueryTest with BaseAuronSQLSuite {
val dateTimeStampMin = format.parse(dateStringMin).getTime
val dateTimeStampMax = format.parse(dateStringMax).getTime
format = new SimpleDateFormat("yyyy-MM-dd")
- val dateString = "2015-01-01"
- val date = format.parse(dateString)
val functions =
s"""
@@ -320,8 +318,6 @@ class AuronFunctionSuite extends AuronQueryTest with BaseAuronSQLSuite {
val dateTimeStampMin = format.parse(dateStringMin).getTime
val dateTimeStampMax = format.parse(dateStringMax).getTime
format = new SimpleDateFormat("yyyy-MM-dd")
- val dateString = "2015-07-01"
- val date = format.parse(dateString)
val functions =
s"""
diff --git a/spark-extension-shims-spark/src/test/scala/org/apache/auron/AuronQuerySuite.scala b/spark-extension-shims-spark/src/test/scala/org/apache/auron/AuronQuerySuite.scala
index 349b489aa..3e9909789 100644
--- a/spark-extension-shims-spark/src/test/scala/org/apache/auron/AuronQuerySuite.scala
+++ b/spark-extension-shims-spark/src/test/scala/org/apache/auron/AuronQuerySuite.scala
@@ -204,18 +204,10 @@ class AuronQuerySuite extends AuronQueryTest with BaseAuronSQLSuite with AuronSQ
.toDF("c1", "c2")
.write
.orc(path)
- val correctAnswer = Seq(Row(1, 2), Row(3, 4), Row(5, 6), Row(null, null))
checkSparkAnswerAndOperator(() => spark.read.orc(path))
withTable("t") {
sql(s"CREATE EXTERNAL TABLE t(c3 INT, c2 INT) USING ORC LOCATION '$path'")
-
- val expected = if (forcePositionalEvolution) {
- correctAnswer
- } else {
- Seq(Row(null, 2), Row(null, 4), Row(null, 6), Row(null, null))
- }
-
checkSparkAnswerAndOperator(() => spark.table("t"))
}
}
@@ -236,7 +228,6 @@ class AuronQuerySuite extends AuronQueryTest with BaseAuronSQLSuite with AuronSQ
.write
.partitionBy("p")
.orc(path)
- val correctAnswer = Seq(Row(1, 2, 1), Row(3, 4, 2), Row(5, 6, 3), Row(null, null, 4))
checkSparkAnswerAndOperator(() => spark.read.orc(path))
withTable("t") {
@@ -247,12 +238,6 @@ class AuronQuerySuite extends AuronQueryTest with BaseAuronSQLSuite with AuronSQ
|LOCATION '$path'
|""".stripMargin)
sql("MSCK REPAIR TABLE t")
- if (forcePositionalEvolution) {
- correctAnswer
- } else {
- Seq(Row(null, 2, 1), Row(null, 4, 2), Row(null, 6, 3), Row(null, null, 4))
- }
-
checkSparkAnswerAndOperator(() => spark.table("t"))
}
}
diff --git a/spark-extension/src/main/scala/org/apache/spark/sql/auron/AuronCallNativeWrapper.scala b/spark-extension/src/main/scala/org/apache/spark/sql/auron/AuronCallNativeWrapper.scala
index bf0918c40..b4028c1e2 100644
--- a/spark-extension/src/main/scala/org/apache/spark/sql/auron/AuronCallNativeWrapper.scala
+++ b/spark-extension/src/main/scala/org/apache/spark/sql/auron/AuronCallNativeWrapper.scala
@@ -22,6 +22,7 @@ import java.nio.file.Files
import java.nio.file.StandardCopyOption
import java.util.concurrent.atomic.AtomicReference
+import scala.annotation.nowarn
import scala.collection.mutable.ArrayBuffer
import org.apache.arrow.c.ArrowArray
@@ -53,6 +54,7 @@ import org.apache.auron.protobuf.TaskDefinition
* This class has been deprecated and migrated to {@link
* org.apache.auron.jni.AuronCallNativeWrapper}. Will be removed in the future.
*/
+@nowarn("cat=deprecation") // JniBridge is temporarily used (deprecated)
@Deprecated
case class AuronCallNativeWrapper(
nativePlan: PhysicalPlanNode,
@@ -193,6 +195,7 @@ case class AuronCallNativeWrapper(
}
}
+@nowarn("cat=deprecation") // JniBridge is temporarily used (deprecated)
object AuronCallNativeWrapper extends Logging {
def initNative(): Unit = {
lazyInitNative
diff --git a/spark-extension/src/main/scala/org/apache/spark/sql/auron/NativeConverters.scala b/spark-extension/src/main/scala/org/apache/spark/sql/auron/NativeConverters.scala
index f88c03f29..8b48c39aa 100644
--- a/spark-extension/src/main/scala/org/apache/spark/sql/auron/NativeConverters.scala
+++ b/spark-extension/src/main/scala/org/apache/spark/sql/auron/NativeConverters.scala
@@ -1010,12 +1010,12 @@ object NativeConverters extends Logging {
val children = e.children.map(Cast(_, e.dataType))
buildScalarFunction(pb.ScalarFunction.Coalesce, children, e.dataType)
- case e @ StringLPad(str, len, pad) =>
+ case _ @StringLPad(str, len, pad) =>
buildScalarFunction(
pb.ScalarFunction.Lpad,
Seq(str, castIfNecessary(len, LongType), pad),
StringType)
- case e @ StringRPad(str, len, pad) =>
+ case _ @StringRPad(str, len, pad) =>
buildScalarFunction(
pb.ScalarFunction.Rpad,
Seq(str, castIfNecessary(len, LongType), pad),
diff --git a/spark-extension/src/main/scala/org/apache/spark/sql/auron/NativeHelper.scala b/spark-extension/src/main/scala/org/apache/spark/sql/auron/NativeHelper.scala
index 3ae7669ee..e16656471 100644
--- a/spark-extension/src/main/scala/org/apache/spark/sql/auron/NativeHelper.scala
+++ b/spark-extension/src/main/scala/org/apache/spark/sql/auron/NativeHelper.scala
@@ -100,7 +100,7 @@ object NativeHelper extends Logging {
if (nativePlan == null) {
return Iterator.empty
}
- var auronCallNativeWrapper = new org.apache.auron.jni.AuronCallNativeWrapper(
+ val auronCallNativeWrapper = new org.apache.auron.jni.AuronCallNativeWrapper(
ROOT_ALLOCATOR,
nativePlan,
metrics,
diff --git a/spark-extension/src/main/scala/org/apache/spark/sql/auron/Shims.scala b/spark-extension/src/main/scala/org/apache/spark/sql/auron/Shims.scala
index 85ab65d57..8d8175219 100644
--- a/spark-extension/src/main/scala/org/apache/spark/sql/auron/Shims.scala
+++ b/spark-extension/src/main/scala/org/apache/spark/sql/auron/Shims.scala
@@ -18,6 +18,8 @@ package org.apache.spark.sql.auron
import java.io.File
+import scala.annotation.nowarn
+
import org.apache.spark.ShuffleDependency
import org.apache.spark.SparkContext
import org.apache.spark.TaskContext
@@ -272,6 +274,7 @@ abstract class Shims {
def getMinPartitionNum(sparkSession: SparkSession): Int
+ @nowarn("cat=unused") // Some params temporarily unused
def postTransform(plan: SparkPlan, sc: SparkContext): Unit = {}
def getAdaptiveInputPlan(exec: AdaptiveSparkPlanExec): SparkPlan
diff --git a/spark-extension/src/main/scala/org/apache/spark/sql/execution/auron/columnar/AuronArrowColumnVector.scala b/spark-extension/src/main/scala/org/apache/spark/sql/execution/auron/columnar/AuronArrowColumnVector.scala
index d8be57e61..3dbde795c 100644
--- a/spark-extension/src/main/scala/org/apache/spark/sql/execution/auron/columnar/AuronArrowColumnVector.scala
+++ b/spark-extension/src/main/scala/org/apache/spark/sql/execution/auron/columnar/AuronArrowColumnVector.scala
@@ -16,6 +16,8 @@
*/
package org.apache.spark.sql.execution.auron.columnar
+import scala.annotation.nowarn
+
import org.apache.arrow.vector.BigIntVector
import org.apache.arrow.vector.BitVector
import org.apache.arrow.vector.DateDayVector
@@ -143,6 +145,7 @@ class AuronArrowColumnVector(vector: ValueVector)
}
object AuronArrowColumnVector {
+ @nowarn("cat=unused") // placeholder base accessor: per-type getter methods are intentionally left unimplemented by subclasses that don't need them
abstract private class ArrowVectorAccessor(private val vector: ValueVector) {
def isNullAt(rowId: Int): Boolean =
if (vector.getValueCount > 0 && vector.getValidityBuffer.capacity == 0) false
diff --git a/spark-extension/src/main/scala/org/apache/spark/sql/execution/auron/plan/NativeParquetSinkBase.scala b/spark-extension/src/main/scala/org/apache/spark/sql/execution/auron/plan/NativeParquetSinkBase.scala
index 260ee249e..2bd50523e 100644
--- a/spark-extension/src/main/scala/org/apache/spark/sql/execution/auron/plan/NativeParquetSinkBase.scala
+++ b/spark-extension/src/main/scala/org/apache/spark/sql/execution/auron/plan/NativeParquetSinkBase.scala
@@ -21,6 +21,7 @@ import java.security.PrivilegedExceptionAction
import java.util
import java.util.UUID
+import scala.annotation.nowarn
import scala.collection.JavaConverters._
import org.apache.hadoop.conf.Configuration
@@ -80,7 +81,7 @@ abstract class NativeParquetSinkBase(
hiveQlTable.getMetadata)
val tableSchema = table.schema
val hadoopConf = newHadoopConf(tableDesc)
- val job = new Job(hadoopConf)
+ val job = Job.getInstance(hadoopConf)
val parquetFileFormat = new ParquetFileFormat()
parquetFileFormat.prepareWrite(sparkSession, job, Map(), tableSchema)
@@ -114,7 +115,7 @@ abstract class NativeParquetSinkBase(
})
// init parquet schema
- val job = new Job(new JobConf(serializableConf.value))
+ val job = Job.getInstance(new JobConf(serializableConf.value))
val tableProperties = tableDesc.getProperties
val columnNameProperty: String = tableProperties.getProperty(IOConstants.COLUMNS)
val columnTypeProperty: String = tableProperties.getProperty(IOConstants.COLUMNS_TYPES)
@@ -157,6 +158,7 @@ abstract class NativeParquetSinkBase(
friendlyName = "NativeRDD.ParquetSink")
}
+ @nowarn("cat=unused") // _tableDesc temporarily unused
protected def newHadoopConf(_tableDesc: TableDesc): Configuration =
sparkSession.sessionState.newHadoopConf()
}
diff --git a/spark-extension/src/main/scala/org/apache/spark/sql/execution/auron/plan/NativeShuffleExchangeBase.scala b/spark-extension/src/main/scala/org/apache/spark/sql/execution/auron/plan/NativeShuffleExchangeBase.scala
index 552dcff37..8b6aa04dc 100644
--- a/spark-extension/src/main/scala/org/apache/spark/sql/execution/auron/plan/NativeShuffleExchangeBase.scala
+++ b/spark-extension/src/main/scala/org/apache/spark/sql/execution/auron/plan/NativeShuffleExchangeBase.scala
@@ -18,6 +18,7 @@ package org.apache.spark.sql.execution.auron.plan
import java.util.UUID
+import scala.annotation.nowarn
import scala.collection.JavaConverters._
import scala.collection.mutable
import scala.collection.mutable.ArrayBuffer
@@ -308,6 +309,7 @@ abstract class NativeShuffleExchangeBase(
dependency
}
+ @nowarn("cat=unused") // Some params temporarily unused
private def rangePartitioningBound[K: Ordering: ClassTag, V](
partitions: Int,
rdd: RDD[_ <: Product2[K, V]],
diff --git a/spark-extension/src/main/scala/org/apache/spark/sql/execution/auron/plan/NativeShuffledHashJoinBase.scala b/spark-extension/src/main/scala/org/apache/spark/sql/execution/auron/plan/NativeShuffledHashJoinBase.scala
index 1f8a06c82..17dfbe258 100644
--- a/spark-extension/src/main/scala/org/apache/spark/sql/execution/auron/plan/NativeShuffledHashJoinBase.scala
+++ b/spark-extension/src/main/scala/org/apache/spark/sql/execution/auron/plan/NativeShuffledHashJoinBase.scala
@@ -82,6 +82,8 @@ abstract class NativeShuffledHashJoinBase(
private def nativeBuildSide = buildSide match {
case JoinBuildLeft => pb.JoinSide.LEFT_SIDE
case JoinBuildRight => pb.JoinSide.RIGHT_SIDE
+ case other =>
+ throw new IllegalArgumentException(s"Unknown Join buildSide: $other")
}
protected def rewriteKeyExprToLong(exprs: Seq[Expression]): Seq[Expression]
diff --git a/spark-extension/src/main/scala/org/apache/spark/sql/execution/auron/plan/NativeUnionBase.scala b/spark-extension/src/main/scala/org/apache/spark/sql/execution/auron/plan/NativeUnionBase.scala
index a62ea6f13..bf3a15499 100644
--- a/spark-extension/src/main/scala/org/apache/spark/sql/execution/auron/plan/NativeUnionBase.scala
+++ b/spark-extension/src/main/scala/org/apache/spark/sql/execution/auron/plan/NativeUnionBase.scala
@@ -70,7 +70,7 @@ abstract class NativeUnionBase(
val unionInputs = ArrayBuffer[(PhysicalPlanNode, Int)]()
partition match {
case p: UnionPartition[_] =>
- val rdds = unionRDD.asInstanceOf[UnionRDD[_]].rdds
+ val rdds = unionRDD.asInstanceOf[UnionRDD[Any]].rdds
val nativeRDD = rdds(p.parentRddIndex).asInstanceOf[NativeRDD]
val input = nativeRDD.nativePlan(p.parentPartition, taskContext)
for (childIndex <- rdds.indices) {
@@ -81,7 +81,7 @@ abstract class NativeUnionBase(
}
}
case p: PartitionerAwareUnionRDDPartition =>
- val rdds = unionRDD.asInstanceOf[PartitionerAwareUnionRDD[_]].rdds
+ val rdds = unionRDD.asInstanceOf[PartitionerAwareUnionRDD[Any]].rdds
for ((rdd, partition) <- rdds.zip(p.parents)) {
val nativeRDD = rdd.asInstanceOf[NativeRDD]
unionInputs.append((nativeRDD.nativePlan(partition, taskContext), partition.index))
diff --git a/spark-extension/src/main/scala/org/apache/spark/sql/execution/auron/shuffle/AuronBlockStoreShuffleReaderBase.scala b/spark-extension/src/main/scala/org/apache/spark/sql/execution/auron/shuffle/AuronBlockStoreShuffleReaderBase.scala
index 477c1b5e3..eaba3640e 100644
--- a/spark-extension/src/main/scala/org/apache/spark/sql/execution/auron/shuffle/AuronBlockStoreShuffleReaderBase.scala
+++ b/spark-extension/src/main/scala/org/apache/spark/sql/execution/auron/shuffle/AuronBlockStoreShuffleReaderBase.scala
@@ -182,5 +182,5 @@ trait BlockObject extends AutoCloseable {
def getFileLength: Long = throw new UnsupportedOperationException
def getByteBuffer: ByteBuffer = throw new UnsupportedOperationException
def getChannel: ReadableByteChannel = throw new UnsupportedOperationException
- def throwFetchFailed(errmsg: String): Unit = throw new UnsupportedOperationException
+ def throwFetchFailed(errmsg: String): Unit = throw new UnsupportedOperationException(errmsg)
}
diff --git a/spark-version-annotation-macros/src/main/scala/org/apache/auron/sparkver.scala b/spark-version-annotation-macros/src/main/scala/org/apache/auron/sparkver.scala
index 7d1e494d0..fd7d4ed91 100644
--- a/spark-version-annotation-macros/src/main/scala/org/apache/auron/sparkver.scala
+++ b/spark-version-annotation-macros/src/main/scala/org/apache/auron/sparkver.scala
@@ -16,8 +16,7 @@
*/
package org.apache.auron
-import scala.annotation.StaticAnnotation
-import scala.annotation.compileTimeOnly
+import scala.annotation.{compileTimeOnly, nowarn, StaticAnnotation}
import scala.language.experimental._
import scala.reflect.macros.whitebox
@@ -96,16 +95,19 @@ object sparkver {
}
}
+@nowarn("cat=unused") // 'vers' is used by macro
@compileTimeOnly("enable macro paradise to expand macro annotations")
final class sparkver(vers: String) extends StaticAnnotation {
def macroTransform(annottees: Any*): Any = macro sparkver.Macros.verEnable
}
+@nowarn("cat=unused") // 'vers' is used by macro
@compileTimeOnly("enable macro paradise to expand macro annotations")
final class sparkverEnableMembers(vers: String) extends StaticAnnotation {
def macroTransform(annottees: Any*): Any = macro sparkver.Macros.verEnableMembers
}
+@nowarn("cat=unused") // 'vers' is used by macro
@compileTimeOnly("enable macro paradise to expand macro annotations")
final class sparkverEnableOverride(vers: String) extends StaticAnnotation {
def macroTransform(annottees: Any*): Any = macro sparkver.Macros.verEnableOverride
diff --git a/thirdparty/auron-uniffle/src/main/scala/org/apache/spark/sql/execution/auron/shuffle/uniffle/AuronUniffleShuffleReader.scala b/thirdparty/auron-uniffle/src/main/scala/org/apache/spark/sql/execution/auron/shuffle/uniffle/AuronUniffleShuffleReader.scala
index 18745f5c7..c4b1ca314 100644
--- a/thirdparty/auron-uniffle/src/main/scala/org/apache/spark/sql/execution/auron/shuffle/uniffle/AuronUniffleShuffleReader.scala
+++ b/thirdparty/auron-uniffle/src/main/scala/org/apache/spark/sql/execution/auron/shuffle/uniffle/AuronUniffleShuffleReader.scala
@@ -20,6 +20,7 @@ import java.io.InputStream
import java.nio.ByteBuffer
import java.util
+import scala.annotation.nowarn
import scala.collection.AbstractIterator
import org.apache.commons.lang3.reflect.FieldUtils
@@ -40,6 +41,7 @@ import org.apache.uniffle.common.config.RssConf
import org.apache.uniffle.common.exception.RssException
import org.apache.uniffle.shaded.org.roaringbitmap.longlong.Roaring64NavigableMap
+@nowarn("cat=unused") // Some params temporarily unused
class AuronUniffleShuffleReader[K, C](
reader: RssShuffleReader[K, C],
handle: RssShuffleHandleWrapper[K, _, C],
@@ -207,6 +209,7 @@ class AuronUniffleShuffleReader[K, C](
}
}
+ @nowarn("cat=unused") // Some params temporarily unused
private class UniffleInputStream(
iterator: MultiPartitionIterator[_, _],
shuffleId: Int,