diff --git a/R/pkg/tests/fulltests/test_client.R b/R/pkg/tests/fulltests/test_client.R
index 9798627ffc551..b38067e534fc0 100644
--- a/R/pkg/tests/fulltests/test_client.R
+++ b/R/pkg/tests/fulltests/test_client.R
@@ -37,7 +37,7 @@ test_that("multiple packages don't produce a warning", {
test_that("sparkJars sparkPackages as character vectors", {
args <- generateSparkSubmitArgs("", "", c("one.jar", "two.jar", "three.jar"), "",
- c("com.databricks:spark-avro_2.12:2.0.1"))
+ c("com.databricks:spark-avro_2.13:2.0.1"))
expect_match(args, "--jars one.jar,two.jar,three.jar")
- expect_match(args, "--packages com.databricks:spark-avro_2.12:2.0.1")
+ expect_match(args, "--packages com.databricks:spark-avro_2.13:2.0.1")
})
diff --git a/common/kvstore/pom.xml b/common/kvstore/pom.xml
index 66e6bb473bf2f..d356958a3f701 100644
--- a/common/kvstore/pom.xml
+++ b/common/kvstore/pom.xml
@@ -21,12 +21,12 @@
<modelVersion>4.0.0</modelVersion>
<parent>
<groupId>org.apache.spark</groupId>
- <artifactId>spark-parent_2.12</artifactId>
+ <artifactId>spark-parent_2.13</artifactId>
<version>3.5.1</version>
<relativePath>../../pom.xml</relativePath>
</parent>

- <artifactId>spark-kvstore_2.12</artifactId>
+ <artifactId>spark-kvstore_2.13</artifactId>
<packaging>jar</packaging>
<name>Spark Project Local DB</name>
<url>https://spark.apache.org/</url>
diff --git a/common/network-common/pom.xml b/common/network-common/pom.xml
index 98897b4424ae0..4fe3fdd186b4c 100644
--- a/common/network-common/pom.xml
+++ b/common/network-common/pom.xml
@@ -21,12 +21,12 @@
<modelVersion>4.0.0</modelVersion>
<parent>
<groupId>org.apache.spark</groupId>
- <artifactId>spark-parent_2.12</artifactId>
+ <artifactId>spark-parent_2.13</artifactId>
<version>3.5.1</version>
<relativePath>../../pom.xml</relativePath>
</parent>

- <artifactId>spark-network-common_2.12</artifactId>
+ <artifactId>spark-network-common_2.13</artifactId>
<packaging>jar</packaging>
<name>Spark Project Networking</name>
<url>https://spark.apache.org/</url>
diff --git a/common/network-shuffle/pom.xml b/common/network-shuffle/pom.xml
index 44531ea54cd58..ce04c865a0ac1 100644
--- a/common/network-shuffle/pom.xml
+++ b/common/network-shuffle/pom.xml
@@ -21,12 +21,12 @@
<modelVersion>4.0.0</modelVersion>
<parent>
<groupId>org.apache.spark</groupId>
- <artifactId>spark-parent_2.12</artifactId>
+ <artifactId>spark-parent_2.13</artifactId>
<version>3.5.1</version>
<relativePath>../../pom.xml</relativePath>
</parent>

- <artifactId>spark-network-shuffle_2.12</artifactId>
+ <artifactId>spark-network-shuffle_2.13</artifactId>
<packaging>jar</packaging>
<name>Spark Project Shuffle Streaming Service</name>
<url>https://spark.apache.org/</url>
diff --git a/common/unsafe/pom.xml b/common/unsafe/pom.xml
index bf116a6ff12e7..449b24818bbe0 100644
--- a/common/unsafe/pom.xml
+++ b/common/unsafe/pom.xml
@@ -21,12 +21,12 @@
<modelVersion>4.0.0</modelVersion>
<parent>
<groupId>org.apache.spark</groupId>
- <artifactId>spark-parent_2.12</artifactId>
+ <artifactId>spark-parent_2.13</artifactId>
<version>3.5.1</version>
<relativePath>../../pom.xml</relativePath>
</parent>

- <artifactId>spark-unsafe_2.12</artifactId>
+ <artifactId>spark-unsafe_2.13</artifactId>
<packaging>jar</packaging>
<name>Spark Project Unsafe</name>
<url>https://spark.apache.org/</url>
diff --git a/connector/docker-integration-tests/pom.xml b/connector/docker-integration-tests/pom.xml
index 898e1f88c7783..b4971790164ff 100644
--- a/connector/docker-integration-tests/pom.xml
+++ b/connector/docker-integration-tests/pom.xml
@@ -21,12 +21,12 @@
<modelVersion>4.0.0</modelVersion>
<parent>
<groupId>org.apache.spark</groupId>
- <artifactId>spark-parent_2.12</artifactId>
+ <artifactId>spark-parent_2.13</artifactId>
<version>3.5.1</version>
<relativePath>../../pom.xml</relativePath>
</parent>

- <artifactId>spark-docker-integration-tests_2.12</artifactId>
+ <artifactId>spark-docker-integration-tests_2.13</artifactId>
<packaging>jar</packaging>
<name>Spark Project Docker Integration Tests</name>
<url>https://spark.apache.org/</url>
diff --git a/connector/kafka-0-10-sql/src/main/scala/org/apache/spark/sql/kafka010/JsonUtils.scala b/connector/kafka-0-10-sql/src/main/scala/org/apache/spark/sql/kafka010/JsonUtils.scala
index 6dd5af2389a81..4f1c76c7a7fe3 100644
--- a/connector/kafka-0-10-sql/src/main/scala/org/apache/spark/sql/kafka010/JsonUtils.scala
+++ b/connector/kafka-0-10-sql/src/main/scala/org/apache/spark/sql/kafka010/JsonUtils.scala
@@ -21,14 +21,14 @@ import scala.collection.mutable.HashMap
import scala.util.control.NonFatal
import org.apache.kafka.common.TopicPartition
-import org.json4s.NoTypeHints
+import org.json4s.{Formats, NoTypeHints}
import org.json4s.jackson.Serialization
/**
* Utilities for converting Kafka related objects to and from json.
*/
private object JsonUtils {
- private implicit val formats = Serialization.formats(NoTypeHints)
+ private implicit val formats: Formats = Serialization.formats(NoTypeHints)
/**
* Read TopicPartitions from json string
@@ -96,10 +96,8 @@ private object JsonUtils {
*/
def partitionOffsets(partitionOffsets: Map[TopicPartition, Long]): String = {
val result = new HashMap[String, HashMap[Int, Long]]()
- implicit val order = new Ordering[TopicPartition] {
- override def compare(x: TopicPartition, y: TopicPartition): Int = {
+ implicit val order: Ordering[TopicPartition] = (x: TopicPartition, y: TopicPartition) => {
Ordering.Tuple2[String, Int].compare((x.topic, x.partition), (y.topic, y.partition))
- }
}
val partitions = partitionOffsets.keySet.toSeq.sorted // sort for more determinism
partitions.foreach { tp =>
diff --git a/core/src/main/scala/org/apache/spark/deploy/FaultToleranceTest.scala b/core/src/main/scala/org/apache/spark/deploy/FaultToleranceTest.scala
index 7209e2c373ab1..d6a50ff84f562 100644
--- a/core/src/main/scala/org/apache/spark/deploy/FaultToleranceTest.scala
+++ b/core/src/main/scala/org/apache/spark/deploy/FaultToleranceTest.scala
@@ -30,6 +30,7 @@ import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration._
import scala.sys.process._
+import org.json4s.Formats
import org.json4s.jackson.JsonMethods
import org.apache.spark.{SparkConf, SparkContext}
@@ -340,7 +341,7 @@ private object FaultToleranceTest extends App with Logging {
private class TestMasterInfo(val ip: String, val dockerId: DockerId, val logFile: File)
extends Logging {
- implicit val formats = org.json4s.DefaultFormats
+ implicit val formats: Formats = org.json4s.DefaultFormats
var state: RecoveryState.Value = _
var liveWorkerIPs: List[String] = _
var numLiveApps = 0
@@ -383,7 +384,7 @@ private class TestMasterInfo(val ip: String, val dockerId: DockerId, val logFile
private class TestWorkerInfo(val ip: String, val dockerId: DockerId, val logFile: File)
extends Logging {
- implicit val formats = org.json4s.DefaultFormats
+ implicit val formats: Formats = org.json4s.DefaultFormats
logDebug("Created worker: " + this)
diff --git a/core/src/main/scala/org/apache/spark/deploy/StandaloneResourceUtils.scala b/core/src/main/scala/org/apache/spark/deploy/StandaloneResourceUtils.scala
index 641c5416cbb33..2e4e07b36cb64 100644
--- a/core/src/main/scala/org/apache/spark/deploy/StandaloneResourceUtils.scala
+++ b/core/src/main/scala/org/apache/spark/deploy/StandaloneResourceUtils.scala
@@ -23,7 +23,7 @@ import java.nio.file.Files
import scala.collection.mutable
import scala.util.control.NonFatal
-import org.json4s.{DefaultFormats, Extraction}
+import org.json4s.{DefaultFormats, Extraction, Formats}
import org.json4s.jackson.JsonMethods.{compact, render}
import org.apache.spark.SparkException
@@ -114,7 +114,7 @@ private[spark] object StandaloneResourceUtils extends Logging {
private def writeResourceAllocationJson[T](
allocations: Seq[T],
jsonFile: File): Unit = {
- implicit val formats = DefaultFormats
+ implicit val formats: Formats = DefaultFormats
val allocationJson = Extraction.decompose(allocations)
Files.write(jsonFile.toPath, compact(render(allocationJson)).getBytes())
}
diff --git a/core/src/main/scala/org/apache/spark/executor/CoarseGrainedExecutorBackend.scala b/core/src/main/scala/org/apache/spark/executor/CoarseGrainedExecutorBackend.scala
index 537522326fc78..f98a2ab974c2f 100644
--- a/core/src/main/scala/org/apache/spark/executor/CoarseGrainedExecutorBackend.scala
+++ b/core/src/main/scala/org/apache/spark/executor/CoarseGrainedExecutorBackend.scala
@@ -60,8 +60,6 @@ private[spark] class CoarseGrainedExecutorBackend(
import CoarseGrainedExecutorBackend._
- private implicit val formats = DefaultFormats
-
private[spark] val stopping = new AtomicBoolean(false)
var executor: Executor = null
@volatile var driver: Option[RpcEndpointRef] = None
diff --git a/core/src/main/scala/org/apache/spark/resource/ResourceInformation.scala b/core/src/main/scala/org/apache/spark/resource/ResourceInformation.scala
index 7f7bb36512d14..0201708468e5f 100644
--- a/core/src/main/scala/org/apache/spark/resource/ResourceInformation.scala
+++ b/core/src/main/scala/org/apache/spark/resource/ResourceInformation.scala
@@ -19,7 +19,7 @@ package org.apache.spark.resource
import scala.util.control.NonFatal
-import org.json4s.{DefaultFormats, Extraction, JValue}
+import org.json4s.{DefaultFormats, Extraction, Formats, JValue}
import org.json4s.jackson.JsonMethods._
import org.apache.spark.SparkException
@@ -69,7 +69,7 @@ private[spark] object ResourceInformation {
* Parses a JSON string into a [[ResourceInformation]] instance.
*/
def parseJson(json: String): ResourceInformation = {
- implicit val formats = DefaultFormats
+ implicit val formats: Formats = DefaultFormats
try {
parse(json).extract[ResourceInformationJson].toResourceInformation
} catch {
@@ -80,7 +80,7 @@ private[spark] object ResourceInformation {
}
def parseJson(json: JValue): ResourceInformation = {
- implicit val formats = DefaultFormats
+ implicit val formats: Formats = DefaultFormats
try {
json.extract[ResourceInformationJson].toResourceInformation
} catch {
diff --git a/core/src/main/scala/org/apache/spark/resource/ResourceUtils.scala b/core/src/main/scala/org/apache/spark/resource/ResourceUtils.scala
index d19f413598b58..095b015a28632 100644
--- a/core/src/main/scala/org/apache/spark/resource/ResourceUtils.scala
+++ b/core/src/main/scala/org/apache/spark/resource/ResourceUtils.scala
@@ -22,7 +22,7 @@ import java.util.Optional
import scala.util.control.NonFatal
-import org.json4s.DefaultFormats
+import org.json4s.{DefaultFormats, Formats}
import org.json4s.jackson.JsonMethods._
import org.apache.spark.{SparkConf, SparkException}
@@ -252,7 +252,7 @@ private[spark] object ResourceUtils extends Logging {
def parseAllocatedFromJsonFile(resourcesFile: String): Seq[ResourceAllocation] = {
withResourcesJson[ResourceAllocation](resourcesFile) { json =>
- implicit val formats = DefaultFormats
+ implicit val formats: Formats = DefaultFormats
parse(json).extract[Seq[ResourceAllocation]]
}
}
diff --git a/core/src/main/scala/org/apache/spark/status/AppStatusSource.scala b/core/src/main/scala/org/apache/spark/status/AppStatusSource.scala
index d19744db089ba..96dc5ac44b47a 100644
--- a/core/src/main/scala/org/apache/spark/status/AppStatusSource.scala
+++ b/core/src/main/scala/org/apache/spark/status/AppStatusSource.scala
@@ -31,7 +31,7 @@ private [spark] class JobDuration(val value: AtomicLong) extends Gauge[Long] {
private[spark] class AppStatusSource extends Source {
- override implicit val metricRegistry = new MetricRegistry()
+ override implicit val metricRegistry: MetricRegistry = new MetricRegistry()
override val sourceName = "appStatus"
diff --git a/core/src/main/scala/org/apache/spark/storage/BlockManagerMasterEndpoint.scala b/core/src/main/scala/org/apache/spark/storage/BlockManagerMasterEndpoint.scala
index 19de4544bea32..6bd01a655608f 100644
--- a/core/src/main/scala/org/apache/spark/storage/BlockManagerMasterEndpoint.scala
+++ b/core/src/main/scala/org/apache/spark/storage/BlockManagerMasterEndpoint.scala
@@ -23,7 +23,7 @@ import java.util.concurrent.TimeUnit
import scala.collection.JavaConverters._
import scala.collection.mutable
-import scala.concurrent.{ExecutionContext, Future, TimeoutException}
+import scala.concurrent.{ExecutionContext, ExecutionContextExecutorService, Future, TimeoutException}
import scala.util.Random
import scala.util.control.NonFatal
@@ -94,7 +94,7 @@ class BlockManagerMasterEndpoint(
private val askThreadPool =
ThreadUtils.newDaemonCachedThreadPool("block-manager-ask-thread-pool", 100)
- private implicit val askExecutionContext = ExecutionContext.fromExecutorService(askThreadPool)
+ private implicit val askExecutionContext: ExecutionContextExecutorService =
+   ExecutionContext.fromExecutorService(askThreadPool)
private val topologyMapper = {
val topologyMapperClassName = conf.get(
diff --git a/core/src/main/scala/org/apache/spark/storage/BlockManagerStorageEndpoint.scala b/core/src/main/scala/org/apache/spark/storage/BlockManagerStorageEndpoint.scala
index 476be80e67df3..775b25f59ad5c 100644
--- a/core/src/main/scala/org/apache/spark/storage/BlockManagerStorageEndpoint.scala
+++ b/core/src/main/scala/org/apache/spark/storage/BlockManagerStorageEndpoint.scala
@@ -17,7 +17,7 @@
package org.apache.spark.storage
-import scala.concurrent.{ExecutionContext, Future}
+import scala.concurrent.{ExecutionContext, ExecutionContextExecutorService, Future}
import org.apache.spark.{MapOutputTracker, SparkEnv}
import org.apache.spark.internal.Logging
@@ -38,7 +38,7 @@ class BlockManagerStorageEndpoint(
private val asyncThreadPool =
ThreadUtils.newDaemonCachedThreadPool("block-manager-storage-async-thread-pool", 100)
- private implicit val asyncExecutionContext = ExecutionContext.fromExecutorService(asyncThreadPool)
+ private implicit val asyncExecutionContext: ExecutionContextExecutorService =
+   ExecutionContext.fromExecutorService(asyncThreadPool)
// Operations that involve removing blocks may be slow and should be done asynchronously
override def receiveAndReply(context: RpcCallContext): PartialFunction[Any, Unit] = {
diff --git a/core/src/test/scala/org/apache/spark/ContextCleanerSuite.scala b/core/src/test/scala/org/apache/spark/ContextCleanerSuite.scala
index 5434e82c95b1b..ed67906a4f268 100644
--- a/core/src/test/scala/org/apache/spark/ContextCleanerSuite.scala
+++ b/core/src/test/scala/org/apache/spark/ContextCleanerSuite.scala
@@ -42,7 +42,7 @@ import org.apache.spark.storage._
abstract class ContextCleanerSuiteBase(val shuffleManager: Class[_] = classOf[SortShuffleManager])
extends SparkFunSuite with BeforeAndAfter with LocalSparkContext
{
- implicit val defaultTimeout = timeout(10.seconds)
+ implicit val defaultTimeout: PatienceConfiguration.Timeout = timeout(10.seconds)
val conf = new SparkConf()
.setMaster("local[2]")
.setAppName("ContextCleanerSuite")
diff --git a/core/src/test/scala/org/apache/spark/SparkContextSuite.scala b/core/src/test/scala/org/apache/spark/SparkContextSuite.scala
index 4145975741bc4..f69e316df5fe8 100644
--- a/core/src/test/scala/org/apache/spark/SparkContextSuite.scala
+++ b/core/src/test/scala/org/apache/spark/SparkContextSuite.scala
@@ -32,7 +32,7 @@ import org.apache.hadoop.io.{BytesWritable, LongWritable, Text}
import org.apache.hadoop.mapred.TextInputFormat
import org.apache.hadoop.mapreduce.lib.input.{TextInputFormat => NewTextInputFormat}
import org.apache.logging.log4j.{Level, LogManager}
-import org.json4s.{DefaultFormats, Extraction}
+import org.json4s.{DefaultFormats, Extraction, Formats}
import org.scalatest.concurrent.Eventually
import org.scalatest.matchers.must.Matchers._
@@ -923,7 +923,7 @@ class SparkContextSuite extends SparkFunSuite with LocalSparkContext with Eventu
val scriptPath = createTempScriptWithExpectedOutput(dir, "gpuDiscoveryScript",
"""{"name": "gpu","addresses":["5", "6"]}""")
- implicit val formats = DefaultFormats
+ implicit val formats: Formats = DefaultFormats
val gpusAllocated =
ResourceAllocation(DRIVER_GPU_ID, Seq("0", "1", "8"))
val ja = Extraction.decompose(Seq(gpusAllocated))
diff --git a/core/src/test/scala/org/apache/spark/deploy/history/HistoryServerSuite.scala b/core/src/test/scala/org/apache/spark/deploy/history/HistoryServerSuite.scala
index 6322661f4afd2..a83a0ace5c008 100644
--- a/core/src/test/scala/org/apache/spark/deploy/history/HistoryServerSuite.scala
+++ b/core/src/test/scala/org/apache/spark/deploy/history/HistoryServerSuite.scala
@@ -29,6 +29,7 @@ import scala.concurrent.duration._
import com.google.common.io.{ByteStreams, Files}
import org.apache.commons.io.{FileUtils, IOUtils}
import org.apache.hadoop.fs.{FileStatus, FileSystem, Path}
+import org.json4s.Formats
import org.json4s.JsonAST._
import org.json4s.jackson.JsonMethods
import org.json4s.jackson.JsonMethods._
@@ -380,7 +381,7 @@ abstract class HistoryServerSuite extends SparkFunSuite with BeforeAndAfter with
test("incomplete apps get refreshed") {
implicit val webDriver: WebDriver = new HtmlUnitDriver
- implicit val formats = org.json4s.DefaultFormats
+ implicit val formats: Formats = org.json4s.DefaultFormats
// this test dir is explicitly deleted on successful runs; retained for diagnostics when
// not
diff --git a/core/src/test/scala/org/apache/spark/deploy/master/MasterSuite.scala b/core/src/test/scala/org/apache/spark/deploy/master/MasterSuite.scala
index 37874de987662..f3c7138a13119 100644
--- a/core/src/test/scala/org/apache/spark/deploy/master/MasterSuite.scala
+++ b/core/src/test/scala/org/apache/spark/deploy/master/MasterSuite.scala
@@ -327,7 +327,7 @@ class MasterSuite extends SparkFunSuite
}
test("SPARK-46888: master should reject worker kill request if decommision is disabled") {
- implicit val formats = org.json4s.DefaultFormats
+ implicit val formats: Formats = org.json4s.DefaultFormats
val conf = new SparkConf()
.set(DECOMMISSION_ENABLED, false)
.set(MASTER_UI_DECOMMISSION_ALLOW_MODE, "ALLOW")
@@ -347,7 +347,7 @@ class MasterSuite extends SparkFunSuite
}
test("master/worker web ui available") {
- implicit val formats = org.json4s.DefaultFormats
+ implicit val formats: Formats = org.json4s.DefaultFormats
val conf = new SparkConf()
val localCluster = LocalSparkCluster(2, 2, 512, conf)
localCluster.start()
@@ -383,7 +383,7 @@ class MasterSuite extends SparkFunSuite
}
test("master/worker web ui available with reverseProxy") {
- implicit val formats = org.json4s.DefaultFormats
+ implicit val formats: Formats = org.json4s.DefaultFormats
val conf = new SparkConf()
conf.set(UI_REVERSE_PROXY, true)
val localCluster = LocalSparkCluster(2, 2, 512, conf)
@@ -419,7 +419,7 @@ class MasterSuite extends SparkFunSuite
}
test("master/worker web ui available behind front-end reverseProxy") {
- implicit val formats = org.json4s.DefaultFormats
+ implicit val formats: Formats = org.json4s.DefaultFormats
val reverseProxyUrl = "http://proxyhost:8080/path/to/spark"
val conf = new SparkConf()
conf.set(UI_REVERSE_PROXY, true)
diff --git a/core/src/test/scala/org/apache/spark/deploy/worker/WorkerSuite.scala b/core/src/test/scala/org/apache/spark/deploy/worker/WorkerSuite.scala
index a07d4f76905a7..75cebc90acba5 100644
--- a/core/src/test/scala/org/apache/spark/deploy/worker/WorkerSuite.scala
+++ b/core/src/test/scala/org/apache/spark/deploy/worker/WorkerSuite.scala
@@ -23,7 +23,7 @@ import java.util.function.Supplier
import scala.concurrent.duration._
-import org.json4s.{DefaultFormats, Extraction}
+import org.json4s.{DefaultFormats, Extraction, Formats}
import org.mockito.{Mock, MockitoAnnotations}
import org.mockito.Answers.RETURNS_SMART_NULLS
import org.mockito.ArgumentMatchers.any
@@ -60,7 +60,7 @@ class WorkerSuite extends SparkFunSuite with Matchers with BeforeAndAfter {
}
def conf(opts: (String, String)*): SparkConf = new SparkConf(loadDefaults = false).setAll(opts)
- implicit val formats = DefaultFormats
+ implicit val formats: Formats = DefaultFormats
private var _worker: Worker = _
diff --git a/core/src/test/scala/org/apache/spark/executor/CoarseGrainedExecutorBackendSuite.scala b/core/src/test/scala/org/apache/spark/executor/CoarseGrainedExecutorBackendSuite.scala
index 909d605442575..57d391b0cf063 100644
--- a/core/src/test/scala/org/apache/spark/executor/CoarseGrainedExecutorBackendSuite.scala
+++ b/core/src/test/scala/org/apache/spark/executor/CoarseGrainedExecutorBackendSuite.scala
@@ -26,7 +26,7 @@ import java.util.concurrent.atomic.AtomicInteger
import scala.collection.concurrent.TrieMap
import scala.concurrent.duration._
-import org.json4s.{DefaultFormats, Extraction}
+import org.json4s.{DefaultFormats, Extraction, Formats}
import org.json4s.JsonAST.{JArray, JObject}
import org.json4s.JsonDSL._
import org.mockito.ArgumentMatchers.any
@@ -50,7 +50,7 @@ import org.apache.spark.util.{SerializableBuffer, ThreadUtils, Utils}
class CoarseGrainedExecutorBackendSuite extends SparkFunSuite
with LocalSparkContext with MockitoSugar {
- implicit val formats = DefaultFormats
+ implicit val formats: Formats = DefaultFormats
test("parsing no resources") {
val conf = new SparkConf
diff --git a/core/src/test/scala/org/apache/spark/resource/ResourceUtilsSuite.scala b/core/src/test/scala/org/apache/spark/resource/ResourceUtilsSuite.scala
index ffe5ff5787102..e2daf41a203c2 100644
--- a/core/src/test/scala/org/apache/spark/resource/ResourceUtilsSuite.scala
+++ b/core/src/test/scala/org/apache/spark/resource/ResourceUtilsSuite.scala
@@ -21,7 +21,7 @@ import java.io.File
import java.nio.file.{Files => JavaFiles}
import java.util.Optional
-import org.json4s.{DefaultFormats, Extraction}
+import org.json4s.{DefaultFormats, Extraction, Formats}
import org.apache.spark.{LocalSparkContext, SparkConf, SparkException, SparkFunSuite}
import org.apache.spark.TestUtils._
@@ -117,7 +117,7 @@ class ResourceUtilsSuite extends SparkFunSuite
val conf = new SparkConf
assume(!(Utils.isWindows))
withTempDir { dir =>
- implicit val formats = DefaultFormats
+ implicit val formats: Formats = DefaultFormats
val fpgaAddrs = Seq("f1", "f2", "f3")
val fpgaAllocation = ResourceAllocation(EXECUTOR_FPGA_ID, fpgaAddrs)
val resourcesFile = createTempJsonFile(
@@ -146,7 +146,7 @@ class ResourceUtilsSuite extends SparkFunSuite
val rpId = 1
assume(!(Utils.isWindows))
withTempDir { dir =>
- implicit val formats = DefaultFormats
+ implicit val formats: Formats = DefaultFormats
val fpgaAddrs = Seq("f1", "f2", "f3")
val fpgaAllocation = ResourceAllocation(EXECUTOR_FPGA_ID, fpgaAddrs)
val resourcesFile = createTempJsonFile(
diff --git a/core/src/test/scala/org/apache/spark/ui/UISeleniumSuite.scala b/core/src/test/scala/org/apache/spark/ui/UISeleniumSuite.scala
index 79496bba6674b..32972e860275a 100644
--- a/core/src/test/scala/org/apache/spark/ui/UISeleniumSuite.scala
+++ b/core/src/test/scala/org/apache/spark/ui/UISeleniumSuite.scala
@@ -83,7 +83,7 @@ private[spark] class SparkUICssErrorHandler extends DefaultCssErrorHandler {
class UISeleniumSuite extends SparkFunSuite with WebBrowser with Matchers {
implicit var webDriver: WebDriver = _
- implicit val formats = DefaultFormats
+ implicit val formats: Formats = DefaultFormats
override def beforeAll(): Unit = {
diff --git a/core/src/test/scala/org/apache/spark/util/KeyLockSuite.scala b/core/src/test/scala/org/apache/spark/util/KeyLockSuite.scala
index 6888e492a8d33..6902493dc3c5d 100644
--- a/core/src/test/scala/org/apache/spark/util/KeyLockSuite.scala
+++ b/core/src/test/scala/org/apache/spark/util/KeyLockSuite.scala
@@ -22,14 +22,14 @@ import java.util.concurrent.atomic.AtomicInteger
import scala.concurrent.duration._
-import org.scalatest.concurrent.{ThreadSignaler, TimeLimits}
+import org.scalatest.concurrent.{Signaler, ThreadSignaler, TimeLimits}
import org.apache.spark.SparkFunSuite
class KeyLockSuite extends SparkFunSuite with TimeLimits {
// Necessary to make ScalaTest 3.x interrupt a thread on the JVM like ScalaTest 2.2.x
- private implicit val defaultSignaler = ThreadSignaler
+ private implicit val defaultSignaler: Signaler = ThreadSignaler
private val foreverMs = 60 * 1000L
diff --git a/examples/src/main/scala/org/apache/spark/examples/sql/SparkSQLExample.scala b/examples/src/main/scala/org/apache/spark/examples/sql/SparkSQLExample.scala
index b17b86c08314b..669205cb89057 100644
--- a/examples/src/main/scala/org/apache/spark/examples/sql/SparkSQLExample.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/sql/SparkSQLExample.scala
@@ -17,7 +17,7 @@
package org.apache.spark.examples.sql
// $example on:programmatic_schema$
-import org.apache.spark.sql.Row
+import org.apache.spark.sql.{Encoder, Row}
// $example off:programmatic_schema$
// $example on:init_session$
import org.apache.spark.sql.SparkSession
@@ -220,7 +220,7 @@ object SparkSQLExample {
// +------------+
// No pre-defined encoders for Dataset[Map[K,V]], define explicitly
- implicit val mapEncoder = org.apache.spark.sql.Encoders.kryo[Map[String, Any]]
+ implicit val mapEncoder: Encoder[Map[String, Any]] =
+   org.apache.spark.sql.Encoders.kryo[Map[String, Any]]
// Primitive types and case classes can be also defined as
// implicit val stringIntMapEncoder: Encoder[Map[String, Any]] = ExpressionEncoder()
diff --git a/mllib/src/main/scala/org/apache/spark/ml/linalg/JsonMatrixConverter.scala b/mllib/src/main/scala/org/apache/spark/ml/linalg/JsonMatrixConverter.scala
index 8f03a29eb991a..a8844358ead2d 100644
--- a/mllib/src/main/scala/org/apache/spark/ml/linalg/JsonMatrixConverter.scala
+++ b/mllib/src/main/scala/org/apache/spark/ml/linalg/JsonMatrixConverter.scala
@@ -16,7 +16,7 @@
*/
package org.apache.spark.ml.linalg
-import org.json4s.DefaultFormats
+import org.json4s.{DefaultFormats, Formats}
import org.json4s.JsonDSL._
import org.json4s.jackson.JsonMethods.{compact, parse => parseJson, render}
@@ -29,7 +29,7 @@ private[ml] object JsonMatrixConverter {
* Parses the JSON representation of a Matrix into a [[Matrix]].
*/
def fromJson(json: String): Matrix = {
- implicit val formats = DefaultFormats
+ implicit val formats: Formats = DefaultFormats
val jValue = parseJson(json)
(jValue \ "type").extract[Int] match {
case 0 => // sparse
diff --git a/mllib/src/main/scala/org/apache/spark/ml/linalg/JsonVectorConverter.scala b/mllib/src/main/scala/org/apache/spark/ml/linalg/JsonVectorConverter.scala
index 1b949d75eeaa0..12387233879ad 100644
--- a/mllib/src/main/scala/org/apache/spark/ml/linalg/JsonVectorConverter.scala
+++ b/mllib/src/main/scala/org/apache/spark/ml/linalg/JsonVectorConverter.scala
@@ -17,7 +17,7 @@
package org.apache.spark.ml.linalg
-import org.json4s.DefaultFormats
+import org.json4s.{DefaultFormats, Formats}
import org.json4s.JsonDSL._
import org.json4s.jackson.JsonMethods.{compact, parse => parseJson, render}
@@ -27,7 +27,7 @@ private[ml] object JsonVectorConverter {
* Parses the JSON representation of a vector into a [[Vector]].
*/
def fromJson(json: String): Vector = {
- implicit val formats = DefaultFormats
+ implicit val formats: Formats = DefaultFormats
val jValue = parseJson(json)
(jValue \ "type").extract[Int] match {
case 0 => // sparse
diff --git a/mllib/src/main/scala/org/apache/spark/ml/param/params.scala b/mllib/src/main/scala/org/apache/spark/ml/param/params.scala
index b818be30583c0..1b5845f14f1b0 100644
--- a/mllib/src/main/scala/org/apache/spark/ml/param/params.scala
+++ b/mllib/src/main/scala/org/apache/spark/ml/param/params.scala
@@ -129,7 +129,7 @@ private[ml] object Param {
case JObject(v) =>
val keys = v.map(_._1)
if (keys.contains("class")) {
- implicit val formats = DefaultFormats
+ implicit val formats: Formats = DefaultFormats
val className = (jValue \ "class").extract[String]
className match {
case JsonMatrixConverter.className =>
@@ -398,7 +398,7 @@ class IntParam(parent: String, name: String, doc: String, isValid: Int => Boolea
}
override def jsonDecode(json: String): Int = {
- implicit val formats = DefaultFormats
+ implicit val formats: Formats = DefaultFormats
parse(json).extract[Int]
}
}
@@ -484,7 +484,7 @@ class LongParam(parent: String, name: String, doc: String, isValid: Long => Bool
}
override def jsonDecode(json: String): Long = {
- implicit val formats = DefaultFormats
+ implicit val formats: Formats = DefaultFormats
parse(json).extract[Long]
}
}
@@ -505,7 +505,7 @@ class BooleanParam(parent: String, name: String, doc: String) // No need for isV
}
override def jsonDecode(json: String): Boolean = {
- implicit val formats = DefaultFormats
+ implicit val formats: Formats = DefaultFormats
parse(json).extract[Boolean]
}
}
@@ -528,7 +528,7 @@ class StringArrayParam(parent: Params, name: String, doc: String, isValid: Array
}
override def jsonDecode(json: String): Array[String] = {
- implicit val formats = DefaultFormats
+ implicit val formats: Formats = DefaultFormats
parse(json).extract[Seq[String]].toArray
}
}
@@ -617,7 +617,7 @@ class IntArrayParam(parent: Params, name: String, doc: String, isValid: Array[In
}
override def jsonDecode(json: String): Array[Int] = {
- implicit val formats = DefaultFormats
+ implicit val formats: Formats = DefaultFormats
parse(json).extract[Seq[Int]].toArray
}
}
diff --git a/mllib/src/main/scala/org/apache/spark/mllib/classification/ClassificationModel.scala b/mllib/src/main/scala/org/apache/spark/mllib/classification/ClassificationModel.scala
index 5161bc72659c6..97de8604e7728 100644
--- a/mllib/src/main/scala/org/apache/spark/mllib/classification/ClassificationModel.scala
+++ b/mllib/src/main/scala/org/apache/spark/mllib/classification/ClassificationModel.scala
@@ -17,7 +17,7 @@
package org.apache.spark.mllib.classification
-import org.json4s.{DefaultFormats, JValue}
+import org.json4s.{DefaultFormats, Formats, JValue}
import org.apache.spark.annotation.Since
import org.apache.spark.api.java.JavaRDD
@@ -65,7 +65,7 @@ private[mllib] object ClassificationModel {
* @return (numFeatures, numClasses)
*/
def getNumFeaturesClasses(metadata: JValue): (Int, Int) = {
- implicit val formats = DefaultFormats
+ implicit val formats: Formats = DefaultFormats
((metadata \ "numFeatures").extract[Int], (metadata \ "numClasses").extract[Int])
}
}
diff --git a/mllib/src/main/scala/org/apache/spark/mllib/clustering/BisectingKMeansModel.scala b/mllib/src/main/scala/org/apache/spark/mllib/clustering/BisectingKMeansModel.scala
index c3979118de403..2d1781a25d3f6 100644
--- a/mllib/src/main/scala/org/apache/spark/mllib/clustering/BisectingKMeansModel.scala
+++ b/mllib/src/main/scala/org/apache/spark/mllib/clustering/BisectingKMeansModel.scala
@@ -18,7 +18,7 @@
package org.apache.spark.mllib.clustering
import org.json4s._
-import org.json4s.DefaultFormats
+import org.json4s.{DefaultFormats, Formats}
import org.json4s.JsonDSL._
import org.json4s.jackson.JsonMethods._
@@ -187,7 +187,7 @@ object BisectingKMeansModel extends Loader[BisectingKMeansModel] {
}
def load(sc: SparkContext, path: String): BisectingKMeansModel = {
- implicit val formats: DefaultFormats = DefaultFormats
+ implicit val formats: Formats = DefaultFormats
val (className, formatVersion, metadata) = Loader.loadMetadata(sc, path)
assert(className == thisClassName)
assert(formatVersion == thisFormatVersion)
@@ -223,7 +223,7 @@ object BisectingKMeansModel extends Loader[BisectingKMeansModel] {
}
def load(sc: SparkContext, path: String): BisectingKMeansModel = {
- implicit val formats: DefaultFormats = DefaultFormats
+ implicit val formats: Formats = DefaultFormats
val (className, formatVersion, metadata) = Loader.loadMetadata(sc, path)
assert(className == thisClassName)
assert(formatVersion == thisFormatVersion)
@@ -261,7 +261,7 @@ object BisectingKMeansModel extends Loader[BisectingKMeansModel] {
}
def load(sc: SparkContext, path: String): BisectingKMeansModel = {
- implicit val formats: DefaultFormats = DefaultFormats
+ implicit val formats: Formats = DefaultFormats
val (className, formatVersion, metadata) = Loader.loadMetadata(sc, path)
assert(className == thisClassName)
assert(formatVersion == thisFormatVersion)
diff --git a/mllib/src/main/scala/org/apache/spark/mllib/clustering/GaussianMixtureModel.scala b/mllib/src/main/scala/org/apache/spark/mllib/clustering/GaussianMixtureModel.scala
index 0c9c6ab826e62..eb5e776799d04 100644
--- a/mllib/src/main/scala/org/apache/spark/mllib/clustering/GaussianMixtureModel.scala
+++ b/mllib/src/main/scala/org/apache/spark/mllib/clustering/GaussianMixtureModel.scala
@@ -18,7 +18,7 @@
package org.apache.spark.mllib.clustering
import breeze.linalg.{DenseVector => BreezeVector}
-import org.json4s.DefaultFormats
+import org.json4s.{DefaultFormats, Formats}
import org.json4s.JsonDSL._
import org.json4s.jackson.JsonMethods._
@@ -175,7 +175,7 @@ object GaussianMixtureModel extends Loader[GaussianMixtureModel] {
@Since("1.4.0")
override def load(sc: SparkContext, path: String): GaussianMixtureModel = {
val (loadedClassName, version, metadata) = Loader.loadMetadata(sc, path)
- implicit val formats = DefaultFormats
+ implicit val formats: Formats = DefaultFormats
val k = (metadata \ "k").extract[Int]
val classNameV1_0 = SaveLoadV1_0.classNameV1_0
(loadedClassName, version) match {
diff --git a/mllib/src/main/scala/org/apache/spark/mllib/clustering/KMeansModel.scala b/mllib/src/main/scala/org/apache/spark/mllib/clustering/KMeansModel.scala
index 64b352157caf7..5eafdc9add58d 100644
--- a/mllib/src/main/scala/org/apache/spark/mllib/clustering/KMeansModel.scala
+++ b/mllib/src/main/scala/org/apache/spark/mllib/clustering/KMeansModel.scala
@@ -179,7 +179,7 @@ object KMeansModel extends Loader[KMeansModel] {
}
def load(sc: SparkContext, path: String): KMeansModel = {
- implicit val formats = DefaultFormats
+ implicit val formats: Formats = DefaultFormats
val spark = SparkSession.builder().sparkContext(sc).getOrCreate()
val (className, formatVersion, metadata) = Loader.loadMetadata(sc, path)
assert(className == thisClassName)
@@ -213,7 +213,7 @@ object KMeansModel extends Loader[KMeansModel] {
}
def load(sc: SparkContext, path: String): KMeansModel = {
- implicit val formats = DefaultFormats
+ implicit val formats: Formats = DefaultFormats
val spark = SparkSession.builder().sparkContext(sc).getOrCreate()
val (className, formatVersion, metadata) = Loader.loadMetadata(sc, path)
assert(className == thisClassName)
diff --git a/mllib/src/main/scala/org/apache/spark/mllib/clustering/LDAModel.scala b/mllib/src/main/scala/org/apache/spark/mllib/clustering/LDAModel.scala
index aa8b6a00a427f..e318f06900950 100644
--- a/mllib/src/main/scala/org/apache/spark/mllib/clustering/LDAModel.scala
+++ b/mllib/src/main/scala/org/apache/spark/mllib/clustering/LDAModel.scala
@@ -20,7 +20,7 @@ package org.apache.spark.mllib.clustering
import breeze.linalg.{argmax, argtopk, normalize, sum, DenseMatrix => BDM, DenseVector => BDV}
import breeze.numerics.{exp, lgamma}
import org.apache.hadoop.fs.Path
-import org.json4s.DefaultFormats
+import org.json4s.{DefaultFormats, Formats}
import org.json4s.JsonDSL._
import org.json4s.jackson.JsonMethods._
@@ -496,7 +496,7 @@ object LocalLDAModel extends Loader[LocalLDAModel] {
@Since("1.5.0")
override def load(sc: SparkContext, path: String): LocalLDAModel = {
val (loadedClassName, loadedVersion, metadata) = Loader.loadMetadata(sc, path)
- implicit val formats = DefaultFormats
+ implicit val formats: Formats = DefaultFormats
val expectedK = (metadata \ "k").extract[Int]
val expectedVocabSize = (metadata \ "vocabSize").extract[Int]
val docConcentration =
@@ -923,7 +923,7 @@ object DistributedLDAModel extends Loader[DistributedLDAModel] {
@Since("1.5.0")
override def load(sc: SparkContext, path: String): DistributedLDAModel = {
val (loadedClassName, loadedVersion, metadata) = Loader.loadMetadata(sc, path)
- implicit val formats = DefaultFormats
+ implicit val formats: Formats = DefaultFormats
val expectedK = (metadata \ "k").extract[Int]
val vocabSize = (metadata \ "vocabSize").extract[Int]
val docConcentration =
diff --git a/mllib/src/main/scala/org/apache/spark/mllib/clustering/PowerIterationClustering.scala b/mllib/src/main/scala/org/apache/spark/mllib/clustering/PowerIterationClustering.scala
index ba541bbcccd29..12c7ae5066c82 100644
--- a/mllib/src/main/scala/org/apache/spark/mllib/clustering/PowerIterationClustering.scala
+++ b/mllib/src/main/scala/org/apache/spark/mllib/clustering/PowerIterationClustering.scala
@@ -79,7 +79,7 @@ object PowerIterationClusteringModel extends Loader[PowerIterationClusteringMode
@Since("1.4.0")
def load(sc: SparkContext, path: String): PowerIterationClusteringModel = {
- implicit val formats = DefaultFormats
+ implicit val formats: Formats = DefaultFormats
val spark = SparkSession.builder().sparkContext(sc).getOrCreate()
val (className, formatVersion, metadata) = Loader.loadMetadata(sc, path)
diff --git a/mllib/src/main/scala/org/apache/spark/mllib/feature/ChiSqSelector.scala b/mllib/src/main/scala/org/apache/spark/mllib/feature/ChiSqSelector.scala
index 3202f08e220b0..4aae9d8add43a 100644
--- a/mllib/src/main/scala/org/apache/spark/mllib/feature/ChiSqSelector.scala
+++ b/mllib/src/main/scala/org/apache/spark/mllib/feature/ChiSqSelector.scala
@@ -145,7 +145,7 @@ object ChiSqSelectorModel extends Loader[ChiSqSelectorModel] {
}
def load(sc: SparkContext, path: String): ChiSqSelectorModel = {
- implicit val formats = DefaultFormats
+ implicit val formats: Formats = DefaultFormats
val spark = SparkSession.builder().sparkContext(sc).getOrCreate()
val (className, formatVersion, metadata) = Loader.loadMetadata(sc, path)
assert(className == thisClassName)
diff --git a/mllib/src/main/scala/org/apache/spark/mllib/feature/Word2Vec.scala b/mllib/src/main/scala/org/apache/spark/mllib/feature/Word2Vec.scala
index 97f277d53ca9d..f286b729c03d0 100644
--- a/mllib/src/main/scala/org/apache/spark/mllib/feature/Word2Vec.scala
+++ b/mllib/src/main/scala/org/apache/spark/mllib/feature/Word2Vec.scala
@@ -23,7 +23,7 @@ import scala.collection.JavaConverters._
import scala.collection.mutable
import com.google.common.collect.{Ordering => GuavaOrdering}
-import org.json4s.DefaultFormats
+import org.json4s.{DefaultFormats, Formats}
import org.json4s.JsonDSL._
import org.json4s.jackson.JsonMethods._
@@ -704,7 +704,7 @@ object Word2VecModel extends Loader[Word2VecModel] {
override def load(sc: SparkContext, path: String): Word2VecModel = {
val (loadedClassName, loadedVersion, metadata) = Loader.loadMetadata(sc, path)
- implicit val formats = DefaultFormats
+ implicit val formats: Formats = DefaultFormats
val expectedVectorSize = (metadata \ "vectorSize").extract[Int]
val expectedNumWords = (metadata \ "numWords").extract[Int]
val classNameV1_0 = SaveLoadV1_0.classNameV1_0
diff --git a/mllib/src/main/scala/org/apache/spark/mllib/fpm/FPGrowth.scala b/mllib/src/main/scala/org/apache/spark/mllib/fpm/FPGrowth.scala
index ecdc28dea37fd..0938b709226bd 100644
--- a/mllib/src/main/scala/org/apache/spark/mllib/fpm/FPGrowth.scala
+++ b/mllib/src/main/scala/org/apache/spark/mllib/fpm/FPGrowth.scala
@@ -25,7 +25,7 @@ import scala.collection.mutable
import scala.reflect.ClassTag
import scala.reflect.runtime.universe._
-import org.json4s.DefaultFormats
+import org.json4s.{DefaultFormats, Formats}
import org.json4s.JsonDSL._
import org.json4s.jackson.JsonMethods.{compact, render}
@@ -126,7 +126,7 @@ object FPGrowthModel extends Loader[FPGrowthModel[_]] {
}
def load(sc: SparkContext, path: String): FPGrowthModel[_] = {
- implicit val formats = DefaultFormats
+ implicit val formats: Formats = DefaultFormats
val spark = SparkSession.builder().sparkContext(sc).getOrCreate()
val (className, formatVersion, metadata) = Loader.loadMetadata(sc, path)
diff --git a/mllib/src/main/scala/org/apache/spark/mllib/fpm/PrefixSpan.scala b/mllib/src/main/scala/org/apache/spark/mllib/fpm/PrefixSpan.scala
index 7c023bcfa72a4..703dd65bfab78 100644
--- a/mllib/src/main/scala/org/apache/spark/mllib/fpm/PrefixSpan.scala
+++ b/mllib/src/main/scala/org/apache/spark/mllib/fpm/PrefixSpan.scala
@@ -25,7 +25,7 @@ import scala.collection.mutable
import scala.reflect.ClassTag
import scala.reflect.runtime.universe._
-import org.json4s.DefaultFormats
+import org.json4s.{DefaultFormats, Formats}
import org.json4s.JsonDSL._
import org.json4s.jackson.JsonMethods.{compact, render}
@@ -670,7 +670,7 @@ object PrefixSpanModel extends Loader[PrefixSpanModel[_]] {
}
def load(sc: SparkContext, path: String): PrefixSpanModel[_] = {
- implicit val formats = DefaultFormats
+ implicit val formats: Formats = DefaultFormats
val spark = SparkSession.builder().sparkContext(sc).getOrCreate()
val (className, formatVersion, metadata) = Loader.loadMetadata(sc, path)
diff --git a/mllib/src/main/scala/org/apache/spark/mllib/linalg/Vectors.scala b/mllib/src/main/scala/org/apache/spark/mllib/linalg/Vectors.scala
index a93f37799419e..fa58443cca90b 100644
--- a/mllib/src/main/scala/org/apache/spark/mllib/linalg/Vectors.scala
+++ b/mllib/src/main/scala/org/apache/spark/mllib/linalg/Vectors.scala
@@ -25,7 +25,7 @@ import scala.collection.JavaConverters._
import scala.language.implicitConversions
import breeze.linalg.{DenseVector => BDV, SparseVector => BSV, Vector => BV}
-import org.json4s.DefaultFormats
+import org.json4s.{DefaultFormats, Formats}
import org.json4s.JsonDSL._
import org.json4s.jackson.JsonMethods.{compact, parse => parseJson, render}
@@ -430,7 +430,7 @@ object Vectors {
*/
@Since("1.6.0")
def fromJson(json: String): Vector = {
- implicit val formats = DefaultFormats
+ implicit val formats: Formats = DefaultFormats
val jValue = parseJson(json)
(jValue \ "type").extract[Int] match {
case 0 => // sparse
diff --git a/mllib/src/main/scala/org/apache/spark/mllib/recommendation/MatrixFactorizationModel.scala b/mllib/src/main/scala/org/apache/spark/mllib/recommendation/MatrixFactorizationModel.scala
index 3276513213f5d..581fe1f9eb647 100644
--- a/mllib/src/main/scala/org/apache/spark/mllib/recommendation/MatrixFactorizationModel.scala
+++ b/mllib/src/main/scala/org/apache/spark/mllib/recommendation/MatrixFactorizationModel.scala
@@ -392,7 +392,7 @@ object MatrixFactorizationModel extends Loader[MatrixFactorizationModel] {
}
def load(sc: SparkContext, path: String): MatrixFactorizationModel = {
- implicit val formats = DefaultFormats
+ implicit val formats: Formats = DefaultFormats
val spark = SparkSession.builder().sparkContext(sc).getOrCreate()
val (className, formatVersion, metadata) = loadMetadata(sc, path)
assert(className == thisClassName)
diff --git a/mllib/src/main/scala/org/apache/spark/mllib/regression/IsotonicRegression.scala b/mllib/src/main/scala/org/apache/spark/mllib/regression/IsotonicRegression.scala
index 12a78ef4ec140..81d1b290404d5 100644
--- a/mllib/src/main/scala/org/apache/spark/mllib/regression/IsotonicRegression.scala
+++ b/mllib/src/main/scala/org/apache/spark/mllib/regression/IsotonicRegression.scala
@@ -209,7 +209,7 @@ object IsotonicRegressionModel extends Loader[IsotonicRegressionModel] {
@Since("1.4.0")
override def load(sc: SparkContext, path: String): IsotonicRegressionModel = {
- implicit val formats = DefaultFormats
+ implicit val formats: Formats = DefaultFormats
val (loadedClassName, version, metadata) = loadMetadata(sc, path)
val isotonic = (metadata \ "isotonic").extract[Boolean]
val classNameV1_0 = SaveLoadV1_0.thisClassName
diff --git a/mllib/src/main/scala/org/apache/spark/mllib/regression/RegressionModel.scala b/mllib/src/main/scala/org/apache/spark/mllib/regression/RegressionModel.scala
index a95a54225a085..144c2b6a8e852 100644
--- a/mllib/src/main/scala/org/apache/spark/mllib/regression/RegressionModel.scala
+++ b/mllib/src/main/scala/org/apache/spark/mllib/regression/RegressionModel.scala
@@ -17,7 +17,7 @@
package org.apache.spark.mllib.regression
-import org.json4s.{DefaultFormats, JValue}
+import org.json4s.{DefaultFormats, Formats, JValue}
import org.apache.spark.annotation.Since
import org.apache.spark.api.java.JavaRDD
@@ -64,7 +64,7 @@ private[mllib] object RegressionModel {
* @return numFeatures
*/
def getNumFeatures(metadata: JValue): Int = {
- implicit val formats = DefaultFormats
+ implicit val formats: Formats = DefaultFormats
(metadata \ "numFeatures").extract[Int]
}
}
diff --git a/mllib/src/main/scala/org/apache/spark/mllib/tree/model/DecisionTreeModel.scala b/mllib/src/main/scala/org/apache/spark/mllib/tree/model/DecisionTreeModel.scala
index cdc998000c2fc..7a864b9d41efe 100644
--- a/mllib/src/main/scala/org/apache/spark/mllib/tree/model/DecisionTreeModel.scala
+++ b/mllib/src/main/scala/org/apache/spark/mllib/tree/model/DecisionTreeModel.scala
@@ -312,7 +312,7 @@ object DecisionTreeModel extends Loader[DecisionTreeModel] with Logging {
*/
@Since("1.3.0")
override def load(sc: SparkContext, path: String): DecisionTreeModel = {
- implicit val formats = DefaultFormats
+ implicit val formats: Formats = DefaultFormats
val (loadedClassName, version, metadata) = Loader.loadMetadata(sc, path)
val algo = (metadata \ "algo").extract[String]
val numNodes = (metadata \ "numNodes").extract[Int]
diff --git a/mllib/src/main/scala/org/apache/spark/mllib/tree/model/treeEnsembleModels.scala b/mllib/src/main/scala/org/apache/spark/mllib/tree/model/treeEnsembleModels.scala
index 1f879a4d9dfbb..03821dc417750 100644
--- a/mllib/src/main/scala/org/apache/spark/mllib/tree/model/treeEnsembleModels.scala
+++ b/mllib/src/main/scala/org/apache/spark/mllib/tree/model/treeEnsembleModels.scala
@@ -438,7 +438,7 @@ private[tree] object TreeEnsembleModel extends Logging {
* Read metadata from the loaded JSON metadata.
*/
def readMetadata(metadata: JValue): Metadata = {
- implicit val formats = DefaultFormats
+ implicit val formats: Formats = DefaultFormats
(metadata \ "metadata").extract[Metadata]
}
diff --git a/mllib/src/main/scala/org/apache/spark/mllib/util/modelSaveLoad.scala b/mllib/src/main/scala/org/apache/spark/mllib/util/modelSaveLoad.scala
index c13bc4099ce70..74e8ae75caf3e 100644
--- a/mllib/src/main/scala/org/apache/spark/mllib/util/modelSaveLoad.scala
+++ b/mllib/src/main/scala/org/apache/spark/mllib/util/modelSaveLoad.scala
@@ -117,7 +117,7 @@ private[mllib] object Loader {
* @return (class name, version, metadata)
*/
def loadMetadata(sc: SparkContext, path: String): (String, String, JValue) = {
- implicit val formats = DefaultFormats
+ implicit val formats: Formats = DefaultFormats
val metadata = parse(sc.textFile(metadataPath(path)).first())
val clazz = (metadata \ "class").extract[String]
val version = (metadata \ "version").extract[String]
diff --git a/resource-managers/yarn/src/main/scala/org/apache/spark/scheduler/cluster/YarnSchedulerBackend.scala b/resource-managers/yarn/src/main/scala/org/apache/spark/scheduler/cluster/YarnSchedulerBackend.scala
index 34848a7f3d853..3997e5c883ad4 100644
--- a/resource-managers/yarn/src/main/scala/org/apache/spark/scheduler/cluster/YarnSchedulerBackend.scala
+++ b/resource-managers/yarn/src/main/scala/org/apache/spark/scheduler/cluster/YarnSchedulerBackend.scala
@@ -21,7 +21,7 @@ import java.util.EnumSet
import java.util.concurrent.atomic.AtomicBoolean
import javax.servlet.DispatcherType
-import scala.concurrent.{ExecutionContext, Future}
+import scala.concurrent.{ExecutionContext, ExecutionContextExecutorService, Future}
import scala.util.{Failure, Success}
import scala.util.control.NonFatal
@@ -64,14 +64,14 @@ private[spark] abstract class YarnSchedulerBackend(
private val yarnSchedulerEndpointRef = rpcEnv.setupEndpoint(
YarnSchedulerBackend.ENDPOINT_NAME, yarnSchedulerEndpoint)
- private implicit val askTimeout = RpcUtils.askRpcTimeout(sc.conf)
+ private implicit val askTimeout: RpcTimeout = RpcUtils.askRpcTimeout(sc.conf)
/**
* Declare implicit single thread execution context for futures doRequestTotalExecutors and
* doKillExecutors below, avoiding using the global execution context that may cause conflict
* with user code's execution of futures.
*/
- private implicit val schedulerEndpointEC = ExecutionContext.fromExecutorService(
+ private implicit val schedulerEndpointEC: ExecutionContextExecutorService =
+   ExecutionContext.fromExecutorService(
ThreadUtils.newDaemonSingleThreadExecutor("yarn-scheduler-endpoint"))
/** Application ID. */
diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/ObjectExpressionsSuite.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/ObjectExpressionsSuite.scala
index 3a662e68d58c2..33be55a43c52b 100644
--- a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/ObjectExpressionsSuite.scala
+++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/ObjectExpressionsSuite.scala
@@ -491,7 +491,7 @@ class ObjectExpressionsSuite extends SparkFunSuite with ExpressionEvalHelper {
}
}
- implicit private def mapIntStrEncoder = ExpressionEncoder[Map[Int, String]]()
+ implicit private def mapIntStrEncoder: ExpressionEncoder[Map[Int, String]] =
+   ExpressionEncoder[Map[Int, String]]()
test("SPARK-23588 CatalystToExternalMap should support interpreted execution") {
// To get a resolved `CatalystToExternalMap` expression, we build a deserializer plan
diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/optimizer/ColumnPruningSuite.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/optimizer/ColumnPruningSuite.scala
index f28df3839d0a8..a4d4f0a625e9c 100644
--- a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/optimizer/ColumnPruningSuite.scala
+++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/optimizer/ColumnPruningSuite.scala
@@ -423,7 +423,7 @@ class ColumnPruningSuite extends PlanTest {
comparePlans(optimized, expected)
}
- implicit private def productEncoder[T <: Product : TypeTag] = ExpressionEncoder[T]()
+ implicit private def productEncoder[T <: Product : TypeTag]: ExpressionEncoder[T] =
+   ExpressionEncoder[T]()
private val func = identity[Iterator[OtherTuple]] _
test("Column pruning on MapPartitions") {
diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/optimizer/EliminateMapObjectsSuite.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/optimizer/EliminateMapObjectsSuite.scala
index 1c818eee1224d..f7f429a501aeb 100644
--- a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/optimizer/EliminateMapObjectsSuite.scala
+++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/optimizer/EliminateMapObjectsSuite.scala
@@ -37,8 +37,8 @@ class EliminateMapObjectsSuite extends PlanTest {
}
}
- implicit private def intArrayEncoder = ExpressionEncoder[Array[Int]]()
- implicit private def doubleArrayEncoder = ExpressionEncoder[Array[Double]]()
+ implicit private def intArrayEncoder: ExpressionEncoder[Array[Int]] =
+   ExpressionEncoder[Array[Int]]()
+ implicit private def doubleArrayEncoder: ExpressionEncoder[Array[Double]] =
+   ExpressionEncoder[Array[Double]]()
test("SPARK-20254: Remove unnecessary data conversion for primitive array") {
val intObjType = ObjectType(classOf[Array[Int]])
diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/optimizer/EliminateSerializationSuite.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/optimizer/EliminateSerializationSuite.scala
index 0d654cc1ac935..a8b4914300bc8 100644
--- a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/optimizer/EliminateSerializationSuite.scala
+++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/optimizer/EliminateSerializationSuite.scala
@@ -35,8 +35,8 @@ class EliminateSerializationSuite extends PlanTest {
EliminateSerialization) :: Nil
}
- implicit private def productEncoder[T <: Product : TypeTag] = ExpressionEncoder[T]()
- implicit private def intEncoder = ExpressionEncoder[Int]()
+ implicit private def productEncoder[T <: Product : TypeTag]: ExpressionEncoder[T] =
+   ExpressionEncoder[T]()
+ implicit private def intEncoder: ExpressionEncoder[Int] = ExpressionEncoder[Int]()
test("back to back serialization") {
val input = LocalRelation($"obj".obj(classOf[(Int, Int)]))
diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/optimizer/ObjectSerializerPruningSuite.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/optimizer/ObjectSerializerPruningSuite.scala
index a1039b051ce45..02e11343d229a 100644
--- a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/optimizer/ObjectSerializerPruningSuite.scala
+++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/optimizer/ObjectSerializerPruningSuite.scala
@@ -40,7 +40,7 @@ class ObjectSerializerPruningSuite extends PlanTest {
RemoveNoopOperators) :: Nil
}
- implicit private def productEncoder[T <: Product : TypeTag] = ExpressionEncoder[T]()
+ implicit private def productEncoder[T <: Product : TypeTag]: ExpressionEncoder[T] =
+   ExpressionEncoder[T]()
test("collect struct types") {
val dataTypes = Seq(
diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/optimizer/TypedFilterOptimizationSuite.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/optimizer/TypedFilterOptimizationSuite.scala
index 4385777e79c09..090ed9af2494c 100644
--- a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/optimizer/TypedFilterOptimizationSuite.scala
+++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/optimizer/TypedFilterOptimizationSuite.scala
@@ -37,7 +37,7 @@ class TypedFilterOptimizationSuite extends PlanTest {
CombineTypedFilters) :: Nil
}
- implicit private def productEncoder[T <: Product : TypeTag] = ExpressionEncoder[T]()
+ implicit private def productEncoder[T <: Product : TypeTag]: ExpressionEncoder[T] =
+   ExpressionEncoder[T]()
val testRelation = LocalRelation($"_1".int, $"_2".int)
diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/plans/logical/DistinctKeyVisitorSuite.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/plans/logical/DistinctKeyVisitorSuite.scala
index acf62d07bc398..4979106b5d7fd 100644
--- a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/plans/logical/DistinctKeyVisitorSuite.scala
+++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/plans/logical/DistinctKeyVisitorSuite.scala
@@ -47,7 +47,7 @@ class DistinctKeyVisitorSuite extends PlanTest {
assert(plan.analyze.distinctKeys === distinctKeys)
}
- implicit private def productEncoder[T <: Product : TypeTag] = ExpressionEncoder[T]()
+ implicit private def productEncoder[T <: Product : TypeTag]: ExpressionEncoder[T] =
+   ExpressionEncoder[T]()
test("Aggregate's distinct attributes") {
checkDistinctAttributes(t1.groupBy($"a", $"b")($"a", $"b", 1), Set(ExpressionSet(Seq(a, b))))
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/Dataset.scala b/sql/core/src/main/scala/org/apache/spark/sql/Dataset.scala
index c063af9381ff2..0cd24b456fef7 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/Dataset.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/Dataset.scala
@@ -22,6 +22,7 @@ import java.io.{ByteArrayOutputStream, CharArrayWriter, DataOutputStream}
import scala.annotation.varargs
import scala.collection.JavaConverters._
import scala.collection.mutable.{ArrayBuffer, HashSet}
+import scala.reflect.ClassTag
import scala.reflect.runtime.universe.TypeTag
import scala.util.control.NonFatal
@@ -240,7 +241,7 @@ class Dataset[T] private[sql](
exprEnc.resolveAndBind(logicalPlan.output, sparkSession.sessionState.analyzer)
}
- private implicit def classTag = exprEnc.clsTag
+ private implicit def classTag: ClassTag[T] = exprEnc.clsTag
// sqlContext must be val because a stable identifier is expected when you import implicits
@transient lazy val sqlContext: SQLContext = sparkSession.sqlContext
@@ -1588,7 +1589,7 @@ class Dataset[T] private[sql](
* @since 1.6.0
*/
def select[U1](c1: TypedColumn[T, U1]): Dataset[U1] = {
- implicit val encoder = c1.encoder
+ implicit val encoder: ExpressionEncoder[U1] = c1.encoder
val project = Project(c1.withInputType(exprEnc, logicalPlan.output).named :: Nil, logicalPlan)
if (!encoder.isSerializedAsStructForTopLevel) {
@@ -3383,7 +3384,7 @@ class Dataset[T] private[sql](
* @since 1.6.0
*/
def map[U](func: MapFunction[T, U], encoder: Encoder[U]): Dataset[U] = {
- implicit val uEnc = encoder
+ implicit val uEnc: Encoder[U] = encoder
withTypedPlan(MapElements[T, U](func, logicalPlan))
}
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/KeyValueGroupedDataset.scala b/sql/core/src/main/scala/org/apache/spark/sql/KeyValueGroupedDataset.scala
index 4c2ccb27eab20..094bc3c8a6901 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/KeyValueGroupedDataset.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/KeyValueGroupedDataset.scala
@@ -43,8 +43,8 @@ class KeyValueGroupedDataset[K, V] private[sql](
private val groupingAttributes: Seq[Attribute]) extends Serializable {
// Similar to [[Dataset]], we turn the passed in encoder to `ExpressionEncoder` explicitly.
- private implicit val kExprEnc = encoderFor(kEncoder)
- private implicit val vExprEnc = encoderFor(vEncoder)
+ private implicit val kExprEnc: ExpressionEncoder[K] = encoderFor(kEncoder)
+ private implicit val vExprEnc: ExpressionEncoder[V] = encoderFor(vEncoder)
private def logicalPlan = queryExecution.analyzed
private def sparkSession = queryExecution.sparkSession
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/DataSourceUtils.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/DataSourceUtils.scala
index a72e4c339af8c..bbc7bd49b4e10 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/DataSourceUtils.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/DataSourceUtils.scala
@@ -22,7 +22,7 @@ import java.util.Locale
import scala.collection.JavaConverters._
import org.apache.hadoop.fs.Path
-import org.json4s.NoTypeHints
+import org.json4s.{Formats, NoTypeHints}
import org.json4s.jackson.Serialization
import org.apache.spark.SparkUpgradeException
@@ -55,7 +55,7 @@ object DataSourceUtils extends PredicateHelper {
/**
* Utility methods for converting partitionBy columns to options and back.
*/
- private implicit val formats = Serialization.formats(NoTypeHints)
+ private implicit val formats: Formats = Serialization.formats(NoTypeHints)
def encodePartitioningColumns(columns: Seq[String]): String = {
Serialization.write(columns)
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/python/ApplyInPandasWithStatePythonRunner.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/python/ApplyInPandasWithStatePythonRunner.scala
index d4c535fe76a3e..2d79d7117bef3 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/python/ApplyInPandasWithStatePythonRunner.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/python/ApplyInPandasWithStatePythonRunner.scala
@@ -204,7 +204,7 @@ class ApplyInPandasWithStatePythonRunner(
STATE_METADATA_SCHEMA_FROM_PYTHON_WORKER)
stateMetadataBatch.rowIterator().asScala.take(numRows).flatMap { row =>
- implicit val formats = org.json4s.DefaultFormats
+ implicit val formats: Formats = org.json4s.DefaultFormats
// NOTE: See ApplyInPandasWithStatePythonRunner.STATE_METADATA_SCHEMA_FROM_PYTHON_WORKER
// for the schema.
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/CompactibleFileStreamLog.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/CompactibleFileStreamLog.scala
index 140367b3236ff..d19ffafb590c9 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/CompactibleFileStreamLog.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/CompactibleFileStreamLog.scala
@@ -24,7 +24,7 @@ import scala.io.{Source => IOSource}
import scala.reflect.ClassTag
import org.apache.hadoop.fs.Path
-import org.json4s.NoTypeHints
+import org.json4s.{Formats, NoTypeHints}
import org.json4s.jackson.Serialization
import org.apache.spark.sql.SparkSession
@@ -49,9 +49,10 @@ abstract class CompactibleFileStreamLog[T <: AnyRef : ClassTag](
import CompactibleFileStreamLog._
- private implicit val formats = Serialization.formats(NoTypeHints)
+ private implicit val formats: Formats = Serialization.formats(NoTypeHints)
/** Needed to serialize type T into JSON when using Jackson */
+ @scala.annotation.nowarn
private implicit val manifest = Manifest.classType[T](implicitly[ClassTag[T]].runtimeClass)
protected val minBatchesToRetain = sparkSession.sessionState.conf.minBatchesToRetain
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/FileStreamSinkLog.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/FileStreamSinkLog.scala
index 94ba8b8aa5153..d8aa31be47972 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/FileStreamSinkLog.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/FileStreamSinkLog.scala
@@ -18,8 +18,6 @@
package org.apache.spark.sql.execution.streaming
import org.apache.hadoop.fs.FileStatus
-import org.json4s.NoTypeHints
-import org.json4s.jackson.Serialization
import org.apache.spark.paths.SparkPath
import org.apache.spark.sql.SparkSession
@@ -90,8 +88,6 @@ class FileStreamSinkLog(
_retentionMs: Option[Long] = None)
extends CompactibleFileStreamLog[SinkFileStatus](metadataLogVersion, sparkSession, path) {
- private implicit val formats = Serialization.formats(NoTypeHints)
-
protected override val fileCleanupDelayMs = sparkSession.sessionState.conf.fileSinkLogCleanupDelay
protected override val isDeletingExpiredLog = sparkSession.sessionState.conf.fileSinkLogDeletion
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/FileStreamSourceLog.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/FileStreamSourceLog.scala
index 5fe9a39c91e0b..42c72f426ea49 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/FileStreamSourceLog.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/FileStreamSourceLog.scala
@@ -22,9 +22,6 @@ import java.util.Map.Entry
import scala.collection.mutable
-import org.json4s.NoTypeHints
-import org.json4s.jackson.Serialization
-
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.execution.streaming.FileStreamSource.FileEntry
import org.apache.spark.sql.internal.SQLConf
@@ -51,8 +48,6 @@ class FileStreamSourceLog(
protected override val isDeletingExpiredLog = sparkSession.sessionState.conf.fileSourceLogDeletion
- private implicit val formats = Serialization.formats(NoTypeHints)
-
// A fixed size log entry cache to cache the file entries belong to the compaction batch. It is
// used to avoid scanning the compacted log file to retrieve it's own batch data.
private val cacheSize = compactInterval
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/GroupStateImpl.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/GroupStateImpl.scala
index 3af9c9aebf33d..c9ade7b568e82 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/GroupStateImpl.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/GroupStateImpl.scala
@@ -246,7 +246,7 @@ private[sql] object GroupStateImpl {
}
def fromJson[S](value: Option[S], json: JValue): GroupStateImpl[S] = {
- implicit val formats = org.json4s.DefaultFormats
+ implicit val formats: Formats = org.json4s.DefaultFormats
val hmap = json.extract[Map[String, Any]]
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/HDFSMetadataLog.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/HDFSMetadataLog.scala
index 9a811db679d01..b7ea70124d271 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/HDFSMetadataLog.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/HDFSMetadataLog.scala
@@ -26,7 +26,7 @@ import scala.reflect.ClassTag
import org.apache.commons.io.IOUtils
import org.apache.hadoop.fs._
-import org.json4s.NoTypeHints
+import org.json4s.{Formats, NoTypeHints}
import org.json4s.jackson.Serialization
import org.apache.spark.internal.Logging
@@ -49,7 +49,7 @@ import org.apache.spark.sql.internal.SQLConf
class HDFSMetadataLog[T <: AnyRef : ClassTag](sparkSession: SparkSession, path: String)
extends MetadataLog[T] with Logging {
- private implicit val formats = Serialization.formats(NoTypeHints)
+ private implicit val formats: Formats = Serialization.formats(NoTypeHints)
/** Needed to serialize type T into JSON when using Jackson */
private implicit val manifest = Manifest.classType[T](implicitly[ClassTag[T]].runtimeClass)
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/continuous/ContinuousTextSocketSource.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/continuous/ContinuousTextSocketSource.scala
index 368dfae0cc95e..a83054ca8b726 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/continuous/ContinuousTextSocketSource.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/continuous/ContinuousTextSocketSource.scala
@@ -25,7 +25,7 @@ import javax.annotation.concurrent.GuardedBy
import scala.collection.mutable.ListBuffer
-import org.json4s.{DefaultFormats, NoTypeHints}
+import org.json4s.{DefaultFormats, Formats, NoTypeHints}
import org.json4s.jackson.Serialization
import org.apache.spark.SparkEnv
@@ -286,6 +286,6 @@ class TextSocketContinuousPartitionReader(
}
case class TextSocketOffset(offsets: List[Int]) extends Offset {
- private implicit val formats = Serialization.formats(NoTypeHints)
+ private implicit val formats: Formats = Serialization.formats(NoTypeHints)
override def json: String = Serialization.write(offsets)
}
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/sources/ContinuousMemoryStream.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/sources/ContinuousMemoryStream.scala
index d0cf602c7cca2..52e5eeb81d3ab 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/sources/ContinuousMemoryStream.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/sources/ContinuousMemoryStream.scala
@@ -22,7 +22,7 @@ import javax.annotation.concurrent.GuardedBy
import scala.collection.mutable.ListBuffer
-import org.json4s.NoTypeHints
+import org.json4s.{Formats, NoTypeHints}
import org.json4s.jackson.Serialization
import org.apache.spark.{SparkEnv, TaskContext}
@@ -46,7 +46,7 @@ import org.apache.spark.util.RpcUtils
class ContinuousMemoryStream[A : Encoder](id: Int, sqlContext: SQLContext, numPartitions: Int = 2)
extends MemoryStreamBase[A](sqlContext) with ContinuousStream {
- private implicit val formats = Serialization.formats(NoTypeHints)
+ private implicit val formats: Formats = Serialization.formats(NoTypeHints)
// ContinuousReader implementation
@@ -182,6 +182,6 @@ class ContinuousMemoryStreamPartitionReader(
case class ContinuousMemoryStreamOffset(partitionNums: Map[Int, Int])
extends Offset {
- private implicit val formats = Serialization.formats(NoTypeHints)
+ private implicit val formats: Formats = Serialization.formats(NoTypeHints)
override def json(): String = Serialization.write(partitionNums)
}
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/sources/RatePerMicroBatchStream.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/sources/RatePerMicroBatchStream.scala
index 6954e4534e494..bc74be538d90f 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/sources/RatePerMicroBatchStream.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/sources/RatePerMicroBatchStream.scala
@@ -17,7 +17,7 @@
package org.apache.spark.sql.execution.streaming.sources
-import org.json4s.NoTypeHints
+import org.json4s.{Formats, NoTypeHints}
import org.json4s.jackson.Serialization
import org.apache.spark.internal.Logging
@@ -130,7 +130,7 @@ case class RatePerMicroBatchStreamOffset(offset: Long, timestamp: Long) extends
}
object RatePerMicroBatchStreamOffset {
- implicit val formats = Serialization.formats(NoTypeHints)
+ implicit val formats: Formats = Serialization.formats(NoTypeHints)
def apply(json: String): RatePerMicroBatchStreamOffset =
Serialization.read[RatePerMicroBatchStreamOffset](json)
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/SQLQuerySuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/SQLQuerySuite.scala
index cfeccbdf648c2..dfd193b4349d2 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/SQLQuerySuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/SQLQuerySuite.scala
@@ -3748,9 +3748,9 @@ class SQLQuerySuite extends QueryTest with SharedSparkSession with AdaptiveSpark
.exists(_.contains(s"org.apache.hive.hcatalog_hive-hcatalog-core-$hiveVersion.jar")))
// default transitive=true, test download ivy URL jar return multiple jars
- sql("ADD JAR ivy://org.scala-js:scalajs-test-interface_2.12:1.2.0")
- assert(sc.listJars().exists(_.contains("scalajs-library_2.12")))
- assert(sc.listJars().exists(_.contains("scalajs-test-interface_2.12")))
+ sql("ADD JAR ivy://org.scala-js:scalajs-test-interface_2.13:1.2.0")
+ assert(sc.listJars().exists(_.contains("scalajs-library_2.13")))
+ assert(sc.listJars().exists(_.contains("scalajs-test-interface_2.13")))
sql(s"ADD JAR ivy://org.apache.hive:hive-contrib:$hiveVersion" +
"?exclude=org.pentaho:pentaho-aggdesigner-algorithm&transitive=true")
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/streaming/StreamingQueryListenerSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/streaming/StreamingQueryListenerSuite.scala
index 52b740bc5c34f..c59e91304c7fa 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/streaming/StreamingQueryListenerSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/streaming/StreamingQueryListenerSuite.scala
@@ -21,7 +21,7 @@ import java.util.UUID
import scala.collection.mutable
-import org.scalactic.TolerantNumerics
+import org.scalactic.{Equality, TolerantNumerics}
import org.scalatest.BeforeAndAfter
import org.scalatest.concurrent.PatienceConfiguration.Timeout
import org.scalatest.concurrent.Waiters.Waiter
@@ -44,7 +44,7 @@ class StreamingQueryListenerSuite extends StreamTest with BeforeAndAfter {
import testImplicits._
// To make === between double tolerate inexact values
- implicit val doubleEquality = TolerantNumerics.tolerantDoubleEquality(0.01)
+ implicit val doubleEquality: Equality[Double] = TolerantNumerics.tolerantDoubleEquality(0.01)
after {
spark.streams.active.foreach(_.stop())
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/streaming/StreamingQuerySuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/streaming/StreamingQuerySuite.scala
index 8565056cda6fa..a84690b5b97bb 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/streaming/StreamingQuerySuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/streaming/StreamingQuerySuite.scala
@@ -29,7 +29,7 @@ import org.apache.commons.io.FileUtils
import org.apache.commons.lang3.RandomStringUtils
import org.apache.hadoop.fs.Path
import org.mockito.Mockito.when
-import org.scalactic.TolerantNumerics
+import org.scalactic.{Equality, TolerantNumerics}
import org.scalatest.BeforeAndAfter
import org.scalatest.concurrent.PatienceConfiguration.Timeout
import org.scalatestplus.mockito.MockitoSugar
@@ -60,7 +60,7 @@ class StreamingQuerySuite extends StreamTest with BeforeAndAfter with Logging wi
import testImplicits._
// To make === between double tolerate inexact values
- implicit val doubleEquality = TolerantNumerics.tolerantDoubleEquality(0.01)
+ implicit val doubleEquality: Equality[Double] = TolerantNumerics.tolerantDoubleEquality(0.01)
after {
sqlContext.streams.active.foreach(_.stop())
diff --git a/sql/core/src/test/scala/org/apache/spark/status/api/v1/sql/SqlResourceWithActualMetricsSuite.scala b/sql/core/src/test/scala/org/apache/spark/status/api/v1/sql/SqlResourceWithActualMetricsSuite.scala
index c63c748953f1a..cceb56bb44111 100644
--- a/sql/core/src/test/scala/org/apache/spark/status/api/v1/sql/SqlResourceWithActualMetricsSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/status/api/v1/sql/SqlResourceWithActualMetricsSuite.scala
@@ -21,7 +21,7 @@ import java.net.URL
import java.text.SimpleDateFormat
import javax.servlet.http.HttpServletResponse
-import org.json4s.DefaultFormats
+import org.json4s.{DefaultFormats, Formats}
import org.json4s.jackson.JsonMethods
import org.apache.spark.SparkConf
@@ -47,7 +47,7 @@ class SqlResourceWithActualMetricsSuite
// Exclude nodes which may not have the metrics
val excludedNodes = List("WholeStageCodegen", "Project", "SerializeFromObject")
- implicit val formats = new DefaultFormats {
+ implicit val formats: Formats = new DefaultFormats {
override def dateFormatter = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss")
}
diff --git a/sql/hive-thriftserver/src/test/scala/org/apache/spark/sql/hive/thriftserver/HiveThriftServer2Suites.scala b/sql/hive-thriftserver/src/test/scala/org/apache/spark/sql/hive/thriftserver/HiveThriftServer2Suites.scala
index b421a94a06c40..177c9b153e8ae 100644
--- a/sql/hive-thriftserver/src/test/scala/org/apache/spark/sql/hive/thriftserver/HiveThriftServer2Suites.scala
+++ b/sql/hive-thriftserver/src/test/scala/org/apache/spark/sql/hive/thriftserver/HiveThriftServer2Suites.scala
@@ -26,7 +26,7 @@ import java.util.{Locale, UUID}
import scala.collection.JavaConverters._
import scala.collection.mutable
import scala.collection.mutable.ArrayBuffer
-import scala.concurrent.{ExecutionContext, Future, Promise}
+import scala.concurrent.{ExecutionContext, ExecutionContextExecutorService, Future, Promise}
import scala.concurrent.duration._
import scala.io.Source
import scala.util.Try
@@ -441,7 +441,7 @@ class HiveThriftBinaryServerSuite extends HiveThriftServer2Test {
s"LOAD DATA LOCAL INPATH '${TestData.smallKv}' OVERWRITE INTO TABLE test_map")
queries.foreach(statement.execute)
- implicit val ec = ExecutionContext.fromExecutorService(
+ implicit val ec: ExecutionContextExecutorService = ExecutionContext.fromExecutorService(
ThreadUtils.newDaemonSingleThreadExecutor("test-jdbc-cancel"))
try {
// Start a very-long-running query that will take hours to finish, then cancel it in order
diff --git a/streaming/src/main/scala/org/apache/spark/streaming/Time.scala b/streaming/src/main/scala/org/apache/spark/streaming/Time.scala
index 92cfd7d40338c..9681245baa0fe 100644
--- a/streaming/src/main/scala/org/apache/spark/streaming/Time.scala
+++ b/streaming/src/main/scala/org/apache/spark/streaming/Time.scala
@@ -89,5 +89,5 @@ case class Time(private val millis: Long) {
}
object Time {
- implicit val ordering = Ordering.by((time: Time) => time.millis)
+ implicit val ordering: Ordering[Time] = Ordering.by((time: Time) => time.millis)
}
diff --git a/streaming/src/main/scala/org/apache/spark/streaming/receiver/ReceivedBlockHandler.scala b/streaming/src/main/scala/org/apache/spark/streaming/receiver/ReceivedBlockHandler.scala
index 7a561ecb4990f..54bab53a747ac 100644
--- a/streaming/src/main/scala/org/apache/spark/streaming/receiver/ReceivedBlockHandler.scala
+++ b/streaming/src/main/scala/org/apache/spark/streaming/receiver/ReceivedBlockHandler.scala
@@ -17,7 +17,7 @@
package org.apache.spark.streaming.receiver
-import scala.concurrent.{ExecutionContext, Future}
+import scala.concurrent.{ExecutionContext, ExecutionContextExecutorService, Future}
import scala.concurrent.duration._
import org.apache.hadoop.conf.Configuration
@@ -159,8 +159,8 @@ private[streaming] class WriteAheadLogBasedBlockHandler(
// For processing futures used in parallel block storing into block manager and write ahead log
// # threads = 2, so that both writing to BM and WAL can proceed in parallel
- implicit private val executionContext = ExecutionContext.fromExecutorService(
- ThreadUtils.newDaemonFixedThreadPool(2, this.getClass.getSimpleName))
+ implicit private val executionContext: ExecutionContextExecutorService = ExecutionContext
+ .fromExecutorService(ThreadUtils.newDaemonFixedThreadPool(2, this.getClass.getSimpleName))
/**
* This implementation stores the block into the block manager as well as a write ahead log.