diff --git a/core/src/main/scala/org/apache/spark/rdd/SequenceFileRDDFunctions.scala b/core/src/main/scala/org/apache/spark/rdd/SequenceFileRDDFunctions.scala
index 02def89dd8..2f6ff0acdf 100644
--- a/core/src/main/scala/org/apache/spark/rdd/SequenceFileRDDFunctions.scala
+++ b/core/src/main/scala/org/apache/spark/rdd/SequenceFileRDDFunctions.scala
@@ -32,16 +32,13 @@ import org.apache.spark.internal.Logging
  * @note This can't be part of PairRDDFunctions because we need more implicit parameters to
  * convert our keys and values to Writable.
  */
-class SequenceFileRDDFunctions[K <% Writable: ClassTag, V <% Writable : ClassTag](
+class SequenceFileRDDFunctions[K: IsWritable: ClassTag, V: IsWritable: ClassTag](
     self: RDD[(K, V)],
     _keyWritableClass: Class[_ <: Writable],
     _valueWritableClass: Class[_ <: Writable])
   extends Logging
   with Serializable {
 
-  // TODO the context bound (<%) above should be replaced with simple type bound and implicit
-  // conversion but is a breaking change. This should be fixed in Spark 3.x.
-
   /**
    * Output the RDD as a Hadoop SequenceFile using the Writable types we infer from the RDD's key
    * and value types. If the key or value are Writable, then we use their classes directly;
@@ -52,7 +49,7 @@ class SequenceFileRDDFunctions[K <% Writable: ClassTag, V <% Writable : ClassTag
   def saveAsSequenceFile(
       path: String,
       codec: Option[Class[_ <: CompressionCodec]] = None): Unit = self.withScope {
-    def anyToWritable[U <% Writable](u: U): Writable = u
+    def anyToWritable[U: IsWritable](u: U): Writable = u
 
     // TODO We cannot force the return type of `anyToWritable` be same as keyWritableClass and
     // valueWritableClass at the compile time. To implement that, we need to add type parameters to
diff --git a/core/src/main/scala/org/apache/spark/rdd/package.scala b/core/src/main/scala/org/apache/spark/rdd/package.scala
index 55fc6e4d2b..43ca6d7643 100644
--- a/core/src/main/scala/org/apache/spark/rdd/package.scala
+++ b/core/src/main/scala/org/apache/spark/rdd/package.scala
@@ -17,7 +17,11 @@
 
 package org.apache.spark
 
+import org.apache.hadoop.io.Writable
+
 /**
  * Provides several RDD implementations. See [[org.apache.spark.rdd.RDD]].
  */
-package object rdd
+package object rdd {
+  type IsWritable[A] = A => Writable
+}
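
For context on why this substitution works: the `type IsWritable[A] = A => Writable` alias lets the deprecated view bound `K <% Writable` be rewritten as the context bound `K: IsWritable`, which desugars to an implicit `K => Writable` parameter. Below is a minimal, self-contained sketch (not Spark's actual implicit machinery; the `IsWritableExample` object and the `intToWritable`/`stringToWritable` conversions are made up for illustration) showing how the converted `anyToWritable` shape resolves on Scala 2, where an in-scope implicit value of function type is applied as a view:

```scala
import org.apache.hadoop.io.{IntWritable, Text, Writable}

object IsWritableExample {
  // Mirrors the alias added to the rdd package object: a context bound
  // [U: IsWritable] desugars to an implicit `U => Writable` parameter,
  // which is what the removed view bound (U <% Writable) used to provide.
  type IsWritable[A] = A => Writable

  // Illustrative conversions only; Spark supplies its own Writable implicits.
  implicit val intToWritable: Int => Writable = (i: Int) => new IntWritable(i)
  implicit val stringToWritable: String => Writable = (s: String) => new Text(s)

  // Same shape as the patched helper: on Scala 2 the implicit evidence of
  // function type acts as a view, converting `u` to a Writable.
  def anyToWritable[U: IsWritable](u: U): Writable = u

  def main(args: Array[String]): Unit = {
    println(anyToWritable(42))      // an IntWritable
    println(anyToWritable("spark")) // a Text
  }
}
```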