diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/util/BadRecordException.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/util/BadRecordException.scala
index d719a33929..67defe78a6 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/util/BadRecordException.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/util/BadRecordException.scala
@@ -38,6 +38,6 @@ case class PartialResultException(
  * @param cause the actual exception about why the record is bad and can't be parsed.
  */
 case class BadRecordException(
-    record: () => UTF8String,
-    partialResult: () => Option[InternalRow],
+    @transient record: () => UTF8String,
+    @transient partialResult: () => Option[InternalRow],
     cause: Throwable) extends Exception(cause)
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/csv/CSVSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/csv/CSVSuite.scala
index 2a3f2b0ec9..1651eb6a8e 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/csv/CSVSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/csv/CSVSuite.scala
@@ -30,6 +30,7 @@ import scala.collection.JavaConverters._
 import scala.util.Properties
 
 import com.univocity.parsers.common.TextParsingException
+import org.apache.commons.lang3.exception.ExceptionUtils
 import org.apache.commons.lang3.time.FastDateFormat
 import org.apache.hadoop.io.SequenceFile.CompressionType
 import org.apache.hadoop.io.compress.GzipCodec
@@ -366,6 +367,7 @@ abstract class CSVSuite
       }
       assert(exception.getMessage.contains("Malformed CSV record"))
+      assert(ExceptionUtils.getRootCause(exception).isInstanceOf[RuntimeException])
     }
   }