[SPARK-27733][CORE] Upgrade Avro to version 1.10.1

### What changes were proposed in this pull request?

Update the Avro dependency to version 1.10.1.

### Why are the changes needed?

To catch up with multiple improvements in Avro, as well as to fix security issues in transitive dependencies.

### Does this PR introduce _any_ user-facing change?

No

### How was this patch tested?

Since no API changes were required, we just ran the existing tests.

Closes #31232 from iemejia/SPARK-27733-avro-upgrade.

Authored-by: Ismaël Mejía <iemejia@gmail.com>
Signed-off-by: Dongjoon Hyun <dhyun@apple.com>
Ismaël Mejía 2021-01-20 15:42:27 -08:00 committed by Dongjoon Hyun
parent d68612a008
commit e9e81f798f
14 changed files with 39 additions and 80 deletions


@@ -35,10 +35,6 @@
</properties>
<dependencies>
<dependency>
<groupId>com.thoughtworks.paranamer</groupId>
<artifactId>paranamer</artifactId>
</dependency>
<dependency>
<groupId>org.apache.avro</groupId>
<artifactId>avro</artifactId>
@@ -46,7 +42,6 @@
<dependency>
<groupId>org.apache.avro</groupId>
<artifactId>avro-mapred</artifactId>
<classifier>${avro.mapred.classifier}</classifier>
</dependency>
<dependency>
<groupId>com.google.guava</groupId>
@@ -171,6 +166,10 @@
<artifactId>jakarta.servlet-api</artifactId>
<version>${jakartaservlet.version}</version>
</dependency>
<dependency>
<groupId>commons-codec</groupId>
<artifactId>commons-codec</artifactId>
</dependency>
<dependency>
<groupId>org.apache.commons</groupId>
<artifactId>commons-lang3</artifactId>


@@ -20,7 +20,7 @@ package org.apache.spark.util.collection.unsafe.sort;
import java.util.Comparator;
import java.util.LinkedList;
import org.apache.avro.reflect.Nullable;
import javax.annotation.Nullable;
import org.apache.spark.TaskContext;
import org.apache.spark.memory.MemoryConsumer;


@@ -22,9 +22,9 @@ arrow-memory-netty/2.0.0//arrow-memory-netty-2.0.0.jar
arrow-vector/2.0.0//arrow-vector-2.0.0.jar
audience-annotations/0.5.0//audience-annotations-0.5.0.jar
automaton/1.11-8//automaton-1.11-8.jar
avro-ipc/1.8.2//avro-ipc-1.8.2.jar
avro-mapred/1.8.2/hadoop2/avro-mapred-1.8.2-hadoop2.jar
avro/1.8.2//avro-1.8.2.jar
avro-ipc/1.10.1//avro-ipc-1.10.1.jar
avro-mapred/1.10.1//avro-mapred-1.10.1.jar
avro/1.10.1//avro-1.10.1.jar
bonecp/0.8.0.RELEASE//bonecp-0.8.0.RELEASE.jar
breeze-macros_2.12/1.0//breeze-macros_2.12-1.0.jar
breeze_2.12/1.0//breeze_2.12-1.0.jar
@@ -238,7 +238,7 @@ xbean-asm7-shaded/4.15//xbean-asm7-shaded-4.15.jar
xercesImpl/2.12.0//xercesImpl-2.12.0.jar
xml-apis/1.4.01//xml-apis-1.4.01.jar
xmlenc/0.52//xmlenc-0.52.jar
xz/1.5//xz-1.5.jar
xz/1.8//xz-1.8.jar
zjsonpatch/0.3.0//zjsonpatch-0.3.0.jar
zookeeper-jute/3.6.2//zookeeper-jute-3.6.2.jar
zookeeper/3.6.2//zookeeper-3.6.2.jar


@@ -17,9 +17,9 @@ arrow-memory-netty/2.0.0//arrow-memory-netty-2.0.0.jar
arrow-vector/2.0.0//arrow-vector-2.0.0.jar
audience-annotations/0.5.0//audience-annotations-0.5.0.jar
automaton/1.11-8//automaton-1.11-8.jar
avro-ipc/1.8.2//avro-ipc-1.8.2.jar
avro-mapred/1.8.2/hadoop2/avro-mapred-1.8.2-hadoop2.jar
avro/1.8.2//avro-1.8.2.jar
avro-ipc/1.10.1//avro-ipc-1.10.1.jar
avro-mapred/1.10.1//avro-mapred-1.10.1.jar
avro/1.10.1//avro-1.10.1.jar
bonecp/0.8.0.RELEASE//bonecp-0.8.0.RELEASE.jar
breeze-macros_2.12/1.0//breeze-macros_2.12-1.0.jar
breeze_2.12/1.0//breeze_2.12-1.0.jar
@@ -205,7 +205,7 @@ transaction-api/1.1//transaction-api-1.1.jar
univocity-parsers/2.9.1//univocity-parsers-2.9.1.jar
velocity/1.5//velocity-1.5.jar
xbean-asm7-shaded/4.15//xbean-asm7-shaded-4.15.jar
xz/1.5//xz-1.5.jar
xz/1.8//xz-1.8.jar
zjsonpatch/0.3.0//zjsonpatch-0.3.0.jar
zookeeper-jute/3.6.2//zookeeper-jute-3.6.2.jar
zookeeper/3.6.2//zookeeper-3.6.2.jar


@@ -339,7 +339,7 @@ applications. Read the [Advanced Dependency Management](https://spark.apache
Submission Guide for more details.
## Supported types for Avro -> Spark SQL conversion
Currently Spark supports reading all [primitive types](https://avro.apache.org/docs/1.8.2/spec.html#schema_primitive) and [complex types](https://avro.apache.org/docs/1.8.2/spec.html#schema_complex) under records of Avro.
Currently Spark supports reading all [primitive types](https://avro.apache.org/docs/1.10.1/spec.html#schema_primitive) and [complex types](https://avro.apache.org/docs/1.10.1/spec.html#schema_complex) under records of Avro.
<table class="table">
<tr><th><b>Avro type</b></th><th><b>Spark SQL type</b></th></tr>
<tr>
@@ -403,7 +403,7 @@ In addition to the types listed above, it supports reading `union` types. The fo
3. `union(something, null)`, where something is any supported Avro type. This will be mapped to the same Spark SQL type as that of something, with nullable set to true.
All other union types are considered complex. They will be mapped to StructType where field names are member0, member1, etc., in accordance with members of the union. This is consistent with the behavior when converting between Avro and Parquet.
It also supports reading the following Avro [logical types](https://avro.apache.org/docs/1.8.2/spec.html#Logical+Types):
It also supports reading the following Avro [logical types](https://avro.apache.org/docs/1.10.1/spec.html#Logical+Types):
<table class="table">
<tr><th><b>Avro logical type</b></th><th><b>Avro type</b></th><th><b>Spark SQL type</b></th></tr>
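For context, a minimal sketch in Scala of reading and writing Avro data with the `avro` data source described in the documentation above; the `users.avro` path and app name are illustrative assumptions, not part of this change:

```scala
import org.apache.spark.sql.SparkSession

// A minimal sketch, assuming the built-in "avro" data source (spark-avro) is on
// the classpath; the file paths and app name here are illustrative only.
val spark = SparkSession.builder().appName("avro-read-example").getOrCreate()

// Primitive and complex types under the top-level Avro record are mapped to the
// Spark SQL types listed above; a union(something, null) becomes the same Spark
// SQL type as `something`, with nullable set to true.
val df = spark.read.format("avro").load("users.avro")
df.printSchema()

// Write the data back out as Avro.
df.write.format("avro").save("users_copy.avro")
```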


@@ -70,6 +70,10 @@
<groupId>org.apache.spark</groupId>
<artifactId>spark-tags_${scala.binary.version}</artifactId>
</dependency>
<dependency>
<groupId>org.tukaani</groupId>
<artifactId>xz</artifactId>
</dependency>
</dependencies>
<build>
<outputDirectory>target/scala-${scala.binary.version}/classes</outputDirectory>


@@ -51,14 +51,14 @@ private[sql] class AvroOptions(
/**
* Top level record name in write result, which is required in Avro spec.
* See https://avro.apache.org/docs/1.8.2/spec.html#schema_record .
* See https://avro.apache.org/docs/1.10.1/spec.html#schema_record .
* Default value is "topLevelRecord"
*/
val recordName: String = parameters.getOrElse("recordName", "topLevelRecord")
/**
* Record namespace in write result. Default value is "".
* See Avro spec for details: https://avro.apache.org/docs/1.8.2/spec.html#schema_record .
* See Avro spec for details: https://avro.apache.org/docs/1.10.1/spec.html#schema_record .
*/
val recordNamespace: String = parameters.getOrElse("recordNamespace", "")
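A hedged sketch of how these write options are passed from user code; the record name, namespace, sample data, and output path below are illustrative assumptions:

```scala
import org.apache.spark.sql.SparkSession

// A minimal sketch only; values here are illustrative, not part of this change.
val spark = SparkSession.builder().appName("avro-options-example").getOrCreate()
import spark.implicits._

val df = Seq(("Alice", 1), ("Bob", 2)).toDF("name", "id")

df.write
  .format("avro")
  .option("recordName", "ExampleRecord")     // top-level record name in the written Avro schema
  .option("recordNamespace", "org.example")  // namespace of the top-level record
  .save("/tmp/avro-options-example")
```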


@@ -1015,7 +1015,7 @@ abstract class AvroSuite
.save(s"$tempDir/${UUID.randomUUID()}")
}.getCause.getMessage
assert(message.contains("Caused by: java.lang.NullPointerException: " +
"in test_schema in string null of string in field Name"))
"null of string in string in field Name of test_schema in test_schema"))
}
}


@@ -83,7 +83,6 @@
<dependency>
<groupId>org.apache.avro</groupId>
<artifactId>avro-mapred</artifactId>
<classifier>${avro.mapred.classifier}</classifier>
<scope>provided</scope>
</dependency>
<dependency>


@@ -100,15 +100,9 @@
<artifactId>${hadoop-client-runtime.artifact}</artifactId>
<version>${hadoop.version}</version>
</dependency>
<dependency>
<groupId>org.apache.avro</groupId>
<artifactId>avro-ipc</artifactId>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>org.apache.avro</groupId>
<artifactId>avro-mapred</artifactId>
<classifier>${avro.mapred.classifier}</classifier>
<scope>provided</scope>
</dependency>
<dependency>

pom.xml

@@ -148,8 +148,7 @@
the link to metrics.dropwizard.io in docs/monitoring.md.
-->
<codahale.metrics.version>4.1.1</codahale.metrics.version>
<avro.version>1.8.2</avro.version>
<avro.mapred.classifier>hadoop2</avro.mapred.classifier>
<avro.version>1.10.1</avro.version>
<aws.kinesis.client.version>1.12.0</aws.kinesis.client.version>
<!-- Should be consistent with Kinesis client dependency -->
<aws.java.sdk.version>1.11.655</aws.java.sdk.version>
@@ -194,10 +193,6 @@
<jpam.version>1.1</jpam.version>
<selenium.version>3.141.59</selenium.version>
<htmlunit.version>2.40.0</htmlunit.version>
<!--
Managed up from older version from Avro; sync with jackson-module-paranamer dependency version
-->
<paranamer.version>2.8</paranamer.version>
<maven-antrun.version>1.8</maven-antrun.version>
<commons-crypto.version>1.1.0</commons-crypto.version>
<!--
@@ -1206,48 +1201,16 @@
<artifactId>avro</artifactId>
<version>${avro.version}</version>
</dependency>
<dependency>
<groupId>org.apache.avro</groupId>
<artifactId>avro-ipc</artifactId>
<version>${avro.version}</version>
<exclusions>
<exclusion>
<groupId>io.netty</groupId>
<artifactId>netty</artifactId>
</exclusion>
<exclusion>
<groupId>org.mortbay.jetty</groupId>
<artifactId>jetty</artifactId>
</exclusion>
<exclusion>
<groupId>org.mortbay.jetty</groupId>
<artifactId>jetty-util</artifactId>
</exclusion>
<exclusion>
<groupId>org.mortbay.jetty</groupId>
<artifactId>servlet-api</artifactId>
</exclusion>
<exclusion>
<groupId>org.apache.velocity</groupId>
<artifactId>velocity</artifactId>
</exclusion>
</exclusions>
</dependency>
<!-- avro-mapred for some reason depends on avro-ipc's test jar, so undo that. -->
<dependency>
<groupId>org.apache.avro</groupId>
<artifactId>avro-ipc</artifactId>
<classifier>tests</classifier>
<version>${avro.version}</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.apache.avro</groupId>
<artifactId>avro-mapred</artifactId>
<version>${avro.version}</version>
<classifier>${avro.mapred.classifier}</classifier>
<scope>${hive.deps.scope}</scope>
<exclusions>
<exclusion>
<groupId>org.apache.avro</groupId>
<artifactId>avro-ipc-jetty</artifactId>
</exclusion>
<exclusion>
<groupId>io.netty</groupId>
<artifactId>netty</artifactId>
@@ -1266,10 +1229,19 @@
</exclusion>
<exclusion>
<groupId>org.apache.velocity</groupId>
<artifactId>velocity</artifactId>
<artifactId>velocity-engine-core</artifactId>
</exclusion>
<exclusion>
<groupId>javax.annotation</groupId>
<artifactId>javax.annotation-api</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.tukaani</groupId>
<artifactId>xz</artifactId>
<version>1.8</version>
</dependency>
<!-- See SPARK-23654 for info on this dependency;
It is used to keep javax.activation at v1.1.1 after dropping
jets3t as a dependency.
@@ -2389,12 +2361,6 @@
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>com.thoughtworks.paranamer</groupId>
<artifactId>paranamer</artifactId>
<version>${paranamer.version}</version>
<scope>runtime</scope>
</dependency>
<dependency>
<groupId>org.apache.arrow</groupId>
<artifactId>arrow-vector</artifactId>


@@ -655,7 +655,7 @@ object DependencyOverrides {
dependencyOverrides += "com.google.guava" % "guava" % guavaVersion,
dependencyOverrides += "xerces" % "xercesImpl" % "2.12.0",
dependencyOverrides += "jline" % "jline" % "2.14.6",
dependencyOverrides += "org.apache.avro" % "avro" % "1.8.2")
dependencyOverrides += "org.apache.avro" % "avro" % "1.10.1")
}
/**


@@ -34,7 +34,7 @@ addSbtPlugin("com.typesafe" % "sbt-mima-plugin" % "0.8.0")
addSbtPlugin("com.eed3si9n" % "sbt-unidoc" % "0.4.3")
addSbtPlugin("com.cavorite" % "sbt-avro" % "2.1.1")
libraryDependencies += "org.apache.avro" % "avro-compiler" % "1.8.2"
libraryDependencies += "org.apache.avro" % "avro-compiler" % "1.10.1"
addSbtPlugin("io.spray" % "sbt-revolver" % "0.9.1")


@@ -122,12 +122,9 @@
<groupId>org.apache.avro</groupId>
<artifactId>avro</artifactId>
</dependency>
<!-- use the build matching the hadoop api of avro-mapred (i.e. no classifier for hadoop 1 API,
hadoop2 classifier for hadoop 2 API. avro-mapred is a dependency of org.spark-project.hive:hive-serde -->
<dependency>
<groupId>org.apache.avro</groupId>
<artifactId>avro-mapred</artifactId>
<classifier>${avro.mapred.classifier}</classifier>
</dependency>
<dependency>
<groupId>commons-httpclient</groupId>