[SPARK-26026][BUILD] Published Scaladoc jars missing from Maven Central
## What changes were proposed in this pull request?

This restores scaladoc artifact generation, which got dropped with the Scala 2.12 update. The change looks large, but is almost all due to needing to make the InterfaceStability annotations top-level classes (i.e. `InterfaceStability.Stable` -> `Stable`), unfortunately. A few inner class references had to be qualified too.

Lots of scaladoc warnings now reappear. We can choose to disable generation by default and enable for releases, later.

## How was this patch tested?

N/A; build runs scaladoc now.

Closes #23069 from srowen/SPARK-26026.

Authored-by: Sean Owen <sean.owen@databricks.com>
Signed-off-by: Sean Owen <sean.owen@databricks.com>
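For illustration, this is the shape of the rename applied many times in the diff below. `MyStableApi` is a made-up class, not part of this commit, and the sketch assumes the `org.apache.spark.annotation` package is on the classpath:

```java
// Hypothetical example class, not from this commit.

// Before: annotations were nested types of the InterfaceStability container.
//
//   import org.apache.spark.annotation.InterfaceStability;
//
//   @InterfaceStability.Stable
//   public class MyStableApi {}

// After: each annotation is a top-level type in the same package, so the
// annotation name no longer needs the container-class qualifier.
import org.apache.spark.annotation.Stable;

@Stable
public class MyStableApi {}
```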
parent bbbdaa82a4
commit 630e25e355
@@ -33,7 +33,7 @@ public final class ChunkFetchFailure extends AbstractMessage implements Response
   }
 
   @Override
-  public Type type() { return Type.ChunkFetchFailure; }
+  public Message.Type type() { return Type.ChunkFetchFailure; }
 
   @Override
   public int encodedLength() {
@@ -32,7 +32,7 @@ public final class ChunkFetchRequest extends AbstractMessage implements RequestM
   }
 
   @Override
-  public Type type() { return Type.ChunkFetchRequest; }
+  public Message.Type type() { return Type.ChunkFetchRequest; }
 
   @Override
   public int encodedLength() {
@@ -39,7 +39,7 @@ public final class ChunkFetchSuccess extends AbstractResponseMessage {
   }
 
   @Override
-  public Type type() { return Type.ChunkFetchSuccess; }
+  public Message.Type type() { return Type.ChunkFetchSuccess; }
 
   @Override
   public int encodedLength() {
@@ -34,7 +34,7 @@ public final class OneWayMessage extends AbstractMessage implements RequestMessa
   }
 
   @Override
-  public Type type() { return Type.OneWayMessage; }
+  public Message.Type type() { return Type.OneWayMessage; }
 
   @Override
   public int encodedLength() {
@@ -31,7 +31,7 @@ public final class RpcFailure extends AbstractMessage implements ResponseMessage
   }
 
   @Override
-  public Type type() { return Type.RpcFailure; }
+  public Message.Type type() { return Type.RpcFailure; }
 
   @Override
   public int encodedLength() {
@@ -38,7 +38,7 @@ public final class RpcRequest extends AbstractMessage implements RequestMessage
   }
 
   @Override
-  public Type type() { return Type.RpcRequest; }
+  public Message.Type type() { return Type.RpcRequest; }
 
   @Override
   public int encodedLength() {
@@ -33,7 +33,7 @@ public final class RpcResponse extends AbstractResponseMessage {
   }
 
   @Override
-  public Type type() { return Type.RpcResponse; }
+  public Message.Type type() { return Type.RpcResponse; }
 
   @Override
   public int encodedLength() {
@@ -33,7 +33,7 @@ public final class StreamFailure extends AbstractMessage implements ResponseMess
   }
 
   @Override
-  public Type type() { return Type.StreamFailure; }
+  public Message.Type type() { return Type.StreamFailure; }
 
   @Override
   public int encodedLength() {
@@ -34,7 +34,7 @@ public final class StreamRequest extends AbstractMessage implements RequestMessa
   }
 
   @Override
-  public Type type() { return Type.StreamRequest; }
+  public Message.Type type() { return Type.StreamRequest; }
 
   @Override
   public int encodedLength() {
@@ -40,7 +40,7 @@ public final class StreamResponse extends AbstractResponseMessage {
   }
 
   @Override
-  public Type type() { return Type.StreamResponse; }
+  public Message.Type type() { return Type.StreamResponse; }
 
   @Override
   public int encodedLength() {
@@ -52,7 +52,7 @@ public final class UploadStream extends AbstractMessage implements RequestMessag
   }
 
   @Override
-  public Type type() { return Type.UploadStream; }
+  public Message.Type type() { return Type.UploadStream; }
 
   @Override
   public int encodedLength() {
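The `Type` -> `Message.Type` hunks above are the "inner class references had to be qualified" part of the commit message: `Type` is a nested type of the `Message` interface, which javac resolves unqualified inside implementing classes, but which the scaladoc run this change re-enables is evidently stricter about. A minimal sketch of the pattern, using simplified stand-ins rather than the real protocol classes:

```java
// Simplified stand-ins; the enum constants and the Request class are
// illustrative, not from this commit.
interface Message {
  enum Type { REQUEST, RESPONSE }
  Type type();
}

class Request implements Message {
  // The return type is written as Message.Type rather than the inherited
  // bare Type, mirroring the qualification applied in the hunks above.
  @Override
  public Message.Type type() { return Type.REQUEST; }
}
```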
@@ -23,6 +23,7 @@ import io.netty.buffer.Unpooled;
 import org.apache.spark.network.buffer.NettyManagedBuffer;
 import org.apache.spark.network.protocol.Encoders;
 import org.apache.spark.network.protocol.AbstractMessage;
+import org.apache.spark.network.protocol.Message;
 
 /**
  * Encodes a Sasl-related message which is attempting to authenticate using some credentials tagged
@@ -46,7 +47,7 @@ class SaslMessage extends AbstractMessage {
   }
 
   @Override
-  public Type type() { return Type.User; }
+  public Message.Type type() { return Type.User; }
 
   @Override
   public int encodedLength() {
@@ -101,7 +101,7 @@ public class RetryingBlockFetcher {
 
   public RetryingBlockFetcher(
       TransportConf conf,
-      BlockFetchStarter fetchStarter,
+      RetryingBlockFetcher.BlockFetchStarter fetchStarter,
       String[] blockIds,
       BlockFetchingListener listener) {
     this.fetchStarter = fetchStarter;
@@ -0,0 +1,30 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.spark.annotation;
+
+import java.lang.annotation.*;
+
+/**
+ * APIs that are meant to evolve towards becoming stable APIs, but are not stable APIs yet.
+ * Evolving interfaces can change from one feature release to another release (i.e. 2.1 to 2.2).
+ */
+@Documented
+@Retention(RetentionPolicy.RUNTIME)
+@Target({ElementType.TYPE, ElementType.FIELD, ElementType.METHOD, ElementType.PARAMETER,
+    ElementType.CONSTRUCTOR, ElementType.LOCAL_VARIABLE, ElementType.PACKAGE})
+public @interface Evolving {}
@@ -1,58 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.spark.annotation;
-
-import java.lang.annotation.*;
-
-/**
- * Annotation to inform users of how much to rely on a particular package,
- * class or method not changing over time.
- */
-public class InterfaceStability {
-
-  /**
-   * Stable APIs that retain source and binary compatibility within a major release.
-   * These interfaces can change from one major release to another major release
-   * (e.g. from 1.0 to 2.0).
-   */
-  @Documented
-  @Retention(RetentionPolicy.RUNTIME)
-  @Target({ElementType.TYPE, ElementType.FIELD, ElementType.METHOD, ElementType.PARAMETER,
-      ElementType.CONSTRUCTOR, ElementType.LOCAL_VARIABLE, ElementType.PACKAGE})
-  public @interface Stable {};
-
-  /**
-   * APIs that are meant to evolve towards becoming stable APIs, but are not stable APIs yet.
-   * Evolving interfaces can change from one feature release to another release (i.e. 2.1 to 2.2).
-   */
-  @Documented
-  @Retention(RetentionPolicy.RUNTIME)
-  @Target({ElementType.TYPE, ElementType.FIELD, ElementType.METHOD, ElementType.PARAMETER,
-      ElementType.CONSTRUCTOR, ElementType.LOCAL_VARIABLE, ElementType.PACKAGE})
-  public @interface Evolving {};
-
-  /**
-   * Unstable APIs, with no guarantee on stability.
-   * Classes that are unannotated are considered Unstable.
-   */
-  @Documented
-  @Retention(RetentionPolicy.RUNTIME)
-  @Target({ElementType.TYPE, ElementType.FIELD, ElementType.METHOD, ElementType.PARAMETER,
-      ElementType.CONSTRUCTOR, ElementType.LOCAL_VARIABLE, ElementType.PACKAGE})
-  public @interface Unstable {};
-}
@@ -0,0 +1,31 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.spark.annotation;
+
+import java.lang.annotation.*;
+
+/**
+ * Stable APIs that retain source and binary compatibility within a major release.
+ * These interfaces can change from one major release to another major release
+ * (e.g. from 1.0 to 2.0).
+ */
+@Documented
+@Retention(RetentionPolicy.RUNTIME)
+@Target({ElementType.TYPE, ElementType.FIELD, ElementType.METHOD, ElementType.PARAMETER,
+    ElementType.CONSTRUCTOR, ElementType.LOCAL_VARIABLE, ElementType.PACKAGE})
+public @interface Stable {}
@@ -0,0 +1,30 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.spark.annotation;
+
+import java.lang.annotation.*;
+
+/**
+ * Unstable APIs, with no guarantee on stability.
+ * Classes that are unannotated are considered Unstable.
+ */
+@Documented
+@Retention(RetentionPolicy.RUNTIME)
+@Target({ElementType.TYPE, ElementType.FIELD, ElementType.METHOD, ElementType.PARAMETER,
+    ElementType.CONSTRUCTOR, ElementType.LOCAL_VARIABLE, ElementType.PACKAGE})
+public @interface Unstable {}
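Taken together, the three new files above (`Evolving.java`, `Stable.java`, `Unstable.java`) carry over the javadoc text, `RUNTIME` retention, and element targets of the matching inner annotations deleted from `InterfaceStability.java`, so call sites only need the import-and-name change illustrated after the commit message.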
@@ -22,7 +22,7 @@ import scala.reflect.ClassTag
 import com.amazonaws.services.kinesis.clientlibrary.lib.worker.InitialPositionInStream
 import com.amazonaws.services.kinesis.model.Record
 
-import org.apache.spark.annotation.InterfaceStability
+import org.apache.spark.annotation.Evolving
 import org.apache.spark.rdd.RDD
 import org.apache.spark.storage.{BlockId, StorageLevel}
 import org.apache.spark.streaming.{Duration, StreamingContext, Time}
@@ -84,14 +84,14 @@ private[kinesis] class KinesisInputDStream[T: ClassTag](
   }
 }
 
-@InterfaceStability.Evolving
+@Evolving
 object KinesisInputDStream {
   /**
    * Builder for [[KinesisInputDStream]] instances.
    *
    * @since 2.2.0
    */
-  @InterfaceStability.Evolving
+  @Evolving
   class Builder {
     // Required params
     private var streamingContext: Option[StreamingContext] = None
@@ -14,13 +14,12 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.spark.streaming.kinesis
-
-import scala.collection.JavaConverters._
+package org.apache.spark.streaming.kinesis
 
 import com.amazonaws.auth._
 
-import org.apache.spark.annotation.InterfaceStability
+import org.apache.spark.annotation.Evolving
 import org.apache.spark.internal.Logging
 
 /**
@@ -84,14 +83,14 @@ private[kinesis] final case class STSCredentials(
   }
 }
 
-@InterfaceStability.Evolving
+@Evolving
 object SparkAWSCredentials {
   /**
    * Builder for [[SparkAWSCredentials]] instances.
    *
    * @since 2.2.0
    */
-  @InterfaceStability.Evolving
+  @Evolving
   class Builder {
     private var basicCreds: Option[BasicCredentials] = None
     private var stsCreds: Option[STSCredentials] = None
@@ -31,8 +31,8 @@ abstract class AbstractAppHandle implements SparkAppHandle {
   private final LauncherServer server;
 
   private LauncherServer.ServerConnection connection;
-  private List<Listener> listeners;
-  private AtomicReference<State> state;
+  private List<SparkAppHandle.Listener> listeners;
+  private AtomicReference<SparkAppHandle.State> state;
   private volatile String appId;
   private volatile boolean disposed;
 
@@ -42,7 +42,7 @@ abstract class AbstractAppHandle implements SparkAppHandle {
   }
 
   @Override
-  public synchronized void addListener(Listener l) {
+  public synchronized void addListener(SparkAppHandle.Listener l) {
     if (listeners == null) {
      listeners = new CopyOnWriteArrayList<>();
    }
@@ -50,7 +50,7 @@ abstract class AbstractAppHandle implements SparkAppHandle {
   }
 
   @Override
-  public State getState() {
+  public SparkAppHandle.State getState() {
     return state.get();
   }
 
@@ -120,11 +120,11 @@ abstract class AbstractAppHandle implements SparkAppHandle {
     }
   }
 
-  void setState(State s) {
+  void setState(SparkAppHandle.State s) {
     setState(s, false);
   }
 
-  void setState(State s, boolean force) {
+  void setState(SparkAppHandle.State s, boolean force) {
     if (force) {
       state.set(s);
       fireEvent(false);
@@ -31,7 +31,7 @@ import org.json4s.JsonDSL._
 import org.json4s.jackson.JsonMethods._
 
 import org.apache.spark.{SparkContext, SparkException}
-import org.apache.spark.annotation.{DeveloperApi, InterfaceStability, Since}
+import org.apache.spark.annotation.{DeveloperApi, Since, Unstable}
 import org.apache.spark.internal.Logging
 import org.apache.spark.ml._
 import org.apache.spark.ml.classification.{OneVsRest, OneVsRestModel}
@@ -84,7 +84,7 @@ private[util] sealed trait BaseReadWrite {
  *
  * @since 2.4.0
  */
-@InterfaceStability.Unstable
+@Unstable
 @Since("2.4.0")
 trait MLWriterFormat {
   /**
@@ -108,7 +108,7 @@ trait MLWriterFormat {
  *
  * @since 2.4.0
 */
-@InterfaceStability.Unstable
+@Unstable
 @Since("2.4.0")
 trait MLFormatRegister extends MLWriterFormat {
   /**
@@ -208,7 +208,7 @@ abstract class MLWriter extends BaseReadWrite with Logging {
 /**
  * A ML Writer which delegates based on the requested format.
  */
-@InterfaceStability.Unstable
+@Unstable
 @Since("2.4.0")
 class GeneralMLWriter(stage: PipelineStage) extends MLWriter with Logging {
   private var source: String = "internal"
@@ -291,7 +291,7 @@ trait MLWritable {
  * Trait for classes that provide `GeneralMLWriter`.
  */
 @Since("2.4.0")
-@InterfaceStability.Unstable
+@Unstable
 trait GeneralMLWritable extends MLWritable {
   /**
    * Returns an `MLWriter` instance for this ML instance.
pom.xml
@@ -2016,7 +2016,6 @@
       <plugin>
         <groupId>net.alchim31.maven</groupId>
         <artifactId>scala-maven-plugin</artifactId>
-        <!-- 3.3.1 won't work with zinc; fails to find javac from java.home -->
         <version>3.4.4</version>
         <executions>
           <execution>
@@ -2037,6 +2036,13 @@
               <goal>testCompile</goal>
             </goals>
           </execution>
+          <execution>
+            <id>attach-scaladocs</id>
+            <phase>verify</phase>
+            <goals>
+              <goal>doc-jar</goal>
+            </goals>
+          </execution>
         </executions>
         <configuration>
           <scalaVersion>${scala.version}</scalaVersion>
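The `pom.xml` change above is the functional core of the fix: binding the scala-maven-plugin `doc-jar` goal to the `verify` phase makes the build generate a scaladoc jar and attach it to each module's artifacts, and attached artifacts are what `mvn deploy` publishes, which is how the doc jars return to Maven Central. (That `doc-jar` attaches the scaladoc jar under the usual javadoc classifier is scala-maven-plugin behavior, not something shown in this diff.)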
@@ -17,7 +17,7 @@
 
 package org.apache.spark.sql;
 
-import org.apache.spark.annotation.InterfaceStability;
+import org.apache.spark.annotation.Stable;
 import org.apache.spark.sql.catalyst.expressions.GenericRow;
 
 /**
@@ -25,7 +25,7 @@ import org.apache.spark.sql.catalyst.expressions.GenericRow;
  *
  * @since 1.3.0
  */
-@InterfaceStability.Stable
+@Stable
 public class RowFactory {
 
   /**
@@ -50,7 +50,7 @@ public final class UnsafeExternalRowSorter {
   private long numRowsInserted = 0;
 
   private final StructType schema;
-  private final PrefixComputer prefixComputer;
+  private final UnsafeExternalRowSorter.PrefixComputer prefixComputer;
   private final UnsafeExternalSorter sorter;
 
   public abstract static class PrefixComputer {
@@ -74,7 +74,7 @@ public final class UnsafeExternalRowSorter {
       StructType schema,
       Supplier<RecordComparator> recordComparatorSupplier,
       PrefixComparator prefixComparator,
-      PrefixComputer prefixComputer,
+      UnsafeExternalRowSorter.PrefixComputer prefixComputer,
       long pageSizeBytes,
       boolean canUseRadixSort) throws IOException {
     return new UnsafeExternalRowSorter(schema, recordComparatorSupplier, prefixComparator,
@@ -85,7 +85,7 @@ public final class UnsafeExternalRowSorter {
       StructType schema,
       Ordering<InternalRow> ordering,
       PrefixComparator prefixComparator,
-      PrefixComputer prefixComputer,
+      UnsafeExternalRowSorter.PrefixComputer prefixComputer,
       long pageSizeBytes,
       boolean canUseRadixSort) throws IOException {
     Supplier<RecordComparator> recordComparatorSupplier =
@@ -98,9 +98,9 @@ public final class UnsafeExternalRowSorter {
       StructType schema,
       Supplier<RecordComparator> recordComparatorSupplier,
       PrefixComparator prefixComparator,
-      PrefixComputer prefixComputer,
+      UnsafeExternalRowSorter.PrefixComputer prefixComputer,
       long pageSizeBytes,
-      boolean canUseRadixSort) throws IOException {
+      boolean canUseRadixSort) {
     this.schema = schema;
     this.prefixComputer = prefixComputer;
     final SparkEnv sparkEnv = SparkEnv.get();
@@ -17,8 +17,8 @@
 
 package org.apache.spark.sql.streaming;
 
+import org.apache.spark.annotation.Evolving;
 import org.apache.spark.annotation.Experimental;
-import org.apache.spark.annotation.InterfaceStability;
 import org.apache.spark.sql.catalyst.plans.logical.*;
 
 /**
@@ -29,7 +29,7 @@ import org.apache.spark.sql.catalyst.plans.logical.*;
  * @since 2.2.0
 */
-@InterfaceStability.Evolving
+@Evolving
 public class GroupStateTimeout {
 
   /**
@@ -17,7 +17,7 @@
 
 package org.apache.spark.sql.streaming;
 
-import org.apache.spark.annotation.InterfaceStability;
+import org.apache.spark.annotation.Evolving;
 import org.apache.spark.sql.catalyst.streaming.InternalOutputModes;
 
 /**
@@ -26,7 +26,7 @@ import org.apache.spark.sql.catalyst.streaming.InternalOutputModes;
  *
  * @since 2.0.0
  */
-@InterfaceStability.Evolving
+@Evolving
 public class OutputMode {
 
   /**
@@ -19,7 +19,7 @@ package org.apache.spark.sql.types;
 
 import java.util.*;
 
-import org.apache.spark.annotation.InterfaceStability;
+import org.apache.spark.annotation.Stable;
 
 /**
  * To get/create specific data type, users should use singleton objects and factory methods
@@ -27,7 +27,7 @@ import org.apache.spark.annotation.InterfaceStability;
  *
  * @since 1.3.0
  */
-@InterfaceStability.Stable
+@Stable
 public class DataTypes {
   /**
    * Gets the StringType object.
@@ -20,7 +20,7 @@ package org.apache.spark.sql.types;
 import java.lang.annotation.*;
 
 import org.apache.spark.annotation.DeveloperApi;
-import org.apache.spark.annotation.InterfaceStability;
+import org.apache.spark.annotation.Evolving;
 
 /**
  * ::DeveloperApi::
@@ -31,7 +31,7 @@ import org.apache.spark.annotation.InterfaceStability;
 @DeveloperApi
 @Retention(RetentionPolicy.RUNTIME)
 @Target(ElementType.TYPE)
-@InterfaceStability.Evolving
+@Evolving
 public @interface SQLUserDefinedType {
 
   /**
@@ -17,16 +17,15 @@
 
 package org.apache.spark.sql
 
-import org.apache.spark.annotation.InterfaceStability
+import org.apache.spark.annotation.Stable
 import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
 
-
 /**
  * Thrown when a query fails to analyze, usually because the query itself is invalid.
  *
  * @since 1.3.0
  */
-@InterfaceStability.Stable
+@Stable
 class AnalysisException protected[sql] (
     val message: String,
     val line: Option[Int] = None,
@@ -20,10 +20,9 @@ package org.apache.spark.sql
 import scala.annotation.implicitNotFound
 import scala.reflect.ClassTag
 
-import org.apache.spark.annotation.{Experimental, InterfaceStability}
+import org.apache.spark.annotation.{Evolving, Experimental}
 import org.apache.spark.sql.types._
 
-
 /**
  * :: Experimental ::
  * Used to convert a JVM object of type `T` to and from the internal Spark SQL representation.
@@ -67,7 +66,7 @@ import org.apache.spark.sql.types._
  * @since 1.6.0
  */
 @Experimental
-@InterfaceStability.Evolving
+@Evolving
 @implicitNotFound("Unable to find encoder for type ${T}. An implicit Encoder[${T}] is needed to " +
   "store ${T} instances in a Dataset. Primitive types (Int, String, etc) and Product types (case " +
   "classes) are supported by importing spark.implicits._ Support for serializing other types " +
@@ -22,7 +22,7 @@ import java.lang.reflect.Modifier
 import scala.reflect.{classTag, ClassTag}
 import scala.reflect.runtime.universe.TypeTag
 
-import org.apache.spark.annotation.{Experimental, InterfaceStability}
+import org.apache.spark.annotation.{Evolving, Experimental}
 import org.apache.spark.sql.catalyst.analysis.GetColumnByOrdinal
 import org.apache.spark.sql.catalyst.encoders.{encoderFor, ExpressionEncoder}
 import org.apache.spark.sql.catalyst.expressions.{BoundReference, Cast}
@@ -36,7 +36,7 @@ import org.apache.spark.sql.types._
  * @since 1.6.0
  */
 @Experimental
-@InterfaceStability.Evolving
+@Evolving
 object Encoders {
 
   /**
@@ -20,14 +20,14 @@ package org.apache.spark.sql
 import scala.collection.JavaConverters._
 import scala.util.hashing.MurmurHash3
 
-import org.apache.spark.annotation.InterfaceStability
+import org.apache.spark.annotation.Stable
 import org.apache.spark.sql.catalyst.expressions.GenericRow
 import org.apache.spark.sql.types.StructType
 
 /**
  * @since 1.3.0
  */
-@InterfaceStability.Stable
+@Stable
 object Row {
   /**
    * This method can be used to extract fields from a [[Row]] object in a pattern match. Example:
@@ -124,7 +124,7 @@ object Row {
  *
  * @since 1.3.0
  */
-@InterfaceStability.Stable
+@Stable
 trait Row extends Serializable {
   /** Number of elements in the Row. */
   def size: Int = length
@@ -19,7 +19,7 @@ package org.apache.spark.sql.types
 
 import scala.reflect.runtime.universe.TypeTag
 
-import org.apache.spark.annotation.InterfaceStability
+import org.apache.spark.annotation.Stable
 import org.apache.spark.sql.catalyst.expressions.Expression
 
 /**
@@ -134,7 +134,7 @@ object AtomicType {
  *
  * @since 1.3.0
  */
-@InterfaceStability.Stable
+@Stable
 abstract class NumericType extends AtomicType {
   // Unfortunately we can't get this implicitly as that breaks Spark Serialization. In order for
   // implicitly[Numeric[JvmType]] to be valid, we have to change JvmType from a type variable to a
@@ -21,7 +21,7 @@ import scala.math.Ordering
 
 import org.json4s.JsonDSL._
 
-import org.apache.spark.annotation.InterfaceStability
+import org.apache.spark.annotation.Stable
 import org.apache.spark.sql.catalyst.util.ArrayData
 
 /**
@@ -29,7 +29,7 @@ import org.apache.spark.sql.catalyst.util.ArrayData
  *
  * @since 1.3.0
  */
-@InterfaceStability.Stable
+@Stable
 object ArrayType extends AbstractDataType {
   /**
    * Construct a [[ArrayType]] object with the given element type. The `containsNull` is true.
@@ -60,7 +60,7 @@ object ArrayType extends AbstractDataType {
  *
  * @since 1.3.0
  */
-@InterfaceStability.Stable
+@Stable
 case class ArrayType(elementType: DataType, containsNull: Boolean) extends DataType {
 
   /** No-arg constructor for kryo. */
@@ -20,15 +20,14 @@ package org.apache.spark.sql.types
 import scala.math.Ordering
 import scala.reflect.runtime.universe.typeTag
 
-import org.apache.spark.annotation.InterfaceStability
+import org.apache.spark.annotation.Stable
 import org.apache.spark.sql.catalyst.util.TypeUtils
 
-
 /**
  * The data type representing `Array[Byte]` values.
  * Please use the singleton `DataTypes.BinaryType`.
  */
-@InterfaceStability.Stable
+@Stable
 class BinaryType private() extends AtomicType {
   // The companion object and this class is separated so the companion object also subclasses
   // this type. Otherwise, the companion object would be of type "BinaryType$" in byte code.
@@ -55,5 +54,5 @@ class BinaryType private() extends AtomicType {
 /**
  * @since 1.3.0
  */
-@InterfaceStability.Stable
+@Stable
 case object BinaryType extends BinaryType
@@ -20,15 +20,14 @@ package org.apache.spark.sql.types
 import scala.math.Ordering
 import scala.reflect.runtime.universe.typeTag
 
-import org.apache.spark.annotation.InterfaceStability
-
+import org.apache.spark.annotation.Stable
 
 /**
  * The data type representing `Boolean` values. Please use the singleton `DataTypes.BooleanType`.
  *
  * @since 1.3.0
  */
-@InterfaceStability.Stable
+@Stable
 class BooleanType private() extends AtomicType {
   // The companion object and this class is separated so the companion object also subclasses
   // this type. Otherwise, the companion object would be of type "BooleanType$" in byte code.
@@ -48,5 +47,5 @@ class BooleanType private() extends AtomicType {
 /**
  * @since 1.3.0
  */
-@InterfaceStability.Stable
+@Stable
 case object BooleanType extends BooleanType
@@ -20,14 +20,14 @@ package org.apache.spark.sql.types
 import scala.math.{Integral, Numeric, Ordering}
 import scala.reflect.runtime.universe.typeTag
 
-import org.apache.spark.annotation.InterfaceStability
+import org.apache.spark.annotation.Stable
 
 /**
  * The data type representing `Byte` values. Please use the singleton `DataTypes.ByteType`.
  *
  * @since 1.3.0
  */
-@InterfaceStability.Stable
+@Stable
 class ByteType private() extends IntegralType {
   // The companion object and this class is separated so the companion object also subclasses
   // this type. Otherwise, the companion object would be of type "ByteType$" in byte code.
@@ -52,5 +52,5 @@ class ByteType private() extends IntegralType {
 /**
  * @since 1.3.0
  */
-@InterfaceStability.Stable
+@Stable
 case object ByteType extends ByteType
@@ -17,7 +17,7 @@
 
 package org.apache.spark.sql.types
 
-import org.apache.spark.annotation.InterfaceStability
+import org.apache.spark.annotation.Stable
 
 /**
  * The data type representing calendar time intervals. The calendar time interval is stored
@@ -29,7 +29,7 @@ import org.apache.spark.annotation.InterfaceStability
  *
  * @since 1.5.0
  */
-@InterfaceStability.Stable
+@Stable
 class CalendarIntervalType private() extends DataType {
 
   override def defaultSize: Int = 16
@@ -40,5 +40,5 @@ class CalendarIntervalType private() extends DataType {
 /**
  * @since 1.5.0
  */
-@InterfaceStability.Stable
+@Stable
 case object CalendarIntervalType extends CalendarIntervalType
@@ -26,7 +26,7 @@ import org.json4s.JsonAST.JValue
 import org.json4s.JsonDSL._
 import org.json4s.jackson.JsonMethods._
 
-import org.apache.spark.annotation.InterfaceStability
+import org.apache.spark.annotation.Stable
 import org.apache.spark.sql.catalyst.analysis.Resolver
 import org.apache.spark.sql.catalyst.expressions.{Cast, Expression}
 import org.apache.spark.sql.catalyst.parser.CatalystSqlParser
@@ -38,7 +38,7 @@ import org.apache.spark.util.Utils
  *
  * @since 1.3.0
  */
-@InterfaceStability.Stable
+@Stable
 abstract class DataType extends AbstractDataType {
   /**
    * Enables matching against DataType for expressions:
@@ -111,7 +111,7 @@ abstract class DataType extends AbstractDataType {
 /**
  * @since 1.3.0
  */
-@InterfaceStability.Stable
+@Stable
 object DataType {
 
   private val FIXED_DECIMAL = """decimal\(\s*(\d+)\s*,\s*(\-?\d+)\s*\)""".r
@@ -20,7 +20,7 @@ package org.apache.spark.sql.types
 import scala.math.Ordering
 import scala.reflect.runtime.universe.typeTag
 
-import org.apache.spark.annotation.InterfaceStability
+import org.apache.spark.annotation.Stable
 
 /**
  * A date type, supporting "0001-01-01" through "9999-12-31".
@@ -31,7 +31,7 @@ import org.apache.spark.annotation.InterfaceStability
  *
  * @since 1.3.0
  */
-@InterfaceStability.Stable
+@Stable
 class DateType private() extends AtomicType {
   // The companion object and this class is separated so the companion object also subclasses
   // this type. Otherwise, the companion object would be of type "DateType$" in byte code.
@@ -53,5 +53,5 @@ class DateType private() extends AtomicType {
 /**
  * @since 1.3.0
  */
-@InterfaceStability.Stable
+@Stable
 case object DateType extends DateType
@@ -20,7 +20,7 @@ package org.apache.spark.sql.types
 import java.lang.{Long => JLong}
 import java.math.{BigInteger, MathContext, RoundingMode}
 
-import org.apache.spark.annotation.InterfaceStability
+import org.apache.spark.annotation.Unstable
 import org.apache.spark.sql.AnalysisException
 
 /**
@@ -31,7 +31,7 @@ import org.apache.spark.sql.AnalysisException
  * - If decimalVal is set, it represents the whole decimal value
 * - Otherwise, the decimal value is longVal / (10 ** _scale)
 */
-@InterfaceStability.Unstable
+@Unstable
 final class Decimal extends Ordered[Decimal] with Serializable {
   import org.apache.spark.sql.types.Decimal._
 
@@ -407,7 +407,7 @@ final class Decimal extends Ordered[Decimal] with Serializable {
   }
 }
 
-@InterfaceStability.Unstable
+@Unstable
 object Decimal {
   val ROUND_HALF_UP = BigDecimal.RoundingMode.HALF_UP
   val ROUND_HALF_EVEN = BigDecimal.RoundingMode.HALF_EVEN
@@ -21,11 +21,10 @@ import java.util.Locale
 
 import scala.reflect.runtime.universe.typeTag
 
-import org.apache.spark.annotation.InterfaceStability
+import org.apache.spark.annotation.Stable
 import org.apache.spark.sql.AnalysisException
 import org.apache.spark.sql.catalyst.expressions.{Expression, Literal}
 
-
 /**
  * The data type representing `java.math.BigDecimal` values.
  * A Decimal that must have fixed precision (the maximum number of digits) and scale (the number
@@ -39,7 +38,7 @@ import org.apache.spark.sql.catalyst.expressions.{Expression, Literal}
  *
  * @since 1.3.0
  */
-@InterfaceStability.Stable
+@Stable
 case class DecimalType(precision: Int, scale: Int) extends FractionalType {
 
   if (scale > precision) {
@@ -110,7 +109,7 @@ case class DecimalType(precision: Int, scale: Int) extends FractionalType {
  *
  * @since 1.3.0
  */
-@InterfaceStability.Stable
+@Stable
 object DecimalType extends AbstractDataType {
   import scala.math.min
 
@@ -21,7 +21,7 @@ import scala.math.{Fractional, Numeric, Ordering}
 import scala.math.Numeric.DoubleAsIfIntegral
 import scala.reflect.runtime.universe.typeTag
 
-import org.apache.spark.annotation.InterfaceStability
+import org.apache.spark.annotation.Stable
 import org.apache.spark.util.Utils
 
 /**
@@ -29,7 +29,7 @@ import org.apache.spark.util.Utils
  *
  * @since 1.3.0
  */
-@InterfaceStability.Stable
+@Stable
 class DoubleType private() extends FractionalType {
   // The companion object and this class is separated so the companion object also subclasses
   // this type. Otherwise, the companion object would be of type "DoubleType$" in byte code.
@@ -54,5 +54,5 @@ class DoubleType private() extends FractionalType {
 /**
  * @since 1.3.0
  */
-@InterfaceStability.Stable
+@Stable
 case object DoubleType extends DoubleType
@@ -21,7 +21,7 @@ import scala.math.{Fractional, Numeric, Ordering}
 import scala.math.Numeric.FloatAsIfIntegral
 import scala.reflect.runtime.universe.typeTag
 
-import org.apache.spark.annotation.InterfaceStability
+import org.apache.spark.annotation.Stable
 import org.apache.spark.util.Utils
 
 /**
@@ -29,7 +29,7 @@ import org.apache.spark.util.Utils
  *
  * @since 1.3.0
  */
-@InterfaceStability.Stable
+@Stable
 class FloatType private() extends FractionalType {
   // The companion object and this class is separated so the companion object also subclasses
   // this type. Otherwise, the companion object would be of type "FloatType$" in byte code.
@@ -55,5 +55,5 @@ class FloatType private() extends FractionalType {
 /**
  * @since 1.3.0
  */
-@InterfaceStability.Stable
+@Stable
 case object FloatType extends FloatType
@@ -20,14 +20,14 @@ package org.apache.spark.sql.types
 import scala.math.{Integral, Numeric, Ordering}
 import scala.reflect.runtime.universe.typeTag
 
-import org.apache.spark.annotation.InterfaceStability
+import org.apache.spark.annotation.Stable
 
 /**
  * The data type representing `Int` values. Please use the singleton `DataTypes.IntegerType`.
  *
  * @since 1.3.0
  */
-@InterfaceStability.Stable
+@Stable
 class IntegerType private() extends IntegralType {
   // The companion object and this class is separated so the companion object also subclasses
   // this type. Otherwise, the companion object would be of type "IntegerType$" in byte code.
@@ -51,5 +51,5 @@ class IntegerType private() extends IntegralType {
 /**
  * @since 1.3.0
  */
-@InterfaceStability.Stable
+@Stable
 case object IntegerType extends IntegerType
@@ -20,14 +20,14 @@ package org.apache.spark.sql.types
 import scala.math.{Integral, Numeric, Ordering}
 import scala.reflect.runtime.universe.typeTag
 
-import org.apache.spark.annotation.InterfaceStability
+import org.apache.spark.annotation.Stable
 
 /**
  * The data type representing `Long` values. Please use the singleton `DataTypes.LongType`.
  *
  * @since 1.3.0
  */
-@InterfaceStability.Stable
+@Stable
 class LongType private() extends IntegralType {
   // The companion object and this class is separated so the companion object also subclasses
   // this type. Otherwise, the companion object would be of type "LongType$" in byte code.
@@ -51,5 +51,5 @@ class LongType private() extends IntegralType {
 /**
  * @since 1.3.0
  */
-@InterfaceStability.Stable
+@Stable
 case object LongType extends LongType
@@ -20,7 +20,7 @@ package org.apache.spark.sql.types
 import org.json4s.JsonAST.JValue
 import org.json4s.JsonDSL._
 
-import org.apache.spark.annotation.InterfaceStability
+import org.apache.spark.annotation.Stable
 
 /**
  * The data type for Maps. Keys in a map are not allowed to have `null` values.
@@ -31,7 +31,7 @@ import org.apache.spark.annotation.InterfaceStability
  * @param valueType The data type of map values.
  * @param valueContainsNull Indicates if map values have `null` values.
  */
-@InterfaceStability.Stable
+@Stable
 case class MapType(
     keyType: DataType,
     valueType: DataType,
@@ -78,7 +78,7 @@ case class MapType(
 /**
  * @since 1.3.0
  */
-@InterfaceStability.Stable
+@Stable
 object MapType extends AbstractDataType {
 
   override private[sql] def defaultConcreteType: DataType = apply(NullType, NullType)
@@ -22,7 +22,7 @@ import scala.collection.mutable
 import org.json4s._
 import org.json4s.jackson.JsonMethods._
 
-import org.apache.spark.annotation.InterfaceStability
+import org.apache.spark.annotation.Stable
 
 
 /**
@@ -37,7 +37,7 @@ import org.apache.spark.annotation.InterfaceStability
  *
 * @since 1.3.0
 */
-@InterfaceStability.Stable
+@Stable
 sealed class Metadata private[types] (private[types] val map: Map[String, Any])
   extends Serializable {
 
@@ -117,7 +117,7 @@ sealed class Metadata private[types] (private[types] val map: Map[String, Any])
 /**
  * @since 1.3.0
  */
-@InterfaceStability.Stable
+@Stable
 object Metadata {
 
   private[this] val _empty = new Metadata(Map.empty)
@@ -228,7 +228,7 @@ object Metadata {
  *
 * @since 1.3.0
 */
-@InterfaceStability.Stable
+@Stable
 class MetadataBuilder {
 
   private val map: mutable.Map[String, Any] = mutable.Map.empty
@@ -17,15 +17,14 @@
 
 package org.apache.spark.sql.types
 
-import org.apache.spark.annotation.InterfaceStability
-
+import org.apache.spark.annotation.Stable
 
 /**
  * The data type representing `NULL` values. Please use the singleton `DataTypes.NullType`.
  *
  * @since 1.3.0
  */
-@InterfaceStability.Stable
+@Stable
 class NullType private() extends DataType {
   // The companion object and this class is separated so the companion object also subclasses
   // this type. Otherwise, the companion object would be of type "NullType$" in byte code.
@@ -38,5 +37,5 @@ class NullType private() extends DataType {
 /**
  * @since 1.3.0
  */
-@InterfaceStability.Stable
+@Stable
 case object NullType extends NullType
@@ -19,9 +19,9 @@ package org.apache.spark.sql.types
 
 import scala.language.existentials
 
-import org.apache.spark.annotation.InterfaceStability
+import org.apache.spark.annotation.Evolving
 
-@InterfaceStability.Evolving
+@Evolving
 object ObjectType extends AbstractDataType {
   override private[sql] def defaultConcreteType: DataType =
     throw new UnsupportedOperationException(
@@ -38,7 +38,7 @@ object ObjectType extends AbstractDataType {
 /**
  * Represents a JVM object that is passing through Spark SQL expression evaluation.
  */
-@InterfaceStability.Evolving
+@Evolving
 case class ObjectType(cls: Class[_]) extends DataType {
   override def defaultSize: Int = 4096
 
@@ -20,14 +20,14 @@ package org.apache.spark.sql.types
 import scala.math.{Integral, Numeric, Ordering}
 import scala.reflect.runtime.universe.typeTag
 
-import org.apache.spark.annotation.InterfaceStability
+import org.apache.spark.annotation.Stable
 
 /**
  * The data type representing `Short` values. Please use the singleton `DataTypes.ShortType`.
  *
  * @since 1.3.0
  */
-@InterfaceStability.Stable
+@Stable
 class ShortType private() extends IntegralType {
   // The companion object and this class is separated so the companion object also subclasses
   // this type. Otherwise, the companion object would be of type "ShortType$" in byte code.
@@ -51,5 +51,5 @@ class ShortType private() extends IntegralType {
 /**
  * @since 1.3.0
  */
-@InterfaceStability.Stable
+@Stable
 case object ShortType extends ShortType
@@ -20,7 +20,7 @@ package org.apache.spark.sql.types
 import scala.math.Ordering
 import scala.reflect.runtime.universe.typeTag
 
-import org.apache.spark.annotation.InterfaceStability
+import org.apache.spark.annotation.Stable
 import org.apache.spark.unsafe.types.UTF8String
 
 /**
@@ -28,7 +28,7 @@ import org.apache.spark.unsafe.types.UTF8String
  *
 * @since 1.3.0
 */
-@InterfaceStability.Stable
+@Stable
 class StringType private() extends AtomicType {
   // The companion object and this class is separated so the companion object also subclasses
   // this type. Otherwise, the companion object would be of type "StringType$" in byte code.
@@ -48,6 +48,6 @@ class StringType private() extends AtomicType {
 /**
  * @since 1.3.0
  */
-@InterfaceStability.Stable
+@Stable
 case object StringType extends StringType
 
@@ -20,7 +20,7 @@ package org.apache.spark.sql.types
 import org.json4s.JsonAST.JValue
 import org.json4s.JsonDSL._
 
-import org.apache.spark.annotation.InterfaceStability
+import org.apache.spark.annotation.Stable
 import org.apache.spark.sql.catalyst.util.{escapeSingleQuotedString, quoteIdentifier}
 
 /**
@@ -33,7 +33,7 @@ import org.apache.spark.sql.catalyst.util.{escapeSingleQuotedString, quoteIdenti
  *
 * @since 1.3.0
 */
-@InterfaceStability.Stable
+@Stable
 case class StructField(
     name: String,
     dataType: DataType,
@@ -24,10 +24,10 @@ import scala.util.control.NonFatal
 import org.json4s.JsonDSL._
 
 import org.apache.spark.SparkException
-import org.apache.spark.annotation.InterfaceStability
+import org.apache.spark.annotation.Stable
 import org.apache.spark.sql.catalyst.expressions.{Attribute, AttributeReference, InterpretedOrdering}
 import org.apache.spark.sql.catalyst.parser.{CatalystSqlParser, LegacyTypeStringParser}
-import org.apache.spark.sql.catalyst.util.{escapeSingleQuotedString, quoteIdentifier}
+import org.apache.spark.sql.catalyst.util.quoteIdentifier
 import org.apache.spark.util.Utils
 
 /**
@@ -95,7 +95,7 @@ import org.apache.spark.util.Utils
  *
 * @since 1.3.0
 */
-@InterfaceStability.Stable
+@Stable
 case class StructType(fields: Array[StructField]) extends DataType with Seq[StructField] {
 
   /** No-arg constructor for kryo. */
@@ -422,7 +422,7 @@ case class StructType(fields: Array[StructField]) extends DataType with Seq[Stru
 /**
  * @since 1.3.0
  */
-@InterfaceStability.Stable
+@Stable
 object StructType extends AbstractDataType {
 
   override private[sql] def defaultConcreteType: DataType = new StructType
@@ -20,7 +20,7 @@ package org.apache.spark.sql.types
 import scala.math.Ordering
 import scala.reflect.runtime.universe.typeTag
 
-import org.apache.spark.annotation.InterfaceStability
+import org.apache.spark.annotation.Stable
 
 /**
  * The data type representing `java.sql.Timestamp` values.
@@ -28,7 +28,7 @@ import org.apache.spark.annotation.InterfaceStability
  *
 * @since 1.3.0
 */
-@InterfaceStability.Stable
+@Stable
 class TimestampType private() extends AtomicType {
   // The companion object and this class is separated so the companion object also subclasses
   // this type. Otherwise, the companion object would be of type "TimestampType$" in byte code.
@@ -50,5 +50,5 @@ class TimestampType private() extends AtomicType {
 /**
  * @since 1.3.0
  */
-@InterfaceStability.Stable
+@Stable
 case object TimestampType extends TimestampType
@@ -20,8 +20,8 @@ package org.apache.spark.api.java.function;
 import java.io.Serializable;
 import java.util.Iterator;
 
+import org.apache.spark.annotation.Evolving;
 import org.apache.spark.annotation.Experimental;
-import org.apache.spark.annotation.InterfaceStability;
 import org.apache.spark.sql.streaming.GroupState;
 
 /**
@@ -33,7 +33,7 @@ import org.apache.spark.sql.streaming.GroupState;
  * @since 2.1.1
  */
 @Experimental
-@InterfaceStability.Evolving
+@Evolving
 public interface FlatMapGroupsWithStateFunction<K, V, S, R> extends Serializable {
   Iterator<R> call(K key, Iterator<V> values, GroupState<S> state) throws Exception;
 }
@@ -20,8 +20,8 @@ package org.apache.spark.api.java.function;
 import java.io.Serializable;
 import java.util.Iterator;
 
+import org.apache.spark.annotation.Evolving;
 import org.apache.spark.annotation.Experimental;
-import org.apache.spark.annotation.InterfaceStability;
 import org.apache.spark.sql.streaming.GroupState;
 
 /**
@@ -32,7 +32,7 @@ import org.apache.spark.sql.streaming.GroupState;
  * @since 2.1.1
  */
 @Experimental
-@InterfaceStability.Evolving
+@Evolving
 public interface MapGroupsWithStateFunction<K, V, S, R> extends Serializable {
   R call(K key, Iterator<V> values, GroupState<S> state) throws Exception;
 }
@@ -16,14 +16,14 @@
  */
 package org.apache.spark.sql;
 
-import org.apache.spark.annotation.InterfaceStability;
+import org.apache.spark.annotation.Stable;
 
 /**
  * SaveMode is used to specify the expected behavior of saving a DataFrame to a data source.
  *
 * @since 1.3.0
 */
-@InterfaceStability.Stable
+@Stable
 public enum SaveMode {
   /**
    * Append mode means that when saving a DataFrame to a data source, if data/table already exists,
@ -19,12 +19,12 @@ package org.apache.spark.sql.api.java;
|
|||
|
||||
import java.io.Serializable;
|
||||
|
||||
import org.apache.spark.annotation.InterfaceStability;
|
||||
import org.apache.spark.annotation.Stable;
|
||||
|
||||
/**
|
||||
* A Spark SQL UDF that has 0 arguments.
|
||||
*/
|
||||
@InterfaceStability.Stable
|
||||
@Stable
|
||||
public interface UDF0<R> extends Serializable {
|
||||
R call() throws Exception;
|
||||
}
|
||||
|
|
|
@ -19,12 +19,12 @@ package org.apache.spark.sql.api.java;
|
|||
|
||||
import java.io.Serializable;
|
||||
|
||||
import org.apache.spark.annotation.InterfaceStability;
|
||||
import org.apache.spark.annotation.Stable;
|
||||
|
||||
/**
|
||||
* A Spark SQL UDF that has 1 arguments.
|
||||
*/
|
||||
@InterfaceStability.Stable
|
||||
@Stable
|
||||
public interface UDF1<T1, R> extends Serializable {
|
||||
R call(T1 t1) throws Exception;
|
||||
}
|
||||
|
|
|
@ -19,12 +19,12 @@ package org.apache.spark.sql.api.java;
|
|||
|
||||
import java.io.Serializable;
|
||||
|
||||
import org.apache.spark.annotation.InterfaceStability;
|
||||
import org.apache.spark.annotation.Stable;
|
||||
|
||||
/**
|
||||
* A Spark SQL UDF that has 10 arguments.
|
||||
*/
|
||||
@InterfaceStability.Stable
|
||||
@Stable
|
||||
public interface UDF10<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, R> extends Serializable {
|
||||
R call(T1 t1, T2 t2, T3 t3, T4 t4, T5 t5, T6 t6, T7 t7, T8 t8, T9 t9, T10 t10) throws Exception;
|
||||
}
|
||||
|
|
|
@ -19,12 +19,12 @@ package org.apache.spark.sql.api.java;
|
|||
|
||||
import java.io.Serializable;
|
||||
|
||||
import org.apache.spark.annotation.InterfaceStability;
|
||||
import org.apache.spark.annotation.Stable;
|
||||
|
||||
/**
|
||||
* A Spark SQL UDF that has 11 arguments.
|
||||
*/
|
||||
@InterfaceStability.Stable
|
||||
@Stable
|
||||
public interface UDF11<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, R> extends Serializable {
|
||||
R call(T1 t1, T2 t2, T3 t3, T4 t4, T5 t5, T6 t6, T7 t7, T8 t8, T9 t9, T10 t10, T11 t11) throws Exception;
|
||||
}
|
||||
|
|
|
@ -19,12 +19,12 @@ package org.apache.spark.sql.api.java;
|
|||
|
||||
import java.io.Serializable;
|
||||
|
||||
import org.apache.spark.annotation.InterfaceStability;
|
||||
import org.apache.spark.annotation.Stable;
|
||||
|
||||
/**
|
||||
* A Spark SQL UDF that has 12 arguments.
|
||||
*/
|
||||
@InterfaceStability.Stable
|
||||
@Stable
|
||||
public interface UDF12<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, R> extends Serializable {
|
||||
R call(T1 t1, T2 t2, T3 t3, T4 t4, T5 t5, T6 t6, T7 t7, T8 t8, T9 t9, T10 t10, T11 t11, T12 t12) throws Exception;
|
||||
}
|
||||
|
|
|
@ -19,12 +19,12 @@ package org.apache.spark.sql.api.java;
|
|||
|
||||
import java.io.Serializable;
|
||||
|
||||
import org.apache.spark.annotation.InterfaceStability;
|
||||
import org.apache.spark.annotation.Stable;
|
||||
|
||||
/**
|
||||
* A Spark SQL UDF that has 13 arguments.
|
||||
*/
|
||||
@InterfaceStability.Stable
|
||||
@Stable
|
||||
public interface UDF13<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, R> extends Serializable {
|
||||
R call(T1 t1, T2 t2, T3 t3, T4 t4, T5 t5, T6 t6, T7 t7, T8 t8, T9 t9, T10 t10, T11 t11, T12 t12, T13 t13) throws Exception;
|
||||
}
|
||||
|
|
|
@ -19,12 +19,12 @@ package org.apache.spark.sql.api.java;
|
|||
|
||||
import java.io.Serializable;
|
||||
|
||||
import org.apache.spark.annotation.InterfaceStability;
|
||||
import org.apache.spark.annotation.Stable;
|
||||
|
||||
/**
|
||||
* A Spark SQL UDF that has 14 arguments.
|
||||
*/
|
||||
@InterfaceStability.Stable
|
||||
@Stable
|
||||
public interface UDF14<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, R> extends Serializable {
|
||||
R call(T1 t1, T2 t2, T3 t3, T4 t4, T5 t5, T6 t6, T7 t7, T8 t8, T9 t9, T10 t10, T11 t11, T12 t12, T13 t13, T14 t14) throws Exception;
|
||||
}
|
||||
|
|
|
@ -19,12 +19,12 @@ package org.apache.spark.sql.api.java;
|
|||
|
||||
import java.io.Serializable;
|
||||
|
||||
import org.apache.spark.annotation.InterfaceStability;
|
||||
import org.apache.spark.annotation.Stable;
|
||||
|
||||
/**
|
||||
* A Spark SQL UDF that has 15 arguments.
|
||||
*/
|
||||
@InterfaceStability.Stable
|
||||
@Stable
|
||||
public interface UDF15<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, R> extends Serializable {
|
||||
R call(T1 t1, T2 t2, T3 t3, T4 t4, T5 t5, T6 t6, T7 t7, T8 t8, T9 t9, T10 t10, T11 t11, T12 t12, T13 t13, T14 t14, T15 t15) throws Exception;
|
||||
}
|
||||
|
|
|
@ -19,12 +19,12 @@ package org.apache.spark.sql.api.java;
|
|||
|
||||
import java.io.Serializable;
|
||||
|
||||
import org.apache.spark.annotation.InterfaceStability;
|
||||
import org.apache.spark.annotation.Stable;
|
||||
|
||||
/**
|
||||
* A Spark SQL UDF that has 16 arguments.
|
||||
*/
|
||||
@InterfaceStability.Stable
|
||||
@Stable
|
||||
public interface UDF16<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, R> extends Serializable {
|
||||
R call(T1 t1, T2 t2, T3 t3, T4 t4, T5 t5, T6 t6, T7 t7, T8 t8, T9 t9, T10 t10, T11 t11, T12 t12, T13 t13, T14 t14, T15 t15, T16 t16) throws Exception;
|
||||
}
|
||||
|
|
|
@ -19,12 +19,12 @@ package org.apache.spark.sql.api.java;
|
|||
|
||||
import java.io.Serializable;
|
||||
|
||||
import org.apache.spark.annotation.InterfaceStability;
|
||||
import org.apache.spark.annotation.Stable;
|
||||
|
||||
/**
|
||||
* A Spark SQL UDF that has 17 arguments.
|
||||
*/
|
||||
@InterfaceStability.Stable
|
||||
@Stable
|
||||
public interface UDF17<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, R> extends Serializable {
|
||||
R call(T1 t1, T2 t2, T3 t3, T4 t4, T5 t5, T6 t6, T7 t7, T8 t8, T9 t9, T10 t10, T11 t11, T12 t12, T13 t13, T14 t14, T15 t15, T16 t16, T17 t17) throws Exception;
|
||||
}
|
||||
|
|
|
@@ -19,12 +19,12 @@ package org.apache.spark.sql.api.java;
 
 import java.io.Serializable;
 
-import org.apache.spark.annotation.InterfaceStability;
+import org.apache.spark.annotation.Stable;
 
 /**
  * A Spark SQL UDF that has 18 arguments.
  */
-@InterfaceStability.Stable
+@Stable
 public interface UDF18<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, R> extends Serializable {
   R call(T1 t1, T2 t2, T3 t3, T4 t4, T5 t5, T6 t6, T7 t7, T8 t8, T9 t9, T10 t10, T11 t11, T12 t12, T13 t13, T14 t14, T15 t15, T16 t16, T17 t17, T18 t18) throws Exception;
 }

@@ -19,12 +19,12 @@ package org.apache.spark.sql.api.java;
 
 import java.io.Serializable;
 
-import org.apache.spark.annotation.InterfaceStability;
+import org.apache.spark.annotation.Stable;
 
 /**
  * A Spark SQL UDF that has 19 arguments.
  */
-@InterfaceStability.Stable
+@Stable
 public interface UDF19<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, R> extends Serializable {
   R call(T1 t1, T2 t2, T3 t3, T4 t4, T5 t5, T6 t6, T7 t7, T8 t8, T9 t9, T10 t10, T11 t11, T12 t12, T13 t13, T14 t14, T15 t15, T16 t16, T17 t17, T18 t18, T19 t19) throws Exception;
 }

@@ -19,12 +19,12 @@ package org.apache.spark.sql.api.java;
 
 import java.io.Serializable;
 
-import org.apache.spark.annotation.InterfaceStability;
+import org.apache.spark.annotation.Stable;
 
 /**
  * A Spark SQL UDF that has 2 arguments.
  */
-@InterfaceStability.Stable
+@Stable
 public interface UDF2<T1, T2, R> extends Serializable {
   R call(T1 t1, T2 t2) throws Exception;
 }

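For context on how these interfaces are used: a UDFn implementation is registered through SparkSession's UDF registration and can then be invoked from SQL. A minimal sketch for UDF2 follows; the function name "add2" and the local session setup are illustrative, not part of this patch.

    import org.apache.spark.sql.SparkSession;
    import org.apache.spark.sql.api.java.UDF2;
    import org.apache.spark.sql.types.DataTypes;

    public class Udf2Example {
      public static void main(String[] args) {
        SparkSession spark = SparkSession.builder()
            .master("local").appName("udf2").getOrCreate();
        // Register a two-argument UDF; the lambda conforms to UDF2<Integer, Integer, Integer>.
        spark.udf().register("add2",
            (UDF2<Integer, Integer, Integer>) (a, b) -> a + b,
            DataTypes.IntegerType);
        spark.sql("SELECT add2(1, 2)").show();  // prints 3
        spark.stop();
      }
    }
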
@@ -19,12 +19,12 @@ package org.apache.spark.sql.api.java;
 
 import java.io.Serializable;
 
-import org.apache.spark.annotation.InterfaceStability;
+import org.apache.spark.annotation.Stable;
 
 /**
  * A Spark SQL UDF that has 20 arguments.
  */
-@InterfaceStability.Stable
+@Stable
 public interface UDF20<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, R> extends Serializable {
   R call(T1 t1, T2 t2, T3 t3, T4 t4, T5 t5, T6 t6, T7 t7, T8 t8, T9 t9, T10 t10, T11 t11, T12 t12, T13 t13, T14 t14, T15 t15, T16 t16, T17 t17, T18 t18, T19 t19, T20 t20) throws Exception;
 }

@@ -19,12 +19,12 @@ package org.apache.spark.sql.api.java;
 
 import java.io.Serializable;
 
-import org.apache.spark.annotation.InterfaceStability;
+import org.apache.spark.annotation.Stable;
 
 /**
  * A Spark SQL UDF that has 21 arguments.
  */
-@InterfaceStability.Stable
+@Stable
 public interface UDF21<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, R> extends Serializable {
   R call(T1 t1, T2 t2, T3 t3, T4 t4, T5 t5, T6 t6, T7 t7, T8 t8, T9 t9, T10 t10, T11 t11, T12 t12, T13 t13, T14 t14, T15 t15, T16 t16, T17 t17, T18 t18, T19 t19, T20 t20, T21 t21) throws Exception;
 }

@@ -19,12 +19,12 @@ package org.apache.spark.sql.api.java;
 
 import java.io.Serializable;
 
-import org.apache.spark.annotation.InterfaceStability;
+import org.apache.spark.annotation.Stable;
 
 /**
  * A Spark SQL UDF that has 22 arguments.
  */
-@InterfaceStability.Stable
+@Stable
 public interface UDF22<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, R> extends Serializable {
   R call(T1 t1, T2 t2, T3 t3, T4 t4, T5 t5, T6 t6, T7 t7, T8 t8, T9 t9, T10 t10, T11 t11, T12 t12, T13 t13, T14 t14, T15 t15, T16 t16, T17 t17, T18 t18, T19 t19, T20 t20, T21 t21, T22 t22) throws Exception;
 }

@@ -19,12 +19,12 @@ package org.apache.spark.sql.api.java;
 
 import java.io.Serializable;
 
-import org.apache.spark.annotation.InterfaceStability;
+import org.apache.spark.annotation.Stable;
 
 /**
  * A Spark SQL UDF that has 3 arguments.
  */
-@InterfaceStability.Stable
+@Stable
 public interface UDF3<T1, T2, T3, R> extends Serializable {
   R call(T1 t1, T2 t2, T3 t3) throws Exception;
 }

@@ -19,12 +19,12 @@ package org.apache.spark.sql.api.java;
 
 import java.io.Serializable;
 
-import org.apache.spark.annotation.InterfaceStability;
+import org.apache.spark.annotation.Stable;
 
 /**
  * A Spark SQL UDF that has 4 arguments.
  */
-@InterfaceStability.Stable
+@Stable
 public interface UDF4<T1, T2, T3, T4, R> extends Serializable {
   R call(T1 t1, T2 t2, T3 t3, T4 t4) throws Exception;
 }

@@ -19,12 +19,12 @@ package org.apache.spark.sql.api.java;
 
 import java.io.Serializable;
 
-import org.apache.spark.annotation.InterfaceStability;
+import org.apache.spark.annotation.Stable;
 
 /**
  * A Spark SQL UDF that has 5 arguments.
  */
-@InterfaceStability.Stable
+@Stable
 public interface UDF5<T1, T2, T3, T4, T5, R> extends Serializable {
   R call(T1 t1, T2 t2, T3 t3, T4 t4, T5 t5) throws Exception;
 }

@@ -19,12 +19,12 @@ package org.apache.spark.sql.api.java;
 
 import java.io.Serializable;
 
-import org.apache.spark.annotation.InterfaceStability;
+import org.apache.spark.annotation.Stable;
 
 /**
  * A Spark SQL UDF that has 6 arguments.
  */
-@InterfaceStability.Stable
+@Stable
 public interface UDF6<T1, T2, T3, T4, T5, T6, R> extends Serializable {
   R call(T1 t1, T2 t2, T3 t3, T4 t4, T5 t5, T6 t6) throws Exception;
 }

@@ -19,12 +19,12 @@ package org.apache.spark.sql.api.java;
 
 import java.io.Serializable;
 
-import org.apache.spark.annotation.InterfaceStability;
+import org.apache.spark.annotation.Stable;
 
 /**
  * A Spark SQL UDF that has 7 arguments.
  */
-@InterfaceStability.Stable
+@Stable
 public interface UDF7<T1, T2, T3, T4, T5, T6, T7, R> extends Serializable {
   R call(T1 t1, T2 t2, T3 t3, T4 t4, T5 t5, T6 t6, T7 t7) throws Exception;
 }

@@ -19,12 +19,12 @@ package org.apache.spark.sql.api.java;
 
 import java.io.Serializable;
 
-import org.apache.spark.annotation.InterfaceStability;
+import org.apache.spark.annotation.Stable;
 
 /**
  * A Spark SQL UDF that has 8 arguments.
  */
-@InterfaceStability.Stable
+@Stable
 public interface UDF8<T1, T2, T3, T4, T5, T6, T7, T8, R> extends Serializable {
   R call(T1 t1, T2 t2, T3 t3, T4 t4, T5 t5, T6 t6, T7 t7, T8 t8) throws Exception;
 }

@@ -19,12 +19,12 @@ package org.apache.spark.sql.api.java;
 
 import java.io.Serializable;
 
-import org.apache.spark.annotation.InterfaceStability;
+import org.apache.spark.annotation.Stable;
 
 /**
  * A Spark SQL UDF that has 9 arguments.
  */
-@InterfaceStability.Stable
+@Stable
 public interface UDF9<T1, T2, T3, T4, T5, T6, T7, T8, T9, R> extends Serializable {
   R call(T1 t1, T2 t2, T3 t3, T4 t4, T5 t5, T6 t6, T7 t7, T8 t8, T9 t9) throws Exception;
 }

@@ -17,12 +17,12 @@
 
 package org.apache.spark.sql.execution.datasources;
 
-import org.apache.spark.annotation.InterfaceStability;
+import org.apache.spark.annotation.Unstable;
 
 /**
  * Exception thrown when the parquet reader find column type mismatches.
  */
-@InterfaceStability.Unstable
+@Unstable
 public class SchemaColumnConvertNotSupportedException extends RuntimeException {
 
   /**

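A rough sketch of reacting to this exception around a Parquet read. Treat the accessor names (getColumn, getPhysicalType, getLogicalType) as assumptions about this class, and note that at the driver the error may surface wrapped in a SparkException rather than directly.

    import org.apache.spark.sql.SparkSession;
    import org.apache.spark.sql.execution.datasources.SchemaColumnConvertNotSupportedException;

    class ParquetMismatchDemo {
      static void readOrExplain(SparkSession spark, String path) {
        try {
          spark.read().parquet(path).collect();
        } catch (SchemaColumnConvertNotSupportedException e) {
          // Assumed accessors: report the column and the two conflicting types.
          System.err.println("Column " + e.getColumn()
              + ": file type " + e.getPhysicalType()
              + " cannot be read as " + e.getLogicalType());
        }
      }
    }
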
@@ -17,8 +17,8 @@
 
 package org.apache.spark.sql.expressions.javalang;
 
+import org.apache.spark.annotation.Evolving;
 import org.apache.spark.annotation.Experimental;
-import org.apache.spark.annotation.InterfaceStability;
 import org.apache.spark.api.java.function.MapFunction;
 import org.apache.spark.sql.TypedColumn;
 import org.apache.spark.sql.execution.aggregate.TypedAverage;
@@ -35,7 +35,7 @@ import org.apache.spark.sql.execution.aggregate.TypedSumLong;
  * @since 2.0.0
  */
 @Experimental
-@InterfaceStability.Evolving
+@Evolving
 public class typed {
   // Note: make sure to keep in sync with typed.scala
 

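The typed class above holds Java-friendly typed aggregators. A minimal sketch of using one with groupByKey; the Purchase bean and its fields are invented for illustration.

    import org.apache.spark.api.java.function.MapFunction;
    import org.apache.spark.sql.Dataset;
    import org.apache.spark.sql.Encoders;
    import org.apache.spark.sql.expressions.javalang.typed;

    class TypedAvgExample {
      // Hypothetical bean with a category and an amount.
      public static class Purchase implements java.io.Serializable {
        public String category;
        public double amount;
      }

      static void averageByCategory(Dataset<Purchase> purchases) {
        purchases
            .groupByKey((MapFunction<Purchase, String>) p -> p.category, Encoders.STRING())
            // typed.avg produces a TypedColumn<Purchase, Double>.
            .agg(typed.avg((MapFunction<Purchase, Double>) p -> p.amount))
            .show();
      }
    }
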
@@ -17,7 +17,7 @@
 
 package org.apache.spark.sql.sources.v2;
 
-import org.apache.spark.annotation.InterfaceStability;
+import org.apache.spark.annotation.Evolving;
 import org.apache.spark.sql.execution.datasources.v2.DataSourceV2Utils;
 import org.apache.spark.sql.sources.v2.reader.BatchReadSupport;
 import org.apache.spark.sql.types.StructType;
@@ -29,7 +29,7 @@ import org.apache.spark.sql.types.StructType;
  * This interface is used to create {@link BatchReadSupport} instances when end users run
  * {@code SparkSession.read.format(...).option(...).load()}.
  */
-@InterfaceStability.Evolving
+@Evolving
 public interface BatchReadSupportProvider extends DataSourceV2 {
 
   /**

@@ -19,7 +19,7 @@ package org.apache.spark.sql.sources.v2;
 
 import java.util.Optional;
 
-import org.apache.spark.annotation.InterfaceStability;
+import org.apache.spark.annotation.Evolving;
 import org.apache.spark.sql.SaveMode;
 import org.apache.spark.sql.sources.v2.writer.BatchWriteSupport;
 import org.apache.spark.sql.types.StructType;
@@ -31,7 +31,7 @@ import org.apache.spark.sql.types.StructType;
  * This interface is used to create {@link BatchWriteSupport} instances when end users run
  * {@code Dataset.write.format(...).option(...).save()}.
  */
-@InterfaceStability.Evolving
+@Evolving
 public interface BatchWriteSupportProvider extends DataSourceV2 {
 
   /**

@@ -17,7 +17,7 @@
 
 package org.apache.spark.sql.sources.v2;
 
-import org.apache.spark.annotation.InterfaceStability;
+import org.apache.spark.annotation.Evolving;
 import org.apache.spark.sql.execution.datasources.v2.DataSourceV2Utils;
 import org.apache.spark.sql.sources.v2.reader.streaming.ContinuousReadSupport;
 import org.apache.spark.sql.types.StructType;
@@ -29,7 +29,7 @@ import org.apache.spark.sql.types.StructType;
  * This interface is used to create {@link ContinuousReadSupport} instances when end users run
  * {@code SparkSession.readStream.format(...).option(...).load()} with a continuous trigger.
  */
-@InterfaceStability.Evolving
+@Evolving
 public interface ContinuousReadSupportProvider extends DataSourceV2 {
 
   /**

@@ -26,7 +26,7 @@ import java.util.stream.Stream;
 
 import com.fasterxml.jackson.databind.ObjectMapper;
 
-import org.apache.spark.annotation.InterfaceStability;
+import org.apache.spark.annotation.Evolving;
 
 /**
  * An immutable string-to-string map in which keys are case-insensitive. This is used to represent
@@ -73,7 +73,7 @@ import org.apache.spark.annotation.InterfaceStability;
  * </tr>
  * </table>
  */
-@InterfaceStability.Evolving
+@Evolving
 public class DataSourceOptions {
   private final Map<String, String> keyLowerCasedMap;
 

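Case-insensitivity is the key behavior here: DataSourceOptions stores every key lower-cased. A small sketch, with illustrative map contents:

    import java.util.HashMap;
    import java.util.Map;
    import org.apache.spark.sql.sources.v2.DataSourceOptions;

    class OptionsDemo {
      public static void main(String[] args) {
        Map<String, String> raw = new HashMap<>();
        raw.put("PATH", "/tmp/data");  // key case should not matter
        DataSourceOptions options = new DataSourceOptions(raw);
        // Lookup is case-insensitive; get returns java.util.Optional<String>.
        System.out.println(options.get("path").get());  // /tmp/data
      }
    }
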
@@ -17,7 +17,7 @@
 
 package org.apache.spark.sql.sources.v2;
 
-import org.apache.spark.annotation.InterfaceStability;
+import org.apache.spark.annotation.Evolving;
 
 /**
  * The base interface for data source v2. Implementations must have a public, 0-arg constructor.
@@ -30,5 +30,5 @@ import org.apache.spark.annotation.InterfaceStability;
  * If Spark fails to execute any methods in the implementations of this interface (by throwing an
  * exception), the read action will fail and no Spark job will be submitted.
  */
-@InterfaceStability.Evolving
+@Evolving
 public interface DataSourceV2 {}

@@ -17,7 +17,7 @@
 
 package org.apache.spark.sql.sources.v2;
 
-import org.apache.spark.annotation.InterfaceStability;
+import org.apache.spark.annotation.Evolving;
 import org.apache.spark.sql.execution.datasources.v2.DataSourceV2Utils;
 import org.apache.spark.sql.sources.v2.reader.streaming.MicroBatchReadSupport;
 import org.apache.spark.sql.types.StructType;
@@ -29,7 +29,7 @@ import org.apache.spark.sql.types.StructType;
  * This interface is used to create {@link MicroBatchReadSupport} instances when end users run
  * {@code SparkSession.readStream.format(...).option(...).load()} with a micro-batch trigger.
  */
-@InterfaceStability.Evolving
+@Evolving
 public interface MicroBatchReadSupportProvider extends DataSourceV2 {
 
   /**

@@ -17,14 +17,14 @@
 
 package org.apache.spark.sql.sources.v2;
 
-import org.apache.spark.annotation.InterfaceStability;
+import org.apache.spark.annotation.Evolving;
 
 /**
  * A mix-in interface for {@link DataSourceV2}. Data sources can implement this interface to
  * propagate session configs with the specified key-prefix to all data source operations in this
  * session.
  */
-@InterfaceStability.Evolving
+@Evolving
 public interface SessionConfigSupport extends DataSourceV2 {
 
   /**

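A sketch of the mix-in pattern: with the single keyPrefix method, session configs under spark.datasource.<prefix>.* get forwarded to the source's options. The class name and prefix below are hypothetical.

    import org.apache.spark.sql.sources.v2.DataSourceV2;
    import org.apache.spark.sql.sources.v2.SessionConfigSupport;

    // Hypothetical source: session configs named spark.datasource.myds.*
    // would be propagated to its options with the prefix stripped.
    public class MyDataSource implements DataSourceV2, SessionConfigSupport {
      @Override
      public String keyPrefix() {
        return "myds";
      }
    }
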
@@ -17,7 +17,7 @@
 
 package org.apache.spark.sql.sources.v2;
 
-import org.apache.spark.annotation.InterfaceStability;
+import org.apache.spark.annotation.Evolving;
 import org.apache.spark.sql.execution.streaming.BaseStreamingSink;
 import org.apache.spark.sql.sources.v2.writer.streaming.StreamingWriteSupport;
 import org.apache.spark.sql.streaming.OutputMode;
@@ -30,7 +30,7 @@ import org.apache.spark.sql.types.StructType;
  * This interface is used to create {@link StreamingWriteSupport} instances when end users run
  * {@code Dataset.writeStream.format(...).option(...).start()}.
  */
-@InterfaceStability.Evolving
+@Evolving
 public interface StreamingWriteSupportProvider extends DataSourceV2, BaseStreamingSink {
 
   /**

@@ -17,7 +17,7 @@
 
 package org.apache.spark.sql.sources.v2.reader;
 
-import org.apache.spark.annotation.InterfaceStability;
+import org.apache.spark.annotation.Evolving;
 
 /**
  * An interface that defines how to load the data from data source for batch processing.
@@ -29,7 +29,7 @@ import org.apache.spark.annotation.InterfaceStability;
  * {@link ScanConfig}. The {@link ScanConfig} will be used to create input partitions and reader
  * factory to scan data from the data source with a Spark job.
  */
-@InterfaceStability.Evolving
+@Evolving
 public interface BatchReadSupport extends ReadSupport {
 
   /**

@@ -19,7 +19,7 @@ package org.apache.spark.sql.sources.v2.reader;
 
 import java.io.Serializable;
 
-import org.apache.spark.annotation.InterfaceStability;
+import org.apache.spark.annotation.Evolving;
 
 /**
  * A serializable representation of an input partition returned by
@@ -32,7 +32,7 @@ import org.apache.spark.annotation.InterfaceStability;
  * the actual reading. So {@link InputPartition} must be serializable while {@link PartitionReader}
  * doesn't need to be.
  */
-@InterfaceStability.Evolving
+@Evolving
 public interface InputPartition extends Serializable {
 
   /**

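Since an InputPartition is just a serializable description of one split, an implementation can be a plain value class. A sketch under the assumption of an invented row-range source:

    import org.apache.spark.sql.sources.v2.reader.InputPartition;

    // Hypothetical split: a half-open range of longs handled by one Spark task.
    // Only this description is serialized and shipped to an executor; the
    // reader that consumes it is created there by the PartitionReaderFactory.
    public class RangeInputPartition implements InputPartition {
      final long start;
      final long end;

      public RangeInputPartition(long start, long end) {
        this.start = start;
        this.end = end;
      }
    }
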
@@ -20,7 +20,7 @@ package org.apache.spark.sql.sources.v2.reader;
 import java.io.Closeable;
 import java.io.IOException;
 
-import org.apache.spark.annotation.InterfaceStability;
+import org.apache.spark.annotation.Evolving;
 
 /**
  * A partition reader returned by {@link PartitionReaderFactory#createReader(InputPartition)} or
@@ -32,7 +32,7 @@ import org.apache.spark.annotation.InterfaceStability;
  * data sources(whose {@link PartitionReaderFactory#supportColumnarReads(InputPartition)}
 * returns true).
 */
-@InterfaceStability.Evolving
+@Evolving
 public interface PartitionReader<T> extends Closeable {
 
   /**

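A PartitionReader is essentially an iterator: next() advances, get() returns the current value, and close() releases resources. A sketch that pairs with the hypothetical range partition above; GenericInternalRow is used only to keep the example self-contained.

    import java.io.IOException;
    import org.apache.spark.sql.catalyst.InternalRow;
    import org.apache.spark.sql.catalyst.expressions.GenericInternalRow;
    import org.apache.spark.sql.sources.v2.reader.PartitionReader;

    // Hypothetical reader emitting one single-column row per value in [start, end).
    public class RangePartitionReader implements PartitionReader<InternalRow> {
      private final long end;
      private long current;

      public RangePartitionReader(long start, long end) {
        this.current = start - 1;
        this.end = end;
      }

      @Override
      public boolean next() throws IOException {
        current += 1;
        return current < end;
      }

      @Override
      public InternalRow get() {
        return new GenericInternalRow(new Object[] { current });
      }

      @Override
      public void close() throws IOException {
        // Nothing to release for an in-memory range.
      }
    }
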
@@ -19,7 +19,7 @@ package org.apache.spark.sql.sources.v2.reader;
 
 import java.io.Serializable;
 
-import org.apache.spark.annotation.InterfaceStability;
+import org.apache.spark.annotation.Evolving;
 import org.apache.spark.sql.catalyst.InternalRow;
 import org.apache.spark.sql.vectorized.ColumnarBatch;
 
@@ -30,7 +30,7 @@ import org.apache.spark.sql.vectorized.ColumnarBatch;
  * {@link PartitionReader} (by throwing an exception), corresponding Spark task would fail and
  * get retried until hitting the maximum retry times.
  */
-@InterfaceStability.Evolving
+@Evolving
 public interface PartitionReaderFactory extends Serializable {
 
   /**

@@ -17,7 +17,7 @@
 
 package org.apache.spark.sql.sources.v2.reader;
 
-import org.apache.spark.annotation.InterfaceStability;
+import org.apache.spark.annotation.Evolving;
 import org.apache.spark.sql.types.StructType;
 
 /**
@@ -27,7 +27,7 @@ import org.apache.spark.sql.types.StructType;
  * If Spark fails to execute any methods in the implementations of this interface (by throwing an
  * exception), the read action will fail and no Spark job will be submitted.
  */
-@InterfaceStability.Evolving
+@Evolving
 public interface ReadSupport {
 
   /**

@@ -17,7 +17,7 @@
 
 package org.apache.spark.sql.sources.v2.reader;
 
-import org.apache.spark.annotation.InterfaceStability;
+import org.apache.spark.annotation.Evolving;
 import org.apache.spark.sql.types.StructType;
 
 /**
@@ -31,7 +31,7 @@ import org.apache.spark.sql.types.StructType;
  * {@link SupportsReportStatistics#estimateStatistics(ScanConfig)}, implementations mostly need to
  * cast the input {@link ScanConfig} to the concrete {@link ScanConfig} class of the data source.
  */
-@InterfaceStability.Evolving
+@Evolving
 public interface ScanConfig {
 
   /**

@@ -17,14 +17,14 @@
 
 package org.apache.spark.sql.sources.v2.reader;
 
-import org.apache.spark.annotation.InterfaceStability;
+import org.apache.spark.annotation.Evolving;
 
 /**
  * An interface for building the {@link ScanConfig}. Implementations can mixin those
  * SupportsPushDownXYZ interfaces to do operator pushdown, and keep the operator pushdown result in
  * the returned {@link ScanConfig}.
  */
-@InterfaceStability.Evolving
+@Evolving
 public interface ScanConfigBuilder {
   ScanConfig build();
 }

@@ -19,13 +19,13 @@ package org.apache.spark.sql.sources.v2.reader;
 
 import java.util.OptionalLong;
 
-import org.apache.spark.annotation.InterfaceStability;
+import org.apache.spark.annotation.Evolving;
 
 /**
  * An interface to represent statistics for a data source, which is returned by
  * {@link SupportsReportStatistics#estimateStatistics(ScanConfig)}.
  */
-@InterfaceStability.Evolving
+@Evolving
 public interface Statistics {
   OptionalLong sizeInBytes();
   OptionalLong numRows();

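Implementing Statistics amounts to returning the two OptionalLong values: a source reports what it knows and leaves the rest empty. A minimal sketch, assuming a file-backed source whose byte size comes from file metadata:

    import java.util.OptionalLong;
    import org.apache.spark.sql.sources.v2.reader.Statistics;

    // Hypothetical statistics: size is known from file metadata, row count is not.
    public class FileBackedStatistics implements Statistics {
      private final long totalBytes;

      public FileBackedStatistics(long totalBytes) {
        this.totalBytes = totalBytes;
      }

      @Override
      public OptionalLong sizeInBytes() {
        return OptionalLong.of(totalBytes);
      }

      @Override
      public OptionalLong numRows() {
        return OptionalLong.empty();  // unknown; Spark falls back to defaults
      }
    }
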
@@ -17,14 +17,14 @@
 
 package org.apache.spark.sql.sources.v2.reader;
 
-import org.apache.spark.annotation.InterfaceStability;
+import org.apache.spark.annotation.Evolving;
 import org.apache.spark.sql.sources.Filter;
 
 /**
  * A mix-in interface for {@link ScanConfigBuilder}. Data sources can implement this interface to
  * push down filters to the data source and reduce the size of the data to be read.
  */
-@InterfaceStability.Evolving
+@Evolving
 public interface SupportsPushDownFilters extends ScanConfigBuilder {
 
   /**

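The pushdown contract splits the incoming filters into those the source will evaluate itself and those Spark must re-apply after the scan. The method names pushFilters and pushedFilters are from the real interface; the builder class below and its EqualTo-only capability are assumptions for illustration.

    import java.util.ArrayList;
    import java.util.List;
    import org.apache.spark.sql.sources.EqualTo;
    import org.apache.spark.sql.sources.Filter;
    import org.apache.spark.sql.sources.v2.reader.ScanConfig;
    import org.apache.spark.sql.sources.v2.reader.SupportsPushDownFilters;

    // Hypothetical builder that can only evaluate EqualTo filters itself;
    // everything else is handed back to Spark to re-apply after the scan.
    public class MyScanConfigBuilder implements SupportsPushDownFilters {
      private Filter[] pushed = new Filter[0];

      @Override
      public Filter[] pushFilters(Filter[] filters) {
        List<Filter> accepted = new ArrayList<>();
        List<Filter> rejected = new ArrayList<>();
        for (Filter f : filters) {
          (f instanceof EqualTo ? accepted : rejected).add(f);
        }
        pushed = accepted.toArray(new Filter[0]);
        return rejected.toArray(new Filter[0]);  // Spark evaluates these post-scan
      }

      @Override
      public Filter[] pushedFilters() {
        return pushed;
      }

      @Override
      public ScanConfig build() {
        // A real implementation would capture the pushed filters in its ScanConfig.
        throw new UnsupportedOperationException("sketch only");
      }
    }
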
Some files were not shown because too many files have changed in this diff.