[SPARK-13986][CORE][MLLIB] Remove DeveloperApi-annotations for non-publics

## What changes were proposed in this pull request?

Spark uses the `DeveloperApi` annotation, but it sometimes conflicts with visibility: several `private[spark]` types are annotated as if they were public developer APIs. This PR fixes those conflicts by removing the annotation from non-public types. The following is an example.

**JobResult.scala**
```scala
@DeveloperApi
sealed trait JobResult

@DeveloperApi
case object JobSucceeded extends JobResult

-@DeveloperApi
private[spark] case class JobFailed(exception: Exception) extends JobResult
```
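Since `private[spark]` already hides `JobFailed` from application code, the annotation advertised an API users cannot actually reach. A minimal illustration (hypothetical user code, compiled outside the `org.apache.spark` package):

```scala
// Resolves fine: JobResult and JobSucceeded are public.
import org.apache.spark.scheduler.{JobResult, JobSucceeded}

// Does not compile: JobFailed is private[spark],
// whether or not it carries @DeveloperApi.
// import org.apache.spark.scheduler.JobFailed
```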

## How was this patch tested?

Pass the existing Jenkins tests.

Author: Dongjoon Hyun <dongjoon@apache.org>

Closes #11797 from dongjoon-hyun/SPARK-13986.
Dongjoon Hyun authored on 2016-03-21 14:57:52 +00:00, committed by Sean Owen
parent 17a3f00676, commit df61fbd978
8 changed files with 0 additions and 16 deletions

**core/src/main/scala/org/apache/spark/scheduler/JobResult.scala**

```diff
@@ -29,5 +29,4 @@ sealed trait JobResult
 @DeveloperApi
 case object JobSucceeded extends JobResult
 
-@DeveloperApi
 private[spark] case class JobFailed(exception: Exception) extends JobResult
```

**core/src/main/scala/org/apache/spark/util/collection/OpenHashMap.scala**

```diff
@@ -19,17 +19,13 @@ package org.apache.spark.util.collection
 
 import scala.reflect.ClassTag
 
-import org.apache.spark.annotation.DeveloperApi
-
 /**
- * :: DeveloperApi ::
  * A fast hash map implementation for nullable keys. This hash map supports insertions and updates,
  * but not deletions. This map is about 5X faster than java.util.HashMap, while using much less
  * space overhead.
  *
  * Under the hood, it uses our OpenHashSet implementation.
  */
-@DeveloperApi
 private[spark]
 class OpenHashMap[K : ClassTag, @specialized(Long, Int, Double) V: ClassTag](
     initialCapacity: Int)
```
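For reference, a minimal sketch of the insert/update surface described in the doc comment (Spark-internal use only, since the class is `private[spark]`; `update` and `changeValue` are the methods the class exposes):

```scala
val map = new OpenHashMap[String, Int](64)  // initialCapacity = 64

map("answer") = 42                          // insert via update
map.changeValue("answer", 1, _ + 1)         // update-or-insert: existing value is incremented
assert(map("answer") == 43)                 // lookups work; deletions are not supported
```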

**mllib/src/main/scala/org/apache/spark/ml/regression/Regressor.scala**

```diff
@@ -22,15 +22,12 @@ import org.apache.spark.ml.{PredictionModel, Predictor, PredictorParams}
 
 /**
- * :: DeveloperApi ::
- *
  * Single-label regression
  *
  * @tparam FeaturesType  Type of input features.  E.g., [[org.apache.spark.mllib.linalg.Vector]]
  * @tparam Learner  Concrete Estimator type
  * @tparam M  Concrete Model type
  */
-@DeveloperApi
 private[spark] abstract class Regressor[
     FeaturesType,
     Learner <: Regressor[FeaturesType, Learner, M],
     M <: RegressionModel[FeaturesType, M]]
```
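Note that the concrete estimators built on this base class, such as `LinearRegression`, remain public; only the abstract `Regressor` scaffolding is Spark-internal, so dropping the annotation here does not shrink the user-facing API.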

**mllib/src/main/scala/org/apache/spark/ml/tree/impl/NodeIdCache.scala**

```diff
@@ -171,7 +171,6 @@ private[spark] class NodeIdCache(
   }
 }
 
-@DeveloperApi
 private[spark] object NodeIdCache {
   /**
    * Initialize the node Id cache with initial node Id values.
```

**mllib/src/main/scala/org/apache/spark/ml/tuning/ValidatorParams.scala**

```diff
@@ -17,17 +17,14 @@
 
 package org.apache.spark.ml.tuning
 
-import org.apache.spark.annotation.DeveloperApi
 import org.apache.spark.ml.Estimator
 import org.apache.spark.ml.evaluation.Evaluator
 import org.apache.spark.ml.param.{Param, ParamMap, Params}
 import org.apache.spark.sql.types.StructType
 
 /**
- * :: DeveloperApi ::
  * Common params for [[TrainValidationSplitParams]] and [[CrossValidatorParams]].
  */
-@DeveloperApi
 private[ml] trait ValidatorParams extends Params {
 
   /**
```
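The shared params still reach users through the public validators. A brief sketch of the corresponding public `spark.ml` API (nothing here depends on the removed annotation):

```scala
import org.apache.spark.ml.evaluation.RegressionEvaluator
import org.apache.spark.ml.regression.LinearRegression
import org.apache.spark.ml.tuning.{CrossValidator, ParamGridBuilder}

val lr = new LinearRegression()
val grid = new ParamGridBuilder()
  .addGrid(lr.regParam, Array(0.01, 0.1))
  .build()

val cv = new CrossValidator()
  .setEstimator(lr)                         // backed by ValidatorParams.estimator
  .setEvaluator(new RegressionEvaluator())  // backed by ValidatorParams.evaluator
  .setEstimatorParamMaps(grid)              // backed by ValidatorParams.estimatorParamMaps
  .setNumFolds(3)
```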

**mllib/src/main/scala/org/apache/spark/mllib/linalg/Matrices.scala**

```diff
@@ -156,7 +156,6 @@ sealed trait Matrix extends Serializable {
   def numActives: Int
 }
 
-@DeveloperApi
 private[spark] class MatrixUDT extends UserDefinedType[Matrix] {
 
   override def sqlType: StructType = {
```
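`MatrixUDT` is internal glue that lets `Matrix` values be stored in DataFrame columns; user code never instantiates it directly, so the `DeveloperApi` tag was at odds with its `private[spark]` visibility.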

**mllib/src/main/scala/org/apache/spark/mllib/tree/impl/NodeIdCache.scala**

```diff
@@ -173,7 +173,6 @@ private[spark] class NodeIdCache(
   }
 }
 
-@DeveloperApi
 private[spark] object NodeIdCache {
   /**
    * Initialize the node Id cache with initial node Id values.
```

**mllib/src/main/scala/org/apache/spark/mllib/tree/model/InformationGainStats.scala**

```diff
@@ -79,7 +79,6 @@ private[spark] object InformationGainStats {
 }
 
 /**
- * :: DeveloperApi ::
  * Impurity statistics for each split
  * @param gain information gain value
  * @param impurity current node impurity
@@ -89,7 +88,6 @@ private[spark] object InformationGainStats {
  * @param valid whether the current split satisfies minimum info gain or
  *              minimum number of instances per node
  */
-@DeveloperApi
 private[spark] class ImpurityStats(
     val gain: Double,
     val impurity: Double,
```