From aa41dcea4a41899507dfe4ec1eceaabb5edf728f Mon Sep 17 00:00:00 2001
From: zhengruifeng
Date: Fri, 12 Jul 2019 11:00:16 -0700
Subject: [PATCH] [SPARK-28159][ML][FOLLOWUP] fix typo & (0 until v.size).toList => List.range(0, v.size)

## What changes were proposed in this pull request?
fix typo in spark-28159
`transfromWithMean` -> `transformWithMean`

## How was this patch tested?
existing test

Closes #25129 from zhengruifeng/to_ml_vec_cleanup.

Authored-by: zhengruifeng
Signed-off-by: Dongjoon Hyun
---
 mllib/src/main/scala/org/apache/spark/ml/clustering/LDA.scala | 2 +-
 .../scala/org/apache/spark/ml/feature/StandardScaler.scala    | 2 +-
 .../scala/org/apache/spark/mllib/feature/StandardScaler.scala | 4 ++--
 3 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/mllib/src/main/scala/org/apache/spark/ml/clustering/LDA.scala b/mllib/src/main/scala/org/apache/spark/ml/clustering/LDA.scala
index aa81037014..681bb95156 100644
--- a/mllib/src/main/scala/org/apache/spark/ml/clustering/LDA.scala
+++ b/mllib/src/main/scala/org/apache/spark/ml/clustering/LDA.scala
@@ -490,7 +490,7 @@ abstract class LDAModel private[ml] (
         Vectors.zeros(k)
       } else {
         val (ids: List[Int], cts: Array[Double]) = vector match {
-          case v: DenseVector => ((0 until v.size).toList, v.values)
+          case v: DenseVector => (List.range(0, v.size), v.values)
           case v: SparseVector => (v.indices.toList, v.values)
           case other =>
             throw new UnsupportedOperationException(
diff --git a/mllib/src/main/scala/org/apache/spark/ml/feature/StandardScaler.scala b/mllib/src/main/scala/org/apache/spark/ml/feature/StandardScaler.scala
index 17f2c17c95..81cf2e1a4f 100644
--- a/mllib/src/main/scala/org/apache/spark/ml/feature/StandardScaler.scala
+++ b/mllib/src/main/scala/org/apache/spark/ml/feature/StandardScaler.scala
@@ -169,7 +169,7 @@ class StandardScalerModel private[ml] (
             case d: DenseVector => d.values.clone()
             case v: Vector => v.toArray
           }
-          val newValues = scaler.transfromWithMean(values)
+          val newValues = scaler.transformWithMean(values)
           Vectors.dense(newValues)
       } else if ($(withStd)) {
         vector: Vector =>
diff --git a/mllib/src/main/scala/org/apache/spark/mllib/feature/StandardScaler.scala b/mllib/src/main/scala/org/apache/spark/mllib/feature/StandardScaler.scala
index 578b779cd5..19e53e7eac 100644
--- a/mllib/src/main/scala/org/apache/spark/mllib/feature/StandardScaler.scala
+++ b/mllib/src/main/scala/org/apache/spark/mllib/feature/StandardScaler.scala
@@ -141,7 +141,7 @@ class StandardScalerModel @Since("1.3.0") (
         case d: DenseVector => d.values.clone()
         case v: Vector => v.toArray
       }
-      val newValues = transfromWithMean(values)
+      val newValues = transformWithMean(values)
       Vectors.dense(newValues)
     } else if (withStd) {
       vector match {
@@ -161,7 +161,7 @@ class StandardScalerModel @Since("1.3.0") (
     }
   }
 
-  private[spark] def transfromWithMean(values: Array[Double]): Array[Double] = {
+  private[spark] def transformWithMean(values: Array[Double]): Array[Double] = {
     // By default, Scala generates Java methods for member variables. So every time when
     // the member variables are accessed, `invokespecial` will be called which is expensive.
     // This can be avoid by having a local reference of `shift`.
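
For reference, the subject-line refactor swaps `(0 until v.size).toList` for `List.range(0, v.size)`; the two build identical lists, `List.range` just constructs the `List` directly instead of materializing a `Range` first. A minimal sketch of that equivalence, kept outside the patch itself (the value `n = 5` and the object name are illustrative only, not from the change):

```scala
// Minimal sketch: both expressions produce List(0, 1, 2, 3, 4).
// `n` and the object name are illustrative, not part of the patch.
object ListRangeEquivalence {
  def main(args: Array[String]): Unit = {
    val n = 5
    val viaRange = (0 until n).toList // old form used in LDA.scala
    val viaList = List.range(0, n)    // new form introduced by this patch
    assert(viaRange == viaList)       // same elements, same order
    println(viaList)                  // List(0, 1, 2, 3, 4)
  }
}
```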