[MINOR] Fix typos in comments and test case names in code
## What changes were proposed in this pull request?

This PR fixes typos in code comments and in test case names.

## How was this patch tested?

Manual.

Author: Dongjoon Hyun <dongjoon@apache.org>

Closes #11481 from dongjoon-hyun/minor_fix_typos_in_code.
Commit 941b270b70 (parent 52035d1036)
```diff
@@ -28,7 +28,7 @@ import org.apache.spark._
  * of them will be combined together, showed in one line.
  */
 private[spark] class ConsoleProgressBar(sc: SparkContext) extends Logging {
-  // Carrige return
+  // Carriage return
   val CR = '\r'
   // Update period of progress bar, in milliseconds
   val UPDATE_PERIOD = 200L
```
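The hunk above fixes the comment on the carriage-return constant that drives the console progress bar. As a standalone sketch of the trick the comment names (my own demo, not Spark's rendering code): printing `'\r'` with no newline returns the cursor to column 0, so each write overdraws the same console line.

```scala
object CarriageReturnDemo {
  val CR = '\r'

  def main(args: Array[String]): Unit = {
    for (pct <- 0 to 100 by 10) {
      val bars = pct / 10
      // No newline: the CR rewinds the cursor so the next write overdraws.
      print(s"$CR[${"=" * bars}${" " * (10 - bars)}] $pct%")
      Thread.sleep(100)
    }
    println()
  }
}
```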
```diff
@@ -26,11 +26,11 @@ package org.apache.sparktest
  */
 class ImplicitSuite {
 
-  // We only want to test if `implict` works well with the compiler, so we don't need a real
+  // We only want to test if `implicit` works well with the compiler, so we don't need a real
   // SparkContext.
   def mockSparkContext[T]: org.apache.spark.SparkContext = null
 
-  // We only want to test if `implict` works well with the compiler, so we don't need a real RDD.
+  // We only want to test if `implicit` works well with the compiler, so we don't need a real RDD.
   def mockRDD[T]: org.apache.spark.rdd.RDD[T] = null
 
   def testRddToPairRDDFunctions(): Unit = {
```
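ImplicitSuite relies on a compile-only testing pattern: the mocks return null so nothing executes, yet the method bodies still force the compiler to resolve the implicit conversions under test. A minimal sketch of the same pattern, using hypothetical types rather than Spark's:

```scala
import scala.language.implicitConversions

object ImplicitCheckSketch {
  class Wrapper(s: String) { def shout: String = s.toUpperCase }
  implicit def toWrapper(s: String): Wrapper = new Wrapper(s)

  // Returns null on purpose: we only need the type, never a real value.
  def mockString: String = null

  // Compiles only if the compiler can resolve `toWrapper`; never call it.
  def testImplicitResolution(): Unit = {
    mockString.shout
  }
}
```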
```diff
@@ -563,7 +563,7 @@ def main():
 
     # backwards compatibility checks
     if build_tool == "sbt":
-        # Note: compatiblity tests only supported in sbt for now
+        # Note: compatibility tests only supported in sbt for now
         detect_binary_inop_with_mima()
 
     # run the test suites
```
```diff
@@ -228,7 +228,7 @@ class MyJavaLogisticRegressionModel
    * Create a copy of the model.
    * The copy is shallow, except for the embedded paramMap, which gets a deep copy.
    * <p>
-   * This is used for the defaul implementation of [[transform()]].
+   * This is used for the default implementation of [[transform()]].
    *
    * In Java, we have to make this method public since Java does not understand Scala's protected
    * modifier.
```
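The corrected Javadoc describes a specific copy contract: model fields are shared, but the embedded param map is copied so the copy can diverge. A rough sketch of that contract with a hypothetical model type (the example's real code goes through Spark ML's Params machinery):

```scala
class SketchModel(val uid: String, val coefficients: Array[Double]) {
  var paramMap: Map[String, Any] = Map.empty

  def copy(extra: Map[String, Any]): SketchModel = {
    val that = new SketchModel(uid, coefficients) // shallow: coefficients shared
    that.paramMap = this.paramMap ++ extra        // deep: a fresh, merged map
    that
  }
}
```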
```diff
@@ -47,7 +47,7 @@ if __name__ == "__main__":
     # $example on$
     data = sc.textFile('data/mllib/sample_naive_bayes_data.txt').map(parseLine)
 
-    # Split data aproximately into training (60%) and test (40%)
+    # Split data approximately into training (60%) and test (40%)
     training, test = data.randomSplit([0.6, 0.4], seed=0)
 
     # Train a naive Bayes model.
```
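The split is approximate because randomSplit assigns each element to a side independently at random: the weights are expected fractions, not exact counts, and the fixed seed makes the assignment reproducible. The same 60/40 split as a self-contained Scala sketch:

```scala
import org.apache.spark.{SparkConf, SparkContext}

object RandomSplitSketch {
  def main(args: Array[String]): Unit = {
    val sc = new SparkContext(
      new SparkConf().setAppName("RandomSplitSketch").setMaster("local[*]"))
    val data = sc.parallelize(1 to 1000)
    // Roughly 600/400, but only in expectation; the seed fixes the outcome.
    val Array(training, test) = data.randomSplit(Array(0.6, 0.4), seed = 0L)
    println(s"training=${training.count()}, test=${test.count()}")
    sc.stop()
  }
}
```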
```diff
@@ -47,7 +47,7 @@ if __name__ == "__main__":
     # Instantiate regression metrics to compare predicted and actual ratings
     metrics = RegressionMetrics(scoreAndLabels)
 
-    # Root mean sqaured error
+    # Root mean squared error
    print("RMSE = %s" % metrics.rootMeanSquaredError)
 
     # R-squared
```
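For reference, rootMeanSquaredError is sqrt(mean((prediction - label)^2)) over the (prediction, label) pairs. The same call in a self-contained Scala sketch with made-up scores:

```scala
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.mllib.evaluation.RegressionMetrics

object RmseSketch {
  def main(args: Array[String]): Unit = {
    val sc = new SparkContext(
      new SparkConf().setAppName("RmseSketch").setMaster("local[*]"))
    // (prediction, label) pairs; the errors here are -0.5, 0.5, and 0.0.
    val scoreAndLabels = sc.parallelize(Seq((2.5, 3.0), (0.0, -0.5), (2.0, 2.0)))
    val metrics = new RegressionMetrics(scoreAndLabels)
    println(s"RMSE = ${metrics.rootMeanSquaredError}") // sqrt(0.5 / 3) here
    sc.stop()
  }
}
```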
```diff
@@ -16,7 +16,7 @@
 #
 
 # This example uses text8 file from http://mattmahoney.net/dc/text8.zip
-# The file was downloadded, unziped and split into multiple lines using
+# The file was downloaded, unzipped and split into multiple lines using
 #
 # wget http://mattmahoney.net/dc/text8.zip
 # unzip text8.zip
```
```diff
@@ -30,7 +30,7 @@ import breeze.linalg.{DenseVector, Vector}
  * org.apache.spark.mllib.classification.LogisticRegressionWithLBFGS based on your needs.
  */
 object LocalFileLR {
-  val D = 10   // Numer of dimensions
+  val D = 10   // Number of dimensions
   val rand = new Random(42)
 
   case class DataPoint(x: Vector[Double], y: Double)
```
```diff
@@ -35,7 +35,7 @@ import org.apache.spark._
  * org.apache.spark.mllib.classification.LogisticRegressionWithLBFGS based on your needs.
  */
 object SparkHdfsLR {
-  val D = 10   // Numer of dimensions
+  val D = 10   // Number of dimensions
   val rand = new Random(42)
 
   case class DataPoint(x: Vector[Double], y: Double)
```
```diff
@@ -36,7 +36,7 @@ import org.apache.spark._
  */
 object SparkLR {
   val N = 10000  // Number of data points
-  val D = 10   // Numer of dimensions
+  val D = 10   // Number of dimensions
   val R = 0.7  // Scaling factor
   val ITERATIONS = 5
   val rand = new Random(42)
```
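LocalFileLR, SparkHdfsLR, and SparkLR all share the same hand-rolled logistic-regression update over D-dimensional points and differ mainly in how the points are loaded. A condensed sketch of that shared gradient step (the names here are mine, not the examples'):

```scala
import scala.math.exp
import breeze.linalg.DenseVector

object LrStepSketch {
  case class DataPoint(x: DenseVector[Double], y: Double)

  // One iteration: subtract the summed per-point logistic gradient from w.
  def step(w: DenseVector[Double], points: Seq[DataPoint]): DenseVector[Double] = {
    val gradient = points
      .map(p => p.x * ((1.0 / (1.0 + exp(-p.y * (w dot p.x))) - 1.0) * p.y))
      .reduce(_ + _)
    w - gradient
  }
}
```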
```diff
@@ -60,10 +60,10 @@ object RDDRelation {
     // Write out an RDD as a parquet file with overwrite mode.
     df.write.mode(SaveMode.Overwrite).parquet("pair.parquet")
 
-    // Read in parquet file. Parquet files are self-describing so the schmema is preserved.
+    // Read in parquet file. Parquet files are self-describing so the schema is preserved.
     val parquetFile = sqlContext.read.parquet("pair.parquet")
 
-    // Queries can be run using the DSL on parequet files just like the original RDD.
+    // Queries can be run using the DSL on parquet files just like the original RDD.
     parquetFile.where($"key" === 1).select($"value".as("a")).collect().foreach(println)
 
     // These files can also be registered as tables.
```
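The hunk ends at the comment about registering the files as tables; as a sketch of that next step, continuing with the example's sqlContext and parquetFile values and the 1.x-era DataFrame API:

```scala
// Register the DataFrame under a table name, then query it with SQL.
parquetFile.registerTempTable("parquetFile")
sqlContext.sql("SELECT key, value FROM parquetFile WHERE key = 1")
  .collect().foreach(println)
```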
```diff
@@ -44,7 +44,7 @@ object TwitterPopularTags {
     val filters = args.takeRight(args.length - 4)
 
     // Set the system properties so that Twitter4j library used by twitter stream
-    // can use them to generat OAuth credentials
+    // can use them to generate OAuth credentials
     System.setProperty("twitter4j.oauth.consumerKey", consumerKey)
     System.setProperty("twitter4j.oauth.consumerSecret", consumerSecret)
     System.setProperty("twitter4j.oauth.accessToken", accessToken)
```
```diff
@@ -21,7 +21,7 @@ import org.apache.spark.SparkFunSuite
 
 class EdgeSuite extends SparkFunSuite {
   test ("compare") {
-    // decending order
+    // descending order
     val testEdges: Array[Edge[Int]] = Array(
       Edge(0x7FEDCBA987654321L, -0x7FEDCBA987654321L, 1),
       Edge(0x2345L, 0x1234L, 1),
```
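The renamed comment documents the test's setup: the array is built in descending order so that sorting it must ascend. GraphX edges compare lexicographically by (srcId, dstId); a dependency-free sketch of that ordering with plain tuples standing in for Edge:

```scala
object EdgeOrderSketch {
  def main(args: Array[String]): Unit = {
    // Descending by (srcId, dstId), mirroring the suite's test data shape.
    val descending = Seq((0x2345L, 0x1234L), (0x1234L, 0x100L), (0x100L, 0x1L))
    // Tuples of Longs sort lexicographically, so sorted() reverses the input.
    println(descending.sorted == descending.reverse) // true
  }
}
```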
```diff
@@ -541,7 +541,7 @@ object PrefixSpan extends Logging {
   }
 
   /**
-   * Represents a frequence sequence.
+   * Represents a frequent sequence.
    * @param sequence a sequence of itemsets stored as an Array of Arrays
    * @param freq frequency
    * @tparam Item item type
```
```diff
@@ -688,7 +688,7 @@ object Unidoc {
       "-noqualifier", "java.lang"
     ),
 
-    // Use GitHub repository for Scaladoc source linke
+    // Use GitHub repository for Scaladoc source links
     unidocSourceBase := s"https://github.com/apache/spark/tree/v${version.value}",
 
     scalacOptions in (ScalaUnidoc, unidoc) ++= Seq(
```
```diff
@@ -127,7 +127,7 @@ class PrefixSpanModel(JavaModelWrapper):
 
     @since("1.6.0")
     def freqSequences(self):
-        """Gets frequence sequences"""
+        """Gets frequent sequences"""
         return self.call("getFreqSequences").map(lambda x: PrefixSpan.FreqSequence(x[0], x[1]))
 
```
```diff
@@ -72,7 +72,7 @@ import org.apache.spark.annotation.DeveloperApi
  * all variables defined by that code. To extract the result of an
  * interpreted line to show the user, a second "result object" is created
  * which imports the variables exported by the above object and then
- * exports members called "$eval" and "$print". To accomodate user expressions
+ * exports members called "$eval" and "$print". To accommodate user expressions
  * that read from variables or methods defined in previous statements, "import"
  * statements are used.
  *
```
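The corrected line sits in a longer comment explaining how the REPL compiles input: each line becomes an object holding its variables, and a second "result object" imports them and exposes $eval/$print members. Roughly, as an illustrative sketch rather than the interpreter's literal output:

```scala
// What the REPL might synthesize for the input `val x = 42` (approximate).
object $line1 {
  object $read {
    val x = 42
  }
}

// The "result object": imports the wrapped variables, exports $eval/$print.
object $line1Result {
  import $line1.$read._
  def $eval: Any = x
  def $print: String = "x: Int = " + x
}
```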
```diff
@@ -1515,7 +1515,7 @@ import org.apache.spark.annotation.DeveloperApi
     exprTyper.symbolOfLine(code)
 
   /**
-   * Constucts type information based on the provided expression's final
+   * Constructs type information based on the provided expression's final
    * result or the definition provided.
    *
    * @param expr The expression or definition
```
```diff
@@ -29,7 +29,7 @@ private case object OracleDialect extends JdbcDialect {
   override def getCatalystType(
       sqlType: Int, typeName: String, size: Int, md: MetadataBuilder): Option[DataType] = {
     // Handle NUMBER fields that have no precision/scale in special way
-    // because JDBC ResultSetMetaData converts this to 0 procision and -127 scale
+    // because JDBC ResultSetMetaData converts this to 0 precision and -127 scale
     // For more details, please see
     // https://github.com/apache/spark/pull/8780#issuecomment-145598968
     // and
```
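The comment records why the special case exists: an Oracle NUMBER declared without precision or scale reaches JDBC's ResultSetMetaData as precision 0 and scale -127, which DecimalType would reject. A simplified sketch of a dialect that intercepts that shape (the fallback type chosen here is an assumption for illustration, not Spark's actual choice):

```scala
import java.sql.Types
import org.apache.spark.sql.jdbc.JdbcDialect
import org.apache.spark.sql.types.{DataType, DecimalType, MetadataBuilder}

case object OracleDialectSketch extends JdbcDialect {
  override def canHandle(url: String): Boolean = url.startsWith("jdbc:oracle")

  override def getCatalystType(
      sqlType: Int, typeName: String, size: Int, md: MetadataBuilder): Option[DataType] = {
    // NUMBER with no declared precision/scale surfaces as size (precision) 0.
    if (sqlType == Types.NUMERIC && size == 0) Some(DecimalType(38, 10)) // assumed fallback
    else None
  }
}
```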
```diff
@@ -23,7 +23,7 @@ import org.apache.spark.sql.execution.streaming.{CompositeOffset, LongOffset, Of
 trait OffsetSuite extends SparkFunSuite {
   /** Creates test to check all the comparisons of offsets given a `one` that is less than `two`. */
   def compare(one: Offset, two: Offset): Unit = {
-    test(s"comparision $one <=> $two") {
+    test(s"comparison $one <=> $two") {
       assert(one < two)
       assert(one <= two)
       assert(one <= one)
```
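Each call to the generator then registers one "comparison $one <=> $two" test, so concrete suites reduce to one line per offset pair, for example:

```scala
// A usage sketch with the LongOffset type from this file's imports.
class LongOffsetSuite extends OffsetSuite {
  compare(LongOffset(1), LongOffset(2))
}
```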