[MINOR][DOC] Fix typo

## What changes were proposed in this pull request?

This PR fixes typos of the form `auxiliary verb + verb[s]` (e.g. "will returns" -> "will return") in comments and docs. This is a follow-up to #21956.

## How was this patch tested?

N/A

Closes #22040 from kiszk/spellcheck1.

Authored-by: Kazuaki Ishizaki <ishizaki@jp.ibm.com>
Signed-off-by: hyukjinkwon <gurwls223@apache.org>
Kazuaki Ishizaki 2018-08-09 20:10:17 +08:00 committed by hyukjinkwon
parent 519e03d82e
commit 56e9e97073
18 changed files with 19 additions and 19 deletions


@@ -662,7 +662,7 @@ public final class BytesToBytesMap extends MemoryConsumer {
 * It is only valid to call this method immediately after calling `lookup()` using the same key.
 * </p>
 * <p>
-* The key and value must be word-aligned (that is, their sizes must multiples of 8).
+* The key and value must be word-aligned (that is, their sizes must be a multiple of 8).
 * </p>
 * <p>
 * After calling this method, calls to `get[Key|Value]Address()` and `get[Key|Value]Length`

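For context, the word-alignment requirement in the comment above means key and value lengths are rounded up to the next multiple of 8 before being written. A minimal sketch of that rounding (the helper name is illustrative, not part of `BytesToBytesMap`):

```scala
// Illustrative only: round a record length up to the next multiple of 8 bytes
// so that keys and values written into the map stay word-aligned.
def roundUpTo8(numBytes: Int): Int = (numBytes + 7) & ~7

assert(roundUpTo8(8) == 8)   // already aligned
assert(roundUpTo8(13) == 16) // padded up to the next word boundary
```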

@@ -51,7 +51,7 @@ final class UnsafeSorterSpillMerger {
 if (spillReader.hasNext()) {
 // We only add the spillReader to the priorityQueue if it is not empty. We do this to
 // make sure the hasNext method of UnsafeSorterIterator returned by getSortedIterator
-// does not return wrong result because hasNext will returns true
+// does not return wrong result because hasNext will return true
 // at least priorityQueue.size() times. If we allow n spillReaders in the
 // priorityQueue, we will have n extra empty records in the result of UnsafeSorterIterator.
 spillReader.loadNext();

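The comment above reasons that only non-empty spill readers may enter the priority queue, otherwise `hasNext` would report records that do not exist. A self-contained sketch of that merge pattern (plain Scala iterators, not Spark's `UnsafeSorterSpillMerger`):

```scala
import scala.collection.mutable

// Sketch only: merge sorted iterators, enqueueing an iterator only while it still has records.
def mergeSorted(iters: Seq[Iterator[Int]]): Iterator[Int] = {
  // Smallest head first: PriorityQueue is a max-heap, so reverse the ordering.
  val ord = Ordering.by[BufferedIterator[Int], Int](_.head).reverse
  val queue = mutable.PriorityQueue.empty[BufferedIterator[Int]](ord)
  iters.map(_.buffered).filter(_.hasNext).foreach(queue.enqueue(_))
  new Iterator[Int] {
    override def hasNext: Boolean = queue.nonEmpty
    override def next(): Int = {
      val it = queue.dequeue()
      val value = it.next()
      if (it.hasNext) queue.enqueue(it) // drop exhausted iterators instead of re-enqueueing them
      value
    }
  }
}
```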

@@ -107,7 +107,7 @@ class SparkHadoopUtil extends Logging {
 }
 /**
-* Return an appropriate (subclass) of Configuration. Creating config can initializes some Hadoop
+* Return an appropriate (subclass) of Configuration. Creating config can initialize some Hadoop
 * subsystems.
 */
 def newConfiguration(conf: SparkConf): Configuration = {


@@ -30,7 +30,7 @@ import org.apache.spark.api.java.*;
 import org.apache.spark.*;
 /**
-* Java apps can uses both Java-friendly JavaSparkContext and Scala SparkContext.
+* Java apps can use both Java-friendly JavaSparkContext and Scala SparkContext.
 */
 public class JavaSparkContextSuite implements Serializable {

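The interoperability this test comment describes, holding both the Scala `SparkContext` and the Java-friendly wrapper, looks roughly like the sketch below; the master URL and app name are placeholders.

```scala
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.api.java.JavaSparkContext

// Sketch: one application can use both views of the same context.
val conf = new SparkConf().setMaster("local[2]").setAppName("java-scala-interop")
val sc = new SparkContext(conf)    // Scala API
val jsc = new JavaSparkContext(sc) // Java-friendly wrapper around the same context
assert(jsc.sc eq sc)               // the wrapper exposes the underlying SparkContext
jsc.stop()
```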

@@ -155,7 +155,7 @@ private[kafka010] case class InternalKafkaConsumer(
 var toFetchOffset = offset
 var consumerRecord: ConsumerRecord[Array[Byte], Array[Byte]] = null
 // We want to break out of the while loop on a successful fetch to avoid using "return"
-// which may causes a NonLocalReturnControl exception when this method is used as a function.
+// which may cause a NonLocalReturnControl exception when this method is used as a function.
 var isFetchComplete = false
 while (toFetchOffset != UNKNOWN_OFFSET && !isFetchComplete) {

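The `NonLocalReturnControl` issue mentioned above is general Scala behavior: `return` inside a closure is implemented by throwing `scala.runtime.NonLocalReturnControl`, which can be swallowed by an overly broad `catch`. A minimal, Kafka-free illustration of the flag-based exit the code uses instead:

```scala
// Sketch: leave the loop without `return`, so no control-flow exception is thrown
// even when this logic runs inside a function value or a broad try/catch.
def firstMultipleOfSeven(xs: Seq[Int]): Option[Int] = {
  var result: Option[Int] = None
  var done = false
  val it = xs.iterator
  while (!done && it.hasNext) {
    val x = it.next()
    if (x % 7 == 0) {
      result = Some(x)
      done = true // flag instead of `return Some(x)`
    }
  }
  result
}
```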

@@ -1484,7 +1484,7 @@ sealed trait LogisticRegressionSummary extends Serializable {
 /**
 * Convenient method for casting to binary logistic regression summary.
-* This method will throws an Exception if the summary is not a binary summary.
+* This method will throw an Exception if the summary is not a binary summary.
 */
 @Since("2.3.0")
 def asBinary: BinaryLogisticRegressionSummary = this match {

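A hedged usage sketch of `asBinary`: it assumes `model` is a `LogisticRegressionModel` fit on a binary-label dataset; on a multinomial model the cast throws, as the comment above says.

```scala
// Sketch: downcast the generic summary to reach binary-only metrics.
val summary = model.summary          // LogisticRegressionTrainingSummary
val binarySummary = summary.asBinary // throws if the summary is not binary
println(binarySummary.areaUnderROC)
```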

@@ -206,7 +206,7 @@ class DecimalType(FractionalType):
 and scale (the number of digits on the right of dot). For example, (5, 2) can
 support the value from [-999.99 to 999.99].
-The precision can be up to 38, the scale must less or equal to precision.
+The precision can be up to 38, the scale must be less or equal to precision.
 When create a DecimalType, the default precision and scale is (10, 0). When infer
 schema from decimal.Decimal objects, it will be DecimalType(38, 18).

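To make the docstring's example concrete, the Scala-side equivalent of Python's `DecimalType(5, 2)` is sketched below; both enforce precision up to 38 and scale no greater than precision.

```scala
import org.apache.spark.sql.types.DecimalType

// Sketch: 5 total digits, 2 of them after the decimal point,
// so legal values range from -999.99 to 999.99.
val dt = DecimalType(5, 2)
```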

@@ -286,7 +286,7 @@ object DecimalPrecision extends TypeCoercionRule {
 // Consider the following example: multiplying a column which is DECIMAL(38, 18) by 2.
 // If we use the default precision and scale for the integer type, 2 is considered a
 // DECIMAL(10, 0). According to the rules, the result would be DECIMAL(38 + 10 + 1, 18),
-// which is out of range and therefore it will becomes DECIMAL(38, 7), leading to
+// which is out of range and therefore it will become DECIMAL(38, 7), leading to
 // potentially loosing 11 digits of the fractional part. Using only the precision needed
 // by the Literal, instead, the result would be DECIMAL(38 + 1 + 1, 18), which would
 // become DECIMAL(38, 16), safely having a much lower precision loss.

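The arithmetic in the comment above can be reproduced with a small sketch of the documented adjustment rule (it mirrors the description, not Spark's exact code): `d1 * d2` first gets `(p1 + p2 + 1, s1 + s2)`, and when the precision exceeds 38 the scale is reduced, but never below `min(s1 + s2, 6)`.

```scala
// Sketch of the precision/scale adjustment described above.
def multiplyResultType(p1: Int, s1: Int, p2: Int, s2: Int): (Int, Int) = {
  val maxPrecision = 38
  val precision = p1 + p2 + 1
  val scale = s1 + s2
  if (precision <= maxPrecision) {
    (precision, scale)
  } else {
    val intDigits = precision - scale
    val minScale = math.min(scale, 6)
    (maxPrecision, math.max(maxPrecision - intDigits, minScale))
  }
}

multiplyResultType(38, 18, 10, 0) // (38, 7): literal treated as DECIMAL(10, 0) loses 11 fractional digits
multiplyResultType(38, 18, 1, 0)  // (38, 16): literal 2 treated as DECIMAL(1, 0) keeps far more scale
```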

@@ -44,7 +44,7 @@ object CodegenObjectFactoryMode extends Enumeration {
 /**
 * A codegen object generator which creates objects with codegen path first. Once any compile
-* error happens, it can fallbacks to interpreted implementation. In tests, we can use a SQL config
+* error happens, it can fallback to interpreted implementation. In tests, we can use a SQL config
 * `SQLConf.CODEGEN_FACTORY_MODE` to control fallback behavior.
 */
 abstract class CodeGeneratorWithInterpretedFallback[IN, OUT] {

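The codegen-first-with-interpreted-fallback pattern reads roughly like the sketch below; the trait and method names are illustrative, not necessarily Spark's internal signatures.

```scala
// Sketch: build the compiled object first and fall back to the interpreted one
// if code generation or compilation fails.
trait ObjectFactoryWithFallback[IN, OUT] {
  protected def createCodeGeneratedObject(in: IN): OUT
  protected def createInterpretedObject(in: IN): OUT

  def createObject(in: IN): OUT =
    try {
      createCodeGeneratedObject(in)
    } catch {
      case _: Exception => createInterpretedObject(in)
    }
}
```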

@@ -26,7 +26,7 @@ import org.apache.spark.sql.types.AbstractDataType
 * This trait is typically used by operator expressions (e.g. [[Add]], [[Subtract]]) to define
 * expected input types without any implicit casting.
 *
-* Most function expressions (e.g. [[Substring]] should extends [[ImplicitCastInputTypes]]) instead.
+* Most function expressions (e.g. [[Substring]] should extend [[ImplicitCastInputTypes]]) instead.
 */
 trait ExpectsInputTypes extends Expression {


@@ -766,7 +766,7 @@ class UnsupportedOperationsSuite extends SparkFunSuite {
 *
 * To test this correctly, the given logical plan is wrapped in a fake operator that makes the
 * whole plan look like a streaming plan. Otherwise, a batch plan may throw not supported
-* exception simply for not being a streaming plan, even though that plan could exists as batch
+* exception simply for not being a streaming plan, even though that plan could exist as batch
 * subplan inside some streaming plan.
 */
 def assertSupportedInStreamingPlan(
@@ -793,7 +793,7 @@ class UnsupportedOperationsSuite extends SparkFunSuite {
 *
 * To test this correctly, the given logical plan is wrapped in a fake operator that makes the
 * whole plan look like a streaming plan. Otherwise, a batch plan may throw not supported
-* exception simply for not being a streaming plan, even though that plan could exists as batch
+* exception simply for not being a streaming plan, even though that plan could exist as batch
 * subplan inside some streaming plan.
 */
 def assertNotSupportedInStreamingPlan(


@@ -144,7 +144,7 @@ class EncoderResolutionSuite extends PlanTest {
 // It should pass analysis
 val bound = encoder.resolveAndBind(attrs)
-// If no null values appear, it should works fine
+// If no null values appear, it should work fine
 bound.fromRow(InternalRow(new GenericArrayData(Array(1, 2))))
 // If there is null value, it should throw runtime exception


@@ -110,7 +110,7 @@ object SQLMetrics {
 * spill size, etc.
 */
 def createSizeMetric(sc: SparkContext, name: String): SQLMetric = {
-// The final result of this metric in physical operator UI may looks like:
+// The final result of this metric in physical operator UI may look like:
 // data size total (min, med, max):
 // 100GB (100MB, 1GB, 10GB)
 val acc = new SQLMetric(SIZE_METRIC, -1)


@@ -50,7 +50,7 @@ class FileStreamSource(
 @transient private val fs = new Path(path).getFileSystem(hadoopConf)
 private val qualifiedBasePath: Path = {
-fs.makeQualified(new Path(path)) // can contains glob patterns
+fs.makeQualified(new Path(path)) // can contain glob patterns
 }
 private val optionsWithPartitionBasePath = sourceOptions.optionMapWithoutPath ++ {


@@ -312,7 +312,7 @@ trait ProgressReporter extends Logging {
 // DataSourceV2ScanExec records the number of rows it has read using SQLMetrics. However,
 // just collecting all DataSourceV2ScanExec nodes and getting the metric is not correct as
 // a DataSourceV2ScanExec instance may be referred to in the execution plan from two (or
-// even multiple times) points and considering it twice will leads to double counting. We
+// even multiple times) points and considering it twice will lead to double counting. We
 // can't dedup them using their hashcode either because two different instances of
 // DataSourceV2ScanExec can have the same hashcode but account for separate sets of
 // records read, and deduping them to consider only one of them would be undercounting the


@@ -76,7 +76,7 @@ private[sql] trait SQLTestUtils extends SparkFunSuite with SQLTestUtilsBase with
 /**
 * Disable stdout and stderr when running the test. To not output the logs to the console,
-* ConsoleAppender's `follow` should be set to `true` so that it will honors reassignments of
+* ConsoleAppender's `follow` should be set to `true` so that it will honor reassignments of
 * System.out or System.err. Otherwise, ConsoleAppender will still output to the console even if
 * we change System.out and System.err.
 */

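For reference, enabling `follow` on a log4j 1.x `ConsoleAppender` (the logging stack Spark used at the time) looks roughly like the sketch below; it assumes `ConsoleAppender.setFollow` is available in the bundled log4j version. With `follow = true` the appender re-resolves `System.out`/`System.err` on each write, so later reassignments are honored.

```scala
import org.apache.log4j.{ConsoleAppender, Logger, PatternLayout}

// Sketch: an appender that honors later System.setOut / System.setErr calls.
val appender = new ConsoleAppender(new PatternLayout("%m%n"), ConsoleAppender.SYSTEM_ERR)
appender.setFollow(true)   // assumed available (log4j 1.2.13+)
appender.activateOptions() // re-create the underlying writer with follow enabled
Logger.getRootLogger.addAppender(appender)
```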

@@ -30,7 +30,7 @@ import org.apache.spark.sql.execution.command.DataWritingCommand
 /**
 * Create table and insert the query result into it.
 *
-* @param tableDesc the Table Describe, which may contains serde, storage handler etc.
+* @param tableDesc the Table Describe, which may contain serde, storage handler etc.
 * @param query the query whose result will be insert into the new relation
 * @param mode SaveMode
 */


@@ -84,7 +84,7 @@ class HiveQuerySuite extends HiveComparisonTest with SQLTestUtils with BeforeAnd
 }
 // Testing the Broadcast based join for cartesian join (cross join)
-// We assume that the Broadcast Join Threshold will works since the src is a small table
+// We assume that the Broadcast Join Threshold will work since the src is a small table
 private val spark_10484_1 = """
 | SELECT a.key, b.key
 | FROM src a LEFT JOIN src b WHERE a.key > b.key + 300