[SPARK-12327][SPARKR] fix code for lintr warning for commented code

shivaram

Author: felixcheung <felixcheung_m@hotmail.com>

Closes #10408 from felixcheung/rcodecomment.
felixcheung 2016-01-03 20:53:35 +05:30 committed by Shivaram Venkataraman
parent 6c5bbd628a
commit c3d505602d
9 changed files with 88 additions and 11 deletions

View file

@@ -1,2 +1,2 @@
-linters: with_defaults(line_length_linter(100), camel_case_linter = NULL, open_curly_linter(allow_single_line = TRUE), closed_curly_linter(allow_single_line = TRUE), commented_code_linter = NULL)
+linters: with_defaults(line_length_linter(100), camel_case_linter = NULL, open_curly_linter(allow_single_line = TRUE), closed_curly_linter(allow_single_line = TRUE))
 exclusions: list("inst/profile/general.R" = 1, "inst/profile/shell.R")
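Dropping commented_code_linter = NULL from with_defaults() re-enables lintr's commented-code check, so any comment that parses as R code must now be reworded or explicitly excluded. A minimal sketch of the exclusion markers the rest of this commit relies on (hypothetical function, not from the Spark sources):

# nolint start
# oldResult <- slowComputation(x)  # commented-out code: parses as R,
#                                  # so lintr would otherwise flag it
# nolint end
doubleIt <- function(x) {
  x * 2  # prose comments that do not parse as R are never flagged
}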

View file

@@ -180,7 +180,7 @@ setMethod("getJRDD", signature(rdd = "PipelinedRDD"),
 }
 # Save the serialization flag after we create a RRDD
 rdd@env$serializedMode <- serializedMode
-rdd@env$jrdd_val <- callJMethod(rddRef, "asJavaRDD") # rddRef$asJavaRDD()
+rdd@env$jrdd_val <- callJMethod(rddRef, "asJavaRDD")
 rdd@env$jrdd_val
 })
@@ -225,7 +225,7 @@ setMethod("cache",
 #'
 #' Persist this RDD with the specified storage level. For details of the
 #' supported storage levels, refer to
-#' http://spark.apache.org/docs/latest/programming-guide.html#rdd-persistence.
+#'\url{http://spark.apache.org/docs/latest/programming-guide.html#rdd-persistence}.
 #'
 #' @param x The RDD to persist
 #' @param newLevel The new storage level to be assigned
@@ -382,11 +382,13 @@ setMethod("collectPartition",
 #' \code{collectAsMap} returns a named list as a map that contains all of the elements
 #' in a key-value pair RDD.
 #' @examples
+# nolint start
 #'\dontrun{
 #' sc <- sparkR.init()
 #' rdd <- parallelize(sc, list(list(1, 2), list(3, 4)), 2L)
 #' collectAsMap(rdd) # list(`1` = 2, `3` = 4)
 #'}
+# nolint end
 #' @rdname collect-methods
 #' @aliases collectAsMap,RDD-method
 #' @noRd
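This wrapping recurs throughout RDD.R and pairRDD.R below: the roxygen @examples blocks carry their expected output in comments (here # list(`1` = 2, `3` = 4)), which the re-enabled commented-code check would report. Because # nolint start and # nolint end are plain # comments rather than #' roxygen comments, they silence lintr for the span without leaking into the generated documentation. A generic sketch of the pattern (hypothetical function, not from the Spark sources):

#' Double every element of a vector.
#'
#' @param x A numeric vector.
#' @examples
# nolint start
#'\dontrun{
#' doubleAll(c(1, 2, 3)) # c(2, 4, 6)
#'}
# nolint end
#' @noRd
doubleAll <- function(x) {
  x * 2
}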
@@ -442,11 +444,13 @@ setMethod("length",
 #' @return list of (value, count) pairs, where count is number of each unique
 #' value in rdd.
 #' @examples
+# nolint start
 #'\dontrun{
 #' sc <- sparkR.init()
 #' rdd <- parallelize(sc, c(1,2,3,2,1))
 #' countByValue(rdd) # (1,2L), (2,2L), (3,1L)
 #'}
+# nolint end
 #' @rdname countByValue
 #' @aliases countByValue,RDD-method
 #' @noRd
@@ -597,11 +601,13 @@ setMethod("mapPartitionsWithIndex",
 #' @param x The RDD to be filtered.
 #' @param f A unary predicate function.
 #' @examples
+# nolint start
 #'\dontrun{
 #' sc <- sparkR.init()
 #' rdd <- parallelize(sc, 1:10)
 #' unlist(collect(filterRDD(rdd, function (x) { x < 3 }))) # c(1, 2)
 #'}
+# nolint end
 #' @rdname filterRDD
 #' @aliases filterRDD,RDD,function-method
 #' @noRd
@@ -756,11 +762,13 @@ setMethod("foreachPartition",
 #' @param x The RDD to take elements from
 #' @param num Number of elements to take
 #' @examples
+# nolint start
 #'\dontrun{
 #' sc <- sparkR.init()
 #' rdd <- parallelize(sc, 1:10)
 #' take(rdd, 2L) # list(1, 2)
 #'}
+# nolint end
 #' @rdname take
 #' @aliases take,RDD,numeric-method
 #' @noRd
@@ -824,11 +832,13 @@ setMethod("first",
 #' @param x The RDD to remove duplicates from.
 #' @param numPartitions Number of partitions to create.
 #' @examples
+# nolint start
 #'\dontrun{
 #' sc <- sparkR.init()
 #' rdd <- parallelize(sc, c(1,2,2,3,3,3))
 #' sort(unlist(collect(distinct(rdd)))) # c(1, 2, 3)
 #'}
+# nolint end
 #' @rdname distinct
 #' @aliases distinct,RDD-method
 #' @noRd
@@ -974,11 +984,13 @@ setMethod("takeSample", signature(x = "RDD", withReplacement = "logical",
 #' @param x The RDD.
 #' @param func The function to be applied.
 #' @examples
+# nolint start
 #'\dontrun{
 #' sc <- sparkR.init()
 #' rdd <- parallelize(sc, list(1, 2, 3))
 #' collect(keyBy(rdd, function(x) { x*x })) # list(list(1, 1), list(4, 2), list(9, 3))
 #'}
+# nolint end
 #' @rdname keyBy
 #' @aliases keyBy,RDD
 #' @noRd
@@ -1113,11 +1125,13 @@ setMethod("saveAsTextFile",
 #' @param numPartitions Number of partitions to create.
 #' @return An RDD where all elements are sorted.
 #' @examples
+# nolint start
 #'\dontrun{
 #' sc <- sparkR.init()
 #' rdd <- parallelize(sc, list(3, 2, 1))
 #' collect(sortBy(rdd, function(x) { x })) # list (1, 2, 3)
 #'}
+# nolint end
 #' @rdname sortBy
 #' @aliases sortBy,RDD,RDD-method
 #' @noRd
@@ -1188,11 +1202,13 @@ takeOrderedElem <- function(x, num, ascending = TRUE) {
 #' @param num Number of elements to return.
 #' @return The first N elements from the RDD in ascending order.
 #' @examples
+# nolint start
 #'\dontrun{
 #' sc <- sparkR.init()
 #' rdd <- parallelize(sc, list(10, 1, 2, 9, 3, 4, 5, 6, 7))
 #' takeOrdered(rdd, 6L) # list(1, 2, 3, 4, 5, 6)
 #'}
+# nolint end
 #' @rdname takeOrdered
 #' @aliases takeOrdered,RDD,RDD-method
 #' @noRd
@@ -1209,11 +1225,13 @@ setMethod("takeOrdered",
 #' @return The top N elements from the RDD.
 #' @rdname top
 #' @examples
+# nolint start
 #'\dontrun{
 #' sc <- sparkR.init()
 #' rdd <- parallelize(sc, list(10, 1, 2, 9, 3, 4, 5, 6, 7))
 #' top(rdd, 6L) # list(10, 9, 7, 6, 5, 4)
 #'}
+# nolint end
 #' @aliases top,RDD,RDD-method
 #' @noRd
 setMethod("top",
@@ -1261,6 +1279,7 @@ setMethod("fold",
 #' @rdname aggregateRDD
 #' @seealso reduce
 #' @examples
+# nolint start
 #'\dontrun{
 #' sc <- sparkR.init()
 #' rdd <- parallelize(sc, list(1, 2, 3, 4))
@@ -1269,6 +1288,7 @@ setMethod("fold",
 #' combOp <- function(x, y) { list(x[[1]] + y[[1]], x[[2]] + y[[2]]) }
 #' aggregateRDD(rdd, zeroValue, seqOp, combOp) # list(10, 4)
 #'}
+# nolint end
 #' @aliases aggregateRDD,RDD,RDD-method
 #' @noRd
 setMethod("aggregateRDD",
@@ -1367,12 +1387,14 @@ setMethod("setName",
 #' @return An RDD with zipped items.
 #' @seealso zipWithIndex
 #' @examples
+# nolint start
 #'\dontrun{
 #' sc <- sparkR.init()
 #' rdd <- parallelize(sc, list("a", "b", "c", "d", "e"), 3L)
 #' collect(zipWithUniqueId(rdd))
 #' # list(list("a", 0), list("b", 3), list("c", 1), list("d", 4), list("e", 2))
 #'}
+# nolint end
 #' @rdname zipWithUniqueId
 #' @aliases zipWithUniqueId,RDD
 #' @noRd
@@ -1408,12 +1430,14 @@ setMethod("zipWithUniqueId",
 #' @return An RDD with zipped items.
 #' @seealso zipWithUniqueId
 #' @examples
+# nolint start
 #'\dontrun{
 #' sc <- sparkR.init()
 #' rdd <- parallelize(sc, list("a", "b", "c", "d", "e"), 3L)
 #' collect(zipWithIndex(rdd))
 #' # list(list("a", 0), list("b", 1), list("c", 2), list("d", 3), list("e", 4))
 #'}
+# nolint end
 #' @rdname zipWithIndex
 #' @aliases zipWithIndex,RDD
 #' @noRd
@@ -1454,12 +1478,14 @@ setMethod("zipWithIndex",
 #' @return An RDD created by coalescing all elements within
 #' each partition into a list.
 #' @examples
+# nolint start
 #'\dontrun{
 #' sc <- sparkR.init()
 #' rdd <- parallelize(sc, as.list(1:4), 2L)
 #' collect(glom(rdd))
 #' # list(list(1, 2), list(3, 4))
 #'}
+# nolint end
 #' @rdname glom
 #' @aliases glom,RDD
 #' @noRd
@@ -1519,6 +1545,7 @@ setMethod("unionRDD",
 #' @param other Another RDD to be zipped.
 #' @return An RDD zipped from the two RDDs.
 #' @examples
+# nolint start
 #'\dontrun{
 #' sc <- sparkR.init()
 #' rdd1 <- parallelize(sc, 0:4)
@@ -1526,6 +1553,7 @@ setMethod("unionRDD",
 #' collect(zipRDD(rdd1, rdd2))
 #' # list(list(0, 1000), list(1, 1001), list(2, 1002), list(3, 1003), list(4, 1004))
 #'}
+# nolint end
 #' @rdname zipRDD
 #' @aliases zipRDD,RDD
 #' @noRd
@@ -1557,12 +1585,14 @@ setMethod("zipRDD",
 #' @param other An RDD.
 #' @return A new RDD which is the Cartesian product of these two RDDs.
 #' @examples
+# nolint start
 #'\dontrun{
 #' sc <- sparkR.init()
 #' rdd <- parallelize(sc, 1:2)
 #' sortByKey(cartesian(rdd, rdd))
 #' # list(list(1, 1), list(1, 2), list(2, 1), list(2, 2))
 #'}
+# nolint end
 #' @rdname cartesian
 #' @aliases cartesian,RDD,RDD-method
 #' @noRd
@@ -1587,6 +1617,7 @@ setMethod("cartesian",
 #' @param numPartitions Number of the partitions in the result RDD.
 #' @return An RDD with the elements from this that are not in other.
 #' @examples
+# nolint start
 #'\dontrun{
 #' sc <- sparkR.init()
 #' rdd1 <- parallelize(sc, list(1, 1, 2, 2, 3, 4))
@@ -1594,6 +1625,7 @@ setMethod("cartesian",
 #' collect(subtract(rdd1, rdd2))
 #' # list(1, 1, 3)
 #'}
+# nolint end
 #' @rdname subtract
 #' @aliases subtract,RDD
 #' @noRd
@@ -1619,6 +1651,7 @@ setMethod("subtract",
 #' @param numPartitions The number of partitions in the result RDD.
 #' @return An RDD which is the intersection of these two RDDs.
 #' @examples
+# nolint start
 #'\dontrun{
 #' sc <- sparkR.init()
 #' rdd1 <- parallelize(sc, list(1, 10, 2, 3, 4, 5))
@@ -1626,6 +1659,7 @@ setMethod("subtract",
 #' collect(sortBy(intersection(rdd1, rdd2), function(x) { x }))
 #' # list(1, 2, 3)
 #'}
+# nolint end
 #' @rdname intersection
 #' @aliases intersection,RDD
 #' @noRd
@@ -1653,6 +1687,7 @@ setMethod("intersection",
 #' Assumes that all the RDDs have the *same number of partitions*, but
 #' does *not* require them to have the same number of elements in each partition.
 #' @examples
+# nolint start
 #'\dontrun{
 #' sc <- sparkR.init()
 #' rdd1 <- parallelize(sc, 1:2, 2L) # 1, 2
@@ -1662,6 +1697,7 @@ setMethod("intersection",
 #' func = function(x, y, z) { list(list(x, y, z))} ))
 #' # list(list(1, c(1,2), c(1,2,3)), list(2, c(3,4), c(4,5,6)))
 #'}
+# nolint end
 #' @rdname zipRDD
 #' @aliases zipPartitions,RDD
 #' @noRd

View file

@@ -17,6 +17,7 @@
 # Utility functions to deserialize objects from Java.

+# nolint start
 # Type mapping from Java to R
 #
 # void -> NULL
@@ -32,6 +33,8 @@
 #
 # Array[T] -> list()
 # Object -> jobj
+#
+# nolint end

 readObject <- function(con) {
 # Read type first
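The type-mapping comment blocks in deserialize.R here and serialize.R below need the wrapper for a subtle reason: -> is R's right-assignment operator, so a line such as "void -> NULL" is syntactically valid R, and the commented-code check treats parseable comments as code. A quick demonstration in plain R (assuming only that the linter keys off parseability):

# "void -> NULL" parses as a right assignment, i.e. as code:
parse(text = "void -> NULL")  # succeeds; equivalent to NULL <- void
# ordinary prose does not parse, so it is left alone:
res <- try(parse(text = "maps Java void to R NULL"), silent = TRUE)
inherits(res, "try-error")  # TRUE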

View file

@@ -30,12 +30,14 @@ NULL
 #' @param key The key to look up for
 #' @return a list of values in this RDD for key key
 #' @examples
+# nolint start
 #'\dontrun{
 #' sc <- sparkR.init()
 #' pairs <- list(c(1, 1), c(2, 2), c(1, 3))
 #' rdd <- parallelize(sc, pairs)
 #' lookup(rdd, 1) # list(1, 3)
 #'}
+# nolint end
 #' @rdname lookup
 #' @aliases lookup,RDD-method
 #' @noRd
@@ -58,11 +60,13 @@ setMethod("lookup",
 #' @param x The RDD to count keys.
 #' @return list of (key, count) pairs, where count is number of each key in rdd.
 #' @examples
+# nolint start
 #'\dontrun{
 #' sc <- sparkR.init()
 #' rdd <- parallelize(sc, list(c("a", 1), c("b", 1), c("a", 1)))
 #' countByKey(rdd) # ("a", 2L), ("b", 1L)
 #'}
+# nolint end
 #' @rdname countByKey
 #' @aliases countByKey,RDD-method
 #' @noRd
@@ -77,11 +81,13 @@ setMethod("countByKey",
 #'
 #' @param x The RDD from which the keys of each tuple is returned.
 #' @examples
+# nolint start
 #'\dontrun{
 #' sc <- sparkR.init()
 #' rdd <- parallelize(sc, list(list(1, 2), list(3, 4)))
 #' collect(keys(rdd)) # list(1, 3)
 #'}
+# nolint end
 #' @rdname keys
 #' @aliases keys,RDD
 #' @noRd
@@ -98,11 +104,13 @@ setMethod("keys",
 #'
 #' @param x The RDD from which the values of each tuple is returned.
 #' @examples
+# nolint start
 #'\dontrun{
 #' sc <- sparkR.init()
 #' rdd <- parallelize(sc, list(list(1, 2), list(3, 4)))
 #' collect(values(rdd)) # list(2, 4)
 #'}
+# nolint end
 #' @rdname values
 #' @aliases values,RDD
 #' @noRd
@@ -348,6 +356,7 @@ setMethod("reduceByKey",
 #' @return A list of elements of type list(K, V') where V' is the merged value for each key
 #' @seealso reduceByKey
 #' @examples
+# nolint start
 #'\dontrun{
 #' sc <- sparkR.init()
 #' pairs <- list(list(1, 2), list(1.1, 3), list(1, 4))
@@ -355,6 +364,7 @@ setMethod("reduceByKey",
 #' reduced <- reduceByKeyLocally(rdd, "+")
 #' reduced # list(list(1, 6), list(1.1, 3))
 #'}
+# nolint end
 #' @rdname reduceByKeyLocally
 #' @aliases reduceByKeyLocally,RDD,integer-method
 #' @noRd
@@ -412,6 +422,7 @@ setMethod("reduceByKeyLocally",
 #' @return An RDD where each element is list(K, C) where C is the combined type
 #' @seealso groupByKey, reduceByKey
 #' @examples
+# nolint start
 #'\dontrun{
 #' sc <- sparkR.init()
 #' pairs <- list(list(1, 2), list(1.1, 3), list(1, 4))
@@ -420,6 +431,7 @@ setMethod("reduceByKeyLocally",
 #' combined <- collect(parts)
 #' combined[[1]] # Should be a list(1, 6)
 #'}
+# nolint end
 #' @rdname combineByKey
 #' @aliases combineByKey,RDD,ANY,ANY,ANY,integer-method
 #' @noRd
@@ -473,6 +485,7 @@ setMethod("combineByKey",
 #' @return An RDD containing the aggregation result.
 #' @seealso foldByKey, combineByKey
 #' @examples
+# nolint start
 #'\dontrun{
 #' sc <- sparkR.init()
 #' rdd <- parallelize(sc, list(list(1, 1), list(1, 2), list(2, 3), list(2, 4)))
@@ -482,6 +495,7 @@ setMethod("combineByKey",
 #' aggregateByKey(rdd, zeroValue, seqOp, combOp, 2L)
 #' # list(list(1, list(3, 2)), list(2, list(7, 2)))
 #'}
+# nolint end
 #' @rdname aggregateByKey
 #' @aliases aggregateByKey,RDD,ANY,ANY,ANY,integer-method
 #' @noRd
@@ -509,11 +523,13 @@ setMethod("aggregateByKey",
 #' @return An RDD containing the aggregation result.
 #' @seealso aggregateByKey, combineByKey
 #' @examples
+# nolint start
 #'\dontrun{
 #' sc <- sparkR.init()
 #' rdd <- parallelize(sc, list(list(1, 1), list(1, 2), list(2, 3), list(2, 4)))
 #' foldByKey(rdd, 0, "+", 2L) # list(list(1, 3), list(2, 7))
 #'}
+# nolint end
 #' @rdname foldByKey
 #' @aliases foldByKey,RDD,ANY,ANY,integer-method
 #' @noRd
@@ -540,12 +556,14 @@ setMethod("foldByKey",
 #' @return a new RDD containing all pairs of elements with matching keys in
 #' two input RDDs.
 #' @examples
+# nolint start
 #'\dontrun{
 #' sc <- sparkR.init()
 #' rdd1 <- parallelize(sc, list(list(1, 1), list(2, 4)))
 #' rdd2 <- parallelize(sc, list(list(1, 2), list(1, 3)))
 #' join(rdd1, rdd2, 2L) # list(list(1, list(1, 2)), list(1, list(1, 3))
 #'}
+# nolint end
 #' @rdname join-methods
 #' @aliases join,RDD,RDD-method
 #' @noRd
@@ -578,6 +596,7 @@ setMethod("join",
 #' all pairs (k, (v, w)) for (k, w) in rdd2, or the pair (k, (v, NULL))
 #' if no elements in rdd2 have key k.
 #' @examples
+# nolint start
 #'\dontrun{
 #' sc <- sparkR.init()
 #' rdd1 <- parallelize(sc, list(list(1, 1), list(2, 4)))
@@ -585,6 +604,7 @@ setMethod("join",
 #' leftOuterJoin(rdd1, rdd2, 2L)
 #' # list(list(1, list(1, 2)), list(1, list(1, 3)), list(2, list(4, NULL)))
 #'}
+# nolint end
 #' @rdname join-methods
 #' @aliases leftOuterJoin,RDD,RDD-method
 #' @noRd
@@ -616,6 +636,7 @@ setMethod("leftOuterJoin",
 #' all pairs (k, (v, w)) for (k, v) in x, or the pair (k, (NULL, w))
 #' if no elements in x have key k.
 #' @examples
+# nolint start
 #'\dontrun{
 #' sc <- sparkR.init()
 #' rdd1 <- parallelize(sc, list(list(1, 2), list(1, 3)))
@@ -623,6 +644,7 @@ setMethod("leftOuterJoin",
 #' rightOuterJoin(rdd1, rdd2, 2L)
 #' # list(list(1, list(2, 1)), list(1, list(3, 1)), list(2, list(NULL, 4)))
 #'}
+# nolint end
 #' @rdname join-methods
 #' @aliases rightOuterJoin,RDD,RDD-method
 #' @noRd
@@ -655,6 +677,7 @@ setMethod("rightOuterJoin",
 #' (k, w) in y, or the pair (k, (NULL, w))/(k, (v, NULL)) if no elements
 #' in x/y have key k.
 #' @examples
+# nolint start
 #'\dontrun{
 #' sc <- sparkR.init()
 #' rdd1 <- parallelize(sc, list(list(1, 2), list(1, 3), list(3, 3)))
@@ -664,6 +687,7 @@ setMethod("rightOuterJoin",
 #' # list(2, list(NULL, 4)))
 #' # list(3, list(3, NULL)),
 #'}
+# nolint end
 #' @rdname join-methods
 #' @aliases fullOuterJoin,RDD,RDD-method
 #' @noRd
@@ -688,6 +712,7 @@ setMethod("fullOuterJoin",
 #' @return a new RDD containing all pairs of elements with values in a list
 #' in all RDDs.
 #' @examples
+# nolint start
 #'\dontrun{
 #' sc <- sparkR.init()
 #' rdd1 <- parallelize(sc, list(list(1, 1), list(2, 4)))
@@ -695,6 +720,7 @@ setMethod("fullOuterJoin",
 #' cogroup(rdd1, rdd2, numPartitions = 2L)
 #' # list(list(1, list(1, list(2, 3))), list(2, list(list(4), list()))
 #'}
+# nolint end
 #' @rdname cogroup
 #' @aliases cogroup,RDD-method
 #' @noRd
@@ -740,11 +766,13 @@ setMethod("cogroup",
 #' @param numPartitions Number of partitions to create.
 #' @return An RDD where all (k, v) pair elements are sorted.
 #' @examples
+# nolint start
 #'\dontrun{
 #' sc <- sparkR.init()
 #' rdd <- parallelize(sc, list(list(3, 1), list(2, 2), list(1, 3)))
 #' collect(sortByKey(rdd)) # list (list(1, 3), list(2, 2), list(3, 1))
 #'}
+# nolint end
 #' @rdname sortByKey
 #' @aliases sortByKey,RDD,RDD-method
 #' @noRd
@@ -805,6 +833,7 @@ setMethod("sortByKey",
 #' @param numPartitions Number of the partitions in the result RDD.
 #' @return An RDD with the pairs from x whose keys are not in other.
 #' @examples
+# nolint start
 #'\dontrun{
 #' sc <- sparkR.init()
 #' rdd1 <- parallelize(sc, list(list("a", 1), list("b", 4),
@@ -813,6 +842,7 @@ setMethod("sortByKey",
 #' collect(subtractByKey(rdd1, rdd2))
 #' # list(list("b", 4), list("b", 5))
 #'}
+# nolint end
 #' @rdname subtractByKey
 #' @aliases subtractByKey,RDD
 #' @noRd

View file

@@ -17,6 +17,7 @@
 # Utility functions to serialize R objects so they can be read in Java.

+# nolint start
 # Type mapping from R to Java
 #
 # NULL -> Void
@@ -31,6 +32,7 @@
 # list[T] -> Array[T], where T is one of above mentioned types
 # environment -> Map[String, T], where T is a native type
 # jobj -> Object, where jobj is an object created in the backend
+# nolint end

 getSerdeType <- function(object) {
 type <- class(object)[[1]]

View file

@@ -223,14 +223,14 @@ test_that("takeSample() on RDDs", {
 s <- takeSample(data, TRUE, 100L, seed)
 expect_equal(length(s), 100L)
 # Chance of getting all distinct elements is astronomically low, so test we
-# got < 100
+# got less than 100
 expect_true(length(unique(s)) < 100L)
 }
 for (seed in 4:5) {
 s <- takeSample(data, TRUE, 200L, seed)
 expect_equal(length(s), 200L)
 # Chance of getting all distinct elements is still quite low, so test we
-# got < 100
+# got less than 100
 expect_true(length(unique(s)) < 100L)
 }
 })
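The rewording here (and in the partitionBy test that follows) is needed because a comment like "# got < 100" parses as a valid R comparison, so the commented-code linter would flag it; spelling out "less than" makes it unparseable and therefore invisible to the check. This can be verified directly (plain R, assuming the linter keys off parseability):

parse(text = "got < 100")  # parses: a call to `<`, so the comment looks like code
res <- try(parse(text = "got less than 100"), silent = TRUE)
inherits(res, "try-error")  # TRUE: plain prose, never flagged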

View file

@@ -176,8 +176,8 @@ test_that("partitionBy() partitions data correctly", {
 resultRDD <- partitionBy(numPairsRdd, 2L, partitionByMagnitude)

-expected_first <- list(list(1, 100), list(2, 200)) # key < 3
-expected_second <- list(list(4, -1), list(3, 1), list(3, 0)) # key >= 3
+expected_first <- list(list(1, 100), list(2, 200)) # key less than 3
+expected_second <- list(list(4, -1), list(3, 1), list(3, 0)) # key greater than or equal 3

 actual_first <- collectPartition(resultRDD, 0L)
 actual_second <- collectPartition(resultRDD, 1L)

View file

@@ -498,9 +498,11 @@ test_that("table() returns a new DataFrame", {
 expect_equal(count(tabledf), 3)
 dropTempTable(sqlContext, "table1")

+# nolint start
 # Test base::table is working
 #a <- letters[1:3]
 #expect_equal(class(table(a, sample(a))), "table")
+# nolint end
 })

 test_that("toRDD() returns an RRDD", {
@@ -766,8 +768,10 @@ test_that("sample on a DataFrame", {
 sampled3 <- sample_frac(df, FALSE, 0.1, 0) # set seed for predictable result
 expect_true(count(sampled3) < 3)

+# nolint start
 # Test base::sample is working
 #expect_equal(length(sample(1:12)), 12)
+# nolint end
 })

 test_that("select operators", {
@@ -1052,8 +1056,8 @@ test_that("string operators", {
 df2 <- createDataFrame(sqlContext, l2)
 expect_equal(collect(select(df2, locate("aa", df2$a)))[1, 1], 1)
 expect_equal(collect(select(df2, locate("aa", df2$a, 1)))[1, 1], 2)
-expect_equal(collect(select(df2, lpad(df2$a, 8, "#")))[1, 1], "###aaads")
-expect_equal(collect(select(df2, rpad(df2$a, 8, "#")))[1, 1], "aaads###")
+expect_equal(collect(select(df2, lpad(df2$a, 8, "#")))[1, 1], "###aaads") # nolint
+expect_equal(collect(select(df2, rpad(df2$a, 8, "#")))[1, 1], "aaads###") # nolint

 l3 <- list(list(a = "a.b.c.d"))
 df3 <- createDataFrame(sqlContext, l3)
@@ -1259,7 +1263,7 @@ test_that("filter() on a DataFrame", {
 expect_equal(count(filtered6), 2)

 # Test stats::filter is working
-#expect_true(is.ts(filter(1:100, rep(1, 3))))
+#expect_true(is.ts(filter(1:100, rep(1, 3)))) # nolint
 })

 test_that("join() and merge() on a DataFrame", {
@@ -1659,7 +1663,7 @@ test_that("cov() and corr() on a DataFrame", {
 expect_true(abs(result - 1.0) < 1e-12)

 # Test stats::cov is working
-#expect_true(abs(max(cov(swiss)) - 1739.295) < 1e-3)
+#expect_true(abs(max(cov(swiss)) - 1739.295) < 1e-3) # nolint
 })

 test_that("freqItems() on a DataFrame", {

View file

@@ -95,7 +95,9 @@ test_that("cleanClosure on R functions", {
 # TODO(shivaram): length(ls(env)) is 4 here for some reason and `lapply` is included in `env`.
 # Disabling this test till we debug this.
 #
+# nolint start
 # expect_equal(length(ls(env)), 3)  # Only "g", "l" and "f". No "base", "field" or "defUse".
+# nolint end
 expect_true("g" %in% ls(env))
 expect_true("l" %in% ls(env))
 expect_true("f" %in% ls(env))