dc4c351837
## What changes were proposed in this pull request?

Move all existing tests to a non-installed directory so that they are never run by installing the SparkR package.

For a follow-up PR:
- remove all `skip_on_cran()` calls in tests
- clean up the test timer
- improve or change the basic tests that do run on CRAN (if anyone has a suggestion)

It looks like `R CMD build pkg` will still put `pkg/tests` (i.e. the full tests) into the source package, but `R CMD INSTALL` on such a source package does not install these tests (and so `R CMD check` does not run them).

## How was this patch tested?

- [x] unit tests, Jenkins
- [x] AppVeyor
- [x] make a source package, install it, `R CMD check` it, and verify the full tests are not installed or run (see the sketch below)

Author: Felix Cheung <felixcheung_m@hotmail.com>

Closes #18264 from felixcheung/rtestset.
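As a rough illustration of the build/install/check cycle from the checklist, here is a minimal sketch driven from an R session via `system()`. The `pkg` source path and the `SparkR_*.tar.gz` glob are placeholder assumptions for illustration, not the exact names used in this PR.

```r
# Hedged sketch: verify the source tarball still bundles the full tests
# while the installed package does not. "pkg" and the tarball glob below
# are illustrative assumptions, not exact names from this PR.
system("R CMD build pkg")                   # tarball still contains pkg/tests
tarball <- dir(pattern = "^SparkR_.*\\.tar\\.gz$")[1]
system(paste("R CMD INSTALL", tarball))     # installs the package, not the full tests
system(paste("R CMD check", tarball))       # hence check does not run them
# The installed copy should contain no directory with the full test suite:
dir(system.file(package = "SparkR"))
```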
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
library(testthat)

context("MLlib frequent pattern mining")

# Tests for MLlib frequent pattern mining algorithms in SparkR
sparkSession <- sparkR.session(master = sparkRTestMaster, enableHiveSupport = FALSE)
test_that("spark.fpGrowth", {
|
|
data <- selectExpr(createDataFrame(data.frame(items = c(
|
|
"1,2",
|
|
"1,2",
|
|
"1,2,3",
|
|
"1,3"
|
|
))), "split(items, ',') as items")
|
|
|
|
model <- spark.fpGrowth(data, minSupport = 0.3, minConfidence = 0.8, numPartitions = 1)
|
|
|
|
itemsets <- collect(spark.freqItemsets(model))
|
|
|
|
expected_itemsets <- data.frame(
|
|
items = I(list(list("3"), list("3", "1"), list("2"), list("2", "1"), list("1"))),
|
|
freq = c(2, 2, 3, 3, 4)
|
|
)
|
|
|
|
expect_equivalent(expected_itemsets, itemsets)
|
|
|
|
expected_association_rules <- data.frame(
|
|
antecedent = I(list(list("2"), list("3"))),
|
|
consequent = I(list(list("1"), list("1"))),
|
|
confidence = c(1, 1)
|
|
)
|
|
|
|
expect_equivalent(expected_association_rules, collect(spark.associationRules(model)))
|
|
|
|
new_data <- selectExpr(createDataFrame(data.frame(items = c(
|
|
"1,2",
|
|
"1,3",
|
|
"2,3"
|
|
))), "split(items, ',') as items")
|
|
|
|
expected_predictions <- data.frame(
|
|
items = I(list(list("1", "2"), list("1", "3"), list("2", "3"))),
|
|
prediction = I(list(list(), list(), list("1")))
|
|
)
|
|
|
|
expect_equivalent(expected_predictions, collect(predict(model, new_data)))
|
|
|
|
if (not_cran_or_windows_with_hadoop()) {
|
|
modelPath <- tempfile(pattern = "spark-fpm", fileext = ".tmp")
|
|
write.ml(model, modelPath, overwrite = TRUE)
|
|
loaded_model <- read.ml(modelPath)
|
|
|
|
expect_equivalent(
|
|
itemsets,
|
|
collect(spark.freqItemsets(loaded_model)))
|
|
|
|
unlink(modelPath)
|
|
}
|
|
|
|
model_without_numpartitions <- spark.fpGrowth(data, minSupport = 0.3, minConfidence = 0.8)
|
|
expect_equal(
|
|
count(spark.freqItemsets(model_without_numpartitions)),
|
|
count(spark.freqItemsets(model))
|
|
)
|
|
|
|
})
|
|
|
|
sparkR.session.stop()
|