2015-07-20 23:49:38 -04:00
|
|
|
#
|
|
|
|
# Licensed to the Apache Software Foundation (ASF) under one or more
|
|
|
|
# contributor license agreements. See the NOTICE file distributed with
|
|
|
|
# this work for additional information regarding copyright ownership.
|
|
|
|
# The ASF licenses this file to You under the Apache License, Version 2.0
|
|
|
|
# (the "License"); you may not use this file except in compliance with
|
|
|
|
# the License. You may obtain a copy of the License at
|
|
|
|
#
|
|
|
|
# http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
#
|
|
|
|
# Unless required by applicable law or agreed to in writing, software
|
|
|
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
|
|
# See the License for the specific language governing permissions and
|
|
|
|
# limitations under the License.
|
|
|
|
#
|
|
|
|
|
|
|
|
library(testthat)

context("MLlib functions")

# Tests for MLlib functions in SparkR

# A Hive-less session is sufficient (and faster) for these ML tests.
sparkSession <- sparkR.session(enableHiveSupport = FALSE)
|
2015-07-20 23:49:38 -04:00
|
|
|
|
2016-04-30 02:13:03 -04:00
|
|
|
# Checks that spark.glm handles R formula features (dot, minus, no-intercept,
# interactions, long names) consistently with native stats::glm.
test_that("formula of spark.glm", {
  training <- suppressWarnings(createDataFrame(iris))
  # directly calling the spark API
  # dot minus and intercept vs native glm
  model <- spark.glm(training, Sepal_Width ~ . - Species + 0)
  vals <- collect(select(predict(model, training), "prediction"))
  rVals <- predict(glm(Sepal.Width ~ . - Species + 0, data = iris), iris)
  expect_true(all(abs(rVals - vals) < 1e-6), rVals - vals)

  # feature interaction vs native glm
  model <- spark.glm(training, Sepal_Width ~ Species:Sepal_Length)
  vals <- collect(select(predict(model, training), "prediction"))
  rVals <- predict(glm(Sepal.Width ~ Species:Sepal.Length, data = iris), iris)
  expect_true(all(abs(rVals - vals) < 1e-6), rVals - vals)

  # glm should work with long formula
  training <- suppressWarnings(createDataFrame(iris))
  training$LongLongLongLongLongName <- training$Sepal_Width
  training$VeryLongLongLongLonLongName <- training$Sepal_Length
  training$AnotherLongLongLongLongName <- training$Species
  model <- spark.glm(training, LongLongLongLongLongName ~ VeryLongLongLongLonLongName +
                       AnotherLongLongLongLongName)
  vals <- collect(select(predict(model, training), "prediction"))
  rVals <- predict(glm(Sepal.Width ~ Sepal.Length + Species, data = iris), iris)
  expect_true(all(abs(rVals - vals) < 1e-6), rVals - vals)
})
|
|
|
|
|
|
|
|
# Checks spark.glm predictions for gaussian and poisson families against
# native stats::glm, and that stats::predict is not masked by SparkR.
test_that("spark.glm and predict", {
  training <- suppressWarnings(createDataFrame(iris))
  # gaussian family
  model <- spark.glm(training, Sepal_Width ~ Sepal_Length + Species)
  prediction <- predict(model, training)
  expect_equal(typeof(take(select(prediction, "prediction"), 1)$prediction), "double")
  vals <- collect(select(prediction, "prediction"))
  rVals <- predict(glm(Sepal.Width ~ Sepal.Length + Species, data = iris), iris)
  expect_true(all(abs(rVals - vals) < 1e-6), rVals - vals)

  # poisson family
  model <- spark.glm(training, Sepal_Width ~ Sepal_Length + Species,
                     family = poisson(link = identity))
  prediction <- predict(model, training)
  expect_equal(typeof(take(select(prediction, "prediction"), 1)$prediction), "double")
  vals <- collect(select(prediction, "prediction"))
  rVals <- suppressWarnings(predict(glm(Sepal.Width ~ Sepal.Length + Species,
                                        data = iris, family = poisson(link = identity)), iris))
  expect_true(all(abs(rVals - vals) < 1e-6), rVals - vals)

  # Test stats::predict is working
  x <- rnorm(15)
  y <- x + rnorm(15)
  expect_equal(length(predict(lm(y ~ x))), 15)
})
|
|
|
|
|
|
|
|
# Checks summary() statistics of spark.glm (coefficients, dispersion, deviance,
# degrees of freedom, AIC) against native stats::glm for gaussian, binomial,
# and weighted binomial fits, and that summary() still works on base GLM models.
test_that("spark.glm summary", {
  # gaussian family
  training <- suppressWarnings(createDataFrame(iris))
  stats <- summary(spark.glm(training, Sepal_Width ~ Sepal_Length + Species))

  rStats <- summary(glm(Sepal.Width ~ Sepal.Length + Species, data = iris))

  coefs <- unlist(stats$coefficients)
  rCoefs <- unlist(rStats$coefficients)
  expect_true(all(abs(rCoefs - coefs) < 1e-4))
  expect_true(all(
    rownames(stats$coefficients) ==
      c("(Intercept)", "Sepal_Length", "Species_versicolor", "Species_virginica")))
  expect_equal(stats$dispersion, rStats$dispersion)
  expect_equal(stats$null.deviance, rStats$null.deviance)
  expect_equal(stats$deviance, rStats$deviance)
  expect_equal(stats$df.null, rStats$df.null)
  expect_equal(stats$df.residual, rStats$df.residual)
  expect_equal(stats$aic, rStats$aic)

  # binomial family
  df <- suppressWarnings(createDataFrame(iris))
  training <- df[df$Species %in% c("versicolor", "virginica"), ]
  stats <- summary(spark.glm(training, Species ~ Sepal_Length + Sepal_Width,
                             family = binomial(link = "logit")))

  rTraining <- iris[iris$Species %in% c("versicolor", "virginica"), ]
  rStats <- summary(glm(Species ~ Sepal.Length + Sepal.Width, data = rTraining,
                        family = binomial(link = "logit")))

  coefs <- unlist(stats$coefficients)
  rCoefs <- unlist(rStats$coefficients)
  expect_true(all(abs(rCoefs - coefs) < 1e-4))
  expect_true(all(
    rownames(stats$coefficients) ==
      c("(Intercept)", "Sepal_Length", "Sepal_Width")))
  expect_equal(stats$dispersion, rStats$dispersion)
  expect_equal(stats$null.deviance, rStats$null.deviance)
  expect_equal(stats$deviance, rStats$deviance)
  expect_equal(stats$df.null, rStats$df.null)
  expect_equal(stats$df.residual, rStats$df.residual)
  expect_equal(stats$aic, rStats$aic)

  # Test spark.glm works with weighted dataset
  a1 <- c(0, 1, 2, 3)
  a2 <- c(5, 2, 1, 3)
  w <- c(1, 2, 3, 4)
  b <- c(1, 0, 1, 0)
  data <- as.data.frame(cbind(a1, a2, w, b))
  df <- suppressWarnings(createDataFrame(data))

  stats <- summary(spark.glm(df, b ~ a1 + a2, family = "binomial", weightCol = "w"))
  rStats <- summary(glm(b ~ a1 + a2, family = "binomial", data = data, weights = w))

  coefs <- unlist(stats$coefficients)
  rCoefs <- unlist(rStats$coefficients)
  expect_true(all(abs(rCoefs - coefs) < 1e-3))
  expect_true(all(rownames(stats$coefficients) == c("(Intercept)", "a1", "a2")))
  expect_equal(stats$dispersion, rStats$dispersion)
  expect_equal(stats$null.deviance, rStats$null.deviance)
  expect_equal(stats$deviance, rStats$deviance)
  expect_equal(stats$df.null, rStats$df.null)
  expect_equal(stats$df.residual, rStats$df.residual)
  expect_equal(stats$aic, rStats$aic)

  # Test summary works on base GLM models
  baseModel <- stats::glm(Sepal.Width ~ Sepal.Length + Species, data = iris)
  baseSummary <- summary(baseModel)
  expect_true(abs(baseSummary$deviance - 12.19313) < 1e-4)
})
|
|
|
|
|
|
|
|
# Checks that a spark.glm model round-trips through write.ml/read.ml with its
# summary statistics preserved, and that overwrite semantics are enforced.
test_that("spark.glm save/load", {
  training <- suppressWarnings(createDataFrame(iris))
  m <- spark.glm(training, Sepal_Width ~ Sepal_Length + Species)
  s <- summary(m)

  modelPath <- tempfile(pattern = "spark-glm", fileext = ".tmp")
  write.ml(m, modelPath)
  # Saving to an existing path without overwrite = TRUE must fail.
  expect_error(write.ml(m, modelPath))
  write.ml(m, modelPath, overwrite = TRUE)
  m2 <- read.ml(modelPath)
  s2 <- summary(m2)

  expect_equal(s$coefficients, s2$coefficients)
  expect_equal(rownames(s$coefficients), rownames(s2$coefficients))
  expect_equal(s$dispersion, s2$dispersion)
  expect_equal(s$null.deviance, s2$null.deviance)
  expect_equal(s$deviance, s2$deviance)
  expect_equal(s$df.null, s2$df.null)
  expect_equal(s$df.residual, s2$df.residual)
  expect_equal(s$aic, s2$aic)
  expect_equal(s$iter, s2$iter)
  expect_true(!s$is.loaded)
  expect_true(s2$is.loaded)

  unlink(modelPath)
})
|
|
|
|
|
|
|
|
|
|
|
|
|
2016-04-12 13:51:07 -04:00
|
|
|
# Checks that SparkR's glm() wrapper handles the same formula features as
# spark.glm (dot, minus, no-intercept, interactions, long names) consistently
# with native stats::glm.
test_that("formula of glm", {
  training <- suppressWarnings(createDataFrame(iris))
  # dot minus and intercept vs native glm
  model <- glm(Sepal_Width ~ . - Species + 0, data = training)
  vals <- collect(select(predict(model, training), "prediction"))
  rVals <- predict(glm(Sepal.Width ~ . - Species + 0, data = iris), iris)
  expect_true(all(abs(rVals - vals) < 1e-6), rVals - vals)

  # feature interaction vs native glm
  model <- glm(Sepal_Width ~ Species:Sepal_Length, data = training)
  vals <- collect(select(predict(model, training), "prediction"))
  rVals <- predict(glm(Sepal.Width ~ Species:Sepal.Length, data = iris), iris)
  expect_true(all(abs(rVals - vals) < 1e-6), rVals - vals)

  # glm should work with long formula
  training <- suppressWarnings(createDataFrame(iris))
  training$LongLongLongLongLongName <- training$Sepal_Width
  training$VeryLongLongLongLonLongName <- training$Sepal_Length
  training$AnotherLongLongLongLongName <- training$Species
  model <- glm(LongLongLongLongLongName ~ VeryLongLongLongLonLongName + AnotherLongLongLongLongName,
               data = training)
  vals <- collect(select(predict(model, training), "prediction"))
  rVals <- predict(glm(Sepal.Width ~ Sepal.Length + Species, data = iris), iris)
  expect_true(all(abs(rVals - vals) < 1e-6), rVals - vals)
})
|
|
|
|
|
2016-04-12 13:51:07 -04:00
|
|
|
# Checks SparkR glm() predictions for gaussian and poisson families against
# native stats::glm, and that stats::predict is not masked by SparkR.
test_that("glm and predict", {
  training <- suppressWarnings(createDataFrame(iris))
  # gaussian family
  model <- glm(Sepal_Width ~ Sepal_Length + Species, data = training)
  prediction <- predict(model, training)
  expect_equal(typeof(take(select(prediction, "prediction"), 1)$prediction), "double")
  vals <- collect(select(prediction, "prediction"))
  rVals <- predict(glm(Sepal.Width ~ Sepal.Length + Species, data = iris), iris)
  expect_true(all(abs(rVals - vals) < 1e-6), rVals - vals)

  # poisson family
  model <- glm(Sepal_Width ~ Sepal_Length + Species, data = training,
               family = poisson(link = identity))
  prediction <- predict(model, training)
  expect_equal(typeof(take(select(prediction, "prediction"), 1)$prediction), "double")
  vals <- collect(select(prediction, "prediction"))
  rVals <- suppressWarnings(predict(glm(Sepal.Width ~ Sepal.Length + Species,
                                        data = iris, family = poisson(link = identity)), iris))
  expect_true(all(abs(rVals - vals) < 1e-6), rVals - vals)

  # Test stats::predict is working
  x <- rnorm(15)
  y <- x + rnorm(15)
  expect_equal(length(predict(lm(y ~ x))), 15)
})
|
2016-02-23 18:42:58 -05:00
|
|
|
|
2016-04-15 11:23:51 -04:00
|
|
|
# Checks summary() statistics of SparkR glm() against native stats::glm for
# gaussian and binomial fits, and that summary() still works on base GLM models.
test_that("glm summary", {
  # gaussian family
  training <- suppressWarnings(createDataFrame(iris))
  stats <- summary(glm(Sepal_Width ~ Sepal_Length + Species, data = training))

  rStats <- summary(glm(Sepal.Width ~ Sepal.Length + Species, data = iris))

  coefs <- unlist(stats$coefficients)
  rCoefs <- unlist(rStats$coefficients)
  expect_true(all(abs(rCoefs - coefs) < 1e-4))
  expect_true(all(
    rownames(stats$coefficients) ==
      c("(Intercept)", "Sepal_Length", "Species_versicolor", "Species_virginica")))
  expect_equal(stats$dispersion, rStats$dispersion)
  expect_equal(stats$null.deviance, rStats$null.deviance)
  expect_equal(stats$deviance, rStats$deviance)
  expect_equal(stats$df.null, rStats$df.null)
  expect_equal(stats$df.residual, rStats$df.residual)
  expect_equal(stats$aic, rStats$aic)

  # binomial family
  df <- suppressWarnings(createDataFrame(iris))
  training <- df[df$Species %in% c("versicolor", "virginica"), ]
  stats <- summary(glm(Species ~ Sepal_Length + Sepal_Width, data = training,
                       family = binomial(link = "logit")))

  rTraining <- iris[iris$Species %in% c("versicolor", "virginica"), ]
  rStats <- summary(glm(Species ~ Sepal.Length + Sepal.Width, data = rTraining,
                        family = binomial(link = "logit")))

  coefs <- unlist(stats$coefficients)
  rCoefs <- unlist(rStats$coefficients)
  expect_true(all(abs(rCoefs - coefs) < 1e-4))
  expect_true(all(
    rownames(stats$coefficients) ==
      c("(Intercept)", "Sepal_Length", "Sepal_Width")))
  expect_equal(stats$dispersion, rStats$dispersion)
  expect_equal(stats$null.deviance, rStats$null.deviance)
  expect_equal(stats$deviance, rStats$deviance)
  expect_equal(stats$df.null, rStats$df.null)
  expect_equal(stats$df.residual, rStats$df.residual)
  expect_equal(stats$aic, rStats$aic)

  # Test summary works on base GLM models
  baseModel <- stats::glm(Sepal.Width ~ Sepal.Length + Species, data = iris)
  baseSummary <- summary(baseModel)
  expect_true(abs(baseSummary$deviance - 12.19313) < 1e-4)
})
|
|
|
|
|
2016-04-29 12:42:54 -04:00
|
|
|
# Checks that a SparkR glm() model round-trips through write.ml/read.ml with
# its summary statistics preserved, and that overwrite semantics are enforced.
test_that("glm save/load", {
  training <- suppressWarnings(createDataFrame(iris))
  m <- glm(Sepal_Width ~ Sepal_Length + Species, data = training)
  s <- summary(m)

  modelPath <- tempfile(pattern = "glm", fileext = ".tmp")
  write.ml(m, modelPath)
  # Saving to an existing path without overwrite = TRUE must fail.
  expect_error(write.ml(m, modelPath))
  write.ml(m, modelPath, overwrite = TRUE)
  m2 <- read.ml(modelPath)
  s2 <- summary(m2)

  expect_equal(s$coefficients, s2$coefficients)
  expect_equal(rownames(s$coefficients), rownames(s2$coefficients))
  expect_equal(s$dispersion, s2$dispersion)
  expect_equal(s$null.deviance, s2$null.deviance)
  expect_equal(s$deviance, s2$deviance)
  expect_equal(s$df.null, s2$df.null)
  expect_equal(s$df.residual, s2$df.residual)
  expect_equal(s$aic, s2$aic)
  expect_equal(s$iter, s2$iter)
  expect_true(!s$is.loaded)
  expect_true(s2$is.loaded)

  unlink(modelPath)
})
|
|
|
|
|
2016-04-30 02:13:03 -04:00
|
|
|
# Checks spark.kmeans prediction, fitted(), summary(), model save/load, and
# that stats::kmeans is not masked by SparkR.
test_that("spark.kmeans", {
  newIris <- iris
  newIris$Species <- NULL
  training <- suppressWarnings(createDataFrame(newIris))

  take(training, 1)

  model <- spark.kmeans(data = training, ~ ., k = 2, maxIter = 10, initMode = "random")
  sample <- take(select(predict(model, training), "prediction"), 1)
  expect_equal(typeof(sample$prediction), "integer")
  expect_equal(sample$prediction, 1)

  # Test stats::kmeans is working
  statsModel <- kmeans(x = newIris, centers = 2)
  expect_equal(sort(unique(statsModel$cluster)), c(1, 2))

  # Test fitted works on KMeans
  fitted.model <- fitted(model)
  expect_equal(sort(collect(distinct(select(fitted.model, "prediction")))$prediction), c(0, 1))

  # Test summary works on KMeans
  summary.model <- summary(model)
  cluster <- summary.model$cluster
  expect_equal(sort(collect(distinct(select(cluster, "prediction")))$prediction), c(0, 1))

  # Test model save/load
  modelPath <- tempfile(pattern = "spark-kmeans", fileext = ".tmp")
  write.ml(model, modelPath)
  # Saving to an existing path without overwrite = TRUE must fail.
  expect_error(write.ml(model, modelPath))
  write.ml(model, modelPath, overwrite = TRUE)
  model2 <- read.ml(modelPath)
  summary2 <- summary(model2)
  expect_equal(sort(unlist(summary.model$size)), sort(unlist(summary2$size)))
  expect_equal(summary.model$coefficients, summary2$coefficients)
  expect_true(!summary.model$is.loaded)
  expect_true(summary2$is.loaded)

  unlink(modelPath)
})
|
2016-03-22 17:16:51 -04:00
|
|
|
|
2016-04-30 11:37:56 -04:00
|
|
|
# Checks spark.naiveBayes summary statistics and predictions against values
# reproduced with e1071::naiveBayes on the Titanic dataset, plus model
# save/load and that e1071::naiveBayes itself is callable when installed.
test_that("spark.naiveBayes", {
  # R code to reproduce the result.
  # We do not support instance weights yet. So we ignore the frequencies.
  #
  #' library(e1071)
  #' t <- as.data.frame(Titanic)
  #' t1 <- t[t$Freq > 0, -5]
  #' m <- naiveBayes(Survived ~ ., data = t1)
  #' m
  #' predict(m, t1)
  #
  # -- output of 'm'
  #
  # A-priori probabilities:
  # Y
  #        No       Yes
  # 0.4166667 0.5833333
  #
  # Conditional probabilities:
  #      Class
  # Y           1st       2nd       3rd      Crew
  #   No  0.2000000 0.2000000 0.4000000 0.2000000
  #   Yes 0.2857143 0.2857143 0.2857143 0.1428571
  #
  #      Sex
  # Y     Male Female
  #   No   0.5    0.5
  #   Yes  0.5    0.5
  #
  #      Age
  # Y         Child     Adult
  #   No  0.2000000 0.8000000
  #   Yes 0.4285714 0.5714286
  #
  # -- output of 'predict(m, t1)'
  #
  # Yes Yes Yes Yes No No Yes Yes No No Yes Yes Yes Yes Yes Yes Yes Yes No No Yes Yes No No
  #

  t <- as.data.frame(Titanic)
  t1 <- t[t$Freq > 0, -5]
  df <- suppressWarnings(createDataFrame(t1))
  m <- spark.naiveBayes(df, Survived ~ ., smoothing = 0.0)
  s <- summary(m)
  expect_equal(as.double(s$apriori[1, "Yes"]), 0.5833333, tolerance = 1e-6)
  expect_equal(sum(s$apriori), 1)
  expect_equal(as.double(s$tables["Yes", "Age_Adult"]), 0.5714286, tolerance = 1e-6)
  p <- collect(select(predict(m, df), "prediction"))
  expect_equal(p$prediction, c("Yes", "Yes", "Yes", "Yes", "No", "No", "Yes", "Yes", "No", "No",
                               "Yes", "Yes", "Yes", "Yes", "Yes", "Yes", "Yes", "Yes", "No", "No",
                               "Yes", "Yes", "No", "No"))

  # Test model save/load
  modelPath <- tempfile(pattern = "spark-naiveBayes", fileext = ".tmp")
  write.ml(m, modelPath)
  # Saving to an existing path without overwrite = TRUE must fail.
  expect_error(write.ml(m, modelPath))
  write.ml(m, modelPath, overwrite = TRUE)
  m2 <- read.ml(modelPath)
  s2 <- summary(m2)
  expect_equal(s$apriori, s2$apriori)
  expect_equal(s$tables, s2$tables)

  unlink(modelPath)

  # Test e1071::naiveBayes
  if (requireNamespace("e1071", quietly = TRUE)) {
    expect_that(m <- e1071::naiveBayes(Survived ~ ., data = t1), not(throws_error()))
    expect_equal(as.character(predict(m, t1[1, ])), "Yes")
  }
})
|
2016-03-25 01:29:34 -04:00
|
|
|
|
2016-04-30 02:13:03 -04:00
|
|
|
# Checks spark.survreg coefficients and predictions against values reproduced
# with survival::survreg, plus model save/load and that survival::survreg
# itself is callable when installed.
test_that("spark.survreg", {
  # R code to reproduce the result.
  #
  #' rData <- list(time = c(4, 3, 1, 1, 2, 2, 3), status = c(1, 1, 1, 0, 1, 1, 0),
  #'               x = c(0, 2, 1, 1, 1, 0, 0), sex = c(0, 0, 0, 0, 1, 1, 1))
  #' library(survival)
  #' model <- survreg(Surv(time, status) ~ x + sex, rData)
  #' summary(model)
  #' predict(model, data)
  #
  # -- output of 'summary(model)'
  #
  #              Value Std. Error     z        p
  # (Intercept)  1.315      0.270  4.88 1.07e-06
  # x           -0.190      0.173 -1.10 2.72e-01
  # sex         -0.253      0.329 -0.77 4.42e-01
  # Log(scale)  -1.160      0.396 -2.93 3.41e-03
  #
  # -- output of 'predict(model, data)'
  #
  #        1        2        3        4        5        6        7
  # 3.724591 2.545368 3.079035 3.079035 2.390146 2.891269 2.891269
  #
  data <- list(list(4, 1, 0, 0), list(3, 1, 2, 0), list(1, 1, 1, 0),
               list(1, 0, 1, 0), list(2, 1, 1, 1), list(2, 1, 0, 1), list(3, 0, 0, 1))
  df <- createDataFrame(data, c("time", "status", "x", "sex"))
  model <- spark.survreg(df, Surv(time, status) ~ x + sex)
  stats <- summary(model)
  coefs <- as.vector(stats$coefficients[, 1])
  rCoefs <- c(1.3149571, -0.1903409, -0.2532618, -1.1599800)
  expect_equal(coefs, rCoefs, tolerance = 1e-4)
  expect_true(all(
    rownames(stats$coefficients) ==
      c("(Intercept)", "x", "sex", "Log(scale)")))
  p <- collect(select(predict(model, df), "prediction"))
  expect_equal(p$prediction, c(3.724591, 2.545368, 3.079035, 3.079035,
                               2.390146, 2.891269, 2.891269), tolerance = 1e-4)

  # Test model save/load
  modelPath <- tempfile(pattern = "spark-survreg", fileext = ".tmp")
  write.ml(model, modelPath)
  # Saving to an existing path without overwrite = TRUE must fail.
  expect_error(write.ml(model, modelPath))
  write.ml(model, modelPath, overwrite = TRUE)
  model2 <- read.ml(modelPath)
  stats2 <- summary(model2)
  coefs2 <- as.vector(stats2$coefficients[, 1])
  expect_equal(coefs, coefs2)
  expect_equal(rownames(stats$coefficients), rownames(stats2$coefficients))

  unlink(modelPath)

  # Test survival::survreg
  if (requireNamespace("survival", quietly = TRUE)) {
    rData <- list(time = c(4, 3, 1, 1, 2, 2, 3), status = c(1, 1, 1, 0, 1, 1, 0),
                  x = c(0, 2, 1, 1, 1, 0, 0), sex = c(0, 0, 0, 0, 1, 1, 1))
    # expect_error(..., NA) asserts the call raises no error.
    expect_error(
      model <- survival::survreg(formula = survival::Surv(time, status) ~ x + sex, data = rData),
      NA)
    expect_equal(predict(model, rData)[[1]], 3.724591, tolerance = 1e-4)
  }
})
|
2016-07-17 22:02:21 -04:00
|
|
|
|
2016-08-17 09:15:04 -04:00
|
|
|
# Checks spark.isoreg (antitonic fit via isotonic = FALSE, weight column,
# single-feature formula restriction), prediction, and model save/load.
test_that("spark.isotonicRegression", {
  label <- c(7.0, 5.0, 3.0, 5.0, 1.0)
  feature <- c(0.0, 1.0, 2.0, 3.0, 4.0)
  weight <- c(1.0, 1.0, 1.0, 1.0, 1.0)
  data <- as.data.frame(cbind(label, feature, weight))
  df <- suppressWarnings(createDataFrame(data))

  model <- spark.isoreg(df, label ~ feature, isotonic = FALSE,
                        weightCol = "weight")
  # only allow one variable on the right hand side of the formula
  expect_error(model2 <- spark.isoreg(df, ~., isotonic = FALSE))
  result <- summary(model, df)
  expect_equal(result$predictions, list(7, 5, 4, 4, 1))

  # Test model prediction
  predict_data <- list(list(-2.0), list(-1.0), list(0.5),
                       list(0.75), list(1.0), list(2.0), list(9.0))
  predict_df <- createDataFrame(predict_data, c("feature"))
  predict_result <- collect(select(predict(model, predict_df), "prediction"))
  expect_equal(predict_result$prediction, c(7.0, 7.0, 6.0, 5.5, 5.0, 4.0, 1.0))

  # Test model save/load
  modelPath <- tempfile(pattern = "spark-isotonicRegression", fileext = ".tmp")
  write.ml(model, modelPath)
  # Saving to an existing path without overwrite = TRUE must fail.
  expect_error(write.ml(model, modelPath))
  write.ml(model, modelPath, overwrite = TRUE)
  model2 <- read.ml(modelPath)
  expect_equal(result, summary(model2, df))

  unlink(modelPath)
})
|
|
|
|
|
2016-07-17 22:02:21 -04:00
|
|
|
# Tear down the Spark session started at the top of this file.
sparkR.session.stop()
|