8c198e246d
## What changes were proposed in this pull request?

This PR introduces the new SparkSession API for SparkR: `sparkR.session.getOrCreate()` and `sparkR.session.stop()`. "getOrCreate" is a bit unusual in R, but it is important to name this clearly.

The SparkR implementation should ensure that:
- SparkSession is the main entry point (vs SparkContext, due to the limited functionality supported with SparkContext in SparkR)
- SparkSession replaces SQLContext and HiveContext (both are wrappers around SparkSession, and because of API changes, supporting all three would be considerably more work)
- Changes to SparkSession are mostly transparent to users due to SPARK-10903
- Full backward compatibility is expected - users should be able to initialize everything just as in Spark 1.6.1 (`sparkR.init()`), but with a deprecation warning
- Mostly cosmetic changes to the parameter list - users should be able to move to `sparkR.session.getOrCreate()` easily
- An advanced syntax with named parameters (aka varargs, aka "...") is supported; that should be closer to the Builder syntax in Scala/Python (which unfortunately does not work in R, because it would look like this: `enableHiveSupport(config(config(master(appName(builder(), "foo"), "local"), "first", "value"), "next", "value"))`)
- Updating config on an existing SparkSession is supported; the behavior is the same as in Python, where config is applied to both SparkContext and SparkSession
- Some SparkSession changes are not matched in SparkR, mostly because they would be breaking API changes: the `catalog` object, `createOrReplaceTempView`
- Other SQLContext workarounds are replicated in SparkR, e.g. `tables`, `tableNames`
- The `sparkR` shell is updated to use the SparkSession entry point (`sqlContext` is removed, just like with Scala/Python)
- All tests are updated to use the SparkSession entry point
- A bug in `read.jdbc` is fixed

TODO
- [x] Add more tests
- [ ] Separate PR - update all roxygen2 doc coding examples
- [ ] Separate PR - update SparkR programming guide

## How was this patch tested?

Unit tests, manual tests.

shivaram sun-rui rxin

Author: Felix Cheung <felixcheung_m@hotmail.com>
Author: felixcheung <felixcheung_m@hotmail.com>

Closes #13635 from felixcheung/rsparksession.
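For context, here is a minimal sketch of the entry-point change described above. The call shape follows the PR description; the exact parameter names (`master`, `appName`, `enableHiveSupport`, and the config key passed via "...") are assumptions modeled on the Scala/Python builder options, not confirmed signatures.

```r
library(SparkR)

# Old entry point: still works, but now emits a deprecation warning.
# sc <- sparkR.init(master = "local")

# New entry point: named parameters stand in for the Scala/Python builder
# chain (builder()/appName()/master()/config()/enableHiveSupport()), which
# does not compose readably in R.
sparkR.session.getOrCreate(master = "local",
                           appName = "foo",
                           enableHiveSupport = FALSE)

# Calling getOrCreate again with config updates the existing session; as in
# Python, the new config is applied to both SparkContext and SparkSession.
# (The config key here is illustrative.)
sparkR.session.getOrCreate(spark.executor.memory = "2g")

sparkR.session.stop()
```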
91 lines | 3.1 KiB | R
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

context("functions on binary files")

# These RDD tests need a JavaSparkContext handle; with SparkSession as the
# entry point, it is obtained from the session via the SQLUtils helper.
sparkSession <- sparkR.session()
sc <- callJStatic("org.apache.spark.sql.api.r.SQLUtils", "getJavaSparkContext", sparkSession)

mockFile <- c("Spark is pretty.", "Spark is awesome.")

test_that("saveAsObjectFile()/objectFile() following textFile() works", {
  fileName1 <- tempfile(pattern = "spark-test", fileext = ".tmp")
  fileName2 <- tempfile(pattern = "spark-test", fileext = ".tmp")
  writeLines(mockFile, fileName1)

  rdd <- textFile(sc, fileName1, 1)
  saveAsObjectFile(rdd, fileName2)
  rdd <- objectFile(sc, fileName2)
  expect_equal(collect(rdd), as.list(mockFile))

  unlink(fileName1)
  unlink(fileName2, recursive = TRUE)
})

test_that("saveAsObjectFile()/objectFile() works on a parallelized list", {
  fileName <- tempfile(pattern = "spark-test", fileext = ".tmp")

  l <- list(1, 2, 3)
  rdd <- parallelize(sc, l, 1)
  saveAsObjectFile(rdd, fileName)
  rdd <- objectFile(sc, fileName)
  expect_equal(collect(rdd), l)

  unlink(fileName, recursive = TRUE)
})

test_that("saveAsObjectFile()/objectFile() following RDD transformations works", {
  fileName1 <- tempfile(pattern = "spark-test", fileext = ".tmp")
  fileName2 <- tempfile(pattern = "spark-test", fileext = ".tmp")
  writeLines(mockFile, fileName1)

  rdd <- textFile(sc, fileName1)

  words <- flatMap(rdd, function(line) { strsplit(line, " ")[[1]] })
  wordCount <- lapply(words, function(word) { list(word, 1L) })

  counts <- reduceByKey(wordCount, "+", 2L)

  saveAsObjectFile(counts, fileName2)
  counts <- objectFile(sc, fileName2)

  output <- collect(counts)
  expected <- list(list("awesome.", 1), list("Spark", 2), list("pretty.", 1),
                   list("is", 2))
  expect_equal(sortKeyValueList(output), sortKeyValueList(expected))

  unlink(fileName1)
  unlink(fileName2, recursive = TRUE)
})

test_that("saveAsObjectFile()/objectFile() works with multiple paths", {
  fileName1 <- tempfile(pattern = "spark-test", fileext = ".tmp")
  fileName2 <- tempfile(pattern = "spark-test", fileext = ".tmp")

  rdd1 <- parallelize(sc, "Spark is pretty.")
  saveAsObjectFile(rdd1, fileName1)
  rdd2 <- parallelize(sc, "Spark is awesome.")
  saveAsObjectFile(rdd2, fileName2)

  rdd <- objectFile(sc, c(fileName1, fileName2))
  expect_equal(count(rdd), 2)

  unlink(fileName1, recursive = TRUE)
  unlink(fileName2, recursive = TRUE)
})