spark-instrumented-optimizer/R/pkg/NAMESPACE
1146c534d6 [SPARK-14353] Dataset Time Window window API for R
## What changes were proposed in this pull request?

The `window` function was added to Dataset with [this PR](https://github.com/apache/spark/pull/12008).
This PR adds the R API for this function.

With this PR, SQL, Java, and Scala will share the same APIs, in that users can use:
 - `window(timeColumn, windowDuration)`
 - `window(timeColumn, windowDuration, slideDuration)`
 - `window(timeColumn, windowDuration, slideDuration, startTime)`

In Python and R, users can access all of the APIs above, but in addition they can supply the `startTime` as a named argument without providing the `slideDuration`. For example, in R:
 - `window(timeColumn, windowDuration, startTime=...)`

In this case, we will generate tumbling windows, as the sketch below shows.
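For illustration, here is a minimal SparkR sketch of the call forms, assuming an initialized SparkR session and a hypothetical DataFrame `df` with a timestamp column `eventTime`:

```r
# `df` and `eventTime` are hypothetical; any DataFrame with a
# timestamp column works.

# Tumbling windows: count events per 5-minute window
tumbling <- count(groupBy(df, window(df$eventTime, "5 minutes")))

# Sliding windows: 5-minute windows that slide every 1 minute
sliding <- count(groupBy(df, window(df$eventTime, "5 minutes", "1 minute")))

# The R/Python-only form: tumbling windows offset by 30 seconds,
# passing startTime by name without a slideDuration
offset <- count(groupBy(df, window(df$eventTime, "5 minutes", startTime = "30 seconds")))
```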

## How was this patch tested?

Unit tests + manual tests

Author: Burak Yavuz <brkyvz@gmail.com>

Closes #12141 from brkyvz/R-windows.
2016-04-05 17:21:41 -07:00

# Imports from base R
importFrom(methods, setGeneric, setMethod, setOldClass)
# Disable native libraries till we figure out how to package it
# See SPARKR-7839
#useDynLib(SparkR, stringHashCode)
# S3 methods exported
export("sparkR.init")
export("sparkR.stop")
export("print.jobj")
# MLlib integration
exportMethods("glm",
"predict",
"summary",
"kmeans",
"fitted",
"naiveBayes",
"survreg")
# Job group lifecycle management methods
export("setJobGroup",
"clearJobGroup",
"cancelJobGroup")
exportClasses("DataFrame")
exportMethods("arrange",
"as.data.frame",
"attach",
"cache",
"collect",
"colnames",
"colnames<-",
"coltypes",
"coltypes<-",
"columns",
"count",
"cov",
"corr",
"covar_samp",
"covar_pop",
"crosstab",
"describe",
"dim",
"distinct",
"drop",
"dropDuplicates",
"dropna",
"dtypes",
"except",
"explain",
"fillna",
"filter",
"first",
"freqItems",
"group_by",
"groupBy",
"head",
"insertInto",
"intersect",
"isLocal",
"join",
"limit",
"merge",
"mutate",
"na.omit",
"names",
"names<-",
"ncol",
"nrow",
"orderBy",
"persist",
"printSchema",
"rbind",
"registerTempTable",
"rename",
"repartition",
"sample",
"sample_frac",
"sampleBy",
"saveAsParquetFile",
"saveAsTable",
"saveDF",
"schema",
"select",
"selectExpr",
"show",
"showDF",
"subset",
"summarize",
"summary",
"take",
"transform",
"unionAll",
"unique",
"unpersist",
"where",
"with",
"withColumn",
"withColumnRenamed",
"write.df",
"write.json",
"write.parquet",
"write.text")
exportClasses("Column")
exportMethods("%in%",
"abs",
"acos",
"add_months",
"alias",
"approxCountDistinct",
"approxQuantile",
"array_contains",
"asc",
"ascii",
"asin",
"atan",
"atan2",
"avg",
"base64",
"between",
"bin",
"bitwiseNOT",
"cast",
"cbrt",
"ceil",
"ceiling",
"column",
"concat",
"concat_ws",
"contains",
"conv",
"cos",
"cosh",
"count",
"countDistinct",
"crc32",
"hash",
"cume_dist",
"date_add",
"date_format",
"date_sub",
"datediff",
"dayofmonth",
"dayofyear",
"decode",
"dense_rank",
"desc",
"encode",
"endsWith",
"exp",
"explode",
"expm1",
"expr",
"factorial",
"first",
"floor",
"format_number",
"format_string",
"from_unixtime",
"from_utc_timestamp",
"getField",
"getItem",
"greatest",
"hex",
"hour",
"hypot",
"ifelse",
"initcap",
"instr",
"isNaN",
"isNotNull",
"isNull",
"kurtosis",
"lag",
"last",
"last_day",
"lead",
"least",
"length",
"levenshtein",
"like",
"lit",
"locate",
"log",
"log10",
"log1p",
"log2",
"lower",
"lpad",
"ltrim",
"max",
"md5",
"mean",
"min",
"minute",
"month",
"months_between",
"n",
"n_distinct",
"nanvl",
"negate",
"next_day",
"ntile",
"otherwise",
"percent_rank",
"pmod",
"quarter",
"rand",
"randn",
"rank",
"regexp_extract",
"regexp_replace",
"reverse",
"rint",
"rlike",
"round",
"row_number",
"rpad",
"rtrim",
"second",
"sha1",
"sha2",
"shiftLeft",
"shiftRight",
"shiftRightUnsigned",
"sd",
"sign",
"signum",
"sin",
"sinh",
"size",
"skewness",
"sort_array",
"soundex",
"stddev",
"stddev_pop",
"stddev_samp",
"struct",
"sqrt",
"startsWith",
"substr",
"substring_index",
"sum",
"sumDistinct",
"tan",
"tanh",
"toDegrees",
"toRadians",
"to_date",
"to_utc_timestamp",
"translate",
"trim",
"unbase64",
"unhex",
"unix_timestamp",
"upper",
"var",
"variance",
"var_pop",
"var_samp",
"weekofyear",
"when",
"window",
"year")
exportClasses("GroupedData")
exportMethods("agg")
export("sparkRSQL.init",
"sparkRHive.init")
export("as.DataFrame",
"cacheTable",
"clearCache",
"createDataFrame",
"createExternalTable",
"dropTempTable",
"jsonFile",
"loadDF",
"parquetFile",
"read.df",
"read.json",
"read.parquet",
"read.text",
"sql",
"str",
"tableToDF",
"tableNames",
"tables",
"uncacheTable")
export("structField",
"structField.jobj",
"structField.character",
"print.structField",
"structType",
"structType.jobj",
"structType.structField",
"print.structType")