spark-instrumented-optimizer/R/pkg/NAMESPACE

# Imports from base R
importFrom(methods, setGeneric, setMethod, setOldClass)
# Disable native libraries till we figure out how to package them
# See SPARKR-7839
#useDynLib(SparkR, stringHashCode)
# S3 methods exported
export("sparkR.init")
export("sparkR.stop")
export("print.jobj")
# MLlib integration
exportMethods("glm",
"predict",
"summary")
# Job group lifecycle management methods
export("setJobGroup",
"clearJobGroup",
"cancelJobGroup")
exportClasses("DataFrame")
exportMethods("arrange",
"as.data.frame",
"attach",
"cache",
"collect",
"coltypes",
"columns",
"count",
"cov",
"corr",
"crosstab",
"describe",
"dim",
"distinct",
"dropna",
"dtypes",
"except",
"explain",
"fillna",
"filter",
"first",
"freqItems",
"group_by",
"groupBy",
"head",
"insertInto",
"intersect",
"isLocal",
"join",
"limit",
"merge",
"mutate",
"na.omit",
"names",
"ncol",
"nrow",
"orderBy",
"persist",
"printSchema",
"rbind",
"registerTempTable",
"rename",
"repartition",
"sample",
"sample_frac",
"sampleBy",
"saveAsParquetFile",
"saveAsTable",
"saveDF",
"schema",
"select",
"selectExpr",
"show",
"showDF",
"subset",
"summarize",
"summary",
"take",
"transform",
"unionAll",
"unique",
"unpersist",
"where",
"with",
"withColumn",
"withColumnRenamed",
"write.df")
exportClasses("Column")
exportMethods("%in%",
"abs",
"acos",
"add_months",
"alias",
"approxCountDistinct",
"asc",
"ascii",
"asin",
"atan",
"atan2",
"avg",
"base64",
"between",
"bin",
"bitwiseNOT",
"cast",
"cbrt",
"ceil",
"ceiling",
"column",
"concat",
"concat_ws",
"contains",
"conv",
"cos",
"cosh",
"count",
"countDistinct",
"crc32",
"cumeDist",
"date_add",
"date_format",
"date_sub",
"datediff",
"dayofmonth",
"dayofyear",
"denseRank",
"desc",
"endsWith",
"exp",
"explode",
"expm1",
"expr",
"factorial",
"first",
"floor",
"format_number",
"format_string",
"from_unixtime",
"from_utc_timestamp",
"getField",
"getItem",
"greatest",
"hex",
"hour",
"hypot",
"ifelse",
"initcap",
"instr",
"isNaN",
"isNotNull",
"isNull",
"lag",
"last",
"last_day",
"lead",
"least",
"length",
"levenshtein",
"like",
"lit",
"locate",
"log",
"log10",
"log1p",
"log2",
"lower",
"lpad",
"ltrim",
"max",
"md5",
"mean",
"min",
"minute",
"month",
"months_between",
"n",
"n_distinct",
"nanvl",
"negate",
"next_day",
"ntile",
"otherwise",
"percentRank",
"pmod",
"quarter",
"rand",
"randn",
"rank",
"regexp_extract",
"regexp_replace",
"reverse",
"rint",
"rlike",
"round",
"rowNumber",
"rpad",
"rtrim",
"second",
"sha1",
"sha2",
"shiftLeft",
"shiftRight",
"shiftRightUnsigned",
"sign",
"signum",
"sin",
"sinh",
"size",
"soundex",
"sqrt",
"startsWith",
"substr",
"substring_index",
"sum",
"sumDistinct",
"tan",
"tanh",
"toDegrees",
"toRadians",
"to_date",
"to_utc_timestamp",
"translate",
"trim",
"unbase64",
"unhex",
"unix_timestamp",
"upper",
"weekofyear",
"when",
"year")
exportClasses("GroupedData")
exportMethods("agg")
export("sparkRSQL.init",
"sparkRHive.init")
export("as.DataFrame",
"cacheTable",
"clearCache",
"createDataFrame",
"createExternalTable",
"dropTempTable",
"jsonFile",
"loadDF",
"parquetFile",
"read.df",
"sql",
"table",
"tableNames",
"tables",
"uncacheTable")
export("structField",
"structField.jobj",
"structField.character",
"print.structField",
"structType",
"structType.jobj",
"structType.structField",
"print.structType")