spark-instrumented-optimizer/R/pkg/NAMESPACE
commit ca9f4ebb8e
Author: hlin09 <hlin09pu@gmail.com>
Date:   2015-04-27 15:04:37 -07:00

[SPARK-6991] [SPARKR] Adds support for zipPartitions.

Closes #5568 from hlin09/zipPartitions and squashes the following commits:

12c08a5 [hlin09] Fix comments
d2d32db [hlin09] Merge branch 'master' into zipPartitions
ec56d2f [hlin09] Fix test.
27655d3 [hlin09] Adds support for zipPartitions.

#exportPattern("^[[:alpha:]]+")
exportClasses("RDD")
exportClasses("Broadcast")
exportMethods(
  "aggregateByKey",
  "aggregateRDD",
  "cache",
  "cartesian",
  "checkpoint",
  "coalesce",
  "cogroup",
  "collect",
  "collectAsMap",
  "collectPartition",
  "combineByKey",
  "count",
  "countByKey",
  "countByValue",
  "distinct",
  "Filter",
  "filterRDD",
  "first",
  "flatMap",
  "flatMapValues",
  "fold",
  "foldByKey",
  "foreach",
  "foreachPartition",
  "fullOuterJoin",
  "glom",
  "groupByKey",
  "intersection",
  "join",
  "keyBy",
  "keys",
  "lapply",
  "lapplyPartition",
  "lapplyPartitionsWithIndex",
  "length",
  "leftOuterJoin",
  "lookup",
  "map",
  "mapPartitions",
  "mapPartitionsWithIndex",
  "mapValues",
  "maximum",
  "minimum",
  "numPartitions",
  "partitionBy",
  "persist",
  "pipeRDD",
  "reduce",
  "reduceByKey",
  "reduceByKeyLocally",
  "repartition",
  "rightOuterJoin",
  "sampleByKey",
  "sampleRDD",
  "saveAsObjectFile",
  "saveAsTextFile",
  "sortBy",
  "sortByKey",
  "subtract",
  "subtractByKey",
  "sumRDD",
  "take",
  "takeOrdered",
  "takeSample",
  "top",
  "unionRDD",
  "unpersist",
  "value",
  "values",
  "zipPartitions",
  "zipRDD",
  "zipWithIndex",
  "zipWithUniqueId"
)
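
# Hedged usage sketch for zipPartitions, the method this commit adds (R code is
# commented out here, since a NAMESPACE file may only contain directives).
# Assuming a SparkContext `sc` from sparkR.init(), func receives one list of
# elements per input RDD for each set of corresponding partitions:
#
#   rdd1 <- parallelize(sc, 1:2, 2L)   # partitions: 1 | 2
#   rdd2 <- parallelize(sc, 1:4, 2L)   # partitions: 1:2 | 3:4
#   zipped <- zipPartitions(rdd1, rdd2,
#                           func = function(x, y) { list(list(x, y)) })
#   collect(zipped)
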
# S3 methods exported
export(
  "textFile",
  "objectFile",
  "parallelize",
  "hashCode",
  "includePackage",
  "broadcast",
  "setBroadcastValue",
  "setCheckpointDir"
)
export("sparkR.init")
export("sparkR.stop")
export("print.jobj")
useDynLib(SparkR, stringHashCode)
importFrom(methods, setGeneric, setMethod, setOldClass)
# SparkRSQL
exportClasses("DataFrame")
exportMethods("columns",
"distinct",
"dtypes",
"except",
"explain",
"filter",
"groupBy",
"head",
"insertInto",
"intersect",
"isLocal",
"limit",
"orderBy",
"names",
"printSchema",
"registerTempTable",
"repartition",
"sampleDF",
"saveAsParquetFile",
"saveAsTable",
"saveDF",
"schema",
"select",
"selectExpr",
"show",
"showDF",
"sortDF",
"toJSON",
"toRDD",
"unionAll",
"where",
"withColumn",
"withColumnRenamed")
exportClasses("Column")
exportMethods("abs",
"alias",
"approxCountDistinct",
"asc",
"avg",
"cast",
"contains",
"countDistinct",
"desc",
"endsWith",
"getField",
"getItem",
"isNotNull",
"isNull",
"last",
"like",
"lower",
"max",
"mean",
"min",
"rlike",
"sqrt",
"startsWith",
"substr",
"sum",
"sumDistinct",
"upper")
exportClasses("GroupedData")
exportMethods("agg")
export("sparkRSQL.init",
"sparkRHive.init")
export("cacheTable",
"clearCache",
"createDataFrame",
"createExternalTable",
"dropTempTable",
"jsonFile",
"jsonRDD",
"loadDF",
"parquetFile",
"sql",
"table",
"tableNames",
"tables",
"toDF",
"uncacheTable")
export("sparkRSQL.init",
"sparkRHive.init")
export("structField",
"structField.jobj",
"structField.character",
"print.structField",
"structType",
"structType.jobj",
"structType.structField",
"print.structType")