6481d27425
Put all public APIs in __all__ and re-export them in pyspark/__init__.py, so that `pydoc pyspark` shows the documentation for every public API. The __all__ lists can also be used by other tools (such as Sphinx or Epydoc) to generate documentation for public APIs only.

Author: Davies Liu <davies.liu@gmail.com>

Closes #2205 from davies/public and squashes the following commits:

c6c5567 [Davies Liu] fix message
f7b35be [Davies Liu] put SchemeRDD, Row in pyspark.sql module
7e3016a [Davies Liu] add __all__ in mllib
6281b48 [Davies Liu] fix doc for SchemaRDD
6caab21 [Davies Liu] add public interfaces into pyspark.__init__.py
78 lines
3.1 KiB
Python
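Once the public names are re-exported here and listed in __all__, documentation tools and wildcard imports pick up exactly those names. A minimal sketch of that behavior (assuming pyspark is importable on your PYTHONPATH; the printed list simply mirrors the __all__ defined at the bottom of this file):

# Illustrative sketch only, not part of the file below.
import pyspark

# pydoc, Sphinx, and Epydoc consult __all__ to decide which names are public:
print(pyspark.__all__)
# ['SparkConf', 'SparkContext', 'SparkFiles', 'RDD', 'StorageLevel', 'Broadcast',
#  'Accumulator', 'AccumulatorParam', 'MarshalSerializer', 'PickleSerializer']

# Wildcard imports are restricted to the same list:
# from pyspark import *

# The rendered module documentation can be browsed from a shell with:
#   pydoc pyspark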
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

"""
|
|
PySpark is the Python API for Spark.
|
|
|
|
Public classes:
|
|
|
|
- L{SparkContext<pyspark.context.SparkContext>}
|
|
Main entry point for Spark functionality.
|
|
- L{RDD<pyspark.rdd.RDD>}
|
|
A Resilient Distributed Dataset (RDD), the basic abstraction in Spark.
|
|
- L{Broadcast<pyspark.broadcast.Broadcast>}
|
|
A broadcast variable that gets reused across tasks.
|
|
- L{Accumulator<pyspark.accumulators.Accumulator>}
|
|
An "add-only" shared variable that tasks can only add values to.
|
|
- L{SparkConf<pyspark.conf.SparkConf>}
|
|
For configuring Spark.
|
|
- L{SparkFiles<pyspark.files.SparkFiles>}
|
|
Access files shipped with jobs.
|
|
- L{StorageLevel<pyspark.storagelevel.StorageLevel>}
|
|
Finer-grained cache persistence levels.
|
|
|
|
Spark SQL:
|
|
- L{SQLContext<pyspark.sql.SQLContext>}
|
|
Main entry point for SQL functionality.
|
|
- L{SchemaRDD<pyspark.sql.SchemaRDD>}
|
|
A Resilient Distributed Dataset (RDD) with Schema information for the data contained. In
|
|
addition to normal RDD operations, SchemaRDDs also support SQL.
|
|
- L{Row<pyspark.sql.Row>}
|
|
A Row of data returned by a Spark SQL query.
|
|
|
|
Hive:
|
|
- L{HiveContext<pyspark.context.HiveContext>}
|
|
Main entry point for accessing data stored in Apache Hive..
|
|
"""
|
|
|
|
# The following block allows us to import python's random instead of mllib.random for scripts in
# mllib that depend on top level pyspark packages, which transitively depend on python's random.
# Since Python's import logic looks for modules in the current package first, we eliminate
# mllib.random as a candidate for C{import random} by removing the first search path, the script's
# location, in order to force the loader to look in Python's top-level modules for C{random}.
import sys
s = sys.path.pop(0)
import random
sys.path.insert(0, s)

from pyspark.conf import SparkConf
from pyspark.context import SparkContext
from pyspark.rdd import RDD
from pyspark.files import SparkFiles
from pyspark.storagelevel import StorageLevel
from pyspark.accumulators import Accumulator, AccumulatorParam
from pyspark.broadcast import Broadcast
from pyspark.serializers import MarshalSerializer, PickleSerializer

# for backward compatibility
from pyspark.sql import SQLContext, HiveContext, SchemaRDD, Row

__all__ = [
    "SparkConf", "SparkContext", "SparkFiles", "RDD", "StorageLevel", "Broadcast",
    "Accumulator", "AccumulatorParam", "MarshalSerializer", "PickleSerializer",
]
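For reference, here is a minimal usage sketch of the public classes documented in the module docstring above. The master URL, sample data, and app name are illustrative only, and the Spark SQL names are the pre-DataFrame API (SQLContext, SchemaRDD, Row) that existed at this point in Spark's history:

# Hypothetical example; assumes a local Spark installation of this era (1.1/1.2).
from pyspark import SparkConf, SparkContext, StorageLevel
from pyspark.sql import SQLContext, Row

conf = SparkConf().setMaster("local[2]").setAppName("public-api-demo")
sc = SparkContext(conf=conf)

# RDD is the basic abstraction; Broadcast and Accumulator are created from the context.
rdd = sc.parallelize(range(10)).persist(StorageLevel.MEMORY_ONLY)
factor = sc.broadcast(3)        # broadcast variable, reused across tasks
calls = sc.accumulator(0)       # "add-only" shared variable

def scale(x):
    calls.add(1)                # tasks may only add to the accumulator
    return x * factor.value

scaled = rdd.map(scale).collect()

# Spark SQL entry point and Row, re-exported from pyspark.sql above.
sqlCtx = SQLContext(sc)
people = sc.parallelize([Row(name="Alice", age=1), Row(name="Bob", age=2)])
schema_rdd = sqlCtx.inferSchema(people)       # returns a SchemaRDD at this version
schema_rdd.registerTempTable("people")
print(sqlCtx.sql("SELECT name FROM people WHERE age < 2").collect())

sc.stop()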