[SQL] Move SaveMode to SQL package.

Author: Yin Huai <yhuai@databricks.com>

Closes #4542 from yhuai/moveSaveMode and squashes the following commits:

65a4425 [Yin Huai] Move SaveMode to sql package.
This commit is contained in:
Yin Huai 2015-02-12 15:32:17 -08:00 committed by Michael Armbrust
parent ada993e954
commit c025a46882
12 changed files with 9 additions and 14 deletions

View file

@@ -149,7 +149,7 @@ class DataFrame(object):
def _java_save_mode(self, mode):
"""Returns the Java save mode based on the Python save mode represented by a string.
"""
jSaveMode = self._sc._jvm.org.apache.spark.sql.sources.SaveMode
jSaveMode = self._sc._jvm.org.apache.spark.sql.SaveMode
jmode = jSaveMode.ErrorIfExists
mode = mode.lower()
if mode == "append":

View file

@@ -14,7 +14,7 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.sources;
package org.apache.spark.sql;
/**
* SaveMode is used to specify the expected behavior of saving a DataFrame to a data source.

View file

@@ -27,7 +27,6 @@ import org.apache.spark.api.java.JavaRDD
import org.apache.spark.rdd.RDD
import org.apache.spark.storage.StorageLevel
import org.apache.spark.sql.catalyst.plans.logical._
import org.apache.spark.sql.sources.SaveMode
import org.apache.spark.sql.types.StructType
import org.apache.spark.util.Utils

View file

@@ -26,7 +26,6 @@ import org.apache.spark.sql.catalyst.analysis.{UnresolvedAttribute, UnresolvedSt
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
import org.apache.spark.storage.StorageLevel
import org.apache.spark.sql.sources.SaveMode
import org.apache.spark.sql.types.StructType
private[sql] class IncomputableColumn(protected[sql] val expr: Expression) extends Column {

View file

@@ -21,7 +21,7 @@ import java.io.IOException
import org.apache.hadoop.fs.Path
import org.apache.spark.sql.{DataFrame, SQLContext}
import org.apache.spark.sql.{SaveMode, DataFrame, SQLContext}
import org.apache.spark.sql.sources._
import org.apache.spark.sql.types.StructType

View file

@@ -44,9 +44,8 @@ import org.apache.spark.rdd.{NewHadoopPartition, NewHadoopRDD, RDD}
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.parquet.ParquetTypesConverter._
import org.apache.spark.sql.sources._
import org.apache.spark.sql.{DataFrame, Row, SaveMode, SQLConf, SQLContext}
import org.apache.spark.sql.types.{IntegerType, StructField, StructType, _}
import org.apache.spark.sql.types.StructType._
import org.apache.spark.sql.{DataFrame, Row, SQLConf, SQLContext}
import org.apache.spark.{Partition => SparkPartition, TaskContext, SerializableWritable, Logging, SparkException}

View file

@@ -20,7 +20,7 @@ package org.apache.spark.sql.sources
import scala.language.implicitConversions
import org.apache.spark.Logging
import org.apache.spark.sql.{DataFrame, SQLContext}
import org.apache.spark.sql.{SaveMode, DataFrame, SQLContext}
import org.apache.spark.sql.catalyst.plans.logical._
import org.apache.spark.sql.catalyst.AbstractSparkSQLParser
import org.apache.spark.sql.catalyst.analysis.UnresolvedRelation

View file

@@ -18,7 +18,7 @@ package org.apache.spark.sql.sources
import org.apache.spark.annotation.{Experimental, DeveloperApi}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, Row, SQLContext}
import org.apache.spark.sql.{SaveMode, DataFrame, Row, SQLContext}
import org.apache.spark.sql.catalyst.expressions.{Expression, Attribute}
import org.apache.spark.sql.types.StructType

View file

@@ -22,7 +22,7 @@ import java.io.File
import org.scalatest.BeforeAndAfterAll
import org.apache.spark.sql.catalyst.util
import org.apache.spark.sql.{SQLConf, DataFrame}
import org.apache.spark.sql.{SaveMode, SQLConf, DataFrame}
import org.apache.spark.sql.types._
import org.apache.spark.util.Utils

View file

@@ -21,7 +21,7 @@ import org.apache.spark.annotation.DeveloperApi
import org.apache.spark.sql.catalyst.analysis.EliminateSubQueries
import org.apache.spark.sql.catalyst.util._
import org.apache.spark.sql.sources._
import org.apache.spark.sql.{DataFrame, SQLContext}
import org.apache.spark.sql.{SaveMode, DataFrame, SQLContext}
import org.apache.spark.sql.catalyst.expressions.Row
import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
import org.apache.spark.sql.execution.RunnableCommand

View file

@@ -25,7 +25,7 @@ import java.util.Map;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.spark.sql.sources.SaveMode;
import org.apache.spark.sql.SaveMode;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;

View file

@@ -18,8 +18,6 @@
package org.apache.spark.sql.hive
import java.io.File
import org.apache.spark.sql.sources.SaveMode
import org.scalatest.BeforeAndAfterEach
import org.apache.commons.io.FileUtils