[SPARK-22788][STREAMING] Use correct hadoop config for fs append support.
Still look at the old one in case any Spark user is setting it explicitly, though. Author: Marcelo Vanzin <vanzin@cloudera.com> Closes #19983 from vanzin/SPARK-22788.
Parent commit: 9962390af7 — this commit: 7570eab6be
@@ -29,7 +29,9 @@ private[streaming] object HdfsUtils {
     // If the file exists and we have append support, append instead of creating a new file
     val stream: FSDataOutputStream = {
       if (dfs.isFile(dfsPath)) {
-        if (conf.getBoolean("hdfs.append.support", false) || dfs.isInstanceOf[RawLocalFileSystem]) {
+        if (conf.getBoolean("dfs.support.append", true) ||
+            conf.getBoolean("hdfs.append.support", false) ||
+            dfs.isInstanceOf[RawLocalFileSystem]) {
           dfs.append(dfsPath)
         } else {
           throw new IllegalStateException("File exists and there is no append support!")
Loading…
Reference in a new issue