5723d26d7e
Parquet hard coded a JUL logger which always writes to stdout. This PR redirects it via SLF4j JUL bridge handler, so that we can control Parquet logs via `log4j.properties`. This solution is inspired by https://github.com/Parquet/parquet-mr/issues/390#issuecomment-46064909. Author: Cheng Lian <lian@databricks.com> Closes #8196 from liancheng/spark-8118/redirect-parquet-jul.
19 lines
949 B
Plaintext
# Set everything to be logged to the console
log4j.rootCategory=INFO, console
log4j.appender.console=org.apache.log4j.ConsoleAppender
log4j.appender.console.target=System.err
log4j.appender.console.layout=org.apache.log4j.PatternLayout
log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{1}: %m%n

# Settings to quiet third party logs that are too verbose
log4j.logger.org.spark-project.jetty=WARN
log4j.logger.org.spark-project.jetty.util.component.AbstractLifeCycle=ERROR
log4j.logger.org.apache.spark.repl.SparkIMain$exprTyper=INFO
log4j.logger.org.apache.spark.repl.SparkILoop$SparkILoopInterpreter=INFO
log4j.logger.org.apache.parquet=ERROR
log4j.logger.parquet=ERROR

# SPARK-9183: Settings to avoid annoying messages when looking up nonexistent UDFs in SparkSQL with Hive support
log4j.logger.org.apache.hadoop.hive.metastore.RetryingHMSHandler=FATAL
log4j.logger.org.apache.hadoop.hive.ql.exec.FunctionRegistry=ERROR