diff --git a/core/src/main/scala/org/apache/spark/SparkConf.scala b/core/src/main/scala/org/apache/spark/SparkConf.scala
index b5e5d6f1465f..936e7dc7bdb5 100644
--- a/core/src/main/scala/org/apache/spark/SparkConf.scala
+++ b/core/src/main/scala/org/apache/spark/SparkConf.scala
@@ -478,7 +478,12 @@ private[spark] object SparkConf extends Logging {
       DeprecatedConfig("spark.kryoserializer.buffer.mb", "1.4",
         "Please use spark.kryoserializer.buffer instead. The default value for " +
         "spark.kryoserializer.buffer.mb was previously specified as '0.064'. Fractional values " +
-        "are no longer accepted. To specify the equivalent now, one may use '64k'.")
+        "are no longer accepted. To specify the equivalent now, one may use '64k'."),
+      DeprecatedConfig("spark.cleaner.ttl", "1.4",
+        "TTL-based metadata cleaning is no longer necessary in recent Spark versions " +
+        "and can lead to confusing errors if metadata is deleted for entities that are still in " +
+        "use. Except in extremely special circumstances, you should remove this setting and rely " +
+        "on Spark's reference-tracking-based cleanup instead. See SPARK-7689 for more details.")
     )
 
     Map(configs.map { cfg => (cfg.key -> cfg) }:_*)
diff --git a/docs/configuration.md b/docs/configuration.md
index 0de824546c75..9dc42c32a3c6 100644
--- a/docs/configuration.md
+++ b/docs/configuration.md
@@ -722,17 +722,6 @@ Apart from these, the following properties are also available, and may be useful
     Which broadcast implementation to use.
   </td>
 </tr>
-<tr>
-  <td><code>spark.cleaner.ttl</code></td>
-  <td>(infinite)</td>
-  <td>
-    Duration (seconds) of how long Spark will remember any metadata (stages generated, tasks
-    generated, etc.). Periodic cleanups will ensure that metadata older than this duration will be
-    forgotten. This is useful for running Spark for many hours / days (for example, running 24/7 in
-    case of Spark Streaming applications). Note that any RDD that persists in memory for more than
-    this duration will be cleared as well.
-  </td>
-</tr>
 <tr>
   <td><code>spark.executor.cores</code></td>
   <td>1 in YARN mode, all the available cores on the worker in standalone mode.</td>
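
Not part of the patch: a minimal Scala sketch of what the new DeprecatedConfig entry means for user code, assuming a Spark 1.4-era build in which SparkConf.set consults deprecatedConfigs and logs a warning for deprecated keys. The object name, app name, and TTL value below are hypothetical, chosen only for illustration.

    import org.apache.spark.SparkConf

    object CleanerTtlDemo {
      def main(args: Array[String]): Unit = {
        // Setting the deprecated key still takes effect; Spark only logs the
        // deprecation message registered in deprecatedConfigs above, steering
        // users toward reference-tracking cleanup per SPARK-7689.
        val conf = new SparkConf()
          .setAppName("cleaner-ttl-demo")    // hypothetical app name
          .set("spark.cleaner.ttl", "3600")  // hypothetical TTL, in seconds

        // The value remains readable: deprecation warns, it does not drop the key.
        println(conf.get("spark.cleaner.ttl"))
      }
    }

In other words, the patch is a soft deprecation: existing jobs that set spark.cleaner.ttl keep running unchanged, and only the log message nudges users to delete the setting.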