diff --git a/core/src/main/scala/org/apache/spark/internal/config/package.scala b/core/src/main/scala/org/apache/spark/internal/config/package.scala index 47019c04aada2..c5646d2956aeb 100644 --- a/core/src/main/scala/org/apache/spark/internal/config/package.scala +++ b/core/src/main/scala/org/apache/spark/internal/config/package.scala @@ -1386,7 +1386,6 @@ package object config { private[spark] val SHUFFLE_ACCURATE_BLOCK_SKEWED_FACTOR = ConfigBuilder("spark.shuffle.accurateBlockSkewedFactor") - .internal() .doc("A shuffle block is considered as skewed and will be accurately recorded in " + "HighlyCompressedMapStatus if its size is larger than this factor multiplying " + "the median shuffle block size or SHUFFLE_ACCURATE_BLOCK_THRESHOLD. It is " + diff --git a/docs/configuration.md b/docs/configuration.md index 73d57b687ca2a..3c83ed92c1280 100644 --- a/docs/configuration.md +++ b/docs/configuration.md @@ -1232,6 +1232,19 @@ Apart from these, the following properties are also available, and may be useful
spark.shuffle.accurateBlockSkewedFactor
HighlyCompressedMapStatus
if its size is larger than this factor multiplied by
+ the median shuffle block size or spark.shuffle.accurateBlockThreshold
. It is
+ recommended to set this parameter to be the same as
+ spark.sql.adaptive.skewJoin.skewedPartitionFactor
. Set to -1.0 to disable this
+ feature (this is the default, i.e. the feature is disabled by default).
+ spark.shuffle.registration.timeout