@@ -126,7 +126,6 @@ function totalDurationAlpha(totalGCTime, totalDuration) {
     (Math.min(totalGCTime / totalDuration + 0.5, 1)) : 1;
 }
 
-// When GCTimePercent is edited change ToolTips.TASK_TIME to match
Member:
hmm, why do you propose to remove this?

Contributor Author:
ToolTips.TASK_TIME - "Shaded red when garbage collection (GC) time is over 10% of task time"

That tooltip is removed in this PR, so I think this comment, which references it, needs to be removed as well. Meanwhile, I did not find any other usage of TASK_TIME in the Spark code base, using the following command:

find . -name "*" -type f -print0 | xargs -0 grep "TASK_TIME"

 var GCTimePercent = 0.1;
 
 function totalDurationStyle(totalGCTime, totalDuration) {
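For readers of the diff alone, the shading rule these helpers implement is easy to miss. Below is a minimal Scala transcription, purely for illustration (the real code is the JavaScript above; GcShadingSketch and isShadedRed are made-up names, and the shape of the threshold check is an assumption since totalDurationStyle's body lies outside the hunk). It shows why the removed comment tied GCTimePercent to the also-removed ToolTips.TASK_TIME text: both encode the same 10% threshold.

object GcShadingSketch {
  // Tasks whose GC share of total duration exceeds this fraction get shaded red;
  // ToolTips.TASK_TIME described the same 10% threshold in prose.
  val GCTimePercent = 0.1

  // Opacity of the red shading: grows with the GC share, capped at 1.
  def totalDurationAlpha(totalGCTime: Double, totalDuration: Double): Double =
    if (totalDuration > 0) math.min(totalGCTime / totalDuration + 0.5, 1.0) else 1.0

  // Assumed shape of the check inside totalDurationStyle:
  // red iff GC time exceeds GCTimePercent of the total task time.
  def isShadedRed(totalGCTime: Double, totalDuration: Double): Boolean =
    totalGCTime > GCTimePercent * totalDuration
}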
@@ -394,7 +394,6 @@ private[history] class ApplicationCacheCheckFilter(
     val httpRequest = request.asInstanceOf[HttpServletRequest]
     val httpResponse = response.asInstanceOf[HttpServletResponse]
     val requestURI = httpRequest.getRequestURI
-    val operation = httpRequest.getMethod
 
     // if the request is for an attempt, check to see if it is in need of delete/refresh
     // and have the cache update the UI if so
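The context lines show the standard javax.servlet filter pattern: downcast the generic request/response types, then inspect the request URI; the deleted val operation was never consulted afterwards. A self-contained sketch of that pattern, assuming the javax.servlet API (UriCheckFilter and its refresh logic are hypothetical, not Spark's class):

import javax.servlet.{Filter, FilterChain, FilterConfig, ServletRequest, ServletResponse}
import javax.servlet.http.HttpServletRequest

// Hypothetical, self-contained illustration of the filter pattern; not Spark source.
class UriCheckFilter extends Filter {

  override def init(filterConfig: FilterConfig): Unit = ()

  override def doFilter(
      request: ServletRequest,
      response: ServletResponse,
      chain: FilterChain): Unit = {
    // Downcast the generic servlet types, exactly as in the context lines above.
    val httpRequest = request.asInstanceOf[HttpServletRequest]
    val requestURI = httpRequest.getRequestURI
    // ... decide from requestURI whether the cached entry needs delete/refresh ...
    chain.doFilter(request, response)
  }

  override def destroy(): Unit = ()
}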
3 changes: 0 additions & 3 deletions core/src/main/scala/org/apache/spark/rdd/HadoopRDD.scala
@@ -404,9 +404,6 @@ private[spark] object HadoopRDD extends Logging {
    */
   val CONFIGURATION_INSTANTIATION_LOCK = new Object()
 
-  /** Update the input bytes read metric each time this number of records has been read */
-  val RECORDS_BETWEEN_BYTES_READ_METRIC_UPDATES = 256
-
   /**
    * The three methods below are helpers for accessing the local map, a property of the SparkEnv of
    * the local process.
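The removed constant documented a common metrics pattern: rather than refreshing the input-bytes metric on every record read, update it once every N (here 256) records. A minimal sketch of that pattern under hypothetical names (RecordCountingIterator and updateBytesRead are illustrations, not Spark's actual API):

// Sketch of the every-N-records metric refresh that the removed constant drove.
// All names are hypothetical; this is not Spark source.
class RecordCountingIterator[T](
    underlying: Iterator[T],
    updateBytesRead: () => Unit,
    recordsBetweenUpdates: Int = 256) extends Iterator[T] {

  private var recordsRead = 0L

  def hasNext: Boolean = underlying.hasNext

  def next(): T = {
    val record = underlying.next()
    recordsRead += 1
    // Refreshing the metric per record would be costly; do it periodically instead.
    if (recordsRead % recordsBetweenUpdates == 0) updateBytesRead()
    record
  }
}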
5 changes: 0 additions & 5 deletions core/src/main/scala/org/apache/spark/ui/JettyUtils.scala
@@ -590,11 +590,6 @@ private class ProxyRedirectHandler(_proxyUri: String) extends HandlerWrapper {
       override def sendRedirect(location: String): Unit = {
         val newTarget = if (location != null) {
           val target = new URI(location)
-          val path = if (target.getPath().startsWith("/")) {
-            target.getPath()
-          } else {
-            req.getRequestURI().stripSuffix("/") + "/" + target.getPath()
-          }
           // The target path should already be encoded, so don't re-encode it, just the
           // proxy address part.
           val proxyBase = UIUtils.uiRoot(req)
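The deleted lines computed a resolved path that the remaining code evidently no longer uses. For reference, the rule they implemented, restated as a standalone sketch (resolveRedirectPath is a hypothetical name): keep an absolute Location path as-is, otherwise resolve it against the current request URI.

// Standalone restatement of the removed resolution rule; hypothetical name.
def resolveRedirectPath(requestURI: String, targetPath: String): String =
  if (targetPath.startsWith("/")) {
    // Absolute path: use as-is.
    targetPath
  } else {
    // Relative path: resolve against the current request URI.
    requestURI.stripSuffix("/") + "/" + targetPath
  }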
7 changes: 0 additions & 7 deletions core/src/main/scala/org/apache/spark/ui/ToolTips.scala
@@ -35,10 +35,6 @@ private[spark] object ToolTips {
 
   val OUTPUT = "Bytes written to Hadoop."
 
-  val STORAGE_MEMORY =
-    "Memory used / total available memory for storage of data " +
-      "like RDD partitions cached in memory. "
-
   val SHUFFLE_WRITE =
     "Bytes and records written to disk in order to be read by a shuffle in a future stage."
 
@@ -88,9 +84,6 @@ private[spark] object ToolTips {
       also create multiple RDDs internally. Cached RDDs are shown in green.
     """
 
-  val TASK_TIME =
-    "Shaded red when garbage collection (GC) time is over 10% of task time"
-
   val APPLICATION_EXECUTOR_LIMIT =
     """Maximum number of executors that this application will use. This limit is finite only when
        dynamic allocation is enabled. The number of granted executors may exceed the limit