Commits
28 commits
525d999
[SPARK-26311][YARN] New feature: custom log URL for stdout/stderr
HeartSaVioR Dec 8, 2018
9b00a11
Address review comments from @vanzin
HeartSaVioR Dec 11, 2018
b1e08fd
Address review comments from @squito
HeartSaVioR Dec 11, 2018
ff6d9aa
Changed approach: only SHS provides custom executor log URLs
HeartSaVioR Jan 16, 2019
a36dd58
Fix binary compatibility issue
HeartSaVioR Jan 17, 2019
665f0b3
Clean up old stuff
HeartSaVioR Jan 17, 2019
8cd1db3
Address review comments from @squito
HeartSaVioR Jan 17, 2019
b641bd1
Move the role of replacing log URLs to AppStatusStore - HistoryAppSta…
HeartSaVioR Jan 18, 2019
02117a0
Fix existing UT break, as well as add unit test on acceptance test fo…
HeartSaVioR Jan 18, 2019
e3d3d49
Add missing RAT exclude, as well as code cleaning a bit
HeartSaVioR Jan 18, 2019
08bf035
Reduce lines of new test case file
HeartSaVioR Jan 18, 2019
782fe7f
Remove @JsonIgnore which doesn't work actually
HeartSaVioR Jan 18, 2019
b5d4e5f
Add incomplete app to test target for custom log url as well as others
HeartSaVioR Jan 18, 2019
ca6be5f
Also introduce a new config to toggle applying custom log url for inc…
HeartSaVioR Jan 19, 2019
229a75a
Also apply custom log url to driver log as well (in case of YARN clus…
HeartSaVioR Jan 19, 2019
3d9cdb4
Add org.apache.spark.scheduler.SparkListenerApplicationStart to Mima …
HeartSaVioR Jan 19, 2019
14eff2e
Fix UT in yarn
HeartSaVioR Jan 19, 2019
eb0ebaa
Address review comments from @squito
HeartSaVioR Jan 22, 2019
63b02f5
Fix more nits
HeartSaVioR Jan 22, 2019
31d8e4e
Address review comments from @vanzin
HeartSaVioR Jan 23, 2019
d8574e9
Change log level to INFO
HeartSaVioR Jan 24, 2019
29f33e4
Addressed review comments from vanzin
HeartSaVioR Jan 25, 2019
e4db8c5
Address review comments from vanzin
HeartSaVioR Jan 26, 2019
0acfb20
Fix a silly bug
HeartSaVioR Jan 26, 2019
954b17a
Address review comments
HeartSaVioR Jan 28, 2019
0df31ac
More inlines...
HeartSaVioR Jan 28, 2019
98b7b16
Remove unnecessary lines
HeartSaVioR Jan 29, 2019
2d04802
Fix a bug: we can't support NM_HOST / NM_PORT / NM_HTTP_PORT consiste…
HeartSaVioR Jan 29, 2019
3 changes: 2 additions & 1 deletion core/src/main/scala/org/apache/spark/SparkContext.scala
@@ -2355,7 +2355,8 @@ class SparkContext(config: SparkConf) extends Logging {
// Note: this code assumes that the task scheduler has been initialized and has contacted
// the cluster manager to get an application ID (in case the cluster manager provides one).
listenerBus.post(SparkListenerApplicationStart(appName, Some(applicationId),
startTime, sparkUser, applicationAttemptId, schedulerBackend.getDriverLogUrls))
startTime, sparkUser, applicationAttemptId, schedulerBackend.getDriverLogUrls,
schedulerBackend.getDriverAttributes))
_driverLogger.foreach(_.startSync(_hadoopConfiguration))
}

@@ -359,10 +359,9 @@ private[history] class FsHistoryProvider(conf: SparkConf, clock: Clock)
return None
}

val ui = SparkUI.create(None, new AppStatusStore(kvstore), conf, secManager, app.info.name,
HistoryServer.getAttemptURI(appId, attempt.info.attemptId),
attempt.info.startTime.getTime(),
attempt.info.appSparkVersion)
val ui = SparkUI.create(None, new HistoryAppStatusStore(conf, kvstore), conf, secManager,
app.info.name, HistoryServer.getAttemptURI(appId, attempt.info.attemptId),
attempt.info.startTime.getTime(), attempt.info.appSparkVersion)
loadPlugins().foreach(_.setupUI(ui))

val loadedUI = LoadedAppUI(ui)
@@ -0,0 +1,133 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package org.apache.spark.deploy.history

import java.util.concurrent.atomic.AtomicBoolean

import scala.util.matching.Regex

import org.apache.spark.SparkConf
import org.apache.spark.internal.Logging
import org.apache.spark.internal.config.History._
import org.apache.spark.status.AppStatusStore
import org.apache.spark.status.api.v1
import org.apache.spark.util.kvstore.KVStore

private[spark] class HistoryAppStatusStore(
conf: SparkConf,
store: KVStore)
extends AppStatusStore(store, None) with Logging {

import HistoryAppStatusStore._

private val logUrlPattern: Option[String] = {
val appInfo = super.applicationInfo()
val applicationCompleted = appInfo.attempts.nonEmpty && appInfo.attempts.head.completed
if (applicationCompleted || conf.get(APPLY_CUSTOM_EXECUTOR_LOG_URL_TO_INCOMPLETE_APP)) {
conf.get(CUSTOM_EXECUTOR_LOG_URL)
} else {
None
}
}

private val informedForMissingAttributes = new AtomicBoolean(false)

override def executorList(activeOnly: Boolean): Seq[v1.ExecutorSummary] = {
val execList = super.executorList(activeOnly)
logUrlPattern match {
case Some(pattern) => execList.map(replaceLogUrls(_, pattern))
case None => execList
}
}

override def executorSummary(executorId: String): v1.ExecutorSummary = {
val execSummary = super.executorSummary(executorId)
logUrlPattern match {
case Some(pattern) => replaceLogUrls(execSummary, pattern)
case None => execSummary
}
}

private def replaceLogUrls(exec: v1.ExecutorSummary, urlPattern: String): v1.ExecutorSummary = {
val attributes = exec.attributes

// Relation between the pattern {{FILE_NAME}} and the attribute {{LOG_FILES}}:
// HistoryAppStatusStore doesn't know which types of log files the resource manager can
// provide, so we require the resource manager to advertise the available types of log
// files, which are encouraged to match the types of log files in the original log URLs.
// Once we have the list of log files, we expose them to end users as a pattern so that
// end users can compose custom log URL(s) that include log file name(s).
val allPatterns = CUSTOM_URL_PATTERN_REGEX.findAllMatchIn(urlPattern).map(_.group(1)).toSet
Contributor

This needs at least a comment about what's going on with FILE_NAME and LOG_FILES. They're special-cased in a bunch of places but I didn't really find an explanation for that.

I'm also wondering whether you could use replaceAllIn here to simplify this code...

Contributor Author

I'll add a comment here explaining the relation between the FILE_NAME pattern and the LOG_FILES attribute.

I'm not sure we can simplify this block with replaceAllIn, because findAllMatchIn is used intentionally to find all patterns regardless of whether they are available as attributes, which enables falling back to the original log URLs. If we simply used replaceAllIn to replace only the patterns that match, the final URL would end up odd and broken whenever an attribute is missing. (See the standalone sketch after this file's diff for an illustration.)

val allPatternsExceptFileName = allPatterns.filter(_ != "FILE_NAME")
val allAttributeKeys = attributes.keySet
val allAttributeKeysExceptLogFiles = allAttributeKeys.filter(_ != "LOG_FILES")

if (allPatternsExceptFileName.diff(allAttributeKeysExceptLogFiles).nonEmpty) {
logFailToRenewLogUrls("some of required attributes are missing in app's event log.",
Contributor

Are these warnings gonna show up every time an old event log is read (i.e. no extra attributes recorded)? That sounds pretty noisy and scary for users.

Contributor Author
HeartSaVioR Jan 23, 2019

Totally agreed, thanks for catching this! Maybe we can keep a flag here and show the message only once per instance. It would show up again when the SparkUI for the application is invalidated and recreated, but I don't think that will be noisy.

Contributor

I think it would be better to avoid a warning if the executor doesn't have any attributes recorded. That's either an old event log or from an application that doesn't support this feature.

If you just log an info message that is less noisy than this, it should be fine in that case. Leave the warning for when the app is actually missing info.

Contributor Author

OK. I made the change as I commented earlier, and I agree with changing the log level to INFO. Let me change it. Thanks!

allPatternsExceptFileName, allAttributeKeys)
return exec
} else if (allPatterns.contains("FILE_NAME") && !allAttributeKeys.contains("LOG_FILES")) {
logFailToRenewLogUrls("'FILE_NAME' parameter is provided, but file information is " +
"missing in app's event log.", allPatternsExceptFileName, allAttributeKeys)
return exec
}

val updatedUrl = allPatternsExceptFileName.foldLeft(urlPattern) { case (orig, patt) =>
// we already checked the existence of attribute when comparing keys
orig.replace(s"{{$patt}}", attributes(patt))
}

val newLogUrlMap = if (allPatterns.contains("FILE_NAME")) {
// allAttributeKeys should contain "LOG_FILES"
attributes("LOG_FILES").split(",").map { file =>
file -> updatedUrl.replace("{{FILE_NAME}}", file)
}.toMap
} else {
Map("log" -> updatedUrl)
}

replaceExecutorLogs(exec, newLogUrlMap)
}

private def logFailToRenewLogUrls(
reason: String,
allPatterns: Set[String],
allAttributes: Set[String]): Unit = {
if (informedForMissingAttributes.compareAndSet(false, true)) {
logInfo(s"Fail to renew executor log urls: $reason. Required: $allPatterns / " +
s"available: $allAttributes. Falling back to show app's original log urls.")
}
}

private def replaceExecutorLogs(
source: v1.ExecutorSummary,
newExecutorLogs: Map[String, String]): v1.ExecutorSummary = {
new v1.ExecutorSummary(source.id, source.hostPort, source.isActive, source.rddBlocks,
source.memoryUsed, source.diskUsed, source.totalCores, source.maxTasks, source.activeTasks,
source.failedTasks, source.completedTasks, source.totalTasks, source.totalDuration,
source.totalGCTime, source.totalInputBytes, source.totalShuffleRead,
source.totalShuffleWrite, source.isBlacklisted, source.maxMemory, source.addTime,
source.removeTime, source.removeReason, newExecutorLogs, source.memoryMetrics,
source.blacklistedInStages, source.peakMemoryMetrics, source.attributes)
}

}

private[spark] object HistoryAppStatusStore {
val CUSTOM_URL_PATTERN_REGEX: Regex = "\\{\\{([A-Za-z0-9_\\-]+)\\}\\}".r
}
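
For illustration, a self-contained sketch of the substitution logic above, operating on plain strings rather than ExecutorSummary objects (the URL pattern and attribute values below are made up): every {{NAME}} placeholder except FILE_NAME must have a matching attribute, otherwise the original log URLs are kept, and {{FILE_NAME}} expands into one URL per entry of the LOG_FILES attribute.

import scala.util.matching.Regex

object CustomLogUrlSketch {
  val PatternRegex: Regex = "\\{\\{([A-Za-z0-9_\\-]+)\\}\\}".r

  // Returns the rewritten log URL map, or None to signal "fall back to the original URLs".
  def rewrite(urlPattern: String, attributes: Map[String, String]): Option[Map[String, String]] = {
    val allPatterns = PatternRegex.findAllMatchIn(urlPattern).map(_.group(1)).toSet
    val patternsExceptFileName = allPatterns - "FILE_NAME"

    if (patternsExceptFileName.diff(attributes.keySet).nonEmpty) {
      None // a required attribute is missing
    } else {
      val resolved = patternsExceptFileName.foldLeft(urlPattern) { (url, p) =>
        url.replace(s"{{$p}}", attributes(p))
      }
      if (allPatterns.contains("FILE_NAME")) {
        // One log URL per file name advertised through the LOG_FILES attribute.
        attributes.get("LOG_FILES").map { files =>
          files.split(",").map(f => f -> resolved.replace("{{FILE_NAME}}", f)).toMap
        }
      } else {
        Some(Map("log" -> resolved))
      }
    }
  }

  def main(args: Array[String]): Unit = {
    val pattern = "http://logserver/{{NM_HOST}}/{{CONTAINER_ID}}/{{FILE_NAME}}"
    val attrs = Map(
      "NM_HOST" -> "host1",
      "CONTAINER_ID" -> "container_1234_0001_01_000002",
      "LOG_FILES" -> "stdout,stderr")

    println(rewrite(pattern, attrs))
    // Some(Map(stdout -> .../stdout, stderr -> .../stderr))

    println(rewrite(pattern, attrs - "CONTAINER_ID"))
    // None: CONTAINER_ID has no attribute, so the original log URLs would be shown instead
  }
}

This is also why the review discussion above settles on findAllMatchIn rather than replaceAllIn: collecting every placeholder first makes it possible to detect a missing attribute and fall back, instead of emitting a half-substituted, broken URL.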
@@ -60,7 +60,8 @@ private[spark] class CoarseGrainedExecutorBackend(
rpcEnv.asyncSetupEndpointRefByURI(driverUrl).flatMap { ref =>
// This is a very fast action so we can use "ThreadUtils.sameThread"
driver = Some(ref)
ref.ask[Boolean](RegisterExecutor(executorId, self, hostname, cores, extractLogUrls))
ref.ask[Boolean](RegisterExecutor(executorId, self, hostname, cores, extractLogUrls,
extractAttributes))
}(ThreadUtils.sameThread).onComplete {
// This is a very fast action so we can use "ThreadUtils.sameThread"
case Success(msg) =>
@@ -76,6 +77,12 @@
.map(e => (e._1.substring(prefix.length).toLowerCase(Locale.ROOT), e._2))
}

def extractAttributes: Map[String, String] = {
val prefix = "SPARK_EXECUTOR_ATTRIBUTE_"
sys.env.filterKeys(_.startsWith(prefix))
.map(e => (e._1.substring(prefix.length).toUpperCase(Locale.ROOT), e._2))
}

override def receive: PartialFunction[Any, Unit] = {
case RegisteredExecutor =>
logInfo("Successfully registered with driver")
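
A quick illustration of the SPARK_EXECUTOR_ATTRIBUTE_ convention used by extractAttributes above (the environment variable names and values are hypothetical): only prefixed variables are picked up, and the rest of the name is upper-cased to form the attribute key. The cluster manager side is expected to export such variables when launching the executor.

import java.util.Locale

object ExtractAttributesSketch {
  def main(args: Array[String]): Unit = {
    // Stand-in for sys.env, for illustration only.
    val env = Map(
      "SPARK_EXECUTOR_ATTRIBUTE_NM_HOST" -> "host1",
      "SPARK_EXECUTOR_ATTRIBUTE_container_id" -> "container_1234_0001_01_000002",
      "SPARK_LOCAL_DIRS" -> "/tmp")

    val prefix = "SPARK_EXECUTOR_ATTRIBUTE_"
    // Mirrors the logic of extractAttributes: keep prefixed vars, upper-case the key.
    val attributes = env.filterKeys(_.startsWith(prefix))
      .map(e => (e._1.substring(prefix.length).toUpperCase(Locale.ROOT), e._2))
      .toMap
    println(attributes)
    // Map(NM_HOST -> host1, CONTAINER_ID -> container_1234_0001_01_000002)
  }
}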
18 changes: 18 additions & 0 deletions core/src/main/scala/org/apache/spark/internal/config/History.scala
@@ -125,4 +125,22 @@ private[spark] object History {
val KERBEROS_KEYTAB = ConfigBuilder("spark.history.kerberos.keytab")
.stringConf
.createOptional

val CUSTOM_EXECUTOR_LOG_URL = ConfigBuilder("spark.history.custom.executor.log.url")
.doc("Specifies custom spark executor log url for supporting external log service instead of " +
"using cluster managers' application log urls in the history server. Spark will support " +
"some path variables via patterns which can vary on cluster manager. Please check the " +
"documentation for your cluster manager to see which patterns are supported, if any. " +
"This configuration has no effect on a live application, it only affects the history server.")
.stringConf
.createOptional

val APPLY_CUSTOM_EXECUTOR_LOG_URL_TO_INCOMPLETE_APP =
ConfigBuilder("spark.history.custom.executor.log.url.applyIncompleteApplication")
.doc("Whether to apply custom executor log url, as specified by " +
"`spark.history.custom.executor.log.url`, to incomplete application as well. " +
"Even if this is true, this still only affects the behavior of the history server, " +
"not running spark applications.")
.booleanConf
.createWithDefault(true)
}
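
A minimal sketch of how these options could be set for a history server, assuming a hypothetical external log service URL and placeholder names (check the cluster manager's documentation for the placeholders it actually supports). In practice the same keys would typically go into the history server's spark-defaults.conf.

import org.apache.spark.SparkConf

object HistoryCustomLogUrlConfSketch {
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf()
      // Hypothetical pattern; supported placeholders depend on the cluster manager.
      .set("spark.history.custom.executor.log.url",
        "http://logserver/logs/{{NM_HOST}}/{{CONTAINER_ID}}/{{USER}}/{{FILE_NAME}}")
      // Keep the cluster manager's original log URLs for applications that are still running.
      .set("spark.history.custom.executor.log.url.applyIncompleteApplication", "false")

    println(conf.get("spark.history.custom.executor.log.url"))
  }
}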
@@ -69,6 +69,13 @@ private[spark] trait SchedulerBackend {
*/
def getDriverLogUrls: Option[Map[String, String]] = None

/**
* Get the attributes on driver. These attributes are used to replace log URLs when
* custom log url pattern is specified.
* @return Map containing attributes on driver.
*/
def getDriverAttributes: Option[Map[String, String]] = None

/**
* Get the max number of tasks that can be concurrent launched currently.
* Note that please don't cache the value returned by this method, because the number can change
@@ -192,7 +192,8 @@ case class SparkListenerApplicationStart(
time: Long,
sparkUser: String,
appAttemptId: Option[String],
driverLogs: Option[Map[String, String]] = None) extends SparkListenerEvent
driverLogs: Option[Map[String, String]] = None,
driverAttributes: Option[Map[String, String]] = None) extends SparkListenerEvent

@DeveloperApi
case class SparkListenerApplicationEnd(time: Long) extends SparkListenerEvent
@@ -63,7 +63,8 @@ private[spark] object CoarseGrainedClusterMessages {
executorRef: RpcEndpointRef,
hostname: String,
cores: Int,
logUrls: Map[String, String])
logUrls: Map[String, String],
attributes: Map[String, String])
extends CoarseGrainedClusterMessage

case class StatusUpdate(executorId: String, taskId: Long, state: TaskState,
@@ -183,7 +183,7 @@ class CoarseGrainedSchedulerBackend(scheduler: TaskSchedulerImpl, val rpcEnv: Rp

override def receiveAndReply(context: RpcCallContext): PartialFunction[Any, Unit] = {

case RegisterExecutor(executorId, executorRef, hostname, cores, logUrls) =>
case RegisterExecutor(executorId, executorRef, hostname, cores, logUrls, attributes) =>
if (executorDataMap.contains(executorId)) {
executorRef.send(RegisterExecutorFailed("Duplicate executor ID: " + executorId))
context.reply(true)
@@ -207,7 +207,7 @@ class CoarseGrainedSchedulerBackend(scheduler: TaskSchedulerImpl, val rpcEnv: Rp
totalCoreCount.addAndGet(cores)
totalRegisteredExecutors.addAndGet(1)
val data = new ExecutorData(executorRef, executorAddress, hostname,
cores, cores, logUrls)
cores, cores, logUrls, attributes)
// This must be synchronized because variables mutated
// in this block are read when requesting executors
CoarseGrainedSchedulerBackend.this.synchronized {
@@ -34,5 +34,6 @@ private[cluster] class ExecutorData(
override val executorHost: String,
var freeCores: Int,
override val totalCores: Int,
override val logUrlMap: Map[String, String]
) extends ExecutorInfo(executorHost, totalCores, logUrlMap)
override val logUrlMap: Map[String, String],
override val attributes: Map[String, String]
) extends ExecutorInfo(executorHost, totalCores, logUrlMap, attributes)
@@ -26,7 +26,12 @@ import org.apache.spark.annotation.DeveloperApi
class ExecutorInfo(
val executorHost: String,
val totalCores: Int,
val logUrlMap: Map[String, String]) {
val logUrlMap: Map[String, String],
val attributes: Map[String, String]) {

def this(executorHost: String, totalCores: Int, logUrlMap: Map[String, String]) = {
this(executorHost, totalCores, logUrlMap, Map.empty)
}

def canEqual(other: Any): Boolean = other.isInstanceOf[ExecutorInfo]

@@ -35,12 +40,13 @@ class ExecutorInfo(
(that canEqual this) &&
executorHost == that.executorHost &&
totalCores == that.totalCores &&
logUrlMap == that.logUrlMap
logUrlMap == that.logUrlMap &&
attributes == that.attributes
case _ => false
}

override def hashCode(): Int = {
val state = Seq(executorHost, totalCores, logUrlMap)
val state = Seq(executorHost, totalCores, logUrlMap, attributes)
state.map(_.hashCode()).foldLeft(0)((a, b) => 31 * a + b)
}
}
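
A small sketch (with made-up values) of the compatibility story above: the secondary constructor keeps old three-argument call sites working with empty attributes, while equals and hashCode now take attributes into account.

import org.apache.spark.scheduler.cluster.ExecutorInfo

object ExecutorInfoCompatSketch {
  def main(args: Array[String]): Unit = {
    val logUrls = Map("stdout" -> "http://host1:8042/logs/stdout")

    // Old three-argument form: attributes default to Map.empty.
    val legacy = new ExecutorInfo("host1", 4, logUrls)
    // New form carrying executor attributes.
    val withAttrs = new ExecutorInfo("host1", 4, logUrls, Map("NM_HOST" -> "host1"))

    println(legacy.attributes.isEmpty) // true
    println(legacy == withAttrs)       // false: attributes participate in equality
  }
}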
@@ -129,7 +129,8 @@ private[spark] class LocalSchedulerBackend(
listenerBus.post(SparkListenerExecutorAdded(
System.currentTimeMillis,
executorEndpoint.localExecutorId,
new ExecutorInfo(executorEndpoint.localExecutorHostname, totalCores, Map.empty)))
new ExecutorInfo(executorEndpoint.localExecutorHostname, totalCores, Map.empty,
Map.empty)))
launcherBackend.setAppId(appId)
launcherBackend.setState(SparkAppHandle.State.RUNNING)
}
@@ -131,6 +131,7 @@ private[spark] class AppStatusListener(
.orElse(liveExecutors.get(SparkContext.LEGACY_DRIVER_IDENTIFIER))
driver.foreach { d =>
d.executorLogs = logs.toMap
d.attributes = event.driverAttributes.getOrElse(Map.empty).toMap
update(d, System.nanoTime())
}
}
@@ -190,6 +191,7 @@
exec.totalCores = event.executorInfo.totalCores
exec.maxTasks = event.executorInfo.totalCores / coresPerTask
exec.executorLogs = event.executorInfo.logUrlMap
exec.attributes = event.executorInfo.attributes
liveUpdate(exec, System.nanoTime())
}

4 changes: 3 additions & 1 deletion core/src/main/scala/org/apache/spark/status/LiveEntity.scala
@@ -258,6 +258,7 @@ private class LiveExecutor(val executorId: String, _addTime: Long) extends LiveE
var blacklistedInStages: Set[Int] = TreeSet()

var executorLogs = Map[String, String]()
var attributes = Map[String, String]()

// Memory metrics. They may not be recorded (e.g. old event logs) so if totalOnHeap is not
// initialized, the store will not contain this information.
@@ -306,7 +307,8 @@ private class LiveExecutor(val executorId: String, _addTime: Long) extends LiveE
executorLogs,
memoryMetrics,
blacklistedInStages,
Some(peakExecutorMetrics).filter(_.isSet))
Some(peakExecutorMetrics).filter(_.isSet),
attributes)
new ExecutorSummaryWrapper(info)
}
}
3 changes: 2 additions & 1 deletion core/src/main/scala/org/apache/spark/status/api/v1/api.scala
@@ -106,7 +106,8 @@ class ExecutorSummary private[spark](
val blacklistedInStages: Set[Int],
@JsonSerialize(using = classOf[ExecutorMetricsJsonSerializer])
@JsonDeserialize(using = classOf[ExecutorMetricsJsonDeserializer])
val peakMemoryMetrics: Option[ExecutorMetrics])
val peakMemoryMetrics: Option[ExecutorMetrics],
val attributes: Map[String, String])

class MemoryMetrics private[spark](
val usedOnHeapStorageMemory: Long,
16 changes: 12 additions & 4 deletions core/src/main/scala/org/apache/spark/util/JsonProtocol.scala
@@ -211,7 +211,8 @@ private[spark] object JsonProtocol {
("Timestamp" -> applicationStart.time) ~
("User" -> applicationStart.sparkUser) ~
("App Attempt ID" -> applicationStart.appAttemptId.map(JString(_)).getOrElse(JNothing)) ~
("Driver Logs" -> applicationStart.driverLogs.map(mapToJson).getOrElse(JNothing))
("Driver Logs" -> applicationStart.driverLogs.map(mapToJson).getOrElse(JNothing)) ~
("Driver Attributes" -> applicationStart.driverAttributes.map(mapToJson).getOrElse(JNothing))
}

def applicationEndToJson(applicationEnd: SparkListenerApplicationEnd): JValue = {
@@ -486,7 +487,8 @@
def executorInfoToJson(executorInfo: ExecutorInfo): JValue = {
("Host" -> executorInfo.executorHost) ~
("Total Cores" -> executorInfo.totalCores) ~
("Log Urls" -> mapToJson(executorInfo.logUrlMap))
("Log Urls" -> mapToJson(executorInfo.logUrlMap)) ~
("Attributes" -> mapToJson(executorInfo.attributes))
}

def blockUpdatedInfoToJson(blockUpdatedInfo: BlockUpdatedInfo): JValue = {
@@ -693,7 +695,9 @@
val sparkUser = (json \ "User").extract[String]
val appAttemptId = jsonOption(json \ "App Attempt ID").map(_.extract[String])
val driverLogs = jsonOption(json \ "Driver Logs").map(mapFromJson)
SparkListenerApplicationStart(appName, appId, time, sparkUser, appAttemptId, driverLogs)
val driverAttributes = jsonOption(json \ "Driver Attributes").map(mapFromJson)
SparkListenerApplicationStart(appName, appId, time, sparkUser, appAttemptId, driverLogs,
driverAttributes)
}

def applicationEndFromJson(json: JValue): SparkListenerApplicationEnd = {
@@ -1061,7 +1065,11 @@
val executorHost = (json \ "Host").extract[String]
val totalCores = (json \ "Total Cores").extract[Int]
val logUrls = mapFromJson(json \ "Log Urls").toMap
new ExecutorInfo(executorHost, totalCores, logUrls)
val attributes = jsonOption(json \ "Attributes") match {
case Some(attr) => mapFromJson(attr).toMap
case None => Map.empty[String, String]
}
new ExecutorInfo(executorHost, totalCores, logUrls, attributes)
}

def blockUpdatedInfoFromJson(json: JValue): BlockUpdatedInfo = {
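
To round off, a small sketch (hypothetical values; it lives under the org.apache.spark package because JsonProtocol is private[spark]) of the event-log serialization change: executor attributes survive a to/from JSON round trip, and older JSON without an "Attributes" field falls back to an empty map thanks to the jsonOption handling above.

package org.apache.spark

import org.json4s.jackson.JsonMethods.parse

import org.apache.spark.scheduler.cluster.ExecutorInfo
import org.apache.spark.util.JsonProtocol

object ExecutorInfoJsonSketch {
  def main(args: Array[String]): Unit = {
    val info = new ExecutorInfo("host1", 4,
      Map("stdout" -> "http://host1:8042/logs/stdout"),
      Map("NM_HOST" -> "host1", "LOG_FILES" -> "stdout,stderr"))

    // Round trip through the updated serializer keeps the attributes.
    val roundTripped = JsonProtocol.executorInfoFromJson(JsonProtocol.executorInfoToJson(info))
    println(roundTripped.attributes)
    // Map(NM_HOST -> host1, LOG_FILES -> stdout,stderr)

    // JSON written before this change has no "Attributes" field; it deserializes
    // to an ExecutorInfo with an empty attribute map.
    val legacyJson = parse(
      """{"Host":"host1","Total Cores":4,"Log Urls":{"stdout":"http://host1:8042/logs/stdout"}}""")
    println(JsonProtocol.executorInfoFromJson(legacyJson).attributes) // Map()
  }
}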