@@ -241,8 +241,6 @@ object LBFGS extends Logging {
     val bcW = data.context.broadcast(w)
     val localGradient = gradient
 
-    // Given (current accumulated gradient, current loss) and (label, features)
-    // tuples, updates the current gradient and current loss
     val seqOp = (c: (Vector, Double), v: (Double, Vector)) =>
       (c, v) match {
         case ((grad, loss), (label, features)) =>
@@ -251,7 +249,6 @@ object LBFGS extends Logging {
           (denseGrad, loss + l)
       }
 
-    // Adds two (gradient, loss) tuples
     val combOp = (c1: (Vector, Double), c2: (Vector, Double)) =>
       (c1, c2) match { case ((grad1, loss1), (grad2, loss2)) =>
         val denseGrad1 = grad1.toDense
@@ -261,7 +258,7 @@ object LBFGS extends Logging {
       }
 
     val zeroSparseVector = Vectors.sparse(n, Seq())
-    val (gradientSum, lossSum) = data.treeAggregate(zeroSparseVector, 0.0)(seqOp, combOp)
+    val (gradientSum, lossSum) = data.treeAggregate((zeroSparseVector, 0.0))(seqOp, combOp)
 
     /**
      * regVal is sum of weight squares if it's L2 updater;
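The functional change here is the `treeAggregate` call: its `zeroValue` is a single parameter of type `U` (here `U = (Vector, Double)`), so the zero vector and the initial loss must be passed as one explicit tuple. Passing them as two arguments relied on Scala 2's argument-list adaptation (auto-tupling), which compiles but emits a deprecation warning; the inner parentheses are presumably what this change is silencing. Below is a minimal, self-contained sketch of the same call shape; the object name, toy data, and `add` helper are illustrative, not part of the patch.

import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.mllib.linalg.{Vector, Vectors}

object TreeAggregateZeroDemo {
  def main(args: Array[String]): Unit = {
    val sc = new SparkContext(
      new SparkConf().setAppName("treeAggregate-zero").setMaster("local[2]"))
    val n = 2
    // RDD[(label, features)], the same element type the LBFGS cost function folds over.
    val data = sc.parallelize(Seq(
      (1.0, Vectors.dense(1.0, 2.0)),
      (0.0, Vectors.dense(3.0, 4.0))))

    // Element-wise vector addition, standing in for the gradient update.
    val add = (a: Vector, b: Vector) =>
      Vectors.dense(a.toArray.zip(b.toArray).map { case (x, y) => x + y })

    val zeroSparseVector = Vectors.sparse(n, Seq())
    // The zero value (zeroSparseVector, 0.0) is a single tuple argument of
    // type U = (Vector, Double), mirroring (gradientSum, lossSum) in the diff.
    val (vecSum, labelSum) = data.treeAggregate((zeroSparseVector, 0.0))(
      (c, v) => (add(c._1, v._2), c._2 + v._1),
      (c1, c2) => (add(c1._1, c2._1), c1._2 + c2._2))

    println(s"vecSum = $vecSum, labelSum = $labelSum")
    sc.stop()
  }
}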