
Commit 3c2a96a

fix scalastyle errors
1 parent 29c6b43 commit 3c2a96a

File tree: 2 files changed, +51 −22 lines

external/kafka/src/main/scala/org/apache/spark/rdd/kafka/KafkaCluster.scala

Lines changed: 30 additions & 12 deletions
@@ -20,13 +20,14 @@ package org.apache.spark.rdd.kafka
 import scala.util.control.NonFatal
 import scala.collection.mutable.ArrayBuffer
 import java.util.Properties
-import kafka.api.{OffsetCommitRequest, OffsetRequest, OffsetFetchRequest, PartitionOffsetRequestInfo, TopicMetadata, TopicMetadataRequest, TopicMetadataResponse}
+import kafka.api._
 import kafka.common.{ErrorMapping, OffsetMetadataAndError, TopicAndPartition}
 import kafka.consumer.{ConsumerConfig, SimpleConsumer}

 /**
  * Convenience methods for interacting with a Kafka cluster.
- * @param kafkaParams Kafka <a href="http://kafka.apache.org/documentation.html#configuration">configuration parameters</a>.
+ * @param kafkaParams Kafka <a href="http://kafka.apache.org/documentation.html#configuration">
+ * configuration parameters</a>.
  * Requires "metadata.broker.list" or "bootstrap.servers" to be set with Kafka broker(s),
  * NOT zookeeper servers, specified in host1:port1,host2:port2 form
  */
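The reflowed scaladoc keeps the constructor contract: kafkaParams must name at least one broker, not zookeeper. A minimal usage sketch, assuming only the constructor shown in this diff (the broker addresses are hypothetical):

  val kafkaParams = Map("metadata.broker.list" -> "broker1:9092,broker2:9092")
  val cluster = new KafkaCluster(kafkaParams)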
@@ -45,7 +46,8 @@ class KafkaCluster(val kafkaParams: Map[String, String]) {
   val config: ConsumerConfig = KafkaCluster.consumerConfig(kafkaParams)

   def connect(host: String, port: Int): SimpleConsumer =
-    new SimpleConsumer(host, port, config.socketTimeoutMs, config.socketReceiveBufferBytes, config.clientId)
+    new SimpleConsumer(host, port, config.socketTimeoutMs,
+      config.socketReceiveBufferBytes, config.clientId)

   def connect(hostAndPort: (String, Int)): SimpleConsumer =
     connect(hostAndPort._1, hostAndPort._2)
@@ -54,7 +56,8 @@ class KafkaCluster(val kafkaParams: Map[String, String]) {
     findLeader(topic, partition).right.map(connect)

   def findLeader(topic: String, partition: Int): Either[Err, (String, Int)] = {
-    val req = TopicMetadataRequest(TopicMetadataRequest.CurrentVersion, 0, config.clientId, Seq(topic))
+    val req = TopicMetadataRequest(TopicMetadataRequest.CurrentVersion,
+      0, config.clientId, Seq(topic))
     val errs = new Err
     withBrokers(errs) { consumer =>
       val resp: TopicMetadataResponse = consumer.send(req)
@@ -79,7 +82,8 @@ class KafkaCluster(val kafkaParams: Map[String, String]) {
   }

   def getPartitionMetadata(topics: Set[String]): Either[Err, Set[TopicMetadata]] = {
-    val req = TopicMetadataRequest(TopicMetadataRequest.CurrentVersion, 0, config.clientId, topics.toSeq)
+    val req = TopicMetadataRequest(TopicMetadataRequest.CurrentVersion,
+      0, config.clientId, topics.toSeq)
     val errs = new Err
     withBrokers(errs) { consumer =>
       val resp: TopicMetadataResponse = consumer.send(req)
@@ -90,13 +94,20 @@ class KafkaCluster(val kafkaParams: Map[String, String]) {
     Left(errs)
   }

-  def getLatestLeaderOffsets(topicAndPartitions: Set[TopicAndPartition]): Either[Err, Map[TopicAndPartition, Long]] =
+  def getLatestLeaderOffsets(
+    topicAndPartitions: Set[TopicAndPartition]
+  ): Either[Err, Map[TopicAndPartition, Long]] =
     getLeaderOffsets(topicAndPartitions, OffsetRequest.LatestTime)

-  def getEarliestLeaderOffsets(topicAndPartitions: Set[TopicAndPartition]): Either[Err, Map[TopicAndPartition, Long]] =
+  def getEarliestLeaderOffsets(
+    topicAndPartitions: Set[TopicAndPartition]
+  ): Either[Err, Map[TopicAndPartition, Long]] =
     getLeaderOffsets(topicAndPartitions, OffsetRequest.EarliestTime)

-  def getLeaderOffsets(topicAndPartitions: Set[TopicAndPartition], before: Long): Either[Err, Map[TopicAndPartition, Long]] =
+  def getLeaderOffsets(
+    topicAndPartitions: Set[TopicAndPartition],
+    before: Long
+  ): Either[Err, Map[TopicAndPartition, Long]] =
     getLeaderOffsets(topicAndPartitions, before, 1).right.map { r =>
       r.map { kv =>
         // mapValues isnt serializable, see SI-7005
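All three offset helpers return Either[Err, Map[TopicAndPartition, Long]], so callers pattern-match instead of catching exceptions. A hedged sketch, assuming Err is the error accumulator used throughout this file (the topic name and the `cluster` value are hypothetical):

  val parts = Set(TopicAndPartition("events", 0), TopicAndPartition("events", 1))
  cluster.getLatestLeaderOffsets(parts) match {
    case Right(offsets) => offsets.foreach { case (tp, off) => println(s"$tp ends at $off") }
    case Left(errs)     => errs.foreach(err => println(s"offset lookup failed: $err"))
  }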
@@ -136,7 +147,10 @@ class KafkaCluster(val kafkaParams: Map[String, String]) {
     Left(errs)
   }

-  def getConsumerOffsets(groupId: String, topicAndPartitions: Set[TopicAndPartition]): Either[Err, Map[TopicAndPartition, Long]] = {
+  def getConsumerOffsets(
+    groupId: String,
+    topicAndPartitions: Set[TopicAndPartition]
+  ): Either[Err, Map[TopicAndPartition, Long]] = {
     getConsumerOffsetMetadata(groupId, topicAndPartitions).right.map { r =>
       r.map { kv =>
         kv._1 -> kv._2.offset
@@ -173,7 +187,10 @@ class KafkaCluster(val kafkaParams: Map[String, String]) {
     Left(errs)
   }

-  def setConsumerOffsets(groupId: String, offsets: Map[TopicAndPartition, Long]): Unit = {
+  def setConsumerOffsets(
+    groupId: String,
+    offsets: Map[TopicAndPartition, Long]
+  ): Either[Err, Map[TopicAndPartition, Short]] = {
     setConsumerOffsetMetadata(groupId, offsets.map { kv =>
       kv._1 -> OffsetMetadataAndError(kv._2)
     })
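setConsumerOffsets now declares Either[Err, Map[TopicAndPartition, Short]], so a commit can be checked per partition. A sketch under that assumption (the group id, topic, and offset are hypothetical; the Short values are presumably Kafka error codes, with 0 meaning success):

  cluster.setConsumerOffsets("my-group", Map(TopicAndPartition("events", 0) -> 5000L)) match {
    case Right(errorCodes) => errorCodes.foreach { case (tp, code) => println(s"$tp commit code $code") }
    case Left(errs)        => errs.foreach(err => println(s"commit failed: $err"))
  }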
@@ -233,8 +250,9 @@ object KafkaCluster {
     val props = new Properties()
     kafkaParams.foreach(param => props.put(param._1, param._2))
     Seq("zookeeper.connect", "group.id").foreach { s =>
-      if (!props.contains(s))
-        props.setProperty(s, "")
+      if (!props.contains(s)) {
+        props.setProperty(s, "")
+      }
     }
     new ConsumerConfig(props)
   }
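consumerConfig backfills "zookeeper.connect" and "group.id" with empty strings, presumably because Kafka 0.8's ConsumerConfig validation rejects a Properties object missing those keys even when only the broker-list APIs above are used. A hedged sketch of the resulting call (the broker address is hypothetical):

  val config: ConsumerConfig = KafkaCluster.consumerConfig(
    Map("metadata.broker.list" -> "broker1:9092"))
  // Kafka's defaults are then available, e.g. config.socketTimeoutMs, config.clientId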

external/kafka/src/main/scala/org/apache/spark/rdd/kafka/KafkaRDD.scala

Lines changed: 21 additions & 10 deletions
@@ -41,13 +41,18 @@ private[spark] case class KafkaRDDPartition(

 /** A batch-oriented interface for consuming from Kafka.
   * Each given Kafka topic/partition corresponds to an RDD partition.
-  * Starting and ending offsets are specified in advance, so that you can control exactly-once semantics.
-  * For an easy interface to Kafka-managed offsets, see {@link org.apache.spark.rdd.kafka.KafkaCluster}
-  * @param kafkaParams Kafka <a href="http://kafka.apache.org/documentation.html#configuration">configuration parameters</a>.
+  * Starting and ending offsets are specified in advance,
+  * so that you can control exactly-once semantics.
+  * For an easy interface to Kafka-managed offsets,
+  * see {@link org.apache.spark.rdd.kafka.KafkaCluster}
+  * @param kafkaParams Kafka <a href="http://kafka.apache.org/documentation.html#configuration">
+  * configuration parameters</a>.
   * Requires "metadata.broker.list" or "bootstrap.servers" to be set with Kafka broker(s),
   * NOT zookeeper servers, specified in host1:port1,host2:port2 form.
-  * @param fromOffsets per-topic/partition Kafka offsets defining the (inclusive) starting point of the batch
-  * @param untilOffsets per-topic/partition Kafka offsets defining the (exclusive) ending point of the batch
+  * @param fromOffsets per-topic/partition Kafka offsets defining the (inclusive)
+  * starting point of the batch
+  * @param untilOffsets per-topic/partition Kafka offsets defining the (exclusive)
+  * ending point of the batch
   * @param messageHandler function for translating each message into the desired type
   */
 class KafkaRDD[
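Per the scaladoc, fromOffsets is inclusive and untilOffsets exclusive. A small sketch of batch boundaries covering offsets 100 through 199 of one partition (the topic name and offsets are hypothetical):

  val fromOffsets  = Map(TopicAndPartition("events", 0) -> 100L)
  val untilOffsets = Map(TopicAndPartition("events", 0) -> 200L)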
@@ -74,22 +79,26 @@ class KafkaRDD[
   override def compute(thePart: Partition, context: TaskContext) = {
     val part = thePart.asInstanceOf[KafkaRDDPartition]
     if (part.fromOffset >= part.untilOffset) {
-      log.warn(s"Beginning offset is same or after ending offset, skipping ${part.topic} ${part.partition}")
+      log.warn("Beginning offset is same or after ending offset" +
+        s"skipping ${part.topic} ${part.partition}")
       Iterator.empty
     } else {
       new NextIterator[R] {
         context.addTaskCompletionListener{ context => closeIfNeeded() }

         val kc = new KafkaCluster(kafkaParams)
-        log.info(s"Computing topic ${part.topic}, partition ${part.partition}, offsets ${part.fromOffset} -> ${part.untilOffset}")
+        log.info(s"Computing topic ${part.topic}, partition ${part.partition}" +
+          s"offsets ${part.fromOffset} -> ${part.untilOffset}")
         val keyDecoder = classTag[U].runtimeClass.getConstructor(classOf[VerifiableProperties])
           .newInstance(kc.config.props)
           .asInstanceOf[Decoder[K]]
         val valueDecoder = classTag[T].runtimeClass.getConstructor(classOf[VerifiableProperties])
           .newInstance(kc.config.props)
           .asInstanceOf[Decoder[V]]
         val consumer: SimpleConsumer = kc.connectLeader(part.topic, part.partition).fold(
-          errs => throw new Exception(s"""Couldn't connect to leader for topic ${part.topic} ${part.partition}: ${errs.mkString("\n")}"""),
+          errs => throw new Exception(
+            s"Couldn't connect to leader for topic ${part.topic} ${part.partition}:" +
+              errs.mkString("\n")),
           consumer => consumer
         )
         var requestOffset = part.fromOffset
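The key and value decoders above are instantiated reflectively from the RDD's class tags, via the one-argument VerifiableProperties constructor that Kafka 0.8 decoders conventionally expose. A non-reflective sketch of the same construction, assuming kafka.serializer.StringDecoder:

  import java.util.Properties
  import kafka.serializer.StringDecoder
  import kafka.utils.VerifiableProperties

  // Mirrors the reflective pattern in the diff, written out directly.
  val decoder = classOf[StringDecoder]
    .getConstructor(classOf[VerifiableProperties])
    .newInstance(new VerifiableProperties(new Properties()))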
@@ -107,7 +116,8 @@ class KafkaRDD[
         val err = resp.errorCode(part.topic, part.partition)
         if (err == ErrorMapping.LeaderNotAvailableCode ||
           err == ErrorMapping.NotLeaderForPartitionCode) {
-          log.error(s"Lost leader for topic ${part.topic} partition ${part.partition}, sleeping for ${kc.config.refreshLeaderBackoffMs}ms")
+          log.error(s"Lost leader for topic ${part.topic} partition ${part.partition}, " +
+            s" sleeping for ${kc.config.refreshLeaderBackoffMs}ms")
           Thread.sleep(kc.config.refreshLeaderBackoffMs)
         }
         // Let normal rdd retry sort out reconnect attempts
@@ -126,7 +136,8 @@ class KafkaRDD[
           finished = true
         }
         requestOffset = item.nextOffset
-        messageHandler(new MessageAndMetadata(part.topic, part.partition, item.message, item.offset, keyDecoder, valueDecoder))
+        messageHandler(new MessageAndMetadata(
+          part.topic, part.partition, item.message, item.offset, keyDecoder, valueDecoder))
       }
     }
   }
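messageHandler runs once per fetched message, translating kafka.message.MessageAndMetadata into the RDD's element type R. A hedged example handler that keeps the raw key/value pair (the types are illustrative; Kafka's DefaultDecoder yields Array[Byte]):

  val messageHandler =
    (mmd: MessageAndMetadata[Array[Byte], Array[Byte]]) => (mmd.key, mmd.message)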
