
Commit 97a60cf

[SPARK-7929] Turn whitespace checker on for more token types.
This is the last batch of changes to complete SPARK-7929.

Previous related PRs: apache#6480 apache#6478 apache#6477 apache#6476 apache#6475 apache#6474 apache#6473

Author: Reynold Xin <[email protected]>

Closes apache#6487 from rxin/whitespace-lint and squashes the following commits:

b33d43d [Reynold Xin] [SPARK-7929] Turn whitespace checker on for more token types.
1 parent 36067ce commit 97a60cf

19 files changed: +52 −53 lines


external/flume-sink/src/main/scala/org/apache/spark/streaming/flume/sink/TransactionProcessor.scala

Lines changed: 1 addition & 1 deletion
@@ -143,7 +143,7 @@ private class TransactionProcessor(val channel: Channel, val seqNum: String,
           eventBatch.setErrorMsg(msg)
         } else {
           // At this point, the events are available, so fill them into the event batch
-          eventBatch = new EventBatch("",seqNum, events)
+          eventBatch = new EventBatch("", seqNum, events)
         }
       })
     } catch {

external/flume/src/main/scala/org/apache/spark/streaming/flume/EventTransformer.scala

Lines changed: 1 addition & 1 deletion
@@ -60,7 +60,7 @@ private[streaming] object EventTransformer extends Logging {
     out.write(body)
     val numHeaders = headers.size()
     out.writeInt(numHeaders)
-    for ((k,v) <- headers) {
+    for ((k, v) <- headers) {
       val keyBuff = Utils.serialize(k.toString)
       out.writeInt(keyBuff.length)
       out.write(keyBuff)

external/kafka/src/test/scala/org/apache/spark/streaming/kafka/KafkaRDDSuite.scala

Lines changed: 1 addition & 1 deletion
@@ -65,7 +65,7 @@ class KafkaRDDSuite extends FunSuite with BeforeAndAfterAll {
 
     val offsetRanges = Array(OffsetRange(topic, 0, 0, messages.size))
 
-    val rdd = KafkaUtils.createRDD[String, String, StringDecoder, StringDecoder](
+    val rdd = KafkaUtils.createRDD[String, String, StringDecoder, StringDecoder](
       sc, kafkaParams, offsetRanges)
 
     val received = rdd.map(_._2).collect.toSet

external/mqtt/src/main/scala/org/apache/spark/streaming/mqtt/MQTTInputDStream.scala

Lines changed: 1 addition & 13 deletions
@@ -17,22 +17,10 @@
 
 package org.apache.spark.streaming.mqtt
 
-import java.io.IOException
-import java.util.concurrent.Executors
-import java.util.Properties
-
-import scala.collection.JavaConversions._
-import scala.collection.Map
-import scala.collection.mutable.HashMap
-import scala.reflect.ClassTag
-
 import org.eclipse.paho.client.mqttv3.IMqttDeliveryToken
 import org.eclipse.paho.client.mqttv3.MqttCallback
 import org.eclipse.paho.client.mqttv3.MqttClient
-import org.eclipse.paho.client.mqttv3.MqttClientPersistence
-import org.eclipse.paho.client.mqttv3.MqttException
 import org.eclipse.paho.client.mqttv3.MqttMessage
-import org.eclipse.paho.client.mqttv3.MqttTopic
 import org.eclipse.paho.client.mqttv3.persist.MemoryPersistence
 
 import org.apache.spark.storage.StorageLevel
@@ -87,7 +75,7 @@ class MQTTReceiver(
 
     // Handles Mqtt message
     override def messageArrived(topic: String, message: MqttMessage) {
-      store(new String(message.getPayload(),"utf-8"))
+      store(new String(message.getPayload(), "utf-8"))
     }
 
     override def deliveryComplete(token: IMqttDeliveryToken) {

extras/kinesis-asl/src/main/scala/org/apache/spark/examples/streaming/KinesisWordCountASL.scala

Lines changed: 1 addition & 1 deletion
@@ -208,7 +208,7 @@ object KinesisWordProducerASL {
       recordsPerSecond: Int,
       wordsPerRecord: Int): Seq[(String, Int)] = {
 
-    val randomWords = List("spark","you","are","my","father")
+    val randomWords = List("spark", "you", "are", "my", "father")
     val totals = scala.collection.mutable.Map[String, Int]()
 
     // Create the low-level Kinesis Client from the AWS Java SDK.

extras/kinesis-asl/src/main/scala/org/apache/spark/streaming/kinesis/KinesisUtils.scala

Lines changed: 2 additions & 2 deletions
@@ -55,7 +55,7 @@ object KinesisUtils {
    */
  def createStream(
      ssc: StreamingContext,
-      kinesisAppName: String,
+      kinesisAppName: String,
      streamName: String,
      endpointUrl: String,
      regionName: String,
@@ -102,7 +102,7 @@ object KinesisUtils {
    */
  def createStream(
      ssc: StreamingContext,
-      kinesisAppName: String,
+      kinesisAppName: String,
      streamName: String,
      endpointUrl: String,
      regionName: String,

scalastyle-config.xml

Lines changed: 12 additions & 1 deletion
@@ -51,8 +51,8 @@
   </parameters>
  </check>
  <check level="error" class="org.scalastyle.scalariform.SpacesAfterPlusChecker" enabled="true"></check>
- <check level="error" class="org.scalastyle.file.WhitespaceEndOfLineChecker" enabled="false"></check>
  <check level="error" class="org.scalastyle.scalariform.SpacesBeforePlusChecker" enabled="true"></check>
+ <check level="error" class="org.scalastyle.file.WhitespaceEndOfLineChecker" enabled="false"></check>
  <check level="error" class="org.scalastyle.file.FileLineLengthChecker" enabled="true">
   <parameters>
    <parameter name="maxLineLength"><![CDATA[100]]></parameter>
@@ -142,4 +142,15 @@
  <check level="error" class="org.scalastyle.file.NoNewLineAtEofChecker" enabled="false"></check>
  <check level="error" class="org.scalastyle.scalariform.NonASCIICharacterChecker" enabled="true"></check>
  <check level="error" class="org.scalastyle.scalariform.SpaceAfterCommentStartChecker" enabled="true"></check>
+ <check level="error" class="org.scalastyle.scalariform.EnsureSingleSpaceBeforeTokenChecker" enabled="true">
+  <parameters>
+   <parameter name="tokens">ARROW, EQUALS</parameter>
+  </parameters>
+ </check>
+ <check level="error" class="org.scalastyle.scalariform.EnsureSingleSpaceAfterTokenChecker" enabled="true">
+  <parameters>
+   <parameter name="tokens">ARROW, EQUALS, COMMA, COLON, IF, WHILE, FOR</parameter>
+  </parameters>
+ </check>
+ <check level="error" class="org.scalastyle.scalariform.NotImplementedErrorUsage" enabled="true"></check>
 </scalastyle>
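
For orientation, the two checks added above require a single space before the ARROW and EQUALS tokens and a single space after ARROW, EQUALS, COMMA, COLON, IF, WHILE, and FOR. Below is a minimal, hypothetical Scala sketch of code these rules would likely flag versus code that conforms; the function names are made up for illustration and are not part of this commit.

// Would likely be flagged: no space after the commas and colons,
// and the "=>" / "=" tokens are not set off by a single space.
def badSquare(xs: Seq[Int]) = xs.map(x=>x * x)
def addBad(x:Int,y:Int) = x + y

// Conforming style: one space before and after "=>" and "=",
// and one space after "," and ":".
def goodSquare(xs: Seq[Int]): Seq[Int] = xs.map(x => x * x)
def addGood(x: Int, y: Int): Int = x + y

Enforcing this at the token level is what lets the whitespace-only fixes in the surrounding Scala files (extra or missing spaces after commas, around "=>" and "=") fail the lint build rather than relying on review.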

sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveInspectorSuite.scala

Lines changed: 6 additions & 6 deletions
@@ -78,10 +78,10 @@ class HiveInspectorSuite extends FunSuite with HiveInspectors {
     Literal(java.sql.Date.valueOf("2014-09-23")) ::
     Literal(Decimal(BigDecimal(123.123))) ::
     Literal(new java.sql.Timestamp(123123)) ::
-    Literal(Array[Byte](1,2,3)) ::
-    Literal.create(Seq[Int](1,2,3), ArrayType(IntegerType)) ::
-    Literal.create(Map[Int, Int](1->2, 2->1), MapType(IntegerType, IntegerType)) ::
-    Literal.create(Row(1,2.0d,3.0f),
+    Literal(Array[Byte](1, 2, 3)) ::
+    Literal.create(Seq[Int](1, 2, 3), ArrayType(IntegerType)) ::
+    Literal.create(Map[Int, Int](1 -> 2, 2 -> 1), MapType(IntegerType, IntegerType)) ::
+    Literal.create(Row(1, 2.0d, 3.0f),
       StructType(StructField("c1", IntegerType) ::
         StructField("c2", DoubleType) ::
         StructField("c3", FloatType) :: Nil)) ::
@@ -111,8 +111,8 @@ class HiveInspectorSuite extends FunSuite with HiveInspectors {
     case DecimalType() => PrimitiveObjectInspectorFactory.writableHiveDecimalObjectInspector
     case StructType(fields) =>
       ObjectInspectorFactory.getStandardStructObjectInspector(
-        java.util.Arrays.asList(fields.map(f => f.name) :_*),
-        java.util.Arrays.asList(fields.map(f => toWritableInspector(f.dataType)) :_*))
+        java.util.Arrays.asList(fields.map(f => f.name) : _*),
+        java.util.Arrays.asList(fields.map(f => toWritableInspector(f.dataType)) : _*))
   }
 
   def checkDataType(dt1: Seq[DataType], dt2: Seq[DataType]): Unit = {

sql/hive/src/test/scala/org/apache/spark/sql/hive/InsertIntoHiveTableSuite.scala

Lines changed: 1 addition & 1 deletion
@@ -160,7 +160,7 @@ class InsertIntoHiveTableSuite extends QueryTest with BeforeAndAfter {
       "p1=a"::"p2=b"::"p3=c"::"p4=c"::"p5=1"::Nil ,
       "p1=a"::"p2=b"::"p3=c"::"p4=c"::"p5=4"::Nil
     )
-    assert(listFolders(tmpDir,List()).sortBy(_.toString()) == expected.sortBy(_.toString))
+    assert(listFolders(tmpDir, List()).sortBy(_.toString()) == expected.sortBy(_.toString))
     sql("DROP TABLE table_with_partition")
     sql("DROP TABLE tmp_table")
   }

sql/hive/src/test/scala/org/apache/spark/sql/hive/ListTablesSuite.scala

Lines changed: 1 addition & 1 deletion
@@ -29,7 +29,7 @@ class ListTablesSuite extends QueryTest with BeforeAndAfterAll {
   import org.apache.spark.sql.hive.test.TestHive.implicits._
 
   val df =
-    sparkContext.parallelize((1 to 10).map(i => (i,s"str$i"))).toDF("key", "value")
+    sparkContext.parallelize((1 to 10).map(i => (i, s"str$i"))).toDF("key", "value")
 
   override def beforeAll(): Unit = {
     // The catalog in HiveContext is a case insensitive one.
