
Commit a985492

Move Converter examples to own package
1 parent 365d0be commit a985492

File tree

6 files changed, +43 -34 lines

examples/src/main/python/cassandra_inputformat.py (2 additions, 2 deletions)

@@ -71,8 +71,8 @@
     "org.apache.cassandra.hadoop.cql3.CqlPagingInputFormat",
     "java.util.Map",
     "java.util.Map",
-    keyConverter="org.apache.spark.examples.CassandraCQLKeyConverter",
-    valueConverter="org.apache.spark.examples.CassandraCQLValueConverter",
+    keyConverter="org.apache.spark.examples.pythonconverters.CassandraCQLKeyConverter",
+    valueConverter="org.apache.spark.examples.pythonconverters.CassandraCQLValueConverter",
     conf=conf)
 output = cass_rdd.collect()
 for (k, v) in output:

examples/src/main/python/hbase_inputformat.py (6 additions, 5 deletions)

@@ -61,11 +61,12 @@
 sc = SparkContext(appName="HBaseInputFormat")
 
 conf = {"hbase.zookeeper.quorum": host, "hbase.mapreduce.inputtable": table}
-hbase_rdd = sc.newAPIHadoopRDD("org.apache.hadoop.hbase.mapreduce.TableInputFormat",
-                               "org.apache.hadoop.hbase.io.ImmutableBytesWritable",
-                               "org.apache.hadoop.hbase.client.Result",
-                               valueConverter="org.apache.spark.examples.HBaseConverter",
-                               conf=conf)
+hbase_rdd = sc.newAPIHadoopRDD(
+    "org.apache.hadoop.hbase.mapreduce.TableInputFormat",
+    "org.apache.hadoop.hbase.io.ImmutableBytesWritable",
+    "org.apache.hadoop.hbase.client.Result",
+    valueConverter="org.apache.spark.examples.pythonconverters.HBaseConverter",
+    conf=conf)
 output = hbase_rdd.collect()
 for (k, v) in output:
     print (k, v)
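For reference, the updated call above can be exercised end to end. A minimal sketch of a PySpark driver using the renamed converter; the quorum and table values are placeholders, and the examples jar containing HBaseConverter must be on the classpath:

from pyspark import SparkContext

# Sketch only: placeholder connection values; requires a running HBase
# cluster and the Spark examples jar on the driver/executor classpath.
sc = SparkContext(appName="HBaseInputFormat")
conf = {"hbase.zookeeper.quorum": "localhost",
        "hbase.mapreduce.inputtable": "test_table"}
hbase_rdd = sc.newAPIHadoopRDD(
    "org.apache.hadoop.hbase.mapreduce.TableInputFormat",
    "org.apache.hadoop.hbase.io.ImmutableBytesWritable",
    "org.apache.hadoop.hbase.client.Result",
    valueConverter="org.apache.spark.examples.pythonconverters.HBaseConverter",
    conf=conf)
for (k, v) in hbase_rdd.collect():
    print (k, v)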

examples/src/main/scala/org/apache/spark/examples/CassandraCQLTest.scala (0 additions, 16 deletions)

@@ -32,23 +32,7 @@ import org.apache.hadoop.mapreduce.Job
 
 import org.apache.spark.{SparkConf, SparkContext}
 import org.apache.spark.SparkContext._
-import org.apache.spark.api.python.Converter
 
-class CassandraCQLKeyConverter extends Converter {
-  import collection.JavaConversions._
-  override def convert(obj: Any) = {
-    val result = obj.asInstanceOf[java.util.Map[String, ByteBuffer]]
-    mapAsJavaMap(result.mapValues(bb => ByteBufferUtil.toInt(bb)))
-  }
-}
-
-class CassandraCQLValueConverter extends Converter {
-  import collection.JavaConversions._
-  override def convert(obj: Any) = {
-    val result = obj.asInstanceOf[java.util.Map[String, ByteBuffer]]
-    mapAsJavaMap(result.mapValues(bb => ByteBufferUtil.string(bb)))
-  }
-}
 
 /*
   Need to create following keyspace and column family in cassandra before running this example

examples/src/main/scala/org/apache/spark/examples/HBaseTest.scala (2 additions, 11 deletions)

@@ -17,21 +17,12 @@
 
 package org.apache.spark.examples
 
-import org.apache.hadoop.hbase.client.{Result, HBaseAdmin}
+import org.apache.hadoop.hbase.client.HBaseAdmin
 import org.apache.hadoop.hbase.{HBaseConfiguration, HTableDescriptor}
 import org.apache.hadoop.hbase.mapreduce.TableInputFormat
 
 import org.apache.spark._
-import org.apache.spark.rdd.NewHadoopRDD
-import org.apache.spark.api.python.Converter
-import org.apache.hadoop.hbase.util.Bytes
-
-class HBaseConverter extends Converter {
-  override def convert(obj: Any) = {
-    val result = obj.asInstanceOf[Result]
-    Bytes.toStringBinary(result.value())
-  }
-}
+
 
 object HBaseTest {
   def main(args: Array[String]) {
New file under examples/src/main/scala/org/apache/spark/examples/pythonconverters/ (21 additions, 0 deletions)

@@ -0,0 +1,21 @@
+package org.apache.spark.examples.pythonconverters
+
+import org.apache.spark.api.python.Converter
+import java.nio.ByteBuffer
+import org.apache.cassandra.utils.ByteBufferUtil
+import collection.JavaConversions.{mapAsJavaMap, mapAsScalaMap}
+
+
+class CassandraCQLKeyConverter extends Converter {
+  override def convert(obj: Any) = {
+    val result = obj.asInstanceOf[java.util.Map[String, ByteBuffer]]
+    mapAsJavaMap(result.mapValues(bb => ByteBufferUtil.toInt(bb)))
+  }
+}
+
+class CassandraCQLValueConverter extends Converter {
+  override def convert(obj: Any) = {
+    val result = obj.asInstanceOf[java.util.Map[String, ByteBuffer]]
+    mapAsJavaMap(result.mapValues(bb => ByteBufferUtil.string(bb)))
+  }
+}
New file under examples/src/main/scala/org/apache/spark/examples/pythonconverters/ (12 additions, 0 deletions)

@@ -0,0 +1,12 @@
+package org.apache.spark.examples.pythonconverters
+
+import org.apache.spark.api.python.Converter
+import org.apache.hadoop.hbase.client.Result
+import org.apache.hadoop.hbase.util.Bytes
+
+class HBaseConverter extends Converter {
+  override def convert(obj: Any) = {
+    val result = obj.asInstanceOf[Result]
+    Bytes.toStringBinary(result.value())
+  }
+}
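Taken together, the Python examples now reference these classes by their fully qualified names in the new package. A minimal sketch wiring both Cassandra converters; the conf dict is left empty here because the Cassandra input settings (server address, keyspace, column family, and so on) come from the full example file and fall outside this diff:

from pyspark import SparkContext

# Sketch only: fill conf with the Cassandra input settings from the
# full cassandra_inputformat.py example before a real run.
sc = SparkContext(appName="CassandraInputFormat")
conf = {}
cass_rdd = sc.newAPIHadoopRDD(
    "org.apache.cassandra.hadoop.cql3.CqlPagingInputFormat",
    "java.util.Map",
    "java.util.Map",
    keyConverter="org.apache.spark.examples.pythonconverters.CassandraCQLKeyConverter",
    valueConverter="org.apache.spark.examples.pythonconverters.CassandraCQLValueConverter",
    conf=conf)
for (k, v) in cass_rdd.collect():
    print (k, v)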

0 commit comments
