@@ -18,11 +18,12 @@
 package org.apache.spark.sql.kafka010
 
 import java.{util => ju}
-import java.util.concurrent.{ConcurrentLinkedQueue, Executors, TimeUnit}
+import java.util.concurrent.{Executors, TimeUnit}
 
 import scala.collection.mutable
 import scala.util.Random
 
+import org.apache.kafka.clients.producer.ProducerRecord
 import org.apache.kafka.common.serialization.ByteArraySerializer
 
 import org.apache.spark.{SparkConf, SparkException}
@@ -141,13 +142,12 @@ class CachedKafkaProducerStressSuite extends KafkaContinuousTest with KafkaTest
     (1 to numConcurrentProducers).map {
       i => kafkaParamsUniqueMap.put(i, kafkaParams.updated("retries", s"$i").asJava)
     }
-    val toBeReleasedQueue = new ConcurrentLinkedQueue[CachedKafkaProducer]()
 
-    def acquire(i: Int): Unit = {
+    def acquire(i: Int): CachedKafkaProducer = {
       val producer = CachedKafkaProducer.acquire(kafkaParamsUniqueMap(i))
       producer.kafkaProducer // materialize producer for the first time.
       assert(!producer.isClosed, "Acquired producer cannot be closed.")
-      toBeReleasedQueue.add(producer)
+      producer
     }
 
     def release(producer: CachedKafkaProducer): Unit = {
@@ -158,28 +158,23 @@ class CachedKafkaProducerStressSuite extends KafkaContinuousTest with KafkaTest
         }
       }
     }
+    val data = (1 to 100).map(_.toString)
+
     val threadPool = Executors.newFixedThreadPool(numThreads)
     try {
       val futuresAcquire = (1 to 10 * numConcurrentProducers).map { i =>
         threadPool.submit(new Runnable {
           override def run(): Unit = {
-            acquire(i % numConcurrentProducers + 1)
+            val producer = acquire(i % numConcurrentProducers + 1)
+            data.foreach { d =>
+              val record = new ProducerRecord[Array[Byte], Array[Byte]](topic, 0, null, d.getBytes)
+              producer.kafkaProducer.send(record)
+            }
+            release(producer)
           }
         })
       }
-      val futuresRelease = (1 to 10 * numConcurrentProducers).map { i =>
-        val cachedKafkaProducer = toBeReleasedQueue.poll()
-        // 2x release should not corrupt the state of cache.
-        (1 to 2).map { j =>
-          threadPool.submit(new Runnable {
-            override def run(): Unit = {
-              release(cachedKafkaProducer)
-            }
-          })
-        }
-      }
       futuresAcquire.foreach(_.get(1, TimeUnit.MINUTES))
-      futuresRelease.flatten.foreach(_.get(1, TimeUnit.MINUTES))
     } finally {
      threadPool.shutdown()
      CachedKafkaProducer.clear()
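Net effect of the patch: each submitted task now performs a complete acquire/send/release cycle against the producer cache, instead of queuing producers for a separate batch of release futures. For reference, a minimal self-contained sketch of that cycle, under stated assumptions: `CachedKafkaProducer` is the suite's internal cache class, the topic name and bootstrap server are placeholders, and the suite's `release` helper (whose body this diff elides) is stubbed as a no-op.

```scala
import java.util.concurrent.{Executors, TimeUnit}

import org.apache.kafka.clients.producer.ProducerRecord
import org.apache.kafka.common.serialization.ByteArraySerializer

object ProducerCycleSketch {
  // Placeholder config; any reachable broker works for a real run.
  val topic = "stress-test-topic"
  val kafkaParams = new java.util.HashMap[String, Object]()
  kafkaParams.put("bootstrap.servers", "localhost:9092")
  kafkaParams.put("key.serializer", classOf[ByteArraySerializer].getName)
  kafkaParams.put("value.serializer", classOf[ByteArraySerializer].getName)

  // Stub for the suite's local helper; its body is elided in the diff.
  def release(producer: CachedKafkaProducer): Unit = ()

  def main(args: Array[String]): Unit = {
    val data = (1 to 100).map(_.toString)
    val threadPool = Executors.newFixedThreadPool(4)
    try {
      val futures = (1 to 40).map { _ =>
        threadPool.submit(new Runnable {
          override def run(): Unit = {
            // Acquire from the cache and force creation of the underlying producer.
            val producer = CachedKafkaProducer.acquire(kafkaParams)
            producer.kafkaProducer
            // Exercise the producer while it is held.
            data.foreach { d =>
              val record = new ProducerRecord[Array[Byte], Array[Byte]](topic, 0, null, d.getBytes)
              producer.kafkaProducer.send(record)
            }
            // Hand the producer back so other threads can reuse it.
            release(producer)
          }
        })
      }
      futures.foreach(_.get(1, TimeUnit.MINUTES))
    } finally {
      threadPool.shutdown()
      CachedKafkaProducer.clear()
    }
  }
}
```

Holding the producer across the send loop and only then releasing it mirrors what the updated test exercises: concurrent reuse of cached producers that are actually in flight, rather than releases of producers that were never used.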