@@ -33,13 +33,15 @@ import io.ktor.client.plugins.websocket.DefaultClientWebSocketSession
 import io.ktor.websocket.Frame
 import io.ktor.websocket.close
 import io.ktor.websocket.readBytes
+import kotlinx.coroutines.CoroutineName
 import java.util.concurrent.ConcurrentLinkedQueue
 import java.util.concurrent.atomic.AtomicBoolean
 import kotlin.coroutines.CoroutineContext
 import kotlinx.coroutines.CoroutineScope
 import kotlinx.coroutines.Dispatchers
 import kotlinx.coroutines.cancel
 import kotlinx.coroutines.channels.Channel.Factory.UNLIMITED
+import kotlinx.coroutines.delay
 import kotlinx.coroutines.flow.Flow
 import kotlinx.coroutines.flow.buffer
 import kotlinx.coroutines.flow.catch
@@ -49,7 +51,6 @@ import kotlinx.coroutines.flow.onCompletion
 import kotlinx.coroutines.flow.onEach
 import kotlinx.coroutines.isActive
 import kotlinx.coroutines.launch
-import kotlinx.coroutines.yield
 import kotlinx.serialization.ExperimentalSerializationApi
 import kotlinx.serialization.Serializable
 import kotlinx.serialization.encodeToString
@@ -120,7 +121,6 @@ internal constructor(
     functionCallHandler: ((FunctionCallPart) -> FunctionResponsePart)? = null,
     enableInterruptions: Boolean = false,
   ) {
-
     val context = firebaseApp.applicationContext
     if (
       ContextCompat.checkSelfPermission(context, RECORD_AUDIO) != PackageManager.PERMISSION_GRANTED
@@ -137,8 +137,8 @@ internal constructor(
       )
       return@catchAsync
     }
-
-    scope = CoroutineScope(blockingDispatcher + childJob())
+    // TODO: maybe it should be THREAD_PRIORITY_AUDIO anyways for playback and recording (not network though)
+    scope = CoroutineScope(blockingDispatcher + childJob() + CoroutineName("LiveSession Scope"))
     audioHelper = AudioHelper.build()

     recordUserAudio()
@@ -201,7 +201,7 @@ internal constructor(
             )
           }
           ?.let { emit(it.toPublic()) }
-        yield()
+        delay(0)
       }
     }
       .onCompletion { stopAudioConversation() }
@@ -326,7 +326,10 @@ internal constructor(
       ?.listenToRecording()
       ?.buffer(UNLIMITED)
       ?.accumulateUntil(MIN_BUFFER_SIZE)
-      ?.onEach { sendMediaStream(listOf(MediaData(it, "audio/pcm"))) }
+      ?.onEach {
+        sendMediaStream(listOf(MediaData(it, "audio/pcm")))
+        delay(0)
+      }
       ?.catch { throw FirebaseAIException.from(it) }
       ?.launchIn(scope)
   }
@@ -374,7 +377,7 @@ internal constructor(
         if (it.interrupted) {
           playBackQueue.clear()
         } else {
-          println("Sending audio parts")
+          println("Queuing audio parts from model")
           val audioParts = it.content?.parts?.filterIsInstance<InlineDataPart>().orEmpty()
           for (part in audioParts) {
             playBackQueue.add(part.inlineData)
@@ -390,7 +393,7 @@ internal constructor(
           }
         }
       }
-      .launchIn(CoroutineScope(Dispatchers.IO))
+      .launchIn(scope)
   }

   /**
@@ -401,7 +404,7 @@ internal constructor(
    * Launched asynchronously on [scope].
    */
   private fun listenForModelPlayback(enableInterruptions: Boolean = false) {
-    CoroutineScope(Dispatchers.IO).launch {
+    scope.launch {
       while (isActive) {
         val playbackData = playBackQueue.poll()
         if (playbackData == null) {
@@ -410,8 +413,9 @@ internal constructor(
           if (!enableInterruptions) {
             audioHelper?.resumeRecording()
           }
-          yield()
+          delay(0)
         } else {
+          println("Playing audio data")
           /**
            * We pause the recording while the model is speaking to avoid interrupting it because of
            * no echo cancellation
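Note on the `launchIn(scope)` and `scope.launch` changes above: routing the background collectors through the session's own scope, rather than an ad-hoc `CoroutineScope(Dispatchers.IO)`, ties their lifetime to the session, so cancelling the session scope also cancels the audio pipelines. A minimal standalone sketch of that behavior (the names here are illustrative stand-ins, not the SDK's API):

```kotlin
import kotlinx.coroutines.*
import kotlinx.coroutines.flow.*

fun main() = runBlocking {
    // Stand-in for the session scope built above:
    // CoroutineScope(blockingDispatcher + childJob() + CoroutineName("LiveSession Scope"))
    val sessionScope = CoroutineScope(Dispatchers.IO + Job() + CoroutineName("LiveSession Scope"))

    // A collector launched on the session scope, like the recording pipeline.
    flow {
        while (true) {
            emit(Unit)
            delay(100)
        }
    }
        .onEach { println("streaming audio chunk") }
        .launchIn(sessionScope)

    delay(350)
    // Cancelling the session scope stops the collector. A collector launched on
    // an ad-hoc CoroutineScope(Dispatchers.IO) would keep running past this point.
    sessionScope.cancel()
    delay(300) // no further output
}
```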
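For readers outside the SDK: `accumulateUntil(MIN_BUFFER_SIZE)` in the recording pipeline is an internal operator whose implementation is not part of this diff. A plausible sketch of such a chunk-accumulating flow operator, offered as an assumption rather than the SDK's actual code:

```kotlin
import java.io.ByteArrayOutputStream
import kotlinx.coroutines.flow.Flow
import kotlinx.coroutines.flow.flow

// Buffers incoming byte chunks and emits a combined chunk once at least
// minSize bytes have accumulated, so tiny PCM reads aren't sent one by one.
fun Flow<ByteArray>.accumulateUntil(minSize: Int): Flow<ByteArray> = flow {
    var pending = ByteArrayOutputStream()
    collect { chunk ->
        pending.write(chunk)
        if (pending.size() >= minSize) {
            emit(pending.toByteArray())
            pending = ByteArrayOutputStream()
        }
    }
    // Flush whatever is left when the upstream recording flow completes.
    if (pending.size() > 0) emit(pending.toByteArray())
}
```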