Streaming Transcription

Stream transcription results as audio is being processed, providing real-time feedback for voice interfaces.

val audioData: ByteArray = TODO("recorded audio bytes")

RunAnywhere.transcribeStream(
    audioData = audioData,
    options = STTOptions(language = "en"),
    onPartialResult = { partial ->
        // Update UI with partial transcription
        transcriptionTextView.text = partial.transcript
    }
)

Real-Time Microphone Transcription

class LiveTranscriptionViewModel : ViewModel() {
    private val _partialText = MutableStateFlow("")
    val partialText: StateFlow<String> = _partialText.asStateFlow()

    private val _finalText = MutableStateFlow("")
    val finalText: StateFlow<String> = _finalText.asStateFlow()

    init {
        // Keep the exposed flows in sync with SDK transcription events
        viewModelScope.launch {
            RunAnywhere.events.sttEvents.collect { event ->
                when (event.eventType) {
                    STTEventType.PARTIAL_RESULT ->
                        _partialText.value = event.transcript ?: ""
                    STTEventType.TRANSCRIPTION_COMPLETED ->
                        _finalText.value = event.transcript ?: ""
                    else -> Unit
                }
            }
        }
    }

    fun processAudioSamples(samples: FloatArray) {
        viewModelScope.launch {
            // Process streaming audio samples
            RunAnywhere.processStreamingAudio(samples)
        }
    }

    fun stopTranscription() {
        viewModelScope.launch {
            RunAnywhere.stopStreamingTranscription()
        }
    }
}
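
On the UI side, these flows can be collected in a lifecycle-aware way. The sketch below is a minimal example for an Activity; the view and property names (partialTextView, finalTextView) are illustrative, not part of the SDK:
import androidx.lifecycle.Lifecycle
import androidx.lifecycle.lifecycleScope
import androidx.lifecycle.repeatOnLifecycle
import kotlinx.coroutines.launch

lifecycleScope.launch {
    // Collect only while the screen is at least STARTED
    repeatOnLifecycle(Lifecycle.State.STARTED) {
        launch { viewModel.partialText.collect { partialTextView.text = it } }
        launch { viewModel.finalText.collect { finalTextView.text = it } }
    }
}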

Stream Audio Samples

For continuous microphone input:
// Start streaming transcription: feed samples and surface partial results
suspend fun startLiveTranscription(
    audioSamplesFlow: Flow<FloatArray>,
    onPartialResult: (String) -> Unit
) = coroutineScope {
    // Surface partial results from the SDK event system
    launch {
        RunAnywhere.events.sttEvents
            .filter { it.eventType == STTEventType.PARTIAL_RESULT }
            .collect { event -> event.transcript?.let(onPartialResult) }
    }
    // Feed microphone samples to the streaming recognizer
    audioSamplesFlow.collect { samples ->
        RunAnywhere.processStreamingAudio(samples)
    }
}
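
The SDK leaves producing the Flow<FloatArray> to the app. One possible source is sketched below using Android's AudioRecord; the microphoneSamples helper is our own, not part of RunAnywhere, and the app must hold the RECORD_AUDIO permission:
import android.media.AudioFormat
import android.media.AudioRecord
import android.media.MediaRecorder
import kotlinx.coroutines.Dispatchers
import kotlinx.coroutines.flow.Flow
import kotlinx.coroutines.flow.flow
import kotlinx.coroutines.flow.flowOn

// Emits ~50 ms chunks of mono float samples at 16 kHz
fun microphoneSamples(sampleRate: Int = 16_000): Flow<FloatArray> = flow {
    val minBuffer = AudioRecord.getMinBufferSize(
        sampleRate,
        AudioFormat.CHANNEL_IN_MONO,
        AudioFormat.ENCODING_PCM_FLOAT
    )
    val recorder = AudioRecord(
        MediaRecorder.AudioSource.MIC,
        sampleRate,
        AudioFormat.CHANNEL_IN_MONO,
        AudioFormat.ENCODING_PCM_FLOAT,
        minBuffer
    )
    val chunk = FloatArray(sampleRate / 20) // 50 ms of audio per emission
    recorder.startRecording()
    try {
        while (true) {
            val read = recorder.read(chunk, 0, chunk.size, AudioRecord.READ_BLOCKING)
            if (read > 0) emit(chunk.copyOf(read))
        }
    } finally {
        recorder.stop()
        recorder.release()
    }
}.flowOn(Dispatchers.IO)

A flow built this way can be passed as the audioSamplesFlow argument of startLiveTranscription above.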

Subscribe to STT Events

Use the event system for streaming updates:
lifecycleScope.launch {
    RunAnywhere.events.sttEvents.collect { event ->
        when (event.eventType) {
            STTEventType.PARTIAL_RESULT -> {
                event.transcript?.let { partial ->
                    partialTextView.text = partial
                }
            }
            STTEventType.TRANSCRIPTION_COMPLETED -> {
                event.transcript?.let { final ->
                    finalTextView.text = final
                }
            }
            STTEventType.TRANSCRIPTION_FAILED -> {
                showError(event.error ?: "Transcription failed")
            }
            else -> Unit // ignore other STT event types
        }
    }
}
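
If only the transcript text is needed, the event stream can be narrowed with standard Flow operators. The partialTranscripts helper below is a convenience sketch of ours, not part of the RunAnywhere API:
import kotlinx.coroutines.flow.Flow
import kotlinx.coroutines.flow.filter
import kotlinx.coroutines.flow.mapNotNull

// A flow of partial transcript strings, dropping all other STT events
fun partialTranscripts(): Flow<String> =
    RunAnywhere.events.sttEvents
        .filter { it.eventType == STTEventType.PARTIAL_RESULT }
        .mapNotNull { it.transcript }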
Complete Example

Putting the pieces together, a voice search screen that loads an STT model, subscribes to transcription events, and streams microphone samples:
class VoiceSearchActivity : AppCompatActivity() {
    private lateinit var audioRecorder: AudioRecorder
    private var isRecording = false

    override fun onCreate(savedInstanceState: Bundle?) {
        super.onCreate(savedInstanceState)

        // Load STT model
        lifecycleScope.launch {
            RunAnywhere.loadSTTModel("whisper-tiny")
        }

        // Subscribe to transcription events
        lifecycleScope.launch {
            RunAnywhere.events.sttEvents.collect { event ->
                if (event.eventType == STTEventType.PARTIAL_RESULT) {
                    searchInput.setText(event.transcript)
                }
            }
        }

        micButton.setOnClickListener {
            if (isRecording) {
                stopRecording()
            } else {
                startRecording()
            }
        }
    }

    private fun startRecording() {
        isRecording = true
        audioRecorder.start { samples ->
            lifecycleScope.launch {
                RunAnywhere.processStreamingAudio(samples)
            }
        }
    }

    private fun stopRecording() {
        isRecording = false
        audioRecorder.stop()
        lifecycleScope.launch {
            RunAnywhere.stopStreamingTranscription()
        }
    }
}

Performance Tips

For optimal streaming performance:

- Process audio in chunks of 30-100 ms
- Use a 16 kHz sample rate for Whisper models
- Consider using VAD to detect speech segments (a rough sketch follows)
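
As a rough stand-in for a dedicated VAD, the sketch below gates chunks with a simple RMS energy check before forwarding them; the isSpeech helper and the 0.01 threshold are assumptions for illustration, not SDK features:
import kotlin.math.sqrt

// Crude energy-based voice activity check over one chunk of samples
fun isSpeech(samples: FloatArray, threshold: Double = 0.01): Boolean {
    if (samples.isEmpty()) return false
    var sumSquares = 0.0
    for (s in samples) sumSquares += s * s
    return sqrt(sumSquares / samples.size) > threshold
}

// Forward only chunks that look like speech
suspend fun processWithVad(samples: FloatArray) {
    if (isSpeech(samples)) {
        RunAnywhere.processStreamingAudio(samples)
    }
}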

Recommended approach by use case:

Use Case             Recommended Approach
Pre-recorded audio   transcribe()
Live microphone      processStreamingAudio()
Real-time dictation  Streaming + VAD