Audio through QAudioOutput
-
I'm using FFmpeg and Qt to decode audio in a separate thread and play it using QAudioOutput. I've implemented a custom QIODevice subclass (AudioBufferDevice) to feed audio data to QAudioOutput.
Although the decoding thread is actively sending audio data via enqueueData(), the readData() function in AudioBufferDevice is never called, and no audio is played.
class AudioBufferDevice : public QIODevice {
    Q_OBJECT

private:
    QByteArray buffer;
    mutable QMutex mutex;

public:
    explicit AudioBufferDevice(QObject* parent = nullptr) : QIODevice(parent) {}

    void openForRead() { QIODevice::open(QIODevice::ReadOnly); }

    void enqueueData(const QByteArray& data) {
        QMutexLocker locker(&mutex);
        buffer.append(data);
    }

    qint64 readData(char* data, qint64 maxlen) override {
        QMutexLocker locker(&mutex);
        qint64 bytesToRead = qMin(maxlen, qint64(buffer.size()));
        if (bytesToRead <= 0)
            return 0;
        memcpy(data, buffer.constData(), bytesToRead);
        buffer.remove(0, bytesToRead);
        qDebug() << "Read" << bytesToRead << "bytes from buffer";
        return bytesToRead;
    }

    qint64 writeData(const char*, qint64) override { return -1; }

    qint64 bytesAvailable() const override {
        QMutexLocker locker(&mutex);
        return buffer.size() + QIODevice::bytesAvailable();
    }
};

class AudioDecoder : public QThread {
    Q_OBJECT

public:
    AudioDecoder(AVFormatContext* fmtCtx, AVCodecContext* codecCtx, int streamIdx, QObject* parent = nullptr)
        : QThread(parent), fmtCtx(fmtCtx), codecCtx(codecCtx), streamIndex(streamIdx) {}

    void run() override {
        SwrContext* swr_ctx = swr_alloc();
        AVChannelLayout in_ch_layout = codecCtx->ch_layout;
        AVChannelLayout out_ch_layout;
        av_channel_layout_default(&out_ch_layout, 2);
        swr_alloc_set_opts2(&swr_ctx,
                            &out_ch_layout, AV_SAMPLE_FMT_S16, 44100,
                            &in_ch_layout, codecCtx->sample_fmt, codecCtx->sample_rate,
                            0, nullptr);
        swr_init(swr_ctx);

        // QAudio setup
        QAudioFormat format;
        format.setSampleRate(44100);
        format.setChannelCount(2);
        format.setSampleSize(16);
        format.setCodec("audio/pcm");
        format.setByteOrder(QAudioFormat::LittleEndian);
        format.setSampleType(QAudioFormat::SignedInt);

        QAudioDeviceInfo device = QAudioDeviceInfo::defaultOutputDevice();
        if (!device.isFormatSupported(format))
            format = device.nearestFormat(format);

        QAudioOutput* audioOutput = new QAudioOutput(device, format);
        AudioBufferDevice* bufferDevice = new AudioBufferDevice();
        bufferDevice->openForRead();

        // Pre-fill buffer with silence to avoid starvation
        QByteArray silence(44100 * 2 * 2 / 10, 0); // 100ms of silence
        bufferDevice->enqueueData(silence);

        connect(audioOutput, &QAudioOutput::stateChanged, [](QAudio::State state) {
            qDebug() << "Audio output state changed:" << state;
        });

        audioOutput->start(bufferDevice);

        while (!isInterruptionRequested()) {
            AVPacket* pkt = av_packet_alloc();
            AVFrame* frame = av_frame_alloc();
            if (av_read_frame(fmtCtx, pkt) >= 0) {
                if (pkt->stream_index == streamIndex) {
                    if (avcodec_send_packet(codecCtx, pkt) == 0) {
                        while (avcodec_receive_frame(codecCtx, frame) == 0) {
                            int out_channels = 2;
                            int out_sample_rate = 44100;
                            AVSampleFormat out_format = AV_SAMPLE_FMT_S16;

                            int max_dst_nb_samples = av_rescale_rnd(
                                swr_get_delay(swr_ctx, codecCtx->sample_rate) + frame->nb_samples,
                                out_sample_rate, codecCtx->sample_rate, AV_ROUND_UP);

                            uint8_t* out_buffer = nullptr;
                            int out_line_size = 0;
                            av_samples_alloc(&out_buffer, &out_line_size, out_channels,
                                             max_dst_nb_samples, out_format, 0);

                            int convertedSamples = swr_convert(
                                swr_ctx, &out_buffer, max_dst_nb_samples,
                                (const uint8_t**)frame->data, frame->nb_samples);

                            int dataSize = av_samples_get_buffer_size(
                                nullptr, out_channels, convertedSamples, out_format, 1);

                            qDebug() << "Writing Audio Samples +++++++++++++++++++++++++++++ " << dataSize;

                            QByteArray audioBytes(reinterpret_cast<const char*>(out_buffer), dataSize);
                            bufferDevice->enqueueData(audioBytes);
                            // QThread::msleep(20);
                            av_freep(&out_buffer);
                        }
                    }
                }
            }
            av_packet_free(&pkt);
            av_frame_free(&frame);
        }

        swr_free(&swr_ctx);
    }

private:
    AVFormatContext* fmtCtx;
    AVCodecContext* codecCtx;
    int streamIndex;
};
void StartAudioDecoding()
{
AudioDecoder* audioThread = new AudioDecoder(audioFmtCtx, inAudioCtx, audioStreamIndex);
audioThread->start();
}
-
Hi,
How exactly are you using that class of yours?
Which version of Qt?
-
@SGaist said in Audio through QAudioOutput:
Hi,
How exactly are you using that class of yours?
Which version of Qt?
@SGaist Hi, thanks for the reply!
I’m using Qt version 5.14
I'm decoding audio in a separate thread using FFmpeg. Once I decode and resample the audio frames to raw PCM (16-bit, 2 channels, 44100 Hz), I pass the data to my custom AudioBufferDevice class using enqueueData().
Here's how I use the class:
I create the QAudioFormat and QAudioOutput like this:
QAudioFormat format;
format.setSampleRate(44100);
format.setChannelCount(2);
format.setSampleSize(16);
format.setCodec("audio/pcm");
format.setSampleType(QAudioFormat::SignedInt);
format.setByteOrder(QAudioFormat::LittleEndian);

audioOutput = new QAudioOutput(format, this);
audioBuffer = new AudioBufferDevice(this);
audioBuffer->open(QIODevice::ReadOnly);
audioOutput->start(audioBuffer);

In my decoding thread, after decoding and resampling, I do:
audioBuffer->enqueueData(QByteArray((const char*)data, dataSize));
@summit said in Audio through QAudioOutput:
In my decoding thread, after decoding and resampling, I do:
You must not access QObjects from any thread other than the one they live in. From the comments you wrote (the code is neither readable nor complete) it looks like you create QAudioOutput and the other objects in the main thread and then access them from a secondary thread.
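For readers who land here with the same problem, here is a minimal sketch of one way to follow that advice; it is an illustration added by the editor rather than code from this thread, and the pcmReady signal is an assumed name. The idea is to keep QAudioOutput and the AudioBufferDevice in the main thread and hand each decoded PCM chunk over through a queued signal/slot connection, so enqueueData() always runs in the thread the device lives in:

// In AudioDecoder (the worker thread), declare a signal instead of touching
// the buffer device directly (pcmReady is a hypothetical name):
//     signals:
//         void pcmReady(const QByteArray& pcm);
// and in run(), replace bufferDevice->enqueueData(audioBytes); with:
//     emit pcmReady(audioBytes);

// In the main thread, where audioOutput and audioBuffer were created, connect
// the signal with a queued connection; Qt then delivers each chunk in the
// main thread, where those QObjects live:
QObject::connect(decoder, &AudioDecoder::pcmReady,
                 audioBuffer, &AudioBufferDevice::enqueueData,
                 Qt::QueuedConnection);

QByteArray is a built-in metatype, so it can cross the thread boundary in a queued connection without any extra registration.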
-
@Christian-Ehrlicher This was the issue, thank you very much. It is solved now.