Qt+FFmpeg录屏录音

2023-05-16

欢迎加QQ群309798848交流C/C++/linux/Qt/音视频/OpenCV

源码:Qt+FFmpeg录屏录音

NanaRecorder

之前的录屏项目ScreenCapture存在音视频同步问题,所以重写了第二个版本:NanaRecorder。
录制桌面和系统声音(扬声器)。

录制流程

主线程:UI线程,调用Recorder接口
采集线程:采集到帧后->格式转换/重采样->写进FIFO
编码线程:循环从FIFO读取帧->编码->写进文件

环境依赖

- VS2019
- Qt5.12.9 
- FFmpeg4.1

UI


以下是第一版代码:ScreenCapture

  • 录屏功能支持:开始,暂停,结束。
  • 使用Qt+C++封装FFmpeg API,尽量使用新的编解码接口(注:代码中仍保留了 av_register_all、av_free_packet 等已废弃接口的调用,后续可替换)。
  • 主线程:Qt GUI线程,以后可接入录屏UI。
  • MuxThreadProc:复用线程,启动音视频采集线程。打开输入/输出流,然后从fifoBuffer读取帧,编码生成各种格式视频。
  • ScreenRecordThreadProc:视频采集线程,从输入流采集帧,缩放后写入fifoBuffer。
  • SoundRecordThreadProc:音频采集线程,从输入流采集样本,重采样后写入fifoBuffer。

ScreenRecordImpl.h

#pragma once
#include <Windows.h>
#include <atomic>
#include <QObject>
#include <QString>
#include <QMutex>
#include <condition_variable>

#ifdef	__cplusplus
extern "C"
{
#endif
struct AVFormatContext;
struct AVCodecContext;
struct AVCodec;
struct AVFifoBuffer;
struct AVAudioFifo;
struct AVFrame;
struct SwsContext;
struct SwrContext;
#ifdef __cplusplus
};
#endif

class ScreenRecordImpl : public QObject
{
	Q_OBJECT
private:
	enum RecordState {
		NotStarted,
		Started,
		Paused,
		Stopped,
		Unknown,
	};
public:
	ScreenRecordImpl(QObject * parent = Q_NULLPTR);
	void Init(const QVariantMap& map);

	private slots :
	void Start();
	void Pause();
	void Stop();

private:
	//从fifobuf读取音视频帧,写入输出流,复用,生成文件
	void MuxThreadProc();
	//从视频输入流读取帧,写入fifobuf
	void ScreenRecordThreadProc();
	//从音频输入流读取帧,写入fifobuf
	void SoundRecordThreadProc();
	int OpenVideo();
	int OpenAudio();
	int OpenOutput();
	QString GetSpeakerDeviceName();
	//获取麦克风设备名称
	QString GetMicrophoneDeviceName();
	AVFrame* AllocAudioFrame(AVCodecContext* c, int nbSamples);
	void InitVideoBuffer();
	void InitAudioBuffer();
	void FlushVideoDecoder();
	void FlushAudioDecoder();
	//void FlushVideoEncoder();
	//void FlushAudioEncoder();
	void FlushEncoders();
	void Release();

private:
	QString				m_filePath;
	int					m_width;
	int					m_height;
	int					m_fps;
	int					m_audioBitrate;

	int m_vIndex;		//输入视频流索引
	int m_aIndex;		//输入音频流索引
	int m_vOutIndex;	//输出视频流索引
	int m_aOutIndex;	//输出音频流索引
	AVFormatContext		*m_vFmtCtx;
	AVFormatContext		*m_aFmtCtx;
	AVFormatContext		*m_oFmtCtx;
	AVCodecContext		*m_vDecodeCtx;
	AVCodecContext		*m_aDecodeCtx;
	AVCodecContext		*m_vEncodeCtx;
	AVCodecContext		*m_aEncodeCtx;
	SwsContext			*m_swsCtx;
	SwrContext			*m_swrCtx;
	AVFifoBuffer		*m_vFifoBuf;
	AVAudioFifo			*m_aFifoBuf;

	AVFrame				*m_vOutFrame;
	uint8_t				*m_vOutFrameBuf;
	int					m_vOutFrameSize;

	int					m_nbSamples;
	RecordState			m_state;
	std::condition_variable m_cvNotPause;	//当点击暂停的时候,两个采集线程挂起
	std::mutex				m_mtxPause;
	std::condition_variable m_cvVBufNotFull;
	std::condition_variable m_cvVBufNotEmpty;
	std::mutex				m_mtxVBuf;
	std::condition_variable m_cvABufNotFull;
	std::condition_variable m_cvABufNotEmpty;
	std::mutex				m_mtxABuf;
	int64_t					m_vCurPts;
	int64_t					m_aCurPts;
};

ScreenRecordImpl.cpp

#ifdef	__cplusplus
extern "C"
{
#endif
#include "libavcodec/avcodec.h"
#include "libavformat/avformat.h"
#include "libswscale/swscale.h"
#include "libavdevice/avdevice.h"
#include "libavutil/audio_fifo.h"
#include "libavutil/imgutils.h"
#include "libswresample/swresample.h"
#include <libavutil\avassert.h>
#ifdef __cplusplus
};
#endif

#include "ScreenRecordImpl.h"
#include <QDebug>
#include <QAudioDeviceInfo>
#include <thread>
#include <fstream>

#include <dshow.h>

using namespace std;

// Diagnostic frame counters, logged at shutdown to spot A/V drift or drops.
// Written from the capture/mux threads.
int g_vCollectFrameCnt = 0;	// video frames captured
int g_vEncodeFrameCnt = 0;	// video packets encoded/written
int g_aCollectFrameCnt = 0;	// audio frames captured
int g_aEncodeFrameCnt = 0;	// audio packets encoded/written

// Construct with safe defaults. fix: m_width/m_height/m_audioBitrate,
// m_vOutIndex/m_aOutIndex, m_vOutFrame/m_vOutFrameBuf/m_vOutFrameSize and
// m_nbSamples were left uninitialized; Release() reads m_vOutFrame and
// m_vOutFrameBuf, which was undefined behavior before Start() ran.
ScreenRecordImpl::ScreenRecordImpl(QObject * parent) :
	QObject(parent)
	, m_width(0), m_height(0)
	, m_fps(30)
	, m_audioBitrate(0)
	, m_vIndex(-1), m_aIndex(-1)
	, m_vOutIndex(-1), m_aOutIndex(-1)
	, m_vFmtCtx(nullptr), m_aFmtCtx(nullptr), m_oFmtCtx(nullptr)
	, m_vDecodeCtx(nullptr), m_aDecodeCtx(nullptr)
	, m_vEncodeCtx(nullptr), m_aEncodeCtx(nullptr)
	, m_swsCtx(nullptr)
	, m_swrCtx(nullptr)
	, m_vFifoBuf(nullptr), m_aFifoBuf(nullptr)
	, m_vOutFrame(nullptr), m_vOutFrameBuf(nullptr), m_vOutFrameSize(0)
	, m_nbSamples(0)
	, m_state(RecordState::NotStarted)
	, m_vCurPts(0), m_aCurPts(0)
{
}

void ScreenRecordImpl::Init(const QVariantMap& map)
{
	m_filePath = map["filePath"].toString();
	m_width = map["width"].toInt();
	m_height = map["height"].toInt();
	m_fps = map["fps"].toInt();
	m_audioBitrate = map["audioBitrate"].toInt();
}

// Slot: start a new recording, or resume a paused one.
void ScreenRecordImpl::Start()
{
	if (m_state == RecordState::NotStarted)
	{
		qDebug() << "start record";
		m_state = RecordState::Started;
		// The muxer thread opens inputs/output and spawns the capture threads.
		std::thread muxThread(&ScreenRecordImpl::MuxThreadProc, this);
		muxThread.detach();
	}
	else if (m_state == RecordState::Paused)
	{
		qDebug() << "continue record";
		m_state = RecordState::Started;
		// fix: both capture threads can be blocked on m_cvNotPause, but
		// notify_one() woke only one of them — use notify_all().
		m_cvNotPause.notify_all();
	}
}

// Slot: pause the recording. Only flips the state flag; the capture threads
// observe it and block on m_cvNotPause until Start() or Stop() is called.
void ScreenRecordImpl::Pause()
{
	qDebug() << "pause record";
	m_state = RecordState::Paused;
}

// Slot: stop the recording. If we were paused, wake the capture threads so
// they can observe the Stopped state and exit.
void ScreenRecordImpl::Stop()
{
	qDebug() << "stop record";
	RecordState state = m_state;
	m_state = RecordState::Stopped;
	if (state == RecordState::Paused)
		// fix: both capture threads may be waiting; notify_one() woke only one.
		m_cvNotPause.notify_all();
}

int ScreenRecordImpl::OpenVideo()
{
	int ret = -1;
	AVInputFormat *ifmt = av_find_input_format("gdigrab");
	AVDictionary *options = nullptr;
	AVCodec *decoder = nullptr;
	av_dict_set(&options, "framerate", QString::number(m_fps).toStdString().c_str(), NULL);

	if (avformat_open_input(&m_vFmtCtx, "desktop", ifmt, &options) != 0)
	{
		qDebug() << "Cant not open video input stream";
		return -1;
	}
	if (avformat_find_stream_info(m_vFmtCtx, nullptr) < 0)
	{
		printf("Couldn't find stream information.(无法获取视频流信息)\n");
		return -1;
	}
	for (int i = 0; i < m_vFmtCtx->nb_streams; ++i)
	{
		AVStream *stream = m_vFmtCtx->streams[i];
		if (stream->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
		{
			decoder = avcodec_find_decoder(stream->codecpar->codec_id);
			if (decoder == nullptr)
			{
				printf("Codec not found.(没有找到解码器)\n");
				return -1;
			}
			//从视频流中拷贝参数到codecCtx
			m_vDecodeCtx = avcodec_alloc_context3(decoder);
			if ((ret = avcodec_parameters_to_context(m_vDecodeCtx, stream->codecpar)) < 0)
			{
				qDebug() << "Video avcodec_parameters_to_context failed,error code: " << ret;
				return -1;
			}
			m_vIndex = i;
			break;
		}
	}
	if (avcodec_open2(m_vDecodeCtx, decoder, nullptr) < 0)
	{
		printf("Could not open codec.(无法打开解码器)\n");
		return -1;
	}

	m_swsCtx = sws_getContext(m_vDecodeCtx->width, m_vDecodeCtx->height, m_vDecodeCtx->pix_fmt,
		m_width, m_height, AV_PIX_FMT_YUV420P, SWS_FAST_BILINEAR, nullptr, nullptr, nullptr);
	return 0;
}

// Convert a wide string to a freshly av_malloc()'d UTF-8 string.
// The caller owns the returned buffer (release with av_free).
// Returns NULL when the allocation fails.
static char *dup_wchar_to_utf8(wchar_t *w)
{
	const int len = WideCharToMultiByte(CP_UTF8, 0, w, -1, 0, 0, 0, 0);
	char *utf8 = (char *)av_malloc(len);
	if (utf8)
		WideCharToMultiByte(CP_UTF8, 0, w, -1, utf8, len, 0, 0);
	return utf8;
}

// Return 1 if the encoder advertises support for sample_fmt, 0 otherwise.
// codec->sample_fmts is a list terminated by AV_SAMPLE_FMT_NONE.
static int check_sample_fmt(const AVCodec *codec, enum AVSampleFormat sample_fmt)
{
	for (const enum AVSampleFormat *fmt = codec->sample_fmts; *fmt != AV_SAMPLE_FMT_NONE; ++fmt)
	{
		if (*fmt == sample_fmt)
			return 1;
	}
	return 0;
}

// Open the dshow audio capture input (microphone) and set up its decoder.
// Returns 0 on success, -1 on failure.
int ScreenRecordImpl::OpenAudio()
{
	int ret = -1;
	AVCodec *decoder = nullptr;
	qDebug() << GetMicrophoneDeviceName();

	AVInputFormat *ifmt = av_find_input_format("dshow");
	QString audioDeviceName = "audio=" + GetMicrophoneDeviceName();

	if (avformat_open_input(&m_aFmtCtx, audioDeviceName.toStdString().c_str(), ifmt, nullptr) < 0)
	{
		qDebug() << "Can not open audio input stream";
		return -1;
	}
	if (avformat_find_stream_info(m_aFmtCtx, nullptr) < 0)
		return -1;

	for (unsigned int i = 0; i < m_aFmtCtx->nb_streams; ++i)
	{
		AVStream * stream = m_aFmtCtx->streams[i];
		if (stream->codecpar->codec_type == AVMEDIA_TYPE_AUDIO)
		{
			decoder = avcodec_find_decoder(stream->codecpar->codec_id);
			if (decoder == nullptr)
			{
				printf("Codec not found.(没有找到解码器)\n");
				return -1;
			}
			// Copy the audio stream parameters into a fresh decoder context.
			// (Previous comment wrongly said "video stream".)
			m_aDecodeCtx = avcodec_alloc_context3(decoder);
			if ((ret = avcodec_parameters_to_context(m_aDecodeCtx, stream->codecpar)) < 0)
			{
				qDebug() << "Audio avcodec_parameters_to_context failed,error code: " << ret;
				return -1;
			}
			m_aIndex = i;
			break;
		}
	}
	if (m_aIndex == -1 || nullptr == m_aDecodeCtx)
	{
		// fix: previously fell through with a null context and crashed in avcodec_open2
		qDebug() << "No audio stream found in input";
		return -1;
	}
	if (0 > avcodec_open2(m_aDecodeCtx, decoder, NULL))
	{
		printf("can not find or open audio decoder!\n");
		return -1;
	}
	return 0;
}

int ScreenRecordImpl::OpenOutput()
{
	int ret = -1;
	AVStream *vStream = nullptr, *aStream = nullptr;
	const char *outFileName = "test.mp4";
	ret = avformat_alloc_output_context2(&m_oFmtCtx, nullptr, nullptr, outFileName);
	if (ret < 0)
	{
		qDebug() << "avformat_alloc_output_context2 failed";
		return -1;
	}

	if (m_vFmtCtx->streams[m_vIndex]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
	{
		vStream = avformat_new_stream(m_oFmtCtx, nullptr);
		if (!vStream)
		{
			printf("can not new stream for output!\n");
			return -1;
		}
		//AVFormatContext第一个创建的流索引是0,第二个创建的流索引是1
		m_vOutIndex = vStream->index;
		vStream->time_base = AVRational{ 1, m_fps };

		m_vEncodeCtx = avcodec_alloc_context3(NULL);
		if (nullptr == m_vEncodeCtx)
		{
			qDebug() << "avcodec_alloc_context3 failed";
			return -1;
		}
		m_vEncodeCtx->width = m_width;
		m_vEncodeCtx->height = m_height;
		m_vEncodeCtx->codec_type = AVMEDIA_TYPE_VIDEO;
		m_vEncodeCtx->time_base.num = 1;
		m_vEncodeCtx->time_base.den = m_fps;
		m_vEncodeCtx->pix_fmt = AV_PIX_FMT_YUV420P;
		m_vEncodeCtx->codec_id = AV_CODEC_ID_H264;
		m_vEncodeCtx->bit_rate = 800 * 1000;
		m_vEncodeCtx->rc_max_rate = 800 * 1000;
		m_vEncodeCtx->rc_buffer_size = 500 * 1000;
		//设置图像组层的大小, gop_size越大,文件越小 
		m_vEncodeCtx->gop_size = 30;
		m_vEncodeCtx->max_b_frames = 3;
		 //设置h264中相关的参数,不设置avcodec_open2会失败
		m_vEncodeCtx->qmin = 10;	//2
		m_vEncodeCtx->qmax = 31;	//31
		m_vEncodeCtx->max_qdiff = 4;
		m_vEncodeCtx->me_range = 16;	//0	
		m_vEncodeCtx->max_qdiff = 4;	//3	
		m_vEncodeCtx->qcompress = 0.6;	//0.5

		//查找视频编码器
		AVCodec *encoder;
		encoder = avcodec_find_encoder(m_vEncodeCtx->codec_id);
		if (!encoder)
		{
			qDebug() << "Can not find the encoder, id: " << m_vEncodeCtx->codec_id;
			return -1;
		}
		m_vEncodeCtx->codec_tag = 0;
		//正确设置sps/pps
		m_vEncodeCtx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
		//打开视频编码器
		ret = avcodec_open2(m_vEncodeCtx, encoder, nullptr);
		if (ret < 0)
		{
			qDebug() << "Can not open encoder id: " << encoder->id << "error code: " << ret;
			return -1;
		}
		//将codecCtx中的参数传给输出流
		ret = avcodec_parameters_from_context(vStream->codecpar, m_vEncodeCtx);
		if (ret < 0)
		{
			qDebug() << "Output avcodec_parameters_from_context,error code:" << ret;
			return -1;
		}
	}
	if (m_aFmtCtx->streams[m_aIndex]->codecpar->codec_type == AVMEDIA_TYPE_AUDIO)
	{
		aStream = avformat_new_stream(m_oFmtCtx, NULL);
		if (!aStream)
		{
			printf("can not new audio stream for output!\n");
			return -1;
		}
		m_aOutIndex = aStream->index;

		AVCodec *encoder = avcodec_find_encoder(m_oFmtCtx->oformat->audio_codec);
		if (!encoder)
		{
			qDebug() << "Can not find audio encoder, id: " << m_oFmtCtx->oformat->audio_codec;
			return -1;
		}
		m_aEncodeCtx = avcodec_alloc_context3(encoder);
		if (nullptr == m_vEncodeCtx)
		{
			qDebug() << "audio avcodec_alloc_context3 failed";
			return -1;
		}
		m_aEncodeCtx->sample_fmt = encoder->sample_fmts ? encoder->sample_fmts[0] : AV_SAMPLE_FMT_FLTP;
		m_aEncodeCtx->bit_rate = m_audioBitrate;
		m_aEncodeCtx->sample_rate = 44100;
		if (encoder->supported_samplerates) 
		{
			m_aEncodeCtx->sample_rate = encoder->supported_samplerates[0];
			for (int i = 0; encoder->supported_samplerates[i]; ++i)
			{
				if (encoder->supported_samplerates[i] == 44100)
					m_aEncodeCtx->sample_rate = 44100;
			}
		}
		m_aEncodeCtx->channels = av_get_channel_layout_nb_channels(m_aEncodeCtx->channel_layout);
		m_aEncodeCtx->channel_layout = AV_CH_LAYOUT_STEREO;
		if (encoder->channel_layouts) 
		{
			m_aEncodeCtx->channel_layout = encoder->channel_layouts[0];
			for (int i = 0; encoder->channel_layouts[i]; ++i) 
			{
				if (encoder->channel_layouts[i] == AV_CH_LAYOUT_STEREO)
					m_aEncodeCtx->channel_layout = AV_CH_LAYOUT_STEREO;
			}
		}
		m_aEncodeCtx->channels = av_get_channel_layout_nb_channels(m_aEncodeCtx->channel_layout);
		aStream->time_base = AVRational{ 1, m_aEncodeCtx->sample_rate };

		m_aEncodeCtx->codec_tag = 0;
		m_aEncodeCtx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;

		if (!check_sample_fmt(encoder, m_aEncodeCtx->sample_fmt)) 
		{
			qDebug() << "Encoder does not support sample format " << av_get_sample_fmt_name(m_aEncodeCtx->sample_fmt);
			return -1;
		}

		//打开音频编码器,打开后frame_size被设置
		ret = avcodec_open2(m_aEncodeCtx, encoder, 0);
		if (ret < 0)
		{
			qDebug() << "Can not open the audio encoder, id: " << encoder->id << "error code: " << ret;
			return -1;
		}
		//将codecCtx中的参数传给音频输出流
		ret = avcodec_parameters_from_context(aStream->codecpar, m_aEncodeCtx);
		if (ret < 0)
		{
			qDebug() << "Output audio avcodec_parameters_from_context,error code:" << ret;
			return -1;
		}

		m_swrCtx = swr_alloc();
		if (!m_swrCtx)
		{
			qDebug() << "swr_alloc failed";
			return -1;
		}
		av_opt_set_int(m_swrCtx, "in_channel_count", m_aDecodeCtx->channels, 0);	//2
		av_opt_set_int(m_swrCtx, "in_sample_rate", m_aDecodeCtx->sample_rate, 0);	//44100
		av_opt_set_sample_fmt(m_swrCtx, "in_sample_fmt", m_aDecodeCtx->sample_fmt, 0);	//AV_SAMPLE_FMT_S16
		av_opt_set_int(m_swrCtx, "out_channel_count", m_aEncodeCtx->channels, 0);	//2
		av_opt_set_int(m_swrCtx, "out_sample_rate", m_aEncodeCtx->sample_rate, 0);	//44100
		av_opt_set_sample_fmt(m_swrCtx, "out_sample_fmt", m_aEncodeCtx->sample_fmt, 0);	//AV_SAMPLE_FMT_FLTP

		if ((ret = swr_init(m_swrCtx)) < 0) 
		{
			qDebug() << "swr_init failed";
			return -1;
		}
	}

	//打开输出文件
	if (!(m_oFmtCtx->oformat->flags & AVFMT_NOFILE))
	{
		if (avio_open(&m_oFmtCtx->pb, outFileName, AVIO_FLAG_WRITE) < 0)
		{
			printf("can not open output file handle!\n");
			return -1;
		}
	}
	//写文件头
	if (avformat_write_header(m_oFmtCtx, nullptr) < 0)
	{
		printf("can not write the header of the output file!\n");
		return -1;
	}
	return 0;
}

// Enumerate DirectShow audio renderer (speaker) devices and return the
// FriendlyName of the last one found, or "" on failure.
QString ScreenRecordImpl::GetSpeakerDeviceName()
{
	char sName[256] = { 0 };
	QString speaker = "";
	::CoInitialize(NULL);

	ICreateDevEnum* pCreateDevEnum = NULL;	// enumerates all speaker devices
	HRESULT hr = CoCreateInstance(CLSID_SystemDeviceEnum,
		NULL,
		CLSCTX_INPROC_SERVER,
		IID_ICreateDevEnum,
		(void**)&pCreateDevEnum);
	if (FAILED(hr) || !pCreateDevEnum)	// fix: hr was never checked before use
	{
		::CoUninitialize();
		return "";
	}

	IEnumMoniker* pEm = NULL;
	hr = pCreateDevEnum->CreateClassEnumerator(CLSID_AudioRendererCategory, &pEm, 0);
	if (hr != NOERROR)
	{
		pCreateDevEnum->Release();	// fix: interface was leaked on this path
		::CoUninitialize();
		return "";
	}

	pEm->Reset();
	ULONG cFetched;
	IMoniker *pM = NULL;
	while (hr = pEm->Next(1, &pM, &cFetched), hr == S_OK)
	{
		IPropertyBag* pBag = NULL;
		hr = pM->BindToStorage(0, 0, IID_IPropertyBag, (void**)&pBag);
		if (SUCCEEDED(hr))
		{
			VARIANT var;
			var.vt = VT_BSTR;
			// Other properties (e.g. Description) exist; FriendlyName suffices here.
			hr = pBag->Read(L"FriendlyName", &var, NULL);
			if (hr == NOERROR)
			{
				// Convert the device name to the local 8-bit code page.
				WideCharToMultiByte(CP_ACP, 0, var.bstrVal, -1, sName, 256, "", NULL);
				speaker = QString::fromLocal8Bit(sName);
				SysFreeString(var.bstrVal);
			}
			pBag->Release();
		}
		pM->Release();
	}
	// fix: both COM interfaces were previously nulled without Release() (leak).
	pEm->Release();
	pCreateDevEnum->Release();
	::CoUninitialize();
	return speaker;
}

// Enumerate DirectShow audio capture (microphone) devices and return the
// FriendlyName of the last one found, or "" on failure.
QString ScreenRecordImpl::GetMicrophoneDeviceName()
{
	char sName[256] = { 0 };
	QString capture = "";
	::CoInitialize(NULL);

	ICreateDevEnum* pCreateDevEnum = NULL;	// enumerates all audio capture devices
	HRESULT hr = CoCreateInstance(CLSID_SystemDeviceEnum,
		NULL,
		CLSCTX_INPROC_SERVER,
		IID_ICreateDevEnum,
		(void**)&pCreateDevEnum);
	if (FAILED(hr) || !pCreateDevEnum)	// fix: hr was never checked before use
	{
		::CoUninitialize();
		return "";
	}

	IEnumMoniker* pEm = NULL;
	hr = pCreateDevEnum->CreateClassEnumerator(CLSID_AudioInputDeviceCategory, &pEm, 0);
	if (hr != NOERROR)
	{
		pCreateDevEnum->Release();	// fix: interface was leaked on this path
		::CoUninitialize();
		return "";
	}

	pEm->Reset();
	ULONG cFetched;
	IMoniker *pM = NULL;
	while (hr = pEm->Next(1, &pM, &cFetched), hr == S_OK)
	{
		IPropertyBag* pBag = NULL;
		hr = pM->BindToStorage(0, 0, IID_IPropertyBag, (void**)&pBag);
		if (SUCCEEDED(hr))
		{
			VARIANT var;
			var.vt = VT_BSTR;
			// Other properties (e.g. Description) exist; FriendlyName suffices here.
			hr = pBag->Read(L"FriendlyName", &var, NULL);
			if (hr == NOERROR)
			{
				// Convert the device name to the local 8-bit code page.
				WideCharToMultiByte(CP_ACP, 0, var.bstrVal, -1, sName, 256, "", NULL);
				capture = QString::fromLocal8Bit(sName);
				SysFreeString(var.bstrVal);
			}
			pBag->Release();
		}
		pM->Release();
	}
	// fix: both COM interfaces were previously nulled without Release() (leak).
	pEm->Release();
	pCreateDevEnum->Release();
	::CoUninitialize();
	return capture;
}

// Allocate an audio AVFrame matching the encoder context c, with nbSamples
// samples of buffer (no buffer when nbSamples == 0).
// Returns nullptr on failure; the caller owns the frame (av_frame_free).
AVFrame* ScreenRecordImpl::AllocAudioFrame(AVCodecContext* c, int nbSamples)
{
	AVFrame *frame = av_frame_alloc();
	if (!frame)	// fix: allocation failure was not checked
	{
		qDebug() << "av_frame_alloc failed";
		return nullptr;
	}

	frame->format = c->sample_fmt;
	// Fall back to stereo when the encoder context has no layout set.
	frame->channel_layout = c->channel_layout ? c->channel_layout : AV_CH_LAYOUT_STEREO;
	frame->sample_rate = c->sample_rate;
	frame->nb_samples = nbSamples;

	if (nbSamples)
	{
		int ret = av_frame_get_buffer(frame, 0);
		if (ret < 0)
		{
			qDebug() << "av_frame_get_buffer failed";
			av_frame_free(&frame);	// fix: the frame was leaked on this path
			return nullptr;
		}
	}
	return frame;
}

// Allocate the reusable output video frame/buffer and a FIFO that holds up to
// 30 scaled frames shared between the capture and mux threads.
void ScreenRecordImpl::InitVideoBuffer()
{
	m_vOutFrameSize = av_image_get_buffer_size(m_vEncodeCtx->pix_fmt, m_width, m_height, 1);
	m_vOutFrameBuf = (uint8_t *)av_malloc(m_vOutFrameSize);
	m_vOutFrame = av_frame_alloc();
	if (!m_vOutFrameBuf || !m_vOutFrame)	// fix: allocation results were not checked
	{
		qDebug() << "InitVideoBuffer allocation failed";
		return;
	}
	// Point the AVFrame planes at the buffer now; pixel data is written later.
	av_image_fill_arrays(m_vOutFrame->data, m_vOutFrame->linesize, m_vOutFrameBuf, m_vEncodeCtx->pix_fmt, m_width, m_height, 1);
	// Reserve room for 30 frames.
	if (!(m_vFifoBuf = av_fifo_alloc_array(30, m_vOutFrameSize)))
	{
		qDebug() << "av_fifo_alloc_array failed";
		return;
	}
}

// Allocate the audio FIFO shared between the capture and mux threads,
// sized for 30 encoder frames.
void ScreenRecordImpl::InitAudioBuffer()
{
	// frame_size is valid only after avcodec_open2(); fall back to 1024 samples.
	m_nbSamples = m_aEncodeCtx->frame_size;
	if (m_nbSamples == 0)
	{
		qDebug() << "m_nbSamples==0";
		m_nbSamples = 1024;
	}
	m_aFifoBuf = av_audio_fifo_alloc(m_aEncodeCtx->sample_fmt, m_aEncodeCtx->channels, 30 * m_nbSamples);
	if (nullptr == m_aFifoBuf)
	{
		qDebug() << "av_audio_fifo_alloc failed";
		return;
	}
}

void ScreenRecordImpl::FlushVideoDecoder()
{
	int ret = -1;
	int y_size = m_width * m_height;
	AVFrame	*oldFrame = av_frame_alloc();
	AVFrame *newFrame = av_frame_alloc();

	ret = avcodec_send_packet(m_vDecodeCtx, nullptr);
	if (ret != 0)
	{
		qDebug() << "flush video avcodec_send_packet failed, ret: " << ret;
		return;
	}
	while (ret >= 0)
	{
		ret = avcodec_receive_frame(m_vDecodeCtx, oldFrame);
		if (ret < 0)
		{
			if (ret == AVERROR(EAGAIN))
			{
				qDebug() << "flush EAGAIN avcodec_receive_frame";
				ret = 1;
				continue;
			}
			else if (ret == AVERROR_EOF)
			{
				qDebug() << "flush video decoder finished";
				break;
			}
			qDebug() << "flush video avcodec_receive_frame error, ret: " << ret;
			return;
		}
		++g_vCollectFrameCnt;
		sws_scale(m_swsCtx, (const uint8_t* const*)oldFrame->data, oldFrame->linesize, 0,
			m_vEncodeCtx->height, newFrame->data, newFrame->linesize);

		{
			unique_lock<mutex> lk(m_mtxVBuf);
			m_cvVBufNotFull.wait(lk, [this] { return av_fifo_space(m_vFifoBuf) >= m_vOutFrameSize; });
		}
		av_fifo_generic_write(m_vFifoBuf, newFrame->data[0], y_size, NULL);
		av_fifo_generic_write(m_vFifoBuf, newFrame->data[1], y_size / 4, NULL);
		av_fifo_generic_write(m_vFifoBuf, newFrame->data[2], y_size / 4, NULL);
		m_cvVBufNotEmpty.notify_one();
	}
	qDebug() << "video collect frame count: " << g_vCollectFrameCnt;
}

//void ScreenRecordImpl::FlushVideoEncoder()
//{
//	int ret = -1;
//	AVPacket pkt = { 0 };
//	av_init_packet(&pkt);
//	ret = avcodec_send_frame(m_vEncodeCtx, nullptr);
//	qDebug() << "avcodec_send_frame ret:" << ret;
//	while (ret >= 0)
//	{
//		ret = avcodec_receive_packet(m_vEncodeCtx, &pkt);
//		if (ret < 0)
//		{
//			av_packet_unref(&pkt);
//			if (ret == AVERROR(EAGAIN))
//			{
//				qDebug() << "flush EAGAIN avcodec_receive_packet";
//				ret = 1;
//				continue;
//			}
//			else if (ret == AVERROR_EOF)
//			{
//				qDebug() << "flush video encoder finished";
//				break;
//			}
//			qDebug() << "flush video avcodec_receive_packet failed, ret: " << ret;
//			return;
//		}
//		pkt.stream_index = m_vOutIndex;
//		av_packet_rescale_ts(&pkt, m_vEncodeCtx->time_base, m_oFmtCtx->streams[m_vOutIndex]->time_base);
//
//		ret = av_interleaved_write_frame(m_oFmtCtx, &pkt);
//		if (ret == 0)
//			qDebug() << "flush Write video packet id: " << ++g_vEncodeFrameCnt;
//		else
//			qDebug() << "video av_interleaved_write_frame failed, ret:" << ret;
//		av_free_packet(&pkt);
//	}
//}

void ScreenRecordImpl::FlushAudioDecoder()
{
	int ret = -1;
	AVPacket pkt = { 0 };
	av_init_packet(&pkt);
	int dstNbSamples, maxDstNbSamples;
	AVFrame *rawFrame = av_frame_alloc();
	AVFrame *newFrame = AllocAudioFrame(m_aEncodeCtx, m_nbSamples);
	maxDstNbSamples = dstNbSamples = av_rescale_rnd(m_nbSamples,
		m_aEncodeCtx->sample_rate, m_aDecodeCtx->sample_rate, AV_ROUND_UP);

	ret = avcodec_send_packet(m_aDecodeCtx, nullptr);
	if (ret != 0)
	{
		qDebug() << "flush audio avcodec_send_packet  failed, ret: " << ret;
		return;
	}
	while (ret >= 0)
	{
		ret = avcodec_receive_frame(m_aDecodeCtx, rawFrame);
		if (ret < 0)
		{
			if (ret == AVERROR(EAGAIN))
			{
				qDebug() << "flush audio EAGAIN avcodec_receive_frame";
				ret = 1;
				continue;
			}
			else if (ret == AVERROR_EOF)
			{
				qDebug() << "flush audio decoder finished";
				break;
			}
			qDebug() << "flush audio avcodec_receive_frame error, ret: " << ret;
			return;
		}
		++g_aCollectFrameCnt;

		dstNbSamples = av_rescale_rnd(swr_get_delay(m_swrCtx, m_aDecodeCtx->sample_rate) + rawFrame->nb_samples,
			m_aEncodeCtx->sample_rate, m_aDecodeCtx->sample_rate, AV_ROUND_UP);
		if (dstNbSamples > maxDstNbSamples)
		{
			qDebug() << "flush audio newFrame realloc";
			av_freep(&newFrame->data[0]);
			ret = av_samples_alloc(newFrame->data, newFrame->linesize, m_aEncodeCtx->channels,
				dstNbSamples, m_aEncodeCtx->sample_fmt, 1);
			if (ret < 0)
			{
				qDebug() << "flush av_samples_alloc failed";
				return;
			}
			maxDstNbSamples = dstNbSamples;
			m_aEncodeCtx->frame_size = dstNbSamples;
			m_nbSamples = newFrame->nb_samples;
		}
		newFrame->nb_samples = swr_convert(m_swrCtx, newFrame->data, dstNbSamples,
			(const uint8_t **)rawFrame->data, rawFrame->nb_samples);
		if (newFrame->nb_samples < 0)
		{
			qDebug() << "flush swr_convert failed";
			return;
		}

		{
			unique_lock<mutex> lk(m_mtxABuf);
			m_cvABufNotFull.wait(lk, [newFrame, this] { return av_audio_fifo_space(m_aFifoBuf) >= newFrame->nb_samples; });
		}
		if (av_audio_fifo_write(m_aFifoBuf, (void **)newFrame->data, newFrame->nb_samples) < newFrame->nb_samples)
		{
			qDebug() << "av_audio_fifo_write";
			return;
		}
		m_cvABufNotEmpty.notify_one();
	}
	qDebug() << "audio collect frame count: " << g_aCollectFrameCnt;
}

//void ScreenRecordImpl::FlushAudioEncoder()
//{
//}

// Drain both encoders at the end of the session, interleaving the remaining
// packets by timestamp. Each encoder is flushed once (avcodec_send_frame with
// nullptr); when one reaches EOF its pts is parked at INT_MAX so the other
// stream drains, and the loop exits after both are done (nFlush counts down).
void ScreenRecordImpl::FlushEncoders()
{
	int ret = -1;
	bool vBeginFlush = false;
	bool aBeginFlush = false;

	m_vCurPts = m_aCurPts = 0;

	int nFlush = 2;	// number of encoders still to finish

	while (1)
	{
		AVPacket pkt = { 0 };
		av_init_packet(&pkt);
		// Write whichever stream is behind, to keep the output interleaved.
		if (av_compare_ts(m_vCurPts, m_oFmtCtx->streams[m_vOutIndex]->time_base,
			m_aCurPts, m_oFmtCtx->streams[m_aOutIndex]->time_base) <= 0)
		{
			if (!vBeginFlush)
			{
				vBeginFlush = true;
				ret = avcodec_send_frame(m_vEncodeCtx, nullptr);	// enter draining mode
				if (ret != 0)
				{
					qDebug() << "flush video avcodec_send_frame failed, ret: " << ret;
					return;
				}
			}
			ret = avcodec_receive_packet(m_vEncodeCtx, &pkt);
			if (ret < 0)
			{
				av_packet_unref(&pkt);
				if (ret == AVERROR(EAGAIN))
				{
					qDebug() << "flush video EAGAIN avcodec_receive_packet";
					ret = 1;
					continue;
				}
				else if (ret == AVERROR_EOF)
				{
					qDebug() << "flush video encoder finished";
					if (!(--nFlush))
						break;
					m_vCurPts = INT_MAX;	// park video so audio drains next
					continue;
				}
				qDebug() << "flush video avcodec_receive_packet failed, ret: " << ret;
				return;
			}
			pkt.stream_index = m_vOutIndex;
			// Rescale pts from the encoder time_base to the muxer time_base.
			av_packet_rescale_ts(&pkt, m_vEncodeCtx->time_base, m_oFmtCtx->streams[m_vOutIndex]->time_base);
			m_vCurPts = pkt.pts;
			qDebug() << "m_vCurPts: " << m_vCurPts;

			ret = av_interleaved_write_frame(m_oFmtCtx, &pkt);
			if (ret == 0)
				qDebug() << "flush Write video packet id: " << ++g_vEncodeFrameCnt;
			else
				qDebug() << "flush video av_interleaved_write_frame failed, ret:" << ret;
			av_packet_unref(&pkt);	// fix: av_free_packet is deprecated
		}
		else
		{
			if (!aBeginFlush)
			{
				aBeginFlush = true;
				ret = avcodec_send_frame(m_aEncodeCtx, nullptr);	// enter draining mode
				if (ret != 0)
				{
					qDebug() << "flush audio avcodec_send_frame failed, ret: " << ret;
					return;
				}
			}
			ret = avcodec_receive_packet(m_aEncodeCtx, &pkt);
			if (ret < 0)
			{
				av_packet_unref(&pkt);
				if (ret == AVERROR(EAGAIN))
				{
					qDebug() << "flush EAGAIN avcodec_receive_packet";
					ret = 1;
					continue;
				}
				else if (ret == AVERROR_EOF)
				{
					qDebug() << "flush audio encoder finished";
					if (!(--nFlush))
						break;
					m_aCurPts = INT_MAX;	// park audio so video drains next
					continue;
				}
				qDebug() << "flush audio avcodec_receive_packet failed, ret: " << ret;
				return;
			}
			pkt.stream_index = m_aOutIndex;
			// Rescale pts from the encoder time_base to the muxer time_base.
			av_packet_rescale_ts(&pkt, m_aEncodeCtx->time_base, m_oFmtCtx->streams[m_aOutIndex]->time_base);
			m_aCurPts = pkt.pts;
			qDebug() << "m_aCurPts: " << m_aCurPts;
			ret = av_interleaved_write_frame(m_oFmtCtx, &pkt);
			if (ret == 0)
				qDebug() << "flush write audio packet id: " << ++g_aEncodeFrameCnt;
			else
				qDebug() << "flush audio av_interleaved_write_frame failed, ret: " << ret;
			av_packet_unref(&pkt);	// fix: av_free_packet is deprecated
		}
	}
}

void ScreenRecordImpl::Release()
{
	if (m_vOutFrame)
	{
		av_frame_free(&m_vOutFrame);
		m_vOutFrame = nullptr;
	}
	if (m_vOutFrameBuf)
	{
		av_free(m_vOutFrameBuf);
		m_vOutFrameBuf = nullptr;
	}
	if (m_oFmtCtx)
	{
		avio_close(m_oFmtCtx->pb);
		avformat_free_context(m_oFmtCtx);
		m_oFmtCtx = nullptr;
	}
	//if (m_vDecodeCtx)
	//{
    //  // FIXME: 为什么这里会崩溃
	//	avcodec_free_context(&m_vDecodeCtx);
	//	m_vDecodeCtx = nullptr;
	//}
	if (m_aDecodeCtx)
	{
		avcodec_free_context(&m_aDecodeCtx);
		m_aDecodeCtx = nullptr;
	}
	if (m_vEncodeCtx)
	{
		avcodec_free_context(&m_vEncodeCtx);
		m_vEncodeCtx = nullptr;
	}
	if (m_aEncodeCtx)
	{
		avcodec_free_context(&m_aEncodeCtx);
		m_aEncodeCtx = nullptr;
	}
	if (m_vFifoBuf)
	{
		av_fifo_freep(&m_vFifoBuf);
		m_vFifoBuf = nullptr;
	}
	if (m_aFifoBuf)
	{
		av_audio_fifo_free(m_aFifoBuf);
		m_aFifoBuf = nullptr;
	}
	if (m_vFmtCtx)
	{
		avformat_close_input(&m_vFmtCtx);
		m_vFmtCtx = nullptr;
	}
	if (m_aFmtCtx)
	{
		avformat_close_input(&m_aFmtCtx);
		m_aFmtCtx = nullptr;
	}
}

// Muxer thread: opens inputs/output, starts the two capture threads, then
// pulls frames from the FIFOs, encodes them, and interleaves the packets into
// the output file by comparing the current pts of each stream.
void ScreenRecordImpl::MuxThreadProc()
{
	int ret = -1;
	bool done = false;
	int vFrameIndex = 0, aFrameIndex = 0;

	av_register_all();
	avdevice_register_all();
	avcodec_register_all();

	if (OpenVideo() < 0)
		return;
	if (OpenAudio() < 0)
		return;
	if (OpenOutput() < 0)
		return;

	InitVideoBuffer();
	InitAudioBuffer();

	// Start the audio/video capture threads.
	std::thread screenRecord(&ScreenRecordImpl::ScreenRecordThreadProc, this);
	std::thread soundRecord(&ScreenRecordImpl::SoundRecordThreadProc, this);
	screenRecord.detach();
	soundRecord.detach();

	while (1)
	{
		// After Stop() is requested, keep draining until both FIFOs are empty.
		if (m_state == RecordState::Stopped && !done)
			done = true;
		if (done)
		{
			// Lock both FIFO mutexes together; std::lock avoids deadlock.
			unique_lock<mutex> vBufLock(m_mtxVBuf, std::defer_lock);
			unique_lock<mutex> aBufLock(m_mtxABuf, std::defer_lock);
			std::lock(vBufLock, aBufLock);
			if (av_fifo_size(m_vFifoBuf) < m_vOutFrameSize &&
				av_audio_fifo_size(m_aFifoBuf) < m_nbSamples)
			{
				qDebug() << "both video and audio fifo buf are empty, break";
				break;
			}
		}
		// Write whichever stream is behind to keep the output interleaved.
		if (av_compare_ts(m_vCurPts, m_oFmtCtx->streams[m_vOutIndex]->time_base,
			m_aCurPts, m_oFmtCtx->streams[m_aOutIndex]->time_base) <= 0)
	/*	if (av_compare_ts(vCurPts, m_vEncodeCtx->time_base,
			aCurPts, m_aEncodeCtx->time_base) <= 0)*/
		{
			if (done)
			{
				lock_guard<mutex> lk(m_mtxVBuf);
				if (av_fifo_size(m_vFifoBuf) < m_vOutFrameSize)
				{
					qDebug() << "video wirte done";
					//break;
					// Park the video pts at a sentinel so the audio side drains.
					//m_vCurPts = 0x7ffffffffffffffe;	// largest signed int64
					m_vCurPts = INT_MAX;
					continue;
				}
			}
			else 
			{
				// Block until the capture thread has produced one full frame.
				unique_lock<mutex> lk(m_mtxVBuf);
				m_cvVBufNotEmpty.wait(lk, [this] { return av_fifo_size(m_vFifoBuf) >= m_vOutFrameSize; });
			}
			av_fifo_generic_read(m_vFifoBuf, m_vOutFrameBuf, m_vOutFrameSize, NULL);
			m_cvVBufNotFull.notify_one();

			// Fill in the video frame parameters.
			//m_vOutFrame->pts = vFrameIndex * ((m_oFmtCtx->streams[m_vOutIndex]->time_base.den / m_oFmtCtx->streams[m_vOutIndex]->time_base.num) / m_fps);
			m_vOutFrame->pts = vFrameIndex++;
			m_vOutFrame->format = m_vEncodeCtx->pix_fmt;
			m_vOutFrame->width = m_vEncodeCtx->width;
			m_vOutFrame->height = m_vEncodeCtx->height;

			AVPacket pkt = { 0 };
			av_init_packet(&pkt);
			ret = avcodec_send_frame(m_vEncodeCtx, m_vOutFrame);
			if (ret != 0)
			{
				qDebug() << "video avcodec_send_frame failed, ret: " << ret;
				av_packet_unref(&pkt);
				continue;
			}
			ret = avcodec_receive_packet(m_vEncodeCtx, &pkt);
			if (ret != 0)
			{
				// NOTE(review): EAGAIN (encoder needs more input) lands here
				// too and drops this iteration's output — confirm intended.
				qDebug() << "video avcodec_receive_packet failed, ret: " << ret;
				av_packet_unref(&pkt);
				continue;
			}
			pkt.stream_index = m_vOutIndex;
			// Rescale pts from the encoder time_base to the muxer time_base.
			av_packet_rescale_ts(&pkt, m_vEncodeCtx->time_base, m_oFmtCtx->streams[m_vOutIndex]->time_base);

			m_vCurPts = pkt.pts;
			qDebug() << "m_vCurPts: " << m_vCurPts;

			ret = av_interleaved_write_frame(m_oFmtCtx, &pkt);
			if (ret == 0)
				qDebug() << "Write video packet id: " << ++g_vEncodeFrameCnt;
			else
				qDebug() << "video av_interleaved_write_frame failed, ret:" << ret;
			av_free_packet(&pkt);
		}
		else
		{
			if (done)
			{
				lock_guard<mutex> lk(m_mtxABuf);
				if (av_audio_fifo_size(m_aFifoBuf) < m_nbSamples)
				{
					qDebug() << "audio write done";
					// Park the audio pts at a sentinel so the video side drains.
					//m_aCurPts = 0x7fffffffffffffff;
					m_aCurPts = INT_MAX;
					continue;
				}
			}
			else
			{
				// Block until the capture thread has produced one encoder frame.
				unique_lock<mutex> lk(m_mtxABuf);
				m_cvABufNotEmpty.wait(lk, [this] { return av_audio_fifo_size(m_aFifoBuf) >= m_nbSamples; });
			}

			int ret = -1;
			AVFrame *aFrame = av_frame_alloc();
			aFrame->nb_samples = m_nbSamples;
			aFrame->channel_layout = m_aEncodeCtx->channel_layout;
			aFrame->format = m_aEncodeCtx->sample_fmt;
			aFrame->sample_rate = m_aEncodeCtx->sample_rate;
			aFrame->pts = m_nbSamples * aFrameIndex++;
			// Allocate the frame's data buffers.
			ret = av_frame_get_buffer(aFrame, 0);
			av_audio_fifo_read(m_aFifoBuf, (void **)aFrame->data, m_nbSamples);
			m_cvABufNotFull.notify_one();

			AVPacket pkt = { 0 };
			av_init_packet(&pkt);
			ret = avcodec_send_frame(m_aEncodeCtx, aFrame);
			if (ret != 0)
			{
				qDebug() << "audio avcodec_send_frame failed, ret: " << ret;
				av_frame_free(&aFrame);
				av_packet_unref(&pkt);
				continue;
			}
			ret = avcodec_receive_packet(m_aEncodeCtx, &pkt);
			if (ret != 0)
			{
				qDebug() << "audio avcodec_receive_packet failed, ret: " << ret;
				av_frame_free(&aFrame);
				av_packet_unref(&pkt);
				continue;
			}
			pkt.stream_index = m_aOutIndex;

			// Rescale pts from the encoder time_base to the muxer time_base.
			av_packet_rescale_ts(&pkt, m_aEncodeCtx->time_base, m_oFmtCtx->streams[m_aOutIndex]->time_base);

			m_aCurPts = pkt.pts;
			qDebug() << "aCurPts: " << m_aCurPts;

			ret = av_interleaved_write_frame(m_oFmtCtx, &pkt);
			if (ret == 0)
				qDebug() << "Write audio packet id: " << ++g_aEncodeFrameCnt;
			else
				qDebug() << "audio av_interleaved_write_frame failed, ret: " << ret;

			av_frame_free(&aFrame);
			av_free_packet(&pkt);
		}
	}
	// Drain both encoders, finalize the container, free everything.
	FlushEncoders();
	av_write_trailer(m_oFmtCtx);
	Release();
	qDebug() << "parent thread exit";
}

void ScreenRecordImpl::ScreenRecordThreadProc()
{
	int ret = -1;
	AVPacket pkt = { 0 };
	av_init_packet(&pkt);
	int y_size = m_width * m_height;
	AVFrame	*oldFrame = av_frame_alloc();
	AVFrame *newFrame = av_frame_alloc();

	int newFrameBufSize = av_image_get_buffer_size(m_vEncodeCtx->pix_fmt, m_width, m_height, 1);
	uint8_t *newFrameBuf = (uint8_t*)av_malloc(newFrameBufSize);
	av_image_fill_arrays(newFrame->data, newFrame->linesize, newFrameBuf,
		m_vEncodeCtx->pix_fmt, m_width, m_height, 1);

	while (m_state != RecordState::Stopped)
	{
		if (m_state == RecordState::Paused)
		{
			unique_lock<mutex> lk(m_mtxPause);
			m_cvNotPause.wait(lk, [this] { return m_state != RecordState::Paused; });
		}
		if (av_read_frame(m_vFmtCtx, &pkt) < 0)
		{
			qDebug() << "video av_read_frame < 0";
			continue;
		}
		if (pkt.stream_index != m_vIndex)
		{
			qDebug() << "not a video packet from video input";
			av_packet_unref(&pkt);
		}
		ret = avcodec_send_packet(m_vDecodeCtx, &pkt);
		if (ret != 0)
		{
			qDebug() << "video avcodec_send_packet failed, ret:" << ret;
			av_packet_unref(&pkt);
			continue;
		}
		ret = avcodec_receive_frame(m_vDecodeCtx, oldFrame);
		if (ret != 0)
		{
			qDebug() << "video avcodec_receive_frame failed, ret:" << ret;
			av_packet_unref(&pkt);
			continue;
		}
		++g_vCollectFrameCnt;
		sws_scale(m_swsCtx, (const uint8_t* const*)oldFrame->data, oldFrame->linesize, 0,
			m_vEncodeCtx->height, newFrame->data, newFrame->linesize);

		{
			unique_lock<mutex> lk(m_mtxVBuf);
			m_cvVBufNotFull.wait(lk, [this] { return av_fifo_space(m_vFifoBuf) >= m_vOutFrameSize; });
		}
		av_fifo_generic_write(m_vFifoBuf, newFrame->data[0], y_size, NULL);
		av_fifo_generic_write(m_vFifoBuf, newFrame->data[1], y_size / 4, NULL);
		av_fifo_generic_write(m_vFifoBuf, newFrame->data[2], y_size / 4, NULL);
		m_cvVBufNotEmpty.notify_one();

		av_packet_unref(&pkt);
	}
	FlushVideoDecoder();

	av_free(newFrameBuf);
	av_frame_free(&oldFrame);
	av_frame_free(&newFrame);
	qDebug() << "screen record thread exit";
}

void ScreenRecordImpl::SoundRecordThreadProc()
{
	// Audio capture thread.
	// Loop: read a packet from the audio input context, decode it, resample
	// to the encoder's rate/format, then write the samples into the audio
	// FIFO for the mux thread to encode.
	int ret = -1;
	AVPacket pkt = { 0 };
	av_init_packet(&pkt);
	int nbSamples = m_nbSamples;
	int dstNbSamples, maxDstNbSamples;
	AVFrame *rawFrame = av_frame_alloc();				// decoded input samples
	AVFrame *newFrame = AllocAudioFrame(m_aEncodeCtx, nbSamples);	// resampled output samples

	// Worst-case output sample count for one input frame at the encoder rate.
	maxDstNbSamples = dstNbSamples = av_rescale_rnd(nbSamples, 
		m_aEncodeCtx->sample_rate, m_aDecodeCtx->sample_rate, AV_ROUND_UP);

	while (m_state != RecordState::Stopped)
	{
		if (m_state == RecordState::Paused)
		{
			// Block until Start()/Stop() flips the state away from Paused.
			unique_lock<mutex> lk(m_mtxPause);
			m_cvNotPause.wait(lk, [this] { return m_state != RecordState::Paused; });
		}
		if (av_read_frame(m_aFmtCtx, &pkt) < 0)
		{
			qDebug() << "audio av_read_frame < 0";
			continue;
		}
		if (pkt.stream_index != m_aIndex)
		{
			qDebug() << "not a audio packet";
			av_packet_unref(&pkt);
			continue;
		}
		ret = avcodec_send_packet(m_aDecodeCtx, &pkt);
		if (ret != 0)
		{
			qDebug() << "audio avcodec_send_packet failed, ret: " << ret;
			av_packet_unref(&pkt);
			continue;
		}
		ret = avcodec_receive_frame(m_aDecodeCtx, rawFrame);
		if (ret != 0)
		{
			qDebug() << "audio avcodec_receive_frame failed, ret: " << ret;
			av_packet_unref(&pkt);
			continue;
		}
		// BUG FIX: the original never unreferenced pkt on the success path,
		// leaking every captured audio packet. The decoder has its own
		// reference after avcodec_send_packet, so ours can be dropped now.
		av_packet_unref(&pkt);
		++g_aCollectFrameCnt;

		// Account for samples buffered inside the resampler when sizing the
		// output; grow newFrame's data buffer if the input frame is larger
		// than anything seen so far.
		dstNbSamples = av_rescale_rnd(swr_get_delay(m_swrCtx, m_aDecodeCtx->sample_rate) + rawFrame->nb_samples,
			m_aEncodeCtx->sample_rate, m_aDecodeCtx->sample_rate, AV_ROUND_UP);
		if (dstNbSamples > maxDstNbSamples) 
		{
			qDebug() << "audio newFrame realloc";
			av_freep(&newFrame->data[0]);
			//nb_samples*nb_channels*Bytes_sample_fmt
			ret = av_samples_alloc(newFrame->data, newFrame->linesize, m_aEncodeCtx->channels,
				dstNbSamples, m_aEncodeCtx->sample_fmt, 1);
			if (ret < 0)
			{
				qDebug() << "av_samples_alloc failed";
				// BUG FIX: free the frames before bailing out of the thread.
				av_frame_free(&rawFrame);
				av_frame_free(&newFrame);
				return;
			}

			maxDstNbSamples = dstNbSamples;
			m_aEncodeCtx->frame_size = dstNbSamples;
			m_nbSamples = newFrame->nb_samples;	//1024
			/*
			 * m_nbSamples = dstNbSamples;		//22050
			 * 如果改为m_nbSamples = dstNbSamples;则av_audio_fifo_write会异常,不明白为什么?
			 * 我觉得应该改为22050,不然编码线程一次编码的帧sample太少了,
			 * 但是用1024生成的音频好像没问题?
			 * 音频是否应该根据采集的nb_samples而重新分配fifo?
			*/
		}

		newFrame->nb_samples = swr_convert(m_swrCtx, newFrame->data, dstNbSamples,
			(const uint8_t **)rawFrame->data, rawFrame->nb_samples);
		if (newFrame->nb_samples < 0)
		{
			qDebug() << "swr_convert error";
			// BUG FIX: free the frames before bailing out of the thread.
			av_frame_free(&rawFrame);
			av_frame_free(&newFrame);
			return;
		}
		{
			// Wait until the FIFO has room for this frame's samples.
			unique_lock<mutex> lk(m_mtxABuf);
			m_cvABufNotFull.wait(lk, [newFrame, this] { return av_audio_fifo_space(m_aFifoBuf) >= newFrame->nb_samples; });
		}
		if (av_audio_fifo_write(m_aFifoBuf, (void **)newFrame->data, newFrame->nb_samples) < newFrame->nb_samples)
		{
			qDebug() << "av_audio_fifo_write";
			// BUG FIX: free the frames before bailing out of the thread.
			av_frame_free(&rawFrame);
			av_frame_free(&newFrame);
			return;
		}
		m_cvABufNotEmpty.notify_one();
	}
	// Drain any frames still buffered inside the decoder.
	FlushAudioDecoder();
	av_frame_free(&rawFrame);
	av_frame_free(&newFrame);
	qDebug() << "sound record thread exit";
}

ScreenRecordTest.h

#pragma once
#include <QObject>
#include <QVariant>

// Thin test driver: constructs a ScreenRecordImpl, feeds it hard-coded
// recording parameters, and schedules Start/Stop via single-shot timers.
class ScreenRecord : public QObject
{
	Q_OBJECT
public:
	// explicit prevents accidental implicit QObject* -> ScreenRecord
	// conversion (single-argument constructor idiom).
	explicit ScreenRecord(QObject *parent = Q_NULLPTR);

private:
	QVariantMap m_args;	// recording parameters (currently unused; kept for future UI wiring)
};

ScreenRecordTest.cpp

#include "ScreenRecordTest.h"
#include "ScreenRecordImpl.h"
#include <QTimer>

ScreenRecord::ScreenRecord(QObject *parent) :
	QObject(parent)
{
	// Assemble the recording parameters handed to the implementation.
	QVariantMap args;
	args["filePath"] = "test.mp4";
	//args["width"] = 1920;
	//args["height"] = 1080;
	args["width"] = 1440;
	args["height"] = 900;
	args["fps"] = 30;
	args["audioBitrate"] = 128000;

	// The impl object is parented to us, so Qt handles its destruction.
	ScreenRecordImpl *impl = new ScreenRecordImpl(this);
	impl->Init(args);

	// Drive a short demo run: start after 1 s, stop after 11 s.
	QTimer::singleShot(1000, impl, SLOT(Start()));
	//QTimer::singleShot(5000, impl, SLOT(Pause()));
	QTimer::singleShot(11000, impl, SLOT(Stop()));
}
本文内容由网友自发贡献,版权归原作者所有,本站不承担相应法律责任。如您发现有涉嫌抄袭侵权的内容,请联系:hwhale#tublm.com(使用前将#替换为@)

Qt+FFmpeg录屏录音 的相关文章

  • SPOOLING技术

    SPOOLING技术 xff08 Simultaneous Peripheral Operating On Line 同时联机外围操作技术 xff0c 它是关于慢速字符设备 如何与计算机主机进行数据交换 的一种技术 xff0c 通常又称 假
  • Belady现象

    Belady现象 采用FIFO算法时 xff0c 如果对 个进程未分配它所要求的全部页面 xff0c 有时就会出现分配的页面数增多但缺页率反而提高的异常现象 Belady现象的描述 xff1a 一个进程P要访问M个页 OS分配N N lt
  • 计算结构体的字节数

    结构体中的成员可以是不同的数据类型 xff0c 成员按照定义时的顺序依次存储在连续的内存空间 和数组不一样的是 xff0c 结构体的大小不是所有成员大小简单的相加 xff0c 需要考虑到系统在存储结构体变量时的地址对齐问题 看下面这样的一个
  • 轻松搞定面试中的二叉树题目

    版权所有 xff0c 转载请注明出处 xff0c 谢谢 xff01 http blog csdn net walkinginthewind article details 7518888 树是一种比较重要的数据结构 xff0c 尤其是二叉树
  • 使用Anaconda Navigator无法成功创建虚拟环境问题的解决方案

    1 问题描述 使用anaconda Navigator创建虚拟环境时 xff0c 配置初始名称以及python版本 xff0c Fetching各种包成功后 xff0c 开始loading各种包的过程中闪过cmd黑色窗口 xff0c 然后左
  • QT 后台处理时间过长 主界面卡死解决办法

    之前用WPF开发 xff0c 处理逻辑就是1 xff0c 处理前显示等待窗口 xff0c 2 同步处理改未异步 xff0c 3 处理完毕后关闭等待窗口 Qt应该也是类似的处理逻辑 xff1a 一 创建等待处理窗口 xff08 采用了QMoi
  • 一圈n个人,1-3循环报数,报道3的退出,最后剩下的是几号

    import java util ArrayList import java util List import java util Scanner public class CirCle public static void main St
  • GCD【洛谷P2568】(小左的GCD)

    题目描述 给定整数N xff0c 求1 lt 61 x y lt 61 N且Gcd x y 为素数的数对 x y 有多少对 输入格式 一个整数N 输出格式 答案 输入输出样例 输入 1 复制 4 输出 1 复制 4 说明 提示 对于样例 2
  • C++中的weak_ptr深入解析

    引言 在C 43 43 的智能指针家族中 xff0c weak ptr是一种非常实用且独特的成员 它主要用于解决循环引用问题 xff0c 从而避免内存泄漏 在本文中 xff0c 我们将详细讨论weak ptr的基本概念 功能和应用场景 xf
  • 【Redis】解决WARNING overcommit_memory is set to 0 Background save may fail under low memory condition.

    问题说明 不管是linux直装 xff0c 还是在docker环境中 xff0c 启动redis时 xff0c 报如下错误 WARNING overcommit memory is set to 0 Background save may
  • 运动跟踪算法CMT(续)之层次凝聚聚类算法(HAC)

    熟悉CMT的都知道 xff0c 作者在聚类部分使用了层次凝聚聚类算法 xff08 Hierarchical Agglomerative Clustering xff09 并且使用的是单链 xff08 Single link xff09 xf
  • 使用mysql8.x版本设置远程连接

    主要步骤 xff0c 注意 xff1a 自mysql8 x版本 xff0c 密码的加密方式改为caching sha2 password 登录mysql账号修改root用户登录地址修改root用户密码加密方式 usr local mysql
  • jenkins基础配置之四:读取本地文件

    需要安装的插件 Extended Choice Parameter Plug span class token operator span In span class token operator span External Monitor
  • 初识Btrfs文件系统

    Btrfs 也有一个重要的缺点 xff0c 当 BTree 中某个节点出现错误时 xff0c 文件系统将失去该节点之下的所有的文件信息 而 ext2 3 却避免了这种被称为 错误扩散 的问题 Btrfs相关介绍 xff1a Btrfs 是一
  • 服务器使用笔记本网络连接外网

    由于服务器经常部署在机房 xff0c 并没有外网 xff0c 连不上外网 需要使用自己笔记本的网络供服务器使用 笔记本连接手机热点 xff0c 再分享给服务器 一 首先 xff0c 需要把服务器和笔记本连接到同一网络内 xff0c 可以选择
  • grafana接入openldap认证

    首先两个文件开启ldap的支持 文件1 xff1a etc grafana grafana ini auth ldap enabled 61 true config file 61 etc grafana ldap toml allow s
  • Wireshark的常见提示

    概述 本文主要介绍Wireshark中出现的一些常见提示 详细信息 Wireshark简介 Gerald Combs是堪萨斯城密苏里大学计算机科学专业的毕业生 1998年发布了第一版Ethereal工具 xff0c Ethereal工具使用
  • shell报错bad substitution 解决办法

    bin bash a 61 34 hello 34 b 61 34 hi is a 34 echo b echo a echo a echo a 1 2 执行脚本方式不同出现的结果不同 xff1a 方式1 xff1a sh shell sh
  • centos8软件安装dnf命令

    DNF是新一代的rpm软件包管理器 它首先出现在 Fedora 18 这个发行版中 而目前 xff0c 它取代了yum xff0c 正式成为从 Fedora 22 起 Fedora 版本的包管理器 DNF包管理器克服了YUM包管理器的一些瓶
  • 多目标规则在 Makefile 中的应用与示例

    在 Makefile 中 xff0c 如果一个规则有多个目标 xff0c 而且它们之间用空格分隔 xff0c 我们称之为 34 多目标规则 34 这意味着这个规则适用于列出的所有目标 在目标下面的命令是 C 64 xff0c 它通常与 ma

随机推荐

  • 计算机中内存、cache和寄存器之间的关系及区别

    1 寄存器是中央处理器内的组成部份 寄存器是有限存贮容量的高速存贮部件 xff0c 它们可用来暂存指令 数据和位址 在中央处理器的控制部件中 xff0c 包含的寄存器有指令寄存器 IR 和程序计数器 PC 在中央处理器的算术及逻辑部件中 x
  • dell 台式电脑设置每天定时开机和关机

    每天定时开机设置 xff1a 戴尔电脑通过CMOS设置实现自动开机的设置过程如下 xff1a 1 首先进入 CMOS SETUP 程序 大多数主板是在计算机启动时按DEL或F2键进入 xff1b 2 然后将光条移到 Power Manage
  • windows批处理自动获取电脑配置信息

    39 2 gt nul 3 gt nul amp cls amp 64 echo off 39 amp rem 获取本机系统及硬件配置信息 39 amp set 61 Any question amp set 64 61 WX amp se
  • Centos7搭建cisco ocserv

    一 安装的部分直接yum安装即可 yum y install ocserv 二 配置文件根据实际情况调整 auth方式有两种 1 系统账号认证 配置的话就是 xff1a auth 61 34 pam 34 2 本地文件认证 配置的话就是 x
  • 私有harbor部署(docker方式)

    环境准备 docker compose v Docker Compose version v2 14 2 wget https github com docker compose releases download v2 14 2 dock
  • ORACLE扩展表空间

    一 查询表空间使用情况 SELECT UPPER F TABLESPACE NAME 34 表空间名 34 D TOT GROOTTE MB 34 表空间大小 M 34 D TOT GROOTTE MB F TOTAL BYTES 34 已
  • Oracle 常用性能监控SQL语句

    1 查看表锁 SELECT FROM SYS V SQLAREA WHERE DISK READS gt 100 2 监控事例的等待 SELECT EVENT SUM DECODE WAIT TIME 0 0 1 34 Prev 34 SU
  • Nginx出现“ 413 (499 502 404) Request Entity Too Large”错误解决方法

    1 Nginx413错误的排查 修改上传文件大小限制 在使用上传POST一段数据时 xff0c 被提示413 Request Entity Too Large xff0c 应该是nginx限制了上传数据的大小 解决方法就是 打开nginx主
  • 查看弹出广告来自哪个软件

    打开VS的Spy 43 43 将指针移到广告处 xff0c 然后点OK xff0c 在Process标签页可以看到进程id和线程id将获得的16进制进程id xff08 例如 xff1a 000025F8 xff09 通过计算器转成10进制
  • C++多态虚函数实现原理,对象和虚函数表的内存布局

    基本概念 我们知道C 43 43 动态多态是用虚函数实现的 xff0c 而虚函数的实现方式虽说C 43 43 标准没有要求 xff0c 但是基本都是用虚函数表实现的 xff08 编译器决定 xff09 所以我们有必要了解一下虚函数表的实现原
  • C++ STL中递归锁与普通锁的区别

    在多线程编程中 xff0c 保护共享资源的访问很重要 xff0c 为了实现这个目标 xff0c C 43 43 标准库 xff08 STL xff09 中提供了多种锁 xff0c 如std mutex和std recursive mutex
  • VS+Qt开发环境

    VS Qt下载 VS下载 xff1a https visualstudio microsoft com zh hans vs Qt下载安装 xff1a https www bilibili com video BV1gx4y1M7cM VS
  • windows下使用ShiftMediaProject编译调试FFmpeg

    为什么要编译FFmpeg xff1f 定制模块调试源码 windows下编译 推荐项目ShiftMediaProject FFmpeg 平时总是看到一些人说windows下编译FFmpeg很麻烦 xff0c 这时候我就都是微微一笑 xff0
  • RTSP分析

    RTSP使用TCP来发送控制命令 xff08 OPTIONS DESCRIBE SETUP PLAY xff09 xff0c 因为TCP提供可靠有序的数据传输 xff0c 而且TCP还提供错误检测和纠正 RTSP的报文格式可以参考HTTP的
  • RTP分析

    参考 RTP xff08 A Transport Protocol for Real Time Applications 实时传输协议 xff0c rfc3550 xff09 RTP Payload Format for H 264 Vid
  • VS链接器工具错误 LNK2019:无法解析的外部符号

    常见的问题 以下是一些导致 LNK2019 的常见问题 xff1a 未链接的对象文件或包含符号定义的库 在 Visual Studio 中 xff0c 验证包含定义源代码文件是生成 xff0c 分别链接为项目的一部分 在命令行中 xff0c
  • FFmpeg合并视频流与音频流

    mux h ifndef MUX H define MUX H ifdef cplusplus extern 34 C 34 endif include 34 common h 34 include 34 encode h 34 typed
  • 解决电脑同时使用有线网上内网,无线网上外网的冲突

    由于内网有网络限制 xff08 限制娱乐等 xff09 xff0c 所以肯定要用外网 xff08 无线网卡 xff09 但是有的网站只能用内网访问 xff0c 比如gitlab xff0c oa等 我电脑刚开始连接了wifi后上不了gitl
  • Python斗鱼直播间自动发弹幕脚本

    工具 xff1a Python xff0c Chrome浏览器 因为不会用短信验证码登录 xff0c 所以使用QQ帐号登录 xff0c 必须要斗鱼帐号绑定QQ号 难点主要是帧的切换 查找元素可以通过chrome浏览器鼠标指向该元素 xff0
  • Qt+FFmpeg录屏录音

    欢迎加QQ群309798848交流C C 43 43 linux Qt 音视频 OpenCV 源码 xff1a Qt 43 FFmpeg录屏录音 NanaRecorder 之前的录屏项目ScreenCapture存在音视频同步问题 xff0