WebRTC: Integrating QSV Hardware Decoding

2023-05-16

1. QSV Hardware Decoding Configuration on Windows

Add the QSV decoder entry in libavcodec/codec_list.c:

 &ff_h264_qsv_decoder,
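
For context, a minimal sketch of the resulting array in codec_list.c; the neighboring entries are assumptions that depend on your build configuration — only the ff_h264_qsv_decoder line comes from this step:

static const AVCodec * const codec_list[] = {
    &ff_h264_decoder,      /* assumed: existing software decoder entry */
    &ff_h264_qsv_decoder,  /* added by this step */
    NULL };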

Add the QSV sources in ffmpeg_generate.gni (the first few files extend an existing source list; a new ffmpeg_c_sources block is then appended):

     "libavcodec/h264idct.c",
     "libavcodec/h264qpel.c",
     "libavcodec/startcode.c",
     "libavcodec/h264_mp4toannexb_bsf.c",
   ]
 }
 
 ffmpeg_c_sources += [
     "libavcodec/qsvenc_h264.c",
     "libavcodec/qsvenc.c",
     "libavcodec/qsv.c",
    "libavcodec/qsvdec.c",
    "libavcodec/qsvdec_h2645.c",
  ]

In libavcodec/bsf_list.c, register the bitstream filters:

static const AVBitStreamFilter * const bitstream_filters[] = {
    &ff_h264_mp4toannexb_bsf,
    &ff_null_bsf,
    NULL };

Enable the decoder in win-msvc/x64/config.h:

#define CONFIG_H264_QSV_DECODER 1
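
Depending on how the prebuilt config.h was generated, related switches may also need enabling. A hedged sketch — these macro names exist in FFmpeg builds, but whether your tree needs them set explicitly is an assumption:

#define CONFIG_H264_QSV_DECODER 1      /* from this article */
#define CONFIG_QSV 1                   /* assumption: QSV helper support */
#define CONFIG_H264_MP4TOANNEXB_BSF 1  /* assumption: BSF added to the build above */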

2. QSV Hardware Decoding Implementation

The implementation lives in h264_decoder_impl_ffmpeg.cc:

#include "modules/video_coding/codecs/h264/h264_decoder_impl_ffmpeg.h"

#include <algorithm>
#include <limits>

extern "C" {
#include "third_party/ffmpeg/libavcodec/avcodec.h"
#include "third_party/ffmpeg/libavformat/avformat.h"
#include "third_party/ffmpeg/libavutil/imgutils.h"
#include "third_party/ffmpeg/libavutil/opt.h"
}  // extern "C"

#include "base/checks.h"
#include "base/criticalsection.h"
#include "base/keep_ref_until_done.h"
#include "base/logging.h"
#include "system_wrappers/include/metrics.h"
#include "libyuv/convert.h"

namespace webrtc {
namespace {
#define PRINT_TIME_DECODE_DELAY 0
const AVPixelFormat kPixelFormat = AV_PIX_FMT_YUV420P;
const size_t kYPlaneIndex = 0;
const size_t kUPlaneIndex = 1;
const size_t kVPlaneIndex = 2;



// Used by histograms. Values of entries should not be changed.
enum H264DecoderImplEvent {
  kH264DecoderEventInit = 0,
  kH264DecoderEventError = 1,
  kH264DecoderEventMax = 16,
};

#if defined(WEBRTC_INITIALIZE_FFMPEG)

rtc::CriticalSection ffmpeg_init_lock;
bool ffmpeg_initialized = false;

// Called by FFmpeg to do mutex operations if initialized using
// |InitializeFFmpeg|.
int LockManagerOperation(void** lock, AVLockOp op)
    EXCLUSIVE_LOCK_FUNCTION() UNLOCK_FUNCTION() {
  switch (op) {
    case AV_LOCK_CREATE:
      *lock = new rtc::CriticalSection();
      return 0;
    case AV_LOCK_OBTAIN:
      static_cast<rtc::CriticalSection*>(*lock)->Enter();
      return 0;
    case AV_LOCK_RELEASE:
      static_cast<rtc::CriticalSection*>(*lock)->Leave();
      return 0;
    case AV_LOCK_DESTROY:
      delete static_cast<rtc::CriticalSection*>(*lock);
      *lock = nullptr;
      return 0;
  }
  RTC_NOTREACHED() << "Unrecognized AVLockOp.";
  return -1;
}

void InitializeFFmpeg() {
  LOG_F(LS_INFO);
  rtc::CritScope cs(&ffmpeg_init_lock);
  if (!ffmpeg_initialized) {
    if (av_lockmgr_register(LockManagerOperation) < 0) {
      RTC_NOTREACHED() << "av_lockmgr_register failed.";
      return;
    }
    av_register_all();
    ffmpeg_initialized = true;
  }
}

#endif  // defined(WEBRTC_INITIALIZE_FFMPEG)

}  // namespace

int H264DecoderImplFfmpeg::AVGetBuffer2(
    AVCodecContext* context, AVFrame* av_frame, int flags) {
  // Set in |InitDecode|.
  H264DecoderImplFfmpeg* decoder = static_cast<H264DecoderImplFfmpeg*>(context->opaque);
  // DCHECK values set in |InitDecode|.
  RTC_DCHECK(decoder);
  RTC_DCHECK_EQ(context->pix_fmt, kPixelFormat);
  // Necessary capability to be allowed to provide our own buffers.
  RTC_DCHECK(context->codec->capabilities & AV_CODEC_CAP_DR1);

  // |av_frame->width| and |av_frame->height| are set by FFmpeg. These are the
  // actual image's dimensions and may be different from |context->width| and
  // |context->coded_width| due to reordering.
  int width = av_frame->width;
  int height = av_frame->height;
  // See |lowres|, if used the decoder scales the image by 1/2^(lowres). This
  // has implications on which resolutions are valid, but we don't use it.
  RTC_CHECK_EQ(context->lowres, 0);
  // Adjust the |width| and |height| to values acceptable by the decoder.
  // Without this, FFmpeg may overflow the buffer. If modified, |width| and/or
  // |height| are larger than the actual image and the image has to be cropped
  // (top-left corner) after decoding to avoid visible borders to the right and
  // bottom of the actual image.
  avcodec_align_dimensions(context, &width, &height);

  RTC_CHECK_GE(width, 0);
  RTC_CHECK_GE(height, 0);
  int ret = av_image_check_size(static_cast<unsigned int>(width),
                                static_cast<unsigned int>(height), 0, nullptr);
  if (ret < 0) {
    LOG(LS_ERROR) << "Invalid picture size " << width << "x" << height;
    decoder->ReportError();
    return ret;
  }

  // The video frame is stored in |video_frame|. |av_frame| is FFmpeg's version
  // of a video frame and will be set up to reference |video_frame|'s buffers.
  VideoFrame* video_frame = new VideoFrame();
  // FFmpeg expects the initial allocation to be zero-initialized according to
  // http://crbug.com/390941. Our pool is set up to zero-initialize new buffers.
  video_frame->set_video_frame_buffer(
      decoder->pool_.CreateBuffer(width, height));
  // DCHECK that we have a continuous buffer as is required.
  RTC_DCHECK_EQ(video_frame->buffer(kUPlane),
      video_frame->buffer(kYPlane) + video_frame->allocated_size(kYPlane));
  RTC_DCHECK_EQ(video_frame->buffer(kVPlane),
      video_frame->buffer(kUPlane) + video_frame->allocated_size(kUPlane));
  int total_size = video_frame->allocated_size(kYPlane) +
                   video_frame->allocated_size(kUPlane) +
                   video_frame->allocated_size(kVPlane);

  av_frame->format = context->pix_fmt;
  av_frame->reordered_opaque = context->reordered_opaque;

  // Set |av_frame| members as required by FFmpeg.
  av_frame->data[kYPlaneIndex] = video_frame->buffer(kYPlane);
  av_frame->linesize[kYPlaneIndex] = video_frame->stride(kYPlane);
  av_frame->data[kUPlaneIndex] = video_frame->buffer(kUPlane);
  av_frame->linesize[kUPlaneIndex] = video_frame->stride(kUPlane);
  av_frame->data[kVPlaneIndex] = video_frame->buffer(kVPlane);
  av_frame->linesize[kVPlaneIndex] = video_frame->stride(kVPlane);
  RTC_DCHECK_EQ(av_frame->extended_data, av_frame->data);

  av_frame->buf[0] = av_buffer_create(av_frame->data[kYPlaneIndex],
                                      total_size,
                                      AVFreeBuffer2,
                                      static_cast<void*>(video_frame),
                                      0);
  RTC_CHECK(av_frame->buf[0]);
  return 0;
}

void H264DecoderImplFfmpeg::AVFreeBuffer2(void* opaque, uint8_t* data) {
  // The buffer pool recycles the buffer used by |video_frame| when there are no
  // more references to it. |video_frame| is a thin buffer holder and is not
  // recycled.
  VideoFrame* video_frame = static_cast<VideoFrame*>(opaque);
  delete video_frame;
}

H264DecoderImplFfmpeg::H264DecoderImplFfmpeg(bool is_hw) : pool_(true),
                                     decoded_image_callback_(nullptr),
                                     has_reported_init_(false),
                                     has_reported_error_(false),
                                     clock_(Clock::GetRealTimeClock()),
                                     isFirstFrame(true),
                                     is_hw_(is_hw) {
    start_time_ = clock_->TimeInMilliseconds();
}

H264DecoderImplFfmpeg::~H264DecoderImplFfmpeg() {
  Release();
  int64_t deltaTimeSec = (clock_->TimeInMilliseconds() - start_time_)/1000;
  LOG(LS_INFO) << "discard_cnt_:" << discard_cnt_
                << ", decode_cnt_:" << decode_cnt_
                << ", idr_cnt_:" << idr_cnt_
                << ", decoded_cnt_:" << decoded_cnt_
                << ", deltaTimeSec:" << deltaTimeSec
                << ", average frame rate:" << (deltaTimeSec ? (decoded_cnt_/deltaTimeSec) : decoded_cnt_);
}

void H264DecoderImplFfmpeg::PrintDecoderSettings(const VideoCodec* codec_settings, const AVCodecContext* codec_ctx) {
  LOG(LS_INFO) << " ";
  LOG(LS_INFO) << "#############################################################";
  LOG(LS_INFO) << "#               Decoder Parameter Setting:                  #";
  LOG(LS_INFO) << "#############################################################";
  LOG(LS_INFO) << "codec name                               :" << codec_ctx->codec->name;
  LOG(LS_INFO) << "codec type                               :" << codec_ctx->codec_type;
  LOG(LS_INFO) << "codec id                                 :" << codec_ctx->codec_id;
  LOG(LS_INFO) << "codec_settings.width                     :" << codec_settings->width;
  LOG(LS_INFO) << "codec_settings.height                    :" << codec_settings->height;
  LOG(LS_INFO) << "codec_settings.startBitrate              :" << codec_settings->startBitrate;
  LOG(LS_INFO) << "codec_settings.maxBitrate                :" << codec_settings->maxBitrate;
  LOG(LS_INFO) << "codec_settings.minBitrate                :" << codec_settings->minBitrate;
  LOG(LS_INFO) << "codec_settings.targetBitrate             :" << codec_settings->targetBitrate;
  LOG(LS_INFO) << "codec_settings.maxFramerate              :" << static_cast<int32_t>(codec_settings->maxFramerate);
  LOG(LS_INFO) << "------------------------------------------------------------ ";
  LOG(LS_INFO) << "codec_ctx.width                          :" << codec_ctx->width;
  LOG(LS_INFO) << "codec_ctx.height                         :" << codec_ctx->height;
  LOG(LS_INFO) << "codec_ctx.pix_fmt                        :" << codec_ctx->pix_fmt;
  LOG(LS_INFO) << "codec_ctx.flags                          :" << static_cast<uint32_t>(codec_ctx->flags);
  LOG(LS_INFO) << "codec_ctx.bit_rate                       :" << codec_ctx->bit_rate;
  LOG(LS_INFO) << "#############################################################";
}

int32_t H264DecoderImplFfmpeg::InitHwDecode(const VideoCodec* codec_settings) {
  AVCodec* codec = avcodec_find_decoder_by_name("h264_qsv");
  if (!codec) {
    // This is an indication that FFmpeg has not been initialized or it has not
    // been compiled/initialized with the correct set of codecs.
    LOG(LS_ERROR) << "FFmpeg H.264 HW decoder not found.";
    Release();
    ReportError();
    return WEBRTC_VIDEO_CODEC_ERROR;
  }
  LOG(LS_INFO) << "Found decoder codec name " << codec->name;
  av_context_.reset(avcodec_alloc_context3(codec));
  if (codec_settings) {
    av_context_->coded_width = codec_settings->width;
    av_context_->coded_height = codec_settings->height;
  }
  av_context_->pix_fmt = AV_PIX_FMT_NV12;
  av_opt_set(av_context_->priv_data, "async_depth", "1", 0);
  int res = avcodec_open2(av_context_.get(), codec, nullptr);
  if (res < 0) {
    LOG(LS_ERROR) << "avcodec_open2 error: " << res;
    Release();
    ReportError();
    return WEBRTC_VIDEO_CODEC_ERROR;
  }
  
  PrintDecoderSettings(codec_settings, av_context_.get());

  av_frame_.reset(av_frame_alloc());
  return WEBRTC_VIDEO_CODEC_OK;
}

int32_t H264DecoderImplFfmpeg::InitDecode(const VideoCodec* codec_settings,
                                    int32_t number_of_cores) {
  LOG_F(LS_INFO);
  ReportInit();
  isFirstFrame = true;
  if (codec_settings &&
      codec_settings->codecType != kVideoCodecH264) {
    ReportError();
    return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
  }

  // FFmpeg must have been initialized (with |av_lockmgr_register| and
  // |av_register_all|) before we proceed. |InitializeFFmpeg| does this, which
  // makes sense for WebRTC standalone. In other cases, such as Chromium, FFmpeg
  // is initialized externally and calling |InitializeFFmpeg| would be
  // thread-unsafe and result in FFmpeg being initialized twice, which could
  // break other FFmpeg usage. See the |rtc_initialize_ffmpeg| flag.
#if defined(WEBRTC_INITIALIZE_FFMPEG)
  // Make sure FFmpeg has been initialized. Subsequent |InitializeFFmpeg| calls
  // do nothing.
  InitializeFFmpeg();
#endif

  // Release necessary in case of re-initializing.
  int32_t ret = Release();
  if (ret != WEBRTC_VIDEO_CODEC_OK) {
    ReportError();
    return ret;
  }
  RTC_DCHECK(!av_context_);

  if (is_hw_) {
    return InitHwDecode(codec_settings);
  }
  // Initialize AVCodecContext.
  av_context_.reset(avcodec_alloc_context3(nullptr));

  av_context_->codec_type = AVMEDIA_TYPE_VIDEO;
  av_context_->codec_id = AV_CODEC_ID_H264;

  if (codec_settings) {
    av_context_->coded_width = codec_settings->width;
    av_context_->coded_height = codec_settings->height;
  }
  av_context_->pix_fmt = kPixelFormat;
  av_context_->extradata = nullptr;
  av_context_->extradata_size = 0;

  // If this is ever increased, look at |av_context_->thread_safe_callbacks| and
  // make it possible to disable the thread checker in the frame buffer pool.
  av_context_->thread_count = av_cpu_count() + 1;
  av_context_->thread_type = FF_THREAD_SLICE;

  // Function used by FFmpeg to get buffers to store decoded frames in.
  av_context_->get_buffer2 = AVGetBuffer2;
  // |get_buffer2| is called with the context, there |opaque| can be used to get
  // a pointer |this|.
  av_context_->opaque = this;
  // Use ref counted frames (av_frame_unref).
  av_context_->refcounted_frames = 1;  // true

  AVCodec* codec = avcodec_find_decoder(av_context_->codec_id);

  if (!codec) {
    // This is an indication that FFmpeg has not been initialized or it has not
    // been compiled/initialized with the correct set of codecs.
    LOG(LS_ERROR) << "FFmpeg H.264 decoder not found.";
    Release();
    ReportError();
    return WEBRTC_VIDEO_CODEC_ERROR;
  }

  int res = avcodec_open2(av_context_.get(), codec, nullptr);
  if (res < 0) {
    LOG(LS_ERROR) << "avcodec_open2 error: " << res;
    Release();
    ReportError();
    return WEBRTC_VIDEO_CODEC_ERROR;
  }

  PrintDecoderSettings(codec_settings, av_context_.get());
  av_frame_.reset(av_frame_alloc());
  return WEBRTC_VIDEO_CODEC_OK;
}

int32_t H264DecoderImplFfmpeg::Release() {
  avcodec_close(av_context_.get());
  av_context_.reset();
  av_frame_.reset();
  return WEBRTC_VIDEO_CODEC_OK;
}

int32_t H264DecoderImplFfmpeg::RegisterDecodeCompleteCallback(
    DecodedImageCallback* callback) {
  decoded_image_callback_ = callback;
  return WEBRTC_VIDEO_CODEC_OK;
}

int32_t H264DecoderImplFfmpeg::Decode(const EncodedImage& input_image,
                                bool /*missing_frames*/,
                                const RTPFragmentationHeader* /*fragmentation*/,
                                const CodecSpecificInfo* codec_specific_info,
                                int64_t /*render_time_ms*/) {
  if (!IsInitialized()) {
    ReportError();
    return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
  }
  if (!decoded_image_callback_) {
    LOG(LS_WARNING) << "InitDecode() has been called, but a callback function "
        "has not been set with RegisterDecodeCompleteCallback()";
    ReportError();
    return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
  }
  if (!input_image._buffer || !input_image._length) {
    ReportError();
    return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
  }
  if (codec_specific_info &&
      codec_specific_info->codecType != kVideoCodecH264) {
    ReportError();
    return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
  }
  if ((input_image._frameType != kVideoFrameKey) && isFirstFrame) {
    LOG_F(LS_WARNING) << "First frame must be an IDR frame.";
    ++discard_cnt_;
    return WEBRTC_VIDEO_CODEC_ERROR;
  }
  if (input_image._frameType == kVideoFrameKey) {
    ++idr_cnt_;
  }
  isFirstFrame = false;

  
#if PRINT_TIME_DECODE_DELAY
  int64_t h264_decode_start_time = clock_->TimeInMilliseconds();
#endif

  // FFmpeg requires padding due to some optimized bitstream readers reading 32
  // or 64 bits at once and could read over the end. See avcodec_decode_video2.
  RTC_CHECK_GE(input_image._size, input_image._length +
                   EncodedImage::GetBufferPaddingBytes(kVideoCodecH264));

  // "If the first 23 bits of the additional bytes are not 0, then damaged MPEG
  // bitstreams could cause overread and segfault." See
  // AV_INPUT_BUFFER_PADDING_SIZE. We'll zero the entire padding just in case.
  memset(input_image._buffer + input_image._length,
         0,
         EncodedImage::GetBufferPaddingBytes(kVideoCodecH264));

  AVPacket packet;
  av_init_packet(&packet);
  packet.data = input_image._buffer;
  if (input_image._length >
      static_cast<size_t>(std::numeric_limits<int>::max())) {
    ReportError();
    return WEBRTC_VIDEO_CODEC_ERROR;
  }
  packet.size = static_cast<int>(input_image._length);
  av_context_->reordered_opaque = input_image.ntp_time_ms_ * 1000;  // ms -> us
  
  decode_cnt_++;
  int frame_decoded = 0;
  RTC_CHECK(av_frame_.get());
  int result = avcodec_decode_video2(av_context_.get(),
                                     av_frame_.get(),
                                     &frame_decoded,
                                     &packet);
  if (result < 0) {
    LOG(LS_ERROR) << "avcodec_decode_video2 error: " << result;
    ReportError();
    return WEBRTC_VIDEO_CODEC_ERROR;
  }
  // |result| is number of bytes used, which should be all of them.
  if (result != packet.size) {
    LOG(LS_ERROR) << "avcodec_decode_video2 consumed " << result << " bytes "
        "when " << packet.size << " bytes were expected.";
    ReportError();
    return WEBRTC_VIDEO_CODEC_ERROR;
  }

  if (!frame_decoded) {
    LOG(LS_WARNING) << "avcodec_decode_video2 successful but no frame was "
        "decoded.";
    return WEBRTC_VIDEO_CODEC_OK;
  }
  decoded_cnt_++;
#if PRINT_TIME_DECODE_DELAY
  int64_t h264_decode_end_time = clock_->TimeInMilliseconds();
  int64_t h264_decode_use_time = h264_decode_end_time - h264_decode_start_time;
  LOG(LS_INFO) << "Decode: hardware enable: " << is_hw_ << " use_time_ms:" << h264_decode_use_time;
#endif

  if (is_hw_) {
    if (!temp_frame_) {
        temp_frame_.reset(av_frame_alloc());
        if (!temp_frame_) {
            LOG(LS_ERROR) << "Could not allocate video frame";
            return WEBRTC_VIDEO_CODEC_ERROR;
        }
        temp_frame_->format = AV_PIX_FMT_YUV420P;  // QSV outputs NV12; convert to I420.
        temp_frame_->width = av_frame_->width;
        temp_frame_->height = av_frame_->height;
        int ret = av_frame_get_buffer(temp_frame_.get(), 32);
        if (ret < 0) {
            LOG(LS_ERROR) << "Could not allocate the video frame data";
            return WEBRTC_VIDEO_CODEC_ERROR;
        }
    }
    // Convert NV12 (Y plane + interleaved UV plane) to I420 for WebRTC.
    int ret = libyuv::NV12ToI420(
        av_frame_->data[0], av_frame_->linesize[0],    // NV12 Y plane
        av_frame_->data[1], av_frame_->linesize[1],    // NV12 UV plane
        temp_frame_->data[kYPlaneIndex], temp_frame_->linesize[0],
        temp_frame_->data[kUPlaneIndex], temp_frame_->linesize[1],
        temp_frame_->data[kVPlaneIndex], temp_frame_->linesize[2],
        av_frame_->width, av_frame_->height);

    LOG(LS_VERBOSE) << "Decoded Video Frame. input_image._length[" << input_image._length
                  << "], input_image._size[" << input_image._size
                  << "], decode number[" << decoded_cnt_
                  << "], timestamp[" << input_image._timeStamp
                  << "], temp_frame width[" << temp_frame_->width
                  << "], temp_frame height[" << temp_frame_->height
                  << "], temp_frame strideY[" << temp_frame_->linesize[0]
                  << "], temp_frame strideU[" << temp_frame_->linesize[1]
                  << "], AVFrame width[" << av_frame_->width
                  << "], AVFrame height[" << av_frame_->height
                  << "], AVFrame lines[0][" << av_frame_->linesize[0]
                  << "], AVFrame lines[1][" << av_frame_->linesize[1] << "].";

    decoded_frame_.CreateEmptyFrame(av_frame_->width, av_frame_->height, av_frame_->width, av_frame_->width/2, av_frame_->width/2);
    uint8_t *dst_y = decoded_frame_.buffer(kYPlane);
    uint8_t *src_y = temp_frame_->data[kYPlane];

    uint8_t *dst_u = decoded_frame_.buffer(kUPlane);
    uint8_t *src_u = temp_frame_->data[kUPlane];

    uint8_t *dst_v = decoded_frame_.buffer(kVPlane);
    uint8_t *src_v = temp_frame_->data[kVPlane];

    memcpy(dst_y, src_y, av_frame_->width * av_frame_->height);
    memcpy(dst_u, src_u, av_frame_->width * av_frame_->height / 4);
    memcpy(dst_v, src_v, av_frame_->width * av_frame_->height / 4);

    decoded_frame_.set_timestamp(input_image._timeStamp);
    decoded_frame_.SetIncomingTimeMs(input_image._incomingTimeMs);    
    decoded_frame_.SetFrameCnt(decode_cnt_);
    ret = decoded_image_callback_->Decoded(decoded_frame_);

    // Stop referencing it, possibly freeing |video_frame|.
    av_frame_unref(av_frame_.get());
    if (ret) {
      LOG(LS_WARNING) << "DecodedImageCallback::Decoded returned " << ret;
      return ret;
    }
    return WEBRTC_VIDEO_CODEC_OK;
  } // end of is_hw_

  // Obtain the |video_frame| containing the decoded image.
  VideoFrame* video_frame = static_cast<VideoFrame*>(
      av_buffer_get_opaque(av_frame_->buf[0]));
  RTC_DCHECK(video_frame);
  RTC_CHECK_EQ(av_frame_->data[kYPlane], video_frame->buffer(kYPlane));
  RTC_CHECK_EQ(av_frame_->data[kUPlane], video_frame->buffer(kUPlane));
  RTC_CHECK_EQ(av_frame_->data[kVPlane], video_frame->buffer(kVPlane));
  video_frame->set_timestamp(input_image._timeStamp);
  video_frame->SetIncomingTimeMs(input_image._incomingTimeMs);
  LOG(LS_VERBOSE) << "Decoded Video Frame. input_image._length[" << input_image._length
                  << "], input_image._size[" << input_image._size
                  << "], decode number[" << decode_cnt_
                  << "], timestamp[" << input_image._timeStamp
                  << "], pointer[" << (void*)(video_frame->video_frame_buffer()->DataY())
                  << "],video frame width[" << video_frame->width()
                  << "],video frame height[" << video_frame->height()
                  << "],video frame strideY[" << video_frame->stride(kYPlane)
                  << "],video frame strideU[" << video_frame->stride(kUPlane)
                  << "],AVFrame width[" << av_frame_->width
                  << "],AVFrame height[" << av_frame_->height
                  << "],AVFrame lines[0][" << av_frame_->linesize[0]
                  << "],AVFrame lines[1][" << av_frame_->linesize[1] << "].";

  int32_t ret = 0;
  // The decoded image may be larger than what is supposed to be visible, see
  // |AVGetBuffer2|'s use of |avcodec_align_dimensions|. This crops the image
  // without copying the underlying buffer.
  rtc::scoped_refptr<VideoFrameBuffer> buf = video_frame->video_frame_buffer();
  if((av_frame_->width != buf->width()) || (av_frame_->height != buf->height())) {
    decoded_frame_.CreateEmptyFrame(av_frame_->width, av_frame_->height, av_frame_->width, av_frame_->width/2, av_frame_->width/2);

    uint8_t *dst_y = decoded_frame_.buffer(kYPlane);
    uint8_t *src_y = const_cast<uint8_t*>(video_frame->video_frame_buffer()->DataY());

    uint8_t *dst_u = decoded_frame_.buffer(kUPlane);
    uint8_t *src_u = const_cast<uint8_t*>(video_frame->video_frame_buffer()->DataU());

    uint8_t *dst_v = decoded_frame_.buffer(kVPlane);
    uint8_t *src_v = const_cast<uint8_t*>(video_frame->video_frame_buffer()->DataV());

    if(av_frame_->width == buf->width()) {
      memcpy(dst_y, src_y, av_frame_->width * av_frame_->height);
      memcpy(dst_u, src_u, av_frame_->width * av_frame_->height / 4);
      memcpy(dst_v, src_v, av_frame_->width * av_frame_->height / 4);
    } else {
      for(int i = 0; i < av_frame_->height; i++){
        memcpy(dst_y, src_y, av_frame_->width);
        dst_y += av_frame_->width;
        src_y += buf->width();
      }

      for(int i = 0; i < av_frame_->height/2; i++){
        memcpy(dst_u, src_u, av_frame_->width/2);
        dst_u += av_frame_->width/2;
        src_u += buf->width()/2;
      }

      for(int i = 0; i < av_frame_->height/2; i++){
        memcpy(dst_v, src_v, av_frame_->width/2);
        dst_v += av_frame_->width/2;
        src_v += buf->width()/2;
      }
    }

    decoded_frame_.set_timestamp(input_image._timeStamp);
    decoded_frame_.SetIncomingTimeMs(input_image._incomingTimeMs);    
    decoded_frame_.SetFrameCnt(decode_cnt_);
    ret = decoded_image_callback_->Decoded(decoded_frame_);
  } else {
    // This branch is not expected to be reached currently.
    LOG(LS_ERROR) << "Reached unexpected uncropped-frame path.";

    video_frame->SetFrameCnt(decode_cnt_);
    ret = decoded_image_callback_->Decoded(*video_frame);
  }

  // Stop referencing it, possibly freeing |video_frame|.
  av_frame_unref(av_frame_.get());
  video_frame = nullptr;

  if (ret) {
    LOG(LS_WARNING) << "DecodedImageCallback::Decoded returned " << ret;
    return ret;
  }
  return WEBRTC_VIDEO_CODEC_OK;
}

bool H264DecoderImplFfmpeg::IsInitialized() const {
  return av_context_ != nullptr;
}

void H264DecoderImplFfmpeg::ReportInit() {
  if (has_reported_init_)
    return;
  RTC_HISTOGRAM_ENUMERATION("WebRTC.Video.H264DecoderImpl.Event",
                            kH264DecoderEventInit,
                            kH264DecoderEventMax);
  has_reported_init_ = true;
}

void H264DecoderImplFfmpeg::ReportError() {
  if (has_reported_error_)
    return;
  RTC_HISTOGRAM_ENUMERATION("WebRTC.Video.H264DecoderImpl.Event",
                            kH264DecoderEventError,
                            kH264DecoderEventMax);
  has_reported_error_ = true;
}

}  // namespace webrtc
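
A hedged usage sketch of the class above — the dimensions and callback wiring are placeholders for illustration; only H264DecoderImplFfmpeg and its methods come from the implementation shown:

#include <cstring>
#include <memory>

// |callback| is your DecodedImageCallback implementation (placeholder).
void CreateQsvDecoder(webrtc::DecodedImageCallback* callback) {
  std::unique_ptr<webrtc::H264DecoderImplFfmpeg> decoder(
      new webrtc::H264DecoderImplFfmpeg(/*is_hw=*/true));  // true selects the QSV path
  webrtc::VideoCodec settings;
  std::memset(&settings, 0, sizeof(settings));
  settings.codecType = webrtc::kVideoCodecH264;
  settings.width = 1280;   // assumed stream dimensions
  settings.height = 720;
  decoder->InitDecode(&settings, /*number_of_cores=*/1);
  decoder->RegisterDecodeCompleteCallback(callback);
  // Feed EncodedImage packets via decoder->Decode(...).
}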


3. Analysis and Summary

With QSV hardware decoding, the decoder buffers about two frames internally. At 15 fps the frame interval is 1000/15 ≈ 67 ms, so two buffered frames add roughly 130 ms of latency. That is acceptable in a media player, but in RTC it visibly increases end-to-end delay. There may be tuning parameters that reduce this buffering; I have not found one yet.
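
A minimal sketch of the latency arithmetic above (plain C++, no project dependencies; the queue depth of 2 is the observed value from testing, not a documented QSV constant):

#include <cstdio>

int main() {
  const int fps = 15;
  const int buffered_frames = 2;                  // observed QSV queue depth
  const double frame_interval_ms = 1000.0 / fps;  // ~66.7 ms per frame
  const double added_delay_ms = buffered_frames * frame_interval_ms;
  std::printf("added decoder delay: ~%.0f ms\n", added_delay_ms);  // ~133 ms
  return 0;
}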
