Overview
WebM is an open, royalty-free media container format proposed by Google, which released its VP8 video codec under a BSD-style open-source license. WebM/VP8 is therefore fully free, open source, and openly licensed, and thanks to Google's continued optimization its decoding speed and tooling have improved markedly, with compression efficiency and performance far better than at its initial release.
In Qt, the approach is to subclass QAbstractVideoSurface, run the encoder on a worker thread owned by an Encoding object, and pass data between the two via signals and slots. The FFmpeg libraries, which include the VP8 codec, can be downloaded from the web. The code below covers the VP8 initialization and the actual encoding of each video frame. Send me a private message if you need the complete project.
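Before diving into the classes, here is how the pieces fit together at the call site. This is only a minimal wiring sketch under my own assumptions (QCamera comes from Qt Multimedia; the object names and the output file name are illustrative, not part of the project below):

QCamera *camera = new QCamera; //Default system camera
CameraVideoSurface *surface = new CameraVideoSurface(1280, 720, 30);
camera->setViewfinder(surface); //QCamera now delivers every captured frame to surface->present()
camera->start();
//... while recording, call surface->TriggerSignalTrue() once per frame you want encoded ...
surface->fileFinished("output.webm"); //Finalize the WebM file under its final name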
Code: cameravideosurface.h
#include <QAbstractVideoSurface>
#include <QTemporaryFile>
#include <QThread>
#include <QDebug>
#include <QElapsedTimer>
#define BIT_RATE 8000000
#define PTS_TIMES 40 //pts step per frame; the WebM muxer uses a 1/1000 time base, so 40 => 40 ms per frame (25 fps), which plays back smoothly in VLC
extern "C" //FFmpeg libary
{
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libswscale/swscale.h>
#include <libavdevice/avdevice.h>
#include <libavutil/avutil.h>
#include "libavutil/imgutils.h"
#include "libavutil/opt.h"
}
class Encoding : public QObject
{
Q_OBJECT
public :
Encoding(AVFormatContext *p_AVFormatContext, AVCodecContext *p_AVCodecContext, AVFrame *p_AVFrame, AVPacket *p_AVPacket, struct SwsContext *p_image_convert_ctx);
~Encoding();
public slots:
void slot_encodeImage(QImage image, bool isFileClosed);
private:
AVFormatContext *m_AVFormatContext = nullptr;
AVCodecContext *m_AVCodecContext = nullptr;
AVFrame *m_AVFrame = nullptr;
AVPacket *m_AVPacket = nullptr;
struct SwsContext *image_convert_ctx = nullptr;
int frame_index = 0;
};
class CameraVideoSurface : public QAbstractVideoSurface
{
Q_OBJECT
public:
CameraVideoSurface(int width = 1280, int height = 720, int frameRate = 30, bool image_reverse = false);
~CameraVideoSurface();
void fileFinished(QString correctFileName = "No_name");
bool present(const QVideoFrame &frame);
QList<QVideoFrame::PixelFormat> supportedPixelFormats(QAbstractVideoBuffer::HandleType type = QAbstractVideoBuffer::NoHandle) const;
void TriggerSignalTrue();
signals:
void signal_showFrame(QImage image);
void signal_encodeImage(QImage image, bool isFileClosed = false);
private:
QImage m_image;
AVFormatContext *m_AVFormatContext;
AVStream *m_AVStream;
AVCodec *m_AVCodec;
AVCodecContext *m_AVCodecContext;
AVFrame *m_AVFrame;
AVPacket *m_AVPacket;
QTemporaryFile *pTempFile;
struct SwsContext *image_convert_ctx = nullptr;
bool m_triggerSignal = false;
bool isFileClosed = false;
int ret = 0;
bool m_image_reverse = false;
Encoding *m_encoding = nullptr;
QThread workerThread;
};
Code: cameravideosurface.cpp
CameraVideoSurface::CameraVideoSurface(int width, int height, int frameRate, bool image_reverse) : QAbstractVideoSurface()
{
av_register_all(); //Register all codecs in FFmpeg (deprecated since FFmpeg 4.0; required on older builds)
m_image_reverse = image_reverse;
pTempFile = new QTemporaryFile;
pTempFile->open();
QString outputFile = pTempFile->fileName();
ret = avformat_alloc_output_context2(&m_AVFormatContext, nullptr, "webm", outputFile.toLocal8Bit().data()); //Allocate an AVFormatContext for Webm.
if (ret != 0)
return;
m_AVCodec = avcodec_find_encoder(AV_CODEC_ID_VP8); //Find VP8 encoder
if(m_AVCodec == nullptr)
return;
m_AVCodecContext = avcodec_alloc_context3(m_AVCodec); //Allocate an AVCodecContext for VP8
if(m_AVCodecContext == nullptr)
return;
m_AVCodecContext->bit_rate = BIT_RATE; //Encoding bitrate; higher values give a clearer picture at the cost of file size
m_AVCodecContext->width = width; //frame width, height set by outside
m_AVCodecContext->height = height;
m_AVCodecContext->frame_number = 1; //frame_number is maintained by libavcodec during encoding; setting it here has no real effect
m_AVCodecContext->time_base.num = 1; //do not change
m_AVCodecContext->time_base.den = frameRate; //internal time base = 1/frameRate; frameRate is supplied by the caller
m_AVCodecContext->gop_size = 0; //GOP size 0 makes every frame a keyframe, so any frame can be sought directly
m_AVCodecContext->pix_fmt = AV_PIX_FMT_YUV420P; //do not change
m_AVCodecContext->max_b_frames = 0; //Set B_frame = 0, do not change
if (m_AVFormatContext->oformat->flags & AVFMT_GLOBALHEADER) //Some formats want stream headers to be separated.
m_AVCodecContext->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
av_opt_set(m_AVCodecContext->priv_data, "quality", "realtime", 0); //libvpx speed/quality trade-off; "realtime" keeps encoding fast enough for live capture
AVDictionary *param = nullptr;
if(avcodec_open2(m_AVCodecContext, m_AVCodec, &param) < 0) //Open encoder
return;
m_AVStream = avformat_new_stream(m_AVFormatContext, m_AVCodec); //Create output stream
if (m_AVStream == nullptr)
return;
m_AVStream->time_base = m_AVCodecContext->time_base; // 1/frameRate eg: 30frames per seconds
m_AVStream->codec = m_AVCodecContext; //Deprecated field, removed in newer FFmpeg; see the codecpar note after this constructor
if(avio_open(&m_AVFormatContext->pb, outputFile.toStdString().c_str(), AVIO_FLAG_READ_WRITE) < 0 ) //Open output file
return;
if(avformat_write_header(m_AVFormatContext, nullptr) < 0) //Write header of package format
return;
m_AVFrame = av_frame_alloc();
m_AVFrame->width = m_AVCodecContext->width;
m_AVFrame->height = m_AVCodecContext->height;
m_AVFrame->format = m_AVCodecContext->pix_fmt;
m_AVFrame->pts = 0;
av_image_alloc(m_AVFrame->data, m_AVFrame->linesize, m_AVFrame->width, m_AVFrame->height, m_AVCodecContext->pix_fmt, 32);
image_convert_ctx = sws_getContext(m_AVCodecContext->width, m_AVCodecContext->height, AV_PIX_FMT_RGB32, //Data source of input image
m_AVCodecContext->width, m_AVCodecContext->height, AV_PIX_FMT_YUV420P, SWS_BICUBIC, nullptr, nullptr, nullptr); //Data source of output image
if(image_convert_ctx == nullptr)
return;
m_AVPacket = av_packet_alloc(); //av_packet_alloc() already returns an initialized packet, so a separate av_init_packet() call is unnecessary
if( m_encoding != nullptr )
{
delete m_encoding;
m_encoding = nullptr;
}
m_encoding = new Encoding(m_AVFormatContext, m_AVCodecContext, m_AVFrame, m_AVPacket, image_convert_ctx);
m_encoding->moveToThread(&workerThread);
connect(this, SIGNAL(signal_encodeImage(QImage, bool)), m_encoding, SLOT(slot_encodeImage(QImage, bool)));
workerThread.start();
}
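A note on the constructor above: the m_AVStream->codec field was removed from the FFmpeg API after its deprecation period. On FFmpeg 3.1 and later, the stream instead receives a copy of the encoder parameters; a sketch of the replacement for that one assignment (same variables as above):

//Instead of: m_AVStream->codec = m_AVCodecContext;
avcodec_parameters_from_context(m_AVStream->codecpar, m_AVCodecContext); //Copy the encoder settings into the stream so the muxer can write a correct header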
QList<QVideoFrame::PixelFormat>CameraVideoSurface::supportedPixelFormats(QAbstractVideoBuffer::HandleType type) const
{
Q_UNUSED(type);
QList<QVideoFrame::PixelFormat> pixelFormats;
pixelFormats<<QVideoFrame::Format_RGB32; //Add two pixel formats which I use
pixelFormats<<QVideoFrame::Format_YUV420P;
return pixelFormats;
}
bool CameraVideoSurface::present(const QVideoFrame &frame) //Executed each time the camera delivers a frame; at a 30 fps camera rate it runs about every 33.3 ms
{
if(frame.isValid()) //Identifies whether a video frame is valid.
{
QVideoFrame cloneFrame(frame);
cloneFrame.map(QAbstractVideoBuffer::ReadOnly); //Map the frame to memory
QImage m_image(cloneFrame.bits(), cloneFrame.width(), cloneFrame.height(), QVideoFrame::imageFormatFromPixelFormat(frame.pixelFormat())); //Form a picture
m_image = m_image.mirrored(m_image_reverse, true); //horizontal, vertical
if( m_triggerSignal ) //Get the trigger signal from outside or timer
{
emit signal_encodeImage(m_image, isFileClosed);
m_triggerSignal = false;
}
emit signal_showFrame(m_image);
cloneFrame.unmap();
return true;
}
else
return false;
}
void CameraVideoSurface::fileFinished(QString correctFileName) //Finalize the WebM file and copy it to correctFileName
{
av_write_trailer(m_AVFormatContext); //Write tail of package format
avio_closep(&m_AVFormatContext->pb); //Close and free avio_open()
if(pTempFile->open())
{
pTempFile->copy(correctFileName); //Copy pTempFile's content to the file named correctFileName
delete pTempFile;
pTempFile = nullptr;
}
isFileClosed = true; //File state flag must be set
}
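Note that nothing drains the encoder here, so any frames still buffered inside libvpx are lost when the trailer is written. A possible flush step, to run at the top of fileFinished() before av_write_trailer() (a sketch using the same send/receive pattern as slot_encodeImage below; error handling omitted):

avcodec_send_frame(m_AVCodecContext, nullptr); //A null frame puts the encoder into draining mode
while(avcodec_receive_packet(m_AVCodecContext, m_AVPacket) == 0) //Returns 0 until the buffered packets run out
{
av_interleaved_write_frame(m_AVFormatContext, m_AVPacket);
av_packet_unref(m_AVPacket);
}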
CameraVideoSurface::~CameraVideoSurface()
{
workerThread.quit(); //Stop the worker thread and wait for it before destroying the encoder object
workerThread.wait();
if(m_encoding != nullptr)
{
delete m_encoding;
m_encoding = nullptr;
}
av_freep(&m_AVFrame->data[0]); //The frame buffers came from av_image_alloc(), so free them explicitly
av_frame_free(&m_AVFrame);
av_packet_free(&m_AVPacket);
sws_freeContext(image_convert_ctx);
avformat_free_context(m_AVFormatContext); //Output context: free it with avformat_free_context(), not avformat_close_input()
}
void CameraVideoSurface::TriggerSignalTrue() //Arm a one-shot trigger; the next present() call emits one frame for encoding
{
m_triggerSignal = true;
}
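present() runs for every camera frame, but a frame is only forwarded to the encoder when TriggerSignalTrue() has armed the flag. To encode at a steady rate, one option is to arm it from a timer; a minimal sketch (the QTimer, its parent, and the surface pointer are illustrative; the 40 ms interval matches PTS_TIMES):

QTimer *encodeTimer = new QTimer(parent);
QObject::connect(encodeTimer, &QTimer::timeout, surface, &CameraVideoSurface::TriggerSignalTrue);
encodeTimer->start(40); //Arm one frame every 40 ms, i.e. 25 fps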
Encoding::Encoding(AVFormatContext *p_AVFormatContext, AVCodecContext *p_AVCodecContext, AVFrame *p_AVFrame, AVPacket *p_AVPacket, struct SwsContext *p_image_convert_ctx)
{
m_AVFormatContext = p_AVFormatContext;
m_AVCodecContext = p_AVCodecContext;
m_AVFrame = p_AVFrame;
m_AVPacket = p_AVPacket;
image_convert_ctx = p_image_convert_ctx;
}
Encoding::~Encoding()
{
m_AVFormatContext = nullptr;
m_AVCodecContext = nullptr;
m_AVFrame = nullptr;
m_AVPacket = nullptr;
image_convert_ctx = nullptr;
}
void Encoding::slot_encodeImage(QImage image, bool isFileClosed)
{
const uint8_t *data[AV_NUM_DATA_POINTERS] = {nullptr};
data[0] = image.constBits(); //Raw RGB32 pixels of the incoming image
int linesize[AV_NUM_DATA_POINTERS] = {0};
linesize[0] = m_AVCodecContext->width * 4; //RGB32 uses 4 bytes per pixel
sws_scale(image_convert_ctx, data, linesize, 0, m_AVCodecContext->height, m_AVFrame->data, m_AVFrame->linesize); //Convert RGB32 to YUV420P into m_AVFrame
frame_index++;
m_AVFrame->pts = frame_index * PTS_TIMES; //pts in the WebM muxer's 1/1000 time base: a step of 40 gives 40 ms per frame (25 fps)
m_AVPacket->data = nullptr;
m_AVPacket->size = 0;
QElapsedTimer timer; //QElapsedTimer is the intended class for measuring durations
timer.start();
int ret = avcodec_send_frame(m_AVCodecContext, m_AVFrame); //codec context, m_AVFrame contains the raw video frame. Supply a raw video frame to the encoder.
qDebug() << "Time 1 duration: " << timer.elapsed();
QElapsedTimer m_timer;
m_timer.start();
while(ret == 0)
{
ret = avcodec_receive_packet(m_AVCodecContext, m_AVPacket); //codec context, m_AVPacket is the packet buffer of encoder. Read encoded data from the encoder.
if( ret < 0 ) //AVERROR(EAGAIN) just means the encoder needs more input; any other negative value is a real error
break;
if( !isFileClosed ) //If the output file has already been closed, writing the packet would fail
av_interleaved_write_frame(m_AVFormatContext, m_AVPacket); //Write a packet to an output file
av_packet_unref(m_AVPacket); //Wipe the packet
}
qDebug() << "Time 2 duration: " << m_timer.elapsed();
}