說明
Qt可以通過QOpenGLWidget使用OpenGL渲染顯示視頻,在渲染的視頻畫面上,有可能需要繪制一些幾何圖案。目前試了3種不同的方式在畫面上繪制矩形,記錄下過程。
通過QPainter繪制
Qt的繪制函數paintEvent(QPaintEvent *event)
在QOpenGLWidget中可以繪制,并且和OpenGL的內容疊在一起,只需要在繪制之前先調用下基類的paintEvent(QPaintEvent *event)
即可,可以理解為先在畫布上畫好視頻,再在畫布上畫個矩形。這種方式靈活性最好。
void RenderWidget::paintEvent(QPaintEvent *event)
{
    // Let QOpenGLWidget paint the OpenGL video frame first, then overlay
    // 2D geometry with QPainter on top of the rendered frame.
    QOpenGLWidget::paintEvent(event);

    const qreal xFactor = sin(m_nCount * 0.1); // m_nCount is the number of rendered frames
    QPainter overlay(this);
    overlay.setRenderHints(QPainter::SmoothPixmapTransform);
    overlay.save();
    const QPen rectPen(QColor("#4FE3C1"), 1, Qt::SolidLine, Qt::RoundCap, Qt::RoundJoin);
    overlay.setPen(rectPen);
    // Rectangle sweeps horizontally with the frame count; size is 1/4 of the widget.
    const QRectF box(width() * xFactor, height() * 0.7, width() * 0.25, height() * 0.25);
    overlay.drawRect(box);
    overlay.restore();
}
通過OpenGL繪制
通過不同的QOpenGLShaderProgram,可以指定不同的著色器程序來實現矩形的繪制。這種方式繪制的時候,偏移量這些變化參數要通過Uniform傳遞給OpenGL的頂點著色器,如果圖形復雜或者帶3D可以考慮。
// Compile the two shader programs and set up the vertex/texture state used by render():
//  - videoProgram: legacy-style (attribute/varying) shaders converting planar YUV to RGB
//  - rcProgram:    "#version 330 core" shaders drawing a solid-red rectangle offset by a uniform
// NOTE(review): mixing version-less shaders, "#version 330 core" shaders and GL_QUADS
// (used in render()) presumably relies on a compatibility-profile context — confirm
// the QSurfaceFormat requested for this widget.
void RenderWidget::initializeGL()
{
initializeOpenGLFunctions();
// Pass-through vertex shader for the video quad: position + texture coordinate.
const char *vsrc =
"attribute vec4 vertexIn; \
attribute vec4 textureIn; \
varying vec4 textureOut; \
void main(void) \
{ \
gl_Position = vertexIn; \
textureOut = textureIn; \
}";
// Fragment shader: sample the three YUV planes and convert to RGB (BT.601-style matrix).
const char *fsrc =
"varying mediump vec4 textureOut;\
uniform sampler2D tex_y; \
uniform sampler2D tex_u; \
uniform sampler2D tex_v; \
void main(void) \
{ \
vec3 yuv; \
vec3 rgb; \
yuv.x = texture2D(tex_y, textureOut.st).r; \
yuv.y = texture2D(tex_u, textureOut.st).r - 0.5; \
yuv.z = texture2D(tex_v, textureOut.st).r - 0.5; \
rgb = mat3( 1, 1, 1, \
0, -0.39465, 2.03211, \
1.13983, -0.58060, 0) * yuv; \
gl_FragColor = vec4(rgb, 1); \
}";
// Rectangle vertex shader: translates the base quad by the "offsetP" uniform
// so render() can animate it without re-uploading vertices.
const char *rcvsrc = "#version 330 core\n \
layout(location = 0) in vec3 aPos;\n \
uniform vec2 offsetP; \
void main(){\n \
gl_Position = vec4(aPos.x + offsetP.x, aPos.y + offsetP.y, aPos.z, 1.0f);\n \
}\n ";
// Rectangle fragment shader: constant red.
const char *rcfsrc = "#version 330 core\n \
out vec4 FragColor;\n \
void main(){\n \
FragColor = vec4(1.0f, 0.0f, 0.0f, 1.0f);\n \
}\n ";
videoProgram.addCacheableShaderFromSourceCode(QOpenGLShader::Vertex,vsrc);
videoProgram.addCacheableShaderFromSourceCode(QOpenGLShader::Fragment,fsrc);
videoProgram.link();
rcProgram.addShaderFromSourceCode(QOpenGLShader::Vertex,rcvsrc);
rcProgram.addShaderFromSourceCode(QOpenGLShader::Fragment,rcfsrc);
rcProgram.link();
// Rectangle VAO/VBO: a quarter-size quad centred at the origin; the attribute
// layout is recorded into rcvao while the Binder keeps it bound.
{
QOpenGLVertexArrayObject::Binder vaoBind(&rcvao);
GLfloat rcPoints[]{
0.25f, 0.25f, 0.0f, // top right
0.25f, -0.25f, 0.0f, // bottom right
-0.25f, -0.25f, 0.0f, // bottom left
-0.25f, 0.25f, 0.0f, // top left
};
rcvbo.create();
rcvbo.bind();
rcvbo.allocate(rcPoints, sizeof(rcPoints));
int attr = -1;
attr = rcProgram.attributeLocation("aPos");
rcProgram.setAttributeBuffer(attr, GL_FLOAT, 0, 3, sizeof(GLfloat) * 3);
rcProgram.enableAttributeArray(attr);
rcvbo.release();
}
// Video VAO/VBO: 4 full-screen positions followed by 4 texture coordinates
// (tex coords start at byte offset 2 * 4 * sizeof(GLfloat) in the same buffer).
{
QOpenGLVertexArrayObject::Binder vaoBind(&vao);
GLfloat points[]{
-1.0f, 1.0f,
1.0f, 1.0f,
1.0f, -1.0f,
-1.0f, -1.0f,
0.0f,0.0f,
1.0f,0.0f,
1.0f,1.0f,
0.0f,1.0f
};
vbo.create();
vbo.bind();
vbo.allocate(points,sizeof(points));
// One texture object per YUV plane; data is uploaded each frame in render().
m_pTextureY = std::make_unique<QOpenGLTexture>(QOpenGLTexture::Target2D);
m_pTextureU = std::make_unique<QOpenGLTexture>(QOpenGLTexture::Target2D);
m_pTextureV = std::make_unique<QOpenGLTexture>(QOpenGLTexture::Target2D);
m_pTextureY->create();
m_pTextureU->create();
m_pTextureV->create();
int attr = -1;
attr = videoProgram.attributeLocation("vertexIn");
videoProgram.setAttributeBuffer(attr, GL_FLOAT, 0, 2, 2*sizeof(GLfloat));
videoProgram.enableAttributeArray(attr);
attr = videoProgram.attributeLocation("textureIn");
videoProgram.setAttributeBuffer(attr, GL_FLOAT, 2 * 4 * sizeof(GLfloat),2,2*sizeof(GLfloat));
videoProgram.enableAttributeArray(attr);
vbo.release();
}
videoProgram.release();
rcProgram.release();
}
// Upload one planar YUV 4:2:0 frame (Y plane w*h, then U and V planes at
// quarter size, as shown by the yuvPtr offsets below) and draw it full-screen,
// then draw the animated red wireframe rectangle on top.
// @param yuvPtr  pointer to contiguous Y/U/V planes; ignored if null
// @param w, h    frame dimensions in pixels; ignored if non-positive
void RenderWidget::render(uchar *yuvPtr, int w, int h)
{
m_nCount++; // frame counter driving the rectangle animation
// glDisable(GL_DEPTH_TEST);
glClearColor(0.0f, 0.0f, 0.0f, 1.0f);
glClear(GL_COLOR_BUFFER_BIT);
if(nullptr == yuvPtr || 0 >= w || 0 >= h){
return;
}
videoProgram.bind();
{
QOpenGLVertexArrayObject::Binder vaoBind(&vao);
// Y plane on texture unit 2 (matches the "tex_y" uniform set below).
glActiveTexture(GL_TEXTURE2);
glBindTexture(GL_TEXTURE_2D, m_pTextureY->textureId());
glTexImage2D(GL_TEXTURE_2D, 0, GL_RED, w, h, 0, GL_RED,GL_UNSIGNED_BYTE, yuvPtr);
glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_MAG_FILTER,GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_MIN_FILTER,GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
// U plane on unit 1: half width/height, starts right after the Y plane.
glActiveTexture(GL_TEXTURE1);
glBindTexture(GL_TEXTURE_2D, m_pTextureU->textureId());
glTexImage2D(GL_TEXTURE_2D, 0, GL_RED, w >> 1, h >> 1, 0, GL_RED,GL_UNSIGNED_BYTE, yuvPtr + w * h);
glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_MAG_FILTER,GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_MIN_FILTER,GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
// V plane on unit 0: starts at 1.25 * w * h (Y plane + U plane).
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, m_pTextureV->textureId());
glTexImage2D(GL_TEXTURE_2D, 0, GL_RED, w >> 1, h >> 1, 0, GL_RED, GL_UNSIGNED_BYTE, yuvPtr + w * h * 5 / 4);
glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_MAG_FILTER,GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_MIN_FILTER,GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
// Sampler uniforms refer to texture UNITS, matching the bindings above.
videoProgram.setUniformValue("tex_y",2);
videoProgram.setUniformValue("tex_u",1);
videoProgram.setUniformValue("tex_v",0);
// NOTE(review): GL_QUADS is removed in core profiles — presumably this runs
// on a compatibility context; confirm, or switch to two triangles.
glDrawArrays(GL_QUADS, 0, 4);
}
videoProgram.release();
rcProgram.bind();
{
QOpenGLVertexArrayObject::Binder rcvaoBind(&rcvao);
// Draw outline only, restore fill mode afterwards.
glPolygonMode(GL_FRONT_AND_BACK, GL_LINE);
float offset = sin(m_nCount * 0.1f);
rcProgram.setUniformValue("offsetP", QVector2D(offset, 0.7f));
glDrawArrays(GL_QUADS, 0, 4);
glPolygonMode(GL_FRONT_AND_BACK, GL_FILL);
}
rcProgram.release();
}
// Keep the GL viewport in sync with the widget size on resize.
void RenderWidget::resizeGL(int w, int h)
{
glViewport(0, 0, w, h);
}
通過AVFilter繪制
ffmpeg的濾鏡有很多強大的功能,這里就用了個最簡單的drawbox, 使用濾鏡的話初始化要做很多設置,另外我還沒找到在哪設置可以改變繪制矩形的位置,filter_descr
就在初始化的時候用了一次。這種方式會直接將繪制內容輸出到最后的視頻數據上,如果不是需要保留包含繪制內容的視頻的話更傾向于上面兩種方式。
初始化
...
// drawbox filter: blue 200x200 rectangle at (200,400); used once, at init time.
static const char *filter_descr = "drawbox=x=200:y=400:w=200:h=200:color=blue";
AVFilterContext* buffersrc_ctx = nullptr;  // graph input: decoded frames are pushed here
AVFilterContext* buffersink_ctx = nullptr; // graph output: filtered frames are pulled here
AVFilterGraph* filter_graph = nullptr;
...
int InitFilters(const char *filters_descr)
{
int ret;
const AVFilter *buffersrc = avfilter_get_by_name("buffer");
const AVFilter *buffersink = avfilter_get_by_name("buffersink");
AVFilterInOut *outputs = avfilter_inout_alloc();
AVFilterInOut *inputs = avfilter_inout_alloc();
enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE };
AVBufferSinkParams *buffersink_params;
filter_graph = avfilter_graph_alloc();
/* buffer video source: the decoded frames from the decoder will be inserted here. */
QString args = QString("video_size=%1x%2:pix_fmt=%3:time_base=%4/%5:pixel_aspect=%6/%7")
.arg(m_pCodecContext->width).arg(m_pCodecContext->height).arg(m_pCodecContext->pix_fmt)
.arg(/*m_pCodecContext->time_base.num*/1).arg(30)
.arg(/*m_pCodecContext->sample_aspect_ratio.num*/16).arg(/*m_pCodecContext->sample_aspect_ratio.den*/9);
ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in",
args.toStdString().c_str(), nullptr, filter_graph);
if (ret < 0) {
qDebug() << "Cannot create buffer source " << AVErr2QString(ret);
return ret;
}
/* buffer video sink: to terminate the filter chain. */
buffersink_params = av_buffersink_params_alloc();
buffersink_params->pixel_fmts = pix_fmts;
ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out",
nullptr, buffersink_params, filter_graph);
av_free(buffersink_params);
if (ret < 0) {
qDebug() << "Cannot create buffer sink " << AVErr2QString(ret);
return ret;
}
/* Endpoints for the filter graph. */
outputs->name = av_strdup("in");
outputs->filter_ctx = buffersrc_ctx;
outputs->pad_idx = 0;
outputs->next = nullptr;
inputs->name = av_strdup("out");
inputs->filter_ctx = buffersink_ctx;
inputs->pad_idx = 0;
inputs->next = nullptr;
if ((ret = avfilter_graph_parse_ptr(filter_graph, filters_descr,
&inputs, &outputs, nullptr)) < 0)
return ret;
if ((ret = avfilter_graph_config(filter_graph, nullptr)) < 0)
return ret;
return 0;
}
解碼后的AVFrame應(yīng)用Filter
// Push the decoded frame into the filter graph's source.
if (av_buffersrc_add_frame(buffersrc_ctx, pFrame) < 0) {
qDebug("Error while add frame.\n");
return;
}
/* pull filtered pictures from the filtergraph */
int ret = av_buffersink_get_frame(buffersink_ctx, m_pFrameOut);// m_pFrameOut now holds the frame with the filter (drawbox) applied
if (ret < 0)
return;
運(yùn)行效果
綠的QPainter繪制,紅的OpenGL繪制啄清,藍(lán)色AVFilter繪制