[TOC]
開始前的BB
開始準(zhǔn)備搞播放器了赴魁,還不知道怎么跟大佬們講卸奉,頭疼
想來想去,我感覺先實(shí)現(xiàn)一個(gè)簡(jiǎn)單的視頻播放器颖御,視頻和音頻自同步榄棵,來讓各位大佬們先來體驗(yàn)一下,有個(gè)大體的脈絡(luò)
老夫擼碼就是一把梭
我們先粗暴的分為兩個(gè)線程潘拱,一個(gè)負(fù)責(zé)音頻的播放疹鳄,一個(gè)負(fù)責(zé)視頻的播放,根據(jù)之前的我們寫過的東西芦岂,我們來改一改
在 chapter_09/ 中新建兩個(gè)類 VideoThread 和 AudioThread,一個(gè)負(fù)責(zé)視頻的解碼,一個(gè)負(fù)責(zé)音頻的解碼;渲染的話我們新建一個(gè) AVRender,專門負(fù)責(zé)渲染以及窗口事件的管理。
千言萬(wàn)語(yǔ)注釋中
AVRender 渲染以及事件處理
AVRender.h
//
// Created by MirsFang on 2019-03-25.
//
#ifndef LEARNFFMPEG_AVRENDER_H
#define LEARNFFMPEG_AVRENDER_H
#define WINDOW_WIDTH 1080
#define WINDOW_HEIGHT 720
#include <iostream>
extern "C" {
#include <SDL2/SDL.h>
#include <libavcodec/avcodec.h>
}
/** Audio/video renderer: owns the SDL window, renderer, texture and the
 ** audio device, and runs the window event loop. **/
class AVRender {
public:
AVRender();
~AVRender();
/**
 * Open the SDL audio device and start playback.
 *
 * @param sample_rate output sample rate in Hz
 * @param channel number of output channels
 * @param samples samples per callback buffer (one audio frame)
 * @param userdata opaque pointer handed back to the callback
 * @param fill_audio callback SDL invokes whenever the device needs PCM data
 */
void openAudio(int sample_rate, Uint8 channel, Uint16 samples, void *userdata,
void (*fill_audio)(void *codecContext, Uint8 *stream, int len));
/** Pump SDL events until SDL_QUIT; blocks the calling thread. **/
void loopEvent();
/** Render one video frame and block for its display duration.
 *
 * @param frame decoded video frame (three planes, uploaded as IYUV)
 * @param duration how long the frame stays on screen, in milliseconds
 */
void renderVideo(AVFrame *frame,Uint32 duration);
private:
/** SDL window **/
SDL_Window *window;
/** SDL renderer **/
SDL_Renderer *render;
/** streaming texture the YUV planes are uploaded into **/
SDL_Texture *texture;
/** destination rectangle (covers the whole window) **/
SDL_Rect rect;
/** desired audio format handed to SDL_OpenAudio (SDL may adjust it) **/
SDL_AudioSpec wantSpec;
};
#endif //LEARNFFMPEG_AVRENDER_H
AVRender.cpp
//
// Created by MirsFang on 2019-03-25.
//
#include "AVRender.h"
AVRender::AVRender() {
    // FIX: null out every SDL handle first. The original left these members
    // uninitialized, so an early return below made the destructor test and
    // destroy garbage pointers (undefined behavior).
    window = nullptr;
    render = nullptr;
    texture = nullptr;
    // Initialize the SDL video/audio/event subsystems.
    if (SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_EVENTS)) {
        std::cout << "[error] SDL Init error !" << std::endl;
        return;
    }
    // Create the output window.
    window = SDL_CreateWindow("LearnFFmpeg", SDL_WINDOWPOS_UNDEFINED, SDL_WINDOWPOS_UNDEFINED, WINDOW_WIDTH,
                              WINDOW_HEIGHT, SDL_WINDOW_OPENGL);
    if (!window) {
        std::cout << "[error] SDL Create window error!" << std::endl;
        return;
    }
    // Create the renderer (-1 lets SDL pick the first suitable driver).
    render = SDL_CreateRenderer(window, -1, 0);
    if (!render) {
        // FIX: the original never checked renderer creation.
        std::cout << "[error] SDL Create renderer error!" << std::endl;
        return;
    }
    // Streaming IYUV texture decoded frames are uploaded into.
    texture = SDL_CreateTexture(render, SDL_PIXELFORMAT_IYUV, SDL_TEXTUREACCESS_STREAMING, WINDOW_WIDTH, WINDOW_HEIGHT);
    if (!texture) {
        // FIX: the original never checked texture creation.
        std::cout << "[error] SDL Create texture error!" << std::endl;
        return;
    }
    // Destination rectangle covering the whole window.
    rect.x = 0;
    rect.y = 0;
    rect.w = WINDOW_WIDTH;
    rect.h = WINDOW_HEIGHT;
}
AVRender::~AVRender() {
    // Stop the audio device first so the callback no longer fires.
    SDL_CloseAudio();
    // FIX: destroy SDL objects in reverse order of creation and BEFORE
    // SDL_Quit() — the original quit SDL first and then destroyed handles
    // belonging to an already shut-down library.
    if (texture) SDL_DestroyTexture(texture);
    if (render) SDL_DestroyRenderer(render);
    if (window) SDL_DestroyWindow(window);
    SDL_Quit();
}
void AVRender::loopEvent() {
SDL_Event event;
for (;;) {
SDL_PollEvent(&event);
switch (event.type) {
case SDL_KEYDOWN:
switch (event.key.keysym.sym) {
}
break;
case SDL_QUIT:
return;
default:
break;
}
}
}
void AVRender::renderVideo(AVFrame *frame, Uint32 duration) {
if (frame == nullptr)return;
// Upload the three YUV planes into the streaming texture.
SDL_UpdateYUVTexture(texture, &rect,
frame->data[0], frame->linesize[0],
frame->data[1], frame->linesize[1],
frame->data[2], frame->linesize[2]
);
// Clear the backbuffer, copy the texture into it and present.
SDL_RenderClear(render);
SDL_RenderCopy(render, texture, NULL, &rect);
SDL_RenderPresent(render);
// Block for the frame's display duration — this call is what paces playback.
SDL_Delay(duration);
}
void AVRender::openAudio(int sample_rate, Uint8 channel, Uint16 samples, void *userdata,
                         void (*fill_audio)(void *, Uint8 *, int)) {
    // Describe the audio format we would like. SDL_OpenAudio() may adjust
    // these fields afterwards to reflect what the hardware actually supports.
    wantSpec.userdata = userdata;
    wantSpec.callback = fill_audio;
    wantSpec.silence = 0;
    wantSpec.format = AUDIO_S16SYS;
    wantSpec.freq = sample_rate;
    wantSpec.channels = channel;
    wantSpec.samples = samples;
    if (SDL_OpenAudio(&wantSpec, NULL) < 0) {
        std::cout << "[error] open audio error" << std::endl;
        return;
    }
    // Unpause the device: SDL starts pulling PCM through fill_audio now.
    SDL_PauseAudio(0);
}
VideoThread 視頻解碼
視頻解碼類VideoThread.h
//
// Created by MirsFang on 2019-03-25.
//
#ifndef LEARNFFMPEG_VIDEOTHREAD_H
#define LEARNFFMPEG_VIDEOTHREAD_H
#include <pthread.h>
#include <iostream>
#include "AVRender.h"
extern "C" {
#include <libavformat/avformat.h>
#include <libavcodec/avcodec.h>
};
/** Video decoding thread: demuxes `url`, decodes its video stream and hands
 ** each frame to an AVRender for display. **/
class VideoThread {
public:
VideoThread();
~VideoThread();
/** Set the media URL/path (only the pointer is stored, not copied). **/
void setUrl(const char *url);
/** Set the renderer used to display decoded frames (borrowed, not owned). **/
void setRender(AVRender *render);
/** Prepare demuxer/decoder state, then start the worker thread. **/
void start();
private:
AVFormatContext *format_context;
AVCodecContext *codec_context;
AVCodec *codec;
AVPacket *packet;
AVFrame *frame;
const char *url;
int video_index;
pthread_t pid;
pthread_mutex_t mutex;
AVRender *avRender;
double last_pts = 0;
/** true: pace frames by pts interval; false: pace by average frame rate **/
bool is_interval_sync = true;
// pthread entry trampoline; `arg` is the VideoThread instance.
static void *start_thread(void *arg);
void run();
/** Initialize demuxer and decoder state. **/
void prepare_codec();
/** Decode loop: read packets, decode frames, render them. **/
void decodec_frame();
/**
 * Display duration derived from a fixed frame rate.
 * @param frame_rate frames per second
 * @return display duration in milliseconds
 */
Uint32 sync_frame_rate(double frame_rate);
/**
 * Display duration derived from the pts gap to the previous frame.
 * NOTE(review): `pts` is declared int but AVFrame::pts is int64_t, so large
 * timestamps are truncated here — consider widening both declaration and
 * definition to int64_t.
 * @param timebase stream time base used to convert pts to seconds
 * @param pts current frame pts, in timebase units
 * @return display duration in seconds
 */
double sync_frame_interval(AVRational timebase, int pts);
};
#endif //LEARNFFMPEG_VIDEOTHREAD_H
VideoThread.cpp
//
// Created by MirsFang on 2019-03-25.
//
#include "VideoThread.h"
VideoThread::VideoThread() {
    // FIX: the raw pointer members have no in-class initializers, so the
    // destructor's nullptr tests read indeterminate values if start() was
    // never called (undefined behavior). Null everything explicitly.
    format_context = nullptr;
    codec_context = nullptr;
    codec = nullptr;
    packet = nullptr;
    frame = nullptr;
    url = nullptr;
    avRender = nullptr;
    video_index = -1;
}
VideoThread::~VideoThread() {
    // Release decode resources in reverse order of acquisition; each av_*
    // helper also nulls the pointer it is handed.
    if (frame != nullptr) av_frame_free(&frame);
    if (packet != nullptr) av_packet_free(&packet);
    if (codec_context != nullptr) avcodec_free_context(&codec_context);
    if (format_context != nullptr) avformat_close_input(&format_context);
}
void VideoThread::start() {
// Build demuxer/decoder state on the caller's thread before spawning.
// NOTE(review): prepare_codec() reports failures only to stdout; the worker
// thread is started regardless, so a failed setup will dereference null
// contexts in the decode loop — consider propagating the error.
prepare_codec();
if (pthread_create(&pid, NULL, start_thread, (void *) this) != 0) {
std::cout << "初始化視頻線程失敗!" << std::endl;
return;
}
}
void *VideoThread::start_thread(void *arg) {
VideoThread *audioThread = (VideoThread *) arg;
audioThread->run();
return nullptr;
}
void VideoThread::run() {
// Worker-thread body: log start-up, then block inside the decode loop until
// the stream is exhausted.
std::cout << "視頻線程運(yùn)行中..." << std::endl;
decodec_frame();
}
void VideoThread::setRender(AVRender *render) {
    // Borrow (do not own) the shared renderer used to display frames.
    avRender = render;
}
void VideoThread::setUrl(const char *url) {
// Store the media location. Only the pointer is kept — the caller must keep
// the string alive for the lifetime of this object. (`this->` is required:
// the parameter shadows the member.)
this->url = url;
}
void VideoThread::prepare_codec() {
    // Build the demux -> decode pipeline for the video stream of `url`.
    // On any failure we log and return; members allocated so far stay valid
    // for the destructor because the free helpers null their pointers.
    int retcode;
    format_context = avformat_alloc_context();
    if (!format_context) {
        std::cout << "[error] alloc format context error!" << std::endl;
        return;
    }
    // Open the input and read the container header.
    retcode = avformat_open_input(&format_context, url, nullptr, nullptr);
    if (retcode != 0) {
        std::cout << "[error] open input error!" << std::endl;
        return;
    }
    // Probe stream parameters.
    // FIX: this API returns >= 0 on success, so the original `!= 0` check
    // treated a positive (successful) return as failure.
    retcode = avformat_find_stream_info(format_context, NULL);
    if (retcode < 0) {
        std::cout << "[error] find stream error!" << std::endl;
        return;
    }
    codec_context = avcodec_alloc_context3(NULL);
    if (!codec_context) {
        std::cout << "[error] alloc codec context error!" << std::endl;
        return;
    }
    // Locate the best video stream.
    // FIX: a negative return (no video stream) was previously used to index
    // format_context->streams — undefined behavior.
    video_index = av_find_best_stream(format_context, AVMEDIA_TYPE_VIDEO, -1, -1, NULL, 0);
    if (video_index < 0) {
        std::cout << "[error] no video stream found!" << std::endl;
        return;
    }
    // Copy the stream's codec parameters into our decoding context.
    retcode = avcodec_parameters_to_context(codec_context, format_context->streams[video_index]->codecpar);
    if (retcode < 0) {
        std::cout << "[error] parameters to context error!" << std::endl;
        return;
    }
    // Find and open a decoder matching the stream's codec id.
    codec = avcodec_find_decoder(codec_context->codec_id);
    if (codec == nullptr) {
        std::cout << "[error] find decoder error!" << std::endl;
        return;
    }
    retcode = avcodec_open2(codec_context, codec, nullptr);
    if (retcode != 0) {
        std::cout << "[error] open decodec error!" << std::endl;
        return;
    }
    // Reusable packet/frame buffers for the decode loop.
    packet = av_packet_alloc();
    frame = av_frame_alloc();
    if (!packet || !frame) {
        std::cout << "[error] alloc packet/frame error!" << std::endl;
    }
}
void VideoThread::decodec_frame() {
    int sendcode = 0;
    // Nominal frame rate of the stream, used for fixed-rate pacing.
    double frameRate = av_q2d(format_context->streams[video_index]->avg_frame_rate);
    // How long the current frame should stay on screen, in milliseconds.
    Uint32 display_time_ms = 0;
    if (!is_interval_sync) {
        display_time_ms = sync_frame_rate(frameRate);
    }
    // Timestamps bracketing per-frame decode work, used to subtract the
    // decode cost from the display delay.
    clock_t start = 0, finish = 0;
    // Read packets until end of stream.
    while (av_read_frame(format_context, packet) == 0) {
        if (packet->stream_index != video_index) {
            // FIX: the original `continue` skipped av_packet_unref() and
            // leaked the payload of every non-video packet.
            av_packet_unref(packet);
            continue;
        }
        // Drain all frames the decoder currently has ready.
        while (avcodec_receive_frame(codec_context, frame) == 0) {
            /*
             * Interval-sync mode: the time a frame stays on screen is
             *   display = pts(current) - pts(previous) - decode cost
             */
            if (is_interval_sync) {
                display_time_ms = (Uint32) (
                        sync_frame_interval(format_context->streams[video_index]->time_base, frame->pts) * 1000);
                // End of this frame's decode interval.
                finish = clock();
                // NOTE(review): clock() ticks in CLOCKS_PER_SEC units;
                // dividing by 1000 only yields milliseconds when
                // CLOCKS_PER_SEC == 1e6 — confirm on the target platform.
                double diff_time = (finish - start) / 1000;
                // Subtract decode time from the display duration.
                if (display_time_ms > diff_time) display_time_ms = display_time_ms - (Uint32) diff_time;
            }
            // Hand the frame to the renderer (blocks for display_time_ms).
            if (avRender) avRender->renderVideo(frame, display_time_ms);
            av_frame_unref(frame);
            // Start of the next frame's decode interval.
            start = clock();
        }
        // Feed the compressed packet to the decoder.
        sendcode = avcodec_send_packet(codec_context, packet);
        if (sendcode == 0) {
            // success — stay quiet
        } else if (sendcode == AVERROR_EOF) {
            std::cout << "[debug] " << "EOF" << std::endl;
        } else if (sendcode == AVERROR(EAGAIN)) {
            std::cout << "[debug] " << "EAGAIN" << std::endl;
        } else {
            // FIX: sendcode is already a (negative) AVERROR code; wrapping it
            // in AVERROR() again flipped its sign and printed bogus messages.
            std::cout << "[debug] " << av_err2str(sendcode) << std::endl;
        }
        av_packet_unref(packet);
    }
    // FIX: flush the decoder at end of stream so buffered frames are shown
    // instead of silently dropped (the original loop left them inside).
    avcodec_send_packet(codec_context, NULL);
    while (avcodec_receive_frame(codec_context, frame) == 0) {
        if (avRender) avRender->renderVideo(frame, display_time_ms);
        av_frame_unref(frame);
    }
}
Uint32 VideoThread::sync_frame_rate(double frame_rate) {
    // Fixed pacing: one frame lasts 1000 / fps milliseconds.
    // FIX: guard against streams reporting a zero/invalid frame rate — the
    // original divided by zero, producing inf and an undefined Uint32 cast.
    if (frame_rate <= 0) return 0;
    return (Uint32) (1000 / frame_rate);
}
double VideoThread::sync_frame_interval(AVRational timebase, int pts) {
// Convert the pts gap between this frame and the previous one to seconds;
// that gap becomes the current frame's display duration.
// NOTE(review): the parameter is int while AVFrame::pts is int64_t, so long
// streams truncate here — widen declaration and definition together.
double display = (pts - last_pts) * av_q2d(timebase);
last_pts = pts;
// Per-frame trace of the pts (seconds) and computed duration.
std::cout << "pts : " << pts * av_q2d(timebase) << " -- display :" << display << std::endl;
return display;
}
AudioThread 音頻解碼
AudioThread
//
// Created by MirsFang on 2019-03-25.
//
#ifndef LEARNFFMPEG_AUDIOTHREAD_H
#define LEARNFFMPEG_AUDIOTHREAD_H
#include <pthread.h>
#include <iostream>
extern "C" {
#include <libavformat/avformat.h>
#include <libavcodec/avcodec.h>
#include <libswresample/swresample.h>
};
#include "AVRender.h"
/**
 * Audio decoding thread: demuxes `url`, decodes its audio stream, resamples
 * it to the configured output format and feeds it to SDL via fill_audio.
 */
class AudioThread {
public:
AudioThread();
~AudioThread();
// Set the media URL/path (only the pointer is stored, not copied).
void setUrl(const char *url);
/** Prepare decoder/resampler/audio device, then start the worker thread. **/
void start();
/** Set the renderer that owns the SDL audio device (borrowed). **/
void setRender(AVRender *render);
private:
/** resampling context **/
SwrContext *convert_context;
AVFormatContext *format_context;
AVCodecContext *codec_context;
AVCodec *codec;
AVPacket *packet;
AVFrame *frame;
int audioIndex = -1;
uint64_t out_chn_layout = AV_CH_LAYOUT_STEREO; // output channel layout: stereo
enum AVSampleFormat out_sample_fmt = AV_SAMPLE_FMT_S16; // output sample format
int out_sample_rate = 44100; // output sample rate in Hz
int out_nb_samples = -1; // output samples per frame
int out_channels = -1; // output channel count
int out_buffer_size = -1; // output buffer size in bytes
unsigned char *outBuff = NULL; // resampled PCM output buffer
uint64_t in_chn_layout = -1; // input channel layout (set from the decoder)
pthread_t pid;
pthread_mutex_t mutex;
AVRender *av_render;
const char *url;
// pthread entry trampoline; `arg` is the AudioThread instance.
static void *start_thread(void *arg);
void run();
/** Initialize demuxer, decoder, resampler and the SDL audio device. **/
void prepare_codec();
};
#endif //LEARNFFMPEG_AUDIOTHREAD_H
AudioThread.cpp
//
// Created by MirsFang on 2019-03-25.
//
#include "AudioThread.h"
#define MAX_AUDIO_FRAME_SIZE 192000 // 1 second of 48khz 32bit audio 48000 * (32/8)
// Bytes remaining in the pending PCM chunk.
// NOTE(review): these globals are written by the decode thread and read by
// the SDL audio-callback thread with no synchronization — confirm the
// intended guarantees or guard access (e.g. SDL_LockAudio/SDL_UnlockAudio).
unsigned int audioLen = 0;
unsigned char *audioChunk = nullptr;
// Current read position inside the pending chunk.
unsigned char *audioPos = nullptr;
/** SDL2 audio callback: invoked whenever the device needs `len` more bytes. **/
void fill_audio(void *codecContext, Uint8 *stream, int len) {
// SDL2 requires the output stream to be zeroed before mixing into it.
SDL_memset(stream, 0, len);
if (audioLen == 0)
return;
// Never hand SDL more than what remains of the current chunk.
len = (len > audioLen ? audioLen : len);
// Mix the pending PCM into the output stream at full volume.
SDL_MixAudio(stream, audioPos, len, SDL_MIX_MAXVOLUME);
// Advance within the chunk; when audioLen reaches 0 the decode thread
// publishes the next chunk (see AudioThread::run).
audioPos += len;
audioLen -= len;
}
AudioThread::AudioThread() {
    // FIX: the raw pointer members have no in-class initializers (unlike the
    // scalar members), so the destructor's nullptr tests read indeterminate
    // values if start() was never called. Null them all explicitly.
    convert_context = nullptr;
    format_context = nullptr;
    codec_context = nullptr;
    codec = nullptr;
    packet = nullptr;
    frame = nullptr;
    av_render = nullptr;
    url = nullptr;
}
AudioThread::~AudioThread() {
    // Release in reverse order of acquisition; the av_*/swr_free helpers
    // also null the pointers they are handed.
    if (convert_context != nullptr) swr_free(&convert_context);
    if (frame != nullptr) av_frame_free(&frame);
    if (packet != nullptr) av_packet_free(&packet);
    if (codec_context != nullptr) avcodec_free_context(&codec_context);
    if (format_context != nullptr) avformat_close_input(&format_context);
    // FIX: outBuff is av_malloc'ed in prepare_codec() and was never freed.
    if (outBuff != nullptr) av_freep(&outBuff);
}
void AudioThread::start() {
// Build decoder/resampler state and open the audio device on the caller's
// thread before spawning the worker.
// NOTE(review): prepare_codec() reports failures only to stdout; the worker
// thread is started regardless — consider propagating the error.
prepare_codec();
if (pthread_create(&pid, NULL, start_thread, (void *) this) != 0) {
std::cout << "初始化音頻線程失敗!" << std::endl;
return;
}
}
void *AudioThread::start_thread(void *arg) {
AudioThread *audioThread = (AudioThread *) arg;
audioThread->run();
return nullptr;
}
void AudioThread::run() {
std::cout << "音頻線程已啟動(dòng)" << std::endl;
//循環(huán)讀取packet并且解碼
int sendcode = 0;
while (av_read_frame(format_context, packet) >= 0) {
if (packet->stream_index != audioIndex)continue;
//接受解碼后的音頻數(shù)據(jù)
while (avcodec_receive_frame(codec_context, frame) == 0) {
swr_convert(convert_context, &outBuff, MAX_AUDIO_FRAME_SIZE, (const uint8_t **) frame->data,
frame->nb_samples);
//如果沒有播放完就等待1ms
while (audioLen > 0)
SDL_Delay(1);
//同步數(shù)據(jù)
audioChunk = (unsigned char *) outBuff;
audioPos = audioChunk;
audioLen = out_buffer_size;
av_frame_unref(frame);
}
//發(fā)送解碼前的包數(shù)據(jù)
sendcode = avcodec_send_packet(codec_context, packet);
//根據(jù)發(fā)送的返回值判斷狀態(tài)
if (sendcode == 0) {
// std::cout << "[debug] " << "SUCCESS" << std::endl;
} else if (sendcode == AVERROR_EOF) {
std::cout << "[debug] " << "EOF" << std::endl;
} else if (sendcode == AVERROR(EAGAIN)) {
std::cout << "[debug] " << "EAGAIN" << std::endl;
} else {
std::cout << "[debug] " << av_err2str(AVERROR(sendcode)) << std::endl;
}
av_packet_unref(packet);
}
}
void AudioThread::setRender(AVRender *render) {
    // Borrow (do not own) the renderer that manages the SDL audio device.
    av_render = render;
}
void AudioThread::prepare_codec() {
    // Build the demux -> decode -> resample pipeline for the audio stream
    // and open the SDL audio device. On any failure we log and return.
    int retcode;
    format_context = avformat_alloc_context();
    if (!format_context) {
        std::cout << "[error] alloc format context error!" << std::endl;
        return;
    }
    // Open the input and read the container header.
    retcode = avformat_open_input(&format_context, url, nullptr, nullptr);
    if (retcode != 0) {
        std::cout << "[error] open input error!" << std::endl;
        return;
    }
    // Probe stream parameters.
    // FIX: this API returns >= 0 on success, so the original `!= 0` check
    // treated a positive (successful) return as failure.
    retcode = avformat_find_stream_info(format_context, NULL);
    if (retcode < 0) {
        std::cout << "[error] find stream error!" << std::endl;
        return;
    }
    codec_context = avcodec_alloc_context3(NULL);
    if (!codec_context) {
        std::cout << "[error] alloc codec context error!" << std::endl;
        return;
    }
    // Locate the best audio stream.
    // FIX: a negative return (no audio stream) was previously used to index
    // format_context->streams — undefined behavior.
    audioIndex = av_find_best_stream(format_context, AVMEDIA_TYPE_AUDIO, -1, -1, NULL, 0);
    if (audioIndex < 0) {
        std::cout << "[error] no audio stream found!" << std::endl;
        return;
    }
    // Copy the stream's codec parameters into our decoding context.
    retcode = avcodec_parameters_to_context(codec_context, format_context->streams[audioIndex]->codecpar);
    if (retcode < 0) {
        std::cout << "[error] parameters to context error!" << std::endl;
        return;
    }
    // Find and open a decoder matching the stream's codec id.
    codec = avcodec_find_decoder(codec_context->codec_id);
    if (codec == nullptr) {
        std::cout << "[error] find decoder error!" << std::endl;
        return;
    }
    retcode = avcodec_open2(codec_context, codec, nullptr);
    if (retcode != 0) {
        std::cout << "[error] open decodec error!" << std::endl;
        return;
    }
    // Reusable packet/frame buffers for the decode loop.
    packet = av_packet_alloc();
    frame = av_frame_alloc();
    if (!packet || !frame) {
        std::cout << "[error] alloc packet/frame error!" << std::endl;
        return;
    }
    /* ########## actual output audio parameters ########## */
    // Samples per decoded frame (NOTE(review): may be 0 for some codecs —
    // confirm for the formats you play).
    out_nb_samples = codec_context->frame_size;
    // Output channel count derived from the target layout.
    out_channels = av_get_channel_layout_nb_channels(out_chn_layout);
    // Input layout inferred from the decoder's channel count.
    in_chn_layout = av_get_default_channel_layout(codec_context->channels);
    /* Size and allocate the resampled output buffer. */
    out_buffer_size = av_samples_get_buffer_size(NULL, out_channels, out_nb_samples, out_sample_fmt, 1);
    outBuff = (unsigned char *) av_malloc(MAX_AUDIO_FRAME_SIZE * 2); // stereo
    if (!outBuff) {
        // FIX: allocation was previously unchecked.
        std::cout << "[error] alloc out buffer error!" << std::endl;
        return;
    }
    // Open the SDL audio device; SDL starts pulling PCM through fill_audio.
    if (av_render) av_render->openAudio(out_sample_rate, out_channels, out_nb_samples, codec_context, fill_audio);
    // Configure and initialize the resampler (decoder format -> target).
    convert_context = swr_alloc_set_opts(NULL, out_chn_layout, out_sample_fmt, out_sample_rate,
                                         in_chn_layout, codec_context->sample_fmt, codec_context->sample_rate, 0,
                                         NULL);
    // FIX: swr_init's result was previously ignored.
    if (!convert_context || swr_init(convert_context) < 0) {
        std::cout << "[error] init resampler error!" << std::endl;
    }
}
void AudioThread::setUrl(const char *url) {
// Store the media location. Only the pointer is kept — the caller must keep
// the string alive for the lifetime of this object. (`this->` is required:
// the parameter shadows the member.)
this->url = url;
}
我們?cè)?`Main` 方法中加入以下代碼:
#ifdef chapter_09
// Create the shared renderer (owns the SDL window and audio device).
AVRender* render = new AVRender();
// Video decode thread, wired to the renderer and media url.
// NOTE(review): `url` is assumed to be defined earlier in main — confirm.
VideoThread *videoThread = new VideoThread();
videoThread->setRender(render);
videoThread->setUrl(url);
// Audio decode thread, sharing the same renderer and url.
AudioThread *audioThread = new AudioThread();
audioThread->setRender(render);
audioThread->setUrl(url);
// Start both worker threads.
videoThread->start();
audioThread->start();
// Block on the SDL event loop until the window is closed.
// NOTE(review): none of the objects above are deleted and the threads are
// never joined — acceptable for a demo, but a leak on exit.
render->loopEvent();
#endif
如果沒錯(cuò),那么就應(yīng)該能正常地播放視頻了。。。
祝各位大佬們好運(yùn)
未完持續(xù) ...