Analysis of the FFMPEG Extension in Android libstagefright (Part 1)

1. Introduction

FFMPEG is a widely used open-source project for traditional audio/video encoding, decoding, and processing. Desktop players such as VLC and Baofeng, as well as all kinds of mobile audio/video playback apps, use one FFMPEG feature or another, such as decode, encode, filter, or mux. This article analyzes how FFMPEG is extended and used in the Android libstagefright framework.

2. The Details

This article follows how FFMPEG is extended and used inside the Android source tree.


Taking the Amlogic Android SDK source as an example:

frameworks/av/media/libstagefright uses the FFMPEG component.
AmFFmpegAdapter is the entry point of the FFMPEG extension: MediaPlayer calls into libstagefright via StagefrightPlayer, and the libstagefright component in turn includes the libamffmpegadapter component.

For example, in the libamffmpegadapter component, AmSimpleMediaExtractorPlugin is the extension of MediaExtractor.
MediaExtractor declares a few cases in which it may be used, for example hevc/h.265:

/**MediaExtractor.cpp*/
sp<MediaExtractor> MediaExtractor::CreateEx(const sp<DataSource> &dataSource, bool isHEVC) 
{
    float confidence = 0;
    String8 mime("");
    sp<AMessage> meta(NULL);
    if (!dataSource->sniff(&mime, &confidence, &meta)) {
        confidence = 0;
    }

    float am_confidence = 0;
    String8 am_mime("");
    sp<AMessage> am_meta(NULL);
    if(!sniffAmExtFormat(dataSource, &am_mime, &am_confidence, &am_meta)) {
        am_confidence = 0;
    }

    if((!strcmp(mime.string(),MEDIA_MIMETYPE_AUDIO_WMA) || !strcmp(mime.string(),MEDIA_MIMETYPE_AUDIO_WMAPRO))
        && confidence>0
        && !strcmp(am_mime.string(),MEDIA_MIMETYPE_CONTAINER_ASF)
        && am_confidence>0)
    {
        // since the amffmpeg extractor performs well, why not use it; any question
        // about this modification, contact me --> BUG#94436
        ALOGI("[%s %d]for WMA: force using Amffmpeg extractor[am_confidence/%f confidence/%f ammime/%s mime/%s]\n",
                __FUNCTION__, __LINE__, am_confidence, confidence, am_mime.string(), mime.string());
        confidence = 0;
    }
    
    sp<MediaExtractor> extractor = NULL;
    if(am_confidence > confidence || isHEVC) {     // if hevc/h.265, use ffmpeg extractor anyhow.
        mime = am_mime;
        extractor = createAmMediaExtractor(dataSource, mime.string());
    }

    if(NULL == extractor.get()) {
        extractor = MediaExtractor::Create(dataSource, mime.string());
    }

    return extractor;       
}
/**AmSimpleMediaExtractorPlugin.cpp*/
bool sniffAmExtFormat(
        const android::sp<android::DataSource> &source,
        android::String8 *mimeType, float *confidence,
        android::sp<android::AMessage> *msg) {
    return android::SniffAmFFmpeg(source, mimeType, confidence, msg);
}

android::sp<android::MediaExtractor> createAmMediaExtractor(
        const android::sp<android::DataSource> &source, const char *mime) {
    android::MediaExtractor *ret = NULL;    
    ret = new android::AmFFmpegExtractor(source);
    return ret;
}

android::sp<android::MediaExtractor> createAmMediaExtractorFromFd(int fd) {
   ALOGD("[%s:%d], fd:%d", __FUNCTION__, __LINE__, fd);
   android::MediaExtractor *ret = NULL;
   ret = new android::AmFFmpegExtractor(fd);
   return ret;
}
/**AmFFmpegExtractor.cpp*/
bool SniffAmFFmpeg(
        const sp<DataSource> &source, String8 *mimeType, float *confidence,
        sp<AMessage> *) {
    av_register_all();
    AVInputFormat *inputFormat = probeFormat(source);
    if (NULL != inputFormat) {
        const char *mimeDetected = convertInputFormatToMimeType(inputFormat);
        if (NULL != mimeDetected) {
            if(!strcmp(mimeDetected,MEDIA_MIMETYPE_CONTAINER_MATROSKA)){
                if(get_codec_id(source, inputFormat)==1){
                    ALOGI("using MatroskaExtractor\n");
                    return false;
                }
            }
            *mimeType = mimeDetected;
            // keep the score low so this only wins when stagefright does not support the format
            *confidence = 0.05f;
            if(!strcmp(mimeDetected,MEDIA_MIMETYPE_VIDEO_RM10) || !strcmp(mimeDetected,MEDIA_MIMETYPE_VIDEO_RM20) || !strcmp(mimeDetected,MEDIA_MIMETYPE_VIDEO_RM40)){
                *confidence = 0.21f;
                ALOGV("[%s %d] confidence 0.21", __FUNCTION__, __LINE__);
            }
            return true;
        }
    }
    return false;
}

As you can see, when the libstagefright component demuxes H.265 video it switches to the ffmpeg component, and in AmFFmpegExtractor::feedMore we finally meet the familiar FFmpeg APIs (for example av_read_frame):

/**AmFFmpegExtractor.cpp*/
status_t AmFFmpegExtractor::feedMore() {
    Mutex::Autolock autoLock(mLock);
    status_t ret = OK;
    AVPacket *packet = new AVPacket();
    while (true) {
        int res = av_read_frame(mFFmpegContext, packet);
        if (res >= 0) {
            uint32_t sourceIdx = kInvalidSourceIdx;
            if (static_cast<size_t>(packet->stream_index) < mStreamIdxToSourceIdx.size()) {
                sourceIdx = mStreamIdxToSourceIdx[packet->stream_index];
            }
            if (sourceIdx == kInvalidSourceIdx
                    || !mSources[sourceIdx].mIsActive || packet->size <= 0/* || packet->pts < 0*/) {
                av_free_packet(packet);
                continue;
            }
            av_dup_packet(packet);
            mSources[sourceIdx].mSource->queuePacket(packet);
        } else {
            delete packet;
            ALOGV("No more packets from ffmpeg.");
            ret = ERROR_END_OF_STREAM;
        }
        break;
    }
    return ret;
}

In addition, in the AmFFmpegUtils component you can see how libstagefright data types are converted into FFmpeg data types:

AVInputFormat *probeFormat(const sp<DataSource> &source) {
    AVInputFormat *format = NULL;
    
    for (uint32_t bufSize = kProbeStartBufSize;
            bufSize <= kProbeMaxBufSize; bufSize *= 2) {
        // TODO: use av_probe_input_buffer() once we upgrade FFmpeg library
        //       instead of having a loop here.
        AVProbeData probe_data;
        probe_data.filename = kDummyFileName;
        probe_data.buf = new unsigned char[bufSize];
        //probe_data.s = NULL;
        if (NULL == probe_data.buf) {
            ALOGE("failed to allocate memory for probing file format.");
            return NULL;
        }
        int32_t amountRead = source->readAt(0, probe_data.buf, bufSize);
        probe_data.buf_size = amountRead;

        int32_t score = 0;
        format = av_probe_input_format2(&probe_data, 1, &score);
        delete[] probe_data.buf;

        if (format != NULL && score > AVPROBE_SCORE_MAX / 4) {
            break;
        }
    }
    return format;
}

Here source->readAt() is the libstagefright component reading the stream data (for the details of how the read works, see my MediaExtractor source-code analysis article). The buffer that was read is passed to av_probe_input_format2, which is where the FFMPEG API is actually called: FFMPEG analyzes the container format from the input data. What you can do with the resulting AVInputFormat depends on how familiar you are with FFMPEG.
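
To make that probing step more concrete, here is a minimal standalone sketch of the same idea built only on FFmpeg's public probing API, reading from a plain FILE* instead of a stagefright DataSource, and using a single fixed-size read instead of probeFormat()'s growing-buffer loop. probeFile and the buffer size are illustrative and not part of the SDK code:

/** standalone probe sketch (illustrative, not SDK code) */
extern "C" {
#include <libavformat/avformat.h>
}
#include <cstdio>
#include <vector>

static AVInputFormat *probeFile(const char *path) {
    // Matches the older FFmpeg API used by this SDK (the av_register_all() era).
    av_register_all();

    const size_t kBufSize = 32 * 1024;
    // FFmpeg expects AVPROBE_PADDING_SIZE zeroed bytes after the probe data.
    std::vector<unsigned char> buf(kBufSize + AVPROBE_PADDING_SIZE, 0);

    FILE *fp = fopen(path, "rb");
    if (fp == NULL) {
        return NULL;
    }
    size_t bytesRead = fread(buf.data(), 1, kBufSize, fp);
    fclose(fp);

    AVProbeData probeData = {};
    probeData.filename = path;
    probeData.buf = buf.data();
    probeData.buf_size = static_cast<int>(bytesRead);

    int score = 0;
    // is_opened = 1: the data really comes from an opened stream, exactly as
    // in the probeFormat() loop above.
    AVInputFormat *format = av_probe_input_format2(&probeData, 1, &score);
    if (format != NULL) {
        printf("detected container: %s (score %d)\n", format->name, score);
    }
    return format;
}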

AVFormatContext* openAVFormatContext(
        AVInputFormat *inputFormat, AmFFmpegByteIOAdapter *adapter) {
    CHECK(inputFormat != NULL);
    CHECK(adapter != NULL);

    ALOGD("[%s:%d]", __FUNCTION__, __LINE__);

    AVFormatContext* context = avformat_alloc_context();
    if (context == NULL) {
        ALOGE("Failed to allocate AVFormatContext.");
        return NULL;
    }
    context->interrupt_callback.callback = interrupt_cb;
    context->interrupt_callback.opaque = context;

    int64_t start_time_bak = context->start_time;  // just for backup
    start_time = ALooper::GetNowUs();
    context->flags |= AVFMT_FLAG_NONBLOCK;
    context->flags |= AVFMT_NOFILE;

    ALOGD("[%s:%d]", __FUNCTION__, __LINE__);
    context->pb = adapter->getContext();
    ALOGD("try avformat_open_input");
    int res = avformat_open_input(
            &context,
            kDummyFileName,  // need to pass a filename
            inputFormat,  // probe the container format.
            NULL);  // no special parameters

    if (res < 0) {
        ALOGE("Failed to open the input stream.");
        avformat_free_context(context);
        return NULL;
    } else
        ALOGD("avformat_open_input success");

    //context->probesize = 512 * 1024;
    ALOGD("call avformat_find_stream_info");
    res = avformat_find_stream_info(context, NULL);

    if (res < 0 && strcmp(inputFormat->name, "hevc")) {
        ALOGE("Failed to find stream information.");
        ALOGD("inputFormat->name: %s",inputFormat->name);
        avformat_close_input(&context);
        return NULL;
    } else
        ALOGD("openAVFormatContext completes with: %p", context);

    instance_counter++;
    return context;
}

openAVFormatContext is an important function: it creates a valid AVFormatContext from the input stream. How important AVFormatContext is within FFMPEG needs no elaboration (if you are not sure what AVFormatContext is for, start with the transcoding code in my article FFMPEG Android移植進(jìn)階 on advanced FFMPEG porting to Android).
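
The adapter obtained via adapter->getContext() is what bridges the two I/O worlds: it exposes libstagefright reads to FFmpeg as an AVIOContext. Below is a minimal sketch of what such an adapter has to do, using a plain FILE* read callback; FileReader, readPacket, and openWithCustomIO are illustrative names and not the actual AmFFmpegByteIOAdapter code:

/** custom AVIO sketch (illustrative, not SDK code) */
extern "C" {
#include <libavformat/avformat.h>
}
#include <cstdio>

struct FileReader {
    FILE *fp;
};

// Read callback handed to FFmpeg; in the SDK this role is played by the
// adapter wrapping DataSource::readAt().
static int readPacket(void *opaque, uint8_t *buf, int bufSize) {
    FileReader *reader = static_cast<FileReader *>(opaque);
    int n = static_cast<int>(fread(buf, 1, bufSize, reader->fp));
    return n > 0 ? n : AVERROR_EOF;
}

static AVFormatContext *openWithCustomIO(FileReader *reader) {
    const int kIOBufSize = 32 * 1024;
    unsigned char *ioBuf = static_cast<unsigned char *>(av_malloc(kIOBufSize));
    // write_flag = 0 (read only); no seek callback to keep the sketch short.
    AVIOContext *avio = avio_alloc_context(
            ioBuf, kIOBufSize, 0, reader, readPacket, NULL, NULL);

    AVFormatContext *context = avformat_alloc_context();
    context->pb = avio;                      // same role as context->pb = adapter->getContext()
    context->flags |= AVFMT_FLAG_CUSTOM_IO;  // we own the AVIOContext ourselves

    if (avformat_open_input(&context, "dummy", NULL, NULL) < 0) {
        // avformat_open_input() frees the context on failure; a real
        // implementation would also release the AVIOContext and its buffer.
        return NULL;
    }
    if (avformat_find_stream_info(context, NULL) < 0) {
        avformat_close_input(&context);
        return NULL;
    }
    return context;
}

With a valid AVFormatContext in hand, AmFFmpegSource::read() below pulls the demuxed packets out of the per-stream queue, runs them through the Formatter, and hands them to libstagefright as MediaBuffers: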

status_t AmFFmpegSource::read(
        MediaBuffer **out, const ReadOptions *options) {
    ALOGV("%s %d", __FUNCTION__, __LINE__);
    *out = NULL;

    sp<AmFFmpegExtractor> extractor = mExtractor.promote();
    if (NULL == extractor.get()) {
        // The client should hold AmFFmpegExtractor while it is using source.
        ALOGE("AmFFmpegExtractor has been released before stop using sources.");
        return UNKNOWN_ERROR;
    }

    int64_t seekTimeUs;
    ReadOptions::SeekMode seekMode;
    AVPacket *packet = NULL;
    if (mSeekable && options && options->getSeekTo(&seekTimeUs, &seekMode)) {
        // hevc decoder may fail because of no extradata when seek instantly after start.
        if (!strcmp(mMime, MEDIA_MIMETYPE_VIDEO_HEVC) && mStartRead == false
                && mStream->codec->extradata_size == 0) {
            packet = dequeuePacket();
            while (packet == NULL) {
                if (ERROR_END_OF_STREAM == extractor->feedMore()) {
                    return ERROR_END_OF_STREAM;
                }
                packet = dequeuePacket();
            }
            int32_t cast_size = castHEVCSpecificData(packet->data, packet->size);
            if (cast_size > 0) {
                av_shrink_packet(packet, cast_size);
            }
            ALOGI("Need send hevc specific data first, size : %d", packet->size);
        }

        extractor->seekTo(seekTimeUs + mStartTimeUs, seekMode);
    }

    mStartRead = true;

    if(packet == NULL) {
        packet = dequeuePacket();
        while (packet == NULL) {
            if (ERROR_END_OF_STREAM == extractor->feedMore()) {
                return ERROR_END_OF_STREAM;
            }
            packet = dequeuePacket();
        }
    }

    MediaBuffer *buffer = NULL;
    status_t ret = mGroup->acquire_buffer(&buffer);
    if (ret != OK) {
        return ret;
    }

    uint32_t requiredLen =
            mFormatter->computeNewESLen(packet->data, packet->size);

    int32_t hevc_header_size = 0;
    if(mFirstPacket && !strcmp(mMime, MEDIA_MIMETYPE_VIDEO_HEVC) && mStream->codec->extradata_size > 0) {
        hevc_header_size = 10 + mStream->codec->extradata_size;
        requiredLen += hevc_header_size;
    }
    if (buffer->size() < requiredLen) {
        size_t newSize = buffer->size();
        while (newSize < requiredLen) {
            newSize = 2 * newSize;
            if (newSize > kMaxFrameBufferSize) {
                break;
            }
        }
        buffer->release();
        buffer = NULL;
        if (newSize > kMaxFrameBufferSize) {
            return ERROR_BUFFER_TOO_SMALL;
        }
        resetBufferGroup(newSize);
        status_t ret = mGroup->acquire_buffer(&buffer);
        if (ret != OK) {
            return ret;
        }
    }

    int32_t filledLength = 0;
    if (mFirstPacket && !strcmp(mMime, MEDIA_MIMETYPE_VIDEO_HEVC) && hevc_header_size > 0) {
        const char *tag = "extradata";
        memcpy(static_cast<uint8_t *>(buffer->data()), tag, 9);
        static_cast<uint8_t *>(buffer->data())[9] = mStream->codec->extradata_size;
        memcpy(static_cast<uint8_t *>(buffer->data()) + 10,
                static_cast<uint8_t *>(mStream->codec->extradata),
                mStream->codec->extradata_size);
        filledLength = mFormatter->formatES(
                packet->data, packet->size,
                static_cast<uint8_t *>(buffer->data()) + hevc_header_size, buffer->size());
        filledLength += hevc_header_size;
    } else {
        filledLength = mFormatter->formatES(
                packet->data, packet->size,
                static_cast<uint8_t *>(buffer->data()), buffer->size());
    }
    mFirstPacket = false;
    if (filledLength <= 0) {
        ALOGE("Failed to format packet data.");
        buffer->release();
        buffer = NULL;
        return ERROR_MALFORMED;
    }

    if(AV_NOPTS_VALUE == packet->pts) {
        packet->pts = mLastValidPts + 1;
        packet->dts = mLastValidDts + 1;
        mLastValidPts = packet->pts;
        mLastValidDts = packet->dts;
        ALOGE("meet invalid pts, set last pts to current frame pts:%lld dts:%lld",
            mLastValidPts, mLastValidDts);
    } else {
        mLastValidPts = packet->pts;
        mLastValidDts = packet->dts;
    }

    buffer->set_range(0, filledLength);
    const bool isKeyFrame = (packet->flags & AV_PKT_FLAG_KEY) != 0;
    const int64_t ptsFromFFmpeg =
            (packet->pts == static_cast<int64_t>(AV_NOPTS_VALUE))
            ? kUnknownPTS : convertStreamTimeToUs(packet->pts);
    const int64_t dtsFromFFmpeg =
            (packet->dts == static_cast<int64_t>(AV_NOPTS_VALUE))
            ? kUnknownPTS : convertStreamTimeToUs(packet->dts);
    const int64_t predictedPTSInUs = mPTSPopulator->computePTS(
            packet->stream_index, ptsFromFFmpeg, dtsFromFFmpeg, isKeyFrame);
    const int64_t normalizedPTSInUs = (predictedPTSInUs == kUnknownPTS)?
            dtsFromFFmpeg - mStartTimeUs : ((predictedPTSInUs - mStartTimeUs < 0
            && predictedPTSInUs - mStartTimeUs > -10) ? 0 : predictedPTSInUs - mStartTimeUs); // starttime may exceed pts a little in some ugly streams.

    buffer->meta_data()->setInt64(kKeyPTSFromContainer, ptsFromFFmpeg);
    buffer->meta_data()->setInt64(kKeyDTSFromContainer, dtsFromFFmpeg);
    buffer->meta_data()->setInt64(kKeyMediaTimeOffset, -mStartTimeUs);

    // TODO: Make decoder know that this sample has no timestamp by setting
    // OMX_BUFFERFLAG_TIMESTAMPINVALID flag once we move to OpenMax IL 1.2.
    buffer->meta_data()->setInt64(kKeyTime, normalizedPTSInUs);
    buffer->meta_data()->setInt32(kKeyIsSyncFrame, isKeyFrame ? 1 : 0);
    *out = buffer;
    av_free_packet(packet);
    delete packet;
    return OK;
}

Now let's look at the Formatter implementation:

/**AmFFmpegSource.cpp*/
status_t AmFFmpegSource::init(
        AVStream *stream, AVInputFormat *inputFormat,
        AmFFmpegExtractor *extractor) {
    ...
    mFormatter = StreamFormatter::Create(stream->codec, inputFormat);
    mFormatter->addCodecMeta(mMeta);
    return OK;
}
//static
sp<StreamFormatter> StreamFormatter::Create(
        AVCodecContext *codec, AVInputFormat *format) {
    ALOGI("Creating formatter for codec id : %u extradata size : %d",
            codec->codec_id, codec->extradata_size);

    const char *codecMime = convertCodecIdToMimeType(codec);
    if (!strcmp(codecMime, MEDIA_MIMETYPE_VIDEO_AVC)
            && (format == av_find_input_format("mp4")
                    || format == av_find_input_format("flv")
                    || format == av_find_input_format("matroska"))) {
        // Double check the extradata really includes AVCC (14496-15) structure
        // because some matroska streams are already Annex-B framed and do not
        // have AVCC. In this case, we fall back to the default formatter.
        if (codec->extradata_size >= 7
                && reinterpret_cast<uint8_t *>(codec->extradata)[0] == 0x01) {
            return new AVCCFormatter(codec);
        }
    } else if (!strcmp(codecMime, MEDIA_MIMETYPE_VIDEO_HEVC)
            && (format == av_find_input_format("mp4")
                    || format == av_find_input_format("flv")
                    || format == av_find_input_format("matroska"))) {
        if (codec->extradata_size >= 22) {
            return new HVCCFormatter(codec);
        }
    } else if (!strcmp(codecMime, MEDIA_MIMETYPE_AUDIO_AAC)
            && (format == av_find_input_format("mp4")
                    || format == av_find_input_format("avi")
                    || format == av_find_input_format("flv")
                    || format == av_find_input_format("matroska"))
            && codec->extradata_size > 0) {
        return new AACFormatter(codec);
    } else if (!strcmp(codecMime, MEDIA_MIMETYPE_AUDIO_WMA)) {
        return new WMAFormatter(codec);
    } else if (!strcmp(codecMime, MEDIA_MIMETYPE_AUDIO_VORBIS)) {
        return new VorbisFormatter(codec);
    } else if (codec->codec_id == AV_CODEC_ID_PCM_BLURAY) {
        return new PCMBlurayFormatter(codec);
    } else if(!strcmp(codecMime, MEDIA_MIMETYPE_AUDIO_APE)){
        return new APEFormatter(codec);
    }
    return new PassthruFormatter(codec);
}

Take WMAFormatter as an example: it is the FFMPEG-based extension for WMA audio:

/**WMAFormatter.cpp*/
WMAFormatter::WMAFormatter(AVCodecContext *codec)
    : PassthruFormatter(codec),
      mBlockAlign(0),
      mBitsPerSample(0),
      mFormatTag(0),
      mInitCheck(false) {

    if (codec->codec_tag == 0x0160
            || codec->codec_tag == 0x0161     // WMA
            || codec->codec_tag == 0x0162     // WMA Pro
            || codec->codec_tag == 0x0163) {  // WMA Lossless
        mBlockAlign = codec->block_align;
        mBitsPerSample = codec->bits_per_coded_sample;
        mFormatTag = codec->codec_tag;
        mInitCheck = true;
    } else {
        ALOGW("Unsupported format tag %x", codec->codec_tag);
    }
}

bool WMAFormatter::addCodecMeta(const sp<MetaData> &meta) const {
    if (mInitCheck) {
        meta->setInt32(kKeyWMABlockAlign, mBlockAlign);
        meta->setInt32(kKeyWMABitsPerSample, mBitsPerSample);
        meta->setInt32(kKeyWMAFormatTag, mFormatTag);
        meta->setData(kKeyCodecSpecific, 0, mExtraData, mExtraSize);
        return true;
    }
    return false;
}

The read() method of the AmFFmpegExtractor component calls methods such as computeNewESLen and formatES, and WMAFormatter inherits from PassthruFormatter:

/**AmFFmpegExtractor.cpp*/
status_t AmFFmpegSource::read(
        MediaBuffer **out, const ReadOptions *options) {
    ...
    uint32_t requiredLen =
            mFormatter->computeNewESLen(packet->data, packet->size);
    ...
    int32_t filledLength = 0;
    if(mFirstPacket && !strcmp(mMime, MEDIA_MIMETYPE_VIDEO_HEVC) && hevc_header_size > 0) {
        const char * tag = "extradata";
        memcpy(static_cast<uint8_t *>(buffer->data()), tag, 9);
        static_cast<uint8_t *>(buffer->data())[9] = mStream->codec->extradata_size;
        memcpy(static_cast<uint8_t *>(buffer->data()) + 10, static_cast<uint8_t *>(mStream->codec->extradata), mStream->codec->extradata_size);
        filledLength = mFormatter->formatES(
            packet->data, packet->size,
            static_cast<uint8_t *>(buffer->data()) + hevc_header_size, buffer->size());
        filledLength += hevc_header_size;
    } else {
        filledLength = mFormatter->formatES(
                packet->data, packet->size,
                static_cast<uint8_t *>(buffer->data()), buffer->size());
    }
    ...
    return OK;
}

/**PassthruFormatter.cpp*/
uint32_t PassthruFormatter::computeNewESLen(
        const uint8_t* in, uint32_t inAllocLen) const {
    return inAllocLen;
}

int32_t PassthruFormatter::formatES(
        const uint8_t* in, uint32_t inAllocLen, uint8_t* out,
        uint32_t outAllocLen) const {
    if (!inAllocLen || inAllocLen > outAllocLen) {
        return -1;
    }
    CHECK(in);
    CHECK(out);
    CHECK(in != out);
    memcpy(out, in, inAllocLen);
    return inAllocLen;
}

This is how the buffers demuxed by FFMPEG are converted into the data types we normally work with in development, for example:

// e.g. enumerate the data streams (tracks)
for (size_t i = 0; i < extractor->countTracks(); ++i) {
    ...
}
// e.g. read sample data (a thin wrapper around read())
err = extractor->readSampleData(buffer);

// e.g. get the sample metadata
sp<MetaData> meta;
err = extractor->getSampleMeta(&meta);
CHECK_EQ(err, (status_t)OK);

Once these interfaces are implemented, demuxing can go through FFMPEG, and that completes the FFMPEG audio/video demuxing extension on the Android SDK.
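
To show where all this fits, here is a rough client-side sketch (assumed code, not taken from the SDK) of how a libstagefright caller could consume whatever extractor CreateEx() returned; at this level the FFmpeg-backed AmFFmpegExtractor behaves like any other MediaExtractor:

/** client-side usage sketch (illustrative, not SDK code) */
#include <strings.h>
#include <media/stagefright/DataSource.h>
#include <media/stagefright/MediaBuffer.h>
#include <media/stagefright/MediaExtractor.h>
#include <media/stagefright/MediaSource.h>
#include <media/stagefright/MetaData.h>

using namespace android;

static void dumpVideoTrack(const sp<DataSource> &dataSource) {
    // isHEVC = false: let sniffing decide which extractor wins.
    sp<MediaExtractor> extractor = MediaExtractor::CreateEx(dataSource, false);
    if (extractor == NULL) {
        return;
    }

    for (size_t i = 0; i < extractor->countTracks(); ++i) {
        sp<MetaData> trackMeta = extractor->getTrackMetaData(i);
        const char *mime = NULL;
        if (!trackMeta->findCString(kKeyMIMEType, &mime)
                || strncasecmp(mime, "video/", 6)) {
            continue;
        }

        sp<MediaSource> source = extractor->getTrack(i);
        if (source->start() != OK) {
            continue;
        }

        MediaBuffer *buffer = NULL;
        // Each read() here ends up in AmFFmpegSource::read(), i.e. in
        // av_read_frame() plus the Formatter conversion shown above.
        while (source->read(&buffer) == OK) {
            int64_t timeUs = 0;
            buffer->meta_data()->findInt64(kKeyTime, &timeUs);
            // hand buffer->data()/range_length() and timeUs to the decoder here
            buffer->release();
        }
        source->stop();
    }
}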

To summarize my own understanding:
Audio/video data is laid out differently under different container formats. Take a TS stream, for example:


The byte stream is parsed 188 bytes at a time.

In the PES layer inside the payload, the stream_id tells an audio payload (values 0xC0-0xDF, usually 0xC0) apart from a video payload (values 0xE0-0xEF, usually 0xE0).

Each TS packet is a 188-byte chunk of the stream, so for an Extractor to tell which packets carry audio and which carry video it has to parse them according to this specification. That is why, when the stock Android Extractors cannot demux certain special formats, the FFMPEG-based Extractor is used: it can work out which stream is audio and which is video in those formats and extract the stream information, so that encoding and decoding can then be handled accordingly. I hope that explanation is clear enough ╮(╯▽╰)╭
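
As a tiny illustration of the stream_id rule just described (purely illustrative, not SDK code), assuming pes points at the start of a PES packet reassembled from the 188-byte TS packets:

/** PES stream_id classification sketch (illustrative, not SDK code) */
#include <cstdint>

enum StreamKind {
    KIND_AUDIO,
    KIND_VIDEO,
    KIND_OTHER
};

static StreamKind classifyPes(const uint8_t *pes) {
    // A PES packet starts with the packet_start_code_prefix 0x000001.
    if (pes[0] != 0x00 || pes[1] != 0x00 || pes[2] != 0x01) {
        return KIND_OTHER;
    }
    uint8_t streamId = pes[3];
    if (streamId >= 0xC0 && streamId <= 0xDF) {
        return KIND_AUDIO;   // audio streams, usually 0xC0
    }
    if (streamId >= 0xE0 && streamId <= 0xEF) {
        return KIND_VIDEO;   // video streams, usually 0xE0
    }
    return KIND_OTHER;
}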

3. Conclusion

The call flow of the whole framework is essentially MediaCodec ----> ACodec ----> OMX layer ----> the concrete codec. Other chip vendors' SDKs, such as HiSilicon, Amlogic, and MStar, also rely on FFMPEG to extend certain encoding, decoding, and muxing features. Every platform is different, so I suggest studying selectively, as your circumstances allow and based on your own experience.

The next article will analyze how FFMPEG is used in our framework from the codec-extension side. Thanks for reading.
