Skia現(xiàn)在為來自After Effects的Bodymovin插件的JSON動(dòng)畫提供了高性能、安全的本機(jī)播放器。它可以在使用Skia的任何平臺(tái)上使用,包括Android和iOS。
該播放器的目標(biāo)是在當(dāng)今廣泛用於動(dòng)畫的Lottie播放器的基礎(chǔ)上,為我們的客戶改善性能、功能集和平臺(tái)凝聚力。我們是Bodymovin格式的忠實(shí)擁護(hù)者,並在可能的情況下為Bodymovin / Lottie做出貢獻(xiàn)。
https://skia.org/user/modules/skottie
AE的一些基本概念
skottie代碼結(jié)構(gòu)
解析得到RenderTree
● AnimationBuilder::parse
● CompositionBuilder::CompositionBuilder
● CompositionBuilder::build
● LayerBuilder::buildRenderTree
AnimationBuilder::parse解析得到Animation
// Pseudo-code sketch of Animation::Builder::makeFromFile(): parses the Lottie
// JSON and assembles the Animation object.
Animation = Animation::Builder::makeFromFile(lottie_json) {
    // Parse the top-level JSON fields: version, width/height, frame rate,
    // in point / out point.
    // parse v/w/h/fr/ip/op/...

    // Parsing the JSON yields an AnimationInfo (scene graph + animators):
    struct AnimationInfo {
        std::unique_ptr<sksg::Scene> fScene;
        AnimatorScope fAnimators;
    };
    internal::AnimationBuilder builder(std::move(resolvedProvider), fFontMgr,
                                       std::move(fPropertyObserver),
                                       std::move(fLogger),
                                       std::move(fMarkerObserver),
                                       std::move(fPrecompInterceptor),
                                       &fStats, size, duration, fps, fFlags,
                                       audio_collector);
    auto ainfo = builder.parse(json);

    // Construct the Animation object from the parsed scene and animators.
    return sk_sp<Animation>(new Animation(std::move(ainfo.fScene),
                                          std::move(ainfo.fAnimators),
                                          std::move(version),
                                          size,
                                          inPoint,
                                          outPoint,
                                          duration,
                                          fps,
                                          flags,
                                          audio_collector));
}
// Parses the root Lottie JSON object into an AnimationInfo (scene + animators).
AnimationBuilder::AnimationInfo AnimationBuilder::parse(const skjson::ObjectValue& jroot) {
    // Parse the "markers" field.
    this->dispatchMarkers(jroot["markers"]);

    // Parse the "assets" field.
    this->parseAssets(jroot["assets"]);

    // Parse the "fonts" and "chars" fields.
    this->parseFonts(jroot["fonts"], jroot["chars"]);

    // CompositionBuilder::build() produces the layer tree
    // (sksg::Group::Make(std::move(layers))), while the AutoScope collects
    // every animator registered during the build.
    AutoScope ascope(this);
    auto root = CompositionBuilder(*this, fCompSize, jroot).build(*this);
    auto animators = ascope.release();
    fStats->fAnimatorCount = animators.size();

    // Package the scene and the collected animators into an AnimationInfo.
    return { sksg::Scene::Make(std::move(root)), std::move(animators) };
}
CompositionBuilder構(gòu)造函數(shù)解析得到fLayerBuilders & fCameraTransform
// Parses the composition JSON: populates fLayerBuilders (one entry per layer)
// and, when a camera or 3D setup is present, fCameraTransform.
CompositionBuilder::CompositionBuilder(const AnimationBuilder& abuilder,
                                       const SkSize& size,
                                       const skjson::ObjectValue& jcomp)
    : fSize(size) {
    // Optional motion blur params.
    if (const skjson::ObjectValue* jmb = jcomp["mb"]) {
        // Cap samples-per-frame to keep per-frame cost bounded.
        static constexpr size_t kMaxSamplesPerFrame = 64;
        fMotionBlurSamples = std::min(ParseDefault<size_t>((*jmb)["spf"], 1ul),
                                      kMaxSamplesPerFrame);
        fMotionBlurAngle = SkTPin(ParseDefault((*jmb)["sa"], 0.0f), 0.0f, 720.0f);
        fMotionBlurPhase = SkTPin(ParseDefault((*jmb)["sp"], 0.0f), -360.0f, 360.0f);
    }

    int camera_builder_index = -1;

    // Prepare layer builders.
    if (const skjson::ArrayValue* jlayers = jcomp["layers"]) {
        fLayerBuilders.reserve(SkToInt(jlayers->size()));
        for (const skjson::ObjectValue* jlayer : *jlayers) {
            if (!jlayer) continue;

            const auto lbuilder_index = fLayerBuilders.size();
            fLayerBuilders.emplace_back(*jlayer, fSize);
            const auto& lbuilder = fLayerBuilders.back();

            // Map the layer's JSON index to its position in fLayerBuilders.
            fLayerIndexMap.set(lbuilder.index(), lbuilder_index);

            // Keep track of the camera builder.
            if (lbuilder.isCamera()) {
                // We only support one (first) camera for now.
                if (camera_builder_index < 0) {
                    camera_builder_index = SkToInt(lbuilder_index);
                } else {
                    abuilder.log(Logger::Level::kWarning, jlayer,
                                 "Ignoring duplicate camera layer.");
                }
            }
        }
    }

    // Attach a camera transform upfront, if needed (required to build
    // all other 3D transform chains).
    if (camera_builder_index >= 0) {
        // Explicit camera.
        fCameraTransform = fLayerBuilders[camera_builder_index].buildTransform(abuilder, this);
    } else if (ParseDefault<int>(jcomp["ddd"], 0)) {
        // Default/implicit camera when 3D layers are present.
        fCameraTransform = CameraAdaper::DefaultCameraTransform(fSize);
    }
}
CompositionBuilder::build解析得到layers
// 1) layerBuilder保存layer transform
// 2) build render tree
// 把transform放到render node中
// buildRenderTree {
// layer = sksg::TransformEffect::Make(std::move(layer), fLayerTransform);
// }
// Finalizes the composition: attaches every layer's transform chain, then
// builds each layer's render tree and groups the results (painted bottom->top).
sk_sp<sksg::RenderNode> CompositionBuilder::build(const AnimationBuilder& abuilder) {
    // First pass: transitively attach each layer's transform chain.
    for (auto& builder : fLayerBuilders) {
        builder.buildTransform(abuilder, this);
    }

    // Second pass: attach the actual content and finalize each layer render tree.
    std::vector<sk_sp<sksg::RenderNode>> nodes;
    nodes.reserve(fLayerBuilders.size());

    const LayerBuilder* previous = nullptr;
    for (auto& builder : fLayerBuilders) {
        auto node = builder.buildRenderTree(abuilder, this, previous);
        if (node) {
            nodes.push_back(std::move(node));
        }
        // Track the predecessor so the next layer can reference it (mattes).
        previous = &builder;
    }

    if (nodes.empty()) {
        return nullptr;
    }
    if (nodes.size() == 1) {
        // Single-layer composition: no grouping needed.
        return std::move(nodes[0]);
    }

    // Layers are painted in bottom->top order, so flip the collected order.
    std::reverse(nodes.begin(), nodes.end());
    nodes.shrink_to_fit();

    return sksg::Group::Make(std::move(nodes));
}
LayerBuilder::buildRenderTree解析得到RenderTree
// Builds the final render tree for this layer: content, clip, mask, effects,
// transform, styles, opacity, motion blur, and (track-)matte handling.
sk_sp<sksg::RenderNode> LayerBuilder::buildRenderTree(const AnimationBuilder& abuilder,
                                                      CompositionBuilder* cbuilder,
                                                      const LayerBuilder* prev_layer) {
    const AnimationBuilder::AutoPropertyTracker apt(&abuilder, fJlayer);

    // Pointer-to-member type for the per-layer-type content factories below.
    using LayerBuilder =
        sk_sp<sksg::RenderNode> (AnimationBuilder::*)(const skjson::ObjectValue&,
                                                      AnimationBuilder::LayerInfo*) const;

    // AE is annoyingly inconsistent in how effects interact with layer transforms: depending on
    // the layer type, effects are applied before or after the content is transformed.
    //
    // Empirically, pre-rendered layers (for some loose meaning of "pre-rendered") are in the
    // former category (effects are subject to transformation), while the remaining types are in
    // the latter.
    enum : uint32_t {
        kTransformEffects = 0x01, // The layer transform also applies to its effects.
        kForceSeek = 0x02, // Dispatch all seek() events even when the layer is inactive.
    };

    // Dispatch table indexed by the Lottie layer type ('ty').
    static constexpr struct {
        LayerBuilder fBuilder;
        uint32_t fFlags;
    } gLayerBuildInfo[] = {
        { &AnimationBuilder::attachPrecompLayer, kTransformEffects }, // 'ty': 0 -> precomp
        { &AnimationBuilder::attachSolidLayer , kTransformEffects }, // 'ty': 1 -> solid
        { &AnimationBuilder::attachFootageLayer, kTransformEffects }, // 'ty': 2 -> image
        { &AnimationBuilder::attachNullLayer , 0 }, // 'ty': 3 -> null
        { &AnimationBuilder::attachShapeLayer , 0 }, // 'ty': 4 -> shape
        { &AnimationBuilder::attachTextLayer , 0 }, // 'ty': 5 -> text
        { &AnimationBuilder::attachAudioLayer , kForceSeek }, // 'ty': 6 -> audio
        { nullptr , 0 }, // 'ty': 7 -> pholderVideo
        { nullptr , 0 }, // 'ty': 8 -> imageSeq
        { &AnimationBuilder::attachFootageLayer, kTransformEffects }, // 'ty': 9 -> video
        { nullptr , 0 }, // 'ty': 10 -> pholderStill
        { nullptr , 0 }, // 'ty': 11 -> guide
        { nullptr , 0 }, // 'ty': 12 -> adjustment
        { &AnimationBuilder::attachNullLayer , 0 }, // 'ty': 13 -> camera
        { nullptr , 0 }, // 'ty': 14 -> light
    };

    // Unknown/out-of-range layer types produce no render content.
    const auto type = SkToSizeT(fType);
    if (type >= SK_ARRAY_COUNT(gLayerBuildInfo)) {
        return nullptr;
    }
    const auto& build_info = gLayerBuildInfo[type];

    // Switch to the layer animator scope (which at this point holds transform-only animators).
    AnimationBuilder::AutoScope ascope(&abuilder, std::move(fLayerScope));

    // Potentially null.
    sk_sp<sksg::RenderNode> layer;

    // Build the layer content fragment.
    if (build_info.fBuilder) {
        layer = (abuilder.*(build_info.fBuilder))(fJlayer, &fInfo);
    }

    // Clip layers with explicit dimensions.
    float w = 0, h = 0;
    if (Parse<float>(fJlayer["w"], &w) && Parse<float>(fJlayer["h"], &h)) {
        layer = sksg::ClipEffect::Make(std::move(layer),
                                       sksg::Rect::Make(SkRect::MakeWH(w, h)),
                                       true);
    }

    // Background on AE masks vs. (track) mattes (Chinese):
    // https://zhuanlan.zhihu.com/p/56928976
    // (In AE a mask is a path and can carry effects -- audio waveform, stroke,
    // fill, scribble, etc. NOTE(review): unclear whether that is supported here.)
    //
    // A mask applies to the current layer only.
    // Optional layer mask.
    layer = AttachMask(fJlayer["masksProperties"], &abuilder, std::move(layer));

    // Does the transform apply to effects also?
    // (AE quirk: it doesn't - except for solid layers)
    const auto transform_effects = (build_info.fFlags & kTransformEffects);

    // Attach the transform before effects, when needed.
    if (fLayerTransform && !transform_effects) {
        layer = sksg::TransformEffect::Make(std::move(layer), fLayerTransform);
    }

    // Optional layer effects.
    if (const skjson::ArrayValue* jeffects = fJlayer["ef"]) {
        layer = EffectBuilder(&abuilder, fInfo.fSize, cbuilder)
                    .attachEffects(*jeffects, std::move(layer));
    }

    // Attach the transform after effects, when needed.
    if (fLayerTransform && transform_effects) {
        layer = sksg::TransformEffect::Make(std::move(layer), std::move(fLayerTransform));
    }

    // Optional layer styles.
    if (const skjson::ArrayValue* jstyles = fJlayer["sy"]) {
        layer = EffectBuilder(&abuilder, fInfo.fSize, cbuilder)
                    .attachStyles(*jstyles, std::move(layer));
    }

    // Optional layer opacity.
    // TODO: de-dupe this "ks" lookup with matrix above.
    if (const skjson::ObjectValue* jtransform = fJlayer["ks"]) {
        layer = abuilder.attachOpacity(*jtransform, std::move(layer));
    }

    // Stash the content tree in case it is needed for later mattes.
    fContentTree = layer;

    // Hidden ("hd") layers contribute no visible content (but their stashed
    // content tree can still serve as a matte source).
    if (ParseDefault<bool>(fJlayer["hd"], false)) {
        layer = nullptr;
    }

    const auto has_animators = !abuilder.fCurrentAnimatorScope->empty();
    const auto force_seek_count = build_info.fFlags & kForceSeek
        ? abuilder.fCurrentAnimatorScope->size()
        : fTransformAnimatorCount;

    sk_sp<Animator> controller = sk_make_sp<LayerController>(ascope.release(),
                                                             layer,
                                                             force_seek_count,
                                                             fInfo.fInPoint,
                                                             fInfo.fOutPoint);

    // Optional motion blur.
    if (layer && has_animators && this->hasMotionBlur(cbuilder)) {
        // Wrap both the layer node and the controller.
        auto motion_blur = MotionBlurEffect::Make(std::move(controller), std::move(layer),
                                                  cbuilder->fMotionBlurSamples,
                                                  cbuilder->fMotionBlurAngle,
                                                  cbuilder->fMotionBlurPhase);
        controller = sk_make_sp<MotionBlurController>(motion_blur);
        layer = std::move(motion_blur);
    }

    abuilder.fCurrentAnimatorScope->push_back(std::move(controller));

    // A track matte ("td") applies to the *next* layer, not this one.
    if (ParseDefault<bool>(fJlayer["td"], false)) {
        // |layer| is a track matte. We apply it as a mask to the next layer.
        return nullptr;
    }

    // Optional matte.
    size_t matte_mode;
    if (prev_layer && Parse(fJlayer["tt"], &matte_mode)) {
        static constexpr sksg::MaskEffect::Mode gMatteModes[] = {
            sksg::MaskEffect::Mode::kAlphaNormal, // tt: 1
            sksg::MaskEffect::Mode::kAlphaInvert, // tt: 2
            sksg::MaskEffect::Mode::kLumaNormal, // tt: 3
            sksg::MaskEffect::Mode::kLumaInvert, // tt: 4
        };
        if (matte_mode > 0 && matte_mode <= SK_ARRAY_COUNT(gMatteModes)) {
            // The current layer is masked with the previous layer *content*.
            layer = sksg::MaskEffect::Make(std::move(layer),
                                           prev_layer->fContentTree,
                                           gMatteModes[matte_mode - 1]);
        } else {
            abuilder.log(Logger::Level::kError, nullptr,
                         "Unknown track matte mode: %zu\n", matte_mode);
        }
    }

    // Finally, attach an optional blend mode.
    // NB: blend modes are never applied to matte sources (layer content only).
    return abuilder.attachBlendMode(fJlayer, std::move(layer));
}
skia支持本地視頻解碼
skia/experimental/ffmpeg/SkVideoDecoder
假如自己實(shí)現(xiàn)
- 使用FFmpeg打開url
- 將AVFrame轉(zhuǎn)成SkBitmap用于最終繪制到畫布上
skia實(shí)現(xiàn)過程中可以借鑒的地方
- 使用SkData
- 使用FFmpeg自定義讀取文件方式打開url
- AVFrame轉(zhuǎn)成SkImage過程中判斷ColorSpace
// Wraps the contents of an open FILE in an SkData by memory-mapping it.
// Returns nullptr when the mapping fails.
sk_sp<SkData> SkData::MakeFromFILE(FILE* f) {
    size_t mappedSize;
    void* mappedAddr = sk_fmmap(f, &mappedSize);
    if (mappedAddr == nullptr) {
        return nullptr;
    }
    // The mapped length is smuggled through the release-proc context so the
    // unmap callback knows how many bytes to release.
    return SkData::MakeWithProc(mappedAddr, mappedSize, sk_mmap_releaseproc,
                                reinterpret_cast<void*>(mappedSize));
}
// Memory-maps the file at |path| into an SkData. Returns nullptr when the
// path is null or the file cannot be opened/mapped.
sk_sp<SkData> SkData::MakeFromFileName(const char path[]) {
    if (!path) {
        return nullptr;
    }
    FILE* file = sk_fopen(path, kRead_SkFILE_Flag);
    if (!file) {
        return nullptr;
    }
    auto mapped = MakeFromFILE(file);
    // The mapping (if any) holds its own reference; the FILE handle can go.
    sk_fclose(file);
    return mapped;
}
// Memory-maps the regular file behind |fd| read-only. On success, stores the
// file length in |*size| and returns the mapped address; returns nullptr on
// failure (stat error, non-regular file, oversized file, or mmap failure).
void* sk_fdmmap(int fd, size_t* size) {
    struct stat status;
    if (fstat(fd, &status) != 0) {
        return nullptr;
    }
    // Only regular files can be mapped meaningfully.
    if (!S_ISREG(status.st_mode)) {
        return nullptr;
    }
    // Reject st_size values that do not fit in size_t.
    if (!SkTFitsIn<size_t>(status.st_size)) {
        return nullptr;
    }
    const size_t fileSize = static_cast<size_t>(status.st_size);

    void* mapped = mmap(nullptr, fileSize, PROT_READ, MAP_PRIVATE, fd, 0);
    if (mapped == MAP_FAILED) {
        return nullptr;
    }

    *size = fileSize;
    return mapped;
}
這里使用了mmap這個(gè)函數(shù)
https://www.zhihu.com/question/48161206
mmap的主要的好處在於,減少一次內(nèi)存拷貝。在我們平時(shí)vfs的read/write系統(tǒng)調(diào)用中,文件內(nèi)容的拷貝要多經(jīng)歷內(nèi)核緩衝區(qū)這個(gè)階段,所以比mmap多了一次內(nèi)存拷貝,mmap只有用戶空間的內(nèi)存拷貝(這個(gè)階段read/write也有)。正是因為減少了從Linux的頁緩存到用戶空間的緩衝區(qū)的這一次拷貝,所以mmap大大提高了性能,mmap也被稱為zero-copy技術(shù)。
https://www.gnu.org/software/libc/manual/html_node/Memory_002dmapped-I_002fO.html
This is more efficient than read or write, as only the regions of the file that a program actually accesses are loaded. Accesses to not-yet-loaded parts of the mmapped region are handled in the same way as swapped out pages.
https://nieyong.github.io/wiki_cpu/mmap%E8%AF%A6%E8%A7%A3.html
FFmpeg自定義讀取
static int skstream_read_packet(void* ctx, uint8_t* dstBuffer, int dstSize) {
SkStream* stream = (SkStream*)ctx;
int result = (int)stream->read(dstBuffer, dstSize);
if (result == 0) {
result = AVERROR_EOF;
}
return result;
}
static int64_t skstream_seek_packet(void* ctx, int64_t pos, int whence) {
SkStream* stream = (SkStream*)ctx;
switch (whence) {
case SEEK_SET:
break;
case SEEK_CUR:
pos = (int64_t)stream->getPosition() + pos;
break;
case SEEK_END:
pos = (int64_t)stream->getLength() + pos;
break;
default:
return -1;
}
return stream->seek(SkToSizeT(pos)) ? pos : -1;
}
{
    // Allocate the I/O buffer FFmpeg will use to read from the SkStream.
    int bufferSize = 4 * 1024;
    uint8_t* buffer = (uint8_t*)av_malloc(bufferSize);
    if (!buffer) {
        return false;
    }
    fStream = std::move(stream);
    fStreamCtx = avio_alloc_context(buffer, bufferSize, 0, fStream.get(),
                                    skstream_read_packet, nullptr, skstream_seek_packet);
    if (!fStreamCtx) {
        // BUG FIX: av_freep() takes the *address* of the pointer (and nulls it);
        // passing |buffer| directly would free whatever the buffer's first
        // bytes happen to point at, leaking the buffer itself.
        av_freep(&buffer);
        this->reset();
        return false;
    }
    fFormatCtx = avformat_alloc_context();
    if (!fFormatCtx) {
        // reset() is expected to release fStreamCtx (and its buffer).
        this->reset();
        return false;
    }
    // Route all demuxer I/O through our custom AVIOContext.
    fFormatCtx->pb = fStreamCtx;

    int err = avformat_open_input(&fFormatCtx, nullptr, nullptr, nullptr);
    if (err < 0) {
        // On failure avformat_open_input() frees fFormatCtx and nulls it.
        SkDebugf("avformat_open_input failed %d\n", err);
        return false;
    }
}
AVFrame轉(zhuǎn)成SkImage過程中判斷ColorSpace
// Maps an FFmpeg color-space enum onto the closest Skia SkYUVColorSpace.
// Unrecognized values fall back to Rec.709.
static SkYUVColorSpace get_yuvspace(AVColorSpace space) {
    // this is pretty incomplete -- TODO: look to convert more AVColorSpaces
    switch (space) {
        case AVCOL_SPC_RGB:
            return kIdentity_SkYUVColorSpace;
        case AVCOL_SPC_BT709:
            return kRec709_SkYUVColorSpace;
        case AVCOL_SPC_SMPTE170M:
        case AVCOL_SPC_SMPTE240M:
        case AVCOL_SPC_BT470BG:
            return kRec601_SkYUVColorSpace;
        default:
            return kRec709_SkYUVColorSpace;
    }
}
// Parameters of a piecewise transfer function (cf. FFmpeg's vf_colorspace.c):
//   if x < beta:  delta * x
//   else:         alpha * (x^gamma)
// NOTE(review): standard OETFs (e.g. BT.709) subtract (alpha - 1) on the
// upper segment -- confirm against vf_colorspace.c before relying on this.
struct av_transfer_characteristics {
    float alpha, beta, gamma, delta;
};
// Tables extracted from vf_colorspace.c
// Transfer-characteristic coefficients indexed by AVColorTransferCharacteristic,
// extracted from FFmpeg's vf_colorspace.c. Unlisted entries are zero-filled.
// NOTE(review): designated array initializers are C99 -- this table is C
// reference data and does not compile as-is in a C++ translation unit.
const av_transfer_characteristics gTransfer[AVCOL_TRC_NB] = {
    [AVCOL_TRC_BT709] = { 1.099, 0.018, 0.45, 4.5 },
    [AVCOL_TRC_GAMMA22] = { 1.0, 0.0, 1.0 / 2.2, 0.0 },
    [AVCOL_TRC_GAMMA28] = { 1.0, 0.0, 1.0 / 2.8, 0.0 },
    [AVCOL_TRC_SMPTE170M] = { 1.099, 0.018, 0.45, 4.5 },
    [AVCOL_TRC_SMPTE240M] = { 1.1115, 0.0228, 0.45, 4.0 },
    [AVCOL_TRC_IEC61966_2_1] = { 1.055, 0.0031308, 1.0 / 2.4, 12.92 },
    [AVCOL_TRC_IEC61966_2_4] = { 1.099, 0.018, 0.45, 4.5 },
    [AVCOL_TRC_BT2020_10] = { 1.099, 0.018, 0.45, 4.5 },
    [AVCOL_TRC_BT2020_12] = { 1.0993, 0.0181, 0.45, 4.5 },
};