diff --git a/build-windows-win64 b/build-windows-win64 index 60ebade28b..01808596ba 100755 --- a/build-windows-win64 +++ b/build-windows-win64 @@ -9,7 +9,7 @@ for dep in docker make; do done # Use the latest distro for toolchains -distro="ubuntu:lunar" +distro="ubuntu:mantic" image_name="jellyfin-ffmpeg-build-windows-win64" package_temporary_dir="$( mktemp -d )" current_user="$( whoami )" diff --git a/build.yaml b/build.yaml index e8c814d58c..e37393073b 100644 --- a/build.yaml +++ b/build.yaml @@ -1,7 +1,7 @@ --- # We just wrap `build` so this is really it name: "jellyfin-ffmpeg" -version: "6.0.1-1" +version: "6.0.1-2" packages: - buster-amd64 - buster-armhf diff --git a/builder/scripts.d/10-xorg-macros.sh b/builder/scripts.d/10-xorg-macros.sh index 6cb4611774..949295b7d4 100755 --- a/builder/scripts.d/10-xorg-macros.sh +++ b/builder/scripts.d/10-xorg-macros.sh @@ -1,7 +1,7 @@ #!/bin/bash SCRIPT_REPO="https://gitlab.freedesktop.org/xorg/util/macros.git" -SCRIPT_COMMIT="cb147377e9341af05232f95814022abdecf14024" +SCRIPT_COMMIT="1031f8cc5c7a170e278372ccdf2e70151b096ef7" ffbuild_enabled() { [[ $TARGET != linux* ]] && return -1 diff --git a/builder/scripts.d/20-libxml2.sh b/builder/scripts.d/20-libxml2.sh index 4d6998cb0e..334e077e49 100755 --- a/builder/scripts.d/20-libxml2.sh +++ b/builder/scripts.d/20-libxml2.sh @@ -1,7 +1,7 @@ #!/bin/bash SCRIPT_REPO="https://github.com/GNOME/libxml2.git" -SCRIPT_COMMIT="da703eaaea152f6dd9e871cbe7f0ae46322c583d" +SCRIPT_COMMIT="12ce9b5ffeba776ede786c075795a4dbae94bfa1" ffbuild_enabled() { return 0 diff --git a/builder/scripts.d/20-zlib.sh b/builder/scripts.d/20-zlib.sh index 96e8ef6978..12d1abc8d7 100755 --- a/builder/scripts.d/20-zlib.sh +++ b/builder/scripts.d/20-zlib.sh @@ -1,7 +1,7 @@ #!/bin/bash SCRIPT_REPO="https://github.com/madler/zlib.git" -SCRIPT_COMMIT="15c45adb76e81a7e3a8a9e17b2a56eb90f668f44" +SCRIPT_COMMIT="b14484997a50c01b8d78f9db32516423573fc083" ffbuild_enabled() { return 0 diff --git a/builder/scripts.d/25-fftw3f.sh b/builder/scripts.d/25-fftw3f.sh index c010f496b1..3a54d32ae6 100755 --- a/builder/scripts.d/25-fftw3f.sh +++ b/builder/scripts.d/25-fftw3f.sh @@ -1,7 +1,7 @@ #!/bin/bash SCRIPT_REPO="https://github.com/FFTW/fftw3.git" -SCRIPT_COMMIT="38ea230e25e69e7a3f35b957b815bac4f9aa22b0" +SCRIPT_COMMIT="d0ce926f1523d95daed48cd7c69572e068dbbfb3" ffbuild_enabled() { # Dependency of GPL-Only librubberband diff --git a/builder/scripts.d/25-freetype.sh b/builder/scripts.d/25-freetype.sh index 4a6d3ff687..5face36890 100755 --- a/builder/scripts.d/25-freetype.sh +++ b/builder/scripts.d/25-freetype.sh @@ -1,7 +1,7 @@ #!/bin/bash SCRIPT_REPO="https://gitlab.freedesktop.org/freetype/freetype.git" -SCRIPT_COMMIT="028b0d5c1be1b1fe9305e073760ba6ac614ba6d8" +SCRIPT_COMMIT="47574f7ea445c8bb751da0fa716424c9c29a6807" ffbuild_enabled() { return 0 diff --git a/builder/scripts.d/25-fribidi.sh b/builder/scripts.d/25-fribidi.sh index 92a1d5d20e..252ac26c11 100755 --- a/builder/scripts.d/25-fribidi.sh +++ b/builder/scripts.d/25-fribidi.sh @@ -1,7 +1,7 @@ #!/bin/bash SCRIPT_REPO="https://github.com/fribidi/fribidi.git" -SCRIPT_COMMIT="b54871c339dabb7434718da3fed2fa63320997e5" +SCRIPT_COMMIT="5b9a242cbbb0cf27d20da9941667abfc63808c19" ffbuild_enabled() { return 0 diff --git a/builder/scripts.d/35-fontconfig.sh b/builder/scripts.d/35-fontconfig.sh index ff207da222..540734e43f 100755 --- a/builder/scripts.d/35-fontconfig.sh +++ b/builder/scripts.d/35-fontconfig.sh @@ -1,7 +1,7 @@ #!/bin/bash SCRIPT_REPO="https://gitlab.freedesktop.org/fontconfig/fontconfig.git" 
-SCRIPT_COMMIT="a264a2c0ca0be120c0fd2325f0d67ca4d5e81bd0" +SCRIPT_COMMIT="14d466b30a8ab4a9d789977ed94f2c30e7209267" ffbuild_enabled() { return 0 diff --git a/builder/scripts.d/45-harfbuzz.sh b/builder/scripts.d/45-harfbuzz.sh index 55d99b555c..f10f718ecc 100755 --- a/builder/scripts.d/45-harfbuzz.sh +++ b/builder/scripts.d/45-harfbuzz.sh @@ -1,7 +1,7 @@ #!/bin/bash SCRIPT_REPO="https://github.com/harfbuzz/harfbuzz.git" -SCRIPT_COMMIT="2b5af6f42e6ba4b6eb67f807267fd5821744c519" +SCRIPT_COMMIT="846d5204c07b006adcca2ca0937e5d6935d66334" ffbuild_enabled() { return 0 diff --git a/builder/scripts.d/45-x11/30-libxcb.sh b/builder/scripts.d/45-x11/30-libxcb.sh index d1eab1107f..6bc3b272be 100755 --- a/builder/scripts.d/45-x11/30-libxcb.sh +++ b/builder/scripts.d/45-x11/30-libxcb.sh @@ -1,7 +1,7 @@ #!/bin/bash SCRIPT_REPO="https://gitlab.freedesktop.org/xorg/lib/libxcb.git" -SCRIPT_COMMIT="02a7bbed391859c79864b9aacf040d84f103d38a" +SCRIPT_COMMIT="3c946010c8521497b0fba2c8bc9bde184622345a" ffbuild_enabled() { [[ $TARGET != linux* ]] && return -1 diff --git a/builder/scripts.d/45-x11/40-libx11.sh b/builder/scripts.d/45-x11/40-libx11.sh index f1143a9948..47291fe37a 100755 --- a/builder/scripts.d/45-x11/40-libx11.sh +++ b/builder/scripts.d/45-x11/40-libx11.sh @@ -1,7 +1,7 @@ #!/bin/bash SCRIPT_REPO="https://gitlab.freedesktop.org/xorg/lib/libx11.git" -SCRIPT_COMMIT="c745719e23af44a4b40ab4508447637b35d91a1e" +SCRIPT_COMMIT="ae3eca18cec44a953789c7f77ffab888713ed132" ffbuild_enabled() { [[ $TARGET != linux* ]] && return -1 diff --git a/builder/scripts.d/45-x11/50-libxxf86vm.sh b/builder/scripts.d/45-x11/50-libxxf86vm.sh index e18c26d324..2e20481153 100755 --- a/builder/scripts.d/45-x11/50-libxxf86vm.sh +++ b/builder/scripts.d/45-x11/50-libxxf86vm.sh @@ -1,7 +1,7 @@ #!/bin/bash SCRIPT_REPO="https://gitlab.freedesktop.org/xorg/lib/libxxf86vm.git" -SCRIPT_COMMIT="cfda59347e3a04415340a99f925a9cd85c0531b2" +SCRIPT_COMMIT="546c0e93adc535dfaaa66277928887c8dc1f8e13" ffbuild_enabled() { [[ $TARGET != linux* ]] && return -1 diff --git a/builder/scripts.d/45-x11/60-libglvnd.sh b/builder/scripts.d/45-x11/60-libglvnd.sh index 392c24835d..f4a5744ffd 100755 --- a/builder/scripts.d/45-x11/60-libglvnd.sh +++ b/builder/scripts.d/45-x11/60-libglvnd.sh @@ -1,7 +1,7 @@ #!/bin/bash SCRIPT_REPO="https://gitlab.freedesktop.org/glvnd/libglvnd.git" -SCRIPT_COMMIT="62176c235ceca2c20f30777f5bc4195bd70391c4" +SCRIPT_COMMIT="908086d22dc307d17d0eb35c522c35fd190718cc" ffbuild_enabled() { [[ $TARGET != linux* ]] && return -1 diff --git a/builder/scripts.d/45-x11/60-libxcursor.sh b/builder/scripts.d/45-x11/60-libxcursor.sh index 65bd54e390..0c67337ad5 100755 --- a/builder/scripts.d/45-x11/60-libxcursor.sh +++ b/builder/scripts.d/45-x11/60-libxcursor.sh @@ -1,7 +1,7 @@ #!/bin/bash SCRIPT_REPO="https://gitlab.freedesktop.org/xorg/lib/libxcursor.git" -SCRIPT_COMMIT="5e0f8347cebef2b3a9f5d75ca254aabaa0bca259" +SCRIPT_COMMIT="3783190da9aaa5ddd23aad6060469fd3f3ebece9" ffbuild_enabled() { [[ $TARGET != linux* ]] && return -1 diff --git a/builder/scripts.d/50-amf.sh b/builder/scripts.d/50-amf.sh index d9257a435e..9217bf5b2b 100755 --- a/builder/scripts.d/50-amf.sh +++ b/builder/scripts.d/50-amf.sh @@ -1,7 +1,7 @@ #!/bin/bash SCRIPT_REPO="https://github.com/GPUOpen-LibrariesAndSDKs/AMF.git" -SCRIPT_COMMIT="2f326350e849894a929296854f5290e66197c97c" +SCRIPT_COMMIT="8787d3ef5d938425d094170c1b2fec87762683b6" ffbuild_enabled() { [[ $TARGET == *arm64 ]] && return -1 diff --git a/builder/scripts.d/50-dav1d.sh b/builder/scripts.d/50-dav1d.sh index 
4151ee33f6..df7f029cea 100755 --- a/builder/scripts.d/50-dav1d.sh +++ b/builder/scripts.d/50-dav1d.sh @@ -1,7 +1,7 @@ #!/bin/bash SCRIPT_REPO="https://code.videolan.org/videolan/dav1d.git" -SCRIPT_COMMIT="2179b30c84571ae5a4ecfe60821b2dd0050f355f" +SCRIPT_COMMIT="16ed8e8b99f2fcfffe016e929d3626e15267ad3e" ffbuild_enabled() { return 0 diff --git a/builder/scripts.d/50-libass.sh b/builder/scripts.d/50-libass.sh index 3c3334fc01..b263164ff9 100755 --- a/builder/scripts.d/50-libass.sh +++ b/builder/scripts.d/50-libass.sh @@ -1,7 +1,7 @@ #!/bin/bash SCRIPT_REPO="https://github.com/libass/libass.git" -SCRIPT_COMMIT="9f4e6afeec54c0d240a9b8377f27e6c5b7c2bde0" +SCRIPT_COMMIT="58a8f09cccb4829239855791a305d1336d8ee773" ffbuild_enabled() { return 0 diff --git a/builder/scripts.d/50-libmp3lame.sh b/builder/scripts.d/50-libmp3lame.sh index e5418f2874..36d1b5abd1 100755 --- a/builder/scripts.d/50-libmp3lame.sh +++ b/builder/scripts.d/50-libmp3lame.sh @@ -1,7 +1,7 @@ #!/bin/bash SCRIPT_REPO="https://svn.code.sf.net/p/lame/svn/trunk/lame" -SCRIPT_REV="6507" +SCRIPT_REV="6531" ffbuild_enabled() { return 0 diff --git a/builder/scripts.d/50-onevpl.sh b/builder/scripts.d/50-libvpl.sh similarity index 82% rename from builder/scripts.d/50-onevpl.sh rename to builder/scripts.d/50-libvpl.sh index b68d90477c..090ec3717e 100755 --- a/builder/scripts.d/50-onevpl.sh +++ b/builder/scripts.d/50-libvpl.sh @@ -1,7 +1,7 @@ #!/bin/bash -SCRIPT_REPO="https://github.com/oneapi-src/oneVPL.git" -SCRIPT_COMMIT="ca5bbbb057a6e84b103aca807612afb693ad046c" +SCRIPT_REPO="https://github.com/intel/libvpl.git" +SCRIPT_COMMIT="18e890e7a5d6355306b8f1046b46c378ced453ff" ffbuild_enabled() { [[ $TARGET == *arm64 ]] && return -1 @@ -9,8 +9,8 @@ ffbuild_enabled() { } ffbuild_dockerbuild() { - git-mini-clone "$SCRIPT_REPO" "$SCRIPT_COMMIT" onevpl - cd onevpl + git-mini-clone "$SCRIPT_REPO" "$SCRIPT_COMMIT" libvpl + cd libvpl mkdir build && cd build diff --git a/builder/scripts.d/50-libvpx.sh b/builder/scripts.d/50-libvpx.sh index c9dde354f4..044cdf7b55 100755 --- a/builder/scripts.d/50-libvpx.sh +++ b/builder/scripts.d/50-libvpx.sh @@ -1,7 +1,7 @@ #!/bin/bash SCRIPT_REPO="https://chromium.googlesource.com/webm/libvpx" -SCRIPT_COMMIT="b7d847d0e7c754ca90c15eaca08515b959ff0359" +SCRIPT_COMMIT="433577ae317ac3c9f9f6efe0e22de8e2fa7b9e58" ffbuild_enabled() { return 0 diff --git a/builder/scripts.d/50-libwebp.sh b/builder/scripts.d/50-libwebp.sh index 9a786d08e2..efc98f344c 100755 --- a/builder/scripts.d/50-libwebp.sh +++ b/builder/scripts.d/50-libwebp.sh @@ -1,7 +1,7 @@ #!/bin/bash SCRIPT_REPO="https://chromium.googlesource.com/webm/libwebp" -SCRIPT_COMMIT="24d7f9cb6ef1ef90a04d7b6c15d3477813f75ee0" +SCRIPT_COMMIT="5efd6300dc3c4891a8ac928151c3484f9bc7d8fc" ffbuild_enabled() { return 0 diff --git a/builder/scripts.d/50-openmpt.sh b/builder/scripts.d/50-openmpt.sh index b6c2402e99..9f307367db 100755 --- a/builder/scripts.d/50-openmpt.sh +++ b/builder/scripts.d/50-openmpt.sh @@ -1,7 +1,7 @@ #!/bin/bash SCRIPT_REPO="https://source.openmpt.org/svn/openmpt/trunk/OpenMPT" -SCRIPT_REV="19911" +SCRIPT_REV="20108" ffbuild_enabled() { return 0 diff --git a/builder/scripts.d/50-srt.sh b/builder/scripts.d/50-srt.sh index ae222b663a..dab39180c3 100755 --- a/builder/scripts.d/50-srt.sh +++ b/builder/scripts.d/50-srt.sh @@ -1,7 +1,7 @@ #!/bin/bash SCRIPT_REPO="https://github.com/Haivision/srt.git" -SCRIPT_COMMIT="4a8067cf38656926494ae6b150cf7d148213c024" +SCRIPT_COMMIT="3dba3f441890b430380fba25848e8ff4a4db4bf3" ffbuild_enabled() { return 0 diff --git 
a/builder/scripts.d/50-svtav1.sh b/builder/scripts.d/50-svtav1.sh index a96acbb261..ecbff4d335 100755 --- a/builder/scripts.d/50-svtav1.sh +++ b/builder/scripts.d/50-svtav1.sh @@ -1,7 +1,7 @@ #!/bin/bash SCRIPT_REPO="https://gitlab.com/AOMediaCodec/SVT-AV1.git" -SCRIPT_COMMIT="ba13fac241f1b54954935f2cb200efc07f3de13a" +SCRIPT_COMMIT="fd71fc49c2737ab390fa9d09b49b3a1c92f629e1" ffbuild_enabled() { [[ $TARGET == win32 ]] && return -1 diff --git a/builder/scripts.d/50-vaapi/30-libpciaccess.sh b/builder/scripts.d/50-vaapi/30-libpciaccess.sh index 69aa19f534..00496a8faf 100755 --- a/builder/scripts.d/50-vaapi/30-libpciaccess.sh +++ b/builder/scripts.d/50-vaapi/30-libpciaccess.sh @@ -1,7 +1,7 @@ #!/bin/bash SCRIPT_REPO="https://gitlab.freedesktop.org/xorg/lib/libpciaccess.git" -SCRIPT_COMMIT="6cd5a4afbb70868c7746de8d50dea59e02e9acf2" +SCRIPT_COMMIT="c74d0a4b630f115e797cbb159ac13e0dc78f31f5" ffbuild_enabled() { [[ $TARGET != linux* ]] && return -1 @@ -12,19 +12,18 @@ ffbuild_dockerbuild() { git-mini-clone "$SCRIPT_REPO" "$SCRIPT_COMMIT" libpciaccess cd libpciaccess - autoreconf -fi + mkdir build && cd build local myconf=( --prefix="$FFBUILD_PREFIX" - --enable-shared - --disable-static - --with-pic - --with-zlib + --buildtype=release + --default-library=shared + -Dzlib=enabled ) if [[ $TARGET == linux* ]]; then myconf+=( - --host="$FFBUILD_TOOLCHAIN" + --cross-file=/cross.meson ) else echo "Unknown target" @@ -34,12 +33,12 @@ ffbuild_dockerbuild() { export CFLAGS="$RAW_CFLAGS" export LDFLAFS="$RAW_LDFLAGS" - ./configure "${myconf[@]}" - make -j$(nproc) - make install + meson setup "${myconf[@]}" .. + ninja -j$(nproc) + ninja install gen-implib "$FFBUILD_PREFIX"/lib/{libpciaccess.so.0,libpciaccess.a} - rm "$FFBUILD_PREFIX"/lib/libpciaccess{.so*,.la} + rm "$FFBUILD_PREFIX"/lib/libpciaccess.so* echo "Libs: -ldl" >> "$FFBUILD_PREFIX"/lib/pkgconfig/pciaccess.pc } diff --git a/builder/scripts.d/50-vaapi/50-libva.sh b/builder/scripts.d/50-vaapi/50-libva.sh index 7d363746d4..265356787c 100755 --- a/builder/scripts.d/50-vaapi/50-libva.sh +++ b/builder/scripts.d/50-vaapi/50-libva.sh @@ -1,7 +1,7 @@ #!/bin/bash SCRIPT_REPO="https://github.com/intel/libva.git" -SCRIPT_COMMIT="633746e717ef8daf82856b9909a4ffd0163df4b4" +SCRIPT_COMMIT="b8c7eed68846a8b9cdf1040be414880a4e0924a8" ffbuild_enabled() { [[ $TARGET != linux* ]] && return -1 diff --git a/builder/scripts.d/50-vulkan/45-vulkan.sh b/builder/scripts.d/50-vulkan/45-vulkan.sh index 445d26ff19..5828b7eebe 100755 --- a/builder/scripts.d/50-vulkan/45-vulkan.sh +++ b/builder/scripts.d/50-vulkan/45-vulkan.sh @@ -1,7 +1,7 @@ #!/bin/bash SCRIPT_REPO="https://github.com/KhronosGroup/Vulkan-Headers.git" -SCRIPT_COMMIT="v1.3.270" +SCRIPT_COMMIT="v1.3.276" SCRIPT_TAGFILTER="v?.*.*" ffbuild_enabled() { diff --git a/builder/scripts.d/50-vulkan/55-spirv-cross.sh b/builder/scripts.d/50-vulkan/55-spirv-cross.sh index c8481062df..bfcec0c6ba 100755 --- a/builder/scripts.d/50-vulkan/55-spirv-cross.sh +++ b/builder/scripts.d/50-vulkan/55-spirv-cross.sh @@ -1,7 +1,7 @@ #!/bin/bash SCRIPT_REPO="https://github.com/KhronosGroup/SPIRV-Cross.git" -SCRIPT_COMMIT="4818f7e7ef7b7078a3a7a5a52c4a338e0dda22f4" +SCRIPT_COMMIT="03b485dc47c6e84a15936601e8b121d84d8ddadf" ffbuild_enabled() { return 0 diff --git a/builder/scripts.d/50-x264.sh b/builder/scripts.d/50-x264.sh index 38997cb434..e0e2f4f1ec 100755 --- a/builder/scripts.d/50-x264.sh +++ b/builder/scripts.d/50-x264.sh @@ -1,7 +1,7 @@ #!/bin/bash -SCRIPT_REPO="https://github.com/mirror/x264.git" 
-SCRIPT_COMMIT="eaa68fad9e5d201d42fde51665f2d137ae96baf0" +SCRIPT_REPO="https://code.videolan.org/videolan/x264.git" +SCRIPT_COMMIT="4815ccadb1890572f2bf8b9d9553d56f6c9122ad" ffbuild_enabled() { [[ $VARIANT == lgpl* ]] && return -1 diff --git a/builder/scripts.d/50-x265.sh b/builder/scripts.d/50-x265.sh index 68936e12bf..d61b4f81f7 100755 --- a/builder/scripts.d/50-x265.sh +++ b/builder/scripts.d/50-x265.sh @@ -1,7 +1,7 @@ #!/bin/bash SCRIPT_REPO="https://bitbucket.org/multicoreware/x265_git.git" -SCRIPT_COMMIT="8ee01d45b05cdbc9da89b884815257807a514bc8" +SCRIPT_COMMIT="74abf80c70a3969fca2e112691cecfb50c0c2259" ffbuild_enabled() { [[ $VARIANT == lgpl* ]] && return -1 diff --git a/debian/changelog b/debian/changelog index df1188484e..31855894db 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,11 @@ +jellyfin-ffmpeg (6.0.1-2) unstable; urgency=medium + + * Add full HWA pipeline for Rockchip RK3588 platform + * Fix libx265 encoded fMP4 HLS playback on Safari + * Update build scripts and dependencies + + -- nyanmisaka Sun, 17 Dec 2023 22:41:39 +0800 + jellyfin-ffmpeg (6.0.1-1) unstable; urgency=medium * New upstream version 6.0.1 diff --git a/debian/control b/debian/control index 26a3e5cbfa..2f5b784752 100644 --- a/debian/control +++ b/debian/control @@ -50,7 +50,7 @@ Build-Depends: # --enable-cuda-llvm clang [!armhf !arm64], # --enable-opencl - ocl-icd-opencl-dev [!armhf !arm64], + ocl-icd-opencl-dev, # --enable-omx # libomxil-bellagio-dev # omx headers are fully included in raspberrypi/firmware. diff --git a/debian/patches/0010-add-d3d11-opencl-interop-for-amd.patch b/debian/patches/0010-add-d3d11-opencl-interop-for-amd.patch index e2aabe622b..dea0732f79 100644 --- a/debian/patches/0010-add-d3d11-opencl-interop-for-amd.patch +++ b/debian/patches/0010-add-d3d11-opencl-interop-for-amd.patch @@ -203,7 +203,7 @@ Index: jellyfin-ffmpeg/libavutil/hwcontext_opencl.c nb_planes = 2; if (src_fc->initial_pool_size == 0) { -@@ -2511,7 +2557,7 @@ static void opencl_unmap_from_d3d11(AVHW +@@ -2511,15 +2557,25 @@ static void opencl_unmap_from_d3d11(AVHW { AVOpenCLFrameDescriptor *desc = hwmap->priv; OpenCLDeviceContext *device_priv = dst_fc->device_ctx->internal->priv; @@ -211,9 +211,20 @@ Index: jellyfin-ffmpeg/libavutil/hwcontext_opencl.c + OpenCLFramesContext *frames_priv = dst_fc->internal->priv; cl_event event; cl_int cle; ++ const cl_mem *mem_objs; ++ cl_uint num_objs; ++ ++ if (!(device_priv->d3d11_map_amd || ++ device_priv->d3d11_map_intel)) ++ return; ++ ++ num_objs = device_priv->d3d11_map_amd ? 1 : desc->nb_planes; ++ mem_objs = device_priv->d3d11_map_amd ? 
&desc->planes[desc->nb_planes - 1] ++ : desc->planes; -@@ -2519,7 +2565,7 @@ static void opencl_unmap_from_d3d11(AVHW - frames_priv->command_queue, desc->nb_planes, desc->planes, + cle = device_priv->clEnqueueReleaseD3D11ObjectsKHR( +- frames_priv->command_queue, desc->nb_planes, desc->planes, ++ frames_priv->command_queue, num_objs, mem_objs, 0, NULL, &event); if (cle != CL_SUCCESS) { - av_log(dst_fc, AV_LOG_ERROR, "Failed to release surface " @@ -221,52 +232,46 @@ Index: jellyfin-ffmpeg/libavutil/hwcontext_opencl.c "handle: %d.\n", cle); } -@@ -2534,7 +2580,7 @@ static int opencl_map_from_d3d11(AVHWFra +@@ -2534,7 +2590,9 @@ static int opencl_map_from_d3d11(AVHWFra AVOpenCLFrameDescriptor *desc; cl_event event; cl_int cle; - int err, index, i; ++ const cl_mem *mem_objs; ++ cl_uint num_objs; + int err, index, i, nb_planes; index = (intptr_t)src->data[1]; if (index >= frames_priv->nb_mapped_frames) { -@@ -2548,20 +2594,36 @@ static int opencl_map_from_d3d11(AVHWFra +@@ -2543,16 +2601,25 @@ static int opencl_map_from_d3d11(AVHWFra + return AVERROR(EINVAL); + } + ++ if (!(device_priv->d3d11_map_amd || ++ device_priv->d3d11_map_intel)) ++ return AVERROR(ENOSYS); ++ + av_log(dst_fc, AV_LOG_DEBUG, "Map D3D11 texture %d to OpenCL.\n", + index); desc = &frames_priv->mapped_frames[index]; ++ nb_planes = device_priv->d3d11_map_amd ? (desc->nb_planes - 1) ++ : desc->nb_planes; ++ num_objs = device_priv->d3d11_map_amd ? 1 : desc->nb_planes; ++ mem_objs = device_priv->d3d11_map_amd ? &desc->planes[nb_planes] ++ : desc->planes; -- cle = device_priv->clEnqueueAcquireD3D11ObjectsKHR( + cle = device_priv->clEnqueueAcquireD3D11ObjectsKHR( - frames_priv->command_queue, desc->nb_planes, desc->planes, -- 0, NULL, &event); -- if (cle != CL_SUCCESS) { ++ frames_priv->command_queue, num_objs, mem_objs, + 0, NULL, &event); + if (cle != CL_SUCCESS) { - av_log(dst_fc, AV_LOG_ERROR, "Failed to acquire surface " -- "handle: %d.\n", cle); -- return AVERROR(EIO); -+ nb_planes = device_priv->d3d11_map_amd ? 
(desc->nb_planes - 1) -+ : desc->nb_planes; -+ -+ if (device_priv->d3d11_map_amd) { -+ cle = device_priv->clEnqueueAcquireD3D11ObjectsKHR( -+ frames_priv->command_queue, 1, &desc->planes[nb_planes], -+ 0, NULL, &event); -+ if (cle != CL_SUCCESS) { -+ av_log(dst_fc, AV_LOG_ERROR, "Failed to acquire texture " -+ "handle: %d.\n", cle); -+ return AVERROR(EIO); -+ } -+ } else if (device_priv->d3d11_map_intel) { -+ cle = device_priv->clEnqueueAcquireD3D11ObjectsKHR( -+ frames_priv->command_queue, nb_planes, desc->planes, -+ 0, NULL, &event); -+ if (cle != CL_SUCCESS) { -+ av_log(dst_fc, AV_LOG_ERROR, "Failed to acquire texture " -+ "handle: %d.\n", cle); -+ return AVERROR(EIO); -+ } -+ } else { -+ return AVERROR(ENOSYS); ++ av_log(dst_fc, AV_LOG_ERROR, "Failed to acquire texture " + "handle: %d.\n", cle); + return AVERROR(EIO); } - - err = opencl_wait_events(dst_fc, &event, 1); +@@ -2561,7 +2628,7 @@ static int opencl_map_from_d3d11(AVHWFra if (err < 0) goto fail; @@ -275,11 +280,20 @@ Index: jellyfin-ffmpeg/libavutil/hwcontext_opencl.c dst->data[i] = (uint8_t*)desc->planes[i]; err = ff_hwframe_map_create(dst->hw_frames_ctx, dst, src, -@@ -2591,16 +2653,26 @@ static int opencl_frames_derive_from_d3d +@@ -2576,7 +2643,7 @@ static int opencl_map_from_d3d11(AVHWFra + + fail: + cle = device_priv->clEnqueueReleaseD3D11ObjectsKHR( +- frames_priv->command_queue, desc->nb_planes, desc->planes, ++ frames_priv->command_queue, num_objs, mem_objs, + 0, NULL, &event); + if (cle == CL_SUCCESS) + opencl_wait_events(dst_fc, &event, 1); +@@ -2591,16 +2658,26 @@ static int opencl_frames_derive_from_d3d AVD3D11VAFramesContext *src_hwctx = src_fc->hwctx; OpenCLDeviceContext *device_priv = dst_fc->device_ctx->internal->priv; OpenCLFramesContext *frames_priv = dst_fc->internal->priv; -+ cl_mem planeUI; ++ cl_mem plane_uint; cl_mem_flags cl_flags; cl_int cle; int err, i, p, nb_planes; @@ -307,7 +321,7 @@ Index: jellyfin-ffmpeg/libavutil/hwcontext_opencl.c if (src_fc->initial_pool_size == 0) { av_log(dst_fc, AV_LOG_ERROR, "Only fixed-size pools are supported " -@@ -2623,27 +2695,94 @@ static int opencl_frames_derive_from_d3d +@@ -2623,27 +2700,94 @@ static int opencl_frames_derive_from_d3d for (i = 0; i < frames_priv->nb_mapped_frames; i++) { AVOpenCLFrameDescriptor *desc = &frames_priv->mapped_frames[i]; desc->nb_planes = nb_planes; @@ -338,9 +352,9 @@ Index: jellyfin-ffmpeg/libavutil/hwcontext_opencl.c + cl_image_format image_fmt; + + // get plane from AMD in CL_UNSIGNED_INT8|16 type. -+ planeUI = device_priv->clGetPlaneFromImageAMD( ++ plane_uint = device_priv->clGetPlaneFromImageAMD( + dst_dev->context, desc->planes[nb_planes - 1], p, &cle); -+ if (!planeUI) { ++ if (!plane_uint) { + av_log(dst_fc, AV_LOG_ERROR, "Failed to create CL image " + "from plane %d of image created from D3D11 " + "texture index %d: %d.\n", p, i, cle); @@ -349,7 +363,7 @@ Index: jellyfin-ffmpeg/libavutil/hwcontext_opencl.c + } + + cle = clGetImageInfo( -+ planeUI, CL_IMAGE_FORMAT, sizeof(cl_image_format), &image_fmt, NULL); ++ plane_uint, CL_IMAGE_FORMAT, sizeof(cl_image_format), &image_fmt, NULL); + if (cle != CL_SUCCESS) { + av_log(dst_fc, AV_LOG_ERROR, "Failed to query image format of CL image " + "from plane %d of image created from D3D11 " @@ -373,7 +387,7 @@ Index: jellyfin-ffmpeg/libavutil/hwcontext_opencl.c + + // convert plane from CL_UNSIGNED_INT8|16 to CL_UNORM_INT8|16. 
+ desc->planes[p] = device_priv->clConvertImageAMD( -+ dst_dev->context, planeUI, &image_fmt, &cle); ++ dst_dev->context, plane_uint, &image_fmt, &cle); + if (!desc->planes[p]) { + av_log(dst_fc, AV_LOG_ERROR, "Failed to convert data type of CL image " + "from plane %d of image created from D3D11 texture index %d " @@ -382,7 +396,7 @@ Index: jellyfin-ffmpeg/libavutil/hwcontext_opencl.c + goto fail; + } + -+ clReleaseMemObject(planeUI); ++ clReleaseMemObject(plane_uint); + } + } else if (device_priv->d3d11_map_intel) { + for (p = 0; p < nb_planes; p++) { @@ -409,8 +423,8 @@ Index: jellyfin-ffmpeg/libavutil/hwcontext_opencl.c return 0; fail: -+ if (planeUI) -+ clReleaseMemObject(planeUI); ++ if (plane_uint) ++ clReleaseMemObject(plane_uint); for (i = 0; i < frames_priv->nb_mapped_frames; i++) { AVOpenCLFrameDescriptor *desc = &frames_priv->mapped_frames[i]; for (p = 0; p < desc->nb_planes; p++) { diff --git a/debian/patches/0056-sync-intel-d3d11va-textures-before-mapping-to-opencl.patch b/debian/patches/0056-sync-intel-d3d11va-textures-before-mapping-to-opencl.patch index f543219726..e0e0554c8d 100644 --- a/debian/patches/0056-sync-intel-d3d11va-textures-before-mapping-to-opencl.patch +++ b/debian/patches/0056-sync-intel-d3d11va-textures-before-mapping-to-opencl.patch @@ -230,7 +230,7 @@ Index: jellyfin-ffmpeg/libavutil/hwcontext_opencl.c for (p = 0; p < nb_planes; p++) { UINT subresource = 2 * i + p; -@@ -2806,6 +2920,10 @@ static void opencl_unmap_from_d3d11(AVHW +@@ -2816,6 +2930,10 @@ static void opencl_unmap_from_d3d11(AVHW static int opencl_map_from_d3d11(AVHWFramesContext *dst_fc, AVFrame *dst, const AVFrame *src, int flags) { @@ -241,9 +241,9 @@ Index: jellyfin-ffmpeg/libavutil/hwcontext_opencl.c OpenCLDeviceContext *device_priv = dst_fc->device_ctx->internal->priv; OpenCLFramesContext *frames_priv = dst_fc->internal->priv; AVOpenCLFrameDescriptor *desc; -@@ -2828,6 +2946,14 @@ static int opencl_map_from_d3d11(AVHWFra - nb_planes = device_priv->d3d11_map_amd ? (desc->nb_planes - 1) - : desc->nb_planes; +@@ -2846,6 +2964,14 @@ static int opencl_map_from_d3d11(AVHWFra + mem_objs = device_priv->d3d11_map_amd ? 
&desc->planes[nb_planes] + : desc->planes; + if (src_hwctx->require_sync && + frames_priv->sync_point && frames_priv->sync_tex_2x2) { @@ -253,18 +253,18 @@ Index: jellyfin-ffmpeg/libavutil/hwcontext_opencl.c + dst_fc); + } + - if (device_priv->d3d11_map_amd) { - cle = device_priv->clEnqueueAcquireD3D11ObjectsKHR( - frames_priv->command_queue, 1, &desc->planes[nb_planes], -@@ -2881,6 +3007,7 @@ static int opencl_frames_derive_from_d3d + cle = device_priv->clEnqueueAcquireD3D11ObjectsKHR( + frames_priv->command_queue, num_objs, mem_objs, + 0, NULL, &event); +@@ -2885,6 +3011,7 @@ fail: + static int opencl_frames_derive_from_d3d11(AVHWFramesContext *dst_fc, AVHWFramesContext *src_fc, int flags) { - AVOpenCLDeviceContext *dst_dev = dst_fc->device_ctx->hwctx; + AVD3D11VADeviceContext *device_hwctx = src_fc->device_ctx->hwctx; + AVOpenCLDeviceContext *dst_dev = dst_fc->device_ctx->hwctx; AVD3D11VAFramesContext *src_hwctx = src_fc->hwctx; OpenCLDeviceContext *device_priv = dst_fc->device_ctx->internal->priv; - OpenCLFramesContext *frames_priv = dst_fc->internal->priv; -@@ -2923,6 +3050,14 @@ static int opencl_frames_derive_from_d3d +@@ -2928,6 +3055,14 @@ static int opencl_frames_derive_from_d3d if (!frames_priv->mapped_frames) return AVERROR(ENOMEM); diff --git a/debian/patches/0058-add-full-hwa-pipeline-for-rockchip-rk3588-platform.patch b/debian/patches/0058-add-full-hwa-pipeline-for-rockchip-rk3588-platform.patch new file mode 100644 index 0000000000..418fe01c6f --- /dev/null +++ b/debian/patches/0058-add-full-hwa-pipeline-for-rockchip-rk3588-platform.patch @@ -0,0 +1,6484 @@ +Index: jellyfin-ffmpeg/configure +=================================================================== +--- jellyfin-ffmpeg.orig/configure ++++ jellyfin-ffmpeg/configure +@@ -349,6 +349,7 @@ External library support: + --enable-omx enable OpenMAX IL code [no] + --enable-omx-rpi enable OpenMAX IL code for Raspberry Pi [no] + --enable-rkmpp enable Rockchip Media Process Platform code [no] ++ --enable-rkrga enable Rockchip 2D Raster Graphic Acceleration code [no] + --disable-v4l2-m2m disable V4L2 mem2mem code [autodetect] + --disable-vaapi disable Video Acceleration API (mainly Unix/Intel) code [autodetect] + --disable-vdpau disable Nvidia Video Decode and Presentation API for Unix code [autodetect] +@@ -1786,6 +1787,7 @@ EXTERNAL_LIBRARY_VERSION3_LIST=" + libvo_amrwbenc + mbedtls + rkmpp ++ rkrga + " + + EXTERNAL_LIBRARY_GPLV3_LIST=" +@@ -1913,6 +1915,7 @@ HWACCEL_LIBRARY_LIST=" + mmal + omx + opencl ++ rkmpp + " + + DOCUMENT_LIST=" +@@ -3163,8 +3166,10 @@ av1_mediacodec_decoder_deps="mediacodec" + av1_mediacodec_decoder_extralibs="-landroid" + av1_nvenc_encoder_deps="nvenc NV_ENC_PIC_PARAMS_AV1" + av1_nvenc_encoder_select="atsc_a53" ++av1_rkmpp_decoder_deps="rkmpp" + h263_v4l2m2m_decoder_deps="v4l2_m2m h263_v4l2_m2m" + h263_v4l2m2m_encoder_deps="v4l2_m2m h263_v4l2_m2m" ++h263_rkmpp_decoder_deps="rkmpp" + h264_amf_encoder_deps="amf" + h264_crystalhd_decoder_select="crystalhd h264_mp4toannexb_bsf h264_parser" + h264_cuvid_decoder_deps="cuvid" +@@ -3184,6 +3189,7 @@ h264_qsv_decoder_select="h264_mp4toannex + h264_qsv_encoder_select="atsc_a53 qsvenc" + h264_rkmpp_decoder_deps="rkmpp" + h264_rkmpp_decoder_select="h264_mp4toannexb_bsf" ++h264_rkmpp_encoder_deps="rkmpp" + h264_vaapi_encoder_select="atsc_a53 cbs_h264 vaapi_encode" + h264_v4l2m2m_decoder_deps="v4l2_m2m h264_v4l2_m2m" + h264_v4l2m2m_decoder_select="h264_mp4toannexb_bsf" +@@ -3204,6 +3210,7 @@ hevc_qsv_decoder_select="hevc_mp4toannex + 
hevc_qsv_encoder_select="hevcparse qsvenc" + hevc_rkmpp_decoder_deps="rkmpp" + hevc_rkmpp_decoder_select="hevc_mp4toannexb_bsf" ++hevc_rkmpp_encoder_deps="rkmpp" + hevc_vaapi_encoder_deps="VAEncPictureParameterBufferHEVC" + hevc_vaapi_encoder_select="atsc_a53 cbs_h265 vaapi_encode" + hevc_v4l2m2m_decoder_deps="v4l2_m2m hevc_v4l2_m2m" +@@ -3218,6 +3225,7 @@ mjpeg_vaapi_encoder_select="cbs_jpeg jpe + mp3_mf_encoder_deps="mediafoundation" + mpeg1_cuvid_decoder_deps="cuvid" + mpeg1_v4l2m2m_decoder_deps="v4l2_m2m mpeg1_v4l2_m2m" ++mpeg1_rkmpp_decoder_deps="rkmpp" + mpeg2_crystalhd_decoder_select="crystalhd" + mpeg2_cuvid_decoder_deps="cuvid" + mpeg2_mmal_decoder_deps="mmal" +@@ -3226,6 +3234,7 @@ mpeg2_qsv_decoder_select="qsvdec" + mpeg2_qsv_encoder_select="qsvenc" + mpeg2_vaapi_encoder_select="cbs_mpeg2 vaapi_encode" + mpeg2_v4l2m2m_decoder_deps="v4l2_m2m mpeg2_v4l2_m2m" ++mpeg2_rkmpp_decoder_deps="rkmpp" + mpeg4_crystalhd_decoder_select="crystalhd" + mpeg4_cuvid_decoder_deps="cuvid" + mpeg4_mediacodec_decoder_deps="mediacodec" +@@ -3233,6 +3242,8 @@ mpeg4_mmal_decoder_deps="mmal" + mpeg4_omx_encoder_deps="omx" + mpeg4_v4l2m2m_decoder_deps="v4l2_m2m mpeg4_v4l2_m2m" + mpeg4_v4l2m2m_encoder_deps="v4l2_m2m mpeg4_v4l2_m2m" ++mpeg4_rkmpp_decoder_deps="rkmpp" ++mpeg4_rkmpp_decoder_select="mpeg4_unpack_bframes_bsf" + msmpeg4_crystalhd_decoder_select="crystalhd" + vc1_crystalhd_decoder_select="crystalhd" + vc1_cuvid_decoder_deps="cuvid" +@@ -3709,6 +3720,7 @@ overlay_qsv_filter_deps="libmfx" + overlay_qsv_filter_select="qsvvpp" + overlay_vaapi_filter_deps="vaapi VAProcPipelineCaps_blend_flags" + overlay_vulkan_filter_deps="vulkan spirv_compiler" ++overlay_rkrga_filter_deps="rkrga" + owdenoise_filter_deps="gpl" + pad_opencl_filter_deps="opencl" + pan_filter_deps="swresample" +@@ -3731,6 +3743,7 @@ scale_filter_deps="swscale" + scale_opencl_filter_deps="opencl" + scale_qsv_filter_deps="libmfx" + scale_qsv_filter_select="qsvvpp" ++scale_rkrga_filter_deps="rkrga" + scdet_filter_select="scene_sad" + select_filter_select="scene_sad" + sharpness_vaapi_filter_deps="vaapi" +@@ -3771,6 +3784,7 @@ scale_vaapi_filter_deps="vaapi" + scale_vulkan_filter_deps="vulkan spirv_compiler" + vpp_qsv_filter_deps="libmfx" + vpp_qsv_filter_select="qsvvpp" ++vpp_rkrga_filter_deps="rkrga" + xfade_opencl_filter_deps="opencl" + yadif_cuda_filter_deps="ffnvcodec" + yadif_cuda_filter_deps_any="cuda_nvcc cuda_llvm" +@@ -3817,14 +3831,14 @@ cws2fws_extralibs="zlib_extralibs" + + # libraries, in any order + avcodec_deps="avutil" +-avcodec_suggest="libm stdatomic" ++avcodec_suggest="libm stdatomic rkrga" + avdevice_deps="avformat avcodec avutil" + avdevice_suggest="libm stdatomic" + avfilter_deps="avutil" + avfilter_suggest="libm stdatomic" + avformat_deps="avcodec avutil" + avformat_suggest="libm network zlib stdatomic" +-avutil_suggest="clock_gettime ffnvcodec libm libdrm libmfx opencl user32 vaapi vulkan videotoolbox corefoundation corevideo coremedia bcrypt stdatomic" ++avutil_suggest="clock_gettime ffnvcodec libm libdrm libmfx opencl rkmpp user32 vaapi vulkan videotoolbox corefoundation corevideo coremedia bcrypt stdatomic" + postproc_deps="avutil gpl" + postproc_suggest="libm stdatomic" + swresample_deps="avutil" +@@ -6795,11 +6809,16 @@ enabled openssl && { { check_p + check_lib openssl openssl/ssl.h SSL_library_init -lssl -lcrypto -lws2_32 -lgdi32 || + die "ERROR: openssl not found"; } + enabled pocketsphinx && require_pkg_config pocketsphinx pocketsphinx pocketsphinx/pocketsphinx.h ps_init +-enabled rkmpp && { 
require_pkg_config rkmpp rockchip_mpp rockchip/rk_mpi.h mpp_create && +- require_pkg_config rockchip_mpp "rockchip_mpp >= 1.3.7" rockchip/rk_mpi.h mpp_create && ++enabled rkmpp && { require_pkg_config rkmpp rockchip_mpp rockchip/rk_mpi.h mpp_create && ++ require_pkg_config rockchip_mpp "rockchip_mpp >= 1.3.8" rockchip/rk_mpi.h mpp_create && + { enabled libdrm || + die "ERROR: rkmpp requires --enable-libdrm"; } + } ++enabled rkrga && require rkrga rga/RgaApi.h c_RkRgaBlit -lrga && ++ { require rkrga rga/im2d.h querystring -lrga && ++ { enabled rkmpp || ++ die "ERROR: rkrga requires --enable-rkmpp"; } ++ } + enabled vapoursynth && require_pkg_config vapoursynth "vapoursynth-script >= 42" VSScript.h vsscript_init + + +@@ -6985,7 +7004,7 @@ fi + if enabled_all opencl libdrm ; then + check_type "CL/cl_intel.h" "clCreateImageFromFdINTEL_fn" && + enable opencl_drm_beignet +- check_func_headers "CL/cl_ext.h" clImportMemoryARM && ++ enabled_any arm aarch64 && + enable opencl_drm_arm + fi + +Index: jellyfin-ffmpeg/libavcodec/Makefile +=================================================================== +--- jellyfin-ffmpeg.orig/libavcodec/Makefile ++++ jellyfin-ffmpeg/libavcodec/Makefile +@@ -256,6 +256,7 @@ OBJS-$(CONFIG_AV1_MEDIACODEC_DECODER) + + OBJS-$(CONFIG_AV1_NVENC_ENCODER) += nvenc_av1.o nvenc.o + OBJS-$(CONFIG_AV1_QSV_ENCODER) += qsvenc_av1.o + OBJS-$(CONFIG_AV1_VAAPI_ENCODER) += vaapi_encode_av1.o av1_levels.o ++OBJS-$(CONFIG_AV1_RKMPP_DECODER) += rkmppdec.o + OBJS-$(CONFIG_AVRN_DECODER) += avrndec.o + OBJS-$(CONFIG_AVRP_DECODER) += r210dec.o + OBJS-$(CONFIG_AVRP_ENCODER) += r210enc.o +@@ -398,6 +399,7 @@ OBJS-$(CONFIG_H263_ENCODER) + + h263.o ituh263enc.o h263data.o + OBJS-$(CONFIG_H263_V4L2M2M_DECODER) += v4l2_m2m_dec.o + OBJS-$(CONFIG_H263_V4L2M2M_ENCODER) += v4l2_m2m_enc.o ++OBJS-$(CONFIG_H263_RKMPP_DECODER) += rkmppdec.o + OBJS-$(CONFIG_H264_DECODER) += h264dec.o h264_cabac.o h264_cavlc.o \ + h264_direct.o h264_loopfilter.o \ + h264_mb.o h264_picture.o \ +@@ -414,6 +416,7 @@ OBJS-$(CONFIG_H264_OMX_ENCODER) + + OBJS-$(CONFIG_H264_QSV_DECODER) += qsvdec.o + OBJS-$(CONFIG_H264_QSV_ENCODER) += qsvenc_h264.o + OBJS-$(CONFIG_H264_RKMPP_DECODER) += rkmppdec.o ++OBJS-$(CONFIG_H264_RKMPP_ENCODER) += rkmppenc.o + OBJS-$(CONFIG_H264_VAAPI_ENCODER) += vaapi_encode_h264.o h264_levels.o \ + h2645data.o + OBJS-$(CONFIG_H264_VIDEOTOOLBOX_ENCODER) += videotoolboxenc.o +@@ -439,6 +442,7 @@ OBJS-$(CONFIG_HEVC_QSV_DECODER) + + OBJS-$(CONFIG_HEVC_QSV_ENCODER) += qsvenc_hevc.o hevc_ps_enc.o \ + hevc_data.o + OBJS-$(CONFIG_HEVC_RKMPP_DECODER) += rkmppdec.o ++OBJS-$(CONFIG_HEVC_RKMPP_ENCODER) += rkmppenc.o + OBJS-$(CONFIG_HEVC_VAAPI_ENCODER) += vaapi_encode_h265.o h265_profile_level.o \ + h2645data.o + OBJS-$(CONFIG_HEVC_V4L2M2M_DECODER) += v4l2_m2m_dec.o +@@ -530,6 +534,7 @@ OBJS-$(CONFIG_MPEG1VIDEO_DECODER) + + OBJS-$(CONFIG_MPEG1VIDEO_ENCODER) += mpeg12enc.o mpeg12.o + OBJS-$(CONFIG_MPEG1_CUVID_DECODER) += cuviddec.o + OBJS-$(CONFIG_MPEG1_V4L2M2M_DECODER) += v4l2_m2m_dec.o ++OBJS-$(CONFIG_MPEG1_RKMPP_DECODER) += rkmppdec.o + OBJS-$(CONFIG_MPEG2_MMAL_DECODER) += mmaldec.o + OBJS-$(CONFIG_MPEG2_QSV_DECODER) += qsvdec.o + OBJS-$(CONFIG_MPEG2_QSV_ENCODER) += qsvenc_mpeg2.o +@@ -539,6 +544,7 @@ OBJS-$(CONFIG_MPEG2_CUVID_DECODER) + + OBJS-$(CONFIG_MPEG2_MEDIACODEC_DECODER) += mediacodecdec.o + OBJS-$(CONFIG_MPEG2_VAAPI_ENCODER) += vaapi_encode_mpeg2.o + OBJS-$(CONFIG_MPEG2_V4L2M2M_DECODER) += v4l2_m2m_dec.o ++OBJS-$(CONFIG_MPEG2_RKMPP_DECODER) += rkmppdec.o + OBJS-$(CONFIG_MPEG4_DECODER) += mpeg4videodsp.o 
xvididct.o + OBJS-$(CONFIG_MPEG4_ENCODER) += mpeg4videoenc.o + OBJS-$(CONFIG_MPEG4_CUVID_DECODER) += cuviddec.o +@@ -546,6 +552,7 @@ OBJS-$(CONFIG_MPEG4_MEDIACODEC_DECODER) + OBJS-$(CONFIG_MPEG4_OMX_ENCODER) += omx.o + OBJS-$(CONFIG_MPEG4_V4L2M2M_DECODER) += v4l2_m2m_dec.o + OBJS-$(CONFIG_MPEG4_V4L2M2M_ENCODER) += v4l2_m2m_enc.o ++OBJS-$(CONFIG_MPEG4_RKMPP_DECODER) += rkmppdec.o + OBJS-$(CONFIG_MPL2_DECODER) += mpl2dec.o ass.o + OBJS-$(CONFIG_MSA1_DECODER) += mss3.o + OBJS-$(CONFIG_MSCC_DECODER) += mscc.o +Index: jellyfin-ffmpeg/libavcodec/allcodecs.c +=================================================================== +--- jellyfin-ffmpeg.orig/libavcodec/allcodecs.c ++++ jellyfin-ffmpeg/libavcodec/allcodecs.c +@@ -150,6 +150,7 @@ extern const FFCodec ff_h263i_decoder; + extern const FFCodec ff_h263p_encoder; + extern const FFCodec ff_h263p_decoder; + extern const FFCodec ff_h263_v4l2m2m_decoder; ++extern const FFCodec ff_h263_rkmpp_decoder; + extern const FFCodec ff_h264_decoder; + extern const FFCodec ff_h264_crystalhd_decoder; + extern const FFCodec ff_h264_v4l2m2m_decoder; +@@ -212,13 +213,16 @@ extern const FFCodec ff_mpeg4_decoder; + extern const FFCodec ff_mpeg4_crystalhd_decoder; + extern const FFCodec ff_mpeg4_v4l2m2m_decoder; + extern const FFCodec ff_mpeg4_mmal_decoder; ++extern const FFCodec ff_mpeg4_rkmpp_decoder; + extern const FFCodec ff_mpegvideo_decoder; + extern const FFCodec ff_mpeg1_v4l2m2m_decoder; ++extern const FFCodec ff_mpeg1_rkmpp_decoder; + extern const FFCodec ff_mpeg2_mmal_decoder; + extern const FFCodec ff_mpeg2_crystalhd_decoder; + extern const FFCodec ff_mpeg2_v4l2m2m_decoder; + extern const FFCodec ff_mpeg2_qsv_decoder; + extern const FFCodec ff_mpeg2_mediacodec_decoder; ++extern const FFCodec ff_mpeg2_rkmpp_decoder; + extern const FFCodec ff_msa1_decoder; + extern const FFCodec ff_mscc_decoder; + extern const FFCodec ff_msmpeg4v1_decoder; +@@ -839,6 +843,7 @@ extern const FFCodec ff_av1_qsv_decoder; + extern const FFCodec ff_av1_qsv_encoder; + extern const FFCodec ff_av1_amf_encoder; + extern const FFCodec ff_av1_vaapi_encoder; ++extern const FFCodec ff_av1_rkmpp_decoder; + extern const FFCodec ff_libopenh264_encoder; + extern const FFCodec ff_libopenh264_decoder; + extern const FFCodec ff_h264_amf_encoder; +@@ -850,6 +855,7 @@ extern const FFCodec ff_h264_qsv_encoder + extern const FFCodec ff_h264_v4l2m2m_encoder; + extern const FFCodec ff_h264_vaapi_encoder; + extern const FFCodec ff_h264_videotoolbox_encoder; ++extern const FFCodec ff_h264_rkmpp_encoder; + extern const FFCodec ff_hevc_amf_encoder; + extern const FFCodec ff_hevc_cuvid_decoder; + extern const FFCodec ff_hevc_mediacodec_decoder; +@@ -860,6 +866,7 @@ extern const FFCodec ff_hevc_qsv_encoder + extern const FFCodec ff_hevc_v4l2m2m_encoder; + extern const FFCodec ff_hevc_vaapi_encoder; + extern const FFCodec ff_hevc_videotoolbox_encoder; ++extern const FFCodec ff_hevc_rkmpp_encoder; + extern const FFCodec ff_libkvazaar_encoder; + extern const FFCodec ff_mjpeg_cuvid_decoder; + extern const FFCodec ff_mjpeg_qsv_encoder; +Index: jellyfin-ffmpeg/libavcodec/rkmppdec.c +=================================================================== +--- jellyfin-ffmpeg.orig/libavcodec/rkmppdec.c ++++ jellyfin-ffmpeg/libavcodec/rkmppdec.c +@@ -1,6 +1,7 @@ + /* +- * RockChip MPP Video Decoder + * Copyright (c) 2017 Lionel CHAZALLON ++ * Copyright (c) 2023 Huseyin BIYIK ++ * Copyright (c) 2023 NyanMisaka + * + * This file is part of FFmpeg. 
+ * +@@ -19,569 +20,931 @@ + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +-#include +-#include +-#include +-#include +-#include +-#include +- +-#include "avcodec.h" +-#include "codec_internal.h" +-#include "decode.h" +-#include "hwconfig.h" +-#include "libavutil/buffer.h" +-#include "libavutil/common.h" +-#include "libavutil/frame.h" +-#include "libavutil/hwcontext.h" +-#include "libavutil/hwcontext_drm.h" +-#include "libavutil/imgutils.h" +-#include "libavutil/log.h" +- +-#define RECEIVE_FRAME_TIMEOUT 100 +-#define FRAMEGROUP_MAX_FRAMES 16 +-#define INPUT_MAX_PACKETS 4 +- +-typedef struct { +- MppCtx ctx; +- MppApi *mpi; +- MppBufferGroup frame_group; +- +- char first_packet; +- char eos_reached; +- +- AVBufferRef *frames_ref; +- AVBufferRef *device_ref; +-} RKMPPDecoder; +- +-typedef struct { +- AVClass *av_class; +- AVBufferRef *decoder_ref; +-} RKMPPDecodeContext; +- +-typedef struct { +- MppFrame frame; +- AVBufferRef *decoder_ref; +-} RKMPPFrameContext; ++/** ++ * @file ++ * Rockchip MPP (Media Process Platform) video decoder ++ */ ++ ++#include "config.h" ++#include "config_components.h" ++ ++#include "rkmppdec.h" + +-static MppCodingType rkmpp_get_codingtype(AVCodecContext *avctx) ++#if CONFIG_RKRGA ++#include ++#endif ++ ++static MppCodingType rkmpp_get_coding_type(AVCodecContext *avctx) + { + switch (avctx->codec_id) { ++ case AV_CODEC_ID_H263: return MPP_VIDEO_CodingH263; + case AV_CODEC_ID_H264: return MPP_VIDEO_CodingAVC; + case AV_CODEC_ID_HEVC: return MPP_VIDEO_CodingHEVC; ++ case AV_CODEC_ID_AV1: return MPP_VIDEO_CodingAV1; + case AV_CODEC_ID_VP8: return MPP_VIDEO_CodingVP8; + case AV_CODEC_ID_VP9: return MPP_VIDEO_CodingVP9; ++ case AV_CODEC_ID_MPEG1VIDEO: /* fallthrough */ ++ case AV_CODEC_ID_MPEG2VIDEO: return MPP_VIDEO_CodingMPEG2; ++ case AV_CODEC_ID_MPEG4: return MPP_VIDEO_CodingMPEG4; + default: return MPP_VIDEO_CodingUnused; + } + } + +-static uint32_t rkmpp_get_frameformat(MppFrameFormat mppformat) ++static uint32_t rkmpp_get_drm_format(MppFrameFormat mpp_fmt) + { +- switch (mppformat) { ++ switch (mpp_fmt & MPP_FRAME_FMT_MASK) { + case MPP_FMT_YUV420SP: return DRM_FORMAT_NV12; +-#ifdef DRM_FORMAT_NV12_10 +- case MPP_FMT_YUV420SP_10BIT: return DRM_FORMAT_NV12_10; +-#endif +- default: return 0; ++ case MPP_FMT_YUV420SP_10BIT: return DRM_FORMAT_NV15; ++ case MPP_FMT_YUV422SP: return DRM_FORMAT_NV16; ++ case MPP_FMT_YUV422SP_10BIT: return DRM_FORMAT_NV20; ++ default: return DRM_FORMAT_INVALID; + } + } + +-static int rkmpp_write_data(AVCodecContext *avctx, uint8_t *buffer, int size, int64_t pts) ++static uint32_t rkmpp_get_drm_afbc_format(MppFrameFormat mpp_fmt) + { +- RKMPPDecodeContext *rk_context = avctx->priv_data; +- RKMPPDecoder *decoder = (RKMPPDecoder *)rk_context->decoder_ref->data; +- int ret; +- MppPacket packet; ++ switch (mpp_fmt & MPP_FRAME_FMT_MASK) { ++ case MPP_FMT_YUV420SP: return DRM_FORMAT_YUV420_8BIT; ++ case MPP_FMT_YUV420SP_10BIT: return DRM_FORMAT_YUV420_10BIT; ++ case MPP_FMT_YUV422SP: return DRM_FORMAT_YUYV; ++ case MPP_FMT_YUV422SP_10BIT: return DRM_FORMAT_Y210; ++ default: return DRM_FORMAT_INVALID; ++ } ++} + +- // create the MPP packet +- ret = mpp_packet_init(&packet, buffer, size); +- if (ret != MPP_OK) { +- av_log(avctx, AV_LOG_ERROR, "Failed to init MPP packet (code = %d)\n", ret); +- return AVERROR_UNKNOWN; ++static uint32_t rkmpp_get_av_format(MppFrameFormat mpp_fmt) ++{ ++ switch (mpp_fmt & MPP_FRAME_FMT_MASK) { ++ case MPP_FMT_YUV420SP: return AV_PIX_FMT_NV12; ++ case 
MPP_FMT_YUV420SP_10BIT: return AV_PIX_FMT_NV15; ++ case MPP_FMT_YUV422SP: return AV_PIX_FMT_NV16; ++ case MPP_FMT_YUV422SP_10BIT: return AV_PIX_FMT_NV20; ++ default: return AV_PIX_FMT_NONE; + } ++} + +- mpp_packet_set_pts(packet, pts); ++static int get_afbc_byte_stride(const AVPixFmtDescriptor *desc, ++ int *stride, int reverse) ++{ ++ if (!desc || !stride || *stride <= 0) ++ return AVERROR(EINVAL); + +- if (!buffer) +- mpp_packet_set_eos(packet); ++ if (desc->nb_components == 1 || ++ (desc->flags & AV_PIX_FMT_FLAG_RGB) || ++ (!(desc->flags & AV_PIX_FMT_FLAG_RGB) && ++ !(desc->flags & AV_PIX_FMT_FLAG_PLANAR))) ++ return 0; + +- ret = decoder->mpi->decode_put_packet(decoder->ctx, packet); +- if (ret != MPP_OK) { +- if (ret == MPP_ERR_BUFFER_FULL) { +- av_log(avctx, AV_LOG_DEBUG, "Buffer full writing %d bytes to decoder\n", size); +- ret = AVERROR(EAGAIN); +- } else +- ret = AVERROR_UNKNOWN; +- } ++ if (desc->log2_chroma_w == 1 && desc->log2_chroma_h == 1) ++ *stride = reverse ? (*stride * 2 / 3) : (*stride * 3 / 2); ++ else if (desc->log2_chroma_w == 1 && !desc->log2_chroma_h) ++ *stride = reverse ? (*stride / 2) : (*stride * 2); ++ else if (!desc->log2_chroma_w && !desc->log2_chroma_h) ++ *stride = reverse ? (*stride / 3) : (*stride * 3); + else +- av_log(avctx, AV_LOG_DEBUG, "Wrote %d bytes to decoder\n", size); +- +- mpp_packet_deinit(&packet); ++ return AVERROR(EINVAL); + +- return ret; ++ return (*stride > 0) ? 0 : AVERROR(EINVAL); + } + +-static int rkmpp_close_decoder(AVCodecContext *avctx) ++static av_cold int rkmpp_decode_close(AVCodecContext *avctx) + { +- RKMPPDecodeContext *rk_context = avctx->priv_data; +- av_buffer_unref(&rk_context->decoder_ref); +- return 0; +-} ++ RKMPPDecContext *r = avctx->priv_data; + +-static void rkmpp_release_decoder(void *opaque, uint8_t *data) +-{ +- RKMPPDecoder *decoder = (RKMPPDecoder *)data; ++ r->eof = 0; ++ r->info_change = 0; ++ r->errinfo_cnt = 0; ++ r->queue_cnt = 0; ++ r->queue_size = 0; + +- if (decoder->mpi) { +- decoder->mpi->reset(decoder->ctx); +- mpp_destroy(decoder->ctx); +- decoder->ctx = NULL; ++ if (r->mapi) { ++ r->mapi->reset(r->mctx); ++ mpp_destroy(r->mctx); ++ r->mctx = NULL; + } +- +- if (decoder->frame_group) { +- mpp_buffer_group_put(decoder->frame_group); +- decoder->frame_group = NULL; ++ if (r->buf_group && ++ r->buf_mode == RKMPP_DEC_PURE_EXTERNAL) { ++ mpp_buffer_group_put(r->buf_group); ++ r->buf_group = NULL; + } + +- av_buffer_unref(&decoder->frames_ref); +- av_buffer_unref(&decoder->device_ref); ++ if (r->hwframe) ++ av_buffer_unref(&r->hwframe); ++ if (r->hwdevice) ++ av_buffer_unref(&r->hwdevice); + +- av_free(decoder); ++ return 0; + } + +-static int rkmpp_init_decoder(AVCodecContext *avctx) ++static av_cold int rkmpp_decode_init(AVCodecContext *avctx) + { +- RKMPPDecodeContext *rk_context = avctx->priv_data; +- RKMPPDecoder *decoder = NULL; +- MppCodingType codectype = MPP_VIDEO_CodingUnused; +- int ret; +- RK_S64 paramS64; +- RK_S32 paramS32; +- +- avctx->pix_fmt = AV_PIX_FMT_DRM_PRIME; ++ RKMPPDecContext *r = avctx->priv_data; ++ MppCodingType coding_type = MPP_VIDEO_CodingUnused; ++ int ret, is_fmt_supported = 0; ++ enum AVPixelFormat pix_fmts[3] = { AV_PIX_FMT_DRM_PRIME, ++ AV_PIX_FMT_NV12, ++ AV_PIX_FMT_NONE }; ++ ++ switch (avctx->pix_fmt) { ++ case AV_PIX_FMT_YUV420P: ++ case AV_PIX_FMT_YUVJ420P: ++ is_fmt_supported = 1; ++ break; ++ case AV_PIX_FMT_YUV420P10: ++ is_fmt_supported = ++ avctx->codec_id == AV_CODEC_ID_H264 || ++ avctx->codec_id == AV_CODEC_ID_HEVC || ++ avctx->codec_id == AV_CODEC_ID_VP9 
|| ++ avctx->codec_id == AV_CODEC_ID_AV1; ++ break; ++ case AV_PIX_FMT_YUV422P: ++ case AV_PIX_FMT_YUV422P10: ++ is_fmt_supported = ++ avctx->codec_id == AV_CODEC_ID_H264; ++ break; ++ case AV_PIX_FMT_NONE: /* fallback to drm_prime */ ++ is_fmt_supported = 1; ++ avctx->pix_fmt = AV_PIX_FMT_DRM_PRIME; ++ break; ++ default: ++ is_fmt_supported = 0; ++ break; ++ } ++ ++ if (avctx->pix_fmt != AV_PIX_FMT_DRM_PRIME) { ++ if (!is_fmt_supported) { ++ av_log(avctx, AV_LOG_ERROR, "MPP doesn't support codec '%s' with pix_fmt '%s'\n", ++ avcodec_get_name(avctx->codec_id), av_get_pix_fmt_name(avctx->pix_fmt)); ++ return AVERROR(ENOSYS); ++ } + +- // create a decoder and a ref to it +- decoder = av_mallocz(sizeof(RKMPPDecoder)); +- if (!decoder) { +- ret = AVERROR(ENOMEM); +- goto fail; ++ if ((ret = ff_get_format(avctx, pix_fmts)) < 0) { ++ av_log(avctx, AV_LOG_ERROR, "ff_get_format failed: %d\n", ret); ++ return ret; ++ } ++ avctx->pix_fmt = ret; + } + +- rk_context->decoder_ref = av_buffer_create((uint8_t *)decoder, sizeof(*decoder), rkmpp_release_decoder, +- NULL, AV_BUFFER_FLAG_READONLY); +- if (!rk_context->decoder_ref) { +- av_free(decoder); +- ret = AVERROR(ENOMEM); +- goto fail; ++ if ((coding_type = rkmpp_get_coding_type(avctx)) == MPP_VIDEO_CodingUnused) { ++ av_log(avctx, AV_LOG_ERROR, "Unknown codec id: %d\n", avctx->codec_id); ++ return AVERROR(ENOSYS); + } + +- av_log(avctx, AV_LOG_DEBUG, "Initializing RKMPP decoder.\n"); +- +- codectype = rkmpp_get_codingtype(avctx); +- if (codectype == MPP_VIDEO_CodingUnused) { +- av_log(avctx, AV_LOG_ERROR, "Unknown codec type (%d).\n", avctx->codec_id); +- ret = AVERROR_UNKNOWN; +- goto fail; ++ if ((ret = mpp_check_support_format(MPP_CTX_DEC, coding_type)) != MPP_OK) { ++ av_log(avctx, AV_LOG_ERROR, "MPP doesn't support codec '%s' (%d)\n", ++ avcodec_get_name(avctx->codec_id), avctx->codec_id); ++ return AVERROR(ENOSYS); + } + +- ret = mpp_check_support_format(MPP_CTX_DEC, codectype); +- if (ret != MPP_OK) { +- av_log(avctx, AV_LOG_ERROR, "Codec type (%d) unsupported by MPP\n", avctx->codec_id); +- ret = AVERROR_UNKNOWN; ++ if ((ret = mpp_create(&r->mctx, &r->mapi)) != MPP_OK) { ++ av_log(avctx, AV_LOG_ERROR, "Failed to create MPP context and api: %d\n", ret); ++ ret = AVERROR_EXTERNAL; + goto fail; + } + +- // Create the MPP context +- ret = mpp_create(&decoder->ctx, &decoder->mpi); +- if (ret != MPP_OK) { +- av_log(avctx, AV_LOG_ERROR, "Failed to create MPP context (code = %d).\n", ret); +- ret = AVERROR_UNKNOWN; ++ if ((ret = mpp_init(r->mctx, MPP_CTX_DEC, coding_type)) != MPP_OK) { ++ av_log(avctx, AV_LOG_ERROR, "Failed to init MPP context: %d\n", ret); ++ ret = AVERROR_EXTERNAL; + goto fail; + } + +- // initialize mpp +- ret = mpp_init(decoder->ctx, MPP_CTX_DEC, codectype); +- if (ret != MPP_OK) { +- av_log(avctx, AV_LOG_ERROR, "Failed to initialize MPP context (code = %d).\n", ret); +- ret = AVERROR_UNKNOWN; ++ if ((ret = r->mapi->control(r->mctx, MPP_DEC_SET_ENABLE_DEINTERLACE, &r->deint)) != MPP_OK) { ++ av_log(avctx, AV_LOG_ERROR, "Failed to set enable deinterlace: %d\n", ret); ++ ret = AVERROR_EXTERNAL; + goto fail; + } + +- // make decode calls blocking with a timeout +- paramS32 = MPP_POLL_BLOCK; +- ret = decoder->mpi->control(decoder->ctx, MPP_SET_OUTPUT_BLOCK, ¶mS32); +- if (ret != MPP_OK) { +- av_log(avctx, AV_LOG_ERROR, "Failed to set blocking mode on MPI (code = %d).\n", ret); +- ret = AVERROR_UNKNOWN; +- goto fail; ++ if (avctx->pix_fmt != AV_PIX_FMT_DRM_PRIME) ++ r->afbc = 0; ++ ++ if (r->afbc == RKMPP_DEC_AFBC_ON_RGA) { ++#if 
CONFIG_RKRGA ++ const char *rga_ver = querystring(RGA_VERSION); ++ int has_rga3 = !!strstr(rga_ver, "RGA_3"); ++ int is_rga3_compat = avctx->width >= 68 && ++ avctx->width <= 8176 && ++ avctx->height >= 2 && ++ avctx->height <= 8176; ++ ++ if (!has_rga3 || !is_rga3_compat) { ++#endif ++ av_log(avctx, AV_LOG_VERBOSE, "AFBC is requested without capable RGA, ignoring\n"); ++ r->afbc = RKMPP_DEC_AFBC_OFF; ++#if CONFIG_RKRGA ++ } ++#endif + } + +- paramS64 = RECEIVE_FRAME_TIMEOUT; +- ret = decoder->mpi->control(decoder->ctx, MPP_SET_OUTPUT_BLOCK_TIMEOUT, ¶mS64); +- if (ret != MPP_OK) { +- av_log(avctx, AV_LOG_ERROR, "Failed to set block timeout on MPI (code = %d).\n", ret); +- ret = AVERROR_UNKNOWN; +- goto fail; ++ if (r->afbc) { ++ MppFrameFormat afbc_fmt = MPP_FRAME_FBC_AFBC_V2; ++ ++ if (avctx->codec_id == AV_CODEC_ID_H264 || ++ avctx->codec_id == AV_CODEC_ID_HEVC || ++ avctx->codec_id == AV_CODEC_ID_VP9 || ++ avctx->codec_id == AV_CODEC_ID_AV1) { ++ if ((ret = r->mapi->control(r->mctx, MPP_DEC_SET_OUTPUT_FORMAT, &afbc_fmt)) != MPP_OK) { ++ av_log(avctx, AV_LOG_ERROR, "Failed to set AFBC mode: %d\n", ret); ++ ret = AVERROR_EXTERNAL; ++ goto fail; ++ } ++ } else { ++ av_log(avctx, AV_LOG_VERBOSE, "AFBC is not supported in codec '%s', ignoring\n", ++ avcodec_get_name(avctx->codec_id)); ++ r->afbc = 0; ++ } + } + +- ret = mpp_buffer_group_get_internal(&decoder->frame_group, MPP_BUFFER_TYPE_ION); +- if (ret) { +- av_log(avctx, AV_LOG_ERROR, "Failed to retrieve buffer group (code = %d)\n", ret); +- ret = AVERROR_UNKNOWN; +- goto fail; ++ if (avctx->hw_device_ctx) { ++ r->hwdevice = av_buffer_ref(avctx->hw_device_ctx); ++ if (!r->hwdevice) { ++ ret = AVERROR(ENOMEM); ++ goto fail; ++ } ++ av_log(avctx, AV_LOG_VERBOSE, "Picked up an existing RKMPP hardware device\n"); ++ } else { ++ if ((ret = av_hwdevice_ctx_create(&r->hwdevice, AV_HWDEVICE_TYPE_RKMPP, NULL, NULL, 0)) < 0) { ++ av_log(avctx, AV_LOG_ERROR, "Failed to create a RKMPP hardware device: %d\n", ret); ++ goto fail; ++ } ++ av_log(avctx, AV_LOG_VERBOSE, "Created a RKMPP hardware device\n"); + } + +- ret = decoder->mpi->control(decoder->ctx, MPP_DEC_SET_EXT_BUF_GROUP, decoder->frame_group); +- if (ret) { +- av_log(avctx, AV_LOG_ERROR, "Failed to assign buffer group (code = %d)\n", ret); +- ret = AVERROR_UNKNOWN; ++ return 0; ++ ++fail: ++ rkmpp_decode_close(avctx); ++ return ret; ++} ++ ++static int rkmpp_set_buffer_group(AVCodecContext *avctx, ++ enum AVPixelFormat pix_fmt, ++ int width, int height) ++{ ++ RKMPPDecContext *r = avctx->priv_data; ++ AVHWFramesContext *hwfc = NULL; ++ int i, ret, decoder_pool_size; ++ ++ if (!r->hwdevice) ++ return AVERROR(ENOMEM); ++ ++ av_buffer_unref(&r->hwframe); ++ ++ r->hwframe = av_hwframe_ctx_alloc(r->hwdevice); ++ if (!r->hwframe) ++ return AVERROR(ENOMEM); ++ ++ switch (avctx->codec_id) { ++ case AV_CODEC_ID_H264: ++ case AV_CODEC_ID_HEVC: ++ decoder_pool_size = 20; ++ break; ++ default: ++ decoder_pool_size = 10; ++ break; ++ } ++ ++ hwfc = (AVHWFramesContext *)r->hwframe->data; ++ hwfc->format = AV_PIX_FMT_DRM_PRIME; ++ hwfc->sw_format = pix_fmt; ++ hwfc->width = FFALIGN(width, 16); ++ hwfc->height = FFALIGN(height, 16); ++ ++ if (r->buf_mode == RKMPP_DEC_HALF_INTERNAL) { ++ AVRKMPPFramesContext *rkmpp_fc = NULL; ++ ++ if ((ret = av_hwframe_ctx_init(r->hwframe)) < 0) { ++ av_log(avctx, AV_LOG_ERROR, "Failed to init RKMPP frame pool\n"); ++ goto fail; ++ } ++ ++ rkmpp_fc = hwfc->hwctx; ++ r->buf_group = rkmpp_fc->buf_group; ++ goto attach; ++ } else if (r->buf_mode != RKMPP_DEC_PURE_EXTERNAL) { ++ 
ret = AVERROR(EINVAL); + goto fail; + } + +- ret = mpp_buffer_group_limit_config(decoder->frame_group, 0, FRAMEGROUP_MAX_FRAMES); +- if (ret) { +- av_log(avctx, AV_LOG_ERROR, "Failed to set buffer group limit (code = %d)\n", ret); +- ret = AVERROR_UNKNOWN; ++ hwfc->initial_pool_size = decoder_pool_size + 10; ++ if (avctx->extra_hw_frames > 0) ++ hwfc->initial_pool_size += avctx->extra_hw_frames; ++ ++ if ((ret = av_hwframe_ctx_init(r->hwframe)) < 0) { ++ av_log(avctx, AV_LOG_ERROR, "Failed to init RKMPP frame pool\n"); + goto fail; + } + +- decoder->first_packet = 1; ++ if (r->buf_group) { ++ if ((ret = mpp_buffer_group_clear(r->buf_group)) != MPP_OK) { ++ av_log(avctx, AV_LOG_ERROR, "Failed to clear external buffer group: %d\n", ret); ++ ret = AVERROR_EXTERNAL; ++ goto fail; ++ } ++ } else { ++ if ((ret = mpp_buffer_group_get_external(&r->buf_group, MPP_BUFFER_TYPE_DRM)) != MPP_OK) { ++ av_log(avctx, AV_LOG_ERROR, "Failed to get external buffer group: %d\n", ret); ++ ret = AVERROR_EXTERNAL; ++ goto fail; ++ } ++ } + +- av_log(avctx, AV_LOG_DEBUG, "RKMPP decoder initialized successfully.\n"); ++ for (i = 0; i < hwfc->initial_pool_size; i++) { ++ AVRKMPPFramesContext *rkmpp_fc = hwfc->hwctx; ++ MppBufferInfo buf_info = { ++ .index = i, ++ .type = MPP_BUFFER_TYPE_DRM, ++ .ptr = mpp_buffer_get_ptr(rkmpp_fc->frames[i].buffers[0]), ++ .fd = rkmpp_fc->frames[i].drm_desc.objects[0].fd, ++ .size = rkmpp_fc->frames[i].drm_desc.objects[0].size, ++ }; ++ ++ if ((ret = mpp_buffer_commit(r->buf_group, &buf_info)) != MPP_OK) { ++ av_log(avctx, AV_LOG_ERROR, "Failed to commit external buffer group: %d\n", ret); ++ ret = AVERROR_EXTERNAL; ++ goto fail; ++ } ++ } + +- decoder->device_ref = av_hwdevice_ctx_alloc(AV_HWDEVICE_TYPE_DRM); +- if (!decoder->device_ref) { +- ret = AVERROR(ENOMEM); ++attach: ++ if ((ret = r->mapi->control(r->mctx, MPP_DEC_SET_EXT_BUF_GROUP, r->buf_group)) != MPP_OK) { ++ av_log(avctx, AV_LOG_ERROR, "Failed to attach external buffer group: %d\n", ret); ++ ret = AVERROR_EXTERNAL; + goto fail; + } +- ret = av_hwdevice_ctx_init(decoder->device_ref); +- if (ret < 0) +- goto fail; ++ ++ if (r->buf_mode == RKMPP_DEC_HALF_INTERNAL) { ++ int group_limit = decoder_pool_size + ((width * height > (3840 * 2160 * 3)) ? 
2 : 10); ++ if (avctx->extra_hw_frames > 0) ++ group_limit += avctx->extra_hw_frames; ++ if ((ret = mpp_buffer_group_limit_config(r->buf_group, 0, group_limit)) != MPP_OK) ++ av_log(avctx, AV_LOG_WARNING, "Failed to set buffer group limit: %d\n", ret); ++ } + + return 0; + + fail: +- av_log(avctx, AV_LOG_ERROR, "Failed to initialize RKMPP decoder.\n"); +- rkmpp_close_decoder(avctx); ++ if (r->buf_group && ++ r->buf_mode == RKMPP_DEC_HALF_INTERNAL) { ++ mpp_buffer_group_put(r->buf_group); ++ r->buf_group = NULL; ++ } ++ av_buffer_unref(&r->hwframe); + return ret; + } + +-static int rkmpp_send_packet(AVCodecContext *avctx, const AVPacket *avpkt) ++static int rkmpp_export_mastering_display(AVCodecContext *avctx, AVFrame *frame, ++ MppFrameMasteringDisplayMetadata mpp_mastering) + { +- RKMPPDecodeContext *rk_context = avctx->priv_data; +- RKMPPDecoder *decoder = (RKMPPDecoder *)rk_context->decoder_ref->data; +- int ret; ++ AVMasteringDisplayMetadata *mastering = NULL; ++ AVFrameSideData *sd = NULL; ++ int mapping[3] = { 0, 1, 2 }; ++ int chroma_den = 0; ++ int max_luma_den = 0; ++ int min_luma_den = 0; ++ int i; + +- // handle EOF +- if (!avpkt->size) { +- av_log(avctx, AV_LOG_DEBUG, "End of stream.\n"); +- decoder->eos_reached = 1; +- ret = rkmpp_write_data(avctx, NULL, 0, 0); +- if (ret) +- av_log(avctx, AV_LOG_ERROR, "Failed to send EOS to decoder (code = %d)\n", ret); +- return ret; ++ switch (avctx->codec_id) { ++ case AV_CODEC_ID_HEVC: ++ // HEVC uses a g,b,r ordering, which we convert to a more natural r,g,b ++ mapping[0] = 2; ++ mapping[1] = 0; ++ mapping[2] = 1; ++ chroma_den = 50000; ++ max_luma_den = 10000; ++ min_luma_den = 10000; ++ break; ++ case AV_CODEC_ID_AV1: ++ chroma_den = 1 << 16; ++ max_luma_den = 1 << 8; ++ min_luma_den = 1 << 14; ++ break; ++ default: ++ return 0; + } + +- // on first packet, send extradata +- if (decoder->first_packet) { +- if (avctx->extradata_size) { +- ret = rkmpp_write_data(avctx, avctx->extradata, +- avctx->extradata_size, +- avpkt->pts); +- if (ret) { +- av_log(avctx, AV_LOG_ERROR, "Failed to write extradata to decoder (code = %d)\n", ret); +- return ret; +- } +- } +- decoder->first_packet = 0; ++ sd = av_frame_get_side_data(frame, AV_FRAME_DATA_MASTERING_DISPLAY_METADATA); ++ if (sd) ++ mastering = (AVMasteringDisplayMetadata *)sd->data; ++ else ++ mastering = av_mastering_display_metadata_create_side_data(frame); ++ if (!mastering) ++ return AVERROR(ENOMEM); ++ ++ for (i = 0; i < 3; i++) { ++ const int j = mapping[i]; ++ mastering->display_primaries[i][0] = av_make_q(mpp_mastering.display_primaries[j][0], chroma_den); ++ mastering->display_primaries[i][1] = av_make_q(mpp_mastering.display_primaries[j][1], chroma_den); + } ++ mastering->white_point[0] = av_make_q(mpp_mastering.white_point[0], chroma_den); ++ mastering->white_point[1] = av_make_q(mpp_mastering.white_point[1], chroma_den); + +- // now send packet +- ret = rkmpp_write_data(avctx, avpkt->data, avpkt->size, avpkt->pts); +- if (ret && ret!=AVERROR(EAGAIN)) +- av_log(avctx, AV_LOG_ERROR, "Failed to write data to decoder (code = %d)\n", ret); ++ mastering->max_luminance = av_make_q(mpp_mastering.max_luminance, max_luma_den); ++ mastering->min_luminance = av_make_q(mpp_mastering.min_luminance, min_luma_den); + +- return ret; ++ mastering->has_luminance = 1; ++ mastering->has_primaries = 1; ++ ++ return 0; + } + +-static void rkmpp_release_frame(void *opaque, uint8_t *data) ++static int rkmpp_export_content_light(AVFrame *frame, ++ MppFrameContentLightMetadata mpp_light) + { +- 
+-    AVDRMFrameDescriptor *desc = (AVDRMFrameDescriptor *)data;
+-    AVBufferRef *framecontextref = (AVBufferRef *)opaque;
+-    RKMPPFrameContext *framecontext = (RKMPPFrameContext *)framecontextref->data;
++    AVContentLightMetadata *light = NULL;
+ 
+-    mpp_frame_deinit(&framecontext->frame);
+-    av_buffer_unref(&framecontext->decoder_ref);
+-    av_buffer_unref(&framecontextref);
++    AVFrameSideData *sd = av_frame_get_side_data(frame, AV_FRAME_DATA_CONTENT_LIGHT_LEVEL);
++    if (sd)
++        light = (AVContentLightMetadata *)sd->data;
++    else
++        light = av_content_light_metadata_create_side_data(frame);
++    if (!light)
++        return AVERROR(ENOMEM);
++
++    light->MaxCLL = mpp_light.MaxCLL;
++    light->MaxFALL = mpp_light.MaxFALL;
+ 
+-    av_free(desc);
++    return 0;
+ }
+ 
+-static int rkmpp_retrieve_frame(AVCodecContext *avctx, AVFrame *frame)
++static void rkmpp_free_mpp_frame(void *opaque, uint8_t *data)
+ {
+-    RKMPPDecodeContext *rk_context = avctx->priv_data;
+-    RKMPPDecoder *decoder = (RKMPPDecoder *)rk_context->decoder_ref->data;
+-    RKMPPFrameContext *framecontext = NULL;
+-    AVBufferRef *framecontextref = NULL;
+-    int ret;
+-    MppFrame mppframe = NULL;
+-    MppBuffer buffer = NULL;
+-    AVDRMFrameDescriptor *desc = NULL;
++    MppFrame mpp_frame = (MppFrame)opaque;
++    mpp_frame_deinit(&mpp_frame);
++}
++
++static void rkmpp_free_drm_desc(void *opaque, uint8_t *data)
++{
++    AVRKMPPDRMFrameDescriptor *drm_desc = (AVRKMPPDRMFrameDescriptor *)opaque;
++    av_free(drm_desc);
++}
++
++static int frame_create_buf(AVFrame *frame,
++                            uint8_t* data, int size,
++                            void (*free)(void *opaque, uint8_t *data),
++                            void *opaque, int flags)
++{
++    int i;
++
++    for (i = 0; i < AV_NUM_DATA_POINTERS; i++) {
++        if (!frame->buf[i]) {
++            frame->buf[i] = av_buffer_create(data, size, free, opaque, flags);
++            return frame->buf[i] ? 0 : AVERROR(ENOMEM);
++        }
++    }
++    return AVERROR(EINVAL);
++}
++
++static int rkmpp_export_frame(AVCodecContext *avctx, AVFrame *frame, MppFrame mpp_frame)
++{
++    RKMPPDecContext *r = avctx->priv_data;
++    AVRKMPPDRMFrameDescriptor *desc = NULL;
+     AVDRMLayerDescriptor *layer = NULL;
+-    int mode;
+-    MppFrameFormat mppformat;
+-    uint32_t drmformat;
++    const AVPixFmtDescriptor *pix_desc;
++    MppBuffer mpp_buf = NULL;
++    MppFrameFormat mpp_fmt = MPP_FMT_BUTT;
++    int mpp_frame_mode = 0;
++    int ret, is_afbc = 0;
++
++    if (!frame || !mpp_frame)
++        return AVERROR(ENOMEM);
++
++    mpp_buf = mpp_frame_get_buffer(mpp_frame);
++    if (!mpp_buf)
++        return AVERROR(EAGAIN);
++
++    desc = av_mallocz(sizeof(*desc));
++    if (!desc)
++        return AVERROR(ENOMEM);
++
++    desc->drm_desc.nb_objects = 1;
++    desc->buffers[0] = mpp_buf;
++
++    desc->drm_desc.objects[0].fd = mpp_buffer_get_fd(mpp_buf);
++    desc->drm_desc.objects[0].size = mpp_buffer_get_size(mpp_buf);
++
++    mpp_fmt = mpp_frame_get_fmt(mpp_frame);
++    is_afbc = mpp_fmt & MPP_FRAME_FBC_MASK;
++
++    desc->drm_desc.nb_layers = 1;
++    layer = &desc->drm_desc.layers[0];
++    layer->planes[0].object_index = 0;
++
++    if (is_afbc) {
++        desc->drm_desc.objects[0].format_modifier =
++            DRM_FORMAT_MOD_ARM_AFBC(AFBC_FORMAT_MOD_SPARSE | AFBC_FORMAT_MOD_BLOCK_SIZE_16x16);
++
++        layer->format = rkmpp_get_drm_afbc_format(mpp_fmt);
++        layer->nb_planes = 1;
++        layer->planes[0].offset = 0;
++        layer->planes[0].pitch = mpp_frame_get_hor_stride(mpp_frame);
+ 
+-    ret = decoder->mpi->decode_get_frame(decoder->ctx, &mppframe);
+-    if (ret != MPP_OK && ret != MPP_ERR_TIMEOUT) {
+-        av_log(avctx, AV_LOG_ERROR, "Failed to get a frame from MPP (code = %d)\n", ret);
+-        goto fail;
++        pix_desc = av_pix_fmt_desc_get(avctx->sw_pix_fmt);
++        if ((ret = get_afbc_byte_stride(pix_desc, (int *)&layer->planes[0].pitch, 0)) < 0)
++            return ret;
++
++        /* MPP specific AFBC src_y offset, not memory address offset */
++        frame->crop_top = mpp_frame_get_offset_y(mpp_frame);
++    } else {
++        layer->format = rkmpp_get_drm_format(mpp_fmt);
++        layer->nb_planes = 2;
++        layer->planes[0].offset = 0;
++        layer->planes[0].pitch = mpp_frame_get_hor_stride(mpp_frame);
++
++        layer->planes[1].object_index = 0;
++        layer->planes[1].offset = layer->planes[0].pitch * mpp_frame_get_ver_stride(mpp_frame);
++        layer->planes[1].pitch = layer->planes[0].pitch;
+     }
+ 
+-    if (mppframe) {
+-        // Check whether we have a special frame or not
+-        if (mpp_frame_get_info_change(mppframe)) {
+-            AVHWFramesContext *hwframes;
++    if ((ret = frame_create_buf(frame, mpp_frame, mpp_frame_get_buf_size(mpp_frame),
++                                rkmpp_free_mpp_frame, mpp_frame, AV_BUFFER_FLAG_READONLY)) < 0)
++        return ret;
+ 
+-            av_log(avctx, AV_LOG_INFO, "Decoder noticed an info change (%dx%d), format=%d\n",
+-                   (int)mpp_frame_get_width(mppframe), (int)mpp_frame_get_height(mppframe),
+-                   (int)mpp_frame_get_fmt(mppframe));
++    if ((ret = frame_create_buf(frame, (uint8_t *)desc, sizeof(*desc),
++                                rkmpp_free_drm_desc, desc, AV_BUFFER_FLAG_READONLY)) < 0)
++        return ret;
+ 
+-            avctx->width = mpp_frame_get_width(mppframe);
+-            avctx->height = mpp_frame_get_height(mppframe);
++    frame->data[0] = (uint8_t *)desc;
+ 
+-            decoder->mpi->control(decoder->ctx, MPP_DEC_SET_INFO_CHANGE_READY, NULL);
++    frame->hw_frames_ctx = av_buffer_ref(r->hwframe);
++    if (!frame->hw_frames_ctx)
++        return AVERROR(ENOMEM);
+ 
+-            av_buffer_unref(&decoder->frames_ref);
++    if ((ret = ff_decode_frame_props(avctx, frame)) < 0)
++        return ret;
+ 
+-            decoder->frames_ref = av_hwframe_ctx_alloc(decoder->device_ref);
+-            if (!decoder->frames_ref) {
+-                ret = AVERROR(ENOMEM);
+-                goto fail;
+-            }
++    frame->width = avctx->width;
++    frame->height = avctx->height;
++    frame->pts = MPP_PTS_TO_PTS(mpp_frame_get_pts(mpp_frame), avctx->pkt_timebase);
++
++    mpp_frame_mode = mpp_frame_get_mode(mpp_frame);
++    frame->interlaced_frame = (mpp_frame_mode & MPP_FRAME_FLAG_FIELD_ORDER_MASK) == MPP_FRAME_FLAG_DEINTERLACED;
++    frame->top_field_first = (mpp_frame_mode & MPP_FRAME_FLAG_FIELD_ORDER_MASK) == MPP_FRAME_FLAG_TOP_FIRST;
++
++    if (avctx->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
++        avctx->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
++        MppFrameRational sar = mpp_frame_get_sar(mpp_frame);
++        frame->sample_aspect_ratio = av_div_q((AVRational) { sar.num, sar.den },
++                                              (AVRational) { frame->width, frame->height });
++    }
++
++    if (avctx->codec_id == AV_CODEC_ID_HEVC &&
++        (frame->color_trc == AVCOL_TRC_SMPTE2084 ||
++         frame->color_trc == AVCOL_TRC_ARIB_STD_B67)) {
++        ret = rkmpp_export_mastering_display(avctx, frame, mpp_frame_get_mastering_display(mpp_frame));
++        if (ret < 0)
++            return ret;
++        ret = rkmpp_export_content_light(frame, mpp_frame_get_content_light(mpp_frame));
++        if (ret < 0)
++            return ret;
++    }
+ 
+-            mppformat = mpp_frame_get_fmt(mppframe);
+-            drmformat = rkmpp_get_frameformat(mppformat);
++    return 0;
++}
+ 
+-            hwframes = (AVHWFramesContext*)decoder->frames_ref->data;
+-            hwframes->format = AV_PIX_FMT_DRM_PRIME;
+-            hwframes->sw_format = drmformat == DRM_FORMAT_NV12 ? AV_PIX_FMT_NV12 : AV_PIX_FMT_NONE;
+-            hwframes->width = avctx->width;
+-            hwframes->height = avctx->height;
+-            ret = av_hwframe_ctx_init(decoder->frames_ref);
+-            if (ret < 0)
+-                goto fail;
++static void rkmpp_export_avctx_color_props(AVCodecContext *avctx, MppFrame mpp_frame)
++{
++    int val;
+ 
+-            // here decoder is fully initialized, we need to feed it again with data
+-            ret = AVERROR(EAGAIN);
+-            goto fail;
+-        } else if (mpp_frame_get_eos(mppframe)) {
+-            av_log(avctx, AV_LOG_DEBUG, "Received a EOS frame.\n");
+-            decoder->eos_reached = 1;
+-            ret = AVERROR_EOF;
+-            goto fail;
+-        } else if (mpp_frame_get_discard(mppframe)) {
+-            av_log(avctx, AV_LOG_DEBUG, "Received a discard frame.\n");
+-            ret = AVERROR(EAGAIN);
+-            goto fail;
+-        } else if (mpp_frame_get_errinfo(mppframe)) {
+-            av_log(avctx, AV_LOG_ERROR, "Received a errinfo frame.\n");
+-            ret = AVERROR_UNKNOWN;
+-            goto fail;
+-        }
++    if (!avctx || !mpp_frame)
++        return;
+ 
+-        // here we should have a valid frame
+-        av_log(avctx, AV_LOG_DEBUG, "Received a frame.\n");
++    if (avctx->color_primaries == AVCOL_PRI_RESERVED0)
++        avctx->color_primaries = AVCOL_PRI_UNSPECIFIED;
++    if ((val = mpp_frame_get_color_primaries(mpp_frame)) &&
++        val != MPP_FRAME_PRI_RESERVED0 &&
++        val != MPP_FRAME_PRI_UNSPECIFIED)
++        avctx->color_primaries = val;
+ 
+-    // setup general frame fields
+-    frame->format = AV_PIX_FMT_DRM_PRIME;
+-    frame->width = mpp_frame_get_width(mppframe);
+-    frame->height = mpp_frame_get_height(mppframe);
+-    frame->pts = mpp_frame_get_pts(mppframe);
+-    frame->color_range = mpp_frame_get_color_range(mppframe);
+-    frame->color_primaries = mpp_frame_get_color_primaries(mppframe);
+-    frame->color_trc = mpp_frame_get_color_trc(mppframe);
+-    frame->colorspace = mpp_frame_get_colorspace(mppframe);
+-
+-    mode = mpp_frame_get_mode(mppframe);
+-    frame->interlaced_frame = ((mode & MPP_FRAME_FLAG_FIELD_ORDER_MASK) == MPP_FRAME_FLAG_DEINTERLACED);
+-    frame->top_field_first = ((mode & MPP_FRAME_FLAG_FIELD_ORDER_MASK) == MPP_FRAME_FLAG_TOP_FIRST);
+-
+-    mppformat = mpp_frame_get_fmt(mppframe);
+-    drmformat = rkmpp_get_frameformat(mppformat);
+-
+-    // now setup the frame buffer info
+-    buffer = mpp_frame_get_buffer(mppframe);
+-    if (buffer) {
+-        desc = av_mallocz(sizeof(AVDRMFrameDescriptor));
+-        if (!desc) {
+-            ret = AVERROR(ENOMEM);
+-            goto fail;
+-        }
++    if (avctx->color_trc == AVCOL_TRC_RESERVED0)
++        avctx->color_trc = AVCOL_TRC_UNSPECIFIED;
++    if ((val = mpp_frame_get_color_trc(mpp_frame)) &&
++        val != MPP_FRAME_TRC_RESERVED0 &&
++        val != MPP_FRAME_TRC_UNSPECIFIED)
++        avctx->color_trc = val;
+ 
+-        desc->nb_objects = 1;
+-        desc->objects[0].fd = mpp_buffer_get_fd(buffer);
+-        desc->objects[0].size = mpp_buffer_get_size(buffer);
+-
+-        desc->nb_layers = 1;
+-        layer = &desc->layers[0];
+-        layer->format = drmformat;
+-        layer->nb_planes = 2;
+-
+-        layer->planes[0].object_index = 0;
+-        layer->planes[0].offset = 0;
+-        layer->planes[0].pitch = mpp_frame_get_hor_stride(mppframe);
+-
+-        layer->planes[1].object_index = 0;
+-        layer->planes[1].offset = layer->planes[0].pitch * mpp_frame_get_ver_stride(mppframe);
+-        layer->planes[1].pitch = layer->planes[0].pitch;
+-
+-        // we also allocate a struct in buf[0] that will allow to hold additionnal information
+-        // for releasing properly MPP frames and decoder
+-        framecontextref = av_buffer_allocz(sizeof(*framecontext));
+-        if (!framecontextref) {
+-            ret = AVERROR(ENOMEM);
+-            goto fail;
+-        }
++    if (avctx->colorspace == AVCOL_SPC_RESERVED)
++        avctx->colorspace = AVCOL_SPC_UNSPECIFIED;
++    if ((val = mpp_frame_get_colorspace(mpp_frame)) &&
++        val != MPP_FRAME_SPC_RESERVED &&
++        val != MPP_FRAME_SPC_UNSPECIFIED)
++        avctx->colorspace = val;
+ 
+-        // MPP decoder needs to be closed only when all frames have been released.
+-        framecontext = (RKMPPFrameContext *)framecontextref->data;
+-        framecontext->decoder_ref = av_buffer_ref(rk_context->decoder_ref);
+-        framecontext->frame = mppframe;
+-
+-        frame->data[0] = (uint8_t *)desc;
+-        frame->buf[0] = av_buffer_create((uint8_t *)desc, sizeof(*desc), rkmpp_release_frame,
+-                                         framecontextref, AV_BUFFER_FLAG_READONLY);
++    if ((val = mpp_frame_get_color_range(mpp_frame)) > MPP_FRAME_RANGE_UNSPECIFIED)
++        avctx->color_range = val;
+ 
+-        if (!frame->buf[0]) {
+-            ret = AVERROR(ENOMEM);
+-            goto fail;
+-        }
++    if ((val = mpp_frame_get_chroma_location(mpp_frame)) > MPP_CHROMA_LOC_UNSPECIFIED)
++        avctx->chroma_sample_location = val;
++}
+ 
+-        frame->hw_frames_ctx = av_buffer_ref(decoder->frames_ref);
+-        if (!frame->hw_frames_ctx) {
+-            ret = AVERROR(ENOMEM);
+-            goto fail;
+-        }
++static int rkmpp_get_frame(AVCodecContext *avctx, AVFrame *frame, int timeout)
++{
++    RKMPPDecContext *r = avctx->priv_data;
++    MppFrame mpp_frame = NULL;
++    int ret;
+ 
+-        return 0;
+-    } else {
+-        av_log(avctx, AV_LOG_ERROR, "Failed to retrieve the frame buffer, frame is dropped (code = %d)\n", ret);
+-        mpp_frame_deinit(&mppframe);
+-    }
+-    } else if (decoder->eos_reached) {
+-        return AVERROR_EOF;
+-    } else if (ret == MPP_ERR_TIMEOUT) {
+-        av_log(avctx, AV_LOG_DEBUG, "Timeout when trying to get a frame from MPP\n");
++    if ((ret = r->mapi->control(r->mctx, MPP_SET_OUTPUT_TIMEOUT, (MppParam)&timeout)) != MPP_OK) {
++        av_log(avctx, AV_LOG_ERROR, "Failed to set output timeout: %d\n", ret);
++        return AVERROR_EXTERNAL;
+     }
+ 
+-    return AVERROR(EAGAIN);
++    ret = r->mapi->decode_get_frame(r->mctx, &mpp_frame);
++    if (ret != MPP_OK && ret != MPP_ERR_TIMEOUT) {
++        av_log(avctx, AV_LOG_ERROR, "Failed to get frame: %d\n", ret);
++        return AVERROR_EXTERNAL;
++    }
++    if (!mpp_frame) {
++        av_log(avctx, AV_LOG_DEBUG, "Timeout getting decoded frame\n");
++        return AVERROR(EAGAIN);
++    }
++    if (mpp_frame_get_eos(mpp_frame)) {
++        av_log(avctx, AV_LOG_DEBUG, "Received an 'EOS' frame\n");
++        r->eof = 1;
++        ret = AVERROR_EOF;
++        goto exit;
++    }
++    if (mpp_frame_get_discard(mpp_frame)) {
++        av_log(avctx, AV_LOG_DEBUG, "Received a 'discard' frame\n");
++        ret = AVERROR(EAGAIN);
++        goto exit;
++    }
++    if (mpp_frame_get_errinfo(mpp_frame)) {
++        av_log(avctx, AV_LOG_DEBUG, "Received an 'errinfo' frame\n");
++        ret = (r->errinfo_cnt++ > MAX_ERRINFO_COUNT) ? AVERROR_EXTERNAL : AVERROR(EAGAIN);
++        goto exit;
++    }
++
++    if ((r->info_change = mpp_frame_get_info_change(mpp_frame))) {
++        int fast_parse = r->fast_parse;
++        int mpp_frame_mode = mpp_frame_get_mode(mpp_frame);
++        const MppFrameFormat mpp_fmt = mpp_frame_get_fmt(mpp_frame);
++        enum AVPixelFormat pix_fmts[3] = { AV_PIX_FMT_DRM_PRIME,
++                                           AV_PIX_FMT_NONE,
++                                           AV_PIX_FMT_NONE };
++
++        av_log(avctx, AV_LOG_VERBOSE, "Noticed an info change\n");
++
++        if (r->afbc && !(mpp_fmt & MPP_FRAME_FBC_MASK))
++            av_log(avctx, AV_LOG_VERBOSE, "AFBC is requested but not supported\n");
++
++        pix_fmts[1] = rkmpp_get_av_format(mpp_fmt & MPP_FRAME_FMT_MASK);
++
++        if (avctx->pix_fmt == AV_PIX_FMT_DRM_PRIME)
++            avctx->sw_pix_fmt = pix_fmts[1];
++        else {
++            if ((ret = ff_get_format(avctx, pix_fmts)) < 0)
++                goto exit;
++            avctx->pix_fmt = ret;
++        }
+ 
+-fail:
+-    if (mppframe)
+-        mpp_frame_deinit(&mppframe);
++        avctx->width = mpp_frame_get_width(mpp_frame);
++        avctx->height = mpp_frame_get_height(mpp_frame);
++        avctx->coded_width = FFALIGN(avctx->width, 64);
++        avctx->coded_height = FFALIGN(avctx->height, 64);
++        rkmpp_export_avctx_color_props(avctx, mpp_frame);
++
++        av_log(avctx, AV_LOG_VERBOSE, "Configured with size: %dx%d | pix_fmt: %s | sw_pix_fmt: %s\n",
++               avctx->width, avctx->height,
++               av_get_pix_fmt_name(avctx->pix_fmt),
++               av_get_pix_fmt_name(avctx->sw_pix_fmt));
++
++        if ((ret = rkmpp_set_buffer_group(avctx, pix_fmts[1], avctx->width, avctx->height)) < 0)
++            goto exit;
++
++        /* Disable fast parsing for interlaced video */
++        if (((mpp_frame_mode & MPP_FRAME_FLAG_FIELD_ORDER_MASK) == MPP_FRAME_FLAG_DEINTERLACED ||
++             (mpp_frame_mode & MPP_FRAME_FLAG_FIELD_ORDER_MASK) == MPP_FRAME_FLAG_TOP_FIRST) && fast_parse) {
++            av_log(avctx, AV_LOG_VERBOSE, "Fast parsing is disabled for interlaced video\n");
++            fast_parse = 0;
++        }
++        if ((ret = r->mapi->control(r->mctx, MPP_DEC_SET_PARSER_FAST_MODE, &fast_parse)) != MPP_OK) {
++            av_log(avctx, AV_LOG_ERROR, "Failed to set parser fast mode: %d\n", ret);
++            ret = AVERROR_EXTERNAL;
++            goto exit;
++        }
+ 
+-    if (framecontext)
+-        av_buffer_unref(&framecontext->decoder_ref);
++        if ((ret = r->mapi->control(r->mctx, MPP_DEC_SET_INFO_CHANGE_READY, NULL)) != MPP_OK) {
++            av_log(avctx, AV_LOG_ERROR, "Failed to set info change ready: %d\n", ret);
++            ret = AVERROR_EXTERNAL;
++            goto exit;
++        }
++        goto exit;
++    } else {
++        av_log(avctx, AV_LOG_DEBUG, "Received a frame\n");
++        r->errinfo_cnt = 0;
++
++        switch (avctx->pix_fmt) {
++        case AV_PIX_FMT_DRM_PRIME:
++            {
++                if ((ret = rkmpp_export_frame(avctx, frame, mpp_frame)) < 0)
++                    goto exit;
++                return 0;
++            }
++            break;
++        case AV_PIX_FMT_NV12:
++        case AV_PIX_FMT_NV16:
++        case AV_PIX_FMT_NV15:
++        case AV_PIX_FMT_NV20:
++            {
++                AVFrame *tmp_frame = av_frame_alloc();
++                if (!tmp_frame) {
++                    ret = AVERROR(ENOMEM);
++                    goto exit;
++                }
++                if ((ret = rkmpp_export_frame(avctx, tmp_frame, mpp_frame)) < 0)
++                    goto exit;
++
++                if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
++                    av_log(avctx, AV_LOG_ERROR, "ff_get_buffer failed: %d\n", ret);
++                    av_frame_free(&tmp_frame);
++                    goto exit;
++                }
++                if ((ret = av_hwframe_transfer_data(frame, tmp_frame, 0)) < 0) {
++                    av_log(avctx, AV_LOG_ERROR, "av_hwframe_transfer_data failed: %d\n", ret);
++                    av_frame_free(&tmp_frame);
++                    goto exit;
++                }
++                if ((ret = av_frame_copy_props(frame, tmp_frame)) < 0) {
++                    av_log(avctx, AV_LOG_ERROR, "av_frame_copy_props failed: %d\n", ret);
++                    av_frame_free(&tmp_frame);
++                    goto exit;
++                }
++                av_frame_free(&tmp_frame);
++                return 0;
++            }
++            break;
++        default:
++            {
++                ret = AVERROR_BUG;
++                goto exit;
++            }
++            break;
++        }
++    }
+ 
+-    if (framecontextref)
+-        av_buffer_unref(&framecontextref);
++exit:
++    if (mpp_frame)
++        mpp_frame_deinit(&mpp_frame);
++    return ret;
++}
+ 
+-    if (desc)
+-        av_free(desc);
++static int rkmpp_send_eos(AVCodecContext *avctx)
++{
++    RKMPPDecContext *r = avctx->priv_data;
++    MppPacket mpp_pkt = NULL;
++    int ret;
+ 
+-    return ret;
++    if ((ret = mpp_packet_init(&mpp_pkt, NULL, 0)) != MPP_OK) {
++        av_log(avctx, AV_LOG_ERROR, "Failed to init 'EOS' packet: %d\n", ret);
++        return AVERROR_EXTERNAL;
++    }
++    mpp_packet_set_eos(mpp_pkt);
++
++    do {
++        ret = r->mapi->decode_put_packet(r->mctx, mpp_pkt);
++    } while (ret != MPP_OK);
++
++    mpp_packet_deinit(&mpp_pkt);
++    return 0;
+ }
+ 
+-static int rkmpp_receive_frame(AVCodecContext *avctx, AVFrame *frame)
++static int rkmpp_send_packet(AVCodecContext *avctx, AVPacket *pkt)
+ {
+-    RKMPPDecodeContext *rk_context = avctx->priv_data;
+-    RKMPPDecoder *decoder = (RKMPPDecoder *)rk_context->decoder_ref->data;
+-    int ret = MPP_NOK;
+-    AVPacket pkt = {0};
+-    RK_S32 usedslots, freeslots;
+-
+-    if (!decoder->eos_reached) {
+-        // we get the available slots in decoder
+-        ret = decoder->mpi->control(decoder->ctx, MPP_DEC_GET_STREAM_COUNT, &usedslots);
+-        if (ret != MPP_OK) {
+-            av_log(avctx, AV_LOG_ERROR, "Failed to get decoder used slots (code = %d).\n", ret);
+-            return ret;
+-        }
++    RKMPPDecContext *r = avctx->priv_data;
++    MppPacket mpp_pkt = NULL;
++    int64_t pts = PTS_TO_MPP_PTS(pkt->pts, avctx->pkt_timebase);
++    int ret;
+ 
+-        freeslots = INPUT_MAX_PACKETS - usedslots;
+-        if (freeslots > 0) {
+-            ret = ff_decode_get_packet(avctx, &pkt);
+-            if (ret < 0 && ret != AVERROR_EOF) {
+-                return ret;
+-            }
++    if ((ret = mpp_packet_init(&mpp_pkt, pkt->data, pkt->size)) != MPP_OK) {
++        av_log(avctx, AV_LOG_ERROR, "Failed to init packet: %d\n", ret);
++        return AVERROR_EXTERNAL;
++    }
++    mpp_packet_set_pts(mpp_pkt, pts);
++
++    if ((ret = r->mapi->decode_put_packet(r->mctx, mpp_pkt)) != MPP_OK) {
++        av_log(avctx, AV_LOG_TRACE, "Decoder buffer is full\n");
++        mpp_packet_deinit(&mpp_pkt);
++        return AVERROR(EAGAIN);
++    }
++    av_log(avctx, AV_LOG_DEBUG, "Wrote %d bytes to decoder\n", pkt->size);
+ 
+-            ret = rkmpp_send_packet(avctx, &pkt);
+-            av_packet_unref(&pkt);
++    mpp_packet_deinit(&mpp_pkt);
++    return 0;
++}
++
++static int rkmpp_decode_receive_frame(AVCodecContext *avctx, AVFrame *frame)
++{
++    AVCodecInternal *avci = avctx->internal;
++    RKMPPDecContext *r = avctx->priv_data;
++    AVPacket *pkt = &r->last_pkt;
++    int retry_cnt = 0;
++    int ret_send, ret_get;
++
++    if (r->info_change && !r->buf_group)
++        return AVERROR_EOF;
+ 
+-            if (ret < 0) {
+-                av_log(avctx, AV_LOG_ERROR, "Failed to send packet to decoder (code = %d)\n", ret);
+-                return ret;
++    if (!avci->draining) {
++        if (!pkt->size) {
++            switch (ff_decode_get_packet(avctx, pkt)) {
++            case AVERROR_EOF:
++                av_log(avctx, AV_LOG_DEBUG, "Decoder draining\n");
++                ret_send = rkmpp_send_eos(avctx);
++                if (ret_send < 0)
++                    return ret_send;
++                goto get_frame;
++            case AVERROR(EAGAIN):
++                av_log(avctx, AV_LOG_TRACE, "Decoder could not get packet, retrying\n");
++                return AVERROR(EAGAIN);
+             }
+         }
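For reference, the receive_frame callback above is driven by the standard libavcodec send/receive API; a caller decodes with something like the following minimal sketch (public API only, error handling trimmed; names hypothetical):

    #include <libavcodec/avcodec.h>

    /* Minimal decode loop that ends up in rkmpp_decode_receive_frame() above. */
    int decode_loop(AVCodecContext *dec, AVPacket *pkt, AVFrame *frame)
    {
        int ret = avcodec_send_packet(dec, pkt); /* a NULL pkt starts draining */
        if (ret < 0 && ret != AVERROR(EAGAIN))
            return ret;

        while ((ret = avcodec_receive_frame(dec, frame)) >= 0) {
            /* frame->format is AV_PIX_FMT_DRM_PRIME or one of the NV* formats here */
            av_frame_unref(frame);
        }
        return (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) ? 0 : ret;
    }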
+-
+-        // make sure we keep decoder full
+-        if (freeslots > 1)
+-            return AVERROR(EAGAIN);
++send_pkt:
++        /* there is definitely a packet to send to decoder */
++        ret_send = rkmpp_send_packet(avctx, pkt);
++        if (ret_send == 0) {
++            /* send successful, continue until decoder input buffer is full */
++            av_packet_unref(pkt);
++            r->queue_cnt++;
++            if (r->queue_size <= 0 ||
++                r->queue_cnt < r->queue_size)
++                return AVERROR(EAGAIN);
++        } else if (ret_send < 0 && ret_send != AVERROR(EAGAIN)) {
++            /* something went wrong, raise error */
++            av_log(avctx, AV_LOG_ERROR, "Decoder failed to send data: %d\n", ret_send);
++            return ret_send;
++        } else
++            /* input buffer is full, estimate queue size */
++            r->queue_size = FFMAX(r->queue_cnt, r->queue_size);
+     }
+ 
+-    return rkmpp_retrieve_frame(avctx, frame);
++    if (r->eof)
++        return AVERROR_EOF;
++
++get_frame:
++    /* we only get here when draining or when the input buffer is full */
++    ret_get = rkmpp_get_frame(avctx, frame, 100);
++    if (ret_get == AVERROR_EOF)
++        av_log(avctx, AV_LOG_DEBUG, "Decoder is at EOF\n");
++    /* EAGAIN should never happen during draining */
++    else if (avci->draining && ret_get == AVERROR(EAGAIN)) {
++        if (retry_cnt++ < MAX_RETRY_COUNT)
++            goto get_frame;
++        else
++            ret_get = AVERROR_BUG;
++    }
++    /* unlikely, but handle possible MPP synchronization issues */
++    else if (ret_get == AVERROR(EAGAIN) && ret_send == AVERROR(EAGAIN))
++        goto send_pkt;
++    else if (ret_get < 0 && ret_get != AVERROR(EAGAIN))
++        av_log(avctx, AV_LOG_ERROR, "Decoder failed to get frame: %d\n", ret_get);
++    else
++        r->queue_cnt--;
++
++    return ret_get;
+ }
+ 
+-static void rkmpp_flush(AVCodecContext *avctx)
++static void rkmpp_decode_flush(AVCodecContext *avctx)
+ {
+-    RKMPPDecodeContext *rk_context = avctx->priv_data;
+-    RKMPPDecoder *decoder = (RKMPPDecoder *)rk_context->decoder_ref->data;
+-    int ret = MPP_NOK;
++    RKMPPDecContext *r = avctx->priv_data;
++    int ret;
++
++    av_log(avctx, AV_LOG_DEBUG, "Decoder flushing\n");
+ 
+-    av_log(avctx, AV_LOG_DEBUG, "Flush.\n");
++    if ((ret = r->mapi->reset(r->mctx)) == MPP_OK) {
++        r->eof = 0;
++        r->info_change = 0;
++        r->errinfo_cnt = 0;
++        r->queue_cnt = 0;
++        r->queue_size = 0;
+ 
+-    ret = decoder->mpi->reset(decoder->ctx);
+-    if (ret == MPP_OK) {
+-        decoder->first_packet = 1;
++        av_packet_unref(&r->last_pkt);
++        av_frame_unref(&r->last_frame);
+     } else
+-        av_log(avctx, AV_LOG_ERROR, "Failed to reset MPI (code = %d)\n", ret);
++        av_log(avctx, AV_LOG_ERROR, "Failed to reset MPP context: %d\n", ret);
+ }
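The flush callback above maps libavcodec's flush semantics onto mapi->reset() plus a reset of the bookkeeping this patch adds. On the caller side it is reached through the usual seek pattern; a short sketch (assuming fmt_ctx/dec_ctx/video_idx from an already-opened file, names hypothetical):

    /* After a seek, drop all queued MPP state before feeding new packets. */
    av_seek_frame(fmt_ctx, video_idx, target_ts, AVSEEK_FLAG_BACKWARD);
    avcodec_flush_buffers(dec_ctx); /* invokes rkmpp_decode_flush() above */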
+ 
+-static const AVCodecHWConfigInternal *const rkmpp_hw_configs[] = {
+-    HW_CONFIG_INTERNAL(DRM_PRIME),
+-    NULL
+-};
+-
+-#define RKMPP_DEC_CLASS(NAME) \
+-    static const AVClass rkmpp_##NAME##_dec_class = { \
+-        .class_name = "rkmpp_" #NAME "_dec", \
+-        .version = LIBAVUTIL_VERSION_INT, \
+-    };
+-
+-#define RKMPP_DEC(NAME, ID, BSFS) \
+-    RKMPP_DEC_CLASS(NAME) \
+-    const FFCodec ff_##NAME##_rkmpp_decoder = { \
+-        .p.name = #NAME "_rkmpp", \
+-        CODEC_LONG_NAME(#NAME " (rkmpp)"), \
+-        .p.type = AVMEDIA_TYPE_VIDEO, \
+-        .p.id = ID, \
+-        .priv_data_size = sizeof(RKMPPDecodeContext), \
+-        .init = rkmpp_init_decoder, \
+-        .close = rkmpp_close_decoder, \
+-        FF_CODEC_RECEIVE_FRAME_CB(rkmpp_receive_frame), \
+-        .flush = rkmpp_flush, \
+-        .p.priv_class = &rkmpp_##NAME##_dec_class, \
+-        .p.capabilities = AV_CODEC_CAP_DELAY | AV_CODEC_CAP_AVOID_PROBING | AV_CODEC_CAP_HARDWARE, \
+-        .p.pix_fmts = (const enum AVPixelFormat[]) { AV_PIX_FMT_DRM_PRIME, \
+-                                                     AV_PIX_FMT_NONE}, \
+-        .hw_configs = rkmpp_hw_configs, \
+-        .bsfs = BSFS, \
+-        .p.wrapper_name = "rkmpp", \
+-        .caps_internal = FF_CODEC_CAP_NOT_INIT_THREADSAFE, \
+-    };
+-
+-RKMPP_DEC(h264, AV_CODEC_ID_H264, "h264_mp4toannexb")
+-RKMPP_DEC(hevc, AV_CODEC_ID_HEVC, "hevc_mp4toannexb")
+-RKMPP_DEC(vp8, AV_CODEC_ID_VP8, NULL)
+-RKMPP_DEC(vp9, AV_CODEC_ID_VP9, NULL)
++#if CONFIG_H263_RKMPP_DECODER
++DEFINE_RKMPP_DECODER(h263, H263, NULL)
++#endif
++#if CONFIG_H264_RKMPP_DECODER
++DEFINE_RKMPP_DECODER(h264, H264, "h264_mp4toannexb")
++#endif
++#if CONFIG_HEVC_RKMPP_DECODER
++DEFINE_RKMPP_DECODER(hevc, HEVC, "hevc_mp4toannexb")
++#endif
++#if CONFIG_VP8_RKMPP_DECODER
++DEFINE_RKMPP_DECODER(vp8, VP8, NULL)
++#endif
++#if CONFIG_VP9_RKMPP_DECODER
++DEFINE_RKMPP_DECODER(vp9, VP9, NULL)
++#endif
++#if CONFIG_AV1_RKMPP_DECODER
++DEFINE_RKMPP_DECODER(av1, AV1, NULL)
++#endif
++#if CONFIG_MPEG1_RKMPP_DECODER
++DEFINE_RKMPP_DECODER(mpeg1, MPEG1VIDEO, NULL)
++#endif
++#if CONFIG_MPEG2_RKMPP_DECODER
++DEFINE_RKMPP_DECODER(mpeg2, MPEG2VIDEO, NULL)
++#endif
++#if CONFIG_MPEG4_RKMPP_DECODER
++DEFINE_RKMPP_DECODER(mpeg4, MPEG4, "mpeg4_unpack_bframes")
++#endif
+Index: jellyfin-ffmpeg/libavcodec/rkmppdec.h
+===================================================================
+--- /dev/null
++++ jellyfin-ffmpeg/libavcodec/rkmppdec.h
+@@ -0,0 +1,157 @@
++/*
++ * Copyright (c) 2017 Lionel CHAZALLON
++ * Copyright (c) 2023 Huseyin BIYIK
++ * Copyright (c) 2023 NyanMisaka
++ *
++ * This file is part of FFmpeg.
++ *
++ * FFmpeg is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU Lesser General Public
++ * License as published by the Free Software Foundation; either
++ * version 2.1 of the License, or (at your option) any later version.
++ *
++ * FFmpeg is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with FFmpeg; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
++ */
++
++/**
++ * @file
++ * Rockchip MPP (Media Process Platform) video decoder
++ */
++
++#ifndef AVCODEC_RKMPPDEC_H
++#define AVCODEC_RKMPPDEC_H
++
++#include <rockchip/rk_mpi.h>
++
++#include "codec_internal.h"
++#include "decode.h"
++#include "hwconfig.h"
++#include "internal.h"
++
++#include "libavutil/hwcontext_rkmpp.h"
++#include "libavutil/mastering_display_metadata.h"
++#include "libavutil/opt.h"
++#include "libavutil/pixdesc.h"
++
++#define MAX_ERRINFO_COUNT 100
++#define MAX_RETRY_COUNT 100
++
++typedef struct RKMPPDecContext {
++    AVClass *class;
++
++    MppApi *mapi;
++    MppCtx mctx;
++    MppBufferGroup buf_group;
++
++    AVBufferRef *hwdevice;
++    AVBufferRef *hwframe;
++
++    AVPacket last_pkt;
++    AVFrame last_frame;
++
++    int eof;
++    int info_change;
++    int errinfo_cnt;
++    int queue_cnt;
++    int queue_size;
++
++    int deint;
++    int afbc;
++    int fast_parse;
++    int buf_mode;
++} RKMPPDecContext;
++
++enum {
++    RKMPP_DEC_AFBC_OFF = 0,
++    RKMPP_DEC_AFBC_ON = 1,
++    RKMPP_DEC_AFBC_ON_RGA = 2,
++};
++
++enum {
++    RKMPP_DEC_HALF_INTERNAL = 0,
++    RKMPP_DEC_PURE_EXTERNAL = 1,
++};
++
++static const AVRational mpp_tb = { 1, 1000000 };
++
++#define PTS_TO_MPP_PTS(pts, pts_tb) ((pts_tb.num && pts_tb.den) ? \
++                                     av_rescale_q(pts, pts_tb, mpp_tb) : pts)
++
++#define MPP_PTS_TO_PTS(mpp_pts, pts_tb) ((pts_tb.num && pts_tb.den) ? \
++                                         av_rescale_q(mpp_pts, mpp_tb, pts_tb) : mpp_pts)
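MPP runs on a fixed microsecond clock (mpp_tb above), so these two macros rescale between the stream's packet timebase and 1/1000000, passing timestamps through untouched when no timebase is set. A worked sketch with the public av_rescale_q(), using a typical 90 kHz MPEG-TS timebase as the example input:

    #include <libavutil/mathematics.h>

    AVRational tb  = { 1, 90000 };  /* typical MPEG-TS packet timebase */
    int64_t pts    = 90000;         /* == 1 second in that timebase */
    int64_t mpp_ts = av_rescale_q(pts, tb, (AVRational){ 1, 1000000 });
    /* mpp_ts == 1000000, i.e. 1 second on MPP's microsecond clock */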
++
++#define OFFSET(x) offsetof(RKMPPDecContext, x)
++#define VD (AV_OPT_FLAG_DECODING_PARAM | AV_OPT_FLAG_VIDEO_PARAM)
++
++static const AVOption options[] = {
++    { "deint", "Enable IEP (Image Enhancement Processor) for de-interlacing", OFFSET(deint), AV_OPT_TYPE_BOOL, { .i64 = 1 }, 0, 1, VD },
++    { "afbc", "Enable AFBC (Arm Frame Buffer Compression) to save bandwidth", OFFSET(afbc), AV_OPT_TYPE_INT, { .i64 = RKMPP_DEC_AFBC_OFF }, 0, 2, VD, "afbc" },
++    { "off", "Disable AFBC support", 0, AV_OPT_TYPE_CONST, { .i64 = RKMPP_DEC_AFBC_OFF }, 0, 0, VD, "afbc" },
++    { "on", "Enable AFBC support", 0, AV_OPT_TYPE_CONST, { .i64 = RKMPP_DEC_AFBC_ON }, 0, 0, VD, "afbc" },
++    { "rga", "Enable AFBC if capable RGA is available", 0, AV_OPT_TYPE_CONST, { .i64 = RKMPP_DEC_AFBC_ON_RGA }, 0, 0, VD, "afbc" },
++    { "fast_parse", "Enable fast parsing to improve decoding parallelism", OFFSET(fast_parse), AV_OPT_TYPE_BOOL, { .i64 = 1 }, 0, 1, VD },
++    { "buf_mode", "Set the buffer mode for MPP decoder", OFFSET(buf_mode), AV_OPT_TYPE_INT, { .i64 = RKMPP_DEC_HALF_INTERNAL }, 0, 1, VD, "buf_mode" },
++    { "half", "Half internal mode", 0, AV_OPT_TYPE_CONST, { .i64 = RKMPP_DEC_HALF_INTERNAL }, 0, 0, VD, "buf_mode" },
++    { "ext", "Pure external mode", 0, AV_OPT_TYPE_CONST, { .i64 = RKMPP_DEC_PURE_EXTERNAL }, 0, 0, VD, "buf_mode" },
++    { NULL }
++};
++
++static const enum AVPixelFormat rkmpp_dec_pix_fmts[] = {
++    AV_PIX_FMT_NV12,
++    AV_PIX_FMT_NV16,
++    AV_PIX_FMT_NV15,
++    AV_PIX_FMT_NV20,
++    AV_PIX_FMT_DRM_PRIME,
++    AV_PIX_FMT_NONE,
++};
++
++static const AVCodecHWConfigInternal *const rkmpp_dec_hw_configs[] = {
++    &(const AVCodecHWConfigInternal) {
++        .public = {
++            .pix_fmt = AV_PIX_FMT_DRM_PRIME,
++            .methods = AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX |
++                       AV_CODEC_HW_CONFIG_METHOD_HW_FRAMES_CTX |
++                       AV_CODEC_HW_CONFIG_METHOD_INTERNAL,
++            .device_type = AV_HWDEVICE_TYPE_RKMPP,
++        },
++        .hwaccel = NULL,
++    },
++    NULL
++};
++
++#define DEFINE_RKMPP_DECODER(x, X, bsf_name) \
++static const AVClass x##_rkmpp_decoder_class = { \
++    .class_name = #x "_rkmpp_decoder", \
++    .item_name = av_default_item_name, \
++    .option = options, \
++    .version = LIBAVUTIL_VERSION_INT, \
++}; \
++const FFCodec ff_##x##_rkmpp_decoder = { \
++    .p.name = #x "_rkmpp", \
++    CODEC_LONG_NAME("Rockchip MPP (Media Process Platform) " #X " decoder"), \
++    .p.type = AVMEDIA_TYPE_VIDEO, \
++    .p.id = AV_CODEC_ID_##X, \
++    .priv_data_size = sizeof(RKMPPDecContext), \
++    .p.priv_class = &x##_rkmpp_decoder_class, \
++    .init = rkmpp_decode_init, \
++    .close = rkmpp_decode_close, \
++    FF_CODEC_RECEIVE_FRAME_CB(rkmpp_decode_receive_frame), \
++    .flush = rkmpp_decode_flush, \
++    .bsfs = bsf_name, \
++    .p.capabilities = AV_CODEC_CAP_DELAY | AV_CODEC_CAP_AVOID_PROBING | \
++                      AV_CODEC_CAP_HARDWARE, \
++    .caps_internal = FF_CODEC_CAP_NOT_INIT_THREADSAFE | \
++                     FF_CODEC_CAP_SETS_FRAME_PROPS, \
++    .p.pix_fmts = rkmpp_dec_pix_fmts, \
++    .hw_configs = rkmpp_dec_hw_configs, \
++    .p.wrapper_name = "rkmpp", \
++};
++
++#endif /* AVCODEC_RKMPPDEC_H */
+Index: jellyfin-ffmpeg/libavcodec/rkmppenc.c
+===================================================================
+--- /dev/null
++++ jellyfin-ffmpeg/libavcodec/rkmppenc.c
+@@ -0,0 +1,999 @@
++/*
++ * Copyright (c) 2023 Huseyin BIYIK
++ * Copyright (c) 2023 NyanMisaka
++ *
++ * This file is part of FFmpeg.
++ *
++ * FFmpeg is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU Lesser General Public
++ * License as published by the Free Software Foundation; either
++ * version 2.1 of the License, or (at your option) any later version.
++ *
++ * FFmpeg is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with FFmpeg; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
++ */
++
++/**
++ * @file
++ * Rockchip MPP (Media Process Platform) video encoder
++ */
++
++#include "config_components.h"
++#include "rkmppenc.h"
++
++static MppCodingType rkmpp_get_coding_type(AVCodecContext *avctx)
++{
++    switch (avctx->codec_id) {
++    case AV_CODEC_ID_H264: return MPP_VIDEO_CodingAVC;
++    case AV_CODEC_ID_HEVC: return MPP_VIDEO_CodingHEVC;
++    default:               return MPP_VIDEO_CodingUnused;
++    }
++}
++
++static MppFrameFormat rkmpp_get_mpp_fmt(enum AVPixelFormat pix_fmt)
++{
++    switch (pix_fmt) {
++    case AV_PIX_FMT_GRAY8:   return MPP_FMT_YUV400;
++    case AV_PIX_FMT_YUV420P: return MPP_FMT_YUV420P;
++    case AV_PIX_FMT_YUV422P: return MPP_FMT_YUV422P;
++    case AV_PIX_FMT_YUV444P: return MPP_FMT_YUV444P;
++    case AV_PIX_FMT_NV12:    return MPP_FMT_YUV420SP;
++    case AV_PIX_FMT_NV21:    return MPP_FMT_YUV420SP_VU;
++    case AV_PIX_FMT_NV16:    return MPP_FMT_YUV422SP;
++    case AV_PIX_FMT_NV24:    return MPP_FMT_YUV444SP;
++    case AV_PIX_FMT_YUYV422: return MPP_FMT_YUV422_YUYV;
++    case AV_PIX_FMT_YVYU422: return MPP_FMT_YUV422_YVYU;
++    case AV_PIX_FMT_UYVY422: return MPP_FMT_YUV422_UYVY;
++    case AV_PIX_FMT_RGB24:   return MPP_FMT_RGB888;
++    case AV_PIX_FMT_BGR24:   return MPP_FMT_BGR888;
++    case AV_PIX_FMT_RGBA:
++    case AV_PIX_FMT_RGB0:    return MPP_FMT_RGBA8888;
++    case AV_PIX_FMT_BGRA:
++    case AV_PIX_FMT_BGR0:    return MPP_FMT_BGRA8888;
++    case AV_PIX_FMT_ARGB:
++    case AV_PIX_FMT_0RGB:    return MPP_FMT_ARGB8888;
++    case AV_PIX_FMT_ABGR:
++    case AV_PIX_FMT_0BGR:    return MPP_FMT_ABGR8888;
++    default:                 return MPP_FMT_BUTT;
++    }
++}
++
++static uint32_t rkmpp_get_drm_afbc_format(MppFrameFormat mpp_fmt)
++{
++    switch (mpp_fmt & MPP_FRAME_FMT_MASK) {
++    case MPP_FMT_YUV420SP: return DRM_FORMAT_YUV420_8BIT;
++    case MPP_FMT_YUV422SP: return DRM_FORMAT_YUYV;
++    default:               return DRM_FORMAT_INVALID;
++    }
++}
++
++static int get_byte_stride(const AVDRMObjectDescriptor *object,
++                           const AVDRMLayerDescriptor *layer,
++                           int is_rgb, int is_planar,
++                           int *hs, int *vs)
++{
++    const AVDRMPlaneDescriptor *plane0, *plane1;
++    const int is_packed_fmt = is_rgb || (!is_rgb && !is_planar);
++
++    if (!object || !layer || !hs || !vs)
++        return AVERROR(EINVAL);
++
++    plane0 = &layer->planes[0];
++    plane1 = &layer->planes[1];
++
++    *hs = plane0->pitch;
++    *vs = is_packed_fmt ?
++          ALIGN_DOWN(object->size / plane0->pitch, is_rgb ? 1 : 2) :
++          (plane1->offset / plane0->pitch);
++
++    return (*hs > 0 && *vs > 0) ? 0 : AVERROR(EINVAL);
++}
++
++static int get_afbc_byte_stride(const AVPixFmtDescriptor *desc,
++                                int *stride, int reverse)
++{
++    if (!desc || !stride || *stride <= 0)
++        return AVERROR(EINVAL);
++
++    if (desc->nb_components == 1 ||
++        (desc->flags & AV_PIX_FMT_FLAG_RGB) ||
++        (!(desc->flags & AV_PIX_FMT_FLAG_RGB) &&
++         !(desc->flags & AV_PIX_FMT_FLAG_PLANAR)))
++        return 0;
++
++    if (desc->log2_chroma_w == 1 && desc->log2_chroma_h == 1)
++        *stride = reverse ? (*stride * 2 / 3) : (*stride * 3 / 2);
++    else if (desc->log2_chroma_w == 1 && !desc->log2_chroma_h)
++        *stride = reverse ? (*stride / 2) : (*stride * 2);
++    else if (!desc->log2_chroma_w && !desc->log2_chroma_h)
++        *stride = reverse ? (*stride / 3) : (*stride * 3);
++    else
++        return AVERROR(EINVAL);
++
++    return (*stride > 0) ? 0 : AVERROR(EINVAL);
++}
++
++static unsigned get_used_frame_count(MPPEncFrame *list)
++{
++    unsigned count = 0;
++
++    while (list) {
++        if (list->queued == 1 &&
++            (list->frame || list->mpp_frame))
++            ++count;
++        list = list->next;
++    }
++
++    return count;
++}
++
++static void clear_unused_frames(MPPEncFrame *list)
++{
++    while (list) {
++        if (list->queued == 1) {
++            MppFrame mpp_frame = list->mpp_frame;
++            MppBuffer mpp_buf = NULL;
++
++            if (mpp_frame)
++                mpp_buf = mpp_frame_get_buffer(mpp_frame);
++
++            if (mpp_buf &&
++                mpp_buffer_get_index(mpp_buf) < 0) {
++                mpp_buffer_put(mpp_buf);
++
++                mpp_frame_deinit(&list->mpp_frame);
++                list->mpp_frame = NULL;
++
++                av_frame_free(&list->frame);
++                list->queued = 0;
++            }
++        }
++        list = list->next;
++    }
++}
++
++static void clear_frame_list(MPPEncFrame **list)
++{
++    while (*list) {
++        MPPEncFrame *frame = NULL;
++        MppFrame mpp_frame = NULL;
++        MppBuffer mpp_buf = NULL;
++
++        frame = *list;
++        *list = (*list)->next;
++
++        mpp_frame = frame->mpp_frame;
++        if (mpp_frame) {
++            mpp_buf = mpp_frame_get_buffer(mpp_frame);
++            if (mpp_buf &&
++                mpp_buffer_get_index(mpp_buf) >= 0)
++                mpp_buffer_put(mpp_buf);
++
++            mpp_frame_deinit(&frame->mpp_frame);
++            frame->mpp_frame = NULL;
++        }
++
++        av_frame_free(&frame->frame);
++        av_freep(&frame);
++    }
++}
++
++static MPPEncFrame *get_free_frame(MPPEncFrame **list)
++{
++    MPPEncFrame *out = *list;
++
++    for (; out; out = out->next) {
++        if (!out->queued) {
++            out->queued = 1;
++            break;
++        }
++    }
++
++    if (!out) {
++        out = av_mallocz(sizeof(*out));
++        if (!out) {
++            av_log(NULL, AV_LOG_ERROR, "Cannot alloc new output frame\n");
++            return NULL;
++        }
++        out->queued = 1;
++        out->next = *list;
++        *list = out;
++    }
++
++    return out;
++}
++
++static int rkmpp_set_enc_cfg_prep(AVCodecContext *avctx, AVFrame *frame)
++{
++    RKMPPEncContext *r = avctx->priv_data;
++    MppEncCfg cfg = r->mcfg;
++    MppFrameFormat mpp_fmt = r->mpp_fmt;
++    int ret, is_afbc = 0;
++    int hor_stride = 0, ver_stride = 0;
++    const AVPixFmtDescriptor *pix_desc;
++    const AVDRMFrameDescriptor *drm_desc;
++
++    if (r->cfg_init)
++        return 0;
++
++    if (!frame)
++        return AVERROR(EINVAL);
++
++    drm_desc = (AVDRMFrameDescriptor *)frame->data[0];
++    if (drm_desc->objects[0].fd < 0)
++        return AVERROR(ENOMEM);
++
++    pix_desc = av_pix_fmt_desc_get(r->pix_fmt);
++    is_afbc = drm_is_afbc(drm_desc->objects[0].format_modifier);
++    if (!is_afbc) {
++        ret = get_byte_stride(&drm_desc->objects[0],
++                              &drm_desc->layers[0],
++                              (pix_desc->flags & AV_PIX_FMT_FLAG_RGB),
++                              (pix_desc->flags & AV_PIX_FMT_FLAG_PLANAR),
++                              &hor_stride, &ver_stride);
++        if (ret < 0 || !hor_stride || !ver_stride) {
++            av_log(avctx, AV_LOG_ERROR, "Failed to get frame strides\n");
++            return AVERROR(EINVAL);
++        }
++
++        mpp_enc_cfg_set_s32(cfg, "prep:hor_stride", hor_stride);
++        mpp_enc_cfg_set_s32(cfg, "prep:ver_stride", ver_stride);
++    }
++
++    mpp_enc_cfg_set_s32(cfg, "prep:width", avctx->width);
++    mpp_enc_cfg_set_s32(cfg, "prep:height", avctx->height);
++
++    mpp_enc_cfg_set_s32(cfg, "prep:colorspace", avctx->colorspace);
++    mpp_enc_cfg_set_s32(cfg, "prep:colorprim", avctx->color_primaries);
++    mpp_enc_cfg_set_s32(cfg, "prep:colortrc", avctx->color_trc);
++    mpp_enc_cfg_set_s32(cfg, "prep:colorrange", avctx->color_range);
++
++    if (is_afbc) {
++        const AVDRMLayerDescriptor *layer = &drm_desc->layers[0];
++        uint32_t drm_afbc_fmt = rkmpp_get_drm_afbc_format(mpp_fmt);
++
++        if (drm_afbc_fmt != layer->format) {
++            av_log(avctx, AV_LOG_ERROR, "Input format '%s' with AFBC modifier is not supported\n",
++                   av_get_pix_fmt_name(r->pix_fmt));
++            return AVERROR(ENOSYS);
++        }
++        mpp_fmt |= MPP_FRAME_FBC_AFBC_V2;
++    }
++    mpp_enc_cfg_set_s32(cfg, "prep:format", mpp_fmt);
++
++    if ((ret = r->mapi->control(r->mctx, MPP_ENC_SET_CFG, cfg)) != MPP_OK) {
++        av_log(avctx, AV_LOG_ERROR, "Failed to set config with frame: %d\n", ret);
++        return AVERROR_EXTERNAL;
++    }
++
++    r->cfg_init = 1;
++    av_log(avctx, AV_LOG_VERBOSE, "Configured with size: %dx%d | pix_fmt: %s | sw_pix_fmt: %s\n",
++           avctx->width, avctx->height,
++           av_get_pix_fmt_name(avctx->pix_fmt), av_get_pix_fmt_name(r->pix_fmt));
++
++    return 0;
++}
++
++static int rkmpp_set_enc_cfg(AVCodecContext *avctx)
++{
++    RKMPPEncContext *r = avctx->priv_data;
++    MppEncCfg cfg = r->mcfg;
++
++    RK_U32 rc_mode, fps_num, fps_den;
++    MppEncHeaderMode header_mode;
++    MppEncSeiMode sei_mode;
++    int max_bps, min_bps;
++    int qp_init, qp_max, qp_min, qp_max_i, qp_min_i;
++    int ret;
++
++    mpp_enc_cfg_set_s32(cfg, "prep:width", avctx->width);
++    mpp_enc_cfg_set_s32(cfg, "prep:height", avctx->height);
++    mpp_enc_cfg_set_s32(cfg, "prep:hor_stride", FFALIGN(avctx->width, 64));
++    mpp_enc_cfg_set_s32(cfg, "prep:ver_stride", FFALIGN(avctx->height, 64));
++    mpp_enc_cfg_set_s32(cfg, "prep:format", MPP_FMT_YUV420SP);
++    mpp_enc_cfg_set_s32(cfg, "prep:mirroring", 0);
++    mpp_enc_cfg_set_s32(cfg, "prep:rotation", 0);
++    mpp_enc_cfg_set_s32(cfg, "prep:flip", 0);
++
++    if (avctx->framerate.den > 0 && avctx->framerate.num > 0)
++        av_reduce(&fps_num, &fps_den, avctx->framerate.num, avctx->framerate.den, 65535);
++    else
++        av_reduce(&fps_num, &fps_den, avctx->time_base.den, avctx->time_base.num, 65535);
++
++    mpp_enc_cfg_set_s32(cfg, "rc:fps_in_flex", 0);
++    mpp_enc_cfg_set_s32(cfg, "rc:fps_in_num", fps_num);
++    mpp_enc_cfg_set_s32(cfg, "rc:fps_in_denorm", fps_den);
++    mpp_enc_cfg_set_s32(cfg, "rc:fps_out_flex", 0);
++    mpp_enc_cfg_set_s32(cfg, "rc:fps_out_num", fps_num);
++    mpp_enc_cfg_set_s32(cfg, "rc:fps_out_denorm", fps_den);
++
++    mpp_enc_cfg_set_s32(cfg, "rc:gop", FFMAX(avctx->gop_size, 1));
++
++    rc_mode = r->rc_mode;
++    if (rc_mode == MPP_ENC_RC_MODE_BUTT) {
++        if (r->qp_init >= 0)
++            rc_mode = MPP_ENC_RC_MODE_FIXQP;
++        else if (avctx->rc_max_rate > 0)
++            rc_mode = MPP_ENC_RC_MODE_VBR;
++        else
++            rc_mode = MPP_ENC_RC_MODE_CBR;
++    }
++
++    switch (rc_mode) {
++    case MPP_ENC_RC_MODE_VBR:
++        av_log(avctx, AV_LOG_VERBOSE, "Rate Control mode is set to VBR\n"); break;
++    case MPP_ENC_RC_MODE_CBR:
++        av_log(avctx, AV_LOG_VERBOSE, "Rate Control mode is set to CBR\n"); break;
++    case MPP_ENC_RC_MODE_FIXQP:
++        av_log(avctx, AV_LOG_VERBOSE, "Rate Control mode is set to CQP\n"); break;
++    case MPP_ENC_RC_MODE_AVBR:
++        av_log(avctx, AV_LOG_VERBOSE, "Rate Control mode is set to AVBR\n"); break;
++    }
++    mpp_enc_cfg_set_u32(cfg, "rc:mode", rc_mode);
++
++    switch (rc_mode) {
++    case MPP_ENC_RC_MODE_FIXQP:
++        /* do not setup bitrate on FIXQP mode */
++        min_bps = max_bps = avctx->bit_rate;
++        break;
++    case MPP_ENC_RC_MODE_VBR:
++    case MPP_ENC_RC_MODE_AVBR:
++        /* VBR mode has wide bound */
++        max_bps = (avctx->rc_max_rate > 0 && avctx->rc_max_rate >= avctx->bit_rate)
++                  ? avctx->rc_max_rate : (avctx->bit_rate * 17 / 16);
++        min_bps = (avctx->rc_min_rate > 0 && avctx->rc_min_rate <= avctx->bit_rate)
++                  ? avctx->rc_min_rate : (avctx->bit_rate * 1 / 16);
++        break;
++    case MPP_ENC_RC_MODE_CBR:
++    default:
++        /* CBR mode has narrow bound */
++        max_bps = avctx->bit_rate * 17 / 16;
++        min_bps = avctx->bit_rate * 15 / 16;
++        break;
++    }
++    mpp_enc_cfg_set_u32(cfg, "rc:bps_target", avctx->bit_rate);
++    mpp_enc_cfg_set_s32(cfg, "rc:bps_max", max_bps);
++    mpp_enc_cfg_set_s32(cfg, "rc:bps_min", min_bps);
++
++    av_log(avctx, AV_LOG_VERBOSE, "Bitrate Target/Min/Max is set to %"PRId64"/%d/%d\n",
++           avctx->bit_rate, min_bps, max_bps);
++
++    if (avctx->rc_buffer_size > 0 &&
++        (rc_mode == MPP_ENC_RC_MODE_CBR ||
++         rc_mode == MPP_ENC_RC_MODE_VBR ||
++         rc_mode == MPP_ENC_RC_MODE_AVBR)) {
++        int stats_time_in_sec = avctx->rc_buffer_size / max_bps;
++        if (stats_time_in_sec > 0) {
++            mpp_enc_cfg_set_u32(cfg, "rc:stats_time", stats_time_in_sec);
++            av_log(avctx, AV_LOG_VERBOSE, "Stats time is set to %d\n", stats_time_in_sec);
++        }
++    }
++
++    mpp_enc_cfg_set_u32(cfg, "rc:drop_mode", MPP_ENC_RC_DROP_FRM_DISABLED);
++
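The bounds above are derived from the target bitrate whenever the caller does not set rc_max_rate/rc_min_rate: CBR pins the encoder into a narrow window around the target, while VBR/AVBR allows anything between 1/16 and 17/16 of it. For an 8 Mb/s CBR target this works out as follows (illustrative numbers only):

    int64_t bit_rate = 8000000;          /* e.g. -b:v 8M */
    int max_bps = bit_rate * 17 / 16;    /* 8500000 */
    int min_bps = bit_rate * 15 / 16;    /* 7500000 */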
++    switch (avctx->codec_id) {
++    case AV_CODEC_ID_H264:
++    case AV_CODEC_ID_HEVC:
++        {
++            switch (rc_mode) {
++            case MPP_ENC_RC_MODE_FIXQP:
++                qp_init = r->qp_init >= 0 ? r->qp_init : 26;
++                qp_max = qp_min = qp_max_i = qp_min_i = qp_init;
++                mpp_enc_cfg_set_s32(cfg, "rc:qp_ip", 0);
++                break;
++            case MPP_ENC_RC_MODE_CBR:
++            case MPP_ENC_RC_MODE_VBR:
++            case MPP_ENC_RC_MODE_AVBR:
++                qp_max = r->qp_max >= 0 ? r->qp_max : 48;
++                qp_min = FFMIN(r->qp_min >= 0 ? r->qp_min : 0, qp_max);
++                qp_max_i = r->qp_max_i >= 0 ? r->qp_max_i : 48;
++                qp_min_i = FFMIN(r->qp_min_i >= 0 ? r->qp_min_i : 0, qp_max_i);
++                qp_init = FFMIN3(r->qp_init >= 0 ? r->qp_init : 26, qp_max, qp_max_i);
++                mpp_enc_cfg_set_s32(cfg, "rc:qp_ip", 2);
++                break;
++            default:
++                return AVERROR(EINVAL);
++            }
++            mpp_enc_cfg_set_s32(cfg, "rc:qp_init", qp_init);
++            mpp_enc_cfg_set_s32(cfg, "rc:qp_max", qp_max);
++            mpp_enc_cfg_set_s32(cfg, "rc:qp_min", qp_min);
++            mpp_enc_cfg_set_s32(cfg, "rc:qp_max_i", qp_max_i);
++            mpp_enc_cfg_set_s32(cfg, "rc:qp_min_i", qp_min_i);
++        }
++        break;
++    default:
++        return AVERROR(EINVAL);
++    }
++
++    av_log(avctx, AV_LOG_VERBOSE, "QP Init/Max/Min/Max_I/Min_I is set to %d/%d/%d/%d/%d\n",
++           qp_init, qp_max, qp_min, qp_max_i, qp_min_i);
++
++    switch (avctx->codec_id) {
++    case AV_CODEC_ID_H264:
++        {
++            avctx->profile = r->profile;
++            avctx->level = r->level;
++            mpp_enc_cfg_set_s32(cfg, "h264:profile", avctx->profile);
++            mpp_enc_cfg_set_s32(cfg, "h264:level", avctx->level);
++            mpp_enc_cfg_set_s32(cfg, "h264:cabac_en", r->coder);
++            mpp_enc_cfg_set_s32(cfg, "h264:cabac_idc", 0);
++            mpp_enc_cfg_set_s32(cfg, "h264:trans8x8",
++                                (r->dct8x8 && avctx->profile == FF_PROFILE_H264_HIGH));
++
++            switch (avctx->profile) {
++            case FF_PROFILE_H264_BASELINE:
++                av_log(avctx, AV_LOG_VERBOSE, "Profile is set to BASELINE\n"); break;
++            case FF_PROFILE_H264_MAIN:
++                av_log(avctx, AV_LOG_VERBOSE, "Profile is set to MAIN\n"); break;
++            case FF_PROFILE_H264_HIGH:
++                av_log(avctx, AV_LOG_VERBOSE, "Profile is set to HIGH\n");
++                if (r->dct8x8)
++                    av_log(avctx, AV_LOG_VERBOSE, "8x8 Transform is enabled\n");
++                break;
++            }
++            av_log(avctx, AV_LOG_VERBOSE, "Level is set to %d\n", avctx->level);
++            av_log(avctx, AV_LOG_VERBOSE, "Coder is set to %s\n", r->coder ? "CABAC" : "CAVLC");
++        }
++        break;
++    case AV_CODEC_ID_HEVC:
++        {
++            avctx->profile = r->pix_fmt == AV_PIX_FMT_GRAY8
++                             ? FF_PROFILE_HEVC_REXT : FF_PROFILE_HEVC_MAIN;
++            avctx->level = r->level;
++            mpp_enc_cfg_set_s32(cfg, "h265:profile", avctx->profile);
++            mpp_enc_cfg_set_s32(cfg, "h265:level", avctx->level);
++
++            switch (avctx->profile) {
++            case FF_PROFILE_HEVC_MAIN:
++                av_log(avctx, AV_LOG_VERBOSE, "Profile is set to MAIN\n"); break;
++            case FF_PROFILE_HEVC_REXT:
++                av_log(avctx, AV_LOG_VERBOSE, "Profile is set to REXT\n"); break;
++            }
++            av_log(avctx, AV_LOG_VERBOSE, "Level is set to %d\n", avctx->level / 3);
++        }
++        break;
++    default:
++        return AVERROR(EINVAL);
++    }
++
++    if ((ret = r->mapi->control(r->mctx, MPP_ENC_SET_CFG, cfg)) != MPP_OK) {
++        av_log(avctx, AV_LOG_ERROR, "Failed to set config: %d\n", ret);
++        return AVERROR_EXTERNAL;
++    }
++
++    sei_mode = MPP_ENC_SEI_MODE_DISABLE;
++    if ((ret = r->mapi->control(r->mctx, MPP_ENC_SET_SEI_CFG, &sei_mode)) != MPP_OK) {
++        av_log(avctx, AV_LOG_ERROR, "Failed to set SEI config: %d\n", ret);
++        return AVERROR_EXTERNAL;
++    }
++
++    header_mode = MPP_ENC_HEADER_MODE_EACH_IDR;
++    if (avctx->codec_id == AV_CODEC_ID_H264 ||
++        avctx->codec_id == AV_CODEC_ID_HEVC) {
++        if ((ret = r->mapi->control(r->mctx, MPP_ENC_SET_HEADER_MODE, &header_mode)) != MPP_OK) {
++            av_log(avctx, AV_LOG_ERROR, "Failed to set header mode: %d\n", ret);
++            return AVERROR_EXTERNAL;
++        }
++    }
++
++    return 0;
++}
++
++static MPPEncFrame *rkmpp_submit_frame(AVCodecContext *avctx, AVFrame *frame)
++{
++    RKMPPEncContext *r = avctx->priv_data;
++    MppFrame mpp_frame = NULL;
++    MppBuffer mpp_buf = NULL;
++    AVFrame *drm_frame = NULL;
++    const AVDRMFrameDescriptor *drm_desc;
++    const AVDRMLayerDescriptor *layer;
++    const AVDRMPlaneDescriptor *plane0;
++    const AVPixFmtDescriptor *pix_desc;
++    int hor_stride = 0, ver_stride = 0;
++    MppBufferInfo buf_info = { 0 };
++    MppFrameFormat mpp_fmt = r->mpp_fmt;
++    int ret, is_afbc = 0;
++
++    MPPEncFrame *mpp_enc_frame = NULL;
++
++    clear_unused_frames(r->frame_list);
++
++    mpp_enc_frame = get_free_frame(&r->frame_list);
++    if (!mpp_enc_frame)
++        return NULL;
++
++    if ((ret = mpp_frame_init(&mpp_frame)) != MPP_OK) {
++        av_log(avctx, AV_LOG_ERROR, "Failed to init MPP frame: %d\n", ret);
++        goto exit;
++    }
++    mpp_enc_frame->mpp_frame = mpp_frame;
++
++    if (!frame) {
++        av_log(avctx, AV_LOG_DEBUG, "End of stream\n");
++        mpp_frame_set_eos(mpp_frame, 1);
++        return mpp_enc_frame;
++    }
++
++    if (avctx->pix_fmt == AV_PIX_FMT_DRM_PRIME) {
++        drm_frame = frame;
++        mpp_enc_frame->frame = av_frame_clone(drm_frame);
++    } else {
++        drm_frame = av_frame_alloc();
++        if (!drm_frame) {
++            goto exit;
++        }
++        if ((ret = av_hwframe_get_buffer(r->hwframe, drm_frame, 0)) < 0) {
++            av_log(avctx, AV_LOG_ERROR, "Cannot allocate an internal frame: %d\n", ret);
++            goto exit;
++        }
++        if ((ret = av_hwframe_transfer_data(drm_frame, frame, 0)) < 0) {
++            av_log(avctx, AV_LOG_ERROR, "av_hwframe_transfer_data failed: %d\n", ret);
++            goto exit;
++        }
++        if ((ret = av_frame_copy_props(drm_frame, frame)) < 0) {
++            av_log(avctx, AV_LOG_ERROR, "av_frame_copy_props failed: %d\n", ret);
++            goto exit;
++        }
++        mpp_enc_frame->frame = drm_frame;
++    }
++
++    drm_desc = (AVDRMFrameDescriptor *)drm_frame->data[0];
++    if (drm_desc->objects[0].fd < 0)
++        goto exit;
++
++    if ((r->pix_fmt == AV_PIX_FMT_YUV420P ||
++         r->pix_fmt == AV_PIX_FMT_YUV422P) && (drm_frame->width % 2)) {
++        av_log(avctx, AV_LOG_ERROR, "Unsupported width %d, not 2-aligned\n", drm_frame->width);
++        goto exit;
++    }
++
++    mpp_frame_set_pts(mpp_frame, PTS_TO_MPP_PTS(drm_frame->pts, avctx->time_base));
++    mpp_frame_set_width(mpp_frame, drm_frame->width);
++    mpp_frame_set_height(mpp_frame, drm_frame->height);
++
++    mpp_frame_set_colorspace(mpp_frame, avctx->colorspace);
++    mpp_frame_set_color_primaries(mpp_frame, avctx->color_primaries);
++    mpp_frame_set_color_trc(mpp_frame, avctx->color_trc);
++    mpp_frame_set_color_range(mpp_frame, avctx->color_range);
++
++    layer = &drm_desc->layers[0];
++    plane0 = &layer->planes[0];
++
++    is_afbc = drm_is_afbc(drm_desc->objects[0].format_modifier);
++    if (is_afbc) {
++        uint32_t drm_afbc_fmt = rkmpp_get_drm_afbc_format(mpp_fmt);
++        int afbc_offset_y = 0;
++
++        if (drm_afbc_fmt != layer->format) {
++            av_log(avctx, AV_LOG_ERROR, "Input format '%s' with AFBC modifier is not supported\n",
++                   av_get_pix_fmt_name(r->pix_fmt));
++            goto exit;
++        }
++        mpp_fmt |= MPP_FRAME_FBC_AFBC_V2;
++
++        if (drm_frame->crop_top > 0) {
++            afbc_offset_y = drm_frame->crop_top;
++            mpp_frame_set_offset_y(mpp_frame, afbc_offset_y);
++        }
++    }
++    mpp_frame_set_fmt(mpp_frame, mpp_fmt);
++
++    pix_desc = av_pix_fmt_desc_get(r->pix_fmt);
++    if (is_afbc) {
++        hor_stride = plane0->pitch;
++        if ((ret = get_afbc_byte_stride(pix_desc, &hor_stride, 1)) < 0)
++            goto exit;
++
++        if (hor_stride % 16)
++            hor_stride = FFALIGN(avctx->width, 16);
++
++        mpp_frame_set_fbc_hdr_stride(mpp_frame, hor_stride);
++    } else {
++        ret = get_byte_stride(&drm_desc->objects[0],
++                              &drm_desc->layers[0],
++                              (pix_desc->flags & AV_PIX_FMT_FLAG_RGB),
++                              (pix_desc->flags & AV_PIX_FMT_FLAG_PLANAR),
++                              &hor_stride, &ver_stride);
++        if (ret < 0 || !hor_stride || !ver_stride) {
++            av_log(avctx, AV_LOG_ERROR, "Failed to get frame strides\n");
++            goto exit;
++        }
++
++        mpp_frame_set_hor_stride(mpp_frame, hor_stride);
++        mpp_frame_set_ver_stride(mpp_frame, ver_stride);
++    }
++
++    buf_info.type = MPP_BUFFER_TYPE_DRM;
++    buf_info.fd = drm_desc->objects[0].fd;
++    buf_info.size = drm_desc->objects[0].size;
++
++    /* mark buffer as used (idx >= 0) */
++    buf_info.index = buf_info.fd;
++
++    if ((ret = mpp_buffer_import(&mpp_buf, &buf_info)) != MPP_OK) {
++        av_log(avctx, AV_LOG_ERROR, "Failed to import MPP buffer: %d\n", ret);
++        goto exit;
++    }
++    mpp_frame_set_buffer(mpp_frame, mpp_buf);
++    mpp_frame_set_buf_size(mpp_frame, drm_desc->objects[0].size);
++
++    return mpp_enc_frame;
++
++exit:
++    if (drm_frame &&
++        avctx->pix_fmt != AV_PIX_FMT_DRM_PRIME)
++        av_frame_free(&drm_frame);
++
++    return NULL;
++}
++
++static int rkmpp_send_frame(AVCodecContext *avctx, MPPEncFrame *mpp_enc_frame)
++{
++    RKMPPEncContext *r = avctx->priv_data;
++    AVFrame *frame = NULL;
++    MppFrame mpp_frame = NULL;
++    int ret;
++
++    if (mpp_enc_frame) {
++        frame = mpp_enc_frame->frame;
++        mpp_frame = mpp_enc_frame->mpp_frame;
++    }
++
++    if (frame && (ret = rkmpp_set_enc_cfg_prep(avctx, frame)) < 0)
++        goto exit;
++
++    if (frame && frame->pict_type == AV_PICTURE_TYPE_I) {
++        if ((ret = r->mapi->control(r->mctx, MPP_ENC_SET_IDR_FRAME, NULL)) != MPP_OK) {
++            av_log(avctx, AV_LOG_ERROR, "Failed to set IDR frame: %d\n", ret);
++            ret = AVERROR_EXTERNAL;
++            goto exit;
++        }
++    }
++
++    if ((ret = r->mapi->encode_put_frame(r->mctx, mpp_frame)) != MPP_OK) {
++        int log_level = (ret == MPP_NOK) ? AV_LOG_DEBUG : AV_LOG_ERROR;
++        ret = (ret == MPP_NOK) ? AVERROR(EAGAIN) : AVERROR_EXTERNAL;
++        av_log(avctx, log_level, "Failed to put frame to encoder input queue: %d\n", ret);
++        goto exit;
++    } else
++        av_log(avctx, AV_LOG_DEBUG, "Wrote %zu bytes to encoder\n",
++               mpp_frame_get_buf_size(mpp_frame));
++
++exit:
++    return ret;
++}
++
++static void rkmpp_free_packet_buf(void *opaque, uint8_t *data)
++{
++    MppPacket mpp_pkt = opaque;
++    mpp_packet_deinit(&mpp_pkt);
++}
++
++static int rkmpp_get_packet(AVCodecContext *avctx, AVPacket *packet)
++{
++    RKMPPEncContext *r = avctx->priv_data;
++    MppPacket mpp_pkt = NULL;
++    MppMeta mpp_meta = NULL;
++    MppFrame mpp_frame = NULL;
++    MppBuffer mpp_buf = NULL;
++    int ret, key_frame = 0;
++
++    if ((ret = r->mapi->encode_get_packet(r->mctx, &mpp_pkt)) != MPP_OK) {
++        int log_level = (ret == MPP_NOK) ? AV_LOG_DEBUG : AV_LOG_ERROR;
++        ret = (ret == MPP_NOK) ? AVERROR(EAGAIN) : AVERROR_EXTERNAL;
++        av_log(avctx, log_level, "Failed to get packet from encoder output queue: %d\n", ret);
++        return ret;
++    }
++    if (!mpp_pkt)
++        return AVERROR(ENOMEM);
++
++    if (mpp_packet_get_eos(mpp_pkt)) {
++        av_log(avctx, AV_LOG_DEBUG, "Received an EOS packet\n");
++        ret = AVERROR_EOF;
++        goto exit;
++    }
++    av_log(avctx, AV_LOG_DEBUG, "Received a packet\n");
++
++    packet->data = mpp_packet_get_data(mpp_pkt);
++    packet->size = mpp_packet_get_length(mpp_pkt);
++    packet->buf = av_buffer_create(packet->data, packet->size, rkmpp_free_packet_buf,
++                                   mpp_pkt, AV_BUFFER_FLAG_READONLY);
++    if (!packet->buf) {
++        ret = AVERROR(ENOMEM);
++        goto exit;
++    }
++
++    packet->time_base.num = avctx->time_base.num;
++    packet->time_base.den = avctx->time_base.den;
++    packet->pts = MPP_PTS_TO_PTS(mpp_packet_get_pts(mpp_pkt), avctx->time_base);
++    packet->dts = packet->pts;
++
++    mpp_meta = mpp_packet_get_meta(mpp_pkt);
++    if (!mpp_meta || !mpp_packet_has_meta(mpp_pkt)) {
++        av_log(avctx, AV_LOG_ERROR, "Failed to get packet meta\n");
++        ret = AVERROR_EXTERNAL;
++        goto exit;
++    }
++
++    mpp_meta_get_s32(mpp_meta, KEY_OUTPUT_INTRA, &key_frame);
++    if (key_frame)
++        packet->flags |= AV_PKT_FLAG_KEY;
++
++    if ((ret = mpp_meta_get_frame(mpp_meta, KEY_INPUT_FRAME, &mpp_frame)) != MPP_OK) {
++        av_log(avctx, AV_LOG_ERROR, "Failed to get key input frame from packet meta: %d\n", ret);
++        ret = AVERROR_EXTERNAL;
++        goto exit;
++    }
++
++    mpp_buf = mpp_frame_get_buffer(mpp_frame);
++    if (!mpp_buf)
++        return AVERROR(ENOMEM);
++
++    /* mark buffer as unused (idx < 0) */
++    mpp_buffer_set_index(mpp_buf, -1);
++    clear_unused_frames(r->frame_list);
++
++    return 0;
++
++exit:
++    if (mpp_pkt)
++        mpp_packet_deinit(&mpp_pkt);
++
++    return ret;
++}
++
++static int rkmpp_encode_frame(AVCodecContext *avctx, AVPacket *packet,
++                              const AVFrame *frame, int *got_packet)
++{
++    RKMPPEncContext *r = avctx->priv_data;
++    MPPEncFrame *mpp_enc_frame = NULL;
++    int surfaces = r->surfaces;
++    int ret;
++
++    if (get_used_frame_count(r->frame_list) > surfaces)
++        goto get;
++
++    mpp_enc_frame = rkmpp_submit_frame(avctx, (AVFrame *)frame);
++    if (!mpp_enc_frame) {
++        av_log(avctx, AV_LOG_ERROR, "Failed to submit frame on input\n");
++        return AVERROR(ENOMEM);
++    }
++
++send:
++    ret = rkmpp_send_frame(avctx, mpp_enc_frame);
++    if (ret == AVERROR(EAGAIN))
++        goto send;
++    else if (ret)
++        return ret;
++
++get:
++    ret = rkmpp_get_packet(avctx, packet);
++    if (ret == AVERROR_EOF ||
++        ret == AVERROR(EAGAIN))
++        *got_packet = 0;
++    else if (ret)
++        return ret;
++    else
++        *got_packet = 1;
++
++    return 0;
++}
++
++static int rkmpp_encode_close(AVCodecContext *avctx)
++{
++    RKMPPEncContext *r = avctx->priv_data;
++
++    r->cfg_init = 0;
++
++    if (r->mapi) {
++        r->mapi->reset(r->mctx);
++        mpp_destroy(r->mctx);
++        r->mctx = NULL;
++    }
++
++    clear_frame_list(&r->frame_list);
++
++    if (r->hwframe)
++        av_buffer_unref(&r->hwframe);
++    if (r->hwdevice)
++        av_buffer_unref(&r->hwdevice);
++
++    return 0;
++}
++
++static av_cold int init_hwframes_ctx(AVCodecContext *avctx)
++{
++    RKMPPEncContext *r = avctx->priv_data;
++    AVHWFramesContext *hwfc;
++    int ret;
++
++    av_buffer_unref(&r->hwframe);
++    r->hwframe = av_hwframe_ctx_alloc(r->hwdevice);
++    if (!r->hwframe)
++        return AVERROR(ENOMEM);
++
++    hwfc = (AVHWFramesContext *)r->hwframe->data;
++    hwfc->format = AV_PIX_FMT_DRM_PRIME;
++    hwfc->sw_format = avctx->pix_fmt;
++    hwfc->width = avctx->width;
++    hwfc->height = avctx->height;
++
++    ret = av_hwframe_ctx_init(r->hwframe);
++    if (ret < 0) {
++        av_buffer_unref(&r->hwframe);
++        av_log(avctx, AV_LOG_ERROR, "Error creating internal frames_ctx: %d\n", ret);
++        return ret;
++    }
++
++    return 0;
++}
++
++static int rkmpp_encode_init(AVCodecContext *avctx)
++{
++    RKMPPEncContext *r = avctx->priv_data;
++    enum AVPixelFormat pix_fmt = AV_PIX_FMT_NONE;
++    MppFrameFormat mpp_fmt = MPP_FMT_BUTT;
++    MppCodingType coding_type = MPP_VIDEO_CodingUnused;
++    MppPacket mpp_pkt = NULL;
++    int input_timeout = MPP_TIMEOUT_NON_BLOCK;
++    int output_timeout = MPP_TIMEOUT_NON_BLOCK;
++    int ret;
++
++    if ((coding_type = rkmpp_get_coding_type(avctx)) == MPP_VIDEO_CodingUnused) {
++        av_log(avctx, AV_LOG_ERROR, "Unknown codec id: %d\n", avctx->codec_id);
++        return AVERROR(ENOSYS);
++    }
++
++    pix_fmt = avctx->pix_fmt == AV_PIX_FMT_DRM_PRIME ? avctx->sw_pix_fmt : avctx->pix_fmt;
++    mpp_fmt = rkmpp_get_mpp_fmt(pix_fmt) & MPP_FRAME_FMT_MASK;
++
++    if (mpp_fmt == MPP_FMT_BUTT) {
++        av_log(avctx, AV_LOG_ERROR, "Unsupported input pixel format '%s'\n",
++               av_get_pix_fmt_name(pix_fmt));
++        return AVERROR(ENOSYS);
++    }
++    r->pix_fmt = pix_fmt;
++    r->mpp_fmt = mpp_fmt;
++
++    if ((ret = mpp_check_support_format(MPP_CTX_ENC, coding_type)) != MPP_OK) {
++        av_log(avctx, AV_LOG_ERROR, "MPP doesn't support encoding codec '%s' (%d)\n",
++               avcodec_get_name(avctx->codec_id), avctx->codec_id);
++        return AVERROR(ENOSYS);
++    }
++
++    if ((ret = mpp_create(&r->mctx, &r->mapi)) != MPP_OK) {
++        av_log(avctx, AV_LOG_ERROR, "Failed to create MPP context and api: %d\n", ret);
++        ret = AVERROR_EXTERNAL;
++        goto fail;
++    }
++
++    if ((ret = r->mapi->control(r->mctx, MPP_SET_INPUT_TIMEOUT,
++                                (MppParam)&input_timeout)) != MPP_OK) {
++        av_log(avctx, AV_LOG_ERROR, "Failed to set input timeout: %d\n", ret);
++        return AVERROR_EXTERNAL;
++    }
++
++    if ((ret = r->mapi->control(r->mctx, MPP_SET_OUTPUT_TIMEOUT,
++                                (MppParam)&output_timeout)) != MPP_OK) {
++        av_log(avctx, AV_LOG_ERROR, "Failed to set output timeout: %d\n", ret);
++        return AVERROR_EXTERNAL;
++    }
++
++    if ((ret = mpp_init(r->mctx, MPP_CTX_ENC, coding_type)) != MPP_OK) {
++        av_log(avctx, AV_LOG_ERROR, "Failed to init MPP context: %d\n", ret);
++        ret = AVERROR_EXTERNAL;
++        goto fail;
++    }
++
++    if ((ret = mpp_enc_cfg_init(&r->mcfg)) != MPP_OK) {
++        av_log(avctx, AV_LOG_ERROR, "Failed to init encoder config: %d\n", ret);
++        ret = AVERROR_EXTERNAL;
++        goto fail;
++    }
++
++    if ((ret = r->mapi->control(r->mctx, MPP_ENC_GET_CFG, r->mcfg)) != MPP_OK) {
++        av_log(avctx, AV_LOG_ERROR, "Failed to get encoder config: %d\n", ret);
++        ret = AVERROR_EXTERNAL;
++        goto fail;
++    }
++
++    if ((ret = rkmpp_set_enc_cfg(avctx)) < 0)
++        goto fail;
++
++    if (avctx->codec_id == AV_CODEC_ID_H264 ||
++        avctx->codec_id == AV_CODEC_ID_HEVC) {
++        RK_U8 enc_hdr_buf[H26X_HEADER_SIZE];
++        size_t pkt_len = 0;
++        void *pkt_pos = NULL;
++
++        memset(enc_hdr_buf, 0, H26X_HEADER_SIZE);
++
++        if ((ret = mpp_packet_init(&mpp_pkt,
++                                   (void *)enc_hdr_buf,
++                                   H26X_HEADER_SIZE)) != MPP_OK || !mpp_pkt) {
++            av_log(avctx, AV_LOG_ERROR, "Failed to init extra info packet: %d\n", ret);
++            ret = AVERROR_EXTERNAL;
++            goto fail;
++        }
++
++        mpp_packet_set_length(mpp_pkt, 0);
++        if ((ret = r->mapi->control(r->mctx, MPP_ENC_GET_HDR_SYNC, mpp_pkt)) != MPP_OK) {
++            av_log(avctx, AV_LOG_ERROR, "Failed to get header sync: %d\n", ret);
++            ret = AVERROR_EXTERNAL;
++            goto fail;
++        }
++
++        pkt_pos = mpp_packet_get_pos(mpp_pkt);
++        pkt_len = mpp_packet_get_length(mpp_pkt);
++
++        if (avctx->extradata) {
++            av_free(avctx->extradata);
++            avctx->extradata = NULL;
++        }
++        avctx->extradata = av_malloc(pkt_len + AV_INPUT_BUFFER_PADDING_SIZE);
+ AV_INPUT_BUFFER_PADDING_SIZE); ++    if (!avctx->extradata) { ++        ret = AVERROR(ENOMEM); ++        goto fail; ++    } ++    avctx->extradata_size = pkt_len; ++    memcpy(avctx->extradata, pkt_pos, pkt_len); ++    memset(avctx->extradata + pkt_len, 0, AV_INPUT_BUFFER_PADDING_SIZE); ++    mpp_packet_deinit(&mpp_pkt); ++    } ++ ++    if (avctx->pix_fmt == AV_PIX_FMT_DRM_PRIME) ++        return 0; ++ ++    if (avctx->hw_frames_ctx || avctx->hw_device_ctx) { ++        AVBufferRef *device_ref = avctx->hw_device_ctx; ++        AVHWDeviceContext *device_ctx = NULL; ++        AVHWFramesContext *hwfc = NULL; ++ ++        if (avctx->hw_frames_ctx) { ++            hwfc = (AVHWFramesContext *)avctx->hw_frames_ctx->data; ++            device_ref = hwfc->device_ref; ++        } ++        device_ctx = (AVHWDeviceContext *)device_ref->data; ++ ++        if (device_ctx && device_ctx->type == AV_HWDEVICE_TYPE_RKMPP) { ++            r->hwdevice = av_buffer_ref(device_ref); ++            if (r->hwdevice) ++                av_log(avctx, AV_LOG_VERBOSE, "Picked up an existing RKMPP hardware device\n"); ++        } ++    } ++    if (!r->hwdevice) { ++        if ((ret = av_hwdevice_ctx_create(&r->hwdevice, ++                                          AV_HWDEVICE_TYPE_RKMPP, ++                                          NULL, NULL, 0)) < 0) { ++            av_log(avctx, AV_LOG_ERROR, "Failed to create a RKMPP hardware device: %d\n", ret); ++            goto fail; ++        } ++        av_log(avctx, AV_LOG_VERBOSE, "Created a RKMPP hardware device\n"); ++    } ++ ++    ret = init_hwframes_ctx(avctx); ++    if (ret < 0) ++        goto fail; ++ ++    return 0; ++ ++fail: ++    if (mpp_pkt) ++        mpp_packet_deinit(&mpp_pkt); ++ ++    rkmpp_encode_close(avctx); ++    return ret; ++} ++ ++#if CONFIG_H264_RKMPP_ENCODER ++DEFINE_RKMPP_ENCODER(h264, H264) ++#endif ++#if CONFIG_HEVC_RKMPP_ENCODER ++DEFINE_RKMPP_ENCODER(hevc, HEVC) ++#endif +Index: jellyfin-ffmpeg/libavcodec/rkmppenc.h +=================================================================== +--- /dev/null ++++ jellyfin-ffmpeg/libavcodec/rkmppenc.h +@@ -0,0 +1,236 @@ ++/* ++ * Copyright (c) 2023 Huseyin BIYIK ++ * Copyright (c) 2023 NyanMisaka ++ * ++ * This file is part of FFmpeg. ++ * ++ * FFmpeg is free software; you can redistribute it and/or ++ * modify it under the terms of the GNU Lesser General Public ++ * License as published by the Free Software Foundation; either ++ * version 2.1 of the License, or (at your option) any later version. ++ * ++ * FFmpeg is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * Lesser General Public License for more details.
++ * ++ * You should have received a copy of the GNU Lesser General Public ++ * License along with FFmpeg; if not, write to the Free Software ++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA ++ */ ++ ++/** ++ * @file ++ * Rockchip MPP (Media Process Platform) video encoder ++ */ ++ ++#ifndef AVCODEC_RKMPPENC_H ++#define AVCODEC_RKMPPENC_H ++ ++#include <rockchip/rk_mpi.h> ++ ++#include "codec_internal.h" ++#include "encode.h" ++#include "hwconfig.h" ++#include "internal.h" ++ ++#include "libavutil/hwcontext_rkmpp.h" ++#include "libavutil/opt.h" ++#include "libavutil/pixdesc.h" ++ ++#define H26X_HEADER_SIZE 1024 ++#define ALIGN_DOWN(a, b) ((a) & ~((b)-1)) ++ ++typedef struct MPPEncFrame { ++    AVFrame *frame; ++    MppFrame mpp_frame; ++    struct MPPEncFrame *next; ++    int queued; ++} MPPEncFrame; ++ ++typedef struct RKMPPEncContext { ++    AVClass *class; ++ ++    MppApi *mapi; ++    MppCtx mctx; ++ ++    AVBufferRef *hwdevice; ++    AVBufferRef *hwframe; ++ ++    MppEncCfg mcfg; ++    int cfg_init; ++    MppFrameFormat mpp_fmt; ++    enum AVPixelFormat pix_fmt; ++ ++    MPPEncFrame *frame_list; ++ ++    int rc_mode; ++    int qp_init; ++    int qp_max; ++    int qp_min; ++    int qp_max_i; ++    int qp_min_i; ++    int surfaces; ++    int profile; ++    int level; ++    int coder; ++    int dct8x8; ++} RKMPPEncContext; ++ ++static const AVRational mpp_tb = { 1, 1000000 }; ++ ++#define PTS_TO_MPP_PTS(pts, pts_tb) ((pts_tb.num && pts_tb.den) ? \ ++    av_rescale_q(pts, pts_tb, mpp_tb) : pts) ++ ++#define MPP_PTS_TO_PTS(mpp_pts, pts_tb) ((pts_tb.num && pts_tb.den) ? \ ++    av_rescale_q(mpp_pts, mpp_tb, pts_tb) : mpp_pts) ++ ++#define OFFSET(x) offsetof(RKMPPEncContext, x) ++#define VE (AV_OPT_FLAG_ENCODING_PARAM | AV_OPT_FLAG_VIDEO_PARAM) ++ ++#define RKMPP_ENC_COMMON_OPTS \ ++    { "rc_mode", "Set the encoding rate control mode", OFFSET(rc_mode), AV_OPT_TYPE_INT, \ ++            { .i64 = MPP_ENC_RC_MODE_BUTT }, MPP_ENC_RC_MODE_VBR, MPP_ENC_RC_MODE_BUTT, VE, "rc_mode"}, \ ++        { "VBR", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = MPP_ENC_RC_MODE_VBR }, 0, 0, VE, "rc_mode" }, \ ++        { "CBR", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = MPP_ENC_RC_MODE_CBR }, 0, 0, VE, "rc_mode" }, \ ++        { "CQP", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = MPP_ENC_RC_MODE_FIXQP }, 0, 0, VE, "rc_mode" }, \ ++        { "AVBR", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = MPP_ENC_RC_MODE_AVBR }, 0, 0, VE, "rc_mode" }, \ ++    { "qp_init", "Set the initial QP value", OFFSET(qp_init), AV_OPT_TYPE_INT, \ ++            { .i64 = -1 }, -1, 51, VE, "qp_init" }, \ ++    { "qp_max", "Set the max QP value for P and B frame", OFFSET(qp_max), AV_OPT_TYPE_INT, \ ++            { .i64 = -1 }, -1, 51, VE, "qp_max" }, \ ++    { "qp_min", "Set the min QP value for P and B frame", OFFSET(qp_min), AV_OPT_TYPE_INT, \ ++            { .i64 = -1 }, -1, 51, VE, "qp_min" }, \ ++    { "qp_max_i", "Set the max QP value for I frame", OFFSET(qp_max_i), AV_OPT_TYPE_INT, \ ++            { .i64 = -1 }, -1, 51, VE, "qp_max_i" }, \ ++    { "qp_min_i", "Set the min QP value for I frame", OFFSET(qp_min_i), AV_OPT_TYPE_INT, \ ++            { .i64 = -1 }, -1, 51, VE, "qp_min_i" }, \ ++    { "surfaces", "Set the maximum surfaces to be used for encoding", OFFSET(surfaces), AV_OPT_TYPE_INT, \ ++            { .i64 = 4 }, 1, 16, VE, "surfaces" }, ++ ++static const AVOption h264_options[] = { ++    RKMPP_ENC_COMMON_OPTS ++    { "profile", "Set the encoding profile restriction", OFFSET(profile), AV_OPT_TYPE_INT, ++            { .i64 = FF_PROFILE_H264_HIGH }, -1, FF_PROFILE_H264_HIGH, VE, "profile" }, ++        { "baseline", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = FF_PROFILE_H264_BASELINE }, INT_MIN, INT_MAX, VE, "profile" }, ++        { "main", NULL, 0, AV_OPT_TYPE_CONST, { .i64 =
FF_PROFILE_H264_MAIN }, INT_MIN, INT_MAX, VE, "profile" }, ++ { "high", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = FF_PROFILE_H264_HIGH }, INT_MIN, INT_MAX, VE, "profile" }, ++ { "level", "Set the encoding level restriction", OFFSET(level), AV_OPT_TYPE_INT, ++ { .i64 = 0 }, FF_LEVEL_UNKNOWN, 62, VE, "level" }, ++ { "1", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = 10 }, 0, 0, VE, "level" }, ++ { "1.1", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = 11 }, 0, 0, VE, "level" }, ++ { "1.2", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = 12 }, 0, 0, VE, "level" }, ++ { "1.3", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = 13 }, 0, 0, VE, "level" }, ++ { "2", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = 20 }, 0, 0, VE, "level" }, ++ { "2.1", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = 21 }, 0, 0, VE, "level" }, ++ { "2.2", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = 22 }, 0, 0, VE, "level" }, ++ { "3", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = 30 }, 0, 0, VE, "level" }, ++ { "3.1", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = 31 }, 0, 0, VE, "level" }, ++ { "3.2", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = 32 }, 0, 0, VE, "level" }, ++ { "4", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = 40 }, 0, 0, VE, "level" }, ++ { "4.1", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = 41 }, 0, 0, VE, "level" }, ++ { "4.2", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = 42 }, 0, 0, VE, "level" }, ++ { "5", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = 50 }, 0, 0, VE, "level" }, ++ { "5.1", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = 51 }, 0, 0, VE, "level" }, ++ { "5.2", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = 52 }, 0, 0, VE, "level" }, ++ { "6", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = 60 }, 0, 0, VE, "level" }, ++ { "6.1", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = 61 }, 0, 0, VE, "level" }, ++ { "6.2", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = 62 }, 0, 0, VE, "level" }, ++ { "coder", "Set the entropy coder type (from 0 to 1) (default cabac)", OFFSET(coder), AV_OPT_TYPE_INT, ++ { .i64 = 1 }, 0, 1, VE, "coder" }, ++ { "cavlc", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = 0 }, INT_MIN, INT_MAX, VE, "coder" }, ++ { "cabac", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = 1 }, INT_MIN, INT_MAX, VE, "coder" }, ++ { "8x8dct", "Set the high profile 8x8 transform", OFFSET(dct8x8), AV_OPT_TYPE_BOOL, ++ { .i64 = 1 }, 0, 1, VE, "8x8dct" }, ++ { NULL } ++}; ++ ++static const AVOption hevc_options[] = { ++ RKMPP_ENC_COMMON_OPTS ++ { "profile", "Set the encoding profile restriction", OFFSET(profile), AV_OPT_TYPE_INT, ++ { .i64 = FF_PROFILE_HEVC_MAIN }, -1, FF_PROFILE_HEVC_MAIN, VE, "profile" }, ++ { "main", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = FF_PROFILE_HEVC_MAIN }, INT_MIN, INT_MAX, VE, "profile" }, ++ { "level", "Set the encoding level restriction", OFFSET(level), AV_OPT_TYPE_INT, ++ { .i64 = 0 }, FF_LEVEL_UNKNOWN, 186, VE, "level" }, ++ { "1", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = 30 }, 0, 0, VE, "level" }, ++ { "2", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = 60 }, 0, 0, VE, "level" }, ++ { "2.1", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = 63 }, 0, 0, VE, "level" }, ++ { "3", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = 90 }, 0, 0, VE, "level" }, ++ { "3.1", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = 93 }, 0, 0, VE, "level" }, ++ { "4", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = 120 }, 0, 0, VE, "level" }, ++ { "4.1", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = 123 }, 0, 0, VE, "level" }, ++ { "5", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = 150 }, 0, 0, VE, "level" }, ++ { "5.1", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = 153 }, 0, 0, VE, "level" }, ++ { "5.2", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = 156 }, 0, 0, VE, "level" }, ++ { "6", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = 180 }, 0, 0, VE, "level" }, ++ { "6.1", NULL, 0, 
AV_OPT_TYPE_CONST, { .i64 = 183 }, 0, 0, VE, "level" }, ++ { "6.2", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = 186 }, 0, 0, VE, "level" }, ++ { NULL } ++}; ++ ++static const enum AVPixelFormat rkmpp_enc_pix_fmts[] = { ++ AV_PIX_FMT_GRAY8, ++ AV_PIX_FMT_YUV420P, ++ AV_PIX_FMT_YUV422P, ++ AV_PIX_FMT_YUV444P, ++ AV_PIX_FMT_NV12, ++ AV_PIX_FMT_NV21, ++ AV_PIX_FMT_NV16, ++ AV_PIX_FMT_NV24, ++ AV_PIX_FMT_YUYV422, ++ AV_PIX_FMT_YVYU422, ++ AV_PIX_FMT_UYVY422, ++ AV_PIX_FMT_RGB24, ++ AV_PIX_FMT_BGR24, ++ AV_PIX_FMT_RGBA, ++ AV_PIX_FMT_RGB0, ++ AV_PIX_FMT_BGRA, ++ AV_PIX_FMT_BGR0, ++ AV_PIX_FMT_ARGB, ++ AV_PIX_FMT_0RGB, ++ AV_PIX_FMT_ABGR, ++ AV_PIX_FMT_0BGR, ++ AV_PIX_FMT_DRM_PRIME, ++ AV_PIX_FMT_NONE, ++}; ++ ++static const AVCodecHWConfigInternal *const rkmpp_enc_hw_configs[] = { ++ HW_CONFIG_ENCODER_DEVICE(NONE, RKMPP), ++ HW_CONFIG_ENCODER_FRAMES(DRM_PRIME, RKMPP), ++ HW_CONFIG_ENCODER_FRAMES(DRM_PRIME, DRM), ++ NULL, ++}; ++ ++static const FFCodecDefault rkmpp_enc_defaults[] = { ++ { "b", "2M" }, ++ { "g", "250" }, ++ { NULL } ++}; ++ ++#define DEFINE_RKMPP_ENCODER(x, X) \ ++static const AVClass x##_rkmpp_encoder_class = { \ ++ .class_name = #x "_rkmpp_encoder", \ ++ .item_name = av_default_item_name, \ ++ .option = x##_options, \ ++ .version = LIBAVUTIL_VERSION_INT, \ ++}; \ ++const FFCodec ff_##x##_rkmpp_encoder = { \ ++ .p.name = #x "_rkmpp", \ ++ CODEC_LONG_NAME("Rockchip MPP (Media Process Platform) " #X " encoder"), \ ++ .p.type = AVMEDIA_TYPE_VIDEO, \ ++ .p.id = AV_CODEC_ID_##X, \ ++ .priv_data_size = sizeof(RKMPPEncContext), \ ++ .p.priv_class = &x##_rkmpp_encoder_class, \ ++ .init = rkmpp_encode_init, \ ++ .close = rkmpp_encode_close, \ ++ FF_CODEC_ENCODE_CB(rkmpp_encode_frame), \ ++ .p.capabilities = AV_CODEC_CAP_DELAY | AV_CODEC_CAP_HARDWARE, \ ++ .caps_internal = FF_CODEC_CAP_NOT_INIT_THREADSAFE | \ ++ FF_CODEC_CAP_INIT_CLEANUP, \ ++ .p.pix_fmts = rkmpp_enc_pix_fmts, \ ++ .hw_configs = rkmpp_enc_hw_configs, \ ++ .defaults = rkmpp_enc_defaults, \ ++ .p.wrapper_name = "rkmpp", \ ++}; ++ ++#endif /* AVCODEC_RKMPPENC_H */ +Index: jellyfin-ffmpeg/libavfilter/Makefile +=================================================================== +--- jellyfin-ffmpeg.orig/libavfilter/Makefile ++++ jellyfin-ffmpeg/libavfilter/Makefile +@@ -29,6 +29,7 @@ OBJS-$(HAVE_THREADS) + + # subsystems + OBJS-$(CONFIG_QSVVPP) += qsvvpp.o ++OBJS-$(CONFIG_RKRGA) += rkrga_common.o + OBJS-$(CONFIG_SCENE_SAD) += scene_sad.o + OBJS-$(CONFIG_DNN) += dnn_filter_common.o + include $(SRC_PATH)/libavfilter/dnn/Makefile +@@ -402,6 +403,7 @@ OBJS-$(CONFIG_OVERLAY_OPENCL_FILTER) + OBJS-$(CONFIG_OVERLAY_QSV_FILTER) += vf_overlay_qsv.o framesync.o + OBJS-$(CONFIG_OVERLAY_VAAPI_FILTER) += vf_overlay_vaapi.o framesync.o vaapi_vpp.o + OBJS-$(CONFIG_OVERLAY_VULKAN_FILTER) += vf_overlay_vulkan.o vulkan.o vulkan_filter.o ++OBJS-$(CONFIG_OVERLAY_RKRGA_FILTER) += vf_overlay_rkrga.o framesync.o + OBJS-$(CONFIG_OWDENOISE_FILTER) += vf_owdenoise.o + OBJS-$(CONFIG_PAD_FILTER) += vf_pad.o + OBJS-$(CONFIG_PAD_OPENCL_FILTER) += vf_pad_opencl.o opencl.o opencl/pad.o +@@ -451,6 +453,7 @@ OBJS-$(CONFIG_SCALE_OPENCL_FILTER) + OBJS-$(CONFIG_SCALE_QSV_FILTER) += vf_vpp_qsv.o + OBJS-$(CONFIG_SCALE_VAAPI_FILTER) += vf_scale_vaapi.o scale_eval.o vaapi_vpp.o + OBJS-$(CONFIG_SCALE_VULKAN_FILTER) += vf_scale_vulkan.o vulkan.o vulkan_filter.o ++OBJS-$(CONFIG_SCALE_RKRGA_FILTER) += vf_vpp_rkrga.o scale_eval.o + OBJS-$(CONFIG_SCALE2REF_FILTER) += vf_scale.o scale_eval.o + OBJS-$(CONFIG_SCALE2REF_NPP_FILTER) += vf_scale_npp.o scale_eval.o + 
OBJS-$(CONFIG_SCDET_FILTER) += vf_scdet.o +@@ -541,6 +544,7 @@ OBJS-$(CONFIG_VIF_FILTER) + OBJS-$(CONFIG_VIGNETTE_FILTER) += vf_vignette.o + OBJS-$(CONFIG_VMAFMOTION_FILTER) += vf_vmafmotion.o framesync.o + OBJS-$(CONFIG_VPP_QSV_FILTER) += vf_vpp_qsv.o ++OBJS-$(CONFIG_VPP_RKRGA_FILTER) += vf_vpp_rkrga.o scale_eval.o + OBJS-$(CONFIG_VSTACK_FILTER) += vf_stack.o framesync.o + OBJS-$(CONFIG_W3FDIF_FILTER) += vf_w3fdif.o + OBJS-$(CONFIG_WAVEFORM_FILTER) += vf_waveform.o +@@ -633,6 +637,7 @@ SKIPHEADERS-$(CONFIG_LCMS2) + SKIPHEADERS-$(CONFIG_LIBVIDSTAB) += vidstabutils.h + + SKIPHEADERS-$(CONFIG_QSVVPP) += qsvvpp.h ++SKIPHEADERS-$(CONFIG_RKRGA) += rkrga_common.h + SKIPHEADERS-$(CONFIG_OPENCL) += opencl.h + SKIPHEADERS-$(CONFIG_VAAPI) += vaapi_vpp.h + SKIPHEADERS-$(CONFIG_VULKAN) += vulkan.h vulkan_filter.h +Index: jellyfin-ffmpeg/libavfilter/allfilters.c +=================================================================== +--- jellyfin-ffmpeg.orig/libavfilter/allfilters.c ++++ jellyfin-ffmpeg/libavfilter/allfilters.c +@@ -379,6 +379,7 @@ extern const AVFilter ff_vf_overlay_qsv; + extern const AVFilter ff_vf_overlay_vaapi; + extern const AVFilter ff_vf_overlay_vulkan; + extern const AVFilter ff_vf_overlay_cuda; ++extern const AVFilter ff_vf_overlay_rkrga; + extern const AVFilter ff_vf_owdenoise; + extern const AVFilter ff_vf_pad; + extern const AVFilter ff_vf_pad_opencl; +@@ -424,6 +425,7 @@ extern const AVFilter ff_vf_scale_opencl + extern const AVFilter ff_vf_scale_qsv; + extern const AVFilter ff_vf_scale_vaapi; + extern const AVFilter ff_vf_scale_vulkan; ++extern const AVFilter ff_vf_scale_rkrga; + extern const AVFilter ff_vf_scale2ref; + extern const AVFilter ff_vf_scale2ref_npp; + extern const AVFilter ff_vf_scdet; +@@ -509,6 +511,7 @@ extern const AVFilter ff_vf_vif; + extern const AVFilter ff_vf_vignette; + extern const AVFilter ff_vf_vmafmotion; + extern const AVFilter ff_vf_vpp_qsv; ++extern const AVFilter ff_vf_vpp_rkrga; + extern const AVFilter ff_vf_vstack; + extern const AVFilter ff_vf_w3fdif; + extern const AVFilter ff_vf_waveform; +Index: jellyfin-ffmpeg/libavfilter/rkrga_common.c +=================================================================== +--- /dev/null ++++ jellyfin-ffmpeg/libavfilter/rkrga_common.c +@@ -0,0 +1,1234 @@ ++/* ++ * Copyright (c) 2023 NyanMisaka ++ * ++ * This file is part of FFmpeg. ++ * ++ * FFmpeg is free software; you can redistribute it and/or ++ * modify it under the terms of the GNU Lesser General Public ++ * License as published by the Free Software Foundation; either ++ * version 2.1 of the License, or (at your option) any later version. ++ * ++ * FFmpeg is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * Lesser General Public License for more details. 
++ * ++ * You should have received a copy of the GNU Lesser General Public ++ * License along with FFmpeg; if not, write to the Free Software ++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA ++ */ ++ ++/** ++ * @file ++ * Rockchip RGA (2D Raster Graphic Acceleration) base function ++ */ ++ ++#include "libavutil/common.h" ++#include "libavutil/pixdesc.h" ++ ++#include "internal.h" ++#include "video.h" ++ ++#include "rkrga_common.h" ++ ++typedef struct RGAAsyncFrame { ++ RGAFrame *src; ++ RGAFrame *dst; ++ RGAFrame *pat; ++} RGAAsyncFrame; ++ ++typedef struct RGAFormatMap { ++ enum AVPixelFormat pix_fmt; ++ enum _Rga_SURF_FORMAT rga_fmt; ++} RGAFormatMap; ++ ++#define YUV_FORMATS \ ++ { AV_PIX_FMT_GRAY8, RK_FORMAT_YCbCr_400 }, /* RGA2 only */ \ ++ { AV_PIX_FMT_YUV420P, RK_FORMAT_YCbCr_420_P }, /* RGA2 only */ \ ++ { AV_PIX_FMT_YUV422P, RK_FORMAT_YCbCr_422_P }, /* RGA2 only */ \ ++ { AV_PIX_FMT_NV12, RK_FORMAT_YCbCr_420_SP }, \ ++ { AV_PIX_FMT_NV21, RK_FORMAT_YCrCb_420_SP }, \ ++ { AV_PIX_FMT_NV16, RK_FORMAT_YCbCr_422_SP }, \ ++ { AV_PIX_FMT_P010, RK_FORMAT_YCbCr_420_SP_10B }, /* RGA3 only */ \ ++ { AV_PIX_FMT_P210, RK_FORMAT_YCbCr_422_SP_10B }, /* RGA3 only */ \ ++ { AV_PIX_FMT_NV15, RK_FORMAT_YCbCr_420_SP_10B }, /* RGA2 only input, aka P010 compact */ \ ++ { AV_PIX_FMT_NV20, RK_FORMAT_YCbCr_422_SP_10B }, /* RGA2 only input, aka P210 compact */ \ ++ { AV_PIX_FMT_YUYV422, RK_FORMAT_YUYV_422 }, \ ++ { AV_PIX_FMT_YVYU422, RK_FORMAT_YVYU_422 }, \ ++ { AV_PIX_FMT_UYVY422, RK_FORMAT_UYVY_422 }, ++ ++#define RGB_FORMATS \ ++ { AV_PIX_FMT_RGB555LE, RK_FORMAT_BGRA_5551 }, /* RGA2 only */ \ ++ { AV_PIX_FMT_BGR555LE, RK_FORMAT_RGBA_5551 }, /* RGA2 only */ \ ++ { AV_PIX_FMT_RGB565LE, RK_FORMAT_BGR_565 }, \ ++ { AV_PIX_FMT_BGR565LE, RK_FORMAT_RGB_565 }, \ ++ { AV_PIX_FMT_RGB24, RK_FORMAT_RGB_888 }, \ ++ { AV_PIX_FMT_BGR24, RK_FORMAT_BGR_888 }, \ ++ { AV_PIX_FMT_RGBA, RK_FORMAT_RGBA_8888 }, \ ++ { AV_PIX_FMT_RGB0, RK_FORMAT_RGBA_8888 }, /* RK_FORMAT_RGBX_8888 triggers RGA2 on multicore RGA */ \ ++ { AV_PIX_FMT_BGRA, RK_FORMAT_BGRA_8888 }, \ ++ { AV_PIX_FMT_BGR0, RK_FORMAT_BGRA_8888 }, /* RK_FORMAT_BGRX_8888 triggers RGA2 on multicore RGA */ \ ++ { AV_PIX_FMT_ARGB, RK_FORMAT_ARGB_8888 }, /* RGA3 only input */ \ ++ { AV_PIX_FMT_0RGB, RK_FORMAT_ARGB_8888 }, /* RGA3 only input */ \ ++ { AV_PIX_FMT_ABGR, RK_FORMAT_ABGR_8888 }, /* RGA3 only input */ \ ++ { AV_PIX_FMT_0BGR, RK_FORMAT_ABGR_8888 }, /* RGA3 only input */ ++ ++static const RGAFormatMap supported_formats_main[] = { ++ YUV_FORMATS ++ RGB_FORMATS ++}; ++ ++static const RGAFormatMap supported_formats_overlay[] = { ++ RGB_FORMATS ++}; ++#undef YUV_FORMATS ++#undef RGB_FORMATS ++ ++static int map_av_to_rga_format(enum AVPixelFormat in_format, ++ enum _Rga_SURF_FORMAT *out_format, int is_overlay) ++{ ++ int i; ++ ++ if (is_overlay) ++ goto overlay; ++ ++ for (i = 0; i < FF_ARRAY_ELEMS(supported_formats_main); i++) { ++ if (supported_formats_main[i].pix_fmt == in_format) { ++ if (out_format) ++ *out_format = supported_formats_main[i].rga_fmt; ++ return 1; ++ } ++ } ++ return 0; ++ ++overlay: ++ for (i = 0; i < FF_ARRAY_ELEMS(supported_formats_overlay); i++) { ++ if (supported_formats_overlay[i].pix_fmt == in_format) { ++ if (out_format) ++ *out_format = supported_formats_overlay[i].rga_fmt; ++ return 1; ++ } ++ } ++ return 0; ++} ++ ++static int get_pixel_stride(const AVDRMObjectDescriptor *object, ++ const AVDRMLayerDescriptor *layer, ++ int is_rgb, int is_planar, ++ float bytes_pp, int *ws, int *hs) ++{ ++ const 
AVDRMPlaneDescriptor *plane0, *plane1; ++ const int is_packed_fmt = is_rgb || (!is_rgb && !is_planar); ++ ++ if (!object || !layer || !ws || !hs || bytes_pp <= 0) ++ return AVERROR(EINVAL); ++ ++ plane0 = &layer->planes[0]; ++ plane1 = &layer->planes[1]; ++ ++ *ws = is_packed_fmt ? ++ (plane0->pitch / bytes_pp) : ++ plane0->pitch; ++ *hs = is_packed_fmt ? ++ ALIGN_DOWN(object->size / plane0->pitch, is_rgb ? 1 : 2) : ++ (plane1->offset / plane0->pitch); ++ ++ return (*ws > 0 && *hs > 0) ? 0 : AVERROR(EINVAL); ++} ++ ++static int get_afbc_pixel_stride(float bytes_pp, int *stride, int reverse) ++{ ++ if (!stride || *stride <= 0 || bytes_pp <= 0) ++ return AVERROR(EINVAL); ++ ++ *stride = reverse ? (*stride / bytes_pp) : (*stride * bytes_pp); ++ ++ return (*stride > 0) ? 0 : AVERROR(EINVAL); ++} ++ ++/* Canonical formats: https://dri.freedesktop.org/docs/drm/gpu/afbc.html */ ++static uint32_t get_drm_afbc_format(enum AVPixelFormat pix_fmt) ++{ ++ switch (pix_fmt) { ++ case AV_PIX_FMT_NV12: return DRM_FORMAT_YUV420_8BIT; ++ case AV_PIX_FMT_NV15: return DRM_FORMAT_YUV420_10BIT; ++ case AV_PIX_FMT_NV16: return DRM_FORMAT_YUYV; ++ case AV_PIX_FMT_NV20: return DRM_FORMAT_Y210; ++ case AV_PIX_FMT_RGB565LE: return DRM_FORMAT_RGB565; ++ case AV_PIX_FMT_BGR565LE: return DRM_FORMAT_BGR565; ++ case AV_PIX_FMT_RGB24: return DRM_FORMAT_RGB888; ++ case AV_PIX_FMT_BGR24: return DRM_FORMAT_BGR888; ++ case AV_PIX_FMT_RGBA: return DRM_FORMAT_ABGR8888; ++ case AV_PIX_FMT_RGB0: return DRM_FORMAT_XBGR8888; ++ case AV_PIX_FMT_BGRA: return DRM_FORMAT_ARGB8888; ++ case AV_PIX_FMT_BGR0: return DRM_FORMAT_XRGB8888; ++ default: return DRM_FORMAT_INVALID; ++ } ++} ++ ++static int is_pixel_stride_rga3_compat(int ws, int hs, ++ enum _Rga_SURF_FORMAT fmt) ++{ ++ switch (fmt) { ++ case RK_FORMAT_YCbCr_420_SP: ++ case RK_FORMAT_YCrCb_420_SP: ++ case RK_FORMAT_YCbCr_422_SP: return !(ws % 16) && !(hs % 2); ++ case RK_FORMAT_YCbCr_420_SP_10B: ++ case RK_FORMAT_YCbCr_422_SP_10B: return !(ws % 64) && !(hs % 2); ++ case RK_FORMAT_YUYV_422: ++ case RK_FORMAT_YVYU_422: ++ case RK_FORMAT_UYVY_422: return !(ws % 8) && !(hs % 2); ++ case RK_FORMAT_RGB_565: ++ case RK_FORMAT_BGR_565: return !(ws % 8); ++ case RK_FORMAT_RGB_888: ++ case RK_FORMAT_BGR_888: return !(ws % 16); ++ case RK_FORMAT_RGBA_8888: ++ case RK_FORMAT_BGRA_8888: ++ case RK_FORMAT_ARGB_8888: ++ case RK_FORMAT_ABGR_8888: return !(ws % 4); ++ default: return 0; ++ } ++} ++ ++static void clear_unused_frames(RGAFrame *list) ++{ ++ while (list) { ++ if (list->queued == 1 && !list->locked) { ++ av_frame_free(&list->frame); ++ list->queued = 0; ++ } ++ list = list->next; ++ } ++} ++ ++static void clear_frame_list(RGAFrame **list) ++{ ++ while (*list) { ++ RGAFrame *frame = NULL; ++ ++ frame = *list; ++ *list = (*list)->next; ++ av_frame_free(&frame->frame); ++ av_freep(&frame); ++ } ++} ++ ++static RGAFrame *get_free_frame(RGAFrame **list) ++{ ++ RGAFrame *out = *list; ++ ++ for (; out; out = out->next) { ++ if (!out->queued) { ++ out->queued = 1; ++ break; ++ } ++ } ++ ++ if (!out) { ++ out = av_mallocz(sizeof(*out)); ++ if (!out) { ++ av_log(NULL, AV_LOG_ERROR, "Cannot alloc new output frame\n"); ++ return NULL; ++ } ++ out->queued = 1; ++ out->next = *list; ++ *list = out; ++ } ++ ++ return out; ++} ++ ++static void set_colorspace_info(RGAFrameInfo *in_info, const AVFrame *in, ++ RGAFrameInfo *out_info, AVFrame *out, ++ int *color_space_mode) ++{ ++ if (!in_info || !out_info || !in || !out || !color_space_mode) ++ return; ++ ++ *color_space_mode = 0; ++ ++ /* rgb2yuv */ ++ 
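/* Only the BT.709 and BT.601 matrices are handled below; any other combination leaves color_space_mode at its default of 0. */ ++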
if ((in_info->pix_desc->flags & AV_PIX_FMT_FLAG_RGB) && ++ !(out_info->pix_desc->flags & AV_PIX_FMT_FLAG_RGB)) { ++ /* rgb full -> yuv full/limit */ ++ if (in->color_range == AVCOL_RANGE_JPEG) { ++ switch (in->colorspace) { ++ case AVCOL_SPC_BT709: ++ out->colorspace = AVCOL_SPC_BT709; ++ *color_space_mode = 0xb << 8; /* rgb2yuv_709_limit */ ++ break; ++ case AVCOL_SPC_BT470BG: ++ out->colorspace = AVCOL_SPC_BT470BG; ++ *color_space_mode = 2 << 2; /* IM_RGB_TO_YUV_BT601_LIMIT */ ++ break; ++ } ++ } ++ if (*color_space_mode) { ++ out->color_trc = AVCOL_TRC_UNSPECIFIED; ++ out->color_primaries = AVCOL_PRI_UNSPECIFIED; ++ out->color_range = AVCOL_RANGE_MPEG; ++ } ++ } ++ ++ /* yuv2rgb */ ++ if (!(in_info->pix_desc->flags & AV_PIX_FMT_FLAG_RGB) && ++ (out_info->pix_desc->flags & AV_PIX_FMT_FLAG_RGB)) { ++ /* yuv full/limit -> rgb full */ ++ switch (in->color_range) { ++ case AVCOL_RANGE_MPEG: ++ if (in->colorspace == AVCOL_SPC_BT709) { ++ out->colorspace = AVCOL_SPC_BT709; ++ *color_space_mode = 3 << 0; /* IM_YUV_TO_RGB_BT709_LIMIT */ ++ } ++ if (in->colorspace == AVCOL_SPC_BT470BG) { ++ out->colorspace = AVCOL_SPC_BT470BG; ++ *color_space_mode = 1 << 0; /* IM_YUV_TO_RGB_BT601_LIMIT */ ++ } ++ break; ++ case AVCOL_RANGE_JPEG: ++#if 0 ++ if (in->colorspace == AVCOL_SPC_BT709) { ++ out->colorspace = AVCOL_SPC_BT709; ++ *color_space_mode = 0xc << 8; /* yuv2rgb_709_full */ ++ } ++#endif ++ if (in->colorspace == AVCOL_SPC_BT470BG) { ++ out->colorspace = AVCOL_SPC_BT470BG; ++ *color_space_mode = 2 << 0; /* IM_YUV_TO_RGB_BT601_FULL */ ++ } ++ break; ++ } ++ if (*color_space_mode) { ++ out->color_trc = AVCOL_TRC_UNSPECIFIED; ++ out->color_primaries = AVCOL_PRI_UNSPECIFIED; ++ out->color_range = AVCOL_RANGE_JPEG; ++ } ++ } ++} ++ ++static int verify_rga_frame_info_io_dynamic(AVFilterContext *avctx, ++ RGAFrameInfo *in, RGAFrameInfo *out) ++{ ++ RKRGAContext *r = avctx->priv; ++ ++ if (!in || !out) ++ return AVERROR(EINVAL); ++ ++ if (r->is_rga2_used && !r->has_rga2) { ++ av_log(avctx, AV_LOG_ERROR, "RGA2 is requested but not available\n"); ++ return AVERROR(ENOSYS); ++ } ++ if (r->is_rga2_used && ++ (in->pix_fmt == AV_PIX_FMT_P010 || ++ out->pix_fmt == AV_PIX_FMT_P010)) { ++ av_log(avctx, AV_LOG_ERROR, "'%s' is not supported if RGA2 is requested\n", ++ av_get_pix_fmt_name(AV_PIX_FMT_P010)); ++ return AVERROR(ENOSYS); ++ } ++ if (r->is_rga2_used && ++ (in->pix_fmt == AV_PIX_FMT_P210 || ++ out->pix_fmt == AV_PIX_FMT_P210)) { ++ av_log(avctx, AV_LOG_ERROR, "'%s' is not supported if RGA2 is requested\n", ++ av_get_pix_fmt_name(AV_PIX_FMT_P210)); ++ return AVERROR(ENOSYS); ++ } ++ if (r->is_rga2_used && ++ (out->pix_fmt == AV_PIX_FMT_NV15 || ++ out->pix_fmt == AV_PIX_FMT_NV20)) { ++ av_log(avctx, AV_LOG_ERROR, "'%s' as output is not supported if RGA2 is requested\n", ++ av_get_pix_fmt_name(out->pix_fmt)); ++ return AVERROR(ENOSYS); ++ } ++ if (r->is_rga2_used && in->crop && in->pix_desc->comp[0].depth >= 10) { ++ av_log(avctx, AV_LOG_ERROR, "Cropping 10-bit '%s' input is not supported if RGA2 is requested\n", ++ av_get_pix_fmt_name(in->pix_fmt)); ++ return AVERROR(ENOSYS); ++ } ++ if (r->is_rga2_used && ++ (out->act_w > 4096 || out->act_h > 4096)) { ++ av_log(avctx, AV_LOG_ERROR, "Max supported output size of RGA2 is 4096x4096\n"); ++ return AVERROR(EINVAL); ++ } ++ ++ return 0; ++} ++ ++static RGAFrame *submit_frame(RKRGAContext *r, AVFilterLink *inlink, ++ AVFrame *picref, int do_overlay, int pat_preproc) ++{ ++ RGAFrame *rga_frame; ++ AVFilterContext *ctx = inlink->dst; ++ rga_info_t info = { .mmuFlag = 
1, }; ++    int nb_link = FF_INLINK_IDX(inlink); ++    RGAFrameInfo *in_info = &r->in_rga_frame_infos[nb_link]; ++    RGAFrameInfo *out_info = &r->out_rga_frame_info; ++    int w_stride = 0, h_stride = 0; ++    const AVDRMFrameDescriptor *desc; ++    const AVDRMLayerDescriptor *layer; ++    const AVDRMPlaneDescriptor *plane0; ++    RGAFrame **frame_list = NULL; ++    int ret, is_afbc = 0; ++ ++    if (pat_preproc && !nb_link) ++        return NULL; ++ ++    frame_list = nb_link ? ++        (pat_preproc ? &r->pat_preproc_frame_list : &r->pat_frame_list) : &r->src_frame_list; ++ ++    clear_unused_frames(*frame_list); ++ ++    rga_frame = get_free_frame(frame_list); ++    if (!rga_frame) ++        return NULL; ++ ++    if (picref->format != AV_PIX_FMT_DRM_PRIME) { ++        av_log(ctx, AV_LOG_ERROR, "RGA received a frame that is not DRM_PRIME\n"); ++        return NULL; ++    } ++    rga_frame->frame = av_frame_clone(picref); ++ ++    desc = (AVDRMFrameDescriptor *)rga_frame->frame->data[0]; ++    if (desc->objects[0].fd < 0) ++        return NULL; ++ ++    is_afbc = drm_is_afbc(desc->objects[0].format_modifier); ++    if (!is_afbc) { ++        ret = get_pixel_stride(&desc->objects[0], ++                               &desc->layers[0], ++                               (in_info->pix_desc->flags & AV_PIX_FMT_FLAG_RGB), ++                               (in_info->pix_desc->flags & AV_PIX_FMT_FLAG_PLANAR), ++                               in_info->bytes_pp, &w_stride, &h_stride); ++        if (ret < 0 || !w_stride || !h_stride) { ++            av_log(ctx, AV_LOG_ERROR, "Failed to get frame strides\n"); ++            return NULL; ++        } ++    } ++ ++    info.fd = desc->objects[0].fd; ++    info.format = in_info->rga_fmt; ++    info.in_fence_fd = -1; ++    info.out_fence_fd = -1; ++ ++    if (in_info->uncompact_10b_msb) ++        info.is_10b_compact = info.is_10b_endian = 1; ++ ++    if (!nb_link) { ++        info.rotation = in_info->rotate_mode; ++        info.blend = (do_overlay && !pat_preproc) ? in_info->blend_mode : 0; ++    } ++ ++    if (is_afbc && (r->is_rga2_used || out_info->scheduler_core == 0x4)) { ++        av_log(ctx, AV_LOG_ERROR, "Input format '%s' with AFBC modifier is not supported by RGA2\n", ++               av_get_pix_fmt_name(in_info->pix_fmt)); ++        return NULL; ++    } ++ ++    /* verify input pixel strides */ ++    if (out_info->scheduler_core > 0 && ++        out_info->scheduler_core == (out_info->scheduler_core & 0x3)) { ++        if (!is_afbc && !is_pixel_stride_rga3_compat(w_stride, h_stride, in_info->rga_fmt)) { ++            r->is_rga2_used = 1; ++            av_log(ctx, AV_LOG_WARNING, "Input pixel stride (%dx%d) format '%s' is not supported by RGA3\n", ++                   w_stride, h_stride, av_get_pix_fmt_name(in_info->pix_fmt)); ++        } ++ ++        if ((ret = verify_rga_frame_info_io_dynamic(ctx, in_info, out_info)) < 0) ++            return NULL; ++ ++        if (r->is_rga2_used) ++            out_info->scheduler_core = 0x4; ++    } ++ ++    if (pat_preproc) { ++        RGAFrameInfo *in0_info = &r->in_rga_frame_infos[0]; ++        rga_set_rect(&info.rect, 0, 0, ++                     FFMIN((in0_info->act_w - in_info->overlay_x), in_info->act_w), ++                     FFMIN((in0_info->act_h - in_info->overlay_y), in_info->act_h), ++                     w_stride, h_stride, in_info->rga_fmt); ++    } else ++        rga_set_rect(&info.rect, in_info->act_x, in_info->act_y, ++                     in_info->act_w, in_info->act_h, ++                     w_stride, h_stride, in_info->rga_fmt); ++ ++    if (is_afbc) { ++        int afbc_offset_y = 0; ++        uint32_t drm_afbc_fmt = get_drm_afbc_format(in_info->pix_fmt); ++ ++        if (rga_frame->frame->crop_top > 0) { ++            afbc_offset_y = rga_frame->frame->crop_top; ++            info.rect.yoffset += afbc_offset_y; ++        } ++ ++        layer = &desc->layers[0]; ++        plane0 = &layer->planes[0]; ++        if (drm_afbc_fmt == layer->format) { ++            info.rect.wstride = plane0->pitch; ++            if ((ret = get_afbc_pixel_stride(in_info->bytes_pp, &info.rect.wstride, 1)) < 0) ++                return NULL; ++ ++            if (info.rect.wstride % RK_RGA_AFBC_STRIDE_ALIGN) ++
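/* The pitch-derived wstride is not 16-aligned; fall back to aligning the link width to RK_RGA_AFBC_STRIDE_ALIGN. */ ++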
info.rect.wstride = FFALIGN(inlink->w, RK_RGA_AFBC_STRIDE_ALIGN); ++ ++ info.rect.hstride = FFALIGN(inlink->h + afbc_offset_y, RK_RGA_AFBC_STRIDE_ALIGN); ++ } else { ++ av_log(ctx, AV_LOG_ERROR, "Input format '%s' with AFBC modifier is not supported\n", ++ av_get_pix_fmt_name(in_info->pix_fmt)); ++ return NULL; ++ } ++ ++ info.rd_mode = 1 << 1; /* IM_FBC_MODE */ ++ } ++ ++ rga_frame->info = info; ++ ++ return rga_frame; ++} ++ ++static RGAFrame *query_frame(RKRGAContext *r, AVFilterLink *outlink, ++ const AVFrame *in, int pat_preproc) ++{ ++ AVFilterContext *ctx = outlink->src; ++ AVFilterLink *inlink = ctx->inputs[0]; ++ RGAFrame *out_frame; ++ rga_info_t info = { .mmuFlag = 1, }; ++ RGAFrameInfo *in0_info = &r->in_rga_frame_infos[0]; ++ RGAFrameInfo *in1_info = ctx->nb_inputs > 1 ? &r->in_rga_frame_infos[1] : NULL; ++ RGAFrameInfo *out_info = pat_preproc ? in1_info : &r->out_rga_frame_info; ++ AVBufferRef *hw_frame_ctx = pat_preproc ? r->pat_preproc_hwframes_ctx : outlink->hw_frames_ctx; ++ int w_stride = 0, h_stride = 0; ++ AVDRMFrameDescriptor *desc; ++ AVDRMLayerDescriptor *layer; ++ RGAFrame **frame_list = NULL; ++ int ret, is_afbc = 0; ++ ++ if (!out_info || !hw_frame_ctx) ++ return NULL; ++ ++ frame_list = pat_preproc ? &r->pat_frame_list : &r->dst_frame_list; ++ ++ clear_unused_frames(*frame_list); ++ ++ out_frame = get_free_frame(frame_list); ++ if (!out_frame) ++ return NULL; ++ ++ out_frame->frame = av_frame_alloc(); ++ if (!out_frame->frame) ++ return NULL; ++ ++ if (in && (ret = av_frame_copy_props(out_frame->frame, in)) < 0) { ++ av_log(ctx, AV_LOG_ERROR, "Failed to copy metadata fields from in to out: %d\n", ret); ++ goto fail; ++ } ++ out_frame->frame->crop_top = 0; ++ ++ if ((ret = av_hwframe_get_buffer(hw_frame_ctx, out_frame->frame, 0)) < 0) { ++ av_log(ctx, AV_LOG_ERROR, "Cannot allocate an internal frame: %d\n", ret); ++ goto fail; ++ } ++ ++ desc = (AVDRMFrameDescriptor *)out_frame->frame->data[0]; ++ if (desc->objects[0].fd < 0) ++ goto fail; ++ ++ if (r->is_rga2_used || out_info->scheduler_core == 0x4) { ++ if (pat_preproc && (info.rect.width > 4096 || info.rect.height > 4096)) { ++ av_log(ctx, AV_LOG_ERROR, "Max supported output size of RGA2 is 4096x4096\n"); ++ goto fail; ++ } ++ if (r->afbc_out && !pat_preproc) { ++ av_log(ctx, AV_LOG_WARNING, "Output format '%s' with AFBC modifier is not supported by RGA2\n", ++ av_get_pix_fmt_name(out_info->pix_fmt)); ++ r->afbc_out = 0; ++ } ++ } ++ ++ is_afbc = r->afbc_out && !pat_preproc; ++ ret = get_pixel_stride(&desc->objects[0], ++ &desc->layers[0], ++ (out_info->pix_desc->flags & AV_PIX_FMT_FLAG_RGB), ++ (out_info->pix_desc->flags & AV_PIX_FMT_FLAG_PLANAR), ++ out_info->bytes_pp, &w_stride, &h_stride); ++ if (!is_afbc && (ret < 0 || !w_stride || !h_stride)) { ++ av_log(ctx, AV_LOG_ERROR, "Failed to get frame strides\n"); ++ goto fail; ++ } ++ ++ info.fd = desc->objects[0].fd; ++ info.format = out_info->rga_fmt; ++ info.core = out_info->scheduler_core; ++ info.in_fence_fd = -1; ++ info.out_fence_fd = -1; ++ info.sync_mode = RGA_BLIT_ASYNC; ++ ++ if (out_info->uncompact_10b_msb) ++ info.is_10b_compact = info.is_10b_endian = 1; ++ ++ if (!pat_preproc) ++ set_colorspace_info(in0_info, in, out_info, out_frame->frame, &info.color_space_mode); ++ ++ if (pat_preproc) ++ rga_set_rect(&info.rect, in1_info->overlay_x, in1_info->overlay_y, ++ FFMIN((in0_info->act_w - in1_info->overlay_x), in1_info->act_w), ++ FFMIN((in0_info->act_h - in1_info->overlay_y), in1_info->act_h), ++ w_stride, h_stride, in1_info->rga_fmt); ++ else ++ 
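/* Normal (non pre-proc) path: blit into the rect described by out_info. */ ++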
rga_set_rect(&info.rect, out_info->act_x, out_info->act_y, ++ out_info->act_w, out_info->act_h, ++ w_stride, h_stride, out_info->rga_fmt); ++ ++ if (is_afbc) { ++ uint32_t drm_afbc_fmt = get_drm_afbc_format(out_info->pix_fmt); ++ ++ if (drm_afbc_fmt == DRM_FORMAT_INVALID) { ++ av_log(ctx, AV_LOG_WARNING, "Output format '%s' with AFBC modifier is not supported\n", ++ av_get_pix_fmt_name(out_info->pix_fmt)); ++ r->afbc_out = 0; ++ goto exit; ++ } ++ ++ w_stride = FFALIGN(pat_preproc ? inlink->w : outlink->w, RK_RGA_AFBC_STRIDE_ALIGN); ++ h_stride = FFALIGN(pat_preproc ? inlink->h : outlink->h, RK_RGA_AFBC_STRIDE_ALIGN); ++ ++ if ((info.rect.format == RK_FORMAT_YCbCr_420_SP_10B || ++ info.rect.format == RK_FORMAT_YCbCr_422_SP_10B) && (w_stride % 64)) { ++ av_log(ctx, AV_LOG_WARNING, "Output pixel wstride '%d' format '%s' is not supported by RGA3 AFBC\n", ++ w_stride, av_get_pix_fmt_name(out_info->pix_fmt)); ++ r->afbc_out = 0; ++ goto exit; ++ } ++ ++ /* Inverted RGB/BGR order in FBCE */ ++ switch (info.rect.format) { ++ case RK_FORMAT_RGBA_8888: ++ info.rect.format = RK_FORMAT_BGRA_8888; ++ break; ++ case RK_FORMAT_BGRA_8888: ++ info.rect.format = RK_FORMAT_RGBA_8888; ++ break; ++ } ++ ++ info.rect.wstride = w_stride; ++ info.rect.hstride = h_stride; ++ info.rd_mode = 1 << 1; /* IM_FBC_MODE */ ++ ++ desc->objects[0].format_modifier = ++ DRM_FORMAT_MOD_ARM_AFBC(AFBC_FORMAT_MOD_SPARSE | AFBC_FORMAT_MOD_BLOCK_SIZE_16x16); ++ ++ layer = &desc->layers[0]; ++ layer->format = drm_afbc_fmt; ++ layer->nb_planes = 1; ++ ++ layer->planes[0].offset = 0; ++ layer->planes[0].pitch = info.rect.wstride; ++ ++ if ((ret = get_afbc_pixel_stride(out_info->bytes_pp, (int *)&layer->planes[0].pitch, 0)) < 0) ++ goto fail; ++ } ++ ++exit: ++ out_frame->info = info; ++ ++ return out_frame; ++ ++fail: ++ if (out_frame && out_frame->frame) ++ av_frame_free(&out_frame->frame); ++ ++ return NULL; ++} ++ ++static av_cold int init_hwframes_ctx(AVFilterContext *avctx) ++{ ++ RKRGAContext *r = avctx->priv; ++ AVFilterLink *inlink = avctx->inputs[0]; ++ AVFilterLink *outlink = avctx->outputs[0]; ++ AVHWFramesContext *hwfc_in; ++ AVHWFramesContext *hwfc_out; ++ AVBufferRef *hwfc_out_ref; ++ int ret; ++ ++ if (!inlink->hw_frames_ctx) ++ return AVERROR(EINVAL); ++ ++ hwfc_in = (AVHWFramesContext *)inlink->hw_frames_ctx->data; ++ hwfc_out_ref = av_hwframe_ctx_alloc(hwfc_in->device_ref); ++ if (!hwfc_out_ref) ++ return AVERROR(ENOMEM); ++ ++ hwfc_out = (AVHWFramesContext *)hwfc_out_ref->data; ++ hwfc_out->format = AV_PIX_FMT_DRM_PRIME; ++ hwfc_out->sw_format = r->out_sw_format; ++ hwfc_out->width = outlink->w; ++ hwfc_out->height = outlink->h; ++ ++ ret = av_hwframe_ctx_init(hwfc_out_ref); ++ if (ret < 0) { ++ av_buffer_unref(&hwfc_out_ref); ++ av_log(avctx, AV_LOG_ERROR, "Error creating frames_ctx for output pad: %d\n", ret); ++ return ret; ++ } ++ ++ av_buffer_unref(&outlink->hw_frames_ctx); ++ outlink->hw_frames_ctx = hwfc_out_ref; ++ ++ return 0; ++} ++ ++static av_cold int init_pat_preproc_hwframes_ctx(AVFilterContext *avctx) ++{ ++ RKRGAContext *r = avctx->priv; ++ AVFilterLink *inlink0 = avctx->inputs[0]; ++ AVFilterLink *inlink1 = avctx->inputs[1]; ++ AVHWFramesContext *hwfc_in0, *hwfc_in1; ++ AVHWFramesContext *hwfc_pat; ++ AVBufferRef *hwfc_pat_ref; ++ int ret; ++ ++ if (!inlink0->hw_frames_ctx || !inlink1->hw_frames_ctx) ++ return AVERROR(EINVAL); ++ ++ hwfc_in0 = (AVHWFramesContext *)inlink0->hw_frames_ctx->data; ++ hwfc_in1 = (AVHWFramesContext *)inlink1->hw_frames_ctx->data; ++ hwfc_pat_ref = 
av_hwframe_ctx_alloc(hwfc_in0->device_ref); ++    if (!hwfc_pat_ref) ++        return AVERROR(ENOMEM); ++ ++    hwfc_pat = (AVHWFramesContext *)hwfc_pat_ref->data; ++    hwfc_pat->format    = AV_PIX_FMT_DRM_PRIME; ++    hwfc_pat->sw_format = hwfc_in1->sw_format; ++    hwfc_pat->width     = inlink0->w; ++    hwfc_pat->height    = inlink0->h; ++ ++    ret = av_hwframe_ctx_init(hwfc_pat_ref); ++    if (ret < 0) { ++        av_log(avctx, AV_LOG_ERROR, "Error creating frames_ctx for pat preproc: %d\n", ret); ++        av_buffer_unref(&hwfc_pat_ref); ++        return ret; ++    } ++ ++    av_buffer_unref(&r->pat_preproc_hwframes_ctx); ++    r->pat_preproc_hwframes_ctx = hwfc_pat_ref; ++ ++    return 0; ++} ++ ++static av_cold int verify_rga_frame_info(AVFilterContext *avctx, ++                                         RGAFrameInfo *src, RGAFrameInfo *dst, RGAFrameInfo *pat) ++{ ++    RKRGAContext *r = avctx->priv; ++    float scale_ratio_min, scale_ratio_max; ++    float scale_ratio_w, scale_ratio_h; ++    int ret; ++ ++    if (!src || !dst) ++        return AVERROR(EINVAL); ++ ++    scale_ratio_w = (float)dst->act_w / (float)src->act_w; ++    scale_ratio_h = (float)dst->act_h / (float)src->act_h; ++ ++    /* P010 requires RGA3 */ ++    if (!r->has_rga3 && ++        (src->pix_fmt == AV_PIX_FMT_P010 || ++         dst->pix_fmt == AV_PIX_FMT_P010)) { ++        av_log(avctx, AV_LOG_ERROR, "'%s' is only supported by RGA3\n", ++               av_get_pix_fmt_name(AV_PIX_FMT_P010)); ++        return AVERROR(ENOSYS); ++    } ++    /* P210 requires RGA3 */ ++    if (!r->has_rga3 && ++        (src->pix_fmt == AV_PIX_FMT_P210 || ++         dst->pix_fmt == AV_PIX_FMT_P210)) { ++        av_log(avctx, AV_LOG_ERROR, "'%s' is only supported by RGA3\n", ++               av_get_pix_fmt_name(AV_PIX_FMT_P210)); ++        return AVERROR(ENOSYS); ++    } ++    /* Input formats that require RGA2 */ ++    if (!r->has_rga2 && ++        (src->pix_fmt == AV_PIX_FMT_GRAY8 || ++         src->pix_fmt == AV_PIX_FMT_YUV420P || ++         src->pix_fmt == AV_PIX_FMT_YUV422P || ++         src->pix_fmt == AV_PIX_FMT_RGB555LE || ++         src->pix_fmt == AV_PIX_FMT_BGR555LE)) { ++        av_log(avctx, AV_LOG_ERROR, "'%s' as input is only supported by RGA2\n", ++               av_get_pix_fmt_name(src->pix_fmt)); ++        return AVERROR(ENOSYS); ++    } ++    /* Output formats that require RGA2 */ ++    if (!r->has_rga2 && ++        (dst->pix_fmt == AV_PIX_FMT_GRAY8 || ++         dst->pix_fmt == AV_PIX_FMT_YUV420P || ++         dst->pix_fmt == AV_PIX_FMT_YUV422P || ++         dst->pix_fmt == AV_PIX_FMT_RGB555LE || ++         dst->pix_fmt == AV_PIX_FMT_BGR555LE || ++         dst->pix_fmt == AV_PIX_FMT_ARGB || ++         dst->pix_fmt == AV_PIX_FMT_0RGB || ++         dst->pix_fmt == AV_PIX_FMT_ABGR || ++         dst->pix_fmt == AV_PIX_FMT_0BGR)) { ++        av_log(avctx, AV_LOG_ERROR, "'%s' as output is only supported by RGA2\n", ++               av_get_pix_fmt_name(dst->pix_fmt)); ++        return AVERROR(ENOSYS); ++    } ++    /* P010/P210 input requires RGA3, but RGA3 cannot produce these RGA2-only output formats */ ++    if ((src->pix_fmt == AV_PIX_FMT_P010 || ++         src->pix_fmt == AV_PIX_FMT_P210) && ++        (dst->pix_fmt == AV_PIX_FMT_GRAY8 || ++         dst->pix_fmt == AV_PIX_FMT_YUV420P || ++         dst->pix_fmt == AV_PIX_FMT_YUV422P || ++         dst->pix_fmt == AV_PIX_FMT_RGB555LE || ++         dst->pix_fmt == AV_PIX_FMT_BGR555LE || ++         dst->pix_fmt == AV_PIX_FMT_ARGB || ++         dst->pix_fmt == AV_PIX_FMT_0RGB || ++         dst->pix_fmt == AV_PIX_FMT_ABGR || ++         dst->pix_fmt == AV_PIX_FMT_0BGR)) { ++        av_log(avctx, AV_LOG_ERROR, "'%s' to '%s' is not supported\n", ++               av_get_pix_fmt_name(src->pix_fmt), ++               av_get_pix_fmt_name(dst->pix_fmt)); ++        return AVERROR(ENOSYS); ++    } ++    /* Converting an RGA2-only input format to an RGA3-only output format is likewise not supported */ ++    if ((dst->pix_fmt == AV_PIX_FMT_P010 || ++         dst->pix_fmt == AV_PIX_FMT_P210) && ++        (src->pix_fmt == AV_PIX_FMT_GRAY8 || ++         src->pix_fmt == AV_PIX_FMT_YUV420P || ++         src->pix_fmt == AV_PIX_FMT_YUV422P ||
++ src->pix_fmt == AV_PIX_FMT_RGB555LE || ++ src->pix_fmt == AV_PIX_FMT_BGR555LE)) { ++ av_log(avctx, AV_LOG_ERROR, "'%s' to '%s' is not supported\n", ++ av_get_pix_fmt_name(src->pix_fmt), ++ av_get_pix_fmt_name(dst->pix_fmt)); ++ return AVERROR(ENOSYS); ++ } ++ ++ if (src->pix_fmt == AV_PIX_FMT_GRAY8 || ++ src->pix_fmt == AV_PIX_FMT_YUV420P || ++ src->pix_fmt == AV_PIX_FMT_YUV422P || ++ src->pix_fmt == AV_PIX_FMT_RGB555LE || ++ src->pix_fmt == AV_PIX_FMT_BGR555LE || ++ dst->pix_fmt == AV_PIX_FMT_GRAY8 || ++ dst->pix_fmt == AV_PIX_FMT_YUV420P || ++ dst->pix_fmt == AV_PIX_FMT_YUV422P || ++ dst->pix_fmt == AV_PIX_FMT_RGB555LE || ++ dst->pix_fmt == AV_PIX_FMT_BGR555LE || ++ dst->pix_fmt == AV_PIX_FMT_ARGB || ++ dst->pix_fmt == AV_PIX_FMT_0RGB || ++ dst->pix_fmt == AV_PIX_FMT_ABGR || ++ dst->pix_fmt == AV_PIX_FMT_0BGR) { ++ r->is_rga2_used = 1; ++ } ++ ++ r->is_rga2_used = r->is_rga2_used || !r->has_rga3; ++ if (r->has_rga3) { ++ if (scale_ratio_w < 0.125f || ++ scale_ratio_w > 8.0f || ++ scale_ratio_h < 0.125f || ++ scale_ratio_h > 8.0f) { ++ r->is_rga2_used = 1; ++ } ++ if (src->act_w < 68 || ++ src->act_w > 8176 || ++ src->act_h > 8176 || ++ dst->act_w < 68) { ++ r->is_rga2_used = 1; ++ } ++ if (pat && (pat->act_w < 68 || ++ pat->act_w > 8176 || ++ pat->act_h > 8176)) { ++ r->is_rga2_used = 1; ++ } ++ } ++ ++ if ((ret = verify_rga_frame_info_io_dynamic(avctx, src, dst)) < 0) ++ return ret; ++ ++ if (r->is_rga2_used) ++ r->scheduler_core = 0x4; ++ ++ /* Prioritize RGA3 on multicore RGA hw to avoid dma32 & algorithm quirks as much as possible */ ++ if (r->has_rga3 && r->has_rga2e && !r->is_rga2_used && ++ (r->scheduler_core == 0 || avctx->nb_inputs > 1 || ++ scale_ratio_w != 1.0f || scale_ratio_h != 1.0f || ++ src->crop || src->uncompact_10b_msb || dst->uncompact_10b_msb)) { ++ r->scheduler_core = 0x3; ++ } ++ ++ scale_ratio_max = 16.0f; ++ if ((r->is_rga2_used && r->has_rga2l) || ++ (!r->is_rga2_used && r->has_rga3 && !r->has_rga2) || ++ (r->scheduler_core > 0 && r->scheduler_core == (r->scheduler_core & 0x3))) { ++ scale_ratio_max = 8.0f; ++ } ++ scale_ratio_min = 1.0f / scale_ratio_max; ++ ++ if (scale_ratio_w < scale_ratio_min || scale_ratio_w > scale_ratio_max || ++ scale_ratio_h < scale_ratio_min || scale_ratio_h > scale_ratio_max) { ++ av_log(avctx, AV_LOG_ERROR, "RGA scale ratio (%.04fx%.04f) exceeds %.04f ~ %.04f.\n", ++ scale_ratio_w, scale_ratio_h, scale_ratio_min, scale_ratio_max); ++ return AVERROR(EINVAL); ++ } ++ ++ return 0; ++} ++ ++static av_cold int fill_rga_frame_info_by_link(AVFilterContext *avctx, ++ RGAFrameInfo *info, ++ AVFilterLink *link, ++ int nb_link, int is_inlink) ++{ ++ AVHWFramesContext *hwfc; ++ RKRGAContext *r = avctx->priv; ++ ++ if (!link->hw_frames_ctx || link->format != AV_PIX_FMT_DRM_PRIME) ++ return AVERROR(EINVAL); ++ ++ hwfc = (AVHWFramesContext *)link->hw_frames_ctx->data; ++ ++ if (!map_av_to_rga_format(hwfc->sw_format, &info->rga_fmt, (is_inlink && nb_link > 0))) { ++ av_log(avctx, AV_LOG_ERROR, "Unsupported '%s' pad %d format: '%s'\n", ++ (is_inlink ? 
"input" : "output"), nb_link, ++ av_get_pix_fmt_name(hwfc->sw_format)); ++ return AVERROR(ENOSYS); ++ } ++ ++ info->pix_fmt = hwfc->sw_format; ++ info->pix_desc = av_pix_fmt_desc_get(info->pix_fmt); ++ info->bytes_pp = av_get_padded_bits_per_pixel(info->pix_desc) / 8.0f; ++ ++ info->act_x = 0; ++ info->act_y = 0; ++ info->act_w = link->w; ++ info->act_h = link->h; ++ ++ /* The w/h of RGA YUV image needs to be 2 aligned */ ++ if (!(info->pix_desc->flags & AV_PIX_FMT_FLAG_RGB)) { ++ info->act_w = ALIGN_DOWN(info->act_w, RK_RGA_YUV_ALIGN); ++ info->act_h = ALIGN_DOWN(info->act_h, RK_RGA_YUV_ALIGN); ++ } ++ ++ info->uncompact_10b_msb = info->pix_fmt == AV_PIX_FMT_P010 || ++ info->pix_fmt == AV_PIX_FMT_P210; ++ ++ if (link->w * link->h > (3840 * 2160 * 3)) ++ r->async_depth = FFMIN(r->async_depth, 1); ++ ++ return 0; ++} ++ ++av_cold int ff_rkrga_init(AVFilterContext *avctx, RKRGAParam *param) ++{ ++ RKRGAContext *r = avctx->priv; ++ int i, ret; ++ const char *rga_ver = querystring(RGA_VERSION); ++ ++ r->got_frame = 0; ++ ++ r->has_rga2 = !!strstr(rga_ver, "RGA_2"); ++ r->has_rga2l = !!strstr(rga_ver, "RGA_2_lite"); ++ r->has_rga2e = !!strstr(rga_ver, "RGA_2_Enhance"); ++ r->has_rga3 = !!strstr(rga_ver, "RGA_3"); ++ ++ if (!(r->has_rga2 || r->has_rga3)) { ++ av_log(avctx, AV_LOG_ERROR, "No RGA2/RGA3 hw available\n"); ++ return AVERROR(ENOSYS); ++ } ++ ++ /* RGA core */ ++ if (r->scheduler_core && !(r->has_rga2 && r->has_rga3)) { ++ av_log(avctx, AV_LOG_WARNING, "Scheduler core cannot be set on non-multicore RGA hw, ignoring\n"); ++ r->scheduler_core = 0; ++ } ++ if (r->scheduler_core && r->scheduler_core != (r->scheduler_core & 0x7)) { ++ av_log(avctx, AV_LOG_WARNING, "Invalid scheduler core set, ignoring\n"); ++ r->scheduler_core = 0; ++ } ++ if (r->scheduler_core && r->scheduler_core == (r->scheduler_core & 0x3)) ++ r->has_rga2 = r->has_rga2l = r->has_rga2e = 0; ++ if (r->scheduler_core == 0x4) ++ r->has_rga3 = 0; ++ ++ r->filter_frame = param->filter_frame; ++ if (!r->filter_frame) ++ r->filter_frame = ff_filter_frame; ++ r->out_sw_format = param->out_sw_format; ++ ++ /* OUT hwfc */ ++ ret = init_hwframes_ctx(avctx); ++ if (ret < 0) ++ goto fail; ++ ++ /* IN RGAFrameInfo */ ++ r->in_rga_frame_infos = av_calloc(avctx->nb_inputs, sizeof(*r->in_rga_frame_infos)); ++ if (!r->in_rga_frame_infos) { ++ ret = AVERROR(ENOMEM); ++ goto fail; ++ } ++ for (i = 0; i < avctx->nb_inputs; i++) { ++ ret = fill_rga_frame_info_by_link(avctx, &r->in_rga_frame_infos[i], avctx->inputs[i], i, 1); ++ if (ret < 0) ++ goto fail; ++ } ++ if (avctx->nb_inputs == 1) { ++ r->in_rga_frame_infos[0].rotate_mode = param->in_rotate_mode; ++ ++ if (param->in_crop) { ++ /* The x/y/w/h of RGA YUV image needs to be 2 aligned */ ++ if (!(r->in_rga_frame_infos[0].pix_desc->flags & AV_PIX_FMT_FLAG_RGB)) { ++ param->in_crop_x = ALIGN_DOWN(param->in_crop_x, RK_RGA_YUV_ALIGN); ++ param->in_crop_y = ALIGN_DOWN(param->in_crop_y, RK_RGA_YUV_ALIGN); ++ param->in_crop_w = ALIGN_DOWN(param->in_crop_w, RK_RGA_YUV_ALIGN); ++ param->in_crop_h = ALIGN_DOWN(param->in_crop_h, RK_RGA_YUV_ALIGN); ++ } ++ r->in_rga_frame_infos[0].crop = 1; ++ r->in_rga_frame_infos[0].act_x = param->in_crop_x; ++ r->in_rga_frame_infos[0].act_y = param->in_crop_y; ++ r->in_rga_frame_infos[0].act_w = param->in_crop_w; ++ r->in_rga_frame_infos[0].act_h = param->in_crop_h; ++ } ++ } ++ if (avctx->nb_inputs > 1) { ++ const int premultiplied_alpha = r->in_rga_frame_infos[1].pix_desc->flags & AV_PIX_FMT_FLAG_ALPHA; ++ ++ /* IM_ALPHA_BLEND_DST_OVER */ ++ if 
(param->in_global_alpha > 0 && param->in_global_alpha < 0xff) { ++ r->in_rga_frame_infos[0].blend_mode = premultiplied_alpha ? (0x4 | (1 << 12)) : 0x4; ++ r->in_rga_frame_infos[0].blend_mode |= (param->in_global_alpha & 0xff) << 16; /* fg_global_alpha */ ++ r->in_rga_frame_infos[0].blend_mode |= 0xff << 24; /* bg_global_alpha */ ++ } else ++ r->in_rga_frame_infos[0].blend_mode = premultiplied_alpha ? 0x504 : 0x501; ++ ++ r->in_rga_frame_infos[1].overlay_x = FFMAX(param->overlay_x, 0); ++ r->in_rga_frame_infos[1].overlay_y = FFMAX(param->overlay_y, 0); ++ ++ r->is_overlay_offset_valid = (param->overlay_x < r->in_rga_frame_infos[0].act_w - 2) && ++ (param->overlay_y < r->in_rga_frame_infos[0].act_h - 2); ++ if (r->is_overlay_offset_valid) ++ init_pat_preproc_hwframes_ctx(avctx); ++ } ++ ++ /* OUT RGAFrameInfo */ ++ ret = fill_rga_frame_info_by_link(avctx, &r->out_rga_frame_info, avctx->outputs[0], 0, 0); ++ if (ret < 0) ++ goto fail; ++ ++ /* Pre-check RGAFrameInfo */ ++ ret = verify_rga_frame_info(avctx, &r->in_rga_frame_infos[0], ++ &r->out_rga_frame_info, ++ (avctx->nb_inputs > 1 ? &r->in_rga_frame_infos[1] : NULL)); ++ if (ret < 0) ++ goto fail; ++ ++ r->out_rga_frame_info.scheduler_core = r->scheduler_core; ++ ++ /* keep fifo size at least 1. Even when async_depth is 0, fifo is used. */ ++ r->async_fifo = av_fifo_alloc2(r->async_depth + 1, sizeof(RGAAsyncFrame), 0); ++ if (!r->async_fifo) { ++ ret = AVERROR(ENOMEM); ++ goto fail; ++ } ++ ++ return 0; ++ ++fail: ++ ff_rkrga_close(avctx); ++ return ret; ++} ++ ++static void set_rga_async_frame_lock_status(RGAAsyncFrame *frame, int lock) ++{ ++ int status = !!lock; ++ ++ if (!frame) ++ return; ++ ++ if (frame->src) ++ frame->src->locked = status; ++ if (frame->dst) ++ frame->dst->locked = status; ++ if (frame->pat) ++ frame->pat->locked = status; ++} ++ ++av_cold int ff_rkrga_close(AVFilterContext *avctx) ++{ ++ RKRGAContext *r = avctx->priv; ++ ++ clear_frame_list(&r->src_frame_list); ++ clear_frame_list(&r->dst_frame_list); ++ clear_frame_list(&r->pat_frame_list); ++ ++ clear_frame_list(&r->pat_preproc_frame_list); ++ ++ av_fifo_freep2(&r->async_fifo); ++ ++ av_buffer_unref(&r->pat_preproc_hwframes_ctx); ++ ++ return 0; ++} ++ ++static int call_rkrga_blit(AVFilterContext *avctx, ++ rga_info_t *src_info, ++ rga_info_t *dst_info, ++ rga_info_t *pat_info) ++{ ++ int ret; ++ ++ if (!src_info || !dst_info) ++ return AVERROR(EINVAL); ++ ++#define PRINT_RGA_INFO(ctx, info, name) do { \ ++ if (info && name) \ ++ av_log(ctx, AV_LOG_DEBUG, "RGA %s | fd:%d mmu:%d rd_mode:%d | x:%d y:%d w:%d h:%d ws:%d hs:%d fmt:0x%x\n", \ ++ name, info->fd, info->mmuFlag, (info->rd_mode >> 1), info->rect.xoffset, info->rect.yoffset, \ ++ info->rect.width, info->rect.height, info->rect.wstride, info->rect.hstride, (info->rect.format >> 8)); \ ++} while (0) ++ ++ PRINT_RGA_INFO(avctx, src_info, "src"); ++ PRINT_RGA_INFO(avctx, dst_info, "dst"); ++ PRINT_RGA_INFO(avctx, pat_info, "pat"); ++#undef PRINT_RGA_INFO ++ ++ if ((ret = c_RkRgaBlit(src_info, dst_info, pat_info)) != 0) { ++ av_log(avctx, AV_LOG_ERROR, "RGA blit failed: %d\n", ret); ++ return AVERROR_EXTERNAL; ++ } ++ if (dst_info->sync_mode == RGA_BLIT_ASYNC && ++ dst_info->out_fence_fd <= 0) { ++ av_log(avctx, AV_LOG_ERROR, "RGA async blit returned invalid fence_fd: %d\n", ++ dst_info->out_fence_fd); ++ return AVERROR_EXTERNAL; ++ } ++ ++ return 0; ++} ++ ++int ff_rkrga_filter_frame(RKRGAContext *r, ++ AVFilterLink *inlink_src, AVFrame *picref_src, ++ AVFilterLink *inlink_pat, AVFrame *picref_pat) ++{ ++ 
AVFilterContext *ctx = inlink_src->dst; ++    AVFilterLink *outlink = ctx->outputs[0]; ++    RGAAsyncFrame aframe; ++    RGAFrame *src_frame = NULL; ++    RGAFrame *dst_frame = NULL; ++    RGAFrame *pat_frame = NULL; ++    int ret, filter_ret; ++    int do_overlay = ctx->nb_inputs > 1 && ++                     r->is_overlay_offset_valid && ++                     inlink_pat && picref_pat; ++ ++    /* Sync & Drain */ ++    while (r->eof && av_fifo_read(r->async_fifo, &aframe, 1) >= 0) { ++        if (imsync(aframe.dst->info.out_fence_fd) != IM_STATUS_SUCCESS) ++            av_log(ctx, AV_LOG_WARNING, "RGA sync failed\n"); ++ ++        set_rga_async_frame_lock_status(&aframe, 0); ++ ++        filter_ret = r->filter_frame(outlink, aframe.dst->frame); ++        if (filter_ret < 0) { ++            av_frame_free(&aframe.dst->frame); ++            return filter_ret; ++        } ++        aframe.dst->queued--; ++        r->got_frame = 1; ++        aframe.dst->frame = NULL; ++    } ++ ++    if (!picref_src) ++        return 0; ++ ++    /* SRC */ ++    if (!(src_frame = submit_frame(r, inlink_src, picref_src, do_overlay, 0))) { ++        av_log(ctx, AV_LOG_ERROR, "Failed to submit frame on input: %d\n", ++               FF_INLINK_IDX(inlink_src)); ++        return AVERROR(ENOMEM); ++    } ++ ++    /* DST */ ++    if (!(dst_frame = query_frame(r, outlink, src_frame->frame, 0))) { ++        av_log(ctx, AV_LOG_ERROR, "Failed to query an output frame\n"); ++        return AVERROR(ENOMEM); ++    } ++ ++    /* PAT */ ++    if (do_overlay) { ++        RGAFrameInfo *in0_info = &r->in_rga_frame_infos[0]; ++        RGAFrameInfo *in1_info = &r->in_rga_frame_infos[1]; ++        RGAFrameInfo *out_info = &r->out_rga_frame_info; ++        RGAFrame *pat_in = NULL; ++        RGAFrame *pat_out = NULL; ++ ++        /* translate PAT from top-left to (x,y) on a new image with the same size as SRC */ ++        if (in1_info->act_w != in0_info->act_w || ++            in1_info->act_h != in0_info->act_h || ++            in1_info->overlay_x > 0 || ++            in1_info->overlay_y > 0) { ++            if (!(pat_in = submit_frame(r, inlink_pat, picref_pat, 0, 1))) { ++                av_log(ctx, AV_LOG_ERROR, "Failed to submit frame on input: %d\n", ++                       FF_INLINK_IDX(inlink_pat)); ++                return AVERROR(ENOMEM); ++            } ++            if (!(pat_out = query_frame(r, outlink, picref_pat, 1))) { ++                av_log(ctx, AV_LOG_ERROR, "Failed to query an output frame\n"); ++                return AVERROR(ENOMEM); ++            } ++            dst_frame->info.core = out_info->scheduler_core; ++ ++            pat_out->info.priority  = 1; ++            pat_out->info.core      = dst_frame->info.core; ++            pat_out->info.sync_mode = RGA_BLIT_SYNC; ++ ++            /* Sync Blit Pre-Proc */ ++            ret = call_rkrga_blit(ctx, &pat_in->info, &pat_out->info, NULL); ++            if (ret < 0) ++                return ret; ++ ++            pat_out->info.rect.xoffset = 0; ++            pat_out->info.rect.yoffset = 0; ++            pat_out->info.rect.width   = in0_info->act_w; ++            pat_out->info.rect.height  = in0_info->act_h; ++ ++            pat_frame = pat_out; ++        } ++ ++        if (!pat_frame && !(pat_frame = submit_frame(r, inlink_pat, picref_pat, 0, 0))) { ++            av_log(ctx, AV_LOG_ERROR, "Failed to submit frame on input: %d\n", ++                   FF_INLINK_IDX(inlink_pat)); ++            return AVERROR(ENOMEM); ++        } ++        dst_frame->info.core = out_info->scheduler_core; ++    } ++ ++    /* Async Blit */ ++    ret = call_rkrga_blit(ctx, ++                          &src_frame->info, ++                          &dst_frame->info, ++                          pat_frame ?
&pat_frame->info : NULL); ++    if (ret < 0) ++        return ret; ++ ++    dst_frame->queued++; ++    aframe = (RGAAsyncFrame){ src_frame, dst_frame, pat_frame }; ++    set_rga_async_frame_lock_status(&aframe, 1); ++    av_fifo_write(r->async_fifo, &aframe, 1); ++ ++    /* Sync & Retrieve */ ++    if (av_fifo_can_read(r->async_fifo) > r->async_depth) { ++        av_fifo_read(r->async_fifo, &aframe, 1); ++        if (imsync(aframe.dst->info.out_fence_fd) != IM_STATUS_SUCCESS) { ++            av_log(ctx, AV_LOG_ERROR, "RGA sync failed\n"); ++            return AVERROR_EXTERNAL; ++        } ++        set_rga_async_frame_lock_status(&aframe, 0); ++ ++        filter_ret = r->filter_frame(outlink, aframe.dst->frame); ++        if (filter_ret < 0) { ++            av_frame_free(&aframe.dst->frame); ++            return filter_ret; ++        } ++        aframe.dst->queued--; ++        r->got_frame = 1; ++        aframe.dst->frame = NULL; ++    } ++ ++    return 0; ++} +Index: jellyfin-ffmpeg/libavfilter/rkrga_common.h +=================================================================== +--- /dev/null ++++ jellyfin-ffmpeg/libavfilter/rkrga_common.h +@@ -0,0 +1,127 @@ ++/* ++ * Copyright (c) 2023 NyanMisaka ++ * ++ * This file is part of FFmpeg. ++ * ++ * FFmpeg is free software; you can redistribute it and/or ++ * modify it under the terms of the GNU Lesser General Public ++ * License as published by the Free Software Foundation; either ++ * version 2.1 of the License, or (at your option) any later version. ++ * ++ * FFmpeg is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU ++ * Lesser General Public License for more details. ++ * ++ * You should have received a copy of the GNU Lesser General Public ++ * License along with FFmpeg; if not, write to the Free Software ++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA ++ */ ++ ++/** ++ * @file ++ * Rockchip RGA (2D Raster Graphic Acceleration) base function ++ */ ++ ++#ifndef AVFILTER_RKRGA_COMMON_H ++#define AVFILTER_RKRGA_COMMON_H ++ ++#include <rga/RgaApi.h> ++#include <rga/im2d.h> ++ ++#include "avfilter.h" ++#include "libavutil/fifo.h" ++#include "libavutil/hwcontext.h" ++#include "libavutil/hwcontext_rkmpp.h" ++ ++#define ALIGN_DOWN(a, b) ((a) & ~((b)-1)) ++#define RK_RGA_YUV_ALIGN 2 ++#define RK_RGA_AFBC_STRIDE_ALIGN 16 ++ ++#define FF_INLINK_IDX(link)  ((int)((link)->dstpad - (link)->dst->input_pads)) ++#define FF_OUTLINK_IDX(link) ((int)((link)->srcpad - (link)->src->output_pads)) ++ ++typedef struct RGAFrame { ++    AVFrame *frame; ++    rga_info_t info; ++    struct RGAFrame *next; ++    int queued; ++    int locked; ++} RGAFrame; ++ ++typedef struct RGAFrameInfo { ++    enum _Rga_SURF_FORMAT rga_fmt; ++    enum AVPixelFormat pix_fmt; ++    const AVPixFmtDescriptor *pix_desc; ++    float bytes_pp; ++    int act_x; ++    int act_y; ++    int act_w; ++    int act_h; ++    int uncompact_10b_msb; ++    int rotate_mode; ++    int blend_mode; ++    int crop; ++    int scheduler_core; ++    int overlay_x; ++    int overlay_y; ++} RGAFrameInfo; ++ ++typedef struct RKRGAContext { ++    const AVClass *class; ++ ++    int (*filter_frame) (AVFilterLink *outlink, AVFrame *frame); ++    enum AVPixelFormat out_sw_format; ++ ++    RGAFrame *src_frame_list; ++    RGAFrame *dst_frame_list; ++    RGAFrame *pat_frame_list; ++ ++    AVBufferRef *pat_preproc_hwframes_ctx; ++    RGAFrame *pat_preproc_frame_list; ++ ++    RGAFrameInfo *in_rga_frame_infos; ++    RGAFrameInfo  out_rga_frame_info; ++ ++    int scheduler_core; ++    int async_depth; ++    int afbc_out; ++ ++    int has_rga2; ++    int has_rga2l; ++    int has_rga2e; ++    int has_rga3; ++    int is_rga2_used; ++    int
is_overlay_offset_valid; ++ ++ int eof; ++ int got_frame; ++ ++ AVFifo *async_fifo; ++} RKRGAContext; ++ ++typedef struct RKRGAParam { ++ int (*filter_frame)(AVFilterLink *outlink, AVFrame *frame); ++ ++ enum AVPixelFormat out_sw_format; ++ ++ int in_rotate_mode; ++ int in_global_alpha; ++ ++ int in_crop; ++ int in_crop_x; ++ int in_crop_y; ++ int in_crop_w; ++ int in_crop_h; ++ ++ int overlay_x; ++ int overlay_y; ++} RKRGAParam; ++ ++int ff_rkrga_init(AVFilterContext *avctx, RKRGAParam *param); ++int ff_rkrga_close(AVFilterContext *avctx); ++int ff_rkrga_filter_frame(RKRGAContext *r, ++ AVFilterLink *inlink_src, AVFrame *picref_src, ++ AVFilterLink *inlink_pat, AVFrame *picref_pat); ++ ++#endif /* AVFILTER_RKRGA_COMMON_H */ +Index: jellyfin-ffmpeg/libavfilter/vf_overlay_rkrga.c +=================================================================== +--- /dev/null ++++ jellyfin-ffmpeg/libavfilter/vf_overlay_rkrga.c +@@ -0,0 +1,363 @@ ++/* ++ * Copyright (c) 2023 NyanMisaka ++ * ++ * This file is part of FFmpeg. ++ * ++ * FFmpeg is free software; you can redistribute it and/or ++ * modify it under the terms of the GNU Lesser General Public ++ * License as published by the Free Software Foundation; either ++ * version 2.1 of the License, or (at your option) any later version. ++ * ++ * FFmpeg is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * Lesser General Public License for more details. ++ * ++ * You should have received a copy of the GNU Lesser General Public ++ * License along with FFmpeg; if not, write to the Free Software ++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA ++ */ ++ ++/** ++ * @file ++ * Rockchip RGA (2D Raster Graphic Acceleration) video compositor ++ */ ++ ++#include "libavutil/common.h" ++#include "libavutil/eval.h" ++#include "libavutil/internal.h" ++#include "libavutil/opt.h" ++#include "libavutil/pixdesc.h" ++ ++#include "filters.h" ++#include "framesync.h" ++ ++#include "rkrga_common.h" ++ ++enum var_name { ++ VAR_MAIN_W, VAR_MW, ++ VAR_MAIN_H, VAR_MH, ++ VAR_OVERLAY_W, VAR_OW, ++ VAR_OVERLAY_H, VAR_OH, ++ VAR_OVERLAY_X, VAR_OX, ++ VAR_OVERLAY_Y, VAR_OY, ++ VAR_VARS_NB ++}; ++ ++typedef struct RGAOverlayContext { ++ RKRGAContext rga; ++ ++ FFFrameSync fs; ++ ++ double var_values[VAR_VARS_NB]; ++ char *overlay_ox, *overlay_oy; ++ int global_alpha; ++ enum AVPixelFormat format; ++} RGAOverlayContext; ++ ++static const char *const var_names[] = { ++ "main_w", "W", /* input width of the main layer */ ++ "main_h", "H", /* input height of the main layer */ ++ "overlay_w", "w", /* input width of the overlay layer */ ++ "overlay_h", "h", /* input height of the overlay layer */ ++ "overlay_x", "x", /* x position of the overlay layer inside of main */ ++ "overlay_y", "y", /* y position of the overlay layer inside of main */ ++ NULL ++}; ++ ++static int eval_expr(AVFilterContext *ctx) ++{ ++ RGAOverlayContext *r = ctx->priv; ++ double *var_values = r->var_values; ++ int ret = 0; ++ AVExpr *ox_expr = NULL, *oy_expr = NULL; ++ AVExpr *ow_expr = NULL, *oh_expr = NULL; ++ ++#define PASS_EXPR(e, s) {\ ++ ret = av_expr_parse(&e, s, var_names, NULL, NULL, NULL, NULL, 0, ctx); \ ++ if (ret < 0) {\ ++ av_log(ctx, AV_LOG_ERROR, "Error when passing '%s'.\n", s);\ ++ goto release;\ ++ }\ ++} ++ PASS_EXPR(ox_expr, r->overlay_ox); ++ PASS_EXPR(oy_expr, r->overlay_oy); ++ PASS_EXPR(ow_expr, "overlay_w"); ++ 
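/* The "x" and "y" option strings parsed above accept these variables; ++ * e.g. x=(W-w)/2:y=(H-h)/2 centers the overlay on the main input. */ ++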
PASS_EXPR(oh_expr, "overlay_h"); ++#undef PASS_EXPR ++ ++ var_values[VAR_OVERLAY_W] = ++ var_values[VAR_OW] = av_expr_eval(ow_expr, var_values, NULL); ++ var_values[VAR_OVERLAY_H] = ++ var_values[VAR_OH] = av_expr_eval(oh_expr, var_values, NULL); ++ ++ /* calc again in case ow is relative to oh */ ++ var_values[VAR_OVERLAY_W] = ++ var_values[VAR_OW] = av_expr_eval(ow_expr, var_values, NULL); ++ ++ var_values[VAR_OVERLAY_X] = ++ var_values[VAR_OX] = av_expr_eval(ox_expr, var_values, NULL); ++ var_values[VAR_OVERLAY_Y] = ++ var_values[VAR_OY] = av_expr_eval(oy_expr, var_values, NULL); ++ ++ /* calc again in case ox is relative to oy */ ++ var_values[VAR_OVERLAY_X] = ++ var_values[VAR_OX] = av_expr_eval(ox_expr, var_values, NULL); ++ ++release: ++ av_expr_free(ox_expr); ++ av_expr_free(oy_expr); ++ av_expr_free(ow_expr); ++ av_expr_free(oh_expr); ++ ++ return ret; ++} ++ ++static av_cold int set_size_info(AVFilterContext *ctx, ++ AVFilterLink *inlink_main, ++ AVFilterLink *inlink_overlay, ++ AVFilterLink *outlink) ++{ ++ RGAOverlayContext *r = ctx->priv; ++ int ret; ++ ++ if (inlink_main->w < 2 || inlink_main->w > 8192 || ++ inlink_main->h < 2 || inlink_main->h > 8192 || ++ inlink_overlay->w < 2 || inlink_overlay->w > 8192 || ++ inlink_overlay->h < 2 || inlink_overlay->h > 8192) { ++ av_log(ctx, AV_LOG_ERROR, "Supported input size is range from 2x2 ~ 8192x8192\n"); ++ return AVERROR(EINVAL); ++ } ++ ++ r->var_values[VAR_MAIN_W] = ++ r->var_values[VAR_MW] = inlink_main->w; ++ r->var_values[VAR_MAIN_H] = ++ r->var_values[VAR_MH] = inlink_main->h; ++ ++ r->var_values[VAR_OVERLAY_W] = inlink_overlay->w; ++ r->var_values[VAR_OVERLAY_H] = inlink_overlay->h; ++ ++ if ((ret = eval_expr(ctx)) < 0) ++ return ret; ++ ++ outlink->w = r->var_values[VAR_MW]; ++ outlink->h = r->var_values[VAR_MH]; ++ if (outlink->w < 2 || outlink->w > 8128 || ++ outlink->h < 2 || outlink->h > 8128) { ++ av_log(ctx, AV_LOG_ERROR, "Supported output size is range from 2x2 ~ 8128x8128\n"); ++ return AVERROR(EINVAL); ++ } ++ ++ if (inlink_main->sample_aspect_ratio.num) ++ outlink->sample_aspect_ratio = av_mul_q((AVRational){outlink->h * inlink_main->w, ++ outlink->w * inlink_main->h}, ++ inlink_main->sample_aspect_ratio); ++ else ++ outlink->sample_aspect_ratio = inlink_main->sample_aspect_ratio; ++ ++ return 0; ++} ++ ++static av_cold int rgaoverlay_config_props(AVFilterLink *outlink) ++{ ++ AVFilterContext *ctx = outlink->src; ++ RGAOverlayContext *r = ctx->priv; ++ AVFilterLink *inlink_main = ctx->inputs[0]; ++ AVFilterLink *inlink_overlay = ctx->inputs[1]; ++ AVHWFramesContext *frames_ctx_main; ++ AVHWFramesContext *frames_ctx_overlay; ++ enum AVPixelFormat in_format_main; ++ enum AVPixelFormat in_format_overlay; ++ enum AVPixelFormat out_format; ++ int ret; ++ ++ RKRGAParam param = { NULL }; ++ ++ if (!inlink_main->hw_frames_ctx) { ++ av_log(ctx, AV_LOG_ERROR, "No hw context provided on main input\n"); ++ return AVERROR(EINVAL); ++ } ++ frames_ctx_main = (AVHWFramesContext *)inlink_main->hw_frames_ctx->data; ++ in_format_main = frames_ctx_main->sw_format; ++ out_format = (r->format == AV_PIX_FMT_NONE) ? 
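/* no explicit "format" option given: keep the main input sw format */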
in_format_main : r->format; ++ ++ if (!inlink_overlay->hw_frames_ctx) { ++ av_log(ctx, AV_LOG_ERROR, "No hw context provided on overlay input\n"); ++ return AVERROR(EINVAL); ++ } ++ frames_ctx_overlay = (AVHWFramesContext *)inlink_overlay->hw_frames_ctx->data; ++ in_format_overlay = frames_ctx_overlay->sw_format; ++ ++ ret = set_size_info(ctx, inlink_main, inlink_overlay, outlink); ++ if (ret < 0) ++ return ret; ++ ++ param.filter_frame = NULL; ++ param.out_sw_format = out_format; ++ param.in_global_alpha = r->global_alpha; ++ param.overlay_x = r->var_values[VAR_OX]; ++ param.overlay_y = r->var_values[VAR_OY]; ++ ++ ret = ff_rkrga_init(ctx, &param); ++ if (ret < 0) ++ return ret; ++ ++ av_log(ctx, AV_LOG_VERBOSE, "w:%d h:%d fmt:%s + w:%d h:%d fmt:%s (x:%d y:%d) -> w:%d h:%d fmt:%s\n", ++ inlink_main->w, inlink_main->h, av_get_pix_fmt_name(in_format_main), ++ inlink_overlay->w, inlink_overlay->h, av_get_pix_fmt_name(in_format_overlay), ++ param.overlay_x, param.overlay_y, outlink->w, outlink->h, av_get_pix_fmt_name(out_format)); ++ ++ ret = ff_framesync_init_dualinput(&r->fs, ctx); ++ if (ret < 0) ++ return ret; ++ ++ r->fs.time_base = outlink->time_base = inlink_main->time_base; ++ ++ ret = ff_framesync_configure(&r->fs); ++ if (ret < 0) ++ return ret; ++ ++ return 0; ++} ++ ++static int rgaoverlay_on_event(FFFrameSync *fs) ++{ ++ AVFilterContext *ctx = fs->parent; ++ AVFilterLink *inlink_main = ctx->inputs[0]; ++ AVFilterLink *inlink_overlay = ctx->inputs[1]; ++ AVFrame *in_main = NULL, *in_overlay = NULL; ++ int ret; ++ ++ RGAOverlayContext *r = ctx->priv; ++ ++ ret = ff_framesync_get_frame(fs, 0, &in_main, 0); ++ if (ret < 0) ++ return ret; ++ ret = ff_framesync_get_frame(fs, 1, &in_overlay, 0); ++ if (ret < 0) ++ return ret; ++ ++ if (!in_main) ++ return AVERROR_BUG; ++ ++ return ff_rkrga_filter_frame(&r->rga, ++ inlink_main, in_main, ++ inlink_overlay, in_overlay); ++} ++ ++static av_cold int rgaoverlay_init(AVFilterContext *ctx) ++{ ++ RGAOverlayContext *r = ctx->priv; ++ ++ r->fs.on_event = &rgaoverlay_on_event; ++ ++ return 0; ++} ++ ++static av_cold void rgaoverlay_uninit(AVFilterContext *ctx) ++{ ++ RGAOverlayContext *r = ctx->priv; ++ ++ ff_framesync_uninit(&r->fs); ++ ++ ff_rkrga_close(ctx); ++} ++ ++static int rgaoverlay_activate(AVFilterContext *ctx) ++{ ++ RGAOverlayContext *r = ctx->priv; ++ AVFilterLink *inlink_main = ctx->inputs[0]; ++ AVFilterLink *inlink_overlay = ctx->inputs[1]; ++ AVFilterLink *outlink = ctx->outputs[0]; ++ int i, ret; ++ ++ ret = ff_framesync_activate(&r->fs); ++ if (ret < 0) ++ return ret; ++ ++ if (r->fs.eof) { ++ r->rga.eof = 1; ++ goto eof; ++ } ++ ++ if (!r->rga.got_frame) { ++ for (i = 0; i < ctx->nb_inputs; i++) { ++ if (!ff_inlink_check_available_frame(ctx->inputs[i])) { ++ FF_FILTER_FORWARD_WANTED(outlink, ctx->inputs[i]); ++ } ++ } ++ return FFERROR_NOT_READY; ++ } else ++ r->rga.got_frame = 0; ++ ++ return 0; ++ ++eof: ++ ff_rkrga_filter_frame(&r->rga, ++ inlink_main, NULL, ++ inlink_overlay, NULL); ++ ff_outlink_set_status(outlink, AVERROR_EOF, AV_NOPTS_VALUE); ++ return 0; ++} ++ ++#define OFFSET(x) offsetof(RGAOverlayContext, x) ++#define FLAGS (AV_OPT_FLAG_FILTERING_PARAM | AV_OPT_FLAG_VIDEO_PARAM) ++ ++static const AVOption rgaoverlay_options[] = { ++ { "x", "Overlay x position", OFFSET(overlay_ox), AV_OPT_TYPE_STRING, { .str = "0" }, 0, 0, .flags = FLAGS }, ++ { "y", "Overlay y position", OFFSET(overlay_oy), AV_OPT_TYPE_STRING, { .str = "0" }, 0, 0, .flags = FLAGS }, ++ { "alpha", "Overlay global alpha", OFFSET(global_alpha),
AV_OPT_TYPE_INT, { .i64 = 255 }, 0, 255, .flags = FLAGS }, ++ { "format", "Output video pixel format", OFFSET(format), AV_OPT_TYPE_PIXEL_FMT, { .i64 = AV_PIX_FMT_NONE }, INT_MIN, INT_MAX, .flags = FLAGS }, ++ { "eof_action", "Action to take when encountering EOF from secondary input ", ++ OFFSET(fs.opt_eof_action), AV_OPT_TYPE_INT, { .i64 = EOF_ACTION_REPEAT }, ++ EOF_ACTION_REPEAT, EOF_ACTION_PASS, .flags = FLAGS, "eof_action" }, ++ { "repeat", "Repeat the previous frame.", 0, AV_OPT_TYPE_CONST, { .i64 = EOF_ACTION_REPEAT }, .flags = FLAGS, "eof_action" }, ++ { "endall", "End both streams.", 0, AV_OPT_TYPE_CONST, { .i64 = EOF_ACTION_ENDALL }, .flags = FLAGS, "eof_action" }, ++ { "pass", "Pass through the main input.", 0, AV_OPT_TYPE_CONST, { .i64 = EOF_ACTION_PASS }, .flags = FLAGS, "eof_action" }, ++ { "shortest", "Force termination when the shortest input terminates", OFFSET(fs.opt_shortest), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, FLAGS }, ++ { "repeatlast", "Repeat overlay of the last overlay frame", OFFSET(fs.opt_repeatlast), AV_OPT_TYPE_BOOL, { .i64 = 1 }, 0, 1, FLAGS }, ++ { "core", "Set multicore RGA scheduler core [use with caution]", OFFSET(rga.scheduler_core), AV_OPT_TYPE_FLAGS, { .i64 = 0 }, 0, INT_MAX, FLAGS, "core" }, ++ { "default", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = 0 }, 0, 0, FLAGS, "core" }, ++ { "rga3_core0", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = 1 }, 0, 0, FLAGS, "core" }, /* RGA3_SCHEDULER_CORE0 */ ++ { "rga3_core1", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = 2 }, 0, 0, FLAGS, "core" }, /* RGA3_SCHEDULER_CORE1 */ ++ { "rga2_core0", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = 4 }, 0, 0, FLAGS, "core" }, /* RGA2_SCHEDULER_CORE0 */ ++ { "async_depth", "Set the internal parallelization depth", OFFSET(rga.async_depth), AV_OPT_TYPE_INT, { .i64 = 2 }, 0, 4, .flags = FLAGS }, ++ { "afbc", "Enable AFBC (Arm Frame Buffer Compression) to save bandwidth", OFFSET(rga.afbc_out), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, .flags = FLAGS }, ++ { NULL }, ++}; ++ ++FRAMESYNC_DEFINE_CLASS(rgaoverlay, RGAOverlayContext, fs); ++ ++static const AVFilterPad rgaoverlay_inputs[] = { ++ { ++ .name = "main", ++ .type = AVMEDIA_TYPE_VIDEO, ++ }, ++ { ++ .name = "overlay", ++ .type = AVMEDIA_TYPE_VIDEO, ++ }, ++}; ++ ++static const AVFilterPad rgaoverlay_outputs[] = { ++ { ++ .name = "default", ++ .type = AVMEDIA_TYPE_VIDEO, ++ .config_props = rgaoverlay_config_props, ++ }, ++}; ++ ++const AVFilter ff_vf_overlay_rkrga = { ++ .name = "overlay_rkrga", ++ .description = NULL_IF_CONFIG_SMALL("Rockchip RGA (2D Raster Graphic Acceleration) video compositor"), ++ .priv_size = sizeof(RGAOverlayContext), ++ .priv_class = &rgaoverlay_class, ++ .init = rgaoverlay_init, ++ .uninit = rgaoverlay_uninit, ++ .activate = rgaoverlay_activate, ++ FILTER_INPUTS(rgaoverlay_inputs), ++ FILTER_OUTPUTS(rgaoverlay_outputs), ++ FILTER_SINGLE_PIXFMT(AV_PIX_FMT_DRM_PRIME), ++ .preinit = rgaoverlay_framesync_preinit, ++ .flags_internal = FF_FILTER_FLAG_HWFRAME_AWARE, ++}; +Index: jellyfin-ffmpeg/libavfilter/vf_vpp_rkrga.c +=================================================================== +--- /dev/null ++++ jellyfin-ffmpeg/libavfilter/vf_vpp_rkrga.c +@@ -0,0 +1,561 @@ ++/* ++ * Copyright (c) 2023 NyanMisaka ++ * ++ * This file is part of FFmpeg. ++ * ++ * FFmpeg is free software; you can redistribute it and/or ++ * modify it under the terms of the GNU Lesser General Public ++ * License as published by the Free Software Foundation; either ++ * version 2.1 of the License, or (at your option) any later version. 
++ * ++ * FFmpeg is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * Lesser General Public License for more details. ++ * ++ * You should have received a copy of the GNU Lesser General Public ++ * License along with FFmpeg; if not, write to the Free Software ++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA ++ */ ++ ++/** ++ * @file ++ * Rockchip RGA (2D Raster Graphic Acceleration) video post-process (scale/crop/transpose) ++ */ ++ ++#include "config_components.h" ++ ++#include "libavutil/common.h" ++#include "libavutil/eval.h" ++#include "libavutil/internal.h" ++#include "libavutil/opt.h" ++#include "libavutil/pixdesc.h" ++ ++#include "filters.h" ++#include "scale_eval.h" ++#include "transpose.h" ++ ++#include "rkrga_common.h" ++ ++typedef struct RGAVppContext { ++ RKRGAContext rga; ++ ++ enum AVPixelFormat format; ++ int transpose; ++ int force_original_aspect_ratio; ++ int force_divisible_by; ++ int force_yuv; ++ int force_chroma; ++ int scheduler_core; ++ ++ int in_rotate_mode; ++ ++ char *ow, *oh; ++ char *cx, *cy, *cw, *ch; ++ int crop; ++ ++ int act_x, act_y; ++ int act_w, act_h; ++} RGAVppContext; ++ ++enum { ++ FORCE_YUV_DISABLE, ++ FORCE_YUV_AUTO, ++ FORCE_YUV_8BIT, ++ FORCE_YUV_10BIT, ++ FORCE_YUV_NB ++}; ++ ++enum { ++ FORCE_CHROMA_AUTO, ++ FORCE_CHROMA_420SP, ++ FORCE_CHROMA_420P, ++ FORCE_CHROMA_422SP, ++ FORCE_CHROMA_422P, ++ FORCE_CHROMA_NB ++}; ++ ++static const char *const var_names[] = { ++ "iw", "in_w", ++ "ih", "in_h", ++ "ow", "out_w", "w", ++ "oh", "out_h", "h", ++ "cw", ++ "ch", ++ "cx", ++ "cy", ++ "a", "dar", ++ "sar", ++ NULL ++}; ++ ++enum var_name { ++ VAR_IW, VAR_IN_W, ++ VAR_IH, VAR_IN_H, ++ VAR_OW, VAR_OUT_W, VAR_W, ++ VAR_OH, VAR_OUT_H, VAR_H, ++ VAR_CW, ++ VAR_CH, ++ VAR_CX, ++ VAR_CY, ++ VAR_A, VAR_DAR, ++ VAR_SAR, ++ VAR_VARS_NB ++}; ++ ++static av_cold int eval_expr(AVFilterContext *ctx, ++ int *ret_w, int *ret_h, ++ int *ret_cx, int *ret_cy, ++ int *ret_cw, int *ret_ch) ++{ ++#define PASS_EXPR(e, s) {\ ++ if (s) {\ ++ ret = av_expr_parse(&e, s, var_names, NULL, NULL, NULL, NULL, 0, ctx); \ ++ if (ret < 0) { \ ++ av_log(ctx, AV_LOG_ERROR, "Error when passing '%s'.\n", s); \ ++ goto release; \ ++ } \ ++ }\ ++} ++#define CALC_EXPR(e, v, i, d) {\ ++ if (e)\ ++ i = v = av_expr_eval(e, var_values, NULL); \ ++ else\ ++ i = v = d;\ ++} ++ RGAVppContext *r = ctx->priv; ++ double var_values[VAR_VARS_NB] = { NAN }; ++ AVExpr *w_expr = NULL, *h_expr = NULL; ++ AVExpr *cw_expr = NULL, *ch_expr = NULL; ++ AVExpr *cx_expr = NULL, *cy_expr = NULL; ++ int ret = 0; ++ ++ PASS_EXPR(cw_expr, r->cw); ++ PASS_EXPR(ch_expr, r->ch); ++ ++ PASS_EXPR(w_expr, r->ow); ++ PASS_EXPR(h_expr, r->oh); ++ ++ PASS_EXPR(cx_expr, r->cx); ++ PASS_EXPR(cy_expr, r->cy); ++ ++ var_values[VAR_IW] = ++ var_values[VAR_IN_W] = ctx->inputs[0]->w; ++ ++ var_values[VAR_IH] = ++ var_values[VAR_IN_H] = ctx->inputs[0]->h; ++ ++ var_values[VAR_A] = (double)var_values[VAR_IN_W] / var_values[VAR_IN_H]; ++ var_values[VAR_SAR] = ctx->inputs[0]->sample_aspect_ratio.num ? 
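++ /* dar = a * sar, computed just below; e.g. 720x576 with 16:15 SAR ++ * gives (720/576) * (16/15) = 4/3. */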
++ (double)ctx->inputs[0]->sample_aspect_ratio.num / ctx->inputs[0]->sample_aspect_ratio.den : 1; ++ var_values[VAR_DAR] = var_values[VAR_A] * var_values[VAR_SAR]; ++ ++ /* crop params */ ++ CALC_EXPR(cw_expr, var_values[VAR_CW], *ret_cw, var_values[VAR_IW]); ++ CALC_EXPR(ch_expr, var_values[VAR_CH], *ret_ch, var_values[VAR_IH]); ++ ++ /* calc again in case cw is relative to ch */ ++ CALC_EXPR(cw_expr, var_values[VAR_CW], *ret_cw, var_values[VAR_IW]); ++ ++ CALC_EXPR(w_expr, ++ var_values[VAR_OUT_W] = var_values[VAR_OW] = var_values[VAR_W], ++ *ret_w, var_values[VAR_CW]); ++ CALC_EXPR(h_expr, ++ var_values[VAR_OUT_H] = var_values[VAR_OH] = var_values[VAR_H], ++ *ret_h, var_values[VAR_CH]); ++ ++ /* calc again in case ow is relative to oh */ ++ CALC_EXPR(w_expr, ++ var_values[VAR_OUT_W] = var_values[VAR_OW] = var_values[VAR_W], ++ *ret_w, var_values[VAR_CW]); ++ ++ CALC_EXPR(cx_expr, var_values[VAR_CX], *ret_cx, (var_values[VAR_IW] - var_values[VAR_OW]) / 2); ++ CALC_EXPR(cy_expr, var_values[VAR_CY], *ret_cy, (var_values[VAR_IH] - var_values[VAR_OH]) / 2); ++ ++ /* calc again in case cx is relative to cy */ ++ CALC_EXPR(cx_expr, var_values[VAR_CX], *ret_cx, (var_values[VAR_IW] - var_values[VAR_OW]) / 2); ++ ++ r->crop = (*ret_cw != var_values[VAR_IW]) || (*ret_ch != var_values[VAR_IH]); ++ ++release: ++ av_expr_free(w_expr); ++ av_expr_free(h_expr); ++ av_expr_free(cw_expr); ++ av_expr_free(ch_expr); ++ av_expr_free(cx_expr); ++ av_expr_free(cy_expr); ++#undef PASS_EXPR ++#undef CALC_EXPR ++ ++ return ret; ++} ++ ++static av_cold int set_size_info(AVFilterContext *ctx, ++ AVFilterLink *inlink, ++ AVFilterLink *outlink) ++{ ++ RGAVppContext *r = ctx->priv; ++ int w, h, ret; ++ ++ if (inlink->w < 2 || inlink->w > 8192 || ++ inlink->h < 2 || inlink->h > 8192) { ++ av_log(ctx, AV_LOG_ERROR, "Supported input size is range from 2x2 ~ 8192x8192\n"); ++ return AVERROR(EINVAL); ++ } ++ ++ if ((ret = eval_expr(ctx, &w, &h, &r->act_x, &r->act_y, &r->act_w, &r->act_h)) < 0) ++ return ret; ++ ++ r->act_x = FFMAX(FFMIN(r->act_x, inlink->w), 0); ++ r->act_y = FFMAX(FFMIN(r->act_y, inlink->h), 0); ++ r->act_w = FFMAX(FFMIN(r->act_w, inlink->w), 0); ++ r->act_h = FFMAX(FFMIN(r->act_h, inlink->h), 0); ++ ++ r->act_x = FFMIN(r->act_x, inlink->w - r->act_w); ++ r->act_y = FFMIN(r->act_y, inlink->h - r->act_h); ++ r->act_w = FFMIN(r->act_w, inlink->w - r->act_x); ++ r->act_h = FFMIN(r->act_h, inlink->h - r->act_y); ++ ++ ff_scale_adjust_dimensions(inlink, &w, &h, ++ r->force_original_aspect_ratio, r->force_divisible_by); ++ ++ if (((int64_t)h * inlink->w) > INT_MAX || ++ ((int64_t)w * inlink->h) > INT_MAX) { ++ av_log(ctx, AV_LOG_ERROR, "Rescaled value for width or height is too big.\n"); ++ return AVERROR(EINVAL); ++ } ++ ++ outlink->w = w; ++ outlink->h = h; ++ if (outlink->w < 2 || outlink->w > 8128 || ++ outlink->h < 2 || outlink->h > 8128) { ++ av_log(ctx, AV_LOG_ERROR, "Supported output size is range from 2x2 ~ 8128x8128\n"); ++ return AVERROR(EINVAL); ++ } ++ ++ if (inlink->sample_aspect_ratio.num) ++ outlink->sample_aspect_ratio = av_mul_q((AVRational){outlink->h * inlink->w, ++ outlink->w * inlink->h}, ++ inlink->sample_aspect_ratio); ++ else ++ outlink->sample_aspect_ratio = inlink->sample_aspect_ratio; ++ ++ if (r->transpose >= 0) { ++ switch (r->transpose) { ++ case TRANSPOSE_CCLOCK_FLIP: ++ r->in_rotate_mode = 0x07 | (0x01 << 4); /* HAL_TRANSFORM_ROT_270 | (HAL_TRANSFORM_FLIP_H << 4) */ ++ FFSWAP(int, outlink->w, outlink->h); ++ FFSWAP(int, outlink->sample_aspect_ratio.num, 
outlink->sample_aspect_ratio.den); ++ break; ++ case TRANSPOSE_CLOCK: ++ r->in_rotate_mode = 0x04; /* HAL_TRANSFORM_ROT_90 */ ++ FFSWAP(int, outlink->w, outlink->h); ++ FFSWAP(int, outlink->sample_aspect_ratio.num, outlink->sample_aspect_ratio.den); ++ break; ++ case TRANSPOSE_CCLOCK: ++ r->in_rotate_mode = 0x07; /* HAL_TRANSFORM_ROT_270 */ ++ FFSWAP(int, outlink->w, outlink->h); ++ FFSWAP(int, outlink->sample_aspect_ratio.num, outlink->sample_aspect_ratio.den); ++ break; ++ case TRANSPOSE_CLOCK_FLIP: ++ r->in_rotate_mode = 0x04 | (0x01 << 4); /* HAL_TRANSFORM_ROT_90 | (HAL_TRANSFORM_FLIP_H << 4) */ ++ FFSWAP(int, outlink->w, outlink->h); ++ FFSWAP(int, outlink->sample_aspect_ratio.num, outlink->sample_aspect_ratio.den); ++ break; ++ case TRANSPOSE_REVERSAL: ++ r->in_rotate_mode = 0x03; /* HAL_TRANSFORM_ROT_180 */ ++ break; ++ case TRANSPOSE_HFLIP: ++ r->in_rotate_mode = 0x01; /* HAL_TRANSFORM_FLIP_H */ ++ break; ++ case TRANSPOSE_VFLIP: ++ r->in_rotate_mode = 0x02; /* HAL_TRANSFORM_FLIP_V */ ++ break; ++ default: ++ av_log(ctx, AV_LOG_ERROR, "Failed to set transpose mode to %d\n", r->transpose); ++ return AVERROR(EINVAL); ++ } ++ } ++ ++ return 0; ++} ++ ++static av_cold void config_force_format(AVFilterContext *ctx, ++ enum AVPixelFormat in_format, ++ enum AVPixelFormat *out_format) ++{ ++ RGAVppContext *r = ctx->priv; ++ const AVPixFmtDescriptor *desc; ++ int out_depth, force_chroma; ++ int is_yuv, is_fully_planar; ++ ++ if (!out_format) ++ return; ++ ++ if (r->force_yuv == FORCE_YUV_AUTO) ++ out_depth = (in_format == AV_PIX_FMT_NV15 || ++ in_format == AV_PIX_FMT_NV20) ? 10 : 0; ++ else ++ out_depth = (r->force_yuv == FORCE_YUV_8BIT) ? 8 : ++ (r->force_yuv == FORCE_YUV_10BIT) ? 10 : 0; ++ if (!out_depth) ++ return; ++ ++ desc = av_pix_fmt_desc_get(in_format); ++ is_yuv = !(desc->flags & AV_PIX_FMT_FLAG_RGB) && desc->nb_components >= 2; ++ ++ force_chroma = r->force_chroma; ++ if (is_yuv && force_chroma == FORCE_CHROMA_AUTO) { ++ is_fully_planar = (desc->flags & AV_PIX_FMT_FLAG_PLANAR) && ++ desc->comp[1].plane != desc->comp[2].plane; ++ if (desc->log2_chroma_w == 1 && desc->log2_chroma_h == 1) ++ force_chroma = is_fully_planar ? FORCE_CHROMA_420P : FORCE_CHROMA_420SP; ++ else if (desc->log2_chroma_w == 1 && !desc->log2_chroma_h) ++ force_chroma = is_fully_planar ? FORCE_CHROMA_422P : FORCE_CHROMA_422SP; ++ } ++ ++ switch (force_chroma) { ++ case FORCE_CHROMA_422P: ++ *out_format = AV_PIX_FMT_YUV422P; ++ break; ++ case FORCE_CHROMA_422SP: ++ *out_format = out_depth == 10 ? ++ AV_PIX_FMT_P210 : AV_PIX_FMT_NV16; ++ break; ++ case FORCE_CHROMA_420P: ++ *out_format = AV_PIX_FMT_YUV420P; ++ break; ++ case FORCE_CHROMA_420SP: ++ default: ++ *out_format = out_depth == 10 ? ++ AV_PIX_FMT_P010 : AV_PIX_FMT_NV12; ++ } ++} ++ ++static av_cold int rgavpp_config_props(AVFilterLink *outlink) ++{ ++ AVFilterContext *ctx = outlink->src; ++ RGAVppContext *r = ctx->priv; ++ AVFilterLink *inlink = ctx->inputs[0]; ++ AVHWFramesContext *in_frames_ctx; ++ enum AVPixelFormat in_format; ++ enum AVPixelFormat out_format; ++ RKRGAParam param = { NULL }; ++ int ret; ++ ++ if (!inlink->hw_frames_ctx) { ++ av_log(ctx, AV_LOG_ERROR, "No hw context provided on input\n"); ++ return AVERROR(EINVAL); ++ } ++ in_frames_ctx = (AVHWFramesContext *)inlink->hw_frames_ctx->data; ++ in_format = in_frames_ctx->sw_format; ++ out_format = (r->format == AV_PIX_FMT_NONE) ? 
in_format : r->format; ++ ++ config_force_format(ctx, in_format, &out_format); ++ ++ ret = set_size_info(ctx, inlink, outlink); ++ if (ret < 0) ++ return ret; ++ ++ param.filter_frame = NULL; ++ param.out_sw_format = out_format; ++ param.in_rotate_mode = r->in_rotate_mode; ++ param.in_crop = r->crop; ++ param.in_crop_x = r->act_x; ++ param.in_crop_y = r->act_y; ++ param.in_crop_w = r->act_w; ++ param.in_crop_h = r->act_h; ++ ++ ret = ff_rkrga_init(ctx, &param); ++ if (ret < 0) ++ return ret; ++ ++ av_log(ctx, AV_LOG_VERBOSE, "w:%d h:%d fmt:%s -> w:%d h:%d fmt:%s\n", ++ inlink->w, inlink->h, av_get_pix_fmt_name(in_format), ++ outlink->w, outlink->h, av_get_pix_fmt_name(out_format)); ++ ++ return 0; ++} ++ ++static int rgavpp_activate(AVFilterContext *ctx) ++{ ++ AVFilterLink *inlink = ctx->inputs[0]; ++ AVFilterLink *outlink = ctx->outputs[0]; ++ RGAVppContext *r = ctx->priv; ++ AVFrame *in = NULL; ++ int ret, status = 0; ++ int64_t pts = AV_NOPTS_VALUE; ++ ++ FF_FILTER_FORWARD_STATUS_BACK(outlink, inlink); ++ ++ if (!r->rga.eof) { ++ ret = ff_inlink_consume_frame(inlink, &in); ++ if (ret < 0) ++ return ret; ++ ++ if (ff_inlink_acknowledge_status(inlink, &status, &pts)) { ++ if (status == AVERROR_EOF) { ++ r->rga.eof = 1; ++ } ++ } ++ } ++ ++ if (in || r->rga.eof) { ++ ret = ff_rkrga_filter_frame(&r->rga, inlink, in, NULL, NULL); ++ av_frame_free(&in); ++ if (ret < 0) ++ return ret; ++ else if (!r->rga.got_frame) ++ goto not_ready; ++ ++ if (r->rga.eof) ++ goto eof; ++ ++ if (r->rga.got_frame) { ++ r->rga.got_frame = 0; ++ return 0; ++ } ++ } ++ ++not_ready: ++ if (r->rga.eof) ++ goto eof; ++ ++ FF_FILTER_FORWARD_WANTED(outlink, inlink); ++ return FFERROR_NOT_READY; ++ ++eof: ++ pts = av_rescale_q(pts, inlink->time_base, outlink->time_base); ++ ff_outlink_set_status(outlink, status, pts); ++ return 0; ++} ++ ++static av_cold int rgavpp_init(AVFilterContext *ctx) ++{ ++ return 0; ++} ++ ++static av_cold void rgavpp_uninit(AVFilterContext *ctx) ++{ ++ ff_rkrga_close(ctx); ++} ++ ++#define OFFSET(x) offsetof(RGAVppContext, x) ++#define FLAGS (AV_OPT_FLAG_FILTERING_PARAM | AV_OPT_FLAG_VIDEO_PARAM) ++ ++#define RKRGA_VPP_COMMON_OPTS \ ++ { "force_yuv", "Enforce planar YUV format output", OFFSET(force_yuv), AV_OPT_TYPE_INT, { .i64 = FORCE_YUV_DISABLE }, 0, FORCE_YUV_NB - 1, FLAGS, "force_yuv" }, \ ++ { "disable", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = FORCE_YUV_DISABLE }, 0, 0, FLAGS, "force_yuv" }, \ ++ { "auto", "Match in/out bit depth", 0, AV_OPT_TYPE_CONST, { .i64 = FORCE_YUV_AUTO }, 0, 0, FLAGS, "force_yuv" }, \ ++ { "8bit", "8-bit", 0, AV_OPT_TYPE_CONST, { .i64 = FORCE_YUV_8BIT }, 0, 0, FLAGS, "force_yuv" }, \ ++ { "10bit", "10-bit uncompact/8-bit", 0, AV_OPT_TYPE_CONST, { .i64 = FORCE_YUV_10BIT }, 0, 0, FLAGS, "force_yuv" }, \ ++ { "force_chroma", "Enforce chroma of planar YUV format output", OFFSET(force_chroma), AV_OPT_TYPE_INT, { .i64 = FORCE_CHROMA_AUTO }, 0, FORCE_CHROMA_NB - 1, FLAGS, "force_chroma" }, \ ++ { "auto", "Match in/out chroma", 0, AV_OPT_TYPE_CONST, { .i64 = FORCE_CHROMA_AUTO }, 0, 0, FLAGS, "force_chroma" }, \ ++ { "420sp", "4:2:0 semi-planar", 0, AV_OPT_TYPE_CONST, { .i64 = FORCE_CHROMA_420SP }, 0, 0, FLAGS, "force_chroma" }, \ ++ { "420p", "4:2:0 fully-planar", 0, AV_OPT_TYPE_CONST, { .i64 = FORCE_CHROMA_420P }, 0, 0, FLAGS, "force_chroma" }, \ ++ { "422sp", "4:2:2 semi-planar", 0, AV_OPT_TYPE_CONST, { .i64 = FORCE_CHROMA_422SP }, 0, 0, FLAGS, "force_chroma" }, \ ++ { "422p", "4:2:2 fully-planar", 0, AV_OPT_TYPE_CONST, { .i64 = FORCE_CHROMA_422P }, 0, 0, FLAGS,
"force_chroma" }, \ ++ { "core", "Set multicore RGA scheduler core [use with caution]", OFFSET(rga.scheduler_core), AV_OPT_TYPE_FLAGS, { .i64 = 0 }, 0, INT_MAX, FLAGS, "core" }, \ ++ { "default", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = 0 }, 0, 0, FLAGS, "core" }, \ ++ { "rga3_core0", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = 1 }, 0, 0, FLAGS, "core" }, /* RGA3_SCHEDULER_CORE0 */ \ ++ { "rga3_core1", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = 2 }, 0, 0, FLAGS, "core" }, /* RGA3_SCHEDULER_CORE1 */ \ ++ { "rga2_core0", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = 4 }, 0, 0, FLAGS, "core" }, /* RGA2_SCHEDULER_CORE0 */ \ ++ { "async_depth", "Set the internal parallelization depth", OFFSET(rga.async_depth), AV_OPT_TYPE_INT, { .i64 = 2 }, 0, 4, .flags = FLAGS }, \ ++ { "afbc", "Enable AFBC (Arm Frame Buffer Compression) to save bandwidth", OFFSET(rga.afbc_out), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, .flags = FLAGS }, ++ ++static const AVFilterPad rgavpp_inputs[] = { ++ { ++ .name = "default", ++ .type = AVMEDIA_TYPE_VIDEO, ++ }, ++}; ++ ++static const AVFilterPad rgavpp_outputs[] = { ++ { ++ .name = "default", ++ .type = AVMEDIA_TYPE_VIDEO, ++ .config_props = rgavpp_config_props, ++ }, ++}; ++ ++#if CONFIG_SCALE_RKRGA_FILTER ++ ++static const AVOption rgascale_options[] = { ++ { "w", "Output video width", OFFSET(ow), AV_OPT_TYPE_STRING, { .str = "iw" }, 0, 0, FLAGS }, ++ { "h", "Output video height", OFFSET(oh), AV_OPT_TYPE_STRING, { .str = "ih" }, 0, 0, FLAGS }, ++ { "format", "Output video pixel format", OFFSET(format), AV_OPT_TYPE_PIXEL_FMT, { .i64 = AV_PIX_FMT_NONE }, INT_MIN, INT_MAX, .flags = FLAGS }, ++ { "force_original_aspect_ratio", "Decrease or increase w/h if necessary to keep the original AR", OFFSET(force_original_aspect_ratio), AV_OPT_TYPE_INT, { .i64 = 1 }, 0, 2, FLAGS, "force_oar" }, \ ++ { "disable", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = 0 }, 0, 0, FLAGS, "force_oar" }, \ ++ { "decrease", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = 1 }, 0, 0, FLAGS, "force_oar" }, \ ++ { "increase", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = 2 }, 0, 0, FLAGS, "force_oar" }, \ ++ { "force_divisible_by", "Enforce that the output resolution is divisible by a defined integer when force_original_aspect_ratio is used", OFFSET(force_divisible_by), AV_OPT_TYPE_INT, { .i64 = 2 }, 1, 256, FLAGS }, \ ++ RKRGA_VPP_COMMON_OPTS ++ { NULL }, ++}; ++ ++static av_cold int rgascale_preinit(AVFilterContext *ctx) ++{ ++ RGAVppContext *r = ctx->priv; ++ ++ r->transpose = -1; ++ return 0; ++} ++ ++AVFILTER_DEFINE_CLASS(rgascale); ++ ++const AVFilter ff_vf_scale_rkrga = { ++ .name = "scale_rkrga", ++ .description = NULL_IF_CONFIG_SMALL("Rockchip RGA (2D Raster Graphic Acceleration) video resizer and format converter"), ++ .priv_size = sizeof(RGAVppContext), ++ .priv_class = &rgascale_class, ++ .preinit = rgascale_preinit, ++ .init = rgavpp_init, ++ .uninit = rgavpp_uninit, ++ FILTER_INPUTS(rgavpp_inputs), ++ FILTER_OUTPUTS(rgavpp_outputs), ++ FILTER_SINGLE_PIXFMT(AV_PIX_FMT_DRM_PRIME), ++ .activate = rgavpp_activate, ++ .flags_internal = FF_FILTER_FLAG_HWFRAME_AWARE, ++}; ++ ++#endif ++ ++#if CONFIG_VPP_RKRGA_FILTER ++ ++static const AVOption rgavpp_options[] = { ++ { "w", "Output video width", OFFSET(ow), AV_OPT_TYPE_STRING, { .str = "cw" }, 0, 0, FLAGS }, ++ { "h", "Output video height", OFFSET(oh), AV_OPT_TYPE_STRING, { .str = "w*ch/cw" }, 0, 0, FLAGS }, ++ { "cw", "Set the width crop area expression", OFFSET(cw), AV_OPT_TYPE_STRING, { .str = "iw" }, 0, 0, FLAGS }, ++ { "ch", "Set the height crop area expression", OFFSET(ch), AV_OPT_TYPE_STRING, { 
.str = "ih" }, 0, 0, FLAGS }, ++ { "cx", "Set the x crop area expression", OFFSET(cx), AV_OPT_TYPE_STRING, { .str = "(in_w-out_w)/2" }, 0, 0, FLAGS }, ++ { "cy", "Set the y crop area expression", OFFSET(cy), AV_OPT_TYPE_STRING, { .str = "(in_h-out_h)/2" }, 0, 0, FLAGS }, ++ { "format", "Output video pixel format", OFFSET(format), AV_OPT_TYPE_PIXEL_FMT, { .i64 = AV_PIX_FMT_NONE }, INT_MIN, INT_MAX, .flags = FLAGS }, ++ { "transpose", "Set transpose direction", OFFSET(transpose), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, 6, FLAGS, "transpose" }, ++ { "cclock_hflip", "Rotate counter-clockwise with horizontal flip", 0, AV_OPT_TYPE_CONST, { .i64 = TRANSPOSE_CCLOCK_FLIP }, 0, 0, FLAGS, "transpose" }, ++ { "clock", "Rotate clockwise", 0, AV_OPT_TYPE_CONST, { .i64 = TRANSPOSE_CLOCK }, 0, 0, FLAGS, "transpose" }, ++ { "cclock", "Rotate counter-clockwise", 0, AV_OPT_TYPE_CONST, { .i64 = TRANSPOSE_CCLOCK }, 0, 0, FLAGS, "transpose" }, ++ { "clock_hflip", "Rotate clockwise with horizontal flip", 0, AV_OPT_TYPE_CONST, { .i64 = TRANSPOSE_CLOCK_FLIP }, 0, 0, FLAGS, "transpose" }, ++ { "reversal", "Rotate by half-turn", 0, AV_OPT_TYPE_CONST, { .i64 = TRANSPOSE_REVERSAL }, 0, 0, FLAGS, "transpose" }, ++ { "hflip", "Flip horizontally", 0, AV_OPT_TYPE_CONST, { .i64 = TRANSPOSE_HFLIP }, 0, 0, FLAGS, "transpose" }, ++ { "vflip", "Flip vertically", 0, AV_OPT_TYPE_CONST, { .i64 = TRANSPOSE_VFLIP }, 0, 0, FLAGS, "transpose" }, ++ RKRGA_VPP_COMMON_OPTS ++ { NULL }, ++}; ++ ++AVFILTER_DEFINE_CLASS(rgavpp); ++ ++const AVFilter ff_vf_vpp_rkrga = { ++ .name = "vpp_rkrga", ++ .description = NULL_IF_CONFIG_SMALL("Rockchip RGA (2D Raster Graphic Acceleration) video post-process (scale/crop/transpose)"), ++ .priv_size = sizeof(RGAVppContext), ++ .priv_class = &rgavpp_class, ++ .init = rgavpp_init, ++ .uninit = rgavpp_uninit, ++ FILTER_INPUTS(rgavpp_inputs), ++ FILTER_OUTPUTS(rgavpp_outputs), ++ FILTER_SINGLE_PIXFMT(AV_PIX_FMT_DRM_PRIME), ++ .activate = rgavpp_activate, ++ .flags_internal = FF_FILTER_FLAG_HWFRAME_AWARE, ++}; ++ ++#endif +Index: jellyfin-ffmpeg/libavutil/Makefile +=================================================================== +--- jellyfin-ffmpeg.orig/libavutil/Makefile ++++ jellyfin-ffmpeg/libavutil/Makefile +@@ -50,6 +50,7 @@ HEADERS = adler32.h + hwcontext_videotoolbox.h \ + hwcontext_vdpau.h \ + hwcontext_vulkan.h \ ++ hwcontext_rkmpp.h \ + imgutils.h \ + intfloat.h \ + intreadwrite.h \ +@@ -196,6 +197,7 @@ OBJS-$(CONFIG_VAAPI) + OBJS-$(CONFIG_VIDEOTOOLBOX) += hwcontext_videotoolbox.o + OBJS-$(CONFIG_VDPAU) += hwcontext_vdpau.o + OBJS-$(CONFIG_VULKAN) += hwcontext_vulkan.o ++OBJS-$(CONFIG_RKMPP) += hwcontext_rkmpp.o + + OBJS-$(!CONFIG_VULKAN) += hwcontext_stub.o + +@@ -218,6 +220,7 @@ SKIPHEADERS-$(CONFIG_VDPAU) + + SKIPHEADERS-$(CONFIG_VULKAN) += hwcontext_vulkan.h vulkan.h \ + vulkan_functions.h \ + vulkan_loader.h ++SKIPHEADERS-$(CONFIG_RKMPP) += hwcontext_rkmpp.h + + TESTPROGS = adler32 \ + aes \ +Index: jellyfin-ffmpeg/libavutil/hwcontext.c +=================================================================== +--- jellyfin-ffmpeg.orig/libavutil/hwcontext.c ++++ jellyfin-ffmpeg/libavutil/hwcontext.c +@@ -63,6 +63,9 @@ static const HWContextType * const hw_ta + #if CONFIG_VULKAN + &ff_hwcontext_type_vulkan, + #endif ++#if CONFIG_RKMPP ++ &ff_hwcontext_type_rkmpp, ++#endif + NULL, + }; + +@@ -78,6 +81,7 @@ static const char *const hw_type_names[] + [AV_HWDEVICE_TYPE_VIDEOTOOLBOX] = "videotoolbox", + [AV_HWDEVICE_TYPE_MEDIACODEC] = "mediacodec", + [AV_HWDEVICE_TYPE_VULKAN] = "vulkan", ++ 
[AV_HWDEVICE_TYPE_RKMPP] = "rkmpp", + }; + + enum AVHWDeviceType av_hwdevice_find_type_by_name(const char *name) +Index: jellyfin-ffmpeg/libavutil/hwcontext.h +=================================================================== +--- jellyfin-ffmpeg.orig/libavutil/hwcontext.h ++++ jellyfin-ffmpeg/libavutil/hwcontext.h +@@ -37,6 +37,7 @@ enum AVHWDeviceType { + AV_HWDEVICE_TYPE_OPENCL, + AV_HWDEVICE_TYPE_MEDIACODEC, + AV_HWDEVICE_TYPE_VULKAN, ++ AV_HWDEVICE_TYPE_RKMPP, + }; + + typedef struct AVHWDeviceInternal AVHWDeviceInternal; +Index: jellyfin-ffmpeg/libavutil/hwcontext_internal.h +=================================================================== +--- jellyfin-ffmpeg.orig/libavutil/hwcontext_internal.h ++++ jellyfin-ffmpeg/libavutil/hwcontext_internal.h +@@ -174,5 +174,6 @@ extern const HWContextType ff_hwcontext_ + extern const HWContextType ff_hwcontext_type_videotoolbox; + extern const HWContextType ff_hwcontext_type_mediacodec; + extern const HWContextType ff_hwcontext_type_vulkan; ++extern const HWContextType ff_hwcontext_type_rkmpp; + + #endif /* AVUTIL_HWCONTEXT_INTERNAL_H */ +Index: jellyfin-ffmpeg/libavutil/hwcontext_opencl.c +=================================================================== +--- jellyfin-ffmpeg.orig/libavutil/hwcontext_opencl.c ++++ jellyfin-ffmpeg/libavutil/hwcontext_opencl.c +@@ -83,6 +83,16 @@ typedef CL_API_ENTRY cl_mem(CL_API_CALL + #include <CL/cl_ext.h> + #include <drm_fourcc.h> + #include "hwcontext_drm.h" ++ ++typedef intptr_t cl_import_properties_arm; ++typedef CL_API_ENTRY cl_mem(CL_API_CALL *clImportMemoryARM_fn)( ++ cl_context context, ++ cl_mem_flags flags, ++ const cl_import_properties_arm *properties, ++ void *memory, ++ size_t size, ++ cl_int *errcode_ret); ++ + #endif + + #if HAVE_OPENCL_VAAPI_INTEL_MEDIA && CONFIG_LIBMFX +@@ -150,6 +160,9 @@ typedef struct OpenCLDeviceContext { + + #if HAVE_OPENCL_DRM_ARM + int drm_arm_mapping_usable; ++ ++ clImportMemoryARM_fn ++ clImportMemoryARM; + #endif + } OpenCLDeviceContext; + +@@ -927,7 +940,8 @@ static int opencl_device_init(AVHWDevice + fail = 1; + } + +- // clImportMemoryARM() is linked statically.
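++ // Resolve clImportMemoryARM at runtime via the CL_FUNC helper rather ++ // than relying on static linkage, so a missing symbol only disables ++ // the DRM-to-OpenCL mapping instead of breaking the build.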
++ CL_FUNC(clImportMemoryARM, ++ "DRM to OpenCL mapping on ARM"); + + if (fail) { + av_log(hwdev, AV_LOG_WARNING, "DRM to OpenCL mapping on ARM " +@@ -1404,6 +1418,7 @@ static int opencl_device_derive(AVHWDevi + + #if HAVE_OPENCL_DRM_ARM + case AV_HWDEVICE_TYPE_DRM: ++ case AV_HWDEVICE_TYPE_RKMPP: + { + OpenCLDeviceSelector selector = { + .platform_index = -1, +@@ -3193,6 +3208,7 @@ static int opencl_map_from_drm_arm(AVHWF + AVHWFramesContext *src_fc = + (AVHWFramesContext*)src->hw_frames_ctx->data; + AVOpenCLDeviceContext *dst_dev = dst_fc->device_ctx->hwctx; ++ OpenCLDeviceContext *device_priv = dst_fc->device_ctx->internal->priv; + const AVDRMFrameDescriptor *desc; + DRMARMtoOpenCLMapping *mapping = NULL; + cl_mem_flags cl_flags; +@@ -3226,8 +3242,8 @@ static int opencl_map_from_drm_arm(AVHWF + } + + mapping->object_buffers[i] = +- clImportMemoryARM(dst_dev->context, cl_flags, props, +- &fd, desc->objects[i].size, &cle); ++ device_priv->clImportMemoryARM(dst_dev->context, cl_flags, props, ++ &fd, desc->objects[i].size, &cle); + if (!mapping->object_buffers[i]) { + av_log(dst_fc, AV_LOG_ERROR, "Failed to create CL buffer " + "from object %d (fd %d, size %"SIZE_SPECIFIER") of DRM frame: %d.\n", +@@ -3258,6 +3274,8 @@ static int opencl_map_from_drm_arm(AVHWF + goto fail; + } + ++ image_desc.image_row_pitch = plane->pitch; ++ + region.origin = plane->offset; + region.size = image_desc.image_row_pitch * + image_desc.image_height; +Index: jellyfin-ffmpeg/libavutil/hwcontext_rkmpp.c +=================================================================== +--- /dev/null ++++ jellyfin-ffmpeg/libavutil/hwcontext_rkmpp.c +@@ -0,0 +1,581 @@ ++/* ++ * This file is part of FFmpeg. ++ * ++ * FFmpeg is free software; you can redistribute it and/or ++ * modify it under the terms of the GNU Lesser General Public ++ * License as published by the Free Software Foundation; either ++ * version 2.1 of the License, or (at your option) any later version. ++ * ++ * FFmpeg is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * Lesser General Public License for more details. ++ * ++ * You should have received a copy of the GNU Lesser General Public ++ * License along with FFmpeg; if not, write to the Free Software ++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA ++ */ ++ ++#include "config.h" ++ ++#define _GNU_SOURCE ++#include <fcntl.h> ++#include <sys/mman.h> ++#include <unistd.h> ++ ++/* This was introduced in version 4.6. And may not exist all without an ++ * optional package. So to prevent a hard dependency on needing the Linux ++ * kernel headers to compile, make this optional.
*/ ++#if HAVE_LINUX_DMA_BUF_H ++#include <linux/dma-buf.h> ++#include <sys/ioctl.h> ++#endif ++ ++#include "avassert.h" ++#include "hwcontext.h" ++#include "hwcontext_rkmpp.h" ++#include "hwcontext_internal.h" ++#include "imgutils.h" ++ ++static const struct { ++ enum AVPixelFormat pixfmt; ++ uint32_t drm_format; ++} supported_formats[] = { ++ /* grayscale */ ++ { AV_PIX_FMT_GRAY8, DRM_FORMAT_R8 }, ++ /* planar YUV */ ++ { AV_PIX_FMT_YUV420P, DRM_FORMAT_YUV420, }, ++ { AV_PIX_FMT_YUV422P, DRM_FORMAT_YUV422, }, ++ { AV_PIX_FMT_YUV444P, DRM_FORMAT_YUV444, }, ++ /* semi-planar YUV */ ++ { AV_PIX_FMT_NV12, DRM_FORMAT_NV12, }, ++ { AV_PIX_FMT_NV21, DRM_FORMAT_NV21, }, ++ { AV_PIX_FMT_NV16, DRM_FORMAT_NV16, }, ++ { AV_PIX_FMT_NV24, DRM_FORMAT_NV24, }, ++ /* semi-planar YUV 10-bit */ ++ { AV_PIX_FMT_P010, DRM_FORMAT_P010, }, ++ { AV_PIX_FMT_P210, DRM_FORMAT_P210, }, ++ { AV_PIX_FMT_NV15, DRM_FORMAT_NV15, }, ++ { AV_PIX_FMT_NV20, DRM_FORMAT_NV20, }, ++ /* packed YUV */ ++ { AV_PIX_FMT_YUYV422, DRM_FORMAT_YUYV, }, ++ { AV_PIX_FMT_YVYU422, DRM_FORMAT_YVYU, }, ++ { AV_PIX_FMT_UYVY422, DRM_FORMAT_UYVY, }, ++ /* packed RGB */ ++ { AV_PIX_FMT_RGB555LE, DRM_FORMAT_XRGB1555, }, ++ { AV_PIX_FMT_BGR555LE, DRM_FORMAT_XBGR1555, }, ++ { AV_PIX_FMT_RGB565LE, DRM_FORMAT_RGB565, }, ++ { AV_PIX_FMT_BGR565LE, DRM_FORMAT_BGR565, }, ++ { AV_PIX_FMT_RGB24, DRM_FORMAT_RGB888, }, ++ { AV_PIX_FMT_BGR24, DRM_FORMAT_BGR888, }, ++ { AV_PIX_FMT_RGBA, DRM_FORMAT_ABGR8888, }, ++ { AV_PIX_FMT_RGB0, DRM_FORMAT_XBGR8888, }, ++ { AV_PIX_FMT_BGRA, DRM_FORMAT_ARGB8888, }, ++ { AV_PIX_FMT_BGR0, DRM_FORMAT_XRGB8888, }, ++ { AV_PIX_FMT_ARGB, DRM_FORMAT_BGRA8888, }, ++ { AV_PIX_FMT_0RGB, DRM_FORMAT_BGRX8888, }, ++ { AV_PIX_FMT_ABGR, DRM_FORMAT_RGBA8888, }, ++ { AV_PIX_FMT_0BGR, DRM_FORMAT_RGBX8888, }, ++}; ++ ++static int rkmpp_device_create(AVHWDeviceContext *hwdev, const char *device, ++ AVDictionary *opts, int flags) ++{ ++ AVRKMPPDeviceContext *hwctx = hwdev->hwctx; ++ AVDictionaryEntry *opt_d = NULL; ++ ++ hwctx->flags = MPP_BUFFER_FLAGS_DMA32 | MPP_BUFFER_FLAGS_CACHABLE; ++ ++ opt_d = av_dict_get(opts, "dma32", NULL, 0); ++ if (opt_d && !strtol(opt_d->value, NULL, 10)) ++ hwctx->flags &= ~MPP_BUFFER_FLAGS_DMA32; ++ ++ opt_d = av_dict_get(opts, "cacheable", NULL, 0); ++ if (opt_d && !strtol(opt_d->value, NULL, 10)) ++ hwctx->flags &= ~MPP_BUFFER_FLAGS_CACHABLE; ++ ++ return 0; ++} ++ ++static int rkmpp_frames_get_constraints(AVHWDeviceContext *hwdev, ++ const void *hwconfig, ++ AVHWFramesConstraints *constraints) ++{ ++ int i; ++ ++ constraints->min_width = 16; ++ constraints->min_height = 16; ++ ++ constraints->valid_hw_formats = ++ av_malloc_array(2, sizeof(enum AVPixelFormat)); ++ if (!constraints->valid_hw_formats) ++ return AVERROR(ENOMEM); ++ constraints->valid_hw_formats[0] = AV_PIX_FMT_DRM_PRIME; ++ constraints->valid_hw_formats[1] = AV_PIX_FMT_NONE; ++ ++ constraints->valid_sw_formats = ++ av_malloc_array(FF_ARRAY_ELEMS(supported_formats) + 1, ++ sizeof(enum AVPixelFormat)); ++ if (!constraints->valid_sw_formats) ++ return AVERROR(ENOMEM); ++ for(i = 0; i < FF_ARRAY_ELEMS(supported_formats); i++) ++ constraints->valid_sw_formats[i] = supported_formats[i].pixfmt; ++ constraints->valid_sw_formats[i] = AV_PIX_FMT_NONE; ++ ++ return 0; ++} ++ ++static void rkmpp_free_drm_frame_descriptor(void *opaque, uint8_t *data) ++{ ++ ++ MppBuffer mpp_buf = opaque; ++ AVRKMPPDRMFrameDescriptor *desc = (AVRKMPPDRMFrameDescriptor *)data; ++ int ret; ++ ++ if (!desc) ++ return; ++ ++ if (mpp_buf) { ++ ret = mpp_buffer_put(mpp_buf); ++ if (ret != MPP_OK) ++ av_log(NULL,
AV_LOG_WARNING, ++ "Failed to put MPP buffer: %d\n", ret); ++ } ++ ++ memset(desc, 0, sizeof(*desc)); ++ av_free(desc); ++} ++ ++static int rkmpp_get_aligned_linesize(enum AVPixelFormat pix_fmt, int width, int plane) ++{ ++ const AVPixFmtDescriptor *pixdesc = av_pix_fmt_desc_get(pix_fmt); ++ const int is_rgb = pixdesc->flags & AV_PIX_FMT_FLAG_RGB; ++ const int is_planar = pixdesc->flags & AV_PIX_FMT_FLAG_PLANAR; ++ const int is_packed_fmt = is_rgb || (!is_rgb && !is_planar); ++ int linesize; ++ ++ if (pix_fmt == AV_PIX_FMT_NV15 || ++ pix_fmt == AV_PIX_FMT_NV20) { ++ const int log2_chroma_w = plane == 1 ? 1 : 0; ++ const int width_align_256_odds = FFALIGN(width << log2_chroma_w, 256) | 256; ++ return FFALIGN(width_align_256_odds * 10 / 8, 64); ++ } ++ ++ linesize = av_image_get_linesize(pix_fmt, width, plane); ++ ++ if (is_packed_fmt) { ++ const int pixel_width = av_get_padded_bits_per_pixel(pixdesc) / 8; ++ linesize = FFALIGN(linesize / pixel_width, 8) * pixel_width; ++ } else ++ linesize = FFALIGN(linesize, 64); ++ ++ return linesize; ++} ++ ++static AVBufferRef *rkmpp_drm_pool_alloc(void *opaque, size_t size) ++{ ++ int ret; ++ AVHWFramesContext *hwfc = opaque; ++ AVRKMPPFramesContext *avfc = hwfc->hwctx; ++ AVRKMPPDeviceContext *hwctx = hwfc->device_ctx->hwctx; ++ AVRKMPPDRMFrameDescriptor *desc; ++ AVDRMLayerDescriptor *layer; ++ AVBufferRef *ref; ++ ++ int i; ++ const AVPixFmtDescriptor *pixdesc = av_pix_fmt_desc_get(hwfc->sw_format); ++ const int bits_pp = av_get_padded_bits_per_pixel(pixdesc); ++ const int aligned_w = FFALIGN(hwfc->width * 5 / 4, 64); ++ const int aligned_h = FFALIGN(hwfc->height * 5 / 4, 64); ++ ++ MppBuffer mpp_buf = NULL; ++ size_t mpp_buf_size = aligned_w * aligned_h * bits_pp / 8; ++ ++ if (hwfc->initial_pool_size > 0 && ++ avfc->nb_frames >= hwfc->initial_pool_size) ++ return NULL; ++ ++ desc = av_mallocz(sizeof(*desc)); ++ if (!desc) ++ return NULL; ++ ++ desc->drm_desc.nb_objects = 1; ++ desc->drm_desc.nb_layers = 1; ++ ++ ret = mpp_buffer_get(avfc->buf_group, &mpp_buf, mpp_buf_size); ++ if (ret != MPP_OK || !mpp_buf) { ++ av_log(hwctx, AV_LOG_ERROR, "Failed to get MPP buffer: %d\n", ret); ++ ret = AVERROR(ENOMEM); ++ goto fail; ++ } ++ desc->buffers[0] = mpp_buf; ++ ++ desc->drm_desc.objects[0].fd = mpp_buffer_get_fd(mpp_buf); ++ desc->drm_desc.objects[0].size = mpp_buffer_get_size(mpp_buf); ++ ++ layer = &desc->drm_desc.layers[0]; ++ for (i = 0; i < FF_ARRAY_ELEMS(supported_formats); i++) { ++ if (supported_formats[i].pixfmt == hwfc->sw_format) { ++ layer->format = supported_formats[i].drm_format; ++ break; ++ } ++ } ++ layer->nb_planes = av_pix_fmt_count_planes(hwfc->sw_format); ++ layer->planes[0].object_index = 0; ++ layer->planes[0].offset = 0; ++ layer->planes[0].pitch = ++ rkmpp_get_aligned_linesize(hwfc->sw_format, hwfc->width, 0); ++ ++ for (i = 1; i < layer->nb_planes; i++) { ++ layer->planes[i].object_index = 0; ++ layer->planes[i].offset = ++ layer->planes[i-1].offset + ++ layer->planes[i-1].pitch * (hwfc->height >> (i > 1 ? 
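/* Plane 1 of a semi-planar format starts right after the full-height luma plane; only planes >= 2 are reduced by log2_chroma_h here. E.g. NV12 at 1920x1080: pitch FFALIGN(1920, 64) = 1920, so UV begins at offset 1920*1080. */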
pixdesc->log2_chroma_h : 0)); ++ layer->planes[i].pitch = ++ rkmpp_get_aligned_linesize(hwfc->sw_format, hwfc->width, i); ++ } ++ ++ ref = av_buffer_create((uint8_t*)desc, sizeof(*desc), rkmpp_free_drm_frame_descriptor, ++ mpp_buf, 0); ++ if (!ref) { ++ av_log(hwfc, AV_LOG_ERROR, "Failed to create RKMPP buffer.\n"); ++ goto fail; ++ } ++ ++ if (hwfc->initial_pool_size > 0) { ++ av_assert0(avfc->nb_frames < hwfc->initial_pool_size); ++ memcpy(&avfc->frames[avfc->nb_frames], desc, sizeof(*desc)); ++ ++avfc->nb_frames; ++ } ++ ++ return ref; ++ ++fail: ++ rkmpp_free_drm_frame_descriptor(mpp_buf, (uint8_t *)desc); ++ return NULL; ++} ++ ++static int rkmpp_frames_init(AVHWFramesContext *hwfc) ++{ ++ AVRKMPPFramesContext *avfc = hwfc->hwctx; ++ AVRKMPPDeviceContext *hwctx = hwfc->device_ctx->hwctx; ++ int i, ret; ++ ++ if (hwfc->pool) ++ return 0; ++ ++ for (i = 0; i < FF_ARRAY_ELEMS(supported_formats); i++) ++ if (supported_formats[i].pixfmt == hwfc->sw_format) ++ break; ++ if (i >= FF_ARRAY_ELEMS(supported_formats)) { ++ av_log(hwfc, AV_LOG_ERROR, "Unsupported format: %s.\n", ++ av_get_pix_fmt_name(hwfc->sw_format)); ++ return AVERROR(EINVAL); ++ } ++ ++ avfc->nb_frames = 0; ++ avfc->frames = NULL; ++ if (hwfc->initial_pool_size > 0) { ++ avfc->frames = av_malloc(hwfc->initial_pool_size * ++ sizeof(*avfc->frames)); ++ if (!avfc->frames) ++ return AVERROR(ENOMEM); ++ } ++ ++ ret = mpp_buffer_group_get_internal(&avfc->buf_group, MPP_BUFFER_TYPE_DRM | hwctx->flags); ++ if (ret != MPP_OK) { ++ av_log(hwfc, AV_LOG_ERROR, "Failed to get MPP internal buffer group: %d\n", ret); ++ return AVERROR_EXTERNAL; ++ } ++ ++ hwfc->internal->pool_internal = ++ av_buffer_pool_init2(sizeof(AVRKMPPDRMFrameDescriptor), hwfc, ++ rkmpp_drm_pool_alloc, NULL); ++ if (!hwfc->internal->pool_internal) { ++ av_log(hwfc, AV_LOG_ERROR, "Failed to create RKMPP buffer pool.\n"); ++ return AVERROR(ENOMEM); ++ } ++ ++ return 0; ++} ++ ++static void rkmpp_frames_uninit(AVHWFramesContext *hwfc) ++{ ++ AVRKMPPFramesContext *avfc = hwfc->hwctx; ++ ++ av_freep(&avfc->frames); ++ ++ if (avfc->buf_group) { ++ mpp_buffer_group_put(avfc->buf_group); ++ avfc->buf_group = NULL; ++ } ++} ++ ++static int rkmpp_get_buffer(AVHWFramesContext *hwfc, AVFrame *frame) ++{ ++ frame->buf[0] = av_buffer_pool_get(hwfc->pool); ++ if (!frame->buf[0]) ++ return AVERROR(ENOMEM); ++ ++ frame->data[0] = (uint8_t*)frame->buf[0]->data; ++ ++ frame->format = AV_PIX_FMT_DRM_PRIME; ++ frame->width = hwfc->width; ++ frame->height = hwfc->height; ++ ++ return 0; ++} ++ ++typedef struct RKMPPDRMMapping { ++ // Address and length of each mmap()ed region. 
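++ // sync_flags holds the DMA_BUF_SYNC_READ/WRITE direction used by the ++ // begin/end-of-CPU-access ioctls below; unmap[] marks regions that were ++ // mmap()ed here rather than obtained from mpp_buffer_get_ptr().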
++ int nb_regions; ++ int sync_flags; ++ int object[AV_DRM_MAX_PLANES]; ++ void *address[AV_DRM_MAX_PLANES]; ++ size_t length[AV_DRM_MAX_PLANES]; ++ int unmap[AV_DRM_MAX_PLANES]; ++} RKMPPDRMMapping; ++ ++static void rkmpp_unmap_frame(AVHWFramesContext *hwfc, ++ HWMapDescriptor *hwmap) ++{ ++ AVRKMPPDeviceContext *hwctx = hwfc->device_ctx->hwctx; ++ RKMPPDRMMapping *map = hwmap->priv; ++ ++ for (int i = 0; i < map->nb_regions; i++) { ++#if HAVE_LINUX_DMA_BUF_H ++ struct dma_buf_sync sync = { .flags = DMA_BUF_SYNC_END | map->sync_flags }; ++ if (hwctx->flags & MPP_BUFFER_FLAGS_CACHABLE) ++ ioctl(map->object[i], DMA_BUF_IOCTL_SYNC, &sync); ++#endif ++ if (map->address[i] && map->unmap[i]) ++ munmap(map->address[i], map->length[i]); ++ } ++ ++ av_free(map); ++} ++ ++static int rkmpp_map_frame(AVHWFramesContext *hwfc, ++ AVFrame *dst, const AVFrame *src, int flags) ++{ ++ AVRKMPPDeviceContext *hwctx = hwfc->device_ctx->hwctx; ++ const AVRKMPPDRMFrameDescriptor *desc = (AVRKMPPDRMFrameDescriptor *)src->data[0]; ++#if HAVE_LINUX_DMA_BUF_H ++ struct dma_buf_sync sync_start = { 0 }; ++#endif ++ RKMPPDRMMapping *map; ++ int err, i, p, plane; ++ int mmap_prot; ++ void *addr; ++ ++ map = av_mallocz(sizeof(*map)); ++ if (!map) ++ return AVERROR(ENOMEM); ++ ++ mmap_prot = 0; ++ if (flags & AV_HWFRAME_MAP_READ) ++ mmap_prot |= PROT_READ; ++ if (flags & AV_HWFRAME_MAP_WRITE) ++ mmap_prot |= PROT_WRITE; ++ ++#if HAVE_LINUX_DMA_BUF_H ++ if (flags & AV_HWFRAME_MAP_READ) ++ map->sync_flags |= DMA_BUF_SYNC_READ; ++ if (flags & AV_HWFRAME_MAP_WRITE) ++ map->sync_flags |= DMA_BUF_SYNC_WRITE; ++ sync_start.flags = DMA_BUF_SYNC_START | map->sync_flags; ++#endif ++ ++ if (desc->drm_desc.objects[0].format_modifier != DRM_FORMAT_MOD_LINEAR) { ++ av_log(hwfc, AV_LOG_ERROR, "Transfer non-linear DRM_PRIME frame is not supported!\n"); ++ return AVERROR(ENOSYS); ++ } ++ ++ av_assert0(desc->drm_desc.nb_objects <= AV_DRM_MAX_PLANES); ++ for (i = 0; i < desc->drm_desc.nb_objects; i++) { ++ addr = NULL; ++ if (desc->buffers[i]) ++ addr = mpp_buffer_get_ptr(desc->buffers[i]); ++ ++ if (addr) { ++ map->unmap[i] = 0; ++ } else { ++ addr = mmap(NULL, desc->drm_desc.objects[i].size, mmap_prot, MAP_SHARED, ++ desc->drm_desc.objects[i].fd, 0); ++ if (addr == MAP_FAILED) { ++ err = AVERROR(errno); ++ av_log(hwfc, AV_LOG_ERROR, "Failed to map RKMPP object %d to " ++ "memory: %d.\n", desc->drm_desc.objects[i].fd, errno); ++ goto fail; ++ } ++ map->unmap[i] = 1; ++ } ++ ++ map->address[i] = addr; ++ map->length[i] = desc->drm_desc.objects[i].size; ++ map->object[i] = desc->drm_desc.objects[i].fd; ++ ++#if HAVE_LINUX_DMA_BUF_H ++ /* We're not checking for errors here because the kernel may not ++ * support the ioctl, in which case its okay to carry on */ ++ if (hwctx->flags & MPP_BUFFER_FLAGS_CACHABLE) ++ ioctl(desc->drm_desc.objects[i].fd, DMA_BUF_IOCTL_SYNC, &sync_start); ++#endif ++ } ++ map->nb_regions = i; ++ ++ plane = 0; ++ for (i = 0; i < desc->drm_desc.nb_layers; i++) { ++ const AVDRMLayerDescriptor *layer = &desc->drm_desc.layers[i]; ++ for (p = 0; p < layer->nb_planes; p++) { ++ dst->data[plane] = ++ (uint8_t*)map->address[layer->planes[p].object_index] + ++ layer->planes[p].offset; ++ dst->linesize[plane] = layer->planes[p].pitch; ++ ++plane; ++ } ++ } ++ av_assert0(plane <= AV_DRM_MAX_PLANES); ++ ++ dst->width = src->width; ++ dst->height = src->height; ++ ++ err = ff_hwframe_map_create(src->hw_frames_ctx, dst, src, ++ &rkmpp_unmap_frame, map); ++ if (err < 0) ++ goto fail; ++ ++ return 0; ++ ++fail: ++ for (i = 0; i < 
desc->drm_desc.nb_objects; i++) { ++ if (map->address[i] && map->unmap[i]) ++ munmap(map->address[i], map->length[i]); ++ } ++ av_free(map); ++ return err; ++} ++ ++static int rkmpp_transfer_get_formats(AVHWFramesContext *ctx, ++ enum AVHWFrameTransferDirection dir, ++ enum AVPixelFormat **formats) ++{ ++ enum AVPixelFormat *pix_fmts; ++ ++ pix_fmts = av_malloc_array(2, sizeof(*pix_fmts)); ++ if (!pix_fmts) ++ return AVERROR(ENOMEM); ++ ++ pix_fmts[0] = ctx->sw_format; ++ pix_fmts[1] = AV_PIX_FMT_NONE; ++ ++ *formats = pix_fmts; ++ return 0; ++} ++ ++static int rkmpp_transfer_data_from(AVHWFramesContext *hwfc, ++ AVFrame *dst, const AVFrame *src) ++{ ++ AVFrame *map; ++ int err; ++ ++ if (dst->width > hwfc->width || dst->height > hwfc->height) ++ return AVERROR(EINVAL); ++ ++ map = av_frame_alloc(); ++ if (!map) ++ return AVERROR(ENOMEM); ++ map->format = dst->format; ++ ++ err = rkmpp_map_frame(hwfc, map, src, AV_HWFRAME_MAP_READ); ++ if (err) ++ goto fail; ++ ++ map->width = dst->width; ++ map->height = dst->height; ++ ++ err = av_frame_copy(dst, map); ++ if (err) ++ goto fail; ++ ++ err = 0; ++fail: ++ av_frame_free(&map); ++ return err; ++} ++ ++static int rkmpp_transfer_data_to(AVHWFramesContext *hwfc, ++ AVFrame *dst, const AVFrame *src) ++{ ++ AVFrame *map; ++ int err; ++ ++ if (src->width > hwfc->width || src->height > hwfc->height) ++ return AVERROR(EINVAL); ++ ++ map = av_frame_alloc(); ++ if (!map) ++ return AVERROR(ENOMEM); ++ map->format = src->format; ++ ++ err = rkmpp_map_frame(hwfc, map, dst, AV_HWFRAME_MAP_WRITE | ++ AV_HWFRAME_MAP_OVERWRITE); ++ if (err) ++ goto fail; ++ ++ map->width = src->width; ++ map->height = src->height; ++ ++ err = av_frame_copy(map, src); ++ if (err) ++ goto fail; ++ ++ err = 0; ++fail: ++ av_frame_free(&map); ++ return err; ++} ++ ++static int rkmpp_map_from(AVHWFramesContext *hwfc, AVFrame *dst, ++ const AVFrame *src, int flags) ++{ ++ int err; ++ ++ if (hwfc->sw_format != dst->format) ++ return AVERROR(ENOSYS); ++ ++ err = rkmpp_map_frame(hwfc, dst, src, flags); ++ if (err) ++ return err; ++ ++ err = av_frame_copy_props(dst, src); ++ if (err) ++ return err; ++ ++ return 0; ++} ++ ++const HWContextType ff_hwcontext_type_rkmpp = { ++ .type = AV_HWDEVICE_TYPE_RKMPP, ++ .name = "RKMPP", ++ ++ .device_hwctx_size = sizeof(AVRKMPPDeviceContext), ++ .frames_hwctx_size = sizeof(AVRKMPPFramesContext), ++ ++ .device_create = &rkmpp_device_create, ++ ++ .frames_get_constraints = &rkmpp_frames_get_constraints, ++ ++ .frames_get_buffer = &rkmpp_get_buffer, ++ .frames_init = &rkmpp_frames_init, ++ .frames_uninit = &rkmpp_frames_uninit, ++ .transfer_get_formats = &rkmpp_transfer_get_formats, ++ .transfer_data_to = &rkmpp_transfer_data_to, ++ .transfer_data_from = &rkmpp_transfer_data_from, ++ .map_from = &rkmpp_map_from, ++ ++ .pix_fmts = (const enum AVPixelFormat[]) { ++ AV_PIX_FMT_DRM_PRIME, ++ AV_PIX_FMT_NONE ++ }, ++}; +Index: jellyfin-ffmpeg/libavutil/hwcontext_rkmpp.h +=================================================================== +--- /dev/null ++++ jellyfin-ffmpeg/libavutil/hwcontext_rkmpp.h +@@ -0,0 +1,110 @@ ++/* ++ * This file is part of FFmpeg. ++ * ++ * FFmpeg is free software; you can redistribute it and/or ++ * modify it under the terms of the GNU Lesser General Public ++ * License as published by the Free Software Foundation; either ++ * version 2.1 of the License, or (at your option) any later version. 
++ * ++ * FFmpeg is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * Lesser General Public License for more details. ++ * ++ * You should have received a copy of the GNU Lesser General Public ++ * License along with FFmpeg; if not, write to the Free Software ++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA ++ */ ++ ++#ifndef AVUTIL_HWCONTEXT_RKMPP_H ++#define AVUTIL_HWCONTEXT_RKMPP_H ++ ++#include <stddef.h> ++#include <stdint.h> ++#include <drm_fourcc.h> ++#include <rockchip/rk_mpi.h> ++ ++#include "hwcontext_drm.h" ++ ++#ifndef DRM_FORMAT_P010 ++#define DRM_FORMAT_P010 fourcc_code('P', '0', '1', '0') ++#endif ++#ifndef DRM_FORMAT_P210 ++#define DRM_FORMAT_P210 fourcc_code('P', '2', '1', '0') ++#endif ++#ifndef DRM_FORMAT_NV15 ++#define DRM_FORMAT_NV15 fourcc_code('N', 'V', '1', '5') ++#endif ++#ifndef DRM_FORMAT_NV20 ++#define DRM_FORMAT_NV20 fourcc_code('N', 'V', '2', '0') ++#endif ++#ifndef DRM_FORMAT_YUV420_8BIT ++#define DRM_FORMAT_YUV420_8BIT fourcc_code('Y', 'U', '0', '8') ++#endif ++#ifndef DRM_FORMAT_YUV420_10BIT ++#define DRM_FORMAT_YUV420_10BIT fourcc_code('Y', 'U', '1', '0') ++#endif ++#ifndef DRM_FORMAT_Y210 ++#define DRM_FORMAT_Y210 fourcc_code('Y', '2', '1', '0') ++#endif ++ ++#ifndef DRM_FORMAT_MOD_VENDOR_ARM ++#define DRM_FORMAT_MOD_VENDOR_ARM 0x08 ++#endif ++#ifndef DRM_FORMAT_MOD_ARM_TYPE_AFBC ++#define DRM_FORMAT_MOD_ARM_TYPE_AFBC 0x00 ++#endif ++ ++#define drm_is_afbc(mod) \ ++ ((mod >> 52) == (DRM_FORMAT_MOD_ARM_TYPE_AFBC | \ ++ (DRM_FORMAT_MOD_VENDOR_ARM << 4))) ++ ++/** ++ * DRM Prime Frame descriptor for RKMPP HWDevice. ++ */ ++typedef struct AVRKMPPDRMFrameDescriptor { ++ /** ++ * Backwards compatibility with AVDRMFrameDescriptor. ++ */ ++ AVDRMFrameDescriptor drm_desc; ++ ++ /** ++ * References to the MppBuffer instances backing each DRM object index. ++ */ ++ MppBuffer buffers[AV_DRM_MAX_PLANES]; ++} AVRKMPPDRMFrameDescriptor; ++ ++/** ++ * RKMPP-specific data associated with a frame pool. ++ * ++ * Allocated as AVHWFramesContext.hwctx. ++ */ ++typedef struct AVRKMPPFramesContext { ++ /** ++ * MPP buffer group. ++ */ ++ MppBufferGroup buf_group; ++ ++ /** ++ * The descriptors of all frames in the pool after creation. ++ * Only valid if AVHWFramesContext.initial_pool_size was positive. ++ * These are intended to be used as the buffers of the RKMPP decoder. ++ */ ++ AVRKMPPDRMFrameDescriptor *frames; ++ int nb_frames; ++} AVRKMPPFramesContext; ++ ++/** ++ * RKMPP device details. ++ * ++ * Allocated as AVHWDeviceContext.hwctx. ++ */ ++typedef struct AVRKMPPDeviceContext { ++ /** ++ * MPP buffer allocation flags.
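++ * Defaults to MPP_BUFFER_FLAGS_DMA32 | MPP_BUFFER_FLAGS_CACHABLE; ++ * the "dma32" and "cacheable" device creation options clear the ++ * corresponding flag when set to 0.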
+Index: jellyfin-ffmpeg/libavutil/pixdesc.c
+===================================================================
+--- jellyfin-ffmpeg.orig/libavutil/pixdesc.c
++++ jellyfin-ffmpeg/libavutil/pixdesc.c
+@@ -2717,6 +2717,30 @@ static const AVPixFmtDescriptor av_pix_f
+         .flags = AV_PIX_FMT_FLAG_RGB | AV_PIX_FMT_FLAG_FLOAT |
+                  AV_PIX_FMT_FLAG_ALPHA,
+     },
++    [AV_PIX_FMT_NV15] = {
++        .name = "nv15",
++        .nb_components = 3,
++        .log2_chroma_w = 1,
++        .log2_chroma_h = 1,
++        .comp = {
++            { 0, 10, 0, 0, 10 },        /* Y */
++            { 1, 20, 0, 0, 10 },        /* U */
++            { 1, 20, 10, 0, 10 },       /* V */
++        },
++        .flags = AV_PIX_FMT_FLAG_PLANAR | AV_PIX_FMT_FLAG_BITSTREAM,
++    },
++    [AV_PIX_FMT_NV20] = {
++        .name = "nv20",
++        .nb_components = 3,
++        .log2_chroma_w = 1,
++        .log2_chroma_h = 0,
++        .comp = {
++            { 0, 10, 0, 0, 10 },        /* Y */
++            { 1, 20, 0, 0, 10 },        /* U */
++            { 1, 20, 10, 0, 10 },       /* V */
++        },
++        .flags = AV_PIX_FMT_FLAG_PLANAR | AV_PIX_FMT_FLAG_BITSTREAM,
++    },
+ };
+ 
+ static const char * const color_range_names[] = {
+Index: jellyfin-ffmpeg/libavutil/pixfmt.h
+===================================================================
+--- jellyfin-ffmpeg.orig/libavutil/pixfmt.h
++++ jellyfin-ffmpeg/libavutil/pixfmt.h
+@@ -189,8 +189,8 @@ enum AVPixelFormat {
+     AV_PIX_FMT_XYZ12LE,      ///< packed XYZ 4:4:4, 36 bpp, (msb) 12X, 12Y, 12Z (lsb), the 2-byte value for each X/Y/Z is stored as little-endian, the 4 lower bits are set to 0
+     AV_PIX_FMT_XYZ12BE,      ///< packed XYZ 4:4:4, 36 bpp, (msb) 12X, 12Y, 12Z (lsb), the 2-byte value for each X/Y/Z is stored as big-endian, the 4 lower bits are set to 0
+     AV_PIX_FMT_NV16,         ///< interleaved chroma YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
+-    AV_PIX_FMT_NV20LE,       ///< interleaved chroma YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian
+-    AV_PIX_FMT_NV20BE,       ///< interleaved chroma YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian
++    AV_PIX_FMT_NV20LE,       ///< interleaved chroma YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian, deprecated in favor of AV_PIX_FMT_NV20
++    AV_PIX_FMT_NV20BE,       ///< interleaved chroma YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian, deprecated in favor of AV_PIX_FMT_NV20
+ 
+     AV_PIX_FMT_RGBA64BE,     ///< packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is stored as big-endian
+     AV_PIX_FMT_RGBA64LE,     ///< packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is stored as little-endian
+@@ -420,6 +420,9 @@ enum AVPixelFormat {
+     AV_PIX_FMT_RGBAF32BE,    ///< IEEE-754 single precision packed RGBA 32:32:32:32, 128bpp, RGBARGBA..., big-endian
+     AV_PIX_FMT_RGBAF32LE,    ///< IEEE-754 single precision packed RGBA 32:32:32:32, 128bpp, RGBARGBA..., little-endian
+ 
++    AV_PIX_FMT_NV15,         ///< like P010, but has no zero padding bits, 15bpp, bitstream
++    AV_PIX_FMT_NV20,         ///< like P210, but has no zero padding bits, 20bpp, bitstream
++
+     AV_PIX_FMT_NB            ///< number of pixel formats, DO NOT USE THIS if you want to link with shared libav* because the number of formats might differ between versions
+ };
+ 
+@@ -503,7 +506,6 @@ enum AVPixelFormat {
+ #define AV_PIX_FMT_YUVA444P16 AV_PIX_FMT_NE(YUVA444P16BE, YUVA444P16LE)
+ 
+ #define AV_PIX_FMT_XYZ12      AV_PIX_FMT_NE(XYZ12BE, XYZ12LE)
+-#define AV_PIX_FMT_NV20       AV_PIX_FMT_NE(NV20BE, NV20LE)
+ #define AV_PIX_FMT_AYUV64     AV_PIX_FMT_NE(AYUV64BE, AYUV64LE)
+ #define AV_PIX_FMT_P010       AV_PIX_FMT_NE(P010BE, P010LE)
+ #define AV_PIX_FMT_P012       AV_PIX_FMT_NE(P012BE, P012LE)
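Since both new descriptors carry AV_PIX_FMT_FLAG_BITSTREAM, the comp[] fields {plane, step, offset, shift, depth} are measured in bits rather than bytes: luma advances 10 bits per sample, the interleaved chroma pair advances 20, and V sits 10 bits after U. A small sketch that checks the result through the public pixdesc API (it only compiles against a tree with this patch, since it references AV_PIX_FMT_NV15):

```c
#include <stdio.h>
#include <libavutil/pixdesc.h>

int main(void)
{
    /* For AV_PIX_FMT_FLAG_BITSTREAM formats, comp[].step and
     * comp[].offset count bits, not bytes. */
    const AVPixFmtDescriptor *d = av_pix_fmt_desc_get(AV_PIX_FMT_NV15);
    if (!d)
        return 1;
    printf("%s: %d bpp, chroma sub %dx%d, V offset %d bits\n",
           d->name, av_get_bits_per_pixel(d),
           1 << d->log2_chroma_w, 1 << d->log2_chroma_h,
           d->comp[2].offset);
    /* Expected: "nv15: 15 bpp, chroma sub 2x2, V offset 10 bits" */
    return 0;
}
```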
+Index: jellyfin-ffmpeg/libswscale/input.c
+===================================================================
+--- jellyfin-ffmpeg.orig/libswscale/input.c
++++ jellyfin-ffmpeg/libswscale/input.c
+@@ -793,6 +793,39 @@ static void nv21ToUV_c(uint8_t *dstU, ui
+     nvXXtoUV_c(dstV, dstU, src1, width);
+ }
+ 
++static av_always_inline void nv15_20ToYUV_c(uint16_t *dst, const uint8_t *src,
++                                            int dst_pos, int src_pos)
++{
++    int shift = (src_pos << 1) & 7;
++    src_pos = (src_pos * 10) >> 3;
++    AV_WN16(dst + dst_pos,
++            ((AV_RL16(src + src_pos) >> shift) |
++             (AV_RL16(src + src_pos + 1) << (8 - shift))) & 0x3FF);
++}
++
++static void nv15_20ToY_c(uint8_t *_dst, const uint8_t *_src, const uint8_t *unused1,
++                         const uint8_t *unused2, int width, uint32_t *unused, void *opq)
++{
++    int i;
++    const uint8_t *src = (const uint8_t *)_src;
++    uint16_t *dst = (uint16_t *)_dst;
++    for (i = 0; i < width; i++)
++        nv15_20ToYUV_c(dst, src, i, i);
++}
++
++static void nv15_20ToUV_c(uint8_t *_dstU, uint8_t *_dstV,
++                          const uint8_t *unused0, const uint8_t *_src1, const uint8_t *_src2,
++                          int width, uint32_t *unused, void *opq)
++{
++    int i;
++    const uint8_t *src1 = (const uint8_t *)_src1;
++    uint16_t *dstU = (uint16_t *)_dstU, *dstV = (uint16_t *)_dstV;
++    for (i = 0; i < width; i++) {
++        nv15_20ToYUV_c(dstU, src1, i, 2 * i);
++        nv15_20ToYUV_c(dstV, src1, i, 2 * i + 1);
++    }
++}
++
+ #define p01x_uv_wrapper(bits, shift) \
+     static void p0 ## bits ## LEToUV_c(uint8_t *dstU, uint8_t *dstV, \
+                                        const uint8_t *unused0, \
+@@ -1441,6 +1474,10 @@ av_cold void ff_sws_init_input_funcs(Sws
+     case AV_PIX_FMT_XV36LE:
+         c->chrToYV12 = read_xv36le_UV_c;
+         break;
++    case AV_PIX_FMT_NV15:
++    case AV_PIX_FMT_NV20:
++        c->chrToYV12 = nv15_20ToUV_c;
++        break;
+     case AV_PIX_FMT_P010LE:
+     case AV_PIX_FMT_P210LE:
+     case AV_PIX_FMT_P410LE:
+@@ -1933,6 +1970,10 @@ av_cold void ff_sws_init_input_funcs(Sws
+     case AV_PIX_FMT_BGRA64LE:
+         c->lumToYV12 = bgr64LEToY_c;
+         break;
++    case AV_PIX_FMT_NV15:
++    case AV_PIX_FMT_NV20:
++        c->lumToYV12 = nv15_20ToY_c;
++        break;
+     case AV_PIX_FMT_P010LE:
+     case AV_PIX_FMT_P210LE:
+     case AV_PIX_FMT_P410LE:
+Index: jellyfin-ffmpeg/libswscale/utils.c
+===================================================================
+--- jellyfin-ffmpeg.orig/libswscale/utils.c
++++ jellyfin-ffmpeg/libswscale/utils.c
+@@ -228,6 +228,8 @@ static const FormatEntry format_entries[
+     [AV_PIX_FMT_XYZ12BE]     = { 1, 1, 1 },
+     [AV_PIX_FMT_XYZ12LE]     = { 1, 1, 1 },
+     [AV_PIX_FMT_AYUV64LE]    = { 1, 1},
++    [AV_PIX_FMT_NV15]        = { 1, 0 },
++    [AV_PIX_FMT_NV20]        = { 1, 0 },
+     [AV_PIX_FMT_P010LE]      = { 1, 1 },
+     [AV_PIX_FMT_P010BE]      = { 1, 1 },
+     [AV_PIX_FMT_P012LE]      = { 1, 1 },
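The unpacking math in nv15_20ToYUV_c() treats each plane as a dense little-endian stream of 10-bit samples: sample i begins at bit 10*i, i.e. byte (10*i)>>3 with bit phase (2*i) & 7, and two overlapping byte-pair reads are shifted, OR-ed and masked to 10 bits. A standalone sketch of the same logic with a hand-packed test vector (get10() is a hypothetical helper mirroring the patch, not part of swscale):

```c
#include <stdint.h>
#include <stdio.h>

/* Sample i occupies bits [10*i, 10*i + 10) of a little-endian
 * bitstream: read two bytes at byte offset (10*i)>>3 and shift
 * by the bit phase (2*i) & 7, exactly as nv15_20ToYUV_c() does. */
static unsigned get10(const uint8_t *src, int i)
{
    int shift = (i << 1) & 7;
    int pos = (i * 10) >> 3;
    unsigned lo = src[pos] | (src[pos + 1] << 8);     /* AV_RL16 equivalent */
    unsigned hi = src[pos + 1] | (src[pos + 2] << 8);
    return ((lo >> shift) | (hi << (8 - shift))) & 0x3FF;
}

int main(void)
{
    /* Samples 0x001, 0x002, 0x003, 0x004 packed LSB-first into 5 bytes
     * (0x001 | 0x002<<10 | 0x003<<20 | 0x004<<30 == 0x100300801). */
    uint8_t buf[6] = { 0x01, 0x08, 0x30, 0x00, 0x01, 0x00 };
    for (int i = 0; i < 4; i++)
        printf("sample %d = 0x%03X\n", i, get10(buf, i));
    return 0;
}
```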
+Index: jellyfin-ffmpeg/tests/ref/fate/imgutils
+===================================================================
+--- jellyfin-ffmpeg.orig/tests/ref/fate/imgutils
++++ jellyfin-ffmpeg/tests/ref/fate/imgutils
+@@ -262,3 +262,5 @@ rgbf32be planes: 1, linesizes: 76
+ rgbf32le planes: 1, linesizes: 768 0 0 0, plane_sizes: 36864 0 0 0, plane_offsets: 0 0 0, total_size: 36864
+ rgbaf32be planes: 1, linesizes: 1024 0 0 0, plane_sizes: 49152 0 0 0, plane_offsets: 0 0 0, total_size: 49152
+ rgbaf32le planes: 1, linesizes: 1024 0 0 0, plane_sizes: 49152 0 0 0, plane_offsets: 0 0 0, total_size: 49152
++nv15 planes: 2, linesizes: 80 80 0 0, plane_sizes: 3840 1920 0 0, plane_offsets: 3840 0 0, total_size: 5760
++nv20 planes: 2, linesizes: 80 80 0 0, plane_sizes: 3840 3840 0 0, plane_offsets: 3840 0 0, total_size: 7680
+Index: jellyfin-ffmpeg/tests/ref/fate/sws-pixdesc-query
+===================================================================
+--- jellyfin-ffmpeg.orig/tests/ref/fate/sws-pixdesc-query
++++ jellyfin-ffmpeg/tests/ref/fate/sws-pixdesc-query
+@@ -59,6 +59,8 @@ isNBPS:
+   gray14le
+   gray9be
+   gray9le
++  nv15
++  nv20
+   nv20be
+   nv20le
+   p010be
+@@ -212,7 +214,9 @@ isYUV:
+   ayuv64be
+   ayuv64le
+   nv12
++  nv15
+   nv16
++  nv20
+   nv20be
+   nv20le
+   nv21
+@@ -324,7 +328,9 @@ isYUV:
+ 
+ isPlanarYUV:
+   nv12
++  nv15
+   nv16
++  nv20
+   nv20be
+   nv20le
+   nv21
+@@ -417,7 +423,9 @@ isPlanarYUV:
+ 
+ isSemiPlanarYUV:
+   nv12
++  nv15
+   nv16
++  nv20
+   nv20be
+   nv20le
+   nv21
+@@ -839,7 +847,9 @@ Planar:
+   gbrpf32be
+   gbrpf32le
+   nv12
++  nv15
+   nv16
++  nv20
+   nv20be
+   nv20le
+   nv21
diff --git a/debian/patches/0059-add-checks-for-arm-mali-in-opencl-tonemap.patch b/debian/patches/0059-add-checks-for-arm-mali-in-opencl-tonemap.patch
new file mode 100644
index 0000000000..0d8c02d654
--- /dev/null
+++ b/debian/patches/0059-add-checks-for-arm-mali-in-opencl-tonemap.patch
@@ -0,0 +1,27 @@
+Index: jellyfin-ffmpeg/libavfilter/vf_tonemap_opencl.c
+===================================================================
+--- jellyfin-ffmpeg.orig/libavfilter/vf_tonemap_opencl.c
++++ jellyfin-ffmpeg/libavfilter/vf_tonemap_opencl.c
+@@ -333,6 +333,7 @@ static int tonemap_opencl_init(AVFilterC
+     cl_uint max_compute_units, device_vendor_id;
+     cl_int cle;
+     cl_mem_flags dovi_buf_flags = CL_MEM_READ_ONLY | CL_MEM_ALLOC_HOST_PTR;
++    char *device_vendor = NULL;
+     char *device_name = NULL;
+     char *device_exts = NULL;
+     int i, j, err;
+@@ -407,6 +408,14 @@ static int tonemap_opencl_init(AVFilterC
+             }
+             av_free(device_name);
+         }
++    } else if (device_is_integrated == CL_TRUE) {
++        device_vendor = check_opencl_device_str(ctx->ocf.hwctx->device_id, CL_DEVICE_VENDOR);
++        device_name = check_opencl_device_str(ctx->ocf.hwctx->device_id, CL_DEVICE_NAME);
++        if (!strstr(device_vendor, "ARM") &&
++            !strstr(device_name, "Mali"))
++            ctx->tradeoff = 0;
++        av_free(device_vendor);
++        av_free(device_name);
+     } else {
+         ctx->tradeoff = 0;
+     }
diff --git a/debian/patches/0060-fix-libx265-encoded-fmp4-hls-playback-on-safari.patch b/debian/patches/0060-fix-libx265-encoded-fmp4-hls-playback-on-safari.patch
new file mode 100644
index 0000000000..8a909d08a8
--- /dev/null
+++ b/debian/patches/0060-fix-libx265-encoded-fmp4-hls-playback-on-safari.patch
@@ -0,0 +1,13 @@
+Index: jellyfin-ffmpeg/libavformat/movenc.c
+===================================================================
+--- jellyfin-ffmpeg.orig/libavformat/movenc.c
++++ jellyfin-ffmpeg/libavformat/movenc.c
+@@ -2716,7 +2716,7 @@ static int mov_write_stbl_tag(AVFormatCo
+           track->par->codec_tag == MKTAG('r','t','p',' ')) &&
+          track->has_keyframes && track->has_keyframes < track->entry)
+         mov_write_stss_tag(pb, track, MOV_SYNC_SAMPLE);
+-    if (track->par->codec_type == AVMEDIA_TYPE_VIDEO && track->has_disposable)
++    if (track->par->codec_type == AVMEDIA_TYPE_VIDEO && track->has_disposable && track->entry)
+         mov_write_sdtp_tag(pb, track);
+     if (track->mode == MODE_MOV && track->flags & MOV_TRACK_STPS)
+         mov_write_stss_tag(pb, track, MOV_PARTIAL_SYNC_SAMPLE);
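A note on the 0060 patch: mov_write_sdtp_tag() emits one sample-dependency entry per track->entry sample, so in fragmented output (as used by fMP4 HLS) the initial moov, which has zero sample entries, previously produced an empty sdtp box that Safari appears to reject for libx265 streams. The added track->entry guard simply skips the box in that case. The failure mode should be reproducible with a transcode along the lines of `ffmpeg -i input.mkv -c:v libx265 -tag:v hvc1 -f hls -hls_segment_type fmp4 out.m3u8` (a hypothetical invocation for illustration, not taken from the patch).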
diff --git a/debian/patches/series b/debian/patches/series
index bd991c9f2b..e2822c52ea 100644
--- a/debian/patches/series
+++ b/debian/patches/series
@@ -55,3 +55,6 @@
 0055-fix-libplacebo-filter-build-with-v6-api.patch
 0056-sync-intel-d3d11va-textures-before-mapping-to-opencl.patch
 0057-add-icon-for-windows-version-ffmpeg.patch
+0058-add-full-hwa-pipeline-for-rockchip-rk3588-platform.patch
+0059-add-checks-for-arm-mali-in-opencl-tonemap.patch
+0060-fix-libx265-encoded-fmp4-hls-playback-on-safari.patch
diff --git a/debian/rules b/debian/rules
index 078737195b..6fa2427284 100755
--- a/debian/rules
+++ b/debian/rules
@@ -23,6 +23,7 @@ CONFIG := --prefix=${TARGET_DIR} \
 	--enable-gmp \
 	--enable-gnutls \
 	--enable-chromaprint \
+	--enable-opencl \
 	--enable-libdrm \
 	--enable-libass \
 	--enable-libfreetype \
@@ -35,6 +36,7 @@ CONFIG := --prefix=${TARGET_DIR} \
 	--enable-libvorbis \
 	--enable-libopenmpt \
 	--enable-libdav1d \
+	--enable-libsvtav1 \
 	--enable-libwebp \
 	--enable-libvpx \
 	--enable-libx264 \
@@ -43,23 +45,10 @@ CONFIG := --prefix=${TARGET_DIR} \
 	--enable-libzimg \
 	--enable-libfdk-aac \
 
-CONFIG_ARM_COMMON := --toolchain=hardened \
-	--enable-cross-compile \
-
-CONFIG_ARM := ${CONFIG_ARM_COMMON} \
-	--arch=armhf \
-	--cross-prefix=/usr/bin/arm-linux-gnueabihf- \
-
-CONFIG_ARM64 := ${CONFIG_ARM_COMMON} \
-	--arch=arm64 \
-	--cross-prefix=/usr/bin/aarch64-linux-gnu- \
-
 CONFIG_x86 := --arch=amd64 \
-	--enable-libsvtav1 \
 	--enable-libshaderc \
 	--enable-libplacebo \
 	--enable-vulkan \
-	--enable-opencl \
 	--enable-vaapi \
 	--enable-amf \
 	--enable-libvpl \
@@ -70,6 +59,19 @@ CONFIG_x86 := --arch=amd64 \
 	--enable-nvdec \
 	--enable-nvenc \
 
+CONFIG_ARM_COMMON := --toolchain=hardened \
+	--enable-cross-compile \
+	--enable-rkmpp \
+	--enable-rkrga \
+
+CONFIG_ARM := --arch=armhf \
+	--cross-prefix=/usr/bin/arm-linux-gnueabihf- \
+	${CONFIG_ARM_COMMON} \
+
+CONFIG_ARM64 := --arch=arm64 \
+	--cross-prefix=/usr/bin/aarch64-linux-gnu- \
+	${CONFIG_ARM_COMMON} \
+
 HOST_ARCH := $(shell arch)
 BUILD_ARCH := ${DEB_HOST_MULTIARCH}
 ifeq ($(BUILD_ARCH),x86_64-linux-gnu)
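Worth noting in the debian/rules hunks: --enable-opencl and --enable-libsvtav1 move out of CONFIG_x86 into the shared CONFIG block so every architecture builds them, while CONFIG_ARM_COMMON gains --enable-rkmpp and --enable-rkrga for the Rockchip pipeline. The common block is also reordered to expand after each target's --arch and --cross-prefix flags rather than before them.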
diff --git a/docker-build-win64.sh b/docker-build-win64.sh
index 7dfdf5647a..5ab3677ab0 100755
--- a/docker-build-win64.sh
+++ b/docker-build-win64.sh
@@ -159,7 +159,7 @@ popd
 popd
 
 # LZMA
-git clone -b v5.4.5 --depth=1 https://github.com/tukaani-project/xz.git
+git clone -b v5.4.6 --depth=1 https://github.com/tukaani-project/xz.git
 pushd xz
 ./autogen.sh --no-po4a --no-doxygen
 ./configure \
@@ -176,7 +176,7 @@ popd
 # FONTCONFIG
 mkdir fontconfig
 pushd fontconfig
-fc_ver="2.14.2"
+fc_ver="2.15.0"
 fc_link="https://www.freedesktop.org/software/fontconfig/release/fontconfig-${fc_ver}.tar.xz"
 wget ${fc_link} -O fc.tar.gz
 tar xaf fc.tar.gz
@@ -456,7 +456,7 @@ popd
 popd
 
 # SVT-AV1
-git clone -b v1.7.0 --depth=1 https://gitlab.com/AOMediaCodec/SVT-AV1.git
+git clone -b v1.8.0 --depth=1 https://gitlab.com/AOMediaCodec/SVT-AV1.git
 pushd SVT-AV1
 mkdir build
 pushd build
@@ -557,8 +557,8 @@ mv * ${FF_DEPS_PREFIX}/include/AMF
 popd
 
 # VPL
-git clone -b v2023.3.1 --depth=1 https://github.com/oneapi-src/oneVPL.git
-pushd oneVPL
+git clone -b v2.10.1 --depth=1 https://github.com/intel/libvpl.git
+pushd libvpl
 mkdir build && pushd build
 cmake \
     -DCMAKE_TOOLCHAIN_FILE=${FF_CMAKE_TOOLCHAIN} \
diff --git a/docker-build.sh b/docker-build.sh
index c2df8f6edb..642c573855 100755
--- a/docker-build.sh
+++ b/docker-build.sh
@@ -7,7 +7,7 @@ set -o xtrace
 
 DEBIAN_ADDR=http://deb.debian.org/debian/
 UBUNTU_ARCHIVE_ADDR=http://archive.ubuntu.com/ubuntu/
-UBUNTU_PORTS_ADDR=http://ports.ubuntu.com/
+UBUNTU_PORTS_ADDR=http://ports.ubuntu.com/ubuntu-ports/
 
 # Prepare common extra libs for amd64, armhf and arm64
 prepare_extra_common() {
@@ -119,33 +119,23 @@ prepare_extra_common() {
     echo "dav1d/libdav1d.so* /usr/lib/jellyfin-ffmpeg/lib" >> ${DPKG_INSTALL_LIST}
     popd
 
-    # FDK-AAC-STRIPPED
-    pushd ${SOURCE_DIR}
-    git clone -b stripped4 --depth=1 https://gitlab.freedesktop.org/wtaymans/fdk-aac-stripped.git
-    pushd fdk-aac-stripped
-    ./autogen.sh
-    ./configure \
-        --disable-{static,silent-rules} \
-        --prefix=${TARGET_DIR} CFLAGS="-O3 -DNDEBUG" CXXFLAGS="-O3 -DNDEBUG" ${CROSS_OPT}
-    make -j$(nproc) && make install && make install DESTDIR=${SOURCE_DIR}/fdk-aac-stripped
-    echo "fdk-aac-stripped${TARGET_DIR}/lib/libfdk-aac.so* usr/lib/jellyfin-ffmpeg/lib" >> ${DPKG_INSTALL_LIST}
-    popd
-    popd
-}
-
-# Prepare extra headers, libs and drivers for x86_64-linux-gnu
-prepare_extra_amd64() {
     # SVT-AV1
     # nasm >= 2.14
     pushd ${SOURCE_DIR}
-    git clone -b v1.7.0 --depth=1 https://gitlab.com/AOMediaCodec/SVT-AV1.git
+    git clone -b v1.8.0 --depth=1 https://gitlab.com/AOMediaCodec/SVT-AV1.git
     pushd SVT-AV1
     mkdir build
    pushd build
+    if [ "${ARCH}" = "amd64" ]; then
+        svtav1_avx512="-DENABLE_AVX512=ON"
+    else
+        svtav1_avx512="-DENABLE_AVX512=OFF"
+    fi
     cmake \
+        ${CMAKE_TOOLCHAIN_OPT} \
         -DCMAKE_INSTALL_PREFIX=${TARGET_DIR} \
         -DCMAKE_BUILD_TYPE=Release \
-        -DENABLE_AVX512=ON \
+        $svtav1_avx512 \
         -DBUILD_SHARED_LIBS=ON \
         -DBUILD_{TESTING,APPS,DEC}=OFF \
         ..
@@ -154,6 +144,22 @@ prepare_extra_amd64() {
     popd
     popd
 
+    # FDK-AAC-STRIPPED
+    pushd ${SOURCE_DIR}
+    git clone -b stripped4 --depth=1 https://gitlab.freedesktop.org/wtaymans/fdk-aac-stripped.git
+    pushd fdk-aac-stripped
+    ./autogen.sh
+    ./configure \
+        --disable-{static,silent-rules} \
+        --prefix=${TARGET_DIR} CFLAGS="-O3 -DNDEBUG" CXXFLAGS="-O3 -DNDEBUG" ${CROSS_OPT}
+    make -j$(nproc) && make install && make install DESTDIR=${SOURCE_DIR}/fdk-aac-stripped
+    echo "fdk-aac-stripped${TARGET_DIR}/lib/libfdk-aac.so* usr/lib/jellyfin-ffmpeg/lib" >> ${DPKG_INSTALL_LIST}
+    popd
+    popd
+}
+
+# Prepare extra headers, libs and drivers for x86_64-linux-gnu
+prepare_extra_amd64() {
     # FFNVCODEC
     pushd ${SOURCE_DIR}
    git clone -b n12.0.16.0 --depth=1 https://github.com/FFmpeg/nv-codec-headers.git
@@ -237,7 +243,7 @@ prepare_extra_amd64() {
 
     # GMMLIB
     pushd ${SOURCE_DIR}
-    git clone -b intel-gmmlib-22.3.12 --depth=1 https://github.com/intel/gmmlib.git
+    git clone -b intel-gmmlib-22.3.17 --depth=1 https://github.com/intel/gmmlib.git
     pushd gmmlib
     mkdir build && pushd build
     cmake -DCMAKE_INSTALL_PREFIX=${TARGET_DIR} ..
@@ -267,13 +273,13 @@ prepare_extra_amd64() {
     popd
     popd
 
-    # ONEVPL (dispatcher + header)
+    # LIBVPL (dispatcher + header)
     # Provides VPL header and dispatcher (libvpl.so.2) for FFmpeg
     # Both MSDK and VPL runtime can be loaded by VPL dispatcher
     pushd ${SOURCE_DIR}
-    git clone -b v2023.3.1 --depth=1 https://github.com/oneapi-src/oneVPL.git
-    pushd oneVPL
-    sed -i 's|ParseEnvSearchPaths(ONEVPL_PRIORITY_PATH_VAR, searchDirList)|searchDirList.push_back("/usr/lib/jellyfin-ffmpeg/lib")|g' dispatcher/vpl/mfx_dispatcher_vpl_loader.cpp
+    git clone -b v2.10.1 --depth=1 https://github.com/intel/libvpl.git
+    pushd libvpl
+    sed -i 's|ParseEnvSearchPaths(ONEVPL_PRIORITY_PATH_VAR, searchDirList)|searchDirList.push_back("/usr/lib/jellyfin-ffmpeg/lib")|g' libvpl/src/mfx_dispatcher_vpl_loader.cpp
     mkdir build && pushd build
     cmake -DCMAKE_INSTALL_PREFIX=${TARGET_DIR} \
           -DCMAKE_INSTALL_BINDIR=${TARGET_DIR}/bin \
@@ -294,7 +300,7 @@ prepare_extra_amd64() {
     # ONEVPL-INTEL-GPU (RT only)
     # Provides VPL runtime (libmfx-gen.so.1.2) for 11th Gen Tiger Lake and newer
     pushd ${SOURCE_DIR}
-    git clone -b intel-onevpl-23.4.0 --depth=1 https://github.com/oneapi-src/oneVPL-intel-gpu.git
+    git clone -b intel-onevpl-24.1.1 --depth=1 https://github.com/oneapi-src/oneVPL-intel-gpu.git
     pushd oneVPL-intel-gpu
     mkdir build && pushd build
     cmake -DCMAKE_INSTALL_PREFIX=${TARGET_DIR} \
@@ -314,7 +320,7 @@ prepare_extra_amd64() {
     # Full Feature Build: ENABLE_KERNELS=ON(Default) ENABLE_NONFREE_KERNELS=ON(Default)
     # Free Kernel Build: ENABLE_KERNELS=ON ENABLE_NONFREE_KERNELS=OFF
     pushd ${SOURCE_DIR}
-    git clone -b intel-media-23.4.0 --depth=1 https://github.com/intel/media-driver.git
+    git clone -b intel-media-24.1.1 --depth=1 https://github.com/intel/media-driver.git
     pushd media-driver
     mkdir build && pushd build
     cmake -DCMAKE_INSTALL_PREFIX=${TARGET_DIR} \
@@ -333,7 +339,7 @@ prepare_extra_amd64() {
 
     # Vulkan Headers
     pushd ${SOURCE_DIR}
-    git clone -b v1.3.270 --depth=1 https://github.com/KhronosGroup/Vulkan-Headers.git
+    git clone -b v1.3.276 --depth=1 https://github.com/KhronosGroup/Vulkan-Headers.git
     pushd Vulkan-Headers
     mkdir build && pushd build
     cmake \
@@ -346,7 +352,7 @@ prepare_extra_amd64() {
 
     # Vulkan ICD Loader
     pushd ${SOURCE_DIR}
-    git clone -b v1.3.270 --depth=1 https://github.com/KhronosGroup/Vulkan-Loader.git
+    git clone -b v1.3.276 --depth=1 https://github.com/KhronosGroup/Vulkan-Loader.git
     pushd Vulkan-Loader
     mkdir build && pushd build
     cmake \
@@ -465,6 +471,46 @@ prepare_extra_amd64() {
     popd
 }
 
+# Prepare extra headers, libs and drivers for {arm,aarch64}-linux-gnu*
+prepare_extra_arm() {
+    # RKMPP
+    pushd ${SOURCE_DIR}
+    git clone -b jellyfin-mpp --depth=1 https://github.com/nyanmisaka/mpp.git rkmpp
+    pushd rkmpp
+    mkdir rkmpp_build
+    pushd rkmpp_build
+    cmake \
+        ${CMAKE_TOOLCHAIN_OPT} \
+        -DCMAKE_INSTALL_PREFIX=${TARGET_DIR} \
+        -DCMAKE_BUILD_TYPE=Release \
+        -DBUILD_SHARED_LIBS=ON \
+        -DBUILD_TEST=OFF \
+        ..
+    make -j$(nproc) && make install && make install DESTDIR=${SOURCE_DIR}/rkmpp
+    echo "rkmpp${TARGET_DIR}/lib/librockchip*.* usr/lib/jellyfin-ffmpeg/lib" >> ${DPKG_INSTALL_LIST}
+    popd
+    popd
+    popd
+
+    # RKRGA
+    pushd ${SOURCE_DIR}
+    git clone -b jellyfin-rga --depth=1 https://github.com/nyanmisaka/rk-mirrors.git rkrga
+    meson setup rkrga rkrga_build \
+        ${MESON_CROSS_OPT} \
+        --prefix=${TARGET_DIR} \
+        --libdir=lib \
+        --buildtype=release \
+        --default-library=shared \
+        -Dcpp_args=-fpermissive \
+        -Dlibdrm=false \
+        -Dlibrga_demo=false
+    meson configure rkrga_build
+    ninja -C rkrga_build install
+    cp -a ${TARGET_DIR}/lib/librga.so* ${SOURCE_DIR}/rkrga
+    echo "rkrga/librga.so* usr/lib/jellyfin-ffmpeg/lib" >> ${DPKG_INSTALL_LIST}
+    popd
+}
+
 # Prepare the cross-toolchain
 prepare_crossbuild_env_armhf() {
     # Prepare the Ubuntu-specific cross-build requirements
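The new prepare_extra_arm() stage pulls Rockchip's MPP and RGA libraries from nyanmisaka's jellyfin-tagged forks: MPP is a CMake cross build installed both to the sysroot and a staging DESTDIR, while RGA is a Meson cross build with libdrm support and the demo disabled; both ship their shared objects into /usr/lib/jellyfin-ffmpeg/lib via ${DPKG_INSTALL_LIST}. These supply the runtime counterparts to the --enable-rkmpp and --enable-rkrga configure flags added in debian/rules above.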
@@ -501,7 +547,7 @@ EOF
     # Install dependencies
     pushd cross-gcc-packages-amd64/cross-gcc-${GCC_VER}-armhf
     ln -fs /usr/share/zoneinfo/America/Toronto /etc/localtime
-    yes | apt-get install -y -o Dpkg::Options::="--force-overwrite" -o APT::Immediate-Configure=0 gcc-${GCC_VER}-source gcc-${GCC_VER}-arm-linux-gnueabihf g++-${GCC_VER}-arm-linux-gnueabihf libstdc++6-armhf-cross binutils-arm-linux-gnueabihf bison flex libtool gdb sharutils netbase libmpc-dev libmpfr-dev libgmp-dev systemtap-sdt-dev autogen expect chrpath zlib1g-dev zip libc6-dev:armhf linux-libc-dev:armhf libgcc1:armhf libcurl4-openssl-dev:armhf libfontconfig1-dev:armhf libfreetype6-dev:armhf libstdc++6:armhf
+    yes | apt-get install -y -o Dpkg::Options::="--force-overwrite" -o APT::Immediate-Configure=0 gcc-${GCC_VER}-source gcc-${GCC_VER}-arm-linux-gnueabihf g++-${GCC_VER}-arm-linux-gnueabihf libstdc++6-armhf-cross binutils-arm-linux-gnueabihf bison flex libtool gdb sharutils netbase libmpc-dev libmpfr-dev libgmp-dev systemtap-sdt-dev autogen expect chrpath zlib1g-dev zip libc6-dev:armhf linux-libc-dev:armhf libgcc1:armhf libfontconfig1-dev:armhf libfreetype6-dev:armhf libstdc++6:armhf
     popd
 }
 prepare_crossbuild_env_arm64() {
@@ -539,7 +585,7 @@ EOF
     # Install dependencies
     pushd cross-gcc-packages-amd64/cross-gcc-${GCC_VER}-arm64
     ln -fs /usr/share/zoneinfo/America/Toronto /etc/localtime
-    yes | apt-get install -y -o Dpkg::Options::="--force-overwrite" -o APT::Immediate-Configure=0 gcc-${GCC_VER}-source gcc-${GCC_VER}-aarch64-linux-gnu g++-${GCC_VER}-aarch64-linux-gnu libstdc++6-arm64-cross binutils-aarch64-linux-gnu bison flex libtool gdb sharutils netbase libmpc-dev libmpfr-dev libgmp-dev systemtap-sdt-dev autogen expect chrpath zlib1g-dev zip libc6-dev:arm64 linux-libc-dev:arm64 libgcc1:arm64 libcurl4-openssl-dev:arm64 libfontconfig1-dev:arm64 libfreetype6-dev:arm64 libstdc++6:arm64
+    yes | apt-get install -y -o Dpkg::Options::="--force-overwrite" -o APT::Immediate-Configure=0 gcc-${GCC_VER}-source gcc-${GCC_VER}-aarch64-linux-gnu g++-${GCC_VER}-aarch64-linux-gnu libstdc++6-arm64-cross binutils-aarch64-linux-gnu bison flex libtool gdb sharutils netbase libmpc-dev libmpfr-dev libgmp-dev systemtap-sdt-dev autogen expect chrpath zlib1g-dev zip libc6-dev:arm64 linux-libc-dev:arm64 libgcc1:arm64 libfontconfig1-dev:arm64 libfreetype6-dev:arm64 libstdc++6:arm64
     popd
 }
@@ -559,6 +605,7 @@ case ${ARCH} in
         ln -s /usr/bin/arm-linux-gnueabihf-gcc-ar-${GCC_VER} /usr/bin/arm-linux-gnueabihf-gcc-ar
         ln -s /usr/bin/arm-linux-gnueabihf-g++-${GCC_VER} /usr/bin/arm-linux-gnueabihf-g++
         prepare_extra_common
+        prepare_extra_arm
         CONFIG_SITE="/etc/dpkg-cross/cross-config.${ARCH}"
        DEP_ARCH_OPT="--host-arch armhf"
        BUILD_ARCH_OPT="-aarmhf"
@@ -569,6 +616,7 @@ case ${ARCH} in
         ln -s /usr/bin/aarch64-linux-gnu-gcc-ar-${GCC_VER} /usr/bin/aarch64-linux-gnu-gcc-ar
         ln -s /usr/bin/aarch64-linux-gnu-g++-${GCC_VER} /usr/bin/aarch64-linux-gnu-g++
         prepare_extra_common
+        prepare_extra_arm
         CONFIG_SITE="/etc/dpkg-cross/cross-config.${ARCH}"
         DEP_ARCH_OPT="--host-arch arm64"
         BUILD_ARCH_OPT="-aarm64"