[SM-Commit] GIT changes to master grimoire by Pavel Vinogradov (f84038cfde96069b5d567ebc26db1acae64d7fe3)
- From: Pavel Vinogradov <scm AT sourcemage.org>
- To: sm-commit AT lists.ibiblio.org, sm-commit AT lists.sourcemage.org
- Subject: [SM-Commit] GIT changes to master grimoire by Pavel Vinogradov (f84038cfde96069b5d567ebc26db1acae64d7fe3)
- Date: Thu, 27 Nov 2025 16:59:29 +0000
GIT changes to master grimoire by Pavel Vinogradov <public AT sourcemage.org>:
http/firefox/HISTORY                                                       |    3
http/firefox/patches-for-musl/0013-bgo-748849-RUST_TARGET_override.patch   |   61
http/firefox/patches/0013-bgo-748849-RUST_TARGET_override.patch            |   61
http/firefox/patches/0022-bmo-1962139-vendor-ffmpeg-8-headers-p1.patch     |11152 ++++++++++
http/firefox/patches/0022-bmo-1962139-vendor-ffmpeg-8-headers-p2.patch     |  309
http/firefox/patches/0022-bmo-1962139-vendor-ffmpeg-8-headers-p3.patch     |  248
http/firefox/patches/0022-bmo-1962139-vendor-ffmpeg-8-headers-p4.patch     |   45
http/firefox/patches/0022-bmo-1962139-vendor-ffmpeg-8-headers-p5.patch     |  129
http/firefox/patches/0023-bgo-966424-include-prenv.h-jumbo-build-fix.patch |   10
9 files changed, 11957 insertions(+), 61 deletions(-)
New commits:
commit f84038cfde96069b5d567ebc26db1acae64d7fe3
Author: Pavel Vinogradov <public AT sourcemage.org>
Commit: Pavel Vinogradov <public AT sourcemage.org>
http/firefox: updated patches for 145.0.x
diff --git a/http/firefox/HISTORY b/http/firefox/HISTORY
index c24eb97..7e4f1a0 100644
--- a/http/firefox/HISTORY
+++ b/http/firefox/HISTORY
@@ -1,3 +1,6 @@
+2025-11-27 Pavel Vinogradov <public AT sourcemage.org>
+ * patches/*, patches-for-musl/*: updated
+
2025-11-24 Pavel Vinogradov <public AT sourcemage.org>
* DETAILS: version 145.0.2
diff --git a/http/firefox/patches-for-musl/0013-bgo-748849-RUST_TARGET_override.patch b/http/firefox/patches-for-musl/0013-bgo-748849-RUST_TARGET_override.patch
new file mode 100644
index 0000000..f109538
--- /dev/null
+++ b/http/firefox/patches-for-musl/0013-bgo-748849-RUST_TARGET_override.patch
@@ -0,0 +1,61 @@
+From: Sam James <sam AT gentoo.org>
+Subject: Allow RUST_TARGET override for detected Rust triplet (fixes musl build)
+
+Respect RUST_TARGET from the environment to override Mozilla's
+own triplet detection.
+
+If the variable is unset, the old logic is used.
+
+The issue is that Gentoo supports both, say:
+- x86_64-unknown-linux-musl, and
+- x86-64-gentoo-linux-musl
+
+Our Rust is built to understand
+-gentoo- as a vendor target, but
+our Rust only actually includes
+-unknown-.
+
+Mozilla's configure sees -gentoo-
+in CHOST and that Rust _understands_
+-gentoo- and tries to use it, without
+verifying that we actually have
+-gentoo- installed as a Rust target.
+
+This requires exporting RUST_TARGET
+in the ebuild, possibly like:
+````
+inherit rust-toolchain
+
+src_configure() {
+ [...]
+ export RUST_TARGET=$(rust_abi)
+ [...]
+}
+````
+
+Inspired by Alpine's patch at
+https://git.alpinelinux.org/aports/tree/community/firefox/fix-rust-target.patch?id=740922900cf8042be6751fcfcccdd3fc11bd77d3.
+
+Bug: https://bugs.gentoo.org/748849
+Bug: https://bugs.gentoo.org/779178
+Bug: https://bugs.gentoo.org/836226
+Thanks-to: Georgy Yakovlev <gyakovlev AT gentoo.org>
+--- a/build/moz.configure/rust.configure
++++ b/build/moz.configure/rust.configure
+@@ -275,6 +275,7 @@ def rust_supported_targets(rustc):
+ return data
+
+
++@imports('os')
+ def detect_rustc_target(
+ host_or_target, compiler_info, arm_target, rust_supported_targets
+ ):
+@@ -396,7 +397,7 @@ def detect_rustc_target(
+
+ return None
+
+- rustc_target = find_candidate(candidates)
++ rustc_target = os.environ.get('RUST_TARGET', find_candidate(candidates))
+
+ if rustc_target is None:
+ die("Don't know how to translate {} for rustc".format(host_or_target.alias))
diff --git a/http/firefox/patches/0013-bgo-748849-RUST_TARGET_override.patch b/http/firefox/patches/0013-bgo-748849-RUST_TARGET_override.patch
deleted file mode 100644
index f109538..0000000
--- a/http/firefox/patches/0013-bgo-748849-RUST_TARGET_override.patch
+++ /dev/null
@@ -1,61 +0,0 @@
-From: Sam James <sam AT gentoo.org>
-Subject: Allow RUST_TARGET override for detected Rust triplet (fixes musl build)
-
-Respect RUST_TARGET from the environment to override Mozilla's
-own triplet detection.
-
-If the variable is unset, the old logic is used.
-
-The issue is that Gentoo supports both, say:
-- x86_64-unknown-linux-musl, and
-- x86-64-gentoo-linux-musl
-
-Our Rust is built to understand
--gentoo- as a vendor target, but
-our Rust only actually includes
--unknown-.
-
-Mozilla's configure sees -gentoo-
-in CHOST and that Rust _understands_
--gentoo- and tries to use it, without
-verifying that we actually have
--gentoo- installed as a Rust target.
-
-This requires exporting RUST_TARGET
-in the ebuild, possibly like:
-````
-inherit rust-toolchain
-
-src_configure() {
- [...]
- export RUST_TARGET=$(rust_abi)
- [...]
-}
-````
-
-Inspired by Alpine's patch at
-https://git.alpinelinux.org/aports/tree/community/firefox/fix-rust-target.patch?id=740922900cf8042be6751fcfcccdd3fc11bd77d3.
-
-Bug: https://bugs.gentoo.org/748849
-Bug: https://bugs.gentoo.org/779178
-Bug: https://bugs.gentoo.org/836226
-Thanks-to: Georgy Yakovlev <gyakovlev AT gentoo.org>
---- a/build/moz.configure/rust.configure
-+++ b/build/moz.configure/rust.configure
-@@ -275,6 +275,7 @@ def rust_supported_targets(rustc):
- return data
-
-
-+@imports('os')
- def detect_rustc_target(
- host_or_target, compiler_info, arm_target, rust_supported_targets
- ):
-@@ -396,7 +397,7 @@ def detect_rustc_target(
-
- return None
-
-- rustc_target = find_candidate(candidates)
-+ rustc_target = os.environ.get('RUST_TARGET', find_candidate(candidates))
-
- if rustc_target is None:
- die("Don't know how to translate {} for rustc".format(host_or_target.alias))
diff --git a/http/firefox/patches/0022-bmo-1962139-vendor-ffmpeg-8-headers-p1.patch b/http/firefox/patches/0022-bmo-1962139-vendor-ffmpeg-8-headers-p1.patch
new file mode 100644
index 0000000..57909c6
--- /dev/null
+++ b/http/firefox/patches/0022-bmo-1962139-vendor-ffmpeg-8-headers-p1.patch
@@ -0,0 +1,11152 @@
+
+# HG changeset patch
+# User Landry Breuil <landry AT openbsd.org>
+# Date 1763998692 0
+# Node ID 19e0df8033b24b543bd24343bea7cee0865c47f2
+# Parent 060a0a7be5c9c556c6ad83bf3484ebc2a9bdda20
+Bug 1962139 - Vendor ffmpeg 8.0 headers r=media-playback-reviewers,frontend-codestyle-reviewers,mossop,alwu
+
+taken straight from ffmpeg, without any kind of formatting/licence changes
+
+Differential Revision: https://phabricator.services.mozilla.com/D272252
+
+
+diff --git a/.clang-format-ignore b/.clang-format-ignore
+--- a/.clang-format-ignore
++++ b/.clang-format-ignore
+@@ -77,16 +77,17 @@ dom/media/gmp/rlz/.*
+ dom/media/gmp/widevine-adapter/content_decryption_module.h
+ dom/media/gmp/widevine-adapter/content_decryption_module_export.h
+ dom/media/gmp/widevine-adapter/content_decryption_module_ext.h
+ dom/media/platforms/ffmpeg/ffmpeg57/.*
+ dom/media/platforms/ffmpeg/ffmpeg58/.*
+ dom/media/platforms/ffmpeg/ffmpeg59/.*
+ dom/media/platforms/ffmpeg/ffmpeg60/.*
+ dom/media/platforms/ffmpeg/ffmpeg61/.*
++dom/media/platforms/ffmpeg/ffmpeg62/.*
+ dom/media/platforms/ffmpeg/libav53/.*
+ dom/media/platforms/ffmpeg/libav54/.*
+ dom/media/platforms/ffmpeg/libav55/.*
+ dom/media/webrtc/transport/third_party/.*
+ dom/media/webspeech/recognition/endpointer.cc
+ dom/media/webspeech/recognition/endpointer.h
+ dom/media/webspeech/recognition/energy_endpointer.cc
+ dom/media/webspeech/recognition/energy_endpointer.h
+diff --git a/.prettierignore b/.prettierignore
+--- a/.prettierignore
++++ b/.prettierignore
+@@ -1383,16 +1383,17 @@ dom/media/gmp/rlz/
+ dom/media/gmp/widevine-adapter/content_decryption_module_export.h
+ dom/media/gmp/widevine-adapter/content_decryption_module_ext.h
+ dom/media/gmp/widevine-adapter/content_decryption_module.h
+ dom/media/platforms/ffmpeg/ffmpeg57/
+ dom/media/platforms/ffmpeg/ffmpeg58/
+ dom/media/platforms/ffmpeg/ffmpeg59/
+ dom/media/platforms/ffmpeg/ffmpeg60/
+ dom/media/platforms/ffmpeg/ffmpeg61/
++dom/media/platforms/ffmpeg/ffmpeg62/
+ dom/media/platforms/ffmpeg/libav53/
+ dom/media/platforms/ffmpeg/libav54/
+ dom/media/platforms/ffmpeg/libav55/
+ dom/media/webaudio/test/blink/
+ dom/media/webrtc/tests/mochitests/helpers_from_wpt/sdp.js
+ dom/media/webrtc/transport/third_party/
+ dom/media/webspeech/recognition/endpointer.cc
+ dom/media/webspeech/recognition/endpointer.h
+diff --git a/dom/media/platforms/ffmpeg/ffmpeg57/include/COPYING.LGPLv2.1 b/dom/media/platforms/ffmpeg/ffmpeg62/include/COPYING.LGPLv2.1
+copy from dom/media/platforms/ffmpeg/ffmpeg57/include/COPYING.LGPLv2.1
+copy to dom/media/platforms/ffmpeg/ffmpeg62/include/COPYING.LGPLv2.1
+diff --git a/dom/media/platforms/ffmpeg/ffmpeg62/include/libavcodec/avcodec.h b/dom/media/platforms/ffmpeg/ffmpeg62/include/libavcodec/avcodec.h
+new file mode 100644
+--- /dev/null
++++ b/dom/media/platforms/ffmpeg/ffmpeg62/include/libavcodec/avcodec.h
+@@ -0,0 +1,2947 @@
++/*
++ * copyright (c) 2001 Fabrice Bellard
++ *
++ * This file is part of FFmpeg.
++ *
++ * FFmpeg is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU Lesser General Public
++ * License as published by the Free Software Foundation; either
++ * version 2.1 of the License, or (at your option) any later version.
++ *
++ * FFmpeg is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with FFmpeg; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
++ */
++
++#ifndef AVCODEC_AVCODEC_H
++#define AVCODEC_AVCODEC_H
++
++/**
++ * @file
++ * @ingroup libavc
++ * Libavcodec external API header
++ */
++
++#include "libavutil/samplefmt.h"
++#include "libavutil/attributes.h"
++#include "libavutil/avutil.h"
++#include "libavutil/buffer.h"
++#include "libavutil/channel_layout.h"
++#include "libavutil/dict.h"
++#include "libavutil/frame.h"
++#include "libavutil/log.h"
++#include "libavutil/pixfmt.h"
++#include "libavutil/rational.h"
++
++#include "codec.h"
++#include "codec_id.h"
++#include "defs.h"
++#include "packet.h"
++#include "version_major.h"
++#ifndef HAVE_AV_CONFIG_H
++/* When included as part of the ffmpeg build, only include the major version
++ * to avoid unnecessary rebuilds. When included externally, keep including
++ * the full version information. */
++#include "version.h"
++
++#include "codec_desc.h"
++#include "codec_par.h"
++#endif
++
++struct AVCodecParameters;
++
++/**
++ * @defgroup libavc libavcodec
++ * Encoding/Decoding Library
++ *
++ * @{
++ *
++ * @defgroup lavc_decoding Decoding
++ * @{
++ * @}
++ *
++ * @defgroup lavc_encoding Encoding
++ * @{
++ * @}
++ *
++ * @defgroup lavc_codec Codecs
++ * @{
++ * @defgroup lavc_codec_native Native Codecs
++ * @{
++ * @}
++ * @defgroup lavc_codec_wrappers External library wrappers
++ * @{
++ * @}
++ * @defgroup lavc_codec_hwaccel Hardware Accelerators bridge
++ * @{
++ * @}
++ * @}
++ * @defgroup lavc_internal Internal
++ * @{
++ * @}
++ * @}
++ */
++
++/**
++ * @ingroup libavc
++ * @defgroup lavc_encdec send/receive encoding and decoding API overview
++ * @{
++ *
++ * The avcodec_send_packet()/avcodec_receive_frame()/avcodec_send_frame()/
++ * avcodec_receive_packet() functions provide an encode/decode API, which
++ * decouples input and output.
++ *
++ * The API is very similar for encoding/decoding and audio/video, and works as
++ * follows:
++ * - Set up and open the AVCodecContext as usual.
++ * - Send valid input:
++ * - For decoding, call avcodec_send_packet() to give the decoder raw
++ * compressed data in an AVPacket.
++ * - For encoding, call avcodec_send_frame() to give the encoder an AVFrame
++ * containing uncompressed audio or video.
++ *
++ * In both cases, it is recommended that AVPackets and AVFrames are
++ * refcounted, or libavcodec might have to copy the input data. (libavformat
++ * always returns refcounted AVPackets, and av_frame_get_buffer() allocates
++ * refcounted AVFrames.)
++ * - Receive output in a loop. Periodically call one of the avcodec_receive_*()
++ * functions and process their output:
++ * - For decoding, call avcodec_receive_frame(). On success, it will return
++ * an AVFrame containing uncompressed audio or video data.
++ * - For encoding, call avcodec_receive_packet(). On success, it will return
++ * an AVPacket with a compressed frame.
++ *
++ * Repeat this call until it returns AVERROR(EAGAIN) or an error. The
++ * AVERROR(EAGAIN) return value means that new input data is required to
++ * return new output. In this case, continue with sending input. For each
++ * input frame/packet, the codec will typically return 1 output frame/packet,
++ * but it can also be 0 or more than 1.
++ *
++ * At the beginning of decoding or encoding, the codec might accept multiple
++ * input frames/packets without returning a frame, until its internal buffers
++ * are filled. This situation is handled transparently if you follow the steps
++ * outlined above.
++ *
++ * In theory, sending input can result in EAGAIN - this should happen only if
++ * not all output was received. You can use this to structure alternative decode
++ * or encode loops other than the one suggested above. For example, you could
++ * try sending new input on each iteration, and try to receive output if that
++ * returns EAGAIN.
++ *
++ * End of stream situations. These require "flushing" (aka draining) the codec,
++ * as the codec might buffer multiple frames or packets internally for
++ * performance or out of necessity (consider B-frames).
++ * This is handled as follows:
++ * - Instead of valid input, send NULL to the avcodec_send_packet() (decoding)
++ * or avcodec_send_frame() (encoding) functions. This will enter draining
++ * mode.
++ * - Call avcodec_receive_frame() (decoding) or avcodec_receive_packet()
++ * (encoding) in a loop until AVERROR_EOF is returned. The functions will
++ * not return AVERROR(EAGAIN), unless you forgot to enter draining mode.
++ * - Before decoding can be resumed again, the codec has to be reset with
++ * avcodec_flush_buffers().
++ *
++ * Using the API as outlined above is highly recommended. But it is also
++ * possible to call functions outside of this rigid schema. For example, you can
++ * call avcodec_send_packet() repeatedly without calling
++ * avcodec_receive_frame(). In this case, avcodec_send_packet() will succeed
++ * until the codec's internal buffer has been filled up (which is typically of
++ * size 1 per output frame, after initial input), and then reject input with
++ * AVERROR(EAGAIN). Once it starts rejecting input, you have no choice but to
++ * read at least some output.
++ *
++ * Not all codecs will follow a rigid and predictable dataflow; the only
++ * guarantee is that an AVERROR(EAGAIN) return value on a send/receive call on
++ * one end implies that a receive/send call on the other end will succeed, or
++ * at least will not fail with AVERROR(EAGAIN). In general, no codec will
++ * permit unlimited buffering of input or output.
++ *
++ * A codec is not allowed to return AVERROR(EAGAIN) for both sending and receiving. This
++ * would be an invalid state, which could put the codec user into an endless
++ * loop. The API has no concept of time either: it cannot happen that trying to
++ * do avcodec_send_packet() results in AVERROR(EAGAIN), but a repeated call 1 second
++ * later accepts the packet (with no other receive/flush API calls involved).
++ * The API is a strict state machine, and the passage of time is not supposed
++ * to influence it. Some timing-dependent behavior might still be deemed
++ * acceptable in certain cases. But it must never result in both send/receive
++ * returning EAGAIN at the same time at any point. It must also absolutely be
++ * avoided that the current state is "unstable" and can "flip-flop" between
++ * the send/receive APIs allowing progress. For example, it's not allowed that
++ * the codec randomly decides that it actually wants to consume a packet now
++ * instead of returning a frame, after it just returned AVERROR(EAGAIN) on an
++ * avcodec_send_packet() call.
++ * @}
++ */
++
++/**
++ * @defgroup lavc_core Core functions/structures.
++ * @ingroup libavc
++ *
++ * Basic definitions, functions for querying libavcodec capabilities,
++ * allocating core structures, etc.
++ * @{
++ */
++
++/**
++ * @ingroup lavc_encoding
++ */
++typedef struct RcOverride{
++ int start_frame;
++ int end_frame;
++ int qscale; // If this is 0 then quality_factor will be used instead.
++ float quality_factor;
++} RcOverride;
++
++/* encoding support
++ These flags can be passed in AVCodecContext.flags before initialization.
++ Note: Not everything is supported yet.
++*/
++
++/**
++ * Allow decoders to produce frames with data planes that are not aligned
++ * to CPU requirements (e.g. due to cropping).
++ */
++#define AV_CODEC_FLAG_UNALIGNED (1 << 0)
++/**
++ * Use fixed qscale.
++ */
++#define AV_CODEC_FLAG_QSCALE (1 << 1)
++/**
++ * 4 MV per MB allowed / advanced prediction for H.263.
++ */
++#define AV_CODEC_FLAG_4MV (1 << 2)
++/**
++ * Output even those frames that might be corrupted.
++ */
++#define AV_CODEC_FLAG_OUTPUT_CORRUPT (1 << 3)
++/**
++ * Use qpel MC.
++ */
++#define AV_CODEC_FLAG_QPEL (1 << 4)
++/**
++ * Request the encoder to output reconstructed frames, i.e.\ frames that would
++ * be produced by decoding the encoded bitstream. These frames may be retrieved
++ * by calling avcodec_receive_frame() immediately after a successful call to
++ * avcodec_receive_packet().
++ *
++ * Should only be used with encoders flagged with the
++ * @ref AV_CODEC_CAP_ENCODER_RECON_FRAME capability.
++ *
++ * @note
++ * Each reconstructed frame returned by the encoder corresponds to the last
++ * encoded packet, i.e. the frames are returned in coded order rather than
++ * presentation order.
++ *
++ * @note
++ * Frame parameters (like pixel format or dimensions) do not have to match the
++ * AVCodecContext values. Make sure to use the values from the returned frame.
++ */
++#define AV_CODEC_FLAG_RECON_FRAME (1 << 6)
++/**
++ * @par decoding
++ * Request the decoder to propagate each packet's AVPacket.opaque and
++ * AVPacket.opaque_ref to its corresponding output AVFrame.
++ *
++ * @par encoding:
++ * Request the encoder to propagate each frame's AVFrame.opaque and
++ * AVFrame.opaque_ref values to its corresponding output AVPacket.
++ *
++ * @par
++ * May only be set on encoders that have the
++ * @ref AV_CODEC_CAP_ENCODER_REORDERED_OPAQUE capability flag.
++ *
++ * @note
++ * While in typical cases one input frame produces exactly one output packet
++ * (perhaps after a delay), in general the mapping of frames to packets is
++ * M-to-N, so
++ * - Any number of input frames may be associated with any given output packet.
++ * This includes zero - e.g. some encoders may output packets that carry only
++ * metadata about the whole stream.
++ * - A given input frame may be associated with any number of output packets.
++ * Again this includes zero - e.g. some encoders may drop frames under certain
++ * conditions.
++ * .
++ * This implies that when using this flag, the caller must NOT assume that
++ * - a given input frame's opaques will necessarily appear on some output packet;
++ * - every output packet will have some non-NULL opaque value.
++ * .
++ * When an output packet contains multiple frames, the opaque values will be
++ * taken from the first of those.
++ *
++ * @note
++ * The converse holds for decoders, with frames and packets switched.
++ */
++#define AV_CODEC_FLAG_COPY_OPAQUE (1 << 7)
++/**
++ * Signal to the encoder that the values of AVFrame.duration are valid and
++ * should be used (typically for transferring them to output packets).
++ *
++ * If this flag is not set, frame durations are ignored.
++ */
++#define AV_CODEC_FLAG_FRAME_DURATION (1 << 8)
++/**
++ * Use internal 2pass ratecontrol in first pass mode.
++ */
++#define AV_CODEC_FLAG_PASS1 (1 << 9)
++/**
++ * Use internal 2pass ratecontrol in second pass mode.
++ */
++#define AV_CODEC_FLAG_PASS2 (1 << 10)
++/**
++ * loop filter.
++ */
++#define AV_CODEC_FLAG_LOOP_FILTER (1 << 11)
++/**
++ * Only decode/encode grayscale.
++ */
++#define AV_CODEC_FLAG_GRAY (1 << 13)
++/**
++ * error[?] variables will be set during encoding.
++ */
++#define AV_CODEC_FLAG_PSNR (1 << 15)
++/**
++ * Use interlaced DCT.
++ */
++#define AV_CODEC_FLAG_INTERLACED_DCT (1 << 18)
++/**
++ * Force low delay.
++ */
++#define AV_CODEC_FLAG_LOW_DELAY (1 << 19)
++/**
++ * Place global headers in extradata instead of every keyframe.
++ */
++#define AV_CODEC_FLAG_GLOBAL_HEADER (1 << 22)
++/**
++ * Use only bitexact stuff (except (I)DCT).
++ */
++#define AV_CODEC_FLAG_BITEXACT (1 << 23)
++/* Fx : Flag for H.263+ extra options */
++/**
++ * H.263 advanced intra coding / MPEG-4 AC prediction
++ */
++#define AV_CODEC_FLAG_AC_PRED (1 << 24)
++/**
++ * interlaced motion estimation
++ */
++#define AV_CODEC_FLAG_INTERLACED_ME (1 << 29)
++#define AV_CODEC_FLAG_CLOSED_GOP (1U << 31)
++
++/**
++ * Allow non spec compliant speedup tricks.
++ */
++#define AV_CODEC_FLAG2_FAST (1 << 0)
++/**
++ * Skip bitstream encoding.
++ */
++#define AV_CODEC_FLAG2_NO_OUTPUT (1 << 2)
++/**
++ * Place global headers at every keyframe instead of in extradata.
++ */
++#define AV_CODEC_FLAG2_LOCAL_HEADER (1 << 3)
++
++/**
++ * Input bitstream might be truncated at a packet boundaries
++ * instead of only at frame boundaries.
++ */
++#define AV_CODEC_FLAG2_CHUNKS (1 << 15)
++/**
++ * Discard cropping information from SPS.
++ */
++#define AV_CODEC_FLAG2_IGNORE_CROP (1 << 16)
++
++/**
++ * Show all frames before the first keyframe
++ */
++#define AV_CODEC_FLAG2_SHOW_ALL (1 << 22)
++/**
++ * Export motion vectors through frame side data
++ */
++#define AV_CODEC_FLAG2_EXPORT_MVS (1 << 28)
++/**
++ * Do not skip samples and export skip information as frame side data
++ */
++#define AV_CODEC_FLAG2_SKIP_MANUAL (1 << 29)
++/**
++ * Do not reset ASS ReadOrder field on flush (subtitles decoding)
++ */
++#define AV_CODEC_FLAG2_RO_FLUSH_NOOP (1 << 30)
++/**
++ * Generate/parse ICC profiles on encode/decode, as appropriate for the type of
++ * file. No effect on codecs which cannot contain embedded ICC profiles, or
++ * when compiled without support for lcms2.
++ */
++#define AV_CODEC_FLAG2_ICC_PROFILES (1U << 31)
++
++/* Exported side data.
++ These flags can be passed in AVCodecContext.export_side_data before
initialization.
++*/
++/**
++ * Export motion vectors through frame side data
++ */
++#define AV_CODEC_EXPORT_DATA_MVS (1 << 0)
++/**
++ * Export encoder Producer Reference Time through packet side data
++ */
++#define AV_CODEC_EXPORT_DATA_PRFT (1 << 1)
++/**
++ * Decoding only.
++ * Export the AVVideoEncParams structure through frame side data.
++ */
++#define AV_CODEC_EXPORT_DATA_VIDEO_ENC_PARAMS (1 << 2)
++/**
++ * Decoding only.
++ * Do not apply film grain, export it instead.
++ */
++#define AV_CODEC_EXPORT_DATA_FILM_GRAIN (1 << 3)
++
++/**
++ * Decoding only.
++ * Do not apply picture enhancement layers, export them instead.
++ */
++#define AV_CODEC_EXPORT_DATA_ENHANCEMENTS (1 << 4)
++
++/**
++ * The decoder will keep a reference to the frame and may reuse it later.
++ */
++#define AV_GET_BUFFER_FLAG_REF (1 << 0)
++
++/**
++ * The encoder will keep a reference to the packet and may reuse it later.
++ */
++#define AV_GET_ENCODE_BUFFER_FLAG_REF (1 << 0)
++
++/**
++ * main external API structure.
++ * New fields can be added to the end with minor version bumps.
++ * Removal, reordering and changes to existing fields require a major
++ * version bump.
++ * You can use AVOptions (av_opt* / av_set/get*()) to access these fields from user
++ * applications.
++ * The name string for AVOptions options matches the associated command line
++ * parameter name and can be found in libavcodec/options_table.h
++ * The AVOption/command line parameter names differ in some cases from the C
++ * structure field names for historic reasons or brevity.
++ * sizeof(AVCodecContext) must not be used outside libav*.
++ */
++typedef struct AVCodecContext {
++ /**
++ * information on struct for av_log
++ * - set by avcodec_alloc_context3
++ */
++ const AVClass *av_class;
++ int log_level_offset;
++
++ enum AVMediaType codec_type; /* see AVMEDIA_TYPE_xxx */
++ const struct AVCodec *codec;
++ enum AVCodecID codec_id; /* see AV_CODEC_ID_xxx */
++
++ /**
++ * fourcc (LSB first, so "ABCD" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A').
++ * This is used to work around some encoder bugs.
++ * A demuxer should set this to what is stored in the field used to identify the codec.
++ * If there are multiple such fields in a container then the demuxer should choose the one
++ * which maximizes the information about the used codec.
++ * If the codec tag field in a container is larger than 32 bits then the demuxer should
++ * remap the longer ID to 32 bits with a table or other structure. Alternatively a new
++ * extra_codec_tag + size could be added but for this a clear advantage must be demonstrated
++ * first.
++ * - encoding: Set by user, if not then the default based on codec_id will be used.
++ * - decoding: Set by user, will be converted to uppercase by libavcodec during init.
++ */
++ unsigned int codec_tag;
++
++ void *priv_data;
++
++ /**
++ * Private context used for internal data.
++ *
++ * Unlike priv_data, this is not codec-specific. It is used in general
++ * libavcodec functions.
++ */
++ struct AVCodecInternal *internal;
++
++ /**
++ * Private data of the user, can be used to carry app specific stuff.
++ * - encoding: Set by user.
++ * - decoding: Set by user.
++ */
++ void *opaque;
++
++ /**
++ * the average bitrate
++ * - encoding: Set by user; unused for constant quantizer encoding.
++ * - decoding: Set by user, may be overwritten by libavcodec
++ * if this info is available in the stream
++ */
++ int64_t bit_rate;
++
++ /**
++ * AV_CODEC_FLAG_*.
++ * - encoding: Set by user.
++ * - decoding: Set by user.
++ */
++ int flags;
++
++ /**
++ * AV_CODEC_FLAG2_*
++ * - encoding: Set by user.
++ * - decoding: Set by user.
++ */
++ int flags2;
++
++ /**
++ * Out-of-band global headers that may be used by some codecs.
++ *
++ * - decoding: Should be set by the caller when available (typically from a
++ * demuxer) before opening the decoder; some decoders require this to be
++ * set and will fail to initialize otherwise.
++ *
++ * The array must be allocated with the av_malloc() family of functions;
++ * allocated size must be at least AV_INPUT_BUFFER_PADDING_SIZE bytes
++ * larger than extradata_size.
++ *
++ * - encoding: May be set by the encoder in avcodec_open2() (possibly
++ * depending on whether the AV_CODEC_FLAG_GLOBAL_HEADER flag is set).
++ *
++ * After being set, the array is owned by the codec and freed in
++ * avcodec_free_context().
++ */
++ uint8_t *extradata;
++ int extradata_size;
++
++ /**
++ * This is the fundamental unit of time (in seconds) in terms
++ * of which frame timestamps are represented. For fixed-fps content,
++ * timebase should be 1/framerate and timestamp increments should be
++ * identically 1.
++ * This often, but not always is the inverse of the frame rate or field rate
++ * for video. 1/time_base is not the average frame rate if the frame rate is not
++ * constant.
++ *
++ * Like containers, elementary streams also can store timestamps, 1/time_base
++ * is the unit in which these timestamps are specified.
++ * As example of such codec time base see ISO/IEC 14496-2:2001(E)
++ * vop_time_increment_resolution and fixed_vop_rate
++ * (fixed_vop_rate == 0 implies that it is different from the framerate)
++ *
++ * - encoding: MUST be set by user.
++ * - decoding: unused.
++ */
++ AVRational time_base;
++
++ /**
++ * Timebase in which pkt_dts/pts and AVPacket.dts/pts are expressed.
++ * - encoding: unused.
++ * - decoding: set by user.
++ */
++ AVRational pkt_timebase;
++
++ /**
++ * - decoding: For codecs that store a framerate value in the compressed
++ * bitstream, the decoder may export it here. { 0, 1} when
++ * unknown.
++ * - encoding: May be used to signal the framerate of CFR content to an
++ * encoder.
++ */
++ AVRational framerate;
++
++ /**
++ * Codec delay.
++ *
++ * Encoding: Number of frames delay there will be from the encoder input to
++ * the decoder output. (we assume the decoder matches the spec)
++ * Decoding: Number of frames delay in addition to what a standard decoder
++ * as specified in the spec would produce.
++ *
++ * Video:
++ * Number of frames the decoded output will be delayed relative to the
++ * encoded input.
++ *
++ * Audio:
++ * For encoding, this field is unused (see initial_padding).
++ *
++ * For decoding, this is the number of samples the decoder needs to
++ * output before the decoder's output is valid. When seeking, you should
++ * start decoding this many samples prior to your desired seek point.
++ *
++ * - encoding: Set by libavcodec.
++ * - decoding: Set by libavcodec.
++ */
++ int delay;
++
++
++ /* video only */
++ /**
++ * picture width / height.
++ *
++ * @note Those fields may not match the values of the last
++ * AVFrame output by avcodec_receive_frame() due frame
++ * reordering.
++ *
++ * - encoding: MUST be set by user.
++ * - decoding: May be set by the user before opening the decoder if known e.g.
++ * from the container. Some decoders will require the dimensions
++ * to be set by the caller. During decoding, the decoder may
++ * overwrite those values as required while parsing the data.
++ */
++ int width, height;
++
++ /**
++ * Bitstream width / height, may be different from width/height e.g. when
++ * the decoded frame is cropped before being output or lowres is enabled.
++ *
++ * @note Those field may not match the value of the last
++ * AVFrame output by avcodec_receive_frame() due frame
++ * reordering.
++ *
++ * - encoding: unused
++ * - decoding: May be set by the user before opening the decoder if known
++ * e.g. from the container. During decoding, the decoder may
++ * overwrite those values as required while parsing the data.
++ */
++ int coded_width, coded_height;
++
++ /**
++ * sample aspect ratio (0 if unknown)
++ * That is the width of a pixel divided by the height of the pixel.
++ * Numerator and denominator must be relatively prime and smaller than 256 for some video standards.
++ * - encoding: Set by user.
++ * - decoding: Set by libavcodec.
++ */
++ AVRational sample_aspect_ratio;
++
++ /**
++ * Pixel format, see AV_PIX_FMT_xxx.
++ * May be set by the demuxer if known from headers.
++ * May be overridden by the decoder if it knows better.
++ *
++ * @note This field may not match the value of the last
++ * AVFrame output by avcodec_receive_frame() due frame
++ * reordering.
++ *
++ * - encoding: Set by user.
++ * - decoding: Set by user if known, overridden by libavcodec while
++ * parsing the data.
++ */
++ enum AVPixelFormat pix_fmt;
++
++ /**
++ * Nominal unaccelerated pixel format, see AV_PIX_FMT_xxx.
++ * - encoding: unused.
++ * - decoding: Set by libavcodec before calling get_format()
++ */
++ enum AVPixelFormat sw_pix_fmt;
++
++ /**
++ * Chromaticity coordinates of the source primaries.
++ * - encoding: Set by user
++ * - decoding: Set by libavcodec
++ */
++ enum AVColorPrimaries color_primaries;
++
++ /**
++ * Color Transfer Characteristic.
++ * - encoding: Set by user
++ * - decoding: Set by libavcodec
++ */
++ enum AVColorTransferCharacteristic color_trc;
++
++ /**
++ * YUV colorspace type.
++ * - encoding: Set by user
++ * - decoding: Set by libavcodec
++ */
++ enum AVColorSpace colorspace;
++
++ /**
++ * MPEG vs JPEG YUV range.
++ * - encoding: Set by user to override the default output color range value,
++ * If not specified, libavcodec sets the color range depending on the
++ * output format.
++ * - decoding: Set by libavcodec, can be set by the user to propagate the
++ * color range to components reading from the decoder context.
++ */
++ enum AVColorRange color_range;
++
++ /**
++ * This defines the location of chroma samples.
++ * - encoding: Set by user
++ * - decoding: Set by libavcodec
++ */
++ enum AVChromaLocation chroma_sample_location;
++
++ /** Field order
++ * - encoding: set by libavcodec
++ * - decoding: Set by user.
++ */
++ enum AVFieldOrder field_order;
++
++ /**
++ * number of reference frames
++ * - encoding: Set by user.
++ * - decoding: Set by lavc.
++ */
++ int refs;
++
++ /**
++ * Size of the frame reordering buffer in the decoder.
++ * For MPEG-2 it is 1 IPB or 0 low delay IP.
++ * - encoding: Set by libavcodec.
++ * - decoding: Set by libavcodec.
++ */
++ int has_b_frames;
++
++ /**
++ * slice flags
++ * - encoding: unused
++ * - decoding: Set by user.
++ */
++ int slice_flags;
++#define SLICE_FLAG_CODED_ORDER 0x0001 ///< draw_horiz_band() is called in coded order instead of display
++#define SLICE_FLAG_ALLOW_FIELD 0x0002 ///< allow draw_horiz_band() with field slices (MPEG-2 field pics)
++#define SLICE_FLAG_ALLOW_PLANE 0x0004 ///< allow draw_horiz_band() with 1 component at a time (SVQ1)
++
++ /**
++ * If non NULL, 'draw_horiz_band' is called by the libavcodec
++ * decoder to draw a horizontal band. It improves cache usage. Not
++ * all codecs can do that. You must check the codec capabilities
++ * beforehand.
++ * When multithreading is used, it may be called from multiple threads
++ * at the same time; threads might draw different parts of the same AVFrame,
++ * or multiple AVFrames, and there is no guarantee that slices will be drawn
++ * in order.
++ * The function is also used by hardware acceleration APIs.
++ * It is called at least once during frame decoding to pass
++ * the data needed for hardware render.
++ * In that mode instead of pixel data, AVFrame points to
++ * a structure specific to the acceleration API. The application
++ * reads the structure and can change some fields to indicate progress
++ * or mark state.
++ * - encoding: unused
++ * - decoding: Set by user.
++ * @param height the height of the slice
++ * @param y the y position of the slice
++ * @param type 1->top field, 2->bottom field, 3->frame
++ * @param offset offset into the AVFrame.data from which the slice should be read
++ */
++ void (*draw_horiz_band)(struct AVCodecContext *s,
++ const AVFrame *src, int offset[AV_NUM_DATA_POINTERS],
++ int y, int type, int height);
++
++ /**
++ * Callback to negotiate the pixel format. Decoding only, may be set by the
++ * caller before avcodec_open2().
++ *
++ * Called by some decoders to select the pixel format that will be used for
++ * the output frames. This is mainly used to set up hardware acceleration,
++ * then the provided format list contains the corresponding hwaccel pixel
++ * formats alongside the "software" one. The software pixel format may also
++ * be retrieved from \ref sw_pix_fmt.
++ *
++ * This callback will be called when the coded frame properties (such as
++ * resolution, pixel format, etc.) change and more than one output format is
++ * supported for those new properties. If a hardware pixel format is chosen
++ * and initialization for it fails, the callback may be called again
++ * immediately.
++ *
++ * This callback may be called from different threads if the decoder is
++ * multi-threaded, but not from more than one thread simultaneously.
++ *
++ * @param fmt list of formats which may be used in the current
++ * configuration, terminated by AV_PIX_FMT_NONE.
++ * @warning Behavior is undefined if the callback returns a value other
++ * than one of the formats in fmt or AV_PIX_FMT_NONE.
++ * @return the chosen format or AV_PIX_FMT_NONE
++ */
++ enum AVPixelFormat (*get_format)(struct AVCodecContext *s, const enum AVPixelFormat * fmt);
++
++ /**
++ * maximum number of B-frames between non-B-frames
++ * Note: The output will be delayed by max_b_frames+1 relative to the input.
++ * - encoding: Set by user.
++ * - decoding: unused
++ */
++ int max_b_frames;
++
++ /**
++ * qscale factor between IP and B-frames
++ * If > 0 then the last P-frame quantizer will be used (q= lastp_q*factor+offset).
++ * If < 0 then normal ratecontrol will be done (q= -normal_q*factor+offset).
++ * - encoding: Set by user.
++ * - decoding: unused
++ */
++ float b_quant_factor;
++
++ /**
++ * qscale offset between IP and B-frames
++ * - encoding: Set by user.
++ * - decoding: unused
++ */
++ float b_quant_offset;
++
++ /**
++ * qscale factor between P- and I-frames
++ * If > 0 then the last P-frame quantizer will be used (q = lastp_q * factor + offset).
++ * If < 0 then normal ratecontrol will be done (q= -normal_q*factor+offset).
++ * - encoding: Set by user.
++ * - decoding: unused
++ */
++ float i_quant_factor;
++
++ /**
++ * qscale offset between P and I-frames
++ * - encoding: Set by user.
++ * - decoding: unused
++ */
++ float i_quant_offset;
++
++ /**
++ * luminance masking (0-> disabled)
++ * - encoding: Set by user.
++ * - decoding: unused
++ */
++ float lumi_masking;
++
++ /**
++ * temporary complexity masking (0-> disabled)
++ * - encoding: Set by user.
++ * - decoding: unused
++ */
++ float temporal_cplx_masking;
++
++ /**
++ * spatial complexity masking (0-> disabled)
++ * - encoding: Set by user.
++ * - decoding: unused
++ */
++ float spatial_cplx_masking;
++
++ /**
++ * p block masking (0-> disabled)
++ * - encoding: Set by user.
++ * - decoding: unused
++ */
++ float p_masking;
++
++ /**
++ * darkness masking (0-> disabled)
++ * - encoding: Set by user.
++ * - decoding: unused
++ */
++ float dark_masking;
++
++ /**
++ * noise vs. sse weight for the nsse comparison function
++ * - encoding: Set by user.
++ * - decoding: unused
++ */
++ int nsse_weight;
++
++ /**
++ * motion estimation comparison function
++ * - encoding: Set by user.
++ * - decoding: unused
++ */
++ int me_cmp;
++ /**
++ * subpixel motion estimation comparison function
++ * - encoding: Set by user.
++ * - decoding: unused
++ */
++ int me_sub_cmp;
++ /**
++ * macroblock comparison function (not supported yet)
++ * - encoding: Set by user.
++ * - decoding: unused
++ */
++ int mb_cmp;
++ /**
++ * interlaced DCT comparison function
++ * - encoding: Set by user.
++ * - decoding: unused
++ */
++ int ildct_cmp;
++#define FF_CMP_SAD 0
++#define FF_CMP_SSE 1
++#define FF_CMP_SATD 2
++#define FF_CMP_DCT 3
++#define FF_CMP_PSNR 4
++#define FF_CMP_BIT 5
++#define FF_CMP_RD 6
++#define FF_CMP_ZERO 7
++#define FF_CMP_VSAD 8
++#define FF_CMP_VSSE 9
++#define FF_CMP_NSSE 10
++#define FF_CMP_W53 11
++#define FF_CMP_W97 12
++#define FF_CMP_DCTMAX 13
++#define FF_CMP_DCT264 14
++#define FF_CMP_MEDIAN_SAD 15
++#define FF_CMP_CHROMA 256
++
++ /**
++ * ME diamond size & shape
++ * - encoding: Set by user.
++ * - decoding: unused
++ */
++ int dia_size;
++
++ /**
++ * amount of previous MV predictors (2a+1 x 2a+1 square)
++ * - encoding: Set by user.
++ * - decoding: unused
++ */
++ int last_predictor_count;
++
++ /**
++ * motion estimation prepass comparison function
++ * - encoding: Set by user.
++ * - decoding: unused
++ */
++ int me_pre_cmp;
++
++ /**
++ * ME prepass diamond size & shape
++ * - encoding: Set by user.
++ * - decoding: unused
++ */
++ int pre_dia_size;
++
++ /**
++ * subpel ME quality
++ * - encoding: Set by user.
++ * - decoding: unused
++ */
++ int me_subpel_quality;
++
++ /**
++ * maximum motion estimation search range in subpel units
++ * If 0 then no limit.
++ *
++ * - encoding: Set by user.
++ * - decoding: unused
++ */
++ int me_range;
++
++ /**
++ * macroblock decision mode
++ * - encoding: Set by user.
++ * - decoding: unused
++ */
++ int mb_decision;
++#define FF_MB_DECISION_SIMPLE 0 ///< uses mb_cmp
++#define FF_MB_DECISION_BITS 1 ///< chooses the one which needs the fewest bits
++#define FF_MB_DECISION_RD 2 ///< rate distortion
++
++ /**
++ * custom intra quantization matrix
++ * Must be allocated with the av_malloc() family of functions, and will be freed in
++ * avcodec_free_context().
++ * - encoding: Set/allocated by user, freed by libavcodec. Can be NULL.
++ * - decoding: Set/allocated/freed by libavcodec.
++ */
++ uint16_t *intra_matrix;
++
++ /**
++ * custom inter quantization matrix
++ * Must be allocated with the av_malloc() family of functions, and will be freed in
++ * avcodec_free_context().
++ * - encoding: Set/allocated by user, freed by libavcodec. Can be NULL.
++ * - decoding: Set/allocated/freed by libavcodec.
++ */
++ uint16_t *inter_matrix;
++
++ /**
++ * custom intra quantization matrix
++ * - encoding: Set by user, can be NULL.
++ * - decoding: unused.
++ */
++ uint16_t *chroma_intra_matrix;
++
++ /**
++ * precision of the intra DC coefficient - 8
++ * - encoding: Set by user.
++ * - decoding: Set by libavcodec
++ */
++ int intra_dc_precision;
++
++ /**
++ * minimum MB Lagrange multiplier
++ * - encoding: Set by user.
++ * - decoding: unused
++ */
++ int mb_lmin;
++
++ /**
++ * maximum MB Lagrange multiplier
++ * - encoding: Set by user.
++ * - decoding: unused
++ */
++ int mb_lmax;
++
++ /**
++ * - encoding: Set by user.
++ * - decoding: unused
++ */
++ int bidir_refine;
++
++ /**
++ * minimum GOP size
++ * - encoding: Set by user.
++ * - decoding: unused
++ */
++ int keyint_min;
++
++ /**
++ * the number of pictures in a group of pictures, or 0 for intra_only
++ * - encoding: Set by user.
++ * - decoding: unused
++ */
++ int gop_size;
++
++ /**
++ * Note: Value depends upon the compare function used for fullpel ME.
++ * - encoding: Set by user.
++ * - decoding: unused
++ */
++ int mv0_threshold;
++
++ /**
++ * Number of slices.
++ * Indicates number of picture subdivisions. Used for parallelized
++ * decoding.
++ * - encoding: Set by user
++ * - decoding: unused
++ */
++ int slices;
++
++ /* audio only */
++ int sample_rate; ///< samples per second
++
++ /**
++ * audio sample format
++ * - encoding: Set by user.
++ * - decoding: Set by libavcodec.
++ */
++ enum AVSampleFormat sample_fmt; ///< sample format
++
++ /**
++ * Audio channel layout.
++ * - encoding: must be set by the caller, to one of AVCodec.ch_layouts.
++ * - decoding: may be set by the caller if known e.g. from the container.
++ * The decoder can then override during decoding as needed.
++ */
++ AVChannelLayout ch_layout;
++
++ /* The following data should not be initialized. */
++ /**
++ * Number of samples per channel in an audio frame.
++ *
++ * - encoding: set by libavcodec in avcodec_open2(). Each submitted frame
++ * except the last must contain exactly frame_size samples per channel.
++ * May be 0 when the codec has AV_CODEC_CAP_VARIABLE_FRAME_SIZE set, then the
++ * frame size is not restricted.
++ * - decoding: may be set by some decoders to indicate constant frame size
++ */
++ int frame_size;
++
++ /**
++ * number of bytes per packet if constant and known or 0
++ * Used by some WAV based audio codecs.
++ */
++ int block_align;
++
++ /**
++ * Audio cutoff bandwidth (0 means "automatic")
++ * - encoding: Set by user.
++ * - decoding: unused
++ */
++ int cutoff;
++
++ /**
++ * Type of service that the audio stream conveys.
++ * - encoding: Set by user.
++ * - decoding: Set by libavcodec.
++ */
++ enum AVAudioServiceType audio_service_type;
++
++ /**
++ * desired sample format
++ * - encoding: Not used.
++ * - decoding: Set by user.
++ * Decoder will decode to this format if it can.
++ */
++ enum AVSampleFormat request_sample_fmt;
++
++ /**
++ * Audio only. The number of "priming" samples (padding) inserted by the
++ * encoder at the beginning of the audio. I.e. this number of leading
++ * decoded samples must be discarded by the caller to get the original audio
++ * without leading padding.
++ *
++ * - decoding: unused
++ * - encoding: Set by libavcodec. The timestamps on the output packets are
++ * adjusted by the encoder so that they always refer to the
++ * first sample of the data actually contained in the packet,
++ * including any added padding. E.g. if the timebase is
++ * 1/samplerate and the timestamp of the first input sample is
++ * 0, the timestamp of the first output packet will be
++ * -initial_padding.
++ */
++ int initial_padding;
++
++ /**
++ * Audio only. The amount of padding (in samples) appended by the encoder to
++ * the end of the audio. I.e. this number of decoded samples must be
++ * discarded by the caller from the end of the stream to get the original
++ * audio without any trailing padding.
++ *
++ * - decoding: unused
++ * - encoding: unused
++ */
++ int trailing_padding;
++
++ /**
++ * Number of samples to skip after a discontinuity
++ * - decoding: unused
++ * - encoding: set by libavcodec
++ */
++ int seek_preroll;
++
++ /**
++ * This callback is called at the beginning of each frame to get data
++ * buffer(s) for it. There may be one contiguous buffer for all the data or
++ * there may be a buffer per each data plane or anything in between. What
++ * this means is, you may set however many entries in buf[] you feel necessary.
++ * Each buffer must be reference-counted using the AVBuffer API (see description
++ * of buf[] below).
++ *
++ * The following fields will be set in the frame before this callback is
++ * called:
++ * - format
++ * - width, height (video only)
++ * - sample_rate, channel_layout, nb_samples (audio only)
++ * Their values may differ from the corresponding values in
++ * AVCodecContext. This callback must use the frame values, not the codec
++ * context values, to calculate the required buffer size.
++ *
++ * This callback must fill the following fields in the frame:
++ * - data[]
++ * - linesize[]
++ * - extended_data:
++ * * if the data is planar audio with more than 8 channels, then this
++ * callback must allocate and fill extended_data to contain all pointers
++ * to all data planes. data[] must hold as many pointers as it can.
++ * extended_data must be allocated with av_malloc() and will be freed in
++ * av_frame_unref().
++ * * otherwise extended_data must point to data
++ * - buf[] must contain one or more pointers to AVBufferRef structures. Each of
++ * the frame's data and extended_data pointers must be contained in these. That
++ * is, one AVBufferRef for each allocated chunk of memory, not necessarily one
++ * AVBufferRef per data[] entry. See: av_buffer_create(), av_buffer_alloc(),
++ * and av_buffer_ref().
++ * - extended_buf and nb_extended_buf must be allocated with av_malloc() by
++ * this callback and filled with the extra buffers if there are more
++ * buffers than buf[] can hold. extended_buf will be freed in
++ * av_frame_unref().
++ * Decoders will generally initialize the whole buffer before it is output
++ * but it can in rare error conditions happen that uninitialized data is passed
++ * through. \important The buffers returned by get_buffer* should thus not contain sensitive
++ * data.
++ *
++ * If AV_CODEC_CAP_DR1 is not set then get_buffer2() must call
++ * avcodec_default_get_buffer2() instead of providing buffers allocated by
++ * some other means.
++ *
++ * Each data plane must be aligned to the maximum required by the target
++ * CPU.
++ *
++ * @see avcodec_default_get_buffer2()
++ *
++ * Video:
++ *
++ * If AV_GET_BUFFER_FLAG_REF is set in flags then the frame may be reused
++ * (read and/or written to if it is writable) later by libavcodec.
++ *
++ * avcodec_align_dimensions2() should be used to find the required width and
++ * height, as they normally need to be rounded up to the next multiple of 16.
++ *
++ * Some decoders do not support linesizes changing between frames.
++ *
++ * If frame multithreading is used, this callback may be called from a
++ * different thread, but not from more than one at once. Does not need to be
++ * reentrant.
++ *
++ * @see avcodec_align_dimensions2()
++ *
++ * Audio:
++ *
++ * Decoders request a buffer of a particular size by setting
++ * AVFrame.nb_samples prior to calling get_buffer2(). The decoder may,
++ * however, utilize only part of the buffer by setting AVFrame.nb_samples
++ * to a smaller value in the output frame.
++ *
++ * As a convenience, av_samples_get_buffer_size() and
++ * av_samples_fill_arrays() in libavutil may be used by custom get_buffer2()
++ * functions to find the required data size and to fill data pointers and
++ * linesize. In AVFrame.linesize, only linesize[0] may be set for audio
++ * since all planes must be the same size.
++ *
++ * @see av_samples_get_buffer_size(), av_samples_fill_arrays()
++ *
++ * - encoding: unused
++ * - decoding: Set by libavcodec, user can override.
++ */
++ int (*get_buffer2)(struct AVCodecContext *s, AVFrame *frame, int flags);
++
++ /* - encoding parameters */
++ /**
++ * number of bits the bitstream is allowed to diverge from the reference.
++ * the reference can be CBR (for CBR pass1) or VBR (for pass2)
++ * - encoding: Set by user; unused for constant quantizer encoding.
++ * - decoding: unused
++ */
++ int bit_rate_tolerance;
++
++ /**
++ * Global quality for codecs which cannot change it per frame.
++ * This should be proportional to MPEG-1/2/4 qscale.
++ * - encoding: Set by user.
++ * - decoding: unused
++ */
++ int global_quality;
++
++ /**
++ * - encoding: Set by user.
++ * - decoding: unused
++ */
++ int compression_level;
++#define FF_COMPRESSION_DEFAULT -1
++
++ float qcompress; ///< amount of qscale change between easy & hard scenes (0.0-1.0)
++ float qblur; ///< amount of qscale smoothing over time (0.0-1.0)
++
++ /**
++ * minimum quantizer
++ * - encoding: Set by user.
++ * - decoding: unused
++ */
++ int qmin;
++
++ /**
++ * maximum quantizer
++ * - encoding: Set by user.
++ * - decoding: unused
++ */
++ int qmax;
++
++ /**
++ * maximum quantizer difference between frames
++ * - encoding: Set by user.
++ * - decoding: unused
++ */
++ int max_qdiff;
++
++ /**
++ * decoder bitstream buffer size
++ * - encoding: Set by user.
++ * - decoding: May be set by libavcodec.
++ */
++ int rc_buffer_size;
++
++ /**
++ * ratecontrol override, see RcOverride
++ * - encoding: Allocated/set/freed by user.
++ * - decoding: unused
++ */
++ int rc_override_count;
++ RcOverride *rc_override;
++
++ /**
++ * maximum bitrate
++ * - encoding: Set by user.
++ * - decoding: Set by user, may be overwritten by libavcodec.
++ */
++ int64_t rc_max_rate;
++
++ /**
++ * minimum bitrate
++ * - encoding: Set by user.
++ * - decoding: unused
++ */
++ int64_t rc_min_rate;
++
++ /**
++ * Ratecontrol attempt to use, at maximum, <value> of what can be used without an underflow.
++ * - encoding: Set by user.
++ * - decoding: unused.
++ */
++ float rc_max_available_vbv_use;
++
++ /**
++ * Ratecontrol attempt to use, at least, <value> times the amount needed to prevent a vbv overflow.
++ * - encoding: Set by user.
++ * - decoding: unused.
++ */
++ float rc_min_vbv_overflow_use;
++
++ /**
++ * Number of bits which should be loaded into the rc buffer before decoding starts.
++ * - encoding: Set by user.
++ * - decoding: unused
++ */
++ int rc_initial_buffer_occupancy;
++
++ /**
++ * trellis RD quantization
++ * - encoding: Set by user.
++ * - decoding: unused
++ */
++ int trellis;
++
++ /**
++ * pass1 encoding statistics output buffer
++ * - encoding: Set by libavcodec.
++ * - decoding: unused
++ */
++ char *stats_out;
++
++ /**
++ * pass2 encoding statistics input buffer
++ * Concatenated stuff from stats_out of pass1 should be placed here.
++ * - encoding: Allocated/set/freed by user.
++ * - decoding: unused
++ */
++ char *stats_in;
++
++ /**
++ * Work around bugs in encoders which sometimes cannot be detected automatically.
++ * - encoding: Set by user
++ * - decoding: Set by user
++ */
++ int workaround_bugs;
++#define FF_BUG_AUTODETECT 1 ///< autodetection
++#define FF_BUG_XVID_ILACE 4
++#define FF_BUG_UMP4 8
++#define FF_BUG_NO_PADDING 16
++#define FF_BUG_AMV 32
++#define FF_BUG_QPEL_CHROMA 64
++#define FF_BUG_STD_QPEL 128
++#define FF_BUG_QPEL_CHROMA2 256
++#define FF_BUG_DIRECT_BLOCKSIZE 512
++#define FF_BUG_EDGE 1024
++#define FF_BUG_HPEL_CHROMA 2048
++#define FF_BUG_DC_CLIP 4096
++#define FF_BUG_MS 8192 ///< Work around various bugs in Microsoft's broken decoders.
++#define FF_BUG_TRUNCATED 16384
++#define FF_BUG_IEDGE 32768
++
++ /**
++ * strictly follow the standard (MPEG-4, ...).
++ * - encoding: Set by user.
++ * - decoding: Set by user.
++ * Setting this to STRICT or higher means the encoder and decoder will
++ * generally do stupid things, whereas setting it to unofficial or lower
++ * will mean the encoder might produce output that is not supported by all
++ * spec-compliant decoders. Decoders don't differentiate between normal,
++ * unofficial and experimental (that is, they always try to decode things
++ * when they can) unless they are explicitly asked to behave stupidly
++ * (=strictly conform to the specs)
++ * This may only be set to one of the FF_COMPLIANCE_* values in defs.h.
++ */
++ int strict_std_compliance;
++
++ /**
++ * error concealment flags
++ * - encoding: unused
++ * - decoding: Set by user.
++ */
++ int error_concealment;
++#define FF_EC_GUESS_MVS 1
++#define FF_EC_DEBLOCK 2
++#define FF_EC_FAVOR_INTER 256
++
++ /**
++ * debug
++ * - encoding: Set by user.
++ * - decoding: Set by user.
++ */
++ int debug;
++#define FF_DEBUG_PICT_INFO 1
++#define FF_DEBUG_RC 2
++#define FF_DEBUG_BITSTREAM 4
++#define FF_DEBUG_MB_TYPE 8
++#define FF_DEBUG_QP 16
++#define FF_DEBUG_DCT_COEFF 0x00000040
++#define FF_DEBUG_SKIP 0x00000080
++#define FF_DEBUG_STARTCODE 0x00000100
++#define FF_DEBUG_ER 0x00000400
++#define FF_DEBUG_MMCO 0x00000800
++#define FF_DEBUG_BUGS 0x00001000
++#define FF_DEBUG_BUFFERS 0x00008000
++#define FF_DEBUG_THREADS 0x00010000
++#define FF_DEBUG_GREEN_MD 0x00800000
++#define FF_DEBUG_NOMC 0x01000000
++
++ /**
++ * Error recognition; may misdetect some more or less valid parts as errors.
++ * This is a bitfield of the AV_EF_* values defined in defs.h.
++ *
++ * - encoding: Set by user.
++ * - decoding: Set by user.
++ */
++ int err_recognition;
++
++ /**
++ * Hardware accelerator in use
++ * - encoding: unused.
++ * - decoding: Set by libavcodec
++ */
++ const struct AVHWAccel *hwaccel;
++
++ /**
++ * Legacy hardware accelerator context.
++ *
++ * For some hardware acceleration methods, the caller may use this field to
++ * signal hwaccel-specific data to the codec. The struct pointed to by this
++ * pointer is hwaccel-dependent and defined in the respective header. Please
++ * refer to the FFmpeg HW accelerator documentation to know how to fill
++ * this.
++ *
++ * In most cases this field is optional - the necessary information may also
++ * be provided to libavcodec through @ref hw_frames_ctx or @ref
++ * hw_device_ctx (see avcodec_get_hw_config()). However, in some cases it
++ * may be the only method of signalling some (optional) information.
++ *
++ * The struct and its contents are owned by the caller.
++ *
++ * - encoding: May be set by the caller before avcodec_open2(). Must remain
++ * valid until avcodec_free_context().
++ * - decoding: May be set by the caller in the get_format() callback.
++ * Must remain valid until the next get_format() call,
++ * or avcodec_free_context() (whichever comes first).
++ */
++ void *hwaccel_context;
++
++ /**
++ * A reference to the AVHWFramesContext describing the input (for encoding)
++ * or output (decoding) frames. The reference is set by the caller and
++ * afterwards owned (and freed) by libavcodec - it should never be read by
++ * the caller after being set.
++ *
++ * - decoding: This field should be set by the caller from the get_format()
++ * callback. The previous reference (if any) will always be
++ * unreffed by libavcodec before the get_format() call.
++ *
++ * If the default get_buffer2() is used with a hwaccel pixel
++ * format, then this AVHWFramesContext will be used for
++ * allocating the frame buffers.
++ *
++ * - encoding: For hardware encoders configured to use a hwaccel pixel
++ * format, this field should be set by the caller to a reference
++ * to the AVHWFramesContext describing input frames.
++ * AVHWFramesContext.format must be equal to
++ * AVCodecContext.pix_fmt.
++ *
++ * This field should be set before avcodec_open2() is called.
++ */
++ AVBufferRef *hw_frames_ctx;
++
++ /**
++ * A reference to the AVHWDeviceContext describing the device which will
++ * be used by a hardware encoder/decoder. The reference is set by the
++ * caller and afterwards owned (and freed) by libavcodec.
++ *
++ * This should be used if either the codec device does not require
++ * hardware frames or any that are used are to be allocated internally by
++ * libavcodec. If the user wishes to supply any of the frames used as
++ * encoder input or decoder output then hw_frames_ctx should be used
++ * instead. When hw_frames_ctx is set in get_format() for a decoder, this
++ * field will be ignored while decoding the associated stream segment, but
++ * may again be used on a following one after another get_format() call.
++ *
++ * For both encoders and decoders this field should be set before
++ * avcodec_open2() is called and must not be written to thereafter.
++ *
++ * Note that some decoders may require this field to be set initially in
++ * order to support hw_frames_ctx at all - in that case, all frames
++ * contexts used must be created on the same device.
++ */
++ AVBufferRef *hw_device_ctx;
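++
++    /*
++     * Editor's sketch (not part of upstream FFmpeg): a minimal, hedged
++     * example of the simple hw_device_ctx path described above, assuming a
++     * VAAPI device is present and the context *ctx has not been opened yet:
++     *
++     * @code
++     * AVBufferRef *dev = NULL;
++     * if (av_hwdevice_ctx_create(&dev, AV_HWDEVICE_TYPE_VAAPI,
++     *                            NULL, NULL, 0) >= 0) {
++     *     ctx->hw_device_ctx = av_buffer_ref(dev); // freed by libavcodec
++     *     av_buffer_unref(&dev);
++     * }
++     * // then open the codec as usual; frames contexts are set up internally
++     * @endcode
++     */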
++
++ /**
++ * Bit set of AV_HWACCEL_FLAG_* flags, which affect hardware accelerated
++ * decoding (if active).
++ * - encoding: unused
++ * - decoding: Set by user (either before avcodec_open2(), or in the
++ * AVCodecContext.get_format callback)
++ */
++ int hwaccel_flags;
++
++ /**
++ * Video decoding only. Sets the number of extra hardware frames which
++ * the decoder will allocate for use by the caller. This must be set
++ * before avcodec_open2() is called.
++ *
++ * Some hardware decoders require all frames that they will use for
++ * output to be defined in advance before decoding starts. For such
++ * decoders, the hardware frame pool must therefore be of a fixed size.
++ * The extra frames set here are on top of any number that the decoder
++ * needs internally in order to operate normally (for example, frames
++ * used as reference pictures).
++ */
++ int extra_hw_frames;
++
++ /**
++ * error
++ * - encoding: Set by libavcodec if flags & AV_CODEC_FLAG_PSNR.
++ * - decoding: unused
++ */
++ uint64_t error[AV_NUM_DATA_POINTERS];
++
++ /**
++ * DCT algorithm, see FF_DCT_* below
++ * - encoding: Set by user.
++ * - decoding: unused
++ */
++ int dct_algo;
++#define FF_DCT_AUTO 0
++#define FF_DCT_FASTINT 1
++#define FF_DCT_INT 2
++#define FF_DCT_MMX 3
++#define FF_DCT_ALTIVEC 5
++#define FF_DCT_FAAN 6
++#define FF_DCT_NEON 7
++
++ /**
++ * IDCT algorithm, see FF_IDCT_* below.
++ * - encoding: Set by user.
++ * - decoding: Set by user.
++ */
++ int idct_algo;
++#define FF_IDCT_AUTO 0
++#define FF_IDCT_INT 1
++#define FF_IDCT_SIMPLE 2
++#define FF_IDCT_SIMPLEMMX 3
++#define FF_IDCT_ARM 7
++#define FF_IDCT_ALTIVEC 8
++#define FF_IDCT_SIMPLEARM 10
++#define FF_IDCT_XVID 14
++#define FF_IDCT_SIMPLEARMV5TE 16
++#define FF_IDCT_SIMPLEARMV6 17
++#define FF_IDCT_FAAN 20
++#define FF_IDCT_SIMPLENEON 22
++#define FF_IDCT_SIMPLEAUTO 128
++
++ /**
++ * bits per sample/pixel from the demuxer (needed for huffyuv).
++ * - encoding: Set by libavcodec.
++ * - decoding: Set by user.
++ */
++ int bits_per_coded_sample;
++
++ /**
++ * Bits per sample/pixel of internal libavcodec pixel/sample format.
++ * - encoding: set by user.
++ * - decoding: set by libavcodec.
++ */
++ int bits_per_raw_sample;
++
++ /**
++ * thread count
++ * is used to decide how many independent tasks should be passed to
execute()
++ * - encoding: Set by user.
++ * - decoding: Set by user.
++ */
++ int thread_count;
++
++ /**
++ * Which multithreading methods to use.
++ * Use of FF_THREAD_FRAME will increase decoding delay by one frame per
thread,
++ * so clients which cannot provide future frames should not use it.
++ *
++ * - encoding: Set by user, otherwise the default is used.
++ * - decoding: Set by user, otherwise the default is used.
++ */
++ int thread_type;
++#define FF_THREAD_FRAME 1 ///< Decode more than one frame at once
++#define FF_THREAD_SLICE 2 ///< Decode more than one part of a single
frame at once
++
++ /**
++ * Which multithreading methods are in use by the codec.
++ * - encoding: Set by libavcodec.
++ * - decoding: Set by libavcodec.
++ */
++ int active_thread_type;
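++
++    /*
++     * Editor's sketch (not part of upstream FFmpeg): a hedged example of
++     * configuring the threading fields above before avcodec_open2(), for an
++     * allocated but not yet opened AVCodecContext *ctx:
++     *
++     * @code
++     * ctx->thread_count = 0;                          // 0 = auto-detect
++     * ctx->thread_type  = FF_THREAD_FRAME | FF_THREAD_SLICE;
++     * // after avcodec_open2(), active_thread_type reports what is in use
++     * @endcode
++     */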
++
++ /**
++ * The codec may call this to execute several independent things.
++ * It will return only after finishing all tasks.
++ * The user may replace this with some multithreaded implementation,
++ * the default implementation will execute the parts serially.
++ * @param count the number of things to execute
++ * - encoding: Set by libavcodec, user can override.
++ * - decoding: Set by libavcodec, user can override.
++ */
++ int (*execute)(struct AVCodecContext *c, int (*func)(struct
AVCodecContext *c2, void *arg), void *arg2, int *ret, int count, int size);
++
++ /**
++ * The codec may call this to execute several independent things.
++ * It will return only after finishing all tasks.
++ * The user may replace this with some multithreaded implementation,
++ * the default implementation will execute the parts serially.
++ * @param c context passed also to func
++ * @param count the number of things to execute
++ * @param arg2 argument passed unchanged to func
++ * @param ret return values of executed functions, must have space for
"count" values. May be NULL.
++ * @param func function that will be called count times, with jobnr
from 0 to count-1.
++ * threadnr will be in the range 0 to c->thread_count-1 <
MAX_THREADS and so that no
++ * two instances of func executing at the same time will
have the same threadnr.
++ * @return always 0 currently, but code should handle a future
improvement where when any call to func
++ * returns < 0 no further calls to func may be done and < 0 is
returned.
++ * - encoding: Set by libavcodec, user can override.
++ * - decoding: Set by libavcodec, user can override.
++ */
++ int (*execute2)(struct AVCodecContext *c, int (*func)(struct
AVCodecContext *c2, void *arg, int jobnr, int threadnr), void *arg2, int
*ret, int count);
++
++ /**
++ * profile
++ * - encoding: Set by user.
++ * - decoding: Set by libavcodec.
++ * See the AV_PROFILE_* defines in defs.h.
++ */
++ int profile;
++
++ /**
++ * Encoding level descriptor.
++ * - encoding: Set by user, corresponds to a specific level defined by
the
++ * codec, usually corresponding to the profile level, if not
specified it
++ * is set to AV_LEVEL_UNKNOWN.
++ * - decoding: Set by libavcodec.
++ * See AV_LEVEL_* in defs.h.
++ */
++ int level;
++
++#if FF_API_CODEC_PROPS
++ /**
++ * Properties of the stream that gets decoded
++ * - encoding: unused
++ * - decoding: set by libavcodec
++ */
++ attribute_deprecated
++ unsigned properties;
++#define FF_CODEC_PROPERTY_LOSSLESS 0x00000001
++#define FF_CODEC_PROPERTY_CLOSED_CAPTIONS 0x00000002
++#define FF_CODEC_PROPERTY_FILM_GRAIN 0x00000004
++#endif
++
++ /**
++ * Skip loop filtering for selected frames.
++ * - encoding: unused
++ * - decoding: Set by user.
++ */
++ enum AVDiscard skip_loop_filter;
++
++ /**
++ * Skip IDCT/dequantization for selected frames.
++ * - encoding: unused
++ * - decoding: Set by user.
++ */
++ enum AVDiscard skip_idct;
++
++ /**
++ * Skip decoding for selected frames.
++ * - encoding: unused
++ * - decoding: Set by user.
++ */
++ enum AVDiscard skip_frame;
++
++ /**
++ * Skip processing alpha if supported by codec.
++ * Note that if the format uses pre-multiplied alpha (common with VP6,
++ * and recommended due to better video quality/compression)
++ * the image will look as if alpha-blended onto a black background.
++ * However for formats that do not use pre-multiplied alpha
++ * there might be serious artefacts (though e.g. libswscale currently
++ * assumes pre-multiplied alpha anyway).
++ *
++ * - decoding: set by user
++ * - encoding: unused
++ */
++ int skip_alpha;
++
++ /**
++ * Number of macroblock rows at the top which are skipped.
++ * - encoding: unused
++ * - decoding: Set by user.
++ */
++ int skip_top;
++
++ /**
++ * Number of macroblock rows at the bottom which are skipped.
++ * - encoding: unused
++ * - decoding: Set by user.
++ */
++ int skip_bottom;
++
++ /**
++ * low resolution decoding, 1-> 1/2 size, 2->1/4 size
++ * - encoding: unused
++ * - decoding: Set by user.
++ */
++ int lowres;
++
++ /**
++ * AVCodecDescriptor
++ * - encoding: unused.
++ * - decoding: set by libavcodec.
++ */
++ const struct AVCodecDescriptor *codec_descriptor;
++
++ /**
++ * Character encoding of the input subtitles file.
++ * - decoding: set by user
++ * - encoding: unused
++ */
++ char *sub_charenc;
++
++ /**
++ * Subtitles character encoding mode. Formats or codecs might be
adjusting
++ * this setting (if they are doing the conversion themselves for
instance).
++ * - decoding: set by libavcodec
++ * - encoding: unused
++ */
++ int sub_charenc_mode;
++#define FF_SUB_CHARENC_MODE_DO_NOTHING -1 ///< do nothing (demuxer
outputs a stream supposed to be already in UTF-8, or the codec is bitmap for
instance)
++#define FF_SUB_CHARENC_MODE_AUTOMATIC 0 ///< libavcodec will select the
mode itself
++#define FF_SUB_CHARENC_MODE_PRE_DECODER 1 ///< the AVPacket data needs to
be recoded to UTF-8 before being fed to the decoder, requires iconv
++#define FF_SUB_CHARENC_MODE_IGNORE 2 ///< neither convert the
subtitles, nor check them for valid UTF-8
++
++ /**
++ * Header containing style information for text subtitles.
++ * For SUBTITLE_ASS subtitle type, it should contain the whole ASS
++ * [Script Info] and [V4+ Styles] section, plus the [Events] line and
++ * the Format line following. It shouldn't include any Dialogue line.
++ *
++ * - encoding: May be set by the caller before avcodec_open2() to an
array
++ * allocated with the av_malloc() family of functions.
++ * - decoding: May be set by libavcodec in avcodec_open2().
++ *
++ * After being set, the array is owned by the codec and freed in
++ * avcodec_free_context().
++ */
++ int subtitle_header_size;
++ uint8_t *subtitle_header;
++
++ /**
++ * dump format separator.
++ * can be ", " or "\n " or anything else
++ * - encoding: Set by user.
++ * - decoding: Set by user.
++ */
++ uint8_t *dump_separator;
++
++ /**
++ * ',' separated list of allowed decoders.
++ * If NULL then all are allowed
++ * - encoding: unused
++ * - decoding: set by user
++ */
++ char *codec_whitelist;
++
++ /**
++ * Additional data associated with the entire coded stream.
++ *
++ * - decoding: may be set by user before calling avcodec_open2().
++ * - encoding: may be set by libavcodec after avcodec_open2().
++ */
++ AVPacketSideData *coded_side_data;
++ int nb_coded_side_data;
++
++ /**
++ * Bit set of AV_CODEC_EXPORT_DATA_* flags, which affects the kind of
++ * metadata exported in frame, packet, or coded stream side data by
++ * decoders and encoders.
++ *
++ * - decoding: set by user
++ * - encoding: set by user
++ */
++ int export_side_data;
++
++ /**
++ * The number of pixels per image to maximally accept.
++ *
++ * - decoding: set by user
++ * - encoding: set by user
++ */
++ int64_t max_pixels;
++
++ /**
++ * Video decoding only. Certain video codecs support cropping, meaning
that
++ * only a sub-rectangle of the decoded frame is intended for display.
This
++ * option controls how cropping is handled by libavcodec.
++ *
++ * When set to 1 (the default), libavcodec will apply cropping
internally.
++ * I.e. it will modify the output frame width/height fields and offset
the
++ * data pointers (only by as much as possible while preserving
alignment, or
++ * by the full amount if the AV_CODEC_FLAG_UNALIGNED flag is set) so
that
++ * the frames output by the decoder refer only to the cropped area. The
++ * crop_* fields of the output frames will be zero.
++ *
++ * When set to 0, the width/height fields of the output frames will be
set
++ * to the coded dimensions and the crop_* fields will describe the
cropping
++ * rectangle. Applying the cropping is left to the caller.
++ *
++ * @warning When hardware acceleration with opaque output frames is
used,
++ * libavcodec is unable to apply cropping from the top/left border.
++ *
++ * @note when this option is set to zero, the width/height fields of the
++ * AVCodecContext and output AVFrames have different meanings. The codec
++ * context fields store display dimensions (with the coded dimensions in
++ * coded_width/height), while the frame fields store the coded
dimensions
++ * (with the display dimensions being determined by the crop_* fields).
++ */
++ int apply_cropping;
++
++ /**
++ * The percentage of damaged samples to discard a frame.
++ *
++ * - decoding: set by user
++ * - encoding: unused
++ */
++ int discard_damaged_percentage;
++
++ /**
++ * The number of samples per frame to maximally accept.
++ *
++ * - decoding: set by user
++ * - encoding: set by user
++ */
++ int64_t max_samples;
++
++ /**
++ * This callback is called at the beginning of each packet to get a data
++ * buffer for it.
++ *
++ * The following field will be set in the packet before this callback is
++ * called:
++ * - size
++ * This callback must use the above value to calculate the required
buffer size,
++ * which must be padded by at least AV_INPUT_BUFFER_PADDING_SIZE bytes.
++ *
++ * In some specific cases, the encoder may not use the entire buffer
allocated by this
++ * callback. This will be reflected in the size value in the packet
once returned by
++ * avcodec_receive_packet().
++ *
++ * This callback must fill the following fields in the packet:
++ * - data: alignment requirements for AVPacket apply, if any. Some
architectures and
++ * encoders may benefit from having aligned data.
++ * - buf: must contain a pointer to an AVBufferRef structure. The
packet's
++ * data pointer must be contained in it. See: av_buffer_create(),
av_buffer_alloc(),
++ * and av_buffer_ref().
++ *
++ * If AV_CODEC_CAP_DR1 is not set then get_encode_buffer() must call
++ * avcodec_default_get_encode_buffer() instead of providing a buffer
allocated by
++ * some other means.
++ *
++ * The flags field may contain a combination of
AV_GET_ENCODE_BUFFER_FLAG_ flags.
++ * They may be used for example to hint what use the buffer may get
after being
++ * created.
++ * Implementations of this callback may ignore flags they don't
understand.
++ * If AV_GET_ENCODE_BUFFER_FLAG_REF is set in flags then the packet may
be reused
++ * (read and/or written to if it is writable) later by libavcodec.
++ *
++ * This callback must be thread-safe, as when frame threading is used,
it may
++ * be called from multiple threads simultaneously.
++ *
++ * @see avcodec_default_get_encode_buffer()
++ *
++ * - encoding: Set by libavcodec, user can override.
++ * - decoding: unused
++ */
++ int (*get_encode_buffer)(struct AVCodecContext *s, AVPacket *pkt, int
flags);
++
++ /**
++ * Frame counter, set by libavcodec.
++ *
++ * - decoding: total number of frames returned from the decoder so far.
++ * - encoding: total number of frames passed to the encoder so far.
++ *
++ * @note the counter is not incremented if encoding/decoding resulted
in
++ * an error.
++ */
++ int64_t frame_num;
++
++ /**
++ * Decoding only. May be set by the caller before avcodec_open2() to an
++ * av_malloc()'ed array (or via AVOptions). Owned and freed by the
decoder
++ * afterwards.
++ *
++ * Side data attached to decoded frames may come from several sources:
++ * 1. coded_side_data, which the decoder will for certain types
translate
++ * from packet-type to frame-type and attach to frames;
++ * 2. side data attached to an AVPacket sent for decoding (same
++ * considerations as above);
++ * 3. extracted from the coded bytestream.
++ * The first two cases are supplied by the caller and typically come
from a
++ * container.
++ *
++ * This array configures decoder behaviour in cases when side data of
the
++ * same type is present both in the coded bytestream and in the
++ * user-supplied side data (items 1. and 2. above). In all cases, at
most
++ * one instance of each side data type will be attached to output
frames. By
++ * default it will be the bytestream side data. Adding an
++ * AVPacketSideDataType value to this array will flip the preference for
++ * this type, thus making the decoder prefer user-supplied side data
over
++ * bytestream. In case side data of the same type is present both in
++ * coded_data and attached to a packet, the packet instance always has
++ * priority.
++ *
++ * The array may also contain a single -1, in which case the preference
is
++ * switched for all side data types.
++ */
++ int *side_data_prefer_packet;
++ /**
++ * Number of entries in side_data_prefer_packet.
++ */
++ unsigned nb_side_data_prefer_packet;
++
++ /**
++ * Array containing static side data, such as HDR10 CLL / MDCV
structures.
++ * Side data entries should be allocated by usage of helpers defined in
++ * libavutil/frame.h.
++ *
++ * - encoding: may be set by user before calling avcodec_open2() for
++ * encoder configuration. Afterwards owned and freed by the
++ * encoder.
++ * - decoding: may be set by libavcodec in avcodec_open2().
++ */
++ AVFrameSideData **decoded_side_data;
++ int nb_decoded_side_data;
++} AVCodecContext;
++
++/**
++ * @defgroup lavc_hwaccel AVHWAccel
++ *
++ * @note Nothing in this structure should be accessed by the user. At some
++ * point in future it will not be externally visible at all.
++ *
++ * @{
++ */
++typedef struct AVHWAccel {
++ /**
++ * Name of the hardware accelerated codec.
++ * The name is globally unique among encoders and among decoders (but an
++ * encoder and a decoder can share the same name).
++ */
++ const char *name;
++
++ /**
++ * Type of codec implemented by the hardware accelerator.
++ *
++ * See AVMEDIA_TYPE_xxx
++ */
++ enum AVMediaType type;
++
++ /**
++ * Codec implemented by the hardware accelerator.
++ *
++ * See AV_CODEC_ID_xxx
++ */
++ enum AVCodecID id;
++
++ /**
++ * Supported pixel format.
++ *
++ * Only hardware accelerated formats are supported here.
++ */
++ enum AVPixelFormat pix_fmt;
++
++ /**
++ * Hardware accelerated codec capabilities.
++ * see AV_HWACCEL_CODEC_CAP_*
++ */
++ int capabilities;
++} AVHWAccel;
++
++/**
++ * HWAccel is experimental and is thus avoided in favor of non experimental
++ * codecs
++ */
++#define AV_HWACCEL_CODEC_CAP_EXPERIMENTAL 0x0200
++
++/**
++ * Hardware acceleration should be used for decoding even if the codec level
++ * used is unknown or higher than the maximum supported level reported by
the
++ * hardware driver.
++ *
++ * It's generally a good idea to pass this flag unless you have a specific
++ * reason not to, as hardware tends to under-report supported levels.
++ */
++#define AV_HWACCEL_FLAG_IGNORE_LEVEL (1 << 0)
++
++/**
++ * Hardware acceleration can output YUV pixel formats with a different
chroma
++ * sampling than 4:2:0 and/or other than 8 bits per component.
++ */
++#define AV_HWACCEL_FLAG_ALLOW_HIGH_DEPTH (1 << 1)
++
++/**
++ * Hardware acceleration should still be attempted for decoding when the
++ * codec profile does not match the reported capabilities of the hardware.
++ *
++ * For example, this can be used to try to decode baseline profile H.264
++ * streams in hardware - it will often succeed, because many streams marked
++ * as baseline profile actually conform to constrained baseline profile.
++ *
++ * @warning If the stream is actually not supported then the behaviour is
++ * undefined, and may include returning entirely incorrect output
++ * while indicating success.
++ */
++#define AV_HWACCEL_FLAG_ALLOW_PROFILE_MISMATCH (1 << 2)
++
++/**
++ * Some hardware decoders (namely nvdec) can either output direct decoder
++ * surfaces, or make an on-device copy and return said copy.
++ * There is a hard limit on how many decoder surfaces there can be, and it
++ * cannot be accurately guessed ahead of time.
++ * For some processing chains, this can be okay, but others will run into
the
++ * limit and in turn produce very confusing errors that require fine tuning
of
++ * more or less obscure options by the user, or in extreme cases cannot be
++ * resolved at all without inserting an avfilter that forces a copy.
++ *
++ * Thus, the hwaccel will by default make a copy for safety and resilience.
++ * If a user really wants to minimize the number of copies, they can set this
++ * flag and ensure their processing chain does not exhaust the surface pool.
++ */
++#define AV_HWACCEL_FLAG_UNSAFE_OUTPUT (1 << 3)
++
++/**
++ * @}
++ */
++
++enum AVSubtitleType {
++ SUBTITLE_NONE,
++
++ SUBTITLE_BITMAP, ///< A bitmap, pict will be set
++
++ /**
++ * Plain text, the text field must be set by the decoder and is
++ * authoritative. ass and pict fields may contain approximations.
++ */
++ SUBTITLE_TEXT,
++
++ /**
++ * Formatted text, the ass field must be set by the decoder and is
++ * authoritative. pict and text fields may contain approximations.
++ */
++ SUBTITLE_ASS,
++};
++
++#define AV_SUBTITLE_FLAG_FORCED 0x00000001
++
++typedef struct AVSubtitleRect {
++ int x; ///< top left corner of pict, undefined when pict is
not set
++ int y; ///< top left corner of pict, undefined when pict is
not set
++ int w; ///< width of pict, undefined when pict is
not set
++ int h; ///< height of pict, undefined when pict is
not set
++ int nb_colors; ///< number of colors in pict, undefined when pict is
not set
++
++ /**
++ * data+linesize for the bitmap of this subtitle.
++ * Can be set for text/ass as well once they are rendered.
++ */
++ uint8_t *data[4];
++ int linesize[4];
++
++ int flags;
++ enum AVSubtitleType type;
++
++ char *text; ///< 0 terminated plain UTF-8 text
++
++ /**
++ * 0 terminated ASS/SSA compatible event line.
++ * The presentation of this is unaffected by the other values in this
++ * struct.
++ */
++ char *ass;
++} AVSubtitleRect;
++
++typedef struct AVSubtitle {
++ uint16_t format; /* 0 = graphics */
++ uint32_t start_display_time; /* relative to packet pts, in ms */
++ uint32_t end_display_time; /* relative to packet pts, in ms */
++ unsigned num_rects;
++ AVSubtitleRect **rects;
++ int64_t pts; ///< Same as packet pts, in AV_TIME_BASE
++} AVSubtitle;
++
++/**
++ * Return the LIBAVCODEC_VERSION_INT constant.
++ */
++unsigned avcodec_version(void);
++
++/**
++ * Return the libavcodec build-time configuration.
++ */
++const char *avcodec_configuration(void);
++
++/**
++ * Return the libavcodec license.
++ */
++const char *avcodec_license(void);
++
++/**
++ * Allocate an AVCodecContext and set its fields to default values. The
++ * resulting struct should be freed with avcodec_free_context().
++ *
++ * @param codec if non-NULL, allocate private data and initialize defaults
++ * for the given codec. It is illegal to then call
avcodec_open2()
++ * with a different codec.
++ * If NULL, then the codec-specific defaults won't be
initialized,
++ * which may result in suboptimal default settings (this is
++ * important mainly for encoders, e.g. libx264).
++ *
++ * @return An AVCodecContext filled with default values or NULL on failure.
++ */
++AVCodecContext *avcodec_alloc_context3(const AVCodec *codec);
++
++/**
++ * Free the codec context and everything associated with it and write NULL
to
++ * the provided pointer.
++ */
++void avcodec_free_context(AVCodecContext **avctx);
++
++/**
++ * Get the AVClass for AVCodecContext. It can be used in combination with
++ * AV_OPT_SEARCH_FAKE_OBJ for examining options.
++ *
++ * @see av_opt_find().
++ */
++const AVClass *avcodec_get_class(void);
++
++/**
++ * Get the AVClass for AVSubtitleRect. It can be used in combination with
++ * AV_OPT_SEARCH_FAKE_OBJ for examining options.
++ *
++ * @see av_opt_find().
++ */
++const AVClass *avcodec_get_subtitle_rect_class(void);
++
++/**
++ * Fill the parameters struct based on the values from the supplied codec
++ * context. Any allocated fields in par are freed and replaced with
duplicates
++ * of the corresponding fields in codec.
++ *
++ * @return >= 0 on success, a negative AVERROR code on failure
++ */
++int avcodec_parameters_from_context(struct AVCodecParameters *par,
++ const AVCodecContext *codec);
++
++/**
++ * Fill the codec context based on the values from the supplied codec
++ * parameters. Any allocated fields in codec that have a corresponding
field in
++ * par are freed and replaced with duplicates of the corresponding field in
par.
++ * Fields in codec that do not have a counterpart in par are not touched.
++ *
++ * @return >= 0 on success, a negative AVERROR code on failure.
++ */
++int avcodec_parameters_to_context(AVCodecContext *codec,
++ const struct AVCodecParameters *par);
++
++/**
++ * Initialize the AVCodecContext to use the given AVCodec. Prior to using
this
++ * function the context has to be allocated with avcodec_alloc_context3().
++ *
++ * The functions avcodec_find_decoder_by_name(),
avcodec_find_encoder_by_name(),
++ * avcodec_find_decoder() and avcodec_find_encoder() provide an easy way for
++ * retrieving a codec.
++ *
++ * Depending on the codec, you might need to set options in the codec
context
++ * also for decoding (e.g. width, height, or the pixel or audio sample
format in
++ * the case the information is not available in the bitstream, as when
decoding
++ * raw audio or video).
++ *
++ * Options in the codec context can be set either by setting them in the
options
++ * AVDictionary, or by setting the values in the context itself, directly
or by
++ * using the av_opt_set() API before calling this function.
++ *
++ * Example:
++ * @code
++ * av_dict_set(&opts, "b", "2.5M", 0);
++ * codec = avcodec_find_decoder(AV_CODEC_ID_H264);
++ * if (!codec)
++ * exit(1);
++ *
++ * context = avcodec_alloc_context3(codec);
++ *
++ * if (avcodec_open2(context, codec, opts) < 0)
++ * exit(1);
++ * @endcode
++ *
++ * In the case AVCodecParameters are available (e.g. when demuxing a stream
++ * using libavformat, and accessing the AVStream contained in the demuxer),
the
++ * codec parameters can be copied to the codec context using
++ * avcodec_parameters_to_context(), as in the following example:
++ *
++ * @code
++ * AVStream *stream = ...;
++ * context = avcodec_alloc_context3(codec);
++ * if (avcodec_parameters_to_context(context, stream->codecpar) < 0)
++ * exit(1);
++ * if (avcodec_open2(context, codec, NULL) < 0)
++ * exit(1);
++ * @endcode
++ *
++ * @note Always call this function before using decoding routines (such as
++ * @ref avcodec_receive_frame()).
++ *
++ * @param avctx The context to initialize.
++ * @param codec The codec to open this context for. If a non-NULL codec has
been
++ * previously passed to avcodec_alloc_context3() or
++ * for this context, then this parameter MUST be either NULL or
++ * equal to the previously passed codec.
++ * @param options A dictionary filled with AVCodecContext and codec-private
++ * options, which are set on top of the options already set
in
++ * avctx, can be NULL. On return this object will be filled
with
++ * options that were not found in the avctx codec context.
++ *
++ * @return zero on success, a negative value on error
++ * @see avcodec_alloc_context3(), avcodec_find_decoder(),
avcodec_find_encoder(),
++ * av_dict_set(), av_opt_set(), av_opt_find(),
avcodec_parameters_to_context()
++ */
++int avcodec_open2(AVCodecContext *avctx, const AVCodec *codec, AVDictionary
**options);
++
++/**
++ * Free all allocated data in the given subtitle struct.
++ *
++ * @param sub AVSubtitle to free.
++ */
++void avsubtitle_free(AVSubtitle *sub);
++
++/**
++ * @}
++ */
++
++/**
++ * @addtogroup lavc_decoding
++ * @{
++ */
++
++/**
++ * The default callback for AVCodecContext.get_buffer2(). It is made public
so
++ * it can be called by custom get_buffer2() implementations for decoders
without
++ * AV_CODEC_CAP_DR1 set.
++ */
++int avcodec_default_get_buffer2(AVCodecContext *s, AVFrame *frame, int
flags);
++
++/**
++ * The default callback for AVCodecContext.get_encode_buffer(). It is made
public so
++ * it can be called by custom get_encode_buffer() implementations for
encoders without
++ * AV_CODEC_CAP_DR1 set.
++ */
++int avcodec_default_get_encode_buffer(AVCodecContext *s, AVPacket *pkt, int
flags);
++
++/**
++ * Modify width and height values so that they will result in a memory
++ * buffer that is acceptable for the codec if you do not use any horizontal
++ * padding.
++ *
++ * May only be used if a codec with AV_CODEC_CAP_DR1 has been opened.
++ */
++void avcodec_align_dimensions(AVCodecContext *s, int *width, int *height);
++
++/**
++ * Modify width and height values so that they will result in a memory
++ * buffer that is acceptable for the codec if you also ensure that all
++ * line sizes are a multiple of the respective linesize_align[i].
++ *
++ * May only be used if a codec with AV_CODEC_CAP_DR1 has been opened.
++ */
++void avcodec_align_dimensions2(AVCodecContext *s, int *width, int *height,
++ int linesize_align[AV_NUM_DATA_POINTERS]);
++
++/**
++ * Decode a subtitle message.
++ * Return a negative value on error, otherwise return the number of bytes
used.
++ * If no subtitle could be decompressed, got_sub_ptr is zero.
++ * Otherwise, the subtitle is stored in *sub.
++ * Note that AV_CODEC_CAP_DR1 is not available for subtitle codecs. This is
for
++ * simplicity, because the performance difference is expected to be
negligible
++ * and reusing a get_buffer written for video codecs would probably perform
badly
++ * due to a potentially very different allocation pattern.
++ *
++ * Some decoders (those marked with AV_CODEC_CAP_DELAY) have a delay
between input
++ * and output. This means that for some packets they will not immediately
++ * produce decoded output and need to be flushed at the end of decoding to
get
++ * all the decoded data. Flushing is done by calling this function with
packets
++ * with avpkt->data set to NULL and avpkt->size set to 0 until it stops
++ * returning subtitles. It is safe to flush even those decoders that are not
++ * marked with AV_CODEC_CAP_DELAY, then no subtitles will be returned.
++ *
++ * @note The AVCodecContext MUST have been opened with @ref avcodec_open2()
++ * before packets may be fed to the decoder.
++ *
++ * @param avctx the codec context
++ * @param[out] sub The preallocated AVSubtitle in which the decoded
subtitle will be stored,
++ * must be freed with avsubtitle_free if *got_sub_ptr is
set.
++ * @param[in,out] got_sub_ptr Zero if no subtitle could be decompressed,
otherwise, it is nonzero.
++ * @param[in] avpkt The input AVPacket containing the input buffer.
++ */
++int avcodec_decode_subtitle2(AVCodecContext *avctx, AVSubtitle *sub,
++ int *got_sub_ptr, const AVPacket *avpkt);
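++
++/*
++ * Editor's sketch (not part of upstream FFmpeg): hedged usage of
++ * avcodec_decode_subtitle2(), assuming an opened subtitle decoder *ctx and a
++ * demuxed AVPacket *pkt:
++ *
++ * @code
++ * AVSubtitle sub;
++ * int got_sub = 0;
++ * int used = avcodec_decode_subtitle2(ctx, &sub, &got_sub, pkt);
++ * if (used >= 0 && got_sub) {
++ *     // ... render sub.rects[0 .. sub.num_rects - 1], then free it
++ *     avsubtitle_free(&sub);
++ * }
++ * @endcode
++ */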
++
++/**
++ * Supply raw packet data as input to a decoder.
++ *
++ * Internally, this call will copy relevant AVCodecContext fields, which can
++ * influence decoding per-packet, and apply them when the packet is actually
++ * decoded. (For example AVCodecContext.skip_frame, which might direct the
++ * decoder to drop the frame contained by the packet sent with this
function.)
++ *
++ * @warning The input buffer, avpkt->data must be
AV_INPUT_BUFFER_PADDING_SIZE
++ * larger than the actual read bytes because some optimized
bitstream
++ * readers read 32 or 64 bits at once and could read over the end.
++ *
++ * @note The AVCodecContext MUST have been opened with @ref avcodec_open2()
++ * before packets may be fed to the decoder.
++ *
++ * @param avctx codec context
++ * @param[in] avpkt The input AVPacket. Usually, this will be a single video
++ * frame, or several complete audio frames.
++ * Ownership of the packet remains with the caller, and the
++ * decoder will not write to the packet. The decoder may
create
++ * a reference to the packet data (or copy it if the
packet is
++ * not reference-counted).
++ * Unlike with older APIs, the packet is always fully
consumed,
++ * and if it contains multiple frames (e.g. some audio
codecs),
++ * will require you to call avcodec_receive_frame()
multiple
++ * times afterwards before you can send a new packet.
++ * It can be NULL (or an AVPacket with data set to NULL and
++ * size set to 0); in this case, it is considered a flush
++ * packet, which signals the end of the stream. Sending the
++ * first flush packet will return success. Subsequent ones
are
++ * unnecessary and will return AVERROR_EOF. If the decoder
++ * still has frames buffered, it will return them after
sending
++ * a flush packet.
++ *
++ * @retval 0 success
++ * @retval AVERROR(EAGAIN) input is not accepted in the current state -
user
++ * must read output with avcodec_receive_frame()
(once
++ * all output is read, the packet should be
resent,
++ * and the call will not fail with EAGAIN).
++ * @retval AVERROR_EOF the decoder has been flushed, and no new
packets can be
++ * sent to it (also returned if more than 1 flush
++ * packet is sent)
++ * @retval AVERROR(EINVAL) codec not opened, it is an encoder, or
requires flush
++ * @retval AVERROR(ENOMEM) failed to add packet to internal queue, or
similar
++ * @retval "another negative error code" legitimate decoding errors
++ */
++int avcodec_send_packet(AVCodecContext *avctx, const AVPacket *avpkt);
++
++/**
++ * Return decoded output data from a decoder or encoder (when the
++ * @ref AV_CODEC_FLAG_RECON_FRAME flag is used).
++ *
++ * @param avctx codec context
++ * @param frame This will be set to a reference-counted video or audio
++ * frame (depending on the decoder type) allocated by the
++ * codec. Note that the function will always call
++ * av_frame_unref(frame) before doing anything else.
++ *
++ * @retval 0 success, a frame was returned
++ * @retval AVERROR(EAGAIN) output is not available in this state - user
must
++ * try to send new input
++ * @retval AVERROR_EOF the codec has been fully flushed, and there
will be
++ * no more output frames
++ * @retval AVERROR(EINVAL) codec not opened, or it is an encoder without
the
++ * @ref AV_CODEC_FLAG_RECON_FRAME flag enabled
++ * @retval "other negative error code" legitimate decoding errors
++ */
++int avcodec_receive_frame(AVCodecContext *avctx, AVFrame *frame);
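++
++/*
++ * Editor's sketch (not part of upstream FFmpeg): a minimal, hedged decode
++ * loop built from avcodec_send_packet() and avcodec_receive_frame(),
++ * assuming an opened decoder *ctx, a demuxed AVPacket *pkt (NULL to flush)
++ * and an allocated AVFrame *frame:
++ *
++ * @code
++ * int ret = avcodec_send_packet(ctx, pkt);
++ * while (ret >= 0) {
++ *     ret = avcodec_receive_frame(ctx, frame);
++ *     if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
++ *         break;            // need more input, or fully flushed
++ *     if (ret < 0)
++ *         return ret;       // legitimate decoding error
++ *     // ... use frame, then drop the reference
++ *     av_frame_unref(frame);
++ * }
++ * @endcode
++ */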
++
++/**
++ * Supply a raw video or audio frame to the encoder. Use
avcodec_receive_packet()
++ * to retrieve buffered output packets.
++ *
++ * @param avctx codec context
++ * @param[in] frame AVFrame containing the raw audio or video frame to be
encoded.
++ * Ownership of the frame remains with the caller, and the
++ * encoder will not write to the frame. The encoder may
create
++ * a reference to the frame data (or copy it if the frame
is
++ * not reference-counted).
++ * It can be NULL, in which case it is considered a flush
++ * packet. This signals the end of the stream. If the
encoder
++ * still has packets buffered, it will return them after
this
++ * call. Once flushing mode has been entered, additional
flush
++ * packets are ignored, and sending frames will return
++ * AVERROR_EOF.
++ *
++ * For audio:
++ * If AV_CODEC_CAP_VARIABLE_FRAME_SIZE is set, then each
frame
++ * can have any number of samples.
++ * If it is not set, frame->nb_samples must be equal to
++ * avctx->frame_size for all frames except the last.
++ * The final frame may be smaller than avctx->frame_size.
++ * @retval 0 success
++ * @retval AVERROR(EAGAIN) input is not accepted in the current state -
user must
++ * read output with avcodec_receive_packet()
(once all
++ * output is read, the packet should be resent,
and the
++ * call will not fail with EAGAIN).
++ * @retval AVERROR_EOF the encoder has been flushed, and no new
frames can
++ * be sent to it
++ * @retval AVERROR(EINVAL) codec not opened, it is a decoder, or requires
flush
++ * @retval AVERROR(ENOMEM) failed to add packet to internal queue, or
similar
++ * @retval "another negative error code" legitimate encoding errors
++ */
++int avcodec_send_frame(AVCodecContext *avctx, const AVFrame *frame);
++
++/**
++ * Read encoded data from the encoder.
++ *
++ * @param avctx codec context
++ * @param avpkt This will be set to a reference-counted packet allocated by
the
++ * encoder. Note that the function will always call
++ * av_packet_unref(avpkt) before doing anything else.
++ * @retval 0 success
++ * @retval AVERROR(EAGAIN) output is not available in the current state -
user must
++ * try to send input
++ * @retval AVERROR_EOF the encoder has been fully flushed, and there
will be no
++ * more output packets
++ * @retval AVERROR(EINVAL) codec not opened, or it is a decoder
++ * @retval "another negative error code" legitimate encoding errors
++ */
++int avcodec_receive_packet(AVCodecContext *avctx, AVPacket *avpkt);
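++
++/*
++ * Editor's sketch (not part of upstream FFmpeg): the matching encode loop,
++ * hedged, assuming an opened encoder *ctx, an input AVFrame *frame (NULL to
++ * flush) and an allocated AVPacket *pkt:
++ *
++ * @code
++ * int ret = avcodec_send_frame(ctx, frame);
++ * while (ret >= 0) {
++ *     ret = avcodec_receive_packet(ctx, pkt);
++ *     if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
++ *         break;            // encoder wants more input, or is flushed
++ *     if (ret < 0)
++ *         return ret;       // legitimate encoding error
++ *     // ... write pkt to the output, then drop the reference
++ *     av_packet_unref(pkt);
++ * }
++ * @endcode
++ */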
++
++/**
++ * Create and return an AVHWFramesContext with values adequate for hardware
++ * decoding. This is meant to get called from the get_format callback, and is
++ * a helper for preparing an AVHWFramesContext for AVCodecContext.hw_frames_ctx.
++ * This API is for decoding with certain hardware acceleration modes/APIs
only.
++ *
++ * The returned AVHWFramesContext is not initialized. The caller must do
this
++ * with av_hwframe_ctx_init().
++ *
++ * Calling this function is not a requirement, but makes it simpler to avoid
++ * codec or hardware API specific details when manually allocating frames.
++ *
++ * Alternatively to this, an API user can set AVCodecContext.hw_device_ctx,
++ * which sets up AVCodecContext.hw_frames_ctx fully automatically, and makes
++ * it unnecessary to call this function or having to care about
++ * AVHWFramesContext initialization at all.
++ *
++ * There are a number of requirements for calling this function:
++ *
++ * - It must be called from get_format with the same avctx parameter that
was
++ * passed to get_format. Calling it outside of get_format is not allowed,
and
++ * can trigger undefined behavior.
++ * - The function is not always supported (see description of return
values).
++ * Even if this function returns successfully, hwaccel initialization
could
++ * fail later. (The degree to which implementations check whether the
stream
++ * is actually supported varies. Some do this check only after the user's
++ * get_format callback returns.)
++ * - The hw_pix_fmt must be one of the choices suggested by get_format. If
the
++ * user decides to use a AVHWFramesContext prepared with this API
function,
++ * the user must return the same hw_pix_fmt from get_format.
++ * - The device_ref passed to this function must support the given
hw_pix_fmt.
++ * - After calling this API function, it is the user's responsibility to
++ * initialize the AVHWFramesContext (returned by the out_frames_ref
parameter),
++ * and to set AVCodecContext.hw_frames_ctx to it. If done, this must be
done
++ * before returning from get_format (this is implied by the normal
++ * AVCodecContext.hw_frames_ctx API rules).
++ * - The AVHWFramesContext parameters may change every time get_format is
++ *   called. Also, AVCodecContext.hw_frames_ctx is reset before get_format. So
++ * you are inherently required to go through this process again on every
++ * get_format call.
++ * - It is perfectly possible to call this function without actually using
++ * the resulting AVHWFramesContext. One use-case might be trying to reuse
a
++ * previously initialized AVHWFramesContext, and calling this API function
++ * only to test whether the required frame parameters have changed.
++ * - Fields that use dynamically allocated values of any kind must not be
set
++ * by the user unless setting them is explicitly allowed by the
documentation.
++ * If the user sets AVHWFramesContext.free and
AVHWFramesContext.user_opaque,
++ * the new free callback must call the potentially set previous free
callback.
++ * This API call may set any dynamically allocated fields, including the
free
++ * callback.
++ *
++ * The function will set at least the following fields on AVHWFramesContext
++ * (potentially more, depending on hwaccel API):
++ *
++ * - All fields set by av_hwframe_ctx_alloc().
++ * - Set the format field to hw_pix_fmt.
++ * - Set the sw_format field to the most suited and most versatile format.
(An
++ * implication is that this will prefer generic formats over opaque
formats
++ * with arbitrary restrictions, if possible.)
++ * - Set the width/height fields to the coded frame size, rounded up to the
++ * API-specific minimum alignment.
++ * - Only _if_ the hwaccel requires a pre-allocated pool: set the
initial_pool_size
++ * field to the number of maximum reference surfaces possible with the
codec,
++ * plus 1 surface for the user to work (meaning the user can safely
reference
++ * at most 1 decoded surface at a time), plus additional buffering
introduced
++ * by frame threading. If the hwaccel does not require pre-allocation, the
++ * field is left to 0, and the decoder will allocate new surfaces on
demand
++ * during decoding.
++ * - Possibly AVHWFramesContext.hwctx fields, depending on the underlying
++ * hardware API.
++ *
++ * Essentially, out_frames_ref returns the same as av_hwframe_ctx_alloc(),
but
++ * with basic frame parameters set.
++ *
++ * The function is stateless, and does not change the AVCodecContext or the
++ * device_ref AVHWDeviceContext.
++ *
++ * @param avctx The context which is currently calling get_format, and which
++ * implicitly contains all state needed for filling the
returned
++ * AVHWFramesContext properly.
++ * @param device_ref A reference to the AVHWDeviceContext describing the
device
++ * which will be used by the hardware decoder.
++ * @param hw_pix_fmt The hwaccel format you are going to return from
get_format.
++ * @param out_frames_ref On success, set to a reference to an
_uninitialized_
++ * AVHWFramesContext, created from the given
device_ref.
++ * Fields will be set to values required for decoding.
++ * Not changed if an error is returned.
++ * @return zero on success, a negative value on error. The following error
codes
++ * have special semantics:
++ * AVERROR(ENOENT): the decoder does not support this functionality.
Setup
++ * is always manual, or it is a decoder which does not
++ * support setting AVCodecContext.hw_frames_ctx at
all,
++ * or it is a software format.
++ * AVERROR(EINVAL): it is known that hardware decoding is not
supported for
++ * this configuration, or the device_ref is not
supported
++ * for the hwaccel referenced by hw_pix_fmt.
++ */
++int avcodec_get_hw_frames_parameters(AVCodecContext *avctx,
++ AVBufferRef *device_ref,
++ enum AVPixelFormat hw_pix_fmt,
++ AVBufferRef **out_frames_ref);
++
++enum AVCodecConfig {
++ AV_CODEC_CONFIG_PIX_FORMAT, ///< AVPixelFormat, terminated by
AV_PIX_FMT_NONE
++ AV_CODEC_CONFIG_FRAME_RATE, ///< AVRational, terminated by {0, 0}
++ AV_CODEC_CONFIG_SAMPLE_RATE, ///< int, terminated by 0
++ AV_CODEC_CONFIG_SAMPLE_FORMAT, ///< AVSampleFormat, terminated by
AV_SAMPLE_FMT_NONE
++ AV_CODEC_CONFIG_CHANNEL_LAYOUT, ///< AVChannelLayout, terminated by {0}
++ AV_CODEC_CONFIG_COLOR_RANGE, ///< AVColorRange, terminated by
AVCOL_RANGE_UNSPECIFIED
++ AV_CODEC_CONFIG_COLOR_SPACE, ///< AVColorSpace, terminated by
AVCOL_SPC_UNSPECIFIED
++};
++
++/**
++ * Retrieve a list of all supported values for a given configuration type.
++ *
++ * @param avctx An optional context to use. Values such as
++ * `strict_std_compliance` may affect the result. If NULL,
++ * default values are used.
++ * @param codec The codec to query, or NULL to use avctx->codec.
++ * @param config The configuration to query.
++ * @param flags Currently unused; should be set to zero.
++ * @param out_configs On success, set to a list of configurations,
terminated
++ * by a config-specific terminator, or NULL if all
++ * possible values are supported.
++ * @param out_num_configs On success, set to the number of elements in
++ *out_configs, excluding the terminator. Optional.
++ */
++int avcodec_get_supported_config(const AVCodecContext *avctx,
++ const AVCodec *codec, enum AVCodecConfig
config,
++ unsigned flags, const void **out_configs,
++ int *out_num_configs);
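++
++/*
++ * Editor's sketch (not part of upstream FFmpeg): a hedged example of listing
++ * the pixel formats supported by an encoder, assuming const AVCodec *codec
++ * was returned by avcodec_find_encoder():
++ *
++ * @code
++ * const enum AVPixelFormat *fmts = NULL;
++ * int nb_fmts = 0;
++ * if (avcodec_get_supported_config(NULL, codec, AV_CODEC_CONFIG_PIX_FORMAT,
++ *                                  0, (const void **)&fmts, &nb_fmts) >= 0
++ *     && fmts) {
++ *     for (int i = 0; i < nb_fmts; i++)
++ *         av_log(NULL, AV_LOG_INFO, "%s\n", av_get_pix_fmt_name(fmts[i]));
++ * }
++ * // a NULL list with a successful return means all formats are supported
++ * @endcode
++ */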
++
++
++
++/**
++ * @defgroup lavc_parsing Frame parsing
++ * @{
++ */
++
++enum AVPictureStructure {
++ AV_PICTURE_STRUCTURE_UNKNOWN, ///< unknown
++ AV_PICTURE_STRUCTURE_TOP_FIELD, ///< coded as top field
++ AV_PICTURE_STRUCTURE_BOTTOM_FIELD, ///< coded as bottom field
++ AV_PICTURE_STRUCTURE_FRAME, ///< coded as frame
++};
++
++typedef struct AVCodecParserContext {
++ void *priv_data;
++ const struct AVCodecParser *parser;
++ int64_t frame_offset; /* offset of the current frame */
++ int64_t cur_offset; /* current offset
++ (incremented by each av_parser_parse()) */
++ int64_t next_frame_offset; /* offset of the next frame */
++ /* video info */
++ int pict_type; /* XXX: Put it back in AVCodecContext. */
++ /**
++ * This field is used for proper frame duration computation in lavf.
++ * It signals, how much longer the frame duration of the current frame
++ * is compared to normal frame duration.
++ *
++ * frame_duration = (1 + repeat_pict) * time_base
++ *
++ * It is used by codecs like H.264 to display telecined material.
++ */
++ int repeat_pict; /* XXX: Put it back in AVCodecContext. */
++ int64_t pts; /* pts of the current frame */
++ int64_t dts; /* dts of the current frame */
++
++ /* private data */
++ int64_t last_pts;
++ int64_t last_dts;
++ int fetch_timestamp;
++
++#define AV_PARSER_PTS_NB 4
++ int cur_frame_start_index;
++ int64_t cur_frame_offset[AV_PARSER_PTS_NB];
++ int64_t cur_frame_pts[AV_PARSER_PTS_NB];
++ int64_t cur_frame_dts[AV_PARSER_PTS_NB];
++
++ int flags;
++#define PARSER_FLAG_COMPLETE_FRAMES 0x0001
++#define PARSER_FLAG_ONCE 0x0002
++/// Set if the parser has a valid file offset
++#define PARSER_FLAG_FETCHED_OFFSET 0x0004
++#define PARSER_FLAG_USE_CODEC_TS 0x1000
++
++ int64_t offset; ///< byte offset from starting packet start
++ int64_t cur_frame_end[AV_PARSER_PTS_NB];
++
++ /**
++ * Set by parser to 1 for key frames and 0 for non-key frames.
++ * It is initialized to -1, so if the parser doesn't set this flag,
++ * old-style fallback using AV_PICTURE_TYPE_I picture type as key frames
++ * will be used.
++ */
++ int key_frame;
++
++ // Timestamp generation support:
++ /**
++ * Synchronization point for start of timestamp generation.
++ *
++ * Set to >0 for sync point, 0 for no sync point and <0 for undefined
++ * (default).
++ *
++ * For example, this corresponds to presence of H.264 buffering period
++ * SEI message.
++ */
++ int dts_sync_point;
++
++ /**
++ * Offset of the current timestamp against last timestamp sync point in
++ * units of AVCodecContext.time_base.
++ *
++ * Set to INT_MIN when dts_sync_point unused. Otherwise, it must
++ * contain a valid timestamp offset.
++ *
++ * Note that the timestamp of sync point has usually a nonzero
++ * dts_ref_dts_delta, which refers to the previous sync point. Offset of
++ * the next frame after timestamp sync point will be usually 1.
++ *
++ * For example, this corresponds to H.264 cpb_removal_delay.
++ */
++ int dts_ref_dts_delta;
++
++ /**
++ * Presentation delay of current frame in units of
AVCodecContext.time_base.
++ *
++ * Set to INT_MIN when dts_sync_point unused. Otherwise, it must
++ * contain valid non-negative timestamp delta (presentation time of a
frame
++ * must not lie in the past).
++ *
++ * This delay represents the difference between decoding and
presentation
++ * time of the frame.
++ *
++ * For example, this corresponds to H.264 dpb_output_delay.
++ */
++ int pts_dts_delta;
++
++ /**
++ * Position of the packet in file.
++ *
++ * Analogous to cur_frame_pts/dts
++ */
++ int64_t cur_frame_pos[AV_PARSER_PTS_NB];
++
++ /**
++ * Byte position of currently parsed frame in stream.
++ */
++ int64_t pos;
++
++ /**
++ * Previous frame byte position.
++ */
++ int64_t last_pos;
++
++ /**
++ * Duration of the current frame.
++ * For audio, this is in units of 1 / AVCodecContext.sample_rate.
++ * For all other types, this is in units of AVCodecContext.time_base.
++ */
++ int duration;
++
++ enum AVFieldOrder field_order;
++
++ /**
++ * Indicate whether a picture is coded as a frame, top field or bottom
field.
++ *
++ * For example, H.264 field_pic_flag equal to 0 corresponds to
++ * AV_PICTURE_STRUCTURE_FRAME. An H.264 picture with field_pic_flag
++ * equal to 1 and bottom_field_flag equal to 0 corresponds to
++ * AV_PICTURE_STRUCTURE_TOP_FIELD.
++ */
++ enum AVPictureStructure picture_structure;
++
++ /**
++ * Picture number incremented in presentation or output order.
++ * This field may be reinitialized at the first picture of a new
sequence.
++ *
++ * For example, this corresponds to H.264 PicOrderCnt.
++ */
++ int output_picture_number;
++
++ /**
++ * Dimensions of the decoded video intended for presentation.
++ */
++ int width;
++ int height;
++
++ /**
++ * Dimensions of the coded video.
++ */
++ int coded_width;
++ int coded_height;
++
++ /**
++ * The format of the coded data; corresponds to enum AVPixelFormat for video
++ * and to enum AVSampleFormat for audio.
++ *
++ * Note that a decoder can have considerable freedom in how exactly it
++ * decodes the data, so the format reported here might be different
from the
++ * one returned by a decoder.
++ */
++ int format;
++} AVCodecParserContext;
++
++typedef struct AVCodecParser {
++ int codec_ids[7]; /* several codec IDs are permitted */
++ int priv_data_size;
++ int (*parser_init)(AVCodecParserContext *s);
++ /* This callback never returns an error, a negative value means that
++ * the frame start was in a previous packet. */
++ int (*parser_parse)(AVCodecParserContext *s,
++ AVCodecContext *avctx,
++ const uint8_t **poutbuf, int *poutbuf_size,
++ const uint8_t *buf, int buf_size);
++ void (*parser_close)(AVCodecParserContext *s);
++ int (*split)(AVCodecContext *avctx, const uint8_t *buf, int buf_size);
++} AVCodecParser;
++
++/**
++ * Iterate over all registered codec parsers.
++ *
++ * @param opaque a pointer where libavcodec will store the iteration state.
Must
++ * point to NULL to start the iteration.
++ *
++ * @return the next registered codec parser or NULL when the iteration is
++ * finished
++ */
++const AVCodecParser *av_parser_iterate(void **opaque);
++
++AVCodecParserContext *av_parser_init(int codec_id);
++
++/**
++ * Parse a packet.
++ *
++ * @param s parser context.
++ * @param avctx codec context.
++ * @param poutbuf set to pointer to parsed buffer or NULL if not yet
finished.
++ * @param poutbuf_size set to size of parsed buffer or zero if not yet
finished.
++ * @param buf input buffer.
++ * @param buf_size buffer size in bytes without the padding. I.e. the
full buffer
++ size is assumed to be buf_size +
AV_INPUT_BUFFER_PADDING_SIZE.
++ To signal EOF, this should be 0 (so that the last
frame
++ can be output).
++ * @param pts input presentation timestamp.
++ * @param dts input decoding timestamp.
++ * @param pos input byte position in stream.
++ * @return the number of bytes of the input bitstream used.
++ *
++ * Example:
++ * @code
++ * while(in_len){
++ * len = av_parser_parse2(myparser, AVCodecContext, &data, &size,
++ * in_data, in_len,
++ * pts, dts, pos);
++ * in_data += len;
++ * in_len -= len;
++ *
++ * if(size)
++ * decode_frame(data, size);
++ * }
++ * @endcode
++ */
++int av_parser_parse2(AVCodecParserContext *s,
++ AVCodecContext *avctx,
++ uint8_t **poutbuf, int *poutbuf_size,
++ const uint8_t *buf, int buf_size,
++ int64_t pts, int64_t dts,
++ int64_t pos);
++
++void av_parser_close(AVCodecParserContext *s);
++
++/**
++ * @}
++ * @}
++ */
++
++/**
++ * @addtogroup lavc_encoding
++ * @{
++ */
++
++int avcodec_encode_subtitle(AVCodecContext *avctx, uint8_t *buf, int
buf_size,
++ const AVSubtitle *sub);
++
++
++/**
++ * @}
++ */
++
++/**
++ * @defgroup lavc_misc Utility functions
++ * @ingroup libavc
++ *
++ * Miscellaneous utility functions related to both encoding and decoding
++ * (or neither).
++ * @{
++ */
++
++/**
++ * @defgroup lavc_misc_pixfmt Pixel formats
++ *
++ * Functions for working with pixel formats.
++ * @{
++ */
++
++/**
++ * Return a value representing the fourCC code associated to the
++ * pixel format pix_fmt, or 0 if no associated fourCC code can be
++ * found.
++ */
++unsigned int avcodec_pix_fmt_to_codec_tag(enum AVPixelFormat pix_fmt);
++
++/**
++ * Find the best pixel format to convert to given a certain source pixel
++ * format. When converting from one pixel format to another, information
loss
++ * may occur. For example, when converting from RGB24 to GRAY, the color
++ * information will be lost. Similarly, other losses occur when converting
from
++ * some formats to other formats. avcodec_find_best_pix_fmt_of_2() searches
which of
++ * the given pixel formats should be used to suffer the least amount of
loss.
++ * The pixel formats from which it chooses one, are determined by the
++ * pix_fmt_list parameter.
++ *
++ *
++ * @param[in] pix_fmt_list AV_PIX_FMT_NONE terminated array of pixel
formats to choose from
++ * @param[in] src_pix_fmt source pixel format
++ * @param[in] has_alpha Whether the source pixel format alpha channel is
used.
++ * @param[out] loss_ptr Combination of flags informing you what kind of
losses will occur.
++ * @return The best pixel format to convert to or -1 if none was found.
++ */
++enum AVPixelFormat avcodec_find_best_pix_fmt_of_list(const enum
AVPixelFormat *pix_fmt_list,
++ enum AVPixelFormat src_pix_fmt,
++ int has_alpha, int *loss_ptr);
++
++enum AVPixelFormat avcodec_default_get_format(struct AVCodecContext *s,
const enum AVPixelFormat * fmt);
++
++/**
++ * @}
++ */
++
++void avcodec_string(char *buf, int buf_size, AVCodecContext *enc, int
encode);
++
++int avcodec_default_execute(AVCodecContext *c, int (*func)(AVCodecContext
*c2, void *arg2),void *arg, int *ret, int count, int size);
++int avcodec_default_execute2(AVCodecContext *c, int (*func)(AVCodecContext
*c2, void *arg2, int, int),void *arg, int *ret, int count);
++//FIXME func typedef
++
++/**
++ * Fill AVFrame audio data and linesize pointers.
++ *
++ * The buffer buf must be a preallocated buffer with a size big enough
++ * to contain the specified samples amount. The filled AVFrame data
++ * pointers will point to this buffer.
++ *
++ * AVFrame extended_data channel pointers are allocated if necessary for
++ * planar audio.
++ *
++ * @param frame the AVFrame
++ * frame->nb_samples must be set prior to calling the
++ * function. This function fills in frame->data,
++ * frame->extended_data, frame->linesize[0].
++ * @param nb_channels channel count
++ * @param sample_fmt sample format
++ * @param buf buffer to use for frame data
++ * @param buf_size size of buffer
++ * @param align plane size sample alignment (0 = default)
++ * @return >=0 on success, negative error code on failure
++ * @todo return the size in bytes required to store the samples in
++ * case of success, at the next libavutil bump
++ */
++int avcodec_fill_audio_frame(AVFrame *frame, int nb_channels,
++ enum AVSampleFormat sample_fmt, const uint8_t
*buf,
++ int buf_size, int align);
++
++/**
++ * Reset the internal codec state / flush internal buffers. Should be called
++ * e.g. when seeking or when switching to a different stream.
++ *
++ * @note for decoders, this function just releases any references the
decoder
++ * might keep internally, but the caller's references remain valid.
++ *
++ * @note for encoders, this function will only do something if the encoder
++ * declares support for AV_CODEC_CAP_ENCODER_FLUSH. When called, the encoder
++ * will drain any remaining packets, and can then be reused for a different
++ * stream (as opposed to sending a null frame which will leave the encoder
++ * in a permanent EOF state after draining). This can be desirable if the
++ * cost of tearing down and replacing the encoder instance is high.
++ */
++void avcodec_flush_buffers(AVCodecContext *avctx);
++
++/**
++ * Return audio frame duration.
++ *
++ * @param avctx codec context
++ * @param frame_bytes size of the frame, or 0 if unknown
++ * @return frame duration, in samples, if known. 0 if not able
to
++ * determine.
++ */
++int av_get_audio_frame_duration(AVCodecContext *avctx, int frame_bytes);
++
++/* memory */
++
++/**
++ * Same behaviour as av_fast_malloc but the buffer has additional
++ * AV_INPUT_BUFFER_PADDING_SIZE at the end which will always be 0.
++ *
++ * In addition the whole buffer will initially and after resizes
++ * be 0-initialized so that no uninitialized data will ever appear.
++ */
++void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size);
++
++/**
++ * Same behaviour as av_fast_padded_malloc except that the buffer will always
++ * be 0-initialized after the call.
++ */
++void av_fast_padded_mallocz(void *ptr, unsigned int *size, size_t min_size);
++
++/**
++ * @return a positive value if s is open (i.e. avcodec_open2() was called
on it),
++ * 0 otherwise.
++ */
++int avcodec_is_open(AVCodecContext *s);
++
++/**
++ * @}
++ */
++
++#endif /* AVCODEC_AVCODEC_H */
+diff --git a/media/ffvpx/libavcodec/avdct.h
b/dom/media/platforms/ffmpeg/ffmpeg62/include/libavcodec/avdct.h
+copy from media/ffvpx/libavcodec/avdct.h
+copy to dom/media/platforms/ffmpeg/ffmpeg62/include/libavcodec/avdct.h
+diff --git a/media/ffvpx/libavcodec/bsf.h
b/dom/media/platforms/ffmpeg/ffmpeg62/include/libavcodec/bsf.h
+copy from media/ffvpx/libavcodec/bsf.h
+copy to dom/media/platforms/ffmpeg/ffmpeg62/include/libavcodec/bsf.h
+diff --git a/dom/media/platforms/ffmpeg/ffmpeg62/include/libavcodec/codec.h
b/dom/media/platforms/ffmpeg/ffmpeg62/include/libavcodec/codec.h
+new file mode 100644
+--- /dev/null
++++ b/dom/media/platforms/ffmpeg/ffmpeg62/include/libavcodec/codec.h
+@@ -0,0 +1,367 @@
++/*
++ * AVCodec public API
++ *
++ * This file is part of FFmpeg.
++ *
++ * FFmpeg is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU Lesser General Public
++ * License as published by the Free Software Foundation; either
++ * version 2.1 of the License, or (at your option) any later version.
++ *
++ * FFmpeg is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with FFmpeg; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
++ */
++
++#ifndef AVCODEC_CODEC_H
++#define AVCODEC_CODEC_H
++
++#include <stdint.h>
++
++#include "libavutil/avutil.h"
++#include "libavutil/hwcontext.h"
++#include "libavutil/log.h"
++#include "libavutil/pixfmt.h"
++#include "libavutil/rational.h"
++#include "libavutil/samplefmt.h"
++
++#include "libavcodec/codec_id.h"
++#include "libavcodec/version_major.h"
++
++/**
++ * @addtogroup lavc_core
++ * @{
++ */
++
++/**
++ * Decoder can use draw_horiz_band callback.
++ */
++#define AV_CODEC_CAP_DRAW_HORIZ_BAND (1 << 0)
++/**
++ * Codec uses get_buffer() or get_encode_buffer() for allocating buffers and
++ * supports custom allocators.
++ * If not set, it might not use get_buffer() or get_encode_buffer() at all, or
++ * use operations that assume the buffer was allocated by
++ * avcodec_default_get_buffer2 or avcodec_default_get_encode_buffer.
++ */
++#define AV_CODEC_CAP_DR1 (1 << 1)
++/**
++ * Encoder or decoder requires flushing with NULL input at the end in order to
++ * give the complete and correct output.
++ *
++ * NOTE: If this flag is not set, the codec is guaranteed to never be fed with
++ *       NULL data. The user can still send NULL data to the public encode
++ *       or decode function, but libavcodec will not pass it along to the codec
++ *       unless this flag is set.
++ *
++ * Decoders:
++ * The decoder has a non-zero delay and needs to be fed with avpkt->data=NULL,
++ * avpkt->size=0 at the end to get the delayed data until the decoder no longer
++ * returns frames.
++ *
++ * Encoders:
++ * The encoder needs to be fed with NULL data at the end of encoding until the
++ * encoder no longer returns data.
++ *
++ * NOTE: For encoders implementing the AVCodec.encode2() function, setting this
++ *       flag also means that the encoder must set the pts and duration for
++ *       each output packet. If this flag is not set, the pts and duration will
++ *       be determined by libavcodec from the input frame.
++ */
++#define AV_CODEC_CAP_DELAY (1 << 5)
++/**
++ * Codec can be fed a final frame with a smaller size.
++ * This can be used to prevent truncation of the last audio samples.
++ */
++#define AV_CODEC_CAP_SMALL_LAST_FRAME (1 << 6)
++
++/**
++ * Codec is experimental and is thus avoided in favor of non experimental
++ * encoders
++ */
++#define AV_CODEC_CAP_EXPERIMENTAL (1 << 9)
++/**
++ * Codec should fill in channel configuration and samplerate instead of container
++ */
++#define AV_CODEC_CAP_CHANNEL_CONF (1 << 10)
++/**
++ * Codec supports frame-level multithreading.
++ */
++#define AV_CODEC_CAP_FRAME_THREADS (1 << 12)
++/**
++ * Codec supports slice-based (or partition-based) multithreading.
++ */
++#define AV_CODEC_CAP_SLICE_THREADS (1 << 13)
++/**
++ * Codec supports changed parameters at any point.
++ */
++#define AV_CODEC_CAP_PARAM_CHANGE (1 << 14)
++/**
++ * Codec supports multithreading through a method other than slice- or
++ * frame-level multithreading. Typically this marks wrappers around
++ * multithreading-capable external libraries.
++ */
++#define AV_CODEC_CAP_OTHER_THREADS (1 << 15)
++/**
++ * Audio encoder supports receiving a different number of samples in each call.
++ */
++#define AV_CODEC_CAP_VARIABLE_FRAME_SIZE (1 << 16)
++/**
++ * Decoder is not a preferred choice for probing.
++ * This indicates that the decoder is not a good choice for probing.
++ * It could for example be an expensive to spin up hardware decoder,
++ * or it could simply not provide a lot of useful information about
++ * the stream.
++ * A decoder marked with this flag should only be used as last resort
++ * choice for probing.
++ */
++#define AV_CODEC_CAP_AVOID_PROBING (1 << 17)
++
++/**
++ * Codec is backed by a hardware implementation. Typically used to
++ * identify a non-hwaccel hardware decoder. For information about hwaccels, use
++ * avcodec_get_hw_config() instead.
++ */
++#define AV_CODEC_CAP_HARDWARE (1 << 18)
++
++/**
++ * Codec is potentially backed by a hardware implementation, but not
++ * necessarily. This is used instead of AV_CODEC_CAP_HARDWARE, if the
++ * implementation provides some sort of internal fallback.
++ */
++#define AV_CODEC_CAP_HYBRID (1 << 19)
++
++/**
++ * This encoder can reorder user opaque values from input AVFrames and return
++ * them with corresponding output packets.
++ * @see AV_CODEC_FLAG_COPY_OPAQUE
++ */
++#define AV_CODEC_CAP_ENCODER_REORDERED_OPAQUE (1 << 20)
++
++/**
++ * This encoder can be flushed using avcodec_flush_buffers(). If this flag is
++ * not set, the encoder must be closed and reopened to ensure that no frames
++ * remain pending.
++ */
++#define AV_CODEC_CAP_ENCODER_FLUSH (1 << 21)
++
++/**
++ * The encoder is able to output reconstructed frame data, i.e. raw frames that
++ * would be produced by decoding the encoded bitstream.
++ *
++ * Reconstructed frame output is enabled by the AV_CODEC_FLAG_RECON_FRAME flag.
++ */
++#define AV_CODEC_CAP_ENCODER_RECON_FRAME (1 << 22)
++
++/**
++ * AVProfile.
++ */
++typedef struct AVProfile {
++ int profile;
++ const char *name; ///< short name for the profile
++} AVProfile;
++
++/**
++ * AVCodec.
++ */
++typedef struct AVCodec {
++ /**
++ * Name of the codec implementation.
++ * The name is globally unique among encoders and among decoders (but an
++ * encoder and a decoder can share the same name).
++ * This is the primary way to find a codec from the user perspective.
++ */
++ const char *name;
++ /**
++     * Descriptive name for the codec, meant to be more human readable than name.
++ * You should use the NULL_IF_CONFIG_SMALL() macro to define it.
++ */
++ const char *long_name;
++ enum AVMediaType type;
++ enum AVCodecID id;
++ /**
++ * Codec capabilities.
++ * see AV_CODEC_CAP_*
++ */
++ int capabilities;
++    uint8_t max_lowres;                     ///< maximum value for lowres supported by the decoder
++
++ /**
++ * Deprecated codec capabilities.
++ */
++ attribute_deprecated
++    const AVRational *supported_framerates; ///< @deprecated use avcodec_get_supported_config()
++    attribute_deprecated
++    const enum AVPixelFormat *pix_fmts;     ///< @deprecated use avcodec_get_supported_config()
++    attribute_deprecated
++    const int *supported_samplerates;       ///< @deprecated use avcodec_get_supported_config()
++    attribute_deprecated
++    const enum AVSampleFormat *sample_fmts; ///< @deprecated use avcodec_get_supported_config()
++
++    const AVClass *priv_class;              ///< AVClass for the private context
++    const AVProfile *profiles;              ///< array of recognized profiles, or NULL if unknown, array is terminated by {AV_PROFILE_UNKNOWN}
++
++ /**
++ * Group name of the codec implementation.
++ * This is a short symbolic name of the wrapper backing this codec. A
++ * wrapper uses some kind of external implementation for the codec, such
++     * as an external library, or a codec implementation provided by the OS or
++     * the hardware.
++     * If this field is NULL, this is a builtin, libavcodec native codec.
++     * If non-NULL, this will be the suffix in AVCodec.name in most cases
++     * (usually AVCodec.name will be of the form "<codec_name>_<wrapper_name>").
++ */
++ const char *wrapper_name;
++
++ /**
++ * Array of supported channel layouts, terminated with a zeroed layout.
++ * @deprecated use avcodec_get_supported_config()
++ */
++ attribute_deprecated
++ const AVChannelLayout *ch_layouts;
++} AVCodec;
++
++/**
++ * Iterate over all registered codecs.
++ *
++ * @param opaque a pointer where libavcodec will store the iteration state. Must
++ *               point to NULL to start the iteration.
++ *
++ * @return the next registered codec or NULL when the iteration is
++ * finished
++ */
++const AVCodec *av_codec_iterate(void **opaque);
++
++/**
++ * Find a registered decoder with a matching codec ID.
++ *
++ * @param id AVCodecID of the requested decoder
++ * @return A decoder if one was found, NULL otherwise.
++ */
++const AVCodec *avcodec_find_decoder(enum AVCodecID id);
++
++/**
++ * Find a registered decoder with the specified name.
++ *
++ * @param name name of the requested decoder
++ * @return A decoder if one was found, NULL otherwise.
++ */
++const AVCodec *avcodec_find_decoder_by_name(const char *name);
++
++/**
++ * Find a registered encoder with a matching codec ID.
++ *
++ * @param id AVCodecID of the requested encoder
++ * @return An encoder if one was found, NULL otherwise.
++ */
++const AVCodec *avcodec_find_encoder(enum AVCodecID id);
++
++/**
++ * Find a registered encoder with the specified name.
++ *
++ * @param name name of the requested encoder
++ * @return An encoder if one was found, NULL otherwise.
++ */
++const AVCodec *avcodec_find_encoder_by_name(const char *name);
++/**
++ * @return a non-zero number if codec is an encoder, zero otherwise
++ */
++int av_codec_is_encoder(const AVCodec *codec);
++
++/**
++ * @return a non-zero number if codec is a decoder, zero otherwise
++ */
++int av_codec_is_decoder(const AVCodec *codec);
++
++/**
++ * Return a name for the specified profile, if available.
++ *
++ * @param codec the codec that is searched for the given profile
++ * @param profile the profile value for which a name is requested
++ * @return A name for the profile if found, NULL otherwise.
++ */
++const char *av_get_profile_name(const AVCodec *codec, int profile);
++
++enum {
++ /**
++ * The codec supports this format via the hw_device_ctx interface.
++ *
++ * When selecting this format, AVCodecContext.hw_device_ctx should
++ * have been set to a device of the specified type before calling
++ * avcodec_open2().
++ */
++ AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX = 0x01,
++ /**
++ * The codec supports this format via the hw_frames_ctx interface.
++ *
++ * When selecting this format for a decoder,
++ * AVCodecContext.hw_frames_ctx should be set to a suitable frames
++ * context inside the get_format() callback. The frames context
++ * must have been created on a device of the specified type.
++ *
++ * When selecting this format for an encoder,
++ * AVCodecContext.hw_frames_ctx should be set to the context which
++ * will be used for the input frames before calling avcodec_open2().
++ */
++ AV_CODEC_HW_CONFIG_METHOD_HW_FRAMES_CTX = 0x02,
++ /**
++ * The codec supports this format by some internal method.
++ *
++ * This format can be selected without any additional configuration -
++ * no device or frames context is required.
++ */
++ AV_CODEC_HW_CONFIG_METHOD_INTERNAL = 0x04,
++ /**
++ * The codec supports this format by some ad-hoc method.
++ *
++ * Additional settings and/or function calls are required. See the
++ * codec-specific documentation for details. (Methods requiring
++ * this sort of configuration are deprecated and others should be
++ * used in preference.)
++ */
++ AV_CODEC_HW_CONFIG_METHOD_AD_HOC = 0x08,
++};
++
++typedef struct AVCodecHWConfig {
++ /**
++ * For decoders, a hardware pixel format which that decoder may be
++ * able to decode to if suitable hardware is available.
++ *
++ * For encoders, a pixel format which the encoder may be able to
++ * accept. If set to AV_PIX_FMT_NONE, this applies to all pixel
++ * formats supported by the codec.
++ */
++ enum AVPixelFormat pix_fmt;
++ /**
++ * Bit set of AV_CODEC_HW_CONFIG_METHOD_* flags, describing the possible
++ * setup methods which can be used with this configuration.
++ */
++ int methods;
++ /**
++ * The device type associated with the configuration.
++ *
++ * Must be set for AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX and
++ * AV_CODEC_HW_CONFIG_METHOD_HW_FRAMES_CTX, otherwise unused.
++ */
++ enum AVHWDeviceType device_type;
++} AVCodecHWConfig;
++
++/**
++ * Retrieve supported hardware configurations for a codec.
++ *
++ * Values of index from zero to some maximum return the indexed configuration
++ * descriptor; all other values return NULL. If the codec does not support
++ * any hardware configurations then it will always return NULL.
++ */
++const AVCodecHWConfig *avcodec_get_hw_config(const AVCodec *codec, int index);
++
++/**
++ * @}
++ */
++
++#endif /* AVCODEC_CODEC_H */
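Not part of the patch: a short illustration of the iteration and hardware-configuration API declared in the header above, assuming libavcodec is linked normally; names are illustrative only.
````
#include <libavcodec/avcodec.h>
#include <stdio.h>

/* List every registered decoder that advertises a hardware backing and
 * show which ones can be driven through hw_device_ctx. */
static void list_hw_decoders(void)
{
    void *iter = NULL;
    const AVCodec *c;

    while ((c = av_codec_iterate(&iter))) {
        if (!av_codec_is_decoder(c))
            continue;
        if (!(c->capabilities & (AV_CODEC_CAP_HARDWARE | AV_CODEC_CAP_HYBRID)))
            continue;

        printf("%s (%s)\n", c->name, c->long_name ? c->long_name : "?");

        for (int i = 0; ; i++) {
            const AVCodecHWConfig *cfg = avcodec_get_hw_config(c, i);
            if (!cfg)
                break;                /* no more configurations */
            if (cfg->methods & AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX)
                printf("  hw_device_ctx, device type %d\n", (int)cfg->device_type);
        }
    }
}
````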
+diff --git a/media/ffvpx/libavcodec/codec_desc.h b/dom/media/platforms/ffmpeg/ffmpeg62/include/libavcodec/codec_desc.h
+copy from media/ffvpx/libavcodec/codec_desc.h
+copy to dom/media/platforms/ffmpeg/ffmpeg62/include/libavcodec/codec_desc.h
+diff --git a/dom/media/platforms/ffmpeg/ffmpeg62/include/libavcodec/codec_id.h b/dom/media/platforms/ffmpeg/ffmpeg62/include/libavcodec/codec_id.h
+new file mode 100644
+--- /dev/null
++++ b/dom/media/platforms/ffmpeg/ffmpeg62/include/libavcodec/codec_id.h
+@@ -0,0 +1,683 @@
++/*
++ * Codec IDs
++ *
++ * This file is part of FFmpeg.
++ *
++ * FFmpeg is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU Lesser General Public
++ * License as published by the Free Software Foundation; either
++ * version 2.1 of the License, or (at your option) any later version.
++ *
++ * FFmpeg is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with FFmpeg; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
++ */
++
++#ifndef AVCODEC_CODEC_ID_H
++#define AVCODEC_CODEC_ID_H
++
++#include "libavutil/avutil.h"
++#include "libavutil/samplefmt.h"
++
++#include "version_major.h"
++
++/**
++ * @addtogroup lavc_core
++ * @{
++ */
++
++/**
++ * Identify the syntax and semantics of the bitstream.
++ * The principle is roughly:
++ * Two decoders with the same ID can decode the same streams.
++ * Two encoders with the same ID can encode compatible streams.
++ * There may be slight deviations from the principle due to implementation
++ * details.
++ *
++ * If you add a codec ID to this list, add it so that
++ * 1. no value of an existing codec ID changes (that would break ABI),
++ * 2. it is as close as possible to similar codecs
++ *
++ * After adding new codec IDs, do not forget to add an entry to the codec
++ * descriptor list and bump libavcodec minor version.
++ */
++enum AVCodecID {
++ AV_CODEC_ID_NONE,
++
++ /* video codecs */
++ AV_CODEC_ID_MPEG1VIDEO,
++ AV_CODEC_ID_MPEG2VIDEO, ///< preferred ID for MPEG-1/2 video decoding
++ AV_CODEC_ID_H261,
++ AV_CODEC_ID_H263,
++ AV_CODEC_ID_RV10,
++ AV_CODEC_ID_RV20,
++ AV_CODEC_ID_MJPEG,
++ AV_CODEC_ID_MJPEGB,
++ AV_CODEC_ID_LJPEG,
++ AV_CODEC_ID_SP5X,
++ AV_CODEC_ID_JPEGLS,
++ AV_CODEC_ID_MPEG4,
++ AV_CODEC_ID_RAWVIDEO,
++ AV_CODEC_ID_MSMPEG4V1,
++ AV_CODEC_ID_MSMPEG4V2,
++ AV_CODEC_ID_MSMPEG4V3,
++ AV_CODEC_ID_WMV1,
++ AV_CODEC_ID_WMV2,
++ AV_CODEC_ID_H263P,
++ AV_CODEC_ID_H263I,
++ AV_CODEC_ID_FLV1,
++ AV_CODEC_ID_SVQ1,
++ AV_CODEC_ID_SVQ3,
++ AV_CODEC_ID_DVVIDEO,
++ AV_CODEC_ID_HUFFYUV,
++ AV_CODEC_ID_CYUV,
++ AV_CODEC_ID_H264,
++ AV_CODEC_ID_INDEO3,
++ AV_CODEC_ID_VP3,
++ AV_CODEC_ID_THEORA,
++ AV_CODEC_ID_ASV1,
++ AV_CODEC_ID_ASV2,
++ AV_CODEC_ID_FFV1,
++ AV_CODEC_ID_4XM,
++ AV_CODEC_ID_VCR1,
++ AV_CODEC_ID_CLJR,
++ AV_CODEC_ID_MDEC,
++ AV_CODEC_ID_ROQ,
++ AV_CODEC_ID_INTERPLAY_VIDEO,
++ AV_CODEC_ID_XAN_WC3,
++ AV_CODEC_ID_XAN_WC4,
++ AV_CODEC_ID_RPZA,
++ AV_CODEC_ID_CINEPAK,
++ AV_CODEC_ID_WS_VQA,
++ AV_CODEC_ID_MSRLE,
++ AV_CODEC_ID_MSVIDEO1,
++ AV_CODEC_ID_IDCIN,
++ AV_CODEC_ID_8BPS,
++ AV_CODEC_ID_SMC,
++ AV_CODEC_ID_FLIC,
++ AV_CODEC_ID_TRUEMOTION1,
++ AV_CODEC_ID_VMDVIDEO,
++ AV_CODEC_ID_MSZH,
++ AV_CODEC_ID_ZLIB,
++ AV_CODEC_ID_QTRLE,
++ AV_CODEC_ID_TSCC,
++ AV_CODEC_ID_ULTI,
++ AV_CODEC_ID_QDRAW,
++ AV_CODEC_ID_VIXL,
++ AV_CODEC_ID_QPEG,
++ AV_CODEC_ID_PNG,
++ AV_CODEC_ID_PPM,
++ AV_CODEC_ID_PBM,
++ AV_CODEC_ID_PGM,
++ AV_CODEC_ID_PGMYUV,
++ AV_CODEC_ID_PAM,
++ AV_CODEC_ID_FFVHUFF,
++ AV_CODEC_ID_RV30,
++ AV_CODEC_ID_RV40,
++ AV_CODEC_ID_VC1,
++ AV_CODEC_ID_WMV3,
++ AV_CODEC_ID_LOCO,
++ AV_CODEC_ID_WNV1,
++ AV_CODEC_ID_AASC,
++ AV_CODEC_ID_INDEO2,
++ AV_CODEC_ID_FRAPS,
++ AV_CODEC_ID_TRUEMOTION2,
++ AV_CODEC_ID_BMP,
++ AV_CODEC_ID_CSCD,
++ AV_CODEC_ID_MMVIDEO,
++ AV_CODEC_ID_ZMBV,
++ AV_CODEC_ID_AVS,
++ AV_CODEC_ID_SMACKVIDEO,
++ AV_CODEC_ID_NUV,
++ AV_CODEC_ID_KMVC,
++ AV_CODEC_ID_FLASHSV,
++ AV_CODEC_ID_CAVS,
++ AV_CODEC_ID_JPEG2000,
++ AV_CODEC_ID_VMNC,
++ AV_CODEC_ID_VP5,
++ AV_CODEC_ID_VP6,
++ AV_CODEC_ID_VP6F,
++ AV_CODEC_ID_TARGA,
++ AV_CODEC_ID_DSICINVIDEO,
++ AV_CODEC_ID_TIERTEXSEQVIDEO,
++ AV_CODEC_ID_TIFF,
++ AV_CODEC_ID_GIF,
++ AV_CODEC_ID_DXA,
++ AV_CODEC_ID_DNXHD,
++ AV_CODEC_ID_THP,
++ AV_CODEC_ID_SGI,
++ AV_CODEC_ID_C93,
++ AV_CODEC_ID_BETHSOFTVID,
++ AV_CODEC_ID_PTX,
++ AV_CODEC_ID_TXD,
++ AV_CODEC_ID_VP6A,
++ AV_CODEC_ID_AMV,
++ AV_CODEC_ID_VB,
++ AV_CODEC_ID_PCX,
++ AV_CODEC_ID_SUNRAST,
++ AV_CODEC_ID_INDEO4,
++ AV_CODEC_ID_INDEO5,
++ AV_CODEC_ID_MIMIC,
++ AV_CODEC_ID_RL2,
++ AV_CODEC_ID_ESCAPE124,
++ AV_CODEC_ID_DIRAC,
++ AV_CODEC_ID_BFI,
++ AV_CODEC_ID_CMV,
++ AV_CODEC_ID_MOTIONPIXELS,
++ AV_CODEC_ID_TGV,
++ AV_CODEC_ID_TGQ,
++ AV_CODEC_ID_TQI,
++ AV_CODEC_ID_AURA,
++ AV_CODEC_ID_AURA2,
++ AV_CODEC_ID_V210X,
++ AV_CODEC_ID_TMV,
++ AV_CODEC_ID_V210,
++ AV_CODEC_ID_DPX,
++ AV_CODEC_ID_MAD,
++ AV_CODEC_ID_FRWU,
++ AV_CODEC_ID_FLASHSV2,
++ AV_CODEC_ID_CDGRAPHICS,
++ AV_CODEC_ID_R210,
++ AV_CODEC_ID_ANM,
++ AV_CODEC_ID_BINKVIDEO,
++ AV_CODEC_ID_IFF_ILBM,
++#define AV_CODEC_ID_IFF_BYTERUN1 AV_CODEC_ID_IFF_ILBM
++ AV_CODEC_ID_KGV1,
++ AV_CODEC_ID_YOP,
++ AV_CODEC_ID_VP8,
++ AV_CODEC_ID_PICTOR,
++ AV_CODEC_ID_ANSI,
++ AV_CODEC_ID_A64_MULTI,
++ AV_CODEC_ID_A64_MULTI5,
++ AV_CODEC_ID_R10K,
++ AV_CODEC_ID_MXPEG,
++ AV_CODEC_ID_LAGARITH,
++ AV_CODEC_ID_PRORES,
++ AV_CODEC_ID_JV,
++ AV_CODEC_ID_DFA,
++ AV_CODEC_ID_WMV3IMAGE,
++ AV_CODEC_ID_VC1IMAGE,
++ AV_CODEC_ID_UTVIDEO,
++ AV_CODEC_ID_BMV_VIDEO,
++ AV_CODEC_ID_VBLE,
++ AV_CODEC_ID_DXTORY,
++#if FF_API_V408_CODECID
++ AV_CODEC_ID_V410,
++#endif
++ AV_CODEC_ID_XWD,
++ AV_CODEC_ID_CDXL,
++ AV_CODEC_ID_XBM,
++ AV_CODEC_ID_ZEROCODEC,
++ AV_CODEC_ID_MSS1,
++ AV_CODEC_ID_MSA1,
++ AV_CODEC_ID_TSCC2,
++ AV_CODEC_ID_MTS2,
++ AV_CODEC_ID_CLLC,
++ AV_CODEC_ID_MSS2,
++ AV_CODEC_ID_VP9,
++ AV_CODEC_ID_AIC,
++ AV_CODEC_ID_ESCAPE130,
++ AV_CODEC_ID_G2M,
++ AV_CODEC_ID_WEBP,
++ AV_CODEC_ID_HNM4_VIDEO,
++ AV_CODEC_ID_HEVC,
++#define AV_CODEC_ID_H265 AV_CODEC_ID_HEVC
++ AV_CODEC_ID_FIC,
++ AV_CODEC_ID_ALIAS_PIX,
++ AV_CODEC_ID_BRENDER_PIX,
++ AV_CODEC_ID_PAF_VIDEO,
++ AV_CODEC_ID_EXR,
++ AV_CODEC_ID_VP7,
++ AV_CODEC_ID_SANM,
++ AV_CODEC_ID_SGIRLE,
++ AV_CODEC_ID_MVC1,
++ AV_CODEC_ID_MVC2,
++ AV_CODEC_ID_HQX,
++ AV_CODEC_ID_TDSC,
++ AV_CODEC_ID_HQ_HQA,
++ AV_CODEC_ID_HAP,
++ AV_CODEC_ID_DDS,
++ AV_CODEC_ID_DXV,
++ AV_CODEC_ID_SCREENPRESSO,
++ AV_CODEC_ID_RSCC,
++ AV_CODEC_ID_AVS2,
++ AV_CODEC_ID_PGX,
++ AV_CODEC_ID_AVS3,
++ AV_CODEC_ID_MSP2,
++ AV_CODEC_ID_VVC,
++#define AV_CODEC_ID_H266 AV_CODEC_ID_VVC
++ AV_CODEC_ID_Y41P,
++ AV_CODEC_ID_AVRP,
++ AV_CODEC_ID_012V,
++ AV_CODEC_ID_AVUI,
++ AV_CODEC_ID_TARGA_Y216,
++#if FF_API_V408_CODECID
++ AV_CODEC_ID_V308,
++ AV_CODEC_ID_V408,
++#endif
++ AV_CODEC_ID_YUV4,
++ AV_CODEC_ID_AVRN,
++ AV_CODEC_ID_CPIA,
++ AV_CODEC_ID_XFACE,
++ AV_CODEC_ID_SNOW,
++ AV_CODEC_ID_SMVJPEG,
++ AV_CODEC_ID_APNG,
++ AV_CODEC_ID_DAALA,
++ AV_CODEC_ID_CFHD,
++ AV_CODEC_ID_TRUEMOTION2RT,
++ AV_CODEC_ID_M101,
++ AV_CODEC_ID_MAGICYUV,
++ AV_CODEC_ID_SHEERVIDEO,
++ AV_CODEC_ID_YLC,
++ AV_CODEC_ID_PSD,
++ AV_CODEC_ID_PIXLET,
++ AV_CODEC_ID_SPEEDHQ,
++ AV_CODEC_ID_FMVC,
++ AV_CODEC_ID_SCPR,
++ AV_CODEC_ID_CLEARVIDEO,
++ AV_CODEC_ID_XPM,
++ AV_CODEC_ID_AV1,
++ AV_CODEC_ID_BITPACKED,
++ AV_CODEC_ID_MSCC,
++ AV_CODEC_ID_SRGC,
++ AV_CODEC_ID_SVG,
++ AV_CODEC_ID_GDV,
++ AV_CODEC_ID_FITS,
++ AV_CODEC_ID_IMM4,
++ AV_CODEC_ID_PROSUMER,
++ AV_CODEC_ID_MWSC,
++ AV_CODEC_ID_WCMV,
++ AV_CODEC_ID_RASC,
++ AV_CODEC_ID_HYMT,
++ AV_CODEC_ID_ARBC,
++ AV_CODEC_ID_AGM,
++ AV_CODEC_ID_LSCR,
++ AV_CODEC_ID_VP4,
++ AV_CODEC_ID_IMM5,
++ AV_CODEC_ID_MVDV,
++ AV_CODEC_ID_MVHA,
++ AV_CODEC_ID_CDTOONS,
++ AV_CODEC_ID_MV30,
++ AV_CODEC_ID_NOTCHLC,
++ AV_CODEC_ID_PFM,
++ AV_CODEC_ID_MOBICLIP,
++ AV_CODEC_ID_PHOTOCD,
++ AV_CODEC_ID_IPU,
++ AV_CODEC_ID_ARGO,
++ AV_CODEC_ID_CRI,
++ AV_CODEC_ID_SIMBIOSIS_IMX,
++ AV_CODEC_ID_SGA_VIDEO,
++ AV_CODEC_ID_GEM,
++ AV_CODEC_ID_VBN,
++ AV_CODEC_ID_JPEGXL,
++ AV_CODEC_ID_QOI,
++ AV_CODEC_ID_PHM,
++ AV_CODEC_ID_RADIANCE_HDR,
++ AV_CODEC_ID_WBMP,
++ AV_CODEC_ID_MEDIA100,
++ AV_CODEC_ID_VQC,
++ AV_CODEC_ID_PDV,
++ AV_CODEC_ID_EVC,
++ AV_CODEC_ID_RTV1,
++ AV_CODEC_ID_VMIX,
++ AV_CODEC_ID_LEAD,
++ AV_CODEC_ID_DNXUC,
++ AV_CODEC_ID_RV60,
++ AV_CODEC_ID_JPEGXL_ANIM,
++ AV_CODEC_ID_APV,
++ AV_CODEC_ID_PRORES_RAW,
++
++ /* various PCM "codecs" */
++    AV_CODEC_ID_FIRST_AUDIO = 0x10000,     ///< A dummy id pointing at the start of audio codecs
++ AV_CODEC_ID_PCM_S16LE = 0x10000,
++ AV_CODEC_ID_PCM_S16BE,
++ AV_CODEC_ID_PCM_U16LE,
++ AV_CODEC_ID_PCM_U16BE,
++ AV_CODEC_ID_PCM_S8,
++ AV_CODEC_ID_PCM_U8,
++ AV_CODEC_ID_PCM_MULAW,
++ AV_CODEC_ID_PCM_ALAW,
++ AV_CODEC_ID_PCM_S32LE,
++ AV_CODEC_ID_PCM_S32BE,
++ AV_CODEC_ID_PCM_U32LE,
++ AV_CODEC_ID_PCM_U32BE,
++ AV_CODEC_ID_PCM_S24LE,
++ AV_CODEC_ID_PCM_S24BE,
++ AV_CODEC_ID_PCM_U24LE,
++ AV_CODEC_ID_PCM_U24BE,
++ AV_CODEC_ID_PCM_S24DAUD,
++ AV_CODEC_ID_PCM_ZORK,
++ AV_CODEC_ID_PCM_S16LE_PLANAR,
++ AV_CODEC_ID_PCM_DVD,
++ AV_CODEC_ID_PCM_F32BE,
++ AV_CODEC_ID_PCM_F32LE,
++ AV_CODEC_ID_PCM_F64BE,
++ AV_CODEC_ID_PCM_F64LE,
++ AV_CODEC_ID_PCM_BLURAY,
++ AV_CODEC_ID_PCM_LXF,
++ AV_CODEC_ID_S302M,
++ AV_CODEC_ID_PCM_S8_PLANAR,
++ AV_CODEC_ID_PCM_S24LE_PLANAR,
++ AV_CODEC_ID_PCM_S32LE_PLANAR,
++ AV_CODEC_ID_PCM_S16BE_PLANAR,
++ AV_CODEC_ID_PCM_S64LE,
++ AV_CODEC_ID_PCM_S64BE,
++ AV_CODEC_ID_PCM_F16LE,
++ AV_CODEC_ID_PCM_F24LE,
++ AV_CODEC_ID_PCM_VIDC,
++ AV_CODEC_ID_PCM_SGA,
++
++ /* various ADPCM codecs */
++ AV_CODEC_ID_ADPCM_IMA_QT = 0x11000,
++ AV_CODEC_ID_ADPCM_IMA_WAV,
++ AV_CODEC_ID_ADPCM_IMA_DK3,
++ AV_CODEC_ID_ADPCM_IMA_DK4,
++ AV_CODEC_ID_ADPCM_IMA_WS,
++ AV_CODEC_ID_ADPCM_IMA_SMJPEG,
++ AV_CODEC_ID_ADPCM_MS,
++ AV_CODEC_ID_ADPCM_4XM,
++ AV_CODEC_ID_ADPCM_XA,
++ AV_CODEC_ID_ADPCM_ADX,
++ AV_CODEC_ID_ADPCM_EA,
++ AV_CODEC_ID_ADPCM_G726,
++ AV_CODEC_ID_ADPCM_CT,
++ AV_CODEC_ID_ADPCM_SWF,
++ AV_CODEC_ID_ADPCM_YAMAHA,
++ AV_CODEC_ID_ADPCM_SBPRO_4,
++ AV_CODEC_ID_ADPCM_SBPRO_3,
++ AV_CODEC_ID_ADPCM_SBPRO_2,
++ AV_CODEC_ID_ADPCM_THP,
++ AV_CODEC_ID_ADPCM_IMA_AMV,
++ AV_CODEC_ID_ADPCM_EA_R1,
++ AV_CODEC_ID_ADPCM_EA_R3,
++ AV_CODEC_ID_ADPCM_EA_R2,
++ AV_CODEC_ID_ADPCM_IMA_EA_SEAD,
++ AV_CODEC_ID_ADPCM_IMA_EA_EACS,
++ AV_CODEC_ID_ADPCM_EA_XAS,
++ AV_CODEC_ID_ADPCM_EA_MAXIS_XA,
++ AV_CODEC_ID_ADPCM_IMA_ISS,
++ AV_CODEC_ID_ADPCM_G722,
++ AV_CODEC_ID_ADPCM_IMA_APC,
++ AV_CODEC_ID_ADPCM_VIMA,
++ AV_CODEC_ID_ADPCM_AFC,
++ AV_CODEC_ID_ADPCM_IMA_OKI,
++ AV_CODEC_ID_ADPCM_DTK,
++ AV_CODEC_ID_ADPCM_IMA_RAD,
++ AV_CODEC_ID_ADPCM_G726LE,
++ AV_CODEC_ID_ADPCM_THP_LE,
++ AV_CODEC_ID_ADPCM_PSX,
++ AV_CODEC_ID_ADPCM_AICA,
++ AV_CODEC_ID_ADPCM_IMA_DAT4,
++ AV_CODEC_ID_ADPCM_MTAF,
++ AV_CODEC_ID_ADPCM_AGM,
++ AV_CODEC_ID_ADPCM_ARGO,
++ AV_CODEC_ID_ADPCM_IMA_SSI,
++ AV_CODEC_ID_ADPCM_ZORK,
++ AV_CODEC_ID_ADPCM_IMA_APM,
++ AV_CODEC_ID_ADPCM_IMA_ALP,
++ AV_CODEC_ID_ADPCM_IMA_MTF,
++ AV_CODEC_ID_ADPCM_IMA_CUNNING,
++ AV_CODEC_ID_ADPCM_IMA_MOFLEX,
++ AV_CODEC_ID_ADPCM_IMA_ACORN,
++ AV_CODEC_ID_ADPCM_XMD,
++ AV_CODEC_ID_ADPCM_IMA_XBOX,
++ AV_CODEC_ID_ADPCM_SANYO,
++
++ /* AMR */
++ AV_CODEC_ID_AMR_NB = 0x12000,
++ AV_CODEC_ID_AMR_WB,
++
++ /* RealAudio codecs*/
++ AV_CODEC_ID_RA_144 = 0x13000,
++ AV_CODEC_ID_RA_288,
++
++ /* various DPCM codecs */
++ AV_CODEC_ID_ROQ_DPCM = 0x14000,
++ AV_CODEC_ID_INTERPLAY_DPCM,
++ AV_CODEC_ID_XAN_DPCM,
++ AV_CODEC_ID_SOL_DPCM,
++ AV_CODEC_ID_SDX2_DPCM,
++ AV_CODEC_ID_GREMLIN_DPCM,
++ AV_CODEC_ID_DERF_DPCM,
++ AV_CODEC_ID_WADY_DPCM,
++ AV_CODEC_ID_CBD2_DPCM,
++
++ /* audio codecs */
++ AV_CODEC_ID_MP2 = 0x15000,
++    AV_CODEC_ID_MP3, ///< preferred ID for decoding MPEG audio layer 1, 2 or 3
++ AV_CODEC_ID_AAC,
++ AV_CODEC_ID_AC3,
++ AV_CODEC_ID_DTS,
++ AV_CODEC_ID_VORBIS,
++ AV_CODEC_ID_DVAUDIO,
++ AV_CODEC_ID_WMAV1,
++ AV_CODEC_ID_WMAV2,
++ AV_CODEC_ID_MACE3,
++ AV_CODEC_ID_MACE6,
++ AV_CODEC_ID_VMDAUDIO,
++ AV_CODEC_ID_FLAC,
++ AV_CODEC_ID_MP3ADU,
++ AV_CODEC_ID_MP3ON4,
++ AV_CODEC_ID_SHORTEN,
++ AV_CODEC_ID_ALAC,
++ AV_CODEC_ID_WESTWOOD_SND1,
++ AV_CODEC_ID_GSM, ///< as in Berlin toast format
++ AV_CODEC_ID_QDM2,
++ AV_CODEC_ID_COOK,
++ AV_CODEC_ID_TRUESPEECH,
++ AV_CODEC_ID_TTA,
++ AV_CODEC_ID_SMACKAUDIO,
++ AV_CODEC_ID_QCELP,
++ AV_CODEC_ID_WAVPACK,
++ AV_CODEC_ID_DSICINAUDIO,
++ AV_CODEC_ID_IMC,
++ AV_CODEC_ID_MUSEPACK7,
++ AV_CODEC_ID_MLP,
++ AV_CODEC_ID_GSM_MS, /* as found in WAV */
++ AV_CODEC_ID_ATRAC3,
++ AV_CODEC_ID_APE,
++ AV_CODEC_ID_NELLYMOSER,
++ AV_CODEC_ID_MUSEPACK8,
++ AV_CODEC_ID_SPEEX,
++ AV_CODEC_ID_WMAVOICE,
++ AV_CODEC_ID_WMAPRO,
++ AV_CODEC_ID_WMALOSSLESS,
++ AV_CODEC_ID_ATRAC3P,
++ AV_CODEC_ID_EAC3,
++ AV_CODEC_ID_SIPR,
++ AV_CODEC_ID_MP1,
++ AV_CODEC_ID_TWINVQ,
++ AV_CODEC_ID_TRUEHD,
++ AV_CODEC_ID_MP4ALS,
++ AV_CODEC_ID_ATRAC1,
++ AV_CODEC_ID_BINKAUDIO_RDFT,
++ AV_CODEC_ID_BINKAUDIO_DCT,
++ AV_CODEC_ID_AAC_LATM,
++ AV_CODEC_ID_QDMC,
++ AV_CODEC_ID_CELT,
++ AV_CODEC_ID_G723_1,
++ AV_CODEC_ID_G729,
++ AV_CODEC_ID_8SVX_EXP,
++ AV_CODEC_ID_8SVX_FIB,
++ AV_CODEC_ID_BMV_AUDIO,
++ AV_CODEC_ID_RALF,
++ AV_CODEC_ID_IAC,
++ AV_CODEC_ID_ILBC,
++ AV_CODEC_ID_OPUS,
++ AV_CODEC_ID_COMFORT_NOISE,
++ AV_CODEC_ID_TAK,
++ AV_CODEC_ID_METASOUND,
++ AV_CODEC_ID_PAF_AUDIO,
++ AV_CODEC_ID_ON2AVC,
++ AV_CODEC_ID_DSS_SP,
++ AV_CODEC_ID_CODEC2,
++ AV_CODEC_ID_FFWAVESYNTH,
++ AV_CODEC_ID_SONIC,
++ AV_CODEC_ID_SONIC_LS,
++ AV_CODEC_ID_EVRC,
++ AV_CODEC_ID_SMV,
++ AV_CODEC_ID_DSD_LSBF,
++ AV_CODEC_ID_DSD_MSBF,
++ AV_CODEC_ID_DSD_LSBF_PLANAR,
++ AV_CODEC_ID_DSD_MSBF_PLANAR,
++ AV_CODEC_ID_4GV,
++ AV_CODEC_ID_INTERPLAY_ACM,
++ AV_CODEC_ID_XMA1,
++ AV_CODEC_ID_XMA2,
++ AV_CODEC_ID_DST,
++ AV_CODEC_ID_ATRAC3AL,
++ AV_CODEC_ID_ATRAC3PAL,
++ AV_CODEC_ID_DOLBY_E,
++ AV_CODEC_ID_APTX,
++ AV_CODEC_ID_APTX_HD,
++ AV_CODEC_ID_SBC,
++ AV_CODEC_ID_ATRAC9,
++ AV_CODEC_ID_HCOM,
++ AV_CODEC_ID_ACELP_KELVIN,
++ AV_CODEC_ID_MPEGH_3D_AUDIO,
++ AV_CODEC_ID_SIREN,
++ AV_CODEC_ID_HCA,
++ AV_CODEC_ID_FASTAUDIO,
++ AV_CODEC_ID_MSNSIREN,
++ AV_CODEC_ID_DFPWM,
++ AV_CODEC_ID_BONK,
++ AV_CODEC_ID_MISC4,
++ AV_CODEC_ID_APAC,
++ AV_CODEC_ID_FTR,
++ AV_CODEC_ID_WAVARC,
++ AV_CODEC_ID_RKA,
++ AV_CODEC_ID_AC4,
++ AV_CODEC_ID_OSQ,
++ AV_CODEC_ID_QOA,
++ AV_CODEC_ID_LC3,
++ AV_CODEC_ID_G728,
++
++ /* subtitle codecs */
++    AV_CODEC_ID_FIRST_SUBTITLE = 0x17000,          ///< A dummy ID pointing at the start of subtitle codecs.
++ AV_CODEC_ID_DVD_SUBTITLE = 0x17000,
++ AV_CODEC_ID_DVB_SUBTITLE,
++ AV_CODEC_ID_TEXT, ///< raw UTF-8 text
++ AV_CODEC_ID_XSUB,
++ AV_CODEC_ID_SSA,
++ AV_CODEC_ID_MOV_TEXT,
++ AV_CODEC_ID_HDMV_PGS_SUBTITLE,
++ AV_CODEC_ID_DVB_TELETEXT,
++ AV_CODEC_ID_SRT,
++ AV_CODEC_ID_MICRODVD,
++ AV_CODEC_ID_EIA_608,
++ AV_CODEC_ID_JACOSUB,
++ AV_CODEC_ID_SAMI,
++ AV_CODEC_ID_REALTEXT,
++ AV_CODEC_ID_STL,
++ AV_CODEC_ID_SUBVIEWER1,
++ AV_CODEC_ID_SUBVIEWER,
++ AV_CODEC_ID_SUBRIP,
++ AV_CODEC_ID_WEBVTT,
++ AV_CODEC_ID_MPL2,
++ AV_CODEC_ID_VPLAYER,
++ AV_CODEC_ID_PJS,
++ AV_CODEC_ID_ASS,
++ AV_CODEC_ID_HDMV_TEXT_SUBTITLE,
++ AV_CODEC_ID_TTML,
++ AV_CODEC_ID_ARIB_CAPTION,
++ AV_CODEC_ID_IVTV_VBI,
++
++ /* other specific kind of codecs (generally used for attachments) */
++    AV_CODEC_ID_FIRST_UNKNOWN = 0x18000,           ///< A dummy ID pointing at the start of various fake codecs.
++    AV_CODEC_ID_TTF = 0x18000,
++
++    AV_CODEC_ID_SCTE_35, ///< Contain timestamp estimated through PCR of program stream.
++ AV_CODEC_ID_EPG,
++ AV_CODEC_ID_BINTEXT,
++ AV_CODEC_ID_XBIN,
++ AV_CODEC_ID_IDF,
++ AV_CODEC_ID_OTF,
++ AV_CODEC_ID_SMPTE_KLV,
++ AV_CODEC_ID_DVD_NAV,
++ AV_CODEC_ID_TIMED_ID3,
++ AV_CODEC_ID_BIN_DATA,
++ AV_CODEC_ID_SMPTE_2038,
++ AV_CODEC_ID_LCEVC,
++ AV_CODEC_ID_SMPTE_436M_ANC,
++
++
++    AV_CODEC_ID_PROBE = 0x19000, ///< codec_id is not known (like AV_CODEC_ID_NONE) but lavf should attempt to identify it
++
++    AV_CODEC_ID_MPEG2TS = 0x20000, /**< _FAKE_ codec to indicate a raw MPEG-2 TS
++                                    * stream (only used by libavformat) */
++    AV_CODEC_ID_MPEG4SYSTEMS = 0x20001, /**< _FAKE_ codec to indicate a MPEG-4 Systems
++                                         * stream (only used by libavformat) */
++    AV_CODEC_ID_FFMETADATA = 0x21000,   ///< Dummy codec for streams containing only metadata information.
++    AV_CODEC_ID_WRAPPED_AVFRAME = 0x21001, ///< Passthrough codec, AVFrames wrapped in AVPacket
++ /**
++ * Dummy null video codec, useful mainly for development and debugging.
++ * Null encoder/decoder discard all input and never return any output.
++ */
++ AV_CODEC_ID_VNULL,
++ /**
++ * Dummy null audio codec, useful mainly for development and debugging.
++ * Null encoder/decoder discard all input and never return any output.
++ */
++ AV_CODEC_ID_ANULL,
++};
++
++/**
++ * Get the type of the given codec.
++ */
++enum AVMediaType avcodec_get_type(enum AVCodecID codec_id);
++
++/**
++ * Get the name of a codec.
++ * @return a static string identifying the codec; never NULL
++ */
++const char *avcodec_get_name(enum AVCodecID id);
++
++/**
++ * Return codec bits per sample.
++ *
++ * @param[in] codec_id the codec
++ * @return Number of bits per sample or zero if unknown for the given codec.
++ */
++int av_get_bits_per_sample(enum AVCodecID codec_id);
++
++/**
++ * Return codec bits per sample.
++ * Only return non-zero if the bits per sample is exactly correct, not an
++ * approximation.
++ *
++ * @param[in] codec_id the codec
++ * @return Number of bits per sample or zero if unknown for the given codec.
++ */
++int av_get_exact_bits_per_sample(enum AVCodecID codec_id);
++
++/**
++ * Return a name for the specified profile, if available.
++ *
++ * @param codec_id the ID of the codec to which the requested profile belongs
++ * @param profile the profile value for which a name is requested
++ * @return A name for the profile if found, NULL otherwise.
++ *
++ * @note unlike av_get_profile_name(), which searches a list of profiles
++ * supported by a specific decoder or encoder implementation, this
++ * function searches the list of profiles from the AVCodecDescriptor
++ */
++const char *avcodec_profile_name(enum AVCodecID codec_id, int profile);
++
++/**
++ * Return the PCM codec associated with a sample format.
++ * @param be endianness, 0 for little, 1 for big,
++ * -1 (or anything else) for native
++ * @return AV_CODEC_ID_PCM_* or AV_CODEC_ID_NONE
++ */
++enum AVCodecID av_get_pcm_codec(enum AVSampleFormat fmt, int be);
++
++/**
++ * @}
++ */
++
++#endif // AVCODEC_CODEC_ID_H
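Again purely illustrative and not part of the vendored file: the codec-ID helpers declared above can be used without instantiating any codec, for example:
````
#include <libavcodec/avcodec.h>
#include <stdio.h>

/* Print a few facts that are derivable from a codec ID alone. */
static void describe_codec_id(enum AVCodecID id)
{
    printf("name=%s type=%d bits/sample=%d\n",
           avcodec_get_name(id),            /* never NULL */
           (int)avcodec_get_type(id),
           av_get_bits_per_sample(id));     /* 0 when unknown */

    /* Map a raw sample format back to its PCM codec ID, e.g. for muxing. */
    enum AVCodecID pcm = av_get_pcm_codec(AV_SAMPLE_FMT_S16, 0 /* little-endian */);
    printf("s16le is %s\n", avcodec_get_name(pcm));
}
````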
+diff --git a/dom/media/platforms/ffmpeg/ffmpeg62/include/libavcodec/codec_par.h b/dom/media/platforms/ffmpeg/ffmpeg62/include/libavcodec/codec_par.h
+new file mode 100644
+--- /dev/null
++++ b/dom/media/platforms/ffmpeg/ffmpeg62/include/libavcodec/codec_par.h
+@@ -0,0 +1,248 @@
++/*
++ * Codec parameters public API
++ *
++ * This file is part of FFmpeg.
++ *
++ * FFmpeg is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU Lesser General Public
++ * License as published by the Free Software Foundation; either
++ * version 2.1 of the License, or (at your option) any later version.
++ *
++ * FFmpeg is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with FFmpeg; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
++ */
++
++#ifndef AVCODEC_CODEC_PAR_H
++#define AVCODEC_CODEC_PAR_H
++
++#include <stdint.h>
++
++#include "libavutil/avutil.h"
++#include "libavutil/channel_layout.h"
++#include "libavutil/rational.h"
++#include "libavutil/pixfmt.h"
++
++#include "codec_id.h"
++#include "defs.h"
++#include "packet.h"
++
++/**
++ * @addtogroup lavc_core
++ * @{
++ */
++
++/**
++ * This struct describes the properties of an encoded stream.
++ *
++ * sizeof(AVCodecParameters) is not a part of the public ABI, this struct must
++ * be allocated with avcodec_parameters_alloc() and freed with
++ * avcodec_parameters_free().
++ */
++typedef struct AVCodecParameters {
++ /**
++ * General type of the encoded data.
++ */
++ enum AVMediaType codec_type;
++ /**
++ * Specific type of the encoded data (the codec used).
++ */
++ enum AVCodecID codec_id;
++ /**
++     * Additional information about the codec (corresponds to the AVI FOURCC).
++ */
++ uint32_t codec_tag;
++
++ /**
++     * Extra binary data needed for initializing the decoder, codec-dependent.
++ *
++ * Must be allocated with av_malloc() and will be freed by
++ * avcodec_parameters_free(). The allocated size of extradata must be at
++ * least extradata_size + AV_INPUT_BUFFER_PADDING_SIZE, with the padding
++ * bytes zeroed.
++ */
++ uint8_t *extradata;
++ /**
++ * Size of the extradata content in bytes.
++ */
++ int extradata_size;
++
++ /**
++ * Additional data associated with the entire stream.
++ *
++ * Should be allocated with av_packet_side_data_new() or
++     * av_packet_side_data_add(), and will be freed by avcodec_parameters_free().
++ */
++ AVPacketSideData *coded_side_data;
++
++ /**
++ * Amount of entries in @ref coded_side_data.
++ */
++ int nb_coded_side_data;
++
++ /**
++     * - video: the pixel format, the value corresponds to enum AVPixelFormat.
++     * - audio: the sample format, the value corresponds to enum AVSampleFormat.
++ */
++ int format;
++
++ /**
++ * The average bitrate of the encoded data (in bits per second).
++ */
++ int64_t bit_rate;
++
++ /**
++ * The number of bits per sample in the codedwords.
++ *
++     * This is basically the bitrate per sample. It is mandatory for a bunch of
++     * formats to actually decode them. It's the number of bits for one sample in
++     * the actual coded bitstream.
++     *
++     * This could be for example 4 for ADPCM
++     * For PCM formats this matches bits_per_raw_sample
++     * Can be 0
++     */
++    int bits_per_coded_sample;
++
++    /**
++     * This is the number of valid bits in each output sample. If the
++     * sample format has more bits, the least significant bits are additional
++     * padding bits, which are always 0. Use right shifts to reduce the sample
++     * to its actual size. For example, audio formats with 24 bit samples will
++     * have bits_per_raw_sample set to 24, and format set to AV_SAMPLE_FMT_S32.
++     * To get the original sample use "(int32_t)sample >> 8".
++ *
++ * For ADPCM this might be 12 or 16 or similar
++ * Can be 0
++ */
++ int bits_per_raw_sample;
++
++ /**
++ * Codec-specific bitstream restrictions that the stream conforms to.
++ */
++ int profile;
++ int level;
++
++ /**
++ * Video only. The dimensions of the video frame in pixels.
++ */
++ int width;
++ int height;
++
++ /**
++ * Video only. The aspect ratio (width / height) which a single pixel
++ * should have when displayed.
++ *
++ * When the aspect ratio is unknown / undefined, the numerator should be
++ * set to 0 (the denominator may have any value).
++ */
++ AVRational sample_aspect_ratio;
++
++ /**
++     * Video only. Number of frames per second, for streams with constant frame
++     * durations. Should be set to { 0, 1 } when some frames have differing
++     * durations or if the value is not known.
++     *
++     * @note This field corresponds to values that are stored in codec-level
++     * headers and is typically overridden by container/transport-layer
++     * timestamps, when available. It should thus be used only as a last resort,
++     * when no higher-level timing information is available.
++ */
++ AVRational framerate;
++
++ /**
++ * Video only. The order of the fields in interlaced video.
++ */
++ enum AVFieldOrder field_order;
++
++ /**
++ * Video only. Additional colorspace characteristics.
++ */
++ enum AVColorRange color_range;
++ enum AVColorPrimaries color_primaries;
++ enum AVColorTransferCharacteristic color_trc;
++ enum AVColorSpace color_space;
++ enum AVChromaLocation chroma_location;
++
++ /**
++ * Video only. Number of delayed frames.
++ */
++ int video_delay;
++
++ /**
++ * Audio only. The channel layout and number of channels.
++ */
++ AVChannelLayout ch_layout;
++ /**
++ * Audio only. The number of audio samples per second.
++ */
++ int sample_rate;
++ /**
++     * Audio only. The number of bytes per coded audio frame, required by some
++     * formats.
++ *
++ * Corresponds to nBlockAlign in WAVEFORMATEX.
++ */
++ int block_align;
++ /**
++     * Audio only. Audio frame size, if known. Required by some formats to be static.
++ */
++ int frame_size;
++
++ /**
++     * Audio only. The amount of padding (in samples) inserted by the encoder at
++     * the beginning of the audio. I.e. this number of leading decoded samples
++     * must be discarded by the caller to get the original audio without leading
++     * padding.
++ */
++ int initial_padding;
++ /**
++     * Audio only. The amount of padding (in samples) appended by the encoder to
++     * the end of the audio. I.e. this number of decoded samples must be
++     * discarded by the caller from the end of the stream to get the original
++     * audio without any trailing padding.
++ */
++ int trailing_padding;
++ /**
++ * Audio only. Number of samples to skip after a discontinuity.
++ */
++ int seek_preroll;
++} AVCodecParameters;
++
++/**
++ * Allocate a new AVCodecParameters and set its fields to default values
++ * (unknown/invalid/0). The returned struct must be freed with
++ * avcodec_parameters_free().
++ */
++AVCodecParameters *avcodec_parameters_alloc(void);
++
++/**
++ * Free an AVCodecParameters instance and everything associated with it and
++ * write NULL to the supplied pointer.
++ */
++void avcodec_parameters_free(AVCodecParameters **par);
++
++/**
++ * Copy the contents of src to dst. Any allocated fields in dst are freed and
++ * replaced with newly allocated duplicates of the corresponding fields in src.
++ *
++ * @return >= 0 on success, a negative AVERROR code on failure.
++ */
++int avcodec_parameters_copy(AVCodecParameters *dst, const AVCodecParameters *src);
++
++/**
++ * This function is the same as av_get_audio_frame_duration(), except it works
++ * with AVCodecParameters instead of an AVCodecContext.
++ */
++int av_get_audio_frame_duration2(AVCodecParameters *par, int frame_bytes);
++
++/**
++ * @}
++ */
++
++#endif // AVCODEC_CODEC_PAR_H
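For context only (not part of the patch): the alloc/copy/free trio above is the usual way to take an independently owned snapshot of a stream's parameters; a minimal sketch:
````
#include <libavcodec/avcodec.h>

/* Deep-copy codec parameters, e.g. to keep them after the demuxer that
 * produced "src" has been closed.  Returns NULL on allocation failure. */
static AVCodecParameters *snapshot_params(const AVCodecParameters *src)
{
    AVCodecParameters *dst = avcodec_parameters_alloc();
    if (!dst)
        return NULL;

    if (avcodec_parameters_copy(dst, src) < 0) {  /* duplicates extradata too */
        avcodec_parameters_free(&dst);            /* resets dst to NULL */
        return NULL;
    }
    return dst;
}
````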
+diff --git a/dom/media/platforms/ffmpeg/ffmpeg62/include/libavcodec/defs.h b/dom/media/platforms/ffmpeg/ffmpeg62/include/libavcodec/defs.h
+new file mode 100644
+--- /dev/null
++++ b/dom/media/platforms/ffmpeg/ffmpeg62/include/libavcodec/defs.h
+@@ -0,0 +1,362 @@
++/*
++ *
++ * This file is part of FFmpeg.
++ *
++ * FFmpeg is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU Lesser General Public
++ * License as published by the Free Software Foundation; either
++ * version 2.1 of the License, or (at your option) any later version.
++ *
++ * FFmpeg is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with FFmpeg; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
++ */
++
++#ifndef AVCODEC_DEFS_H
++#define AVCODEC_DEFS_H
++
++/**
++ * @file
++ * @ingroup libavc
++ * Misc types and constants that do not belong anywhere else.
++ */
++
++#include <stdint.h>
++#include <stdlib.h>
++
++/**
++ * @ingroup lavc_decoding
++ * Required number of additionally allocated bytes at the end of the input bitstream for decoding.
++ * This is mainly needed because some optimized bitstream readers read
++ * 32 or 64 bit at once and could read over the end.<br>
++ * Note: If the first 23 bits of the additional bytes are not 0, then damaged
++ * MPEG bitstreams could cause overread and segfault.
++ */
++#define AV_INPUT_BUFFER_PADDING_SIZE 64
++
++/**
++ * Verify checksums embedded in the bitstream (could be of either encoded or
++ * decoded data, depending on the format) and print an error message on mismatch.
++ * If AV_EF_EXPLODE is also set, a mismatching checksum will result in the
++ * decoder/demuxer returning an error.
++ */
++#define AV_EF_CRCCHECK (1<<0)
++#define AV_EF_BITSTREAM (1<<1)          ///< detect bitstream specification deviations
++#define AV_EF_BUFFER    (1<<2)          ///< detect improper bitstream length
++#define AV_EF_EXPLODE   (1<<3)          ///< abort decoding on minor error detection
++
++#define AV_EF_IGNORE_ERR (1<<15) ///< ignore errors and continue
++#define AV_EF_CAREFUL    (1<<16)        ///< consider things that violate the spec, are fast to calculate and have not been seen in the wild as errors
++#define AV_EF_COMPLIANT  (1<<17)        ///< consider all spec non compliances as errors
++#define AV_EF_AGGRESSIVE (1<<18)        ///< consider things that a sane encoder/muxer should not do as an error
++
++#define FF_COMPLIANCE_VERY_STRICT   2 ///< Strictly conform to an older more strict version of the spec or reference software.
++#define FF_COMPLIANCE_STRICT        1 ///< Strictly conform to all the things in the spec no matter what consequences.
++#define FF_COMPLIANCE_NORMAL        0
++#define FF_COMPLIANCE_UNOFFICIAL   -1 ///< Allow unofficial extensions
++#define FF_COMPLIANCE_EXPERIMENTAL -2 ///< Allow nonstandardized experimental things.
++
++
++#define AV_PROFILE_UNKNOWN -99
++#define AV_PROFILE_RESERVED -100
++
++#define AV_PROFILE_AAC_MAIN 0
++#define AV_PROFILE_AAC_LOW 1
++#define AV_PROFILE_AAC_SSR 2
++#define AV_PROFILE_AAC_LTP 3
++#define AV_PROFILE_AAC_HE 4
++#define AV_PROFILE_AAC_HE_V2 28
++#define AV_PROFILE_AAC_LD 22
++#define AV_PROFILE_AAC_ELD 38
++#define AV_PROFILE_AAC_USAC 41
++#define AV_PROFILE_MPEG2_AAC_LOW 128
++#define AV_PROFILE_MPEG2_AAC_HE 131
++
++#define AV_PROFILE_DNXHD 0
++#define AV_PROFILE_DNXHR_LB 1
++#define AV_PROFILE_DNXHR_SQ 2
++#define AV_PROFILE_DNXHR_HQ 3
++#define AV_PROFILE_DNXHR_HQX 4
++#define AV_PROFILE_DNXHR_444 5
++
++#define AV_PROFILE_DTS 20
++#define AV_PROFILE_DTS_ES 30
++#define AV_PROFILE_DTS_96_24 40
++#define AV_PROFILE_DTS_HD_HRA 50
++#define AV_PROFILE_DTS_HD_MA 60
++#define AV_PROFILE_DTS_EXPRESS 70
++#define AV_PROFILE_DTS_HD_MA_X 61
++#define AV_PROFILE_DTS_HD_MA_X_IMAX 62
++
++#define AV_PROFILE_EAC3_DDP_ATMOS 30
++
++#define AV_PROFILE_TRUEHD_ATMOS 30
++
++#define AV_PROFILE_MPEG2_422 0
++#define AV_PROFILE_MPEG2_HIGH 1
++#define AV_PROFILE_MPEG2_SS 2
++#define AV_PROFILE_MPEG2_SNR_SCALABLE 3
++#define AV_PROFILE_MPEG2_MAIN 4
++#define AV_PROFILE_MPEG2_SIMPLE 5
++
++#define AV_PROFILE_H264_CONSTRAINED (1<<9) // 8+1; constraint_set1_flag
++#define AV_PROFILE_H264_INTRA (1<<11) // 8+3; constraint_set3_flag
++
++#define AV_PROFILE_H264_BASELINE 66
++#define AV_PROFILE_H264_CONSTRAINED_BASELINE (66|AV_PROFILE_H264_CONSTRAINED)
++#define AV_PROFILE_H264_MAIN 77
++#define AV_PROFILE_H264_EXTENDED 88
++#define AV_PROFILE_H264_HIGH 100
++#define AV_PROFILE_H264_HIGH_10 110
++#define AV_PROFILE_H264_HIGH_10_INTRA (110|AV_PROFILE_H264_INTRA)
++#define AV_PROFILE_H264_MULTIVIEW_HIGH 118
++#define AV_PROFILE_H264_HIGH_422 122
++#define AV_PROFILE_H264_HIGH_422_INTRA (122|AV_PROFILE_H264_INTRA)
++#define AV_PROFILE_H264_STEREO_HIGH 128
++#define AV_PROFILE_H264_HIGH_444 144
++#define AV_PROFILE_H264_HIGH_444_PREDICTIVE 244
++#define AV_PROFILE_H264_HIGH_444_INTRA (244|AV_PROFILE_H264_INTRA)
++#define AV_PROFILE_H264_CAVLC_444 44
++
++#define AV_PROFILE_VC1_SIMPLE 0
++#define AV_PROFILE_VC1_MAIN 1
++#define AV_PROFILE_VC1_COMPLEX 2
++#define AV_PROFILE_VC1_ADVANCED 3
++
++#define AV_PROFILE_MPEG4_SIMPLE 0
++#define AV_PROFILE_MPEG4_SIMPLE_SCALABLE 1
++#define AV_PROFILE_MPEG4_CORE 2
++#define AV_PROFILE_MPEG4_MAIN 3
++#define AV_PROFILE_MPEG4_N_BIT 4
++#define AV_PROFILE_MPEG4_SCALABLE_TEXTURE 5
++#define AV_PROFILE_MPEG4_SIMPLE_FACE_ANIMATION 6
++#define AV_PROFILE_MPEG4_BASIC_ANIMATED_TEXTURE 7
++#define AV_PROFILE_MPEG4_HYBRID 8
++#define AV_PROFILE_MPEG4_ADVANCED_REAL_TIME 9
++#define AV_PROFILE_MPEG4_CORE_SCALABLE 10
++#define AV_PROFILE_MPEG4_ADVANCED_CODING 11
++#define AV_PROFILE_MPEG4_ADVANCED_CORE 12
++#define AV_PROFILE_MPEG4_ADVANCED_SCALABLE_TEXTURE 13
++#define AV_PROFILE_MPEG4_SIMPLE_STUDIO 14
++#define AV_PROFILE_MPEG4_ADVANCED_SIMPLE 15
++
++#define AV_PROFILE_JPEG2000_CSTREAM_RESTRICTION_0 1
++#define AV_PROFILE_JPEG2000_CSTREAM_RESTRICTION_1 2
++#define AV_PROFILE_JPEG2000_CSTREAM_NO_RESTRICTION 32768
++#define AV_PROFILE_JPEG2000_DCINEMA_2K 3
++#define AV_PROFILE_JPEG2000_DCINEMA_4K 4
++
++#define AV_PROFILE_VP9_0 0
++#define AV_PROFILE_VP9_1 1
++#define AV_PROFILE_VP9_2 2
++#define AV_PROFILE_VP9_3 3
++
++#define AV_PROFILE_HEVC_MAIN 1
++#define AV_PROFILE_HEVC_MAIN_10 2
++#define AV_PROFILE_HEVC_MAIN_STILL_PICTURE 3
++#define AV_PROFILE_HEVC_REXT 4
++#define AV_PROFILE_HEVC_MULTIVIEW_MAIN 6
++#define AV_PROFILE_HEVC_SCC 9
++
++#define AV_PROFILE_VVC_MAIN_10 1
++#define AV_PROFILE_VVC_MAIN_10_444 33
++
++#define AV_PROFILE_AV1_MAIN 0
++#define AV_PROFILE_AV1_HIGH 1
++#define AV_PROFILE_AV1_PROFESSIONAL 2
++
++#define AV_PROFILE_MJPEG_HUFFMAN_BASELINE_DCT 0xc0
++#define AV_PROFILE_MJPEG_HUFFMAN_EXTENDED_SEQUENTIAL_DCT 0xc1
++#define AV_PROFILE_MJPEG_HUFFMAN_PROGRESSIVE_DCT 0xc2
++#define AV_PROFILE_MJPEG_HUFFMAN_LOSSLESS 0xc3
++#define AV_PROFILE_MJPEG_JPEG_LS 0xf7
++
++#define AV_PROFILE_SBC_MSBC 1
++
++#define AV_PROFILE_PRORES_PROXY 0
++#define AV_PROFILE_PRORES_LT 1
++#define AV_PROFILE_PRORES_STANDARD 2
++#define AV_PROFILE_PRORES_HQ 3
++#define AV_PROFILE_PRORES_4444 4
++#define AV_PROFILE_PRORES_XQ 5
++
++#define AV_PROFILE_PRORES_RAW 0
++#define AV_PROFILE_PRORES_RAW_HQ 1
++
++#define AV_PROFILE_ARIB_PROFILE_A 0
++#define AV_PROFILE_ARIB_PROFILE_C 1
++
++#define AV_PROFILE_KLVA_SYNC 0
++#define AV_PROFILE_KLVA_ASYNC 1
++
++#define AV_PROFILE_EVC_BASELINE 0
++#define AV_PROFILE_EVC_MAIN 1
++
++#define AV_PROFILE_APV_422_10 33
++#define AV_PROFILE_APV_422_12 44
++#define AV_PROFILE_APV_444_10 55
++#define AV_PROFILE_APV_444_12 66
++#define AV_PROFILE_APV_4444_10 77
++#define AV_PROFILE_APV_4444_12 88
++#define AV_PROFILE_APV_400_10 99
++
++
++#define AV_LEVEL_UNKNOWN -99
++
++enum AVFieldOrder {
++ AV_FIELD_UNKNOWN,
++ AV_FIELD_PROGRESSIVE,
++ AV_FIELD_TT, ///< Top coded_first, top displayed first
++ AV_FIELD_BB, ///< Bottom coded first, bottom displayed first
++ AV_FIELD_TB, ///< Top coded first, bottom displayed first
++ AV_FIELD_BT, ///< Bottom coded first, top displayed first
++};
++
++/**
++ * @ingroup lavc_decoding
++ */
++enum AVDiscard{
++ /* We leave some space between them for extensions (drop some
++ * keyframes for intra-only or drop just some bidir frames). */
++ AVDISCARD_NONE =-16, ///< discard nothing
++    AVDISCARD_DEFAULT = 0, ///< discard useless packets like 0 size packets in avi
++ AVDISCARD_NONREF = 8, ///< discard all non reference
++ AVDISCARD_BIDIR = 16, ///< discard all bidirectional frames
++ AVDISCARD_NONINTRA= 24, ///< discard all non intra frames
++ AVDISCARD_NONKEY = 32, ///< discard all frames except keyframes
++ AVDISCARD_ALL = 48, ///< discard all
++};
++
++enum AVAudioServiceType {
++ AV_AUDIO_SERVICE_TYPE_MAIN = 0,
++ AV_AUDIO_SERVICE_TYPE_EFFECTS = 1,
++ AV_AUDIO_SERVICE_TYPE_VISUALLY_IMPAIRED = 2,
++ AV_AUDIO_SERVICE_TYPE_HEARING_IMPAIRED = 3,
++ AV_AUDIO_SERVICE_TYPE_DIALOGUE = 4,
++ AV_AUDIO_SERVICE_TYPE_COMMENTARY = 5,
++ AV_AUDIO_SERVICE_TYPE_EMERGENCY = 6,
++ AV_AUDIO_SERVICE_TYPE_VOICE_OVER = 7,
++ AV_AUDIO_SERVICE_TYPE_KARAOKE = 8,
++ AV_AUDIO_SERVICE_TYPE_NB , ///< Not part of ABI
++};
++
++/**
++ * Pan Scan area.
++ * This specifies the area which should be displayed.
++ * Note there may be multiple such areas for one frame.
++ */
++typedef struct AVPanScan {
++ /**
++ * id
++ * - encoding: Set by user.
++ * - decoding: Set by libavcodec.
++ */
++ int id;
++
++ /**
++ * width and height in 1/16 pel
++ * - encoding: Set by user.
++ * - decoding: Set by libavcodec.
++ */
++ int width;
++ int height;
++
++ /**
++ * position of the top left corner in 1/16 pel for up to 3 fields/frames
++ * - encoding: Set by user.
++ * - decoding: Set by libavcodec.
++ */
++ int16_t position[3][2];
++} AVPanScan;
++
++/**
++ * This structure describes the bitrate properties of an encoded bitstream. It
++ * roughly corresponds to a subset of the VBV parameters for MPEG-2 or HRD
++ * parameters for H.264/HEVC.
++ */
++typedef struct AVCPBProperties {
++ /**
++ * Maximum bitrate of the stream, in bits per second.
++ * Zero if unknown or unspecified.
++ */
++ int64_t max_bitrate;
++ /**
++ * Minimum bitrate of the stream, in bits per second.
++ * Zero if unknown or unspecified.
++ */
++ int64_t min_bitrate;
++ /**
++ * Average bitrate of the stream, in bits per second.
++ * Zero if unknown or unspecified.
++ */
++ int64_t avg_bitrate;
++
++ /**
++ * The size of the buffer to which the ratecontrol is applied, in bits.
++ * Zero if unknown or unspecified.
++ */
++ int64_t buffer_size;
++
++ /**
++     * The delay between the time the packet this structure is associated with
++     * is received and the time when it should be decoded, in periods of a 27MHz
++     * clock.
++ *
++ * UINT64_MAX when unknown or unspecified.
++ */
++ uint64_t vbv_delay;
++} AVCPBProperties;
++
++/**
++ * Allocate a CPB properties structure and initialize its fields to default
++ * values.
++ *
++ * @param size if non-NULL, the size of the allocated struct will be written
++ * here. This is useful for embedding it in side data.
++ *
++ * @return the newly allocated struct or NULL on failure
++ */
++AVCPBProperties *av_cpb_properties_alloc(size_t *size);
++
++/**
++ * This structure supplies correlation between a packet timestamp and a wall clock
++ * production time. The definition follows the Producer Reference Time ('prft')
++ * as defined in ISO/IEC 14496-12
++ */
++typedef struct AVProducerReferenceTime {
++ /**
++     * A UTC timestamp, in microseconds, since Unix epoch (e.g., av_gettime()).
++ */
++ int64_t wallclock;
++ int flags;
++} AVProducerReferenceTime;
++
++/**
++ * RTCP SR (Sender Report) information
++ *
++ * The received sender report information for an RTSP
++ * stream, exposed as AV_PKT_DATA_RTCP_SR side data.
++ */
++typedef struct AVRTCPSenderReport {
++ uint32_t ssrc; ///< Synchronization source identifier
++ uint64_t ntp_timestamp; ///< NTP time when the report was sent
++ uint32_t rtp_timestamp; ///< RTP time when the report was sent
++ uint32_t sender_nb_packets; ///< Total number of packets sent
++    uint32_t sender_nb_bytes;   ///< Total number of bytes sent (excluding headers or padding)
++} AVRTCPSenderReport;
++
++/**
++ * Encode extradata length to a buffer. Used by xiph codecs.
++ *
++ * @param s buffer to write to; must be at least (v/255+1) bytes long
++ * @param v size of extradata in bytes
++ * @return number of bytes written to the buffer.
++ */
++unsigned int av_xiphlacing(unsigned char *s, unsigned int v);
++
++#endif // AVCODEC_DEFS_H
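Illustrative only: the padding contract described by AV_INPUT_BUFFER_PADDING_SIZE above means raw bitstream buffers handed to libavcodec should be over-allocated and zero-padded, along these lines (helper name is made up):
````
#include <libavcodec/avcodec.h>
#include <stdlib.h>
#include <string.h>

/* Duplicate raw bitstream data with AV_INPUT_BUFFER_PADDING_SIZE extra,
 * zeroed bytes so optimized bitstream readers may safely overread. */
static uint8_t *dup_padded(const uint8_t *data, size_t size)
{
    uint8_t *buf = malloc(size + AV_INPUT_BUFFER_PADDING_SIZE);
    if (!buf)
        return NULL;
    memcpy(buf, data, size);
    memset(buf + size, 0, AV_INPUT_BUFFER_PADDING_SIZE);
    return buf;
}
````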
+diff --git a/dom/media/platforms/ffmpeg/ffmpeg62/include/libavcodec/packet.h b/dom/media/platforms/ffmpeg/ffmpeg62/include/libavcodec/packet.h
+new file mode 100644
+--- /dev/null
++++ b/dom/media/platforms/ffmpeg/ffmpeg62/include/libavcodec/packet.h
+@@ -0,0 +1,907 @@
++/*
++ * AVPacket public API
++ *
++ * This file is part of FFmpeg.
++ *
++ * FFmpeg is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU Lesser General Public
++ * License as published by the Free Software Foundation; either
++ * version 2.1 of the License, or (at your option) any later version.
++ *
++ * FFmpeg is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with FFmpeg; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
++ */
++
++#ifndef AVCODEC_PACKET_H
++#define AVCODEC_PACKET_H
++
++#include <stddef.h>
++#include <stdint.h>
++
++#include "libavutil/attributes.h"
++#include "libavutil/buffer.h"
++#include "libavutil/dict.h"
++#include "libavutil/rational.h"
++#include "libavutil/version.h"
++
++#include "libavcodec/version_major.h"
++
++/**
++ * @defgroup lavc_packet_side_data AVPacketSideData
++ *
++ * Types and functions for working with AVPacketSideData.
++ * @{
++ */
++enum AVPacketSideDataType {
++ /**
++     * An AV_PKT_DATA_PALETTE side data packet contains exactly AVPALETTE_SIZE
++     * bytes worth of palette. This side data signals that a new palette is
++ * present.
++ */
++ AV_PKT_DATA_PALETTE,
++
++ /**
++     * The AV_PKT_DATA_NEW_EXTRADATA is used to notify the codec or the format
++     * that the extradata buffer was changed and the receiving side should
++ * act upon it appropriately. The new extradata is embedded in the side
++ * data buffer and should be immediately used for processing the current
++ * frame or packet.
++ */
++ AV_PKT_DATA_NEW_EXTRADATA,
++
++ /**
++ * An AV_PKT_DATA_PARAM_CHANGE side data packet is laid out as follows:
++ * @code
++ * u32le param_flags
++ * if (param_flags & AV_SIDE_DATA_PARAM_CHANGE_SAMPLE_RATE)
++ * s32le sample_rate
++ * if (param_flags & AV_SIDE_DATA_PARAM_CHANGE_DIMENSIONS)
++ * s32le width
++ * s32le height
++ * @endcode
++ */
++ AV_PKT_DATA_PARAM_CHANGE,
++
++ /**
++ * An AV_PKT_DATA_H263_MB_INFO side data packet contains a number of
++ * structures with info about macroblocks relevant to splitting the
++     * packet into smaller packets on macroblock edges (e.g. as for RFC 2190).
++ * That is, it does not necessarily contain info about all macroblocks,
++ * as long as the distance between macroblocks in the info is smaller
++ * than the target payload size.
++ * Each MB info structure is 12 bytes, and is laid out as follows:
++ * @code
++ * u32le bit offset from the start of the packet
++ * u8 current quantizer at the start of the macroblock
++ * u8 GOB number
++ * u16le macroblock address within the GOB
++ * u8 horizontal MV predictor
++ * u8 vertical MV predictor
++ * u8 horizontal MV predictor for block number 3
++ * u8 vertical MV predictor for block number 3
++ * @endcode
++ */
++ AV_PKT_DATA_H263_MB_INFO,
++
++ /**
++ * This side data should be associated with an audio stream and contains
++ * ReplayGain information in form of the AVReplayGain struct.
++ */
++ AV_PKT_DATA_REPLAYGAIN,
++
++ /**
++     * This side data contains a 3x3 transformation matrix describing an affine
++     * transformation that needs to be applied to the decoded video frames for
++     * correct presentation.
++ *
++ * See libavutil/display.h for a detailed description of the data.
++ */
++ AV_PKT_DATA_DISPLAYMATRIX,
++
++ /**
++ * This side data should be associated with a video stream and contains
++ * Stereoscopic 3D information in form of the AVStereo3D struct.
++ */
++ AV_PKT_DATA_STEREO3D,
++
++ /**
++     * This side data should be associated with an audio stream and corresponds
++     * to enum AVAudioServiceType.
++ */
++ AV_PKT_DATA_AUDIO_SERVICE_TYPE,
++
++ /**
++ * This side data contains quality related information from the encoder.
++ * @code
++     * u32le quality factor of the compressed frame. Allowed range is between 1 (good) and FF_LAMBDA_MAX (bad).
++     * u8    picture type
++     * u8    error count
++     * u16   reserved
++     * u64le[error count] sum of squared differences between encoder in and output
++ * @endcode
++ */
++ AV_PKT_DATA_QUALITY_STATS,
++
++ /**
++     * This side data contains an integer value representing the stream index
++     * of a "fallback" track. A fallback track indicates an alternate
++     * track to use when the current track can not be decoded for some reason.
++     * e.g. no decoder available for codec.
++ */
++ AV_PKT_DATA_FALLBACK_TRACK,
++
++ /**
++ * This side data corresponds to the AVCPBProperties struct.
++ */
++ AV_PKT_DATA_CPB_PROPERTIES,
++
++ /**
++ * Recommends skipping the specified number of samples
++ * @code
++ * u32le number of samples to skip from start of this packet
++ * u32le number of samples to skip from end of this packet
++ * u8 reason for start skip
++ * u8 reason for end skip (0=padding silence, 1=convergence)
++ * @endcode
++ */
++ AV_PKT_DATA_SKIP_SAMPLES,
++
++ /**
++ * An AV_PKT_DATA_JP_DUALMONO side data packet indicates that
++ * the packet may contain "dual mono" audio specific to Japanese DTV
++ * and if it is true, recommends only the selected channel to be used.
++ * @code
++ * u8 selected channels (0=main/left, 1=sub/right, 2=both)
++ * @endcode
++ */
++ AV_PKT_DATA_JP_DUALMONO,
++
++ /**
++ * A list of zero terminated key/value strings. There is no end marker for
++ * the list, so it is required to rely on the side data size to stop.
++ */
++ AV_PKT_DATA_STRINGS_METADATA,
++
++ /**
++ * Subtitle event position
++ * @code
++ * u32le x1
++ * u32le y1
++ * u32le x2
++ * u32le y2
++ * @endcode
++ */
++ AV_PKT_DATA_SUBTITLE_POSITION,
++
++ /**
++ * Data found in BlockAdditional element of matroska container. There is
++ * no end marker for the data, so it is required to rely on the side data
++ * size to recognize the end. 8 byte id (as found in BlockAddId) followed
++ * by data.
++ */
++ AV_PKT_DATA_MATROSKA_BLOCKADDITIONAL,
++
++ /**
++ * The optional first identifier line of a WebVTT cue.
++ */
++ AV_PKT_DATA_WEBVTT_IDENTIFIER,
++
++ /**
++ * The optional settings (rendering instructions) that immediately
++ * follow the timestamp specifier of a WebVTT cue.
++ */
++ AV_PKT_DATA_WEBVTT_SETTINGS,
++
++ /**
++ * A list of zero terminated key/value strings. There is no end marker for
++ * the list, so it is required to rely on the side data size to stop. This
++ * side data includes updated metadata which appeared in the stream.
++ */
++ AV_PKT_DATA_METADATA_UPDATE,
++
++ /**
++ * MPEGTS stream ID as uint8_t, this is required to pass the stream ID
++ * information from the demuxer to the corresponding muxer.
++ */
++ AV_PKT_DATA_MPEGTS_STREAM_ID,
++
++ /**
++ * Mastering display metadata (based on SMPTE-2086:2014). This metadata
++ * should be associated with a video stream and contains data in the form
++ * of the AVMasteringDisplayMetadata struct.
++ */
++ AV_PKT_DATA_MASTERING_DISPLAY_METADATA,
++
++ /**
++ * This side data should be associated with a video stream and corresponds
++ * to the AVSphericalMapping structure.
++ */
++ AV_PKT_DATA_SPHERICAL,
++
++ /**
++ * Content light level (based on CTA-861.3). This metadata should be
++ * associated with a video stream and contains data in the form of the
++ * AVContentLightMetadata struct.
++ */
++ AV_PKT_DATA_CONTENT_LIGHT_LEVEL,
++
++ /**
++ * ATSC A53 Part 4 Closed Captions. This metadata should be associated with
++ * a video stream. A53 CC bitstream is stored as uint8_t in AVPacketSideData.data.
++ * The number of bytes of CC data is AVPacketSideData.size.
++ */
++ AV_PKT_DATA_A53_CC,
++
++ /**
++ * This side data is encryption initialization data.
++ * The format is not part of ABI, use av_encryption_init_info_* methods to
++ * access.
++ */
++ AV_PKT_DATA_ENCRYPTION_INIT_INFO,
++
++ /**
++ * This side data contains encryption info for how to decrypt the packet.
++ * The format is not part of ABI, use av_encryption_info_* methods to access.
++ */
++ AV_PKT_DATA_ENCRYPTION_INFO,
++
++ /**
++ * Active Format Description data consisting of a single byte as specified
++ * in ETSI TS 101 154 using AVActiveFormatDescription enum.
++ */
++ AV_PKT_DATA_AFD,
++
++ /**
++ * Producer Reference Time data corresponding to the AVProducerReferenceTime struct,
++ * usually exported by some encoders (on demand through the prft flag set in the
++ * AVCodecContext export_side_data field).
++ */
++ AV_PKT_DATA_PRFT,
++
++ /**
++ * ICC profile data consisting of an opaque octet buffer following the
++ * format described by ISO 15076-1.
++ */
++ AV_PKT_DATA_ICC_PROFILE,
++
++ /**
++ * DOVI configuration
++ * ref:
++ * dolby-vision-bitstreams-within-the-iso-base-media-file-format-v2.1.2, section 2.2
++ * dolby-vision-bitstreams-in-mpeg-2-transport-stream-multiplex-v1.2, section 3.3
++ * Tags are stored in struct AVDOVIDecoderConfigurationRecord.
++ */
++ AV_PKT_DATA_DOVI_CONF,
++
++ /**
++ * Timecode which conforms to SMPTE ST 12-1:2014. The data is an array of 4 uint32_t
++ * where the first uint32_t describes how many (1-3) of the other timecodes are used.
++ * The timecode format is described in the documentation of av_timecode_get_smpte_from_framenum()
++ * function in libavutil/timecode.h.
++ */
++ AV_PKT_DATA_S12M_TIMECODE,
++
++ /**
++ * HDR10+ dynamic metadata associated with a video frame. The metadata is in
++ * the form of the AVDynamicHDRPlus struct and contains
++ * information for color volume transform - application 4 of
++ * SMPTE 2094-40:2016 standard.
++ */
++ AV_PKT_DATA_DYNAMIC_HDR10_PLUS,
++
++ /**
++ * IAMF Mix Gain Parameter Data associated with the audio frame. This metadata
++ * is in the form of the AVIAMFParamDefinition struct and contains information
++ * defined in sections 3.6.1 and 3.8.1 of the Immersive Audio Model and
++ * Formats standard.
++ */
++ AV_PKT_DATA_IAMF_MIX_GAIN_PARAM,
++
++ /**
++ * IAMF Demixing Info Parameter Data associated with the audio frame. This
++ * metadata is in the form of the AVIAMFParamDefinition struct and contains
++ * information defined in sections 3.6.1 and 3.8.2 of the Immersive Audio Model
++ * and Formats standard.
++ */
++ AV_PKT_DATA_IAMF_DEMIXING_INFO_PARAM,
++
++ /**
++ * IAMF Recon Gain Info Parameter Data associated with the audio frame. This
++ * metadata is in the form of the AVIAMFParamDefinition struct and contains
++ * information defined in sections 3.6.1 and 3.8.3 of the Immersive Audio Model
++ * and Formats standard.
++ */
++ AV_PKT_DATA_IAMF_RECON_GAIN_INFO_PARAM,
++
++ /**
++ * Ambient viewing environment metadata, as defined by H.274. This metadata
++ * should be associated with a video stream and contains data in the form
++ * of the AVAmbientViewingEnvironment struct.
++ */
++ AV_PKT_DATA_AMBIENT_VIEWING_ENVIRONMENT,
++
++ /**
++ * The number of pixels to discard from the top/bottom/left/right border of the
++ * decoded frame to obtain the sub-rectangle intended for presentation.
++ *
++ * @code
++ * u32le crop_top
++ * u32le crop_bottom
++ * u32le crop_left
++ * u32le crop_right
++ * @endcode
++ */
++ AV_PKT_DATA_FRAME_CROPPING,
++
++ /**
++ * Raw LCEVC payload data, as a uint8_t array, with NAL emulation
++ * bytes intact.
++ */
++ AV_PKT_DATA_LCEVC,
++
++ /**
++ * This side data contains information about the reference display width(s)
++ * and reference viewing distance(s) as well as information about the
++ * corresponding reference stereo pair(s), i.e., the pair(s) of views to be
++ * displayed for the viewer's left and right eyes on the reference display
++ * at the reference viewing distance.
++ * The payload is the AV3DReferenceDisplaysInfo struct defined in
++ * libavutil/tdrdi.h.
++ */
++ AV_PKT_DATA_3D_REFERENCE_DISPLAYS,
++
++ /**
++ * Contains the last received RTCP SR (Sender Report) information
++ * in the form of the AVRTCPSenderReport struct.
++ */
++ AV_PKT_DATA_RTCP_SR,
++
++ /**
++ * The number of side data types.
++ * This is not part of the public API/ABI in the sense that it may
++ * change when new side data types are added.
++ * This must stay the last enum value.
++ * If its value becomes huge, some code using it
++ * needs to be updated as it assumes it to be smaller than other limits.
++ */
++ AV_PKT_DATA_NB
++};
++
++/**
++ * This structure stores auxiliary information for decoding, presenting, or
++ * otherwise processing the coded stream. It is typically exported by demuxers
++ * and encoders and can be fed to decoders and muxers either in a per packet
++ * basis, or as global side data (applying to the entire coded stream).
++ *
++ * Global side data is handled as follows:
++ * - During demuxing, it may be exported through
++ * @ref AVCodecParameters.coded_side_data "AVStream's codec parameters", which can
++ * then be passed as input to decoders through the
++ * @ref AVCodecContext.coded_side_data "decoder context's side data", for
++ * initialization.
++ * - For muxing, it can be fed through @ref AVCodecParameters.coded_side_data
++ * "AVStream's codec parameters", typically the output of encoders through
++ * the @ref AVCodecContext.coded_side_data "encoder context's side data", for
++ * initialization.
++ *
++ * Packet specific side data is handled as follows:
++ * - During demuxing, it may be exported through @ref AVPacket.side_data
++ * "AVPacket's side data", which can then be passed as input to decoders.
++ * - For muxing, it can be fed through @ref AVPacket.side_data "AVPacket's
++ * side data", typically the output of encoders.
++ *
++ * Different modules may accept or export different types of side data
++ * depending on media type and codec. Refer to @ref AVPacketSideDataType for a
++ * list of defined types and where they may be found or used.
++ */
++typedef struct AVPacketSideData {
++ uint8_t *data;
++ size_t size;
++ enum AVPacketSideDataType type;
++} AVPacketSideData;
++
++/**
++ * Allocate a new packet side data.
++ *
++ * @param sd pointer to an array of side data to which the side data should
++ * be added. *sd may be NULL, in which case the array will be
++ * initialized.
++ * @param nb_sd pointer to an integer containing the number of entries in
++ * the array. The integer value will be increased by 1 on success.
++ * @param type side data type
++ * @param size desired side data size
++ * @param flags currently unused. Must be zero
++ *
++ * @return pointer to freshly allocated side data on success, or NULL otherwise.
++ */
++AVPacketSideData *av_packet_side_data_new(AVPacketSideData **psd, int *pnb_sd,
++ enum AVPacketSideDataType type,
++ size_t size, int flags);
++
++/**
++ * Wrap existing data as packet side data.
++ *
++ * @param sd pointer to an array of side data to which the side data should
++ * be added. *sd may be NULL, in which case the array will be
++ * initialized
++ * @param nb_sd pointer to an integer containing the number of entries in
++ * the array. The integer value will be increased by 1 on success.
++ * @param type side data type
++ * @param data a data array. It must be allocated with the av_malloc() family
++ * of functions. The ownership of the data is transferred to the
++ * side data array on success
++ * @param size size of the data array
++ * @param flags currently unused. Must be zero
++ *
++ * @return pointer to freshly allocated side data on success, or NULL otherwise
++ * On failure, the side data array is unchanged and the data remains
++ * owned by the caller.
++ */
++AVPacketSideData *av_packet_side_data_add(AVPacketSideData **sd, int *nb_sd,
++ enum AVPacketSideDataType type,
++ void *data, size_t size, int flags);
++
++/**
++ * Get side information from a side data array.
++ *
++ * @param sd the array from which the side data should be fetched
++ * @param nb_sd value containing the number of entries in the array.
++ * @param type desired side information type
++ *
++ * @return pointer to side data if present or NULL otherwise
++ */
++const AVPacketSideData *av_packet_side_data_get(const AVPacketSideData *sd,
++ int nb_sd,
++ enum AVPacketSideDataType type);
++
++/**
++ * Remove side data of the given type from a side data array.
++ *
++ * @param sd the array from which the side data should be removed
++ * @param nb_sd pointer to an integer containing the number of entries in
++ * the array. Will be reduced by the amount of entries removed
++ * upon return
++ * @param type side information type
++ */
++void av_packet_side_data_remove(AVPacketSideData *sd, int *nb_sd,
++ enum AVPacketSideDataType type);
++
++/**
++ * Convenience function to free all the side data stored in an array, and
++ * the array itself.
++ *
++ * @param sd pointer to array of side data to free. Will be set to NULL
++ * upon return.
++ * @param nb_sd pointer to an integer containing the number of entries in
++ * the array. Will be set to 0 upon return.
++ */
++void av_packet_side_data_free(AVPacketSideData **sd, int *nb_sd);
++
++const char *av_packet_side_data_name(enum AVPacketSideDataType type);
++
++/**
++ * @}
++ */
++
++/**
++ * @defgroup lavc_packet AVPacket
++ *
++ * Types and functions for working with AVPacket.
++ * @{
++ */
++
++/**
++ * This structure stores compressed data. It is typically exported by demuxers
++ * and then passed as input to decoders, or received as output from encoders and
++ * then passed to muxers.
++ *
++ * For video, it should typically contain one compressed frame. For audio it may
++ * contain several compressed frames. Encoders are allowed to output empty
++ * packets, with no compressed data, containing only side data
++ * (e.g. to update some stream parameters at the end of encoding).
++ *
++ * The semantics of data ownership depends on the buf field.
++ * If it is set, the packet data is dynamically allocated and is
++ * valid indefinitely until a call to av_packet_unref() reduces the
++ * reference count to 0.
++ *
++ * If the buf field is not set av_packet_ref() would make a copy instead
++ * of increasing the reference count.
++ *
++ * The side data is always allocated with av_malloc(), copied by
++ * av_packet_ref() and freed by av_packet_unref().
++ *
++ * sizeof(AVPacket) being a part of the public ABI is deprecated. once
++ * av_init_packet() is removed, new packets will only be able to be allocated
++ * with av_packet_alloc(), and new fields may be added to the end of the struct
++ * with a minor bump.
++ *
++ * @see av_packet_alloc
++ * @see av_packet_ref
++ * @see av_packet_unref
++ */
++typedef struct AVPacket {
++ /**
++ * A reference to the reference-counted buffer where the packet data is
++ * stored.
++ * May be NULL, then the packet data is not reference-counted.
++ */
++ AVBufferRef *buf;
++ /**
++ * Presentation timestamp in AVStream->time_base units; the time at which
++ * the decompressed packet will be presented to the user.
++ * Can be AV_NOPTS_VALUE if it is not stored in the file.
++ * pts MUST be larger or equal to dts as presentation cannot happen before
++ * decompression, unless one wants to view hex dumps. Some formats misuse
++ * the terms dts and pts/cts to mean something different. Such timestamps
++ * must be converted to true pts/dts before they are stored in AVPacket.
++ */
++ int64_t pts;
++ /**
++ * Decompression timestamp in AVStream->time_base units; the time at which
++ * the packet is decompressed.
++ * Can be AV_NOPTS_VALUE if it is not stored in the file.
++ */
++ int64_t dts;
++ uint8_t *data;
++ int size;
++ int stream_index;
++ /**
++ * A combination of AV_PKT_FLAG values
++ */
++ int flags;
++ /**
++ * Additional packet data that can be provided by the container.
++ * Packet can contain several types of side information.
++ */
++ AVPacketSideData *side_data;
++ int side_data_elems;
++
++ /**
++ * Duration of this packet in AVStream->time_base units, 0 if unknown.
++ * Equals next_pts - this_pts in presentation order.
++ */
++ int64_t duration;
++
++ int64_t pos; ///< byte position in stream, -1 if unknown
++
++ /**
++ * for some private data of the user
++ */
++ void *opaque;
++
++ /**
++ * AVBufferRef for free use by the API user. FFmpeg will never check the
++ * contents of the buffer ref. FFmpeg calls av_buffer_unref() on it when
++ * the packet is unreferenced. av_packet_copy_props() calls create a new
++ * reference with av_buffer_ref() for the target packet's opaque_ref field.
++ *
++ * This is unrelated to the opaque field, although it serves a similar
++ * purpose.
++ */
++ AVBufferRef *opaque_ref;
++
++ /**
++ * Time base of the packet's timestamps.
++ * In the future, this field may be set on packets output by encoders or
++ * demuxers, but its value will be by default ignored on input to decoders
++ * or muxers.
++ */
++ AVRational time_base;
++} AVPacket;
++
++#if FF_API_INIT_PACKET
++attribute_deprecated
++typedef struct AVPacketList {
++ AVPacket pkt;
++ struct AVPacketList *next;
++} AVPacketList;
++#endif
++
++#define AV_PKT_FLAG_KEY 0x0001 ///< The packet contains a keyframe
++#define AV_PKT_FLAG_CORRUPT 0x0002 ///< The packet content is corrupted
++/**
++ * Flag is used to discard packets which are required to maintain valid
++ * decoder state but are not required for output and should be dropped
++ * after decoding.
++ **/
++#define AV_PKT_FLAG_DISCARD 0x0004
++/**
++ * The packet comes from a trusted source.
++ *
++ * Otherwise-unsafe constructs such as arbitrary pointers to data
++ * outside the packet may be followed.
++ */
++#define AV_PKT_FLAG_TRUSTED 0x0008
++/**
++ * Flag is used to indicate packets that contain frames that can
++ * be discarded by the decoder. I.e. Non-reference frames.
++ */
++#define AV_PKT_FLAG_DISPOSABLE 0x0010
++
++enum AVSideDataParamChangeFlags {
++ AV_SIDE_DATA_PARAM_CHANGE_SAMPLE_RATE = 0x0004,
++ AV_SIDE_DATA_PARAM_CHANGE_DIMENSIONS = 0x0008,
++};
++
++/**
++ * Allocate an AVPacket and set its fields to default values. The resulting
++ * struct must be freed using av_packet_free().
++ *
++ * @return An AVPacket filled with default values or NULL on failure.
++ *
++ * @note this only allocates the AVPacket itself, not the data buffers. Those
++ * must be allocated through other means such as av_new_packet.
++ *
++ * @see av_new_packet
++ */
++AVPacket *av_packet_alloc(void);
++
++/**
++ * Create a new packet that references the same data as src.
++ *
++ * This is a shortcut for av_packet_alloc()+av_packet_ref().
++ *
++ * @return newly created AVPacket on success, NULL on error.
++ *
++ * @see av_packet_alloc
++ * @see av_packet_ref
++ */
++AVPacket *av_packet_clone(const AVPacket *src);
++
++/**
++ * Free the packet, if the packet is reference counted, it will be
++ * unreferenced first.
++ *
++ * @param pkt packet to be freed. The pointer will be set to NULL.
++ * @note passing NULL is a no-op.
++ */
++void av_packet_free(AVPacket **pkt);
++
++#if FF_API_INIT_PACKET
++/**
++ * Initialize optional fields of a packet with default values.
++ *
++ * Note, this does not touch the data and size members, which have to be
++ * initialized separately.
++ *
++ * @param pkt packet
++ *
++ * @see av_packet_alloc
++ * @see av_packet_unref
++ *
++ * @deprecated This function is deprecated. Once it's removed,
++ sizeof(AVPacket) will not be a part of the ABI anymore.
++ */
++attribute_deprecated
++void av_init_packet(AVPacket *pkt);
++#endif
++
++/**
++ * Allocate the payload of a packet and initialize its fields with
++ * default values.
++ *
++ * @param pkt packet
++ * @param size wanted payload size
++ * @return 0 if OK, AVERROR_xxx otherwise
++ */
++int av_new_packet(AVPacket *pkt, int size);
++
++/**
++ * Reduce packet size, correctly zeroing padding
++ *
++ * @param pkt packet
++ * @param size new size
++ */
++void av_shrink_packet(AVPacket *pkt, int size);
++
++/**
++ * Increase packet size, correctly zeroing padding
++ *
++ * @param pkt packet
++ * @param grow_by number of bytes by which to increase the size of the packet
++ */
++int av_grow_packet(AVPacket *pkt, int grow_by);
++
++/**
++ * Initialize a reference-counted packet from av_malloc()ed data.
++ *
++ * @param pkt packet to be initialized. This function will set the data, size,
++ * and buf fields, all others are left untouched.
++ * @param data Data allocated by av_malloc() to be used as packet data. If this
++ * function returns successfully, the data is owned by the underlying AVBuffer.
++ * The caller may not access the data through other means.
++ * @param size size of data in bytes, without the padding. I.e. the full buffer
++ * size is assumed to be size + AV_INPUT_BUFFER_PADDING_SIZE.
++ *
++ * @return 0 on success, a negative AVERROR on error
++ */
++int av_packet_from_data(AVPacket *pkt, uint8_t *data, int size);
++
++/**
++ * Allocate new information of a packet.
++ *
++ * @param pkt packet
++ * @param type side information type
++ * @param size side information size
++ * @return pointer to fresh allocated data or NULL otherwise
++ */
++uint8_t* av_packet_new_side_data(AVPacket *pkt, enum AVPacketSideDataType type,
++ size_t size);
++
++/**
++ * Wrap an existing array as a packet side data.
++ *
++ * @param pkt packet
++ * @param type side information type
++ * @param data the side data array. It must be allocated with the av_malloc()
++ * family of functions. The ownership of the data is transferred to
++ * pkt.
++ * @param size side information size
++ * @return a non-negative number on success, a negative AVERROR code on
++ * failure. On failure, the packet is unchanged and the data remains
++ * owned by the caller.
++ */
++int av_packet_add_side_data(AVPacket *pkt, enum AVPacketSideDataType type,
++ uint8_t *data, size_t size);
++
++/**
++ * Shrink the already allocated side data buffer
++ *
++ * @param pkt packet
++ * @param type side information type
++ * @param size new side information size
++ * @return 0 on success, < 0 on failure
++ */
++int av_packet_shrink_side_data(AVPacket *pkt, enum AVPacketSideDataType type,
++ size_t size);
++
++/**
++ * Get side information from packet.
++ *
++ * @param pkt packet
++ * @param type desired side information type
++ * @param size If supplied, *size will be set to the size of the side data
++ * or to zero if the desired side data is not present.
++ * @return pointer to data if present or NULL otherwise
++ */
++uint8_t* av_packet_get_side_data(const AVPacket *pkt, enum AVPacketSideDataType type,
++ size_t *size);
++
++/**
++ * Pack a dictionary for use in side_data.
++ *
++ * @param dict The dictionary to pack.
++ * @param size pointer to store the size of the returned data
++ * @return pointer to data if successful, NULL otherwise
++ */
++uint8_t *av_packet_pack_dictionary(AVDictionary *dict, size_t *size);
++/**
++ * Unpack a dictionary from side_data.
++ *
++ * @param data data from side_data
++ * @param size size of the data
++ * @param dict the metadata storage dictionary
++ * @return 0 on success, < 0 on failure
++ */
++int av_packet_unpack_dictionary(const uint8_t *data, size_t size,
++ AVDictionary **dict);
++
++/**
++ * Convenience function to free all the side data stored.
++ * All the other fields stay untouched.
++ *
++ * @param pkt packet
++ */
++void av_packet_free_side_data(AVPacket *pkt);
++
++/**
++ * Setup a new reference to the data described by a given packet
++ *
++ * If src is reference-counted, setup dst as a new reference to the
++ * buffer in src. Otherwise allocate a new buffer in dst and copy the
++ * data from src into it.
++ *
++ * All the other fields are copied from src.
++ *
++ * @see av_packet_unref
++ *
++ * @param dst Destination packet. Will be completely overwritten.
++ * @param src Source packet
++ *
++ * @return 0 on success, a negative AVERROR on error. On error, dst
++ * will be blank (as if returned by av_packet_alloc()).
++ */
++int av_packet_ref(AVPacket *dst, const AVPacket *src);
++
++/**
++ * Wipe the packet.
++ *
++ * Unreference the buffer referenced by the packet and reset the
++ * remaining packet fields to their default values.
++ *
++ * @param pkt The packet to be unreferenced.
++ */
++void av_packet_unref(AVPacket *pkt);
++
++/**
++ * Move every field in src to dst and reset src.
++ *
++ * @see av_packet_unref
++ *
++ * @param src Source packet, will be reset
++ * @param dst Destination packet
++ */
++void av_packet_move_ref(AVPacket *dst, AVPacket *src);
++
++/**
++ * Copy only "properties" fields from src to dst.
++ *
++ * Properties for the purpose of this function are all the fields
++ * beside those related to the packet data (buf, data, size)
++ *
++ * @param dst Destination packet
++ * @param src Source packet
++ *
++ * @return 0 on success AVERROR on failure.
++ */
++int av_packet_copy_props(AVPacket *dst, const AVPacket *src);
++
++/**
++ * Ensure the data described by a given packet is reference counted.
++ *
++ * @note This function does not ensure that the reference will be writable.
++ * Use av_packet_make_writable instead for that purpose.
++ *
++ * @see av_packet_ref
++ * @see av_packet_make_writable
++ *
++ * @param pkt packet whose data should be made reference counted.
++ *
++ * @return 0 on success, a negative AVERROR on error. On failure, the
++ * packet is unchanged.
++ */
++int av_packet_make_refcounted(AVPacket *pkt);
++
++/**
++ * Create a writable reference for the data described by a given packet,
++ * avoiding data copy if possible.
++ *
++ * @param pkt Packet whose data should be made writable.
++ *
++ * @return 0 on success, a negative AVERROR on failure. On failure, the
++ * packet is unchanged.
++ */
++int av_packet_make_writable(AVPacket *pkt);
++
++/**
++ * Convert valid timing fields (timestamps / durations) in a packet from one
++ * timebase to another. Timestamps with unknown values (AV_NOPTS_VALUE) will be
++ * ignored.
++ *
++ * @param pkt packet on which the conversion will be performed
++ * @param tb_src source timebase, in which the timing fields in pkt are
++ * expressed
++ * @param tb_dst destination timebase, to which the timing fields will be
++ * converted
++ */
++void av_packet_rescale_ts(AVPacket *pkt, AVRational tb_src, AVRational tb_dst);
++
++/**
++ * Allocate an AVContainerFifo instance for AVPacket.
++ *
++ * @param flags currently unused
++ */
++struct AVContainerFifo *av_container_fifo_alloc_avpacket(unsigned flags);
++
++/**
++ * @}
++ */
++
++#endif // AVCODEC_PACKET_H
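For orientation only (not part of the vendored header or this patch): a minimal sketch of how the packet API declared above is typically driven. It uses only functions declared in this header; fill_packet() is a hypothetical stand-in for whatever produces the compressed data (e.g. a demuxer), and the 90 kHz/millisecond time bases are illustrative.
````
#include <libavcodec/packet.h>

/* Allocate a packet, inspect one side data type, rescale timestamps,
 * then release everything. */
static int consume_one_packet(int (*fill_packet)(AVPacket *))
{
    AVPacket *pkt = av_packet_alloc();          /* allocates only the struct */
    if (!pkt)
        return -1;

    int ret = fill_packet(pkt);                 /* hypothetical data source */
    if (ret >= 0) {
        size_t sd_size;
        const uint8_t *extradata =
            av_packet_get_side_data(pkt, AV_PKT_DATA_NEW_EXTRADATA, &sd_size);
        if (extradata) {
            /* react to the updated extradata before decoding this packet */
        }
        /* e.g. convert timestamps from a 90 kHz clock to milliseconds */
        av_packet_rescale_ts(pkt, (AVRational){1, 90000}, (AVRational){1, 1000});
    }

    av_packet_unref(pkt);                       /* drop the data reference */
    av_packet_free(&pkt);                       /* free the struct, pkt = NULL */
    return ret;
}
````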
+diff --git a/dom/media/platforms/ffmpeg/ffmpeg62/include/libavcodec/vdpau.h b/dom/media/platforms/ffmpeg/ffmpeg62/include/libavcodec/vdpau.h
+new file mode 100644
+--- /dev/null
++++ b/dom/media/platforms/ffmpeg/ffmpeg62/include/libavcodec/vdpau.h
+@@ -0,0 +1,138 @@
++/*
++ * The Video Decode and Presentation API for UNIX (VDPAU) is used for
++ * hardware-accelerated decoding of MPEG-1/2, H.264 and VC-1.
++ *
++ * Copyright (C) 2008 NVIDIA
++ *
++ * This file is part of FFmpeg.
++ *
++ * FFmpeg is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU Lesser General Public
++ * License as published by the Free Software Foundation; either
++ * version 2.1 of the License, or (at your option) any later version.
++ *
++ * FFmpeg is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with FFmpeg; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
++ */
++
++#ifndef AVCODEC_VDPAU_H
++#define AVCODEC_VDPAU_H
++
++/**
++ * @file
++ * @ingroup lavc_codec_hwaccel_vdpau
++ * Public libavcodec VDPAU header.
++ */
++
++
++/**
++ * @defgroup lavc_codec_hwaccel_vdpau VDPAU Decoder and Renderer
++ * @ingroup lavc_codec_hwaccel
++ *
++ * VDPAU hardware acceleration has two modules
++ * - VDPAU decoding
++ * - VDPAU presentation
++ *
++ * The VDPAU decoding module parses all headers using FFmpeg
++ * parsing mechanisms and uses VDPAU for the actual decoding.
++ *
++ * As per the current implementation, the actual decoding
++ * and rendering (API calls) are done as part of the VDPAU
++ * presentation (vo_vdpau.c) module.
++ *
++ * @{
++ */
++
++#include <vdpau/vdpau.h>
++
++#include "libavutil/avconfig.h"
++#include "libavutil/attributes.h"
++
++#include "avcodec.h"
++
++struct AVCodecContext;
++struct AVFrame;
++
++typedef int (*AVVDPAU_Render2)(struct AVCodecContext *, struct AVFrame *,
++ const VdpPictureInfo *, uint32_t,
++ const VdpBitstreamBuffer *);
++
++/**
++ * This structure is used to share data between the libavcodec library and
++ * the client video application.
++ * This structure will be allocated and stored in AVCodecContext.hwaccel_context
++ * by av_vdpau_bind_context(). Members can be set by the user once
++ * during initialization or through each AVCodecContext.get_buffer()
++ * function call. In any case, they must be valid prior to calling
++ * decoding functions.
++ *
++ * The size of this structure is not a part of the public ABI and must not
++ * be used outside of libavcodec.
++ */
++typedef struct AVVDPAUContext {
++ /**
++ * VDPAU decoder handle
++ *
++ * Set by user.
++ */
++ VdpDecoder decoder;
++
++ /**
++ * VDPAU decoder render callback
++ *
++ * Set by the user.
++ */
++ VdpDecoderRender *render;
++
++ AVVDPAU_Render2 render2;
++} AVVDPAUContext;
++
++/**
++ * Associate a VDPAU device with a codec context for hardware acceleration.
++ * This function is meant to be called from the get_format() codec callback,
++ * or earlier. It can also be called after avcodec_flush_buffers() to change
++ * the underlying VDPAU device mid-stream (e.g. to recover from non-transparent
++ * display preemption).
++ *
++ * @note get_format() must return AV_PIX_FMT_VDPAU if this function completes
++ * successfully.
++ *
++ * @param avctx decoding context whose get_format() callback is invoked
++ * @param device VDPAU device handle to use for hardware acceleration
++ * @param get_proc_address VDPAU device driver
++ * @param flags zero of more OR'd AV_HWACCEL_FLAG_* flags
++ *
++ * @return 0 on success, an AVERROR code on failure.
++ */
++int av_vdpau_bind_context(AVCodecContext *avctx, VdpDevice device,
++ VdpGetProcAddress *get_proc_address, unsigned flags);
++
++/**
++ * Gets the parameters to create an adequate VDPAU video surface for the codec
++ * context using VDPAU hardware decoding acceleration.
++ *
++ * @note Behavior is undefined if the context was not successfully bound to a
++ * VDPAU device using av_vdpau_bind_context().
++ *
++ * @param avctx the codec context being used for decoding the stream
++ * @param type storage space for the VDPAU video surface chroma type
++ * (or NULL to ignore)
++ * @param width storage space for the VDPAU video surface pixel width
++ * (or NULL to ignore)
++ * @param height storage space for the VDPAU video surface pixel height
++ * (or NULL to ignore)
++ *
++ * @return 0 on success, a negative AVERROR code on failure.
++ */
++int av_vdpau_get_surface_parameters(AVCodecContext *avctx, VdpChromaType *type,
++ uint32_t *width, uint32_t *height);
++
++/** @} */
++
++#endif /* AVCODEC_VDPAU_H */
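For reference only (not part of the patch): a rough sketch of how av_vdpau_bind_context() is normally wired into a decoder's get_format() callback. vdpau_device and vdpau_get_proc_address are placeholders for handles the application obtains from its own VDPAU setup, and a real player would fall back to a software pixel format rather than fmts[0].
````
#include <libavcodec/vdpau.h>

/* Placeholders: in a real application these come from the VDPAU device setup. */
static VdpDevice          vdpau_device;
static VdpGetProcAddress *vdpau_get_proc_address;

static enum AVPixelFormat get_format_vdpau(AVCodecContext *avctx,
                                           const enum AVPixelFormat *fmts)
{
    for (const enum AVPixelFormat *p = fmts; *p != AV_PIX_FMT_NONE; p++) {
        if (*p == AV_PIX_FMT_VDPAU &&
            av_vdpau_bind_context(avctx, vdpau_device,
                                  vdpau_get_proc_address, 0) == 0)
            return AV_PIX_FMT_VDPAU;   /* hardware path accepted */
    }
    return fmts[0];                    /* fall back to the first offered format */
}
````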
+diff --git a/dom/media/platforms/ffmpeg/ffmpeg62/include/libavcodec/version.h b/dom/media/platforms/ffmpeg/ffmpeg62/include/libavcodec/version.h
+new file mode 100644
+--- /dev/null
++++ b/dom/media/platforms/ffmpeg/ffmpeg62/include/libavcodec/version.h
+@@ -0,0 +1,45 @@
++/*
++ * This file is part of FFmpeg.
++ *
++ * FFmpeg is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU Lesser General Public
++ * License as published by the Free Software Foundation; either
++ * version 2.1 of the License, or (at your option) any later version.
++ *
++ * FFmpeg is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with FFmpeg; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
++ */
++
++#ifndef AVCODEC_VERSION_H
++#define AVCODEC_VERSION_H
++
++/**
++ * @file
++ * @ingroup libavc
++ * Libavcodec version macros.
++ */
++
++#include "libavutil/version.h"
++
++#include "version_major.h"
++
++#define LIBAVCODEC_VERSION_MINOR 11
++#define LIBAVCODEC_VERSION_MICRO 100
++
++#define LIBAVCODEC_VERSION_INT AV_VERSION_INT(LIBAVCODEC_VERSION_MAJOR, \
++ LIBAVCODEC_VERSION_MINOR, \
++ LIBAVCODEC_VERSION_MICRO)
++#define LIBAVCODEC_VERSION AV_VERSION(LIBAVCODEC_VERSION_MAJOR, \
++ LIBAVCODEC_VERSION_MINOR, \
++ LIBAVCODEC_VERSION_MICRO)
++#define LIBAVCODEC_BUILD LIBAVCODEC_VERSION_INT
++
++#define LIBAVCODEC_IDENT "Lavc" AV_STRINGIFY(LIBAVCODEC_VERSION)
++
++#endif /* AVCODEC_VERSION_H */
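As an aside (not part of the patch), version macros like the ones above are usually consumed as a compile-time gate; the HAVE_* name below is purely illustrative.
````
#include <libavcodec/version.h>

/* Compile-time gate on the vendored libavcodec version (62.11.100 here). */
#if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(62, 0, 100)
#  define HAVE_FFMPEG62_PACKET_SIDE_DATA_API 1
#else
#  define HAVE_FFMPEG62_PACKET_SIDE_DATA_API 0
#endif
````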
+diff --git a/dom/media/platforms/ffmpeg/ffmpeg62/include/libavcodec/version_major.h b/dom/media/platforms/ffmpeg/ffmpeg62/include/libavcodec/version_major.h
+new file mode 100644
+--- /dev/null
++++ b/dom/media/platforms/ffmpeg/ffmpeg62/include/libavcodec/version_major.h
+@@ -0,0 +1,55 @@
++/*
++ * This file is part of FFmpeg.
++ *
++ * FFmpeg is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU Lesser General Public
++ * License as published by the Free Software Foundation; either
++ * version 2.1 of the License, or (at your option) any later version.
++ *
++ * FFmpeg is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with FFmpeg; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
++ */
++
++#ifndef AVCODEC_VERSION_MAJOR_H
++#define AVCODEC_VERSION_MAJOR_H
++
++/**
++ * @file
++ * @ingroup libavc
++ * Libavcodec version macros.
++ */
++
++#define LIBAVCODEC_VERSION_MAJOR 62
++
++/**
++ * FF_API_* defines may be placed below to indicate public API that will be
++ * dropped at a future version bump. The defines themselves are not part of
++ * the public API and may change, break or disappear at any time.
++ *
++ * @note, when bumping the major version it is recommended to manually
++ * disable each FF_API_* in its own commit instead of disabling them all
++ * at once through the bump. This improves the git bisect-ability of the change.
++ */
++
++#define FF_API_INIT_PACKET (LIBAVCODEC_VERSION_MAJOR < 63)
++
++#define FF_API_V408_CODECID (LIBAVCODEC_VERSION_MAJOR < 63)
++#define FF_API_CODEC_PROPS (LIBAVCODEC_VERSION_MAJOR < 63)
++#define FF_API_EXR_GAMMA (LIBAVCODEC_VERSION_MAJOR < 63)
++
++#define FF_API_NVDEC_OLD_PIX_FMTS (LIBAVCODEC_VERSION_MAJOR < 63)
++
++// reminder to remove the OMX encoder on next major bump
++#define FF_CODEC_OMX (LIBAVCODEC_VERSION_MAJOR < 63)
++// reminder to remove Sonic Lossy/Lossless encoders on next major bump
++#define FF_CODEC_SONIC_ENC (LIBAVCODEC_VERSION_MAJOR < 63)
++// reminder to remove Sonic decoder on next-next major bump
++#define FF_CODEC_SONIC_DEC (LIBAVCODEC_VERSION_MAJOR < 63)
++
++#endif /* AVCODEC_VERSION_MAJOR_H */
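For context only (not part of the patch): FF_API_* guards such as FF_API_INIT_PACKET above are typically consumed by callers that must build against both the current and the next major version. A hypothetical sketch, with deliver() standing in for whatever hands the packet to a decoder:
````
#include <libavcodec/packet.h>

/* Uses the deprecated stack-packet style while FF_API_INIT_PACKET is in
 * effect, and the alloc-only style once the guard is dropped at the next
 * major bump. */
static void send_empty_packet_example(void (*deliver)(AVPacket *))
{
#if FF_API_INIT_PACKET
    AVPacket pkt;
    av_init_packet(&pkt);   /* deprecated: relies on sizeof(AVPacket) ABI */
    pkt.data = NULL;
    pkt.size = 0;
    deliver(&pkt);
#else
    AVPacket *pkt = av_packet_alloc();
    if (pkt) {
        deliver(pkt);
        av_packet_free(&pkt);
    }
#endif
}
````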
+diff --git a/dom/media/platforms/ffmpeg/ffmpeg62/include/libavutil/attributes.h b/dom/media/platforms/ffmpeg/ffmpeg62/include/libavutil/attributes.h
+new file mode 100644
+--- /dev/null
++++ b/dom/media/platforms/ffmpeg/ffmpeg62/include/libavutil/attributes.h
+@@ -0,0 +1,175 @@
++/*
++ * copyright (c) 2006 Michael Niedermayer <michaelni AT gmx.at>
++ *
++ * This file is part of FFmpeg.
++ *
++ * FFmpeg is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU Lesser General Public
++ * License as published by the Free Software Foundation; either
++ * version 2.1 of the License, or (at your option) any later version.
++ *
++ * FFmpeg is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with FFmpeg; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
++ */
++
++/**
++ * @file
++ * Macro definitions for various function/variable attributes
++ */
++
++#ifndef AVUTIL_ATTRIBUTES_H
++#define AVUTIL_ATTRIBUTES_H
++
++#ifdef __GNUC__
++# define AV_GCC_VERSION_AT_LEAST(x,y) (__GNUC__ > (x) || __GNUC__ == (x) && __GNUC_MINOR__ >= (y))
++# define AV_GCC_VERSION_AT_MOST(x,y) (__GNUC__ < (x) || __GNUC__ == (x) && __GNUC_MINOR__ <= (y))
++#else
++# define AV_GCC_VERSION_AT_LEAST(x,y) 0
++# define AV_GCC_VERSION_AT_MOST(x,y) 0
++#endif
++
++#ifdef __has_builtin
++# define AV_HAS_BUILTIN(x) __has_builtin(x)
++#else
++# define AV_HAS_BUILTIN(x) 0
++#endif
++
++#ifndef av_always_inline
++#if AV_GCC_VERSION_AT_LEAST(3,1)
++# define av_always_inline __attribute__((always_inline)) inline
++#elif defined(_MSC_VER)
++# define av_always_inline __forceinline
++#else
++# define av_always_inline inline
++#endif
++#endif
++
++#ifndef av_extern_inline
++#if defined(__ICL) && __ICL >= 1210 || defined(__GNUC_STDC_INLINE__)
++# define av_extern_inline extern inline
++#else
++# define av_extern_inline inline
++#endif
++#endif
++
++#if AV_GCC_VERSION_AT_LEAST(3,4)
++# define av_warn_unused_result __attribute__((warn_unused_result))
++#else
++# define av_warn_unused_result
++#endif
++
++#if AV_GCC_VERSION_AT_LEAST(3,1)
++# define av_noinline __attribute__((noinline))
++#elif defined(_MSC_VER)
++# define av_noinline __declspec(noinline)
++#else
++# define av_noinline
++#endif
++
++#if AV_GCC_VERSION_AT_LEAST(3,1) || defined(__clang__)
++# define av_pure __attribute__((pure))
++#else
++# define av_pure
++#endif
++
++#if AV_GCC_VERSION_AT_LEAST(2,6) || defined(__clang__)
++# define av_const __attribute__((const))
++#else
++# define av_const
++#endif
++
++#if AV_GCC_VERSION_AT_LEAST(4,3) || defined(__clang__)
++# define av_cold __attribute__((cold))
++#else
++# define av_cold
++#endif
++
++#if AV_GCC_VERSION_AT_LEAST(4,1) && !defined(__llvm__)
++# define av_flatten __attribute__((flatten))
++#else
++# define av_flatten
++#endif
++
++#if AV_GCC_VERSION_AT_LEAST(3,1)
++# define attribute_deprecated __attribute__((deprecated))
++#elif defined(_MSC_VER)
++# define attribute_deprecated __declspec(deprecated)
++#else
++# define attribute_deprecated
++#endif
++
++/**
++ * Disable warnings about deprecated features
++ * This is useful for sections of code kept for backward compatibility and
++ * scheduled for removal.
++ */
++#ifndef AV_NOWARN_DEPRECATED
++#if AV_GCC_VERSION_AT_LEAST(4,6) || defined(__clang__)
++# define AV_NOWARN_DEPRECATED(code) \
++ _Pragma("GCC diagnostic push") \
++ _Pragma("GCC diagnostic ignored \"-Wdeprecated-declarations\"") \
++ code \
++ _Pragma("GCC diagnostic pop")
++#elif defined(_MSC_VER)
++# define AV_NOWARN_DEPRECATED(code) \
++ __pragma(warning(push)) \
++ __pragma(warning(disable : 4996)) \
++ code; \
++ __pragma(warning(pop))
++#else
++# define AV_NOWARN_DEPRECATED(code) code
++#endif
++#endif
++
++#if defined(__GNUC__) || defined(__clang__)
++# define av_unused __attribute__((unused))
++#else
++# define av_unused
++#endif
++
++/**
++ * Mark a variable as used and prevent the compiler from optimizing it
++ * away. This is useful for variables accessed only from inline
++ * assembler without the compiler being aware.
++ */
++#if AV_GCC_VERSION_AT_LEAST(3,1) || defined(__clang__)
++# define av_used __attribute__((used))
++#else
++# define av_used
++#endif
++
++#if AV_GCC_VERSION_AT_LEAST(3,3) || defined(__clang__)
++# define av_alias __attribute__((may_alias))
++#else
++# define av_alias
++#endif
++
++#if (defined(__GNUC__) || defined(__clang__)) && !defined(__INTEL_COMPILER)
++# define av_uninit(x) x=x
++#else
++# define av_uninit(x) x
++#endif
++
++#if defined(__GNUC__) || defined(__clang__)
++# define av_builtin_constant_p __builtin_constant_p
++# define av_printf_format(fmtpos, attrpos) __attribute__((__format__(__printf__, fmtpos, attrpos)))
++# define av_scanf_format(fmtpos, attrpos) __attribute__((__format__(__scanf__, fmtpos, attrpos)))
++#else
++# define av_builtin_constant_p(x) 0
++# define av_printf_format(fmtpos, attrpos)
++# define av_scanf_format(fmtpos, attrpos)
++#endif
++
++#if AV_GCC_VERSION_AT_LEAST(2,5) || defined(__clang__)
++# define av_noreturn __attribute__((noreturn))
++#else
++# define av_noreturn
++#endif
++
++#endif /* AVUTIL_ATTRIBUTES_H */
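A small illustration (not part of the patch) of how the attribute macros defined above are meant to be used; the clamp functions are hypothetical.
````
#include <libavutil/attributes.h>

/* av_const: the result depends only on the argument, so the compiler may
 * fold or reuse calls freely. */
static av_always_inline av_const int clamp_u8(int v)
{
    return v < 0 ? 0 : v > 255 ? 255 : v;
}

/* attribute_deprecated + AV_NOWARN_DEPRECATED: keep an old entry point
 * around and call it from new code without a deprecation warning. */
attribute_deprecated static int old_clamp(int v) { return clamp_u8(v); }

static int new_clamp(int v)
{
    int r;
    AV_NOWARN_DEPRECATED(r = old_clamp(v);)
    return r;
}
````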
+diff --git a/dom/media/platforms/ffmpeg/ffmpeg58/include/libavutil/avconfig.h b/dom/media/platforms/ffmpeg/ffmpeg62/include/libavutil/avconfig.h
+copy from dom/media/platforms/ffmpeg/ffmpeg58/include/libavutil/avconfig.h
+copy to dom/media/platforms/ffmpeg/ffmpeg62/include/libavutil/avconfig.h
+diff --git a/dom/media/platforms/ffmpeg/ffmpeg62/include/libavutil/avutil.h b/dom/media/platforms/ffmpeg/ffmpeg62/include/libavutil/avutil.h
+new file mode 100644
+--- /dev/null
++++ b/dom/media/platforms/ffmpeg/ffmpeg62/include/libavutil/avutil.h
+@@ -0,0 +1,364 @@
++/*
++ * copyright (c) 2006 Michael Niedermayer <michaelni AT gmx.at>
++ *
++ * This file is part of FFmpeg.
++ *
++ * FFmpeg is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU Lesser General Public
++ * License as published by the Free Software Foundation; either
++ * version 2.1 of the License, or (at your option) any later version.
++ *
++ * FFmpeg is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with FFmpeg; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
++ */
++
++#ifndef AVUTIL_AVUTIL_H
++#define AVUTIL_AVUTIL_H
++
++/**
++ * @file
++ * @ingroup lavu
++ * Convenience header that includes @ref lavu "libavutil"'s core.
++ */
++
++/**
++ * @mainpage
++ *
++ * @section ffmpeg_intro Introduction
++ *
++ * This document describes the usage of the different libraries
++ * provided by FFmpeg.
++ *
++ * @li @ref libavc "libavcodec" encoding/decoding library
++ * @li @ref lavfi "libavfilter" graph-based frame editing library
++ * @li @ref libavf "libavformat" I/O and muxing/demuxing library
++ * @li @ref lavd "libavdevice" special devices muxing/demuxing library
++ * @li @ref lavu "libavutil" common utility library
++ * @li @ref lswr "libswresample" audio resampling, format conversion and mixing
++ * @li @ref libsws "libswscale" color conversion and scaling library
++ *
++ * @section ffmpeg_versioning Versioning and compatibility
++ *
++ * Each of the FFmpeg libraries contains a version.h header, which defines a
++ * major, minor and micro version number with the
++ * <em>LIBRARYNAME_VERSION_{MAJOR,MINOR,MICRO}</em> macros. The major version
++ * number is incremented with backward incompatible changes - e.g. removing
++ * parts of the public API, reordering public struct members, etc. The minor
++ * version number is incremented for backward compatible API changes or major
++ * new features - e.g. adding a new public function or a new decoder. The micro
++ * version number is incremented for smaller changes that a calling program
++ * might still want to check for - e.g. changing behavior in a previously
++ * unspecified situation.
++ *
++ * FFmpeg guarantees backward API and ABI compatibility for each library as long
++ * as its major version number is unchanged. This means that no public symbols
++ * will be removed or renamed. Types and names of the public struct members and
++ * values of public macros and enums will remain the same (unless they were
++ * explicitly declared as not part of the public API). Documented behavior will
++ * not change.
++ *
++ * In other words, any correct program that works with a given FFmpeg snapshot
++ * should work just as well without any changes with any later snapshot with the
++ * same major versions. This applies to both rebuilding the program against new
++ * FFmpeg versions or to replacing the dynamic FFmpeg libraries that a program
++ * links against.
++ *
++ * However, new public symbols may be added and new members may be appended to
++ * public structs whose size is not part of public ABI (most public structs in
++ * FFmpeg). New macros and enum values may be added. Behavior in undocumented
++ * situations may change slightly (and be documented). All those are accompanied
++ * by an entry in doc/APIchanges and incrementing either the minor or micro
++ * version number.
++ */
++
++/**
++ * @defgroup lavu libavutil
++ * Common code shared across all FFmpeg libraries.
++ *
++ * @note
++ * libavutil is designed to be modular. In most cases, in order to use the
++ * functions provided by one component of libavutil you must explicitly include
++ * the specific header containing that feature. If you are only using
++ * media-related components, you could simply include libavutil/avutil.h, which
++ * brings in most of the "core" components.
++ *
++ * @{
++ *
++ * @defgroup lavu_crypto Crypto and Hashing
++ *
++ * @{
++ * @}
++ *
++ * @defgroup lavu_math Mathematics
++ * @{
++ *
++ * @}
++ *
++ * @defgroup lavu_string String Manipulation
++ *
++ * @{
++ *
++ * @}
++ *
++ * @defgroup lavu_mem Memory Management
++ *
++ * @{
++ *
++ * @}
++ *
++ * @defgroup lavu_data Data Structures
++ * @{
++ *
++ * @}
++ *
++ * @defgroup lavu_video Video related
++ *
++ * @{
++ *
++ * @}
++ *
++ * @defgroup lavu_audio Audio related
++ *
++ * @{
++ *
++ * @}
++ *
++ * @defgroup lavu_error Error Codes
++ *
++ * @{
++ *
++ * @}
++ *
++ * @defgroup lavu_log Logging Facility
++ *
++ * @{
++ *
++ * @}
++ *
++ * @defgroup lavu_misc Other
++ *
++ * @{
++ *
++ * @defgroup preproc_misc Preprocessor String Macros
++ *
++ * @{
++ *
++ * @}
++ *
++ * @defgroup version_utils Library Version Macros
++ *
++ * @{
++ *
++ * @}
++ */
++
++
++/**
++ * @addtogroup lavu_ver
++ * @{
++ */
++
++/**
++ * Return the LIBAVUTIL_VERSION_INT constant.
++ */
++unsigned avutil_version(void);
++
++/**
++ * Return an informative version string. This usually is the actual release
++ * version number or a git commit description. This string has no fixed format
++ * and can change any time. It should never be parsed by code.
++ */
++const char *av_version_info(void);
++
++/**
++ * Return the libavutil build-time configuration.
++ */
++const char *avutil_configuration(void);
++
++/**
++ * Return the libavutil license.
++ */
++const char *avutil_license(void);
++
++/**
++ * @}
++ */
++
++/**
++ * @addtogroup lavu_media Media Type
++ * @brief Media Type
++ */
++
++enum AVMediaType {
++ AVMEDIA_TYPE_UNKNOWN = -1, ///< Usually treated as AVMEDIA_TYPE_DATA
++ AVMEDIA_TYPE_VIDEO,
++ AVMEDIA_TYPE_AUDIO,
++ AVMEDIA_TYPE_DATA, ///< Opaque data information usually continuous
++ AVMEDIA_TYPE_SUBTITLE,
++ AVMEDIA_TYPE_ATTACHMENT, ///< Opaque data information usually sparse
++ AVMEDIA_TYPE_NB
++};
++
++/**
++ * Return a string describing the media_type enum, NULL if media_type
++ * is unknown.
++ */
++const char *av_get_media_type_string(enum AVMediaType media_type);
++
++/**
++ * @defgroup lavu_const Constants
++ * @{
++ *
++ * @defgroup lavu_enc Encoding specific
++ *
++ * @note those definition should move to avcodec
++ * @{
++ */
++
++#define FF_LAMBDA_SHIFT 7
++#define FF_LAMBDA_SCALE (1<<FF_LAMBDA_SHIFT)
++#define FF_QP2LAMBDA 118 ///< factor to convert from H.263 QP to lambda
++#define FF_LAMBDA_MAX (256*128-1)
++
++#define FF_QUALITY_SCALE FF_LAMBDA_SCALE //FIXME maybe remove
++
++/**
++ * @}
++ * @defgroup lavu_time Timestamp specific
++ *
++ * FFmpeg internal timebase and timestamp definitions
++ *
++ * @{
++ */
++
++/**
++ * @brief Undefined timestamp value
++ *
++ * Usually reported by demuxer that work on containers that do not provide
++ * either pts or dts.
++ */
++
++#define AV_NOPTS_VALUE ((int64_t)UINT64_C(0x8000000000000000))
++
++/**
++ * Internal time base represented as integer
++ */
++
++#define AV_TIME_BASE 1000000
++
++/**
++ * Internal time base represented as fractional value
++ */
++
++#ifdef __cplusplus
++/* ISO C++ forbids compound-literals. */
++#define AV_TIME_BASE_Q av_make_q(1, AV_TIME_BASE)
++#else
++#define AV_TIME_BASE_Q (AVRational){1, AV_TIME_BASE}
++#endif
++
++/**
++ * @}
++ * @}
++ * @defgroup lavu_picture Image related
++ *
++ * AVPicture types, pixel formats and basic image planes manipulation.
++ *
++ * @{
++ */
++
++enum AVPictureType {
++ AV_PICTURE_TYPE_NONE = 0, ///< Undefined
++ AV_PICTURE_TYPE_I, ///< Intra
++ AV_PICTURE_TYPE_P, ///< Predicted
++ AV_PICTURE_TYPE_B, ///< Bi-dir predicted
++ AV_PICTURE_TYPE_S, ///< S(GMC)-VOP MPEG-4
++ AV_PICTURE_TYPE_SI, ///< Switching Intra
++ AV_PICTURE_TYPE_SP, ///< Switching Predicted
++ AV_PICTURE_TYPE_BI, ///< BI type
++};
++
++/**
++ * Return a single letter to describe the given picture type
++ * pict_type.
++ *
++ * @param[in] pict_type the picture type @return a single character
++ * representing the picture type, '?' if pict_type is unknown
++ */
++char av_get_picture_type_char(enum AVPictureType pict_type);
++
++/**
++ * @}
++ */
++
++#include "common.h"
++#include "rational.h"
++#include "version.h"
++#include "macros.h"
++#include "mathematics.h"
++#include "log.h"
++#include "pixfmt.h"
++
++/**
++ * Return x default pointer in case p is NULL.
++ */
++static inline void *av_x_if_null(const void *p, const void *x)
++{
++ return (void *)(intptr_t)(p ? p : x);
++}
++
++#if FF_API_OPT_INT_LIST
++/**
++ * Compute the length of an integer list.
++ *
++ * @param elsize size in bytes of each list element (only 1, 2, 4 or 8)
++ * @param term list terminator (usually 0 or -1)
++ * @param list pointer to the list
++ * @return length of the list, in elements, not counting the terminator
++ */
++attribute_deprecated
++unsigned av_int_list_length_for_size(unsigned elsize,
++ const void *list, uint64_t term) av_pure;
++
++/**
++ * Compute the length of an integer list.
++ *
++ * @param term list terminator (usually 0 or -1)
++ * @param list pointer to the list
++ * @return length of the list, in elements, not counting the terminator
++ */
++#define av_int_list_length(list, term) \
++ av_int_list_length_for_size(sizeof(*(list)), list, term)
++#endif
++
++/**
++ * Return the fractional representation of the internal time base.
++ */
++AVRational av_get_time_base_q(void);
++
++#define AV_FOURCC_MAX_STRING_SIZE 32
++
++#define av_fourcc2str(fourcc) av_fourcc_make_string((char[AV_FOURCC_MAX_STRING_SIZE]){0}, fourcc)
++
++/**
++ * Fill the provided buffer with a string containing a FourCC (four-character
++ * code) representation.
++ *
++ * @param buf a buffer with size in bytes of at least AV_FOURCC_MAX_STRING_SIZE
++ * @param fourcc the fourcc to represent
++ * @return the buffer in input
++ */
++char *av_fourcc_make_string(char *buf, uint32_t fourcc);
++
++/**
++ * @}
++ * @}
++ */
++
++#endif /* AVUTIL_AVUTIL_H */
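A short sketch (not part of the patch) exercising a few of the helpers declared above; describe_stream() and its arguments are hypothetical.
````
#include <stdio.h>
#include <libavutil/avutil.h>

/* Print the media type, whether a timestamp is defined, and a FourCC tag. */
static void describe_stream(enum AVMediaType type, int64_t pts, uint32_t tag)
{
    const char *name = av_get_media_type_string(type);  /* NULL if unknown */
    printf("type=%s pts=%s tag=%s\n",
           name ? name : "unknown",
           pts == AV_NOPTS_VALUE ? "none" : "set",       /* undefined timestamp */
           av_fourcc2str(tag));                          /* e.g. "avc1" */
}
````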
+diff --git a/media/ffvpx/libavutil/buffer.h b/dom/media/platforms/ffmpeg/ffmpeg62/include/libavutil/buffer.h
+copy from media/ffvpx/libavutil/buffer.h
+copy to dom/media/platforms/ffmpeg/ffmpeg62/include/libavutil/buffer.h
+diff --git a/dom/media/platforms/ffmpeg/ffmpeg62/include/libavutil/channel_layout.h b/dom/media/platforms/ffmpeg/ffmpeg62/include/libavutil/channel_layout.h
+new file mode 100644
+--- /dev/null
++++ b/dom/media/platforms/ffmpeg/ffmpeg62/include/libavutil/channel_layout.h
+@@ -0,0 +1,762 @@
++/*
++ * Copyright (c) 2006 Michael Niedermayer <michaelni AT gmx.at>
++ * Copyright (c) 2008 Peter Ross
++ *
++ * This file is part of FFmpeg.
++ *
++ * FFmpeg is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU Lesser General Public
++ * License as published by the Free Software Foundation; either
++ * version 2.1 of the License, or (at your option) any later version.
++ *
++ * FFmpeg is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with FFmpeg; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
++ */
++
++#ifndef AVUTIL_CHANNEL_LAYOUT_H
++#define AVUTIL_CHANNEL_LAYOUT_H
++
++#include <stdint.h>
++#include <stdlib.h>
++
++#include "version.h"
++#include "attributes.h"
++
++/**
++ * @file
++ * @ingroup lavu_audio_channels
++ * Public libavutil channel layout APIs header.
++ */
++
++
++/**
++ * @defgroup lavu_audio_channels Audio channels
++ * @ingroup lavu_audio
++ *
++ * Audio channel layout utility functions
++ *
++ * @{
++ */
++
++enum AVChannel {
++ /// Invalid channel index
++ AV_CHAN_NONE = -1,
++ AV_CHAN_FRONT_LEFT,
++ AV_CHAN_FRONT_RIGHT,
++ AV_CHAN_FRONT_CENTER,
++ AV_CHAN_LOW_FREQUENCY,
++ AV_CHAN_BACK_LEFT,
++ AV_CHAN_BACK_RIGHT,
++ AV_CHAN_FRONT_LEFT_OF_CENTER,
++ AV_CHAN_FRONT_RIGHT_OF_CENTER,
++ AV_CHAN_BACK_CENTER,
++ AV_CHAN_SIDE_LEFT,
++ AV_CHAN_SIDE_RIGHT,
++ AV_CHAN_TOP_CENTER,
++ AV_CHAN_TOP_FRONT_LEFT,
++ AV_CHAN_TOP_FRONT_CENTER,
++ AV_CHAN_TOP_FRONT_RIGHT,
++ AV_CHAN_TOP_BACK_LEFT,
++ AV_CHAN_TOP_BACK_CENTER,
++ AV_CHAN_TOP_BACK_RIGHT,
++ /** Stereo downmix. */
++ AV_CHAN_STEREO_LEFT = 29,
++ /** See above. */
++ AV_CHAN_STEREO_RIGHT,
++ AV_CHAN_WIDE_LEFT,
++ AV_CHAN_WIDE_RIGHT,
++ AV_CHAN_SURROUND_DIRECT_LEFT,
++ AV_CHAN_SURROUND_DIRECT_RIGHT,
++ AV_CHAN_LOW_FREQUENCY_2,
++ AV_CHAN_TOP_SIDE_LEFT,
++ AV_CHAN_TOP_SIDE_RIGHT,
++ AV_CHAN_BOTTOM_FRONT_CENTER,
++ AV_CHAN_BOTTOM_FRONT_LEFT,
++ AV_CHAN_BOTTOM_FRONT_RIGHT,
++ AV_CHAN_SIDE_SURROUND_LEFT, ///< +90 degrees, Lss, SiL
++ AV_CHAN_SIDE_SURROUND_RIGHT, ///< -90 degrees, Rss, SiR
++ AV_CHAN_TOP_SURROUND_LEFT, ///< +110 degrees, Lvs, TpLS
++ AV_CHAN_TOP_SURROUND_RIGHT, ///< -110 degrees, Rvs, TpRS
++
++ AV_CHAN_BINAURAL_LEFT = 61,
++ AV_CHAN_BINAURAL_RIGHT,
++
++ /** Channel is empty can be safely skipped. */
++ AV_CHAN_UNUSED = 0x200,
++
++ /** Channel contains data, but its position is unknown. */
++ AV_CHAN_UNKNOWN = 0x300,
++
++ /**
++ * Range of channels between AV_CHAN_AMBISONIC_BASE and
++ * AV_CHAN_AMBISONIC_END represent Ambisonic components using the ACN system.
++ *
++ * Given a channel id `<i>` between AV_CHAN_AMBISONIC_BASE and
++ * AV_CHAN_AMBISONIC_END (inclusive), the ACN index of the channel `<n>` is
++ * `<n> = <i> - AV_CHAN_AMBISONIC_BASE`.
++ *
++ * @note these values are only used for AV_CHANNEL_ORDER_CUSTOM channel
++ * orderings, the AV_CHANNEL_ORDER_AMBISONIC ordering orders the channels
++ * implicitly by their position in the stream.
++ */
++ AV_CHAN_AMBISONIC_BASE = 0x400,
++ // leave space for 1024 ids, which correspond to maximum order-32 harmonics,
++ // which should be enough for the foreseeable use cases
++ AV_CHAN_AMBISONIC_END = 0x7ff,
++};
++
++enum AVChannelOrder {
++ /**
++ * Only the channel count is specified, without any further information
++ * about the channel order.
++ */
++ AV_CHANNEL_ORDER_UNSPEC,
++ /**
++ * The native channel order, i.e. the channels are in the same order in
++ * which they are defined in the AVChannel enum. This supports up to 63
++ * different channels.
++ */
++ AV_CHANNEL_ORDER_NATIVE,
++ /**
++ * The channel order does not correspond to any other predefined order and
++ * is stored as an explicit map. For example, this could be used to support
++ * layouts with 64 or more channels, or with empty/skipped (AV_CHAN_UNUSED)
++ * channels at arbitrary positions.
++ */
++ AV_CHANNEL_ORDER_CUSTOM,
++ /**
++ * The audio is represented as the decomposition of the sound field into
++ * spherical harmonics. Each channel corresponds to a single expansion
++ * component. Channels are ordered according to ACN (Ambisonic Channel
++ * Number).
++ *
++ * The channel with the index n in the stream contains the spherical
++ * harmonic of degree l and order m given by
++ * @code{.unparsed}
++ * l = floor(sqrt(n)),
++ * m = n - l * (l + 1).
++ * @endcode
++ *
++ * Conversely given a spherical harmonic of degree l and order m, the
++ * corresponding channel index n is given by
++ * @code{.unparsed}
++ * n = l * (l + 1) + m.
++ * @endcode
++ *
++ * Normalization is assumed to be SN3D (Schmidt Semi-Normalization)
++ * as defined in AmbiX format $ 2.1.
++ */
++ AV_CHANNEL_ORDER_AMBISONIC,
++ /**
++ * Number of channel orders, not part of ABI/API
++ */
++ FF_CHANNEL_ORDER_NB
++};
++
++
++/**
++ * @defgroup channel_masks Audio channel masks
++ *
++ * A channel layout is a 64-bits integer with a bit set for every channel.
++ * The number of bits set must be equal to the number of channels.
++ * The value 0 means that the channel layout is not known.
++ * @note this data structure is not powerful enough to handle channels
++ * combinations that have the same channel multiple times, such as
++ * dual-mono.
++ *
++ * @{
++ */
++#define AV_CH_FRONT_LEFT (1ULL << AV_CHAN_FRONT_LEFT )
++#define AV_CH_FRONT_RIGHT (1ULL << AV_CHAN_FRONT_RIGHT )
++#define AV_CH_FRONT_CENTER (1ULL << AV_CHAN_FRONT_CENTER )
++#define AV_CH_LOW_FREQUENCY (1ULL << AV_CHAN_LOW_FREQUENCY )
++#define AV_CH_BACK_LEFT (1ULL << AV_CHAN_BACK_LEFT )
++#define AV_CH_BACK_RIGHT (1ULL << AV_CHAN_BACK_RIGHT )
++#define AV_CH_FRONT_LEFT_OF_CENTER (1ULL << AV_CHAN_FRONT_LEFT_OF_CENTER )
++#define AV_CH_FRONT_RIGHT_OF_CENTER (1ULL << AV_CHAN_FRONT_RIGHT_OF_CENTER)
++#define AV_CH_BACK_CENTER (1ULL << AV_CHAN_BACK_CENTER )
++#define AV_CH_SIDE_LEFT (1ULL << AV_CHAN_SIDE_LEFT )
++#define AV_CH_SIDE_RIGHT (1ULL << AV_CHAN_SIDE_RIGHT )
++#define AV_CH_TOP_CENTER (1ULL << AV_CHAN_TOP_CENTER )
++#define AV_CH_TOP_FRONT_LEFT (1ULL << AV_CHAN_TOP_FRONT_LEFT )
++#define AV_CH_TOP_FRONT_CENTER (1ULL << AV_CHAN_TOP_FRONT_CENTER )
++#define AV_CH_TOP_FRONT_RIGHT (1ULL << AV_CHAN_TOP_FRONT_RIGHT )
++#define AV_CH_TOP_BACK_LEFT (1ULL << AV_CHAN_TOP_BACK_LEFT )
++#define AV_CH_TOP_BACK_CENTER (1ULL << AV_CHAN_TOP_BACK_CENTER )
++#define AV_CH_TOP_BACK_RIGHT (1ULL << AV_CHAN_TOP_BACK_RIGHT )
++#define AV_CH_STEREO_LEFT (1ULL << AV_CHAN_STEREO_LEFT )
++#define AV_CH_STEREO_RIGHT (1ULL << AV_CHAN_STEREO_RIGHT )
++#define AV_CH_WIDE_LEFT (1ULL << AV_CHAN_WIDE_LEFT )
++#define AV_CH_WIDE_RIGHT (1ULL << AV_CHAN_WIDE_RIGHT )
++#define AV_CH_SURROUND_DIRECT_LEFT (1ULL << AV_CHAN_SURROUND_DIRECT_LEFT )
++#define AV_CH_SURROUND_DIRECT_RIGHT (1ULL << AV_CHAN_SURROUND_DIRECT_RIGHT)
++#define AV_CH_LOW_FREQUENCY_2 (1ULL << AV_CHAN_LOW_FREQUENCY_2 )
++#define AV_CH_TOP_SIDE_LEFT (1ULL << AV_CHAN_TOP_SIDE_LEFT )
++#define AV_CH_TOP_SIDE_RIGHT (1ULL << AV_CHAN_TOP_SIDE_RIGHT )
++#define AV_CH_BOTTOM_FRONT_CENTER (1ULL << AV_CHAN_BOTTOM_FRONT_CENTER )
++#define AV_CH_BOTTOM_FRONT_LEFT (1ULL << AV_CHAN_BOTTOM_FRONT_LEFT )
++#define AV_CH_BOTTOM_FRONT_RIGHT (1ULL << AV_CHAN_BOTTOM_FRONT_RIGHT )
++#define AV_CH_SIDE_SURROUND_LEFT (1ULL << AV_CHAN_SIDE_SURROUND_LEFT )
++#define AV_CH_SIDE_SURROUND_RIGHT (1ULL << AV_CHAN_SIDE_SURROUND_RIGHT )
++#define AV_CH_TOP_SURROUND_LEFT (1ULL << AV_CHAN_TOP_SURROUND_LEFT )
++#define AV_CH_TOP_SURROUND_RIGHT (1ULL << AV_CHAN_TOP_SURROUND_RIGHT )
++#define AV_CH_BINAURAL_LEFT (1ULL << AV_CHAN_BINAURAL_LEFT )
++#define AV_CH_BINAURAL_RIGHT (1ULL << AV_CHAN_BINAURAL_RIGHT )
++
++/**
++ * @}
++ * @defgroup channel_mask_c Audio channel layouts
++ * @{
++ * */
++#define AV_CH_LAYOUT_MONO (AV_CH_FRONT_CENTER)
++#define AV_CH_LAYOUT_STEREO (AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT)
++#define AV_CH_LAYOUT_2POINT1          (AV_CH_LAYOUT_STEREO|AV_CH_LOW_FREQUENCY)
++#define AV_CH_LAYOUT_2_1              (AV_CH_LAYOUT_STEREO|AV_CH_BACK_CENTER)
++#define AV_CH_LAYOUT_SURROUND         (AV_CH_LAYOUT_STEREO|AV_CH_FRONT_CENTER)
++#define AV_CH_LAYOUT_3POINT1          (AV_CH_LAYOUT_SURROUND|AV_CH_LOW_FREQUENCY)
++#define AV_CH_LAYOUT_4POINT0          (AV_CH_LAYOUT_SURROUND|AV_CH_BACK_CENTER)
++#define AV_CH_LAYOUT_4POINT1          (AV_CH_LAYOUT_4POINT0|AV_CH_LOW_FREQUENCY)
++#define AV_CH_LAYOUT_2_2              (AV_CH_LAYOUT_STEREO|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT)
++#define AV_CH_LAYOUT_QUAD             (AV_CH_LAYOUT_STEREO|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT)
++#define AV_CH_LAYOUT_5POINT0          (AV_CH_LAYOUT_SURROUND|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT)
++#define AV_CH_LAYOUT_5POINT1          (AV_CH_LAYOUT_5POINT0|AV_CH_LOW_FREQUENCY)
++#define AV_CH_LAYOUT_5POINT0_BACK     (AV_CH_LAYOUT_SURROUND|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT)
++#define AV_CH_LAYOUT_5POINT1_BACK     (AV_CH_LAYOUT_5POINT0_BACK|AV_CH_LOW_FREQUENCY)
++#define AV_CH_LAYOUT_6POINT0          (AV_CH_LAYOUT_5POINT0|AV_CH_BACK_CENTER)
++#define AV_CH_LAYOUT_6POINT0_FRONT    (AV_CH_LAYOUT_2_2|AV_CH_FRONT_LEFT_OF_CENTER|AV_CH_FRONT_RIGHT_OF_CENTER)
++#define AV_CH_LAYOUT_HEXAGONAL        (AV_CH_LAYOUT_5POINT0_BACK|AV_CH_BACK_CENTER)
++#define AV_CH_LAYOUT_3POINT1POINT2    (AV_CH_LAYOUT_3POINT1|AV_CH_TOP_FRONT_LEFT|AV_CH_TOP_FRONT_RIGHT)
++#define AV_CH_LAYOUT_6POINT1          (AV_CH_LAYOUT_5POINT1|AV_CH_BACK_CENTER)
++#define AV_CH_LAYOUT_6POINT1_BACK     (AV_CH_LAYOUT_5POINT1_BACK|AV_CH_BACK_CENTER)
++#define AV_CH_LAYOUT_6POINT1_FRONT    (AV_CH_LAYOUT_6POINT0_FRONT|AV_CH_LOW_FREQUENCY)
++#define AV_CH_LAYOUT_7POINT0          (AV_CH_LAYOUT_5POINT0|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT)
++#define AV_CH_LAYOUT_7POINT0_FRONT    (AV_CH_LAYOUT_5POINT0|AV_CH_FRONT_LEFT_OF_CENTER|AV_CH_FRONT_RIGHT_OF_CENTER)
++#define AV_CH_LAYOUT_7POINT1          (AV_CH_LAYOUT_5POINT1|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT)
++#define AV_CH_LAYOUT_7POINT1_WIDE     (AV_CH_LAYOUT_5POINT1|AV_CH_FRONT_LEFT_OF_CENTER|AV_CH_FRONT_RIGHT_OF_CENTER)
++#define AV_CH_LAYOUT_7POINT1_WIDE_BACK (AV_CH_LAYOUT_5POINT1_BACK|AV_CH_FRONT_LEFT_OF_CENTER|AV_CH_FRONT_RIGHT_OF_CENTER)
++#define AV_CH_LAYOUT_5POINT1POINT2    (AV_CH_LAYOUT_5POINT1|AV_CH_TOP_FRONT_LEFT|AV_CH_TOP_FRONT_RIGHT)
++#define AV_CH_LAYOUT_5POINT1POINT2_BACK (AV_CH_LAYOUT_5POINT1_BACK|AV_CH_TOP_FRONT_LEFT|AV_CH_TOP_FRONT_RIGHT)
++#define AV_CH_LAYOUT_OCTAGONAL        (AV_CH_LAYOUT_5POINT0|AV_CH_BACK_LEFT|AV_CH_BACK_CENTER|AV_CH_BACK_RIGHT)
++#define AV_CH_LAYOUT_CUBE             (AV_CH_LAYOUT_QUAD|AV_CH_TOP_FRONT_LEFT|AV_CH_TOP_FRONT_RIGHT|AV_CH_TOP_BACK_LEFT|AV_CH_TOP_BACK_RIGHT)
++#define AV_CH_LAYOUT_5POINT1POINT4_BACK (AV_CH_LAYOUT_5POINT1POINT2|AV_CH_TOP_BACK_LEFT|AV_CH_TOP_BACK_RIGHT)
++#define AV_CH_LAYOUT_7POINT1POINT2    (AV_CH_LAYOUT_7POINT1|AV_CH_TOP_FRONT_LEFT|AV_CH_TOP_FRONT_RIGHT)
++#define AV_CH_LAYOUT_7POINT1POINT4_BACK (AV_CH_LAYOUT_7POINT1POINT2|AV_CH_TOP_BACK_LEFT|AV_CH_TOP_BACK_RIGHT)
++#define AV_CH_LAYOUT_7POINT2POINT3    (AV_CH_LAYOUT_7POINT1POINT2|AV_CH_TOP_BACK_CENTER|AV_CH_LOW_FREQUENCY_2)
++#define AV_CH_LAYOUT_9POINT1POINT4_BACK (AV_CH_LAYOUT_7POINT1POINT4_BACK|AV_CH_FRONT_LEFT_OF_CENTER|AV_CH_FRONT_RIGHT_OF_CENTER)
++#define AV_CH_LAYOUT_9POINT1POINT6    (AV_CH_LAYOUT_9POINT1POINT4_BACK|AV_CH_TOP_SIDE_LEFT|AV_CH_TOP_SIDE_RIGHT)
++#define AV_CH_LAYOUT_HEXADECAGONAL    (AV_CH_LAYOUT_OCTAGONAL|AV_CH_WIDE_LEFT|AV_CH_WIDE_RIGHT|AV_CH_TOP_BACK_LEFT|AV_CH_TOP_BACK_RIGHT|AV_CH_TOP_BACK_CENTER|AV_CH_TOP_FRONT_CENTER|AV_CH_TOP_FRONT_LEFT|AV_CH_TOP_FRONT_RIGHT)
++#define AV_CH_LAYOUT_BINAURAL         (AV_CH_BINAURAL_LEFT|AV_CH_BINAURAL_RIGHT)
++#define AV_CH_LAYOUT_STEREO_DOWNMIX   (AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT)
++#define AV_CH_LAYOUT_22POINT2         (AV_CH_LAYOUT_9POINT1POINT6|AV_CH_BACK_CENTER|AV_CH_LOW_FREQUENCY_2|AV_CH_TOP_FRONT_CENTER|AV_CH_TOP_CENTER|AV_CH_TOP_BACK_CENTER|AV_CH_BOTTOM_FRONT_CENTER|AV_CH_BOTTOM_FRONT_LEFT|AV_CH_BOTTOM_FRONT_RIGHT)
++
++#define AV_CH_LAYOUT_7POINT1_TOP_BACK AV_CH_LAYOUT_5POINT1POINT2_BACK
++
++enum AVMatrixEncoding {
++ AV_MATRIX_ENCODING_NONE,
++ AV_MATRIX_ENCODING_DOLBY,
++ AV_MATRIX_ENCODING_DPLII,
++ AV_MATRIX_ENCODING_DPLIIX,
++ AV_MATRIX_ENCODING_DPLIIZ,
++ AV_MATRIX_ENCODING_DOLBYEX,
++ AV_MATRIX_ENCODING_DOLBYHEADPHONE,
++ AV_MATRIX_ENCODING_NB
++};
++
++/**
++ * @}
++ */
++
++/**
++ * An AVChannelCustom defines a single channel within a custom order layout
++ *
++ * Unlike most structures in FFmpeg, sizeof(AVChannelCustom) is a part of the
++ * public ABI.
++ *
++ * No new fields may be added to it without a major version bump.
++ */
++typedef struct AVChannelCustom {
++ enum AVChannel id;
++ char name[16];
++ void *opaque;
++} AVChannelCustom;
++
++/**
++ * An AVChannelLayout holds information about the channel layout of audio data.
++ *
++ * A channel layout here is defined as a set of channels ordered in a specific
++ * way (unless the channel order is AV_CHANNEL_ORDER_UNSPEC, in which case an
++ * AVChannelLayout carries only the channel count).
++ * All orders may be treated as if they were AV_CHANNEL_ORDER_UNSPEC by
++ * ignoring everything but the channel count, as long as av_channel_layout_check()
++ * considers they are valid.
++ *
++ * Unlike most structures in FFmpeg, sizeof(AVChannelLayout) is a part of the
++ * public ABI and may be used by the caller. E.g. it may be allocated on stack
++ * or embedded in caller-defined structs.
++ *
++ * AVChannelLayout can be initialized as follows:
++ * - default initialization with {0}, followed by setting all used fields
++ * correctly;
++ * - by assigning one of the predefined AV_CHANNEL_LAYOUT_* initializers;
++ * - with a constructor function, such as av_channel_layout_default(),
++ * av_channel_layout_from_mask() or av_channel_layout_from_string().
++ *
++ * The channel layout must be uninitialized with av_channel_layout_uninit()
++ *
++ * Copying an AVChannelLayout via assigning is forbidden,
++ * av_channel_layout_copy() must be used instead (and its return value should
++ * be checked)
++ *
++ * No new fields may be added to it without a major version bump, except for
++ * new elements of the union fitting in sizeof(uint64_t).
++ */
++typedef struct AVChannelLayout {
++ /**
++ * Channel order used in this layout.
++ * This is a mandatory field.
++ */
++ enum AVChannelOrder order;
++
++ /**
++ * Number of channels in this layout. Mandatory field.
++ */
++ int nb_channels;
++
++ /**
++ * Details about which channels are present in this layout.
++ * For AV_CHANNEL_ORDER_UNSPEC, this field is undefined and must not be
++ * used.
++ */
++ union {
++ /**
++         * This member must be used for AV_CHANNEL_ORDER_NATIVE, and may be used
++         * for AV_CHANNEL_ORDER_AMBISONIC to signal non-diegetic channels.
++         * It is a bitmask, where the position of each set bit means that the
++         * AVChannel with the corresponding value is present.
++         *
++         * I.e. when (mask & (1 << AV_CHAN_FOO)) is non-zero, then AV_CHAN_FOO
++         * is present in the layout. Otherwise it is not present.
++         *
++         * @note when a channel layout using a bitmask is constructed or
++         * modified manually (i.e. not using any of the av_channel_layout_*
++         * functions), the code doing it must ensure that the number of set bits
++         * is equal to nb_channels.
++         */
++        uint64_t mask;
++        /**
++         * This member must be used when the channel order is
++         * AV_CHANNEL_ORDER_CUSTOM. It is a nb_channels-sized array, with each
++         * element signalling the presence of the AVChannel with the
++         * corresponding value in map[i].id.
++         *
++         * I.e. when map[i].id is equal to AV_CHAN_FOO, then AV_CH_FOO is the
++         * i-th channel in the audio data.
++         *
++         * When map[i].id is in the range between AV_CHAN_AMBISONIC_BASE and
++         * AV_CHAN_AMBISONIC_END (inclusive), the channel contains an ambisonic
++         * component with ACN index (as defined above)
++         * n = map[i].id - AV_CHAN_AMBISONIC_BASE.
++         *
++         * map[i].name may be filled with a 0-terminated string, in which case
++         * it will be used for the purpose of identifying the channel with the
++         * convenience functions below. Otherwise it must be zeroed.
++ */
++ AVChannelCustom *map;
++ } u;
++
++ /**
++ * For some private data of the user.
++ */
++ void *opaque;
++} AVChannelLayout;
++
++/**
++ * Macro to define native channel layouts
++ *
++ * @note This doesn't use designated initializers for compatibility with C++ 17 and older.
++ */
++#define AV_CHANNEL_LAYOUT_MASK(nb, m) \
++ { /* .order */ AV_CHANNEL_ORDER_NATIVE, \
++ /* .nb_channels */ (nb), \
++ /* .u.mask */ { m }, \
++ /* .opaque */ NULL }
++
++/**
++ * @name Common pre-defined channel layouts
++ * @{
++ */
++#define AV_CHANNEL_LAYOUT_MONO                AV_CHANNEL_LAYOUT_MASK(1,  AV_CH_LAYOUT_MONO)
++#define AV_CHANNEL_LAYOUT_STEREO              AV_CHANNEL_LAYOUT_MASK(2,  AV_CH_LAYOUT_STEREO)
++#define AV_CHANNEL_LAYOUT_2POINT1             AV_CHANNEL_LAYOUT_MASK(3,  AV_CH_LAYOUT_2POINT1)
++#define AV_CHANNEL_LAYOUT_2_1                 AV_CHANNEL_LAYOUT_MASK(3,  AV_CH_LAYOUT_2_1)
++#define AV_CHANNEL_LAYOUT_SURROUND            AV_CHANNEL_LAYOUT_MASK(3,  AV_CH_LAYOUT_SURROUND)
++#define AV_CHANNEL_LAYOUT_3POINT1             AV_CHANNEL_LAYOUT_MASK(4,  AV_CH_LAYOUT_3POINT1)
++#define AV_CHANNEL_LAYOUT_4POINT0             AV_CHANNEL_LAYOUT_MASK(4,  AV_CH_LAYOUT_4POINT0)
++#define AV_CHANNEL_LAYOUT_4POINT1             AV_CHANNEL_LAYOUT_MASK(5,  AV_CH_LAYOUT_4POINT1)
++#define AV_CHANNEL_LAYOUT_2_2                 AV_CHANNEL_LAYOUT_MASK(4,  AV_CH_LAYOUT_2_2)
++#define AV_CHANNEL_LAYOUT_QUAD                AV_CHANNEL_LAYOUT_MASK(4,  AV_CH_LAYOUT_QUAD)
++#define AV_CHANNEL_LAYOUT_5POINT0             AV_CHANNEL_LAYOUT_MASK(5,  AV_CH_LAYOUT_5POINT0)
++#define AV_CHANNEL_LAYOUT_5POINT1             AV_CHANNEL_LAYOUT_MASK(6,  AV_CH_LAYOUT_5POINT1)
++#define AV_CHANNEL_LAYOUT_5POINT0_BACK        AV_CHANNEL_LAYOUT_MASK(5,  AV_CH_LAYOUT_5POINT0_BACK)
++#define AV_CHANNEL_LAYOUT_5POINT1_BACK        AV_CHANNEL_LAYOUT_MASK(6,  AV_CH_LAYOUT_5POINT1_BACK)
++#define AV_CHANNEL_LAYOUT_6POINT0             AV_CHANNEL_LAYOUT_MASK(6,  AV_CH_LAYOUT_6POINT0)
++#define AV_CHANNEL_LAYOUT_6POINT0_FRONT       AV_CHANNEL_LAYOUT_MASK(6,  AV_CH_LAYOUT_6POINT0_FRONT)
++#define AV_CHANNEL_LAYOUT_3POINT1POINT2       AV_CHANNEL_LAYOUT_MASK(6,  AV_CH_LAYOUT_3POINT1POINT2)
++#define AV_CHANNEL_LAYOUT_HEXAGONAL           AV_CHANNEL_LAYOUT_MASK(6,  AV_CH_LAYOUT_HEXAGONAL)
++#define AV_CHANNEL_LAYOUT_6POINT1             AV_CHANNEL_LAYOUT_MASK(7,  AV_CH_LAYOUT_6POINT1)
++#define AV_CHANNEL_LAYOUT_6POINT1_BACK        AV_CHANNEL_LAYOUT_MASK(7,  AV_CH_LAYOUT_6POINT1_BACK)
++#define AV_CHANNEL_LAYOUT_6POINT1_FRONT       AV_CHANNEL_LAYOUT_MASK(7,  AV_CH_LAYOUT_6POINT1_FRONT)
++#define AV_CHANNEL_LAYOUT_7POINT0             AV_CHANNEL_LAYOUT_MASK(7,  AV_CH_LAYOUT_7POINT0)
++#define AV_CHANNEL_LAYOUT_7POINT0_FRONT       AV_CHANNEL_LAYOUT_MASK(7,  AV_CH_LAYOUT_7POINT0_FRONT)
++#define AV_CHANNEL_LAYOUT_7POINT1             AV_CHANNEL_LAYOUT_MASK(8,  AV_CH_LAYOUT_7POINT1)
++#define AV_CHANNEL_LAYOUT_7POINT1_WIDE        AV_CHANNEL_LAYOUT_MASK(8,  AV_CH_LAYOUT_7POINT1_WIDE)
++#define AV_CHANNEL_LAYOUT_7POINT1_WIDE_BACK   AV_CHANNEL_LAYOUT_MASK(8,  AV_CH_LAYOUT_7POINT1_WIDE_BACK)
++#define AV_CHANNEL_LAYOUT_5POINT1POINT2       AV_CHANNEL_LAYOUT_MASK(8,  AV_CH_LAYOUT_5POINT1POINT2)
++#define AV_CHANNEL_LAYOUT_5POINT1POINT2_BACK  AV_CHANNEL_LAYOUT_MASK(8,  AV_CH_LAYOUT_5POINT1POINT2_BACK)
++#define AV_CHANNEL_LAYOUT_OCTAGONAL           AV_CHANNEL_LAYOUT_MASK(8,  AV_CH_LAYOUT_OCTAGONAL)
++#define AV_CHANNEL_LAYOUT_CUBE                AV_CHANNEL_LAYOUT_MASK(8,  AV_CH_LAYOUT_CUBE)
++#define AV_CHANNEL_LAYOUT_5POINT1POINT4_BACK  AV_CHANNEL_LAYOUT_MASK(10, AV_CH_LAYOUT_5POINT1POINT4_BACK)
++#define AV_CHANNEL_LAYOUT_7POINT1POINT2       AV_CHANNEL_LAYOUT_MASK(10, AV_CH_LAYOUT_7POINT1POINT2)
++#define AV_CHANNEL_LAYOUT_7POINT1POINT4_BACK  AV_CHANNEL_LAYOUT_MASK(12, AV_CH_LAYOUT_7POINT1POINT4_BACK)
++#define AV_CHANNEL_LAYOUT_7POINT2POINT3       AV_CHANNEL_LAYOUT_MASK(12, AV_CH_LAYOUT_7POINT2POINT3)
++#define AV_CHANNEL_LAYOUT_9POINT1POINT4_BACK  AV_CHANNEL_LAYOUT_MASK(14, AV_CH_LAYOUT_9POINT1POINT4_BACK)
++#define AV_CHANNEL_LAYOUT_9POINT1POINT6       AV_CHANNEL_LAYOUT_MASK(16, AV_CH_LAYOUT_9POINT1POINT6)
++#define AV_CHANNEL_LAYOUT_HEXADECAGONAL       AV_CHANNEL_LAYOUT_MASK(16, AV_CH_LAYOUT_HEXADECAGONAL)
++#define AV_CHANNEL_LAYOUT_BINAURAL            AV_CHANNEL_LAYOUT_MASK(2,  AV_CH_LAYOUT_BINAURAL)
++#define AV_CHANNEL_LAYOUT_STEREO_DOWNMIX      AV_CHANNEL_LAYOUT_MASK(2,  AV_CH_LAYOUT_STEREO_DOWNMIX)
++#define AV_CHANNEL_LAYOUT_22POINT2            AV_CHANNEL_LAYOUT_MASK(24, AV_CH_LAYOUT_22POINT2)
++
++#define AV_CHANNEL_LAYOUT_7POINT1_TOP_BACK    AV_CHANNEL_LAYOUT_5POINT1POINT2_BACK
++
++#define AV_CHANNEL_LAYOUT_AMBISONIC_FIRST_ORDER \
++ { /* .order */ AV_CHANNEL_ORDER_AMBISONIC, \
++ /* .nb_channels */ 4, \
++ /* .u.mask */ { 0 }, \
++ /* .opaque */ NULL }
++/** @} */
++
++struct AVBPrint;
++
++/**
++ * Get a human readable string in an abbreviated form describing a given channel.
++ * This is the inverse function of @ref av_channel_from_string().
++ *
++ * @param buf pre-allocated buffer where to put the generated string
++ * @param buf_size size in bytes of the buffer.
++ * @param channel the AVChannel whose name to get
++ * @return amount of bytes needed to hold the output string, or a negative AVERROR
++ * on failure. If the returned value is bigger than buf_size, then the
++ * string was truncated.
++ */
++int av_channel_name(char *buf, size_t buf_size, enum AVChannel channel);
++
++/**
++ * bprint variant of av_channel_name().
++ *
++ * @note the string will be appended to the bprint buffer.
++ */
++void av_channel_name_bprint(struct AVBPrint *bp, enum AVChannel channel_id);
++
++/**
++ * Get a human readable string describing a given channel.
++ *
++ * @param buf pre-allocated buffer where to put the generated string
++ * @param buf_size size in bytes of the buffer.
++ * @param channel the AVChannel whose description to get
++ * @return amount of bytes needed to hold the output string, or a negative AVERROR
++ * on failure. If the returned value is bigger than buf_size, then the
++ * string was truncated.
++ */
++int av_channel_description(char *buf, size_t buf_size, enum AVChannel channel);
++
++/**
++ * bprint variant of av_channel_description().
++ *
++ * @note the string will be appended to the bprint buffer.
++ */
++void av_channel_description_bprint(struct AVBPrint *bp, enum AVChannel channel_id);
++
++/**
++ * This is the inverse function of @ref av_channel_name().
++ *
++ * @return the channel with the given name
++ * AV_CHAN_NONE when name does not identify a known channel
++ */
++enum AVChannel av_channel_from_string(const char *name);
++
++/**
++ * Initialize a custom channel layout with the specified number of channels.
++ * The channel map will be allocated and the designation of all channels will
++ * be set to AV_CHAN_UNKNOWN.
++ *
++ * This is only a convenience helper function, a custom channel layout can also
++ * be constructed without using this.
++ *
++ * @param channel_layout the layout structure to be initialized
++ * @param nb_channels the number of channels
++ *
++ * @return 0 on success
++ * AVERROR(EINVAL) if the number of channels <= 0
++ * AVERROR(ENOMEM) if the channel map could not be allocated
++ */
++int av_channel_layout_custom_init(AVChannelLayout *channel_layout, int nb_channels);
++
++/**
++ * Initialize a native channel layout from a bitmask indicating which channels
++ * are present.
++ *
++ * @param channel_layout the layout structure to be initialized
++ * @param mask bitmask describing the channel layout
++ *
++ * @return 0 on success
++ * AVERROR(EINVAL) for invalid mask values
++ */
++int av_channel_layout_from_mask(AVChannelLayout *channel_layout, uint64_t mask);
++
++/**
++ * Initialize a channel layout from a given string description.
++ * The input string can be represented by:
++ * - the formal channel layout name (returned by av_channel_layout_describe())
++ * - single or multiple channel names (returned by av_channel_name(), eg. "FL",
++ * or concatenated with "+", each optionally containing a custom name after
++ * a "@", eg. "FL@Left+FR@Right+LFE")
++ * - a decimal or hexadecimal value of a native channel layout (eg. "4" or "0x4")
++ * - the number of channels with default layout (eg. "4c")
++ * - the number of unordered channels (eg. "4C" or "4 channels")
++ * - the ambisonic order followed by optional non-diegetic channels (eg.
++ * "ambisonic 2+stereo")
++ * On error, the channel layout will remain uninitialized, but not necessarily
++ * untouched.
++ *
++ * @param channel_layout uninitialized channel layout for the result
++ * @param str string describing the channel layout
++ * @return 0 on success parsing the channel layout
++ * AVERROR(EINVAL) if an invalid channel layout string was provided
++ * AVERROR(ENOMEM) if there was not enough memory
++ */
++int av_channel_layout_from_string(AVChannelLayout *channel_layout,
++ const char *str);
++
++/**
++ * Get the default channel layout for a given number of channels.
++ *
++ * @param ch_layout the layout structure to be initialized
++ * @param nb_channels number of channels
++ */
++void av_channel_layout_default(AVChannelLayout *ch_layout, int nb_channels);
++
++/**
++ * Iterate over all standard channel layouts.
++ *
++ * @param opaque a pointer where libavutil will store the iteration state. Must
++ * point to NULL to start the iteration.
++ *
++ * @return the standard channel layout or NULL when the iteration is
++ * finished
++ */
++const AVChannelLayout *av_channel_layout_standard(void **opaque);
++
++/**
++ * Free any allocated data in the channel layout and reset the channel
++ * count to 0.
++ *
++ * @param channel_layout the layout structure to be uninitialized
++ */
++void av_channel_layout_uninit(AVChannelLayout *channel_layout);
++
++/**
++ * Make a copy of a channel layout. This differs from just assigning src to dst
++ * in that it allocates and copies the map for AV_CHANNEL_ORDER_CUSTOM.
++ *
++ * @note the destination channel_layout will be always uninitialized before copy.
++ *
++ * @param dst destination channel layout
++ * @param src source channel layout
++ * @return 0 on success, a negative AVERROR on error.
++ */
++int av_channel_layout_copy(AVChannelLayout *dst, const AVChannelLayout *src);
++
++/**
++ * Get a human-readable string describing the channel layout properties.
++ * The string will be in the same format that is accepted by
++ * @ref av_channel_layout_from_string(), allowing to rebuild the same
++ * channel layout, except for opaque pointers.
++ *
++ * @param channel_layout channel layout to be described
++ * @param buf pre-allocated buffer where to put the generated string
++ * @param buf_size size in bytes of the buffer.
++ * @return amount of bytes needed to hold the output string, or a negative AVERROR
++ * on failure. If the returned value is bigger than buf_size, then the
++ * string was truncated.
++ */
++int av_channel_layout_describe(const AVChannelLayout *channel_layout,
++ char *buf, size_t buf_size);
++
++/**
++ * bprint variant of av_channel_layout_describe().
++ *
++ * @note the string will be appended to the bprint buffer.
++ * @return 0 on success, or a negative AVERROR value on failure.
++ */
++int av_channel_layout_describe_bprint(const AVChannelLayout *channel_layout,
++ struct AVBPrint *bp);
++
++/**
++ * Get the channel with the given index in a channel layout.
++ *
++ * @param channel_layout input channel layout
++ * @param idx index of the channel
++ * @return channel with the index idx in channel_layout on success or
++ * AV_CHAN_NONE on failure (if idx is not valid or the channel order is
++ * unspecified)
++ */
++enum AVChannel
++av_channel_layout_channel_from_index(const AVChannelLayout *channel_layout, unsigned int idx);
++
++/**
++ * Get the index of a given channel in a channel layout. In case multiple
++ * channels are found, only the first match will be returned.
++ *
++ * @param channel_layout input channel layout
++ * @param channel the channel whose index to obtain
++ * @return index of channel in channel_layout on success or a negative number if
++ * channel is not present in channel_layout.
++ */
++int av_channel_layout_index_from_channel(const AVChannelLayout *channel_layout,
++                                         enum AVChannel channel);
++
++/**
++ * Get the index in a channel layout of a channel described by the given string.
++ * In case multiple channels are found, only the first match will be returned.
++ *
++ * This function accepts channel names in the same format as
++ * @ref av_channel_from_string().
++ *
++ * @param channel_layout input channel layout
++ * @param name string describing the channel whose index to obtain
++ * @return a channel index described by the given string, or a negative AVERROR
++ * value.
++ */
++int av_channel_layout_index_from_string(const AVChannelLayout *channel_layout,
++                                        const char *name);
++
++/**
++ * Get a channel described by the given string.
++ *
++ * This function accepts channel names in the same format as
++ * @ref av_channel_from_string().
++ *
++ * @param channel_layout input channel layout
++ * @param name string describing the channel to obtain
++ * @return a channel described by the given string in channel_layout on success
++ * or AV_CHAN_NONE on failure (if the string is not valid or the channel
++ * order is unspecified)
++ */
++enum AVChannel
++av_channel_layout_channel_from_string(const AVChannelLayout *channel_layout,
++ const char *name);
++
++/**
++ * Find out what channels from a given set are present in a channel layout,
++ * without regard for their positions.
++ *
++ * @param channel_layout input channel layout
++ * @param mask a combination of AV_CH_* representing a set of channels
++ * @return a bitfield representing all the channels from mask that are present
++ * in channel_layout
++ */
++uint64_t av_channel_layout_subset(const AVChannelLayout *channel_layout,
++ uint64_t mask);
++
++/**
++ * Check whether a channel layout is valid, i.e. can possibly describe audio
++ * data.
++ *
++ * @param channel_layout input channel layout
++ * @return 1 if channel_layout is valid, 0 otherwise.
++ */
++int av_channel_layout_check(const AVChannelLayout *channel_layout);
++
++/**
++ * Check whether two channel layouts are semantically the same, i.e. the same
++ * channels are present on the same positions in both.
++ *
++ * If one of the channel layouts is AV_CHANNEL_ORDER_UNSPEC, while the other is
++ * not, they are considered to be unequal. If both are AV_CHANNEL_ORDER_UNSPEC,
++ * they are considered equal iff the channel counts are the same in both.
++ *
++ * @param chl input channel layout
++ * @param chl1 input channel layout
++ * @return 0 if chl and chl1 are equal, 1 if they are not equal. A negative
++ * AVERROR code if one or both are invalid.
++ */
++int av_channel_layout_compare(const AVChannelLayout *chl, const AVChannelLayout *chl1);
++
++/**
++ * Return the order if the layout is n-th order standard-order ambisonic.
++ * The presence of optional extra non-diegetic channels at the end is not taken
++ * into account.
++ *
++ * @param channel_layout input channel layout
++ * @return the order of the layout, a negative error code otherwise.
++ */
++int av_channel_layout_ambisonic_order(const AVChannelLayout *channel_layout);
++
++/**
++ * The conversion must be lossless.
++ */
++#define AV_CHANNEL_LAYOUT_RETYPE_FLAG_LOSSLESS (1 << 0)
++
++/**
++ * The specified retype target order is ignored and the simplest possible
++ * (canonical) order is used for which the input layout can be losslessly
++ * represented.
++ */
++#define AV_CHANNEL_LAYOUT_RETYPE_FLAG_CANONICAL (1 << 1)
++
++/**
++ * Change the AVChannelOrder of a channel layout.
++ *
++ * Change of AVChannelOrder can be either lossless or lossy. In case of a
++ * lossless conversion all the channel designations and the associated channel
++ * names (if any) are kept. On a lossy conversion the channel names and channel
++ * designations might be lost depending on the capabilities of the desired
++ * AVChannelOrder. Note that some conversions are simply not possible in which
++ * case this function returns AVERROR(ENOSYS).
++ *
++ * The following conversions are supported:
++ *
++ * Any -> Custom : Always possible, always lossless.
++ * Any -> Unspecified: Always possible, lossless if channel designations
++ * are all unknown and channel names are not used, lossy otherwise.
++ * Custom -> Ambisonic : Possible if it contains ambisonic channels with
++ * optional non-diegetic channels in the end. Lossy if the channels have
++ * custom names, lossless otherwise.
++ * Custom -> Native : Possible if it contains native channels in native
++ * order. Lossy if the channels have custom names, lossless otherwise.
++ *
++ * On error this function keeps the original channel layout untouched.
++ *
++ * @param channel_layout channel layout which will be changed
++ * @param order the desired channel layout order
++ * @param flags a combination of AV_CHANNEL_LAYOUT_RETYPE_FLAG_* constants
++ * @return 0 if the conversion was successful and lossless or if the channel
++ * layout was already in the desired order
++ * >0 if the conversion was successful but lossy
++ * AVERROR(ENOSYS) if the conversion was not possible (or would be
++ * lossy and AV_CHANNEL_LAYOUT_RETYPE_FLAG_LOSSLESS was specified)
++ * AVERROR(EINVAL), AVERROR(ENOMEM) on error
++ */
++int av_channel_layout_retype(AVChannelLayout *channel_layout, enum AVChannelOrder order, int flags);
++
++/**
++ * @}
++ */
++
++#endif /* AVUTIL_CHANNEL_LAYOUT_H */
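
For orientation only (not part of the patch): a minimal usage sketch tying the
pieces of the vendored channel_layout.h above together. It assumes the header
is reachable as <libavutil/channel_layout.h> and uses only functions declared
above; AV_CHAN_FRONT_CENTER comes from the enum earlier in the same header.

````
/* Sketch only: build a native 5.1 layout, print its canonical name,
 * and look up where the front-center channel sits in it. */
#include <stdio.h>
#include <libavutil/channel_layout.h>

int main(void)
{
    AVChannelLayout layout = { 0 };     /* default initialization, as documented */
    char buf[64];

    if (av_channel_layout_from_mask(&layout, AV_CH_LAYOUT_5POINT1) < 0)
        return 1;

    av_channel_layout_describe(&layout, buf, sizeof(buf));
    printf("layout: %s (%d channels)\n", buf, layout.nb_channels);

    printf("FC index: %d\n",
           av_channel_layout_index_from_channel(&layout, AV_CHAN_FRONT_CENTER));

    av_channel_layout_uninit(&layout);  /* mandatory for custom maps, harmless here */
    return 0;
}
````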
+diff --git a/media/ffvpx/libavutil/common.h b/dom/media/platforms/ffmpeg/ffmpeg62/include/libavutil/common.h
+copy from media/ffvpx/libavutil/common.h
+copy to dom/media/platforms/ffmpeg/ffmpeg62/include/libavutil/common.h
+diff --git a/media/ffvpx/libavutil/cpu.h b/dom/media/platforms/ffmpeg/ffmpeg62/include/libavutil/cpu.h
+copy from media/ffvpx/libavutil/cpu.h
+copy to dom/media/platforms/ffmpeg/ffmpeg62/include/libavutil/cpu.h
+diff --git a/dom/media/platforms/ffmpeg/ffmpeg62/include/libavutil/dict.h b/dom/media/platforms/ffmpeg/ffmpeg62/include/libavutil/dict.h
+new file mode 100644
+--- /dev/null
++++ b/dom/media/platforms/ffmpeg/ffmpeg62/include/libavutil/dict.h
+@@ -0,0 +1,242 @@
++/*
++ * This file is part of FFmpeg.
++ *
++ * FFmpeg is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU Lesser General Public
++ * License as published by the Free Software Foundation; either
++ * version 2.1 of the License, or (at your option) any later version.
++ *
++ * FFmpeg is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with FFmpeg; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
++ */
++
++/**
++ * @file
++ * Public dictionary API.
++ * @deprecated
++ * AVDictionary is provided for compatibility with libav. It is both in
++ * implementation as well as API inefficient. It does not scale and is
++ * extremely slow with large dictionaries.
++ * It is recommended that new code uses our tree container from tree.c/h
++ * where applicable, which uses AVL trees to achieve O(log n) performance.
++ */
++
++#ifndef AVUTIL_DICT_H
++#define AVUTIL_DICT_H
++
++#include <stdint.h>
++
++/**
++ * @addtogroup lavu_dict AVDictionary
++ * @ingroup lavu_data
++ *
++ * @brief Simple key:value store
++ *
++ * @{
++ * Dictionaries are used for storing key-value pairs.
++ *
++ * - To **create an AVDictionary**, simply pass an address of a NULL
++ * pointer to av_dict_set(). NULL can be used as an empty dictionary
++ * wherever a pointer to an AVDictionary is required.
++ * - To **insert an entry**, use av_dict_set().
++ * - Use av_dict_get() to **retrieve an entry**.
++ * - To **iterate over all entries**, use av_dict_iterate().
++ * - In order to **free the dictionary and all its contents**, use av_dict_free().
++ *
++ @code
++ AVDictionary *d = NULL; // "create" an empty dictionary
++ AVDictionaryEntry *t = NULL;
++
++ av_dict_set(&d, "foo", "bar", 0); // add an entry
++
++ char *k = av_strdup("key"); // if your strings are already allocated,
++ char *v = av_strdup("value"); // you can avoid copying them like this
++ av_dict_set(&d, k, v, AV_DICT_DONT_STRDUP_KEY | AV_DICT_DONT_STRDUP_VAL);
++
++ while ((t = av_dict_iterate(d, t))) {
++ <....> // iterate over all entries in d
++ }
++ av_dict_free(&d);
++ @endcode
++ */
++
++/**
++ * @name AVDictionary Flags
++ * Flags that influence behavior of the matching of keys or insertion to the dictionary.
++ * @{
++ */
++#define AV_DICT_MATCH_CASE      1   /**< Only get an entry with exact-case key match. Only relevant in av_dict_get(). */
++#define AV_DICT_IGNORE_SUFFIX   2   /**< Return first entry in a dictionary whose first part corresponds to the search key,
++                                         ignoring the suffix of the found key string. Only relevant in av_dict_get(). */
++#define AV_DICT_DONT_STRDUP_KEY 4   /**< Take ownership of a key that's been
++                                         allocated with av_malloc() or another memory allocation function. */
++#define AV_DICT_DONT_STRDUP_VAL 8   /**< Take ownership of a value that's been
++                                         allocated with av_malloc() or another memory allocation function. */
++#define AV_DICT_DONT_OVERWRITE 16   /**< Don't overwrite existing entries. */
++#define AV_DICT_APPEND         32   /**< If the entry already exists, append to it. Note that no
++                                         delimiter is added, the strings are simply concatenated. */
++#define AV_DICT_MULTIKEY       64   /**< Allow to store several equal keys in the dictionary */
++#define AV_DICT_DEDUP         128   /**< If inserting a value that already exists for a key, do nothing. Only relevant with AV_DICT_MULTIKEY. */
++/**
++ * @}
++ */
++
++typedef struct AVDictionaryEntry {
++ char *key;
++ char *value;
++} AVDictionaryEntry;
++
++typedef struct AVDictionary AVDictionary;
++
++/**
++ * Get a dictionary entry with matching key.
++ *
++ * The returned entry key or value must not be changed, or it will
++ * cause undefined behavior.
++ *
++ * @param prev Set to the previous matching element to find the next.
++ * If set to NULL the first matching element is returned.
++ * @param key Matching key
++ * @param flags A collection of AV_DICT_* flags controlling how the
++ * entry is retrieved
++ *
++ * @return Found entry or NULL in case no matching entry was found in the dictionary
++ */
++AVDictionaryEntry *av_dict_get(const AVDictionary *m, const char *key,
++ const AVDictionaryEntry *prev, int flags);
++
++/**
++ * Iterate over a dictionary
++ *
++ * Iterates through all entries in the dictionary.
++ *
++ * @warning The returned AVDictionaryEntry key/value must not be changed.
++ *
++ * @warning As av_dict_set() invalidates all previous entries returned
++ * by this function, it must not be called while iterating over the dict.
++ *
++ * Typical usage:
++ * @code
++ * const AVDictionaryEntry *e = NULL;
++ * while ((e = av_dict_iterate(m, e))) {
++ * // ...
++ * }
++ * @endcode
++ *
++ * @param m The dictionary to iterate over
++ * @param prev Pointer to the previous AVDictionaryEntry, NULL initially
++ *
++ * @retval AVDictionaryEntry* The next element in the dictionary
++ * @retval NULL No more elements in the dictionary
++ */
++const AVDictionaryEntry *av_dict_iterate(const AVDictionary *m,
++ const AVDictionaryEntry *prev);
++
++/**
++ * Get number of entries in dictionary.
++ *
++ * @param m dictionary
++ * @return number of entries in dictionary
++ */
++int av_dict_count(const AVDictionary *m);
++
++/**
++ * Set the given entry in *pm, overwriting an existing entry.
++ *
++ * Note: If AV_DICT_DONT_STRDUP_KEY or AV_DICT_DONT_STRDUP_VAL is set,
++ * these arguments will be freed on error.
++ *
++ * @warning Adding a new entry to a dictionary invalidates all existing entries
++ * previously returned with av_dict_get() or av_dict_iterate().
++ *
++ * @param pm Pointer to a pointer to a dictionary struct. If *pm is NULL
++ * a dictionary struct is allocated and put in *pm.
++ * @param key Entry key to add to *pm (will either be av_strduped or added as a new key depending on flags)
++ * @param value Entry value to add to *pm (will be av_strduped or added as a new key depending on flags).
++ * Passing a NULL value will cause an existing entry to be deleted.
++ *
++ * @return >= 0 on success otherwise an error code <0
++ */
++int av_dict_set(AVDictionary **pm, const char *key, const char *value, int flags);
++
++/**
++ * Convenience wrapper for av_dict_set() that converts the value to a string
++ * and stores it.
++ *
++ * Note: If ::AV_DICT_DONT_STRDUP_KEY is set, key will be freed on error.
++ */
++int av_dict_set_int(AVDictionary **pm, const char *key, int64_t value, int flags);
++
++/**
++ * Parse the key/value pairs list and add the parsed entries to a dictionary.
++ *
++ * In case of failure, all the successfully set entries are stored in
++ * *pm. You may need to manually free the created dictionary.
++ *
++ * @param key_val_sep A 0-terminated list of characters used to separate
++ * key from value
++ * @param pairs_sep A 0-terminated list of characters used to separate
++ * two pairs from each other
++ * @param flags Flags to use when adding to the dictionary.
++ * ::AV_DICT_DONT_STRDUP_KEY and ::AV_DICT_DONT_STRDUP_VAL
++ * are ignored since the key/value tokens will always
++ * be duplicated.
++ *
++ * @return 0 on success, negative AVERROR code on failure
++ */
++int av_dict_parse_string(AVDictionary **pm, const char *str,
++ const char *key_val_sep, const char *pairs_sep,
++ int flags);
++
++/**
++ * Copy entries from one AVDictionary struct into another.
++ *
++ * @note Metadata is read using the ::AV_DICT_IGNORE_SUFFIX flag
++ *
++ * @param dst Pointer to a pointer to a AVDictionary struct to copy into. If *dst is NULL,
++ * this function will allocate a struct for you and put it in *dst
++ * @param src Pointer to the source AVDictionary struct to copy items from.
++ * @param flags Flags to use when setting entries in *dst
++ *
++ * @return 0 on success, negative AVERROR code on failure. If dst was allocated
++ * by this function, callers should free the associated memory.
++ */
++int av_dict_copy(AVDictionary **dst, const AVDictionary *src, int flags);
++
++/**
++ * Free all the memory allocated for an AVDictionary struct
++ * and all keys and values.
++ */
++void av_dict_free(AVDictionary **m);
++
++/**
++ * Get dictionary entries as a string.
++ *
++ * Create a string containing dictionary's entries.
++ * Such string may be passed back to av_dict_parse_string().
++ * @note String is escaped with backslashes ('\').
++ *
++ * @warning Separators cannot be neither '\\' nor '\0'. They also cannot be the same.
++ *
++ * @param[in] m The dictionary
++ * @param[out] buffer Pointer to buffer that will be allocated with string containing entries.
++ * Buffer must be freed by the caller when is no longer needed.
++ * @param[in] key_val_sep Character used to separate key from value
++ * @param[in] pairs_sep Character used to separate two pairs from each other
++ *
++ * @return >= 0 on success, negative on error
++ */
++int av_dict_get_string(const AVDictionary *m, char **buffer,
++ const char key_val_sep, const char pairs_sep);
++
++/**
++ * @}
++ */
++
++#endif /* AVUTIL_DICT_H */
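
Again purely for orientation (not part of the patch): a small round trip
through the dictionary API declared above. av_freep() is assumed to come from
libavutil/mem.h, which is not shown in this hunk.

````
/* Sketch only: parse "key=value" pairs, add one more entry,
 * then serialize the dictionary back to a string. */
#include <stdio.h>
#include <libavutil/dict.h>
#include <libavutil/mem.h>

int main(void)
{
    AVDictionary *opts = NULL;
    char *serialized = NULL;

    /* '=' separates key from value, ':' separates pairs */
    if (av_dict_parse_string(&opts, "threads=4:preset=fast", "=", ":", 0) < 0)
        return 1;

    av_dict_set(&opts, "crf", "23", 0);
    printf("%d entries\n", av_dict_count(opts));

    if (av_dict_get_string(opts, &serialized, '=', ':') >= 0)
        printf("serialized: %s\n", serialized);

    av_freep(&serialized);
    av_dict_free(&opts);
    return 0;
}
````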
+diff --git a/media/ffvpx/libavutil/error.h b/dom/media/platforms/ffmpeg/ffmpeg62/include/libavutil/error.h
+copy from media/ffvpx/libavutil/error.h
+copy to dom/media/platforms/ffmpeg/ffmpeg62/include/libavutil/error.h
+diff --git a/dom/media/platforms/ffmpeg/ffmpeg62/include/libavutil/frame.h b/dom/media/platforms/ffmpeg/ffmpeg62/include/libavutil/frame.h
+new file mode 100644
+--- /dev/null
++++ b/dom/media/platforms/ffmpeg/ffmpeg62/include/libavutil/frame.h
+@@ -0,0 +1,1163 @@
++/*
++ * This file is part of FFmpeg.
++ *
++ * FFmpeg is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU Lesser General Public
++ * License as published by the Free Software Foundation; either
++ * version 2.1 of the License, or (at your option) any later version.
++ *
++ * FFmpeg is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with FFmpeg; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
++ */
++
++/**
++ * @file
++ * @ingroup lavu_frame
++ * reference-counted frame API
++ */
++
++#ifndef AVUTIL_FRAME_H
++#define AVUTIL_FRAME_H
++
++#include <stddef.h>
++#include <stdint.h>
++
++#include "avutil.h"
++#include "buffer.h"
++#include "channel_layout.h"
++#include "dict.h"
++#include "rational.h"
++#include "samplefmt.h"
++#include "pixfmt.h"
++#include "version.h"
++
++
++/**
++ * @defgroup lavu_frame AVFrame
++ * @ingroup lavu_data
++ *
++ * @{
++ * AVFrame is an abstraction for reference-counted raw multimedia data.
++ */
++
++enum AVFrameSideDataType {
++ /**
++ * The data is the AVPanScan struct defined in libavcodec.
++ */
++ AV_FRAME_DATA_PANSCAN,
++ /**
++ * ATSC A53 Part 4 Closed Captions.
++ * A53 CC bitstream is stored as uint8_t in AVFrameSideData.data.
++ * The number of bytes of CC data is AVFrameSideData.size.
++ */
++ AV_FRAME_DATA_A53_CC,
++ /**
++ * Stereoscopic 3d metadata.
++ * The data is the AVStereo3D struct defined in libavutil/stereo3d.h.
++ */
++ AV_FRAME_DATA_STEREO3D,
++ /**
++     * The data is the AVMatrixEncoding enum defined in libavutil/channel_layout.h.
++ */
++ AV_FRAME_DATA_MATRIXENCODING,
++ /**
++ * Metadata relevant to a downmix procedure.
++     * The data is the AVDownmixInfo struct defined in libavutil/downmix_info.h.
++ */
++ AV_FRAME_DATA_DOWNMIX_INFO,
++ /**
++ * ReplayGain information in the form of the AVReplayGain struct.
++ */
++ AV_FRAME_DATA_REPLAYGAIN,
++ /**
++     * This side data contains a 3x3 transformation matrix describing an affine
++ * transformation that needs to be applied to the frame for correct
++ * presentation.
++ *
++ * See libavutil/display.h for a detailed description of the data.
++ */
++ AV_FRAME_DATA_DISPLAYMATRIX,
++ /**
++     * Active Format Description data consisting of a single byte as specified
++ * in ETSI TS 101 154 using AVActiveFormatDescription enum.
++ */
++ AV_FRAME_DATA_AFD,
++ /**
++     * Motion vectors exported by some codecs (on demand through the export_mvs
++ * flag set in the libavcodec AVCodecContext flags2 option).
++ * The data is the AVMotionVector struct defined in
++ * libavutil/motion_vector.h.
++ */
++ AV_FRAME_DATA_MOTION_VECTORS,
++ /**
++ * Recommends skipping the specified number of samples. This is exported
++ * only if the "skip_manual" AVOption is set in libavcodec.
++ * This has the same format as AV_PKT_DATA_SKIP_SAMPLES.
++ * @code
++ * u32le number of samples to skip from start of this packet
++ * u32le number of samples to skip from end of this packet
++ * u8 reason for start skip
++ * u8 reason for end skip (0=padding silence, 1=convergence)
++ * @endcode
++ */
++ AV_FRAME_DATA_SKIP_SAMPLES,
++ /**
++     * This side data must be associated with an audio frame and corresponds to
++ * enum AVAudioServiceType defined in avcodec.h.
++ */
++ AV_FRAME_DATA_AUDIO_SERVICE_TYPE,
++ /**
++     * Mastering display metadata associated with a video frame. The payload is
++ * an AVMasteringDisplayMetadata type and contains information about the
++ * mastering display color volume.
++ */
++ AV_FRAME_DATA_MASTERING_DISPLAY_METADATA,
++ /**
++     * The GOP timecode in 25 bit timecode format. Data format is 64-bit integer.
++     * This is set on the first frame of a GOP that has a temporal reference of 0.
++ */
++ AV_FRAME_DATA_GOP_TIMECODE,
++
++ /**
++ * The data represents the AVSphericalMapping structure defined in
++ * libavutil/spherical.h.
++ */
++ AV_FRAME_DATA_SPHERICAL,
++
++ /**
++     * Content light level (based on CTA-861.3). This payload contains data in
++ * the form of the AVContentLightMetadata struct.
++ */
++ AV_FRAME_DATA_CONTENT_LIGHT_LEVEL,
++
++ /**
++     * The data contains an ICC profile as an opaque octet buffer following the
++ * format described by ISO 15076-1 with an optional name defined in the
++ * metadata key entry "name".
++ */
++ AV_FRAME_DATA_ICC_PROFILE,
++
++ /**
++     * Timecode which conforms to SMPTE ST 12-1. The data is an array of 4 uint32_t
++     * where the first uint32_t describes how many (1-3) of the other timecodes are used.
++     * The timecode format is described in the documentation of av_timecode_get_smpte_from_framenum()
++ * function in libavutil/timecode.h.
++ */
++ AV_FRAME_DATA_S12M_TIMECODE,
++
++ /**
++ * HDR dynamic metadata associated with a video frame. The payload is
++ * an AVDynamicHDRPlus type and contains information for color
++ * volume transform - application 4 of SMPTE 2094-40:2016 standard.
++ */
++ AV_FRAME_DATA_DYNAMIC_HDR_PLUS,
++
++ /**
++     * Regions Of Interest, the data is an array of AVRegionOfInterest type, the number of
++     * array element is implied by AVFrameSideData.size / AVRegionOfInterest.self_size.
++ */
++ AV_FRAME_DATA_REGIONS_OF_INTEREST,
++
++ /**
++     * Encoding parameters for a video frame, as described by AVVideoEncParams.
++ */
++ AV_FRAME_DATA_VIDEO_ENC_PARAMS,
++
++ /**
++ * User data unregistered metadata associated with a video frame.
++     * This is the H.26[45] UDU SEI message, and shouldn't be used for any other purpose
++     * The data is stored as uint8_t in AVFrameSideData.data which is 16 bytes of
++     * uuid_iso_iec_11578 followed by AVFrameSideData.size - 16 bytes of user_data_payload_byte.
++ */
++ AV_FRAME_DATA_SEI_UNREGISTERED,
++
++ /**
++ * Film grain parameters for a frame, described by AVFilmGrainParams.
++ * Must be present for every frame which should have film grain applied.
++ *
++ * May be present multiple times, for example when there are multiple
++     * alternative parameter sets for different video signal characteristics.
++ * The user should select the most appropriate set for the application.
++ */
++ AV_FRAME_DATA_FILM_GRAIN_PARAMS,
++
++ /**
++ * Bounding boxes for object detection and classification,
++ * as described by AVDetectionBBoxHeader.
++ */
++ AV_FRAME_DATA_DETECTION_BBOXES,
++
++ /**
++ * Dolby Vision RPU raw data, suitable for passing to x265
++ * or other libraries. Array of uint8_t, with NAL emulation
++ * bytes intact.
++ */
++ AV_FRAME_DATA_DOVI_RPU_BUFFER,
++
++ /**
++ * Parsed Dolby Vision metadata, suitable for passing to a software
++ * implementation. The payload is the AVDOVIMetadata struct defined in
++ * libavutil/dovi_meta.h.
++ */
++ AV_FRAME_DATA_DOVI_METADATA,
++
++ /**
++     * HDR Vivid dynamic metadata associated with a video frame. The payload is
++ * an AVDynamicHDRVivid type and contains information for color
++ * volume transform - CUVA 005.1-2021.
++ */
++ AV_FRAME_DATA_DYNAMIC_HDR_VIVID,
++
++ /**
++ * Ambient viewing environment metadata, as defined by H.274.
++ */
++ AV_FRAME_DATA_AMBIENT_VIEWING_ENVIRONMENT,
++
++ /**
++ * Provide encoder-specific hinting information about changed/unchanged
++ * portions of a frame. It can be used to pass information about which
++ * macroblocks can be skipped because they didn't change from the
++ * corresponding ones in the previous frame. This could be useful for
++ * applications which know this information in advance to speed up
++ * encoding.
++ */
++ AV_FRAME_DATA_VIDEO_HINT,
++
++ /**
++ * Raw LCEVC payload data, as a uint8_t array, with NAL emulation
++ * bytes intact.
++ */
++ AV_FRAME_DATA_LCEVC,
++
++ /**
++ * This side data must be associated with a video frame.
++ * The presence of this side data indicates that the video stream is
++ * composed of multiple views (e.g. stereoscopic 3D content,
++ * cf. H.264 Annex H or H.265 Annex G).
++ * The data is an int storing the view ID.
++ */
++ AV_FRAME_DATA_VIEW_ID,
++
++ /**
++     * This side data contains information about the reference display width(s)
++     * and reference viewing distance(s) as well as information about the
++     * corresponding reference stereo pair(s), i.e., the pair(s) of views to be
++     * displayed for the viewer's left and right eyes on the reference display
++ * at the reference viewing distance.
++ * The payload is the AV3DReferenceDisplaysInfo struct defined in
++ * libavutil/tdrdi.h.
++ */
++ AV_FRAME_DATA_3D_REFERENCE_DISPLAYS,
++};
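
As a quick orientation for the enum above (not part of the patch): side data is
queried per frame with av_frame_get_side_data(), which is declared further down
in this header, outside the excerpt shown here; dump_a53_cc is a hypothetical
caller name. A check for A53 closed captions might look like this:

````
/* Sketch only: probe a decoded frame for A53 closed-caption side data. */
#include <stdio.h>
#include <libavutil/frame.h>

static void dump_a53_cc(const AVFrame *frame)
{
    const AVFrameSideData *sd =
        av_frame_get_side_data(frame, AV_FRAME_DATA_A53_CC);

    if (sd)
        printf("frame carries %zu bytes of A53 CC data\n", sd->size);
}
````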
++
++enum AVActiveFormatDescription {
++ AV_AFD_SAME = 8,
++ AV_AFD_4_3 = 9,
++ AV_AFD_16_9 = 10,
++ AV_AFD_14_9 = 11,
++ AV_AFD_4_3_SP_14_9 = 13,
++ AV_AFD_16_9_SP_14_9 = 14,
++ AV_AFD_SP_4_3 = 15,
++};
++
++
++/**
++ * Structure to hold side data for an AVFrame.
++ *
++ * sizeof(AVFrameSideData) is not a part of the public ABI, so new fields may be added
++ * to the end with a minor bump.
++ */
++typedef struct AVFrameSideData {
++ enum AVFrameSideDataType type;
++ uint8_t *data;
++ size_t size;
++ AVDictionary *metadata;
++ AVBufferRef *buf;
++} AVFrameSideData;
++
++enum AVSideDataProps {
++ /**
++ * The side data type can be used in stream-global structures.
++     * Side data types without this property are only meaningful on per-frame
++ * basis.
++ */
++ AV_SIDE_DATA_PROP_GLOBAL = (1 << 0),
++
++ /**
++     * Multiple instances of this side data type can be meaningfully present in
++ * a single side data array.
++ */
++ AV_SIDE_DATA_PROP_MULTI = (1 << 1),
++
++ /**
++     * Side data depends on the video dimensions. Side data with this property
++ * loses its meaning when rescaling or cropping the image, unless
++ * either recomputed or adjusted to the new resolution.
++ */
++ AV_SIDE_DATA_PROP_SIZE_DEPENDENT = (1 << 2),
++
++ /**
++     * Side data depends on the video color space. Side data with this property
++ * loses its meaning when changing the video color encoding, e.g. by
++ * adapting to a different set of primaries or transfer characteristics.
++ */
++ AV_SIDE_DATA_PROP_COLOR_DEPENDENT = (1 << 3),
++
++ /**
++ * Side data depends on the channel layout. Side data with this property
++     * loses its meaning when downmixing or upmixing, unless either recomputed
++ * or adjusted to the new layout.
++ */
++ AV_SIDE_DATA_PROP_CHANNEL_DEPENDENT = (1 << 4),
++};
++
++/**
++ * This struct describes the properties of a side data type. Its instance
++ * corresponding to a given type can be obtained from av_frame_side_data_desc().
++ */
++typedef struct AVSideDataDescriptor {
++ /**
++ * Human-readable side data description.
++ */
++ const char *name;
++
++ /**
++ * Side data property flags, a combination of AVSideDataProps values.
++ */
++ unsigned props;
++} AVSideDataDescriptor;
++
++/**
++ * Structure describing a single Region Of Interest.
++ *
++ * When multiple regions are defined in a single side-data block, they
++ * should be ordered from most to least important - some encoders are only
++ * capable of supporting a limited number of distinct regions, so will have
++ * to truncate the list.
++ *
++ * When overlapping regions are defined, the first region containing a given
++ * area of the frame applies.
++ */
++typedef struct AVRegionOfInterest {
++ /**
++ * Must be set to the size of this data structure (that is,
++ * sizeof(AVRegionOfInterest)).
++ */
++ uint32_t self_size;
++ /**
++ * Distance in pixels from the top edge of the frame to the top and
++ * bottom edges and from the left edge of the frame to the left and
++ * right edges of the rectangle defining this region of interest.
++ *
++ * The constraints on a region are encoder dependent, so the region
++ * actually affected may be slightly larger for alignment or other
++ * reasons.
++ */
++ int top;
++ int bottom;
++ int left;
++ int right;
++ /**
++ * Quantisation offset.
++ *
++ * Must be in the range -1 to +1. A value of zero indicates no quality
++     * change. A negative value asks for better quality (less quantisation),
++ * while a positive value asks for worse quality (greater quantisation).
++ *
++ * The range is calibrated so that the extreme values indicate the
++     * largest possible offset - if the rest of the frame is encoded with the
++     * worst possible quality, an offset of -1 indicates that this region
++     * should be encoded with the best possible quality anyway. Intermediate
++ * values are then interpolated in some codec-dependent way.
++ *
++     * For example, in 10-bit H.264 the quantisation parameter varies between
++     * -12 and 51. A typical qoffset value of -1/10 therefore indicates that
++ * this region should be encoded with a QP around one-tenth of the full
++ * range better than the rest of the frame. So, if most of the frame
++ * were to be encoded with a QP of around 30, this region would get a QP
++ * of around 24 (an offset of approximately -1/10 * (51 - -12) = -6.3).
++ * An extreme value of -1 would indicate that this region should be
++ * encoded with the best possible quality regardless of the treatment of
++ * the rest of the frame - that is, should be encoded at a QP of -12.
++ */
++ AVRational qoffset;
++} AVRegionOfInterest;
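
To make the qoffset arithmetic above concrete, here is a hedged sketch (not
part of the patch) that fills in one AVRegionOfInterest asking for roughly
1/10 of the QP range better quality over the top-left quarter of a 1920x1080
frame; make_roi is a hypothetical helper name, and attaching the result to a
frame would go through the AV_FRAME_DATA_REGIONS_OF_INTEREST side data type
listed earlier.

````
/* Sketch only: one region of interest with qoffset = -1/10. */
#include <libavutil/frame.h>

static AVRegionOfInterest make_roi(void)
{
    AVRegionOfInterest roi = { 0 };
    roi.self_size = sizeof(roi);              /* mandatory, see above */
    roi.top       = 0;
    roi.bottom    = 1080 / 2;
    roi.left      = 0;
    roi.right     = 1920 / 2;
    roi.qoffset   = (AVRational){ -1, 10 };   /* negative = better quality */
    return roi;
}
````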
++
++/**
++ * This structure describes decoded (raw) audio or video data.
++ *
++ * AVFrame must be allocated using av_frame_alloc(). Note that this only
++ * allocates the AVFrame itself, the buffers for the data must be managed
++ * through other means (see below).
++ * AVFrame must be freed with av_frame_free().
++ *
++ * AVFrame is typically allocated once and then reused multiple times to hold
++ * different data (e.g. a single AVFrame to hold frames received from a
++ * decoder). In such a case, av_frame_unref() will free any references held by
++ * the frame and reset it to its original clean state before it
++ * is reused again.
++ *
++ * The data described by an AVFrame is usually reference counted through the
++ * AVBuffer API. The underlying buffer references are stored in AVFrame.buf /
++ * AVFrame.extended_buf. An AVFrame is considered to be reference counted if at
++ * least one reference is set, i.e. if AVFrame.buf[0] != NULL. In such a case,
++ * every single data plane must be contained in one of the buffers in
++ * AVFrame.buf or AVFrame.extended_buf.
++ * There may be a single buffer for all the data, or one separate buffer for
++ * each plane, or anything in between.
++ *
++ * sizeof(AVFrame) is not a part of the public ABI, so new fields may be added
++ * to the end with a minor bump.
++ *
++ * Fields can be accessed through AVOptions, the name string used, matches the
++ * C structure field name for fields accessible through AVOptions.
++ */
++typedef struct AVFrame {
++#define AV_NUM_DATA_POINTERS 8
++ /**
++ * pointer to the picture/channel planes.
++ * This might be different from the first allocated byte. For video,
++ * it could even point to the end of the image data.
++ *
++ * All pointers in data and extended_data must point into one of the
++ * AVBufferRef in buf or extended_buf.
++ *
++ * Some decoders access areas outside 0,0 - width,height, please
++ * see avcodec_align_dimensions2(). Some filters and swscale can read
++ * up to 16 bytes beyond the planes, if these filters are to be used,
++ * then 16 extra bytes must be allocated.
++ *
++ * NOTE: Pointers not needed by the format MUST be set to NULL.
++ *
++ * @attention In case of video, the data[] pointers can point to the
++ * end of image data in order to reverse line order, when used in
++ * combination with negative values in the linesize[] array.
++ */
++ uint8_t *data[AV_NUM_DATA_POINTERS];
++
++ /**
++     * For video, a positive or negative value, which is typically indicating
++     * the size in bytes of each picture line, but it can also be:
++     * - the negative byte size of lines for vertical flipping
++     * (with data[n] pointing to the end of the data
++     * - a positive or negative multiple of the byte size as for accessing
++     * even and odd fields of a frame (possibly flipped)
++     *
++     * For audio, only linesize[0] may be set. For planar audio, each channel
++     * plane must be the same size.
++     *
++     * For video the linesizes should be multiples of the CPUs alignment
++     * preference, this is 16 or 32 for modern desktop CPUs.
++     * Some code requires such alignment other code can be slower without
++     * correct alignment, for yet other it makes no difference.
++     *
++     * @note The linesize may be larger than the size of usable data -- there
++     * may be extra padding present for performance reasons.
++     *
++     * @attention In case of video, line size values can be negative to achieve
++ * a vertically inverted iteration over image lines.
++ */
++ int linesize[AV_NUM_DATA_POINTERS];
++
++ /**
++ * pointers to the data planes/channels.
++ *
++ * For video, this should simply point to data[].
++ *
++ * For planar audio, each channel has a separate data pointer, and
++ * linesize[0] contains the size of each channel buffer.
++ * For packed audio, there is just one data pointer, and linesize[0]
++ * contains the total size of the buffer for all channels.
++ *
++     * Note: Both data and extended_data should always be set in a valid frame,
++ * but for planar audio with more channels that can fit in data,
++ * extended_data must be used in order to access all channels.
++ */
++ uint8_t **extended_data;
++
++ /**
++ * @name Video dimensions
++     * Video frames only. The coded dimensions (in pixels) of the video frame,
++     * i.e. the size of the rectangle that contains some well-defined values.
++     *
++     * @note The part of the frame intended for display/presentation is further
++ * restricted by the @ref cropping "Cropping rectangle".
++ * @{
++ */
++ int width, height;
++ /**
++ * @}
++ */
++
++ /**
++ * number of audio samples (per channel) described by this frame
++ */
++ int nb_samples;
++
++ /**
++ * format of the frame, -1 if unknown or unset
++ * Values correspond to enum AVPixelFormat for video frames,
++ * enum AVSampleFormat for audio)
++ */
++ int format;
++
++ /**
++ * Picture type of the frame.
++ */
++ enum AVPictureType pict_type;
++
++ /**
++ * Sample aspect ratio for the video frame, 0/1 if unknown/unspecified.
++ */
++ AVRational sample_aspect_ratio;
++
++ /**
++     * Presentation timestamp in time_base units (time when frame should be shown to user).
++ */
++ int64_t pts;
++
++ /**
++     * DTS copied from the AVPacket that triggered returning this frame. (if frame threading isn't used)
++ * This is also the Presentation time of this AVFrame calculated from
++ * only AVPacket.dts values without pts values.
++ */
++ int64_t pkt_dts;
++
++ /**
++ * Time base for the timestamps in this frame.
++ * In the future, this field may be set on frames output by decoders or
++     * filters, but its value will be by default ignored on input to encoders
++ * or filters.
++ */
++ AVRational time_base;
++
++ /**
++ * quality (between 1 (good) and FF_LAMBDA_MAX (bad))
++ */
++ int quality;
++
++ /**
++ * Frame owner's private data.
++ *
++ * This field may be set by the code that allocates/owns the frame data.
++ * It is then not touched by any library functions, except:
++ * - it is copied to other references by av_frame_copy_props() (and
hence by
++ * av_frame_ref());
++ * - it is set to NULL when the frame is cleared by av_frame_unref()
++ * - on the caller's explicit request. E.g. libavcodec encoders/decoders
++ * will copy this field to/from @ref AVPacket "AVPackets" if the
caller sets
++ * @ref AV_CODEC_FLAG_COPY_OPAQUE.
++ *
++ * @see opaque_ref the reference-counted analogue
++ */
++ void *opaque;
++
++ /**
++ * Number of fields in this frame which should be repeated, i.e. the
total
++ * duration of this frame should be repeat_pict + 2 normal field
durations.
++ *
++ * For interlaced frames this field may be set to 1, which signals that
this
++ * frame should be presented as 3 fields: beginning with the first
field (as
++ * determined by AV_FRAME_FLAG_TOP_FIELD_FIRST being set or not),
followed
++ * by the second field, and then the first field again.
++ *
++ * For progressive frames this field may be set to a multiple of 2,
which
++ * signals that this frame's duration should be (repeat_pict + 2) / 2
++ * normal frame durations.
++ *
++ * @note This field is computed from MPEG2 repeat_first_field flag and
its
++ * associated flags, H.264 pic_struct from picture timing SEI, and
++ * their analogues in other codecs. Typically it should only be used
when
++ * higher-layer timing information is not available.
++ */
++ int repeat_pict;
++
++ /**
++ * Sample rate of the audio data.
++ */
++ int sample_rate;
++
++ /**
++ * AVBuffer references backing the data for this frame. All the
pointers in
++ * data and extended_data must point inside one of the buffers in buf or
++ * extended_buf. This array must be filled contiguously -- if buf[i] is
++ * non-NULL then buf[j] must also be non-NULL for all j < i.
++ *
++ * There may be at most one AVBuffer per data plane, so for video this
array
++ * always contains all the references. For planar audio with more than
++ * AV_NUM_DATA_POINTERS channels, there may be more buffers than can
fit in
++ * this array. Then the extra AVBufferRef pointers are stored in the
++ * extended_buf array.
++ */
++ AVBufferRef *buf[AV_NUM_DATA_POINTERS];
++
++ /**
++ * For planar audio which requires more than AV_NUM_DATA_POINTERS
++ * AVBufferRef pointers, this array will hold all the references which
++ * cannot fit into AVFrame.buf.
++ *
++ * Note that this is different from AVFrame.extended_data, which always
++ * contains all the pointers. This array only contains the extra
pointers,
++ * which cannot fit into AVFrame.buf.
++ *
++ * This array is always allocated using av_malloc() by whoever
constructs
++ * the frame. It is freed in av_frame_unref().
++ */
++ AVBufferRef **extended_buf;
++ /**
++ * Number of elements in extended_buf.
++ */
++ int nb_extended_buf;
++
++ AVFrameSideData **side_data;
++ int nb_side_data;
++
++/**
++ * @defgroup lavu_frame_flags AV_FRAME_FLAGS
++ * @ingroup lavu_frame
++ * Flags describing additional frame properties.
++ *
++ * @{
++ */
++
++/**
++ * The frame data may be corrupted, e.g. due to decoding errors.
++ */
++#define AV_FRAME_FLAG_CORRUPT (1 << 0)
++/**
++ * A flag to mark frames that are keyframes.
++ */
++#define AV_FRAME_FLAG_KEY (1 << 1)
++/**
++ * A flag to mark the frames which need to be decoded, but shouldn't be
output.
++ */
++#define AV_FRAME_FLAG_DISCARD (1 << 2)
++/**
++ * A flag to mark frames whose content is interlaced.
++ */
++#define AV_FRAME_FLAG_INTERLACED (1 << 3)
++/**
++ * A flag to mark frames where the top field is displayed first if the
content
++ * is interlaced.
++ */
++#define AV_FRAME_FLAG_TOP_FIELD_FIRST (1 << 4)
++/**
++ * A decoder can use this flag to mark frames which were originally encoded
losslessly.
++ *
++ * For coding bitstream formats which support both lossless and lossy
++ * encoding, it is sometimes possible for a decoder to determine which
++ * method was used when the bitstream was encoded.
++ */
++#define AV_FRAME_FLAG_LOSSLESS (1 << 5)
++/**
++ * @}
++ */
++
++ /**
++ * Frame flags, a combination of @ref lavu_frame_flags
++ */
++ int flags;
++
++ /**
++ * MPEG vs JPEG YUV range.
++ * - encoding: Set by user
++ * - decoding: Set by libavcodec
++ */
++ enum AVColorRange color_range;
++
++ enum AVColorPrimaries color_primaries;
++
++ enum AVColorTransferCharacteristic color_trc;
++
++ /**
++ * YUV colorspace type.
++ * - encoding: Set by user
++ * - decoding: Set by libavcodec
++ */
++ enum AVColorSpace colorspace;
++
++ enum AVChromaLocation chroma_location;
++
++ /**
++ * frame timestamp estimated using various heuristics, in stream time
base
++ * - encoding: unused
++ * - decoding: set by libavcodec, read by user.
++ */
++ int64_t best_effort_timestamp;
++
++ /**
++ * metadata.
++ * - encoding: Set by user.
++ * - decoding: Set by libavcodec.
++ */
++ AVDictionary *metadata;
++
++ /**
++ * decode error flags of the frame, set to a combination of
++ * FF_DECODE_ERROR_xxx flags if the decoder produced a frame, but there
++ * were errors during the decoding.
++ * - encoding: unused
++ * - decoding: set by libavcodec, read by user.
++ */
++ int decode_error_flags;
++#define FF_DECODE_ERROR_INVALID_BITSTREAM 1
++#define FF_DECODE_ERROR_MISSING_REFERENCE 2
++#define FF_DECODE_ERROR_CONCEALMENT_ACTIVE 4
++#define FF_DECODE_ERROR_DECODE_SLICES 8
++
++ /**
++ * For hwaccel-format frames, this should be a reference to the
++ * AVHWFramesContext describing the frame.
++ */
++ AVBufferRef *hw_frames_ctx;
++
++ /**
++ * Frame owner's private data.
++ *
++ * This field may be set by the code that allocates/owns the frame data.
++ * It is then not touched by any library functions, except:
++ * - a new reference to the underlying buffer is propagated by
++ * av_frame_copy_props() (and hence by av_frame_ref());
++ * - it is unreferenced in av_frame_unref();
++ * - on the caller's explicit request. E.g. libavcodec encoders/decoders
++ * will propagate a new reference to/from @ref AVPacket "AVPackets"
if the
++ * caller sets @ref AV_CODEC_FLAG_COPY_OPAQUE.
++ *
++ * @see opaque the plain pointer analogue
++ */
++ AVBufferRef *opaque_ref;
++
++ /**
++ * @anchor cropping
++ * @name Cropping
++ * Video frames only. The number of pixels to discard from the
++ * top/bottom/left/right border of the frame to obtain the sub-rectangle
++ * of the frame intended for presentation.
++ * @{
++ */
++ size_t crop_top;
++ size_t crop_bottom;
++ size_t crop_left;
++ size_t crop_right;
++ /**
++ * @}
++ */
++
++ /**
++ * RefStruct reference for internal use by a single libav* library.
++ * Must not be used to transfer data between libraries.
++ * Has to be NULL when ownership of the frame leaves the respective
library.
++ *
++ * Code outside the FFmpeg libs must never check or change private_ref.
++ */
++ void *private_ref;
++
++ /**
++ * Channel layout of the audio data.
++ */
++ AVChannelLayout ch_layout;
++
++ /**
++ * Duration of the frame, in the same units as pts. 0 if unknown.
++ */
++ int64_t duration;
++} AVFrame;
++
++
++/**
++ * Allocate an AVFrame and set its fields to default values. The resulting
++ * struct must be freed using av_frame_free().
++ *
++ * @return An AVFrame filled with default values or NULL on failure.
++ *
++ * @note this only allocates the AVFrame itself, not the data buffers. Those
++ * must be allocated through other means, e.g. with av_frame_get_buffer() or
++ * manually.
++ */
++AVFrame *av_frame_alloc(void);
++
++/**
++ * Free the frame and any dynamically allocated objects in it,
++ * e.g. extended_data. If the frame is reference counted, it will be
++ * unreferenced first.
++ *
++ * @param frame frame to be freed. The pointer will be set to NULL.
++ */
++void av_frame_free(AVFrame **frame);
++
++/**
++ * Set up a new reference to the data described by the source frame.
++ *
++ * Copy frame properties from src to dst and create a new reference for each
++ * AVBufferRef from src.
++ *
++ * If src is not reference counted, new buffers are allocated and the data
is
++ * copied.
++ *
++ * @warning: dst MUST have been either unreferenced with
av_frame_unref(dst),
++ * or newly allocated with av_frame_alloc() before calling this
++ * function, or undefined behavior will occur.
++ *
++ * @return 0 on success, a negative AVERROR on error
++ */
++int av_frame_ref(AVFrame *dst, const AVFrame *src);
++
++/**
++ * Ensure the destination frame refers to the same data described by the
source
++ * frame, either by creating a new reference for each AVBufferRef from src
if
++ * they differ from those in dst, by allocating new buffers and copying
data if
++ * src is not reference counted, or by unreferencing it if src is empty.
++ *
++ * Frame properties on dst will be replaced by those from src.
++ *
++ * @return 0 on success, a negative AVERROR on error. On error, dst is
++ * unreferenced.
++ */
++int av_frame_replace(AVFrame *dst, const AVFrame *src);
++
++/**
++ * Create a new frame that references the same data as src.
++ *
++ * This is a shortcut for av_frame_alloc()+av_frame_ref().
++ *
++ * @return newly created AVFrame on success, NULL on error.
++ */
++AVFrame *av_frame_clone(const AVFrame *src);
++
++/**
++ * Unreference all the buffers referenced by frame and reset the frame
fields.
++ */
++void av_frame_unref(AVFrame *frame);
++
++/**
++ * Move everything contained in src to dst and reset src.
++ *
++ * @warning: dst is not unreferenced, but directly overwritten without
reading
++ * or deallocating its contents. Call av_frame_unref(dst) manually
++ * before calling this function to ensure that no memory is
leaked.
++ */
++void av_frame_move_ref(AVFrame *dst, AVFrame *src);
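
A rough sketch of how these reference helpers combine in practice (illustrative only, not part of the vendored header; the helper name share_frame is made up):

````
#include <libavutil/frame.h>

/* Share one decoded frame between two owners without copying pixel data. */
static int share_frame(const AVFrame *src, AVFrame **out_a, AVFrame **out_b)
{
    int ret;

    *out_a = av_frame_alloc();         /* must be empty before av_frame_ref() */
    if (!*out_a)
        return -1;
    ret = av_frame_ref(*out_a, src);   /* new reference to src's buffers */
    if (ret < 0) {
        av_frame_free(out_a);
        return ret;
    }

    *out_b = av_frame_clone(src);      /* av_frame_alloc() + av_frame_ref() */
    if (!*out_b) {
        av_frame_free(out_a);
        return -1;
    }
    return 0;                          /* each owner later calls av_frame_free() */
}
````
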
++
++/**
++ * Allocate new buffer(s) for audio or video data.
++ *
++ * The following fields must be set on frame before calling this function:
++ * - format (pixel format for video, sample format for audio)
++ * - width and height for video
++ * - nb_samples and ch_layout for audio
++ *
++ * This function will fill AVFrame.data and AVFrame.buf arrays and, if
++ * necessary, allocate and fill AVFrame.extended_data and
AVFrame.extended_buf.
++ * For planar formats, one buffer will be allocated for each plane.
++ *
++ * @warning: if frame already has been allocated, calling this function will
++ * leak memory. In addition, undefined behavior can occur in
certain
++ * cases.
++ *
++ * @param frame frame in which to store the new buffers.
++ * @param align Required buffer size and data pointer alignment. If equal
to 0,
++ * alignment will be chosen automatically for the current CPU.
++ * It is highly recommended to pass 0 here unless you know what
++ * you are doing.
++ *
++ * @return 0 on success, a negative AVERROR on error.
++ */
++int av_frame_get_buffer(AVFrame *frame, int align);
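
A minimal allocation sketch following the required-fields contract above (illustrative; AV_PIX_FMT_YUV420P comes from pixfmt.h, and the helper name is hypothetical):

````
#include <libavutil/frame.h>
#include <libavutil/pixfmt.h>

/* Allocate a 1280x720 YUV420P frame with reference-counted buffers. */
static AVFrame *alloc_video_frame(void)
{
    AVFrame *frame = av_frame_alloc();
    if (!frame)
        return NULL;

    /* format, width and height must be set before av_frame_get_buffer(). */
    frame->format = AV_PIX_FMT_YUV420P;
    frame->width  = 1280;
    frame->height = 720;

    /* align == 0 lets libavutil pick the alignment for the current CPU. */
    if (av_frame_get_buffer(frame, 0) < 0)
        av_frame_free(&frame);         /* sets frame back to NULL */
    return frame;
}
````
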
++
++/**
++ * Check if the frame data is writable.
++ *
++ * @return A positive value if the frame data is writable (which is true if
and
++ * only if each of the underlying buffers has only one reference, namely
the one
++ * stored in this frame). Return 0 otherwise.
++ *
++ * If 1 is returned the answer is valid until av_buffer_ref() is called on
any
++ * of the underlying AVBufferRefs (e.g. through av_frame_ref() or directly).
++ *
++ * @see av_frame_make_writable(), av_buffer_is_writable()
++ */
++int av_frame_is_writable(AVFrame *frame);
++
++/**
++ * Ensure that the frame data is writable, avoiding data copy if possible.
++ *
++ * Do nothing if the frame is writable, allocate new buffers and copy the
data
++ * if it is not. Non-refcounted frames behave as non-writable, i.e. a copy
++ * is always made.
++ *
++ * @return 0 on success, a negative AVERROR on error.
++ *
++ * @see av_frame_is_writable(), av_buffer_is_writable(),
++ * av_buffer_make_writable()
++ */
++int av_frame_make_writable(AVFrame *frame);
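
A small copy-on-write sketch (illustrative; assumes a frame whose first plane is one byte per sample, e.g. the luma plane of YUV420P):

````
#include <libavutil/frame.h>

/* Trigger a copy-on-write if the buffers are shared, then modify in place. */
static int darken_first_line(AVFrame *frame)
{
    int ret = av_frame_make_writable(frame);   /* no-op if already writable */
    if (ret < 0)
        return ret;

    for (int x = 0; x < frame->width; x++)     /* now safe to touch data[] */
        frame->data[0][x] /= 2;
    return 0;
}
````
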
++
++/**
++ * Copy the frame data from src to dst.
++ *
++ * This function does not allocate anything; dst must already be
++ * initialized and allocated with the same parameters as src.
++ *
++ * This function only copies the frame data (i.e. the contents of the data /
++ * extended data arrays), not any other properties.
++ *
++ * @return >= 0 on success, a negative AVERROR on error.
++ */
++int av_frame_copy(AVFrame *dst, const AVFrame *src);
++
++/**
++ * Copy only "metadata" fields from src to dst.
++ *
++ * Metadata for the purpose of this function are those fields that do not
affect
++ * the data layout in the buffers. E.g. pts, sample rate (for audio) or
sample
++ * aspect ratio (for video), but not width/height or channel layout.
++ * Side data is also copied.
++ */
++int av_frame_copy_props(AVFrame *dst, const AVFrame *src);
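
Putting av_frame_copy() and av_frame_copy_props() together gives a fully independent copy of a video frame; a sketch (deep_copy_frame is a hypothetical helper):

````
#include <libavutil/frame.h>

/* Deep-copy a video frame: buffers via av_frame_copy(), everything else
 * (pts, metadata, side data, ...) via av_frame_copy_props(). */
static int deep_copy_frame(AVFrame *dst, const AVFrame *src)
{
    int ret;

    dst->format = src->format;          /* dst must match src's layout */
    dst->width  = src->width;
    dst->height = src->height;
    ret = av_frame_get_buffer(dst, 0);
    if (ret < 0)
        return ret;

    ret = av_frame_copy(dst, src);      /* data planes only */
    if (ret < 0)
        return ret;
    return av_frame_copy_props(dst, src);
}
````
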
++
++/**
++ * Get the buffer reference a given data plane is stored in.
++ *
++ * @param frame the frame to get the plane's buffer from
++ * @param plane index of the data plane of interest in frame->extended_data.
++ *
++ * @return the buffer reference that contains the plane or NULL if the input
++ * frame is not valid.
++ */
++AVBufferRef *av_frame_get_plane_buffer(const AVFrame *frame, int plane);
++
++/**
++ * Add a new side data to a frame.
++ *
++ * @param frame a frame to which the side data should be added
++ * @param type type of the added side data
++ * @param size size of the side data
++ *
++ * @return newly added side data on success, NULL on error
++ */
++AVFrameSideData *av_frame_new_side_data(AVFrame *frame,
++ enum AVFrameSideDataType type,
++ size_t size);
++
++/**
++ * Add a new side data to a frame from an existing AVBufferRef
++ *
++ * @param frame a frame to which the side data should be added
++ * @param type the type of the added side data
++ * @param buf an AVBufferRef to add as side data. The ownership of
++ * the reference is transferred to the frame.
++ *
++ * @return newly added side data on success, NULL on error. On failure
++ * the frame is unchanged and the AVBufferRef remains owned by
++ * the caller.
++ */
++AVFrameSideData *av_frame_new_side_data_from_buf(AVFrame *frame,
++                                                  enum AVFrameSideDataType type,
++ AVBufferRef *buf);
++
++/**
++ * @return a pointer to the side data of a given type on success, NULL if
there
++ * is no side data with such type in this frame.
++ */
++AVFrameSideData *av_frame_get_side_data(const AVFrame *frame,
++ enum AVFrameSideDataType type);
++
++/**
++ * Remove and free all side data instances of the given type.
++ */
++void av_frame_remove_side_data(AVFrame *frame, enum AVFrameSideDataType
type);
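
An illustrative sketch of the per-frame side-data helpers (not part of the patch; AV_FRAME_DATA_DISPLAYMATRIX and av_display_rotation_set() come from the side-data enum in frame.h and from libavutil/display.h, neither of which is shown in this hunk):

````
#include <stdint.h>
#include <libavutil/frame.h>
#include <libavutil/display.h>

/* Attach (or update) a 90-degree rotation hint as display-matrix side data. */
static int tag_rotation(AVFrame *frame)
{
    AVFrameSideData *sd =
        av_frame_get_side_data(frame, AV_FRAME_DATA_DISPLAYMATRIX);
    if (!sd)
        sd = av_frame_new_side_data(frame, AV_FRAME_DATA_DISPLAYMATRIX,
                                    9 * sizeof(int32_t));
    if (!sd)
        return -1;

    av_display_rotation_set((int32_t *)sd->data, 90.0);
    return 0;
}
````
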
++
++
++/**
++ * Flags for frame cropping.
++ */
++enum {
++ /**
++ * Apply the maximum possible cropping, even if it requires setting the
++ * AVFrame.data[] entries to unaligned pointers. Passing unaligned data
++ * to FFmpeg API is generally not allowed, and causes undefined behavior
++ * (such as crashes). You can pass unaligned data only to FFmpeg APIs
that
++ * are explicitly documented to accept it. Use this flag only if you
++ * absolutely know what you are doing.
++ */
++ AV_FRAME_CROP_UNALIGNED = 1 << 0,
++};
++
++/**
++ * Crop the given video AVFrame according to its
crop_left/crop_top/crop_right/
++ * crop_bottom fields. If cropping is successful, the function will adjust
the
++ * data pointers and the width/height fields, and set the crop fields to 0.
++ *
++ * In all cases, the cropping boundaries will be rounded to the inherent
++ * alignment of the pixel format. In some cases, such as for opaque hwaccel
++ * formats, the left/top cropping is ignored. The crop fields are set to 0
even
++ * if the cropping was rounded or ignored.
++ *
++ * @param frame the frame which should be cropped
++ * @param flags Some combination of AV_FRAME_CROP_* flags, or 0.
++ *
++ * @return >= 0 on success, a negative AVERROR on error. If the cropping
fields
++ * were invalid, AVERROR(ERANGE) is returned, and nothing is changed.
++ */
++int av_frame_apply_cropping(AVFrame *frame, int flags);
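
A minimal cropping sketch (illustrative only):

````
#include <libavutil/frame.h>

/* Drop a 16-pixel letterbox from the top and bottom of a decoded frame.
 * On success the data pointers, width/height and crop fields are updated. */
static int crop_letterbox(AVFrame *frame)
{
    frame->crop_top    = 16;
    frame->crop_bottom = 16;
    frame->crop_left   = 0;
    frame->crop_right  = 0;
    return av_frame_apply_cropping(frame, 0);   /* keep pointers aligned */
}
````
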
++
++/**
++ * @return a string identifying the side data type
++ */
++const char *av_frame_side_data_name(enum AVFrameSideDataType type);
++
++/**
++ * @return side data descriptor corresponding to a given side data type,
NULL
++ * when not available.
++ */
++const AVSideDataDescriptor *av_frame_side_data_desc(enum
AVFrameSideDataType type);
++
++/**
++ * Free all side data entries and their contents, then zero out the
++ * values which the pointers are pointing to.
++ *
++ * @param sd pointer to array of side data to free. Will be set to NULL
++ * upon return.
++ * @param nb_sd pointer to an integer containing the number of entries in
++ * the array. Will be set to 0 upon return.
++ */
++void av_frame_side_data_free(AVFrameSideData ***sd, int *nb_sd);
++
++/**
++ * Remove existing entries before adding new ones.
++ */
++#define AV_FRAME_SIDE_DATA_FLAG_UNIQUE (1 << 0)
++/**
++ * Don't add a new entry if another of the same type exists.
++ * Applies only for side data types without the AV_SIDE_DATA_PROP_MULTI
prop.
++ */
++#define AV_FRAME_SIDE_DATA_FLAG_REPLACE (1 << 1)
++/**
++ * Create a new reference to the passed in buffer instead of taking
ownership
++ * of it.
++ */
++#define AV_FRAME_SIDE_DATA_FLAG_NEW_REF (1 << 2)
++
++/**
++ * Add new side data entry to an array.
++ *
++ * @param sd pointer to array of side data to which to add another entry,
++ * or to NULL in order to start a new array.
++ * @param nb_sd pointer to an integer containing the number of entries in
++ * the array.
++ * @param type type of the added side data
++ * @param size size of the side data
++ * @param flags Some combination of AV_FRAME_SIDE_DATA_FLAG_* flags, or 0.
++ *
++ * @return newly added side data on success, NULL on error.
++ * @note In case of AV_FRAME_SIDE_DATA_FLAG_UNIQUE being set, entries of
++ * matching AVFrameSideDataType will be removed before the addition
++ * is attempted.
++ * @note In case of AV_FRAME_SIDE_DATA_FLAG_REPLACE being set, if an
++ * entry of the same type already exists, it will be replaced instead.
++ */
++AVFrameSideData *av_frame_side_data_new(AVFrameSideData ***sd, int *nb_sd,
++ enum AVFrameSideDataType type,
++ size_t size, unsigned int flags);
++
++/**
++ * Add a new side data entry to an array from an existing AVBufferRef.
++ *
++ * @param sd pointer to array of side data to which to add another entry,
++ * or to NULL in order to start a new array.
++ * @param nb_sd pointer to an integer containing the number of entries in
++ * the array.
++ * @param type type of the added side data
++ * @param buf Pointer to AVBufferRef to add to the array. On success,
++ * the function takes ownership of the AVBufferRef and *buf is
++ * set to NULL, unless AV_FRAME_SIDE_DATA_FLAG_NEW_REF is set
++ * in which case the ownership will remain with the caller.
++ * @param flags Some combination of AV_FRAME_SIDE_DATA_FLAG_* flags, or 0.
++ *
++ * @return newly added side data on success, NULL on error.
++ * @note In case of AV_FRAME_SIDE_DATA_FLAG_UNIQUE being set, entries of
++ * matching AVFrameSideDataType will be removed before the addition
++ * is attempted.
++ * @note In case of AV_FRAME_SIDE_DATA_FLAG_REPLACE being set, if an
++ * entry of the same type already exists, it will be replaced instead.
++ *
++ */
++AVFrameSideData *av_frame_side_data_add(AVFrameSideData ***sd, int *nb_sd,
++ enum AVFrameSideDataType type,
++ AVBufferRef **buf, unsigned int
flags);
++
++/**
++ * Add a new side data entry to an array based on existing side data, taking
++ * a reference towards the contained AVBufferRef.
++ *
++ * @param sd pointer to array of side data to which to add another entry,
++ * or to NULL in order to start a new array.
++ * @param nb_sd pointer to an integer containing the number of entries in
++ * the array.
++ * @param src side data to be cloned, with a new reference utilized
++ * for the buffer.
++ * @param flags Some combination of AV_FRAME_SIDE_DATA_FLAG_* flags, or 0.
++ *
++ * @return negative error code on failure, >=0 on success.
++ * @note In case of AV_FRAME_SIDE_DATA_FLAG_UNIQUE being set, entries of
++ * matching AVFrameSideDataType will be removed before the addition
++ * is attempted.
++ * @note In case of AV_FRAME_SIDE_DATA_FLAG_REPLACE being set, if an
++ * entry of the same type already exists, it will be replaced instead.
++ */
++int av_frame_side_data_clone(AVFrameSideData ***sd, int *nb_sd,
++ const AVFrameSideData *src, unsigned int
flags);
++
++/**
++ * Get a side data entry of a specific type from an array.
++ *
++ * @param sd array of side data.
++ * @param nb_sd integer containing the number of entries in the array.
++ * @param type type of side data to be queried
++ *
++ * @return a pointer to the side data of a given type on success, NULL if
there
++ * is no side data with such type in this set.
++ */
++const AVFrameSideData *av_frame_side_data_get_c(const AVFrameSideData * const *sd,
++                                                const int nb_sd,
++                                                enum AVFrameSideDataType type);
++
++/**
++ * Wrapper around av_frame_side_data_get_c() to workaround the limitation
++ * that for any type T the conversion from T * const * to const T * const *
++ * is not performed automatically in C.
++ * @see av_frame_side_data_get_c()
++ */
++static inline
++const AVFrameSideData *av_frame_side_data_get(AVFrameSideData * const *sd,
++ const int nb_sd,
++ enum AVFrameSideDataType type)
++{
++ return av_frame_side_data_get_c((const AVFrameSideData * const *)sd,
++ nb_sd, type);
++}
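
A hedged sketch of the array-based side-data API declared above, managing a side-data set that is not attached to any frame (the helper name and the use of AV_FRAME_DATA_DISPLAYMATRIX are illustrative assumptions):

````
#include <stdint.h>
#include <libavutil/frame.h>

static int build_side_data_set(void)
{
    AVFrameSideData **sd = NULL;    /* start a new, empty array */
    int nb_sd = 0;

    /* Add one display-matrix entry, replacing any existing one. */
    AVFrameSideData *entry =
        av_frame_side_data_new(&sd, &nb_sd, AV_FRAME_DATA_DISPLAYMATRIX,
                               9 * sizeof(int32_t),
                               AV_FRAME_SIDE_DATA_FLAG_REPLACE);
    if (!entry) {
        av_frame_side_data_free(&sd, &nb_sd);
        return -1;
    }

    /* Entries can be looked up again by type... */
    const AVFrameSideData *found =
        av_frame_side_data_get(sd, nb_sd, AV_FRAME_DATA_DISPLAYMATRIX);
    (void)found;

    /* ...and the whole array is released in one call. */
    av_frame_side_data_free(&sd, &nb_sd);
    return 0;
}
````
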
++
++/**
++ * Remove and free all side data instances of the given type from an array.
++ */
++void av_frame_side_data_remove(AVFrameSideData ***sd, int *nb_sd,
++ enum AVFrameSideDataType type);
++
++/**
++ * Remove and free all side data instances that match any of the given
++ * side data properties. (See enum AVSideDataProps)
++ */
++void av_frame_side_data_remove_by_props(AVFrameSideData ***sd, int *nb_sd,
++ int props);
++
++/**
++ * @}
++ */
++
++#endif /* AVUTIL_FRAME_H */
+diff --git
a/dom/media/platforms/ffmpeg/ffmpeg62/include/libavutil/hwcontext.h
b/dom/media/platforms/ffmpeg/ffmpeg62/include/libavutil/hwcontext.h
+new file mode 100644
+--- /dev/null
++++ b/dom/media/platforms/ffmpeg/ffmpeg62/include/libavutil/hwcontext.h
+@@ -0,0 +1,601 @@
++/*
++ * This file is part of FFmpeg.
++ *
++ * FFmpeg is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU Lesser General Public
++ * License as published by the Free Software Foundation; either
++ * version 2.1 of the License, or (at your option) any later version.
++ *
++ * FFmpeg is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with FFmpeg; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
USA
++ */
++
++#ifndef AVUTIL_HWCONTEXT_H
++#define AVUTIL_HWCONTEXT_H
++
++#include "buffer.h"
++#include "frame.h"
++#include "log.h"
++#include "pixfmt.h"
++
++enum AVHWDeviceType {
++ AV_HWDEVICE_TYPE_NONE,
++ AV_HWDEVICE_TYPE_VDPAU,
++ AV_HWDEVICE_TYPE_CUDA,
++ AV_HWDEVICE_TYPE_VAAPI,
++ AV_HWDEVICE_TYPE_DXVA2,
++ AV_HWDEVICE_TYPE_QSV,
++ AV_HWDEVICE_TYPE_VIDEOTOOLBOX,
++ AV_HWDEVICE_TYPE_D3D11VA,
++ AV_HWDEVICE_TYPE_DRM,
++ AV_HWDEVICE_TYPE_OPENCL,
++ AV_HWDEVICE_TYPE_MEDIACODEC,
++ AV_HWDEVICE_TYPE_VULKAN,
++ AV_HWDEVICE_TYPE_D3D12VA,
++ AV_HWDEVICE_TYPE_AMF,
++ /* OpenHarmony Codec device */
++ AV_HWDEVICE_TYPE_OHCODEC,
++};
++
++/**
++ * This struct aggregates all the (hardware/vendor-specific) "high-level"
state,
++ * i.e. state that is not tied to a concrete processing configuration.
++ * E.g., in an API that supports hardware-accelerated encoding and decoding,
++ * this struct will (if possible) wrap the state that is common to both
encoding
++ * and decoding and from which specific instances of encoders or decoders
can be
++ * derived.
++ *
++ * This struct is reference-counted with the AVBuffer mechanism. The
++ * av_hwdevice_ctx_alloc() constructor yields a reference, whose data field
++ * points to the actual AVHWDeviceContext. Further objects derived from
++ * AVHWDeviceContext (such as AVHWFramesContext, describing a frame pool
with
++ * specific properties) will hold an internal reference to it. After all the
++ * references are released, the AVHWDeviceContext itself will be freed,
++ * optionally invoking a user-specified callback for uninitializing the
hardware
++ * state.
++ */
++typedef struct AVHWDeviceContext {
++ /**
++ * A class for logging. Set by av_hwdevice_ctx_alloc().
++ */
++ const AVClass *av_class;
++
++ /**
++ * This field identifies the underlying API used for hardware access.
++ *
++ * This field is set when this struct is allocated and never changed
++ * afterwards.
++ */
++ enum AVHWDeviceType type;
++
++ /**
++ * The format-specific data, allocated and freed by libavutil along with
++ * this context.
++ *
++ * Should be cast by the user to the format-specific context defined in
the
++ * corresponding header (hwcontext_*.h) and filled as described in the
++ * documentation before calling av_hwdevice_ctx_init().
++ *
++ * After calling av_hwdevice_ctx_init() this struct should not be
modified
++ * by the caller.
++ */
++ void *hwctx;
++
++ /**
++ * This field may be set by the caller before calling
av_hwdevice_ctx_init().
++ *
++ * If non-NULL, this callback will be called when the last reference to
++ * this context is unreferenced, immediately before it is freed.
++ *
++ * @note when other objects (e.g an AVHWFramesContext) are derived from
this
++ * struct, this callback will be invoked after all such child
objects
++ * are fully uninitialized and their respective destructors
invoked.
++ */
++ void (*free)(struct AVHWDeviceContext *ctx);
++
++ /**
++ * Arbitrary user data, to be used e.g. by the free() callback.
++ */
++ void *user_opaque;
++} AVHWDeviceContext;
++
++/**
++ * This struct describes a set or pool of "hardware" frames (i.e. those with
++ * data not located in normal system memory). All the frames in the pool are
++ * assumed to be allocated in the same way and interchangeable.
++ *
++ * This struct is reference-counted with the AVBuffer mechanism and tied to
a
++ * given AVHWDeviceContext instance. The av_hwframe_ctx_alloc() constructor
++ * yields a reference, whose data field points to the actual
AVHWFramesContext
++ * struct.
++ */
++typedef struct AVHWFramesContext {
++ /**
++ * A class for logging.
++ */
++ const AVClass *av_class;
++
++ /**
++ * A reference to the parent AVHWDeviceContext. This reference is owned
and
++ * managed by the enclosing AVHWFramesContext, but the caller may derive
++ * additional references from it.
++ */
++ AVBufferRef *device_ref;
++
++ /**
++ * The parent AVHWDeviceContext. This is simply a pointer to
++ * device_ref->data provided for convenience.
++ *
++ * Set by libavutil in av_hwframe_ctx_init().
++ */
++ AVHWDeviceContext *device_ctx;
++
++ /**
++ * The format-specific data, allocated and freed automatically along
with
++ * this context.
++ *
++ * The user shall ignore this field if the corresponding format-specific
++ * header (hwcontext_*.h) does not define a context to be used as
++ * AVHWFramesContext.hwctx.
++ *
++ * Otherwise, it should be cast by the user to said context and filled
++ * as described in the documentation before calling
av_hwframe_ctx_init().
++ *
++ * After any frames using this context are created, the contents of this
++ * struct should not be modified by the caller.
++ */
++ void *hwctx;
++
++ /**
++ * This field may be set by the caller before calling
av_hwframe_ctx_init().
++ *
++ * If non-NULL, this callback will be called when the last reference to
++ * this context is unreferenced, immediately before it is freed.
++ */
++ void (*free)(struct AVHWFramesContext *ctx);
++
++ /**
++ * Arbitrary user data, to be used e.g. by the free() callback.
++ */
++ void *user_opaque;
++
++ /**
++ * A pool from which the frames are allocated by
av_hwframe_get_buffer().
++ * This field may be set by the caller before calling
av_hwframe_ctx_init().
++ * The buffers returned by calling av_buffer_pool_get() on this pool
must
++ * have the properties described in the documentation in the
corresponding hw
++ * type's header (hwcontext_*.h). The pool will be freed strictly before
++ * this struct's free() callback is invoked.
++ *
++ * This field may be NULL, then libavutil will attempt to allocate a
pool
++ * internally. Note that certain device types enforce pools allocated at
++ * fixed size (frame count), which cannot be extended dynamically. In
such a
++ * case, initial_pool_size must be set appropriately.
++ */
++ AVBufferPool *pool;
++
++ /**
++ * Initial size of the frame pool. If a device type does not support
++ * dynamically resizing the pool, then this is also the maximum pool
size.
++ *
++ * May be set by the caller before calling av_hwframe_ctx_init(). Must
be
++ * set if pool is NULL and the device type does not support dynamic
pools.
++ */
++ int initial_pool_size;
++
++ /**
++ * The pixel format identifying the underlying HW surface type.
++ *
++ * Must be a hwaccel format, i.e. the corresponding descriptor must
have the
++ * AV_PIX_FMT_FLAG_HWACCEL flag set.
++ *
++ * Must be set by the user before calling av_hwframe_ctx_init().
++ */
++ enum AVPixelFormat format;
++
++ /**
++ * The pixel format identifying the actual data layout of the hardware
++ * frames.
++ *
++ * Must be set by the caller before calling av_hwframe_ctx_init().
++ *
++ * @note when the underlying API does not provide the exact data
layout, but
++ * only the colorspace/bit depth, this field should be set to the fully
++ * planar version of that format (e.g. for 8-bit 420 YUV it should be
++ * AV_PIX_FMT_YUV420P, not AV_PIX_FMT_NV12 or anything else).
++ */
++ enum AVPixelFormat sw_format;
++
++ /**
++ * The allocated dimensions of the frames in this pool.
++ *
++ * Must be set by the user before calling av_hwframe_ctx_init().
++ */
++ int width, height;
++} AVHWFramesContext;
++
++/**
++ * Look up an AVHWDeviceType by name.
++ *
++ * @param name String name of the device type (case-insensitive).
++ * @return The type from enum AVHWDeviceType, or AV_HWDEVICE_TYPE_NONE if
++ * not found.
++ */
++enum AVHWDeviceType av_hwdevice_find_type_by_name(const char *name);
++
++/** Get the string name of an AVHWDeviceType.
++ *
++ * @param type Type from enum AVHWDeviceType.
++ * @return Pointer to a static string containing the name, or NULL if the
type
++ * is not valid.
++ */
++const char *av_hwdevice_get_type_name(enum AVHWDeviceType type);
++
++/**
++ * Iterate over supported device types.
++ *
++ * @param prev AV_HWDEVICE_TYPE_NONE initially, then the previous type
++ * returned by this function in subsequent iterations.
++ * @return The next usable device type from enum AVHWDeviceType, or
++ * AV_HWDEVICE_TYPE_NONE if there are no more.
++ */
++enum AVHWDeviceType av_hwdevice_iterate_types(enum AVHWDeviceType prev);
++
++/**
++ * Allocate an AVHWDeviceContext for a given hardware type.
++ *
++ * @param type the type of the hardware device to allocate.
++ * @return a reference to the newly created AVHWDeviceContext on success or
NULL
++ * on failure.
++ */
++AVBufferRef *av_hwdevice_ctx_alloc(enum AVHWDeviceType type);
++
++/**
++ * Finalize the device context before use. This function must be called
after
++ * the context is filled with all the required information and before it is
++ * used in any way.
++ *
++ * @param ref a reference to the AVHWDeviceContext
++ * @return 0 on success, a negative AVERROR code on failure
++ */
++int av_hwdevice_ctx_init(AVBufferRef *ref);
++
++/**
++ * Open a device of the specified type and create an AVHWDeviceContext for
it.
++ *
++ * This is a convenience function intended to cover the simple cases.
Callers
++ * who need to fine-tune device creation/management should open the device
++ * manually and then wrap it in an AVHWDeviceContext using
++ * av_hwdevice_ctx_alloc()/av_hwdevice_ctx_init().
++ *
++ * The returned context is already initialized and ready for use, the caller
++ * should not call av_hwdevice_ctx_init() on it. The user_opaque/free
fields of
++ * the created AVHWDeviceContext are set by this function and should not be
++ * touched by the caller.
++ *
++ * @param device_ctx On success, a reference to the newly-created device
context
++ * will be written here. The reference is owned by the
caller
++ * and must be released with av_buffer_unref() when no
longer
++ * needed. On failure, NULL will be written to this
pointer.
++ * @param type The type of the device to create.
++ * @param device A type-specific string identifying the device to open.
++ * @param opts A dictionary of additional (type-specific) options to use in
++ * opening the device. The dictionary remains owned by the
caller.
++ * @param flags currently unused
++ *
++ * @return 0 on success, a negative AVERROR code on failure.
++ */
++int av_hwdevice_ctx_create(AVBufferRef **device_ctx, enum AVHWDeviceType
type,
++ const char *device, AVDictionary *opts, int
flags);
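
For example, opening a VAAPI device through this convenience constructor might look like the following (sketch only; passing NULL selects the default device node):

````
#include <libavutil/hwcontext.h>

/* Open a VAAPI device and return a reference to its AVHWDeviceContext. */
static AVBufferRef *open_vaapi_device(void)
{
    AVBufferRef *device_ref = NULL;
    int ret = av_hwdevice_ctx_create(&device_ref, AV_HWDEVICE_TYPE_VAAPI,
                                     NULL /* default device */, NULL, 0);
    if (ret < 0)
        return NULL;
    return device_ref;   /* release with av_buffer_unref(&device_ref) */
}
````
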
++
++/**
++ * Create a new device of the specified type from an existing device.
++ *
++ * If the source device is a device of the target type or was originally
++ * derived from such a device (possibly through one or more intermediate
++ * devices of other types), then this will return a reference to the
++ * existing device of the same type as is requested.
++ *
++ * Otherwise, it will attempt to derive a new device from the given source
++ * device. If direct derivation to the new type is not implemented, it will
++ * attempt the same derivation from each ancestor of the source device in
++ * turn looking for an implemented derivation method.
++ *
++ * @param dst_ctx On success, a reference to the newly-created
++ * AVHWDeviceContext.
++ * @param type The type of the new device to create.
++ * @param src_ctx A reference to an existing AVHWDeviceContext which will be
++ * used to create the new device.
++ * @param flags Currently unused; should be set to zero.
++ * @return Zero on success, a negative AVERROR code on failure.
++ */
++int av_hwdevice_ctx_create_derived(AVBufferRef **dst_ctx,
++ enum AVHWDeviceType type,
++ AVBufferRef *src_ctx, int flags);
++
++/**
++ * Create a new device of the specified type from an existing device.
++ *
++ * This function performs the same action as av_hwdevice_ctx_create_derived;
++ * however, it is able to set options for the new device to be derived.
++ *
++ * @param dst_ctx On success, a reference to the newly-created
++ * AVHWDeviceContext.
++ * @param type The type of the new device to create.
++ * @param src_ctx A reference to an existing AVHWDeviceContext which will be
++ * used to create the new device.
++ * @param options Options for the new device to create, same format as in
++ * av_hwdevice_ctx_create.
++ * @param flags Currently unused; should be set to zero.
++ * @return Zero on success, a negative AVERROR code on failure.
++ */
++int av_hwdevice_ctx_create_derived_opts(AVBufferRef **dst_ctx,
++ enum AVHWDeviceType type,
++ AVBufferRef *src_ctx,
++ AVDictionary *options, int flags);
++
++/**
++ * Allocate an AVHWFramesContext tied to a given device context.
++ *
++ * @param device_ctx a reference to a AVHWDeviceContext. This function will
make
++ * a new reference for internal use, the one passed to the
++ * function remains owned by the caller.
++ * @return a reference to the newly created AVHWFramesContext on success or
NULL
++ * on failure.
++ */
++AVBufferRef *av_hwframe_ctx_alloc(AVBufferRef *device_ctx);
++
++/**
++ * Finalize the context before use. This function must be called after the
++ * context is filled with all the required information and before it is
attached
++ * to any frames.
++ *
++ * @param ref a reference to the AVHWFramesContext
++ * @return 0 on success, a negative AVERROR code on failure
++ */
++int av_hwframe_ctx_init(AVBufferRef *ref);
++
++/**
++ * Allocate a new frame attached to the given AVHWFramesContext.
++ *
++ * @param hwframe_ctx a reference to an AVHWFramesContext
++ * @param frame an empty (freshly allocated or unreffed) frame to be filled
with
++ * newly allocated buffers.
++ * @param flags currently unused, should be set to zero
++ * @return 0 on success, a negative AVERROR code on failure
++ */
++int av_hwframe_get_buffer(AVBufferRef *hwframe_ctx, AVFrame *frame, int
flags);
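
A rough sketch of the frames-context setup described above, using VAAPI/NV12 as an assumed example configuration:

````
#include <libavutil/hwcontext.h>

/* Create a pool of 1920x1080 VAAPI surfaces (NV12 layout) and pull one frame. */
static int alloc_hw_frame(AVBufferRef *device_ref, AVFrame *frame)
{
    AVBufferRef *hw_frames_ref = av_hwframe_ctx_alloc(device_ref);
    if (!hw_frames_ref)
        return -1;

    AVHWFramesContext *frames_ctx = (AVHWFramesContext *)hw_frames_ref->data;
    frames_ctx->format            = AV_PIX_FMT_VAAPI; /* hwaccel format */
    frames_ctx->sw_format         = AV_PIX_FMT_NV12;  /* underlying layout */
    frames_ctx->width             = 1920;
    frames_ctx->height            = 1080;
    frames_ctx->initial_pool_size = 20;  /* some device types need a fixed pool */

    int ret = av_hwframe_ctx_init(hw_frames_ref);
    if (ret >= 0)
        ret = av_hwframe_get_buffer(hw_frames_ref, frame, 0);

    av_buffer_unref(&hw_frames_ref);     /* frame keeps its own reference */
    return ret;
}
````
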
++
++/**
++ * Copy data to or from a hw surface. At least one of dst/src must have an
++ * AVHWFramesContext attached.
++ *
++ * If src has an AVHWFramesContext attached, then the format of dst (if set)
++ * must use one of the formats returned by
av_hwframe_transfer_get_formats(src,
++ * AV_HWFRAME_TRANSFER_DIRECTION_FROM).
++ * If dst has an AVHWFramesContext attached, then the format of src must
use one
++ * of the formats returned by av_hwframe_transfer_get_formats(dst,
++ * AV_HWFRAME_TRANSFER_DIRECTION_TO)
++ *
++ * dst may be "clean" (i.e. with data/buf pointers unset), in which case the
++ * data buffers will be allocated by this function using
av_frame_get_buffer().
++ * If dst->format is set, then this format will be used, otherwise (when
++ * dst->format is AV_PIX_FMT_NONE) the first acceptable format will be
chosen.
++ *
++ * The two frames must have matching allocated dimensions (i.e. equal to
++ * AVHWFramesContext.width/height), since not all device types support
++ * transferring a sub-rectangle of the whole surface. The display dimensions
++ * (i.e. AVFrame.width/height) may be smaller than the allocated
dimensions, but
++ * also have to be equal for both frames. When the display dimensions are
++ * smaller than the allocated dimensions, the content of the padding in the
++ * destination frame is unspecified.
++ *
++ * @param dst the destination frame. dst is not touched on failure.
++ * @param src the source frame.
++ * @param flags currently unused, should be set to zero
++ * @return 0 on success, a negative AVERROR error code on failure.
++ */
++int av_hwframe_transfer_data(AVFrame *dst, const AVFrame *src, int flags);
++
++enum AVHWFrameTransferDirection {
++ /**
++ * Transfer the data from the queried hw frame.
++ */
++ AV_HWFRAME_TRANSFER_DIRECTION_FROM,
++
++ /**
++ * Transfer the data to the queried hw frame.
++ */
++ AV_HWFRAME_TRANSFER_DIRECTION_TO,
++};
++
++/**
++ * Get a list of possible source or target formats usable in
++ * av_hwframe_transfer_data().
++ *
++ * @param hwframe_ctx the frame context to obtain the information for
++ * @param dir the direction of the transfer
++ * @param formats the pointer to the output format list will be written
here.
++ * The list is terminated with AV_PIX_FMT_NONE and must be
freed
++ * by the caller when no longer needed using av_free().
++ * If this function returns successfully, the format list
will
++ * have at least one item (not counting the terminator).
++ * On failure, the contents of this pointer are unspecified.
++ * @param flags currently unused, should be set to zero
++ * @return 0 on success, a negative AVERROR code on failure.
++ */
++int av_hwframe_transfer_get_formats(AVBufferRef *hwframe_ctx,
++ enum AVHWFrameTransferDirection dir,
++ enum AVPixelFormat **formats, int
flags);
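
Downloading a hardware frame into system memory might then look roughly like this (sketch; a freshly allocated sw_frame has format == AV_PIX_FMT_NONE, so the first acceptable software format is chosen automatically; copying the properties afterwards is a harmless belt-and-braces step):

````
#include <libavutil/hwcontext.h>

/* Copy the contents of a hw surface into a software frame for CPU access. */
static int download_frame(const AVFrame *hw_frame, AVFrame *sw_frame)
{
    int ret = av_hwframe_transfer_data(sw_frame, hw_frame, 0);
    if (ret < 0)
        return ret;
    return av_frame_copy_props(sw_frame, hw_frame);  /* pts, metadata, ... */
}
````
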
++
++
++/**
++ * This struct describes the constraints on hardware frames attached to
++ * a given device with a hardware-specific configuration. This is returned
++ * by av_hwdevice_get_hwframe_constraints() and must be freed by
++ * av_hwframe_constraints_free() after use.
++ */
++typedef struct AVHWFramesConstraints {
++ /**
++ * A list of possible values for format in the hw_frames_ctx,
++ * terminated by AV_PIX_FMT_NONE. This member will always be filled.
++ */
++ enum AVPixelFormat *valid_hw_formats;
++
++ /**
++ * A list of possible values for sw_format in the hw_frames_ctx,
++ * terminated by AV_PIX_FMT_NONE. Can be NULL if this information is
++ * not known.
++ */
++ enum AVPixelFormat *valid_sw_formats;
++
++ /**
++ * The minimum size of frames in this hw_frames_ctx.
++ * (Zero if not known.)
++ */
++ int min_width;
++ int min_height;
++
++ /**
++ * The maximum size of frames in this hw_frames_ctx.
++ * (INT_MAX if not known / no limit.)
++ */
++ int max_width;
++ int max_height;
++} AVHWFramesConstraints;
++
++/**
++ * Allocate a HW-specific configuration structure for a given HW device.
++ * After use, the user must free all members as required by the specific
++ * hardware structure being used, then free the structure itself with
++ * av_free().
++ *
++ * @param device_ctx a reference to the associated AVHWDeviceContext.
++ * @return The newly created HW-specific configuration structure on
++ * success or NULL on failure.
++ */
++void *av_hwdevice_hwconfig_alloc(AVBufferRef *device_ctx);
++
++/**
++ * Get the constraints on HW frames given a device and the HW-specific
++ * configuration to be used with that device. If no HW-specific
++ * configuration is provided, returns the maximum possible capabilities
++ * of the device.
++ *
++ * @param ref a reference to the associated AVHWDeviceContext.
++ * @param hwconfig a filled HW-specific configuration structure, or NULL
++ * to return the maximum possible capabilities of the device.
++ * @return AVHWFramesConstraints structure describing the constraints
++ * on the device, or NULL if not available.
++ */
++AVHWFramesConstraints *av_hwdevice_get_hwframe_constraints(AVBufferRef *ref,
++ const void
*hwconfig);
++
++/**
++ * Free an AVHWFrameConstraints structure.
++ *
++ * @param constraints The (filled or unfilled) AVHWFrameConstraints
structure.
++ */
++void av_hwframe_constraints_free(AVHWFramesConstraints **constraints);
++
++
++/**
++ * Flags to apply to frame mappings.
++ */
++enum {
++ /**
++ * The mapping must be readable.
++ */
++ AV_HWFRAME_MAP_READ = 1 << 0,
++ /**
++ * The mapping must be writeable.
++ */
++ AV_HWFRAME_MAP_WRITE = 1 << 1,
++ /**
++ * The mapped frame will be overwritten completely in subsequent
++ * operations, so the current frame data need not be loaded. Any values
++ * which are not overwritten are unspecified.
++ */
++ AV_HWFRAME_MAP_OVERWRITE = 1 << 2,
++ /**
++ * The mapping must be direct. That is, there must not be any copying
in
++ * the map or unmap steps. Note that performance of direct mappings may
++ * be much lower than normal memory.
++ */
++ AV_HWFRAME_MAP_DIRECT = 1 << 3,
++};
++
++/**
++ * Map a hardware frame.
++ *
++ * This has a number of different possible effects, depending on the format
++ * and origin of the src and dst frames. On input, src should be a usable
++ * frame with valid buffers and dst should be blank (typically as just
created
++ * by av_frame_alloc()). src should have an associated hwframe context, and
++ * dst may optionally have a format and associated hwframe context.
++ *
++ * If src was created by mapping a frame from the hwframe context of dst,
++ * then this function undoes the mapping - dst is replaced by a reference to
++ * the frame that src was originally mapped from.
++ *
++ * If both src and dst have an associated hwframe context, then this
function
++ * attempts to map the src frame from its hardware context to that of dst
and
++ * then fill dst with appropriate data to be usable there. This will only
be
++ * possible if the hwframe contexts and associated devices are compatible -
++ * given compatible devices, av_hwframe_ctx_create_derived() can be used to
++ * create a hwframe context for dst in which mapping should be possible.
++ *
++ * If src has a hwframe context but dst does not, then the src frame is
++ * mapped to normal memory and should thereafter be usable as a normal
frame.
++ * If the format is set on dst, then the mapping will attempt to create dst
++ * with that format and fail if it is not possible. If format is unset (is
++ * AV_PIX_FMT_NONE) then dst will be mapped with whatever the most
appropriate
++ * format to use is (probably the sw_format of the src hwframe context).
++ *
++ * A return value of AVERROR(ENOSYS) indicates that the mapping is not
++ * possible with the given arguments and hwframe setup, while other return
++ * values indicate that it failed somehow.
++ *
++ * On failure, the destination frame will be left blank, except for the
++ * hw_frames_ctx/format fields that may have been set by the caller - those
++ * will be preserved as they were.
++ *
++ * @param dst Destination frame, to contain the mapping.
++ * @param src Source frame, to be mapped.
++ * @param flags Some combination of AV_HWFRAME_MAP_* flags.
++ * @return Zero on success, negative AVERROR code on failure.
++ */
++int av_hwframe_map(AVFrame *dst, const AVFrame *src, int flags);
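
A hedged sketch of the map-with-download-fallback pattern that the AVERROR(ENOSYS) note above suggests:

````
#include <errno.h>
#include <libavutil/error.h>
#include <libavutil/hwcontext.h>

/* Prefer a zero-copy read mapping; fall back to a full download. */
static int map_or_download(AVFrame *dst, const AVFrame *hw_frame)
{
    int ret = av_hwframe_map(dst, hw_frame, AV_HWFRAME_MAP_READ);
    if (ret == AVERROR(ENOSYS)) {
        av_frame_unref(dst);   /* make sure dst is clean before retrying */
        ret = av_hwframe_transfer_data(dst, hw_frame, 0);
    }
    return ret;
}
````
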
++
++
++/**
++ * Create and initialise an AVHWFramesContext as a mapping of another
existing
++ * AVHWFramesContext on a different device.
++ *
++ * av_hwframe_ctx_init() should not be called after this.
++ *
++ * @param derived_frame_ctx On success, a reference to the newly created
++ * AVHWFramesContext.
++ * @param format The AVPixelFormat for the derived context.
++ * @param derived_device_ctx A reference to the device to create the new
++ * AVHWFramesContext on.
++ * @param source_frame_ctx A reference to an existing AVHWFramesContext
++ * which will be mapped to the derived context.
++ * @param flags Some combination of AV_HWFRAME_MAP_* flags, defining the
++ * mapping parameters to apply to frames which are allocated
++ * in the derived device.
++ * @return Zero on success, negative AVERROR code on failure.
++ */
++int av_hwframe_ctx_create_derived(AVBufferRef **derived_frame_ctx,
++ enum AVPixelFormat format,
++ AVBufferRef *derived_device_ctx,
++ AVBufferRef *source_frame_ctx,
++ int flags);
++
++#endif /* AVUTIL_HWCONTEXT_H */
+diff --git
a/dom/media/platforms/ffmpeg/ffmpeg58/include/libavutil/hwcontext_drm.h
b/dom/media/platforms/ffmpeg/ffmpeg62/include/libavutil/hwcontext_drm.h
+copy from
dom/media/platforms/ffmpeg/ffmpeg58/include/libavutil/hwcontext_drm.h
+copy to dom/media/platforms/ffmpeg/ffmpeg62/include/libavutil/hwcontext_drm.h
+diff --git a/media/ffvpx/libavutil/hwcontext_vaapi.h
b/dom/media/platforms/ffmpeg/ffmpeg62/include/libavutil/hwcontext_vaapi.h
+copy from media/ffvpx/libavutil/hwcontext_vaapi.h
+copy to
dom/media/platforms/ffmpeg/ffmpeg62/include/libavutil/hwcontext_vaapi.h
+diff --git
a/dom/media/platforms/ffmpeg/ffmpeg57/include/libavutil/intfloat.h
b/dom/media/platforms/ffmpeg/ffmpeg62/include/libavutil/intfloat.h
+copy from dom/media/platforms/ffmpeg/ffmpeg57/include/libavutil/intfloat.h
+copy to dom/media/platforms/ffmpeg/ffmpeg62/include/libavutil/intfloat.h
+diff --git a/dom/media/platforms/ffmpeg/ffmpeg62/include/libavutil/log.h
b/dom/media/platforms/ffmpeg/ffmpeg62/include/libavutil/log.h
+new file mode 100644
+--- /dev/null
++++ b/dom/media/platforms/ffmpeg/ffmpeg62/include/libavutil/log.h
+@@ -0,0 +1,427 @@
++/*
++ * copyright (c) 2006 Michael Niedermayer <michaelni AT gmx.at>
++ *
++ * This file is part of FFmpeg.
++ *
++ * FFmpeg is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU Lesser General Public
++ * License as published by the Free Software Foundation; either
++ * version 2.1 of the License, or (at your option) any later version.
++ *
++ * FFmpeg is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with FFmpeg; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
USA
++ */
++
++#ifndef AVUTIL_LOG_H
++#define AVUTIL_LOG_H
++
++#include <stdarg.h>
++#include "attributes.h"
++#include "version.h"
++
++typedef enum {
++ AV_CLASS_CATEGORY_NA = 0,
++ AV_CLASS_CATEGORY_INPUT,
++ AV_CLASS_CATEGORY_OUTPUT,
++ AV_CLASS_CATEGORY_MUXER,
++ AV_CLASS_CATEGORY_DEMUXER,
++ AV_CLASS_CATEGORY_ENCODER,
++ AV_CLASS_CATEGORY_DECODER,
++ AV_CLASS_CATEGORY_FILTER,
++ AV_CLASS_CATEGORY_BITSTREAM_FILTER,
++ AV_CLASS_CATEGORY_SWSCALER,
++ AV_CLASS_CATEGORY_SWRESAMPLER,
++ AV_CLASS_CATEGORY_HWDEVICE,
++ AV_CLASS_CATEGORY_DEVICE_VIDEO_OUTPUT = 40,
++ AV_CLASS_CATEGORY_DEVICE_VIDEO_INPUT,
++ AV_CLASS_CATEGORY_DEVICE_AUDIO_OUTPUT,
++ AV_CLASS_CATEGORY_DEVICE_AUDIO_INPUT,
++ AV_CLASS_CATEGORY_DEVICE_OUTPUT,
++ AV_CLASS_CATEGORY_DEVICE_INPUT,
++ AV_CLASS_CATEGORY_NB ///< not part of ABI/API
++}AVClassCategory;
++
++enum AVClassStateFlags {
++ /**
++ * Object initialization has finished and it is now in the 'runtime'
stage.
++ * This affects e.g. what options can be set on the object (only
++ * AV_OPT_FLAG_RUNTIME_PARAM options can be set on initialized objects).
++ */
++ AV_CLASS_STATE_INITIALIZED = (1 << 0),
++};
++
++#define AV_IS_INPUT_DEVICE(category) \
++ (((category) == AV_CLASS_CATEGORY_DEVICE_VIDEO_INPUT) || \
++ ((category) == AV_CLASS_CATEGORY_DEVICE_AUDIO_INPUT) || \
++ ((category) == AV_CLASS_CATEGORY_DEVICE_INPUT))
++
++#define AV_IS_OUTPUT_DEVICE(category) \
++ (((category) == AV_CLASS_CATEGORY_DEVICE_VIDEO_OUTPUT) || \
++ ((category) == AV_CLASS_CATEGORY_DEVICE_AUDIO_OUTPUT) || \
++ ((category) == AV_CLASS_CATEGORY_DEVICE_OUTPUT))
++
++struct AVOptionRanges;
++
++/**
++ * Describe the class of an AVClass context structure. That is an
++ * arbitrary struct of which the first field is a pointer to an
++ * AVClass struct (e.g. AVCodecContext, AVFormatContext etc.).
++ */
++typedef struct AVClass {
++ /**
++ * The name of the class; usually it is the same name as the
++ * context structure type to which the AVClass is associated.
++ */
++ const char* class_name;
++
++ /**
++ * A pointer to a function which returns the name of a context
++ * instance ctx associated with the class.
++ */
++ const char* (*item_name)(void* ctx);
++
++ /**
++ * An array of options for the structure or NULL.
++ * When non-NULL, the array must be terminated by an option with a NULL
++ * name.
++ *
++ * @see av_set_default_options()
++ */
++ const struct AVOption *option;
++
++ /**
++ * LIBAVUTIL_VERSION with which this structure was created.
++ * This is used to allow fields to be added to AVClass without requiring
++ * major version bumps everywhere.
++ */
++
++ int version;
++
++ /**
++ * Offset in the structure where the log level offset is stored. The log
++ * level offset is an int added to the log level for logging with this
++ * object as the context.
++ *
++ * 0 means there is no such variable.
++ */
++ int log_level_offset_offset;
++
++ /**
++ * Offset in the structure where a pointer to the parent context for
++ * logging is stored. For example a decoder could pass its
AVCodecContext
++ * to eval as such a parent context, which an ::av_log() implementation
++ * could then leverage to display the parent context.
++ *
++ * When the pointer is NULL, or this offset is zero, the object is
assumed
++ * to have no parent.
++ */
++ int parent_log_context_offset;
++
++ /**
++ * Category used for visualization (like color).
++ *
++ * Only used when ::get_category() is NULL. Use this field when all
++ * instances of this class have the same category, use ::get_category()
++ * otherwise.
++ */
++ AVClassCategory category;
++
++ /**
++ * Callback to return the instance category. Use this callback when
++ * different instances of this class may have different categories,
++ * ::category otherwise.
++ */
++ AVClassCategory (*get_category)(void* ctx);
++
++ /**
++ * Callback to return the supported/allowed ranges.
++ */
++ int (*query_ranges)(struct AVOptionRanges **, void *obj, const char
*key, int flags);
++
++ /**
++ * Return next AVOptions-enabled child or NULL
++ */
++ void* (*child_next)(void *obj, void *prev);
++
++ /**
++ * Iterate over the AVClasses corresponding to potential
AVOptions-enabled
++ * children.
++ *
++ * @param iter pointer to opaque iteration state. The caller must
initialize
++ * *iter to NULL before the first call.
++ * @return AVClass for the next AVOptions-enabled child or NULL if
there are
++ * no more such children.
++ *
++ * @note The difference between ::child_next() and
::child_class_iterate()
++ * is that ::child_next() iterates over _actual_ children of an
++ * _existing_ object instance, while ::child_class_iterate()
iterates
++ * over the classes of all _potential_ children of any possible
++ * instance of this class.
++ */
++ const struct AVClass* (*child_class_iterate)(void **iter);
++
++ /**
++ * When non-zero, offset in the object to an unsigned int holding object
++ * state flags, a combination of AVClassStateFlags values. The flags are
++ * updated by the object to signal its state to the generic code.
++ *
++ * Added in version 59.41.100.
++ */
++ int state_flags_offset;
++} AVClass;
++
++/**
++ * @addtogroup lavu_log
++ *
++ * @{
++ *
++ * @defgroup lavu_log_constants Logging Constants
++ *
++ * @{
++ */
++
++/**
++ * Print no output.
++ */
++#define AV_LOG_QUIET -8
++
++/**
++ * Something went really wrong and we will crash now.
++ */
++#define AV_LOG_PANIC 0
++
++/**
++ * Something went wrong and recovery is not possible.
++ * For example, no header was found for a format which depends
++ * on headers or an illegal combination of parameters is used.
++ */
++#define AV_LOG_FATAL 8
++
++/**
++ * Something went wrong and cannot losslessly be recovered.
++ * However, not all future data is affected.
++ */
++#define AV_LOG_ERROR 16
++
++/**
++ * Something somehow does not look correct. This may or may not
++ * lead to problems. An example would be the use of '-vstrict -2'.
++ */
++#define AV_LOG_WARNING 24
++
++/**
++ * Standard information.
++ */
++#define AV_LOG_INFO 32
++
++/**
++ * Detailed information.
++ */
++#define AV_LOG_VERBOSE 40
++
++/**
++ * Stuff which is only useful for libav* developers.
++ */
++#define AV_LOG_DEBUG 48
++
++/**
++ * Extremely verbose debugging, useful for libav* development.
++ */
++#define AV_LOG_TRACE 56
++
++#define AV_LOG_MAX_OFFSET (AV_LOG_TRACE - AV_LOG_QUIET)
++
++/**
++ * @}
++ */
++
++/**
++ * Sets additional colors for extended debugging sessions.
++ * @code
++ av_log(ctx, AV_LOG_DEBUG|AV_LOG_C(134), "Message in purple\n");
++ @endcode
++ * Requires 256-color terminal support. Use outside of debugging is not
++ * recommended.
++ */
++#define AV_LOG_C(x) ((x) << 8)
++
++/**
++ * Send the specified message to the log if the level is less than or equal
++ * to the current av_log_level. By default, all logging messages are sent to
++ * stderr. This behavior can be altered by setting a different logging
callback
++ * function.
++ * @see av_log_set_callback
++ *
++ * @param avcl A pointer to an arbitrary struct of which the first field is
a
++ * pointer to an AVClass struct or NULL if general log.
++ * @param level The importance level of the message expressed using a @ref
++ * lavu_log_constants "Logging Constant".
++ * @param fmt The format string (printf-compatible) that specifies how
++ * subsequent arguments are converted to output.
++ */
++void av_log(void *avcl, int level, const char *fmt, ...)
av_printf_format(3, 4);
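
Basic usage sketch (illustrative; any struct whose first field is a const AVClass * can serve as the context, or NULL for the general log):

````
#include <libavutil/log.h>

static void report_progress(void *avclass_ctx, int frames_done)
{
    av_log(NULL, AV_LOG_INFO, "processed %d frames\n", frames_done);
    av_log(avclass_ctx, AV_LOG_DEBUG, "context-scoped debug message\n");
}
````
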
++
++/**
++ * Send the specified message to the log once with the initial_level and
then with
++ * the subsequent_level. By default, all logging messages are sent to
++ * stderr. This behavior can be altered by setting a different logging
callback
++ * function.
++ * @see av_log
++ *
++ * @param avcl A pointer to an arbitrary struct of which the first field is
a
++ * pointer to an AVClass struct or NULL if general log.
++ * @param initial_level importance level of the message expressed using a
@ref
++ * lavu_log_constants "Logging Constant" for the first occurrence.
++ * @param subsequent_level importance level of the message expressed using
a @ref
++ * lavu_log_constants "Logging Constant" after the first occurrence.
++ * @param fmt The format string (printf-compatible) that specifies how
++ * subsequent arguments are converted to output.
++ * @param state a variable to keep track of whether a message has already
++ *        been printed; this must be initialized to 0 before the first use.
++ *        The same state must not be accessed by two threads simultaneously.
++ */
++void av_log_once(void* avcl, int initial_level, int subsequent_level, int
*state, const char *fmt, ...) av_printf_format(5, 6);
++
++
++/**
++ * Send the specified message to the log if the level is less than or equal
++ * to the current av_log_level. By default, all logging messages are sent to
++ * stderr. This behavior can be altered by setting a different logging
callback
++ * function.
++ * @see av_log_set_callback
++ *
++ * @param avcl A pointer to an arbitrary struct of which the first field is
a
++ * pointer to an AVClass struct.
++ * @param level The importance level of the message expressed using a @ref
++ * lavu_log_constants "Logging Constant".
++ * @param fmt The format string (printf-compatible) that specifies how
++ * subsequent arguments are converted to output.
++ * @param vl The arguments referenced by the format string.
++ */
++void av_vlog(void *avcl, int level, const char *fmt, va_list vl);
++
++/**
++ * Get the current log level
++ *
++ * @see lavu_log_constants
++ *
++ * @return Current log level
++ */
++int av_log_get_level(void);
++
++/**
++ * Set the log level
++ *
++ * @see lavu_log_constants
++ *
++ * @param level Logging level
++ */
++void av_log_set_level(int level);
++
++/**
++ * Set the logging callback
++ *
++ * @note The callback must be thread safe, even if the application does not
use
++ * threads itself as some codecs are multithreaded.
++ *
++ * @see av_log_default_callback
++ *
++ * @param callback A logging function with a compatible signature.
++ */
++void av_log_set_callback(void (*callback)(void*, int, const char*,
va_list));
++
++/**
++ * Default logging callback
++ *
++ * It prints the message to stderr, optionally colorizing it.
++ *
++ * @param avcl A pointer to an arbitrary struct of which the first field is
a
++ * pointer to an AVClass struct.
++ * @param level The importance level of the message expressed using a @ref
++ * lavu_log_constants "Logging Constant".
++ * @param fmt The format string (printf-compatible) that specifies how
++ * subsequent arguments are converted to output.
++ * @param vl The arguments referenced by the format string.
++ */
++void av_log_default_callback(void *avcl, int level, const char *fmt,
++ va_list vl);
++
++/**
++ * Return the context name
++ *
++ * @param ctx The AVClass context
++ *
++ * @return The AVClass class_name
++ */
++const char* av_default_item_name(void* ctx);
++AVClassCategory av_default_get_category(void *ptr);
++
++/**
++ * Format a line of log the same way as the default callback.
++ * @param line buffer to receive the formatted line
++ * @param line_size size of the buffer
++ * @param print_prefix used to store whether the prefix must be printed;
++ * must point to a persistent integer initially set to
1
++ */
++void av_log_format_line(void *ptr, int level, const char *fmt, va_list vl,
++ char *line, int line_size, int *print_prefix);
++
++/**
++ * Format a line of log the same way as the default callback.
++ * @param line buffer to receive the formatted line;
++ * may be NULL if line_size is 0
++ * @param line_size size of the buffer; at most line_size-1 characters
will
++ * be written to the buffer, plus one null terminator
++ * @param print_prefix used to store whether the prefix must be printed;
++ * must point to a persistent integer initially set to
1
++ * @return Returns a negative value if an error occurred, otherwise returns
++ * the number of characters that would have been written for a
++ * sufficiently large buffer, not including the terminating null
++ * character. If the return value is not less than line_size, it
means
++ * that the log message was truncated to fit the buffer.
++ */
++int av_log_format_line2(void *ptr, int level, const char *fmt, va_list vl,
++ char *line, int line_size, int *print_prefix);
++
++/**
++ * Skip repeated messages. This requires the user application to use av_log()
++ * instead of (f)printf, as the two would otherwise interfere and, with some
++ * bad luck, lead to "Last message repeated x times" messages appearing below
++ * the (f)printf output.
++ * To receive the final "last repeated" line, if any, the user application
++ * must call av_log(NULL, AV_LOG_QUIET, "%s", ""); at the end.
++ */
++#define AV_LOG_SKIP_REPEATED 1
++
++/**
++ * Include the log severity in messages originating from codecs.
++ *
++ * Results in messages such as:
++ * [rawvideo @ 0xDEADBEEF] [error] encode did not produce valid pts
++ */
++#define AV_LOG_PRINT_LEVEL 2
++
++/**
++ * Include system time in log output.
++ */
++#define AV_LOG_PRINT_TIME 4
++
++/**
++ * Include system date and time in log output.
++ */
++#define AV_LOG_PRINT_DATETIME 8
++
++void av_log_set_flags(int arg);
++int av_log_get_flags(void);
++
++/**
++ * @}
++ */
++
++#endif /* AVUTIL_LOG_H */
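
[Editor's note: for reference, a minimal sketch of how an application could use the logging API vendored above. It assumes the header is reachable as <libavutil/log.h> and that the program links against libavutil; my_log_cb is an illustrative name, not part of the API.]

````
#include <stdarg.h>
#include <libavutil/log.h>

/* Illustrative callback: drop messages above the current threshold and
 * hand the rest to the stock formatter. Callbacks must be thread safe. */
static void my_log_cb(void *avcl, int level, const char *fmt, va_list vl)
{
    if (level > av_log_get_level())
        return;
    av_log_default_callback(avcl, level, fmt, vl);
}

int main(void)
{
    av_log_set_level(AV_LOG_VERBOSE);                       /* log threshold */
    av_log_set_flags(AV_LOG_SKIP_REPEATED | AV_LOG_PRINT_LEVEL);
    av_log_set_callback(my_log_cb);

    av_log(NULL, AV_LOG_INFO, "starting, version %d\n", 1);
    /* flush a pending "Last message repeated ..." line, as documented above */
    av_log(NULL, AV_LOG_QUIET, "%s", "");
    return 0;
}
````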
+diff --git a/media/ffvpx/libavutil/macros.h
b/dom/media/platforms/ffmpeg/ffmpeg62/include/libavutil/macros.h
+copy from media/ffvpx/libavutil/macros.h
+copy to dom/media/platforms/ffmpeg/ffmpeg62/include/libavutil/macros.h
+diff --git
a/dom/media/platforms/ffmpeg/ffmpeg62/include/libavutil/mathematics.h
b/dom/media/platforms/ffmpeg/ffmpeg62/include/libavutil/mathematics.h
+new file mode 100644
+--- /dev/null
++++ b/dom/media/platforms/ffmpeg/ffmpeg62/include/libavutil/mathematics.h
+@@ -0,0 +1,300 @@
++/*
++ * copyright (c) 2005-2012 Michael Niedermayer <michaelni AT gmx.at>
++ *
++ * This file is part of FFmpeg.
++ *
++ * FFmpeg is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU Lesser General Public
++ * License as published by the Free Software Foundation; either
++ * version 2.1 of the License, or (at your option) any later version.
++ *
++ * FFmpeg is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with FFmpeg; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
USA
++ */
++
++/**
++ * @file
++ * @addtogroup lavu_math
++ * Mathematical utilities for working with timestamp and time base.
++ */
++
++#ifndef AVUTIL_MATHEMATICS_H
++#define AVUTIL_MATHEMATICS_H
++
++#include <stdint.h>
++#include <math.h>
++#include "attributes.h"
++#include "rational.h"
++#include "intfloat.h"
++
++#ifndef M_E
++#define M_E 2.7182818284590452354 /* e */
++#endif
++#ifndef M_Ef
++#define M_Ef 2.7182818284590452354f /* e */
++#endif
++#ifndef M_LN2
++#define M_LN2 0.69314718055994530942 /* log_e 2 */
++#endif
++#ifndef M_LN2f
++#define M_LN2f 0.69314718055994530942f /* log_e 2 */
++#endif
++#ifndef M_LN10
++#define M_LN10 2.30258509299404568402 /* log_e 10 */
++#endif
++#ifndef M_LN10f
++#define M_LN10f 2.30258509299404568402f /* log_e 10 */
++#endif
++#ifndef M_LOG2_10
++#define M_LOG2_10 3.32192809488736234787 /* log_2 10 */
++#endif
++#ifndef M_LOG2_10f
++#define M_LOG2_10f 3.32192809488736234787f /* log_2 10 */
++#endif
++#ifndef M_PHI
++#define M_PHI 1.61803398874989484820 /* phi / golden ratio */
++#endif
++#ifndef M_PHIf
++#define M_PHIf 1.61803398874989484820f /* phi / golden ratio */
++#endif
++#ifndef M_PI
++#define M_PI 3.14159265358979323846 /* pi */
++#endif
++#ifndef M_PIf
++#define M_PIf 3.14159265358979323846f /* pi */
++#endif
++#ifndef M_PI_2
++#define M_PI_2 1.57079632679489661923 /* pi/2 */
++#endif
++#ifndef M_PI_2f
++#define M_PI_2f 1.57079632679489661923f /* pi/2 */
++#endif
++#ifndef M_PI_4
++#define M_PI_4 0.78539816339744830962 /* pi/4 */
++#endif
++#ifndef M_PI_4f
++#define M_PI_4f 0.78539816339744830962f /* pi/4 */
++#endif
++#ifndef M_1_PI
++#define M_1_PI 0.31830988618379067154 /* 1/pi */
++#endif
++#ifndef M_1_PIf
++#define M_1_PIf 0.31830988618379067154f /* 1/pi */
++#endif
++#ifndef M_2_PI
++#define M_2_PI 0.63661977236758134308 /* 2/pi */
++#endif
++#ifndef M_2_PIf
++#define M_2_PIf 0.63661977236758134308f /* 2/pi */
++#endif
++#ifndef M_2_SQRTPI
++#define M_2_SQRTPI 1.12837916709551257390 /* 2/sqrt(pi) */
++#endif
++#ifndef M_2_SQRTPIf
++#define M_2_SQRTPIf 1.12837916709551257390f /* 2/sqrt(pi) */
++#endif
++#ifndef M_SQRT1_2
++#define M_SQRT1_2 0.70710678118654752440 /* 1/sqrt(2) */
++#endif
++#ifndef M_SQRT1_2f
++#define M_SQRT1_2f 0.70710678118654752440f /* 1/sqrt(2) */
++#endif
++#ifndef M_SQRT2
++#define M_SQRT2 1.41421356237309504880 /* sqrt(2) */
++#endif
++#ifndef M_SQRT2f
++#define M_SQRT2f 1.41421356237309504880f /* sqrt(2) */
++#endif
++#ifndef NAN
++#define NAN av_int2float(0x7fc00000)
++#endif
++#ifndef INFINITY
++#define INFINITY av_int2float(0x7f800000)
++#endif
++
++/**
++ * @addtogroup lavu_math
++ *
++ * @{
++ */
++
++/**
++ * Rounding methods.
++ */
++enum AVRounding {
++ AV_ROUND_ZERO = 0, ///< Round toward zero.
++ AV_ROUND_INF = 1, ///< Round away from zero.
++ AV_ROUND_DOWN = 2, ///< Round toward -infinity.
++ AV_ROUND_UP = 3, ///< Round toward +infinity.
++ AV_ROUND_NEAR_INF = 5, ///< Round to nearest and halfway cases away
from zero.
++ /**
++ * Flag telling rescaling functions to pass `INT64_MIN`/`MAX` through
++ * unchanged, avoiding special cases for #AV_NOPTS_VALUE.
++ *
++ * Unlike other values of the enumeration AVRounding, this value is a
++ * bitmask that must be used in conjunction with another value of the
++ * enumeration through a bitwise OR, in order to set behavior for normal
++ * cases.
++ *
++ * @code{.c}
++ * av_rescale_rnd(3, 1, 2, AV_ROUND_UP | AV_ROUND_PASS_MINMAX);
++ * // Rescaling 3:
++ * // Calculating 3 * 1 / 2
++ * // 3 / 2 is rounded up to 2
++ * // => 2
++ *
++ * av_rescale_rnd(AV_NOPTS_VALUE, 1, 2, AV_ROUND_UP |
AV_ROUND_PASS_MINMAX);
++ * // Rescaling AV_NOPTS_VALUE:
++ * // AV_NOPTS_VALUE == INT64_MIN
++ * // AV_NOPTS_VALUE is passed through
++ * // => AV_NOPTS_VALUE
++ * @endcode
++ */
++ AV_ROUND_PASS_MINMAX = 8192,
++};
++
++/**
++ * Compute the greatest common divisor of two integer operands.
++ *
++ * @param a Operand
++ * @param b Operand
++ * @return GCD of a and b up to sign; if a >= 0 and b >= 0, return value is
>= 0;
++ * if a == 0 and b == 0, returns 0.
++ */
++int64_t av_const av_gcd(int64_t a, int64_t b);
++
++/**
++ * Rescale a 64-bit integer with rounding to nearest.
++ *
++ * The operation is mathematically equivalent to `a * b / c`, but writing
that
++ * directly can overflow.
++ *
++ * This function is equivalent to av_rescale_rnd() with #AV_ROUND_NEAR_INF.
++ *
++ * @see av_rescale_rnd(), av_rescale_q(), av_rescale_q_rnd()
++ */
++int64_t av_rescale(int64_t a, int64_t b, int64_t c) av_const;
++
++/**
++ * Rescale a 64-bit integer with specified rounding.
++ *
++ * The operation is mathematically equivalent to `a * b / c`, but writing
that
++ * directly can overflow, and does not support different rounding methods.
++ * If the result is not representable then INT64_MIN is returned.
++ *
++ * @see av_rescale(), av_rescale_q(), av_rescale_q_rnd()
++ */
++int64_t av_rescale_rnd(int64_t a, int64_t b, int64_t c, enum AVRounding
rnd) av_const;
++
++/**
++ * Rescale a 64-bit integer by 2 rational numbers.
++ *
++ * The operation is mathematically equivalent to `a * bq / cq`.
++ *
++ * This function is equivalent to av_rescale_q_rnd() with
#AV_ROUND_NEAR_INF.
++ *
++ * @see av_rescale(), av_rescale_rnd(), av_rescale_q_rnd()
++ */
++int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq) av_const;
++
++/**
++ * Rescale a 64-bit integer by 2 rational numbers with specified rounding.
++ *
++ * The operation is mathematically equivalent to `a * bq / cq`.
++ *
++ * @see av_rescale(), av_rescale_rnd(), av_rescale_q()
++ */
++int64_t av_rescale_q_rnd(int64_t a, AVRational bq, AVRational cq,
++ enum AVRounding rnd) av_const;
++
++/**
++ * Compare two timestamps each in its own time base.
++ *
++ * @return One of the following values:
++ * - -1 if `ts_a` is before `ts_b`
++ * - 1 if `ts_a` is after `ts_b`
++ * - 0 if they represent the same position
++ *
++ * @warning
++ * The result of the function is undefined if one of the timestamps is
outside
++ * the `int64_t` range when represented in the other's timebase.
++ */
++int av_compare_ts(int64_t ts_a, AVRational tb_a, int64_t ts_b, AVRational
tb_b);
++
++/**
++ * Compare the remainders of two integer operands divided by a common
divisor.
++ *
++ * In other words, compare the least significant `log2(mod)` bits of
integers
++ * `a` and `b`.
++ *
++ * @code{.c}
++ * av_compare_mod(0x11, 0x02, 0x10) < 0 // since 0x11 % 0x10 (0x1) < 0x02
% 0x10 (0x2)
++ * av_compare_mod(0x11, 0x02, 0x20) > 0 // since 0x11 % 0x20 (0x11) > 0x02
% 0x20 (0x02)
++ * @endcode
++ *
++ * @param a Operand
++ * @param b Operand
++ * @param mod Divisor; must be a power of 2
++ * @return
++ * - a negative value if `a % mod < b % mod`
++ * - a positive value if `a % mod > b % mod`
++ * - zero if `a % mod == b % mod`
++ */
++int64_t av_compare_mod(uint64_t a, uint64_t b, uint64_t mod);
++
++/**
++ * Rescale a timestamp while preserving known durations.
++ *
++ * This function is designed to be called per audio packet to scale the
input
++ * timestamp to a different time base. Compared to a simple av_rescale_q()
++ * call, this function is robust against possible inconsistent frame
durations.
++ *
++ * The `last` parameter is a state variable that must be preserved for all
++ * subsequent calls for the same stream. For the first call, `*last` should
be
++ * initialized to #AV_NOPTS_VALUE.
++ *
++ * @param[in] in_tb Input time base
++ * @param[in] in_ts Input timestamp
++ * @param[in] fs_tb Duration time base; typically this is
finer-grained
++ * (greater) than `in_tb` and `out_tb`
++ * @param[in] duration Duration till the next call to this function
(i.e.
++ * duration of the current packet/frame)
++ * @param[in,out] last Pointer to a timestamp expressed in terms of
++ * `fs_tb`, acting as a state variable
++ * @param[in] out_tb Output timebase
++ * @return Timestamp expressed in terms of `out_tb`
++ *
++ * @note In the context of this function, "duration" is in terms of samples,
not
++ * seconds.
++ */
++int64_t av_rescale_delta(AVRational in_tb, int64_t in_ts, AVRational
fs_tb, int duration, int64_t *last, AVRational out_tb);
++
++/**
++ * Add a value to a timestamp.
++ *
++ * This function guarantees that when the same value is repeatedly added,
++ * no accumulation of rounding errors occurs.
++ *
++ * @param[in] ts Input timestamp
++ * @param[in] ts_tb Input timestamp time base
++ * @param[in] inc Value to be added
++ * @param[in] inc_tb Time base of `inc`
++ */
++int64_t av_add_stable(AVRational ts_tb, int64_t ts, AVRational inc_tb,
int64_t inc);
++
++/**
++ * 0th order modified Bessel function of the first kind.
++ */
++double av_bessel_i0(double x);
++
++/**
++ * @}
++ */
++
++#endif /* AVUTIL_MATHEMATICS_H */
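
[Editor's note: since mathematics.h is mostly about safe timestamp rescaling, a short usage sketch may help. It assumes the header is reachable as <libavutil/mathematics.h> and that the program links against libavutil; the time-base values are made up for illustration.]

````
#include <stdio.h>
#include <libavutil/mathematics.h>

int main(void)
{
    AVRational tb_90k = {1, 90000};  /* e.g. an MPEG-TS 90 kHz time base */
    AVRational tb_ms  = {1, 1000};   /* millisecond time base */

    /* 123456 ticks at 90 kHz in milliseconds, rounded to nearest: 1372 */
    int64_t ms = av_rescale_q(123456, tb_90k, tb_ms);

    /* Same conversion, rounding up and letting INT64_MIN/MAX pass through */
    int64_t ms_up = av_rescale_q_rnd(123456, tb_90k, tb_ms,
                                     AV_ROUND_UP | AV_ROUND_PASS_MINMAX);

    /* Compare timestamps that live in different time bases: -1 here,
     * because 123456/90000 s is slightly before 1.372 s. */
    int cmp = av_compare_ts(123456, tb_90k, 1372, tb_ms);

    printf("%lld %lld %d\n", (long long)ms, (long long)ms_up, cmp);
    return 0;
}
````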
+diff --git a/media/ffvpx/libavutil/mem.h
b/dom/media/platforms/ffmpeg/ffmpeg62/include/libavutil/mem.h
+copy from media/ffvpx/libavutil/mem.h
+copy to dom/media/platforms/ffmpeg/ffmpeg62/include/libavutil/mem.h
+diff --git a/dom/media/platforms/ffmpeg/ffmpeg62/include/libavutil/pixfmt.h
b/dom/media/platforms/ffmpeg/ffmpeg62/include/libavutil/pixfmt.h
+new file mode 100644
+--- /dev/null
++++ b/dom/media/platforms/ffmpeg/ffmpeg62/include/libavutil/pixfmt.h
+@@ -0,0 +1,797 @@
++/*
++ * copyright (c) 2006 Michael Niedermayer <michaelni AT gmx.at>
++ *
++ * This file is part of FFmpeg.
++ *
++ * FFmpeg is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU Lesser General Public
++ * License as published by the Free Software Foundation; either
++ * version 2.1 of the License, or (at your option) any later version.
++ *
++ * FFmpeg is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with FFmpeg; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
USA
++ */
++
++#ifndef AVUTIL_PIXFMT_H
++#define AVUTIL_PIXFMT_H
++
++/**
++ * @file
++ * pixel format definitions
++ */
++
++#include "libavutil/avconfig.h"
++#include "version.h"
++
++#define AVPALETTE_SIZE 1024
++#define AVPALETTE_COUNT 256
++
++/**
++ * Maximum number of planes in any pixel format.
++ * This should be used when a maximum is needed, but code should not
++ * be written to require a maximum for no good reason.
++ */
++#define AV_VIDEO_MAX_PLANES 4
++
++/**
++ * Pixel format.
++ *
++ * @note
++ * AV_PIX_FMT_RGB32 is handled in an endian-specific manner. An RGBA
++ * color is put together as:
++ * (A << 24) | (R << 16) | (G << 8) | B
++ * This is stored as BGRA on little-endian CPU architectures and ARGB on
++ * big-endian CPUs.
++ *
++ * @note
++ * If the resolution is not a multiple of the chroma subsampling factor
++ * then the chroma plane resolution must be rounded up.
++ *
++ * @par
++ * When the pixel format is palettized RGB32 (AV_PIX_FMT_PAL8), the
palettized
++ * image data is stored in AVFrame.data[0]. The palette is transported in
++ * AVFrame.data[1], is 1024 bytes long (256 4-byte entries) and is
++ * formatted the same as in AV_PIX_FMT_RGB32 described above (i.e., it is
++ * also endian-specific). Note also that the individual RGB32 palette
++ * components stored in AVFrame.data[1] should be in the range 0..255.
++ * This is important as many custom PAL8 video codecs that were designed
++ * to run on the IBM VGA graphics adapter use 6-bit palette components.
++ *
++ * @par
++ * For all the 8 bits per pixel formats, an RGB32 palette is in data[1] like
++ * for pal8. This palette is filled in automatically by the function
++ * allocating the picture.
++ */
++enum AVPixelFormat {
++ AV_PIX_FMT_NONE = -1,
++ AV_PIX_FMT_YUV420P, ///< planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample
per 2x2 Y samples)
++ AV_PIX_FMT_YUYV422, ///< packed YUV 4:2:2, 16bpp, Y0 Cb Y1 Cr
++ AV_PIX_FMT_RGB24, ///< packed RGB 8:8:8, 24bpp, RGBRGB...
++ AV_PIX_FMT_BGR24, ///< packed RGB 8:8:8, 24bpp, BGRBGR...
++ AV_PIX_FMT_YUV422P, ///< planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample
per 2x1 Y samples)
++ AV_PIX_FMT_YUV444P, ///< planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample
per 1x1 Y samples)
++ AV_PIX_FMT_YUV410P, ///< planar YUV 4:1:0, 9bpp, (1 Cr & Cb sample
per 4x4 Y samples)
++ AV_PIX_FMT_YUV411P, ///< planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample
per 4x1 Y samples)
++ AV_PIX_FMT_GRAY8, ///< Y , 8bpp
++ AV_PIX_FMT_MONOWHITE, ///< Y , 1bpp, 0 is white, 1 is
black, in each byte pixels are ordered from the msb to the lsb
++ AV_PIX_FMT_MONOBLACK, ///< Y , 1bpp, 0 is black, 1 is
white, in each byte pixels are ordered from the msb to the lsb
++ AV_PIX_FMT_PAL8, ///< 8 bits with AV_PIX_FMT_RGB32 palette
++ AV_PIX_FMT_YUVJ420P, ///< planar YUV 4:2:0, 12bpp, full scale (JPEG),
deprecated in favor of AV_PIX_FMT_YUV420P and setting color_range
++ AV_PIX_FMT_YUVJ422P, ///< planar YUV 4:2:2, 16bpp, full scale (JPEG),
deprecated in favor of AV_PIX_FMT_YUV422P and setting color_range
++ AV_PIX_FMT_YUVJ444P, ///< planar YUV 4:4:4, 24bpp, full scale (JPEG),
deprecated in favor of AV_PIX_FMT_YUV444P and setting color_range
++ AV_PIX_FMT_UYVY422, ///< packed YUV 4:2:2, 16bpp, Cb Y0 Cr Y1
++ AV_PIX_FMT_UYYVYY411, ///< packed YUV 4:1:1, 12bpp, Cb Y0 Y1 Cr Y2 Y3
++ AV_PIX_FMT_BGR8, ///< packed RGB 3:3:2, 8bpp, (msb)2B 3G 3R(lsb)
++ AV_PIX_FMT_BGR4, ///< packed RGB 1:2:1 bitstream, 4bpp, (msb)1B
2G 1R(lsb), a byte contains two pixels, the first pixel in the byte is the
one composed by the 4 msb bits
++ AV_PIX_FMT_BGR4_BYTE, ///< packed RGB 1:2:1, 8bpp, (msb)1B 2G 1R(lsb)
++ AV_PIX_FMT_RGB8, ///< packed RGB 3:3:2, 8bpp, (msb)3R 3G 2B(lsb)
++ AV_PIX_FMT_RGB4, ///< packed RGB 1:2:1 bitstream, 4bpp, (msb)1R
2G 1B(lsb), a byte contains two pixels, the first pixel in the byte is the
one composed by the 4 msb bits
++ AV_PIX_FMT_RGB4_BYTE, ///< packed RGB 1:2:1, 8bpp, (msb)1R 2G 1B(lsb)
++ AV_PIX_FMT_NV12, ///< planar YUV 4:2:0, 12bpp, 1 plane for Y and 1
plane for the UV components, which are interleaved (first byte U and the
following byte V)
++ AV_PIX_FMT_NV21, ///< as above, but U and V bytes are swapped
++
++ AV_PIX_FMT_ARGB, ///< packed ARGB 8:8:8:8, 32bpp, ARGBARGB...
++ AV_PIX_FMT_RGBA, ///< packed RGBA 8:8:8:8, 32bpp, RGBARGBA...
++ AV_PIX_FMT_ABGR, ///< packed ABGR 8:8:8:8, 32bpp, ABGRABGR...
++ AV_PIX_FMT_BGRA, ///< packed BGRA 8:8:8:8, 32bpp, BGRABGRA...
++
++ AV_PIX_FMT_GRAY16BE, ///< Y , 16bpp, big-endian
++ AV_PIX_FMT_GRAY16LE, ///< Y , 16bpp, little-endian
++ AV_PIX_FMT_YUV440P, ///< planar YUV 4:4:0 (1 Cr & Cb sample per 1x2 Y
samples)
++ AV_PIX_FMT_YUVJ440P, ///< planar YUV 4:4:0 full scale (JPEG),
deprecated in favor of AV_PIX_FMT_YUV440P and setting color_range
++ AV_PIX_FMT_YUVA420P, ///< planar YUV 4:2:0, 20bpp, (1 Cr & Cb sample
per 2x2 Y & A samples)
++ AV_PIX_FMT_RGB48BE, ///< packed RGB 16:16:16, 48bpp, 16R, 16G, 16B,
the 2-byte value for each R/G/B component is stored as big-endian
++ AV_PIX_FMT_RGB48LE, ///< packed RGB 16:16:16, 48bpp, 16R, 16G, 16B,
the 2-byte value for each R/G/B component is stored as little-endian
++
++ AV_PIX_FMT_RGB565BE, ///< packed RGB 5:6:5, 16bpp, (msb) 5R 6G
5B(lsb), big-endian
++ AV_PIX_FMT_RGB565LE, ///< packed RGB 5:6:5, 16bpp, (msb) 5R 6G
5B(lsb), little-endian
++ AV_PIX_FMT_RGB555BE, ///< packed RGB 5:5:5, 16bpp, (msb)1X 5R 5G
5B(lsb), big-endian , X=unused/undefined
++ AV_PIX_FMT_RGB555LE, ///< packed RGB 5:5:5, 16bpp, (msb)1X 5R 5G
5B(lsb), little-endian, X=unused/undefined
++
++ AV_PIX_FMT_BGR565BE, ///< packed BGR 5:6:5, 16bpp, (msb) 5B 6G
5R(lsb), big-endian
++ AV_PIX_FMT_BGR565LE, ///< packed BGR 5:6:5, 16bpp, (msb) 5B 6G
5R(lsb), little-endian
++ AV_PIX_FMT_BGR555BE, ///< packed BGR 5:5:5, 16bpp, (msb)1X 5B 5G
5R(lsb), big-endian , X=unused/undefined
++ AV_PIX_FMT_BGR555LE, ///< packed BGR 5:5:5, 16bpp, (msb)1X 5B 5G
5R(lsb), little-endian, X=unused/undefined
++
++ /**
++ * Hardware acceleration through VA-API, data[3] contains a
++ * VASurfaceID.
++ */
++ AV_PIX_FMT_VAAPI,
++
++ AV_PIX_FMT_YUV420P16LE, ///< planar YUV 4:2:0, 24bpp, (1 Cr & Cb
sample per 2x2 Y samples), little-endian
++ AV_PIX_FMT_YUV420P16BE, ///< planar YUV 4:2:0, 24bpp, (1 Cr & Cb
sample per 2x2 Y samples), big-endian
++ AV_PIX_FMT_YUV422P16LE, ///< planar YUV 4:2:2, 32bpp, (1 Cr & Cb
sample per 2x1 Y samples), little-endian
++ AV_PIX_FMT_YUV422P16BE, ///< planar YUV 4:2:2, 32bpp, (1 Cr & Cb
sample per 2x1 Y samples), big-endian
++ AV_PIX_FMT_YUV444P16LE, ///< planar YUV 4:4:4, 48bpp, (1 Cr & Cb
sample per 1x1 Y samples), little-endian
++ AV_PIX_FMT_YUV444P16BE, ///< planar YUV 4:4:4, 48bpp, (1 Cr & Cb
sample per 1x1 Y samples), big-endian
++ AV_PIX_FMT_DXVA2_VLD, ///< HW decoding through DXVA2,
Picture.data[3] contains a LPDIRECT3DSURFACE9 pointer
++
++ AV_PIX_FMT_RGB444LE, ///< packed RGB 4:4:4, 16bpp, (msb)4X 4R 4G
4B(lsb), little-endian, X=unused/undefined
++ AV_PIX_FMT_RGB444BE, ///< packed RGB 4:4:4, 16bpp, (msb)4X 4R 4G
4B(lsb), big-endian, X=unused/undefined
++ AV_PIX_FMT_BGR444LE, ///< packed BGR 4:4:4, 16bpp, (msb)4X 4B 4G
4R(lsb), little-endian, X=unused/undefined
++ AV_PIX_FMT_BGR444BE, ///< packed BGR 4:4:4, 16bpp, (msb)4X 4B 4G
4R(lsb), big-endian, X=unused/undefined
++ AV_PIX_FMT_YA8, ///< 8 bits gray, 8 bits alpha
++
++ AV_PIX_FMT_Y400A = AV_PIX_FMT_YA8, ///< alias for AV_PIX_FMT_YA8
++ AV_PIX_FMT_GRAY8A= AV_PIX_FMT_YA8, ///< alias for AV_PIX_FMT_YA8
++
++ AV_PIX_FMT_BGR48BE, ///< packed RGB 16:16:16, 48bpp, 16B, 16G, 16R,
the 2-byte value for each R/G/B component is stored as big-endian
++ AV_PIX_FMT_BGR48LE, ///< packed RGB 16:16:16, 48bpp, 16B, 16G, 16R,
the 2-byte value for each R/G/B component is stored as little-endian
++
++ /**
++ * The following 12 formats have the disadvantage of needing 1 format
for each bit depth.
++ * Notice that each 9/10 bits sample is stored in 16 bits with extra
padding.
++ * If you want to support multiple bit depths, then using
AV_PIX_FMT_YUV420P16* with the bpp stored separately is better.
++ */
++ AV_PIX_FMT_YUV420P9BE, ///< planar YUV 4:2:0, 13.5bpp, (1 Cr & Cb
sample per 2x2 Y samples), big-endian
++ AV_PIX_FMT_YUV420P9LE, ///< planar YUV 4:2:0, 13.5bpp, (1 Cr & Cb
sample per 2x2 Y samples), little-endian
++ AV_PIX_FMT_YUV420P10BE,///< planar YUV 4:2:0, 15bpp, (1 Cr & Cb sample
per 2x2 Y samples), big-endian
++ AV_PIX_FMT_YUV420P10LE,///< planar YUV 4:2:0, 15bpp, (1 Cr & Cb sample
per 2x2 Y samples), little-endian
++ AV_PIX_FMT_YUV422P10BE,///< planar YUV 4:2:2, 20bpp, (1 Cr & Cb sample
per 2x1 Y samples), big-endian
++ AV_PIX_FMT_YUV422P10LE,///< planar YUV 4:2:2, 20bpp, (1 Cr & Cb sample
per 2x1 Y samples), little-endian
++ AV_PIX_FMT_YUV444P9BE, ///< planar YUV 4:4:4, 27bpp, (1 Cr & Cb sample
per 1x1 Y samples), big-endian
++ AV_PIX_FMT_YUV444P9LE, ///< planar YUV 4:4:4, 27bpp, (1 Cr & Cb sample
per 1x1 Y samples), little-endian
++ AV_PIX_FMT_YUV444P10BE,///< planar YUV 4:4:4, 30bpp, (1 Cr & Cb sample
per 1x1 Y samples), big-endian
++ AV_PIX_FMT_YUV444P10LE,///< planar YUV 4:4:4, 30bpp, (1 Cr & Cb sample
per 1x1 Y samples), little-endian
++ AV_PIX_FMT_YUV422P9BE, ///< planar YUV 4:2:2, 18bpp, (1 Cr & Cb sample
per 2x1 Y samples), big-endian
++ AV_PIX_FMT_YUV422P9LE, ///< planar YUV 4:2:2, 18bpp, (1 Cr & Cb sample
per 2x1 Y samples), little-endian
++ AV_PIX_FMT_GBRP, ///< planar GBR 4:4:4 24bpp
++ AV_PIX_FMT_GBR24P = AV_PIX_FMT_GBRP, // alias for #AV_PIX_FMT_GBRP
++ AV_PIX_FMT_GBRP9BE, ///< planar GBR 4:4:4 27bpp, big-endian
++ AV_PIX_FMT_GBRP9LE, ///< planar GBR 4:4:4 27bpp, little-endian
++ AV_PIX_FMT_GBRP10BE, ///< planar GBR 4:4:4 30bpp, big-endian
++ AV_PIX_FMT_GBRP10LE, ///< planar GBR 4:4:4 30bpp, little-endian
++ AV_PIX_FMT_GBRP16BE, ///< planar GBR 4:4:4 48bpp, big-endian
++ AV_PIX_FMT_GBRP16LE, ///< planar GBR 4:4:4 48bpp, little-endian
++ AV_PIX_FMT_YUVA422P, ///< planar YUV 4:2:2 24bpp, (1 Cr & Cb sample
per 2x1 Y & A samples)
++ AV_PIX_FMT_YUVA444P, ///< planar YUV 4:4:4 32bpp, (1 Cr & Cb sample
per 1x1 Y & A samples)
++ AV_PIX_FMT_YUVA420P9BE, ///< planar YUV 4:2:0 22.5bpp, (1 Cr & Cb
sample per 2x2 Y & A samples), big-endian
++ AV_PIX_FMT_YUVA420P9LE, ///< planar YUV 4:2:0 22.5bpp, (1 Cr & Cb
sample per 2x2 Y & A samples), little-endian
++ AV_PIX_FMT_YUVA422P9BE, ///< planar YUV 4:2:2 27bpp, (1 Cr & Cb sample
per 2x1 Y & A samples), big-endian
++ AV_PIX_FMT_YUVA422P9LE, ///< planar YUV 4:2:2 27bpp, (1 Cr & Cb sample
per 2x1 Y & A samples), little-endian
++ AV_PIX_FMT_YUVA444P9BE, ///< planar YUV 4:4:4 36bpp, (1 Cr & Cb sample
per 1x1 Y & A samples), big-endian
++ AV_PIX_FMT_YUVA444P9LE, ///< planar YUV 4:4:4 36bpp, (1 Cr & Cb sample
per 1x1 Y & A samples), little-endian
++ AV_PIX_FMT_YUVA420P10BE, ///< planar YUV 4:2:0 25bpp, (1 Cr & Cb sample
per 2x2 Y & A samples, big-endian)
++ AV_PIX_FMT_YUVA420P10LE, ///< planar YUV 4:2:0 25bpp, (1 Cr & Cb sample
per 2x2 Y & A samples, little-endian)
++ AV_PIX_FMT_YUVA422P10BE, ///< planar YUV 4:2:2 30bpp, (1 Cr & Cb sample
per 2x1 Y & A samples, big-endian)
++ AV_PIX_FMT_YUVA422P10LE, ///< planar YUV 4:2:2 30bpp, (1 Cr & Cb sample
per 2x1 Y & A samples, little-endian)
++ AV_PIX_FMT_YUVA444P10BE, ///< planar YUV 4:4:4 40bpp, (1 Cr & Cb sample
per 1x1 Y & A samples, big-endian)
++ AV_PIX_FMT_YUVA444P10LE, ///< planar YUV 4:4:4 40bpp, (1 Cr & Cb sample
per 1x1 Y & A samples, little-endian)
++ AV_PIX_FMT_YUVA420P16BE, ///< planar YUV 4:2:0 40bpp, (1 Cr & Cb sample
per 2x2 Y & A samples, big-endian)
++ AV_PIX_FMT_YUVA420P16LE, ///< planar YUV 4:2:0 40bpp, (1 Cr & Cb sample
per 2x2 Y & A samples, little-endian)
++ AV_PIX_FMT_YUVA422P16BE, ///< planar YUV 4:2:2 48bpp, (1 Cr & Cb sample
per 2x1 Y & A samples, big-endian)
++ AV_PIX_FMT_YUVA422P16LE, ///< planar YUV 4:2:2 48bpp, (1 Cr & Cb sample
per 2x1 Y & A samples, little-endian)
++ AV_PIX_FMT_YUVA444P16BE, ///< planar YUV 4:4:4 64bpp, (1 Cr & Cb sample
per 1x1 Y & A samples, big-endian)
++ AV_PIX_FMT_YUVA444P16LE, ///< planar YUV 4:4:4 64bpp, (1 Cr & Cb sample
per 1x1 Y & A samples, little-endian)
++
++ AV_PIX_FMT_VDPAU, ///< HW acceleration through VDPAU,
Picture.data[3] contains a VdpVideoSurface
++
++ AV_PIX_FMT_XYZ12LE, ///< packed XYZ 4:4:4, 36 bpp, (msb) 12X, 12Y,
12Z (lsb), the 2-byte value for each X/Y/Z is stored as little-endian, the 4
lower bits are set to 0
++ AV_PIX_FMT_XYZ12BE, ///< packed XYZ 4:4:4, 36 bpp, (msb) 12X, 12Y,
12Z (lsb), the 2-byte value for each X/Y/Z is stored as big-endian, the 4
lower bits are set to 0
++ AV_PIX_FMT_NV16, ///< interleaved chroma YUV 4:2:2, 16bpp, (1
Cr & Cb sample per 2x1 Y samples)
++ AV_PIX_FMT_NV20LE, ///< interleaved chroma YUV 4:2:2, 20bpp, (1
Cr & Cb sample per 2x1 Y samples), little-endian
++ AV_PIX_FMT_NV20BE, ///< interleaved chroma YUV 4:2:2, 20bpp, (1
Cr & Cb sample per 2x1 Y samples), big-endian
++
++ AV_PIX_FMT_RGBA64BE, ///< packed RGBA 16:16:16:16, 64bpp, 16R, 16G,
16B, 16A, the 2-byte value for each R/G/B/A component is stored as big-endian
++ AV_PIX_FMT_RGBA64LE, ///< packed RGBA 16:16:16:16, 64bpp, 16R, 16G,
16B, 16A, the 2-byte value for each R/G/B/A component is stored as
little-endian
++ AV_PIX_FMT_BGRA64BE, ///< packed RGBA 16:16:16:16, 64bpp, 16B, 16G,
16R, 16A, the 2-byte value for each R/G/B/A component is stored as big-endian
++ AV_PIX_FMT_BGRA64LE, ///< packed RGBA 16:16:16:16, 64bpp, 16B, 16G,
16R, 16A, the 2-byte value for each R/G/B/A component is stored as
little-endian
++
++ AV_PIX_FMT_YVYU422, ///< packed YUV 4:2:2, 16bpp, Y0 Cr Y1 Cb
++
++ AV_PIX_FMT_YA16BE, ///< 16 bits gray, 16 bits alpha (big-endian)
++ AV_PIX_FMT_YA16LE, ///< 16 bits gray, 16 bits alpha
(little-endian)
++
++ AV_PIX_FMT_GBRAP, ///< planar GBRA 4:4:4:4 32bpp
++ AV_PIX_FMT_GBRAP16BE, ///< planar GBRA 4:4:4:4 64bpp, big-endian
++ AV_PIX_FMT_GBRAP16LE, ///< planar GBRA 4:4:4:4 64bpp, little-endian
++ /**
++ * HW acceleration through QSV, data[3] contains a pointer to the
++ * mfxFrameSurface1 structure.
++ *
++ * Before FFmpeg 5.0:
++ * mfxFrameSurface1.Data.MemId contains a pointer when importing
++ * the following frames as QSV frames:
++ *
++ * VAAPI:
++ * mfxFrameSurface1.Data.MemId contains a pointer to VASurfaceID
++ *
++ * DXVA2:
++ * mfxFrameSurface1.Data.MemId contains a pointer to IDirect3DSurface9
++ *
++ * FFmpeg 5.0 and above:
++ * mfxFrameSurface1.Data.MemId contains a pointer to the mfxHDLPair
++ * structure when importing the following frames as QSV frames:
++ *
++ * VAAPI:
++ * mfxHDLPair.first contains a VASurfaceID pointer.
++ * mfxHDLPair.second is always MFX_INFINITE.
++ *
++ * DXVA2:
++ * mfxHDLPair.first contains IDirect3DSurface9 pointer.
++ * mfxHDLPair.second is always MFX_INFINITE.
++ *
++ * D3D11:
++ * mfxHDLPair.first contains a ID3D11Texture2D pointer.
++ * mfxHDLPair.second contains the texture array index of the frame if
the
++ * ID3D11Texture2D is an array texture, or always MFX_INFINITE if it is
a
++ * normal texture.
++ */
++ AV_PIX_FMT_QSV,
++ /**
++ * HW acceleration through MMAL, data[3] contains a pointer to the
++ * MMAL_BUFFER_HEADER_T structure.
++ */
++ AV_PIX_FMT_MMAL,
++
++ AV_PIX_FMT_D3D11VA_VLD, ///< HW decoding through Direct3D11 via old
API, Picture.data[3] contains a ID3D11VideoDecoderOutputView pointer
++
++ /**
++ * HW acceleration through CUDA. data[i] contain CUdeviceptr pointers
++ * exactly as for system memory frames.
++ */
++ AV_PIX_FMT_CUDA,
++
++ AV_PIX_FMT_0RGB, ///< packed RGB 8:8:8, 32bpp, XRGBXRGB...
X=unused/undefined
++ AV_PIX_FMT_RGB0, ///< packed RGB 8:8:8, 32bpp, RGBXRGBX...
X=unused/undefined
++ AV_PIX_FMT_0BGR, ///< packed BGR 8:8:8, 32bpp, XBGRXBGR...
X=unused/undefined
++ AV_PIX_FMT_BGR0, ///< packed BGR 8:8:8, 32bpp, BGRXBGRX...
X=unused/undefined
++
++ AV_PIX_FMT_YUV420P12BE, ///< planar YUV 4:2:0,18bpp, (1 Cr & Cb sample
per 2x2 Y samples), big-endian
++ AV_PIX_FMT_YUV420P12LE, ///< planar YUV 4:2:0,18bpp, (1 Cr & Cb sample
per 2x2 Y samples), little-endian
++ AV_PIX_FMT_YUV420P14BE, ///< planar YUV 4:2:0,21bpp, (1 Cr & Cb sample
per 2x2 Y samples), big-endian
++ AV_PIX_FMT_YUV420P14LE, ///< planar YUV 4:2:0,21bpp, (1 Cr & Cb sample
per 2x2 Y samples), little-endian
++ AV_PIX_FMT_YUV422P12BE, ///< planar YUV 4:2:2,24bpp, (1 Cr & Cb sample
per 2x1 Y samples), big-endian
++ AV_PIX_FMT_YUV422P12LE, ///< planar YUV 4:2:2,24bpp, (1 Cr & Cb sample
per 2x1 Y samples), little-endian
++ AV_PIX_FMT_YUV422P14BE, ///< planar YUV 4:2:2,28bpp, (1 Cr & Cb sample
per 2x1 Y samples), big-endian
++ AV_PIX_FMT_YUV422P14LE, ///< planar YUV 4:2:2,28bpp, (1 Cr & Cb sample
per 2x1 Y samples), little-endian
++ AV_PIX_FMT_YUV444P12BE, ///< planar YUV 4:4:4,36bpp, (1 Cr & Cb sample
per 1x1 Y samples), big-endian
++ AV_PIX_FMT_YUV444P12LE, ///< planar YUV 4:4:4,36bpp, (1 Cr & Cb sample
per 1x1 Y samples), little-endian
++ AV_PIX_FMT_YUV444P14BE, ///< planar YUV 4:4:4,42bpp, (1 Cr & Cb sample
per 1x1 Y samples), big-endian
++ AV_PIX_FMT_YUV444P14LE, ///< planar YUV 4:4:4,42bpp, (1 Cr & Cb sample
per 1x1 Y samples), little-endian
++ AV_PIX_FMT_GBRP12BE, ///< planar GBR 4:4:4 36bpp, big-endian
++ AV_PIX_FMT_GBRP12LE, ///< planar GBR 4:4:4 36bpp, little-endian
++ AV_PIX_FMT_GBRP14BE, ///< planar GBR 4:4:4 42bpp, big-endian
++ AV_PIX_FMT_GBRP14LE, ///< planar GBR 4:4:4 42bpp, little-endian
++ AV_PIX_FMT_YUVJ411P, ///< planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample
per 4x1 Y samples) full scale (JPEG), deprecated in favor of
AV_PIX_FMT_YUV411P and setting color_range
++
++ AV_PIX_FMT_BAYER_BGGR8, ///< bayer, BGBG..(odd line), GRGR..(even
line), 8-bit samples
++ AV_PIX_FMT_BAYER_RGGB8, ///< bayer, RGRG..(odd line), GBGB..(even
line), 8-bit samples
++ AV_PIX_FMT_BAYER_GBRG8, ///< bayer, GBGB..(odd line), RGRG..(even
line), 8-bit samples
++ AV_PIX_FMT_BAYER_GRBG8, ///< bayer, GRGR..(odd line), BGBG..(even
line), 8-bit samples
++ AV_PIX_FMT_BAYER_BGGR16LE, ///< bayer, BGBG..(odd line), GRGR..(even
line), 16-bit samples, little-endian
++ AV_PIX_FMT_BAYER_BGGR16BE, ///< bayer, BGBG..(odd line), GRGR..(even
line), 16-bit samples, big-endian
++ AV_PIX_FMT_BAYER_RGGB16LE, ///< bayer, RGRG..(odd line), GBGB..(even
line), 16-bit samples, little-endian
++ AV_PIX_FMT_BAYER_RGGB16BE, ///< bayer, RGRG..(odd line), GBGB..(even
line), 16-bit samples, big-endian
++ AV_PIX_FMT_BAYER_GBRG16LE, ///< bayer, GBGB..(odd line), RGRG..(even
line), 16-bit samples, little-endian
++ AV_PIX_FMT_BAYER_GBRG16BE, ///< bayer, GBGB..(odd line), RGRG..(even
line), 16-bit samples, big-endian
++ AV_PIX_FMT_BAYER_GRBG16LE, ///< bayer, GRGR..(odd line), BGBG..(even
line), 16-bit samples, little-endian
++ AV_PIX_FMT_BAYER_GRBG16BE, ///< bayer, GRGR..(odd line), BGBG..(even
line), 16-bit samples, big-endian
++
++ AV_PIX_FMT_YUV440P10LE, ///< planar YUV 4:4:0,20bpp, (1 Cr & Cb sample
per 1x2 Y samples), little-endian
++ AV_PIX_FMT_YUV440P10BE, ///< planar YUV 4:4:0,20bpp, (1 Cr & Cb sample
per 1x2 Y samples), big-endian
++ AV_PIX_FMT_YUV440P12LE, ///< planar YUV 4:4:0,24bpp, (1 Cr & Cb sample
per 1x2 Y samples), little-endian
++ AV_PIX_FMT_YUV440P12BE, ///< planar YUV 4:4:0,24bpp, (1 Cr & Cb sample
per 1x2 Y samples), big-endian
++ AV_PIX_FMT_AYUV64LE, ///< packed AYUV 4:4:4,64bpp (1 Cr & Cb sample
per 1x1 Y & A samples), little-endian
++ AV_PIX_FMT_AYUV64BE, ///< packed AYUV 4:4:4,64bpp (1 Cr & Cb sample
per 1x1 Y & A samples), big-endian
++
++ AV_PIX_FMT_VIDEOTOOLBOX, ///< hardware decoding through Videotoolbox
++
++ AV_PIX_FMT_P010LE, ///< like NV12, with 10bpp per component, data in
the high bits, zeros in the low bits, little-endian
++ AV_PIX_FMT_P010BE, ///< like NV12, with 10bpp per component, data in
the high bits, zeros in the low bits, big-endian
++
++ AV_PIX_FMT_GBRAP12BE, ///< planar GBR 4:4:4:4 48bpp, big-endian
++ AV_PIX_FMT_GBRAP12LE, ///< planar GBR 4:4:4:4 48bpp, little-endian
++
++ AV_PIX_FMT_GBRAP10BE, ///< planar GBR 4:4:4:4 40bpp, big-endian
++ AV_PIX_FMT_GBRAP10LE, ///< planar GBR 4:4:4:4 40bpp, little-endian
++
++ AV_PIX_FMT_MEDIACODEC, ///< hardware decoding through MediaCodec
++
++ AV_PIX_FMT_GRAY12BE, ///< Y , 12bpp, big-endian
++ AV_PIX_FMT_GRAY12LE, ///< Y , 12bpp, little-endian
++ AV_PIX_FMT_GRAY10BE, ///< Y , 10bpp, big-endian
++ AV_PIX_FMT_GRAY10LE, ///< Y , 10bpp, little-endian
++
++ AV_PIX_FMT_P016LE, ///< like NV12, with 16bpp per component,
little-endian
++ AV_PIX_FMT_P016BE, ///< like NV12, with 16bpp per component, big-endian
++
++ /**
++ * Hardware surfaces for Direct3D11.
++ *
++ * This is preferred over the legacy AV_PIX_FMT_D3D11VA_VLD. The new
D3D11
++ * hwaccel API and filtering support AV_PIX_FMT_D3D11 only.
++ *
++ * data[0] contains a ID3D11Texture2D pointer, and data[1] contains the
++ * texture array index of the frame as intptr_t if the ID3D11Texture2D
is
++ * an array texture (or always 0 if it's a normal texture).
++ */
++ AV_PIX_FMT_D3D11,
++
++ AV_PIX_FMT_GRAY9BE, ///< Y , 9bpp, big-endian
++ AV_PIX_FMT_GRAY9LE, ///< Y , 9bpp, little-endian
++
++ AV_PIX_FMT_GBRPF32BE, ///< IEEE-754 single precision planar GBR 4:4:4,
96bpp, big-endian
++ AV_PIX_FMT_GBRPF32LE, ///< IEEE-754 single precision planar GBR 4:4:4,
96bpp, little-endian
++ AV_PIX_FMT_GBRAPF32BE, ///< IEEE-754 single precision planar GBRA
4:4:4:4, 128bpp, big-endian
++ AV_PIX_FMT_GBRAPF32LE, ///< IEEE-754 single precision planar GBRA
4:4:4:4, 128bpp, little-endian
++
++ /**
++ * DRM-managed buffers exposed through PRIME buffer sharing.
++ *
++ * data[0] points to an AVDRMFrameDescriptor.
++ */
++ AV_PIX_FMT_DRM_PRIME,
++ /**
++ * Hardware surfaces for OpenCL.
++ *
++ * data[i] contain 2D image objects (typed in C as cl_mem, used
++ * in OpenCL as image2d_t) for each plane of the surface.
++ */
++ AV_PIX_FMT_OPENCL,
++
++ AV_PIX_FMT_GRAY14BE, ///< Y , 14bpp, big-endian
++ AV_PIX_FMT_GRAY14LE, ///< Y , 14bpp, little-endian
++
++ AV_PIX_FMT_GRAYF32BE, ///< IEEE-754 single precision Y, 32bpp,
big-endian
++ AV_PIX_FMT_GRAYF32LE, ///< IEEE-754 single precision Y, 32bpp,
little-endian
++
++ AV_PIX_FMT_YUVA422P12BE, ///< planar YUV 4:2:2,24bpp, (1 Cr & Cb sample
per 2x1 Y samples), 12b alpha, big-endian
++ AV_PIX_FMT_YUVA422P12LE, ///< planar YUV 4:2:2,24bpp, (1 Cr & Cb sample
per 2x1 Y samples), 12b alpha, little-endian
++ AV_PIX_FMT_YUVA444P12BE, ///< planar YUV 4:4:4,36bpp, (1 Cr & Cb sample
per 1x1 Y samples), 12b alpha, big-endian
++ AV_PIX_FMT_YUVA444P12LE, ///< planar YUV 4:4:4,36bpp, (1 Cr & Cb sample
per 1x1 Y samples), 12b alpha, little-endian
++
++ AV_PIX_FMT_NV24, ///< planar YUV 4:4:4, 24bpp, 1 plane for Y and 1
plane for the UV components, which are interleaved (first byte U and the
following byte V)
++ AV_PIX_FMT_NV42, ///< as above, but U and V bytes are swapped
++
++ /**
++ * Vulkan hardware images.
++ *
++ * data[0] points to an AVVkFrame
++ */
++ AV_PIX_FMT_VULKAN,
++
++ AV_PIX_FMT_Y210BE, ///< packed YUV 4:2:2 like YUYV422, 20bpp, data
in the high bits, big-endian
++ AV_PIX_FMT_Y210LE, ///< packed YUV 4:2:2 like YUYV422, 20bpp, data
in the high bits, little-endian
++
++ AV_PIX_FMT_X2RGB10LE, ///< packed RGB 10:10:10, 30bpp, (msb)2X 10R 10G
10B(lsb), little-endian, X=unused/undefined
++ AV_PIX_FMT_X2RGB10BE, ///< packed RGB 10:10:10, 30bpp, (msb)2X 10R 10G
10B(lsb), big-endian, X=unused/undefined
++ AV_PIX_FMT_X2BGR10LE, ///< packed BGR 10:10:10, 30bpp, (msb)2X 10B 10G
10R(lsb), little-endian, X=unused/undefined
++ AV_PIX_FMT_X2BGR10BE, ///< packed BGR 10:10:10, 30bpp, (msb)2X 10B 10G
10R(lsb), big-endian, X=unused/undefined
++
++ AV_PIX_FMT_P210BE, ///< interleaved chroma YUV 4:2:2, 20bpp, data
in the high bits, big-endian
++ AV_PIX_FMT_P210LE, ///< interleaved chroma YUV 4:2:2, 20bpp, data
in the high bits, little-endian
++
++ AV_PIX_FMT_P410BE, ///< interleaved chroma YUV 4:4:4, 30bpp, data
in the high bits, big-endian
++ AV_PIX_FMT_P410LE, ///< interleaved chroma YUV 4:4:4, 30bpp, data
in the high bits, little-endian
++
++ AV_PIX_FMT_P216BE, ///< interleaved chroma YUV 4:2:2, 32bpp,
big-endian
++ AV_PIX_FMT_P216LE, ///< interleaved chroma YUV 4:2:2, 32bpp,
little-endian
++
++ AV_PIX_FMT_P416BE, ///< interleaved chroma YUV 4:4:4, 48bpp,
big-endian
++ AV_PIX_FMT_P416LE, ///< interleaved chroma YUV 4:4:4, 48bpp,
little-endian
++
++ AV_PIX_FMT_VUYA, ///< packed VUYA 4:4:4:4, 32bpp (1 Cr & Cb
sample per 1x1 Y & A samples), VUYAVUYA...
++
++ AV_PIX_FMT_RGBAF16BE, ///< IEEE-754 half precision packed RGBA
16:16:16:16, 64bpp, RGBARGBA..., big-endian
++ AV_PIX_FMT_RGBAF16LE, ///< IEEE-754 half precision packed RGBA
16:16:16:16, 64bpp, RGBARGBA..., little-endian
++
++ AV_PIX_FMT_VUYX, ///< packed VUYX 4:4:4:4, 32bpp, Variant of
VUYA where alpha channel is left undefined
++
++ AV_PIX_FMT_P012LE, ///< like NV12, with 12bpp per component, data
in the high bits, zeros in the low bits, little-endian
++ AV_PIX_FMT_P012BE, ///< like NV12, with 12bpp per component, data
in the high bits, zeros in the low bits, big-endian
++
++ AV_PIX_FMT_Y212BE, ///< packed YUV 4:2:2 like YUYV422, 24bpp, data
in the high bits, zeros in the low bits, big-endian
++ AV_PIX_FMT_Y212LE, ///< packed YUV 4:2:2 like YUYV422, 24bpp, data
in the high bits, zeros in the low bits, little-endian
++
++ AV_PIX_FMT_XV30BE, ///< packed XVYU 4:4:4, 32bpp, (msb)2X 10V 10Y
10U(lsb), big-endian, variant of Y410 where alpha channel is left undefined
++ AV_PIX_FMT_XV30LE, ///< packed XVYU 4:4:4, 32bpp, (msb)2X 10V 10Y
10U(lsb), little-endian, variant of Y410 where alpha channel is left undefined
++
++ AV_PIX_FMT_XV36BE, ///< packed XVYU 4:4:4, 48bpp, data in the high
bits, zeros in the low bits, big-endian, variant of Y412 where alpha channel
is left undefined
++ AV_PIX_FMT_XV36LE, ///< packed XVYU 4:4:4, 48bpp, data in the high
bits, zeros in the low bits, little-endian, variant of Y412 where alpha
channel is left undefined
++
++ AV_PIX_FMT_RGBF32BE, ///< IEEE-754 single precision packed RGB
32:32:32, 96bpp, RGBRGB..., big-endian
++ AV_PIX_FMT_RGBF32LE, ///< IEEE-754 single precision packed RGB
32:32:32, 96bpp, RGBRGB..., little-endian
++
++ AV_PIX_FMT_RGBAF32BE, ///< IEEE-754 single precision packed RGBA
32:32:32:32, 128bpp, RGBARGBA..., big-endian
++ AV_PIX_FMT_RGBAF32LE, ///< IEEE-754 single precision packed RGBA
32:32:32:32, 128bpp, RGBARGBA..., little-endian
++
++ AV_PIX_FMT_P212BE, ///< interleaved chroma YUV 4:2:2, 24bpp, data
in the high bits, big-endian
++ AV_PIX_FMT_P212LE, ///< interleaved chroma YUV 4:2:2, 24bpp, data
in the high bits, little-endian
++
++ AV_PIX_FMT_P412BE, ///< interleaved chroma YUV 4:4:4, 36bpp, data
in the high bits, big-endian
++ AV_PIX_FMT_P412LE, ///< interleaved chroma YUV 4:4:4, 36bpp, data
in the high bits, little-endian
++
++ AV_PIX_FMT_GBRAP14BE, ///< planar GBR 4:4:4:4 56bpp, big-endian
++ AV_PIX_FMT_GBRAP14LE, ///< planar GBR 4:4:4:4 56bpp, little-endian
++
++ /**
++ * Hardware surfaces for Direct3D 12.
++ *
++ * data[0] points to an AVD3D12VAFrame
++ */
++ AV_PIX_FMT_D3D12,
++
++ AV_PIX_FMT_AYUV, ///< packed AYUV 4:4:4:4, 32bpp (1 Cr & Cb
sample per 1x1 Y & A samples), AYUVAYUV...
++
++ AV_PIX_FMT_UYVA, ///< packed UYVA 4:4:4:4, 32bpp (1 Cr & Cb
sample per 1x1 Y & A samples), UYVAUYVA...
++
++ AV_PIX_FMT_VYU444, ///< packed VYU 4:4:4, 24bpp (1 Cr & Cb sample
per 1x1 Y), VYUVYU...
++
++ AV_PIX_FMT_V30XBE, ///< packed VYUX 4:4:4 like XV30, 32bpp,
(msb)10V 10Y 10U 2X(lsb), big-endian
++ AV_PIX_FMT_V30XLE, ///< packed VYUX 4:4:4 like XV30, 32bpp,
(msb)10V 10Y 10U 2X(lsb), little-endian
++
++ AV_PIX_FMT_RGBF16BE, ///< IEEE-754 half precision packed RGB
16:16:16, 48bpp, RGBRGB..., big-endian
++ AV_PIX_FMT_RGBF16LE, ///< IEEE-754 half precision packed RGB
16:16:16, 48bpp, RGBRGB..., little-endian
++
++ AV_PIX_FMT_RGBA128BE, ///< packed RGBA 32:32:32:32, 128bpp,
RGBARGBA..., big-endian
++ AV_PIX_FMT_RGBA128LE, ///< packed RGBA 32:32:32:32, 128bpp,
RGBARGBA..., little-endian
++
++    AV_PIX_FMT_RGB96BE,   ///< packed RGB 32:32:32, 96bpp, RGBRGB...,
big-endian
++    AV_PIX_FMT_RGB96LE,   ///< packed RGB 32:32:32, 96bpp, RGBRGB...,
little-endian
++
++ AV_PIX_FMT_Y216BE, ///< packed YUV 4:2:2 like YUYV422, 32bpp,
big-endian
++ AV_PIX_FMT_Y216LE, ///< packed YUV 4:2:2 like YUYV422, 32bpp,
little-endian
++
++ AV_PIX_FMT_XV48BE, ///< packed XVYU 4:4:4, 64bpp, big-endian,
variant of Y416 where alpha channel is left undefined
++ AV_PIX_FMT_XV48LE, ///< packed XVYU 4:4:4, 64bpp, little-endian,
variant of Y416 where alpha channel is left undefined
++
++    AV_PIX_FMT_GBRPF16BE, ///< IEEE-754 half precision planar GBR 4:4:4,
48bpp, big-endian
++    AV_PIX_FMT_GBRPF16LE, ///< IEEE-754 half precision planar GBR 4:4:4,
48bpp, little-endian
++ AV_PIX_FMT_GBRAPF16BE, ///< IEEE-754 half precision planar GBRA
4:4:4:4, 64bpp, big-endian
++ AV_PIX_FMT_GBRAPF16LE, ///< IEEE-754 half precision planar GBRA
4:4:4:4, 64bpp, little-endian
++
++ AV_PIX_FMT_GRAYF16BE, ///< IEEE-754 half precision Y, 16bpp, big-endian
++ AV_PIX_FMT_GRAYF16LE, ///< IEEE-754 half precision Y, 16bpp,
little-endian
++
++ /**
++ * HW acceleration through AMF. data[0] contains an AMFSurface pointer
++ */
++ AV_PIX_FMT_AMF_SURFACE,
++
++ AV_PIX_FMT_GRAY32BE, ///< Y , 32bpp, big-endian
++ AV_PIX_FMT_GRAY32LE, ///< Y , 32bpp, little-endian
++
++ AV_PIX_FMT_YAF32BE, ///< IEEE-754 single precision packed YA, 32 bits
gray, 32 bits alpha, 64bpp, big-endian
++ AV_PIX_FMT_YAF32LE, ///< IEEE-754 single precision packed YA, 32 bits
gray, 32 bits alpha, 64bpp, little-endian
++
++ AV_PIX_FMT_YAF16BE, ///< IEEE-754 half precision packed YA, 16 bits
gray, 16 bits alpha, 32bpp, big-endian
++ AV_PIX_FMT_YAF16LE, ///< IEEE-754 half precision packed YA, 16 bits
gray, 16 bits alpha, 32bpp, little-endian
++
++ AV_PIX_FMT_GBRAP32BE, ///< planar GBRA 4:4:4:4 128bpp, big-endian
++ AV_PIX_FMT_GBRAP32LE, ///< planar GBRA 4:4:4:4 128bpp, little-endian
++
++ AV_PIX_FMT_YUV444P10MSBBE, ///< planar YUV 4:4:4, 30bpp, (1 Cr & Cb
sample per 1x1 Y samples), lowest bits zero, big-endian
++ AV_PIX_FMT_YUV444P10MSBLE, ///< planar YUV 4:4:4, 30bpp, (1 Cr & Cb
sample per 1x1 Y samples), lowest bits zero, little-endian
++ AV_PIX_FMT_YUV444P12MSBBE, ///< planar YUV 4:4:4, 30bpp, (1 Cr & Cb
sample per 1x1 Y samples), lowest bits zero, big-endian
++ AV_PIX_FMT_YUV444P12MSBLE, ///< planar YUV 4:4:4, 30bpp, (1 Cr & Cb
sample per 1x1 Y samples), lowest bits zero, little-endian
++ AV_PIX_FMT_GBRP10MSBBE, ///< planar GBR 4:4:4 30bpp, lowest bits
zero, big-endian
++ AV_PIX_FMT_GBRP10MSBLE, ///< planar GBR 4:4:4 30bpp, lowest bits
zero, little-endian
++ AV_PIX_FMT_GBRP12MSBBE, ///< planar GBR 4:4:4 36bpp, lowest bits
zero, big-endian
++ AV_PIX_FMT_GBRP12MSBLE, ///< planar GBR 4:4:4 36bpp, lowest bits
zero, little-endian
++
++    AV_PIX_FMT_OHCODEC,     ///< hardware decoding through OpenHarmony
++
++ AV_PIX_FMT_NB ///< number of pixel formats, DO NOT USE THIS if
you want to link with shared libav* because the number of formats might
differ between versions
++};
++
++#if AV_HAVE_BIGENDIAN
++# define AV_PIX_FMT_NE(be, le) AV_PIX_FMT_##be
++#else
++# define AV_PIX_FMT_NE(be, le) AV_PIX_FMT_##le
++#endif
++
++#define AV_PIX_FMT_RGB32 AV_PIX_FMT_NE(ARGB, BGRA)
++#define AV_PIX_FMT_RGB32_1 AV_PIX_FMT_NE(RGBA, ABGR)
++#define AV_PIX_FMT_BGR32 AV_PIX_FMT_NE(ABGR, RGBA)
++#define AV_PIX_FMT_BGR32_1 AV_PIX_FMT_NE(BGRA, ARGB)
++#define AV_PIX_FMT_0RGB32 AV_PIX_FMT_NE(0RGB, BGR0)
++#define AV_PIX_FMT_0BGR32 AV_PIX_FMT_NE(0BGR, RGB0)
++
++#define AV_PIX_FMT_GRAY9 AV_PIX_FMT_NE(GRAY9BE, GRAY9LE)
++#define AV_PIX_FMT_GRAY10 AV_PIX_FMT_NE(GRAY10BE, GRAY10LE)
++#define AV_PIX_FMT_GRAY12 AV_PIX_FMT_NE(GRAY12BE, GRAY12LE)
++#define AV_PIX_FMT_GRAY14 AV_PIX_FMT_NE(GRAY14BE, GRAY14LE)
++#define AV_PIX_FMT_GRAY16 AV_PIX_FMT_NE(GRAY16BE, GRAY16LE)
++#define AV_PIX_FMT_GRAY32 AV_PIX_FMT_NE(GRAY32BE, GRAY32LE)
++#define AV_PIX_FMT_YA16 AV_PIX_FMT_NE(YA16BE, YA16LE)
++#define AV_PIX_FMT_RGB48 AV_PIX_FMT_NE(RGB48BE, RGB48LE)
++#define AV_PIX_FMT_RGB565 AV_PIX_FMT_NE(RGB565BE, RGB565LE)
++#define AV_PIX_FMT_RGB555 AV_PIX_FMT_NE(RGB555BE, RGB555LE)
++#define AV_PIX_FMT_RGB444 AV_PIX_FMT_NE(RGB444BE, RGB444LE)
++#define AV_PIX_FMT_RGBA64 AV_PIX_FMT_NE(RGBA64BE, RGBA64LE)
++#define AV_PIX_FMT_BGR48 AV_PIX_FMT_NE(BGR48BE, BGR48LE)
++#define AV_PIX_FMT_BGR565 AV_PIX_FMT_NE(BGR565BE, BGR565LE)
++#define AV_PIX_FMT_BGR555 AV_PIX_FMT_NE(BGR555BE, BGR555LE)
++#define AV_PIX_FMT_BGR444 AV_PIX_FMT_NE(BGR444BE, BGR444LE)
++#define AV_PIX_FMT_BGRA64 AV_PIX_FMT_NE(BGRA64BE, BGRA64LE)
++
++#define AV_PIX_FMT_YUV420P9 AV_PIX_FMT_NE(YUV420P9BE , YUV420P9LE)
++#define AV_PIX_FMT_YUV422P9 AV_PIX_FMT_NE(YUV422P9BE , YUV422P9LE)
++#define AV_PIX_FMT_YUV444P9 AV_PIX_FMT_NE(YUV444P9BE , YUV444P9LE)
++#define AV_PIX_FMT_YUV420P10 AV_PIX_FMT_NE(YUV420P10BE, YUV420P10LE)
++#define AV_PIX_FMT_YUV422P10 AV_PIX_FMT_NE(YUV422P10BE, YUV422P10LE)
++#define AV_PIX_FMT_YUV440P10 AV_PIX_FMT_NE(YUV440P10BE, YUV440P10LE)
++#define AV_PIX_FMT_YUV444P10 AV_PIX_FMT_NE(YUV444P10BE, YUV444P10LE)
++#define AV_PIX_FMT_YUV420P12 AV_PIX_FMT_NE(YUV420P12BE, YUV420P12LE)
++#define AV_PIX_FMT_YUV422P12 AV_PIX_FMT_NE(YUV422P12BE, YUV422P12LE)
++#define AV_PIX_FMT_YUV440P12 AV_PIX_FMT_NE(YUV440P12BE, YUV440P12LE)
++#define AV_PIX_FMT_YUV444P12 AV_PIX_FMT_NE(YUV444P12BE, YUV444P12LE)
++#define AV_PIX_FMT_YUV420P14 AV_PIX_FMT_NE(YUV420P14BE, YUV420P14LE)
++#define AV_PIX_FMT_YUV422P14 AV_PIX_FMT_NE(YUV422P14BE, YUV422P14LE)
++#define AV_PIX_FMT_YUV444P14 AV_PIX_FMT_NE(YUV444P14BE, YUV444P14LE)
++#define AV_PIX_FMT_YUV420P16 AV_PIX_FMT_NE(YUV420P16BE, YUV420P16LE)
++#define AV_PIX_FMT_YUV422P16 AV_PIX_FMT_NE(YUV422P16BE, YUV422P16LE)
++#define AV_PIX_FMT_YUV444P16 AV_PIX_FMT_NE(YUV444P16BE, YUV444P16LE)
++
++#define AV_PIX_FMT_YUV444P10MSB AV_PIX_FMT_NE(YUV444P10MSBBE,
YUV444P10MSBLE)
++#define AV_PIX_FMT_YUV444P12MSB AV_PIX_FMT_NE(YUV444P12MSBBE,
YUV444P12MSBLE)
++
++#define AV_PIX_FMT_GBRP9 AV_PIX_FMT_NE(GBRP9BE , GBRP9LE)
++#define AV_PIX_FMT_GBRP10 AV_PIX_FMT_NE(GBRP10BE, GBRP10LE)
++#define AV_PIX_FMT_GBRP12 AV_PIX_FMT_NE(GBRP12BE, GBRP12LE)
++#define AV_PIX_FMT_GBRP14 AV_PIX_FMT_NE(GBRP14BE, GBRP14LE)
++#define AV_PIX_FMT_GBRP16 AV_PIX_FMT_NE(GBRP16BE, GBRP16LE)
++#define AV_PIX_FMT_GBRAP10 AV_PIX_FMT_NE(GBRAP10BE, GBRAP10LE)
++#define AV_PIX_FMT_GBRAP12 AV_PIX_FMT_NE(GBRAP12BE, GBRAP12LE)
++#define AV_PIX_FMT_GBRAP14 AV_PIX_FMT_NE(GBRAP14BE, GBRAP14LE)
++#define AV_PIX_FMT_GBRAP16 AV_PIX_FMT_NE(GBRAP16BE, GBRAP16LE)
++#define AV_PIX_FMT_GBRAP32 AV_PIX_FMT_NE(GBRAP32BE, GBRAP32LE)
++
++#define AV_PIX_FMT_GBRP10MSB AV_PIX_FMT_NE(GBRP10MSBBE, GBRP10MSBLE)
++#define AV_PIX_FMT_GBRP12MSB AV_PIX_FMT_NE(GBRP12MSBBE, GBRP12MSBLE)
++
++#define AV_PIX_FMT_BAYER_BGGR16 AV_PIX_FMT_NE(BAYER_BGGR16BE,
BAYER_BGGR16LE)
++#define AV_PIX_FMT_BAYER_RGGB16 AV_PIX_FMT_NE(BAYER_RGGB16BE,
BAYER_RGGB16LE)
++#define AV_PIX_FMT_BAYER_GBRG16 AV_PIX_FMT_NE(BAYER_GBRG16BE,
BAYER_GBRG16LE)
++#define AV_PIX_FMT_BAYER_GRBG16 AV_PIX_FMT_NE(BAYER_GRBG16BE,
BAYER_GRBG16LE)
++
++#define AV_PIX_FMT_GBRPF16 AV_PIX_FMT_NE(GBRPF16BE, GBRPF16LE)
++#define AV_PIX_FMT_GBRAPF16 AV_PIX_FMT_NE(GBRAPF16BE, GBRAPF16LE)
++#define AV_PIX_FMT_GBRPF32 AV_PIX_FMT_NE(GBRPF32BE, GBRPF32LE)
++#define AV_PIX_FMT_GBRAPF32 AV_PIX_FMT_NE(GBRAPF32BE, GBRAPF32LE)
++
++#define AV_PIX_FMT_GRAYF16 AV_PIX_FMT_NE(GRAYF16BE, GRAYF16LE)
++#define AV_PIX_FMT_GRAYF32 AV_PIX_FMT_NE(GRAYF32BE, GRAYF32LE)
++
++#define AV_PIX_FMT_YAF16 AV_PIX_FMT_NE(YAF16BE, YAF16LE)
++#define AV_PIX_FMT_YAF32 AV_PIX_FMT_NE(YAF32BE, YAF32LE)
++
++#define AV_PIX_FMT_YUVA420P9 AV_PIX_FMT_NE(YUVA420P9BE , YUVA420P9LE)
++#define AV_PIX_FMT_YUVA422P9 AV_PIX_FMT_NE(YUVA422P9BE , YUVA422P9LE)
++#define AV_PIX_FMT_YUVA444P9 AV_PIX_FMT_NE(YUVA444P9BE , YUVA444P9LE)
++#define AV_PIX_FMT_YUVA420P10 AV_PIX_FMT_NE(YUVA420P10BE, YUVA420P10LE)
++#define AV_PIX_FMT_YUVA422P10 AV_PIX_FMT_NE(YUVA422P10BE, YUVA422P10LE)
++#define AV_PIX_FMT_YUVA444P10 AV_PIX_FMT_NE(YUVA444P10BE, YUVA444P10LE)
++#define AV_PIX_FMT_YUVA422P12 AV_PIX_FMT_NE(YUVA422P12BE, YUVA422P12LE)
++#define AV_PIX_FMT_YUVA444P12 AV_PIX_FMT_NE(YUVA444P12BE, YUVA444P12LE)
++#define AV_PIX_FMT_YUVA420P16 AV_PIX_FMT_NE(YUVA420P16BE, YUVA420P16LE)
++#define AV_PIX_FMT_YUVA422P16 AV_PIX_FMT_NE(YUVA422P16BE, YUVA422P16LE)
++#define AV_PIX_FMT_YUVA444P16 AV_PIX_FMT_NE(YUVA444P16BE, YUVA444P16LE)
++
++#define AV_PIX_FMT_XYZ12 AV_PIX_FMT_NE(XYZ12BE, XYZ12LE)
++#define AV_PIX_FMT_NV20 AV_PIX_FMT_NE(NV20BE, NV20LE)
++#define AV_PIX_FMT_AYUV64 AV_PIX_FMT_NE(AYUV64BE, AYUV64LE)
++#define AV_PIX_FMT_P010 AV_PIX_FMT_NE(P010BE, P010LE)
++#define AV_PIX_FMT_P012 AV_PIX_FMT_NE(P012BE, P012LE)
++#define AV_PIX_FMT_P016 AV_PIX_FMT_NE(P016BE, P016LE)
++
++#define AV_PIX_FMT_Y210 AV_PIX_FMT_NE(Y210BE, Y210LE)
++#define AV_PIX_FMT_Y212 AV_PIX_FMT_NE(Y212BE, Y212LE)
++#define AV_PIX_FMT_Y216 AV_PIX_FMT_NE(Y216BE, Y216LE)
++#define AV_PIX_FMT_XV30 AV_PIX_FMT_NE(XV30BE, XV30LE)
++#define AV_PIX_FMT_XV36 AV_PIX_FMT_NE(XV36BE, XV36LE)
++#define AV_PIX_FMT_XV48 AV_PIX_FMT_NE(XV48BE, XV48LE)
++#define AV_PIX_FMT_V30X AV_PIX_FMT_NE(V30XBE, V30XLE)
++#define AV_PIX_FMT_X2RGB10 AV_PIX_FMT_NE(X2RGB10BE, X2RGB10LE)
++#define AV_PIX_FMT_X2BGR10 AV_PIX_FMT_NE(X2BGR10BE, X2BGR10LE)
++
++#define AV_PIX_FMT_P210 AV_PIX_FMT_NE(P210BE, P210LE)
++#define AV_PIX_FMT_P410 AV_PIX_FMT_NE(P410BE, P410LE)
++#define AV_PIX_FMT_P212 AV_PIX_FMT_NE(P212BE, P212LE)
++#define AV_PIX_FMT_P412 AV_PIX_FMT_NE(P412BE, P412LE)
++#define AV_PIX_FMT_P216 AV_PIX_FMT_NE(P216BE, P216LE)
++#define AV_PIX_FMT_P416 AV_PIX_FMT_NE(P416BE, P416LE)
++
++#define AV_PIX_FMT_RGBF16 AV_PIX_FMT_NE(RGBF16BE, RGBF16LE)
++#define AV_PIX_FMT_RGBAF16 AV_PIX_FMT_NE(RGBAF16BE, RGBAF16LE)
++
++#define AV_PIX_FMT_RGBF32 AV_PIX_FMT_NE(RGBF32BE, RGBF32LE)
++#define AV_PIX_FMT_RGBAF32 AV_PIX_FMT_NE(RGBAF32BE, RGBAF32LE)
++
++#define AV_PIX_FMT_RGB96 AV_PIX_FMT_NE(RGB96BE, RGB96LE)
++#define AV_PIX_FMT_RGBA128 AV_PIX_FMT_NE(RGBA128BE, RGBA128LE)
++
++/**
++ * Chromaticity coordinates of the source primaries.
++ * These values match the ones defined by ISO/IEC 23091-2_2019 subclause
8.1 and ITU-T H.273.
++ */
++enum AVColorPrimaries {
++ AVCOL_PRI_RESERVED0 = 0,
++ AVCOL_PRI_BT709 = 1, ///< also ITU-R BT1361 / IEC 61966-2-4 /
SMPTE RP 177 Annex B
++ AVCOL_PRI_UNSPECIFIED = 2,
++ AVCOL_PRI_RESERVED = 3,
++ AVCOL_PRI_BT470M = 4, ///< also FCC Title 47 Code of Federal
Regulations 73.682 (a)(20)
++
++ AVCOL_PRI_BT470BG = 5, ///< also ITU-R BT601-6 625 / ITU-R BT1358
625 / ITU-R BT1700 625 PAL & SECAM
++ AVCOL_PRI_SMPTE170M = 6, ///< also ITU-R BT601-6 525 / ITU-R BT1358
525 / ITU-R BT1700 NTSC
++ AVCOL_PRI_SMPTE240M = 7, ///< identical to above, also called "SMPTE
C" even though it uses D65
++ AVCOL_PRI_FILM = 8, ///< colour filters using Illuminant C
++ AVCOL_PRI_BT2020 = 9, ///< ITU-R BT2020
++ AVCOL_PRI_SMPTE428 = 10, ///< SMPTE ST 428-1 (CIE 1931 XYZ)
++ AVCOL_PRI_SMPTEST428_1 = AVCOL_PRI_SMPTE428,
++ AVCOL_PRI_SMPTE431 = 11, ///< SMPTE ST 431-2 (2011) / DCI P3
++ AVCOL_PRI_SMPTE432 = 12, ///< SMPTE ST 432-1 (2010) / P3 D65 /
Display P3
++ AVCOL_PRI_EBU3213 = 22, ///< EBU Tech. 3213-E (nothing there) / one
of JEDEC P22 group phosphors
++ AVCOL_PRI_JEDEC_P22 = AVCOL_PRI_EBU3213,
++ AVCOL_PRI_NB ///< Not part of ABI
++};
++
++/**
++ * Color Transfer Characteristic.
++ * These values match the ones defined by ISO/IEC 23091-2_2019 subclause
8.2.
++ */
++enum AVColorTransferCharacteristic {
++ AVCOL_TRC_RESERVED0 = 0,
++ AVCOL_TRC_BT709 = 1, ///< also ITU-R BT1361
++ AVCOL_TRC_UNSPECIFIED = 2,
++ AVCOL_TRC_RESERVED = 3,
++ AVCOL_TRC_GAMMA22 = 4, ///< also ITU-R BT470M / ITU-R BT1700 625
PAL & SECAM
++ AVCOL_TRC_GAMMA28 = 5, ///< also ITU-R BT470BG
++ AVCOL_TRC_SMPTE170M = 6, ///< also ITU-R BT601-6 525 or 625 / ITU-R
BT1358 525 or 625 / ITU-R BT1700 NTSC
++ AVCOL_TRC_SMPTE240M = 7,
++ AVCOL_TRC_LINEAR = 8, ///< "Linear transfer characteristics"
++ AVCOL_TRC_LOG = 9, ///< "Logarithmic transfer characteristic
(100:1 range)"
++ AVCOL_TRC_LOG_SQRT = 10, ///< "Logarithmic transfer characteristic
(100 * Sqrt(10) : 1 range)"
++ AVCOL_TRC_IEC61966_2_4 = 11, ///< IEC 61966-2-4
++ AVCOL_TRC_BT1361_ECG = 12, ///< ITU-R BT1361 Extended Colour Gamut
++ AVCOL_TRC_IEC61966_2_1 = 13, ///< IEC 61966-2-1 (sRGB or sYCC)
++ AVCOL_TRC_BT2020_10 = 14, ///< ITU-R BT2020 for 10-bit system
++ AVCOL_TRC_BT2020_12 = 15, ///< ITU-R BT2020 for 12-bit system
++ AVCOL_TRC_SMPTE2084 = 16, ///< SMPTE ST 2084 for 10-, 12-, 14- and
16-bit systems
++ AVCOL_TRC_SMPTEST2084 = AVCOL_TRC_SMPTE2084,
++ AVCOL_TRC_SMPTE428 = 17, ///< SMPTE ST 428-1
++ AVCOL_TRC_SMPTEST428_1 = AVCOL_TRC_SMPTE428,
++ AVCOL_TRC_ARIB_STD_B67 = 18, ///< ARIB STD-B67, known as "Hybrid
log-gamma"
++ AVCOL_TRC_NB ///< Not part of ABI
++};
++
++/**
++ * YUV colorspace type.
++ * These values match the ones defined by ISO/IEC 23091-2_2019 subclause
8.3.
++ */
++enum AVColorSpace {
++ AVCOL_SPC_RGB = 0, ///< order of coefficients is actually GBR,
also IEC 61966-2-1 (sRGB), YZX and ST 428-1
++ AVCOL_SPC_BT709 = 1, ///< also ITU-R BT1361 / IEC 61966-2-4
xvYCC709 / derived in SMPTE RP 177 Annex B
++ AVCOL_SPC_UNSPECIFIED = 2,
++ AVCOL_SPC_RESERVED = 3, ///< reserved for future use by ITU-T and
ISO/IEC just like 15-255 are
++ AVCOL_SPC_FCC = 4, ///< FCC Title 47 Code of Federal
Regulations 73.682 (a)(20)
++ AVCOL_SPC_BT470BG = 5, ///< also ITU-R BT601-6 625 / ITU-R BT1358
625 / ITU-R BT1700 625 PAL & SECAM / IEC 61966-2-4 xvYCC601
++ AVCOL_SPC_SMPTE170M = 6, ///< also ITU-R BT601-6 525 / ITU-R BT1358
525 / ITU-R BT1700 NTSC / functionally identical to above
++ AVCOL_SPC_SMPTE240M = 7, ///< derived from 170M primaries and D65
white point, 170M is derived from BT470 System M's primaries
++ AVCOL_SPC_YCGCO = 8, ///< used by Dirac / VC-2 and H.264 FRext,
see ITU-T SG16
++ AVCOL_SPC_YCOCG = AVCOL_SPC_YCGCO,
++ AVCOL_SPC_BT2020_NCL = 9, ///< ITU-R BT2020 non-constant luminance
system
++ AVCOL_SPC_BT2020_CL = 10, ///< ITU-R BT2020 constant luminance system
++ AVCOL_SPC_SMPTE2085 = 11, ///< SMPTE 2085, Y'D'zD'x
++ AVCOL_SPC_CHROMA_DERIVED_NCL = 12, ///< Chromaticity-derived
non-constant luminance system
++ AVCOL_SPC_CHROMA_DERIVED_CL = 13, ///< Chromaticity-derived constant
luminance system
++ AVCOL_SPC_ICTCP = 14, ///< ITU-R BT.2100-0, ICtCp
++ AVCOL_SPC_IPT_C2 = 15, ///< SMPTE ST 2128, IPT-C2
++ AVCOL_SPC_YCGCO_RE = 16, ///< YCgCo-R, even addition of bits
++ AVCOL_SPC_YCGCO_RO = 17, ///< YCgCo-R, odd addition of bits
++ AVCOL_SPC_NB ///< Not part of ABI
++};
++
++/**
++ * Visual content value range.
++ *
++ * These values are based on definitions that can be found in multiple
++ * specifications, such as ITU-T BT.709 (3.4 - Quantization of RGB,
luminance
++ * and colour-difference signals), ITU-T BT.2020 (Table 5 - Digital
++ * Representation) as well as ITU-T BT.2100 (Table 9 - Digital 10- and
12-bit
++ * integer representation). At the time of writing, the BT.2100 one is
++ * recommended, as it also defines the full range representation.
++ *
++ * Common definitions:
++ * - For RGB and luma planes such as Y in YCbCr and I in ICtCp,
++ * 'E' is the original value in range of 0.0 to 1.0.
++ * - For chroma planes such as Cb,Cr and Ct,Cp, 'E' is the original
++ * value in range of -0.5 to 0.5.
++ * - 'n' is the output bit depth.
++ * - For additional definitions such as rounding and clipping to valid n
++ * bit unsigned integer range, please refer to BT.2100 (Table 9).
++ */
++enum AVColorRange {
++ AVCOL_RANGE_UNSPECIFIED = 0,
++
++ /**
++ * Narrow or limited range content.
++ *
++ * - For luma planes:
++ *
++ * (219 * E + 16) * 2^(n-8)
++ *
++ * F.ex. the range of 16-235 for 8 bits
++ *
++ * - For chroma planes:
++ *
++ * (224 * E + 128) * 2^(n-8)
++ *
++ * F.ex. the range of 16-240 for 8 bits
++ */
++ AVCOL_RANGE_MPEG = 1,
++
++ /**
++ * Full range content.
++ *
++ * - For RGB and luma planes:
++ *
++ * (2^n - 1) * E
++ *
++ * F.ex. the range of 0-255 for 8 bits
++ *
++ * - For chroma planes:
++ *
++ * (2^n - 1) * E + 2^(n - 1)
++ *
++ * F.ex. the range of 1-255 for 8 bits
++ */
++ AVCOL_RANGE_JPEG = 2,
++ AVCOL_RANGE_NB ///< Not part of ABI
++};
++
++/**
++ * Location of chroma samples.
++ *
++ * Illustration showing the location of the first (top left) chroma sample
of the
++ * image, the left shows only luma, the right
++ * shows the location of the chroma sample, the 2 could be imagined to
overlay
++ * each other but are drawn separately due to limitations of ASCII
++ *
++ * 1st 2nd 1st 2nd horizontal luma sample positions
++ * v v v v
++ * ______ ______
++ *1st luma line > |X X ... |3 4 X ... X are luma samples,
++ * | |1 2 1-6 are possible chroma
positions
++ *2nd luma line > |X X ... |5 6 X ... 0 is undefined/unknown
position
++ */
++enum AVChromaLocation {
++ AVCHROMA_LOC_UNSPECIFIED = 0,
++ AVCHROMA_LOC_LEFT = 1, ///< MPEG-2/4 4:2:0, H.264 default for
4:2:0
++ AVCHROMA_LOC_CENTER = 2, ///< MPEG-1 4:2:0, JPEG 4:2:0, H.263 4:2:0
++ AVCHROMA_LOC_TOPLEFT = 3, ///< ITU-R 601, SMPTE 274M 296M S314M(DV
4:1:1), mpeg2 4:2:2
++ AVCHROMA_LOC_TOP = 4,
++ AVCHROMA_LOC_BOTTOMLEFT = 5,
++ AVCHROMA_LOC_BOTTOM = 6,
++ AVCHROMA_LOC_NB ///< Not part of ABI
++};
++
++#endif /* AVUTIL_PIXFMT_H */
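
For reference, a minimal self-contained sketch of the AVColorRange quantisation formulas documented above, evaluated for 8-bit content. Nothing below is part of the patch; the helper names are made up for illustration:

// Illustrative only: evaluates the limited ("MPEG") and full ("JPEG") range
// formulas quoted in the AVColorRange documentation above for n = 8 bits.
#include <cstdio>
#include <cmath>

static double limited_luma(double E, int n)   { return (219.0 * E + 16.0) * std::pow(2.0, n - 8); }
static double limited_chroma(double E, int n) { return (224.0 * E + 128.0) * std::pow(2.0, n - 8); }
static double full_range(double E, int n)     { return (std::pow(2.0, n) - 1.0) * E; }

int main() {
    // Reproduces the ranges noted in the comments: 16-235, 16-240 and 0-255.
    std::printf("limited luma:   %g..%g\n", limited_luma(0.0, 8), limited_luma(1.0, 8));
    std::printf("limited chroma: %g..%g\n", limited_chroma(-0.5, 8), limited_chroma(0.5, 8));
    std::printf("full range:     %g..%g\n", full_range(0.0, 8), full_range(1.0, 8));
    return 0;
}
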
+diff --git
a/dom/media/platforms/ffmpeg/ffmpeg62/include/libavutil/rational.h
b/dom/media/platforms/ffmpeg/ffmpeg62/include/libavutil/rational.h
+new file mode 100644
+--- /dev/null
++++ b/dom/media/platforms/ffmpeg/ffmpeg62/include/libavutil/rational.h
+@@ -0,0 +1,225 @@
++/*
++ * rational numbers
++ * Copyright (c) 2003 Michael Niedermayer <michaelni AT gmx.at>
++ *
++ * This file is part of FFmpeg.
++ *
++ * FFmpeg is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU Lesser General Public
++ * License as published by the Free Software Foundation; either
++ * version 2.1 of the License, or (at your option) any later version.
++ *
++ * FFmpeg is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with FFmpeg; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
USA
++ */
++
++/**
++ * @file
++ * @ingroup lavu_math_rational
++ * Utilities for rational number calculation.
++ * @author Michael Niedermayer <michaelni AT gmx.at>
++ */
++
++#ifndef AVUTIL_RATIONAL_H
++#define AVUTIL_RATIONAL_H
++
++#include <stdint.h>
++#include <limits.h>
++#include "attributes.h"
++
++/**
++ * @defgroup lavu_math_rational AVRational
++ * @ingroup lavu_math
++ * Rational number calculation.
++ *
++ * While rational numbers can be expressed as floating-point numbers, the
++ * conversion process is a lossy one, so are floating-point operations. On
the
++ * other hand, the nature of FFmpeg demands highly accurate calculation of
++ * timestamps. This set of rational number utilities serves as a generic
++ * interface for manipulating rational numbers as pairs of numerators and
++ * denominators.
++ *
++ * Many of the functions that operate on AVRational's have the suffix `_q`,
in
++ * reference to the mathematical symbol "ℚ" (Q) which denotes the set of all
++ * rational numbers.
++ *
++ * @{
++ */
++
++/**
++ * Rational number (pair of numerator and denominator).
++ */
++typedef struct AVRational{
++ int num; ///< Numerator
++ int den; ///< Denominator
++} AVRational;
++
++/**
++ * Create an AVRational.
++ *
++ * Useful for compilers that do not support compound literals.
++ *
++ * @note The return value is not reduced.
++ * @see av_reduce()
++ */
++static inline AVRational av_make_q(int num, int den)
++{
++ AVRational r = { num, den };
++ return r;
++}
++
++/**
++ * Compare two rationals.
++ *
++ * @param a First rational
++ * @param b Second rational
++ *
++ * @return One of the following values:
++ * - 0 if `a == b`
++ * - 1 if `a > b`
++ * - -1 if `a < b`
++ * - `INT_MIN` if one of the values is of the form `0 / 0`
++ */
++static inline int av_cmp_q(AVRational a, AVRational b){
++ const int64_t tmp= a.num * (int64_t)b.den - b.num * (int64_t)a.den;
++
++ if(tmp) return (int)((tmp ^ a.den ^ b.den)>>63)|1;
++ else if(b.den && a.den) return 0;
++ else if(a.num && b.num) return (a.num>>31) - (b.num>>31);
++ else return INT_MIN;
++}
++
++/**
++ * Convert an AVRational to a `double`.
++ * @param a AVRational to convert
++ * @return `a` in floating-point form
++ * @see av_d2q()
++ */
++static inline double av_q2d(AVRational a){
++ return a.num / (double) a.den;
++}
++
++/**
++ * Reduce a fraction.
++ *
++ * This is useful for framerate calculations.
++ *
++ * @param[out] dst_num Destination numerator
++ * @param[out] dst_den Destination denominator
++ * @param[in] num Source numerator
++ * @param[in] den Source denominator
++ * @param[in] max Maximum allowed values for `dst_num` & `dst_den`
++ * @return 1 if the operation is exact, 0 otherwise
++ */
++int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t
max);
++
++/**
++ * Multiply two rationals.
++ * @param b First rational
++ * @param c Second rational
++ * @return b*c
++ */
++AVRational av_mul_q(AVRational b, AVRational c) av_const;
++
++/**
++ * Divide one rational by another.
++ * @param b First rational
++ * @param c Second rational
++ * @return b/c
++ */
++AVRational av_div_q(AVRational b, AVRational c) av_const;
++
++/**
++ * Add two rationals.
++ * @param b First rational
++ * @param c Second rational
++ * @return b+c
++ */
++AVRational av_add_q(AVRational b, AVRational c) av_const;
++
++/**
++ * Subtract one rational from another.
++ * @param b First rational
++ * @param c Second rational
++ * @return b-c
++ */
++AVRational av_sub_q(AVRational b, AVRational c) av_const;
++
++/**
++ * Invert a rational.
++ * @param q value
++ * @return 1 / q
++ */
++static av_always_inline AVRational av_inv_q(AVRational q)
++{
++ AVRational r = { q.den, q.num };
++ return r;
++}
++
++/**
++ * Convert a double precision floating point number to a rational.
++ *
++ * In case of infinity, the returned value is expressed as `{1, 0}` or
++ * `{-1, 0}` depending on the sign.
++ *
++ * In general rational numbers with |num| <= 1<<26 && |den| <= 1<<26
++ * can be recovered exactly from their double representation.
++ * (no exceptions were found within 1B random ones)
++ *
++ * @param d `double` to convert
++ * @param max Maximum allowed numerator and denominator
++ * @return `d` in AVRational form
++ * @see av_q2d()
++ */
++AVRational av_d2q(double d, int max) av_const;
++
++/**
++ * Find which of the two rationals is closer to another rational.
++ *
++ * @param q Rational to be compared against
++ * @param q1 Rational to be tested
++ * @param q2 Rational to be tested
++ * @return One of the following values:
++ * - 1 if `q1` is nearer to `q` than `q2`
++ * - -1 if `q2` is nearer to `q` than `q1`
++ * - 0 if they have the same distance
++ */
++int av_nearer_q(AVRational q, AVRational q1, AVRational q2);
++
++/**
++ * Find the value in a list of rationals nearest a given reference rational.
++ *
++ * @param q Reference rational
++ * @param q_list Array of rationals terminated by `{0, 0}`
++ * @return Index of the nearest value found in the array
++ */
++int av_find_nearest_q_idx(AVRational q, const AVRational* q_list);
++
++/**
++ * Convert an AVRational to a IEEE 32-bit `float` expressed in fixed-point
++ * format.
++ *
++ * @param q Rational to be converted
++ * @return Equivalent floating-point value, expressed as an unsigned 32-bit
++ * integer.
++ * @note The returned value is platform-indepedant.
++ */
++uint32_t av_q2intfloat(AVRational q);
++
++/**
++ * Return the best rational so that a and b are multiple of it.
++ * If the resulting denominator is larger than max_den, return def.
++ */
++AVRational av_gcd_q(AVRational a, AVRational b, int max_den, AVRational
def);
++
++/**
++ * @}
++ */
++
++#endif /* AVUTIL_RATIONAL_H */
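
For reference, a minimal usage sketch of the inline AVRational helpers quoted above. Only av_make_q(), av_inv_q(), av_q2d() and av_cmp_q() are used, since those are defined inline in the header itself; the include path is an assumption based on where the header is vendored:

// Illustrative only; relies solely on the static inline helpers from the
// rational.h above, so no libavutil linkage is needed for these calls.
extern "C" {
#include "libavutil/rational.h"  // assumed include path for the vendored header
}
#include <cstdio>

int main() {
    AVRational fps = av_make_q(30000, 1001);  // NTSC frame rate; not reduced
    AVRational dur = av_inv_q(fps);           // duration of one frame: 1001/30000

    std::printf("fps ~ %f\n", av_q2d(fps));   // ~29.97
    std::printf("dur = %d/%d s\n", dur.num, dur.den);

    // av_cmp_q() returns -1, 0 or 1 (INT_MIN if an operand is 0/0).
    if (av_cmp_q(fps, av_make_q(30, 1)) < 0) {
        std::printf("29.97 fps is below 30 fps\n");
    }
    return 0;
}
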
+diff --git a/media/ffvpx/libavutil/samplefmt.h
b/dom/media/platforms/ffmpeg/ffmpeg62/include/libavutil/samplefmt.h
+copy from media/ffvpx/libavutil/samplefmt.h
+copy to dom/media/platforms/ffmpeg/ffmpeg62/include/libavutil/samplefmt.h
+diff --git a/dom/media/platforms/ffmpeg/ffmpeg62/include/libavutil/version.h
b/dom/media/platforms/ffmpeg/ffmpeg62/include/libavutil/version.h
+new file mode 100644
+--- /dev/null
++++ b/dom/media/platforms/ffmpeg/ffmpeg62/include/libavutil/version.h
+@@ -0,0 +1,119 @@
++/*
++ * copyright (c) 2003 Fabrice Bellard
++ *
++ * This file is part of FFmpeg.
++ *
++ * FFmpeg is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU Lesser General Public
++ * License as published by the Free Software Foundation; either
++ * version 2.1 of the License, or (at your option) any later version.
++ *
++ * FFmpeg is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with FFmpeg; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
USA
++ */
++
++/**
++ * @file
++ * @ingroup lavu
++ * Libavutil version macros
++ */
++
++#ifndef AVUTIL_VERSION_H
++#define AVUTIL_VERSION_H
++
++#include "macros.h"
++
++/**
++ * @addtogroup version_utils
++ *
++ * Useful to check and match library version in order to maintain
++ * backward compatibility.
++ *
++ * The FFmpeg libraries follow a versioning scheme very similar to
++ * Semantic Versioning (http://semver.org/)
++ * The difference is that the component called PATCH is called MICRO in
FFmpeg
++ * and its value is reset to 100 instead of 0 to keep it above or equal to
100.
++ * Also we do not increase MICRO for every bugfix or change in git master.
++ *
++ * Prior to FFmpeg 3.2 point releases did not change any lib version number
to
++ * avoid aliassing different git master checkouts.
++ * Starting with FFmpeg 3.2, the released library versions will occupy
++ * a separate MAJOR.MINOR that is not used on the master development branch.
++ * That is if we branch a release of master 55.10.123 we will bump to
55.11.100
++ * for the release and master will continue at 55.12.100 after it. Each new
++ * point release will then bump the MICRO improving the usefulness of the
lib
++ * versions.
++ *
++ * @{
++ */
++
++#define AV_VERSION_INT(a, b, c) ((a)<<16 | (b)<<8 | (c))
++#define AV_VERSION_DOT(a, b, c) a ##.## b ##.## c
++#define AV_VERSION(a, b, c) AV_VERSION_DOT(a, b, c)
++
++/**
++ * Extract version components from the full ::AV_VERSION_INT int as returned
++ * by functions like ::avformat_version() and ::avcodec_version()
++ */
++#define AV_VERSION_MAJOR(a) ((a) >> 16)
++#define AV_VERSION_MINOR(a) (((a) & 0x00FF00) >> 8)
++#define AV_VERSION_MICRO(a) ((a) & 0xFF)
++
++/**
++ * @}
++ */
++
++/**
++ * @defgroup lavu_ver Version and Build diagnostics
++ *
++ * Macros and function useful to check at compile time and at runtime
++ * which version of libavutil is in use.
++ *
++ * @{
++ */
++
++#define LIBAVUTIL_VERSION_MAJOR 60
++#define LIBAVUTIL_VERSION_MINOR 8
++#define LIBAVUTIL_VERSION_MICRO 100
++
++#define LIBAVUTIL_VERSION_INT AV_VERSION_INT(LIBAVUTIL_VERSION_MAJOR, \
++ LIBAVUTIL_VERSION_MINOR, \
++ LIBAVUTIL_VERSION_MICRO)
++#define LIBAVUTIL_VERSION AV_VERSION(LIBAVUTIL_VERSION_MAJOR, \
++ LIBAVUTIL_VERSION_MINOR, \
++ LIBAVUTIL_VERSION_MICRO)
++#define LIBAVUTIL_BUILD LIBAVUTIL_VERSION_INT
++
++#define LIBAVUTIL_IDENT "Lavu" AV_STRINGIFY(LIBAVUTIL_VERSION)
++
++/**
++ * @defgroup lavu_depr_guards Deprecation Guards
++ * FF_API_* defines may be placed below to indicate public API that will be
++ * dropped at a future version bump. The defines themselves are not part of
++ * the public API and may change, break or disappear at any time.
++ *
++ * @note, when bumping the major version it is recommended to manually
++ * disable each FF_API_* in its own commit instead of disabling them all
++ * at once through the bump. This improves the git bisect-ability of the
change.
++ *
++ * @{
++ */
++
++#define FF_API_MOD_UINTP2 (LIBAVUTIL_VERSION_MAJOR < 61)
++#define FF_API_RISCV_FD_ZBA (LIBAVUTIL_VERSION_MAJOR < 61)
++#define FF_API_VULKAN_FIXED_QUEUES (LIBAVUTIL_VERSION_MAJOR < 61)
++#define FF_API_OPT_INT_LIST (LIBAVUTIL_VERSION_MAJOR < 61)
++#define FF_API_OPT_PTR (LIBAVUTIL_VERSION_MAJOR < 61)
++
++/**
++ * @}
++ * @}
++ */
++
++#endif /* AVUTIL_VERSION_H */
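
For reference, a minimal sketch of how the version macros above are typically used, comparing the compile-time LIBAVUTIL_VERSION_INT against the value the loaded library reports. avutil_version() is assumed to come from <libavutil/avutil.h>; it is not declared in the header quoted above:

// Illustrative only: AV_VERSION_MAJOR()/MINOR()/MICRO() come from the
// version.h above; avutil_version() is assumed from <libavutil/avutil.h>.
extern "C" {
#include <libavutil/avutil.h>
}
#include <cstdio>

int main() {
    unsigned built   = LIBAVUTIL_VERSION_INT;  // headers we compiled against
    unsigned runtime = avutil_version();       // library actually loaded

    std::printf("built against lavu %u.%u.%u, running %u.%u.%u\n",
                AV_VERSION_MAJOR(built), AV_VERSION_MINOR(built), AV_VERSION_MICRO(built),
                AV_VERSION_MAJOR(runtime), AV_VERSION_MINOR(runtime), AV_VERSION_MICRO(runtime));

    // A differing MAJOR usually means the ABI assumptions no longer hold.
    if (AV_VERSION_MAJOR(runtime) != AV_VERSION_MAJOR(built)) {
        std::printf("libavutil major version mismatch\n");
    }
    return 0;
}
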
+diff --git a/dom/media/platforms/ffmpeg/ffmpeg61/moz.build
b/dom/media/platforms/ffmpeg/ffmpeg62/moz.build
+copy from dom/media/platforms/ffmpeg/ffmpeg61/moz.build
+copy to dom/media/platforms/ffmpeg/ffmpeg62/moz.build
+diff --git a/tools/rewriting/ThirdPartyPaths.txt
b/tools/rewriting/ThirdPartyPaths.txt
+--- a/tools/rewriting/ThirdPartyPaths.txt
++++ b/tools/rewriting/ThirdPartyPaths.txt
+@@ -31,16 +31,17 @@ dom/media/gmp/rlz/
+ dom/media/gmp/widevine-adapter/content_decryption_module_export.h
+ dom/media/gmp/widevine-adapter/content_decryption_module_ext.h
+ dom/media/gmp/widevine-adapter/content_decryption_module.h
+ dom/media/platforms/ffmpeg/ffmpeg57/
+ dom/media/platforms/ffmpeg/ffmpeg58/
+ dom/media/platforms/ffmpeg/ffmpeg59/
+ dom/media/platforms/ffmpeg/ffmpeg60/
+ dom/media/platforms/ffmpeg/ffmpeg61/
++dom/media/platforms/ffmpeg/ffmpeg62/
+ dom/media/platforms/ffmpeg/libav53/
+ dom/media/platforms/ffmpeg/libav54/
+ dom/media/platforms/ffmpeg/libav55/
+ dom/media/webaudio/test/blink/
+ dom/media/webrtc/tests/mochitests/helpers_from_wpt/sdp.js
+ dom/media/webrtc/transport/third_party/
+ dom/media/webspeech/recognition/endpointer.cc
+ dom/media/webspeech/recognition/endpointer.h
+
diff --git
a/http/firefox/patches/0022-bmo-1962139-vendor-ffmpeg-8-headers-p2.patch
b/http/firefox/patches/0022-bmo-1962139-vendor-ffmpeg-8-headers-p2.patch
new file mode 100644
index 0000000..b92f659
--- /dev/null
+++ b/http/firefox/patches/0022-bmo-1962139-vendor-ffmpeg-8-headers-p2.patch
@@ -0,0 +1,309 @@
+
+# HG changeset patch
+# User Landry Breuil <landry AT openbsd.org>
+# Date 1763998693 0
+# Node ID f23d592e80b9f929c85df96449b057503fd3a53c
+# Parent 19e0df8033b24b543bd24343bea7cee0865c47f2
+Bug 1962139 - Adapt the macros/defines for function detection in ffmpeg8
r=media-playback-reviewers,alwu
+
+avcodec_close() is the only function that was removed from the API
+
+Differential Revision: https://phabricator.services.mozilla.com/D272253
+
+
+diff --git a/dom/media/platforms/ffmpeg/FFmpegLibWrapper.cpp
b/dom/media/platforms/ffmpeg/FFmpegLibWrapper.cpp
+--- a/dom/media/platforms/ffmpeg/FFmpegLibWrapper.cpp
++++ b/dom/media/platforms/ffmpeg/FFmpegLibWrapper.cpp
+@@ -75,28 +75,30 @@ FFmpegLibWrapper::LinkResult FFmpegLibWr
+ AV_FUNC_54 = 1 << 1,
+ AV_FUNC_55 = 1 << 2,
+ AV_FUNC_56 = 1 << 3,
+ AV_FUNC_57 = 1 << 4,
+ AV_FUNC_58 = 1 << 5,
+ AV_FUNC_59 = 1 << 6,
+ AV_FUNC_60 = 1 << 7,
+ AV_FUNC_61 = 1 << 8,
++ AV_FUNC_62 = 1 << 9,
+ AV_FUNC_AVUTIL_53 = AV_FUNC_53 | AV_FUNC_AVUTIL_MASK,
+ AV_FUNC_AVUTIL_54 = AV_FUNC_54 | AV_FUNC_AVUTIL_MASK,
+ AV_FUNC_AVUTIL_55 = AV_FUNC_55 | AV_FUNC_AVUTIL_MASK,
+ AV_FUNC_AVUTIL_56 = AV_FUNC_56 | AV_FUNC_AVUTIL_MASK,
+ AV_FUNC_AVUTIL_57 = AV_FUNC_57 | AV_FUNC_AVUTIL_MASK,
+ AV_FUNC_AVUTIL_58 = AV_FUNC_58 | AV_FUNC_AVUTIL_MASK,
+ AV_FUNC_AVUTIL_59 = AV_FUNC_59 | AV_FUNC_AVUTIL_MASK,
+ AV_FUNC_AVUTIL_60 = AV_FUNC_60 | AV_FUNC_AVUTIL_MASK,
+ AV_FUNC_AVUTIL_61 = AV_FUNC_61 | AV_FUNC_AVUTIL_MASK,
++ AV_FUNC_AVUTIL_62 = AV_FUNC_62 | AV_FUNC_AVUTIL_MASK,
+ AV_FUNC_AVCODEC_ALL = AV_FUNC_53 | AV_FUNC_54 | AV_FUNC_55 | AV_FUNC_56
|
+ AV_FUNC_57 | AV_FUNC_58 | AV_FUNC_59 | AV_FUNC_60
|
+- AV_FUNC_61,
++ AV_FUNC_61 | AV_FUNC_62,
+ AV_FUNC_AVUTIL_ALL = AV_FUNC_AVCODEC_ALL | AV_FUNC_AVUTIL_MASK
+ };
+
+ switch (macro) {
+ case 53:
+ version = AV_FUNC_53;
+ break;
+ case 54:
+@@ -118,16 +120,19 @@ FFmpegLibWrapper::LinkResult FFmpegLibWr
+ version = AV_FUNC_59;
+ break;
+ case 60:
+ version = AV_FUNC_60;
+ break;
+ case 61:
+ version = AV_FUNC_61;
+ break;
++ case 62:
++ version = AV_FUNC_62;
++ break;
+ default:
+ FFMPEGV_LOG("Unknown avcodec version: %d", macro);
+ Unlink();
+ return isFFMpeg ? ((macro > 57) ?
LinkResult::UnknownFutureFFMpegVersion
+ :
LinkResult::UnknownOlderFFMpegVersion)
+ // All LibAV versions<54.35.1 are blocked, therefore
we
+ // must be dealing with a later one.
+ : LinkResult::UnknownFutureLibAVVersion;
+@@ -158,158 +163,180 @@ FFmpegLibWrapper::LinkResult FFmpegLibWr
+ Unlink(); \
+ return isFFMpeg ? LinkResult::MissingFFMpegFunction \
+ : LinkResult::MissingLibAVFunction; \
+ }
+
+ AV_FUNC(av_lockmgr_register, AV_FUNC_53 | AV_FUNC_54 | AV_FUNC_55 |
+ AV_FUNC_56 | AV_FUNC_57 | AV_FUNC_58)
+ AV_FUNC(avcodec_alloc_context3, AV_FUNC_AVCODEC_ALL)
+- AV_FUNC(avcodec_close, AV_FUNC_AVCODEC_ALL)
++ AV_FUNC(avcodec_close, AV_FUNC_53 | AV_FUNC_54 | AV_FUNC_55 | AV_FUNC_56 |
++ AV_FUNC_57 | AV_FUNC_58 | AV_FUNC_59 |
AV_FUNC_60 |
++ AV_FUNC_61)
++
+ AV_FUNC(avcodec_decode_audio4, AV_FUNC_53 | AV_FUNC_54 | AV_FUNC_55 |
+ AV_FUNC_56 | AV_FUNC_57 | AV_FUNC_58)
+ AV_FUNC(avcodec_decode_video2, AV_FUNC_53 | AV_FUNC_54 | AV_FUNC_55 |
+ AV_FUNC_56 | AV_FUNC_57 | AV_FUNC_58)
+ AV_FUNC(avcodec_find_decoder, AV_FUNC_AVCODEC_ALL)
+ AV_FUNC(avcodec_find_decoder_by_name,
+- AV_FUNC_58 | AV_FUNC_59 | AV_FUNC_60 | AV_FUNC_61)
++ AV_FUNC_58 | AV_FUNC_59 | AV_FUNC_60 | AV_FUNC_61 | AV_FUNC_62)
+ AV_FUNC(avcodec_find_encoder, AV_FUNC_AVCODEC_ALL)
+ AV_FUNC(avcodec_find_encoder_by_name,
+- AV_FUNC_58 | AV_FUNC_59 | AV_FUNC_60 | AV_FUNC_61)
++ AV_FUNC_58 | AV_FUNC_59 | AV_FUNC_60 | AV_FUNC_61 | AV_FUNC_62)
+ AV_FUNC(avcodec_flush_buffers, AV_FUNC_AVCODEC_ALL)
+ AV_FUNC(avcodec_open2, AV_FUNC_AVCODEC_ALL)
+ AV_FUNC(avcodec_register_all, AV_FUNC_53 | AV_FUNC_54 | AV_FUNC_55 |
+ AV_FUNC_56 | AV_FUNC_57 | AV_FUNC_58)
+ AV_FUNC(av_init_packet, (AV_FUNC_55 | AV_FUNC_56 | AV_FUNC_57 |
AV_FUNC_58 |
+ AV_FUNC_59 | AV_FUNC_60))
+ AV_FUNC(av_parser_init, AV_FUNC_AVCODEC_ALL)
+ AV_FUNC(av_parser_close, AV_FUNC_AVCODEC_ALL)
+ AV_FUNC(av_parser_parse2, AV_FUNC_AVCODEC_ALL)
+ AV_FUNC(avcodec_align_dimensions, AV_FUNC_AVCODEC_ALL)
+ AV_FUNC(avcodec_alloc_frame, (AV_FUNC_53 | AV_FUNC_54))
+ AV_FUNC(avcodec_get_frame_defaults, (AV_FUNC_53 | AV_FUNC_54))
+ AV_FUNC(avcodec_free_frame, AV_FUNC_54)
+ AV_FUNC(avcodec_send_packet,
+- AV_FUNC_58 | AV_FUNC_59 | AV_FUNC_60 | AV_FUNC_61)
++ AV_FUNC_58 | AV_FUNC_59 | AV_FUNC_60 | AV_FUNC_61 | AV_FUNC_62)
+ AV_FUNC(avcodec_receive_packet,
+- AV_FUNC_58 | AV_FUNC_59 | AV_FUNC_60 | AV_FUNC_61)
+- AV_FUNC(avcodec_send_frame, AV_FUNC_58 | AV_FUNC_59 | AV_FUNC_60 |
AV_FUNC_61)
++ AV_FUNC_58 | AV_FUNC_59 | AV_FUNC_60 | AV_FUNC_61 | AV_FUNC_62)
++ AV_FUNC(avcodec_send_frame,
++ AV_FUNC_58 | AV_FUNC_59 | AV_FUNC_60 | AV_FUNC_61 | AV_FUNC_62)
+ AV_FUNC(avcodec_receive_frame,
+- AV_FUNC_58 | AV_FUNC_59 | AV_FUNC_60 | AV_FUNC_61)
++ AV_FUNC_58 | AV_FUNC_59 | AV_FUNC_60 | AV_FUNC_61 | AV_FUNC_62)
+ AV_FUNC(avcodec_default_get_buffer2,
+ (AV_FUNC_55 | AV_FUNC_56 | AV_FUNC_57 | AV_FUNC_58 | AV_FUNC_59 |
+- AV_FUNC_60 | AV_FUNC_61))
+- AV_FUNC(av_packet_alloc,
+- (AV_FUNC_57 | AV_FUNC_58 | AV_FUNC_59 | AV_FUNC_60 | AV_FUNC_61))
+- AV_FUNC(av_packet_unref,
+- (AV_FUNC_57 | AV_FUNC_58 | AV_FUNC_59 | AV_FUNC_60 | AV_FUNC_61))
+- AV_FUNC(av_packet_free,
+- (AV_FUNC_57 | AV_FUNC_58 | AV_FUNC_59 | AV_FUNC_60 | AV_FUNC_61))
++ AV_FUNC_60 | AV_FUNC_61 | AV_FUNC_62))
++ AV_FUNC(av_packet_alloc, (AV_FUNC_57 | AV_FUNC_58 | AV_FUNC_59 |
AV_FUNC_60 |
++ AV_FUNC_61 | AV_FUNC_62))
++ AV_FUNC(av_packet_unref, (AV_FUNC_57 | AV_FUNC_58 | AV_FUNC_59 |
AV_FUNC_60 |
++ AV_FUNC_61 | AV_FUNC_62))
++ AV_FUNC(av_packet_free, (AV_FUNC_57 | AV_FUNC_58 | AV_FUNC_59 |
AV_FUNC_60 |
++ AV_FUNC_61 | AV_FUNC_62))
+ AV_FUNC(avcodec_descriptor_get, AV_FUNC_AVCODEC_ALL)
+ AV_FUNC(av_log_set_callback, AV_FUNC_AVUTIL_ALL)
+ AV_FUNC(av_log_set_level, AV_FUNC_AVUTIL_ALL)
+ AV_FUNC(av_malloc, AV_FUNC_AVUTIL_ALL)
+ AV_FUNC(av_freep, AV_FUNC_AVUTIL_ALL)
+ AV_FUNC(av_frame_alloc,
+ (AV_FUNC_AVUTIL_55 | AV_FUNC_AVUTIL_56 | AV_FUNC_AVUTIL_57 |
+ AV_FUNC_AVUTIL_58 | AV_FUNC_AVUTIL_59 | AV_FUNC_AVUTIL_60 |
+- AV_FUNC_AVUTIL_61))
++ AV_FUNC_AVUTIL_61 | AV_FUNC_AVUTIL_62))
+ AV_FUNC(av_frame_clone,
+ (AV_FUNC_AVUTIL_55 | AV_FUNC_AVUTIL_56 | AV_FUNC_AVUTIL_57 |
+ AV_FUNC_AVUTIL_58 | AV_FUNC_AVUTIL_59 | AV_FUNC_AVUTIL_60 |
+- AV_FUNC_AVUTIL_61))
++ AV_FUNC_AVUTIL_61 | AV_FUNC_AVUTIL_62))
+ AV_FUNC(av_frame_free,
+ (AV_FUNC_AVUTIL_55 | AV_FUNC_AVUTIL_56 | AV_FUNC_AVUTIL_57 |
+ AV_FUNC_AVUTIL_58 | AV_FUNC_AVUTIL_59 | AV_FUNC_AVUTIL_60 |
+- AV_FUNC_AVUTIL_61))
++ AV_FUNC_AVUTIL_61 | AV_FUNC_AVUTIL_62))
+ AV_FUNC(av_frame_unref,
+ (AV_FUNC_AVUTIL_55 | AV_FUNC_AVUTIL_56 | AV_FUNC_AVUTIL_57 |
+ AV_FUNC_AVUTIL_58 | AV_FUNC_AVUTIL_59 | AV_FUNC_AVUTIL_60 |
+- AV_FUNC_AVUTIL_61))
++ AV_FUNC_AVUTIL_61 | AV_FUNC_AVUTIL_62))
+ AV_FUNC(av_frame_get_buffer,
+ (AV_FUNC_AVUTIL_55 | AV_FUNC_AVUTIL_56 | AV_FUNC_AVUTIL_57 |
+ AV_FUNC_AVUTIL_58 | AV_FUNC_AVUTIL_59 | AV_FUNC_AVUTIL_60 |
+- AV_FUNC_AVUTIL_61))
++ AV_FUNC_AVUTIL_61 | AV_FUNC_AVUTIL_62))
+ AV_FUNC(av_frame_make_writable,
+ (AV_FUNC_AVUTIL_55 | AV_FUNC_AVUTIL_56 | AV_FUNC_AVUTIL_57 |
+ AV_FUNC_AVUTIL_58 | AV_FUNC_AVUTIL_59 | AV_FUNC_AVUTIL_60 |
+- AV_FUNC_AVUTIL_61))
++ AV_FUNC_AVUTIL_61 | AV_FUNC_AVUTIL_62))
+ AV_FUNC(av_image_check_size, AV_FUNC_AVUTIL_ALL)
+ AV_FUNC(av_image_get_buffer_size, AV_FUNC_AVUTIL_ALL)
+ AV_FUNC_OPTION(av_channel_layout_default,
+- AV_FUNC_AVUTIL_60 | AV_FUNC_AVUTIL_61)
++ AV_FUNC_AVUTIL_60 | AV_FUNC_AVUTIL_61 | AV_FUNC_AVUTIL_62)
+ AV_FUNC_OPTION(av_channel_layout_from_mask,
+- AV_FUNC_AVUTIL_60 | AV_FUNC_AVUTIL_61)
+- AV_FUNC_OPTION(av_channel_layout_copy, AV_FUNC_AVUTIL_60 |
AV_FUNC_AVUTIL_61)
++ AV_FUNC_AVUTIL_60 | AV_FUNC_AVUTIL_61 | AV_FUNC_AVUTIL_62)
++ AV_FUNC_OPTION(av_channel_layout_copy,
++ AV_FUNC_AVUTIL_60 | AV_FUNC_AVUTIL_61 | AV_FUNC_AVUTIL_62)
+ AV_FUNC_OPTION(av_buffer_get_opaque,
+ (AV_FUNC_AVUTIL_56 | AV_FUNC_AVUTIL_57 | AV_FUNC_AVUTIL_58
|
+- AV_FUNC_AVUTIL_59 | AV_FUNC_AVUTIL_60 |
AV_FUNC_AVUTIL_61))
+- AV_FUNC(
+- av_buffer_create,
+- (AV_FUNC_AVUTIL_55 | AV_FUNC_AVUTIL_56 | AV_FUNC_AVUTIL_57 |
+- AV_FUNC_AVUTIL_58 | AV_FUNC_AVUTIL_59 | AV_FUNC_AVUTIL_60 |
AV_FUNC_61))
++ AV_FUNC_AVUTIL_59 | AV_FUNC_AVUTIL_60 | AV_FUNC_AVUTIL_61
|
++ AV_FUNC_AVUTIL_62))
++ AV_FUNC(av_buffer_create,
++ (AV_FUNC_AVUTIL_55 | AV_FUNC_AVUTIL_56 | AV_FUNC_AVUTIL_57 |
++ AV_FUNC_AVUTIL_58 | AV_FUNC_AVUTIL_59 | AV_FUNC_AVUTIL_60 |
++ AV_FUNC_61 | AV_FUNC_62))
+ AV_FUNC_OPTION(av_frame_get_colorspace,
+ AV_FUNC_AVUTIL_55 | AV_FUNC_AVUTIL_56 | AV_FUNC_AVUTIL_57 |
+ AV_FUNC_AVUTIL_58)
+ AV_FUNC_OPTION(av_frame_get_color_range,
+ AV_FUNC_AVUTIL_55 | AV_FUNC_AVUTIL_56 | AV_FUNC_AVUTIL_57 |
+ AV_FUNC_AVUTIL_58)
+ AV_FUNC(av_strerror, AV_FUNC_AVUTIL_58 | AV_FUNC_AVUTIL_59 |
+- AV_FUNC_AVUTIL_60 | AV_FUNC_AVUTIL_61)
++ AV_FUNC_AVUTIL_60 | AV_FUNC_AVUTIL_61 |
++ AV_FUNC_AVUTIL_62)
+ AV_FUNC(av_get_sample_fmt_name, AV_FUNC_AVUTIL_ALL)
+ AV_FUNC(av_dict_set, AV_FUNC_AVUTIL_ALL)
+ AV_FUNC(av_dict_free, AV_FUNC_AVUTIL_ALL)
+ AV_FUNC(av_opt_set, AV_FUNC_AVUTIL_ALL)
+ AV_FUNC(av_opt_set_double, AV_FUNC_AVUTIL_ALL)
+ AV_FUNC(av_opt_set_int, AV_FUNC_AVUTIL_ALL)
+- AV_FUNC(avcodec_free_context,
+- AV_FUNC_57 | AV_FUNC_58 | AV_FUNC_59 | AV_FUNC_60 | AV_FUNC_61)
+- AV_FUNC_OPTION_SILENT(avcodec_get_hw_config,
+- AV_FUNC_58 | AV_FUNC_59 | AV_FUNC_60 | AV_FUNC_61)
+- AV_FUNC_OPTION_SILENT(av_codec_is_decoder,
+- AV_FUNC_58 | AV_FUNC_59 | AV_FUNC_60 | AV_FUNC_61)
+- AV_FUNC_OPTION_SILENT(av_codec_is_encoder,
+- AV_FUNC_58 | AV_FUNC_59 | AV_FUNC_60 | AV_FUNC_61)
+- AV_FUNC_OPTION_SILENT(av_codec_iterate,
+- AV_FUNC_58 | AV_FUNC_59 | AV_FUNC_60 | AV_FUNC_61)
++ AV_FUNC(avcodec_free_context, AV_FUNC_57 | AV_FUNC_58 | AV_FUNC_59 |
++ AV_FUNC_60 | AV_FUNC_61 | AV_FUNC_62)
++ AV_FUNC_OPTION_SILENT(avcodec_get_hw_config, AV_FUNC_58 | AV_FUNC_59 |
++ AV_FUNC_60 | AV_FUNC_61 |
++ AV_FUNC_62)
++ AV_FUNC_OPTION_SILENT(av_codec_is_decoder, AV_FUNC_58 | AV_FUNC_59 |
++ AV_FUNC_60 | AV_FUNC_61 |
++ AV_FUNC_62)
++ AV_FUNC_OPTION_SILENT(av_codec_is_encoder, AV_FUNC_58 | AV_FUNC_59 |
++ AV_FUNC_60 | AV_FUNC_61 |
++ AV_FUNC_62)
++ AV_FUNC_OPTION_SILENT(av_codec_iterate, AV_FUNC_58 | AV_FUNC_59 |
AV_FUNC_60 |
++ AV_FUNC_61 | AV_FUNC_62)
+ AV_FUNC_OPTION_SILENT(av_hwdevice_ctx_init,
+ AV_FUNC_AVUTIL_58 | AV_FUNC_AVUTIL_59 |
+- AV_FUNC_AVUTIL_60 | AV_FUNC_AVUTIL_61)
++ AV_FUNC_AVUTIL_60 | AV_FUNC_AVUTIL_61 |
++ AV_FUNC_AVUTIL_62)
+ AV_FUNC_OPTION_SILENT(av_hwdevice_ctx_alloc,
+ AV_FUNC_AVUTIL_58 | AV_FUNC_AVUTIL_59 |
+- AV_FUNC_AVUTIL_60 | AV_FUNC_AVUTIL_61)
+- AV_FUNC_OPTION_SILENT(av_buffer_ref, AV_FUNC_AVUTIL_58 |
AV_FUNC_AVUTIL_59 |
+- AV_FUNC_AVUTIL_60 |
+- AV_FUNC_AVUTIL_61)
++ AV_FUNC_AVUTIL_60 | AV_FUNC_AVUTIL_61 |
++ AV_FUNC_AVUTIL_62)
++ AV_FUNC_OPTION_SILENT(
++ av_buffer_ref, AV_FUNC_AVUTIL_58 | AV_FUNC_AVUTIL_59 |
AV_FUNC_AVUTIL_60 |
++ AV_FUNC_AVUTIL_61 | AV_FUNC_AVUTIL_62)
+ AV_FUNC_OPTION_SILENT(av_buffer_unref, AV_FUNC_AVUTIL_58 |
AV_FUNC_AVUTIL_59 |
+ AV_FUNC_AVUTIL_60 |
+- AV_FUNC_AVUTIL_61)
++ AV_FUNC_AVUTIL_61 |
++ AV_FUNC_AVUTIL_62)
+ AV_FUNC_OPTION_SILENT(av_hwframe_ctx_alloc,
+ AV_FUNC_AVUTIL_58 | AV_FUNC_AVUTIL_59 |
+- AV_FUNC_AVUTIL_60 | AV_FUNC_AVUTIL_61)
++ AV_FUNC_AVUTIL_60 | AV_FUNC_AVUTIL_61 |
++ AV_FUNC_AVUTIL_62)
+ AV_FUNC_OPTION_SILENT(av_hwframe_ctx_init,
+ AV_FUNC_AVUTIL_58 | AV_FUNC_AVUTIL_59 |
+- AV_FUNC_AVUTIL_60 | AV_FUNC_AVUTIL_61)
++ AV_FUNC_AVUTIL_60 | AV_FUNC_AVUTIL_61 |
++ AV_FUNC_AVUTIL_62)
+
+ #ifdef MOZ_WIDGET_GTK
+- AV_FUNC_OPTION_SILENT(av_hwdevice_hwconfig_alloc,
+- AV_FUNC_58 | AV_FUNC_59 | AV_FUNC_60 | AV_FUNC_61)
+- AV_FUNC_OPTION_SILENT(av_hwdevice_get_hwframe_constraints,
+- AV_FUNC_58 | AV_FUNC_59 | AV_FUNC_60 | AV_FUNC_61)
+- AV_FUNC_OPTION_SILENT(av_hwframe_constraints_free,
+- AV_FUNC_58 | AV_FUNC_59 | AV_FUNC_60 | AV_FUNC_61)
+- AV_FUNC_OPTION_SILENT(av_hwframe_transfer_get_formats,
+- AV_FUNC_58 | AV_FUNC_59 | AV_FUNC_60 | AV_FUNC_61)
+- AV_FUNC_OPTION_SILENT(av_hwdevice_ctx_create_derived,
+- AV_FUNC_58 | AV_FUNC_59 | AV_FUNC_60 | AV_FUNC_61)
++ AV_FUNC_OPTION_SILENT(av_hwdevice_hwconfig_alloc, AV_FUNC_58 | AV_FUNC_59
|
++ AV_FUNC_60 |
++ AV_FUNC_61 |
AV_FUNC_62)
++ AV_FUNC_OPTION_SILENT(
++ av_hwdevice_get_hwframe_constraints,
++ AV_FUNC_58 | AV_FUNC_59 | AV_FUNC_60 | AV_FUNC_61 | AV_FUNC_62)
++ AV_FUNC_OPTION_SILENT(
++ av_hwframe_constraints_free,
++ AV_FUNC_58 | AV_FUNC_59 | AV_FUNC_60 | AV_FUNC_61 | AV_FUNC_62)
++ AV_FUNC_OPTION_SILENT(
++ av_hwframe_transfer_get_formats,
++ AV_FUNC_58 | AV_FUNC_59 | AV_FUNC_60 | AV_FUNC_61 | AV_FUNC_62)
++ AV_FUNC_OPTION_SILENT(
++ av_hwdevice_ctx_create_derived,
++ AV_FUNC_58 | AV_FUNC_59 | AV_FUNC_60 | AV_FUNC_61 | AV_FUNC_62)
+ AV_FUNC_OPTION_SILENT(avcodec_get_name, AV_FUNC_57 | AV_FUNC_58 |
AV_FUNC_59 |
+- AV_FUNC_60 | AV_FUNC_61)
++ AV_FUNC_60 | AV_FUNC_61 |
++ AV_FUNC_62)
+ AV_FUNC_OPTION_SILENT(av_get_pix_fmt_string,
+ AV_FUNC_AVUTIL_58 | AV_FUNC_AVUTIL_59 |
+- AV_FUNC_AVUTIL_60 | AV_FUNC_AVUTIL_61)
++ AV_FUNC_AVUTIL_60 | AV_FUNC_AVUTIL_61 |
++ AV_FUNC_AVUTIL_62)
+ #endif
+
+ AV_FUNC_OPTION(av_tx_init, AV_FUNC_AVUTIL_ALL)
+ AV_FUNC_OPTION(av_tx_uninit, AV_FUNC_AVUTIL_ALL)
+
+ #ifdef MOZ_WIDGET_ANDROID
+ AV_FUNC(av_mediacodec_release_buffer, AV_FUNC_AVCODEC_ALL);
+ AV_FUNC(moz_avcodec_mediacodec_is_eos, AV_FUNC_AVCODEC_ALL);
+
diff --git
a/http/firefox/patches/0022-bmo-1962139-vendor-ffmpeg-8-headers-p3.patch
b/http/firefox/patches/0022-bmo-1962139-vendor-ffmpeg-8-headers-p3.patch
new file mode 100644
index 0000000..c801d27
--- /dev/null
+++ b/http/firefox/patches/0022-bmo-1962139-vendor-ffmpeg-8-headers-p3.patch
@@ -0,0 +1,248 @@
+
+# HG changeset patch
+# User Landry Breuil <landry AT openbsd.org>
+# Date 1763998694 0
+# Node ID 568e210639f79dda7823455a3fded66735466344
+# Parent f23d592e80b9f929c85df96449b057503fd3a53c
+Bug 1962139 - Adapt FFmpegVideoDecoder for ffmpeg8
r=media-playback-reviewers,alwu
+
+Provide IsKeyFrame() helper handling various versions, and use it where
+appropriate. Also, In ffmpeg 8 the offset is to be found in the packet
struct,
+not anymore in the frame struct, so use a variable local to DoDecode()
+
+Differential Revision: https://phabricator.services.mozilla.com/D272254
+
+
+diff --git a/dom/media/platforms/ffmpeg/FFmpegVideoDecoder.cpp
b/dom/media/platforms/ffmpeg/FFmpegVideoDecoder.cpp
+--- a/dom/media/platforms/ffmpeg/FFmpegVideoDecoder.cpp
++++ b/dom/media/platforms/ffmpeg/FFmpegVideoDecoder.cpp
+@@ -1060,16 +1060,24 @@ void FFmpegVideoDecoder<LIBAV_VER>::Init
+ static int64_t GetFramePts(const AVFrame* aFrame) {
+ #if LIBAVCODEC_VERSION_MAJOR > 57
+ return aFrame->pts;
+ #else
+ return aFrame->pkt_pts;
+ #endif
+ }
+
++static bool IsKeyFrame(const AVFrame* aFrame) {
++#if LIBAVCODEC_VERSION_MAJOR > 61
++ return !!(aFrame->flags & AV_FRAME_FLAG_KEY);
++#else
++ return !!aFrame->key_frame;
++#endif
++}
++
+ #if LIBAVCODEC_VERSION_MAJOR >= 58
+ void FFmpegVideoDecoder<LIBAV_VER>::DecodeStats::DecodeStart() {
+ mDecodeStart = TimeStamp::Now();
+ }
+
+ bool FFmpegVideoDecoder<LIBAV_VER>::DecodeStats::IsDecodingSlow() const {
+ return mDecodedFramesLate > mMaxLateDecodedFrames;
+ }
+@@ -1268,16 +1276,22 @@ MediaResult FFmpegVideoDecoder<LIBAV_VER
+ // Release unused VA-API surfaces before avcodec_receive_frame() as
+ // ffmpeg recycles VASurface for HW decoding.
+ if (mVideoFramePool) {
+ mVideoFramePool->ReleaseUnusedVAAPIFrames();
+ }
+ # endif
+
+ int res = mLib->avcodec_receive_frame(mCodecContext, mFrame);
++ int64_t fpos =
++# if LIBAVCODEC_VERSION_MAJOR > 61
++ packet->pos;
++# else
++ mFrame->pkt_pos;
++# endif
+ if (res == int(AVERROR_EOF)) {
+ if (MaybeQueueDrain(aResults)) {
+ FFMPEG_LOG(" Output buffer shortage.");
+ return NS_ERROR_NOT_AVAILABLE;
+ }
+ FFMPEG_LOG(" End of stream.");
+ return NS_ERROR_DOM_MEDIA_END_OF_STREAM;
+ }
+@@ -1303,53 +1317,52 @@ MediaResult FFmpegVideoDecoder<LIBAV_VER
+ PROFILER_MARKER_TEXT("FFmpegVideoDecoder::DoDecode",
MEDIA_PLAYBACK, {},
+ "Fallback to SW decode");
+ FFMPEG_LOG(" HW decoding is slow, switching back to SW decode");
+ return MediaResult(
+ NS_ERROR_DOM_MEDIA_DECODE_ERR,
+ RESULT_DETAIL("HW decoding is slow, switching back to SW
decode"));
+ }
+ if (mUsingV4L2) {
+- rv = CreateImageV4L2(mFrame->pkt_pos, GetFramePts(mFrame),
+- Duration(mFrame), aResults);
++ rv = CreateImageV4L2(fpos, GetFramePts(mFrame), Duration(mFrame),
++ aResults);
+ } else {
+- rv = CreateImageVAAPI(mFrame->pkt_pos, GetFramePts(mFrame),
+- Duration(mFrame), aResults);
++ rv = CreateImageVAAPI(fpos, GetFramePts(mFrame), Duration(mFrame),
++ aResults);
+ }
+
+ // If VA-API/V4L2 playback failed, just quit. Decoder is going to be
+ // restarted without hardware acceleration
+ if (NS_FAILED(rv)) {
+ // Explicitly remove dmabuf surface pool as it's configured
+ // for VA-API/V4L2 support.
+ mVideoFramePool = nullptr;
+ return rv;
+ }
+ # elif defined(MOZ_ENABLE_D3D11VA)
+ mDecodeStats.UpdateDecodeTimes(Duration(mFrame));
+- rv = CreateImageD3D11(mFrame->pkt_pos, GetFramePts(mFrame),
+- Duration(mFrame), aResults);
++ rv = CreateImageD3D11(fpos, GetFramePts(mFrame), Duration(mFrame),
++ aResults);
+ # elif defined(MOZ_WIDGET_ANDROID)
+ InputInfo info(aSample);
+ info.mTimecode = -1;
+ TakeInputInfo(mFrame, info);
+ mDecodeStats.UpdateDecodeTimes(info.mDuration);
+- rv = CreateImageMediaCodec(mFrame->pkt_pos, GetFramePts(mFrame),
+- info.mTimecode, info.mDuration, aResults);
++ rv = CreateImageMediaCodec(fpos, GetFramePts(mFrame), info.mTimecode,
++ info.mDuration, aResults);
+ # else
+ mDecodeStats.UpdateDecodeTimes(Duration(mFrame));
+ return MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
+ RESULT_DETAIL("No HW decoding implementation!"));
+ # endif
+ } else
+ # endif
+ {
+ mDecodeStats.UpdateDecodeTimes(Duration(mFrame));
+- rv = CreateImage(mFrame->pkt_pos, GetFramePts(mFrame),
Duration(mFrame),
+- aResults);
++ rv = CreateImage(fpos, GetFramePts(mFrame), Duration(mFrame),
aResults);
+ }
+ if (NS_FAILED(rv)) {
+ return rv;
+ }
+
+ RecordFrame(aSample, aResults.LastElement());
+ if (aGotFrame) {
+ *aGotFrame = true;
+@@ -1699,17 +1712,17 @@ MediaResult FFmpegVideoDecoder<LIBAV_VER
+ !requiresCopy) {
+ RefPtr<ImageBufferWrapper> wrapper = static_cast<ImageBufferWrapper*>(
+ mLib->av_buffer_get_opaque(mFrame->buf[0]));
+ MOZ_ASSERT(wrapper);
+ FFMPEG_LOGV("Create a video data from a shmem image=%p", wrapper.get());
+ v = VideoData::CreateFromImage(
+ mInfo.mDisplay, aOffset, TimeUnit::FromMicroseconds(aPts),
+ TimeUnit::FromMicroseconds(aDuration), wrapper->AsImage(),
+- !!mFrame->key_frame, TimeUnit::FromMicroseconds(-1));
++ IsKeyFrame(mFrame), TimeUnit::FromMicroseconds(-1));
+ }
+ #endif
+ #if defined(MOZ_WIDGET_GTK) && defined(MOZ_USE_HWDECODE)
+ if (mUploadSWDecodeToDMABuf) {
+ MOZ_DIAGNOSTIC_ASSERT(!v);
+ if (!mVideoFramePool) {
+ mVideoFramePool = MakeUnique<VideoFramePool<LIBAV_VER>>(10);
+ }
+@@ -1737,17 +1750,17 @@ MediaResult FFmpegVideoDecoder<LIBAV_VER
+ ? ColorSpace2ToString(mInfo.mColorPrimaries.value())
+ : "unknown",
+ mInfo.mTransferFunction
+ ? TransferFunctionToString(mInfo.mTransferFunction.value())
+ : "unknown");
+ v = VideoData::CreateFromImage(
+ mInfo.mDisplay, aOffset, TimeUnit::FromMicroseconds(aPts),
+ TimeUnit::FromMicroseconds(aDuration), surface->GetAsImage(),
+- !!mFrame->key_frame, TimeUnit::FromMicroseconds(-1));
++ IsKeyFrame(mFrame), TimeUnit::FromMicroseconds(-1));
+ } else {
+ FFMPEG_LOG("Failed to uploaded video data to DMABuf");
+ }
+ } else {
+ FFMPEG_LOG("Failed to convert PlanarYCbCrData");
+ }
+ }
+ #endif
+@@ -1757,17 +1770,17 @@ MediaResult FFmpegVideoDecoder<LIBAV_VER
+ if (NS_FAILED(ret.Code())) {
+ FFMPEG_LOG("%s: %s", __func__, ret.Message().get());
+ return ret;
+ }
+ }
+ Result<already_AddRefed<VideoData>, MediaResult> r =
+ VideoData::CreateAndCopyData(
+ mInfo, mImageContainer, aOffset,
TimeUnit::FromMicroseconds(aPts),
+- TimeUnit::FromMicroseconds(aDuration), b, !!mFrame->key_frame,
++ TimeUnit::FromMicroseconds(aDuration), b, IsKeyFrame(mFrame),
+ TimeUnit::FromMicroseconds(mFrame->pkt_dts),
+ mInfo.ScaledImageRect(mFrame->width, mFrame->height),
+ mImageAllocator);
+ if (r.isErr()) {
+ return r.unwrapErr();
+ }
+ v = r.unwrap();
+ }
+@@ -1835,21 +1848,20 @@ MediaResult FFmpegVideoDecoder<LIBAV_VER
+ aPts, mFrame->pkt_dts, aDuration,
+ YUVColorSpaceToString(GetFrameColorSpace()),
+ mInfo.mColorPrimaries
+ ? ColorSpace2ToString(mInfo.mColorPrimaries.value())
+ : "unknown",
+ mInfo.mTransferFunction
+ ? TransferFunctionToString(mInfo.mTransferFunction.value())
+ : "unknown");
+-
+ RefPtr<VideoData> vp = VideoData::CreateFromImage(
+ mInfo.mDisplay, aOffset, TimeUnit::FromMicroseconds(aPts),
+ TimeUnit::FromMicroseconds(aDuration), surface->GetAsImage(),
+- !!mFrame->key_frame, TimeUnit::FromMicroseconds(mFrame->pkt_dts));
++ IsKeyFrame(mFrame), TimeUnit::FromMicroseconds(mFrame->pkt_dts));
+
+ if (!vp) {
+ return MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
+ RESULT_DETAIL("VAAPI image allocation error"));
+ }
+
+ aResults.AppendElement(std::move(vp));
+ return NS_OK;
+@@ -1888,17 +1900,17 @@ MediaResult FFmpegVideoDecoder<LIBAV_VER
+ RESULT_DETAIL("V4L2 dmabuf allocation error"));
+ }
+ surface->SetYUVColorSpace(GetFrameColorSpace());
+ surface->SetColorRange(GetFrameColorRange());
+
+ RefPtr<VideoData> vp = VideoData::CreateFromImage(
+ mInfo.mDisplay, aOffset, TimeUnit::FromMicroseconds(aPts),
+ TimeUnit::FromMicroseconds(aDuration), surface->GetAsImage(),
+- !!mFrame->key_frame, TimeUnit::FromMicroseconds(mFrame->pkt_dts));
++ IsKeyFrame(mFrame), TimeUnit::FromMicroseconds(mFrame->pkt_dts));
+
+ if (!vp) {
+ return MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
+ RESULT_DETAIL("V4L2 image creation error"));
+ }
+
+ aResults.AppendElement(std::move(vp));
+ return NS_OK;
+@@ -2342,17 +2354,17 @@ MediaResult FFmpegVideoDecoder<LIBAV_VER
+ nsPrintfCString msg("Failed to create a D3D image");
+ FFMPEG_LOG("%s", msg.get());
+ return MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR, msg);
+ }
+ MOZ_ASSERT(image);
+
+ RefPtr<VideoData> v = VideoData::CreateFromImage(
+ mInfo.mDisplay, aOffset, TimeUnit::FromMicroseconds(aPts),
+- TimeUnit::FromMicroseconds(aDuration), image, !!mFrame->key_frame,
++ TimeUnit::FromMicroseconds(aDuration), image, IsKeyFrame(mFrame),
+ TimeUnit::FromMicroseconds(mFrame->pkt_dts));
+ if (!v) {
+ nsPrintfCString msg("D3D image allocation error");
+ FFMPEG_LOG("%s", msg.get());
+ return MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR, msg);
+ }
+ aResults.AppendElement(std::move(v));
+ return NS_OK;
+
diff --git
a/http/firefox/patches/0022-bmo-1962139-vendor-ffmpeg-8-headers-p4.patch
b/http/firefox/patches/0022-bmo-1962139-vendor-ffmpeg-8-headers-p4.patch
new file mode 100644
index 0000000..4f4d769
--- /dev/null
+++ b/http/firefox/patches/0022-bmo-1962139-vendor-ffmpeg-8-headers-p4.patch
@@ -0,0 +1,45 @@
+
+# HG changeset patch
+# User Landry Breuil <landry AT openbsd.org>
+# Date 1763998694 0
+# Node ID eb8352183323a78e60ab16855494e4e07e41314b
+# Parent 568e210639f79dda7823455a3fded66735466344
+Bug 1962139 - FF_PROFILE defines were renamed to AV_PROFILE in ffmpeg8
r=media-playback-reviewers,alwu
+
+Differential Revision: https://phabricator.services.mozilla.com/D272255
+
+
+diff --git a/dom/media/platforms/ffmpeg/FFmpegVideoEncoder.cpp
b/dom/media/platforms/ffmpeg/FFmpegVideoEncoder.cpp
+--- a/dom/media/platforms/ffmpeg/FFmpegVideoEncoder.cpp
++++ b/dom/media/platforms/ffmpeg/FFmpegVideoEncoder.cpp
+@@ -113,21 +113,29 @@ struct H264Setting {
+ };
+
+ struct H264LiteralSetting {
+ int mValue;
+ nsLiteralCString mString;
+ H264Setting get() const { return {mValue, mString.AsString()}; }
+ };
+
++#if LIBAVCODEC_VERSION_MAJOR < 62
+ static constexpr H264LiteralSetting H264Profiles[]{
+ {FF_PROFILE_H264_BASELINE, "baseline"_ns},
+ {FF_PROFILE_H264_MAIN, "main"_ns},
+ {FF_PROFILE_H264_EXTENDED, ""_ns},
+ {FF_PROFILE_H264_HIGH, "high"_ns}};
++#else
++static constexpr H264LiteralSetting H264Profiles[]{
++ {AV_PROFILE_H264_BASELINE, "baseline"_ns},
++ {AV_PROFILE_H264_MAIN, "main"_ns},
++ {AV_PROFILE_H264_EXTENDED, ""_ns},
++ {AV_PROFILE_H264_HIGH, "high"_ns}};
++#endif
+
+ static Maybe<H264Setting> GetH264Profile(const H264_PROFILE& aProfile) {
+ switch (aProfile) {
+ case H264_PROFILE::H264_PROFILE_UNKNOWN:
+ return Nothing();
+ case H264_PROFILE::H264_PROFILE_BASE:
+ return Some(H264Profiles[0].get());
+ case H264_PROFILE::H264_PROFILE_MAIN:
+
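
For reference, a compatibility-alias sketch of the same rename: instead of duplicating the profile table, which is what the patch above does, older libavcodec could map the new AV_PROFILE_* names back to FF_PROFILE_*. The <libavcodec/avcodec.h> include is an assumption; both sets of constants appear in the hunk above:

// Illustrative alternative only; not the approach taken by the patch above.
extern "C" {
#include <libavcodec/avcodec.h>  // assumed location of the profile constants
}
#include <cstdio>

// On libavcodec builds that predate the AV_PROFILE_* names, alias them to the
// old FF_PROFILE_* macros so the table below compiles everywhere.
#ifndef AV_PROFILE_H264_BASELINE
#  define AV_PROFILE_H264_BASELINE FF_PROFILE_H264_BASELINE
#  define AV_PROFILE_H264_MAIN     FF_PROFILE_H264_MAIN
#  define AV_PROFILE_H264_EXTENDED FF_PROFILE_H264_EXTENDED
#  define AV_PROFILE_H264_HIGH     FF_PROFILE_H264_HIGH
#endif

static constexpr int kH264Profiles[] = {
    AV_PROFILE_H264_BASELINE,
    AV_PROFILE_H264_MAIN,
    AV_PROFILE_H264_EXTENDED,
    AV_PROFILE_H264_HIGH,
};

int main() {
    // Baseline comes first in the table, matching the order used by the patch.
    std::printf("first H.264 profile id: %d\n", kH264Profiles[0]);
    return 0;
}
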
diff --git
a/http/firefox/patches/0022-bmo-1962139-vendor-ffmpeg-8-headers-p5.patch
b/http/firefox/patches/0022-bmo-1962139-vendor-ffmpeg-8-headers-p5.patch
new file mode 100644
index 0000000..022e035
--- /dev/null
+++ b/http/firefox/patches/0022-bmo-1962139-vendor-ffmpeg-8-headers-p5.patch
@@ -0,0 +1,129 @@
+
+# HG changeset patch
+# User Landry Breuil <landry AT openbsd.org>
+# Date 1763998694 0
+# Node ID b1660dee0419e8ef66ecccfcc1bba59b7e5b5bc7
+# Parent eb8352183323a78e60ab16855494e4e07e41314b
+Bug 1962139 - Enable ffmpeg8 support in FFmpegRuntimeLinker.cpp
r=media-playback-reviewers,padenot
+
+Differential Revision: https://phabricator.services.mozilla.com/D272256
+
+
+diff --git a/dom/media/platforms/ffmpeg/FFmpegRuntimeLinker.cpp
b/dom/media/platforms/ffmpeg/FFmpegRuntimeLinker.cpp
+--- a/dom/media/platforms/ffmpeg/FFmpegRuntimeLinker.cpp
++++ b/dom/media/platforms/ffmpeg/FFmpegRuntimeLinker.cpp
+@@ -29,29 +29,31 @@ class FFmpegEncoderModule {
+ static already_AddRefed<PlatformEncoderModule> Create(FFmpegLibWrapper*);
+ };
+
+ static FFmpegLibWrapper sLibAV;
+
+ static const char* sLibs[] = {
+ // clang-format off
+ #if defined(XP_DARWIN)
++ "libavcodec.62.dylib",
+ "libavcodec.61.dylib",
+ "libavcodec.60.dylib",
+ "libavcodec.59.dylib",
+ "libavcodec.58.dylib",
+ "libavcodec.57.dylib",
+ "libavcodec.56.dylib",
+ "libavcodec.55.dylib",
+ "libavcodec.54.dylib",
+ "libavcodec.53.dylib",
+ #elif defined(XP_OPENBSD)
+- "libavcodec.so", // OpenBSD hardly controls the major/minor library
version
++ "libavcodec.so", // OpenBSD port controls the major/minor library version
+ // of ffmpeg and update it regulary on ABI/API changes
+ #else
++ "libavcodec.so.62",
+ "libavcodec.so.61",
+ "libavcodec.so.60",
+ "libavcodec.so.59",
+ "libavcodec.so.58",
+ "libavcodec-ffmpeg.so.58",
+ "libavcodec-ffmpeg.so.57",
+ "libavcodec-ffmpeg.so.56",
+ "libavcodec.so.57",
+@@ -114,16 +116,19 @@ bool FFmpegRuntimeLinker::Init() {
+ FFmpegDecoderModule<59>::Init(&sLibAV);
+ break;
+ case 60:
+ FFmpegDecoderModule<60>::Init(&sLibAV);
+ break;
+ case 61:
+ FFmpegDecoderModule<61>::Init(&sLibAV);
+ break;
++ case 62:
++ FFmpegDecoderModule<62>::Init(&sLibAV);
++ break;
+ }
+ return true;
+ case FFmpegLibWrapper::LinkResult::NoProvidedLib:
+ MOZ_ASSERT_UNREACHABLE("Incorrectly-setup sLibAV");
+ break;
+ case FFmpegLibWrapper::LinkResult::NoAVCodecVersion:
+ if (sLinkStatus > LinkStatus_INVALID_CANDIDATE) {
+ sLinkStatus = LinkStatus_INVALID_CANDIDATE;
+@@ -204,16 +209,19 @@ already_AddRefed<PlatformDecoderModule>
+ module = FFmpegDecoderModule<59>::Create(&sLibAV);
+ break;
+ case 60:
+ module = FFmpegDecoderModule<60>::Create(&sLibAV);
+ break;
+ case 61:
+ module = FFmpegDecoderModule<61>::Create(&sLibAV);
+ break;
++ case 62:
++ module = FFmpegDecoderModule<62>::Create(&sLibAV);
++ break;
+ default:
+ module = nullptr;
+ }
+ return module.forget();
+ }
+
+ /* static */
+ already_AddRefed<PlatformEncoderModule>
FFmpegRuntimeLinker::CreateEncoder() {
+@@ -242,16 +250,19 @@ already_AddRefed<PlatformEncoderModule>
+ module = FFmpegEncoderModule<59>::Create(&sLibAV);
+ break;
+ case 60:
+ module = FFmpegEncoderModule<60>::Create(&sLibAV);
+ break;
+ case 61:
+ module = FFmpegEncoderModule<61>::Create(&sLibAV);
+ break;
++ case 62:
++ module = FFmpegEncoderModule<62>::Create(&sLibAV);
++ break;
+ default:
+ module = nullptr;
+ }
+ return module.forget();
+ }
+
+ /* static */ const char* FFmpegRuntimeLinker::LinkStatusString() {
+ switch (sLinkStatus) {
+diff --git a/dom/media/platforms/ffmpeg/moz.build
b/dom/media/platforms/ffmpeg/moz.build
+--- a/dom/media/platforms/ffmpeg/moz.build
++++ b/dom/media/platforms/ffmpeg/moz.build
+@@ -12,16 +12,17 @@ DIRS += [
+ "libav53",
+ "libav54",
+ "libav55",
+ "ffmpeg57",
+ "ffmpeg58",
+ "ffmpeg59",
+ "ffmpeg60",
+ "ffmpeg61",
++ "ffmpeg62",
+ ]
+
+ UNIFIED_SOURCES += ["FFmpegRuntimeLinker.cpp"]
+
+ if CONFIG["MOZ_WIDGET_TOOLKIT"] == "gtk":
+ include("/ipc/chromium/chromium-config.mozbuild")
+ UNIFIED_SOURCES += ["VALibWrapper.cpp"]
+
+
diff --git
a/http/firefox/patches/0023-bgo-966424-include-prenv.h-jumbo-build-fix.patch
b/http/firefox/patches/0023-bgo-966424-include-prenv.h-jumbo-build-fix.patch
new file mode 100644
index 0000000..47d5a45
--- /dev/null
+++
b/http/firefox/patches/0023-bgo-966424-include-prenv.h-jumbo-build-fix.patch
@@ -0,0 +1,10 @@
+--- a/widget/gtk/WakeLockListener.cpp
++++ b/widget/gtk/WakeLockListener.cpp
+@@ -10,6 +10,7 @@
+ #include "WakeLockListener.h"
+ #include "WidgetUtilsGtk.h"
+ #include "mozilla/ScopeExit.h"
++#include "prenv.h"
+
+ #ifdef MOZ_ENABLE_DBUS
+ # include <gio/gio.h>