/* * copyright (c) 2001 Fabrice Bellard * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #ifndef AVFORMAT_AVFORMAT_H #define AVFORMAT_AVFORMAT_H /** * @file * @ingroup libavf * Main libavformat public API header */ /** * @defgroup libavf libavformat * I/O and Muxing/Demuxing Library * * Libavformat (lavf) is a library for dealing with various media container * formats. Its main two purposes are demuxing - i.e. splitting a media file * into component streams, and the reverse process of muxing - writing supplied * data in a specified container format. It also has an @ref lavf_io * "I/O module" which supports a number of protocols for accessing the data (e.g. * file, tcp, http and others). * Unless you are absolutely sure you won't use libavformat's network * capabilities, you should also call avformat_network_init(). * * A supported input format is described by an AVInputFormat struct, conversely * an output format is described by AVOutputFormat. You can iterate over all * input/output formats using the av_demuxer_iterate / av_muxer_iterate() functions. * The protocols layer is not part of the public API, so you can only get the names * of supported protocols with the avio_enum_protocols() function. * * Main lavf structure used for both muxing and demuxing is AVFormatContext, * which exports all information about the file being read or written. As with * most Libavformat structures, its size is not part of public ABI, so it cannot be * allocated on stack or directly with av_malloc(). To create an * AVFormatContext, use avformat_alloc_context() (some functions, like * avformat_open_input() might do that for you). * * Most importantly an AVFormatContext contains: * @li the @ref AVFormatContext.iformat "input" or @ref AVFormatContext.oformat * "output" format. It is either autodetected or set by user for input; * always set by user for output. * @li an @ref AVFormatContext.streams "array" of AVStreams, which describe all * elementary streams stored in the file. AVStreams are typically referred to * using their index in this array. * @li an @ref AVFormatContext.pb "I/O context". It is either opened by lavf or * set by user for input, always set by user for output (unless you are dealing * with an AVFMT_NOFILE format). * * @section lavf_options Passing options to (de)muxers * It is possible to configure lavf muxers and demuxers using the @ref avoptions * mechanism. Generic (format-independent) libavformat options are provided by * AVFormatContext, they can be examined from a user program by calling * av_opt_next() / av_opt_find() on an allocated AVFormatContext (or its AVClass * from avformat_get_class()). Private (format-specific) options are provided by * AVFormatContext.priv_data if and only if AVInputFormat.priv_class / * AVOutputFormat.priv_class of the corresponding format struct is non-NULL. 
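 *
 * As a minimal sketch of that mechanism (assuming libavutil/opt.h is
 * included; the "fflags" option below is just one example of a generic
 * option), the generic options can be listed and set on a freshly
 * allocated context:
 * @code
 * AVFormatContext *tmp = avformat_alloc_context();
 * const AVOption *opt = NULL;
 * while ((opt = av_opt_next(tmp, opt)))          // walk the AVFormatContext-level options
 *     printf("%s: %s\n", opt->name, opt->help ? opt->help : "");
 * av_opt_set(tmp, "fflags", "+genpts", 0);       // set a generic option by name
 * avformat_free_context(tmp);
 * @endcode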
* Further options may be provided by the @ref AVFormatContext.pb "I/O context", * if its AVClass is non-NULL, and the protocols layer. See the discussion on * nesting in @ref avoptions documentation to learn how to access those. * * @section urls * URL strings in libavformat are made of a scheme/protocol, a ':', and a * scheme specific string. URLs without a scheme and ':' used for local files * are supported but deprecated. "file:" should be used for local files. * * It is important that the scheme string is not taken from untrusted * sources without checks. * * Note that some schemes/protocols are quite powerful, allowing access to * both local and remote files, parts of them, concatenations of them, local * audio and video devices and so on. * * @{ * * @defgroup lavf_decoding Demuxing * @{ * Demuxers read a media file and split it into chunks of data (@em packets). A * @ref AVPacket "packet" contains one or more encoded frames which belongs to a * single elementary stream. In the lavf API this process is represented by the * avformat_open_input() function for opening a file, av_read_frame() for * reading a single packet and finally avformat_close_input(), which does the * cleanup. * * @section lavf_decoding_open Opening a media file * The minimum information required to open a file is its URL, which * is passed to avformat_open_input(), as in the following code: * @code * const char *url = "file:in.mp3"; * AVFormatContext *s = NULL; * int ret = avformat_open_input(&s, url, NULL, NULL); * if (ret < 0) * abort(); * @endcode * The above code attempts to allocate an AVFormatContext, open the * specified file (autodetecting the format) and read the header, exporting the * information stored there into s. Some formats do not have a header or do not * store enough information there, so it is recommended that you call the * avformat_find_stream_info() function which tries to read and decode a few * frames to find missing information. * * In some cases you might want to preallocate an AVFormatContext yourself with * avformat_alloc_context() and do some tweaking on it before passing it to * avformat_open_input(). One such case is when you want to use custom functions * for reading input data instead of lavf internal I/O layer. * To do that, create your own AVIOContext with avio_alloc_context(), passing * your reading callbacks to it. Then set the @em pb field of your * AVFormatContext to newly created AVIOContext. * * Since the format of the opened file is in general not known until after * avformat_open_input() has returned, it is not possible to set demuxer private * options on a preallocated context. Instead, the options should be passed to * avformat_open_input() wrapped in an AVDictionary: * @code * AVDictionary *options = NULL; * av_dict_set(&options, "video_size", "640x480", 0); * av_dict_set(&options, "pixel_format", "rgb24", 0); * * if (avformat_open_input(&s, url, NULL, &options) < 0) * abort(); * av_dict_free(&options); * @endcode * This code passes the private options 'video_size' and 'pixel_format' to the * demuxer. They would be necessary for e.g. the rawvideo demuxer, since it * cannot know how to interpret raw video data otherwise. If the format turns * out to be something different than raw video, those options will not be * recognized by the demuxer and therefore will not be applied. Such unrecognized * options are then returned in the options dictionary (recognized options are * consumed). The calling program can handle such unrecognized options as it * wishes, e.g. 
* @code * AVDictionaryEntry *e; * if (e = av_dict_get(options, "", NULL, AV_DICT_IGNORE_SUFFIX)) { * fprintf(stderr, "Option %s not recognized by the demuxer.\n", e->key); * abort(); * } * @endcode * * After you have finished reading the file, you must close it with * avformat_close_input(). It will free everything associated with the file. * * @section lavf_decoding_read Reading from an opened file * Reading data from an opened AVFormatContext is done by repeatedly calling * av_read_frame() on it. Each call, if successful, will return an AVPacket * containing encoded data for one AVStream, identified by * AVPacket.stream_index. This packet may be passed straight into the libavcodec * decoding functions avcodec_send_packet() or avcodec_decode_subtitle2() if the * caller wishes to decode the data. * * AVPacket.pts, AVPacket.dts and AVPacket.duration timing information will be * set if known. They may also be unset (i.e. AV_NOPTS_VALUE for * pts/dts, 0 for duration) if the stream does not provide them. The timing * information will be in AVStream.time_base units, i.e. it has to be * multiplied by the timebase to convert them to seconds. * * A packet returned by av_read_frame() is always reference-counted, * i.e. AVPacket.buf is set and the user may keep it indefinitely. * The packet must be freed with av_packet_unref() when it is no * longer needed. * * @section lavf_decoding_seek Seeking * @} * * @defgroup lavf_encoding Muxing * @{ * Muxers take encoded data in the form of @ref AVPacket "AVPackets" and write * it into files or other output bytestreams in the specified container format. * * The main API functions for muxing are avformat_write_header() for writing the * file header, av_write_frame() / av_interleaved_write_frame() for writing the * packets and av_write_trailer() for finalizing the file. * * At the beginning of the muxing process, the caller must first call * avformat_alloc_context() to create a muxing context. The caller then sets up * the muxer by filling the various fields in this context: * * - The @ref AVFormatContext.oformat "oformat" field must be set to select the * muxer that will be used. * - Unless the format is of the AVFMT_NOFILE type, the @ref AVFormatContext.pb * "pb" field must be set to an opened IO context, either returned from * avio_open2() or a custom one. * - Unless the format is of the AVFMT_NOSTREAMS type, at least one stream must * be created with the avformat_new_stream() function. The caller should fill * the @ref AVStream.codecpar "stream codec parameters" information, such as the * codec @ref AVCodecParameters.codec_type "type", @ref AVCodecParameters.codec_id * "id" and other parameters (e.g. width / height, the pixel or sample format, * etc.) as known. The @ref AVStream.time_base "stream timebase" should * be set to the timebase that the caller desires to use for this stream (note * that the timebase actually used by the muxer can be different, as will be * described later). * - It is advised to manually initialize only the relevant fields in * AVCodecParameters, rather than using @ref avcodec_parameters_copy() during * remuxing: there is no guarantee that the codec context values remain valid * for both input and output format contexts. * - The caller may fill in additional information, such as @ref * AVFormatContext.metadata "global" or @ref AVStream.metadata "per-stream" * metadata, @ref AVFormatContext.chapters "chapters", @ref * AVFormatContext.programs "programs", etc. as described in the * AVFormatContext documentation. 
Whether such information will actually be * stored in the output depends on what the container format and the muxer * support. * * When the muxing context is fully set up, the caller must call * avformat_write_header() to initialize the muxer internals and write the file * header. Whether anything actually is written to the IO context at this step * depends on the muxer, but this function must always be called. Any muxer * private options must be passed in the options parameter to this function. * * The data is then sent to the muxer by repeatedly calling av_write_frame() or * av_interleaved_write_frame() (consult those functions' documentation for * discussion on the difference between them; only one of them may be used with * a single muxing context, they should not be mixed). Do note that the timing * information on the packets sent to the muxer must be in the corresponding * AVStream's timebase. That timebase is set by the muxer (in the * avformat_write_header() step) and may be different from the timebase * requested by the caller. * * Once all the data has been written, the caller must call av_write_trailer() * to flush any buffered packets and finalize the output file, then close the IO * context (if any) and finally free the muxing context with * avformat_free_context(). * @} * * @defgroup lavf_io I/O Read/Write * @{ * @section lavf_io_dirlist Directory listing * The directory listing API makes it possible to list files on remote servers. * * Some of possible use cases: * - an "open file" dialog to choose files from a remote location, * - a recursive media finder providing a player with an ability to play all * files from a given directory. * * @subsection lavf_io_dirlist_open Opening a directory * At first, a directory needs to be opened by calling avio_open_dir() * supplied with a URL and, optionally, ::AVDictionary containing * protocol-specific parameters. The function returns zero or positive * integer and allocates AVIODirContext on success. * * @code * AVIODirContext *ctx = NULL; * if (avio_open_dir(&ctx, "smb://example.com/some_dir", NULL) < 0) { * fprintf(stderr, "Cannot open directory.\n"); * abort(); * } * @endcode * * This code tries to open a sample directory using smb protocol without * any additional parameters. * * @subsection lavf_io_dirlist_read Reading entries * Each directory's entry (i.e. file, another directory, anything else * within ::AVIODirEntryType) is represented by AVIODirEntry. * Reading consecutive entries from an opened AVIODirContext is done by * repeatedly calling avio_read_dir() on it. Each call returns zero or * positive integer if successful. Reading can be stopped right after the * NULL entry has been read -- it means there are no entries left to be * read. The following code reads all entries from a directory associated * with ctx and prints their names to standard output. 
* @code * AVIODirEntry *entry = NULL; * for (;;) { * if (avio_read_dir(ctx, &entry) < 0) { * fprintf(stderr, "Cannot list directory.\n"); * abort(); * } * if (!entry) * break; * printf("%s\n", entry->name); * avio_free_directory_entry(&entry); * } * @endcode * @} * * @defgroup lavf_codec Demuxers * @{ * @defgroup lavf_codec_native Native Demuxers * @{ * @} * @defgroup lavf_codec_wrappers External library wrappers * @{ * @} * @} * @defgroup lavf_protos I/O Protocols * @{ * @} * @defgroup lavf_internal Internal * @{ * @} * @} */ #include <time.h> #include <stdio.h> /* FILE */ #include "libavcodec/codec.h" #include "libavcodec/codec_par.h" #include "libavcodec/defs.h" #include "libavcodec/packet.h" #include "libavutil/dict.h" #include "libavutil/log.h" #include "avio.h" #include "libavformat/version_major.h" #ifndef HAVE_AV_CONFIG_H /* When included as part of the ffmpeg build, only include the major version * to avoid unnecessary rebuilds. When included externally, keep including * the full version information. */ #include "libavformat/version.h" #endif struct AVFormatContext; struct AVStream; struct AVDeviceInfoList; struct AVDeviceCapabilitiesQuery; /** * @defgroup metadata_api Public Metadata API * @{ * @ingroup libavf * The metadata API allows libavformat to export metadata tags to a client * application when demuxing. Conversely it allows a client application to * set metadata when muxing. * * Metadata is exported or set as pairs of key/value strings in the 'metadata' * fields of the AVFormatContext, AVStream, AVChapter and AVProgram structs * using the @ref lavu_dict "AVDictionary" API. Like all strings in FFmpeg, * metadata is assumed to be UTF-8 encoded Unicode. Note that metadata * exported by demuxers isn't checked to be valid UTF-8 in most cases. * * Important concepts to keep in mind: * - Keys are unique; there can never be 2 tags with the same key. This is * also meant semantically, i.e., a demuxer should not knowingly produce * several keys that are literally different but semantically identical. * E.g., key=Author5, key=Author6. In this example, all authors must be * placed in the same tag. * - Metadata is flat, not hierarchical; there are no subtags. If you * want to store, e.g., the email address of the child of producer Alice * and actor Bob, that could have key=alice_and_bobs_childs_email_address. * - Several modifiers can be applied to the tag name. This is done by * appending a dash character ('-') and the modifier name in the order * they appear in the list below -- e.g. foo-eng-sort, not foo-sort-eng. * - language -- a tag whose value is localized for a particular language * is appended with the ISO 639-2/B 3-letter language code. * For example: Author-ger=Michael, Author-eng=Mike * The original/default language is in the unqualified "Author" tag. * A demuxer should set a default if it sets any translated tag. * - sorting -- a modified version of a tag that should be used for * sorting will have '-sort' appended. E.g. artist="The Beatles", * artist-sort="Beatles, The". * - Some protocols and demuxers support metadata updates. After a successful * call to av_read_frame(), AVFormatContext.event_flags or AVStream.event_flags * will be updated to indicate if metadata changed. In order to detect metadata * changes on a stream, you need to loop through all streams in the AVFormatContext * and check their individual event_flags. * * - Demuxers attempt to export metadata in a generic format, however tags * with no generic equivalents are left as they are stored in the container.
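 *
 * As a minimal sketch, a client can enumerate whatever tags a demuxer
 * exported by walking the dictionary with av_dict_get(); fmt_ctx below is
 * assumed to be an already opened AVFormatContext, and per-stream or
 * per-chapter metadata can be walked the same way:
 * @code
 * const AVDictionaryEntry *tag = NULL;
 * while ((tag = av_dict_get(fmt_ctx->metadata, "", tag, AV_DICT_IGNORE_SUFFIX)))
 *     printf("%s=%s\n", tag->key, tag->value);
 * @endcode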
* Follows a list of generic tag names: * @verbatim album -- name of the set this work belongs to album_artist -- main creator of the set/album, if different from artist. e.g. "Various Artists" for compilation albums. artist -- main creator of the work comment -- any additional description of the file. composer -- who composed the work, if different from artist. copyright -- name of copyright holder. creation_time-- date when the file was created, preferably in ISO 8601. date -- date when the work was created, preferably in ISO 8601. disc -- number of a subset, e.g. disc in a multi-disc collection. encoder -- name/settings of the software/hardware that produced the file. encoded_by -- person/group who created the file. filename -- original name of the file. genre -- . language -- main language in which the work is performed, preferably in ISO 639-2 format. Multiple languages can be specified by separating them with commas. performer -- artist who performed the work, if different from artist. E.g for "Also sprach Zarathustra", artist would be "Richard Strauss" and performer "London Philharmonic Orchestra". publisher -- name of the label/publisher. service_name -- name of the service in broadcasting (channel name). service_provider -- name of the service provider in broadcasting. title -- name of the work. track -- number of this work in the set, can be in form current/total. variant_bitrate -- the total bitrate of the bitrate variant that the current stream is part of @endverbatim * * Look in the examples section for an application example how to use the Metadata API. * * @} */ /* packet functions */ /** * Allocate and read the payload of a packet and initialize its * fields with default values. * * @param s associated IO context * @param pkt packet * @param size desired payload size * @return >0 (read size) if OK, AVERROR_xxx otherwise */ int av_get_packet(AVIOContext *s, AVPacket *pkt, int size); /** * Read data and append it to the current content of the AVPacket. * If pkt->size is 0 this is identical to av_get_packet. * Note that this uses av_grow_packet and thus involves a realloc * which is inefficient. Thus this function should only be used * when there is no reasonable way to know (an upper bound of) * the final size. * * @param s associated IO context * @param pkt packet * @param size amount of data to read * @return >0 (read size) if OK, AVERROR_xxx otherwise, previous data * will not be lost even if an error occurs. */ int av_append_packet(AVIOContext *s, AVPacket *pkt, int size); /*************************************************/ /* input/output formats */ struct AVCodecTag; /** * This structure contains the data a format has to probe a file. */ typedef struct AVProbeData { const char *filename; unsigned char *buf; /**< Buffer must have AVPROBE_PADDING_SIZE of extra allocated bytes filled with zero. */ int buf_size; /**< Size of buf except extra allocated bytes */ const char *mime_type; /**< mime_type, when known. */ } AVProbeData; #define AVPROBE_SCORE_RETRY (AVPROBE_SCORE_MAX/4) #define AVPROBE_SCORE_STREAM_RETRY (AVPROBE_SCORE_MAX/4-1) #define AVPROBE_SCORE_EXTENSION 50 ///< score for file extension #define AVPROBE_SCORE_MIME 75 ///< score for file mime type #define AVPROBE_SCORE_MAX 100 ///< maximum score #define AVPROBE_PADDING_SIZE 32 ///< extra allocated bytes at the end of the probe buffer /// Demuxer will use avio_open, no opened file should be provided by the caller. #define AVFMT_NOFILE 0x0001 #define AVFMT_NEEDNUMBER 0x0002 /**< Needs '%d' in filename. 
*/ /** * The muxer/demuxer is experimental and should be used with caution. * * - demuxers: will not be selected automatically by probing, must be specified * explicitly. */ #define AVFMT_EXPERIMENTAL 0x0004 #define AVFMT_SHOW_IDS 0x0008 /**< Show format stream IDs numbers. */ #define AVFMT_GLOBALHEADER 0x0040 /**< Format wants global header. */ #define AVFMT_NOTIMESTAMPS 0x0080 /**< Format does not need / have any timestamps. */ #define AVFMT_GENERIC_INDEX 0x0100 /**< Use generic index building code. */ #define AVFMT_TS_DISCONT 0x0200 /**< Format allows timestamp discontinuities. Note, muxers always require valid (monotone) timestamps */ #define AVFMT_VARIABLE_FPS 0x0400 /**< Format allows variable fps. */ #define AVFMT_NODIMENSIONS 0x0800 /**< Format does not need width/height */ #define AVFMT_NOSTREAMS 0x1000 /**< Format does not require any streams */ #define AVFMT_NOBINSEARCH 0x2000 /**< Format does not allow to fall back on binary search via read_timestamp */ #define AVFMT_NOGENSEARCH 0x4000 /**< Format does not allow to fall back on generic search */ #define AVFMT_NO_BYTE_SEEK 0x8000 /**< Format does not allow seeking by bytes */ #define AVFMT_ALLOW_FLUSH 0x10000 /**< Format allows flushing. If not set, the muxer will not receive a NULL packet in the write_packet function. */ #define AVFMT_TS_NONSTRICT 0x20000 /**< Format does not require strictly increasing timestamps, but they must still be monotonic */ #define AVFMT_TS_NEGATIVE 0x40000 /**< Format allows muxing negative timestamps. If not set the timestamp will be shifted in av_write_frame and av_interleaved_write_frame so they start from 0. The user or muxer can override this through AVFormatContext.avoid_negative_ts */ #define AVFMT_SEEK_TO_PTS 0x4000000 /**< Seeking is based on PTS */ /** * @addtogroup lavf_encoding * @{ */ typedef struct AVOutputFormat { const char *name; /** * Descriptive name for the format, meant to be more human-readable * than name. You should use the NULL_IF_CONFIG_SMALL() macro * to define it. */ const char *long_name; const char *mime_type; const char *extensions; /**< comma-separated filename extensions */ /* output support */ enum AVCodecID audio_codec; /**< default audio codec */ enum AVCodecID video_codec; /**< default video codec */ enum AVCodecID subtitle_codec; /**< default subtitle codec */ /** * can use flags: AVFMT_NOFILE, AVFMT_NEEDNUMBER, * AVFMT_GLOBALHEADER, AVFMT_NOTIMESTAMPS, AVFMT_VARIABLE_FPS, * AVFMT_NODIMENSIONS, AVFMT_NOSTREAMS, AVFMT_ALLOW_FLUSH, * AVFMT_TS_NONSTRICT, AVFMT_TS_NEGATIVE */ int flags; /** * List of supported codec_id-codec_tag pairs, ordered by "better * choice first". The arrays are all terminated by AV_CODEC_ID_NONE. */ const struct AVCodecTag * const *codec_tag; const AVClass *priv_class; ///< AVClass for the private context /***************************************************************** * No fields below this line are part of the public API. They * may not be used outside of libavformat and can be changed and * removed at will. * New public fields should be added right above. ***************************************************************** */ /** * size of private data so that it can be allocated in the wrapper */ int priv_data_size; /** * Internal flags. See FF_FMT_FLAG_* in internal.h. */ int flags_internal; int (*write_header)(struct AVFormatContext *); /** * Write a packet. If AVFMT_ALLOW_FLUSH is set in flags, * pkt can be NULL in order to flush data buffered in the muxer. 
* When flushing, return 0 if there still is more data to flush, * or 1 if everything was flushed and there is no more buffered * data. */ int (*write_packet)(struct AVFormatContext *, AVPacket *pkt); int (*write_trailer)(struct AVFormatContext *); /** * A format-specific function for interleavement. * If unset, packets will be interleaved by dts. * * @param s An AVFormatContext for output. pkt will be added to * resp. taken from its packet buffer. * @param[in,out] pkt A packet to be interleaved if has_packet is set; * also used to return packets. If no packet is returned * (e.g. on error), pkt is blank on return. * @param flush 1 if no further packets are available as input and * all remaining packets should be output. * @param has_packet If set, pkt contains a packet to be interleaved * on input; otherwise pkt is blank on input. * @return 1 if a packet was output, 0 if no packet could be output, * < 0 if an error occurred */ int (*interleave_packet)(struct AVFormatContext *s, AVPacket *pkt, int flush, int has_packet); /** * Test if the given codec can be stored in this container. * * @return 1 if the codec is supported, 0 if it is not. * A negative number if unknown. * MKTAG('A', 'P', 'I', 'C') if the codec is only supported as AV_DISPOSITION_ATTACHED_PIC */ int (*query_codec)(enum AVCodecID id, int std_compliance); void (*get_output_timestamp)(struct AVFormatContext *s, int stream, int64_t *dts, int64_t *wall); /** * Allows sending messages from application to device. */ int (*control_message)(struct AVFormatContext *s, int type, void *data, size_t data_size); /** * Write an uncoded AVFrame. * * See av_write_uncoded_frame() for details. * * The library will free *frame afterwards, but the muxer can prevent it * by setting the pointer to NULL. */ int (*write_uncoded_frame)(struct AVFormatContext *, int stream_index, AVFrame **frame, unsigned flags); /** * Returns device list with its properties. * @see avdevice_list_devices() for more details. */ int (*get_device_list)(struct AVFormatContext *s, struct AVDeviceInfoList *device_list); enum AVCodecID data_codec; /**< default data codec */ /** * Initialize format. May allocate data here, and set any AVFormatContext or * AVStream parameters that need to be set before packets are sent. * This method must not write output. * * Return 0 if streams were fully configured, 1 if not, negative AVERROR on failure * * Any allocations made here must be freed in deinit(). */ int (*init)(struct AVFormatContext *); /** * Deinitialize format. If present, this is called whenever the muxer is being * destroyed, regardless of whether or not the header has been written. * * If a trailer is being written, this is called after write_trailer(). * * This is called if init() fails as well. */ void (*deinit)(struct AVFormatContext *); /** * Set up any necessary bitstream filtering and extract any extra data needed * for the global header. * * @note pkt might have been directly forwarded by a meta-muxer; therefore * pkt->stream_index as well as the pkt's timebase might be invalid. * Return 0 if more packets from this stream must be checked; 1 if not. */ int (*check_bitstream)(struct AVFormatContext *s, struct AVStream *st, const AVPacket *pkt); } AVOutputFormat; /** * @} */ /** * @addtogroup lavf_decoding Demuxer object; a file container format corresponds to one AVInputFormat structure * @{ */ typedef struct AVInputFormat { /** * A comma separated list of short names for the format. New names * may be appended with a minor bump.
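 *
 * For example, one of these short names can be used to force a specific
 * demuxer instead of relying on probing; av_find_input_format() (also
 * provided by libavformat) matches against this list:
 * @code
 * const AVInputFormat *fmt = av_find_input_format("mp3");
 * @endcode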
*/ const char *name; /** * Descriptive name for the format, meant to be more human-readable * than name. You should use the NULL_IF_CONFIG_SMALL() macro * to define it. */ const char *long_name; /** * Can use flags: AVFMT_NOFILE, AVFMT_NEEDNUMBER, AVFMT_SHOW_IDS, * AVFMT_NOTIMESTAMPS, AVFMT_GENERIC_INDEX, AVFMT_TS_DISCONT, AVFMT_NOBINSEARCH, * AVFMT_NOGENSEARCH, AVFMT_NO_BYTE_SEEK, AVFMT_SEEK_TO_PTS. */ int flags; /** * If extensions are defined, then no probe is done. You should * usually not use extension format guessing because it is not * reliable enough */ const char *extensions; const struct AVCodecTag * const *codec_tag; const AVClass *priv_class; ///< AVClass for the private context /** * Comma-separated list of mime types. * It is used check for matching mime types while probing. * @see av_probe_input_format2 */ const char *mime_type; /***************************************************************** * No fields below this line are part of the public API. They * may not be used outside of libavformat and can be changed and * removed at will. * New public fields should be added right above. ***************************************************************** */ /** * Raw demuxers store their codec ID here. */ int raw_codec_id; /** * Size of private data so that it can be allocated in the wrapper. */ int priv_data_size; /** * Internal flags. See FF_FMT_FLAG_* in internal.h. */ int flags_internal; /** * Tell if a given file has a chance of being parsed as this format. * The buffer provided is guaranteed to be AVPROBE_PADDING_SIZE bytes * big so you do not have to check for that unless you need more. */ int (*read_probe)(const AVProbeData *); /** * Read the format header and initialize the AVFormatContext * structure. Return 0 if OK. 'avformat_new_stream' should be * called to create new streams. */ int (*read_header)(struct AVFormatContext *); /** * Read one packet and put it in 'pkt'. pts and flags are also * set. 'avformat_new_stream' can be called only if the flag * AVFMTCTX_NOHEADER is used and only in the calling thread (not in a * background thread). * @return 0 on success, < 0 on error. * Upon returning an error, pkt must be unreferenced by the caller. */ int (*read_packet)(struct AVFormatContext *, AVPacket *pkt); /** * Close the stream. The AVFormatContext and AVStreams are not * freed by this function */ int (*read_close)(struct AVFormatContext *); /** * Seek to a given timestamp relative to the frames in * stream component stream_index. * @param stream_index Must not be -1. * @param flags Selects which direction should be preferred if no exact * match is available. * @return >= 0 on success (but not necessarily the new offset) */ int (*read_seek)(struct AVFormatContext *, int stream_index, int64_t timestamp, int flags); /** * Get the next timestamp in stream[stream_index].time_base units. * @return the timestamp or AV_NOPTS_VALUE if an error occurred */ int64_t (*read_timestamp)(struct AVFormatContext *s, int stream_index, int64_t *pos, int64_t pos_limit); /** * Start/resume playing - only meaningful if using a network-based format * (RTSP). */ int (*read_play)(struct AVFormatContext *); /** * Pause playing - only meaningful if using a network-based format * (RTSP). */ int (*read_pause)(struct AVFormatContext *); /** * Seek to timestamp ts. * Seeking will be done so that the point from which all active streams * can be presented successfully will be closest to ts and within min/max_ts. * Active streams are all streams that have AVStream.discard < AVDISCARD_ALL. 
*/ int (*read_seek2)(struct AVFormatContext *s, int stream_index, int64_t min_ts, int64_t ts, int64_t max_ts, int flags); /** * Returns device list with it properties. * @see avdevice_list_devices() for more details. */ int (*get_device_list)(struct AVFormatContext *s, struct AVDeviceInfoList *device_list); } AVInputFormat; /** * @} */ enum AVStreamParseType { AVSTREAM_PARSE_NONE, AVSTREAM_PARSE_FULL, /**< full parsing and repack */ AVSTREAM_PARSE_HEADERS, /**< Only parse headers, do not repack. */ AVSTREAM_PARSE_TIMESTAMPS, /**< full parsing and interpolation of timestamps for frames not starting on a packet boundary */ AVSTREAM_PARSE_FULL_ONCE, /**< full parsing and repack of the first frame only, only implemented for H.264 currently */ AVSTREAM_PARSE_FULL_RAW, /**< full parsing and repack with timestamp and position generation by parser for raw this assumes that each packet in the file contains no demuxer level headers and just codec level data, otherwise position generation would fail */ }; typedef struct AVIndexEntry { int64_t pos; int64_t timestamp; /**< * Timestamp in AVStream.time_base units, preferably the time from which on correctly decoded frames are available * when seeking to this entry. That means preferable PTS on keyframe based formats. * But demuxers can choose to store a different timestamp, if it is more convenient for the implementation or nothing better * is known */ #define AVINDEX_KEYFRAME 0x0001 #define AVINDEX_DISCARD_FRAME 0x0002 /** * Flag is used to indicate which frame should be discarded after decoding. */ int flags:2; int size:30; //Yeah, trying to keep the size of this small to reduce memory requirements (it is 24 vs. 32 bytes due to possible 8-byte alignment). int min_distance; /**< Minimum distance between this and the previous keyframe, used to avoid unneeded searching. */ } AVIndexEntry; /** * The stream should be chosen by default among other streams of the same type, * unless the user has explicitly specified otherwise. */ #define AV_DISPOSITION_DEFAULT (1 << 0) /** * The stream is not in original language. * * @note AV_DISPOSITION_ORIGINAL is the inverse of this disposition. At most * one of them should be set in properly tagged streams. * @note This disposition may apply to any stream type, not just audio. */ #define AV_DISPOSITION_DUB (1 << 1) /** * The stream is in original language. * * @see the notes for AV_DISPOSITION_DUB */ #define AV_DISPOSITION_ORIGINAL (1 << 2) /** * The stream is a commentary track. */ #define AV_DISPOSITION_COMMENT (1 << 3) /** * The stream contains song lyrics. */ #define AV_DISPOSITION_LYRICS (1 << 4) /** * The stream contains karaoke audio. */ #define AV_DISPOSITION_KARAOKE (1 << 5) /** * Track should be used during playback by default. * Useful for subtitle track that should be displayed * even when user did not explicitly ask for subtitles. */ #define AV_DISPOSITION_FORCED (1 << 6) /** * The stream is intended for hearing impaired audiences. */ #define AV_DISPOSITION_HEARING_IMPAIRED (1 << 7) /** * The stream is intended for visually impaired audiences. */ #define AV_DISPOSITION_VISUAL_IMPAIRED (1 << 8) /** * The audio stream contains music and sound effects without voice. */ #define AV_DISPOSITION_CLEAN_EFFECTS (1 << 9) /** * The stream is stored in the file as an attached picture/"cover art" (e.g. * APIC frame in ID3v2). The first (usually only) packet associated with it * will be returned among the first few packets read from the file unless * seeking takes place. 
It can also be accessed at any time in * AVStream.attached_pic. */ #define AV_DISPOSITION_ATTACHED_PIC (1 << 10) /** * The stream is sparse, and contains thumbnail images, often corresponding * to chapter markers. Only ever used with AV_DISPOSITION_ATTACHED_PIC. */ #define AV_DISPOSITION_TIMED_THUMBNAILS (1 << 11) /** * The stream is intended to be mixed with a spatial audio track. For example, * it could be used for narration or stereo music, and may remain unchanged by * listener head rotation. */ #define AV_DISPOSITION_NON_DIEGETIC (1 << 12) /** * The subtitle stream contains captions, providing a transcription and possibly * a translation of audio. Typically intended for hearing-impaired audiences. */ #define AV_DISPOSITION_CAPTIONS (1 << 16) /** * The subtitle stream contains a textual description of the video content. * Typically intended for visually-impaired audiences or for the cases where the * video cannot be seen. */ #define AV_DISPOSITION_DESCRIPTIONS (1 << 17) /** * The subtitle stream contains time-aligned metadata that is not intended to be * directly presented to the user. */ #define AV_DISPOSITION_METADATA (1 << 18) /** * The audio stream is intended to be mixed with another stream before * presentation. * Corresponds to mix_type=0 in mpegts. */ #define AV_DISPOSITION_DEPENDENT (1 << 19) /** * The video stream contains still images. */ #define AV_DISPOSITION_STILL_IMAGE (1 << 20) /** * @return The AV_DISPOSITION_* flag corresponding to disp or a negative error * code if disp does not correspond to a known stream disposition. */ int av_disposition_from_string(const char *disp); /** * @param disposition a combination of AV_DISPOSITION_* values * @return The string description corresponding to the lowest set bit in * disposition. NULL when the lowest set bit does not correspond * to a known disposition or when disposition is 0. */ const char *av_disposition_to_string(int disposition); /** * Options for behavior on timestamp wrap detection. */ #define AV_PTS_WRAP_IGNORE 0 ///< ignore the wrap #define AV_PTS_WRAP_ADD_OFFSET 1 ///< add the format specific offset on wrap detection #define AV_PTS_WRAP_SUB_OFFSET -1 ///< subtract the format specific offset on wrap detection /** * Stream structure. * New fields can be added to the end with minor version bumps. * Removal, reordering and changes to existing fields require a major version bump. * sizeof(AVStream) must not be used outside libav*. */ typedef struct AVStream { #if FF_API_AVSTREAM_CLASS /** * A class for @ref avoptions. Set on stream creation. */ const AVClass *av_class; #endif int index; /**< stream index in AVFormatContext */ /** * Format-specific stream ID. * decoding: set by libavformat * encoding: set by the user, replaced by libavformat if left unset */ int id; void *priv_data; /** * This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented. * * decoding: set by libavformat * encoding: May be set by the caller before avformat_write_header() to provide a hint to the muxer about the desired timebase. * In avformat_write_header(), the muxer will overwrite this field with the timebase that will actually be used for the timestamps written into the file (which may or may not be related to the user-provided one, depending on the format). */ AVRational time_base; /** * Decoding: pts of the first frame of the stream in presentation order, in stream time base. * Only set this if you are absolutely 100% sure that the value you set it to really is the pts of the first frame. * This may be undefined (AV_NOPTS_VALUE). * @note The ASF header does not contain a correct start_time; the ASF demuxer must NOT set this. */ int64_t start_time; /** * Decoding: duration of the stream, in stream time base. * If a source file does not specify a duration, but does specify * a bitrate, this value will be estimated from bitrate and file size. * * Encoding: May be set by the caller before avformat_write_header() to * provide a hint to the muxer about the estimated duration. */ int64_t duration; int64_t nb_frames; ///< number of frames in this stream if known or 0 /** * Stream disposition - a combination of AV_DISPOSITION_* flags.
* - demuxing: set by libavformat when creating the stream or in * avformat_find_stream_info(). * - muxing: may be set by the caller before avformat_write_header(). */ int disposition; enum AVDiscard discard; ///< Selects which packets can be discarded at will and do not need to be demuxed. /** * sample aspect ratio (0 if unknown) * - encoding: Set by user. * - decoding: Set by libavformat. */ AVRational sample_aspect_ratio; AVDictionary *metadata; /** * Average framerate * * - demuxing: May be set by libavformat when creating the stream or in * avformat_find_stream_info(). * - muxing: May be set by the caller before avformat_write_header(). */ AVRational avg_frame_rate; /** * For streams with AV_DISPOSITION_ATTACHED_PIC disposition, this packet * will contain the attached picture. * * decoding: set by libavformat, must not be modified by the caller. * encoding: unused */ AVPacket attached_pic; /** * An array of side data that applies to the whole stream (i.e. the * container does not allow it to change between packets). * * There may be no overlap between the side data in this array and side data * in the packets. I.e. a given side data is either exported by the muxer * (demuxing) / set by the caller (muxing) in this array, then it never * appears in the packets, or the side data is exported / sent through * the packets (always in the first packet where the value becomes known or * changes), then it does not appear in this array. * * - demuxing: Set by libavformat when the stream is created. * - muxing: May be set by the caller before avformat_write_header(). * * Freed by libavformat in avformat_free_context(). * * @see av_format_inject_global_side_data() */ AVPacketSideData *side_data; /** * The number of elements in the AVStream.side_data array. */ int nb_side_data; /** * Flags indicating events happening on the stream, a combination of * AVSTREAM_EVENT_FLAG_*. * * - demuxing: may be set by the demuxer in avformat_open_input(), * avformat_find_stream_info() and av_read_frame(). Flags must be cleared * by the user once the event has been handled. * - muxing: may be set by the user after avformat_write_header() to * indicate a user-triggered event. The muxer will clear the flags for * events it has handled in av_[interleaved]_write_frame(). */ int event_flags; /** * - demuxing: the demuxer read new metadata from the file and updated * AVStream.metadata accordingly * - muxing: the user updated AVStream.metadata and wishes the muxer to write * it into the file */ #define AVSTREAM_EVENT_FLAG_METADATA_UPDATED 0x0001 /** * - demuxing: new packets for this stream were read from the file. This * event is informational only and does not guarantee that new packets * for this stream will necessarily be returned from av_read_frame(). */ #define AVSTREAM_EVENT_FLAG_NEW_PACKETS (1 << 1) /** * Real base framerate of the stream. * This is the lowest framerate with which all timestamps can be * represented accurately (it is the least common multiple of all * framerates in the stream). Note, this value is just a guess! * For example, if the time base is 1/90000 and all frames have either * approximately 3600 or 1800 timer ticks, then r_frame_rate will be 50/1. */ AVRational r_frame_rate; /** * Codec parameters associated with this stream. Allocated and freed by libavformat in * avformat_new_stream() and avformat_free_context() respectively. * * - demuxing: filled by libavformat on stream creation or in avformat_find_stream_info() * - muxing: filled by the caller before avformat_write_header() */ AVCodecParameters *codecpar; /** * Number of bits in timestamps. Used for wrapping control.
* * - demuxing: set by libavformat * - muxing: set by libavformat * */ int pts_wrap_bits; } AVStream; struct AVCodecParserContext *av_stream_get_parser(const AVStream *s); /** * Returns the pts of the last muxed packet + its duration * * the retuned value is undefined when used with a demuxer. */ int64_t av_stream_get_end_pts(const AVStream *st); #define AV_PROGRAM_RUNNING 1 /** * New fields can be added to the end with minor version bumps. * Removal, reordering and changes to existing fields require a major * version bump. * sizeof(AVProgram) must not be used outside libav*. */ typedef struct AVProgram { int id; int flags; enum AVDiscard discard; ///< selects which program to discard and which to feed to the caller unsigned int *stream_index; unsigned int nb_stream_indexes; AVDictionary *metadata; int program_num; int pmt_pid; int pcr_pid; int pmt_version; /***************************************************************** * All fields below this line are not part of the public API. They * may not be used outside of libavformat and can be changed and * removed at will. * New public fields should be added right above. ***************************************************************** */ int64_t start_time; int64_t end_time; int64_t pts_wrap_reference; ///< reference dts for wrap detection int pts_wrap_behavior; ///< behavior on wrap detection } AVProgram; #define AVFMTCTX_NOHEADER 0x0001 /**< signal that no header is present (streams are added dynamically) */ #define AVFMTCTX_UNSEEKABLE 0x0002 /**< signal that the stream is definitely not seekable, and attempts to call the seek function will fail. For some network protocols (e.g. HLS), this can change dynamically at runtime. */ typedef struct AVChapter { int64_t id; ///< unique ID to identify the chapter AVRational time_base; ///< time base in which the start/end timestamps are specified int64_t start, end; ///< chapter start/end time in time_base units AVDictionary *metadata; } AVChapter; /** * Callback used by devices to communicate with application. */ typedef int (*av_format_control_message)(struct AVFormatContext *s, int type, void *data, size_t data_size); typedef int (*AVOpenCallback)(struct AVFormatContext *s, AVIOContext **pb, const char *url, int flags, const AVIOInterruptCB *int_cb, AVDictionary **options); /** * The duration of a video can be estimated through various ways, and this enum can be used * to know how the duration was estimated. */ enum AVDurationEstimationMethod { AVFMT_DURATION_FROM_PTS, ///< Duration accurately estimated from PTSes AVFMT_DURATION_FROM_STREAM, ///< Duration estimated from a stream with a known duration AVFMT_DURATION_FROM_BITRATE ///< Duration estimated from bitrate (less accurate) }; /** * Format I/O context. * New fields can be added to the end with minor version bumps. * Removal, reordering and changes to existing fields require a major * version bump. * sizeof(AVFormatContext) must not be used outside libav*, use * avformat_alloc_context() to create an AVFormatContext. * * Fields can be accessed through AVOptions (av_opt*), * the name string used matches the associated command line parameter name and * can be found in libavformat/options_table.h. * The AVOption/command line parameter names differ in some cases from the C * structure field names for historic reasons or brevity. */ typedef struct AVFormatContext { /** * A class for logging and @ref avoptions. Set by avformat_alloc_context(). * Exports (de)muxer private options if they exist. 
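 *
 * As a rough illustration (assuming the mov/mp4 muxer was attached via
 * avformat_alloc_output_context2(), so that its private context already
 * exists; "movflags" is a private option of that muxer and only serves as
 * an example), private options can then be reached through the context (s)
 * with AV_OPT_SEARCH_CHILDREN:
 * @code
 * av_opt_set(s, "movflags", "+faststart", AV_OPT_SEARCH_CHILDREN);
 * @endcode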
*/ const AVClass *av_class; /** * The input container format. * * Demuxing only, set by avformat_open_input(). */ const struct AVInputFormat *iformat; /** * The output container format. * * Muxing only, must be set by the caller before avformat_write_header(). */ const struct AVOutputFormat *oformat; /** * Format private data. This is an AVOptions-enabled struct * if and only if iformat/oformat.priv_class is not NULL. * * - muxing: set by avformat_write_header() * - demuxing: set by avformat_open_input() */ void *priv_data; /** * I/O context. * * - demuxing: either set by the user before avformat_open_input() (then * the user must close it manually) or set by avformat_open_input(). * - muxing: set by the user before avformat_write_header(). The caller must * take care of closing / freeing the IO context. * * Do NOT set this field if the AVFMT_NOFILE flag is set in iformat/oformat.flags. * In such a case, the (de)muxer will handle I/O in some other way and this field will be NULL. */ AVIOContext *pb; /* stream info */ /** * Flags signalling stream properties. A combination of AVFMTCTX_*. * Set by libavformat. */ int ctx_flags; /** * Number of elements in AVFormatContext.streams. * * Set by avformat_new_stream(), must not be modified by any other code. */ unsigned int nb_streams; /** * A list of all streams in the file. New streams are created with * avformat_new_stream(). * * - demuxing: streams are created by libavformat in avformat_open_input(). * If AVFMTCTX_NOHEADER is set in ctx_flags, then new streams may also * appear in av_read_frame(). * - muxing: streams are created by the user before avformat_write_header(). * * Freed by libavformat in avformat_free_context(). */ AVStream **streams; /** * input or output URL. Unlike the old filename field, this field has no * length restriction. * * - demuxing: set by avformat_open_input(), initialized to an empty * string if url parameter was NULL in avformat_open_input(). * - muxing: may be set by the caller before calling avformat_write_header() * (or avformat_init_output() if that is called first) to a string * which is freeable by av_free(). Set to an empty string if it * was NULL in avformat_init_output(). * * Freed by libavformat in avformat_free_context(). */ char *url; /** * Position of the first frame of the component, in * AV_TIME_BASE fractional seconds. NEVER set this value directly: * It is deduced from the AVStream values. * * Demuxing only, set by libavformat. */ int64_t start_time; /** * Duration of the stream, in AV_TIME_BASE fractional seconds. * Only set this value if you know none of the individual stream durations and also do not set any of them. * This is deduced from the AVStream values if not set. * * Demuxing only, set by libavformat. */ int64_t duration; /** * Total stream bitrate in bit/s, 0 if not * available. Never set it directly if the file_size and the * duration are known as FFmpeg can compute it automatically. */ int64_t bit_rate; unsigned int packet_size; int max_delay; /** * Flags modifying the (de)muxer behaviour. A combination of AVFMT_FLAG_*. * Set by the user before avformat_open_input() / avformat_write_header(). */ int flags; #define AVFMT_FLAG_GENPTS 0x0001 ///< Generate missing pts even if it requires parsing future frames. #define AVFMT_FLAG_IGNIDX 0x0002 ///< Ignore index. #define AVFMT_FLAG_NONBLOCK 0x0004 ///< Do not block when reading packets from input.
#define AVFMT_FLAG_IGNDTS 0x0008 ///< Ignore DTS on frames that contain both DTS & PTS #define AVFMT_FLAG_NOFILLIN 0x0010 ///< Do not infer any values from other values, just return what is stored in the container #define AVFMT_FLAG_NOPARSE 0x0020 ///< Do not use AVParsers, you also must set AVFMT_FLAG_NOFILLIN as the fillin code works on frames and no parsing -> no frames. Also seeking to frames can not work if parsing to find frame boundaries has been disabled #define AVFMT_FLAG_NOBUFFER 0x0040 ///< Do not buffer frames when possible #define AVFMT_FLAG_CUSTOM_IO 0x0080 ///< The caller has supplied a custom AVIOContext, don't avio_close() it. #define AVFMT_FLAG_DISCARD_CORRUPT 0x0100 ///< Discard frames marked corrupted #define AVFMT_FLAG_FLUSH_PACKETS 0x0200 ///< Flush the AVIOContext every packet. /** * When muxing, try to avoid writing any random/volatile data to the output. * This includes any random IDs, real-time timestamps/dates, muxer version, etc. * * This flag is mainly intended for testing. */ #define AVFMT_FLAG_BITEXACT 0x0400 #define AVFMT_FLAG_SORT_DTS 0x10000 ///< try to interleave outputted packets by dts (using this flag can slow demuxing down) #if FF_API_LAVF_PRIV_OPT #define AVFMT_FLAG_PRIV_OPT 0x20000 ///< Enable use of private options by delaying codec open (deprecated, does nothing) #endif #define AVFMT_FLAG_FAST_SEEK 0x80000 ///< Enable fast, but inaccurate seeks for some formats #define AVFMT_FLAG_SHORTEST 0x100000 ///< Stop muxing when the shortest stream stops. #define AVFMT_FLAG_AUTO_BSF 0x200000 ///< Add bitstream filters as requested by the muxer /** * Maximum number of bytes read from input in order to determine stream * properties. Used when reading the global header and in * avformat_find_stream_info(). * * Demuxing only, set by the caller before avformat_open_input(). * * @note this is \e not used for determining the \ref AVInputFormat * "input format" * @sa format_probesize */ int64_t probesize; /** * Maximum duration (in AV_TIME_BASE units) of the data read * from input in avformat_find_stream_info(). * Demuxing only, set by the caller before avformat_find_stream_info(). * Can be set to 0 to let avformat choose using a heuristic. */ int64_t max_analyze_duration; const uint8_t *key; int keylen; unsigned int nb_programs; AVProgram **programs; /** * Forced video codec_id. * Demuxing: Set by user. */ enum AVCodecID video_codec_id; /** * Forced audio codec_id. * Demuxing: Set by user. */ enum AVCodecID audio_codec_id; /** * Forced subtitle codec_id. * Demuxing: Set by user. */ enum AVCodecID subtitle_codec_id; /** * Maximum amount of memory in bytes to use for the index of each stream. * If the index exceeds this size, entries will be discarded as * needed to maintain a smaller size. This can lead to slower or less * accurate seeking (depends on demuxer). * Demuxers for which a full in-memory index is mandatory will ignore * this. * - muxing: unused * - demuxing: set by user */ unsigned int max_index_size; /** * Maximum amount of memory in bytes to use for buffering frames * obtained from realtime capture devices. */ unsigned int max_picture_buffer; /** * Number of chapters in AVChapter array. * When muxing, chapters are normally written in the file header, * so nb_chapters should normally be initialized before write_header * is called. Some muxers (e.g. mov and mkv) can also write chapters * in the trailer. To write chapters in the trailer, nb_chapters * must be zero when write_header is called and non-zero when * write_trailer is called. 
* - muxing: set by user * - demuxing: set by libavformat */ unsigned int nb_chapters; AVChapter **chapters; /** * Metadata that applies to the whole file. * * - demuxing: set by libavformat in avformat_open_input() * - muxing: may be set by the caller before avformat_write_header() * * Freed by libavformat in avformat_free_context(). */ AVDictionary *metadata; /** * Start time of the stream in real world time, in microseconds * since the Unix epoch (00:00 1st January 1970). That is, pts=0 in the * stream was captured at this real world time. * - muxing: Set by the caller before avformat_write_header(). If set to * either 0 or AV_NOPTS_VALUE, then the current wall-time will * be used. * - demuxing: Set by libavformat. AV_NOPTS_VALUE if unknown. Note that * the value may become known after some number of frames * have been received. */ int64_t start_time_realtime; /** * The number of frames used for determining the framerate in * avformat_find_stream_info(). * Demuxing only, set by the caller before avformat_find_stream_info(). */ int fps_probe_size; /** * Error recognition; higher values will detect more errors but may * misdetect some more or less valid parts as errors. * Demuxing only, set by the caller before avformat_open_input(). */ int error_recognition; /** * Custom interrupt callbacks for the I/O layer. * * demuxing: set by the user before avformat_open_input(). * muxing: set by the user before avformat_write_header() * (mainly useful for AVFMT_NOFILE formats). The callback * should also be passed to avio_open2() if it's used to * open the file. */ AVIOInterruptCB interrupt_callback; /** * Flags to enable debugging. */ int debug; #define FF_FDEBUG_TS 0x0001 /** * Maximum buffering duration for interleaving. * * To ensure all the streams are interleaved correctly, * av_interleaved_write_frame() will wait until it has at least one packet * for each stream before actually writing any packets to the output file. * When some streams are "sparse" (i.e. there are large gaps between * successive packets), this can result in excessive buffering. * * This field specifies the maximum difference between the timestamps of the * first and the last packet in the muxing queue, above which libavformat * will output a packet regardless of whether it has queued a packet for all * the streams. * * Muxing only, set by the caller before avformat_write_header(). */ int64_t max_interleave_delta; /** * Allow non-standard and experimental extension * @see AVCodecContext.strict_std_compliance */ int strict_std_compliance; /** * Flags indicating events happening on the file, a combination of * AVFMT_EVENT_FLAG_*. * * - demuxing: may be set by the demuxer in avformat_open_input(), * avformat_find_stream_info() and av_read_frame(). Flags must be cleared * by the user once the event has been handled. * - muxing: may be set by the user after avformat_write_header() to * indicate a user-triggered event. The muxer will clear the flags for * events it has handled in av_[interleaved]_write_frame(). */ int event_flags; /** * - demuxing: the demuxer read new metadata from the file and updated * AVFormatContext.metadata accordingly * - muxing: the user updated AVFormatContext.metadata and wishes the muxer to * write it into the file */ #define AVFMT_EVENT_FLAG_METADATA_UPDATED 0x0001 /** * Maximum number of packets to read while waiting for the first timestamp. * Decoding only. */ int max_ts_probe; /** * Avoid negative timestamps during muxing. * Any value of the AVFMT_AVOID_NEG_TS_* constants. 
* Note, this works better when using av_interleaved_write_frame(). * - muxing: Set by user * - demuxing: unused */ int avoid_negative_ts; #define AVFMT_AVOID_NEG_TS_AUTO -1 ///< Enabled when required by target format #define AVFMT_AVOID_NEG_TS_DISABLED 0 ///< Do not shift timestamps even when they are negative. #define AVFMT_AVOID_NEG_TS_MAKE_NON_NEGATIVE 1 ///< Shift timestamps so they are non negative #define AVFMT_AVOID_NEG_TS_MAKE_ZERO 2 ///< Shift timestamps so that they start at 0 /** * Transport stream id. * This will be moved into demuxer private options. Thus no API/ABI compatibility */ int ts_id; /** * Audio preload in microseconds. * Note, not all formats support this and unpredictable things may happen if it is used when not supported. * - encoding: Set by user * - decoding: unused */ int audio_preload; /** * Max chunk time in microseconds. * Note, not all formats support this and unpredictable things may happen if it is used when not supported. * - encoding: Set by user * - decoding: unused */ int max_chunk_duration; /** * Max chunk size in bytes * Note, not all formats support this and unpredictable things may happen if it is used when not supported. * - encoding: Set by user * - decoding: unused */ int max_chunk_size; /** * forces the use of wallclock timestamps as pts/dts of packets * This has undefined results in the presence of B frames. * - encoding: unused * - decoding: Set by user */ int use_wallclock_as_timestamps; /** * avio flags, used to force AVIO_FLAG_DIRECT. * - encoding: unused * - decoding: Set by user */ int avio_flags; /** * The duration field can be estimated through various ways, and this field can be used * to know how the duration was estimated. * - encoding: unused * - decoding: Read by user */ enum AVDurationEstimationMethod duration_estimation_method; /** * Skip initial bytes when opening stream * - encoding: unused * - decoding: Set by user */ int64_t skip_initial_bytes; /** * Correct single timestamp overflows * - encoding: unused * - decoding: Set by user */ unsigned int correct_ts_overflow; /** * Force seeking to any (also non key) frames. * - encoding: unused * - decoding: Set by user */ int seek2any; /** * Flush the I/O context after each packet. * - encoding: Set by user * - decoding: unused */ int flush_packets; /** * format probing score. * The maximal score is AVPROBE_SCORE_MAX, its set when the demuxer probes * the format. * - encoding: unused * - decoding: set by avformat, read by user */ int probe_score; /** * Maximum number of bytes read from input in order to identify the * \ref AVInputFormat "input format". Only used when the format is not set * explicitly by the caller. * * Demuxing only, set by the caller before avformat_open_input(). * * @sa probesize */ int format_probesize; /** * ',' separated list of allowed decoders. * If NULL then all are allowed * - encoding: unused * - decoding: set by user */ char *codec_whitelist; /** * ',' separated list of allowed demuxers. * If NULL then all are allowed * - encoding: unused * - decoding: set by user */ char *format_whitelist; /** * IO repositioned flag. * This is set by avformat when the underlaying IO context read pointer * is repositioned, for example when doing byte based seeking. * Demuxers can use the flag to detect such changes. */ int io_repositioned; /** * Forced video codec. * This allows forcing a specific decoder, even when there are multiple with * the same codec_id. * Demuxing: Set by user */ const AVCodec *video_codec; /** * Forced audio codec. 
* This allows forcing a specific decoder, even when there are multiple with * the same codec_id. * Demuxing: Set by user */ const AVCodec *audio_codec;
/** * Forced subtitle codec. * This allows forcing a specific decoder, even when there are multiple with * the same codec_id. * Demuxing: Set by user */ const AVCodec *subtitle_codec;
/** * Forced data codec. * This allows forcing a specific decoder, even when there are multiple with * the same codec_id. * Demuxing: Set by user */ const AVCodec *data_codec;
/** * Number of bytes to be written as padding in a metadata header. * Demuxing: Unused. * Muxing: Set by user via av_format_set_metadata_header_padding. */ int metadata_header_padding;
/** * User data. * This is a place for some private data of the user. */ void *opaque;
/** * Callback used by devices to communicate with application. */ av_format_control_message control_message_cb;
/** * Output timestamp offset, in microseconds. * Muxing: set by user */ int64_t output_ts_offset;
/** * dump format separator. * can be ", " or "\n " or anything else * - muxing: Set by user. * - demuxing: Set by user. */ uint8_t *dump_separator;
/** * Forced Data codec_id. * Demuxing: Set by user. */ enum AVCodecID data_codec_id;
/** * ',' separated list of allowed protocols. * - encoding: unused * - decoding: set by user */ char *protocol_whitelist;
/** * A callback for opening new IO streams. * * Whenever a muxer or a demuxer needs to open an IO stream (typically from * avformat_open_input() for demuxers, but for certain formats can happen at * other times as well), it will call this callback to obtain an IO context. * * @param s the format context * @param pb on success, the newly opened IO context should be returned here * @param url the url to open * @param flags a combination of AVIO_FLAG_* * @param options a dictionary of additional options, with the same * semantics as in avio_open2() * @return 0 on success, a negative AVERROR code on failure * * @note Certain muxers and demuxers do nesting, i.e. they open one or more * additional internal format contexts. Thus the AVFormatContext pointer * passed to this callback may be different from the one facing the caller. * It will, however, have the same 'opaque' field. */ int (*io_open)(struct AVFormatContext *s, AVIOContext **pb, const char *url, int flags, AVDictionary **options);
/** * A callback for closing the streams opened with AVFormatContext.io_open(). */ void (*io_close)(struct AVFormatContext *s, AVIOContext *pb);
/** * ',' separated list of disallowed protocols. * - encoding: unused * - decoding: set by user */ char *protocol_blacklist;
/** * The maximum number of streams. * - encoding: unused * - decoding: set by user */ int max_streams;
/** * Skip duration calculation in estimate_timings_from_pts. * - encoding: unused * - decoding: set by user */ int skip_estimate_duration_from_pts;
/** * Maximum number of packets that can be probed * - encoding: unused * - decoding: set by user */ int max_probe_packets;
/** * A callback for closing the streams opened with AVFormatContext.io_open(). * * Using this is preferred over io_close, because this can return an error. * Therefore this callback is used instead of io_close by the generic * libavformat code if io_close is NULL or the default.
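 *
 * As an illustration only, a minimal sketch of callbacks that simply delegate
 * to the default avio layer while logging every URL that gets opened; the
 * names my_io_open/my_io_close2 and the context variable ctx are hypothetical:
 * @code
 * static int my_io_open(struct AVFormatContext *s, AVIOContext **pb,
 *                       const char *url, int flags, AVDictionary **options)
 * {
 *     av_log(s, AV_LOG_INFO, "opening %s\n", url);
 *     return avio_open2(pb, url, flags, &s->interrupt_callback, options);
 * }
 *
 * static int my_io_close2(struct AVFormatContext *s, AVIOContext *pb)
 * {
 *     return avio_closep(&pb);
 * }
 *
 * // after avformat_alloc_context():
 * ctx->io_open   = my_io_open;
 * ctx->io_close2 = my_io_close2;
 * @endcode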
* * @param s the format context * @param pb IO context to be closed and freed * @return 0 on success, a negative AVERROR code on failure */ int (*io_close2)(struct AVFormatContext *s, AVIOContext *pb); } AVFormatContext;
/** * This function will cause global side data to be injected in the next packet * of each stream as well as after any subsequent seek. */ void av_format_inject_global_side_data(AVFormatContext *s);
/** * Returns the method used to set ctx->duration. * * @return AVFMT_DURATION_FROM_PTS, AVFMT_DURATION_FROM_STREAM, or AVFMT_DURATION_FROM_BITRATE. */ enum AVDurationEstimationMethod av_fmt_ctx_get_duration_estimation_method(const AVFormatContext* ctx);
/** * @defgroup lavf_core Core functions * @ingroup libavf * * Functions for querying libavformat capabilities, allocating core structures, * etc. * @{ */
/** * Return the LIBAVFORMAT_VERSION_INT constant. */ unsigned avformat_version(void);
/** * Return the libavformat build-time configuration. */ const char *avformat_configuration(void);
/** * Return the libavformat license. */ const char *avformat_license(void);
/** * Do a global initialization of network libraries. This is optional, and is no longer recommended. * * This function only exists to work around thread-safety issues with older GnuTLS or OpenSSL libraries. * If libavformat is linked to newer versions of those libraries, or if you do not use them, calling this function is unnecessary. Otherwise, you need to call this function before any other threads using them are started. * * This function will be deprecated, and will serve no purpose anymore, once support for older GnuTLS and OpenSSL libraries is removed. */ int avformat_network_init(void);
/** * Undo the initialization done by avformat_network_init. Call it only * once for each time you called avformat_network_init. */ int avformat_network_deinit(void);
/** * Iterate over all registered muxers. * * @param opaque a pointer where libavformat will store the iteration state. Must * point to NULL to start the iteration. * * @return the next registered muxer or NULL when the iteration is * finished */ const AVOutputFormat *av_muxer_iterate(void **opaque);
/** * Iterate over all registered demuxers. * * @param opaque a pointer where libavformat will store the iteration state. Must * point to NULL to start the iteration. * * @return the next registered demuxer or NULL when the iteration is * finished */ const AVInputFormat *av_demuxer_iterate(void **opaque);
/** * Allocate an AVFormatContext. * avformat_free_context() can be used to free the context and everything * allocated by the framework within it. */ AVFormatContext *avformat_alloc_context(void);
/** * Free an AVFormatContext and all its streams. * @param s context to free */ void avformat_free_context(AVFormatContext *s);
/** * Get the AVClass for AVFormatContext. It can be used in combination with * AV_OPT_SEARCH_FAKE_OBJ for examining options. * * @see av_opt_find(). */ const AVClass *avformat_get_class(void);
/** * Get the AVClass for AVStream. It can be used in combination with * AV_OPT_SEARCH_FAKE_OBJ for examining options. * * @see av_opt_find(). */ const AVClass *av_stream_get_class(void);
/** * Add a new stream to a media file. * * When demuxing, it is called by the demuxer in read_header(). If the * flag AVFMTCTX_NOHEADER is set in s.ctx_flags, then it may also * be called in read_packet(). * * When muxing, should be called by the user before avformat_write_header(). * * User is required to call avformat_free_context() to clean up the allocation * by avformat_new_stream(). * * @param s media file handle * @param c unused, does nothing * * @return newly created stream or NULL on error. */ AVStream *avformat_new_stream(AVFormatContext *s, const AVCodec *c);
/** * Wrap an existing array as stream side data. * * @param st stream * @param type side information type * @param data the side data array.
It must be allocated with the av_malloc() * family of functions. The ownership of the data is transferred to * st. * @param size side information size * @return zero on success, a negative AVERROR code on failure. On failure, * the stream is unchanged and the data remains owned by the caller. */ int av_stream_add_side_data(AVStream *st, enum AVPacketSideDataType type, uint8_t *data, size_t size);
/** * Allocate new side data for a stream. * * @param stream stream * @param type desired side information type * @param size side information size * @return pointer to freshly allocated data or NULL otherwise */ uint8_t *av_stream_new_side_data(AVStream *stream, enum AVPacketSideDataType type, size_t size);
/** * Get side information from stream. * * @param stream stream * @param type desired side information type * @param size If supplied, *size will be set to the size of the side data * or to zero if the desired side data is not present. * @return pointer to data if present or NULL otherwise */ uint8_t *av_stream_get_side_data(const AVStream *stream, enum AVPacketSideDataType type, size_t *size); AVProgram *av_new_program(AVFormatContext *s, int id);
/** * @} */
/** * Allocate an AVFormatContext for an output format. * avformat_free_context() can be used to free the context and * everything allocated by the framework within it. * * @param *ctx is set to the created format context, or to NULL in * case of failure * @param oformat format to use for allocating the context, if NULL * format_name and filename are used instead * @param format_name the name of output format to use for allocating the * context, if NULL filename is used instead * @param filename the name of the file to use for allocating the * context, may be NULL * @return >= 0 in case of success, a negative AVERROR code in case of * failure */ int avformat_alloc_output_context2(AVFormatContext **ctx, const AVOutputFormat *oformat, const char *format_name, const char *filename);
/** * @addtogroup lavf_decoding * @{ */
/** * Find AVInputFormat based on the short name of the input format. */ const AVInputFormat *av_find_input_format(const char *short_name);
/** * Guess the file format. * * @param pd data to be probed * @param is_opened Whether the file is already opened; determines whether * demuxers with or without AVFMT_NOFILE are probed. */ const AVInputFormat *av_probe_input_format(const AVProbeData *pd, int is_opened);
/** * Guess the file format. * * @param pd data to be probed * @param is_opened Whether the file is already opened; determines whether * demuxers with or without AVFMT_NOFILE are probed. * @param score_max A probe score larger than this is required to accept a * detection, the variable is set to the actual detection * score afterwards. * If the score is <= AVPROBE_SCORE_MAX / 4 it is recommended * to retry with a larger probe buffer. */ const AVInputFormat *av_probe_input_format2(const AVProbeData *pd, int is_opened, int *score_max);
/** * Guess the file format. * * @param is_opened Whether the file is already opened; determines whether * demuxers with or without AVFMT_NOFILE are probed. * @param score_ret The score of the best detection. */ const AVInputFormat *av_probe_input_format3(const AVProbeData *pd, int is_opened, int *score_ret);
/** * Probe a bytestream to determine the input format. Each time a probe returns * with a score that is too low, the probe buffer size is increased and another * attempt is made. When the maximum probe size is reached, the input format * with the highest score is returned.
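 *
 * For illustration, a hedged sketch of probing an already-opened AVIOContext
 * (avio_ctx below is an assumed variable) for its container format:
 * @code
 * const AVInputFormat *fmt = NULL;
 * int score = av_probe_input_buffer2(avio_ctx, &fmt, "", NULL, 0, 0);
 * if (score >= 0)
 *     av_log(NULL, AV_LOG_INFO, "detected format %s (score %d)\n",
 *            fmt->name, score);
 * @endcode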
* * @param pb the bytestream to probe * @param fmt the input format is put here * @param url the url of the stream * @param logctx the log context * @param offset the offset within the bytestream to probe from * @param max_probe_size the maximum probe buffer size (zero for default) * @return the score in case of success, a negative value corresponding to an * AVERROR code otherwise; the maximal score is AVPROBE_SCORE_MAX */ int av_probe_input_buffer2(AVIOContext *pb, const AVInputFormat **fmt, const char *url, void *logctx, unsigned int offset, unsigned int max_probe_size);
/** * Like av_probe_input_buffer2() but returns 0 on success */ int av_probe_input_buffer(AVIOContext *pb, const AVInputFormat **fmt, const char *url, void *logctx, unsigned int offset, unsigned int max_probe_size);
/** * Open an input stream and read the header. The codecs are not opened. * The stream must be closed with avformat_close_input(). * * @param ps Pointer to user-supplied AVFormatContext (allocated by avformat_alloc_context). * May be a pointer to NULL, in which case an AVFormatContext is allocated by this * function and written into ps. * Note that a user-supplied AVFormatContext will be freed on failure. * @param url URL of the stream to open. * @param fmt If non-NULL, this parameter forces a specific input format. * Otherwise the format is autodetected. * @param options A dictionary filled with AVFormatContext and demuxer-private options. * On return this parameter will be destroyed and replaced with a dict containing * options that were not found. May be NULL. * * @return 0 on success, a negative AVERROR on failure. * * @note If you want to use custom IO, preallocate the format context and set its pb field. */ int avformat_open_input(AVFormatContext **ps, const char *url, const AVInputFormat *fmt, AVDictionary **options);
/** * Read packets of a media file to get stream information. This is useful * for file formats with no headers such as MPEG. This function also computes * the real framerate in case of MPEG-2 repeat frame mode. * The logical file position is not changed by this function; * examined packets may be buffered for later processing. * * @param ic media file handle * @param options If non-NULL, an ic.nb_streams long array of pointers to dictionaries, * where the i-th member contains options for the codec corresponding to the i-th stream. * On return each dictionary will be filled with options that were not found. * @return >=0 if OK, AVERROR_xxx on error * * @note this function isn't guaranteed to open all the codecs, so * options being non-empty at return is a perfectly normal behavior. * * @todo Let the user decide somehow what information is needed so that * we do not waste time getting stuff the user does not need. */ int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options);
/** * Find the programs which belong to a given stream. * * @param ic media file handle * @param last the last found program, the search will start after this * program, or from the beginning if it is NULL * @param s stream index * @return the next program which belongs to s, NULL if no program is found or * the last program is not among the programs of ic.
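 *
 * A small sketch (ic is assumed open; stream index 0 is just an example) of
 * finding the first program that contains a given stream:
 * @code
 * AVProgram *prog = av_find_program_from_stream(ic, NULL, 0);
 * if (prog)
 *     av_log(ic, AV_LOG_INFO, "stream 0 belongs to program %d\n", prog->id);
 * @endcode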
*/ AVProgram *av_find_program_from_stream(AVFormatContext *ic, AVProgram *last, int s); void av_program_add_stream_index(AVFormatContext *ac, int progid, unsigned int idx);
/** * Find the "best" stream in the file. * The best stream is determined according to various heuristics as the most * likely to be what the user expects. * If the decoder parameter is non-NULL, av_find_best_stream will find the * default decoder for the stream's codec; streams for which no decoder can * be found are ignored. * * @param ic media file handle * @param type stream type: video, audio, subtitles, etc. * @param wanted_stream_nb user-requested stream number, or -1 for automatic selection * @param related_stream try to find a stream related (eg. in the same program) to this one, or -1 if none * @param decoder_ret if non-NULL, returns the decoder for the selected stream * @param flags flags; none are currently defined * @return the non-negative stream number in case of success, * AVERROR_STREAM_NOT_FOUND if no stream with the requested type could be found, * AVERROR_DECODER_NOT_FOUND if streams were found but no decoder * @note If av_find_best_stream returns successfully and decoder_ret is not NULL, * then *decoder_ret is guaranteed to be set to a valid AVCodec. */ int av_find_best_stream(AVFormatContext *ic, enum AVMediaType type, int wanted_stream_nb, int related_stream, const AVCodec **decoder_ret, int flags);
/** * Return the next frame of a stream. * This function returns what is stored in the file, and does not validate * that what is there are valid frames for the decoder. It will split what is * stored in the file into frames and return one for each call. It will not * omit invalid data between valid frames so as to give the decoder the maximum * information possible for decoding. * * On success, the returned packet is reference-counted (pkt->buf is set) and * valid indefinitely. The packet must be freed with av_packet_unref() when * it is no longer needed. For video, the packet contains exactly one frame. * For audio, it contains an integer number of frames if each frame has * a known fixed size (e.g. PCM or ADPCM data). If the audio frames have * a variable size (e.g. MPEG audio), then it contains one frame. * * pkt->pts, pkt->dts and pkt->duration are always set to correct values in * AVStream.time_base units (and guessed if the format cannot provide them). * pkt->pts can be AV_NOPTS_VALUE if the video format has B-frames, so it is * better to rely on pkt->dts if you do not decompress the payload. * * @return 0 if OK, < 0 on error or end of file. On error, pkt will be blank * (as if it came from av_packet_alloc()). * * @note pkt will be initialized, so it may be uninitialized, but it must not * contain data that needs to be freed. */ int av_read_frame(AVFormatContext *s, AVPacket *pkt);
/** * Seek to the keyframe at timestamp. * 'timestamp' in 'stream_index'. * * @param s media file handle * @param stream_index If stream_index is (-1), a default * stream is selected, and timestamp is automatically converted * from AV_TIME_BASE units to the stream specific time_base. * @param timestamp Timestamp in AVStream.time_base units * or, if no stream is specified, in AV_TIME_BASE units. * @param flags flags which select direction and seeking mode * @return >= 0 on success */ int av_seek_frame(AVFormatContext *s, int stream_index, int64_t timestamp, int flags);
/** * Seek to timestamp ts. * Seeking will be done so that the point from which all active streams * can be presented successfully will be closest to ts and within min/max_ts. * Active streams are all streams that have AVStream.discard < AVDISCARD_ALL. * * If flags contain AVSEEK_FLAG_BYTE, then all timestamps are in bytes and * are the file position (this may not be supported by all demuxers). * If flags contain AVSEEK_FLAG_FRAME, then all timestamps are in frames * in the stream with stream_index (this may not be supported by all demuxers). * Otherwise all timestamps are in units of the stream selected by stream_index * or if stream_index is -1, in AV_TIME_BASE units. * If flags contain AVSEEK_FLAG_ANY, then non-keyframes are treated as * keyframes (this may not be supported by all demuxers). * If flags contain AVSEEK_FLAG_BACKWARD, it is ignored. * * @param s media file handle * @param stream_index index of the stream which is used as time base reference * @param min_ts smallest acceptable timestamp * @param ts target timestamp * @param max_ts largest acceptable timestamp * @param flags flags * @return >=0 on success, error code otherwise * * @note This is part of the new seek API which is still under construction.
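 *
 * As an illustration, a hedged sketch of seeking all active streams as close
 * as possible to the 10 second mark without going past it (s is assumed to be
 * an open AVFormatContext):
 * @code
 * int64_t target = 10 * AV_TIME_BASE;
 * if (avformat_seek_file(s, -1, INT64_MIN, target, target, 0) < 0)
 *     av_log(s, AV_LOG_ERROR, "seeking to %"PRId64" failed\n", target);
 * @endcode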
*/ int avformat_seek_file(AVFormatContext *s, int stream_index, int64_t min_ts, int64_t ts, int64_t max_ts, int flags);
/** * Discard all internally buffered data. This can be useful when dealing with * discontinuities in the byte stream. Generally works only with formats that * can resync. This includes headerless formats like MPEG-TS/TS but should also * work with NUT, Ogg and in a limited way AVI for example. * * The set of streams, the detected duration, stream parameters and codecs do * not change when calling this function. If you want a complete reset, it's * better to open a new AVFormatContext. * * This does not flush the AVIOContext (s->pb). If necessary, call * avio_flush(s->pb) before calling this function. * * @param s media file handle * @return >= 0 on success, error code otherwise */ int avformat_flush(AVFormatContext *s);
/** * Start playing a network-based stream (e.g. RTSP stream) at the * current position. */ int av_read_play(AVFormatContext *s);
/** * Pause a network-based stream (e.g. RTSP stream). * * Use av_read_play() to resume it. */ int av_read_pause(AVFormatContext *s);
/** * Close an opened input AVFormatContext. Free it and all its contents * and set *s to NULL. */ void avformat_close_input(AVFormatContext **s);
/** * @} */
#define AVSEEK_FLAG_BACKWARD 1 ///< seek backward #define AVSEEK_FLAG_BYTE 2 ///< seeking based on position in bytes #define AVSEEK_FLAG_ANY 4 ///< seek to any frame, even non-keyframes #define AVSEEK_FLAG_FRAME 8 ///< seeking based on frame number
/** * @addtogroup lavf_encoding * @{ */
#define AVSTREAM_INIT_IN_WRITE_HEADER 0 ///< stream parameters initialized in avformat_write_header #define AVSTREAM_INIT_IN_INIT_OUTPUT 1 ///< stream parameters initialized in avformat_init_output
/** * Allocate the stream private data and write the stream header to * an output media file. * * @param s Media file handle, must be allocated with avformat_alloc_context(). * Its oformat field must be set to the desired output format; * Its pb field must be set to an already opened AVIOContext. * @param options An AVDictionary filled with AVFormatContext and muxer-private options. * On return this parameter will be destroyed and replaced with a dict containing * options that were not found. May be NULL. * * @return AVSTREAM_INIT_IN_WRITE_HEADER on success if the codec had not already been fully initialized in avformat_init, * AVSTREAM_INIT_IN_INIT_OUTPUT on success if the codec had already been fully initialized in avformat_init, * negative AVERROR on failure. * * @see av_opt_find, av_dict_set, avio_open, av_oformat_next, avformat_init_output. */ av_warn_unused_result int avformat_write_header(AVFormatContext *s, AVDictionary **options);
/** * Allocate the stream private data and initialize the codec, but do not write the header. * May optionally be used before avformat_write_header to initialize stream parameters * before actually writing the header. * If using this function, do not pass the same options to avformat_write_header. * * @param s Media file handle, must be allocated with avformat_alloc_context(). * Its oformat field must be set to the desired output format; * Its pb field must be set to an already opened AVIOContext. * @param options An AVDictionary filled with AVFormatContext and muxer-private options. * On return this parameter will be destroyed and replaced with a dict containing * options that were not found. May be NULL. * * @return AVSTREAM_INIT_IN_WRITE_HEADER on success if the codec requires avformat_write_header to fully initialize, * AVSTREAM_INIT_IN_INIT_OUTPUT on success if the codec has been fully initialized, * negative AVERROR on failure. * * @see av_opt_find, av_dict_set, avio_open, av_oformat_next, avformat_write_header. */ av_warn_unused_result int avformat_init_output(AVFormatContext *s, AVDictionary **options);
/** * Write a packet to an output media file. * * This function passes the packet directly to the muxer, without any buffering * or reordering.
The caller is responsible for correctly interleaving the * packets if the format requires it. Callers that want libavformat to handle * the interleaving should call av_interleaved_write_frame() instead of this * function. * * @param s media file handle * @param pkt The packet containing the data to be written. Note that unlike * av_interleaved_write_frame(), this function does not take * ownership of the packet passed to it (though some muxers may make * an internal reference to the input packet). *
* This parameter can be NULL (at any time, not just at the end), in * order to immediately flush data buffered within the muxer, for * muxers that buffer up data internally before writing it to the * output. *
* Packet's @ref AVPacket.stream_index "stream_index" field must be * set to the index of the corresponding stream in @ref * AVFormatContext.streams "s->streams". *
* The timestamps (@ref AVPacket.pts "pts", @ref AVPacket.dts "dts") * must be set to correct values in the stream's timebase (unless the * output format is flagged with the AVFMT_NOTIMESTAMPS flag, then * they can be set to AV_NOPTS_VALUE). * The dts for subsequent packets passed to this function must be strictly * increasing when compared in their respective timebases (unless the * output format is flagged with the AVFMT_TS_NONSTRICT, then they * merely have to be nondecreasing). @ref AVPacket.duration * "duration" should also be set if known. * @return < 0 on error, = 0 if OK, 1 if flushed and there is no more data to flush * * @see av_interleaved_write_frame() */ int av_write_frame(AVFormatContext *s, AVPacket *pkt);
/** * Write a packet to an output media file ensuring correct interleaving. * * This function will buffer the packets internally as needed to make sure the * packets in the output file are properly interleaved, usually ordered by * increasing dts. Callers doing their own interleaving should call * av_write_frame() instead of this function. * * Using this function instead of av_write_frame() can give muxers advance * knowledge of future packets, improving e.g. the behaviour of the mp4 * muxer for VFR content in fragmenting mode. * * @param s media file handle * @param pkt The packet containing the data to be written. *
* If the packet is reference-counted, this function will take * ownership of this reference and unreference it later when it sees * fit. If the packet is not reference-counted, libavformat will * make a copy. * The returned packet will be blank (as if returned from * av_packet_alloc()), even on error. *
* This parameter can be NULL (at any time, not just at the end), to * flush the interleaving queues. *
* Packet's @ref AVPacket.stream_index "stream_index" field must be * set to the index of the corresponding stream in @ref * AVFormatContext.streams "s->streams". *
* The timestamps (@ref AVPacket.pts "pts", @ref AVPacket.dts "dts") * must be set to correct values in the stream's timebase (unless the * output format is flagged with the AVFMT_NOTIMESTAMPS flag, then * they can be set to AV_NOPTS_VALUE). * The dts for subsequent packets in one stream must be strictly * increasing (unless the output format is flagged with the * AVFMT_TS_NONSTRICT, then they merely have to be nondecreasing). * @ref AVPacket.duration "duration" should also be set if known. * * @return 0 on success, a negative AVERROR on error. * * @see av_write_frame(), AVFormatContext.max_interleave_delta */ int av_interleaved_write_frame(AVFormatContext *s, AVPacket *pkt); /** * Write an uncoded frame to an output media file. * * The frame must be correctly interleaved according to the container * specification; if not, av_interleaved_write_uncoded_frame() must be used. * * See av_interleaved_write_uncoded_frame() for details. */ int av_write_uncoded_frame(AVFormatContext *s, int stream_index, AVFrame *frame); /** * Write an uncoded frame to an output media file. * * If the muxer supports it, this function makes it possible to write an AVFrame * structure directly, without encoding it into a packet. * It is mostly useful for devices and similar special muxers that use raw * video or PCM data and will not serialize it into a byte stream. * * To test whether it is possible to use it with a given muxer and stream, * use av_write_uncoded_frame_query(). * * The caller gives up ownership of the frame and must not access it * afterwards. * * @return >=0 for success, a negative code on error */ int av_interleaved_write_uncoded_frame(AVFormatContext *s, int stream_index, AVFrame *frame); /** * Test whether a muxer supports uncoded frame. * * @return >=0 if an uncoded frame can be written to that muxer and stream, * <0 if not */ int av_write_uncoded_frame_query(AVFormatContext *s, int stream_index); /** * Write the stream trailer to an output media file and free the * file private data. * * May only be called after a successful call to avformat_write_header. * * @param s media file handle * @return 0 if OK, AVERROR_xxx on error */ int av_write_trailer(AVFormatContext *s); /** * Return the output format in the list of registered output formats * which best matches the provided parameters, or return NULL if * there is no match. * * @param short_name if non-NULL checks if short_name matches with the * names of the registered formats * @param filename if non-NULL checks if filename terminates with the * extensions of the registered formats * @param mime_type if non-NULL checks if mime_type matches with the * MIME type of the registered formats */ const AVOutputFormat *av_guess_format(const char *short_name, const char *filename, const char *mime_type); /** * Guess the codec ID based upon muxer and filename. */ enum AVCodecID av_guess_codec(const AVOutputFormat *fmt, const char *short_name, const char *filename, const char *mime_type, enum AVMediaType type); /** * Get timing information for the data currently output. * The exact meaning of "currently output" depends on the format. * It is mostly relevant for devices that have an internal buffer and/or * work in real time. 
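 *
 * A brief sketch (oc below is an assumed open output context and stream 0 an
 * arbitrary example) of querying what has actually been output so far:
 * @code
 * int64_t dts, wall;
 * if (av_get_output_timestamp(oc, 0, &dts, &wall) == 0)
 *     av_log(oc, AV_LOG_INFO, "last dts %"PRId64" output at %"PRId64" us\n",
 *            dts, wall);
 * @endcode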
* @param s media file handle * @param stream stream in the media file * @param[out] dts DTS of the last packet output for the stream, in stream * time_base units * @param[out] wall absolute time when that packet was output, * in microseconds * @return 0 if OK, AVERROR(ENOSYS) if the format does not support it * Note: some formats or devices may not allow measuring dts and wall * atomically. */ int av_get_output_timestamp(struct AVFormatContext *s, int stream, int64_t *dts, int64_t *wall);
/** * @} */
/** * @defgroup lavf_misc Utility functions * @ingroup libavf * @{ * * Miscellaneous utility functions related to both muxing and demuxing * (or neither). */
/** * Send a nice hexadecimal dump of a buffer to the specified file stream. * * @param f The file stream pointer where the dump should be sent to. * @param buf buffer * @param size buffer size * * @see av_hex_dump_log, av_pkt_dump2, av_pkt_dump_log2 */ void av_hex_dump(FILE *f, const uint8_t *buf, int size);
/** * Send a nice hexadecimal dump of a buffer to the log. * * @param avcl A pointer to an arbitrary struct of which the first field is a * pointer to an AVClass struct. * @param level The importance level of the message, lower values signifying * higher importance. * @param buf buffer * @param size buffer size * * @see av_hex_dump, av_pkt_dump2, av_pkt_dump_log2 */ void av_hex_dump_log(void *avcl, int level, const uint8_t *buf, int size);
/** * Send a nice dump of a packet to the specified file stream. * * @param f The file stream pointer where the dump should be sent to. * @param pkt packet to dump * @param dump_payload True if the payload must be displayed, too. * @param st AVStream that the packet belongs to */ void av_pkt_dump2(FILE *f, const AVPacket *pkt, int dump_payload, const AVStream *st);
/** * Send a nice dump of a packet to the log. * * @param avcl A pointer to an arbitrary struct of which the first field is a * pointer to an AVClass struct. * @param level The importance level of the message, lower values signifying * higher importance. * @param pkt packet to dump * @param dump_payload True if the payload must be displayed, too. * @param st AVStream that the packet belongs to */ void av_pkt_dump_log2(void *avcl, int level, const AVPacket *pkt, int dump_payload, const AVStream *st);
/** * Get the AVCodecID for the given codec tag tag. * If no codec id is found returns AV_CODEC_ID_NONE. * * @param tags list of supported codec_id-codec_tag pairs, as stored * in AVInputFormat.codec_tag and AVOutputFormat.codec_tag * @param tag codec tag to match to a codec ID */ enum AVCodecID av_codec_get_id(const struct AVCodecTag * const *tags, unsigned int tag);
/** * Get the codec tag for the given codec id id. * If no codec tag is found returns 0. * * @param tags list of supported codec_id-codec_tag pairs, as stored * in AVInputFormat.codec_tag and AVOutputFormat.codec_tag * @param id codec ID to match to a codec tag */ unsigned int av_codec_get_tag(const struct AVCodecTag * const *tags, enum AVCodecID id);
/** * Get the codec tag for the given codec id.
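 *
 * As an illustration, a hedged sketch of checking how a muxer (ofmt below is
 * an assumed AVOutputFormat pointer) would tag H.264:
 * @code
 * unsigned int tag;
 * if (ofmt->codec_tag && av_codec_get_tag2(ofmt->codec_tag, AV_CODEC_ID_H264, &tag))
 *     av_log(NULL, AV_LOG_INFO, "H.264 is stored with tag 0x%08x\n", tag);
 * @endcode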
* * @param tags list of supported codec_id - codec_tag pairs, as stored * in AVInputFormat.codec_tag and AVOutputFormat.codec_tag * @param id codec id that should be searched for in the list * @param tag A pointer to the found tag * @return 0 if id was not found in tags, > 0 if it was found */ int av_codec_get_tag2(const struct AVCodecTag * const *tags, enum AVCodecID id, unsigned int *tag); int av_find_default_stream_index(AVFormatContext *s); /** * Get the index for a specific timestamp. * * @param st stream that the timestamp belongs to * @param timestamp timestamp to retrieve the index for * @param flags if AVSEEK_FLAG_BACKWARD then the returned index will correspond * to the timestamp which is <= the requested one, if backward * is 0, then it will be >= * if AVSEEK_FLAG_ANY seek to any frame, only keyframes otherwise * @return < 0 if no such timestamp could be found */ int av_index_search_timestamp(AVStream *st, int64_t timestamp, int flags); /** * Get the index entry count for the given AVStream. * * @param st stream * @return the number of index entries in the stream */ int avformat_index_get_entries_count(const AVStream *st); /** * Get the AVIndexEntry corresponding to the given index. * * @param st Stream containing the requested AVIndexEntry. * @param idx The desired index. * @return A pointer to the requested AVIndexEntry if it exists, NULL otherwise. * * @note The pointer returned by this function is only guaranteed to be valid * until any function that takes the stream or the parent AVFormatContext * as input argument is called. */ const AVIndexEntry *avformat_index_get_entry(AVStream *st, int idx); /** * Get the AVIndexEntry corresponding to the given timestamp. * * @param st Stream containing the requested AVIndexEntry. * @param timestamp Timestamp to retrieve the index entry for. * @param flags If AVSEEK_FLAG_BACKWARD then the returned entry will correspond * to the timestamp which is <= the requested one, if backward * is 0, then it will be >= * if AVSEEK_FLAG_ANY seek to any frame, only keyframes otherwise. * @return A pointer to the requested AVIndexEntry if it exists, NULL otherwise. * * @note The pointer returned by this function is only guaranteed to be valid * until any function that takes the stream or the parent AVFormatContext * as input argument is called. */ const AVIndexEntry *avformat_index_get_entry_from_timestamp(AVStream *st, int64_t wanted_timestamp, int flags); /** * Add an index entry into a sorted list. Update the entry if the list * already contains it. * * @param timestamp timestamp in the time base of the given stream */ int av_add_index_entry(AVStream *st, int64_t pos, int64_t timestamp, int size, int distance, int flags); /** * Split a URL string into components. * * The pointers to buffers for storing individual components may be null, * in order to ignore that component. Buffers for components not found are * set to empty strings. If the port is not found, it is set to a negative * value. 
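 *
 * A minimal sketch (the RTSP URL below is only an example) of pulling the
 * host, port and path out of a URL while ignoring the authorization part:
 * @code
 * char proto[16], host[256], path[1024];
 * int port;
 * av_url_split(proto, sizeof(proto), NULL, 0, host, sizeof(host),
 *              &port, path, sizeof(path), "rtsp://example.com:8554/live");
 * @endcode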
* * @param proto the buffer for the protocol * @param proto_size the size of the proto buffer * @param authorization the buffer for the authorization * @param authorization_size the size of the authorization buffer * @param hostname the buffer for the host name * @param hostname_size the size of the hostname buffer * @param port_ptr a pointer to store the port number in * @param path the buffer for the path * @param path_size the size of the path buffer * @param url the URL to split */ void av_url_split(char *proto, int proto_size, char *authorization, int authorization_size, char *hostname, int hostname_size, int *port_ptr, char *path, int path_size, const char *url);
/** * Print detailed information about the input or output format, such as * duration, bitrate, streams, container, programs, metadata, side data, * codec and time base. * * @param ic the context to analyze * @param index index of the stream to dump information about * @param url the URL to print, such as source or destination file * @param is_output Select whether the specified context is an input(0) or output(1) */ void av_dump_format(AVFormatContext *ic, int index, const char *url, int is_output);
#define AV_FRAME_FILENAME_FLAGS_MULTIPLE 1 ///< Allow multiple %d
/** * Return in 'buf' the path with '%d' replaced by a number. * * Also handles the '%0nd' format where 'n' is the total number * of digits and '%%'. * * @param buf destination buffer * @param buf_size destination buffer size * @param path numbered sequence string * @param number frame number * @param flags AV_FRAME_FILENAME_FLAGS_* * @return 0 if OK, -1 on format error */ int av_get_frame_filename2(char *buf, int buf_size, const char *path, int number, int flags); int av_get_frame_filename(char *buf, int buf_size, const char *path, int number);
/** * Check whether filename actually is a numbered sequence generator. * * @param filename possible numbered sequence string * @return 1 if a valid numbered sequence string, 0 otherwise */ int av_filename_number_test(const char *filename);
/** * Generate an SDP for an RTP session. * * Note, this overwrites the id values of AVStreams in the muxer contexts * for getting unique dynamic payload types. * * @param ac array of AVFormatContexts describing the RTP streams. If the * array is composed of only one context, such context can contain * multiple AVStreams (one AVStream per RTP stream). Otherwise, * all the contexts in the array (an AVFormatContext per RTP stream) * must contain only one AVStream. * @param n_files number of AVFormatContexts contained in ac * @param buf buffer where the SDP will be stored (must be allocated by * the caller) * @param size the size of the buffer * @return 0 if OK, AVERROR_xxx on error */ int av_sdp_create(AVFormatContext *ac[], int n_files, char *buf, int size);
/** * Return a positive value if the given filename has one of the given * extensions, 0 otherwise. * * @param filename file name to check against the given extensions * @param extensions a comma-separated list of filename extensions */ int av_match_ext(const char *filename, const char *extensions);
/** * Test if the given container can store a codec. * * @param ofmt container to check for compatibility * @param codec_id codec to potentially store in container * @param std_compliance standards compliance level, one of FF_COMPLIANCE_* * * @return 1 if codec with ID codec_id can be stored in ofmt, 0 if it cannot. * A negative number if this information is not available. */ int avformat_query_codec(const AVOutputFormat *ofmt, enum AVCodecID codec_id, int std_compliance);
/** * @defgroup riff_fourcc RIFF FourCCs * @{ * Get the tables mapping RIFF FourCCs to libavcodec AVCodecIDs.
The tables are * meant to be passed to av_codec_get_id()/av_codec_get_tag() as in the * following code: * @code * uint32_t tag = MKTAG('H', '2', '6', '4'); * const struct AVCodecTag *table[] = { avformat_get_riff_video_tags(), 0 }; * enum AVCodecID id = av_codec_get_id(table, tag); * @endcode */ /** * @return the table mapping RIFF FourCCs for video to libavcodec AVCodecID. */ const struct AVCodecTag *avformat_get_riff_video_tags(void); /** * @return the table mapping RIFF FourCCs for audio to AVCodecID. */ const struct AVCodecTag *avformat_get_riff_audio_tags(void); /** * @return the table mapping MOV FourCCs for video to libavcodec AVCodecID. */ const struct AVCodecTag *avformat_get_mov_video_tags(void); /** * @return the table mapping MOV FourCCs for audio to AVCodecID. */ const struct AVCodecTag *avformat_get_mov_audio_tags(void); /** * @} */ /** * Guess the sample aspect ratio of a frame, based on both the stream and the * frame aspect ratio. * * Since the frame aspect ratio is set by the codec but the stream aspect ratio * is set by the demuxer, these two may not be equal. This function tries to * return the value that you should use if you would like to display the frame. * * Basic logic is to use the stream aspect ratio if it is set to something sane * otherwise use the frame aspect ratio. This way a container setting, which is * usually easy to modify can override the coded value in the frames. * * @param format the format context which the stream is part of * @param stream the stream which the frame is part of * @param frame the frame with the aspect ratio to be determined * @return the guessed (valid) sample_aspect_ratio, 0/1 if no idea */ AVRational av_guess_sample_aspect_ratio(AVFormatContext *format, AVStream *stream, AVFrame *frame); /** * Guess the frame rate, based on both the container and codec information. * * @param ctx the format context which the stream is part of * @param stream the stream which the frame is part of * @param frame the frame for which the frame rate should be determined, may be NULL * @return the guessed (valid) frame rate, 0/1 if no idea */ AVRational av_guess_frame_rate(AVFormatContext *ctx, AVStream *stream, AVFrame *frame); /** * Check if the stream st contained in s is matched by the stream specifier * spec. * * See the "stream specifiers" chapter in the documentation for the syntax * of spec. * * @return >0 if st is matched by spec; * 0 if st is not matched by spec; * AVERROR code if spec is invalid * * @note A stream specifier can match several streams in the format. */ int avformat_match_stream_specifier(AVFormatContext *s, AVStream *st, const char *spec); int avformat_queue_attached_pictures(AVFormatContext *s); enum AVTimebaseSource { AVFMT_TBCF_AUTO = -1, AVFMT_TBCF_DECODER, AVFMT_TBCF_DEMUXER, #if FF_API_R_FRAME_RATE AVFMT_TBCF_R_FRAMERATE, #endif }; /** * Transfer internal timing information from one stream to another. * * This function is useful when doing stream copy. * * @param ofmt target output format for ost * @param ost output stream which needs timings copy and adjustments * @param ist reference input stream to copy timings from * @param copy_tb define from where the stream codec timebase needs to be imported */ int avformat_transfer_internal_stream_timing_info(const AVOutputFormat *ofmt, AVStream *ost, const AVStream *ist, enum AVTimebaseSource copy_tb); /** * Get the internal codec timebase from a stream. 
* * @param st input stream to extract the timebase from */ AVRational av_stream_get_codec_timebase(const AVStream *st); /** * @} */ #endif /* AVFORMAT_AVFORMAT_H */