46 #include <stdatomic.h>
131 *data = matrix_encoding;
145 w_align = 1 << desc->log2_chroma_w;
146 h_align = 1 << desc->log2_chroma_h;
149 switch (s->pix_fmt) {
313 w_align = FFMAX(w_align, 8);
333 for (i = 0; i < 4; i++)
340 int chroma_shift = desc->log2_chroma_w;
345 align = FFMAX(linesize_align[0], linesize_align[3]);
346 linesize_align[1] <<= chroma_shift;
347 linesize_align[2] <<= chroma_shift;
348 align = FFMAX3(align, linesize_align[1], linesize_align[2]);
358 *xpos = (pos&1) * 128;
359 *ypos = ((pos>>1)^(pos<4)) * 128;
377 int buf_size, int align)
382 frame->nb_samples, sample_fmt,
384 if (buf_size < needed_size)
397 sample_fmt, align)) < 0) {
417 for (p = 0; p < desc->nb_components; p++) {
418 uint8_t *dst = frame->data[p];
419 int is_chroma = p == 1 || p == 2;
422 if (desc->comp[0].depth >= 9) {
423 ((uint16_t*)dst)[0] = c[p];
425 dst += frame->linesize[p];
426 for (y = 1; y < height; y++) {
427 memcpy(dst, frame->data[p], 2*bytes);
428 dst += frame->linesize[p];
431 for (y = 0; y < height; y++) {
432 memset(dst, c[p], bytes);
433 dst += frame->linesize[p];
460 return "unknown_codec";
575 if (be < 0 || be > 1)
599 uint32_t tag, int bits_per_coded_sample, int64_t bitrate,
600 uint8_t *extradata, int frame_size, int frame_bytes)
603 int framecount = (ba > 0 && frame_bytes / ba > 0) ? frame_bytes / ba : 1;
606 if (bps > 0 && ch > 0 && frame_bytes > 0 && ch < 32768 && bps < 32768)
607 return (frame_bytes * 8LL) / (bps * ch);
608 bps = bits_per_coded_sample;
626 if (framecount > INT_MAX/1024)
628 return 1024 * framecount;
638 return 256 * sr / 245;
640 return 588 * sr / 44100;
644 return (480 << (sr / 22050));
648 return sr <= 24000 ? 576 : 1152;
668 if (frame_bytes > 0) {
671 return 240 * (frame_bytes / 32);
673 return 256 * (frame_bytes / 64);
675 return 160 * (frame_bytes / 20);
680 return frame_bytes * 8 / bps;
683 if (ch > 0 && ch < INT_MAX/16) {
687 return frame_bytes / (40 * ch) * 256;
689 return (frame_bytes - 4 * ch) / (128 * ch) * 256;
691 return frame_bytes / (9 * ch) * 16;
694 frame_bytes /= 16 * ch;
695 if (frame_bytes > INT_MAX / 28)
697 return frame_bytes * 28;
702 return (frame_bytes - 4 * ch) * 2 / ch;
704 return (frame_bytes - 4) * 2 / ch;
706 return (frame_bytes - 8) * 2;
710 return frame_bytes * 14LL / (8 * ch);
713 return (frame_bytes / 128) * 224 / ch;
715 return (frame_bytes - 6 - ch) / ch;
717 return (frame_bytes - 8) / ch;
719 return (frame_bytes - 2 * ch) / ch;
721 return 3 * frame_bytes / ch;
723 return 6 * frame_bytes / ch;
725 return 2 * (frame_bytes / (5 * ch));
728 return 4 * frame_bytes / ch;
735 return frame_bytes / ch;
737 return frame_bytes * 2 / ch;
743 int blocks = frame_bytes / ba;
747 if (bps < 2 || bps > 5)
749 tmp = blocks * (1LL + (ba - 4 * ch) / (bps * ch) * 8);
752 tmp = blocks * (((ba - 16LL) * 2 / 3 * 4) / ch);
755 tmp = blocks * (1 + (ba - 4LL * ch) * 2 / ch);
758 tmp = blocks * ((ba - 4LL * ch) * 2 / ch);
761 tmp = blocks * (2 + (ba - 7LL * ch) * 2LL / ch);
764 tmp = blocks * (ba - 16LL) * 2 / ch;
778 if (bps < 4 || frame_bytes < 3)
780 return 2 * ((frame_bytes - 3) / ((bps * 2 / 8) * ch));
782 if (bps < 4 || frame_bytes < 4)
784 return (frame_bytes - 4) / ((FFALIGN(ch, 2) * bps) / 8);
786 return 2 * (frame_bytes / ((bps + 4) / 4)) / ch;
798 if (bitrate > 0 && frame_bytes > 0 && sr > 0 && ba > 1) {
800 return (frame_bytes * 8LL * sr) / bitrate;
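The heuristics above fall back, in order, from an exact bits-per-sample formula to codec-specific rules and finally to the stream bitrate. A minimal arithmetic sketch of that last fallback (line 800), using made-up numbers rather than any particular codec path:

    #include <inttypes.h>
    #include <stdio.h>

    int main(void)
    {
        /* Hypothetical 417-byte frame of a 128 kb/s, 44100 Hz stream:
         * duration = frame_bytes * 8 * sr / bitrate = 1149 samples. */
        int64_t frame_bytes = 417, bitrate = 128000, sr = 44100;
        printf("%"PRId64" samples\n", frame_bytes * 8 * sr / bitrate);
        return 0;
    }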
897 f->owner[0] = f->owner[1] = avctx;
945 uint32_t *av_restrict state)
953 for (i = 0; i < 3; i++) {
956 if (tmp == 0x100 || p == end)
961 if (p[-1] > 1 ) p += 3;
962 else if (p[-2] ) p += 2;
963 else if (p[-3]|(p[-1]-1)) p++;
970 p = FFMIN(p, end) - 4;
983 *size = sizeof(*props);
1023 unsigned low = bcd & 0xf;
1024 unsigned high = bcd >> 4;
1025 if (low > 9 || high > 9)
1027 return low + 10*high;
1031 void **data, size_t *sei_size)
1046 tc = (uint32_t*)sd->data;
1049 *sei_size = sizeof(uint32_t) * 4;
1053 sei_data = (uint8_t*)*data + prefix_len;
1058 for (int j = 1; j <= m; j++) {
1059 uint32_t tcsmpte = tc[j];
1060 unsigned hh = bcd2uint(tcsmpte & 0x3f);
1061 unsigned mm = bcd2uint(tcsmpte>>8 & 0x7f);
1063 unsigned ff = bcd2uint(tcsmpte>>24 & 0x3f);
1064 unsigned drop = tcsmpte & 1<<30 && !0;
1071 pc = !!(tcsmpte & 1 << 7);
1073 pc = !!(tcsmpte & 1 << 23);
1074 ff = (ff + pc) & 0x7f;
1105 if (!bits_per_coded_sample) {
1116 const int * array_valid_values, int default_value)
1121 ref_val = array_valid_values[i];
1122 if (ref_val == INT_MAX)
1130 "%s %d are not supported. Set to default value : %d\n", val_name, val, default_value);
1131 return default_value;
#define FF_ALLOCZ_TYPED_ARRAY(p, nelem)
int frame_size
Number of samples per channel in an audio frame.
It's the only field you need to keep, assuming you have a context. There is some magic you don't need to care about around this; just let it be (in the first position) for now. Options: then comes the options array. This is what will define the user-accessible options. For example…
@ AV_SAMPLE_FMT_FLTP
float, planar
@ AV_PIX_FMT_YUV420P9LE
planar YUV 4:2:0, 13.5bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian
#define AV_LOG_WARNING
Something somehow does not look correct.
uint8_t * extradata
Extra binary data needed for initializing the decoder, codec-dependent.
AVPixelFormat
Pixel format.
@ AV_CODEC_ID_ADPCM_IMA_QT
Filter: the word "frame" indicates either a video frame or a group of audio samples, as stored in an AVFrame structure. Format: for each input and each output, the list of supported formats. For video that means pixel format; for audio that means channel layout and sample format. The lists are references to shared objects: when the negotiation mechanism computes the intersection of the formats supported at each end of a link, all references to both lists are replaced with a reference to the intersection, and when a single format is eventually chosen for a link amongst the remaining ones, all references to the list are updated. That means that if a filter requires that its input and output have the same format amongst a supported list, all it has to do is use a reference to the same list of formats. query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism to try again later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references, ownership and permissions…
int ff_get_format(AVCodecContext *avctx, const enum AVPixelFormat *fmt)
Select the (possibly hardware accelerated) pixel format.
int ff_thread_get_buffer(AVCodecContext *avctx, ThreadFrame *f, int flags)
Wrapper around get_buffer() for frame-multithreaded codecs.
int sample_rate
samples per second
AVFrameSideData * av_frame_get_side_data(const AVFrame *frame, enum AVFrameSideDataType type)
AVFrameSideData * av_frame_new_side_data(AVFrame *frame, enum AVFrameSideDataType type, size_t size)
Add a new side data to a frame.
This struct describes the properties of an encoded stream.
@ AV_PIX_FMT_GBRP16BE
planar GBR 4:4:4 48bpp, big-endian
void ff_thread_report_progress2(AVCodecContext *avctx, int field, int thread, int n)
@ AV_PIX_FMT_GBRP10BE
planar GBR 4:4:4 30bpp, big-endian
const char * name
short name for the profile
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
@ AV_PIX_FMT_YUV422P14LE
planar YUV 4:2:2,28bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian
void ff_thread_await_progress2(AVCodecContext *avctx, int field, int thread, int shift)
const char * name
Name of the codec described by this descriptor.
AVPacketSideData * coded_side_data
Additional data associated with the entire coded stream.
int ff_thread_can_start_frame(AVCodecContext *avctx)
int av_samples_fill_arrays(uint8_t **audio_data, int *linesize, const uint8_t *buf, int nb_channels, int nb_samples, enum AVSampleFormat sample_fmt, int align)
Fill plane data pointers and linesize for samples with sample format sample_fmt.
static void init_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size)
Initialize the PutBitContext s.
@ AV_FRAME_DATA_S12M_TIMECODE
Timecode which conforms to SMPTE ST 12-1.
@ AV_CODEC_ID_PCM_S32LE_PLANAR
const AVCodec * avcodec_find_encoder(enum AVCodecID id)
Find a registered encoder with a matching codec ID.
This structure describes decoded (raw) audio or video data.
static void put_bits(Jpeg2000EncoderContext *s, int val, int n)
Write the bit val n times.
@ AV_PIX_FMT_YUVA444P10BE
planar YUV 4:4:4 40bpp, (1 Cr & Cb sample per 1x1 Y & A samples, big-endian)
@ AV_PIX_FMT_YUV440P12BE
planar YUV 4:4:0,24bpp, (1 Cr & Cb sample per 1x2 Y samples), big-endian
@ AV_CODEC_ID_PCM_S16BE_PLANAR
@ AV_SAMPLE_FMT_S32P
signed 32 bits, planar
@ AV_PIX_FMT_YUV420P14BE
planar YUV 4:2:0,21bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian
void avcodec_align_dimensions(AVCodecContext *s, int *width, int *height)
Modify width and height values so that they will result in a memory buffer that is acceptable for the...
@ AV_PIX_FMT_YUV420P16LE
planar YUV 4:2:0, 24bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian
@ AV_CODEC_ID_ADPCM_IMA_OKI
@ AV_PIX_FMT_GBRP14BE
planar GBR 4:4:4 42bpp, big-endian
@ AV_PIX_FMT_BGR24
packed RGB 8:8:8, 24bpp, BGRBGR...
int av_get_bits_per_pixel(const AVPixFmtDescriptor *pixdesc)
Return the number of bits per pixel used by the pixel format described by pixdesc.
uint32_t codec_tag
Additional information about the codec (corresponds to the AVI FOURCC).
@ AV_PIX_FMT_YUV440P
planar YUV 4:4:0 (1 Cr & Cb sample per 1x2 Y samples)
AVBufferRef * av_buffer_ref(const AVBufferRef *buf)
Create a new reference to an AVBuffer.
@ AV_PIX_FMT_YUVA444P9BE
planar YUV 4:4:4 36bpp, (1 Cr & Cb sample per 1x1 Y & A samples), big-endian
@ AV_PIX_FMT_YUV422P9BE
planar YUV 4:2:2, 18bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian
const char * avcodec_profile_name(enum AVCodecID codec_id, int profile)
Return a name for the specified profile, if available.
@ AV_CODEC_ID_PCM_S16LE_PLANAR
@ AV_CODEC_ID_ADPCM_THP_LE
@ AV_PIX_FMT_YUV444P16LE
planar YUV 4:4:4, 48bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian
@ AV_PIX_FMT_GBRAP12LE
planar GBR 4:4:4:4 48bpp, little-endian
@ AV_CODEC_ID_DSD_MSBF_PLANAR
@ AV_PIX_FMT_GRAY16BE
Y , 16bpp, big-endian.
int avcodec_enum_to_chroma_pos(int *xpos, int *ypos, enum AVChromaLocation pos)
Converts AVChromaLocation to swscale x/y chroma position.
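A minimal sketch of the conversion documented above; the expected values assume the swscale convention where chroma positions run 0..256 with 128 at the pixel center:

    #include <libavcodec/avcodec.h>
    #include <stdio.h>

    int main(void)
    {
        int x, y;
        /* MPEG-style 4:2:0: chroma co-sited horizontally, centered vertically. */
        if (avcodec_enum_to_chroma_pos(&x, &y, AVCHROMA_LOC_LEFT) == 0)
            printf("left:    x=%d y=%d\n", x, y); /* x=0, y=128 */
        if (avcodec_enum_to_chroma_pos(&x, &y, AVCHROMA_LOC_TOPLEFT) == 0)
            printf("topleft: x=%d y=%d\n", x, y); /* x=0, y=0 */
        return 0;
    }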
@ AV_PIX_FMT_GBRAP
planar GBRA 4:4:4:4 32bpp
@ AV_PIX_FMT_YUV420P12LE
planar YUV 4:2:0,18bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian
@ AV_FRAME_DATA_MATRIXENCODING
The data is the AVMatrixEncoding enum defined in libavutil/channel_layout.h.
int(* encode_sub)(struct AVCodecContext *, uint8_t *buf, int buf_size, const struct AVSubtitle *sub)
const uint8_t * avpriv_find_start_code(const uint8_t *av_restrict p, const uint8_t *end, uint32_t *av_restrict state)
static const struct twinvq_data tab
AVCodecContext * owner[2]
static double val(void *priv, double ch)
@ AV_SAMPLE_FMT_S64P
signed 64 bits, planar
enum AVPixelFormat ff_thread_get_format(AVCodecContext *avctx, const enum AVPixelFormat *fmt)
Wrapper around get_format() for frame-multithreaded codecs.
@ AV_CODEC_ID_MP3
preferred ID for decoding MPEG audio layer 1, 2 or 3
#define ss(width, name, subs,...)
@ AV_PIX_FMT_YUVA444P16BE
planar YUV 4:4:4 64bpp, (1 Cr & Cb sample per 1x1 Y & A samples, big-endian)
@ AV_PIX_FMT_YUV444P10BE
planar YUV 4:4:4, 30bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian
int av_image_check_size2(unsigned int w, unsigned int h, int64_t max_pixels, enum AVPixelFormat pix_fmt, int log_offset, void *log_ctx)
Check if the given dimension of an image is valid, meaning that all bytes of a plane of an image with...
@ AV_PIX_FMT_YUV420P10LE
planar YUV 4:2:0, 15bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian
#define FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM
The decoder extracts and fills its parameters even if the frame is skipped due to the skip_frame sett...
@ AV_PIX_FMT_YUV444P12LE
planar YUV 4:4:4,36bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian
@ AV_PIX_FMT_YUVJ411P
planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples) full scale (JPEG), deprecated in favor ...
int av_get_bits_per_sample(enum AVCodecID codec_id)
Return codec bits per sample.
@ AV_CODEC_ID_ADPCM_SBPRO_2
@ AV_PIX_FMT_YUV422P12BE
planar YUV 4:2:2,24bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian
@ AV_PIX_FMT_YUV444P14LE
planar YUV 4:4:4,42bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian
@ AV_PIX_FMT_BGR8
packed RGB 3:3:2, 8bpp, (msb)2B 3G 3R(lsb)
#define FF_ARRAY_ELEMS(a)
int frame_size
Audio only.
void av_memcpy_backptr(uint8_t *dst, int back, int cnt)
Overlapping memcpy() implementation.
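A small illustration of why a dedicated overlapping copy exists: with a back-distance shorter than the count, the routine repeats the preceding pattern, which is what LZ77-style decoders rely on (a hedged sketch, not taken from this file):

    #include <libavutil/mem.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint8_t buf[8] = { 'a', 'b' };
        /* Copy 6 bytes from 2 positions back; a plain memcpy() would overlap. */
        av_memcpy_backptr(buf + 2, 2, 6);
        fwrite(buf, 1, sizeof(buf), stdout); /* prints "abababab" */
        putchar('\n');
        return 0;
    }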
@ AV_PIX_FMT_YUVJ422P
planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV422P and setting col...
@ AV_PIX_FMT_GBRAP16BE
planar GBRA 4:4:4:4 64bpp, big-endian
@ AV_CODEC_ID_ADPCM_IMA_ACORN
This struct describes the properties of a single codec described by an AVCodecID.
@ AV_PIX_FMT_GBRP16LE
planar GBR 4:4:4 48bpp, little-endian
@ AV_PIX_FMT_YUVA420P
planar YUV 4:2:0, 20bpp, (1 Cr & Cb sample per 2x2 Y & A samples)
#define AV_CEIL_RSHIFT(a, b)
void * av_realloc_array(void *ptr, size_t nmemb, size_t size)
@ AV_CODEC_ID_ADPCM_IMA_EA_SEAD
@ AV_CODEC_ID_ADPCM_IMA_DK3
@ AV_PIX_FMT_GBRP12LE
planar GBR 4:4:4 36bpp, little-endian
#define FF_PROFILE_UNKNOWN
#define av_assert0(cond)
assert() equivalent, that is always enabled.
int av_sample_fmt_is_planar(enum AVSampleFormat sample_fmt)
Check if the sample format is planar.
@ AV_CODEC_ID_ADPCM_IMA_APC
@ AV_PIX_FMT_YUVA420P16BE
planar YUV 4:2:0 40bpp, (1 Cr & Cb sample per 2x2 Y & A samples, big-endian)
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
int ff_side_data_update_matrix_encoding(AVFrame *frame, enum AVMatrixEncoding matrix_encoding)
Add or update AV_FRAME_DATA_MATRIXENCODING side data.
@ AV_CODEC_ID_ADPCM_IMA_ISS
@ AV_CODEC_ID_BINKAUDIO_DCT
int avcodec_fill_audio_frame(AVFrame *frame, int nb_channels, enum AVSampleFormat sample_fmt, const uint8_t *buf, int buf_size, int align)
Fill AVFrame audio data and linesize pointers.
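A minimal sketch of wrapping an existing interleaved buffer into an AVFrame with this function; the helper name and the S16 layout are assumptions, and buf must outlive the frame since only the data/linesize pointers are filled in:

    #include <libavcodec/avcodec.h>
    #include <libavutil/frame.h>
    #include <stdint.h>

    /* Hypothetical helper: expose an interleaved S16 buffer as frame data. */
    static int wrap_s16_buffer(AVFrame *frame, uint8_t *buf, int buf_size,
                               int channels, int nb_samples)
    {
        frame->nb_samples = nb_samples; /* must be set before the call */
        return avcodec_fill_audio_frame(frame, channels, AV_SAMPLE_FMT_S16,
                                        buf, buf_size, 0);
    }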
int ff_alloc_entries(AVCodecContext *avctx, int count)
void avcodec_align_dimensions2(AVCodecContext *s, int *width, int *height, int linesize_align[AV_NUM_DATA_POINTERS])
Modify width and height values so that they will result in a memory buffer that is acceptable for the...
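A sketch of the usual call pattern, e.g. when sizing a custom buffer in a get_buffer2() callback; the helper is hypothetical, and the resulting padding depends on avctx->pix_fmt and the codec's requirements:

    #include <libavcodec/avcodec.h>

    static void padded_dimensions(AVCodecContext *avctx, int *w, int *h)
    {
        int linesize_align[AV_NUM_DATA_POINTERS];
        *w = avctx->width;
        *h = avctx->height;
        /* Rounds *w/*h up and reports per-plane linesize alignment. */
        avcodec_align_dimensions2(avctx, w, h, linesize_align);
    }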
It's the only field you need to keep, assuming you have a context. There is some magic you don't need to care about around this field.
@ AV_CODEC_ID_ADPCM_IMA_SMJPEG
@ AV_PIX_FMT_GBRP10LE
planar GBR 4:4:4 30bpp, little-endian
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
This structure describes the bitrate properties of an encoded bitstream.
@ AV_PIX_FMT_YUVJ444P
planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV444P and setting col...
@ AV_PIX_FMT_YUV444P10LE
planar YUV 4:4:4, 30bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian
@ AV_PIX_FMT_YUVA422P10LE
planar YUV 4:2:2 30bpp, (1 Cr & Cb sample per 2x1 Y & A samples, little-endian)
int ff_thread_ref_frame(ThreadFrame *dst, const ThreadFrame *src)
@ AV_PIX_FMT_YUV444P9BE
planar YUV 4:4:4, 27bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian
@ AV_PIX_FMT_YUV422P10BE
planar YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian
@ AV_PIX_FMT_YUV422P16LE
planar YUV 4:2:2, 32bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian
@ AV_CODEC_ID_ADPCM_EA_XAS
int ff_match_2uint16(const uint16_t(*tab)[2], int size, int a, int b)
Return the index into tab at which {a,b} match elements {[0],[1]} of tab.
@ AV_PIX_FMT_GBRAP12BE
planar GBR 4:4:4:4 48bpp, big-endian
void ff_thread_release_buffer(AVCodecContext *avctx, ThreadFrame *f)
Wrapper around release_buffer() for frame-multithreaded codecs.
@ AV_CODEC_ID_INTERPLAY_VIDEO
@ AV_CODEC_ID_ADPCM_YAMAHA
@ AV_CODEC_ID_ADPCM_IMA_WS
@ AV_PIX_FMT_YUYV422
packed YUV 4:2:2, 16bpp, Y0 Cb Y1 Cr
Rational number (pair of numerator and denominator).
@ AV_CODEC_ID_INTERPLAY_DPCM
@ AV_PIX_FMT_YUVJ420P
planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV420P and setting col...
@ AV_CODEC_ID_ADPCM_IMA_DK4
int64_t bit_rate
the average bitrate
enum AVPacketSideDataType type
@ AV_PIX_FMT_YUVA422P12LE
planar YUV 4:2:2,24bpp, (1 Cr & Cb sample per 2x1 Y samples), 12b alpha, little-endian
@ AV_PIX_FMT_YUVA444P12BE
planar YUV 4:4:4,36bpp, (1 Cr & Cb sample per 1x1 Y samples), 12b alpha, big-endian
const AVProfile * profiles
array of recognized profiles, or NULL if unknown, array is terminated by {FF_PROFILE_UNKNOWN}
void av_fast_mallocz(void *ptr, unsigned int *size, size_t min_size)
Allocate and clear a buffer, reusing the given one if large enough.
@ AV_PIX_FMT_YUVA444P9LE
planar YUV 4:4:4 36bpp, (1 Cr & Cb sample per 1x1 Y & A samples), little-endian
@ AV_CODEC_ID_ADPCM_IMA_AMV
@ AV_PIX_FMT_YUVA420P16LE
planar YUV 4:2:0 40bpp, (1 Cr & Cb sample per 2x2 Y & A samples, little-endian)
@ AV_PIX_FMT_RGB8
packed RGB 3:3:2, 8bpp, (msb)2R 3G 3B(lsb)
@ AV_PIX_FMT_BGR0
packed BGR 8:8:8, 32bpp, BGRXBGRX... X=unused/undefined
@ AV_PIX_FMT_YUV440P10LE
planar YUV 4:4:0,20bpp, (1 Cr & Cb sample per 1x2 Y samples), little-endian
@ AV_PIX_FMT_GRAY8
Y , 8bpp.
@ AV_PIX_FMT_YUVA420P9LE
planar YUV 4:2:0 22.5bpp, (1 Cr & Cb sample per 2x2 Y & A samples), little-endian
void ff_color_frame(AVFrame *frame, const int c[4])
Undefined Behavior: in the C language, some operations are undefined, like signed integer overflow, dereferencing freed pointers, or accessing outside allocated space. Undefined Behavior must not occur in a C program; it is not safe even if the output of the undefined operations is unused. The unsafety may seem nit-picking, but optimizing compilers have in fact optimized code on the assumption that no Undefined Behavior occurs. Optimizing code based on wrong assumptions can and has in some cases led to effects beyond the output of computations. The signed integer overflow problem in speed-critical code: code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not…
int sample_rate
Audio only.
@ AV_PIX_FMT_YUV420P14LE
planar YUV 4:2:0,21bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian
@ AV_CODEC_ID_PCM_S24LE_PLANAR
@ AV_CODEC_ID_GSM
as in Berlin toast format
AVCodecID
Identify the syntax and semantics of the bitstream.
@ AV_PIX_FMT_YUV444P14BE
planar YUV 4:4:4,42bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian
const AVCodec * avcodec_find_decoder(enum AVCodecID id)
Find a registered decoder with a matching codec ID.
@ AV_PIX_FMT_YUV420P9BE
The following 12 formats have the disadvantage of needing 1 format for each bit depth.
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented.
int av_codec_is_decoder(const AVCodec *codec)
int av_get_exact_bits_per_sample(enum AVCodecID codec_id)
Return codec bits per sample.
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Get a buffer for a frame.
@ AV_PIX_FMT_RGB24
packed RGB 8:8:8, 24bpp, RGBRGB...
@ AV_PIX_FMT_YUV440P12LE
planar YUV 4:4:0,24bpp, (1 Cr & Cb sample per 1x2 Y samples), little-endian
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
@ AV_SAMPLE_FMT_U8P
unsigned 8 bits, planar
@ AV_CODEC_ID_ADPCM_IMA_RAD
@ AV_PIX_FMT_YUV420P12BE
planar YUV 4:2:0,18bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian
@ AV_PIX_FMT_YUV422P10LE
planar YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian
@ AV_CODEC_ID_ADPCM_IMA_ALP
@ AV_PIX_FMT_YUV422P14BE
planar YUV 4:2:2,28bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian
int avpriv_codec_get_cap_skip_frame_fill_param(const AVCodec *codec)
int(* encode2)(struct AVCodecContext *avctx, struct AVPacket *avpkt, const struct AVFrame *frame, int *got_packet_ptr)
Encode data to an AVPacket.
#define AV_NUM_DATA_POINTERS
int(* receive_packet)(struct AVCodecContext *avctx, struct AVPacket *avpkt)
Encode API with decoupled frame/packet dataflow.
@ AV_PIX_FMT_GBRP9BE
planar GBR 4:4:4 27bpp, big-endian
AVCPBProperties * ff_add_cpb_side_data(AVCodecContext *avctx)
Add a CPB properties side data to an encoding context.
@ AVCHROMA_LOC_UNSPECIFIED
@ AV_PIX_FMT_YUV420P10BE
planar YUV 4:2:0, 15bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian
@ AV_PIX_FMT_GBRP9LE
planar GBR 4:4:4 27bpp, little-endian
int(* receive_frame)(struct AVCodecContext *avctx, struct AVFrame *frame)
Decode API with decoupled packet/frame dataflow.
The reader does not expect b to be semantically negative here, and if the code is changed, by maybe adding a division or other operation, the signedness will almost certainly be mistaken. To avoid this confusion a new type was introduced: SUINT is the C unsigned type, but it holds a signed int; to use the same example, SUINT a…
@ AV_PIX_FMT_YUVA444P
planar YUV 4:4:4 32bpp, (1 Cr & Cb sample per 1x1 Y & A samples)
void ff_reset_entries(AVCodecContext *avctx)
int ff_int_from_list_or_default(void *ctx, const char *val_name, int val, const int *array_valid_values, int default_value)
Check if a value is in the list.
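A hedged sketch of how a libavcodec encoder might use this internal helper (not public API; the surrounding encoder and the rate list are invented for illustration, and the declaration is assumed to come from libavcodec's internal.h). The list must end with INT_MAX, which is the terminator checked by the loop at line 1122 above:

    #include <limits.h>
    #include "avcodec.h"
    #include "internal.h"

    static const int supported_rates[] = { 32000, 44100, 48000, INT_MAX };

    static int pick_sample_rate(AVCodecContext *avctx)
    {
        /* Falls back to 48000 with a warning if the user value is unsupported. */
        return ff_int_from_list_or_default(avctx, "sample rate",
                                           avctx->sample_rate,
                                           supported_rates, 48000);
    }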
@ AV_PIX_FMT_YUVA420P10LE
planar YUV 4:2:0 25bpp, (1 Cr & Cb sample per 2x2 Y & A samples, little-endian)
@ AV_SAMPLE_FMT_S16P
signed 16 bits, planar
void ff_thread_await_progress(ThreadFrame *f, int progress, int field)
Wait for earlier decoding threads to finish reference pictures.
int channels
number of audio channels
AVChromaLocation
Location of chroma samples.
@ AV_PIX_FMT_YUVA422P10BE
planar YUV 4:2:2 30bpp, (1 Cr & Cb sample per 2x1 Y & A samples, big-endian)
int av_codec_is_encoder(const AVCodec *codec)
uint64_t vbv_delay
The delay between the time the packet this structure is associated with is received and the time when...
const char * avcodec_get_name(enum AVCodecID id)
Get the name of a codec.
int bits_per_coded_sample
bits per sample/pixel from the demuxer (needed for huffyuv).
@ AV_PIX_FMT_YUVA444P12LE
planar YUV 4:4:4,36bpp, (1 Cr & Cb sample per 1x1 Y samples), 12b alpha, little-endian
const struct AVCodecHWConfigInternal *const * hw_configs
Array of pointers to hardware configurations supported by the codec, or NULL if no hardware supported...
@ AV_PIX_FMT_YUVA422P9BE
planar YUV 4:2:2 27bpp, (1 Cr & Cb sample per 2x1 Y & A samples), big-endian
void ff_thread_finish_setup(AVCodecContext *avctx)
If the codec defines update_thread_context(), call this when they are ready for the next thread to st...
#define i(width, name, range_min, range_max)
const char * av_get_profile_name(const AVCodec *codec, int profile)
Return a name for the specified profile, if available.
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
@ AV_SAMPLE_FMT_U8
unsigned 8 bits
int block_align
Audio only.
AVCPBProperties * av_cpb_properties_alloc(size_t *size)
Allocate a CPB properties structure and initialize its fields to default values.
@ AV_CODEC_ID_DSD_LSBF_PLANAR
AVSampleFormat
Audio sample formats.
void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size)
Same behaviour as av_fast_malloc(), but the buffer has additional AV_INPUT_BUFFER_PADDING_SIZE at the end w...
#define AV_PIX_FMT_RGB555
unsigned int av_xiphlacing(unsigned char *s, unsigned int v)
Encode extradata length to a buffer.
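Xiph lacing writes v/255 bytes of 255 followed by one byte holding the remainder, so the encoded length is v/255 + 1 bytes. A small self-contained check:

    #include <libavcodec/avcodec.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned char buf[16];
        /* 600 = 255 + 255 + 90, so three bytes are written. */
        unsigned int n = av_xiphlacing(buf, 600);
        printf("%u bytes: %u %u %u\n", n, buf[0], buf[1], buf[2]);
        return 0;
    }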
@ AV_CODEC_ID_ADPCM_IMA_APM
@ AV_PIX_FMT_YUVJ440P
planar YUV 4:4:0 full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV440P and setting color_range
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
int av_get_audio_frame_duration(AVCodecContext *avctx, int frame_bytes)
Return audio frame duration.
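A minimal sketch using the old channels field visible elsewhere on this page (the pre-ch_layout API); for constant-bits-per-sample PCM the result is just frame_bytes * 8 / (bits_per_sample * channels):

    #include <libavcodec/avcodec.h>
    #include <stdio.h>

    int main(void)
    {
        AVCodecContext *ctx = avcodec_alloc_context3(NULL);
        if (!ctx)
            return 1;
        ctx->codec_id = AV_CODEC_ID_PCM_S16LE;
        ctx->channels = 2;
        /* 4096 bytes of 16-bit stereo -> 4096*8 / (16*2) = 1024 samples. */
        printf("%d samples\n", av_get_audio_frame_duration(ctx, 4096));
        avcodec_free_context(&ctx);
        return 0;
    }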
@ AV_SAMPLE_FMT_S16
signed 16 bits
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
const char * name
Name of the codec implementation.
static av_always_inline AVRational av_inv_q(AVRational q)
Invert a rational.
@ AV_PKT_DATA_CPB_PROPERTIES
This side data corresponds to the AVCPBProperties struct.
@ AV_CODEC_ID_ADPCM_IMA_DAT4
int av_samples_get_buffer_size(int *linesize, int nb_channels, int nb_samples, enum AVSampleFormat sample_fmt, int align)
Get the required buffer size for the given audio parameters.
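A short example of querying the size needed for a planar float buffer; with the default alignment (align = 0) each plane here is 1024 * 4 bytes:

    #include <libavutil/samplefmt.h>
    #include <stdio.h>

    int main(void)
    {
        int linesize;
        int size = av_samples_get_buffer_size(&linesize, 2, 1024,
                                              AV_SAMPLE_FMT_FLTP, 0);
        /* Expect linesize = 4096 and size = 8192 for 2 planar channels. */
        printf("total %d bytes, linesize %d\n", size, linesize);
        return 0;
    }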
@ AV_PIX_FMT_YUV444P16BE
planar YUV 4:4:4, 48bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
int64_t ff_guess_coded_bitrate(AVCodecContext *avctx)
Get an estimated video bitrate based on frame size, frame rate and coded bits per pixel.
static int av_cmp_q(AVRational a, AVRational b)
Compare two rationals.
@ AV_PIX_FMT_GBRAP16LE
planar GBRA 4:4:4:4 64bpp, little-endian
@ AV_PIX_FMT_PAL8
8 bits with AV_PIX_FMT_RGB32 palette
@ AV_PIX_FMT_YVYU422
packed YUV 4:2:2, 16bpp, Y0 Cr Y1 Cb
int caps_internal
Internal codec capabilities.
int block_align
number of bytes per packet if constant and known, or 0. Used by some WAV based audio codecs.
static int get_audio_frame_duration(enum AVCodecID id, int sr, int ch, int ba, uint32_t tag, int bits_per_coded_sample, int64_t bitrate, uint8_t *extradata, int frame_size, int frame_bytes)
These buffered frames must be flushed immediately if a new input produces new output; the filter must not call request_frame to get more. It must just process the frame or queue it. The task of requesting more frames is left to the filter's request_frame method or to the application. If a filter has several inputs, the filter must be ready for frames arriving randomly on any input; any filter with several inputs will most likely require some kind of queuing mechanism. It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced. request_frame: for filters that do not use the activate callback, this method is called when a frame is wanted on an output. For a source, it should directly call filter_frame on the corresponding output. For a filter, if there are queued frames already, one of these frames should be pushed. If not, the filter should request a frame on one of its inputs, repeatedly until at least one frame has been pushed. Return, or at least make progress towards producing a frame.
#define AV_INPUT_BUFFER_PADDING_SIZE
@ AV_PIX_FMT_GBRP12BE
planar GBR 4:4:4 36bpp, big-endian
@ AV_PIX_FMT_UYVY422
packed YUV 4:2:2, 16bpp, Cb Y0 Cr Y1
@ AV_PIX_FMT_YUV444P12BE
planar YUV 4:4:4,36bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian
main external API structure.
@ AV_CODEC_ID_ADPCM_G726LE
@ AV_PIX_FMT_YUV444P9LE
planar YUV 4:4:4, 27bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian
void av_fast_padded_mallocz(void *ptr, unsigned int *size, size_t min_size)
Same behaviour as av_fast_padded_malloc(), except that the buffer will always be 0-initialized after the call.
@ AV_SAMPLE_FMT_DBLP
double, planar
#define AV_PIX_FMT_FLAG_PLANAR
At least one pixel component is not in the first data plane.
enum AVChromaLocation avcodec_chroma_pos_to_enum(int xpos, int ypos)
Converts swscale x/y chroma position to AVChromaLocation.
@ AV_PIX_FMT_YUVA420P10BE
planar YUV 4:2:0 25bpp, (1 Cr & Cb sample per 2x2 Y & A samples, big-endian)
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
const AVCodecHWConfig * avcodec_get_hw_config(const AVCodec *codec, int index)
Retrieve supported hardware configurations for a codec.
@ AV_PIX_FMT_YUV420P16BE
planar YUV 4:2:0, 24bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian
static int shift(int a, int b)
@ AV_PIX_FMT_GBRP
planar GBR 4:4:4 24bpp
enum AVCodecID av_get_pcm_codec(enum AVSampleFormat fmt, int be)
Return the PCM codec associated with a sample format.
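A minimal sketch of the lookup; per the check at line 575, be selects little-endian (0) or big-endian (1), and other values are treated as native endianness:

    #include <libavcodec/avcodec.h>
    #include <stdio.h>

    int main(void)
    {
        enum AVCodecID id = av_get_pcm_codec(AV_SAMPLE_FMT_S16, 0);
        printf("%s\n", avcodec_get_name(id)); /* pcm_s16le */
        return 0;
    }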
@ AV_PIX_FMT_YUV422P16BE
planar YUV 4:2:2, 32bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian
@ AV_PIX_FMT_GRAY16LE
Y , 16bpp, little-endian.
int bits_per_coded_sample
The number of bits per sample in the codedwords.
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
static unsigned bcd2uint(uint8_t bcd)
@ AV_CODEC_ID_ADPCM_SBPRO_4
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
int ff_set_sar(AVCodecContext *avctx, AVRational sar)
Check that the provided sample aspect ratio is valid and set it on the codec context.
@ AV_PIX_FMT_YUVA444P10LE
planar YUV 4:4:4 40bpp, (1 Cr & Cb sample per 1x1 Y & A samples, little-endian)
Structure to hold side data for an AVFrame.
static void flush_put_bits(PutBitContext *s)
Pad the end of the output stream with zeros.
@ AV_CODEC_ID_PCM_S24DAUD
@ AV_CODEC_ID_ADPCM_IMA_SSI
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
const VDPAUPixFmtMap * map
int ff_alloc_timecode_sei(const AVFrame *frame, AVRational rate, size_t prefix_len, void **data, size_t *sei_size)
Check AVFrame for S12M timecode side data and allocate and fill TC SEI message with timecode info.
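The SEI payload is built from the S12M side-data words unpacked at lines 1059-1063 above. A self-contained sketch of that unpacking, mirroring the static bcd2uint() shown earlier and using a hypothetical packed value:

    #include <stdint.h>
    #include <stdio.h>

    static unsigned bcd2uint(uint8_t bcd)
    {
        unsigned low  = bcd & 0xf;
        unsigned high = bcd >> 4;
        if (low > 9 || high > 9)
            return 0;
        return low + 10 * high;
    }

    int main(void)
    {
        uint32_t tcsmpte = 0x17343512;            /* hypothetical word */
        printf("%02u:%02u:%02u:%02u\n",
               bcd2uint(tcsmpte       & 0x3f),    /* hours   -> 12 */
               bcd2uint(tcsmpte >> 8  & 0x7f),    /* minutes -> 35 */
               bcd2uint(tcsmpte >> 16 & 0x7f),    /* seconds -> 34 */
               bcd2uint(tcsmpte >> 24 & 0x3f));   /* frames  -> 17 */
        return 0;
    }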
unsigned int codec_tag
fourcc (LSB first, so "ABCD" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A').
@ AV_CODEC_ID_ADPCM_IMA_MOFLEX
enum AVCodecID codec_id
Specific type of the encoded data (the codec used).
@ AV_CODEC_ID_ADPCM_IMA_WAV
int av_get_audio_frame_duration2(AVCodecParameters *par, int frame_bytes)
This function is the same as av_get_audio_frame_duration(), except it works with AVCodecParameters in...
void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size)
Allocate a buffer, reusing the given one if large enough.
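A sketch of the typical grow-only scratch-buffer pattern this function enables; the names are invented, and note that existing contents are not preserved when the buffer is reallocated:

    #include <libavutil/error.h>
    #include <libavutil/mem.h>
    #include <stdint.h>
    #include <string.h>

    static int copy_into_scratch(uint8_t **buf, unsigned int *buf_size,
                                 const uint8_t *src, size_t src_size)
    {
        /* Only reallocates when src_size exceeds the recorded *buf_size. */
        av_fast_malloc(buf, buf_size, src_size);
        if (!*buf)
            return AVERROR(ENOMEM);
        memcpy(*buf, src, src_size);
        return 0;
    }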
@ AV_PIX_FMT_YUV411P
planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples)
@ AV_PIX_FMT_YUV440P10BE
planar YUV 4:4:0,20bpp, (1 Cr & Cb sample per 1x2 Y samples), big-endian
@ AV_PIX_FMT_YUVA422P16BE
planar YUV 4:2:2 48bpp, (1 Cr & Cb sample per 2x1 Y & A samples, big-endian)
@ AV_PIX_FMT_YUV422P9LE
planar YUV 4:2:2, 18bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian
@ AV_PIX_FMT_YUVA422P16LE
planar YUV 4:2:2 48bpp, (1 Cr & Cb sample per 2x1 Y & A samples, little-endian)
@ AV_PIX_FMT_GBRP14LE
planar GBR 4:4:4 42bpp, little-endian
@ AV_CODEC_ID_PCM_S8_PLANAR
int width
picture width / height.
#define flags(name, subs,...)
@ AVCHROMA_LOC_NB
Not part of ABI.
@ AV_PIX_FMT_YUV410P
planar YUV 4:1:0, 9bpp, (1 Cr & Cb sample per 4x4 Y samples)
int64_t bit_rate
The average bitrate of the encoded data (in bits per second).
int(* decode)(struct AVCodecContext *avctx, void *outdata, int *got_frame_ptr, struct AVPacket *avpkt)
Decode picture or subtitle data.
@ AV_SAMPLE_FMT_DBL
double
const AVCodecDescriptor * avcodec_descriptor_get(enum AVCodecID id)
int av_image_check_sar(unsigned int w, unsigned int h, AVRational sar)
Check if the given sample aspect ratio of an image is valid.
@ AV_PIX_FMT_YUVA444P16LE
planar YUV 4:4:4 64bpp, (1 Cr & Cb sample per 1x1 Y & A samples, little-endian)
@ AV_PIX_FMT_YUVA422P12BE
planar YUV 4:2:2,24bpp, (1 Cr & Cb sample per 2x1 Y samples), 12b alpha, big-endian
AVCodecHWConfig public
This is the structure which will be returned to the user by avcodec_get_hw_config().
void ff_thread_report_progress(ThreadFrame *f, int progress, int field)
Notify later decoding threads when part of their reference picture is ready.
@ AV_SAMPLE_FMT_S32
signed 32 bits
@ AV_PIX_FMT_YUV422P12LE
planar YUV 4:2:2,24bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian
int ff_thread_init(AVCodecContext *s)
@ AV_PIX_FMT_YUVA420P9BE
planar YUV 4:2:0 22.5bpp, (1 Cr & Cb sample per 2x2 Y & A samples), big-endian
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown). That is the width of a pixel divided by the height of the pixel.
@ AV_PIX_FMT_YUVA422P
planar YUV 4:2:2 24bpp, (1 Cr & Cb sample per 2x1 Y & A samples)
@ AV_PIX_FMT_UYYVYY411
packed YUV 4:1:1, 12bpp, Cb Y0 Y1 Cr Y2 Y3
@ AV_CODEC_ID_ADPCM_SBPRO_3
@ AV_PIX_FMT_YUVA422P9LE
planar YUV 4:2:2 27bpp, (1 Cr & Cb sample per 2x1 Y & A samples), little-endian