Excerpts from FFmpeg's AudioToolbox audio encoder wrapper (the "*_at" encoders). The listing below is non-contiguous; elided code is marked with /* ... */ comments.
#include <AudioToolbox/AudioToolbox.h>

#define FF_BUFQUEUE_SIZE 256
/* Codec ID / AAC profile to AudioToolbox format ID mapping (return values only) */
        return kAudioFormatMPEG4AAC;        /* AAC-LC              */
        return kAudioFormatMPEG4AAC_HE;     /* HE-AAC              */
        return kAudioFormatMPEG4AAC_HE_V2;  /* HE-AAC v2           */
        return kAudioFormatMPEG4AAC_LD;     /* AAC-LD              */
        return kAudioFormatMPEG4AAC_ELD;    /* AAC-ELD             */
        return kAudioFormatAppleIMA4;       /* ADPCM IMA QuickTime */
        return kAudioFormatAppleLossless;   /* ALAC                */
        return kAudioFormatiLBC;            /* iLBC                */
        return kAudioFormatALaw;            /* A-law               */
        return kAudioFormatULaw;            /* mu-law              */
/* Query the converter for the maximum output packet size, priming info
 * (encoder delay) and the negotiated output stream description, and feed
 * the results back into the codec context. */
UInt32 size = sizeof(unsigned);
AudioConverterPrimeInfo prime_info;
AudioStreamBasicDescription out_format;
/* ... */
                          kAudioConverterPropertyMaximumOutputPacketSize,
/* ... */
size = sizeof(prime_info);
if (!AudioConverterGetProperty(at->converter,
                               kAudioConverterPrimeInfo,
                               &size, &prime_info)) {
/* ... */
size = sizeof(out_format);
if (!AudioConverterGetProperty(at->converter,
                               kAudioConverterCurrentOutputStreamDescription,
                               &size, &out_format)) {
    if (out_format.mFramesPerPacket)
        avctx->frame_size = out_format.mFramesPerPacket;
/* MPEG-4 descriptor parsing: a one-byte tag followed by a variable-length size */
*tag = bytestream2_get_byte(gb);
/* ... */
    int c = bytestream2_get_byte(gb);
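The length being read here follows the MPEG-4 "expandable size" convention. A minimal sketch of that decoding, assuming the same GetByteContext byte reader (bytestream.h) used by the file; the helper name is illustrative, not the file's:

/* Illustrative helper: decode an MPEG-4 expandable length of up to four
 * bytes, 7 payload bits per byte; a set high bit means another byte follows. */
static int read_descriptor_len(GetByteContext *gb)
{
    int len = 0;
    for (int i = 0; i < 4; i++) {
        int c = bytestream2_get_byte(gb);
        len = (len << 7) | (c & 0x7f);
        if (!(c & 0x80))
            break;
    }
    return len;
}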
/* iLBC: choose the 30 ms frame mode for low requested bitrates, 20 ms otherwise */
return avctx->bit_rate <= 14000 ? 30 : 20;
/* AV_CH_LOW_FREQUENCY_2 maps to the AudioToolbox LFE2 channel label */
    return kAudioChannelLabel_LFE2;
/* ... */
/* Describe the layout channel by channel, walking the set bits of the 64-bit
 * AV_CH_* mask in ascending order. */
layout->mChannelLayoutTag = kAudioChannelLayoutTag_UseChannelDescriptions;
layout->mNumberChannelDescriptions = count;
for (i = 0; i < count; i++) {
    while (c < 64 && !(in_layout & (1ULL << c)))  /* bounds check first; 64-bit shift */
        c++;
    /* ... */
    layout->mChannelDescriptions[i].mChannelLabel = label;
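A self-contained sketch of the bit-walking idea in the loop above, decoupled from AudioToolbox. The mask is an AV_CH_*-style 64-bit channel mask; get_label stands in for the file's label lookup and is not its helper:

#include <stdint.h>

/* Illustrative only: walk the set bits of a 64-bit channel mask in ascending
 * order and hand each bit index to a label-mapping function. */
static int map_channels(uint64_t mask, int count, int *labels,
                        int (*get_label)(int bit))
{
    int c = 0;
    for (int i = 0; i < count; i++) {
        while (c < 64 && !(mask & (UINT64_C(1) << c)))
            c++;                      /* skip cleared bits */
        if (c == 64)
            return -1;                /* fewer set bits than requested channels */
        labels[i] = get_label(c++);   /* consume this bit */
    }
    return 0;
}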
/* Native channel layout tags for the standard AV_CH_LAYOUT_* masks
 * (return values only) */
    return kAudioChannelLayoutTag_Mono;
    return kAudioChannelLayoutTag_Stereo;
    return kAudioChannelLayoutTag_AAC_Quadraphonic;
    return kAudioChannelLayoutTag_AAC_Octagonal;
    return kAudioChannelLayoutTag_AAC_3_0;
    return kAudioChannelLayoutTag_AAC_4_0;
    return kAudioChannelLayoutTag_AAC_5_0;
    return kAudioChannelLayoutTag_AAC_5_1;
    return kAudioChannelLayoutTag_AAC_6_0;
    return kAudioChannelLayoutTag_AAC_6_1;
    return kAudioChannelLayoutTag_AAC_7_0;
    return kAudioChannelLayoutTag_AAC_7_1;
    return kAudioChannelLayoutTag_MPEG_7_1_C;
AudioStreamBasicDescription in_format = {
    /* ... */
    .mFormatID         = kAudioFormatLinearPCM,
    .mFormatFlags      = (/* ... */
                          : kAudioFormatFlagIsSignedInteger)
                         | kAudioFormatFlagIsPacked,
    /* ... */
    .mFramesPerPacket  = 1,
    /* ... */
    .mChannelsPerFrame = avctx->channels,
};
AudioStreamBasicDescription out_format = {
    /* ... */
    .mChannelsPerFrame = in_format.mChannelsPerFrame,
};
UInt32 layout_size = sizeof(AudioChannelLayout) +
                     sizeof(AudioChannelDescription) * avctx->channels;
AudioChannelLayout *channel_layout = av_malloc(layout_size);
out_format.mFramesPerPacket = 8000 * mode / 1000;
out_format.mBytesPerPacket  = (mode == 20 ? 38 : 50);
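For reference, the arithmetic above: the 20 ms iLBC mode gives 8000 * 20 / 1000 = 160 samples and 38-byte packets (15.2 kbit/s), the 30 ms mode 240 samples and 50-byte packets (about 13.33 kbit/s). With the input and output descriptions filled in, creating the converter itself is a single AudioToolbox call; a minimal sketch, with error handling that is illustrative rather than the file's:

/* Sketch only: build the converter from the two stream descriptions above. */
AudioConverterRef converter = NULL;
OSStatus status = AudioConverterNew(&in_format, &out_format, &converter);
if (status != noErr) {
    /* e.g. the requested format ID is not available on this system */
    return AVERROR_UNKNOWN;
}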
/* Hand the (possibly remapped) channel layout to the converter; when a native
 * layout tag exists it replaces the per-channel descriptions. */
if (AudioConverterSetProperty(at->converter, kAudioConverterInputChannelLayout,
                              layout_size, channel_layout)) {
/* ... */
channel_layout->mChannelLayoutTag = tag;
channel_layout->mNumberChannelDescriptions = 0;
/* ... */
if (AudioConverterSetProperty(at->converter, kAudioConverterOutputChannelLayout,
                              layout_size, channel_layout)) {
/* bit-depth hint for the output (call excerpted) */
                          kAudioConverterPropertyBitDepthHint,
#if !TARGET_OS_IPHONE
/* Rate-control mode defaults to VBR when a global quality (qscale) was given,
 * CBR otherwise. */
        kAudioCodecBitRateControlMode_Variable :
        kAudioCodecBitRateControlMode_Constant;

AudioConverterSetProperty(at->converter, kAudioCodecPropertyBitRateControlMode,
                          /* ... */);

if (at->mode == kAudioCodecBitRateControlMode_Variable) {
    /* ... */
    if (q < 0 || q > 14) {
        av_log(avctx, AV_LOG_WARNING,
               "VBR quality %d out of range, should be 0-14\n", q);
    /* ... */
    AudioConverterSetProperty(at->converter, kAudioCodecPropertySoundQualityForVBR,
                              /* ... */);
/* If the codec reports a list of applicable bitrate ranges, clamp the
 * requested bitrate to the nearest allowed value before applying it. */
        kAudioConverterApplicableEncodeBitRates,
/* ... */
UInt32 new_rate = rate;
/* ... */
        kAudioConverterApplicableEncodeBitRates,
/* ... */
count = size / sizeof(AudioValueRange);
for (i = 0; i < count; i++) {
    AudioValueRange *range = &ranges[i];
    if (rate >= range->mMinimum && rate <= range->mMaximum) {
        /* ... */
    } else if (rate > range->mMaximum) {
        new_rate = range->mMaximum;
    } else {
        new_rate = range->mMinimum;
        /* ... */
    }
}
if (new_rate != rate) {
    av_log(avctx, AV_LOG_WARNING,
           "Bitrate %u not allowed; changing to %u\n", rate, new_rate);
    /* ... */
}
AudioConverterSetProperty(at->converter, kAudioConverterEncodeBitRate,
                          sizeof(rate), &rate);
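The same clamping logic, pulled out into a standalone function for clarity. It assumes the AudioToolbox header included at the top of the file, and that the ranges are reported in ascending order; the helper name is illustrative:

/* Illustrative helper: move a requested bitrate into the closest value
 * allowed by the reported ranges. */
static UInt32 clamp_bitrate(UInt32 rate, const AudioValueRange *ranges, int count)
{
    UInt32 new_rate = rate;
    for (int i = 0; i < count; i++) {
        if (rate >= ranges[i].mMinimum && rate <= ranges[i].mMaximum) {
            return rate;                      /* already an allowed value */
        } else if (rate > ranges[i].mMaximum) {
            new_rate = ranges[i].mMaximum;    /* remember the closest ceiling */
        } else {
            new_rate = ranges[i].mMinimum;    /* first range above the request */
            break;
        }
    }
    return new_rate;
}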
/* Encoder quality-vs-speed preference */
AudioConverterSetProperty(at->converter, kAudioConverterCodecQuality,
                          /* ... */);
/* Export the converter's magic cookie as the codec-level extradata */
if (!AudioConverterGetPropertyInfo(at->converter, kAudioConverterCompressionMagicCookie,
/* ... */
                                   kAudioConverterCompressionMagicCookie,
                                   &extradata_size, extradata);
flags = bytestream2_get_byte(&gb);
#if !TARGET_OS_IPHONE && defined(__MAC_10_9)
/* In VBR mode, honour rc_max_rate by capping the size of each output packet */
if (at->mode == kAudioCodecBitRateControlMode_Variable && avctx->rc_max_rate) {
    /* ... */
    AudioConverterSetProperty(at->converter, kAudioCodecPropertyPacketSizeLimitForVBR,
                              sizeof(max_size), &max_size);
/* Input callback used by the converter: hand over the next queued frame's PCM
 * and report how many sample frames it holds. */
                        AudioBufferList *data,
                        AudioStreamPacketDescription **packets,
/* ... */
data->mNumberBuffers = 1;
/* ... */
data->mBuffers[0].mDataByteSize = frame->nb_samples *
                                  /* ... */;
data->mBuffers[0].mData = frame->data[0];
if (*nb_packets > frame->nb_samples)
    *nb_packets = frame->nb_samples;
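For context, AudioConverterFillComplexBuffer() pulls input through a callback with this shape; returning a nonzero status together with a zero packet count is the conventional way to say "no more input right now". A sketch under that assumption, using the AudioToolbox header from the top of the file; the PendingInput struct and names are illustrative, not the file's context:

#include <stdint.h>

typedef struct PendingInput {
    uint8_t *pcm;         /* interleaved samples */
    UInt32   bytes;       /* bytes still available */
    UInt32   frame_bytes; /* bytes per PCM frame (channels * bytes per sample) */
    UInt32   channels;
} PendingInput;

static OSStatus supply_input(AudioConverterRef conv, UInt32 *nb_packets,
                             AudioBufferList *data,
                             AudioStreamPacketDescription **desc, void *opaque)
{
    PendingInput *in = opaque;
    if (!in->bytes) {
        *nb_packets = 0;
        return 1;         /* no input available; the converter stops pulling */
    }
    data->mNumberBuffers              = 1;
    data->mBuffers[0].mNumberChannels = in->channels;
    data->mBuffers[0].mDataByteSize   = in->bytes;
    data->mBuffers[0].mData           = in->pcm;
    *nb_packets = in->bytes / in->frame_bytes;
    in->bytes   = 0;      /* hand everything over in one call */
    return noErr;
}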
/* Encode one packet: the output buffer list points at the AVPacket's data and
 * the converter pulls input through the callback above. */
AudioBufferList out_buffers = {
    /* ... */
};
AudioStreamPacketDescription out_pkt_desc = {0};
/* ... */
out_buffers.mBuffers[0].mData = avpkt->data;
/* ... */
                               got_packet_ptr, &out_buffers,
/* ... */
if ((!ret || ret == 1) && *got_packet_ptr) {
    avpkt->size = out_buffers.mBuffers[0].mDataByteSize;
    /* ... */
               out_pkt_desc.mVariableFramesInPacket :
    /* ... */
} else if (ret && ret != 1) {
#define AE AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
#if !TARGET_OS_IPHONE
    {"aac_at_mode", "ratecontrol mode", offsetof(ATDecodeContext, mode),
        AV_OPT_TYPE_INT, {.i64 = -1}, -1, kAudioCodecBitRateControlMode_Variable, AE, "mode"},
    {"auto", "VBR if global quality is given; CBR otherwise", 0,
        AV_OPT_TYPE_CONST, {.i64 = -1}, INT_MIN, INT_MAX, AE, "mode"},
    {"cbr",  "constant bitrate", 0,
        AV_OPT_TYPE_CONST, {.i64 = kAudioCodecBitRateControlMode_Constant}, INT_MIN, INT_MAX, AE, "mode"},
    {"abr",  "long-term average bitrate", 0,
        AV_OPT_TYPE_CONST, {.i64 = kAudioCodecBitRateControlMode_LongTermAverage}, INT_MIN, INT_MAX, AE, "mode"},
    {"cvbr", "constrained variable bitrate", 0,
        AV_OPT_TYPE_CONST, {.i64 = kAudioCodecBitRateControlMode_VariableConstrained}, INT_MIN, INT_MAX, AE, "mode"},
    {"vbr",  "variable bitrate", 0,
        AV_OPT_TYPE_CONST, {.i64 = kAudioCodecBitRateControlMode_Variable}, INT_MIN, INT_MAX, AE, "mode"},
#define FFAT_ENC_CLASS(NAME) \
    static const AVClass ffat_##NAME##_enc_class = { \
        .class_name = "at_" #NAME "_enc", \
        .item_name  = av_default_item_name, \
        /* ... */ \
        .version    = LIBAVUTIL_VERSION_INT, \
    };

#define FFAT_ENC(NAME, ID, PROFILES, ...) \
    FFAT_ENC_CLASS(NAME) \
    const AVCodec ff_##NAME##_at_encoder = { \
        .name           = #NAME "_at", \
        .long_name      = NULL_IF_CONFIG_SMALL(#NAME " (AudioToolbox)"), \
        .type           = AVMEDIA_TYPE_AUDIO, \
        /* ... */ \
        .priv_data_size = sizeof(ATDecodeContext), \
        .init           = ffat_init_encoder, \
        .close          = ffat_close_encoder, \
        .encode2        = ffat_encode, \
        .flush          = ffat_encode_flush, \
        .priv_class     = &ffat_##NAME##_enc_class, \
        .capabilities   = AV_CODEC_CAP_DELAY | \
                          AV_CODEC_CAP_ENCODER_FLUSH __VA_ARGS__, \
        .sample_fmts    = (const enum AVSampleFormat[]) { \
            /* ... */ \
            AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_NONE \
        }, \
        .caps_internal  = FF_CODEC_CAP_INIT_THREADSAFE, \
        .profiles       = PROFILES, \
        .wrapper_name   = "at", \
    };
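Putting the option table and the generated "aac_at" encoder together, a hypothetical libavcodec caller could look like this. The helper, its defaults, and the compressed error handling are illustrative and not part of FFmpeg or this file:

#include <libavcodec/avcodec.h>
#include <libavutil/channel_layout.h>
#include <libavutil/dict.h>

/* Hypothetical helper: open the "aac_at" encoder with constrained-VBR rate
 * control selected through the private "aac_at_mode" option declared above. */
static AVCodecContext *open_aac_at(int sample_rate, int64_t bit_rate)
{
    const AVCodec *codec = avcodec_find_encoder_by_name("aac_at");
    AVCodecContext *ctx;
    AVDictionary *opts = NULL;

    if (!codec)        /* FFmpeg was built without the AudioToolbox wrapper */
        return NULL;

    ctx = avcodec_alloc_context3(codec);
    if (!ctx)
        return NULL;

    ctx->sample_fmt     = codec->sample_fmts[0]; /* first supported sample format */
    ctx->sample_rate    = sample_rate;
    ctx->channels       = 2;
    ctx->channel_layout = AV_CH_LAYOUT_STEREO;
    ctx->bit_rate       = bit_rate;

    av_dict_set(&opts, "aac_at_mode", "cvbr", 0);

    if (avcodec_open2(ctx, codec, &opts) < 0)
        avcodec_free_context(&ctx);              /* sets ctx to NULL on failure */
    av_dict_free(&opts);
    return ctx;
}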