#include <VapourSynth.h>
#define OFFSET(x) offsetof(VSContext, x)
#define A AV_OPT_FLAG_AUDIO_PARAM
#define D AV_OPT_FLAG_DECODING_PARAM

static const AVOption options[] = {
    {"max_script_size", "set max file size supported (in bytes)", OFFSET(max_script_size),
     AV_OPT_TYPE_INT64, {.i64 = 1 * 1024 * 1024}, 0, SIZE_MAX - 1, A|D},
    {NULL},
};
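OFFSET() resolves the option offsets against the demuxer's private context. A minimal sketch of what that context could look like; only max_script_size is required by the option table above, while vss_state, vsapi, vscore, outnode, current_frame, c_order and pixfmt are inferred from the fragments further down and are assumptions, not confirmed declarations:

/* Hypothetical sketch of the private demuxer context assumed by OFFSET(). */
typedef struct VSContext {
    const AVClass *class;

    AVBufferRef *vss_state;   /* keeps the VSScript environment alive (freed via free_vss_state) */

    const VSAPI *vsapi;       /* from vsscript_getVSApi() */
    VSCore      *vscore;      /* from vsscript_getCore() */
    VSNodeRef   *outnode;     /* from vsscript_getOutput() */

    int current_frame;        /* next frame index handed out by read_packet_vs() */
    int c_order[4];           /* plane order selected by match_pixfmt() */
    enum AVPixelFormat pixfmt;

    /* options */
    int64_t max_script_size;  /* upper bound for the script read in read_header_vs() */
} VSContext;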
/* from free_vss_state(void *opaque, uint8_t *data): tear down the script environment */
vsscript_freeScript(vss->vss);
/* from match_pixfmt(const VSFormat *vsf, int c_order[4]):
 * pick the AVPixelFormat whose plane layout matches the VapourSynth format */
static const int yuv_order[4] = {0, 1, 2, 0};
static const int rgb_order[4] = {1, 2, 0, 0};

int is_rgb, is_yuv, i;

is_rgb = vsf->colorFamily == cmRGB;

is_yuv = vsf->colorFamily == cmYUV ||
         vsf->colorFamily == cmYCoCg ||
         vsf->colorFamily == cmGray;
if (!is_rgb && !is_yuv)
    continue;                  /* only RGB-, YUV- and gray-family clips are supported */

if (strncmp(pd->name, "xyz", 3) == 0)
    continue;                  /* XYZ formats only look like RGB; skip them */

order = is_yuv ? yuv_order : rgb_order;

if (order[c->plane] != i ||
    c->offset != 0 || c->shift != 0 ||
    c->step != vsf->bytesPerSample ||
    c->depth != vsf->bitsPerSample)
    goto cont;                 /* component layout mismatch: try the next descriptor */

memcpy(c_order, order, sizeof(int[4]));
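These tests run once per candidate format. A hedged sketch of the enclosing loop, using the iteration helpers referenced on this page (av_pix_fmt_desc_next, av_pix_fmt_desc_get_id, av_pix_fmt_count_planes); the exact rejection tests and variable names here are a sketch, not the verbatim function body:

const AVPixFmtDescriptor *pd;

for (pd = av_pix_fmt_desc_next(NULL); pd; pd = av_pix_fmt_desc_next(pd)) {
    enum AVPixelFormat fmt = av_pix_fmt_desc_get_id(pd);

    /* hardware, bitstream-packed, Bayer and alpha formats cannot describe a
       plain VapourSynth plane layout, so reject them up front */
    if (pd->flags & (AV_PIX_FMT_FLAG_HWACCEL | AV_PIX_FMT_FLAG_BITSTREAM |
                     AV_PIX_FMT_FLAG_BAYER   | AV_PIX_FMT_FLAG_ALPHA))
        continue;

    if (av_pix_fmt_count_planes(fmt) != vsf->numPlanes)
        continue;

    /* ...the is_rgb/is_yuv, "xyz" and per-component checks shown above... */

    memcpy(c_order, order, sizeof(int[4]));
    return fmt;
}

return AV_PIX_FMT_NONE;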
/* from read_header_vs(AVFormatContext *s): set up VSScript and evaluate the script */
const VSVideoInfo *info;

if (!vsscript_init()) {
    /* error path (AVERROR_EXTERNAL): the VSScript/Python environment is unavailable */
}

if (vsscript_createScript(&vss_state->vss)) {
    /* error path (AVERROR_EXTERNAL): could not create a script environment */
}

/* tail of the av_log() error emitted when the script exceeds max_script_size
 * (AVERROR_BUFFER_TOO_SMALL): */
       "value %"PRIi64", consider increasing the max_script_size option\n",

if (vsscript_evaluateScript(&vss_state->vss, buf, s->url, 0)) {
    const char *msg = vsscript_getError(vss_state->vss);
    /* error path (AVERROR_EXTERNAL): log msg and abort */
}

vs->vsapi   = vsscript_getVSApi();
vs->vscore  = vsscript_getCore(vss_state->vss);

vs->outnode = vsscript_getOutput(vss_state->vss, 0);
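The buf handed to vsscript_evaluateScript() has to hold the whole script as a NUL-terminated string. A simplified sketch of how it could be filled under the max_script_size bound, using avio_size()/avio_read(); the pb and sz names and the stripped-down error handling are assumptions for this example:

int64_t sz = avio_size(pb);                      /* may be negative if the size is unknown */
if (sz < 0 || sz > vs->max_script_size)
    sz = vs->max_script_size;                    /* fall back to the configured bound */

char *buf = av_mallocz(sz + 1);                  /* +1 leaves room for the terminator */
if (!buf)
    return AVERROR(ENOMEM);

sz = avio_read(pb, (unsigned char *)buf, sz);    /* returns bytes read or an error code */
if (sz < 0)
    return sz;
buf[sz] = '\0';                                  /* vsscript_evaluateScript() expects a C string */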
/* YCoCg clips need an explicit colorspace tag on the stream parameters */
if (info->format->colorFamily == cmYCoCg)
    st->codecpar->color_space = AVCOL_SPC_YCGCO;
/* from get_vs_prop_int(): fall back to def on error or out-of-range values */
return err || res < INT_MIN || res > INT_MAX ? def : res;
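A hedged sketch of how the whole helper could look; only the return expression above is taken from the listing, while the propGetInt() lookup and the local variable names are assumptions:

static int get_vs_prop_int(AVFormatContext *s, const VSMap *map,
                           const char *name, int def)
{
    VSContext *vs = s->priv_data;
    int err = 0;
    int64_t res = vs->vsapi->propGetInt(map, name, 0, &err);   /* index 0 of the property */

    return err || res < INT_MIN || res > INT_MAX ? def : res;
}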
/* from free_vsframe_ref(void *opaque, uint8_t *data): release the VapourSynth frame */
d->vsapi->freeFrame(d->frame);
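free_vsframe_ref() is the kind of destructor av_buffer_create() accepts, so a reference-counted AVBufferRef can own the VapourSynth frame. A hedged sketch of that wiring; the holder struct name and its allocation site are assumptions, only the vsapi/frame fields and the callback name come from the listing:

struct vsframe_ref_data {
    const VSAPI      *vsapi;
    const VSFrameRef *frame;
};

static void free_vsframe_ref(void *opaque, uint8_t *data)
{
    struct vsframe_ref_data *d = opaque;

    if (d->frame)
        d->vsapi->freeFrame(d->frame);   /* the call shown above */
    av_free(d);
}

/* later, when a frame is fetched: wrap the holder in a zero-sized buffer whose
   only job is to run the destructor once the last reference is gone */
struct vsframe_ref_data *ref_data = av_mallocz(sizeof(*ref_data));
AVBufferRef *vsframe_ref = ref_data
    ? av_buffer_create(NULL, 0, free_vsframe_ref, ref_data, AV_BUFFER_FLAG_READONLY)
    : NULL;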
/* from read_packet_vs(AVFormatContext *s, AVPacket *pkt): fetch the next frame */
const VSFrameRef *vsframe;

ref_data->frame = vsframe;                    /* ownership moves to the buffer reference */

props = vs->vsapi->getFramePropsRO(vsframe);  /* read-only per-frame properties */
/* export each plane into the AVFrame without copying;
 * p is the VapourSynth plane index, remapped through the order picked by match_pixfmt() */
for (i = 0; i < info->format->numPlanes; i++) {
    ptrdiff_t plane_h = frame->height;

    frame->data[i]     = (void *)vs->vsapi->getReadPtr(vsframe, p);
    frame->linesize[i] = vs->vsapi->getStride(vsframe, p);

    if (i == 1 || i == 2)
        plane_h = AV_CEIL_RSHIFT(plane_h, desc->log2_chroma_h);   /* chroma planes are subsampled */
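AV_CEIL_RSHIFT() rounds up while shifting, which matters for odd frame heights. A small worked example, assuming 4:2:0 subsampling (log2_chroma_h == 1):

/* 1080 lines: AV_CEIL_RSHIFT(1080, 1) = 540 chroma lines, same as a plain shift.
 * 135 lines:  135 >> 1 = 67 would drop the last (half-height) chroma row,
 *             while AV_CEIL_RSHIFT(135, 1) = (135 + 1) >> 1 = 68 keeps it. */
ptrdiff_t chroma_h = AV_CEIL_RSHIFT(plane_h, desc->log2_chroma_h);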
/* from the ff_vapoursynth_demuxer definition */
.name = "vapoursynth",