23 #include <stdatomic.h>
62 frame->nb_rpl_elems = 0;
72      const int x_cb = x0 >> fc->ps.sps->ctb_log2_size_y;
73      const int y_cb = y0 >> fc->ps.sps->ctb_log2_size_y;
74      const int pic_width_cb = fc->ps.pps->ctb_width;
75      const int ctb_addr_rs = y_cb * pic_width_cb + x_cb;
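Lines 72-75 convert a luma sample position into a CTB raster-scan address by shifting out the CTB size. A minimal self-contained sketch of the same arithmetic, assuming a 128x128 CTB size and a 1920-sample picture width (both hypothetical values, not taken from this file):

    #include <stdio.h>

    int main(void)
    {
        const int ctb_log2_size_y = 7;                  /* assumed 128x128 CTBs */
        const int pic_width_cb    = (1920 + 127) >> 7;  /* 15 CTB columns, rounded up */
        const int x0 = 256, y0 = 384;                   /* sample position */

        const int x_cb = x0 >> ctb_log2_size_y;         /* CTB column: 2 */
        const int y_cb = y0 >> ctb_log2_size_y;         /* CTB row: 3 */
        const int ctb_addr_rs = y_cb * pic_width_cb + x_cb;

        printf("ctb_addr_rs = %d\n", ctb_addr_rs);      /* prints 47 */
        return 0;
    }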
124     if (frame->frame->buf[0])
137     frame->nb_rpl_elems = s->current_frame.nb_units;
140     if (!frame->tab_dmvr_mvf)
146     frame->ctb_count = pps->ctb_width * pps->ctb_height;
147     for (int j = 0; j < frame->ctb_count; j++)
150     win->left_offset   = pps->r->pps_scaling_win_left_offset   << sps->hshift[CHROMA];
151     win->right_offset  = pps->r->pps_scaling_win_right_offset  << sps->hshift[CHROMA];
152     win->top_offset    = pps->r->pps_scaling_win_top_offset    << sps->vshift[CHROMA];
153     win->bottom_offset = pps->r->pps_scaling_win_bottom_offset << sps->vshift[CHROMA];
154     frame->ref_width   = pps->r->pps_pic_width_in_luma_samples  - win->left_offset   - win->right_offset;
155     frame->ref_height  = pps->r->pps_pic_height_in_luma_samples - win->bottom_offset - win->top_offset;
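Lines 150-155 derive the scaling window in luma samples: the PPS offsets are signalled in chroma units, so they are shifted up by the chroma subsampling factors before being subtracted from the picture dimensions. A sketch assuming 4:2:0 chroma (hshift = vshift = 1) and made-up offsets:

    /* Assumed 4:2:0 subsampling and hypothetical PPS values. */
    static void scaling_window_example(void)
    {
        const int hshift = 1, vshift = 1;       /* 4:2:0 */
        const int pic_w = 1920, pic_h = 1080;   /* pps_pic_*_in_luma_samples */
        const int win_l = 8, win_r = 8;         /* offsets in chroma units */
        const int win_t = 0, win_b = 0;

        const int left   = win_l << hshift;     /* 16 luma samples */
        const int right  = win_r << hshift;     /* 16 luma samples */
        const int top    = win_t << vshift;
        const int bottom = win_b << vshift;

        const int ref_width  = pic_w - left - right;    /* 1888 */
        const int ref_height = pic_h - bottom - top;    /* 1080 */
        (void)ref_width; (void)ref_height;
    }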
158     if (!frame->progress)
176     bool has_b = false, has_inter = false;
184     if (fc->ps.ph.r->ph_inter_slice_allowed_flag) {
187     for (int i = 0; i < current->nb_units && !has_b; i++) {
191     has_inter |= !IS_I(rsh);
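The has_b / has_inter scan above drives set_pict_type(): a picture with no inter slice is keyed as I, one containing a B slice as B, anything else as P. A sketch of that decision on an array of H.266 sh_slice_type codes (the helper and its inputs are hypothetical):

    /* H.266 sh_slice_type codes: 0 = B, 1 = P, 2 = I. */
    static char pict_type_from_slices(const int *slice_types, int n)
    {
        int has_b = 0, has_inter = 0;
        for (int i = 0; i < n && !has_b; i++) {
            has_b     |= slice_types[i] == 0;
            has_inter |= slice_types[i] != 2;
        }
        return !has_inter ? 'I' : has_b ? 'B' : 'P';
    }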
207     const int poc = ph->poc;
214     if (frame->frame->buf[0] && frame->sequence == s->seq_decode &&
231     else if (ph->r->ph_pic_output_flag)
234     if (!ph->r->ph_non_ref_pic_flag)
238     ref->sequence = s->seq_decode;
239     ref->frame->crop_left   = fc->ps.pps->r->pps_conf_win_left_offset   << fc->ps.sps->hshift[CHROMA];
240     ref->frame->crop_right  = fc->ps.pps->r->pps_conf_win_right_offset  << fc->ps.sps->hshift[CHROMA];
241     ref->frame->crop_top    = fc->ps.pps->r->pps_conf_win_top_offset    << fc->ps.sps->vshift[CHROMA];
242     ref->frame->crop_bottom = fc->ps.pps->r->pps_conf_win_bottom_offset << fc->ps.sps->vshift[CHROMA];
252 int min_poc = INT_MAX;
255 if (no_output_of_prior_pics_flag) {
259     frame->sequence == s->seq_output) {
268     frame->sequence == s->seq_output) {
270     if (frame->poc < min_poc || nb_output == 1) {
271     min_poc = frame->poc;
278     if (!flush && s->seq_output == s->seq_decode && sps &&
279     nb_output <= sps->r->sps_dpb_params.dpb_max_num_reorder_pics[sps->r->sps_max_sublayers_minus1])
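Lines 278-279 are the DPB bumping test: unless flushing, nothing is output while the number of frames waiting for output is still within the reorder depth the SPS allows. A reduced sketch of the condition with hypothetical values:

    /* Mirrors the test on lines 278-279; inputs are hypothetical. */
    static int should_bump(int flush, int nb_output, int max_num_reorder_pics)
    {
        if (!flush && nb_output <= max_num_reorder_pics)
            return 0;   /* keep buffering; output order may still change */
        return 1;       /* bump the waiting frame with the smallest POC */
    }
    /* should_bump(0, 5, 4) == 1, should_bump(0, 4, 4) == 0 */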
297 "Output frame with POC %d.\n",
frame->poc);
301 if (
s->seq_output !=
s->seq_decode)
302 s->seq_output = (
s->seq_output + 1) & 0xff;
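seq_decode and seq_output are small epoch counters that pair decoded frames with the correct output run across seeks and flushes; line 302 shows they wrap modulo 256. A sketch of the idea (the file-scope variables are stand-ins for the context fields):

    #include <stdint.h>

    /* Stand-ins for the context fields s->seq_decode / s->seq_output. */
    static uint8_t seq_decode, seq_output;

    static void new_sequence(void)          /* e.g. after a seek/flush */
    {
        seq_decode = (seq_decode + 1) & 0xff;
    }

    static void output_epoch_drained(void)  /* advance only once drained */
    {
        if (seq_output != seq_decode)
            seq_output = (seq_output + 1) & 0xff;
    }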
312     const int poc = fc->ps.ph.poc;
314 int min_poc = INT_MAX;
318     if ((frame->flags) &&
319     frame->sequence == s->seq_output &&
325     if (sps && dpb >= sps->r->sps_dpb_params.dpb_max_dec_pic_buffering_minus1[sps->r->sps_max_sublayers_minus1] + 1) {
328     if ((frame->flags) &&
329     frame->sequence == s->seq_output &&
332     min_poc = frame->poc;
340     frame->sequence == s->seq_output &&
341     frame->poc <= min_poc) {
352     const unsigned mask = use_msb ? ~0 : fc->ps.sps->max_pic_order_cnt_lsb - 1;
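Line 352 builds the comparison mask used by find_ref_idx(): with use_msb set, every POC bit participates; otherwise only the LSB part does, so a reference can be located by poc_lsb alone. Because max_pic_order_cnt_lsb is a power of two, subtracting 1 yields a valid bit mask. A sketch with assumed values:

    /* max_pic_order_cnt_lsb is assumed to be a power of two. */
    static int poc_matches(int candidate_poc, int wanted_poc, int use_msb,
                           unsigned max_pic_order_cnt_lsb)
    {
        const unsigned mask = use_msb ? ~0u : max_pic_order_cnt_lsb - 1;
        return (candidate_poc & mask) == (wanted_poc & mask);
    }
    /* poc_matches(300, 44, 0, 256) == 1: 300 & 0xff == 44 */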
356     if (ref->frame->buf[0] && ref->sequence == s->seq_decode) {
380     if (!s->avctx->hwaccel) {
381     if (!sps->pixel_shift) {
382     for (int i = 0; frame->frame->buf[i]; i++)
383     memset(frame->frame->buf[i]->data, 1 << (sps->bit_depth - 1),
384     frame->frame->buf[i]->size);
386     for (int i = 0; frame->frame->data[i]; i++)
387     for (int y = 0; y < (pps->height >> sps->vshift[i]); y++) {
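When a reference is lost, generate_missing_ref() synthesizes a frame of neutral mid-gray, i.e. 1 << (bit_depth - 1) in every sample; lines 380-387 take the cheap memset path when the format has no pixel shift (one byte per sample) and fall back to a per-line fill otherwise. A sketch of the 8-bit case (buffer and size are hypothetical):

    #include <stdint.h>
    #include <string.h>

    static void fill_mid_gray8(uint8_t *data, size_t size)
    {
        const int bit_depth = 8;                   /* assumed 8-bit samples */
        memset(data, 1 << (bit_depth - 1), size);  /* every byte = 0x80 */
    }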
396     frame->sequence = s->seq_decode;
404 #define CHECK_MAX(d) (frame->ref_##d * frame->sps->r->sps_pic_##d##_max_in_luma_samples >= ref->ref_##d * (frame->pps->r->pps_pic_##d##_in_luma_samples - max))
405 #define CHECK_SAMPLES(d) (frame->pps->r->pps_pic_##d##_in_luma_samples == ref->pps->r->pps_pic_##d##_in_luma_samples)
412     return frame->ref_width * 2 >= ref->ref_width &&
413     frame->ref_height * 2 >= ref->ref_height &&
414     frame->ref_width <= ref->ref_width * 8 &&
415     frame->ref_height <= ref->ref_height * 8 &&
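Lines 412-415 check VVC's reference-picture-resampling bounds: in each dimension the current scaling window must be at least half and at most eight times the reference's. A sketch with hypothetical sizes:

    /* Mirrors lines 412-415; dimensions are hypothetical. */
    static int rpr_size_ok(int cur_w, int cur_h, int ref_w, int ref_h)
    {
        return cur_w * 2 >= ref_w &&   /* reference at most 2x larger  */
               cur_h * 2 >= ref_h &&
               cur_w <= ref_w * 8 &&   /* reference at most 8x smaller */
               cur_h <= ref_h * 8;
    }
    /* rpr_size_ok(1920, 1080, 3840, 2160) == 1 (a clean 2x downscale) */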
421 #define RPR_SCALE(f) (((ref->f << 14) + (fc->ref->f >> 1)) / fc->ref->f)
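RPR_SCALE (line 421) computes the resampling ratio as a Q14 fixed-point value, adding half the divisor first so the division rounds to nearest. Expanded with assumed sizes:

    static int rpr_scale_q14(int ref_size, int cur_size)
    {
        /* The half-divisor term makes the division round to nearest. */
        return ((ref_size << 14) + (cur_size >> 1)) / cur_size;
    }
    /* rpr_scale_q14(3840, 1920) == 32768, i.e. 2.0 in Q14 */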
424     int poc, int ref_flag, uint8_t use_msb)
436 if (ref_corrupt && !recovering) {
454     refp->is_scaled = ref->sps->r->sps_num_subpics_minus1 != fc->ref->sps->r->sps_num_subpics_minus1 ||
455     memcmp(&ref->scaling_win, &fc->ref->scaling_win, sizeof(ref->scaling_win)) ||
456     ref->pps->r->pps_pic_width_in_luma_samples != fc->ref->pps->r->pps_pic_width_in_luma_samples ||
457     ref->pps->r->pps_pic_height_in_luma_samples != fc->ref->pps->r->pps_pic_height_in_luma_samples;
494     if (!((sps->r->sps_weighted_pred_flag ||
495     sps->r->sps_weighted_bipred_flag) && i != 0))
501     const int lx, const int j, const int max_poc_lsb)
508     lt_poc += poc - delta * max_poc_lsb - (poc & (max_poc_lsb - 1));
509     *prev_delta_poc_msb = delta;
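poc_lt() rebuilds a long-term reference's full POC from its signalled LSBs: line 508 strips the current picture's own LSBs, then steps back delta MSB cycles. Worked through with assumed numbers:

    /* Line 508's arithmetic with hypothetical inputs. */
    static int long_term_poc(int poc_lsb_lt, int cur_poc,
                             int delta_msb_cycles, int max_poc_lsb)
    {
        return poc_lsb_lt + cur_poc
               - delta_msb_cycles * max_poc_lsb
               - (cur_poc & (max_poc_lsb - 1));
    }
    /* long_term_poc(44, 300, 1, 256) == 44:
     * same LSBs as signalled, one MSB cycle before the current POC */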
520     const int max_poc_lsb = sps->max_pic_order_cnt_lsb;
529     for (int lx = L0; lx <= L1; lx++) {
532     int poc_base = ph->poc;
533     int prev_delta_poc_msb = 0;
547     poc = poc_lt(&prev_delta_poc_msb, ph->poc, ref_lists, lx, j, max_poc_lsb);
561     if (ph->r->ph_temporal_mvp_enabled_flag &&
567     fc->ref->collocated_ref = refp->ref;
631     prev = &(*prev)->next;
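Line 631 is the pointer-to-pointer idiom that add_listener()/remove_listener() use to walk a singly linked list without special-casing the head. A self-contained sketch on a minimal node type (names are illustrative, not the file's types):

    #include <stddef.h>

    typedef struct Node { struct Node *next; } Node;

    static void append(Node **head, Node *n)
    {
        Node **prev = head;
        while (*prev)
            prev = &(*prev)->next;   /* advance to the NULL tail link */
        n->next = NULL;
        *prev   = n;                 /* works for empty and non-empty lists */
    }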
uint16_t poc_lsb_lt[2][VVC_MAX_REF_ENTRIES]
void * content_ref
If content is reference counted, a RefStruct reference backing content.
uint32_t num_ctus_in_curr_slice
NumCtusInCurrSlice.
static int ff_mutex_init(AVMutex *mutex, const void *attr)
static int add_candidate_ref(VVCContext *s, VVCFrameContext *fc, RefPicList *list, int poc, int ref_flag, uint8_t use_msb)
void ff_vvc_report_frame_finished(VVCFrame *frame)
RefStruct is an API for creating reference-counted objects with minimal overhead.
static int is_progress_done(const FrameProgress *p, const VVCProgressListener *l)
#define VVC_FRAME_FLAG_BUMPING
static int FUNC() ph(CodedBitstreamContext *ctx, RWContext *rw, H266RawPH *current)
This structure describes decoded (raw) audio or video data.
const H266RawSliceHeader * r
RefStruct reference.
uint8_t st_ref_pic_flag[VVC_MAX_REF_ENTRIES]
CodedBitstreamUnitType type
Codec-specific type of this unit.
#define fc(width, name, range_min, range_max)
uint8_t ltrp_in_header_flag
int ff_vvc_slice_rpl(VVCContext *s, VVCFrameContext *fc, SliceContext *sc)
void ff_vvc_report_progress(VVCFrame *frame, const VVCProgress vp, const int y)
Coded bitstream unit structure.
static float win(SuperEqualizerContext *s, float n, int N)
static int ff_mutex_unlock(AVMutex *mutex)
#define AV_CODEC_FLAG_OUTPUT_CORRUPT
Output even those frames that might be corrupted.
uint8_t inter_layer_ref_pic_flag[VVC_MAX_REF_ENTRIES]
H266RefPicListStruct rpl_ref_list[2]
VVCProgressListener * listener[VVC_PROGRESS_LAST]
uint8_t delta_poc_msb_cycle_present_flag[2][VVC_MAX_REF_ENTRIES]
static void * av_refstruct_allocz(size_t size)
Equivalent to av_refstruct_alloc_ext(size, 0, NULL, NULL)
CodedBitstreamUnit * units
Pointer to an array of units of length nb_units_allocated.
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
#define FF_ARRAY_ELEMS(a)
#define AV_FRAME_FLAG_KEY
A flag to mark frames that are keyframes.
VVCRefPic refs[VVC_MAX_REF_ENTRIES]
void av_memcpy_backptr(uint8_t *dst, int back, int cnt)
Overlapping memcpy() implementation.
void ff_vvc_unref_frame(VVCFrameContext *fc, VVCFrame *frame, int flags)
Coded bitstream fragment structure, combining one or more units.
int ff_hwaccel_frame_priv_alloc(AVCodecContext *avctx, void **hwaccel_picture_private)
Allocate a hwaccel frame private data if the provided avctx uses a hwaccel method that needs it.
#define AV_GET_BUFFER_FLAG_REF
The decoder will keep a reference to the frame and may reuse it later.
static VVCFrame * generate_missing_ref(VVCContext *s, VVCFrameContext *fc, int poc)
int ff_thread_get_buffer(AVCodecContext *avctx, AVFrame *f, int flags)
Wrapper around get_buffer() for frame-multithreaded codecs.
static int delta_poc_st(const H266RefPicListStruct *rpls, const int lx, const int i, const VVCSPS *sps)
static void * av_refstruct_alloc_ext(size_t size, unsigned flags, void *opaque, void(*free_cb)(AVRefStructOpaque opaque, void *obj))
A wrapper around av_refstruct_alloc_ext_c() for the common case of a non-const qualified opaque.
void ff_vvc_clear_refs(VVCFrameContext *fc)
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
static void add_listener(VVCProgressListener **prev, VVCProgressListener *l)
uint8_t abs_delta_poc_st[VVC_MAX_REF_ENTRIES]
uint8_t ctb_log2_size_y
CtbLog2SizeY.
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
uint8_t strp_entry_sign_flag[VVC_MAX_REF_ENTRIES]
#define VVC_FRAME_FLAG_OUTPUT
@ AV_PICTURE_TYPE_I
Intra.
static void set_pict_type(AVFrame *frame, const VVCContext *s, const VVCFrameContext *fc)
static VVCFrame * find_ref_idx(VVCContext *s, VVCFrameContext *fc, int poc, uint8_t use_msb)
void(* flush)(AVBSFContext *ctx)
void * av_refstruct_pool_get(AVRefStructPool *pool)
Get an object from the pool, reusing an old one from the pool when available.
uint8_t rpls_poc_lsb_lt[VVC_MAX_REF_ENTRIES]
static int init_slice_rpl(const VVCFrameContext *fc, SliceContext *sc)
int is_scaled
RprConstraintsActiveFlag.
static int ff_mutex_destroy(AVMutex *mutex)
void ff_vvc_flush_dpb(VVCFrameContext *fc)
static void free_progress(AVRefStructOpaque unused, void *obj)
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
#define VVC_FRAME_FLAG_CORRUPT
static int poc_lt(int *prev_delta_poc_msb, const int poc, const H266RefPicLists *ref_lists, const int lx, const int j, const int max_poc_lsb)
#define VVC_FRAME_FLAG_SHORT_REF
static void mark_ref(VVCFrame *frame, int flag)
void avpriv_report_missing_feature(void *avc, const char *msg, ...) av_printf_format(2, 3)
Log a generic warning message about a missing feature.
const uint32_t * ctb_addr_in_curr_slice
CtbAddrInCurrSlice.
static int ff_mutex_lock(AVMutex *mutex)
int ff_vvc_output_frame(VVCContext *s, VVCFrameContext *fc, AVFrame *out, const int no_output_of_prior_pics_flag, int flush)
void av_refstruct_unref(void *objp)
Decrement the reference count of the underlying object and automatically free the object if there are no more references to it.
#define i(width, name, range_min, range_max)
#define AV_CODEC_FLAG2_SHOW_ALL
Show all frames before the first keyframe.
#define AV_FRAME_FLAG_CORRUPT
The frame data may be corrupted, e.g. due to decoding errors.
static FrameProgress * alloc_progress(void)
atomic_int progress[VVC_PROGRESS_LAST]
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
VVCProgressListener * next
#define GDR_IS_RECOVERED(s)
uint16_t delta_poc_msb_cycle_lt[2][VVC_MAX_REF_ENTRIES]
static int FUNC() sps(CodedBitstreamContext *ctx, RWContext *rw, H264RawSPS *current)
static int check_candidate_ref(const VVCFrame *frame, const VVCRefPic *refp)
static VVCProgressListener * remove_listener(VVCProgressListener **prev, VVCProgressListener *l)
const void * av_refstruct_ref_c(const void *obj)
Analog of av_refstruct_ref(), but for constant objects.
@ AV_PICTURE_TYPE_B
Bi-dir predicted.
#define VVC_FRAME_FLAG_LONG_REF
static int ref[MAX_W * MAX_W]
static int ff_cond_signal(AVCond *cond)
int ff_vvc_set_new_ref(VVCContext *s, VVCFrameContext *fc, AVFrame **frame)
const RefPicList * ff_vvc_get_ref_list(const VVCFrameContext *fc, const VVCFrame *ref, int x0, int y0)
int ff_vvc_frame_rpl(VVCContext *s, VVCFrameContext *fc, SliceContext *sc)
@ AV_PICTURE_TYPE_P
Predicted.
static VVCFrame * alloc_frame(VVCContext *s, VVCFrameContext *fc)
static VVCProgressListener * get_done_listener(FrameProgress *p, const VVCProgress vp)
static int ff_cond_destroy(AVCond *cond)
progress_done_fn progress_done
#define flags(name, subs,...)
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
static int ff_cond_init(AVCond *cond, const void *attr)
void ff_vvc_add_progress_listener(VVCFrame *frame, VVCProgressListener *l)
const VVCSPS * sps
RefStruct reference.
int nb_units
Number of units in this fragment.
void ff_vvc_bump_frame(VVCContext *s, VVCFrameContext *fc)
int scale[2]
RefPicScale[].