FFmpeg
amfdec.c
Go to the documentation of this file.
1 /*
2  * This file is part of FFmpeg.
3  *
4  * FFmpeg is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU Lesser General Public
6  * License as published by the Free Software Foundation; either
7  * version 2.1 of the License, or (at your option) any later version.
8  *
9  * FFmpeg is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12  * Lesser General Public License for more details.
13  *
14  * You should have received a copy of the GNU Lesser General Public
15  * License along with FFmpeg; if not, write to the Free Software
16  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
17  */
18 
21 #include "amfdec.h"
22 #include "codec_internal.h"
23 #include "hwconfig.h"
24 #include "libavutil/time.h"
25 #include "decode.h"
26 #include "decode_bsf.h"
28 
29 #if CONFIG_D3D11VA
31 #endif
32 #if CONFIG_DXVA2
33 #define COBJMACROS
35 #endif
36 
37 #ifdef _WIN32
38 #include "compat/w32dlfcn.h"
39 #else
40 #include <dlfcn.h>
41 #endif
42 //will be in public headers soon
43 #define AMF_VIDEO_DECODER_OUTPUT_FORMAT L"OutputDecodeFormat"
44 
45 static const AVCodecHWConfigInternal *const amf_hw_configs[] = {
46  &(const AVCodecHWConfigInternal) {
47  .public = {
51  .device_type = AV_HWDEVICE_TYPE_AMF,
52  },
53  .hwaccel = NULL,
54  },
55  NULL
56 };
57 
58 static void amf_free_amfsurface(void *opaque, uint8_t *data)
59 {
60  AMFSurface *surface = (AMFSurface*)(data);
61  surface->pVtbl->Release(surface);
62 }
63 
65 {
66  if( AMF_GET_MAJOR_VERSION(amf_device_ctx->version) <= 1 &&
67  AMF_GET_MINOR_VERSION(amf_device_ctx->version) <= 4 &&
68  AMF_GET_SUBMINOR_VERSION(amf_device_ctx->version) < 36)
69  return 1;
70  return 0;
71 }
72 
73 static int amf_init_decoder(AVCodecContext *avctx)
74 {
76  AVHWDeviceContext *hw_device_ctx = (AVHWDeviceContext*)ctx->device_ctx_ref->data;
77  AVAMFDeviceContext *amf_device_ctx = (AVAMFDeviceContext*)hw_device_ctx->hwctx;
78  const wchar_t *codec_id = NULL;
79  AMF_RESULT res;
80  AMFBuffer *buffer;
81  amf_int64 color_profile;
82  int pool_size = 36;
83  // way-around for older drivers that don't support dynamic bitness detection -
84  // define HEVC and VP9 10-bit based on container info
85  int no_bitness_detect = amf_legacy_driver_no_bitness_detect(amf_device_ctx);
86 
87  ctx->drain = 0;
88  ctx->resolution_changed = 0;
89 
90  switch (avctx->codec->id) {
91  case AV_CODEC_ID_H264:
92  codec_id = AMFVideoDecoderUVD_H264_AVC;
93  break;
94  case AV_CODEC_ID_HEVC: {
95  codec_id = AMFVideoDecoderHW_H265_HEVC;
96  if(no_bitness_detect){
97  if(avctx->pix_fmt == AV_PIX_FMT_YUV420P10)
98  codec_id = AMFVideoDecoderHW_H265_MAIN10;
99  }
100  } break;
101  case AV_CODEC_ID_VP9: {
102  codec_id = AMFVideoDecoderHW_VP9;
103  if(no_bitness_detect){
104  if(avctx->pix_fmt == AV_PIX_FMT_YUV420P10)
105  codec_id = AMFVideoDecoderHW_VP9_10BIT;
106  }
107  } break;
108  case AV_CODEC_ID_AV1:
109  codec_id = AMFVideoDecoderHW_AV1;
110  break;
111  default:
112  break;
113  }
114  AMF_RETURN_IF_FALSE(ctx, codec_id != NULL, AVERROR(EINVAL), "Codec %d is not supported\n", avctx->codec->id);
115 
116  res = amf_device_ctx->factory->pVtbl->CreateComponent(amf_device_ctx->factory, amf_device_ctx->context, codec_id, &ctx->decoder);
117  AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR_ENCODER_NOT_FOUND, "CreateComponent(%ls) failed with error %d\n", codec_id, res);
118 
119  // Color Metadata
120  /// Color Range (Support for older Drivers)
121  if (avctx->color_range == AVCOL_RANGE_JPEG) {
122  AMF_ASSIGN_PROPERTY_BOOL(res, ctx->decoder, AMF_VIDEO_DECODER_FULL_RANGE_COLOR, 1);
123  } else if (avctx->color_range != AVCOL_RANGE_UNSPECIFIED) {
124  AMF_ASSIGN_PROPERTY_BOOL(res, ctx->decoder, AMF_VIDEO_DECODER_FULL_RANGE_COLOR, 0);
125  }
126  color_profile = av_amf_get_color_profile(avctx->color_range, avctx->colorspace);
127  if (color_profile != AMF_VIDEO_CONVERTER_COLOR_PROFILE_UNKNOWN)
128  AMF_ASSIGN_PROPERTY_INT64(res, ctx->decoder, AMF_VIDEO_DECODER_COLOR_PROFILE, color_profile);
129  if (avctx->color_trc != AVCOL_TRC_UNSPECIFIED)
130  AMF_ASSIGN_PROPERTY_INT64(res, ctx->decoder, AMF_VIDEO_DECODER_COLOR_TRANSFER_CHARACTERISTIC, (amf_int64)avctx->color_trc);
131 
133  AMF_ASSIGN_PROPERTY_INT64(res, ctx->decoder, AMF_VIDEO_DECODER_COLOR_PRIMARIES, (amf_int64)avctx->color_primaries);
134 
135  if (ctx->timestamp_mode != -1)
136  AMF_ASSIGN_PROPERTY_INT64(res, ctx->decoder, AMF_TIMESTAMP_MODE, ctx->timestamp_mode);
137  if (ctx->decoder_mode != -1)
138  AMF_ASSIGN_PROPERTY_INT64(res, ctx->decoder, AMF_VIDEO_DECODER_REORDER_MODE, ctx->decoder_mode);
139  if (ctx->dpb_size != -1)
140  AMF_ASSIGN_PROPERTY_INT64(res, ctx->decoder, AMF_VIDEO_DECODER_DPB_SIZE, ctx->dpb_size);
141  if (ctx->lowlatency != -1)
142  AMF_ASSIGN_PROPERTY_INT64(res, ctx->decoder, AMF_VIDEO_DECODER_LOW_LATENCY, ctx->lowlatency);
143  if (ctx->smart_access_video != -1) {
144  AMF_ASSIGN_PROPERTY_INT64(res, ctx->decoder, AMF_VIDEO_DECODER_ENABLE_SMART_ACCESS_VIDEO, ctx->smart_access_video != 0);
145  if (res != AMF_OK) {
146  av_log(avctx, AV_LOG_ERROR, "The Smart Access Video is not supported by AMF decoder.\n");
147  return AVERROR(EINVAL);
148  } else {
149  av_log(avctx, AV_LOG_INFO, "The Smart Access Video (%d) is set.\n", ctx->smart_access_video);
150  // Set low latency mode if Smart Access Video is enabled
151  if (ctx->smart_access_video != 0) {
152  AMF_ASSIGN_PROPERTY_INT64(res, ctx->decoder, AMF_VIDEO_DECODER_LOW_LATENCY, true);
153  av_log(avctx, AV_LOG_INFO, "The Smart Access Video set low latency mode for decoder.\n");
154  }
155  }
156  }
157  if (ctx->skip_transfer_sav != -1)
158  AMF_ASSIGN_PROPERTY_INT64(res, ctx->decoder, AMF_VIDEO_DECODER_SKIP_TRANSFER_SMART_ACCESS_VIDEO, ctx->skip_transfer_sav);
159 
160  if (ctx->copy_output != -1)
161  AMF_ASSIGN_PROPERTY_INT64(res, ctx->decoder, AMF_VIDEO_DECODER_SURFACE_COPY, ctx->copy_output);
162 
163  if (avctx->extradata_size) {
164  const uint8_t *extradata;
165  int extradata_size;
166  ff_decode_get_extradata(avctx, &extradata, &extradata_size);
167  res = amf_device_ctx->context->pVtbl->AllocBuffer(amf_device_ctx->context, AMF_MEMORY_HOST, extradata_size, &buffer);
168  if (res == AMF_OK) {
169  memcpy(buffer->pVtbl->GetNative(buffer), extradata, extradata_size);
170  AMF_ASSIGN_PROPERTY_INTERFACE(res,ctx->decoder, AMF_VIDEO_DECODER_EXTRADATA, buffer);
171  buffer->pVtbl->Release(buffer);
172  buffer = NULL;
173  }
174  }
175  if (ctx->surface_pool_size == -1) {
176  ctx->surface_pool_size = pool_size;
177  if (avctx->extra_hw_frames > 0)
178  ctx->surface_pool_size += avctx->extra_hw_frames;
179  if (avctx->active_thread_type & FF_THREAD_FRAME)
180  ctx->surface_pool_size += avctx->thread_count;
181  }
182 
183  //at the moment, there is such a restriction in AMF.
184  //when it is possible, I will remove this code
185  if (ctx->surface_pool_size > 100)
186  ctx->surface_pool_size = 100;
187 
188  AMF_ASSIGN_PROPERTY_INT64(res, ctx->decoder, AMF_VIDEO_DECODER_SURFACE_POOL_SIZE, ctx->surface_pool_size);
189  res = ctx->decoder->pVtbl->Init(ctx->decoder, AMF_SURFACE_UNKNOWN, avctx->width, avctx->height);
190  if (res != AMF_OK) {
191  av_log(avctx, AV_LOG_ERROR, "Decoder initialization failed with error %d\n", res);
192  return AVERROR(EINVAL);
193  }
194  return 0;
195 }
196 
198 {
199  AMFDecoderContext *ctx = avctx->priv_data;
200 
201  if (ctx->decoder) {
202  ctx->decoder->pVtbl->Terminate(ctx->decoder);
203  ctx->decoder->pVtbl->Release(ctx->decoder);
204  ctx->decoder = NULL;
205  }
206 
207  av_buffer_unref(&ctx->device_ctx_ref);
208  av_packet_free(&ctx->in_pkt);
209 
210  return 0;
211 }
212 
213 static int amf_init_frames_context(AVCodecContext *avctx, int sw_format, int new_width, int new_height)
214 {
215  int ret;
216  AVHWDeviceContext *hwdev_ctx;
217  AVHWFramesContext *hwframes_ctx;
219  if (!avctx->hw_frames_ctx || !avctx->hw_device_ctx)
220  return 0;
221  hwdev_ctx = (AVHWDeviceContext*)avctx->hw_device_ctx->data;
222  hwframes_ctx = (AVHWFramesContext*)avctx->hw_frames_ctx->data;
223  ctx = avctx->priv_data;
224 
225  if (hwdev_ctx->type != AV_HWDEVICE_TYPE_AMF)
226  return 0;
227 
228  hwframes_ctx->width = new_width;
229  hwframes_ctx->height = new_height;
230  hwframes_ctx->format = AV_PIX_FMT_AMF_SURFACE;
231  hwframes_ctx->sw_format = sw_format;
232  hwframes_ctx->initial_pool_size = ctx->surface_pool_size + 8;
233 
235  if (ret < 0) {
236  av_log(NULL, AV_LOG_ERROR, "Error initializing a AMF frame pool\n");
238  return ret;
239  }
240  return 0;
241 }
242 
243 static int amf_decode_init(AVCodecContext *avctx)
244 {
245  AMFDecoderContext *ctx = avctx->priv_data;
246  int ret;
247  ctx->in_pkt = av_packet_alloc();
248  if (!ctx->in_pkt)
249  return AVERROR(ENOMEM);
250 
251  if (avctx->hw_device_ctx) {
252  AVHWDeviceContext *hwdev_ctx;
253  hwdev_ctx = (AVHWDeviceContext*)avctx->hw_device_ctx->data;
254  if (hwdev_ctx->type == AV_HWDEVICE_TYPE_AMF)
255  {
256  ctx->device_ctx_ref = av_buffer_ref(avctx->hw_device_ctx);
257  if (!avctx->hw_frames_ctx) {
259  AMF_GOTO_FAIL_IF_FALSE(avctx, !!avctx->hw_frames_ctx, AVERROR(ENOMEM), "av_hwframe_ctx_alloc failed\n");
260  }
261  } else {
263  AMF_GOTO_FAIL_IF_FALSE(avctx, ret == 0, ret, "Failed to create derived AMF device context: %s\n", av_err2str(ret));
264  }
265  } else {
266  ret = av_hwdevice_ctx_create(&ctx->device_ctx_ref, AV_HWDEVICE_TYPE_AMF, NULL, NULL, 0);
267  AMF_GOTO_FAIL_IF_FALSE(avctx, ret == 0, ret, "Failed to create hardware device context (AMF) : %s\n", av_err2str(ret));
268  }
269  if ((ret = amf_init_decoder(avctx)) == 0) {
270  AVHWDeviceContext *hw_device_ctx = (AVHWDeviceContext*)ctx->device_ctx_ref->data;
271  AVAMFDeviceContext *amf_device_ctx = (AVAMFDeviceContext*)hw_device_ctx->hwctx;
272  enum AVPixelFormat surf_pix_fmt = AV_PIX_FMT_NONE;
273 
274  if(amf_legacy_driver_no_bitness_detect(amf_device_ctx)){
275  // if bitness detection is not supported in legacy driver use format from container
276  switch (avctx->pix_fmt) {
277  case AV_PIX_FMT_YUV420P:
278  case AV_PIX_FMT_YUVJ420P:
279  surf_pix_fmt = AV_PIX_FMT_NV12; break;
281  surf_pix_fmt = AV_PIX_FMT_P010; break;
282  }
283  }else{
284  AMFVariantStruct format_var = {0};
285 
286  ret = ctx->decoder->pVtbl->GetProperty(ctx->decoder, AMF_VIDEO_DECODER_OUTPUT_FORMAT, &format_var);
287  AMF_GOTO_FAIL_IF_FALSE(avctx, ret == AMF_OK, AVERROR(EINVAL), "Failed to get output format (AMF) : %d\n", ret);
288 
289  surf_pix_fmt = av_amf_to_av_format(format_var.int64Value);
290  }
291  if(avctx->hw_frames_ctx)
292  {
293  // this values should be set for avcodec_open2
294  // will be updated after header decoded if not true.
295  if(surf_pix_fmt == AV_PIX_FMT_NONE)
296  surf_pix_fmt = AV_PIX_FMT_NV12; // for older drivers
297  if (!avctx->coded_width)
298  avctx->coded_width = 1280;
299  if (!avctx->coded_height)
300  avctx->coded_height = 720;
301  ret = amf_init_frames_context(avctx, surf_pix_fmt, avctx->coded_width, avctx->coded_height);
302  AMF_GOTO_FAIL_IF_FALSE(avctx, ret == 0, ret, "Failed to init frames context (AMF) : %s\n", av_err2str(ret));
303  }
304  else
305  avctx->pix_fmt = surf_pix_fmt;
306 
307  return 0;
308  }
309 fail:
310  amf_decode_close(avctx);
311  return ret;
312 }
313 
314 static AMF_RESULT amf_get_property_buffer(AMFData *object, const wchar_t *name, AMFBuffer **val)
315 {
316  AMF_RESULT res;
317  AMFVariantStruct var;
318  res = AMFVariantInit(&var);
319  if (res == AMF_OK) {
320  res = object->pVtbl->GetProperty(object, name, &var);
321  if (res == AMF_OK) {
322  if (var.type == AMF_VARIANT_INTERFACE) {
323  AMFGuid guid_AMFBuffer = IID_AMFBuffer();
324  AMFInterface *amf_interface = AMFVariantInterface(&var);
325  res = amf_interface->pVtbl->QueryInterface(amf_interface, &guid_AMFBuffer, (void**)val);
326  } else {
327  res = AMF_INVALID_DATA_TYPE;
328  }
329  }
330  AMFVariantClear(&var);
331  }
332  return res;
333 }
334 
335 static int amf_amfsurface_to_avframe(AVCodecContext *avctx, AMFSurface* surface, AVFrame *frame)
336 {
337  AMFVariantStruct var = {0};
338  AMFPlane *plane;
339  int i;
340  int ret;
341  int format_amf;
342 
343  if (avctx->hw_device_ctx && ((AVHWDeviceContext*)avctx->hw_device_ctx->data)->type == AV_HWDEVICE_TYPE_AMF) {
344  // prepare frame similar to ff_get_buffer(avctx, frame, AV_GET_BUFFER_FLAG_REF);
345 
346  ret = ff_decode_frame_props(avctx, frame);
347  if (ret < 0)
348  return ret;
349 
350  avctx->sw_pix_fmt = avctx->pix_fmt;
351 
353  if (ret < 0)
354  return ret;
355  frame->width = avctx->width;
356  frame->height = avctx->height;
357 
358  ////
359  frame->buf[0] = av_buffer_create((uint8_t *)surface, sizeof(surface),
360  amf_free_amfsurface, (void*)avctx,
362  AMF_RETURN_IF_FALSE(avctx, !!frame->buf[0], AVERROR(ENOMEM), "av_buffer_create for amf surface failed.");
363 
364  frame->data[0] = (uint8_t *)surface;
365  frame->format = AV_PIX_FMT_AMF_SURFACE;
366  format_amf = surface->pVtbl->GetFormat(surface);
367  avctx->sw_pix_fmt = av_amf_to_av_format(format_amf);
368  frame->hw_frames_ctx = av_buffer_ref(avctx->hw_frames_ctx);
369  } else {
370  ret = surface->pVtbl->Convert(surface, AMF_MEMORY_HOST);
371  AMF_RETURN_IF_FALSE(avctx, ret == AMF_OK, AVERROR_UNKNOWN, "Convert(amf::AMF_MEMORY_HOST) failed with error %d\n", ret);
372 
373  for (i = 0; i < surface->pVtbl->GetPlanesCount(surface); i++) {
374  plane = surface->pVtbl->GetPlaneAt(surface, i);
375  frame->data[i] = plane->pVtbl->GetNative(plane);
376  frame->linesize[i] = plane->pVtbl->GetHPitch(plane);
377  }
378 
379  frame->buf[0] = av_buffer_create((uint8_t *)surface, sizeof(surface),
380  amf_free_amfsurface, (void*)avctx,
382  AMF_RETURN_IF_FALSE(avctx, !!frame->buf[0], AVERROR(ENOMEM), "av_buffer_create for amf surface failed.");
383 
384  format_amf = surface->pVtbl->GetFormat(surface);
385  frame->format = av_amf_to_av_format(format_amf);
386  }
387 
388  frame->width = avctx->width;
389  frame->height = avctx->height;
390 
391  frame->pts = surface->pVtbl->GetPts(surface);
392 
393  surface->pVtbl->GetProperty(surface, L"FFMPEG:dts", &var);
394  frame->pkt_dts = var.int64Value;
395 
396  frame->duration = surface->pVtbl->GetDuration(surface);
397  if (frame->duration < 0)
398  frame->duration = 0;
399 
400  frame->color_range = avctx->color_range;
401  frame->colorspace = avctx->colorspace;
402  frame->color_trc = avctx->color_trc;
403  frame->color_primaries = avctx->color_primaries;
404 
405  if (frame->color_trc == AVCOL_TRC_SMPTE2084) {
406  AMFBuffer * hdrmeta_buffer = NULL;
407  ret = amf_get_property_buffer((AMFData *)surface, AMF_VIDEO_DECODER_HDR_METADATA, &hdrmeta_buffer);
408  if (hdrmeta_buffer != NULL) {
409  AMFHDRMetadata * hdrmeta = (AMFHDRMetadata*)hdrmeta_buffer->pVtbl->GetNative(hdrmeta_buffer);
410  if (ret != AMF_OK)
411  return ret;
412 
414  if (ret < 0)
415  return ret;
416  }
417  }
418  return 0;
419 }
420 
421 static AMF_RESULT amf_receive_frame(AVCodecContext *avctx, AVFrame *frame)
422 {
423  AMFDecoderContext *ctx = avctx->priv_data;
424  AMF_RESULT ret = AMF_OK;
425  AMFSurface *surface = NULL;
426  AMFData *data_out = NULL;
427 
428  ret = ctx->decoder->pVtbl->QueryOutput(ctx->decoder, &data_out);
429  if (ret != AMF_OK && ret != AMF_REPEAT) {
430  return ret;
431  }
432  if (data_out == NULL) {
433  return AMF_REPEAT;
434  }
435 
436  if (data_out) {
437  AMFGuid guid = IID_AMFSurface();
438  data_out->pVtbl->QueryInterface(data_out, &guid, (void**)&surface); // query for buffer interface
439  data_out->pVtbl->Release(data_out);
440  data_out = NULL;
441  }
442 
443  ret = amf_amfsurface_to_avframe(avctx, surface, frame);
444  AMF_GOTO_FAIL_IF_FALSE(avctx, ret >= 0, AMF_FAIL, "Failed to convert AMFSurface to AVFrame = %d\n", ret);
445  return AMF_OK;
446 fail:
447 
448  if (surface) {
449  surface->pVtbl->Release(surface);
450  surface = NULL;
451  }
452  return ret;
453 }
454 
455 static AMF_RESULT amf_update_buffer_properties(AVCodecContext *avctx, AMFBuffer* buffer, const AVPacket* pkt)
456 {
457  AMF_RESULT res;
458 
459  AMF_RETURN_IF_FALSE(avctx, buffer != NULL, AMF_INVALID_ARG, "update_buffer_properties() - buffer not passed in");
460  AMF_RETURN_IF_FALSE(avctx, pkt != NULL, AMF_INVALID_ARG, "update_buffer_properties() - packet not passed in");
461  buffer->pVtbl->SetPts(buffer, pkt->pts);
462  buffer->pVtbl->SetDuration(buffer, pkt->duration);
463  AMF_ASSIGN_PROPERTY_INT64(res, buffer, L"FFMPEG:dts", pkt->dts);
464  if (res != AMF_OK)
465  av_log(avctx, AV_LOG_VERBOSE, "Failed to assign dts value.");
466  return AMF_OK;
467 }
468 
469 static AMF_RESULT amf_buffer_from_packet(AVCodecContext *avctx, const AVPacket* pkt, AMFBuffer** buffer)
470 {
471  AMFDecoderContext *ctx = avctx->priv_data;
472  AVHWDeviceContext *hw_device_ctx = (AVHWDeviceContext*)ctx->device_ctx_ref->data;
473  AVAMFDeviceContext *amf_device_ctx = (AVAMFDeviceContext *)hw_device_ctx->hwctx;
474  AMFContext *ctxt = amf_device_ctx->context;
475  void *mem;
476  AMF_RESULT err;
477  AMFBuffer *buf = NULL;
478 
479  AMF_RETURN_IF_FALSE(ctxt, pkt != NULL, AMF_INVALID_ARG, "amf_buffer_from_packet() - packet not passed in");
480  AMF_RETURN_IF_FALSE(ctxt, buffer != NULL, AMF_INVALID_ARG, "amf_buffer_from_packet() - buffer pointer not passed in");
481 
482  err = ctxt->pVtbl->AllocBuffer(ctxt, AMF_MEMORY_HOST, pkt->size + AV_INPUT_BUFFER_PADDING_SIZE, buffer);
483  AMF_RETURN_IF_FALSE(ctxt, err == AMF_OK, err, "amf_buffer_from_packet() - failed");
484  buf = *buffer;
485  err = buf->pVtbl->SetSize(buf, pkt->size);
486  AMF_RETURN_IF_FALSE(ctxt, err == AMF_OK, err, "amf_buffer_from_packet() - SetSize failed");
487  // get the memory location and check the buffer was indeed allocated
488  mem = buf->pVtbl->GetNative(buf);
489  AMF_RETURN_IF_FALSE(ctxt, mem != NULL, AMF_INVALID_POINTER, "amf_buffer_from_packet() - GetNative failed");
490 
491  // copy the packet memory and clear data padding
492  memcpy(mem, pkt->data, pkt->size);
493  memset((amf_int8*)(mem)+pkt->size, 0, AV_INPUT_BUFFER_PADDING_SIZE);
494 
495  return amf_update_buffer_properties(avctx, buf, pkt);
496 }
497 
// receive_frame callback: pulls one packet (unless draining or replaying a
// packet held back by a resolution change), submits it to the AMF decoder,
// then polls for one output frame.
//
// State machine: ctx->drain is set either by EOF from the demuxer (external
// drain) or by AMF_RESOLUTION_CHANGED (internal drain, ctx->resolution_changed
// also set). When an internal drain completes, the decoder is re-initialized
// at the new size and the held-back packet in ctx->in_pkt is resubmitted on
// the next call. Returns 0 with a frame, AVERROR(EAGAIN), AVERROR_EOF, or a
// negative error.
static int amf_decode_frame(AVCodecContext *avctx, struct AVFrame *frame)
{
    AMFDecoderContext *ctx = avctx->priv_data;
    AMFBuffer *buf;
    AMF_RESULT res;
    int got_frame = 0;
    AVPacket *avpkt = ctx->in_pkt;

    if (!ctx->decoder)
        return AVERROR(EINVAL);

    // get packet if needed
    if(!ctx->drain){
        if(ctx->resolution_changed)
            // reuse the packet that triggered the resolution change; it was
            // never consumed by SubmitInput()
            ctx->resolution_changed = 0;
        else{
            int ret;
            av_packet_unref(avpkt);
            ret = ff_decode_get_packet(avctx, avpkt);
            if (ret < 0 && ret != AVERROR_EOF)
                return ret;
            if (ret == AVERROR_EOF) {
                //nothing to consume, start external drain
                ctx->decoder->pVtbl->Drain(ctx->decoder);
                ctx->drain = 1;
            }
        }
    }

    if(!ctx->drain){
        // submit frame
        res = amf_buffer_from_packet(avctx, avpkt, &buf);
        AMF_RETURN_IF_FALSE(avctx, res == AMF_OK, 0, "Cannot convert AVPacket to AMFbuffer");
        // busy-wait (100us steps) while the decoder has no free surfaces
        do{
            res = ctx->decoder->pVtbl->SubmitInput(ctx->decoder, (AMFData*) buf);
            if(res == AMF_DECODER_NO_FREE_SURFACES)
            {
                av_usleep(100);
            }
        } while (res == AMF_DECODER_NO_FREE_SURFACES);

        buf->pVtbl->Release(buf);

        if(res == AMF_DECODER_NO_FREE_SURFACES) {
            // input is not consumed, need to QueryOutput and submit again
            av_log(avctx, AV_LOG_VERBOSE, "SubmitInput() returned NO_FREE_SURFACES and came out of loop - should never happen\n");
            res = AMF_OK;
        } else if (res == AMF_RESOLUTION_CHANGED) {
            //input is not consumed, start internal drain
            ctx->decoder->pVtbl->Drain(ctx->decoder);
            ctx->drain = 1;
            // process resolution_changed when internal drain is complete
            ctx->resolution_changed = 1;
            res = AMF_OK;
        } else if (res != AMF_OK && res != AMF_NEED_MORE_INPUT && res != AMF_REPEAT) {
            av_log(avctx, AV_LOG_ERROR, "SubmitInput() returned error %d\n", res);
            return AVERROR(EINVAL);
        }
    }

    res = amf_receive_frame(avctx, frame);
    if (res == AMF_OK)
        got_frame = 1;
    else if (res == AMF_REPEAT)
        // decoder has no output yet
        res = AMF_OK;
    else if (res == AMF_EOF) {
        // drain is complete
        ctx->drain = 0;
        if(ctx->resolution_changed){
            // re-initialze decoder
            AMFVariantStruct size_var = {0};
            AMFVariantStruct format_var = {0};
            res = ctx->decoder->pVtbl->GetProperty(ctx->decoder, AMF_VIDEO_DECODER_CURRENT_SIZE, &size_var);
            if (res != AMF_OK) {
                return AVERROR(EINVAL);
            }

            // adopt the new stream dimensions reported by the decoder
            avctx->width = size_var.sizeValue.width;
            avctx->height = size_var.sizeValue.height;
            avctx->coded_width = size_var.sizeValue.width;
            avctx->coded_height = size_var.sizeValue.height;
            res = ctx->decoder->pVtbl->ReInit(ctx->decoder, avctx->width, avctx->height);
            if (res != AMF_OK) {
                av_log(avctx, AV_LOG_ERROR, "ReInit() returned %d\n", res);
                return AVERROR(EINVAL);
            }
            res = ctx->decoder->pVtbl->GetProperty(ctx->decoder, AMF_VIDEO_DECODER_OUTPUT_FORMAT, &format_var);
            if (res != AMF_OK) {
                return AVERROR(EINVAL);
            }
            // rebuild the frames context for the new size/format
            int ret = amf_init_frames_context(avctx, av_amf_to_av_format(format_var.int64Value), avctx->coded_width, avctx->coded_height);
            if (ret < 0)
                return ret;
        }else
            return AVERROR_EOF;
    } else {
        av_log(avctx, AV_LOG_ERROR, "Unknown result from QueryOutput %d\n", res);
    }
    return got_frame ? 0 : AVERROR(EAGAIN);
}
599 
// Flush callback: discards all queued input/output inside the AMF decoder
// (used on seek). NOTE(review): assumes ctx->decoder is non-NULL, i.e. init
// succeeded - flush is only invoked on an opened codec.
static void amf_decode_flush(AVCodecContext *avctx)
{
    AMFDecoderContext *ctx = avctx->priv_data;
    ctx->decoder->pVtbl->Flush(ctx->decoder);
}
605 
606 #define OFFSET(x) offsetof(AMFDecoderContext, x)
607 #define VD AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_DECODING_PARAM
608 
// AVOptions exposed by all AMF decoders. For the plain integer options a
// default of -1 means "unset": amf_init_decoder() only forwards a property
// to AMF when the user supplied a value, leaving the driver default otherwise.
static const AVOption options[] = {
    // Decoder mode
    { "decoder_mode", "Decoder mode", OFFSET(decoder_mode), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, AMF_VIDEO_DECODER_MODE_LOW_LATENCY, VD, "decoder_mode" },
    { "regular", "DPB delay is based on number of reference frames + 1", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_DECODER_MODE_REGULAR }, 0, 0, VD, "decoder_mode" },
    { "compliant", "DPB delay is based on profile - up to 16", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_DECODER_MODE_COMPLIANT }, 0, 0, VD, "decoder_mode" },
    { "low_latency", "DPB delay is 0", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_DECODER_MODE_LOW_LATENCY }, 0, 0, VD, "decoder_mode" },

    // Timestamp mode (defaults to sorting the PTS list, not -1)
    { "timestamp_mode", "Timestamp mode", OFFSET(timestamp_mode), AV_OPT_TYPE_INT, { .i64 = AMF_TS_SORT }, -1, AMF_TS_DECODE, VD, "timestamp_mode" },
    { "presentation", "Preserve timestamps from input to output", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_TS_PRESENTATION }, 0, 0, VD, "timestamp_mode" },
    { "sort", "Resort PTS list", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_TS_SORT }, 0, 0, VD, "timestamp_mode" },
    { "decode", "Decode order", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_TS_DECODE }, 0, 0, VD, "timestamp_mode" },

    // Reference frame management
    { "surface_pool_size", "Number of surfaces in the decode pool", OFFSET(surface_pool_size), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, INT_MAX, VD, NULL },
    { "dpb_size", "Minimum number of surfaces for reordering", OFFSET(dpb_size), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, 32, VD, NULL },

    // Tri-state switches: -1 unset, 0 off, 1 on
    { "lowlatency", "Low latency", OFFSET(lowlatency), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, 1, VD, NULL },
    { "smart_access_video", "Smart Access Video", OFFSET(smart_access_video), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, 1, VD, NULL },
    { "skip_transfer_sav", "Skip transfer on another GPU when SAV enabled", OFFSET(skip_transfer_sav), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, 1, VD, NULL },
    { "copy_output", "Copy Output", OFFSET(copy_output), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, 1, VD, NULL },

    { NULL }
};
633 
// AVClass shared by all AMF decoder instances; wires up the option table
// above for av_opt_* access and logging.
static const AVClass amf_decode_class = {
    .class_name = "amf",
    .item_name = av_default_item_name,
    .option = options,
    .version = LIBAVUTIL_VERSION_INT,
};
640 
// Stamps out one FFCodec per codec: x is the lowercase codec token used in
// the decoder name ("h264_amf"), X the AV_CODEC_ID_* suffix, and bsf_name an
// optional bitstream filter chain applied to input packets (only H.264 needs
// mp4->annexb conversion; the other formats are consumed as-is).
#define DEFINE_AMF_DECODER(x, X, bsf_name) \
const FFCodec ff_##x##_amf_decoder = { \
    .p.name = #x "_amf", \
    CODEC_LONG_NAME(#X " AMD AMF video decoder"), \
    .priv_data_size = sizeof(AMFDecoderContext), \
    .p.type = AVMEDIA_TYPE_VIDEO, \
    .p.id = AV_CODEC_ID_##X, \
    .init = amf_decode_init, \
    FF_CODEC_RECEIVE_FRAME_CB(amf_decode_frame), \
    .flush = amf_decode_flush, \
    .close = amf_decode_close, \
    .bsfs = bsf_name, \
    .p.capabilities = AV_CODEC_CAP_HARDWARE | AV_CODEC_CAP_DELAY | AV_CODEC_CAP_AVOID_PROBING, \
    .p.priv_class = &amf_decode_class, \
    .hw_configs = amf_hw_configs, \
    .p.wrapper_name = "amf", \
    .caps_internal = FF_CODEC_CAP_NOT_INIT_THREADSAFE, \
}; \

DEFINE_AMF_DECODER(h264, H264, "h264_mp4toannexb")
DEFINE_AMF_DECODER(hevc, HEVC, NULL)
DEFINE_AMF_DECODER(vp9, VP9, NULL)
DEFINE_AMF_DECODER(av1, AV1, NULL)
hwconfig.h
av_packet_unref
void av_packet_unref(AVPacket *pkt)
Wipe the packet.
Definition: packet.c:432
ff_decode_get_packet
int ff_decode_get_packet(AVCodecContext *avctx, AVPacket *pkt)
Called by decoders to get the next packet for decoding.
Definition: decode.c:251
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:71
name
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf default minimum maximum flags name is the option name
Definition: writing_filters.txt:88
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
AVCodecContext::colorspace
enum AVColorSpace colorspace
YUV colorspace type.
Definition: avcodec.h:667
amf_amfsurface_to_avframe
static int amf_amfsurface_to_avframe(AVCodecContext *avctx, AMFSurface *surface, AVFrame *frame)
Definition: amfdec.c:335
AVERROR_EOF
#define AVERROR_EOF
End of file.
Definition: error.h:57
AVBufferRef::data
uint8_t * data
The data buffer.
Definition: buffer.h:90
amf_decode_frame
static int amf_decode_frame(AVCodecContext *avctx, struct AVFrame *frame)
Definition: amfdec.c:498
AVHWFramesContext::format
enum AVPixelFormat format
The pixel format identifying the underlying HW surface type.
Definition: hwcontext.h:200
amf_decode_init
static int amf_decode_init(AVCodecContext *avctx)
Definition: amfdec.c:243
av_amf_attach_hdr_metadata
int av_amf_attach_hdr_metadata(AVFrame *frame, const AMFHDRMetadata *hdrmeta)
Definition: hwcontext_amf.c:255
AMF_VIDEO_DECODER_OUTPUT_FORMAT
#define AMF_VIDEO_DECODER_OUTPUT_FORMAT
Definition: amfdec.c:43
av_hwframe_ctx_init
int av_hwframe_ctx_init(AVBufferRef *ref)
Finalize the context before use.
Definition: hwcontext.c:337
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:427
AVCodecContext::color_trc
enum AVColorTransferCharacteristic color_trc
Color Transfer Characteristic.
Definition: avcodec.h:660
AVCOL_RANGE_JPEG
@ AVCOL_RANGE_JPEG
Full range content.
Definition: pixfmt.h:777
av_hwframe_ctx_alloc
AVBufferRef * av_hwframe_ctx_alloc(AVBufferRef *device_ref_in)
Allocate an AVHWFramesContext tied to a given device context.
Definition: hwcontext.c:263
AV_CODEC_HW_CONFIG_METHOD_HW_FRAMES_CTX
@ AV_CODEC_HW_CONFIG_METHOD_HW_FRAMES_CTX
The codec supports this format via the hw_frames_ctx interface.
Definition: codec.h:311
AVPacket::data
uint8_t * data
Definition: packet.h:588
AVOption
AVOption.
Definition: opt.h:429
AVCOL_TRC_UNSPECIFIED
@ AVCOL_TRC_UNSPECIFIED
Definition: pixfmt.h:669
data
const char data[16]
Definition: mxf.c:149
AV_PIX_FMT_YUV420P10
#define AV_PIX_FMT_YUV420P10
Definition: pixfmt.h:539
AV_LOG_VERBOSE
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:226
AVPacket::duration
int64_t duration
Duration of this packet in AVStream->time_base units, 0 if unknown.
Definition: packet.h:606
av_buffer_ref
AVBufferRef * av_buffer_ref(const AVBufferRef *buf)
Create a new reference to an AVBuffer.
Definition: buffer.c:103
AMF_RETURN_IF_FALSE
#define AMF_RETURN_IF_FALSE(avctx, exp, ret_value,...)
Error handling helper.
Definition: amfenc.h:169
AVERROR_UNKNOWN
#define AVERROR_UNKNOWN
Unknown error, typically from an external library.
Definition: error.h:73
AVHWFramesContext::width
int width
The allocated dimensions of the frames in this pool.
Definition: hwcontext.h:220
av_packet_free
void av_packet_free(AVPacket **pkt)
Free the packet, if the packet is reference counted, it will be unreferenced first.
Definition: packet.c:74
AV_PIX_FMT_AMF_SURFACE
@ AV_PIX_FMT_AMF_SURFACE
HW acceleration through AMF.
Definition: pixfmt.h:477
AVCodecContext::codec
const struct AVCodec * codec
Definition: avcodec.h:448
copy_output
static int copy_output(SANMVideoContext *ctx, int sanm)
Definition: sanm.c:2674
fail
#define fail()
Definition: checkasm.h:221
AVCodecContext::thread_count
int thread_count
thread count is used to decide how many independent tasks should be passed to execute()
Definition: avcodec.h:1573
av_amf_to_av_format
enum AVPixelFormat av_amf_to_av_format(enum AMF_SURFACE_FORMAT fmt)
Definition: hwcontext_amf.c:144
val
static double val(void *priv, double ch)
Definition: aeval.c:77
AVCodecContext::coded_height
int coded_height
Definition: avcodec.h:615
AVHWDeviceContext
This struct aggregates all the (hardware/vendor-specific) "high-level" state, i.e.
Definition: hwcontext.h:63
decode_bsf.h
AVCodecContext::color_primaries
enum AVColorPrimaries color_primaries
Chromaticity coordinates of the source primaries.
Definition: avcodec.h:653
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:210
av_amf_get_color_profile
enum AMF_VIDEO_CONVERTER_COLOR_PROFILE_ENUM av_amf_get_color_profile(enum AVColorRange color_range, enum AVColorSpace color_space)
Definition: hwcontext_amf.c:155
AVHWFramesContext::height
int height
Definition: hwcontext.h:220
AVCodecContext::extradata_size
int extradata_size
Definition: avcodec.h:523
AV_BUFFER_FLAG_READONLY
#define AV_BUFFER_FLAG_READONLY
Always treat the buffer as read-only, even when it has only one reference.
Definition: buffer.h:114
AV_CODEC_ID_VP9
@ AV_CODEC_ID_VP9
Definition: codec_id.h:222
amf_update_buffer_properties
static AMF_RESULT amf_update_buffer_properties(AVCodecContext *avctx, AMFBuffer *buffer, const AVPacket *pkt)
Definition: amfdec.c:455
AV_HWDEVICE_TYPE_AMF
@ AV_HWDEVICE_TYPE_AMF
Definition: hwcontext.h:41
ctx
static AVFormatContext * ctx
Definition: movenc.c:49
decode.h
AMF_GOTO_FAIL_IF_FALSE
#define AMF_GOTO_FAIL_IF_FALSE(avctx, exp, ret_value,...)
Definition: hwcontext_amf_internal.h:34
AVCodecHWConfig::pix_fmt
enum AVPixelFormat pix_fmt
For decoders, a hardware pixel format which that decoder may be able to decode to if suitable hardwar...
Definition: codec.h:339
hwcontext_amf.h
AVAMFDeviceContext::version
int64_t version
version of AMF runtime
Definition: hwcontext_amf.h:40
codec_id
enum AVCodecID codec_id
Definition: vaapi_decode.c:410
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:73
av_usleep
int av_usleep(unsigned usec)
Sleep for a period of time.
Definition: time.c:84
AVCOL_PRI_UNSPECIFIED
@ AVCOL_PRI_UNSPECIFIED
Definition: pixfmt.h:639
AV_CODEC_ID_H264
@ AV_CODEC_ID_H264
Definition: codec_id.h:79
dpb_size
int dpb_size
Definition: h264_levels.c:111
if
if(ret)
Definition: filter_design.txt:179
DEFINE_AMF_DECODER
#define DEFINE_AMF_DECODER(x, X, bsf_name)
Definition: amfdec.c:641
AMFDecoderContext
AMF decoder context.
Definition: amfdec.h:39
LIBAVUTIL_VERSION_INT
#define LIBAVUTIL_VERSION_INT
Definition: version.h:85
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:76
NULL
#define NULL
Definition: coverity.c:32
AVHWFramesContext::sw_format
enum AVPixelFormat sw_format
The pixel format identifying the actual data layout of the hardware frames.
Definition: hwcontext.h:213
AVCodecContext::color_range
enum AVColorRange color_range
MPEG vs JPEG YUV range.
Definition: avcodec.h:677
av_buffer_unref
void av_buffer_unref(AVBufferRef **buf)
Free a given reference and automatically free the buffer if there are no more references to it.
Definition: buffer.c:139
AV_CODEC_ID_AV1
@ AV_CODEC_ID_AV1
Definition: codec_id.h:284
amf_buffer_from_packet
static AMF_RESULT amf_buffer_from_packet(AVCodecContext *avctx, const AVPacket *pkt, AMFBuffer **buffer)
Definition: amfdec.c:469
AV_PIX_FMT_YUVJ420P
@ AV_PIX_FMT_YUVJ420P
planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV420P and setting col...
Definition: pixfmt.h:85
av_default_item_name
const char * av_default_item_name(void *ptr)
Return the context name.
Definition: log.c:242
OFFSET
#define OFFSET(x)
Definition: amfdec.c:606
options
Definition: swscale.c:44
time.h
ff_decode_get_extradata
static void ff_decode_get_extradata(const AVCodecContext *avctx, const uint8_t **extradata, int *extradata_size)
Helper function for decoders that may use a BSF that changes extradata.
Definition: decode_bsf.h:32
AVCOL_RANGE_UNSPECIFIED
@ AVCOL_RANGE_UNSPECIFIED
Definition: pixfmt.h:743
av_buffer_create
AVBufferRef * av_buffer_create(uint8_t *data, size_t size, void(*free)(void *opaque, uint8_t *data), void *opaque, int flags)
Create an AVBuffer from an existing array.
Definition: buffer.c:55
AVCOL_TRC_SMPTE2084
@ AVCOL_TRC_SMPTE2084
SMPTE ST 2084 for 10-, 12-, 14- and 16-bit systems.
Definition: pixfmt.h:683
hwcontext_dxva2.h
AVPacket::size
int size
Definition: packet.h:589
AVCodecContext::extra_hw_frames
int extra_hw_frames
Video decoding only.
Definition: avcodec.h:1510
codec_internal.h
i
#define i(width, name, range_min, range_max)
Definition: cbs_h264.c:63
av_err2str
#define av_err2str(errnum)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: error.h:122
AVAMFDeviceContext
This struct is allocated as AVHWDeviceContext.hwctx.
Definition: hwcontext_amf.h:35
AVCodecHWConfigInternal
Definition: hwconfig.h:25
AVPacket::dts
int64_t dts
Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed.
Definition: packet.h:587
amfdec.h
amf_init_decoder
static int amf_init_decoder(AVCodecContext *avctx)
Definition: amfdec.c:73
av_packet_alloc
AVPacket * av_packet_alloc(void)
Allocate an AVPacket and set its fields to default values.
Definition: packet.c:63
AV_LOG_INFO
#define AV_LOG_INFO
Standard information.
Definition: log.h:221
FF_THREAD_FRAME
#define FF_THREAD_FRAME
Decode more than one frame at once.
Definition: avcodec.h:1584
AVCodec::id
enum AVCodecID id
Definition: codec.h:186
av_hwdevice_ctx_create_derived
int av_hwdevice_ctx_create_derived(AVBufferRef **dst_ref_ptr, enum AVHWDeviceType type, AVBufferRef *src_ref, int flags)
Create a new device of the specified type from an existing device.
Definition: hwcontext.c:718
AVPacket::pts
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will b...
Definition: packet.h:581
amf_init_frames_context
static int amf_init_frames_context(AVCodecContext *avctx, int sw_format, int new_width, int new_height)
Definition: amfdec.c:213
hw_device_ctx
static AVBufferRef * hw_device_ctx
Definition: hw_decode.c:45
VD
#define VD
Definition: amfdec.c:607
amf_legacy_driver_no_bitness_detect
static int amf_legacy_driver_no_bitness_detect(AVAMFDeviceContext *amf_device_ctx)
Definition: amfdec.c:64
AV_CODEC_ID_HEVC
@ AV_CODEC_ID_HEVC
Definition: codec_id.h:228
AVCodecContext::hw_device_ctx
AVBufferRef * hw_device_ctx
A reference to the AVHWDeviceContext describing the device which will be used by a hardware encoder/d...
Definition: avcodec.h:1487
AVCodecContext::height
int height
Definition: avcodec.h:600
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:639
amf_decode_flush
static void amf_decode_flush(AVCodecContext *avctx)
Definition: amfdec.c:600
amf_hw_configs
static const AVCodecHWConfigInternal *const amf_hw_configs[]
Definition: amfdec.c:45
AVCodecContext::hw_frames_ctx
AVBufferRef * hw_frames_ctx
A reference to the AVHWFramesContext describing the input (for encoding) or output (decoding) frames.
Definition: avcodec.h:1465
AVHWFramesContext
This struct describes a set or pool of "hardware" frames (i.e.
Definition: hwcontext.h:118
ret
ret
Definition: filter_design.txt:187
AVHWDeviceContext::type
enum AVHWDeviceType type
This field identifies the underlying API used for hardware access.
Definition: hwcontext.h:75
AV_PIX_FMT_NV12
@ AV_PIX_FMT_NV12
planar YUV 4:2:0, 12bpp, 1 plane for Y and 1 plane for the UV components, which are interleaved (firs...
Definition: pixfmt.h:96
AVClass::class_name
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
Definition: log.h:81
frame
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
Definition: filter_design.txt:265
av_hwdevice_ctx_create
int av_hwdevice_ctx_create(AVBufferRef **pdevice_ref, enum AVHWDeviceType type, const char *device, AVDictionary *opts, int flags)
Open a device of the specified type and create an AVHWDeviceContext for it.
Definition: hwcontext.c:615
AV_INPUT_BUFFER_PADDING_SIZE
#define AV_INPUT_BUFFER_PADDING_SIZE
Definition: defs.h:40
amf_decode_class
static const AVClass amf_decode_class
Definition: amfdec.c:634
ff_decode_frame_props
int ff_decode_frame_props(AVCodecContext *avctx, AVFrame *frame)
Set various frame properties from the codec context / packet data.
Definition: decode.c:1573
AVCodecContext
main external API structure.
Definition: avcodec.h:439
AVCodecContext::active_thread_type
int active_thread_type
Which multithreading methods are in use by the codec.
Definition: avcodec.h:1592
hwcontext_amf_internal.h
amf_decode_close
static int amf_decode_close(AVCodecContext *avctx)
Definition: amfdec.c:197
buffer
the frame and frame reference mechanism is intended to as much as expensive copies of that data while still allowing the filters to produce correct results The data is stored in buffers represented by AVFrame structures Several references can point to the same frame buffer
Definition: filter_design.txt:49
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:72
AV_OPT_TYPE_INT
@ AV_OPT_TYPE_INT
Underlying C type is int.
Definition: opt.h:259
amf_receive_frame
static AMF_RESULT amf_receive_frame(AVCodecContext *avctx, AVFrame *frame)
Definition: amfdec.c:421
L
#define L(x)
Definition: vpx_arith.h:36
AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX
@ AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX
The codec supports this format via the hw_device_ctx interface.
Definition: codec.h:298
AV_PIX_FMT_P010
#define AV_PIX_FMT_P010
Definition: pixfmt.h:602
AVCodecContext::coded_width
int coded_width
Bitstream width / height, may be different from width/height e.g.
Definition: avcodec.h:615
AVHWFramesContext::initial_pool_size
int initial_pool_size
Initial size of the frame pool.
Definition: hwcontext.h:190
AVERROR_ENCODER_NOT_FOUND
#define AVERROR_ENCODER_NOT_FOUND
Encoder not found.
Definition: error.h:56
amf_get_property_buffer
static AMF_RESULT amf_get_property_buffer(AMFData *object, const wchar_t *name, AMFBuffer **val)
Definition: amfdec.c:314
mastering_display_metadata.h
ff_attach_decode_data
int ff_attach_decode_data(AVFrame *frame)
Definition: decode.c:1650
AVPacket
This structure stores compressed data.
Definition: packet.h:565
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:466
AVCodecContext::width
int width
picture width / height.
Definition: avcodec.h:600
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
AVCodecContext::sw_pix_fmt
enum AVPixelFormat sw_pix_fmt
Nominal unaccelerated pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:646
pkt
static AVPacket * pkt
Definition: demux_decode.c:55
AVCodecHWConfigInternal::public
AVCodecHWConfig public
This is the structure which will be returned to the user by avcodec_get_hw_config().
Definition: hwconfig.h:30
amf_free_amfsurface
static void amf_free_amfsurface(void *opaque, uint8_t *data)
Definition: amfdec.c:58
AV_OPT_TYPE_CONST
@ AV_OPT_TYPE_CONST
Special option type for declaring named constants.
Definition: opt.h:299
hwcontext_d3d11va.h
options
static const AVOption options[]
Definition: amfdec.c:609
w32dlfcn.h