/* Common helpers shared by the DNN-based filters (dnn_processing, dnn_detect,
 * dnn_classify): option validation, backend/model loading and thin wrappers
 * around the DNNModule interface. */
#include "dnn_filter_common.h"
#include "libavutil/avstring.h"

#define MAX_SUPPORTED_OUTPUTS_NB 4

static char **separate_output_names(const char *expr, const char *val_sep, int *separated_nb)
{
    char *val, **parsed_vals = NULL;
    int val_num = 0;
    if (!expr || !val_sep || !separated_nb) {
        return NULL;
    }

    parsed_vals = av_calloc(MAX_SUPPORTED_OUTPUTS_NB, sizeof(*parsed_vals));
    if (!parsed_vals) {
        return NULL;
    }

    /* split on val_sep, one token per requested output name */
    do {
        val = av_get_token(&expr, val_sep);
        if (val) {
            parsed_vals[val_num] = val;
            val_num++;
        }
        if (*expr) {
            expr++;
        }
    } while (*expr);

    parsed_vals[val_num] = NULL;
    *separated_nb = val_num;
    return parsed_vals;
}

int ff_dnn_init(DnnContext *ctx, DNNFunctionType func_type, AVFilterContext *filter_ctx)
{
    if (!ctx->model_filename) {
        av_log(filter_ctx, AV_LOG_ERROR, "model file for network is not specified\n");
        return AVERROR(EINVAL);
    }
    if (!ctx->model_inputname) {
        av_log(filter_ctx, AV_LOG_ERROR, "input name of the model network is not specified\n");
        return AVERROR(EINVAL);
    }
    ctx->model_outputnames = separate_output_names(ctx->model_outputnames_string, "&", &ctx->nb_outputs);
    if (!ctx->model_outputnames) {
        av_log(filter_ctx, AV_LOG_ERROR, "could not parse model output names\n");
        return AVERROR(EINVAL);
    }
    ctx->dnn_module = ff_get_dnn_module(ctx->backend_type);
    if (!ctx->dnn_module) {
        av_log(filter_ctx, AV_LOG_ERROR, "could not create DNN module for requested backend\n");
        return AVERROR(ENOMEM);
    }
    if (!ctx->dnn_module->load_model) {
        av_log(filter_ctx, AV_LOG_ERROR, "load_model for network is not specified\n");
        return AVERROR(EINVAL);
    }
    ctx->model = (ctx->dnn_module->load_model)(ctx->model_filename, func_type, ctx->backend_options, filter_ctx);
    if (!ctx->model) {
        av_log(filter_ctx, AV_LOG_ERROR, "could not load DNN model\n");
        return AVERROR(EINVAL);
    }
    return 0;
}

int ff_dnn_set_frame_proc(DnnContext *ctx, FramePrePostProc pre_proc, FramePrePostProc post_proc)
{
    ctx->model->frame_pre_proc = pre_proc;
    ctx->model->frame_post_proc = post_proc;
    return 0;
}

int ff_dnn_set_detect_post_proc(DnnContext *ctx, DetectPostProc post_proc)
{
    ctx->model->detect_post_proc = post_proc;
    return 0;
}

int ff_dnn_set_classify_post_proc(DnnContext *ctx, ClassifyPostProc post_proc)
{
    ctx->model->classify_post_proc = post_proc;
    return 0;
}

DNNReturnType ff_dnn_get_input(DnnContext *ctx, DNNData *input)
{
    return ctx->model->get_input(ctx->model->model, input, ctx->model_inputname);
}

DNNReturnType ff_dnn_get_output(DnnContext *ctx, int input_width, int input_height, int *output_width, int *output_height)
{
    return ctx->model->get_output(ctx->model->model, ctx->model_inputname, input_width, input_height,
                                  (const char *)ctx->model_outputnames[0], output_width, output_height);
}

DNNReturnType ff_dnn_execute_model(DnnContext *ctx, AVFrame *in_frame, AVFrame *out_frame)
{
    DNNExecBaseParams exec_params = {
        .input_name     = ctx->model_inputname,
        .output_names   = (const char **)ctx->model_outputnames,
        .nb_output      = ctx->nb_outputs,
        .in_frame       = in_frame,
        .out_frame      = out_frame,
    };
    return (ctx->dnn_module->execute_model)(ctx->model, &exec_params);
}

DNNReturnType ff_dnn_execute_model_classification(DnnContext *ctx, AVFrame *in_frame, AVFrame *out_frame, const char *target)
{
    DNNExecClassificationParams class_params = {
        {
            .input_name     = ctx->model_inputname,
            .output_names   = (const char **)ctx->model_outputnames,
            .nb_output      = ctx->nb_outputs,
            .in_frame       = in_frame,
            .out_frame      = out_frame,
        },
        .target = target,
    };
    return (ctx->dnn_module->execute_model)(ctx->model, &class_params.base);
}

DNNAsyncStatusType ff_dnn_get_result(DnnContext *ctx, AVFrame **in_frame, AVFrame **out_frame)
{
    return (ctx->dnn_module->get_result)(ctx->model, in_frame, out_frame);
}

DNNReturnType ff_dnn_flush(DnnContext *ctx)
{
    return (ctx->dnn_module->flush)(ctx->model);
}

void ff_dnn_uninit(DnnContext *ctx)
{
    if (ctx->dnn_module) {
        (ctx->dnn_module->free_model)(&ctx->model);
        av_freep(&ctx->dnn_module);
    }
}
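For orientation, a condensed sketch of how a DNN-based filter typically drives these helpers. The MyDnnFilterContext type and the my_* functions are hypothetical, loosely modeled on the existing dnn_processing/dnn_detect filters; only the DnnContext fields and the ff_dnn_* calls come from this file.

/* Hypothetical filter private context embedding the shared DnnContext. */
typedef struct MyDnnFilterContext {
    const AVClass *class;
    DnnContext dnnctx;
} MyDnnFilterContext;

static int my_init(AVFilterContext *filter_ctx)
{
    MyDnnFilterContext *s = filter_ctx->priv;
    /* validate the model/input/output options, pick the backend module,
     * and load the model */
    return ff_dnn_init(&s->dnnctx, DFT_PROCESS_FRAME, filter_ctx);
}

static int my_run(AVFilterContext *filter_ctx, AVFrame *in, AVFrame *out)
{
    MyDnnFilterContext *s = filter_ctx->priv;
    AVFrame *done_in = NULL, *done_out = NULL;

    /* queue inference on the loaded model ... */
    if (ff_dnn_execute_model(&s->dnnctx, in, out) != DNN_SUCCESS)
        return AVERROR(EIO);

    /* ... then drain whatever the backend has already finished */
    while (ff_dnn_get_result(&s->dnnctx, &done_in, &done_out) == DAST_SUCCESS) {
        /* push done_out downstream, release done_in as appropriate */
    }
    return 0;
}

static void my_uninit(AVFilterContext *filter_ctx)
{
    MyDnnFilterContext *s = filter_ctx->priv;
    ff_dnn_uninit(&s->dnnctx);  /* frees the model and the backend module */
}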
#define MAX_SUPPORTED_OUTPUTS_NB
The word "frame" indicates either a video frame or a group of audio samples, as stored in an AVFrame structure. Format negotiation: query_formats should set, for each input and each output link, the list of supported formats. For video that means the pixel format; for audio that means the channel layout, sample format and sample rate. The lists are not plain lists, they are references to shared objects: when the negotiation mechanism computes the intersection of the formats supported at each end of a link, all references to both lists are replaced with a reference to the intersection, and when a single format is eventually chosen for a link amongst the remaining ones, all references to the list are updated. That means that if a filter requires that its input and output have the same format amongst a supported list, all it has to do is use a reference to the same list of formats. query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism to try again later; that can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. (Frame references, ownership and permissions are covered separately.)
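A minimal sketch of the shared-list idiom described above, using the classic query_formats callback together with ff_make_format_list() and ff_set_common_formats() from libavfilter; the particular pixel formats are only illustrative.

static int query_formats(AVFilterContext *ctx)
{
    /* one AV_PIX_FMT_NONE-terminated list of pixel formats this filter accepts */
    static const enum AVPixelFormat pix_fmts[] = {
        AV_PIX_FMT_RGB24, AV_PIX_FMT_BGR24, AV_PIX_FMT_GRAY8, AV_PIX_FMT_NONE
    };
    /* attach the same AVFilterFormats reference to every input and output link,
     * so input and output are forced to negotiate the same pixel format */
    return ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
}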
static FilteringContext * filter_ctx
DNNReturnType ff_dnn_get_output(DnnContext *ctx, int input_width, int input_height, int *output_width, int *output_height)
char * av_get_token(const char **buf, const char *term)
Unescape the given string until a non escaped terminating char, and return the token corresponding to the unescaped string (a usage sketch follows at the end of this reference list).
AVFrame: This structure describes decoded (raw) audio or video data.
int(* DetectPostProc)(AVFrame *frame, DNNData *output, uint32_t nb, AVFilterContext *filter_ctx)
static char ** separate_output_names(const char *expr, const char *val_sep, int *separated_nb)
DNNReturnType ff_dnn_get_input(DnnContext *ctx, DNNData *input)
static double val(void *priv, double ch)
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
int ff_dnn_set_detect_post_proc(DnnContext *ctx, DetectPostProc post_proc)
DNNAsyncStatusType ff_dnn_get_result(DnnContext *ctx, AVFrame **in_frame, AVFrame **out_frame)
int(* ClassifyPostProc)(AVFrame *frame, DNNData *output, uint32_t bbox_index, AVFilterContext *filter_ctx)
DNNReturnType ff_dnn_execute_model_classification(DnnContext *ctx, AVFrame *in_frame, AVFrame *out_frame, const char *target)
Test the status of outputs and forward it to the corresponding inputs, and return FFERROR_NOT_READY when no further progress can be made. If the filter stores internally one or a few frames for some input, it can consider them to be part of the FIFO and delay acknowledging a status change accordingly.
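The forwarding steps above are usually written with the helpers from libavfilter/filters.h; a generic single-input, single-output activate() sketch (not code from this file) looks like:

static int activate(AVFilterContext *ctx)
{
    AVFilterLink *inlink  = ctx->inputs[0];
    AVFilterLink *outlink = ctx->outputs[0];
    AVFrame *frame;
    int ret;

    /* forward a status (e.g. EOF) set on the output back to the input */
    FF_FILTER_FORWARD_STATUS_BACK(outlink, inlink);

    /* process one frame if one is queued on the input */
    ret = ff_inlink_consume_frame(inlink, &frame);
    if (ret < 0)
        return ret;
    if (ret > 0)
        return ff_filter_frame(outlink, frame);

    /* otherwise forward input status / output demand and yield */
    FF_FILTER_FORWARD_STATUS(inlink, outlink);
    FF_FILTER_FORWARD_WANTED(outlink, inlink);
    return FFERROR_NOT_READY;
}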
int ff_dnn_set_frame_proc(DnnContext *ctx, FramePrePostProc pre_proc, FramePrePostProc post_proc)
void * av_calloc(size_t nmemb, size_t size)
DNNModule * ff_get_dnn_module(DNNBackendType backend_type)
int(* FramePrePostProc)(AVFrame *frame, DNNData *model, AVFilterContext *filter_ctx)
DNNReturnType ff_dnn_execute_model(DnnContext *ctx, AVFrame *in_frame, AVFrame *out_frame)
int ff_dnn_init(DnnContext *ctx, DNNFunctionType func_type, AVFilterContext *filter_ctx)
DNNReturnType ff_dnn_flush(DnnContext *ctx)
void ff_dnn_uninit(DnnContext *ctx)
int ff_dnn_set_classify_post_proc(DnnContext *ctx, ClassifyPostProc post_proc)
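As referenced at the av_get_token() entry above, a small standalone sketch of how an output-names option such as "conv_out&prob_out" is split into individual names, the same way separate_output_names() does it; the sample string and the program wrapper are illustrative.

#include <stdio.h>
#include "libavutil/avstring.h"
#include "libavutil/mem.h"

int main(void)
{
    const char *p = "conv_out&prob_out";   /* e.g. the value of an output-names option */
    while (*p) {
        char *tok = av_get_token(&p, "&"); /* p is left pointing at the next non-escaped '&' */
        if (!tok)
            return 1;
        if (*p)
            p++;                           /* step over the separator itself */
        printf("output name: %s\n", tok);
        av_free(tok);
    }
    return 0;
}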