FFmpeg
vf_scale_npp.c
1 /*
2  * This file is part of FFmpeg.
3  *
4  * FFmpeg is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU Lesser General Public
6  * License as published by the Free Software Foundation; either
7  * version 2.1 of the License, or (at your option) any later version.
8  *
9  * FFmpeg is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12  * Lesser General Public License for more details.
13  *
14  * You should have received a copy of the GNU Lesser General Public
15  * License along with FFmpeg; if not, write to the Free Software
16  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
17  */
18 
19 /**
20  * @file
21  * scale video filter
22  */
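/*
 * Example invocation (illustrative; assumes an NVDEC/NVENC-capable build):
 * decode on the GPU, keep frames in CUDA memory, scale and convert with
 * scale_npp, then encode with NVENC:
 *
 *   ffmpeg -hwaccel cuda -hwaccel_output_format cuda -i input.mp4 \
 *          -vf scale_npp=w=1280:h=720:format=nv12:interp_algo=lanczos \
 *          -c:v h264_nvenc output.mp4
 */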
23 
24 #include <nppi.h>
25 #include <stdio.h>
26 #include <string.h>
27 
28 #include "libavutil/hwcontext.h"
29 #include "libavutil/hwcontext_cuda_internal.h"
30 #include "libavutil/cuda_check.h"
31 #include "libavutil/internal.h"
32 #include "libavutil/mem.h"
33 #include "libavutil/opt.h"
34 #include "libavutil/parseutils.h"
35 #include "libavutil/eval.h"
36 #include "libavutil/pixdesc.h"
37 
38 #include "avfilter.h"
39 #include "filters.h"
40 #include "formats.h"
41 #include "scale_eval.h"
42 #include "video.h"
43 
44 #define CHECK_CU(x) FF_CUDA_CHECK_DL(ctx, device_hwctx->internal->cuda_dl, x)
45 
46 static const enum AVPixelFormat supported_formats[] = {
47  AV_PIX_FMT_YUV420P,
48  AV_PIX_FMT_NV12,
49  AV_PIX_FMT_YUV444P,
50  AV_PIX_FMT_YUVA420P,
51 };
52 
53 static const enum AVPixelFormat deinterleaved_formats[][2] = {
54  { AV_PIX_FMT_NV12, AV_PIX_FMT_YUV420P },
55 };
56 
57 enum ScaleStage {
58  STAGE_DEINTERLEAVE,
59  STAGE_RESIZE,
60  STAGE_INTERLEAVE,
61  STAGE_NB,
62 };
63 
64 typedef struct NPPScaleStageContext {
65  int stage_needed;
66  enum AVPixelFormat in_fmt;
67  enum AVPixelFormat out_fmt;
68 
69  struct {
70  int width;
71  int height;
72  } planes_in[4], planes_out[4];
73 
74  AVBufferRef *frames_ctx;
75  AVFrame *frame;
76 } NPPScaleStageContext;
77 
78 static const char *const var_names[] = {
79  "in_w", "iw",
80  "in_h", "ih",
81  "out_w", "ow",
82  "out_h", "oh",
83  "a",
84  "sar",
85  "dar",
86  "n",
87  "t",
88  "main_w",
89  "main_h",
90  "main_a",
91  "main_sar",
92  "main_dar", "mdar",
93  "main_n",
94  "main_t",
95  NULL
96 };
97 
98 enum var_name {
99  VAR_IN_W, VAR_IW,
100  VAR_IN_H, VAR_IH,
101  VAR_OUT_W, VAR_OW,
102  VAR_OUT_H, VAR_OH,
103  VAR_A,
104  VAR_SAR,
105  VAR_DAR,
106  VAR_N,
107  VAR_T,
108  VAR_S2R_MAIN_W,
109  VAR_S2R_MAIN_H,
110  VAR_S2R_MAIN_A,
111  VAR_S2R_MAIN_SAR,
112  VAR_S2R_MAIN_DAR, VAR_S2R_MDAR,
113  VAR_S2R_MAIN_N,
114  VAR_S2R_MAIN_T,
115  VARS_NB
116 };
117 
118 enum EvalMode {
119  EVAL_MODE_INIT,
120  EVAL_MODE_FRAME,
121  EVAL_MODE_NB
122 };
123 
124 typedef struct NPPScaleContext {
125  const AVClass *class;
126 
127  NPPScaleStageContext stages[STAGE_NB];
128  AVFrame *tmp_frame;
129  int passthrough;
130 
131  int shift_width, shift_height;
132 
133  /**
134  * New dimensions. Special values are:
135  * 0 = original width/height
136  * -1 = keep original aspect
137  */
138  int w, h;
139 
140  /**
141  * Output sw format. AV_PIX_FMT_NONE for no conversion.
142  */
143  enum AVPixelFormat format;
144 
145  char *w_expr; ///< width expression string
146  char *h_expr; ///< height expression string
147  char *format_str;
148 
149  int force_original_aspect_ratio;
150  int force_divisible_by;
151  int reset_sar;
152 
153  int interp_algo;
154 
155  char* size_str;
156 
157  AVExpr* w_pexpr;
158  AVExpr* h_pexpr;
159 
160  double var_values[VARS_NB];
161 
162  int eval_mode;
163 } NPPScaleContext;
164 
165 const FFFilter ff_vf_scale2ref_npp;
166 #define IS_SCALE2REF(ctx) ((ctx)->filter == &ff_vf_scale2ref_npp.p)
167 
168 static int config_props(AVFilterLink *outlink);
169 
170 static int check_exprs(AVFilterContext* ctx)
171 {
172  NPPScaleContext* scale = ctx->priv;
173  unsigned vars_w[VARS_NB] = {0}, vars_h[VARS_NB] = {0};
174 
175  if (!scale->w_pexpr && !scale->h_pexpr)
176  return AVERROR(EINVAL);
177 
178  if (scale->w_pexpr)
179  av_expr_count_vars(scale->w_pexpr, vars_w, VARS_NB);
180  if (scale->h_pexpr)
181  av_expr_count_vars(scale->h_pexpr, vars_h, VARS_NB);
182 
183  if (vars_w[VAR_OUT_W] || vars_w[VAR_OW]) {
184  av_log(ctx, AV_LOG_ERROR, "Width expression cannot be self-referencing: '%s'.\n", scale->w_expr);
185  return AVERROR(EINVAL);
186  }
187 
188  if (vars_h[VAR_OUT_H] || vars_h[VAR_OH]) {
189  av_log(ctx, AV_LOG_ERROR, "Height expression cannot be self-referencing: '%s'.\n", scale->h_expr);
190  return AVERROR(EINVAL);
191  }
192 
193  if ((vars_w[VAR_OUT_H] || vars_w[VAR_OH]) &&
194  (vars_h[VAR_OUT_W] || vars_h[VAR_OW])) {
195  av_log(ctx, AV_LOG_WARNING, "Circular references detected for width '%s' and height '%s' - possibly invalid.\n", scale->w_expr, scale->h_expr);
196  }
197 
198  if (!IS_SCALE2REF(ctx) &&
199  (vars_w[VAR_S2R_MAIN_W] || vars_h[VAR_S2R_MAIN_W] ||
200  vars_w[VAR_S2R_MAIN_H] || vars_h[VAR_S2R_MAIN_H] ||
201  vars_w[VAR_S2R_MAIN_A] || vars_h[VAR_S2R_MAIN_A] ||
202  vars_w[VAR_S2R_MAIN_SAR] || vars_h[VAR_S2R_MAIN_SAR] ||
203  vars_w[VAR_S2R_MAIN_DAR] || vars_h[VAR_S2R_MAIN_DAR] ||
204  vars_w[VAR_S2R_MDAR] || vars_h[VAR_S2R_MDAR] ||
205  vars_w[VAR_S2R_MAIN_N] || vars_h[VAR_S2R_MAIN_N] ||
206  vars_w[VAR_S2R_MAIN_T] || vars_h[VAR_S2R_MAIN_T])) {
207  av_log(ctx, AV_LOG_ERROR, "Expressions with scale2ref_npp variables are not valid in scale_npp filter.\n");
208  return AVERROR(EINVAL);
209  }
210 
211  if (scale->eval_mode == EVAL_MODE_INIT &&
212  (vars_w[VAR_N] || vars_h[VAR_N] ||
213  vars_w[VAR_T] || vars_h[VAR_T] ||
214  vars_w[VAR_S2R_MAIN_N] || vars_h[VAR_S2R_MAIN_N] ||
215  vars_w[VAR_S2R_MAIN_T] || vars_h[VAR_S2R_MAIN_T])) {
216  av_log(ctx, AV_LOG_ERROR, "Expressions with frame variables 'n', 't', are not valid in init eval_mode.\n");
217  return AVERROR(EINVAL);
218  }
219 
220  return 0;
221 }
222 
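/*
 * (Re)parse a width/height expression. The previous option string and parsed
 * expression are kept so that, if parsing or validation fails, the old values
 * can be restored and the error reported without changing filter state.
 */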
223 static int nppscale_parse_expr(AVFilterContext* ctx, char* str_expr,
224  AVExpr** pexpr_ptr, const char* var,
225  const char* args)
226 {
227  NPPScaleContext* scale = ctx->priv;
228  int ret, is_inited = 0;
229  char* old_str_expr = NULL;
230  AVExpr* old_pexpr = NULL;
231 
232  if (str_expr) {
233  old_str_expr = av_strdup(str_expr);
234  if (!old_str_expr)
235  return AVERROR(ENOMEM);
236  av_opt_set(scale, var, args, 0);
237  }
238 
239  if (*pexpr_ptr) {
240  old_pexpr = *pexpr_ptr;
241  *pexpr_ptr = NULL;
242  is_inited = 1;
243  }
244 
245  ret = av_expr_parse(pexpr_ptr, args, var_names, NULL, NULL, NULL, NULL, 0,
246  ctx);
247  if (ret < 0) {
248  av_log(ctx, AV_LOG_ERROR, "Cannot parse expression for %s: '%s'\n", var,
249  args);
250  goto revert;
251  }
252 
253  ret = check_exprs(ctx);
254  if (ret < 0)
255  goto revert;
256 
257  if (is_inited && (ret = config_props(ctx->outputs[0])) < 0)
258  goto revert;
259 
260  av_expr_free(old_pexpr);
261  old_pexpr = NULL;
262  av_freep(&old_str_expr);
263 
264  return 0;
265 
266 revert:
267  av_expr_free(*pexpr_ptr);
268  *pexpr_ptr = NULL;
269  if (old_str_expr) {
270  av_opt_set(scale, var, old_str_expr, 0);
271  av_free(old_str_expr);
272  }
273  if (old_pexpr)
274  *pexpr_ptr = old_pexpr;
275 
276  return ret;
277 }
278 
279 static av_cold int nppscale_init(AVFilterContext* ctx)
280 {
281  NPPScaleContext* scale = ctx->priv;
282  int i, ret;
283 
284  av_log(ctx, AV_LOG_WARNING, "The libnpp based filters are deprecated.\n");
285 
286  if (!strcmp(scale->format_str, "same")) {
287  scale->format = AV_PIX_FMT_NONE;
288  } else {
289  scale->format = av_get_pix_fmt(scale->format_str);
290  if (scale->format == AV_PIX_FMT_NONE) {
291  av_log(ctx, AV_LOG_ERROR, "Unrecognized pixel format: %s\n", scale->format_str);
292  return AVERROR(EINVAL);
293  }
294  }
295 
296  if (scale->size_str && (scale->w_expr || scale->h_expr)) {
298  "Size and width/height exprs cannot be set at the same time.\n");
299  return AVERROR(EINVAL);
300  }
301 
302  if (scale->w_expr && !scale->h_expr)
303  FFSWAP(char*, scale->w_expr, scale->size_str);
304 
305  if (scale->size_str) {
306  char buf[32];
307  ret = av_parse_video_size(&scale->w, &scale->h, scale->size_str);
308  if (0 > ret) {
309  av_log(ctx, AV_LOG_ERROR, "Invalid size '%s'\n", scale->size_str);
310  return ret;
311  }
312 
313  snprintf(buf, sizeof(buf) - 1, "%d", scale->w);
314  ret = av_opt_set(scale, "w", buf, 0);
315  if (ret < 0)
316  return ret;
317 
318  snprintf(buf, sizeof(buf) - 1, "%d", scale->h);
319  ret = av_opt_set(scale, "h", buf, 0);
320  if (ret < 0)
321  return ret;
322  }
323 
324  if (!scale->w_expr) {
325  ret = av_opt_set(scale, "w", "iw", 0);
326  if (ret < 0)
327  return ret;
328  }
329 
330  if (!scale->h_expr) {
331  ret = av_opt_set(scale, "h", "ih", 0);
332  if (ret < 0)
333  return ret;
334  }
335 
336  ret = nppscale_parse_expr(ctx, NULL, &scale->w_pexpr, "width", scale->w_expr);
337  if (ret < 0)
338  return ret;
339 
340  ret = nppscale_parse_expr(ctx, NULL, &scale->h_pexpr, "height", scale->h_expr);
341  if (ret < 0)
342  return ret;
343 
344  for (i = 0; i < FF_ARRAY_ELEMS(scale->stages); i++) {
345  scale->stages[i].frame = av_frame_alloc();
346  if (!scale->stages[i].frame)
347  return AVERROR(ENOMEM);
348  }
349  scale->tmp_frame = av_frame_alloc();
350  if (!scale->tmp_frame)
351  return AVERROR(ENOMEM);
352 
353  return 0;
354 }
355 
356 static int nppscale_eval_dimensions(AVFilterContext* ctx)
357 {
358  NPPScaleContext* scale = ctx->priv;
359  const char scale2ref = IS_SCALE2REF(ctx);
360  const AVFilterLink* inlink = ctx->inputs[scale2ref ? 1 : 0];
361  char* expr;
362  int eval_w, eval_h;
363  int ret;
364  double res;
365 
366  scale->var_values[VAR_IN_W] = scale->var_values[VAR_IW] = inlink->w;
367  scale->var_values[VAR_IN_H] = scale->var_values[VAR_IH] = inlink->h;
368  scale->var_values[VAR_OUT_W] = scale->var_values[VAR_OW] = NAN;
369  scale->var_values[VAR_OUT_H] = scale->var_values[VAR_OH] = NAN;
370  scale->var_values[VAR_A] = (double)inlink->w / inlink->h;
371  scale->var_values[VAR_SAR] = inlink->sample_aspect_ratio.num ?
372  (double)inlink->sample_aspect_ratio.num / inlink->sample_aspect_ratio.den : 1;
373  scale->var_values[VAR_DAR] = scale->var_values[VAR_A] * scale->var_values[VAR_SAR];
374 
375  if (scale2ref) {
376  const AVFilterLink* main_link = ctx->inputs[0];
377 
378  scale->var_values[VAR_S2R_MAIN_W] = main_link->w;
379  scale->var_values[VAR_S2R_MAIN_H] = main_link->h;
380  scale->var_values[VAR_S2R_MAIN_A] = (double)main_link->w / main_link->h;
381  scale->var_values[VAR_S2R_MAIN_SAR] = main_link->sample_aspect_ratio.num ?
382  (double)main_link->sample_aspect_ratio.num / main_link->sample_aspect_ratio.den : 1;
383  scale->var_values[VAR_S2R_MAIN_DAR] = scale->var_values[VAR_S2R_MDAR] =
384  scale->var_values[VAR_S2R_MAIN_A] * scale->var_values[VAR_S2R_MAIN_SAR];
385  }
386 
387  res = av_expr_eval(scale->w_pexpr, scale->var_values, NULL);
388  eval_w = scale->var_values[VAR_OUT_W] = scale->var_values[VAR_OW] = (int)res == 0 ? inlink->w : (int)res;
389 
390  res = av_expr_eval(scale->h_pexpr, scale->var_values, NULL);
391  if (isnan(res)) {
392  expr = scale->h_expr;
393  ret = AVERROR(EINVAL);
394  goto fail;
395  }
396  eval_h = scale->var_values[VAR_OUT_H] = scale->var_values[VAR_OH] = (int)res == 0 ? inlink->h : (int)res;
397 
398  res = av_expr_eval(scale->w_pexpr, scale->var_values, NULL);
399  if (isnan(res)) {
400  expr = scale->w_expr;
401  ret = AVERROR(EINVAL);
402  goto fail;
403  }
404  eval_w = scale->var_values[VAR_OUT_W] = scale->var_values[VAR_OW] = (int)res == 0 ? inlink->w : (int)res;
405 
406  scale->w = eval_w;
407  scale->h = eval_h;
408 
409  return 0;
410 
411 fail:
412  av_log(ctx, AV_LOG_ERROR, "Error when evaluating the expression '%s'.\n",
413  expr);
414  return ret;
415 }
416 
417 static void nppscale_uninit(AVFilterContext *ctx)
418 {
419  NPPScaleContext *s = ctx->priv;
420  int i;
421 
422  for (i = 0; i < FF_ARRAY_ELEMS(s->stages); i++) {
423  av_frame_free(&s->stages[i].frame);
424  av_buffer_unref(&s->stages[i].frames_ctx);
425  }
426  av_frame_free(&s->tmp_frame);
427 
428  av_expr_free(s->w_pexpr);
429  av_expr_free(s->h_pexpr);
430  s->w_pexpr = s->h_pexpr = NULL;
431 }
432 
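/*
 * Allocate a CUDA frames context for one stage's output: plane dimensions are
 * derived from the chroma subsampling of the input/output formats, the pool
 * dimensions are aligned to 32, and one output frame is preallocated into
 * stage->frame.
 */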
433 static int init_stage(NPPScaleStageContext *stage, AVBufferRef *device_ctx)
434 {
435  AVBufferRef *out_ref = NULL;
436  AVHWFramesContext *out_ctx;
437  int in_sw, in_sh, out_sw, out_sh;
438  int ret, i;
439 
440  av_pix_fmt_get_chroma_sub_sample(stage->in_fmt, &in_sw, &in_sh);
441  av_pix_fmt_get_chroma_sub_sample(stage->out_fmt, &out_sw, &out_sh);
442  if (!stage->planes_out[0].width) {
443  stage->planes_out[0].width = stage->planes_in[0].width;
444  stage->planes_out[0].height = stage->planes_in[0].height;
445  }
446 
447  for (i = 1; i < FF_ARRAY_ELEMS(stage->planes_in); i++) {
448  stage->planes_in[i].width = stage->planes_in[0].width >> in_sw;
449  stage->planes_in[i].height = stage->planes_in[0].height >> in_sh;
450  stage->planes_out[i].width = stage->planes_out[0].width >> out_sw;
451  stage->planes_out[i].height = stage->planes_out[0].height >> out_sh;
452  }
453 
454  if (AV_PIX_FMT_YUVA420P == stage->in_fmt) {
455  stage->planes_in[3].width = stage->planes_in[0].width;
456  stage->planes_in[3].height = stage->planes_in[0].height;
457  stage->planes_out[3].width = stage->planes_out[0].width;
458  stage->planes_out[3].height = stage->planes_out[0].height;
459  }
460 
461  out_ref = av_hwframe_ctx_alloc(device_ctx);
462  if (!out_ref)
463  return AVERROR(ENOMEM);
464  out_ctx = (AVHWFramesContext*)out_ref->data;
465 
466  out_ctx->format = AV_PIX_FMT_CUDA;
467  out_ctx->sw_format = stage->out_fmt;
468  out_ctx->width = FFALIGN(stage->planes_out[0].width, 32);
469  out_ctx->height = FFALIGN(stage->planes_out[0].height, 32);
470 
471  ret = av_hwframe_ctx_init(out_ref);
472  if (ret < 0)
473  goto fail;
474 
475  av_frame_unref(stage->frame);
476  ret = av_hwframe_get_buffer(out_ref, stage->frame, 0);
477  if (ret < 0)
478  goto fail;
479 
480  stage->frame->width = stage->planes_out[0].width;
481  stage->frame->height = stage->planes_out[0].height;
482 
483  av_buffer_unref(&stage->frames_ctx);
484  stage->frames_ctx = out_ref;
485 
486  return 0;
487 fail:
488  av_buffer_unref(&out_ref);
489  return ret;
490 }
491 
492 static int format_is_supported(enum AVPixelFormat fmt)
493 {
494  int i;
495 
496  for (i = 0; i < FF_ARRAY_ELEMS(supported_formats); i++)
497  if (supported_formats[i] == fmt)
498  return 1;
499  return 0;
500 }
501 
502 static enum AVPixelFormat get_deinterleaved_format(enum AVPixelFormat fmt)
503 {
504  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(fmt);
505  int i, planes;
506 
507  planes = av_pix_fmt_count_planes(fmt);
508  if (planes == desc->nb_components)
509  return fmt;
510  for (i = 0; i < FF_ARRAY_ELEMS(deinterleaved_formats); i++)
511  if (deinterleaved_formats[i][0] == fmt)
512  return deinterleaved_formats[i][1];
513  return AV_PIX_FMT_NONE;
514 }
515 
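/*
 * Decide which of the three stages (deinterleave, resize, interleave) are
 * needed for the requested conversion. If neither resizing nor a format
 * change is required the filter runs in passthrough mode; otherwise packed
 * inputs such as NV12 are deinterleaved to planar, resized per plane and
 * re-interleaved on output. NPPI_INTER_SUPER is only valid for downscaling,
 * so it falls back to lanczos or cubic otherwise.
 */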
516 static int init_processing_chain(AVFilterContext *ctx, int in_width, int in_height,
517  int out_width, int out_height)
518 {
519  NPPScaleContext *s = ctx->priv;
520  FilterLink *inl = ff_filter_link(ctx->inputs[0]);
521  FilterLink *outl = ff_filter_link(ctx->outputs[0]);
522 
523  AVHWFramesContext *in_frames_ctx;
524 
525  enum AVPixelFormat in_format;
526  enum AVPixelFormat out_format;
527  enum AVPixelFormat in_deinterleaved_format;
528  enum AVPixelFormat out_deinterleaved_format;
529 
530  int i, ret, last_stage = -1;
531 
532  /* check that we have a hw context */
533  if (!inl->hw_frames_ctx) {
534  av_log(ctx, AV_LOG_ERROR, "No hw context provided on input\n");
535  return AVERROR(EINVAL);
536  }
537  in_frames_ctx = (AVHWFramesContext*)inl->hw_frames_ctx->data;
538  in_format = in_frames_ctx->sw_format;
539  out_format = (s->format == AV_PIX_FMT_NONE) ? in_format : s->format;
540 
541  if (!format_is_supported(in_format)) {
542  av_log(ctx, AV_LOG_ERROR, "Unsupported input format: %s\n",
543  av_get_pix_fmt_name(in_format));
544  return AVERROR(ENOSYS);
545  }
546  if (!format_is_supported(out_format)) {
547  av_log(ctx, AV_LOG_ERROR, "Unsupported output format: %s\n",
548  av_get_pix_fmt_name(out_format));
549  return AVERROR(ENOSYS);
550  }
551 
552  in_deinterleaved_format = get_deinterleaved_format(in_format);
553  out_deinterleaved_format = get_deinterleaved_format(out_format);
554  if (in_deinterleaved_format == AV_PIX_FMT_NONE ||
555  out_deinterleaved_format == AV_PIX_FMT_NONE)
556  return AVERROR_BUG;
557 
558  /* figure out which stages need to be done */
559  if (in_width != out_width || in_height != out_height ||
560  in_deinterleaved_format != out_deinterleaved_format) {
561  s->stages[STAGE_RESIZE].stage_needed = 1;
562 
563  if (s->interp_algo == NPPI_INTER_SUPER &&
564  (out_width > in_width && out_height > in_height)) {
565  s->interp_algo = NPPI_INTER_LANCZOS;
566  av_log(ctx, AV_LOG_WARNING, "super-sampling not supported for output dimensions, using lanczos instead.\n");
567  }
568  if (s->interp_algo == NPPI_INTER_SUPER &&
569  !(out_width < in_width && out_height < in_height)) {
570  s->interp_algo = NPPI_INTER_CUBIC;
571  av_log(ctx, AV_LOG_WARNING, "super-sampling not supported for output dimensions, using cubic instead.\n");
572  }
573  }
574 
575  if (!s->stages[STAGE_RESIZE].stage_needed && in_format == out_format)
576  s->passthrough = 1;
577 
578  if (!s->passthrough) {
579  if (in_format != in_deinterleaved_format)
580  s->stages[STAGE_DEINTERLEAVE].stage_needed = 1;
581  if (out_format != out_deinterleaved_format)
582  s->stages[STAGE_INTERLEAVE].stage_needed = 1;
583  }
584 
585  s->stages[STAGE_DEINTERLEAVE].in_fmt = in_format;
586  s->stages[STAGE_DEINTERLEAVE].out_fmt = in_deinterleaved_format;
587  s->stages[STAGE_DEINTERLEAVE].planes_in[0].width = in_width;
588  s->stages[STAGE_DEINTERLEAVE].planes_in[0].height = in_height;
589 
590  s->stages[STAGE_RESIZE].in_fmt = in_deinterleaved_format;
591  s->stages[STAGE_RESIZE].out_fmt = out_deinterleaved_format;
592  s->stages[STAGE_RESIZE].planes_in[0].width = in_width;
593  s->stages[STAGE_RESIZE].planes_in[0].height = in_height;
594  s->stages[STAGE_RESIZE].planes_out[0].width = out_width;
595  s->stages[STAGE_RESIZE].planes_out[0].height = out_height;
596 
597  s->stages[STAGE_INTERLEAVE].in_fmt = out_deinterleaved_format;
598  s->stages[STAGE_INTERLEAVE].out_fmt = out_format;
599  s->stages[STAGE_INTERLEAVE].planes_in[0].width = out_width;
600  s->stages[STAGE_INTERLEAVE].planes_in[0].height = out_height;
601 
602  /* init the hardware contexts */
603  for (i = 0; i < FF_ARRAY_ELEMS(s->stages); i++) {
604  if (!s->stages[i].stage_needed)
605  continue;
606 
607  ret = init_stage(&s->stages[i], in_frames_ctx->device_ref);
608  if (ret < 0)
609  return ret;
610 
611  last_stage = i;
612  }
613 
614  if (last_stage >= 0)
615  outl->hw_frames_ctx = av_buffer_ref(s->stages[last_stage].frames_ctx);
616  else
617  outl->hw_frames_ctx = av_buffer_ref(inl->hw_frames_ctx);
618 
619  if (!outl->hw_frames_ctx)
620  return AVERROR(ENOMEM);
621 
622  return 0;
623 }
624 
625 static int config_props(AVFilterLink *outlink)
626 {
627  AVFilterContext *ctx = outlink->src;
628  AVFilterLink *inlink0 = outlink->src->inputs[0];
629  AVFilterLink *inlink = IS_SCALE2REF(ctx) ?
630  outlink->src->inputs[1] :
631  outlink->src->inputs[0];
632  NPPScaleContext *s = ctx->priv;
633  double w_adj = 1.0;
634  int ret;
635 
636  if ((ret = nppscale_eval_dimensions(ctx)) < 0)
637  goto fail;
638 
639  if (s->reset_sar)
640  w_adj = IS_SCALE2REF(ctx) ? s->var_values[VAR_S2R_MAIN_SAR] :
641  s->var_values[VAR_SAR];
642 
643  ff_scale_adjust_dimensions(inlink, &s->w, &s->h,
644  s->force_original_aspect_ratio,
645  s->force_divisible_by, w_adj);
646 
647  if (s->w > INT_MAX || s->h > INT_MAX ||
648  (s->h * inlink->w) > INT_MAX ||
649  (s->w * inlink->h) > INT_MAX)
650  av_log(ctx, AV_LOG_ERROR, "Rescaled value for width or height is too big.\n");
651 
652  outlink->w = s->w;
653  outlink->h = s->h;
654 
655  ret = init_processing_chain(ctx, inlink0->w, inlink0->h, outlink->w, outlink->h);
656  if (ret < 0)
657  return ret;
658 
659  av_log(ctx, AV_LOG_VERBOSE, "w:%d h:%d -> w:%d h:%d\n",
660  inlink->w, inlink->h, outlink->w, outlink->h);
661 
662  if (s->reset_sar)
663  outlink->sample_aspect_ratio = (AVRational){1, 1};
664  else if (inlink->sample_aspect_ratio.num)
665  outlink->sample_aspect_ratio = av_mul_q((AVRational){outlink->h*inlink->w,
666  outlink->w*inlink->h},
667  inlink->sample_aspect_ratio);
668  else
669  outlink->sample_aspect_ratio = inlink->sample_aspect_ratio;
670 
671  return 0;
672 
673 fail:
674  return ret;
675 }
676 
677 static int config_props_ref(AVFilterLink *outlink)
678 {
679  FilterLink *outl = ff_filter_link(outlink);
680  AVFilterLink *inlink = outlink->src->inputs[1];
681  FilterLink *inl = ff_filter_link(inlink);
682  FilterLink *ol = ff_filter_link(outlink);
683 
684  outlink->w = inlink->w;
685  outlink->h = inlink->h;
686  outlink->sample_aspect_ratio = inlink->sample_aspect_ratio;
687  outlink->time_base = inlink->time_base;
688  ol->frame_rate = inl->frame_rate;
689 
690  outl->hw_frames_ctx = av_buffer_ref(inl->hw_frames_ctx);
691 
692  return 0;
693 }
694 
695 static int nppscale_deinterleave(AVFilterContext *ctx, NPPScaleStageContext *stage,
696  AVFrame *out, AVFrame *in)
697 {
698  AVHWFramesContext *in_frames_ctx = (AVHWFramesContext*)in->hw_frames_ctx->data;
699  NppStatus err;
700 
701  switch (in_frames_ctx->sw_format) {
702  case AV_PIX_FMT_NV12:
703  err = nppiYCbCr420_8u_P2P3R(in->data[0], in->linesize[0],
704  in->data[1], in->linesize[1],
705  out->data, out->linesize,
706  (NppiSize){ in->width, in->height });
707  break;
708  default:
709  return AVERROR_BUG;
710  }
711  if (err != NPP_SUCCESS) {
712  av_log(ctx, AV_LOG_ERROR, "NPP deinterleave error: %d\n", err);
713  return AVERROR_UNKNOWN;
714  }
715 
716  return 0;
717 }
718 
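/*
 * Resize each plane independently with nppiResizeSqrPixel_8u_C1R; the x/y
 * scale factors are simply the ratios of the per-plane output and input
 * dimensions computed in init_stage().
 */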
719 static int nppscale_resize(AVFilterContext *ctx, NPPScaleStageContext *stage,
720  AVFrame *out, AVFrame *in)
721 {
722  NPPScaleContext *s = ctx->priv;
723  NppStatus err;
724  int i;
725 
726  for (i = 0; i < FF_ARRAY_ELEMS(stage->planes_in) && i < FF_ARRAY_ELEMS(in->data) && in->data[i]; i++) {
727  int iw = stage->planes_in[i].width;
728  int ih = stage->planes_in[i].height;
729  int ow = stage->planes_out[i].width;
730  int oh = stage->planes_out[i].height;
731 
732  err = nppiResizeSqrPixel_8u_C1R(in->data[i], (NppiSize){ iw, ih },
733  in->linesize[i], (NppiRect){ 0, 0, iw, ih },
734  out->data[i], out->linesize[i],
735  (NppiRect){ 0, 0, ow, oh },
736  (double)ow / iw, (double)oh / ih,
737  0.0, 0.0, s->interp_algo);
738  if (err != NPP_SUCCESS) {
739  av_log(ctx, AV_LOG_ERROR, "NPP resize error: %d\n", err);
740  return AVERROR_UNKNOWN;
741  }
742  }
743 
744  return 0;
745 }
746 
747 static int nppscale_interleave(AVFilterContext *ctx, NPPScaleStageContext *stage,
748  AVFrame *out, AVFrame *in)
749 {
750  AVHWFramesContext *out_frames_ctx = (AVHWFramesContext*)out->hw_frames_ctx->data;
751  NppStatus err;
752 
753  switch (out_frames_ctx->sw_format) {
754  case AV_PIX_FMT_NV12:
755  err = nppiYCbCr420_8u_P3P2R((const uint8_t**)in->data,
756  in->linesize,
757  out->data[0], out->linesize[0],
758  out->data[1], out->linesize[1],
759  (NppiSize){ in->width, in->height });
760  break;
761  default:
762  return AVERROR_BUG;
763  }
764  if (err != NPP_SUCCESS) {
765  av_log(ctx, AV_LOG_ERROR, "NPP deinterleave error: %d\n", err);
766  return AVERROR_UNKNOWN;
767  }
768 
769  return 0;
770 }
771 
772 static int (*const nppscale_process[])(AVFilterContext *ctx, NPPScaleStageContext *stage,
773  AVFrame *out, AVFrame *in) = {
774  [STAGE_DEINTERLEAVE] = nppscale_deinterleave,
775  [STAGE_RESIZE] = nppscale_resize,
776  [STAGE_INTERLEAVE] = nppscale_interleave,
777 };
778 
779 static int nppscale_scale(AVFilterLink *link, AVFrame *out, AVFrame *in)
780 {
781  FilterLink *inl = ff_filter_link(link);
782  AVFilterContext *ctx = link->dst;
783  NPPScaleContext *s = ctx->priv;
784  AVFilterLink *outlink = ctx->outputs[0];
785  AVFrame *src = in;
786  char buf[32];
787  int i, ret, last_stage = -1;
788  int frame_changed;
789 
790  frame_changed = in->width != link->w ||
791  in->height != link->h ||
792  in->format != link->format ||
793  in->sample_aspect_ratio.den != link->sample_aspect_ratio.den ||
794  in->sample_aspect_ratio.num != link->sample_aspect_ratio.num;
795 
796  if (s->eval_mode == EVAL_MODE_FRAME || frame_changed) {
797  unsigned vars_w[VARS_NB] = { 0 }, vars_h[VARS_NB] = { 0 };
798 
799  av_expr_count_vars(s->w_pexpr, vars_w, VARS_NB);
800  av_expr_count_vars(s->h_pexpr, vars_h, VARS_NB);
801 
802  if (s->eval_mode == EVAL_MODE_FRAME && !frame_changed && !IS_SCALE2REF(ctx) &&
803  !(vars_w[VAR_N] || vars_w[VAR_T]) &&
804  !(vars_h[VAR_N] || vars_h[VAR_T]) && s->w && s->h)
805  goto scale;
806 
807  if (s->eval_mode == EVAL_MODE_INIT) {
808  snprintf(buf, sizeof(buf)-1, "%d", outlink->w);
809  av_opt_set(s, "w", buf, 0);
810  snprintf(buf, sizeof(buf)-1, "%d", outlink->h);
811  av_opt_set(s, "h", buf, 0);
812 
813  ret = nppscale_parse_expr(ctx, NULL, &s->w_pexpr, "width", s->w_expr);
814  if (ret < 0)
815  return ret;
816 
817  ret = nppscale_parse_expr(ctx, NULL, &s->h_pexpr, "height", s->h_expr);
818  if (ret < 0)
819  return ret;
820  }
821 
822  if (IS_SCALE2REF(ctx)) {
823  s->var_values[VAR_S2R_MAIN_N] = inl->frame_count_out;
824  s->var_values[VAR_S2R_MAIN_T] = TS2T(in->pts, link->time_base);
825  } else {
826  s->var_values[VAR_N] = inl->frame_count_out;
827  s->var_values[VAR_T] = TS2T(in->pts, link->time_base);
828  }
829 
830  link->format = in->format;
831  link->w = in->width;
832  link->h = in->height;
833 
833 
834  link->sample_aspect_ratio.den = in->sample_aspect_ratio.den;
835  link->sample_aspect_ratio.num = in->sample_aspect_ratio.num;
836 
837  if ((ret = config_props(outlink)) < 0)
838  return ret;
839  }
840 
841 scale:
842  for (i = 0; i < FF_ARRAY_ELEMS(s->stages); i++) {
843  if (!s->stages[i].stage_needed)
844  continue;
845 
846  ret = nppscale_process[i](ctx, &s->stages[i], s->stages[i].frame, src);
847  if (ret < 0)
848  return ret;
849 
850  src = s->stages[i].frame;
851  last_stage = i;
852  }
853  if (last_stage < 0)
854  return AVERROR_BUG;
855 
856  ret = av_hwframe_get_buffer(src->hw_frames_ctx, s->tmp_frame, 0);
857  if (ret < 0)
858  return ret;
859 
860  s->tmp_frame->width = src->width;
861  s->tmp_frame->height = src->height;
862 
863  av_frame_move_ref(out, src);
864  av_frame_move_ref(src, s->tmp_frame);
865 
866  ret = av_frame_copy_props(out, in);
867  if (ret < 0)
868  return ret;
869 
870  if (out->width != in->width || out->height != in->height) {
871  av_frame_side_data_remove_by_props(&out->side_data, &out->nb_side_data,
872  AV_SIDE_DATA_PROP_SIZE_DEPENDENT);
873  }
874 
875  return 0;
876 }
877 
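/*
 * Entry point for each input frame: in passthrough mode the frame is
 * forwarded untouched; otherwise the CUDA context is pushed, the stage chain
 * in nppscale_scale() is run, the context is popped and the output sample
 * aspect ratio is rescaled to preserve the display aspect ratio.
 */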
878 static int nppscale_filter_frame(AVFilterLink *link, AVFrame *in)
879 {
880  AVFilterContext *ctx = link->dst;
881  NPPScaleContext *s = ctx->priv;
882  AVFilterLink *outlink = ctx->outputs[0];
883  FilterLink *l = ff_filter_link(outlink);
884  AVHWFramesContext *frames_ctx = (AVHWFramesContext*)l->hw_frames_ctx->data;
885  AVCUDADeviceContext *device_hwctx = frames_ctx->device_ctx->hwctx;
886 
887  AVFrame *out = NULL;
888  CUcontext dummy;
889  int ret = 0;
890 
891  if (s->passthrough)
892  return ff_filter_frame(outlink, in);
893 
894  out = av_frame_alloc();
895  if (!out) {
896  ret = AVERROR(ENOMEM);
897  goto fail;
898  }
899 
900  ret = CHECK_CU(device_hwctx->internal->cuda_dl->cuCtxPushCurrent(device_hwctx->cuda_ctx));
901  if (ret < 0)
902  goto fail;
903 
904  ret = nppscale_scale(link, out, in);
905 
906  CHECK_CU(device_hwctx->internal->cuda_dl->cuCtxPopCurrent(&dummy));
907  if (ret < 0)
908  goto fail;
909 
910  av_reduce(&out->sample_aspect_ratio.num, &out->sample_aspect_ratio.den,
911  (int64_t)in->sample_aspect_ratio.num * outlink->h * link->w,
912  (int64_t)in->sample_aspect_ratio.den * outlink->w * link->h,
913  INT_MAX);
914 
915  av_frame_free(&in);
916  return ff_filter_frame(outlink, out);
917 fail:
918  av_frame_free(&in);
919  av_frame_free(&out);
920  return ret;
921 }
922 
923 static int nppscale_filter_frame_ref(AVFilterLink *link, AVFrame *in)
924 {
925  FilterLink *inl = ff_filter_link(link);
926  NPPScaleContext *scale = link->dst->priv;
927  AVFilterLink *outlink = link->dst->outputs[1];
928  int frame_changed;
929 
930  frame_changed = in->width != link->w ||
931  in->height != link->h ||
932  in->format != link->format ||
933  in->sample_aspect_ratio.den != link->sample_aspect_ratio.den ||
934  in->sample_aspect_ratio.num != link->sample_aspect_ratio.num;
935 
936  if (frame_changed) {
937  link->format = in->format;
938  link->w = in->width;
939  link->h = in->height;
940  link->sample_aspect_ratio.den = in->sample_aspect_ratio.den;
941  link->sample_aspect_ratio.num = in->sample_aspect_ratio.num;
942 
943  config_props_ref(outlink);
944  }
945 
946  if (scale->eval_mode == EVAL_MODE_FRAME) {
947  scale->var_values[VAR_N] = inl->frame_count_out;
948  scale->var_values[VAR_T] = TS2T(in->pts, link->time_base);
949  }
950 
951  return ff_filter_frame(outlink, in);
952 }
953 
954 static int request_frame(AVFilterLink *outlink)
955 {
956  return ff_request_frame(outlink->src->inputs[0]);
957 }
958 
959 static int request_frame_ref(AVFilterLink *outlink)
960 {
961  return ff_request_frame(outlink->src->inputs[1]);
962 }
963 
964 #define OFFSET(x) offsetof(NPPScaleContext, x)
965 #define FLAGS (AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM)
966 static const AVOption options[] = {
967  { "w", "Output video width", OFFSET(w_expr), AV_OPT_TYPE_STRING, .flags = FLAGS },
968  { "h", "Output video height", OFFSET(h_expr), AV_OPT_TYPE_STRING, .flags = FLAGS },
969  { "format", "Output pixel format", OFFSET(format_str), AV_OPT_TYPE_STRING, { .str = "same" }, .flags = FLAGS },
970  { "s", "Output video size", OFFSET(size_str), AV_OPT_TYPE_STRING, { .str = NULL }, .flags = FLAGS },
971 
972  { "interp_algo", "Interpolation algorithm used for resizing", OFFSET(interp_algo), AV_OPT_TYPE_INT, { .i64 = NPPI_INTER_CUBIC }, 0, INT_MAX, FLAGS, .unit = "interp_algo" },
973  { "nn", "nearest neighbour", 0, AV_OPT_TYPE_CONST, { .i64 = NPPI_INTER_NN }, 0, 0, FLAGS, .unit = "interp_algo" },
974  { "linear", "linear", 0, AV_OPT_TYPE_CONST, { .i64 = NPPI_INTER_LINEAR }, 0, 0, FLAGS, .unit = "interp_algo" },
975  { "cubic", "cubic", 0, AV_OPT_TYPE_CONST, { .i64 = NPPI_INTER_CUBIC }, 0, 0, FLAGS, .unit = "interp_algo" },
976  { "cubic2p_bspline", "2-parameter cubic (B=1, C=0)", 0, AV_OPT_TYPE_CONST, { .i64 = NPPI_INTER_CUBIC2P_BSPLINE }, 0, 0, FLAGS, .unit = "interp_algo" },
977  { "cubic2p_catmullrom", "2-parameter cubic (B=0, C=1/2)", 0, AV_OPT_TYPE_CONST, { .i64 = NPPI_INTER_CUBIC2P_CATMULLROM }, 0, 0, FLAGS, .unit = "interp_algo" },
978  { "cubic2p_b05c03", "2-parameter cubic (B=1/2, C=3/10)", 0, AV_OPT_TYPE_CONST, { .i64 = NPPI_INTER_CUBIC2P_B05C03 }, 0, 0, FLAGS, .unit = "interp_algo" },
979  { "super", "supersampling", 0, AV_OPT_TYPE_CONST, { .i64 = NPPI_INTER_SUPER }, 0, 0, FLAGS, .unit = "interp_algo" },
980  { "lanczos", "Lanczos", 0, AV_OPT_TYPE_CONST, { .i64 = NPPI_INTER_LANCZOS }, 0, 0, FLAGS, .unit = "interp_algo" },
981  { "force_original_aspect_ratio", "decrease or increase w/h if necessary to keep the original AR", OFFSET(force_original_aspect_ratio), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, SCALE_FORCE_OAR_NB-1, FLAGS, .unit = "force_oar" },
982  { "disable", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = SCALE_FORCE_OAR_DISABLE }, 0, 0, FLAGS, .unit = "force_oar" },
983  { "decrease", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = SCALE_FORCE_OAR_DECREASE }, 0, 0, FLAGS, .unit = "force_oar" },
984  { "increase", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = SCALE_FORCE_OAR_INCREASE }, 0, 0, FLAGS, .unit = "force_oar" },
985  { "force_divisible_by", "enforce that the output resolution is divisible by a defined integer when force_original_aspect_ratio is used", OFFSET(force_divisible_by), AV_OPT_TYPE_INT, { .i64 = 1 }, 1, 256, FLAGS },
986  { "reset_sar", "reset SAR to 1 and scale to square pixels if scaling proportionally", OFFSET(reset_sar), AV_OPT_TYPE_BOOL, { .i64 = 0}, 0, 1, FLAGS },
987  { "eval", "specify when to evaluate expressions", OFFSET(eval_mode), AV_OPT_TYPE_INT, { .i64 = EVAL_MODE_INIT }, 0, EVAL_MODE_NB-1, FLAGS, .unit = "eval" },
988  { "init", "eval expressions once during initialization", 0, AV_OPT_TYPE_CONST, { .i64 = EVAL_MODE_INIT }, 0, 0, FLAGS, .unit = "eval" },
989  { "frame", "eval expressions during initialization and per-frame", 0, AV_OPT_TYPE_CONST, { .i64 = EVAL_MODE_FRAME }, 0, 0, FLAGS, .unit = "eval" },
990  { NULL },
991 };
992 
993 static const AVClass nppscale_class = {
994  .class_name = "nppscale",
995  .item_name = av_default_item_name,
996  .option = options,
997  .version = LIBAVUTIL_VERSION_INT,
998  .category = AV_CLASS_CATEGORY_FILTER,
999 };
1000 
1001 static const AVFilterPad nppscale_inputs[] = {
1002  {
1003  .name = "default",
1004  .type = AVMEDIA_TYPE_VIDEO,
1005  .filter_frame = nppscale_filter_frame,
1006  }
1007 };
1008 
1009 static const AVFilterPad nppscale_outputs[] = {
1010  {
1011  .name = "default",
1012  .type = AVMEDIA_TYPE_VIDEO,
1013  .config_props = config_props,
1014  }
1015 };
1016 
1018  .p.name = "scale_npp",
1019  .p.description = NULL_IF_CONFIG_SMALL("NVIDIA Performance Primitives video "
1020  "scaling and format conversion"),
1021  .p.priv_class = &nppscale_class,
1022 
1023  .init = nppscale_init,
1024  .uninit = nppscale_uninit,
1025 
1026  .priv_size = sizeof(NPPScaleContext),
1027 
1028  FILTER_INPUTS(nppscale_inputs),
1029  FILTER_OUTPUTS(nppscale_outputs),
1030 
1031  FILTER_SINGLE_PIXFMT(AV_PIX_FMT_CUDA),
1032 
1033  .flags_internal = FF_FILTER_FLAG_HWFRAME_AWARE,
1034 };
1035 
1036 static const AVFilterPad nppscale2ref_inputs[] = {
1037  {
1038  .name = "default",
1039  .type = AVMEDIA_TYPE_VIDEO,
1040  .filter_frame = nppscale_filter_frame,
1041  },
1042  {
1043  .name = "ref",
1044  .type = AVMEDIA_TYPE_VIDEO,
1045  .filter_frame = nppscale_filter_frame_ref,
1046  }
1047 };
1048 
1049 static const AVFilterPad nppscale2ref_outputs[] = {
1050  {
1051  .name = "default",
1052  .type = AVMEDIA_TYPE_VIDEO,
1053  .config_props = config_props,
1054  .request_frame= request_frame,
1055  },
1056  {
1057  .name = "ref",
1058  .type = AVMEDIA_TYPE_VIDEO,
1059  .config_props = config_props_ref,
1060  .request_frame= request_frame_ref,
1061  }
1062 };
1063 
1065  .p.name = "scale2ref_npp",
1066  .p.description = NULL_IF_CONFIG_SMALL("NVIDIA Performance Primitives video "
1067  "scaling and format conversion to the "
1068  "given reference."),
1069  .p.priv_class = &nppscale_class,
1070 
1071  .init = nppscale_init,
1072  .uninit = nppscale_uninit,
1073 
1074  .priv_size = sizeof(NPPScaleContext),
1075 
1076  FILTER_INPUTS(nppscale2ref_inputs),
1077  FILTER_OUTPUTS(nppscale2ref_outputs),
1078 
1079  FILTER_SINGLE_PIXFMT(AV_PIX_FMT_CUDA),
1080 
1081  .flags_internal = FF_FILTER_FLAG_HWFRAME_AWARE,
1082 };
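For the scale2ref_npp variant, the first input is scaled using expressions that may reference the second ("ref") input: iw/ih refer to the reference dimensions, while main_w, main_h, main_a, main_sar, main_dar (mdar), main_n and main_t describe the stream being scaled. An illustrative filtergraph (stream labels are placeholders; both streams are assumed to already be CUDA frames) scales a logo to one tenth of the reference video's height while keeping the logo's display aspect ratio:

  [logo][video]scale2ref_npp=w=oh*mdar:h=ih/10[logo_out][video_out]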