FFmpeg
mjpegdec.c
Go to the documentation of this file.
1 /*
2  * MJPEG decoder
3  * Copyright (c) 2000, 2001 Fabrice Bellard
4  * Copyright (c) 2003 Alex Beregszaszi
5  * Copyright (c) 2003-2004 Michael Niedermayer
6  *
7  * Support for external huffman table, various fixes (AVID workaround),
8  * aspecting, new decode_frame mechanism and apple mjpeg-b support
9  * by Alex Beregszaszi
10  *
11  * This file is part of FFmpeg.
12  *
13  * FFmpeg is free software; you can redistribute it and/or
14  * modify it under the terms of the GNU Lesser General Public
15  * License as published by the Free Software Foundation; either
16  * version 2.1 of the License, or (at your option) any later version.
17  *
18  * FFmpeg is distributed in the hope that it will be useful,
19  * but WITHOUT ANY WARRANTY; without even the implied warranty of
20  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
21  * Lesser General Public License for more details.
22  *
23  * You should have received a copy of the GNU Lesser General Public
24  * License along with FFmpeg; if not, write to the Free Software
25  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
26  */
27 
28 /**
29  * @file
30  * MJPEG decoder.
31  */
32 
33 #include "config_components.h"
34 
35 #include "libavutil/attributes.h"
36 #include "libavutil/imgutils.h"
37 #include "libavutil/avassert.h"
38 #include "libavutil/mem.h"
39 #include "libavutil/opt.h"
40 #include "avcodec.h"
41 #include "blockdsp.h"
42 #include "codec_internal.h"
43 #include "copy_block.h"
44 #include "decode.h"
45 #include "exif.h"
46 #include "hwaccel_internal.h"
47 #include "hwconfig.h"
48 #include "idctdsp.h"
49 #include "internal.h"
50 #include "jpegtables.h"
51 #include "mjpeg.h"
52 #include "mjpegdec.h"
53 #include "jpeglsdec.h"
54 #include "profiles.h"
55 #include "put_bits.h"
56 
57 
59 {
60  static const struct {
61  int class;
62  int index;
63  const uint8_t *bits;
64  const uint8_t *values;
65  int length;
66  } ht[] = {
68  ff_mjpeg_val_dc, 12 },
70  ff_mjpeg_val_dc, 12 },
79  };
80  int i, ret;
81 
82  for (i = 0; i < FF_ARRAY_ELEMS(ht); i++) {
83  ff_vlc_free(&s->vlcs[ht[i].class][ht[i].index]);
84  ret = ff_mjpeg_build_vlc(&s->vlcs[ht[i].class][ht[i].index],
85  ht[i].bits, ht[i].values,
86  ht[i].class == 1, s->avctx);
87  if (ret < 0)
88  return ret;
89 
90  if (ht[i].class < 2) {
91  memcpy(s->raw_huffman_lengths[ht[i].class][ht[i].index],
92  ht[i].bits + 1, 16);
93  memcpy(s->raw_huffman_values[ht[i].class][ht[i].index],
94  ht[i].values, ht[i].length);
95  }
96  }
97 
98  return 0;
99 }
100 
101 static void parse_avid(MJpegDecodeContext *s, uint8_t *buf, int len)
102 {
103  s->buggy_avid = 1;
104  if (len > 12 && buf[12] == 1) /* 1 - NTSC */
105  s->interlace_polarity = 1;
106  if (len > 12 && buf[12] == 2) /* 2 - PAL */
107  s->interlace_polarity = 0;
108  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
109  av_log(s->avctx, AV_LOG_INFO, "AVID: len:%d %d\n", len, len > 12 ? buf[12] : -1);
110 }
111 
/**
 * (Re)initialize the IDCT context and rebuild the scantable permuted for
 * the selected IDCT implementation.  Called from decoder init and again
 * whenever bits_per_raw_sample changes (see the SOF parser below).
 */
static void init_idct(AVCodecContext *avctx)
{
    MJpegDecodeContext *s = avctx->priv_data;

    ff_idctdsp_init(&s->idsp, avctx);
    /* map the canonical zigzag order through the IDCT's permutation */
    ff_permute_scantable(s->permutated_scantable, ff_zigzag_direct,
                         s->idsp.idct_permutation);
}
120 
122 {
123  MJpegDecodeContext *s = avctx->priv_data;
124  int ret;
125 
126  if (!s->picture_ptr) {
127  s->picture = av_frame_alloc();
128  if (!s->picture)
129  return AVERROR(ENOMEM);
130  s->picture_ptr = s->picture;
131  }
132 
133  s->avctx = avctx;
134  ff_blockdsp_init(&s->bdsp);
135  init_idct(avctx);
136  s->buffer_size = 0;
137  s->buffer = NULL;
138  s->start_code = -1;
139  s->first_picture = 1;
140  s->got_picture = 0;
141  s->orig_height = avctx->coded_height;
143  avctx->colorspace = AVCOL_SPC_BT470BG;
144  s->hwaccel_pix_fmt = s->hwaccel_sw_pix_fmt = AV_PIX_FMT_NONE;
145 
146  if ((ret = init_default_huffman_tables(s)) < 0)
147  return ret;
148 
149  if (s->extern_huff) {
150  av_log(avctx, AV_LOG_INFO, "using external huffman table\n");
151  bytestream2_init(&s->gB, avctx->extradata, avctx->extradata_size);
152  if (ff_mjpeg_decode_dht(s)) {
153  av_log(avctx, AV_LOG_ERROR,
154  "error using external huffman table, switching back to internal\n");
155  if ((ret = init_default_huffman_tables(s)) < 0)
156  return ret;
157  }
158  }
159  if (avctx->field_order == AV_FIELD_BB) { /* quicktime icefloe 019 */
160  s->interlace_polarity = 1; /* bottom field first */
161  av_log(avctx, AV_LOG_DEBUG, "bottom field first\n");
162  } else if (avctx->field_order == AV_FIELD_UNKNOWN) {
163  if (avctx->codec_tag == AV_RL32("MJPG"))
164  s->interlace_polarity = 1;
165  }
166 
167  if (avctx->codec_id == AV_CODEC_ID_SMVJPEG) {
168  if (avctx->extradata_size >= 4)
169  s->smv_frames_per_jpeg = AV_RL32(avctx->extradata);
170 
171  if (s->smv_frames_per_jpeg <= 0) {
172  av_log(avctx, AV_LOG_ERROR, "Invalid number of frames per jpeg.\n");
173  return AVERROR_INVALIDDATA;
174  }
175 
176  s->smv_frame = av_frame_alloc();
177  if (!s->smv_frame)
178  return AVERROR(ENOMEM);
179  } else if (avctx->extradata_size > 8
180  && AV_RL32(avctx->extradata) == 0x2C
181  && AV_RL32(avctx->extradata+4) == 0x18) {
182  parse_avid(s, avctx->extradata, avctx->extradata_size);
183  }
184 
185  if (avctx->codec->id == AV_CODEC_ID_AMV)
186  s->flipped = 1;
187 
188  return 0;
189 }
190 
191 
192 static int mjpeg_parse_len(MJpegDecodeContext *s, int *plen, const char *name)
193 {
194  int len = bytestream2_get_be16u(&s->gB);
195  if (len < 2 || bytestream2_get_bytes_left(&s->gB) < (len - 2)) {
196  av_log(s->avctx, AV_LOG_ERROR, "%s: invalid len %d\n", name, len);
197  return AVERROR_INVALIDDATA;
198  }
199  *plen = len - 2;
200  return 0;
201 }
202 
/* quantize tables */
/*
 * Parse one DQT (Define Quantization Table) marker segment.  Each table
 * is one precision/index byte followed by 64 coefficients of 8 or 16
 * bits; coefficients are stored in s->quant_matrixes in stream order.
 * NOTE(review): the function signature line is missing from this
 * extract (presumably `int ff_mjpeg_decode_dqt(MJpegDecodeContext *s)`
 * -- confirm against the full file).
 */
{
    int len, index, i;

    int ret = mjpeg_parse_len(s, &len, "dqt");
    if (ret < 0)
        return ret;

    /* each table consumes at least 1 + 64 bytes; stop when too little remains */
    while (len >= 65) {
        uint8_t b = bytestream2_get_byteu(&s->gB);
        int pr = b >> 4; /* precision: 0 = 8-bit, 1 = 16-bit coefficients */
        if (pr > 1) {
            av_log(s->avctx, AV_LOG_ERROR, "dqt: invalid precision\n");
            return AVERROR_INVALIDDATA;
        }
        /* the full table must fit inside the remaining segment payload */
        if (len < (1 + 64 * (1+pr)))
            return AVERROR_INVALIDDATA;
        index = b & 0x0F;
        if (index >= 4)
            return AVERROR_INVALIDDATA;
        av_log(s->avctx, AV_LOG_DEBUG, "index=%d\n", index);
        /* read quant table */
        for (i = 0; i < 64; i++) {
            s->quant_matrixes[index][i] = pr ? bytestream2_get_be16u(&s->gB) : bytestream2_get_byteu(&s->gB);
            if (s->quant_matrixes[index][i] == 0) {
                /* a zero quantizer is invalid; only fatal with AV_EF_EXPLODE */
                int log_level = s->avctx->err_recognition & AV_EF_EXPLODE ? AV_LOG_ERROR : AV_LOG_WARNING;
                av_log(s->avctx, log_level, "dqt: 0 quant value\n");
                if (s->avctx->err_recognition & AV_EF_EXPLODE)
                    return AVERROR_INVALIDDATA;
            }
        }

        // XXX FIXME fine-tune, and perhaps add dc too
        /* rough quality estimate derived from two low-frequency AC quantizers */
        s->qscale[index] = FFMAX(s->quant_matrixes[index][1],
                                 s->quant_matrixes[index][8]) >> 1;
        av_log(s->avctx, AV_LOG_DEBUG, "qscale[%d]: %d\n",
               index, s->qscale[index]);
        len -= 1 + 64 * (1+pr);
    }
    return 0;
}
245 
/* decode huffman tables and build VLC decoders */
/*
 * Parse one DHT (Define Huffman Table) marker segment.  Each table is a
 * class/index byte, 16 per-length code counts, then the code values.
 * A VLC decoder is (re)built for every table; the raw lengths/values
 * are also kept so they can be re-emitted (e.g. for hwaccel).
 * NOTE(review): the function signature line is missing from this
 * extract (presumably `int ff_mjpeg_decode_dht(MJpegDecodeContext *s)`
 * -- confirm against the full file).
 */
{
    int len, index, i, class, n, v;
    uint8_t bits_table[17];  /* bits_table[1..16]: number of codes per length */
    uint8_t val_table[256];  /* symbol values, at most 256 */
    int ret = 0;

    ret = mjpeg_parse_len(s, &len, "dht");
    if (ret < 0)
        return ret;

    while (len > 0) {
        /* need class/index byte + 16 count bytes */
        if (len < 17)
            return AVERROR_INVALIDDATA;
        uint8_t b = bytestream2_get_byteu(&s->gB);
        class = b >> 4; /* 0 = DC table, 1 = AC table */
        if (class >= 2)
            return AVERROR_INVALIDDATA;
        index = b & 0x0F;
        if (index >= 4)
            return AVERROR_INVALIDDATA;
        n = 0;
        for (i = 1; i <= 16; i++) {
            bits_table[i] = bytestream2_get_byteu(&s->gB);
            n += bits_table[i];
        }
        len -= 17;
        /* the values must fit in the segment and in val_table */
        if (len < n || n > 256)
            return AVERROR_INVALIDDATA;

        for (i = 0; i < n; i++) {
            v = bytestream2_get_byteu(&s->gB);
            val_table[i] = v;
        }
        len -= n;

        /* build VLC and flush previous vlc if present */
        ff_vlc_free(&s->vlcs[class][index]);
        av_log(s->avctx, AV_LOG_DEBUG, "class=%d index=%d nb_codes=%d\n",
               class, index, n);
        if ((ret = ff_mjpeg_build_vlc(&s->vlcs[class][index], bits_table,
                                      val_table, class > 0, s->avctx)) < 0)
            return ret;

        /* AC tables additionally get a variant in slot 2 (used by the
         * progressive decoder, built with is_ac == 0) */
        if (class > 0) {
            ff_vlc_free(&s->vlcs[2][index]);
            if ((ret = ff_mjpeg_build_vlc(&s->vlcs[2][index], bits_table,
                                          val_table, 0, s->avctx)) < 0)
                return ret;
        }

        /* keep the raw table definition for later re-serialization */
        for (i = 0; i < 16; i++)
            s->raw_huffman_lengths[class][index][i] = bits_table[i + 1];
        for (i = 0; i < 256; i++)
            s->raw_huffman_values[class][index][i] = val_table[i];
    }
    return 0;
}
305 
307 {
308  int len, nb_components, i, width, height, bits, ret, size_change;
309  unsigned pix_fmt_id;
310  int h_count[MAX_COMPONENTS] = { 0 };
311  int v_count[MAX_COMPONENTS] = { 0 };
312 
313  s->cur_scan = 0;
314  memset(s->upscale_h, 0, sizeof(s->upscale_h));
315  memset(s->upscale_v, 0, sizeof(s->upscale_v));
316 
317  ret = mjpeg_parse_len(s, &len, "sof");
318  if (ret < 0)
319  return ret;
320  if (len < 6)
321  return AVERROR_INVALIDDATA;
322  bits = bytestream2_get_byteu(&s->gB);
323 
324  if (bits > 16 || bits < 1) {
325  av_log(s->avctx, AV_LOG_ERROR, "bits %d is invalid\n", bits);
326  return AVERROR_INVALIDDATA;
327  }
328 
329  if (s->avctx->bits_per_raw_sample != bits) {
330  av_log(s->avctx, s->avctx->bits_per_raw_sample > 0 ? AV_LOG_INFO : AV_LOG_DEBUG, "Changing bps from %d to %d\n", s->avctx->bits_per_raw_sample, bits);
331  s->avctx->bits_per_raw_sample = bits;
332  init_idct(s->avctx);
333  }
334  if (s->pegasus_rct)
335  bits = 9;
336  if (bits == 9 && !s->pegasus_rct)
337  s->rct = 1; // FIXME ugly
338 
339  if(s->lossless && s->avctx->lowres){
340  av_log(s->avctx, AV_LOG_ERROR, "lowres is not possible with lossless jpeg\n");
341  return AVERROR(ENOSYS);
342  }
343 
344  height = bytestream2_get_be16u(&s->gB);
345  width = bytestream2_get_be16u(&s->gB);
346 
347  // HACK for odd_height.mov
348  if (s->interlaced && s->width == width && s->height == height + 1)
349  height= s->height;
350 
351  av_log(s->avctx, AV_LOG_DEBUG, "sof0: picture: %dx%d\n", width, height);
352  if (av_image_check_size(width, height, 0, s->avctx) < 0)
353  return AVERROR_INVALIDDATA;
354 
355  if (!s->progressive && !s->ls) {
356  // A valid frame requires at least 1 bit for DC + 1 bit for AC for each 8x8 block.
357  if (s->buf_size && (width + 7) / 8 * ((height + 7) / 8) > s->buf_size * 4LL)
358  return AVERROR_INVALIDDATA;
359  }
360 
361  nb_components = bytestream2_get_byteu(&s->gB);
362  if (nb_components <= 0 ||
363  nb_components > MAX_COMPONENTS)
364  return AVERROR_INVALIDDATA;
365  if (s->interlaced && (s->bottom_field == !s->interlace_polarity)) {
366  if (nb_components != s->nb_components) {
367  av_log(s->avctx, AV_LOG_ERROR,
368  "nb_components changing in interlaced picture\n");
369  return AVERROR_INVALIDDATA;
370  }
371  }
372  if (s->ls && !(bits <= 8 || nb_components == 1)) {
374  "JPEG-LS that is not <= 8 "
375  "bits/component or 16-bit gray");
376  return AVERROR_PATCHWELCOME;
377  }
378  len -= 6;
379  if (len != 3 * nb_components) {
380  av_log(s->avctx, AV_LOG_ERROR, "decode_sof0: error, len(%d) mismatch %d components\n", len, nb_components);
381  return AVERROR_INVALIDDATA;
382  }
383 
384  s->nb_components = nb_components;
385  s->h_max = 1;
386  s->v_max = 1;
387  for (i = 0; i < nb_components; i++) {
388  /* component id */
389  s->component_id[i] = bytestream2_get_byteu(&s->gB);
390  uint8_t b = bytestream2_get_byteu(&s->gB);
391  h_count[i] = b >> 4;
392  v_count[i] = b & 0x0F;
393  /* compute hmax and vmax (only used in interleaved case) */
394  if (h_count[i] > s->h_max)
395  s->h_max = h_count[i];
396  if (v_count[i] > s->v_max)
397  s->v_max = v_count[i];
398  s->quant_index[i] = bytestream2_get_byteu(&s->gB);
399  if (s->quant_index[i] >= 4) {
400  av_log(s->avctx, AV_LOG_ERROR, "quant_index is invalid\n");
401  return AVERROR_INVALIDDATA;
402  }
403  if (!h_count[i] || !v_count[i]) {
404  av_log(s->avctx, AV_LOG_ERROR,
405  "Invalid sampling factor in component %d %d:%d\n",
406  i, h_count[i], v_count[i]);
407  return AVERROR_INVALIDDATA;
408  }
409 
410  av_log(s->avctx, AV_LOG_DEBUG, "component %d %d:%d id: %d quant:%d\n",
411  i, h_count[i], v_count[i],
412  s->component_id[i], s->quant_index[i]);
413  }
414  if ( nb_components == 4
415  && s->component_id[0] == 'C'
416  && s->component_id[1] == 'M'
417  && s->component_id[2] == 'Y'
418  && s->component_id[3] == 'K')
419  s->adobe_transform = 0;
420 
421  if (s->ls && (s->h_max > 1 || s->v_max > 1)) {
422  avpriv_report_missing_feature(s->avctx, "Subsampling in JPEG-LS");
423  return AVERROR_PATCHWELCOME;
424  }
425 
426  if (s->bayer) {
427  if (nb_components == 2) {
428  /* Bayer images embedded in DNGs can contain 2 interleaved components and the
429  width stored in their SOF3 markers is the width of each one. We only output
430  a single component, therefore we need to adjust the output image width. We
431  handle the deinterleaving (but not the debayering) in this file. */
432  width *= 2;
433  }
434  /* They can also contain 1 component, which is double the width and half the height
435  of the final image (rows are interleaved). We don't handle the decoding in this
436  file, but leave that to the TIFF/DNG decoder. */
437  }
438 
439  /* if different size, realloc/alloc picture */
440  if (width != s->width || height != s->height || bits != s->bits ||
441  memcmp(s->h_count, h_count, sizeof(h_count)) ||
442  memcmp(s->v_count, v_count, sizeof(v_count))) {
443  size_change = 1;
444 
445  s->width = width;
446  s->height = height;
447  s->bits = bits;
448  memcpy(s->h_count, h_count, sizeof(h_count));
449  memcpy(s->v_count, v_count, sizeof(v_count));
450  s->interlaced = 0;
451  s->got_picture = 0;
452 
453  /* test interlaced mode */
454  if (s->first_picture &&
455  (s->multiscope != 2 || s->avctx->pkt_timebase.den >= 25 * s->avctx->pkt_timebase.num) &&
456  s->orig_height != 0 &&
457  s->height < ((s->orig_height * 3) / 4)) {
458  s->interlaced = 1;
459  s->bottom_field = s->interlace_polarity;
460  s->picture_ptr->flags |= AV_FRAME_FLAG_INTERLACED;
461  s->picture_ptr->flags |= AV_FRAME_FLAG_TOP_FIELD_FIRST * !s->interlace_polarity;
462  height *= 2;
463  }
464 
465  ret = ff_set_dimensions(s->avctx, width, height);
466  if (ret < 0)
467  return ret;
468 
469  if (s->avctx->codec_id != AV_CODEC_ID_SMVJPEG &&
470  (s->avctx->codec_tag == MKTAG('A', 'V', 'R', 'n') ||
471  s->avctx->codec_tag == MKTAG('A', 'V', 'D', 'J')) &&
472  s->orig_height < height)
473  s->avctx->height = AV_CEIL_RSHIFT(s->orig_height, s->avctx->lowres);
474 
475  s->first_picture = 0;
476  } else {
477  size_change = 0;
478  }
479 
480  if (s->avctx->codec_id == AV_CODEC_ID_SMVJPEG) {
481  s->avctx->height = s->avctx->coded_height / s->smv_frames_per_jpeg;
482  if (s->avctx->height <= 0)
483  return AVERROR_INVALIDDATA;
484  }
485  if (s->bayer && s->progressive) {
486  avpriv_request_sample(s->avctx, "progressively coded bayer picture");
487  return AVERROR_INVALIDDATA;
488  }
489 
490  if (s->got_picture && s->interlaced && (s->bottom_field == !s->interlace_polarity)) {
491  if (s->progressive) {
492  avpriv_request_sample(s->avctx, "progressively coded interlaced picture");
493  return AVERROR_INVALIDDATA;
494  }
495  } else {
496  if (s->v_max == 1 && s->h_max == 1 && s->lossless==1 && (nb_components==3 || nb_components==4))
497  s->rgb = 1;
498  else if (!s->lossless)
499  s->rgb = 0;
500  /* XXX: not complete test ! */
501  pix_fmt_id = ((unsigned)s->h_count[0] << 28) | (s->v_count[0] << 24) |
502  (s->h_count[1] << 20) | (s->v_count[1] << 16) |
503  (s->h_count[2] << 12) | (s->v_count[2] << 8) |
504  (s->h_count[3] << 4) | s->v_count[3];
505  av_log(s->avctx, AV_LOG_DEBUG, "pix fmt id %x\n", pix_fmt_id);
506  /* NOTE we do not allocate pictures large enough for the possible
507  * padding of h/v_count being 4 */
508  if (!(pix_fmt_id & 0xD0D0D0D0))
509  pix_fmt_id -= (pix_fmt_id & 0xF0F0F0F0) >> 1;
510  if (!(pix_fmt_id & 0x0D0D0D0D))
511  pix_fmt_id -= (pix_fmt_id & 0x0F0F0F0F) >> 1;
512 
513  for (i = 0; i < 8; i++) {
514  int j = 6 + (i&1) - (i&6);
515  int is = (pix_fmt_id >> (4*i)) & 0xF;
516  int js = (pix_fmt_id >> (4*j)) & 0xF;
517 
518  if (is == 1 && js != 2 && (i < 2 || i > 5))
519  js = (pix_fmt_id >> ( 8 + 4*(i&1))) & 0xF;
520  if (is == 1 && js != 2 && (i < 2 || i > 5))
521  js = (pix_fmt_id >> (16 + 4*(i&1))) & 0xF;
522 
523  if (is == 1 && js == 2) {
524  if (i & 1) s->upscale_h[j/2] = 1;
525  else s->upscale_v[j/2] = 1;
526  }
527  }
528 
529  if (s->bayer) {
530  if (pix_fmt_id != 0x11110000 && pix_fmt_id != 0x11000000)
531  goto unk_pixfmt;
532  }
533 
534  switch (pix_fmt_id) {
535  case 0x11110000: /* for bayer-encoded huffman lossless JPEGs embedded in DNGs */
536  if (!s->bayer)
537  goto unk_pixfmt;
538  s->avctx->pix_fmt = AV_PIX_FMT_GRAY16LE;
539  break;
540  case 0x11111100:
541  if (s->rgb)
542  s->avctx->pix_fmt = s->bits <= 9 ? AV_PIX_FMT_BGR24 : AV_PIX_FMT_BGR48;
543  else {
544  if ( s->adobe_transform == 0
545  || s->component_id[0] == 'R' && s->component_id[1] == 'G' && s->component_id[2] == 'B') {
546  s->avctx->pix_fmt = s->bits <= 8 ? AV_PIX_FMT_GBRP : AV_PIX_FMT_GBRP16;
547  } else {
548  if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV444P : AV_PIX_FMT_YUVJ444P;
549  else s->avctx->pix_fmt = AV_PIX_FMT_YUV444P16;
550  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
551  }
552  }
553  av_assert0(s->nb_components == 3);
554  break;
555  case 0x11111111:
556  if (s->rgb)
557  s->avctx->pix_fmt = s->bits <= 9 ? AV_PIX_FMT_ABGR : AV_PIX_FMT_RGBA64;
558  else {
559  if (s->adobe_transform == 0 && s->bits <= 8) {
560  s->avctx->pix_fmt = AV_PIX_FMT_GBRAP;
561  } else {
562  s->avctx->pix_fmt = s->bits <= 8 ? AV_PIX_FMT_YUVA444P : AV_PIX_FMT_YUVA444P16;
563  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
564  }
565  }
566  av_assert0(s->nb_components == 4);
567  break;
568  case 0x11412100:
569  if (s->bits > 8)
570  goto unk_pixfmt;
571  if (s->component_id[0] == 'R' && s->component_id[1] == 'G' && s->component_id[2] == 'B') {
572  s->avctx->pix_fmt = AV_PIX_FMT_GBRP;
573  s->upscale_h[0] = 4;
574  s->upscale_h[1] = 0;
575  s->upscale_h[2] = 1;
576  } else {
577  goto unk_pixfmt;
578  }
579  break;
580  case 0x22111122:
581  case 0x22111111:
582  if (s->adobe_transform == 0 && s->bits <= 8) {
583  s->avctx->pix_fmt = AV_PIX_FMT_GBRAP;
584  s->upscale_v[1] = s->upscale_v[2] = 1;
585  s->upscale_h[1] = s->upscale_h[2] = 1;
586  } else if (s->adobe_transform == 2 && s->bits <= 8) {
587  s->avctx->pix_fmt = AV_PIX_FMT_YUVA444P;
588  s->upscale_v[1] = s->upscale_v[2] = 1;
589  s->upscale_h[1] = s->upscale_h[2] = 1;
590  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
591  } else {
592  if (s->bits <= 8) s->avctx->pix_fmt = AV_PIX_FMT_YUVA420P;
593  else s->avctx->pix_fmt = AV_PIX_FMT_YUVA420P16;
594  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
595  }
596  av_assert0(s->nb_components == 4);
597  break;
598  case 0x12121100:
599  case 0x22122100:
600  case 0x21211100:
601  case 0x21112100:
602  case 0x22211200:
603  case 0x22221100:
604  case 0x22112200:
605  case 0x11222200:
606  if (s->bits > 8)
607  goto unk_pixfmt;
608  if (s->adobe_transform == 0 || s->component_id[0] == 'R' &&
609  s->component_id[1] == 'G' && s->component_id[2] == 'B') {
610  s->avctx->pix_fmt = AV_PIX_FMT_GBRP;
611  } else {
612  s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV444P : AV_PIX_FMT_YUVJ444P;
613  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
614  }
615  break;
616  case 0x11000000:
617  case 0x13000000:
618  case 0x14000000:
619  case 0x31000000:
620  case 0x33000000:
621  case 0x34000000:
622  case 0x41000000:
623  case 0x43000000:
624  case 0x44000000:
625  if(s->bits <= 8)
626  s->avctx->pix_fmt = s->force_pal8 ? AV_PIX_FMT_PAL8 : AV_PIX_FMT_GRAY8;
627  else
628  s->avctx->pix_fmt = AV_PIX_FMT_GRAY16;
629  break;
630  case 0x12111100:
631  case 0x14121200:
632  case 0x14111100:
633  case 0x22211100:
634  case 0x22112100:
635  if (s->component_id[0] == 'R' && s->component_id[1] == 'G' && s->component_id[2] == 'B') {
636  if (s->bits <= 8) s->avctx->pix_fmt = AV_PIX_FMT_GBRP;
637  else
638  goto unk_pixfmt;
639  s->upscale_v[1] = s->upscale_v[2] = 1;
640  } else {
641  if (pix_fmt_id == 0x14111100)
642  s->upscale_v[1] = s->upscale_v[2] = 1;
643  if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV440P : AV_PIX_FMT_YUVJ440P;
644  else
645  goto unk_pixfmt;
646  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
647  }
648  break;
649  case 0x21111100:
650  if (s->component_id[0] == 'R' && s->component_id[1] == 'G' && s->component_id[2] == 'B') {
651  if (s->bits <= 8) s->avctx->pix_fmt = AV_PIX_FMT_GBRP;
652  else
653  goto unk_pixfmt;
654  s->upscale_h[1] = s->upscale_h[2] = 1;
655  } else {
656  if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV422P : AV_PIX_FMT_YUVJ422P;
657  else s->avctx->pix_fmt = AV_PIX_FMT_YUV422P16;
658  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
659  }
660  break;
661  case 0x11311100:
662  if (s->bits > 8)
663  goto unk_pixfmt;
664  if (s->component_id[0] == 'R' && s->component_id[1] == 'G' && s->component_id[2] == 'B')
665  s->avctx->pix_fmt = AV_PIX_FMT_GBRP;
666  else
667  goto unk_pixfmt;
668  s->upscale_h[0] = s->upscale_h[2] = 2;
669  break;
670  case 0x31111100:
671  if (s->bits > 8)
672  goto unk_pixfmt;
673  s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV444P : AV_PIX_FMT_YUVJ444P;
674  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
675  s->upscale_h[1] = s->upscale_h[2] = 2;
676  break;
677  case 0x22121100:
678  case 0x22111200:
679  case 0x41211100:
680  if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV422P : AV_PIX_FMT_YUVJ422P;
681  else
682  goto unk_pixfmt;
683  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
684  break;
685  case 0x22111100:
686  case 0x23111100:
687  case 0x42111100:
688  case 0x24111100:
689  if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV420P : AV_PIX_FMT_YUVJ420P;
690  else s->avctx->pix_fmt = AV_PIX_FMT_YUV420P16;
691  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
692  if (pix_fmt_id == 0x42111100) {
693  if (s->bits > 8)
694  goto unk_pixfmt;
695  s->upscale_h[1] = s->upscale_h[2] = 1;
696  } else if (pix_fmt_id == 0x24111100) {
697  if (s->bits > 8)
698  goto unk_pixfmt;
699  s->upscale_v[1] = s->upscale_v[2] = 1;
700  } else if (pix_fmt_id == 0x23111100) {
701  if (s->bits > 8)
702  goto unk_pixfmt;
703  s->upscale_v[1] = s->upscale_v[2] = 2;
704  }
705  break;
706  case 0x41111100:
707  if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV411P : AV_PIX_FMT_YUVJ411P;
708  else
709  goto unk_pixfmt;
710  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
711  break;
712  default:
713  unk_pixfmt:
714  avpriv_report_missing_feature(s->avctx, "Pixel format 0x%x bits:%d", pix_fmt_id, s->bits);
715  memset(s->upscale_h, 0, sizeof(s->upscale_h));
716  memset(s->upscale_v, 0, sizeof(s->upscale_v));
717  return AVERROR_PATCHWELCOME;
718  }
719  if ((AV_RB32(s->upscale_h) || AV_RB32(s->upscale_v)) && s->avctx->lowres) {
720  avpriv_report_missing_feature(s->avctx, "Lowres for weird subsampling");
721  return AVERROR_PATCHWELCOME;
722  }
723  if (s->ls) {
724  memset(s->upscale_h, 0, sizeof(s->upscale_h));
725  memset(s->upscale_v, 0, sizeof(s->upscale_v));
726  if (s->nb_components == 3) {
727  s->avctx->pix_fmt = AV_PIX_FMT_RGB24;
728  } else if (s->nb_components != 1) {
729  av_log(s->avctx, AV_LOG_ERROR, "Unsupported number of components %d\n", s->nb_components);
730  return AVERROR_PATCHWELCOME;
731  } else if ((s->palette_index || s->force_pal8) && s->bits <= 8)
732  s->avctx->pix_fmt = AV_PIX_FMT_PAL8;
733  else if (s->bits <= 8)
734  s->avctx->pix_fmt = AV_PIX_FMT_GRAY8;
735  else
736  s->avctx->pix_fmt = AV_PIX_FMT_GRAY16;
737  }
738 
739  s->pix_desc = av_pix_fmt_desc_get(s->avctx->pix_fmt);
740  if (!s->pix_desc) {
741  av_log(s->avctx, AV_LOG_ERROR, "Could not get a pixel format descriptor.\n");
742  return AVERROR_BUG;
743  }
744 
745  if (s->avctx->pix_fmt == s->hwaccel_sw_pix_fmt && !size_change) {
746  s->avctx->pix_fmt = s->hwaccel_pix_fmt;
747  } else {
748  enum AVPixelFormat pix_fmts[] = {
749 #if CONFIG_MJPEG_NVDEC_HWACCEL
751 #endif
752 #if CONFIG_MJPEG_VAAPI_HWACCEL
754 #endif
755  s->avctx->pix_fmt,
757  };
758  s->hwaccel_pix_fmt = ff_get_format(s->avctx, pix_fmts);
759  if (s->hwaccel_pix_fmt < 0)
760  return AVERROR(EINVAL);
761 
762  s->hwaccel_sw_pix_fmt = s->avctx->pix_fmt;
763  s->avctx->pix_fmt = s->hwaccel_pix_fmt;
764  }
765 
766  if (s->avctx->skip_frame == AVDISCARD_ALL) {
767  s->picture_ptr->pict_type = AV_PICTURE_TYPE_I;
768  s->picture_ptr->flags |= AV_FRAME_FLAG_KEY;
769  s->got_picture = 1;
770  return 0;
771  }
772 
773  av_frame_unref(s->picture_ptr);
774  ret = ff_get_buffer(s->avctx, s->picture_ptr, AV_GET_BUFFER_FLAG_REF);
775  if (ret < 0)
776  return ret;
777  s->picture_ptr->pict_type = AV_PICTURE_TYPE_I;
778  s->picture_ptr->flags |= AV_FRAME_FLAG_KEY;
779  s->got_picture = 1;
780 
781  // Lets clear the palette to avoid leaving uninitialized values in it
782  if (s->avctx->pix_fmt == AV_PIX_FMT_PAL8)
783  memset(s->picture_ptr->data[1], 0, 1024);
784 
785  for (i = 0; i < 4; i++)
786  s->linesize[i] = s->picture_ptr->linesize[i] << s->interlaced;
787 
788  ff_dlog(s->avctx, "%d %d %d %d %d %d\n",
789  s->width, s->height, s->linesize[0], s->linesize[1],
790  s->interlaced, s->avctx->height);
791 
792  }
793 
794  if ((s->rgb && !s->lossless && !s->ls) ||
795  (!s->rgb && s->ls && s->nb_components > 1) ||
796  (s->avctx->pix_fmt == AV_PIX_FMT_PAL8 && !s->ls)
797  ) {
798  av_log(s->avctx, AV_LOG_ERROR, "Unsupported coding and pixel format combination\n");
799  return AVERROR_PATCHWELCOME;
800  }
801 
802  /* totally blank picture as progressive JPEG will only add details to it */
803  if (s->progressive) {
804  int bw = (width + s->h_max * 8 - 1) / (s->h_max * 8);
805  int bh = (height + s->v_max * 8 - 1) / (s->v_max * 8);
806  for (i = 0; i < s->nb_components; i++) {
807  int size = bw * bh * s->h_count[i] * s->v_count[i];
808  av_freep(&s->blocks[i]);
809  av_freep(&s->last_nnz[i]);
810  s->blocks[i] = av_calloc(size, sizeof(**s->blocks));
811  s->last_nnz[i] = av_calloc(size, sizeof(**s->last_nnz));
812  if (!s->blocks[i] || !s->last_nnz[i])
813  return AVERROR(ENOMEM);
814  s->block_stride[i] = bw * s->h_count[i];
815  }
816  memset(s->coefs_finished, 0, sizeof(s->coefs_finished));
817  }
818 
819  if (s->avctx->hwaccel) {
820  const FFHWAccel *hwaccel = ffhwaccel(s->avctx->hwaccel);
821  s->hwaccel_picture_private =
822  av_mallocz(hwaccel->frame_priv_data_size);
823  if (!s->hwaccel_picture_private)
824  return AVERROR(ENOMEM);
825 
826  ret = hwaccel->start_frame(s->avctx, NULL, s->raw_image_buffer,
827  s->raw_image_buffer_size);
828  if (ret < 0)
829  return ret;
830  }
831 
832  return 0;
833 }
834 
835 static inline int mjpeg_decode_dc(MJpegDecodeContext *s, int dc_index, int *val)
836 {
837  int code;
838  code = get_vlc2(&s->gb, s->vlcs[0][dc_index].table, 9, 2);
839  if (code < 0 || code > 16) {
840  av_log(s->avctx, AV_LOG_ERROR,
841  "mjpeg_decode_dc: bad vlc: %d\n", dc_index);
842  return AVERROR_INVALIDDATA;
843  }
844 
845  *val = code ? get_xbits(&s->gb, code) : 0;
846  return 0;
847 }
848 
/* decode block and dequantize */
/*
 * Sequential (baseline) mode: decode one 8x8 block.  The DC coefficient
 * is predicted from the previous block of the same component; AC
 * coefficients are decoded as (run, size) pairs and dequantized in place.
 */
static int decode_block(MJpegDecodeContext *s, int16_t *block, int component,
                        int dc_index, int ac_index, uint16_t *quant_matrix)
{
    int code, i, j, level, val;

    /* DC coef */
    int ret = mjpeg_decode_dc(s, dc_index, &val);
    if (ret < 0)
        return ret;

    /* dequantize the difference and add the DC predictor */
    val = val * (unsigned)quant_matrix[0] + s->last_dc[component];
    s->last_dc[component] = val;
    block[0] = av_clip_int16(val);
    /* AC coefs */
    i = 0;
    {OPEN_READER(re, &s->gb);
    do {
        UPDATE_CACHE(re, &s->gb);
        GET_VLC(code, re, &s->gb, s->vlcs[1][ac_index].table, 9, 2);

        /* high nibble = zero-run length, low nibble = magnitude size */
        i += ((unsigned)code) >> 4;
        code &= 0xf;
        if (code) {
            // GET_VLC updates the cache if parsing reaches the second stage.
            // So we have at least MIN_CACHE_BITS - 9 > 15 bits left here
            // and don't need to refill the cache.
            {
                /* branchless sign extension of the `code`-bit magnitude */
                int cache = GET_CACHE(re, &s->gb);
                int sign = (~cache) >> 31;
                level = (NEG_USR32(sign ^ cache,code) ^ sign) - sign;
            }

            LAST_SKIP_BITS(re, &s->gb, code);

            if (i > 63) {
                av_log(s->avctx, AV_LOG_ERROR, "error count: %d\n", i);
                return AVERROR_INVALIDDATA;
            }
            /* store in IDCT-permuted position, dequantized */
            j = s->permutated_scantable[i];
            block[j] = level * quant_matrix[i];
        }
    } while (i < 63); /* a (0,0) code (EOB) leaves i unchanged and exits */
    CLOSE_READER(re, &s->gb);}

    return 0;
}
896 
/*
 * Progressive mode, DC first pass: clear the block and decode the DC
 * coefficient (scaled by the successive-approximation shift Al).
 * NOTE(review): the first line of the signature is missing from this
 * extract (presumably `static int decode_dc_progressive(MJpegDecodeContext *s,
 * int16_t *block,` -- confirm against the full file).
 * NOTE(review): `val` is declared unsigned but its address is passed to
 * mjpeg_decode_dc() which takes `int *` -- incompatible pointer types;
 * verify this compiles as intended in the full tree.
 */
                                 int component, int dc_index,
                                 uint16_t *quant_matrix, int Al)
{
    unsigned val;
    s->bdsp.clear_block(block);
    int ret = mjpeg_decode_dc(s, dc_index, &val);
    if (ret < 0)
        return ret;

    /* dequantize (with the Al point-transform shift) and add the predictor */
    val = (val * (quant_matrix[0] << Al)) + s->last_dc[component];
    s->last_dc[component] = val;
    block[0] = val;
    return 0;
}
912 
/* decode block and dequantize - progressive JPEG version */
/*
 * Progressive mode, AC first pass for one spectral band [ss, se]:
 * decodes (run, size) pairs, handles ZRL (run of 15 zeros) and EOB runs
 * (EOBRUN counts whole blocks with no further coefficients in this band).
 * NOTE(review): the first line of the signature is missing from this
 * extract (presumably `static int decode_block_progressive(MJpegDecodeContext *s,
 * int16_t *block,` -- confirm against the full file).
 */
                                    uint8_t *last_nnz, int ac_index,
                                    uint16_t *quant_matrix,
                                    int ss, int se, int Al, int *EOBRUN)
{
    int code, i, j, val, run;
    unsigned level;

    /* inside an EOB run: this whole block has no coefficients in the band */
    if (*EOBRUN) {
        (*EOBRUN)--;
        return 0;
    }

    {
        OPEN_READER(re, &s->gb);
        for (i = ss; ; i++) {
            UPDATE_CACHE(re, &s->gb);
            GET_VLC(code, re, &s->gb, s->vlcs[2][ac_index].table, 9, 2);

            /* high nibble = zero-run length, low nibble = magnitude size */
            run = ((unsigned) code) >> 4;
            code &= 0xF;
            if (code) {
                i += run;

                {
                    /* branchless sign extension of the `code`-bit magnitude */
                    int cache = GET_CACHE(re, &s->gb);
                    int sign = (~cache) >> 31;
                    level = (NEG_USR32(sign ^ cache,code) ^ sign) - sign;
                }

                LAST_SKIP_BITS(re, &s->gb, code);

                if (i >= se) {
                    if (i == se) {
                        /* coefficient lands exactly on the band end */
                        j = s->permutated_scantable[se];
                        block[j] = level * (quant_matrix[se] << Al);
                        break;
                    }
                    av_log(s->avctx, AV_LOG_ERROR, "error count: %d\n", i);
                    return AVERROR_INVALIDDATA;
                }
                j = s->permutated_scantable[i];
                block[j] = level * (quant_matrix[i] << Al);
            } else {
                if (run == 0xF) {// ZRL - skip 15 coefficients
                    i += 15;
                    if (i >= se) {
                        av_log(s->avctx, AV_LOG_ERROR, "ZRL overflow: %d\n", i);
                        return AVERROR_INVALIDDATA;
                    }
                } else {
                    /* EOBn: run extra bits extend the end-of-band run length */
                    val = (1 << run);
                    if (run) {
                        // Given that GET_VLC reloads internally, we always
                        // have at least 16 bits in the cache here.
                        val += NEG_USR32(GET_CACHE(re, &s->gb), run);
                        LAST_SKIP_BITS(re, &s->gb, run);
                    }
                    *EOBRUN = val - 1;
                    break;
                }
            }
        }
        CLOSE_READER(re, &s->gb);
    }

    /* remember the highest non-zero position seen so far in this block */
    if (i > *last_nnz)
        *last_nnz = i;

    return 0;
}
985 
/* Refinement helper: read one correction bit for an already-nonzero
 * coefficient at permuted position j and add/subtract (quant << Al)
 * matching the coefficient's sign. Uses the enclosing OPEN_READER state. */
#define REFINE_BIT(j) { \
    UPDATE_CACHE(re, &s->gb); \
    sign = block[j] >> 15; \
    block[j] += SHOW_UBITS(re, &s->gb, 1) * \
                ((quant_matrix[i] ^ sign) - sign) << Al; \
    LAST_SKIP_BITS(re, &s->gb, 1); \
}
993 
/* Refinement helper: advance i over `run` zero coefficients, reading a
 * refinement bit for each nonzero coefficient passed on the way; past
 * `last` (the highest previously-nonzero index) skipping is positional
 * only, bounded by se. Expects i, j, run, last, se, sign in scope. */
#define ZERO_RUN \
for (; ; i++) { \
    if (i > last) { \
        i += run; \
        if (i > se) { \
            av_log(s->avctx, AV_LOG_ERROR, "error count: %d\n", i); \
            return -1; \
        } \
        break; \
    } \
    j = s->permutated_scantable[i]; \
    if (block[j]) \
        REFINE_BIT(j) \
    else if (run-- == 0) \
        break; \
}
1010 
/* decode block and dequantize - progressive JPEG refinement pass */
/*
 * Progressive mode, AC refinement pass for band [ss, se]: adds one bit
 * of precision to already-coded coefficients and introduces newly
 * nonzero ones, tracking EOB runs like the first AC pass.
 * NOTE(review): the first line of the signature is missing from this
 * extract (presumably `static int decode_block_refinement(MJpegDecodeContext *s,
 * int16_t *block,` -- confirm against the full file).
 */
                                   uint8_t *last_nnz,
                                   int ac_index, uint16_t *quant_matrix,
                                   int ss, int se, int Al, int *EOBRUN)
{
    int code, i = ss, j, sign, val, run;
    int last = FFMIN(se, *last_nnz); /* refinement applies up to here */

    OPEN_READER(re, &s->gb);
    if (*EOBRUN) {
        /* inside an EOB run: only refine existing coefficients below */
        (*EOBRUN)--;
    } else {
        for (; ; i++) {
            UPDATE_CACHE(re, &s->gb);
            GET_VLC(code, re, &s->gb, s->vlcs[2][ac_index].table, 9, 2);

            if (code & 0xF) {
                /* new coefficient: its sign bit follows, then the zero run
                 * (with refinement bits for nonzero coefficients passed) */
                run = ((unsigned) code) >> 4;
                val = SHOW_UBITS(re, &s->gb, 1);
                LAST_SKIP_BITS(re, &s->gb, 1);
                ZERO_RUN;
                j = s->permutated_scantable[i];
                val--;
                /* val is now 0 (positive) or -1 (negative); this sets
                 * block[j] = +/- (quant << Al) branchlessly */
                block[j] = ((quant_matrix[i] << Al) ^ val) - val;
                if (i == se) {
                    if (i > *last_nnz)
                        *last_nnz = i;
                    CLOSE_READER(re, &s->gb);
                    return 0;
                }
            } else {
                run = ((unsigned) code) >> 4;
                if (run == 0xF) {
                    /* ZRL: skip 16 zero coefficients, refining on the way */
                    ZERO_RUN;
                } else {
                    /* EOBn: extra bits extend the end-of-band run length */
                    val = run;
                    run = (1 << run);
                    if (val) {
                        // Given that GET_VLC reloads internally, we always
                        // have at least 16 bits in the cache here.
                        run += SHOW_UBITS(re, &s->gb, val);
                        LAST_SKIP_BITS(re, &s->gb, val);
                    }
                    *EOBRUN = run - 1;
                    break;
                }
            }
        }

        if (i > *last_nnz)
            *last_nnz = i;
    }

    /* refine the remaining previously-nonzero coefficients in the band */
    for (; i <= last; i++) {
        j = s->permutated_scantable[i];
        if (block[j])
            REFINE_BIT(j)
    }
    CLOSE_READER(re, &s->gb);

    return 0;
}
1074 #undef REFINE_BIT
1075 #undef ZERO_RUN
1076 
/* Handle a potential restart marker at the current bitstream position:
 * decrement the per-interval MCU counter and, when it reaches zero, try to
 * consume an RSTn marker (skipping 0xFF fill bytes) and reset the DC
 * predictors.  Returns 1 if DC prediction was reset, 0 otherwise. */
static int handle_rstn(MJpegDecodeContext *s, int nb_components)
{
    int i;
    int reset = 0;

    if (s->restart_interval) {
        s->restart_count--;
        /* THP resets DC prediction on a byte-aligned boundary without an
         * explicit RSTn marker in the stream. */
        if(s->restart_count == 0 && s->avctx->codec_id == AV_CODEC_ID_THP){
            align_get_bits(&s->gb);
            for (i = 0; i < nb_components; i++) /* reset dc */
                s->last_dc[i] = (4 << s->bits);
        }

        /* bits up to the next byte boundary, plus one whole byte */
        i = 8 + ((-get_bits_count(&s->gb)) & 7);
        /* skip RSTn */
        if (s->restart_count == 0) {
            /* a marker can only follow if the padding bits are all ones
             * (or the next bytes look like 0xFF marker prefixes) */
            if( show_bits(&s->gb, i) == (1 << i) - 1
             || show_bits(&s->gb, i) == 0xFF) {
                int pos = get_bits_count(&s->gb);
                align_get_bits(&s->gb);
                /* skip 0xFF fill bytes preceding the marker code */
                while (get_bits_left(&s->gb) >= 8 && show_bits(&s->gb, 8) == 0xFF)
                    skip_bits(&s->gb, 8);
                /* RST0..RST7 are 0xD0..0xD7, hence the & 0xF8 test */
                if (get_bits_left(&s->gb) >= 8 && (get_bits(&s->gb, 8) & 0xF8) == 0xD0) {
                    for (i = 0; i < nb_components; i++) /* reset dc */
                        s->last_dc[i] = (4 << s->bits);
                    reset = 1;
                } else
                    /* not a restart marker: rewind to the saved position */
                    skip_bits_long(&s->gb, pos - get_bits_count(&s->gb));
            }
        }
    }
    return reset;
}
1110 
/* Handles 1 to 4 components */
/* Lossless JPEG scan for RGB(A), RCT/pegasus-RCT and Bayer layouts.
 * Decodes one row of samples at a time into s->ljpeg_buffer, running the
 * DPCM predictor horizontally/vertically, then writes the row out to
 * s->picture_ptr->data[0] in the appropriate channel order.
 * Returns 0 on success or a negative AVERROR code. */
static int ljpeg_decode_rgb_scan(MJpegDecodeContext *s, int nb_components, int predictor, int point_transform)
{
    int i, mb_x, mb_y;
    unsigned width;
    uint16_t (*buffer)[4];
    int left[4], top[4], topleft[4];
    const int linesize = s->linesize[0];
    const int mask = ((1 << s->bits) - 1) << point_transform;
    int resync_mb_y = 0;
    int resync_mb_x = 0;
    int vpred[6];
    int ret;

    /* sanity-check component counts against the requested mode */
    if (!s->bayer && s->nb_components < 3)
        return AVERROR_INVALIDDATA;
    if (s->bayer && s->nb_components > 2)
        return AVERROR_INVALIDDATA;
    if (s->nb_components <= 0 || s->nb_components > 4)
        return AVERROR_INVALIDDATA;
    if (s->v_max != 1 || s->h_max != 1 || !s->lossless)
        return AVERROR_INVALIDDATA;
    if (s->bayer) {
        if (s->rct || s->pegasus_rct)
            return AVERROR_INVALIDDATA;
    }


    s->restart_count = s->restart_interval;

    /* INT_MAX effectively disables restart handling below */
    if (s->restart_interval == 0)
        s->restart_interval = INT_MAX;

    if (s->bayer)
        width = s->mb_width / nb_components; /* Interleaved, width stored is the total so need to divide */
    else
        width = s->mb_width;

    av_fast_malloc(&s->ljpeg_buffer, &s->ljpeg_buffer_size, width * 4 * sizeof(s->ljpeg_buffer[0][0]));
    if (!s->ljpeg_buffer)
        return AVERROR(ENOMEM);

    buffer = s->ljpeg_buffer;

    /* seed the predictors with the mid-level value 2^(bits-1) */
    for (i = 0; i < 4; i++)
        buffer[0][i] = 1 << (s->bits - 1);

    for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
        uint8_t *ptr = s->picture_ptr->data[0] + (linesize * mb_y);

        if (s->interlaced && s->bottom_field)
            ptr += linesize >> 1;

        for (i = 0; i < 4; i++)
            top[i] = left[i] = topleft[i] = buffer[0][i];

        if ((mb_y * s->width) % s->restart_interval == 0) {
            /* vertical predictors for the bayer first-column special case */
            for (i = 0; i < 6; i++)
                vpred[i] = 1 << (s->bits-1);
        }

        for (mb_x = 0; mb_x < width; mb_x++) {
            int modified_predictor = predictor;

            if (get_bits_left(&s->gb) < 1) {
                av_log(s->avctx, AV_LOG_ERROR, "bitstream end in rgb_scan\n");
                return AVERROR_INVALIDDATA;
            }

            if (s->restart_interval && !s->restart_count){
                s->restart_count = s->restart_interval;
                resync_mb_x = mb_x;
                resync_mb_y = mb_y;
                for(i=0; i<4; i++)
                    top[i] = left[i]= topleft[i]= 1 << (s->bits - 1);
            }
            /* after a resync (or on the first row/column) fall back to
             * predictor 1 (left), as required after restart markers */
            if (mb_y == resync_mb_y || mb_y == resync_mb_y+1 && mb_x < resync_mb_x || !mb_x)
                modified_predictor = 1;

            for (i=0;i<nb_components;i++) {
                int pred, dc;

                topleft[i] = top[i];
                top[i] = buffer[mb_x][i];

                ret = mjpeg_decode_dc(s, s->dc_index[i], &dc);
                if (ret < 0)
                    return ret;

                if (!s->bayer || mb_x) {
                    pred = left[i];
                } else { /* This path runs only for the first line in bayer images */
                    vpred[i] += dc;
                    pred = vpred[i] - dc;
                }

                PREDICT(pred, topleft[i], top[i], pred, modified_predictor);

                left[i] = buffer[mb_x][i] =
                    mask & (pred + (unsigned)(dc * (1 << point_transform)));
            }

            if (s->restart_interval && !--s->restart_count) {
                align_get_bits(&s->gb);
                skip_bits(&s->gb, 16); /* skip RSTn */
            }
        }
        /* write the decoded row out in the pixel layout of the output */
        if (s->rct && s->nb_components == 4) {
            for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
                ptr[4*mb_x + 2] = buffer[mb_x][0] - ((buffer[mb_x][1] + buffer[mb_x][2] - 0x200) >> 2);
                ptr[4*mb_x + 1] = buffer[mb_x][1] + ptr[4*mb_x + 2];
                ptr[4*mb_x + 3] = buffer[mb_x][2] + ptr[4*mb_x + 2];
                ptr[4*mb_x + 0] = buffer[mb_x][3];
            }
        } else if (s->nb_components == 4) {
            for(i=0; i<nb_components; i++) {
                int c= s->comp_index[i];
                if (s->bits <= 8) {
                    for(mb_x = 0; mb_x < s->mb_width; mb_x++) {
                        ptr[4*mb_x+3-c] = buffer[mb_x][i];
                    }
                } else if(s->bits == 9) {
                    return AVERROR_PATCHWELCOME;
                } else {
                    for(mb_x = 0; mb_x < s->mb_width; mb_x++) {
                        ((uint16_t*)ptr)[4*mb_x+c] = buffer[mb_x][i];
                    }
                }
            }
        } else if (s->rct) {
            /* reversible color transform (lossless JPEG RCT) */
            for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
                ptr[3*mb_x + 1] = buffer[mb_x][0] - ((buffer[mb_x][1] + buffer[mb_x][2] - 0x200) >> 2);
                ptr[3*mb_x + 0] = buffer[mb_x][1] + ptr[3*mb_x + 1];
                ptr[3*mb_x + 2] = buffer[mb_x][2] + ptr[3*mb_x + 1];
            }
        } else if (s->pegasus_rct) {
            /* Pegasus variant of RCT (no -0x200 bias) */
            for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
                ptr[3*mb_x + 1] = buffer[mb_x][0] - ((buffer[mb_x][1] + buffer[mb_x][2]) >> 2);
                ptr[3*mb_x + 0] = buffer[mb_x][1] + ptr[3*mb_x + 1];
                ptr[3*mb_x + 2] = buffer[mb_x][2] + ptr[3*mb_x + 1];
            }
        } else if (s->bayer) {
            if (s->bits <= 8)
                return AVERROR_PATCHWELCOME;
            if (nb_components == 1) {
                /* Leave decoding to the TIFF/DNG decoder (see comment in ff_mjpeg_decode_sof) */
                for (mb_x = 0; mb_x < width; mb_x++)
                    ((uint16_t*)ptr)[mb_x] = buffer[mb_x][0];
            } else if (nb_components == 2) {
                for (mb_x = 0; mb_x < width; mb_x++) {
                    ((uint16_t*)ptr)[2*mb_x + 0] = buffer[mb_x][0];
                    ((uint16_t*)ptr)[2*mb_x + 1] = buffer[mb_x][1];
                }
            }
        } else {
            for(i=0; i<nb_components; i++) {
                int c= s->comp_index[i];
                if (s->bits <= 8) {
                    for(mb_x = 0; mb_x < s->mb_width; mb_x++) {
                        ptr[3*mb_x+2-c] = buffer[mb_x][i];
                    }
                } else if(s->bits == 9) {
                    return AVERROR_PATCHWELCOME;
                } else {
                    for(mb_x = 0; mb_x < s->mb_width; mb_x++) {
                        ((uint16_t*)ptr)[3*mb_x+2-c] = buffer[mb_x][i];
                    }
                }
            }
        }
    }
    return 0;
}
1284 
1286  int point_transform, int nb_components)
1287 {
1288  int i, mb_x, mb_y, mask;
1289  int bits= (s->bits+7)&~7;
1290  int resync_mb_y = 0;
1291  int resync_mb_x = 0;
1292  int ret;
1293 
1294  point_transform += bits - s->bits;
1295  mask = ((1 << s->bits) - 1) << point_transform;
1296 
1297  av_assert0(nb_components>=1 && nb_components<=4);
1298 
1299  for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
1300  for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1301  if (get_bits_left(&s->gb) < 1) {
1302  av_log(s->avctx, AV_LOG_ERROR, "bitstream end in yuv_scan\n");
1303  return AVERROR_INVALIDDATA;
1304  }
1305  if (s->restart_interval && !s->restart_count){
1306  s->restart_count = s->restart_interval;
1307  resync_mb_x = mb_x;
1308  resync_mb_y = mb_y;
1309  }
1310 
1311  if(!mb_x || mb_y == resync_mb_y || mb_y == resync_mb_y+1 && mb_x < resync_mb_x || s->interlaced){
1312  int toprow = mb_y == resync_mb_y || mb_y == resync_mb_y+1 && mb_x < resync_mb_x;
1313  int leftcol = !mb_x || mb_y == resync_mb_y && mb_x == resync_mb_x;
1314  for (i = 0; i < nb_components; i++) {
1315  uint8_t *ptr;
1316  uint16_t *ptr16;
1317  int n, h, v, x, y, c, j, linesize;
1318  n = s->nb_blocks[i];
1319  c = s->comp_index[i];
1320  h = s->h_scount[i];
1321  v = s->v_scount[i];
1322  x = 0;
1323  y = 0;
1324  linesize= s->linesize[c];
1325 
1326  if(bits>8) linesize /= 2;
1327 
1328  for(j=0; j<n; j++) {
1329  int pred, dc;
1330 
1331  ret = mjpeg_decode_dc(s, s->dc_index[i], &dc);
1332  if (ret < 0)
1333  return ret;
1334 
1335  if ( h * mb_x + x >= s->width
1336  || v * mb_y + y >= s->height) {
1337  // Nothing to do
1338  } else if (bits<=8) {
1339  ptr = s->picture_ptr->data[c] + (linesize * (v * mb_y + y)) + (h * mb_x + x); //FIXME optimize this crap
1340  if(y==0 && toprow){
1341  if(x==0 && leftcol){
1342  pred= 1 << (bits - 1);
1343  }else{
1344  pred= ptr[-1];
1345  }
1346  }else{
1347  if(x==0 && leftcol){
1348  pred= ptr[-linesize];
1349  }else{
1350  PREDICT(pred, ptr[-linesize-1], ptr[-linesize], ptr[-1], predictor);
1351  }
1352  }
1353 
1354  if (s->interlaced && s->bottom_field)
1355  ptr += linesize >> 1;
1356  pred &= mask;
1357  *ptr= pred + ((unsigned)dc << point_transform);
1358  }else{
1359  ptr16 = (uint16_t*)(s->picture_ptr->data[c] + 2*(linesize * (v * mb_y + y)) + 2*(h * mb_x + x)); //FIXME optimize this crap
1360  if(y==0 && toprow){
1361  if(x==0 && leftcol){
1362  pred= 1 << (bits - 1);
1363  }else{
1364  pred= ptr16[-1];
1365  }
1366  }else{
1367  if(x==0 && leftcol){
1368  pred= ptr16[-linesize];
1369  }else{
1370  PREDICT(pred, ptr16[-linesize-1], ptr16[-linesize], ptr16[-1], predictor);
1371  }
1372  }
1373 
1374  if (s->interlaced && s->bottom_field)
1375  ptr16 += linesize >> 1;
1376  pred &= mask;
1377  *ptr16= pred + ((unsigned)dc << point_transform);
1378  }
1379  if (++x == h) {
1380  x = 0;
1381  y++;
1382  }
1383  }
1384  }
1385  } else {
1386  for (i = 0; i < nb_components; i++) {
1387  uint8_t *ptr;
1388  uint16_t *ptr16;
1389  int n, h, v, x, y, c, j, linesize, dc;
1390  n = s->nb_blocks[i];
1391  c = s->comp_index[i];
1392  h = s->h_scount[i];
1393  v = s->v_scount[i];
1394  x = 0;
1395  y = 0;
1396  linesize = s->linesize[c];
1397 
1398  if(bits>8) linesize /= 2;
1399 
1400  for (j = 0; j < n; j++) {
1401  int pred;
1402 
1403  ret = mjpeg_decode_dc(s, s->dc_index[i], &dc);
1404  if (ret < 0)
1405  return ret;
1406 
1407  if ( h * mb_x + x >= s->width
1408  || v * mb_y + y >= s->height) {
1409  // Nothing to do
1410  } else if (bits<=8) {
1411  ptr = s->picture_ptr->data[c] +
1412  (linesize * (v * mb_y + y)) +
1413  (h * mb_x + x); //FIXME optimize this crap
1414  PREDICT(pred, ptr[-linesize-1], ptr[-linesize], ptr[-1], predictor);
1415 
1416  pred &= mask;
1417  *ptr = pred + ((unsigned)dc << point_transform);
1418  }else{
1419  ptr16 = (uint16_t*)(s->picture_ptr->data[c] + 2*(linesize * (v * mb_y + y)) + 2*(h * mb_x + x)); //FIXME optimize this crap
1420  PREDICT(pred, ptr16[-linesize-1], ptr16[-linesize], ptr16[-1], predictor);
1421 
1422  pred &= mask;
1423  *ptr16= pred + ((unsigned)dc << point_transform);
1424  }
1425 
1426  if (++x == h) {
1427  x = 0;
1428  y++;
1429  }
1430  }
1431  }
1432  }
1433  if (s->restart_interval && !--s->restart_count) {
1434  align_get_bits(&s->gb);
1435  skip_bits(&s->gb, 16); /* skip RSTn */
1436  }
1437  }
1438  }
1439  return 0;
1440 }
1441 
1443  uint8_t *dst, const uint8_t *src,
1444  int linesize, int lowres)
1445 {
1446  switch (lowres) {
1447  case 0: s->copy_block(dst, src, linesize, 8);
1448  break;
1449  case 1: copy_block4(dst, src, linesize, linesize, 4);
1450  break;
1451  case 2: copy_block2(dst, src, linesize, linesize, 2);
1452  break;
1453  case 3: *dst = *src;
1454  break;
1455  }
1456 }
1457 
1458 static void shift_output(MJpegDecodeContext *s, uint8_t *ptr, int linesize)
1459 {
1460  int block_x, block_y;
1461  int size = 8 >> s->avctx->lowres;
1462  if (s->bits > 8) {
1463  for (block_y=0; block_y<size; block_y++)
1464  for (block_x=0; block_x<size; block_x++)
1465  *(uint16_t*)(ptr + 2*block_x + block_y*linesize) <<= 16 - s->bits;
1466  } else {
1467  for (block_y=0; block_y<size; block_y++)
1468  for (block_x=0; block_x<size; block_x++)
1469  *(ptr + block_x + block_y*linesize) <<= 8 - s->bits;
1470  }
1471 }
1472 
/* Decode one sequential (or progressive-DC) Huffman scan.
 * Walks the MCU grid; per block either copies from `reference` (when the
 * corresponding mb_bitmask bit is clear), or Huffman-decodes + IDCTs the
 * block (sequential), or accumulates DC coefficients into s->blocks
 * (progressive, Ah/Al are the successive-approximation parameters).
 * Returns 0 on success or a negative AVERROR code. */
static int mjpeg_decode_scan(MJpegDecodeContext *s, int nb_components, int Ah,
                             int Al, const uint8_t *mb_bitmask,
                             int mb_bitmask_size,
                             const AVFrame *reference)
{
    int i, mb_x, mb_y, chroma_h_shift, chroma_v_shift, chroma_width, chroma_height;
    uint8_t *data[MAX_COMPONENTS];
    const uint8_t *reference_data[MAX_COMPONENTS];
    int linesize[MAX_COMPONENTS];
    GetBitContext mb_bitmask_gb = {0}; // initialize to silence gcc warning
    int bytes_per_pixel = 1 + (s->bits > 8);

    if (mb_bitmask) {
        /* one bit per MCU: clear bit == copy from the reference frame */
        if (mb_bitmask_size != (s->mb_width * s->mb_height + 7)>>3) {
            av_log(s->avctx, AV_LOG_ERROR, "mb_bitmask_size mismatches\n");
            return AVERROR_INVALIDDATA;
        }
        init_get_bits(&mb_bitmask_gb, mb_bitmask, s->mb_width * s->mb_height);
    }

    s->restart_count = 0;

    av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt, &chroma_h_shift,
                                     &chroma_v_shift);
    chroma_width  = AV_CEIL_RSHIFT(s->width,  chroma_h_shift);
    chroma_height = AV_CEIL_RSHIFT(s->height, chroma_v_shift);

    for (i = 0; i < nb_components; i++) {
        int c = s->comp_index[i];
        data[c] = s->picture_ptr->data[c];
        reference_data[c] = reference ? reference->data[c] : NULL;
        linesize[c] = s->linesize[c];
        s->coefs_finished[c] |= 1;  /* DC coefficient is coded by this scan */
    }

    for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
        for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
            const int copy_mb = mb_bitmask && !get_bits1(&mb_bitmask_gb);

            if (s->restart_interval && !s->restart_count)
                s->restart_count = s->restart_interval;

            if (get_bits_left(&s->gb) < 0) {
                av_log(s->avctx, AV_LOG_ERROR, "overread %d\n",
                       -get_bits_left(&s->gb));
                return AVERROR_INVALIDDATA;
            }
            for (i = 0; i < nb_components; i++) {
                uint8_t *ptr;
                int n, h, v, x, y, c, j;
                int block_offset;
                n = s->nb_blocks[i];
                c = s->comp_index[i];
                h = s->h_scount[i];
                v = s->v_scount[i];
                x = 0;
                y = 0;
                for (j = 0; j < n; j++) {
                    block_offset = (((linesize[c] * (v * mb_y + y) * 8) +
                                     (h * mb_x + x) * 8 * bytes_per_pixel) >> s->avctx->lowres);

                    if (s->interlaced && s->bottom_field)
                        block_offset += linesize[c] >> 1;
                    /* blocks fully outside the visible area get ptr=NULL:
                     * they must still be parsed but are not written out */
                    if (   8*(h * mb_x + x) < ((c == 1) || (c == 2) ? chroma_width  : s->width)
                        && 8*(v * mb_y + y) < ((c == 1) || (c == 2) ? chroma_height : s->height)) {
                        ptr = data[c] + block_offset;
                    } else
                        ptr = NULL;
                    if (!s->progressive) {
                        if (copy_mb) {
                            if (ptr)
                                mjpeg_copy_block(s, ptr, reference_data[c] + block_offset,
                                                 linesize[c], s->avctx->lowres);

                        } else {
                            s->bdsp.clear_block(s->block);
                            if (decode_block(s, s->block, i,
                                             s->dc_index[i], s->ac_index[i],
                                             s->quant_matrixes[s->quant_sindex[i]]) < 0) {
                                av_log(s->avctx, AV_LOG_ERROR,
                                       "error y=%d x=%d\n", mb_y, mb_x);
                                return AVERROR_INVALIDDATA;
                            }
                            if (ptr && linesize[c]) {
                                s->idsp.idct_put(ptr, linesize[c], s->block);
                                if (s->bits & 7)
                                    shift_output(s, ptr, linesize[c]);
                            }
                        }
                    } else {
                        /* progressive DC scan: first pass decodes the DC
                         * value, refinement passes (Ah != 0) add one bit */
                        int block_idx  = s->block_stride[c] * (v * mb_y + y) +
                                         (h * mb_x + x);
                        int16_t *block = s->blocks[c][block_idx];
                        if (Ah)
                            block[0] += get_bits1(&s->gb) *
                                        s->quant_matrixes[s->quant_sindex[i]][0] << Al;
                        else if (decode_dc_progressive(s, block, i, s->dc_index[i],
                                                       s->quant_matrixes[s->quant_sindex[i]],
                                                       Al) < 0) {
                            av_log(s->avctx, AV_LOG_ERROR,
                                   "error y=%d x=%d\n", mb_y, mb_x);
                            return AVERROR_INVALIDDATA;
                        }
                    }
                    ff_dlog(s->avctx, "mb: %d %d processed\n", mb_y, mb_x);
                    ff_dlog(s->avctx, "%d %d %d %d %d %d %d %d \n",
                            mb_x, mb_y, x, y, c, s->bottom_field,
                            (v * mb_y + y) * 8, (h * mb_x + x) * 8);
                    if (++x == h) {
                        x = 0;
                        y++;
                    }
                }
            }

            handle_rstn(s, nb_components);
        }
    }
    return 0;
}
1593 
1595  int se, int Ah, int Al)
1596 {
1597  int mb_x, mb_y;
1598  int EOBRUN = 0;
1599  int c = s->comp_index[0];
1600  uint16_t *quant_matrix = s->quant_matrixes[s->quant_sindex[0]];
1601 
1602  av_assert0(ss>=0 && Ah>=0 && Al>=0);
1603  if (se < ss || se > 63) {
1604  av_log(s->avctx, AV_LOG_ERROR, "SS/SE %d/%d is invalid\n", ss, se);
1605  return AVERROR_INVALIDDATA;
1606  }
1607 
1608  // s->coefs_finished is a bitmask for coefficients coded
1609  // ss and se are parameters telling start and end coefficients
1610  s->coefs_finished[c] |= (2ULL << se) - (1ULL << ss);
1611 
1612  s->restart_count = 0;
1613 
1614  for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
1615  int block_idx = mb_y * s->block_stride[c];
1616  int16_t (*block)[64] = &s->blocks[c][block_idx];
1617  uint8_t *last_nnz = &s->last_nnz[c][block_idx];
1618  if (get_bits_left(&s->gb) <= 0) {
1619  av_log(s->avctx, AV_LOG_ERROR, "bitstream truncated in mjpeg_decode_scan_progressive_ac\n");
1620  return AVERROR_INVALIDDATA;
1621  }
1622  for (mb_x = 0; mb_x < s->mb_width; mb_x++, block++, last_nnz++) {
1623  int ret;
1624  if (s->restart_interval && !s->restart_count)
1625  s->restart_count = s->restart_interval;
1626 
1627  if (Ah)
1628  ret = decode_block_refinement(s, *block, last_nnz, s->ac_index[0],
1629  quant_matrix, ss, se, Al, &EOBRUN);
1630  else
1631  ret = decode_block_progressive(s, *block, last_nnz, s->ac_index[0],
1632  quant_matrix, ss, se, Al, &EOBRUN);
1633 
1634  if (ret >= 0 && get_bits_left(&s->gb) < 0)
1636  if (ret < 0) {
1637  av_log(s->avctx, AV_LOG_ERROR,
1638  "error y=%d x=%d\n", mb_y, mb_x);
1639  return AVERROR_INVALIDDATA;
1640  }
1641 
1642  if (handle_rstn(s, 0))
1643  EOBRUN = 0;
1644  }
1645  }
1646  return 0;
1647 }
1648 
1650 {
1651  int mb_x, mb_y;
1652  int c;
1653  const int bytes_per_pixel = 1 + (s->bits > 8);
1654  const int block_size = s->lossless ? 1 : 8;
1655 
1656  for (c = 0; c < s->nb_components; c++) {
1657  uint8_t *data = s->picture_ptr->data[c];
1658  int linesize = s->linesize[c];
1659  int h = s->h_max / s->h_count[c];
1660  int v = s->v_max / s->v_count[c];
1661  int mb_width = (s->width + h * block_size - 1) / (h * block_size);
1662  int mb_height = (s->height + v * block_size - 1) / (v * block_size);
1663 
1664  if (~s->coefs_finished[c])
1665  av_log(s->avctx, AV_LOG_WARNING, "component %d is incomplete\n", c);
1666 
1667  if (s->interlaced && s->bottom_field)
1668  data += linesize >> 1;
1669 
1670  for (mb_y = 0; mb_y < mb_height; mb_y++) {
1671  uint8_t *ptr = data + (mb_y * linesize * 8 >> s->avctx->lowres);
1672  int block_idx = mb_y * s->block_stride[c];
1673  int16_t (*block)[64] = &s->blocks[c][block_idx];
1674  for (mb_x = 0; mb_x < mb_width; mb_x++, block++) {
1675  s->idsp.idct_put(ptr, linesize, *block);
1676  if (s->bits & 7)
1677  shift_output(s, ptr, linesize);
1678  ptr += bytes_per_pixel*8 >> s->avctx->lowres;
1679  }
1680  }
1681  }
1682 }
1683 
1684 int ff_mjpeg_decode_sos(MJpegDecodeContext *s, const uint8_t *mb_bitmask,
1685  int mb_bitmask_size, const AVFrame *reference)
1686 {
1687  int len, nb_components, i, h, v, predictor, point_transform;
1688  int index, id, ret;
1689  const int block_size = s->lossless ? 1 : 8;
1690  int ilv, prev_shift;
1691 
1692  if (!s->got_picture) {
1693  av_log(s->avctx, AV_LOG_WARNING,
1694  "Can not process SOS before SOF, skipping\n");
1695  return AVERROR_INVALIDDATA;
1696  }
1697 
1698  ret = mjpeg_parse_len(s, &len, "sos");
1699  if (ret < 0)
1700  return ret;
1701  if (len < 1)
1702  return AVERROR_INVALIDDATA;
1703  nb_components = bytestream2_get_byteu(&s->gB);
1704  if (nb_components == 0 || nb_components > MAX_COMPONENTS) {
1706  "decode_sos: nb_components (%d)",
1707  nb_components);
1708  return AVERROR_PATCHWELCOME;
1709  }
1710  if (len != 4 + 2 * nb_components) {
1711  av_log(s->avctx, AV_LOG_ERROR, "decode_sos: len(%d) mismatch %d components\n", len, nb_components);
1712  return AVERROR_INVALIDDATA;
1713  }
1714  for (i = 0; i < nb_components; i++) {
1715  id = bytestream2_get_byteu(&s->gB);
1716  av_log(s->avctx, AV_LOG_DEBUG, "component: %d\n", id);
1717  /* find component index */
1718  for (index = 0; index < s->nb_components; index++)
1719  if (id == s->component_id[index])
1720  break;
1721  if (index == s->nb_components) {
1722  av_log(s->avctx, AV_LOG_ERROR,
1723  "decode_sos: index(%d) out of components\n", index);
1724  return AVERROR_INVALIDDATA;
1725  }
1726  /* Metasoft MJPEG codec has Cb and Cr swapped */
1727  if (s->avctx->codec_tag == MKTAG('M', 'T', 'S', 'J')
1728  && nb_components == 3 && s->nb_components == 3 && i)
1729  index = 3 - i;
1730 
1731  s->quant_sindex[i] = s->quant_index[index];
1732  s->nb_blocks[i] = s->h_count[index] * s->v_count[index];
1733  s->h_scount[i] = s->h_count[index];
1734  s->v_scount[i] = s->v_count[index];
1735 
1736  s->comp_index[i] = index;
1737 
1738  uint8_t b = bytestream2_get_byteu(&s->gB);
1739  s->dc_index[i] = b >> 4;
1740  s->ac_index[i] = b & 0x0F;
1741 
1742  if (s->dc_index[i] < 0 || s->ac_index[i] < 0 ||
1743  s->dc_index[i] >= 4 || s->ac_index[i] >= 4)
1744  goto out_of_range;
1745  if (!s->vlcs[0][s->dc_index[i]].table || !(s->progressive ? s->vlcs[2][s->ac_index[0]].table : s->vlcs[1][s->ac_index[i]].table))
1746  goto out_of_range;
1747  }
1748 
1749  predictor = bytestream2_get_byteu(&s->gB); /* JPEG Ss / lossless JPEG predictor / JPEG-LS NEAR */
1750  ilv = bytestream2_get_byteu(&s->gB); /* JPEG Se / JPEG-LS ILV */
1751  if(s->avctx->codec_tag != AV_RL32("CJPG")){
1752  uint8_t b = bytestream2_get_byteu(&s->gB);
1753  prev_shift = b >> 4; /* Ah */
1754  point_transform = b & 0x0F; /* Al */
1755  }else
1756  prev_shift = point_transform = 0;
1757 
1758  if (nb_components > 1) {
1759  /* interleaved stream */
1760  s->mb_width = (s->width + s->h_max * block_size - 1) / (s->h_max * block_size);
1761  s->mb_height = (s->height + s->v_max * block_size - 1) / (s->v_max * block_size);
1762  } else if (!s->ls) { /* skip this for JPEG-LS */
1763  h = s->h_max / s->h_scount[0];
1764  v = s->v_max / s->v_scount[0];
1765  s->mb_width = (s->width + h * block_size - 1) / (h * block_size);
1766  s->mb_height = (s->height + v * block_size - 1) / (v * block_size);
1767  s->nb_blocks[0] = 1;
1768  s->h_scount[0] = 1;
1769  s->v_scount[0] = 1;
1770  }
1771 
1772  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1773  av_log(s->avctx, AV_LOG_DEBUG, "%s %s p:%d >>:%d ilv:%d bits:%d skip:%d %s comp:%d\n",
1774  s->lossless ? "lossless" : "sequential DCT", s->rgb ? "RGB" : "",
1775  predictor, point_transform, ilv, s->bits, s->mjpb_skiptosod,
1776  s->pegasus_rct ? "PRCT" : (s->rct ? "RCT" : ""), nb_components);
1777 
1778 
1779  /* mjpeg-b can have padding bytes between sos and image data, skip them */
1780  if (s->mjpb_skiptosod)
1781  bytestream2_skip(&s->gB, s->mjpb_skiptosod);
1782 
1783  ret = init_get_bits8(&s->gb, s->gB.buffer, bytestream2_get_bytes_left(&s->gB));
1784  if (ret < 0)
1785  return ret;
1786 
1787 next_field:
1788  for (i = 0; i < nb_components; i++)
1789  s->last_dc[i] = (4 << s->bits);
1790 
1791  if (s->avctx->hwaccel) {
1792  int bytes_to_start = bytestream2_tell(&s->gB);
1793  av_assert0(bytes_to_start >= 0 &&
1794  s->raw_scan_buffer_size >= bytes_to_start);
1795 
1796  ret = FF_HW_CALL(s->avctx, decode_slice,
1797  s->raw_scan_buffer + bytes_to_start,
1798  s->raw_scan_buffer_size - bytes_to_start);
1799  if (ret < 0)
1800  return ret;
1801 
1802  } else if (s->lossless) {
1803  av_assert0(s->picture_ptr == s->picture);
1804  if (CONFIG_JPEGLS_DECODER && s->ls) {
1805 // for () {
1806 // reset_ls_coding_parameters(s, 0);
1807 
1809  point_transform, ilv)) < 0)
1810  return ret;
1811  } else {
1812  if (s->rgb || s->bayer) {
1813  if ((ret = ljpeg_decode_rgb_scan(s, nb_components, predictor, point_transform)) < 0)
1814  return ret;
1815  } else {
1817  point_transform,
1818  nb_components)) < 0)
1819  return ret;
1820  }
1821  }
1822  } else {
1823  if (s->progressive && predictor) {
1824  av_assert0(s->picture_ptr == s->picture);
1826  ilv, prev_shift,
1827  point_transform)) < 0)
1828  return ret;
1829  } else {
1830  if ((ret = mjpeg_decode_scan(s, nb_components,
1831  prev_shift, point_transform,
1832  mb_bitmask, mb_bitmask_size, reference)) < 0)
1833  return ret;
1834  }
1835  }
1836 
1837  if (s->interlaced &&
1838  get_bits_left(&s->gb) > 32 &&
1839  show_bits(&s->gb, 8) == 0xFF) {
1840  GetBitContext bak = s->gb;
1841  align_get_bits(&bak);
1842  if (show_bits(&bak, 16) == 0xFFD1) {
1843  av_log(s->avctx, AV_LOG_DEBUG, "AVRn interlaced picture marker found\n");
1844  s->gb = bak;
1845  skip_bits(&s->gb, 16);
1846  s->bottom_field ^= 1;
1847 
1848  goto next_field;
1849  }
1850  }
1851 
1852  /* Add the amount of bits read from the unescaped image data buffer
1853  * into the GetByteContext. */
1854  bytestream2_skipu(&s->gB, (get_bits_count(&s->gb) + 7) / 8);
1855 
1856  return 0;
1857  out_of_range:
1858  av_log(s->avctx, AV_LOG_ERROR, "decode_sos: ac/dc index out of range\n");
1859  return AVERROR_INVALIDDATA;
1860 }
1861 
1863 {
1864  if (bytestream2_get_be16u(&s->gB) != 4)
1865  return AVERROR_INVALIDDATA;
1866  s->restart_interval = bytestream2_get_be16u(&s->gB);
1867  s->restart_count = 0;
1868  av_log(s->avctx, AV_LOG_DEBUG, "restart interval: %d\n",
1869  s->restart_interval);
1870 
1871  return 0;
1872 }
1873 
1875 {
1876  int len, id, i;
1877 
1878  int ret = mjpeg_parse_len(s, &len, "app");
1879  if (ret < 0)
1880  return AVERROR_INVALIDDATA;
1881 
1882  if (len < 4) {
1883  if (s->avctx->err_recognition & AV_EF_EXPLODE)
1884  return AVERROR_INVALIDDATA;
1885  av_log(s->avctx, AV_LOG_VERBOSE, "skipping APPx stub (len=%" PRId32 ")\n", len);
1886  goto out;
1887  }
1888 
1889  id = bytestream2_get_be32u(&s->gB);
1890  len -= 4;
1891 
1892  if (s->avctx->debug & FF_DEBUG_STARTCODE)
1893  av_log(s->avctx, AV_LOG_DEBUG, "APPx (%s / %8X) len=%d\n",
1894  av_fourcc2str(av_bswap32(id)), id, len);
1895 
1896  /* Buggy AVID, it puts EOI only at every 10th frame. */
1897  /* Also, this fourcc is used by non-avid files too, it holds some
1898  information, but it's always present in AVID-created files. */
1899  if (id == AV_RB32("AVI1")) {
1900  /* structure:
1901  4bytes AVI1
1902  1bytes polarity
1903  1bytes always zero
1904  4bytes field_size
1905  4bytes field_size_less_padding
1906  */
1907  s->buggy_avid = 1;
1908  i = bytestream2_get_byteu(&s->gB); len--;
1909  av_log(s->avctx, AV_LOG_DEBUG, "polarity %d\n", i);
1910  goto out;
1911  }
1912 
1913  if (id == AV_RB32("JFIF")) {
1914  int t_w, t_h, v1, v2;
1915  if (len < 8)
1916  goto out;
1917  bytestream2_skipu(&s->gB, 1); /* the trailing zero-byte */
1918  v1 = bytestream2_get_byteu(&s->gB);
1919  v2 = bytestream2_get_byteu(&s->gB);
1920  bytestream2_skipu(&s->gB, 1);
1921 
1922  s->avctx->sample_aspect_ratio.num = bytestream2_get_be16u(&s->gB);
1923  s->avctx->sample_aspect_ratio.den = bytestream2_get_be16u(&s->gB);
1924  if ( s->avctx->sample_aspect_ratio.num <= 0
1925  || s->avctx->sample_aspect_ratio.den <= 0) {
1926  s->avctx->sample_aspect_ratio.num = 0;
1927  s->avctx->sample_aspect_ratio.den = 1;
1928  }
1929 
1930  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1931  av_log(s->avctx, AV_LOG_INFO,
1932  "mjpeg: JFIF header found (version: %x.%x) SAR=%d/%d\n",
1933  v1, v2,
1934  s->avctx->sample_aspect_ratio.num,
1935  s->avctx->sample_aspect_ratio.den);
1936 
1937  len -= 8;
1938  if (len >= 2) {
1939  t_w = bytestream2_get_byteu(&s->gB);
1940  t_h = bytestream2_get_byteu(&s->gB);
1941  if (t_w && t_h) {
1942  /* skip thumbnail */
1943  if (len -10 - (t_w * t_h * 3) > 0)
1944  len -= t_w * t_h * 3;
1945  }
1946  len -= 2;
1947  }
1948  goto out;
1949  }
1950 
1951  if ( id == AV_RB32("Adob")
1952  && len >= 8
1953  && bytestream2_peek_byteu(&s->gB) == 'e'
1954  && bytestream2_peek_be32u(&s->gB) != AV_RB32("e_CM")) {
1955  bytestream2_skipu(&s->gB, 1); /* 'e' */
1956  bytestream2_skipu(&s->gB, 2); /* version */
1957  bytestream2_skipu(&s->gB, 2); /* flags0 */
1958  bytestream2_skipu(&s->gB, 2); /* flags1 */
1959  s->adobe_transform = bytestream2_get_byteu(&s->gB);
1960  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1961  av_log(s->avctx, AV_LOG_INFO, "mjpeg: Adobe header found, transform=%d\n", s->adobe_transform);
1962  len -= 8;
1963  goto out;
1964  }
1965 
1966  if (id == AV_RB32("LJIF")) {
1967  int rgb = s->rgb;
1968  int pegasus_rct = s->pegasus_rct;
1969  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1970  av_log(s->avctx, AV_LOG_INFO,
1971  "Pegasus lossless jpeg header found\n");
1972  bytestream2_skipu(&s->gB, 2); /* version ? */
1973  bytestream2_skipu(&s->gB, 2); /* unknown always 0? */
1974  bytestream2_skipu(&s->gB, 2); /* unknown always 0? */
1975  bytestream2_skipu(&s->gB, 2); /* unknown always 0? */
1976  switch (i=bytestream2_get_byteu(&s->gB)) {
1977  case 1:
1978  rgb = 1;
1979  pegasus_rct = 0;
1980  break;
1981  case 2:
1982  rgb = 1;
1983  pegasus_rct = 1;
1984  break;
1985  default:
1986  av_log(s->avctx, AV_LOG_ERROR, "unknown colorspace %d\n", i);
1987  }
1988 
1989  len -= 9;
1990  if (s->bayer)
1991  goto out;
1992  if (s->got_picture)
1993  if (rgb != s->rgb || pegasus_rct != s->pegasus_rct) {
1994  av_log(s->avctx, AV_LOG_WARNING, "Mismatching LJIF tag\n");
1995  goto out;
1996  }
1997 
1998  s->rgb = rgb;
1999  s->pegasus_rct = pegasus_rct;
2000 
2001  goto out;
2002  }
2003  if (id == AV_RL32("colr") && len > 0) {
2004  s->colr = bytestream2_get_byteu(&s->gB);
2005  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
2006  av_log(s->avctx, AV_LOG_INFO, "COLR %d\n", s->colr);
2007  len --;
2008  goto out;
2009  }
2010  if (id == AV_RL32("xfrm") && len > 0) {
2011  s->xfrm = bytestream2_get_byteu(&s->gB);
2012  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
2013  av_log(s->avctx, AV_LOG_INFO, "XFRM %d\n", s->xfrm);
2014  len --;
2015  goto out;
2016  }
2017 
2018  /* JPS extension by VRex */
2019  if (s->start_code == APP3 && id == AV_RB32("_JPS") && len >= 10) {
2020  int flags, layout, type;
2021  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
2022  av_log(s->avctx, AV_LOG_INFO, "_JPSJPS_\n");
2023 
2024  bytestream2_skipu(&s->gB, 4); len -= 4; /* JPS_ */
2025  bytestream2_skipu(&s->gB, 2); len -= 2; /* block length */
2026  bytestream2_skipu(&s->gB, 1); /* reserved */
2027  flags = bytestream2_get_byteu(&s->gB);
2028  layout = bytestream2_get_byteu(&s->gB);
2029  type = bytestream2_get_byteu(&s->gB);
2030  len -= 4;
2031 
2032  av_freep(&s->stereo3d);
2033  s->stereo3d = av_stereo3d_alloc();
2034  if (!s->stereo3d) {
2035  goto out;
2036  }
2037  if (type == 0) {
2038  s->stereo3d->type = AV_STEREO3D_2D;
2039  } else if (type == 1) {
2040  switch (layout) {
2041  case 0x01:
2042  s->stereo3d->type = AV_STEREO3D_LINES;
2043  break;
2044  case 0x02:
2045  s->stereo3d->type = AV_STEREO3D_SIDEBYSIDE;
2046  break;
2047  case 0x03:
2048  s->stereo3d->type = AV_STEREO3D_TOPBOTTOM;
2049  break;
2050  }
2051  if (!(flags & 0x04)) {
2052  s->stereo3d->flags = AV_STEREO3D_FLAG_INVERT;
2053  }
2054  }
2055  goto out;
2056  }
2057 
2058  /* EXIF metadata */
2059  if (s->start_code == APP1 && id == AV_RB32("Exif") && len >= 2) {
2060  int ret;
2061 
2062  bytestream2_skipu(&s->gB, 2); // skip padding
2063  len -= 2;
2064 
2065  ret = av_exif_parse_buffer(s->avctx, s->gB.buffer, len, &s->exif_metadata, AV_EXIF_TIFF_HEADER);
2066  if (ret < 0) {
2067  av_log(s->avctx, AV_LOG_WARNING, "unable to parse EXIF buffer\n");
2068  goto out;
2069  }
2070 
2071  bytestream2_skipu(&s->gB, ret);
2072  len -= ret;
2073 
2074  goto out;
2075  }
2076 
2077  /* Apple MJPEG-A */
2078  if ((s->start_code == APP1) && (len > (0x28 - 8))) {
2079  id = bytestream2_get_be32u(&s->gB);
2080  len -= 4;
2081  /* Apple MJPEG-A */
2082  if (id == AV_RB32("mjpg")) {
2083  /* structure:
2084  4bytes field size
2085  4bytes pad field size
2086  4bytes next off
2087  4bytes quant off
2088  4bytes huff off
2089  4bytes image off
2090  4bytes scan off
2091  4bytes data off
2092  */
2093  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
2094  av_log(s->avctx, AV_LOG_INFO, "mjpeg: Apple MJPEG-A header found\n");
2095  }
2096  }
2097 
2098  if (s->start_code == APP2 && id == AV_RB32("ICC_") && len >= 10) {
2099  int id2;
2100  unsigned seqno;
2101  unsigned nummarkers;
2102 
2103  id = bytestream2_get_be32u(&s->gB);
2104  id2 = bytestream2_get_be24u(&s->gB);
2105  len -= 7;
2106  if (id != AV_RB32("PROF") || id2 != AV_RB24("ILE")) {
2107  av_log(s->avctx, AV_LOG_WARNING, "Invalid ICC_PROFILE header in APP2\n");
2108  goto out;
2109  }
2110 
2111  bytestream2_skipu(&s->gB, 1);
2112  seqno = bytestream2_get_byteu(&s->gB);
2113  len -= 2;
2114  if (seqno == 0) {
2115  av_log(s->avctx, AV_LOG_WARNING, "Invalid sequence number in APP2\n");
2116  goto out;
2117  }
2118 
2119  nummarkers = bytestream2_get_byteu(&s->gB);
2120  len -= 1;
2121  if (nummarkers == 0) {
2122  av_log(s->avctx, AV_LOG_WARNING, "Invalid number of markers coded in APP2\n");
2123  goto out;
2124  } else if (s->iccnum != 0 && nummarkers != s->iccnum) {
2125  av_log(s->avctx, AV_LOG_WARNING, "Mismatch in coded number of ICC markers between markers\n");
2126  goto out;
2127  } else if (seqno > nummarkers) {
2128  av_log(s->avctx, AV_LOG_WARNING, "Mismatching sequence number and coded number of ICC markers\n");
2129  goto out;
2130  }
2131 
2132  /* Allocate if this is the first APP2 we've seen. */
2133  if (s->iccnum == 0) {
2134  if (!FF_ALLOCZ_TYPED_ARRAY(s->iccentries, nummarkers)) {
2135  av_log(s->avctx, AV_LOG_ERROR, "Could not allocate ICC data arrays\n");
2136  return AVERROR(ENOMEM);
2137  }
2138  s->iccnum = nummarkers;
2139  }
2140 
2141  if (s->iccentries[seqno - 1].data) {
2142  av_log(s->avctx, AV_LOG_WARNING, "Duplicate ICC sequence number\n");
2143  goto out;
2144  }
2145 
2146  s->iccentries[seqno - 1].length = len;
2147  s->iccentries[seqno - 1].data = av_malloc(len);
2148  if (!s->iccentries[seqno - 1].data) {
2149  av_log(s->avctx, AV_LOG_ERROR, "Could not allocate ICC data buffer\n");
2150  return AVERROR(ENOMEM);
2151  }
2152 
2153  bytestream2_get_bufferu(&s->gB, s->iccentries[seqno - 1].data, len);
2154  len = 0;
2155  s->iccread++;
2156 
2157  if (s->iccread > s->iccnum)
2158  av_log(s->avctx, AV_LOG_WARNING, "Read more ICC markers than are supposed to be coded\n");
2159  }
2160 
2161 out:
2162  /* slow but needed for extreme adobe jpegs */
2163  if (len < 0)
2164  av_log(s->avctx, AV_LOG_ERROR,
2165  "mjpeg: error, decode_app parser read over the end\n");
2166  if (len)
2167  bytestream2_skipu(&s->gB, len);
2168 
2169  return 0;
2170 }
2171 
/* mjpeg_decode_com(): parse a COM (comment) marker segment.
 * Reads the segment payload into a NUL-terminated buffer and matches it
 * against known vendor strings that require decoder workarounds.
 * Returns 0 on success or a negative AVERROR code.
 * NOTE(review): the function signature line is elided in this extract
 * (doxygen anchor) — confirm against the full source. */
2173 {
2174  int len;
2175  int ret = mjpeg_parse_len(s, &len, "com");
2176  if (ret < 0)
2177  return ret;
2178  if (!len)
2179  return 0;
2180 
2181  int i;
2182  char *cbuf = av_malloc(len + 1);
2183  if (!cbuf)
2184  return AVERROR(ENOMEM);
2185 
     /* copy the payload byte-by-byte, then NUL-terminate; a trailing
      * newline is stripped so the string compares below work */
2186  for (i = 0; i < len; i++)
2187  cbuf[i] = bytestream2_get_byteu(&s->gB);
2188  if (cbuf[i - 1] == '\n')
2189  cbuf[i - 1] = 0;
2190  else
2191  cbuf[i] = 0;
2192 
2193  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
2194  av_log(s->avctx, AV_LOG_INFO, "comment: '%s'\n", cbuf);
2195 
2196  /* buggy avid, it puts EOI only at every 10th frame */
2197  if (!strncmp(cbuf, "AVID", 4)) {
2198  parse_avid(s, cbuf, len);
2199  } else if (!strcmp(cbuf, "CS=ITU601"))
2200  s->cs_itu601 = 1;
     /* these encoders store the image bottom-up; flag for later flip */
2201  else if ((!strncmp(cbuf, "Intel(R) JPEG Library, version 1", 32) && s->avctx->codec_tag) ||
2202  (!strncmp(cbuf, "Metasoft MJPEG Codec", 20)))
2203  s->flipped = 1;
2204  else if (!strcmp(cbuf, "MULTISCOPE II")) {
2205  s->avctx->sample_aspect_ratio = (AVRational) { 1, 2 };
2206  s->multiscope = 2;
2207  }
2208 
2209  av_free(cbuf);
2210 
2211  return 0;
2212 }
2213 
2214 /* return the 8 bit start code value and update the search
2215  state. Return -1 if no start code found */
2216 static int find_marker(const uint8_t **pbuf_ptr, const uint8_t *buf_end)
2217 {
2218  const uint8_t *buf_ptr;
2219  int val;
2220  int skipped = 0;
2221 
2222  buf_ptr = *pbuf_ptr;
2223  while ((buf_ptr = memchr(buf_ptr, 0xff, buf_end - buf_ptr))) {
2224  buf_ptr++;
2225  while (buf_ptr < buf_end) {
2226  val = *buf_ptr++;
2227  if (val != 0xff) {
2228  if ((val >= SOF0) && (val <= COM))
2229  goto found;
2230  break;
2231  }
2232  }
2233  skipped++;
2234  }
2235  buf_ptr = buf_end;
2236  val = -1;
2237 found:
2238  ff_dlog(NULL, "find_marker skipped %d bytes\n", skipped);
2239  *pbuf_ptr = buf_ptr;
2240  return val;
2241 }
2242 
/* ff_mjpeg_find_marker(): locate the next marker and hand back an
 * unescaped view of its payload.  For SOS the entropy-coded data is
 * de-stuffed (0xFF 0x00 -> 0xFF; JPEG-LS uses a 7-bit escape scheme
 * handled in the second branch); all other markers are passed through
 * untouched.  Returns the start code, or a negative value on error.
 * NOTE(review): the function name line and a few interior lines
 * (doxygen source lines 2286, 2309, 2354) are elided in this extract. */
2244  const uint8_t **buf_ptr, const uint8_t *buf_end,
2245  const uint8_t **unescaped_buf_ptr,
2246  int *unescaped_buf_size)
2247 {
2248  int start_code;
2249  start_code = find_marker(buf_ptr, buf_end);
2250 
2251  av_fast_padded_malloc(&s->buffer, &s->buffer_size, buf_end - *buf_ptr);
2252  if (!s->buffer)
2253  return AVERROR(ENOMEM);
2254 
2255  /* unescape buffer of SOS, use special treatment for JPEG-LS */
2256  if (start_code == SOS && !s->ls) {
2257  const uint8_t *src = *buf_ptr;
2258  const uint8_t *ptr = src;
2259  uint8_t *dst = s->buffer;
2260 
      /* flush the pending [src, ptr) span into dst, dropping the last
       * `skip` bytes (the stuffing/marker bytes) */
2261  #define copy_data_segment(skip) do { \
2262  ptrdiff_t length = (ptr - src) - (skip); \
2263  if (length > 0) { \
2264  memcpy(dst, src, length); \
2265  dst += length; \
2266  src = ptr; \
2267  } \
2268  } while (0)
2269 
2270  if (s->avctx->codec_id == AV_CODEC_ID_THP) {
2271  ptr = buf_end;
2272  copy_data_segment(0);
2273  } else {
2274  while (ptr < buf_end) {
2275  uint8_t x = *(ptr++);
2276 
2277  if (x == 0xff) {
2278  ptrdiff_t skip = 0;
2279  while (ptr < buf_end && x == 0xff) {
2280  x = *(ptr++);
2281  skip++;
2282  }
2283 
2284  /* 0xFF, 0xFF, ... */
2285  if (skip > 1) {
             /* NOTE(review): doxygen line 2286 elided here — presumably
              * copy_data_segment(skip); confirm in the full source. */
2287 
2288  /* decrement src as it is equal to ptr after the
2289  * copy_data_segment macro and we might want to
2290  * copy the current value of x later on */
2291  src--;
2292  }
2293 
2294  if (x < RST0 || x > RST7) {
2295  copy_data_segment(1);
2296  if (x)
2297  break;
2298  }
2299  }
2300  }
2301  if (src < ptr)
2302  copy_data_segment(0);
2303  }
2304  #undef copy_data_segment
2305 
2306  *unescaped_buf_ptr = s->buffer;
2307  *unescaped_buf_size = dst - s->buffer;
      /* NOTE(review): the memset size argument (doxygen line 2309) is
       * elided — presumably the padding tail of s->buffer. */
2308  memset(s->buffer + *unescaped_buf_size, 0,
2310 
2311  av_log(s->avctx, AV_LOG_DEBUG, "escaping removed %td bytes\n",
2312  (buf_end - *buf_ptr) - (dst - s->buffer));
2313  } else if (start_code == SOS && s->ls) {
2314  const uint8_t *src = *buf_ptr;
2315  uint8_t *dst = s->buffer;
2316  int bit_count = 0;
2317  int t = 0, b = 0;
2318  PutBitContext pb;
2319 
2320  /* find marker */
2321  while (src + t < buf_end) {
2322  uint8_t x = src[t++];
2323  if (x == 0xff) {
2324  while ((src + t < buf_end) && x == 0xff)
2325  x = src[t++];
2326  if (x & 0x80) {
2327  t -= FFMIN(2, t);
2328  break;
2329  }
2330  }
2331  }
2332  bit_count = t * 8;
2333  init_put_bits(&pb, dst, t);
2334 
2335  /* unescape bitstream */
2336  while (b < t) {
2337  uint8_t x = src[b++];
2338  put_bits(&pb, 8, x);
2339  if (x == 0xFF && b < t) {
2340  x = src[b++];
2341  if (x & 0x80) {
2342  av_log(s->avctx, AV_LOG_WARNING, "Invalid escape sequence\n");
2343  x &= 0x7f;
2344  }
       /* JPEG-LS stuffs only 7 data bits after a 0xFF byte */
2345  put_bits(&pb, 7, x);
2346  bit_count--;
2347  }
2348  }
2349  flush_put_bits(&pb);
2350 
2351  *unescaped_buf_ptr = dst;
2352  *unescaped_buf_size = (bit_count + 7) >> 3;
      /* NOTE(review): memset size argument (doxygen line 2354) elided. */
2353  memset(s->buffer + *unescaped_buf_size, 0,
2355  } else {
2356  *unescaped_buf_ptr = *buf_ptr;
2357  *unescaped_buf_size = buf_end - *buf_ptr;
2358  }
2359 
2360  return start_code;
2361 }
2362 
/* Free all buffered ICC profile chunks and reset the counters so a new
 * APP2 ICC_PROFILE sequence can be accumulated.
 * NOTE(review): signature line elided in this extract (the body matches
 * an ICC reset helper on MJpegDecodeContext *s). */
2364 {
2365  int i;
2366 
2367  if (s->iccentries) {
2368  for (i = 0; i < s->iccnum; i++)
2369  av_freep(&s->iccentries[i].data);
2370  av_freep(&s->iccentries);
2371  }
2372 
2373  s->iccread = 0;
2374  s->iccnum = 0;
2375 }
2376 
/**
 * Decode one MJPEG image from buf/buf_size: walk the marker stream
 * (SOI, APPn, DHT, SOFn, SOS, ..., EOI), dispatch each marker to its
 * parser, and on EOI emit the decoded picture plus post-processing
 * (chroma upscaling, vertical flip, Adobe colour transforms, stereo/ICC/
 * EXIF side data).  Returns the number of bytes consumed or a negative
 * AVERROR code.
 * NOTE(review): this doxygen extract elides several source lines
 * (hyperlink anchors); the gaps are marked inline below and the code
 * must not be treated as complete.
 */
2378  int *got_frame, const AVPacket *avpkt,
2379  const uint8_t *buf, const int buf_size)
2380 {
2381  MJpegDecodeContext *s = avctx->priv_data;
2382  const uint8_t *buf_end, *buf_ptr;
2383  const uint8_t *unescaped_buf_ptr;
2384  int hshift, vshift;
2385  int unescaped_buf_size;
2386  int start_code;
2387  int index;
2388  int ret = 0;
2389  int is16bit;
2390 
2391  s->force_pal8 = 0;
2392 
2393  s->buf_size = buf_size;
2394 
     /* drop per-frame state left over from the previous picture */
2395  av_exif_free(&s->exif_metadata);
2396  av_freep(&s->stereo3d);
2397  s->adobe_transform = -1;
2398 
2399  if (s->iccnum != 0)
     /* NOTE(review): line 2400 elided — presumably reset_icc_profiles(s). */
2401 
2402 redo_for_pal8:
2403  buf_ptr = buf;
2404  buf_end = buf + buf_size;
     /* main marker loop: one iteration per marker segment */
2405  while (buf_ptr < buf_end) {
2406  /* find start next marker */
2407  start_code = ff_mjpeg_find_marker(s, &buf_ptr, buf_end,
2408  &unescaped_buf_ptr,
2409  &unescaped_buf_size);
2410  /* EOF */
2411  if (start_code < 0) {
2412  break;
2413  } else if (unescaped_buf_size > INT_MAX / 8) {
2414  av_log(avctx, AV_LOG_ERROR,
2415  "MJPEG packet 0x%x too big (%d/%d), corrupt data?\n",
2416  start_code, unescaped_buf_size, buf_size);
2417  return AVERROR_INVALIDDATA;
2418  }
2419  av_log(avctx, AV_LOG_DEBUG, "marker=%x avail_size_in_buf=%td\n",
2420  start_code, buf_end - buf_ptr);
2421 
2422  bytestream2_init(&s->gB, unescaped_buf_ptr, unescaped_buf_size);
2423 
2424  s->start_code = start_code;
2425  if (avctx->debug & FF_DEBUG_STARTCODE)
2426  av_log(avctx, AV_LOG_DEBUG, "startcode: %X\n", start_code);
2427 
2428  /* process markers */
2429  if (start_code >= RST0 && start_code <= RST7) {
2430  av_log(avctx, AV_LOG_DEBUG,
2431  "restart marker: %d\n", start_code & 0x0f);
2432  /* APP fields */
2433  } else if (start_code >= APP0 && start_code <= APP15) {
2434  if ((ret = mjpeg_decode_app(s)) < 0)
2435  av_log(avctx, AV_LOG_ERROR, "unable to decode APP fields: %s\n",
2436  av_err2str(ret));
2437  /* Comment */
2438  } else if (start_code == COM) {
2439  ret = mjpeg_decode_com(s);
2440  if (ret < 0)
2441  return ret;
2442  } else if (start_code == DQT) {
     /* NOTE(review): line 2443 elided — presumably
      * ret = ff_mjpeg_decode_dqt(s); confirm in the full source. */
2444  if (ret < 0)
2445  return ret;
2446  }
2447 
2448  ret = -1;
2449 
2450  if (!CONFIG_JPEGLS_DECODER &&
2451  (start_code == SOF55 || start_code == LSE)) {
2452  av_log(avctx, AV_LOG_ERROR, "JPEG-LS support not enabled.\n");
2453  return AVERROR(ENOSYS);
2454  }
2455 
     /* when frames are discarded, still parse SOF headers so stream
      * parameters stay up to date */
2456  if (avctx->skip_frame == AVDISCARD_ALL) {
2457  switch(start_code) {
2458  case SOF0:
2459  case SOF1:
2460  case SOF2:
2461  case SOF3:
2462  case SOF55:
2463  break;
2464  default:
2465  goto skip;
2466  }
2467  }
2468 
2469  switch (start_code) {
2470  case SOI:
2471  s->restart_interval = 0;
2472  s->restart_count = 0;
2473  s->raw_image_buffer = buf_ptr;
2474  s->raw_image_buffer_size = buf_end - buf_ptr;
2475  /* nothing to do on SOI */
2476  break;
2477  case DHT:
2478  if ((ret = ff_mjpeg_decode_dht(s)) < 0) {
2479  av_log(avctx, AV_LOG_ERROR, "huffman table decode error\n");
2480  goto fail;
2481  }
2482  break;
2483  case SOF0:
2484  case SOF1:
2485  if (start_code == SOF0)
     /* NOTE(review): lines 2486/2488 elided — presumably the
      * avctx->profile assignments for baseline/extended-sequential. */
2487 
2488  else
2489  s->lossless = 0;
2490  s->ls = 0;
2491  s->progressive = 0;
2492  if ((ret = ff_mjpeg_decode_sof(s)) < 0)
2493  goto fail;
2494  break;
2495  case SOF2:
     /* NOTE(review): line 2496 elided — presumably the progressive
      * profile assignment. */
2497  s->lossless = 0;
2498  s->ls = 0;
2499  s->progressive = 1;
2500  if ((ret = ff_mjpeg_decode_sof(s)) < 0)
2501  goto fail;
2502  break;
2503  case SOF3:
     /* NOTE(review): lines 2504, 2506-2508 elided (profile setting and
      * deprecated codec-properties update). */
2505 #if FF_API_CODEC_PROPS
2509 #endif
2510  s->lossless = 1;
2511  s->ls = 0;
2512  s->progressive = 0;
2513  if ((ret = ff_mjpeg_decode_sof(s)) < 0)
2514  goto fail;
2515  break;
2516  case SOF55:
     /* NOTE(review): lines 2517, 2519-2521 elided (JPEG-LS profile and
      * deprecated codec-properties update). */
2518 #if FF_API_CODEC_PROPS
2522 #endif
2523  s->lossless = 1;
2524  s->ls = 1;
2525  s->progressive = 0;
2526  if ((ret = ff_mjpeg_decode_sof(s)) < 0)
2527  goto fail;
2528  break;
2529  case LSE:
2530  if (!CONFIG_JPEGLS_DECODER ||
2531  (ret = ff_jpegls_decode_lse(s)) < 0)
2532  goto fail;
2533  if (ret == 1)
2534  goto redo_for_pal8;
2535  break;
2536  case EOI:
2537 eoi_parser:
2538  if (!avctx->hwaccel &&
2539  s->progressive && s->cur_scan && s->got_picture)
     /* NOTE(review): line 2540 elided — presumably the final IDCT pass
      * over the accumulated progressive coefficients. */
2541  s->cur_scan = 0;
2542  if (!s->got_picture) {
2543  av_log(avctx, AV_LOG_WARNING,
2544  "Found EOI before any SOF, ignoring\n");
2545  break;
2546  }
2547  if (s->interlaced) {
2548  s->bottom_field ^= 1;
2549  /* if not bottom field, do not output image yet */
2550  if (s->bottom_field == !s->interlace_polarity)
2551  break;
2552  }
2553  if (avctx->hwaccel) {
2554  ret = FF_HW_SIMPLE_CALL(avctx, end_frame);
2555  if (ret < 0)
2556  return ret;
2557 
2558  av_freep(&s->hwaccel_picture_private);
2559  }
2560  if ((ret = av_frame_ref(frame, s->picture_ptr)) < 0)
2561  return ret;
2562  if (s->lossless)
2563  frame->flags |= AV_FRAME_FLAG_LOSSLESS;
2564  *got_frame = 1;
2565  s->got_picture = 0;
2566 
2567  if (!s->lossless && avctx->debug & FF_DEBUG_QP) {
2568  int qp = FFMAX3(s->qscale[0],
2569  s->qscale[1],
2570  s->qscale[2]);
2571 
2572  av_log(avctx, AV_LOG_DEBUG, "QP: %d\n", qp);
2573  }
2574 
2575  goto the_end;
2576  case SOS:
2577  s->raw_scan_buffer = buf_ptr;
2578  s->raw_scan_buffer_size = buf_end - buf_ptr;
2579 
2580  s->cur_scan++;
2581 
2582  if ((ret = ff_mjpeg_decode_sos(s, NULL, 0, NULL)) < 0 &&
2583  (avctx->err_recognition & AV_EF_EXPLODE))
2584  goto fail;
2585  break;
2586  case DRI:
2587  if ((ret = mjpeg_decode_dri(s)) < 0)
2588  return ret;
2589  break;
2590  case SOF5:
2591  case SOF6:
2592  case SOF7:
2593  case SOF9:
2594  case SOF10:
2595  case SOF11:
2596  case SOF13:
2597  case SOF14:
2598  case SOF15:
2599  case JPG:
2600  av_log(avctx, AV_LOG_ERROR,
2601  "mjpeg: unsupported coding type (%x)\n", start_code);
2602  break;
2603  }
2604 
2605  if (avctx->skip_frame == AVDISCARD_ALL) {
2606  switch(start_code) {
2607  case SOF0:
2608  case SOF1:
2609  case SOF2:
2610  case SOF3:
2611  case SOF55:
2612  s->got_picture = 0;
2613  goto the_end_no_picture;
2614  }
2615  }
2616 
2617 skip:
2618  /* eof process start code */
2619  buf_ptr += bytestream2_tell(&s->gB);
2620  av_log(avctx, AV_LOG_DEBUG,
2621  "marker parser used %d bytes\n",
2622  bytestream2_tell(&s->gB));
2623  }
2624  if (s->got_picture && s->cur_scan) {
2625  av_log(avctx, AV_LOG_WARNING, "EOI missing, emulating\n");
2626  goto eoi_parser;
2627  }
2628  av_log(avctx, AV_LOG_FATAL, "No JPEG data found in image\n");
2629  return AVERROR_INVALIDDATA;
2630 fail:
2631  s->got_picture = 0;
2632  return ret;
2633 the_end:
2634 
2635  is16bit = av_pix_fmt_desc_get(avctx->pix_fmt)->comp[0].step > 1;
2636 
     /* horizontal chroma upscaling for subsampled sources decoded into a
      * less-subsampled pix_fmt (per-plane factors in s->upscale_h) */
2637  if (AV_RB32(s->upscale_h)) {
2638  int p;
     /* NOTE(review): line 2639 elided — presumably the start of an
      * av_assert0( covering the pix_fmt list below. */
2640  avctx->pix_fmt == AV_PIX_FMT_YUV444P ||
2641  avctx->pix_fmt == AV_PIX_FMT_YUVJ440P ||
2642  avctx->pix_fmt == AV_PIX_FMT_YUV440P ||
2643  avctx->pix_fmt == AV_PIX_FMT_YUVA444P ||
2644  avctx->pix_fmt == AV_PIX_FMT_YUVJ422P ||
2645  avctx->pix_fmt == AV_PIX_FMT_YUV422P ||
2646  avctx->pix_fmt == AV_PIX_FMT_YUVJ420P ||
2647  avctx->pix_fmt == AV_PIX_FMT_YUV420P ||
2648  avctx->pix_fmt == AV_PIX_FMT_YUV420P16||
2649  avctx->pix_fmt == AV_PIX_FMT_YUVA420P ||
2650  avctx->pix_fmt == AV_PIX_FMT_YUVA420P16||
2651  avctx->pix_fmt == AV_PIX_FMT_GBRP ||
2652  avctx->pix_fmt == AV_PIX_FMT_GBRAP
2653  );
2654  ret = av_pix_fmt_get_chroma_sub_sample(avctx->pix_fmt, &hshift, &vshift);
2655  if (ret)
2656  return ret;
2657 
2658  av_assert0(s->nb_components == av_pix_fmt_count_planes(s->picture_ptr->format));
2659  for (p = 0; p<s->nb_components; p++) {
2660  uint8_t *line = s->picture_ptr->data[p];
2661  int w = s->width;
2662  int h = s->height;
2663  if (!s->upscale_h[p])
2664  continue;
2665  if (p==1 || p==2) {
2666  w = AV_CEIL_RSHIFT(w, hshift);
2667  h = AV_CEIL_RSHIFT(h, vshift);
2668  }
2669  if (s->upscale_v[p] == 1)
2670  h = (h+1)>>1;
2671  av_assert0(w > 0);
2672  for (int i = 0; i < h; i++) {
       /* in-place right-to-left expansion: x2, x3 and x4 variants */
2673  if (s->upscale_h[p] == 1) {
2674  if (is16bit) ((uint16_t*)line)[w - 1] = ((uint16_t*)line)[(w - 1) / 2];
2675  else line[w - 1] = line[(w - 1) / 2];
2676  for (index = w - 2; index > 0; index--) {
2677  if (is16bit)
2678  ((uint16_t*)line)[index] = (((uint16_t*)line)[index / 2] + ((uint16_t*)line)[(index + 1) / 2]) >> 1;
2679  else
2680  line[index] = (line[index / 2] + line[(index + 1) / 2]) >> 1;
2681  }
2682  } else if (s->upscale_h[p] == 2) {
2683  if (is16bit) {
2684  ((uint16_t*)line)[w - 1] = ((uint16_t*)line)[(w - 1) / 3];
2685  if (w > 1)
2686  ((uint16_t*)line)[w - 2] = ((uint16_t*)line)[w - 1];
2687  } else {
2688  line[w - 1] = line[(w - 1) / 3];
2689  if (w > 1)
2690  line[w - 2] = line[w - 1];
2691  }
2692  for (index = w - 3; index > 0; index--) {
2693  line[index] = (line[index / 3] + line[(index + 1) / 3] + line[(index + 2) / 3] + 1) / 3;
2694  }
2695  } else if (s->upscale_h[p] == 4){
2696  if (is16bit) {
2697  uint16_t *line16 = (uint16_t *) line;
2698  line16[w - 1] = line16[(w - 1) >> 2];
2699  if (w > 1)
2700  line16[w - 2] = (line16[(w - 1) >> 2] * 3 + line16[(w - 2) >> 2]) >> 2;
2701  if (w > 2)
2702  line16[w - 3] = (line16[(w - 1) >> 2] + line16[(w - 2) >> 2]) >> 1;
2703  } else {
2704  line[w - 1] = line[(w - 1) >> 2];
2705  if (w > 1)
2706  line[w - 2] = (line[(w - 1) >> 2] * 3 + line[(w - 2) >> 2]) >> 2;
2707  if (w > 2)
2708  line[w - 3] = (line[(w - 1) >> 2] + line[(w - 2) >> 2]) >> 1;
2709  }
2710  for (index = w - 4; index > 0; index--)
2711  line[index] = (line[(index + 3) >> 2] + line[(index + 2) >> 2]
2712  + line[(index + 1) >> 2] + line[index >> 2]) >> 2;
2713  }
2714  line += s->linesize[p];
2715  }
2716  }
2717  }
     /* vertical chroma upscaling, walking bottom-up so the source rows
      * are still intact when they are read */
2718  if (AV_RB32(s->upscale_v)) {
2719  int p;
     /* NOTE(review): line 2720 elided — presumably the start of an
      * av_assert0( covering the pix_fmt list below. */
2721  avctx->pix_fmt == AV_PIX_FMT_YUV444P ||
2722  avctx->pix_fmt == AV_PIX_FMT_YUVJ422P ||
2723  avctx->pix_fmt == AV_PIX_FMT_YUV422P ||
2724  avctx->pix_fmt == AV_PIX_FMT_YUVJ420P ||
2725  avctx->pix_fmt == AV_PIX_FMT_YUV420P ||
2726  avctx->pix_fmt == AV_PIX_FMT_YUV440P ||
2727  avctx->pix_fmt == AV_PIX_FMT_YUVJ440P ||
2728  avctx->pix_fmt == AV_PIX_FMT_YUVA444P ||
2729  avctx->pix_fmt == AV_PIX_FMT_YUVA420P ||
2730  avctx->pix_fmt == AV_PIX_FMT_YUVA420P16||
2731  avctx->pix_fmt == AV_PIX_FMT_GBRP ||
2732  avctx->pix_fmt == AV_PIX_FMT_GBRAP
2733  );
2734  ret = av_pix_fmt_get_chroma_sub_sample(avctx->pix_fmt, &hshift, &vshift);
2735  if (ret)
2736  return ret;
2737 
2738  av_assert0(s->nb_components == av_pix_fmt_count_planes(s->picture_ptr->format));
2739  for (p = 0; p < s->nb_components; p++) {
2740  uint8_t *dst;
2741  int w = s->width;
2742  int h = s->height;
2743  if (!s->upscale_v[p])
2744  continue;
2745  if (p==1 || p==2) {
2746  w = AV_CEIL_RSHIFT(w, hshift);
2747  h = AV_CEIL_RSHIFT(h, vshift);
2748  }
2749  dst = &((uint8_t *)s->picture_ptr->data[p])[(h - 1) * s->linesize[p]];
2750  for (int i = h - 1; i; i--) {
2751  uint8_t *src1 = &((uint8_t *)s->picture_ptr->data[p])[i * s->upscale_v[p] / (s->upscale_v[p] + 1) * s->linesize[p]];
2752  uint8_t *src2 = &((uint8_t *)s->picture_ptr->data[p])[(i + 1) * s->upscale_v[p] / (s->upscale_v[p] + 1) * s->linesize[p]];
2753  if (s->upscale_v[p] != 2 && (src1 == src2 || i == h - 1)) {
2754  memcpy(dst, src1, w);
2755  } else {
2756  for (index = 0; index < w; index++)
2757  dst[index] = (src1[index] + src2[index]) >> 1;
2758  }
2759  dst -= s->linesize[p];
2760  }
2761  }
2762  }
     /* bottom-up sources (Intel/Metasoft encoders): flip by pointing the
      * plane at its last row and negating the stride */
2763  if (s->flipped && !s->rgb) {
2764  ret = av_pix_fmt_get_chroma_sub_sample(avctx->pix_fmt, &hshift, &vshift);
2765  if (ret)
2766  return ret;
2767 
2768  av_assert0(s->nb_components == av_pix_fmt_count_planes(frame->format));
2769  for (index=0; index<s->nb_components; index++) {
2770  int h = frame->height;
2771  if (index && index < 3)
2772  h = AV_CEIL_RSHIFT(h, vshift);
2773  if (frame->data[index]) {
2774  frame->data[index] += (h - 1) * frame->linesize[index];
2775  frame->linesize[index] *= -1;
2776  }
2777  }
2778  }
2779 
     /* the decoder produced R,G,B plane order; swap into G,B,R as GBRP
      * expects */
2780  if (avctx->pix_fmt == AV_PIX_FMT_GBRP) {
2781  av_assert0(s->nb_components == 3);
2782  FFSWAP(uint8_t *, frame->data[0], frame->data[2]);
2783  FFSWAP(uint8_t *, frame->data[0], frame->data[1]);
2784  FFSWAP(int, frame->linesize[0], frame->linesize[2]);
2785  FFSWAP(int, frame->linesize[0], frame->linesize[1]);
2786  }
2787 
     /* Adobe transform 0 on 4-component data: undo the inverted-CMYK
      * style premultiplication and force alpha opaque */
2788  if (s->adobe_transform == 0 && avctx->pix_fmt == AV_PIX_FMT_GBRAP) {
2789  int w = s->picture_ptr->width;
2790  int h = s->picture_ptr->height;
2791  av_assert0(s->nb_components == 4);
2792  for (int i = 0; i < h; i++) {
2793  int j;
2794  uint8_t *dst[4];
2795  for (index=0; index<4; index++) {
2796  dst[index] = s->picture_ptr->data[index]
2797  + s->picture_ptr->linesize[index]*i;
2798  }
2799  for (j=0; j<w; j++) {
2800  int k = dst[3][j];
2801  int r = dst[0][j] * k;
2802  int g = dst[1][j] * k;
2803  int b = dst[2][j] * k;
       /* *257 >> 16 approximates /255 without a division */
2804  dst[0][j] = g*257 >> 16;
2805  dst[1][j] = b*257 >> 16;
2806  dst[2][j] = r*257 >> 16;
2807  }
2808  memset(dst[3], 255, w);
2809  }
2810  }
2811  if (s->adobe_transform == 2 && avctx->pix_fmt == AV_PIX_FMT_YUVA444P) {
2812  int w = s->picture_ptr->width;
2813  int h = s->picture_ptr->height;
2814  av_assert0(s->nb_components == 4);
2815  for (int i = 0; i < h; i++) {
2816  int j;
2817  uint8_t *dst[4];
2818  for (index=0; index<4; index++) {
2819  dst[index] = s->picture_ptr->data[index]
2820  + s->picture_ptr->linesize[index]*i;
2821  }
2822  for (j=0; j<w; j++) {
2823  int k = dst[3][j];
2824  int r = (255 - dst[0][j]) * k;
2825  int g = (128 - dst[1][j]) * k;
2826  int b = (128 - dst[2][j]) * k;
2827  dst[0][j] = r*257 >> 16;
2828  dst[1][j] = (g*257 >> 16) + 128;
2829  dst[2][j] = (b*257 >> 16) + 128;
2830  }
2831  memset(dst[3], 255, w);
2832  }
2833  }
2834 
     /* attach stereo 3D side data collected from the _JPS APP3 marker */
2835  if (s->stereo3d) {
     /* NOTE(review): line 2836 elided — presumably the side-data
      * allocation, e.g. AVStereo3D *stereo = av_stereo3d_create_side_data(frame); */
2837  if (stereo) {
2838  stereo->type = s->stereo3d->type;
2839  stereo->flags = s->stereo3d->flags;
2840  }
2841  av_freep(&s->stereo3d);
2842  }
2843 
     /* once every ICC chunk announced in APP2 has arrived, reassemble the
      * profile into a single side-data blob */
2844  if (s->iccnum != 0 && s->iccnum == s->iccread) {
2845  AVFrameSideData *sd;
2846  size_t offset = 0;
2847  int total_size = 0;
2848 
2849  /* Sum size of all parts. */
2850  for (int i = 0; i < s->iccnum; i++)
2851  total_size += s->iccentries[i].length;
2852 
2853  ret = ff_frame_new_side_data(avctx, frame, AV_FRAME_DATA_ICC_PROFILE, total_size, &sd);
2854  if (ret < 0) {
2855  av_log(avctx, AV_LOG_ERROR, "Could not allocate frame side data\n");
2856  return ret;
2857  }
2858 
2859  if (sd) {
2860  /* Reassemble the parts, which are now in-order. */
2861  for (int i = 0; i < s->iccnum; i++) {
2862  memcpy(sd->data + offset, s->iccentries[i].data, s->iccentries[i].length);
2863  offset += s->iccentries[i].length;
2864  }
2865  }
2866  }
2867 
2868  if (s->exif_metadata.entries) {
2869  ret = ff_decode_exif_attach_ifd(avctx, frame, &s->exif_metadata);
2870  av_exif_free(&s->exif_metadata);
2871  if (ret < 0)
2872  av_log(avctx, AV_LOG_WARNING, "couldn't attach EXIF metadata\n");
2873  }
2874 
     /* AVID (AVRn/AVDJ) streams may code more rows than displayed; expose
      * the extra rows as top cropping */
2875  if (avctx->codec_id != AV_CODEC_ID_SMVJPEG &&
2876  (avctx->codec_tag == MKTAG('A', 'V', 'R', 'n') ||
2877  avctx->codec_tag == MKTAG('A', 'V', 'D', 'J')) &&
2878  avctx->coded_height > s->orig_height) {
2879  frame->height = AV_CEIL_RSHIFT(avctx->coded_height, avctx->lowres);
2880  frame->crop_top = frame->height - avctx->height;
2881  }
2882 
2883 the_end_no_picture:
2884  av_log(avctx, AV_LOG_DEBUG, "decode frame unused %td bytes\n",
2885  buf_end - buf_ptr);
2886  return buf_ptr - buf;
2887 }
2888 
2889 int ff_mjpeg_decode_frame(AVCodecContext *avctx, AVFrame *frame, int *got_frame,
2890  AVPacket *avpkt)
2891 {
2892  return ff_mjpeg_decode_frame_from_buf(avctx, frame, got_frame,
2893  avpkt, avpkt->data, avpkt->size);
2894 }
2895 
2896 
2897 /* mxpeg may call the following function (with a blank MJpegDecodeContext)
2898  * even without having called ff_mjpeg_decode_init(). */
/* Decoder close: release every buffer, frame, VLC table and per-component
 * scratch area owned by the context.  Safe to call on a blank context
 * (mxpeg relies on this — see the comment above in the full source).
 * NOTE(review): the signature line and doxygen line 2928 (presumably the
 * ICC profile reset call) are elided in this extract. */
2900 {
2901  MJpegDecodeContext *s = avctx->priv_data;
2902  int i, j;
2903 
2904  if (s->interlaced && s->bottom_field == !s->interlace_polarity && s->got_picture && !avctx->frame_num) {
2905  av_log(avctx, AV_LOG_INFO, "Single field\n");
2906  }
2907 
2908  av_frame_free(&s->picture);
2909  s->picture_ptr = NULL;
2910 
2911  av_frame_free(&s->smv_frame);
2912 
2913  av_freep(&s->buffer);
2914  av_freep(&s->stereo3d);
2915  av_freep(&s->ljpeg_buffer);
2916  s->ljpeg_buffer_size = 0;
2917 
2918  for (i = 0; i < 3; i++) {
2919  for (j = 0; j < 4; j++)
2920  ff_vlc_free(&s->vlcs[i][j]);
2921  }
2922  for (i = 0; i < MAX_COMPONENTS; i++) {
2923  av_freep(&s->blocks[i]);
2924  av_freep(&s->last_nnz[i]);
2925  }
2926  av_exif_free(&s->exif_metadata);
2927 
     /* NOTE(review): line 2928 elided — presumably reset_icc_profiles(s). */
2929 
2930  av_freep(&s->hwaccel_picture_private);
2931  av_freep(&s->jls_state);
2932 
2933  return 0;
2934 }
2935 
/* Flush callback: drop any partially decoded picture and reset the SMV
 * sub-frame cursor so seeking starts from a clean state.
 * NOTE(review): signature line elided in this extract. */
2937 {
2938  MJpegDecodeContext *s = avctx->priv_data;
2939  s->got_picture = 0;
2940 
2941  s->smv_next_frame = 0;
2942  av_frame_unref(s->smv_frame);
2943 }
2944 
2945 #if CONFIG_MJPEG_DECODER
2946 #define OFFSET(x) offsetof(MJpegDecodeContext, x)
2947 #define VD AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_DECODING_PARAM
/* User-visible decoder options (AVOption table). */
2948 static const AVOption options[] = {
2949  { "extern_huff", "Use external huffman table.",
2950  OFFSET(extern_huff), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VD },
2951  { NULL },
2952 };
2953 
2954 static const AVClass mjpegdec_class = {
2955  .class_name = "MJPEG decoder",
2956  .item_name = av_default_item_name,
2957  .option = options,
2958  .version = LIBAVUTIL_VERSION_INT,
2959 };
2960 
/* Codec registration for the software MJPEG decoder.
 * NOTE(review): doxygen lines 2967-2969 and 2976-2977 are elided here —
 * presumably the init/close/decode callbacks and the remaining
 * caps_internal flags. */
2961 const FFCodec ff_mjpeg_decoder = {
2962  .p.name = "mjpeg",
2963  CODEC_LONG_NAME("MJPEG (Motion JPEG)"),
2964  .p.type = AVMEDIA_TYPE_VIDEO,
2965  .p.id = AV_CODEC_ID_MJPEG,
2966  .priv_data_size = sizeof(MJpegDecodeContext),
2970  .flush = decode_flush,
2971  .p.capabilities = AV_CODEC_CAP_DR1,
2972  .p.max_lowres = 3,
2973  .p.priv_class = &mjpegdec_class,
2974  .p.profiles = NULL_IF_CONFIG_SMALL(ff_mjpeg_profiles),
2975  .caps_internal = FF_CODEC_CAP_INIT_CLEANUP |
2978  .hw_configs = (const AVCodecHWConfigInternal *const []) {
2979 #if CONFIG_MJPEG_NVDEC_HWACCEL
2980  HWACCEL_NVDEC(mjpeg),
2981 #endif
2982 #if CONFIG_MJPEG_VAAPI_HWACCEL
2983  HWACCEL_VAAPI(mjpeg),
2984 #endif
2985  NULL
2986  },
2987 };
2988 #endif
2989 #if CONFIG_THP_DECODER
/* THP (Nintendo GameCube) reuses the MJPEG decoder with its own codec id;
 * the SOS unescaping takes a THP-specific fast path.
 * NOTE(review): doxygen lines 2996-2998 are elided — presumably the
 * init/close/decode callbacks. */
2990 const FFCodec ff_thp_decoder = {
2991  .p.name = "thp",
2992  CODEC_LONG_NAME("Nintendo Gamecube THP video"),
2993  .p.type = AVMEDIA_TYPE_VIDEO,
2994  .p.id = AV_CODEC_ID_THP,
2995  .priv_data_size = sizeof(MJpegDecodeContext),
2999  .flush = decode_flush,
3000  .p.capabilities = AV_CODEC_CAP_DR1,
3001  .p.max_lowres = 3,
3002  .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
3003 };
3004 #endif
3005 
3006 #if CONFIG_SMVJPEG_DECODER
3007 // SMV JPEG just stacks several output frames into one JPEG picture
3008 // we handle that by setting up the cropping parameters appropriately
3009 static void smv_process_frame(AVCodecContext *avctx, AVFrame *frame)
3010 {
3011  MJpegDecodeContext *s = avctx->priv_data;
3012 
3013  av_assert0((s->smv_next_frame + 1) * avctx->height <= avctx->coded_height);
3014 
3015  frame->width = avctx->coded_width;
3016  frame->height = avctx->coded_height;
3017  frame->crop_top = FFMIN(s->smv_next_frame * avctx->height, frame->height);
3018  frame->crop_bottom = frame->height - (s->smv_next_frame + 1) * avctx->height;
3019 
3020  if (s->smv_frame->pts != AV_NOPTS_VALUE)
3021  s->smv_frame->pts += s->smv_frame->duration;
3022  s->smv_next_frame = (s->smv_next_frame + 1) % s->smv_frames_per_jpeg;
3023 
3024  if (s->smv_next_frame == 0)
3025  av_frame_unref(s->smv_frame);
3026 }
3027 
/* receive_frame callback for SMV JPEG: decode one stacked JPEG picture
 * per input packet and hand out its sub-frames one at a time via
 * smv_process_frame().  Returns 0, AVERROR(EAGAIN) when more input is
 * needed, or another negative error code. */
3028 static int smvjpeg_receive_frame(AVCodecContext *avctx, AVFrame *frame)
3029 {
3030  MJpegDecodeContext *s = avctx->priv_data;
3031  AVPacket *const pkt = avctx->internal->in_pkt;
3032  int got_frame = 0;
3033  int ret;
3034 
     /* sub-frames of the previously decoded picture are still pending */
3035  if (s->smv_next_frame > 0)
3036  goto return_frame;
3037 
3038  ret = ff_decode_get_packet(avctx, pkt);
3039  if (ret < 0)
3040  return ret;
3041 
3042  av_frame_unref(s->smv_frame);
3043 
3044  ret = ff_mjpeg_decode_frame(avctx, s->smv_frame, &got_frame, pkt);
3045  s->smv_frame->pkt_dts = pkt->dts;
     /* NOTE(review): doxygen line 3046 elided — presumably
      * av_packet_unref(pkt); confirm in the full source. */
3047  if (ret < 0)
3048  return ret;
3049 
3050  if (!got_frame)
3051  return AVERROR(EAGAIN);
3052 
3053  // packet duration covers all the frames in the packet
3054  s->smv_frame->duration /= s->smv_frames_per_jpeg;
3055 
3056 return_frame:
3057  av_assert0(s->smv_frame->buf[0]);
3058  ret = av_frame_ref(frame, s->smv_frame);
3059  if (ret < 0)
3060  return ret;
3061 
3062  smv_process_frame(avctx, frame);
3063  return 0;
3064 }
3065 
/* Codec registration for SMV JPEG (cropping-based sub-frame output).
 * NOTE(review): doxygen lines 3072-3073 and 3078 are elided — presumably
 * the init/close callbacks and the remaining caps_internal flag. */
3066 const FFCodec ff_smvjpeg_decoder = {
3067  .p.name = "smvjpeg",
3068  CODEC_LONG_NAME("SMV JPEG"),
3069  .p.type = AVMEDIA_TYPE_VIDEO,
3070  .p.id = AV_CODEC_ID_SMVJPEG,
3071  .priv_data_size = sizeof(MJpegDecodeContext),
3074  FF_CODEC_RECEIVE_FRAME_CB(smvjpeg_receive_frame),
3075  .flush = decode_flush,
3076  .p.capabilities = AV_CODEC_CAP_DR1,
3077  .caps_internal = FF_CODEC_CAP_EXPORTS_CROPPING |
3079 };
3080 #endif
FF_ALLOCZ_TYPED_ARRAY
#define FF_ALLOCZ_TYPED_ARRAY(p, nelem)
Definition: internal.h:78
flags
const SwsFlags flags[]
Definition: swscale.c:61
hwconfig.h
av_packet_unref
void av_packet_unref(AVPacket *pkt)
Wipe the packet.
Definition: packet.c:432
AVCodecContext::hwaccel
const struct AVHWAccel * hwaccel
Hardware accelerator in use.
Definition: avcodec.h:1413
FF_ENABLE_DEPRECATION_WARNINGS
#define FF_ENABLE_DEPRECATION_WARNINGS
Definition: internal.h:73
skip_bits_long
static void skip_bits_long(GetBitContext *s, int n)
Skips the specified number of bits.
Definition: get_bits.h:280
ff_decode_get_packet
int ff_decode_get_packet(AVCodecContext *avctx, AVPacket *pkt)
Called by decoders to get the next packet for decoding.
Definition: decode.c:249
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:216
AV_PIX_FMT_CUDA
@ AV_PIX_FMT_CUDA
HW acceleration through CUDA.
Definition: pixfmt.h:260
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:71
name
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf default minimum maximum flags name is the option name
Definition: writing_filters.txt:88
jpegtables.h
mjpeg.h
level
uint8_t level
Definition: svq3.c:208
AV_EF_EXPLODE
#define AV_EF_EXPLODE
abort decoding on minor error detection
Definition: defs.h:51
FF_CODEC_CAP_INIT_CLEANUP
#define FF_CODEC_CAP_INIT_CLEANUP
The codec allows calling the close function for deallocation even if the init function returned a fai...
Definition: codec_internal.h:42
blockdsp.h
get_bits_left
static int get_bits_left(GetBitContext *gb)
Definition: get_bits.h:694
r
const char * r
Definition: vf_curves.c:127
AVERROR
#define AVERROR(e): builds a negative FFmpeg error code from a POSIX errno value; for example, a filter's format negotiation (or a decoder) can return AVERROR(EAGAIN) to ask the framework to try again later.
decode_slice
static int decode_slice(AVCodecContext *c, void *arg)
Definition: ffv1dec.c:360
opt.h
bytestream2_get_bytes_left
static av_always_inline int bytestream2_get_bytes_left(const GetByteContext *g)
Definition: bytestream.h:158
av_exif_parse_buffer
int av_exif_parse_buffer(void *logctx, const uint8_t *buf, size_t size, AVExifMetadata *ifd, enum AVExifHeaderMode header_mode)
Decodes the EXIF data provided in the buffer and writes it into the struct *ifd.
Definition: exif.c:870
AVCodecContext::colorspace
enum AVColorSpace colorspace
YUV colorspace type.
Definition: avcodec.h:667
ff_get_format
int ff_get_format(AVCodecContext *avctx, const enum AVPixelFormat *fmt)
Select the (possibly hardware accelerated) pixel format.
Definition: decode.c:1208
out
FILE * out
Definition: movenc.c:55
SOS
@ SOS
Definition: mjpeg.h:72
mjpeg_copy_block
static av_always_inline void mjpeg_copy_block(MJpegDecodeContext *s, uint8_t *dst, const uint8_t *src, int linesize, int lowres)
Definition: mjpegdec.c:1442
is
The official guide to swscale for confused that is
Definition: swscale.txt:28
APP1
@ APP1
Definition: mjpeg.h:80
bytestream2_tell
static av_always_inline int bytestream2_tell(const GetByteContext *g)
Definition: bytestream.h:192
av_pix_fmt_desc_get
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:3456
ZERO_RUN
#define ZERO_RUN
Definition: mjpegdec.c:994
SOF0
@ SOF0
Definition: mjpeg.h:39
src1
const pixel * src1
Definition: h264pred_template.c:420
AVCodecContext::err_recognition
int err_recognition
Error recognition; may misdetect some more or less valid parts as errors.
Definition: avcodec.h:1406
GET_VLC
#define GET_VLC(code, name, gb, table, bits, max_depth)
If the vlc code is invalid and max_depth=1, then no bits will be removed.
Definition: get_bits.h:573
bytestream2_skipu
static av_always_inline void bytestream2_skipu(GetByteContext *g, unsigned int size)
Definition: bytestream.h:174
ff_smvjpeg_decoder
const FFCodec ff_smvjpeg_decoder
init_put_bits
static void init_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size)
Initialize the PutBitContext s.
Definition: put_bits.h:62
se
#define se(name, range_min, range_max)
Definition: cbs_h2645.c:260
get_bits_count
static int get_bits_count(const GetBitContext *s)
Definition: get_bits.h:254
init_idct
static void init_idct(AVCodecContext *avctx)
Definition: mjpegdec.c:112
mask
int mask
Definition: mediacodecdec_common.c:154
RST7
@ RST7
Definition: mjpeg.h:68
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:64
mjpegdec.h
start_code
static const uint8_t start_code[]
Definition: videotoolboxenc.c:230
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:427
put_bits
static void put_bits(Jpeg2000EncoderContext *s, int val, int n)
put n times val bit
Definition: j2kenc.c:154
AV_PIX_FMT_YUVA420P16
#define AV_PIX_FMT_YUVA420P16
Definition: pixfmt.h:595
AVCOL_RANGE_JPEG
@ AVCOL_RANGE_JPEG
Full range content.
Definition: pixfmt.h:777
ff_mjpeg_decoder
const FFCodec ff_mjpeg_decoder
internal.h
AVPacket::data
uint8_t * data
Definition: packet.h:588
SOF11
@ SOF11
Definition: mjpeg.h:50
AVCodecContext::field_order
enum AVFieldOrder field_order
Field order.
Definition: avcodec.h:690
AVOption
AVOption.
Definition: opt.h:429
b
#define b
Definition: input.c:42
jpeglsdec.h
data
const char data[16]
Definition: mxf.c:149
AVComponentDescriptor::step
int step
Number of elements between 2 horizontally consecutive pixels.
Definition: pixdesc.h:40
ff_mjpeg_val_dc
const uint8_t ff_mjpeg_val_dc[]
Definition: jpegtabs.h:34
FFCodec
Definition: codec_internal.h:127
AV_LOG_VERBOSE
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:226
FF_HW_SIMPLE_CALL
#define FF_HW_SIMPLE_CALL(avctx, function)
Definition: hwaccel_internal.h:176
AV_PIX_FMT_BGR24
@ AV_PIX_FMT_BGR24
packed RGB 8:8:8, 24bpp, BGRBGR...
Definition: pixfmt.h:76
AV_PIX_FMT_YUV440P
@ AV_PIX_FMT_YUV440P
planar YUV 4:4:0 (1 Cr & Cb sample per 1x2 Y samples)
Definition: pixfmt.h:106
UPDATE_CACHE
#define UPDATE_CACHE(name, gb)
Definition: get_bits.h:213
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
ff_mjpeg_bits_ac_chrominance
const uint8_t ff_mjpeg_bits_ac_chrominance[]
Definition: jpegtabs.h:66
AV_CODEC_ID_THP
@ AV_CODEC_ID_THP
Definition: codec_id.h:152
ff_set_dimensions
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
Definition: utils.c:91
init_get_bits
static int init_get_bits(GetBitContext *s, const uint8_t *buffer, int bit_size)
Initialize GetBitContext.
Definition: get_bits.h:517
ff_idctdsp_init
av_cold void ff_idctdsp_init(IDCTDSPContext *c, AVCodecContext *avctx)
Definition: idctdsp.c:228
FF_DEBUG_PICT_INFO
#define FF_DEBUG_PICT_INFO
Definition: avcodec.h:1383
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:448
AV_FRAME_FLAG_TOP_FIELD_FIRST
#define AV_FRAME_FLAG_TOP_FIELD_FIRST
A flag to mark frames where the top field is displayed first if the content is interlaced.
Definition: frame.h:655
APP15
@ APP15
Definition: mjpeg.h:94
GET_CACHE
#define GET_CACHE(name, gb)
Definition: get_bits.h:251
skip_bits
static void skip_bits(GetBitContext *s, int n)
Definition: get_bits.h:383
ff_permute_scantable
av_cold void ff_permute_scantable(uint8_t dst[64], const uint8_t src[64], const uint8_t permutation[64])
Definition: idctdsp.c:30
close
static av_cold void close(AVCodecParserContext *s)
Definition: apv_parser.c:197
AV_STEREO3D_SIDEBYSIDE
@ AV_STEREO3D_SIDEBYSIDE
Views are next to each other.
Definition: stereo3d.h:64
bytestream2_skip
static av_always_inline void bytestream2_skip(GetByteContext *g, unsigned int size)
Definition: bytestream.h:168
av_pix_fmt_count_planes
int av_pix_fmt_count_planes(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:3496
AVCOL_SPC_BT470BG
@ AVCOL_SPC_BT470BG
also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM / IEC 61966-2-4 xvYCC601
Definition: pixfmt.h:706
get_bits
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
Definition: get_bits.h:337
rgb
Definition: rpzaenc.c:60
ff_mjpeg_decode_dht
int ff_mjpeg_decode_dht(MJpegDecodeContext *s)
Definition: mjpegdec.c:247
ljpeg_decode_yuv_scan
static int ljpeg_decode_yuv_scan(MJpegDecodeContext *s, int predictor, int point_transform, int nb_components)
Definition: mjpegdec.c:1285
shift_output
static void shift_output(MJpegDecodeContext *s, uint8_t *ptr, int linesize)
Definition: mjpegdec.c:1458
FFCodec::p
AVCodec p
The public AVCodec.
Definition: codec_internal.h:131
FFHWAccel
Definition: hwaccel_internal.h:34
AV_PIX_FMT_GBRAP
@ AV_PIX_FMT_GBRAP
planar GBRA 4:4:4:4 32bpp
Definition: pixfmt.h:212
AVCodecContext::codec
const struct AVCodec * codec
Definition: avcodec.h:448
ff_mjpeg_decode_init
av_cold int ff_mjpeg_decode_init(AVCodecContext *avctx)
Definition: mjpegdec.c:121
AVCodecContext::skip_frame
enum AVDiscard skip_frame
Skip decoding for selected frames.
Definition: avcodec.h:1670
fail
#define fail()
Definition: checkasm.h:216
AV_STEREO3D_2D
@ AV_STEREO3D_2D
Video is not stereoscopic (and metadata has to be there).
Definition: stereo3d.h:52
AV_PIX_FMT_YUVA444P16
#define AV_PIX_FMT_YUVA444P16
Definition: pixfmt.h:597
SOF3
@ SOF3
Definition: mjpeg.h:42
GetBitContext
Definition: get_bits.h:109
ff_mjpeg_decode_frame_from_buf
int ff_mjpeg_decode_frame_from_buf(AVCodecContext *avctx, AVFrame *frame, int *got_frame, const AVPacket *avpkt, const uint8_t *buf, const int buf_size)
Definition: mjpegdec.c:2377
mjpeg_decode_com
static int mjpeg_decode_com(MJpegDecodeContext *s)
Definition: mjpegdec.c:2172
init_default_huffman_tables
static int init_default_huffman_tables(MJpegDecodeContext *s)
Definition: mjpegdec.c:58
av_exif_free
void av_exif_free(AVExifMetadata *ifd)
Frees all resources associated with the given EXIF metadata struct.
Definition: exif.c:650
val
static double val(void *priv, double ch)
Definition: aeval.c:77
av_pix_fmt_get_chroma_sub_sample
int av_pix_fmt_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift)
Utility function to access log2_chroma_w log2_chroma_h from the pixel format AVPixFmtDescriptor.
Definition: pixdesc.c:3484
type
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf type
Definition: writing_filters.txt:86
AVCodecContext::coded_height
int coded_height
Definition: avcodec.h:615
AV_PIX_FMT_GRAY16
#define AV_PIX_FMT_GRAY16
Definition: pixfmt.h:522
ss
#define ss(width, name, subs,...)
Definition: cbs_vp9.c:202
av_frame_alloc
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:52
AV_PIX_FMT_YUVJ411P
@ AV_PIX_FMT_YUVJ411P
planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples) full scale (JPEG), deprecated in favor ...
Definition: pixfmt.h:283
ff_mjpeg_profiles
const AVProfile ff_mjpeg_profiles[]
Definition: profiles.c:191
avassert.h
pkt
AVPacket * pkt
Definition: movenc.c:60
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:210
FF_ARRAY_ELEMS
#define FF_ARRAY_ELEMS(a)
Definition: sinewin_tablegen.c:29
av_cold
#define av_cold
Definition: attributes.h:106
decode_dc_progressive
static int decode_dc_progressive(MJpegDecodeContext *s, int16_t *block, int component, int dc_index, uint16_t *quant_matrix, int Al)
Definition: mjpegdec.c:897
AV_PIX_FMT_YUV422P16
#define AV_PIX_FMT_YUV422P16
Definition: pixfmt.h:551
init_get_bits8
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
Definition: get_bits.h:544
FF_CODEC_PROPERTY_LOSSLESS
#define FF_CODEC_PROPERTY_LOSSLESS
Definition: avcodec.h:1646
AV_PROFILE_MJPEG_HUFFMAN_BASELINE_DCT
#define AV_PROFILE_MJPEG_HUFFMAN_BASELINE_DCT
Definition: defs.h:173
COM
@ COM
Definition: mjpeg.h:111
AV_FRAME_FLAG_KEY
#define AV_FRAME_FLAG_KEY
A flag to mark frames that are keyframes.
Definition: frame.h:642
AV_FIELD_UNKNOWN
@ AV_FIELD_UNKNOWN
Definition: defs.h:212
handle_rstn
static int handle_rstn(MJpegDecodeContext *s, int nb_components)
Definition: mjpegdec.c:1077
AV_PIX_FMT_YUVJ422P
@ AV_PIX_FMT_YUVJ422P
planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV422P and setting col...
Definition: pixfmt.h:86
CLOSE_READER
#define CLOSE_READER(name, gb)
Definition: get_bits.h:189
SOF5
@ SOF5
Definition: mjpeg.h:44
AVCodecContext::extradata_size
int extradata_size
Definition: avcodec.h:523
FF_CODEC_DECODE_CB
#define FF_CODEC_DECODE_CB(func)
Definition: codec_internal.h:347
AV_STEREO3D_LINES
@ AV_STEREO3D_LINES
Views are packed per line, as if interlaced.
Definition: stereo3d.h:126
ff_blockdsp_init
av_cold void ff_blockdsp_init(BlockDSPContext *c)
Definition: blockdsp.c:58
s
#define s(width, name)
Definition: cbs_vp9.c:198
AV_PIX_FMT_YUVA420P
@ AV_PIX_FMT_YUVA420P
planar YUV 4:2:0, 20bpp, (1 Cr & Cb sample per 2x2 Y & A samples)
Definition: pixfmt.h:108
parse_avid
static void parse_avid(MJpegDecodeContext *s, uint8_t *buf, int len)
Definition: mjpegdec.c:101
AV_PIX_FMT_YUV444P16
#define AV_PIX_FMT_YUV444P16
Definition: pixfmt.h:552
AV_CEIL_RSHIFT
#define AV_CEIL_RSHIFT(a, b)
Definition: common.h:60
g
const char * g
Definition: vf_curves.c:128
APP3
@ APP3
Definition: mjpeg.h:82
AV_GET_BUFFER_FLAG_REF
#define AV_GET_BUFFER_FLAG_REF
The decoder will keep a reference to the frame and may reuse it later.
Definition: avcodec.h:411
ff_jpegls_decode_picture
int ff_jpegls_decode_picture(MJpegDecodeContext *s, int near, int point_transform, int ilv)
Definition: jpeglsdec.c:355
bits
uint8_t bits
Definition: vp3data.h:128
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:41
pix_fmts
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:296
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:231
AV_PIX_FMT_YUV420P16
#define AV_PIX_FMT_YUV420P16
Definition: pixfmt.h:550
RST0
@ RST0
Definition: mjpeg.h:61
decode.h
reset_icc_profile
static void reset_icc_profile(MJpegDecodeContext *s)
Definition: mjpegdec.c:2363
ff_mjpeg_decode_end
av_cold int ff_mjpeg_decode_end(AVCodecContext *avctx)
Definition: mjpegdec.c:2899
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:73
av_mallocz
#define av_mallocz(s)
Definition: tableprint_vlc.h:31
SOF55
@ SOF55
JPEG-LS.
Definition: mjpeg.h:103
PutBitContext
Definition: put_bits.h:50
CODEC_LONG_NAME
#define CODEC_LONG_NAME(str)
Definition: codec_internal.h:332
AV_PIX_FMT_YUVJ444P
@ AV_PIX_FMT_YUVJ444P
planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV444P and setting col...
Definition: pixfmt.h:87
AVCodecContext::codec_id
enum AVCodecID codec_id
Definition: avcodec.h:449
AVStereo3D::flags
int flags
Additional information about the frame packing.
Definition: stereo3d.h:212
mjpeg_parse_len
static int mjpeg_parse_len(MJpegDecodeContext *s, int *plen, const char *name)
Definition: mjpegdec.c:192
if
if(ret)
Definition: filter_design.txt:179
AVDISCARD_ALL
@ AVDISCARD_ALL
discard all
Definition: defs.h:232
AV_PIX_FMT_GBRP16
#define AV_PIX_FMT_GBRP16
Definition: pixfmt.h:561
AV_PIX_FMT_RGBA64
#define AV_PIX_FMT_RGBA64
Definition: pixfmt.h:529
LIBAVUTIL_VERSION_INT
#define LIBAVUTIL_VERSION_INT
Definition: version.h:85
ff_decode_exif_attach_ifd
int ff_decode_exif_attach_ifd(AVCodecContext *avctx, AVFrame *frame, const AVExifMetadata *ifd)
Definition: decode.c:2430
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:76
av_clip_int16
#define av_clip_int16
Definition: common.h:115
AV_PIX_FMT_BGR48
#define AV_PIX_FMT_BGR48
Definition: pixfmt.h:530
NULL
#define NULL
Definition: coverity.c:32
mjpeg_idct_scan_progressive_ac
static void mjpeg_idct_scan_progressive_ac(MJpegDecodeContext *s)
Definition: mjpegdec.c:1649
copy_block2
static void copy_block2(uint8_t *dst, const uint8_t *src, ptrdiff_t dstStride, ptrdiff_t srcStride, int h)
Definition: copy_block.h:27
AVERROR_PATCHWELCOME
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:64
run
uint8_t run
Definition: svq3.c:207
AV_EXIF_TIFF_HEADER
@ AV_EXIF_TIFF_HEADER
The TIFF header starts with 0x49492a00, or 0x4d4d002a.
Definition: exif.h:63
hwaccel_internal.h
AV_PROFILE_MJPEG_HUFFMAN_EXTENDED_SEQUENTIAL_DCT
#define AV_PROFILE_MJPEG_HUFFMAN_EXTENDED_SEQUENTIAL_DCT
Definition: defs.h:174
AVRational
Rational number (pair of numerator and denominator).
Definition: rational.h:58
ff_mjpeg_decode_dqt
int ff_mjpeg_decode_dqt(MJpegDecodeContext *s)
Definition: mjpegdec.c:204
SOF13
@ SOF13
Definition: mjpeg.h:52
AVCodecContext::internal
struct AVCodecInternal * internal
Private context used for internal data.
Definition: avcodec.h:474
AV_PIX_FMT_YUVJ420P
@ AV_PIX_FMT_YUVJ420P
planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV420P and setting col...
Definition: pixfmt.h:85
mjpeg_decode_dc
static int mjpeg_decode_dc(MJpegDecodeContext *s, int dc_index, int *val)
Definition: mjpegdec.c:835
av_default_item_name
const char * av_default_item_name(void *ptr)
Return the context name.
Definition: log.c:242
get_bits1
static unsigned int get_bits1(GetBitContext *s)
Definition: get_bits.h:391
AV_PICTURE_TYPE_I
@ AV_PICTURE_TYPE_I
Intra.
Definition: avutil.h:278
profiles.h
AV_FRAME_DATA_ICC_PROFILE
@ AV_FRAME_DATA_ICC_PROFILE
The data contains an ICC profile as an opaque octet buffer following the format described by ISO 1507...
Definition: frame.h:144
options
Definition: swscale.c:43
LAST_SKIP_BITS
#define LAST_SKIP_BITS(name, gb, num)
Definition: get_bits.h:235
MJpegDecodeContext
Definition: mjpegdec.h:56
mjpeg_decode_scan
static int mjpeg_decode_scan(MJpegDecodeContext *s, int nb_components, int Ah, int Al, const uint8_t *mb_bitmask, int mb_bitmask_size, const AVFrame *reference)
Definition: mjpegdec.c:1473
decode_block_refinement
static int decode_block_refinement(MJpegDecodeContext *s, int16_t *block, uint8_t *last_nnz, int ac_index, uint16_t *quant_matrix, int ss, int se, int Al, int *EOBRUN)
Definition: mjpegdec.c:1012
lowres
static int lowres
Definition: ffplay.c:330
mjpeg_decode_scan_progressive_ac
static int mjpeg_decode_scan_progressive_ac(MJpegDecodeContext *s, int ss, int se, int Ah, int Al)
Definition: mjpegdec.c:1594
ff_mjpeg_val_ac_chrominance
const uint8_t ff_mjpeg_val_ac_chrominance[]
Definition: jpegtabs.h:69
AV_PIX_FMT_GRAY8
@ AV_PIX_FMT_GRAY8
Y , 8bpp.
Definition: pixfmt.h:81
get_vlc2
static av_always_inline int get_vlc2(GetBitContext *s, const VLCElem *table, int bits, int max_depth)
Parse a vlc code.
Definition: get_bits.h:651
AV_PIX_FMT_ABGR
@ AV_PIX_FMT_ABGR
packed ABGR 8:8:8:8, 32bpp, ABGRABGR...
Definition: pixfmt.h:101
DRI
@ DRI
Definition: mjpeg.h:75
index
int index
Definition: gxfenc.c:90
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
ff_dlog
#define ff_dlog(a,...)
Definition: tableprint_vlc.h:28
copy_data_segment
#define copy_data_segment(skip)
AVCodecContext::lowres
int lowres
low resolution decoding, 1-> 1/2 size, 2->1/4 size
Definition: avcodec.h:1705
options
const OptionDef options[]
copy_mb
static void copy_mb(CinepakEncContext *s, uint8_t *a_data[4], int a_linesize[4], uint8_t *b_data[4], int b_linesize[4])
Definition: cinepakenc.c:506
ff_get_buffer
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Get a buffer for a frame.
Definition: decode.c:1729
init
int(* init)(AVBSFContext *ctx)
Definition: dts2pts.c:550
AV_PIX_FMT_RGB24
@ AV_PIX_FMT_RGB24
packed RGB 8:8:8, 24bpp, RGBRGB...
Definition: pixfmt.h:75
AV_CODEC_CAP_DR1
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() or get_encode_buffer() for allocating buffers and supports custom allocators.
Definition: codec.h:52
ljpeg_decode_rgb_scan
static int ljpeg_decode_rgb_scan(MJpegDecodeContext *s, int nb_components, int predictor, int point_transform)
Definition: mjpegdec.c:1112
ff_mjpeg_val_ac_luminance
const uint8_t ff_mjpeg_val_ac_luminance[]
Definition: jpegtabs.h:42
AVPacket::size
int size
Definition: packet.h:589
dc
Tag MUST be and< 10hcoeff half pel interpolation filter coefficients, hcoeff[0] are the 2 middle coefficients[1] are the next outer ones and so on, resulting in a filter like:...eff[2], hcoeff[1], hcoeff[0], hcoeff[0], hcoeff[1], hcoeff[2] ... the sign of the coefficients is not explicitly stored but alternates after each coeff and coeff[0] is positive, so ...,+,-,+,-,+,+,-,+,-,+,... hcoeff[0] is not explicitly stored but found by subtracting the sum of all stored coefficients with signs from 32 hcoeff[0]=32 - hcoeff[1] - hcoeff[2] - ... a good choice for hcoeff and htaps is htaps=6 hcoeff={40,-10, 2} an alternative which requires more computations at both encoder and decoder side and may or may not be better is htaps=8 hcoeff={42,-14, 6,-2}ref_frames minimum of the number of available reference frames and max_ref_frames for example the first frame after a key frame always has ref_frames=1spatial_decomposition_type wavelet type 0 is a 9/7 symmetric compact integer wavelet 1 is a 5/3 symmetric compact integer wavelet others are reserved stored as delta from last, last is reset to 0 if always_reset||keyframeqlog quality(logarithmic quantizer scale) stored as delta from last, last is reset to 0 if always_reset||keyframemv_scale stored as delta from last, last is reset to 0 if always_reset||keyframe FIXME check that everything works fine if this changes between framesqbias dequantization bias stored as delta from last, last is reset to 0 if always_reset||keyframeblock_max_depth maximum depth of the block tree stored as delta from last, last is reset to 0 if always_reset||keyframequant_table quantization tableHighlevel bitstream structure:==============================--------------------------------------------|Header|--------------------------------------------|------------------------------------|||Block0||||split?||||yes no||||......... intra?||||:Block01 :yes no||||:Block02 :....... 
..........||||:Block03 ::y DC ::ref index:||||:Block04 ::cb DC ::motion x :||||......... :cr DC ::motion y :||||....... ..........|||------------------------------------||------------------------------------|||Block1|||...|--------------------------------------------|------------ ------------ ------------|||Y subbands||Cb subbands||Cr subbands||||--- ---||--- ---||--- ---|||||LL0||HL0||||LL0||HL0||||LL0||HL0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||LH0||HH0||||LH0||HH0||||LH0||HH0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HL1||LH1||||HL1||LH1||||HL1||LH1|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HH1||HL2||||HH1||HL2||||HH1||HL2|||||...||...||...|||------------ ------------ ------------|--------------------------------------------Decoding process:=================------------|||Subbands|------------||||------------|Intra DC||||LL0 subband prediction ------------|\ Dequantization ------------------- \||Reference frames|\ IDWT|------- -------|Motion \|||Frame 0||Frame 1||Compensation . OBMC v -------|------- -------|--------------. \------> Frame n output Frame Frame<----------------------------------/|...|------------------- Range Coder:============Binary Range Coder:------------------- The implemented range coder is an adapted version based upon "Range encoding: an algorithm for removing redundancy from a digitised message." by G. N. N. Martin. The symbols encoded by the Snow range coder are bits(0|1). The associated probabilities are not fix but change depending on the symbol mix seen so far. 
bit seen|new state ---------+----------------------------------------------- 0|256 - state_transition_table[256 - old_state];1|state_transition_table[old_state];state_transition_table={ 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 190, 191, 192, 194, 194, 195, 196, 197, 198, 199, 200, 201, 202, 202, 204, 205, 206, 207, 208, 209, 209, 210, 211, 212, 213, 215, 215, 216, 217, 218, 219, 220, 220, 222, 223, 224, 225, 226, 227, 227, 229, 229, 230, 231, 232, 234, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 248, 0, 0, 0, 0, 0, 0, 0};FIXME Range Coding of integers:------------------------- FIXME Neighboring Blocks:===================left and top are set to the respective blocks unless they are outside of the image in which case they are set to the Null block top-left is set to the top left block unless it is outside of the image in which case it is set to the left block if this block has no larger parent block or it is at the left side of its parent block and the top right block is not outside of the image then the top right block is used for top-right else the top-left block is used Null block y, cb, cr are 128 level, ref, mx and my are 0 Motion Vector 
Prediction:=========================1. the motion vectors of all the neighboring blocks are scaled to compensate for the difference of reference frames scaled_mv=(mv *(256 *(current_reference+1)/(mv.reference+1))+128)> the median of the scaled top and top right vectors is used as motion vector prediction the used motion vector is the sum of the predictor and(mvx_diff, mvy_diff) *mv_scale Intra DC Prediction block[y][x] dc[1]
Definition: snow.txt:400
NULL_IF_CONFIG_SMALL
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
Definition: internal.h:94
height
#define height
Definition: dsp.h:89
av_frame_ref
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:278
codec_internal.h
SOF14
@ SOF14
Definition: mjpeg.h:53
ff_jpegls_decode_lse
int ff_jpegls_decode_lse(MJpegDecodeContext *s)
Decode LSE block with initialization parameters.
Definition: jpeglsdec.c:51
dst
uint8_t ptrdiff_t const uint8_t ptrdiff_t int intptr_t intptr_t int int16_t * dst
Definition: dsp.h:87
ff_mjpeg_decode_frame
int ff_mjpeg_decode_frame(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *avpkt)
Definition: mjpegdec.c:2889
av_bswap32
#define av_bswap32
Definition: bswap.h:47
decode_block_progressive
static int decode_block_progressive(MJpegDecodeContext *s, int16_t *block, uint8_t *last_nnz, int ac_index, uint16_t *quant_matrix, int ss, int se, int Al, int *EOBRUN)
Definition: mjpegdec.c:914
av_err2str
#define av_err2str(errnum)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: error.h:122
for
for(k=2;k<=8;++k)
Definition: h264pred_template.c:424
ff_mjpeg_decode_sos
int ff_mjpeg_decode_sos(MJpegDecodeContext *s, const uint8_t *mb_bitmask, int mb_bitmask_size, const AVFrame *reference)
Definition: mjpegdec.c:1684
AV_PROFILE_MJPEG_JPEG_LS
#define AV_PROFILE_MJPEG_JPEG_LS
Definition: defs.h:177
ff_mjpeg_bits_ac_luminance
const uint8_t ff_mjpeg_bits_ac_luminance[]
Definition: jpegtabs.h:40
FF_CODEC_CAP_EXPORTS_CROPPING
#define FF_CODEC_CAP_EXPORTS_CROPPING
The decoder sets the cropping fields in the output frames manually.
Definition: codec_internal.h:60
size
int size
Definition: twinvq_data.h:10344
AV_CODEC_ID_SMVJPEG
@ AV_CODEC_ID_SMVJPEG
Definition: codec_id.h:268
AV_NOPTS_VALUE
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avutil.h:247
ff_frame_new_side_data
int ff_frame_new_side_data(const AVCodecContext *avctx, AVFrame *frame, enum AVFrameSideDataType type, size_t size, AVFrameSideData **psd)
Wrapper around av_frame_new_side_data, which rejects side data overridden by the demuxer.
Definition: decode.c:2127
AV_RB32
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_WL32 unsigned int_TMPL AV_WL24 unsigned int_TMPL AV_WL16 uint64_t_TMPL AV_WB64 unsigned int_TMPL AV_RB32
Definition: bytestream.h:96
FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM
#define FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM
The decoder extracts and fills its parameters even if the frame is skipped due to the skip_frame sett...
Definition: codec_internal.h:54
avpriv_report_missing_feature
void avpriv_report_missing_feature(void *avc, const char *msg,...) av_printf_format(2
Log a generic warning message about a missing feature.
AVFrameSideData::data
uint8_t * data
Definition: frame.h:284
SOF15
@ SOF15
Definition: mjpeg.h:54
AVCodecHWConfigInternal
Definition: hwconfig.h:25
OPEN_READER
#define OPEN_READER(name, gb)
Definition: get_bits.h:177
AVPacket::dts
int64_t dts
Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed.
Definition: packet.h:587
AV_PIX_FMT_YUVA444P
@ AV_PIX_FMT_YUVA444P
planar YUV 4:4:4 32bpp, (1 Cr & Cb sample per 1x1 Y & A samples)
Definition: pixfmt.h:174
offset
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf offset
Definition: writing_filters.txt:86
line
Definition: graph2dot.c:48
attributes.h
get_xbits
static int get_xbits(GetBitContext *s, int n)
Read MPEG-1 dc-style VLC (sign bit + mantissa with no MSB).
Definition: get_bits.h:294
HWACCEL_NVDEC
#define HWACCEL_NVDEC(codec)
Definition: hwconfig.h:68
predictor
static void predictor(uint8_t *src, ptrdiff_t size)
Definition: exrenc.c:170
find_marker
static int find_marker(const uint8_t **pbuf_ptr, const uint8_t *buf_end)
Definition: mjpegdec.c:2216
AV_STEREO3D_FLAG_INVERT
#define AV_STEREO3D_FLAG_INVERT
Inverted views, Right/Bottom represents the left view.
Definition: stereo3d.h:194
AV_PIX_FMT_VAAPI
@ AV_PIX_FMT_VAAPI
Hardware acceleration through VA-API, data[3] contains a VASurfaceID.
Definition: pixfmt.h:126
DQT
@ DQT
Definition: mjpeg.h:73
AV_LOG_INFO
#define AV_LOG_INFO
Standard information.
Definition: log.h:221
ff_thp_decoder
const FFCodec ff_thp_decoder
AVCodec::id
enum AVCodecID id
Definition: codec.h:186
layout
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel layout
Definition: filter_design.txt:18
SOF10
@ SOF10
Definition: mjpeg.h:49
AV_CODEC_ID_MJPEG
@ AV_CODEC_ID_MJPEG
Definition: codec_id.h:59
NEG_USR32
#define NEG_USR32(a, s)
Definition: mathops.h:180
copy_block4
static void copy_block4(uint8_t *dst, const uint8_t *src, ptrdiff_t dstStride, ptrdiff_t srcStride, int h)
Definition: copy_block.h:37
interlaced
uint8_t interlaced
Definition: mxfenc.c:2334
decode_block
static int decode_block(MJpegDecodeContext *s, int16_t *block, int component, int dc_index, int ac_index, uint16_t *quant_matrix)
Definition: mjpegdec.c:850
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:256
code
and forward the test the status of outputs and forward it to the corresponding return FFERROR_NOT_READY If the filters stores internally one or a few frame for some it can consider them to be part of the FIFO and delay acknowledging a status change accordingly Example code
Definition: filter_design.txt:178
EOI
@ EOI
Definition: mjpeg.h:71
copy_block.h
AVCodecContext::extradata
uint8_t * extradata
Out-of-band global headers that may be used by some codecs.
Definition: avcodec.h:522
AV_PROFILE_MJPEG_HUFFMAN_LOSSLESS
#define AV_PROFILE_MJPEG_HUFFMAN_LOSSLESS
Definition: defs.h:176
show_bits
static unsigned int show_bits(GetBitContext *s, int n)
Show 1-25 bits.
Definition: get_bits.h:373
VD
#define VD
Definition: amfdec.c:665
src2
const pixel * src2
Definition: h264pred_template.c:421
AV_FIELD_BB
@ AV_FIELD_BB
Bottom coded first, bottom displayed first.
Definition: defs.h:215
AV_STEREO3D_TOPBOTTOM
@ AV_STEREO3D_TOPBOTTOM
Views are on top of each other.
Definition: stereo3d.h:76
mjpeg_decode_dri
static int mjpeg_decode_dri(MJpegDecodeContext *s)
Definition: mjpegdec.c:1862
AVCodecInternal::in_pkt
AVPacket * in_pkt
This packet is used to hold the packet given to decoders implementing the .decode API; it is unused b...
Definition: internal.h:83
av_fast_padded_malloc
void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size)
Same behaviour av_fast_malloc but the buffer has additional AV_INPUT_BUFFER_PADDING_SIZE at the end w...
Definition: utils.c:53
SOF9
@ SOF9
Definition: mjpeg.h:48
av_always_inline
#define av_always_inline
Definition: attributes.h:63
decode_flush
static av_cold void decode_flush(AVCodecContext *avctx)
Definition: mjpegdec.c:2936
FF_DEBUG_STARTCODE
#define FF_DEBUG_STARTCODE
Definition: avcodec.h:1390
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
AV_PIX_FMT_YUVJ440P
@ AV_PIX_FMT_YUVJ440P
planar YUV 4:4:0 full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV440P and setting color_range
Definition: pixfmt.h:107
av_frame_unref
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:496
AVCodec::name
const char * name
Name of the codec implementation.
Definition: codec.h:179
AVCodecContext::chroma_sample_location
enum AVChromaLocation chroma_sample_location
This defines the location of chroma samples.
Definition: avcodec.h:684
len
int len
Definition: vorbis_enc_data.h:426
exif.h
DHT
@ DHT
Definition: mjpeg.h:56
AVCodecContext::height
int height
Definition: avcodec.h:600
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:639
AV_FRAME_FLAG_INTERLACED
#define AV_FRAME_FLAG_INTERLACED
A flag to mark frames whose content is interlaced.
Definition: frame.h:650
AVCOL_RANGE_MPEG
@ AVCOL_RANGE_MPEG
Narrow or limited range content.
Definition: pixfmt.h:760
av_calloc
void * av_calloc(size_t nmemb, size_t size)
Definition: mem.c:264
FF_CODEC_CAP_ICC_PROFILES
#define FF_CODEC_CAP_ICC_PROFILES
Codec supports embedded ICC profiles (AV_FRAME_DATA_ICC_PROFILE).
Definition: codec_internal.h:81
idctdsp.h
avcodec.h
ff_zigzag_direct
const uint8_t ff_zigzag_direct[64]
Definition: mathtables.c:137
AV_PIX_FMT_PAL8
@ AV_PIX_FMT_PAL8
8 bits with AV_PIX_FMT_RGB32 palette
Definition: pixfmt.h:84
AVCodecContext::frame_num
int64_t frame_num
Frame counter, set by libavcodec.
Definition: avcodec.h:1886
REFINE_BIT
#define REFINE_BIT(j)
Definition: mjpegdec.c:986
ff_vlc_free
void ff_vlc_free(VLC *vlc)
Definition: vlc.c:580
ret
ret
Definition: filter_design.txt:187
AV_LOG_FATAL
#define AV_LOG_FATAL
Something went wrong and recovery is not possible.
Definition: log.h:204
pred
static const float pred[4]
Definition: siprdata.h:259
av_stereo3d_alloc
AVStereo3D * av_stereo3d_alloc(void)
Allocate an AVStereo3D structure and set its fields to default values.
Definition: stereo3d.c:35
FFSWAP
#define FFSWAP(type, a, b)
Definition: macros.h:52
AVClass::class_name
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
Definition: log.h:81
frame
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
Definition: filter_design.txt:265
av_malloc
void * av_malloc(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:98
AVStereo3D::type
enum AVStereo3DType type
How views are packed within the video.
Definition: stereo3d.h:207
SOF2
@ SOF2
Definition: mjpeg.h:41
align_get_bits
static const uint8_t * align_get_bits(GetBitContext *s)
Definition: get_bits.h:560
hwaccel
static const char * hwaccel
Definition: ffplay.c:353
pos
unsigned int pos
Definition: spdifenc.c:414
LSE
@ LSE
JPEG-LS extension parameters.
Definition: mjpeg.h:104
FF_DEBUG_QP
#define FF_DEBUG_QP
Definition: avcodec.h:1387
AV_INPUT_BUFFER_PADDING_SIZE
#define AV_INPUT_BUFFER_PADDING_SIZE
Definition: defs.h:40
id
enum AVCodecID id
Definition: dts2pts.c:549
left
Tag MUST be and< 10hcoeff half pel interpolation filter coefficients, hcoeff[0] are the 2 middle coefficients[1] are the next outer ones and so on, resulting in a filter like:...eff[2], hcoeff[1], hcoeff[0], hcoeff[0], hcoeff[1], hcoeff[2] ... the sign of the coefficients is not explicitly stored but alternates after each coeff and coeff[0] is positive, so ...,+,-,+,-,+,+,-,+,-,+,... hcoeff[0] is not explicitly stored but found by subtracting the sum of all stored coefficients with signs from 32 hcoeff[0]=32 - hcoeff[1] - hcoeff[2] - ... a good choice for hcoeff and htaps is htaps=6 hcoeff={40,-10, 2} an alternative which requires more computations at both encoder and decoder side and may or may not be better is htaps=8 hcoeff={42,-14, 6,-2}ref_frames minimum of the number of available reference frames and max_ref_frames for example the first frame after a key frame always has ref_frames=1spatial_decomposition_type wavelet type 0 is a 9/7 symmetric compact integer wavelet 1 is a 5/3 symmetric compact integer wavelet others are reserved stored as delta from last, last is reset to 0 if always_reset||keyframeqlog quality(logarithmic quantizer scale) stored as delta from last, last is reset to 0 if always_reset||keyframemv_scale stored as delta from last, last is reset to 0 if always_reset||keyframe FIXME check that everything works fine if this changes between framesqbias dequantization bias stored as delta from last, last is reset to 0 if always_reset||keyframeblock_max_depth maximum depth of the block tree stored as delta from last, last is reset to 0 if always_reset||keyframequant_table quantization tableHighlevel bitstream structure:==============================--------------------------------------------|Header|--------------------------------------------|------------------------------------|||Block0||||split?||||yes no||||......... intra?||||:Block01 :yes no||||:Block02 :....... 
..........||||:Block03 ::y DC ::ref index:||||:Block04 ::cb DC ::motion x :||||......... :cr DC ::motion y :||||....... ..........|||------------------------------------||------------------------------------|||Block1|||...|--------------------------------------------|------------ ------------ ------------|||Y subbands||Cb subbands||Cr subbands||||--- ---||--- ---||--- ---|||||LL0||HL0||||LL0||HL0||||LL0||HL0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||LH0||HH0||||LH0||HH0||||LH0||HH0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HL1||LH1||||HL1||LH1||||HL1||LH1|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HH1||HL2||||HH1||HL2||||HH1||HL2|||||...||...||...|||------------ ------------ ------------|--------------------------------------------Decoding process:=================------------|||Subbands|------------||||------------|Intra DC||||LL0 subband prediction ------------|\ Dequantization ------------------- \||Reference frames|\ IDWT|------- -------|Motion \|||Frame 0||Frame 1||Compensation . OBMC v -------|------- -------|--------------. \------> Frame n output Frame Frame<----------------------------------/|...|------------------- Range Coder:============Binary Range Coder:------------------- The implemented range coder is an adapted version based upon "Range encoding: an algorithm for removing redundancy from a digitised message." by G. N. N. Martin. The symbols encoded by the Snow range coder are bits(0|1). The associated probabilities are not fix but change depending on the symbol mix seen so far. 
bit seen|new state ---------+----------------------------------------------- 0|256 - state_transition_table[256 - old_state];1|state_transition_table[old_state];state_transition_table={ 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 190, 191, 192, 194, 194, 195, 196, 197, 198, 199, 200, 201, 202, 202, 204, 205, 206, 207, 208, 209, 209, 210, 211, 212, 213, 215, 215, 216, 217, 218, 219, 220, 220, 222, 223, 224, 225, 226, 227, 227, 229, 229, 230, 231, 232, 234, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 248, 0, 0, 0, 0, 0, 0, 0};FIXME Range Coding of integers:------------------------- FIXME Neighboring Blocks:===================left and top are set to the respective blocks unless they are outside of the image in which case they are set to the Null block top-left is set to the top left block unless it is outside of the image in which case it is set to the left block if this block has no larger parent block or it is at the left side of its parent block and the top right block is not outside of the image then the top right block is used for top-right else the top-left block is used Null block y, cb, cr are 128 level, ref, mx and my are 0 Motion Vector 
Prediction:=========================1. the motion vectors of all the neighboring blocks are scaled to compensate for the difference of reference frames scaled_mv=(mv *(256 *(current_reference+1)/(mv.reference+1))+128)> the median of the scaled left
Definition: snow.txt:386
AV_RL32
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_RL32
Definition: bytestream.h:92
ff_mjpeg_find_marker
int ff_mjpeg_find_marker(MJpegDecodeContext *s, const uint8_t **buf_ptr, const uint8_t *buf_end, const uint8_t **unescaped_buf_ptr, int *unescaped_buf_size)
Definition: mjpegdec.c:2243
AV_CODEC_ID_AMV
@ AV_CODEC_ID_AMV
Definition: codec_id.h:159
OFFSET
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf default minimum maximum flags name is the option keep it simple and lowercase description are in without and describe what they for example set the foo of the bar offset is the offset of the field in your see the OFFSET() macro
AVCodecContext
main external API structure.
Definition: avcodec.h:439
FF_CODEC_RECEIVE_FRAME_CB
#define FF_CODEC_RECEIVE_FRAME_CB(func)
Definition: codec_internal.h:355
SHOW_UBITS
#define SHOW_UBITS(name, gb, num)
Definition: get_bits.h:247
buffer
the frame and frame reference mechanism is intended to as much as expensive copies of that data while still allowing the filters to produce correct results The data is stored in buffers represented by AVFrame structures Several references can point to the same frame buffer
Definition: filter_design.txt:49
AVCHROMA_LOC_CENTER
@ AVCHROMA_LOC_CENTER
MPEG-1 4:2:0, JPEG 4:2:0, H.263 4:2:0.
Definition: pixfmt.h:799
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:72
APP2
@ APP2
Definition: mjpeg.h:81
FF_HW_CALL
#define FF_HW_CALL(avctx, function,...)
Definition: hwaccel_internal.h:173
AVCodecContext::profile
int profile
profile
Definition: avcodec.h:1626
ffhwaccel
static const FFHWAccel * ffhwaccel(const AVHWAccel *codec)
Definition: hwaccel_internal.h:168
values
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return values
Definition: filter_design.txt:264
AVPixFmtDescriptor::comp
AVComponentDescriptor comp[4]
Parameters that describe how pixels are packed.
Definition: pixdesc.h:105
Windows::Graphics::DirectX::Direct3D11::p
IDirect3DDxgiInterfaceAccess _COM_Outptr_ void ** p
Definition: vsrc_gfxcapture_winrt.hpp:53
AV_PIX_FMT_YUV444P
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:78
ff_mjpeg_bits_dc_chrominance
const uint8_t ff_mjpeg_bits_dc_chrominance[]
Definition: jpegtabs.h:37
AVCodecContext::debug
int debug
debug
Definition: avcodec.h:1382
ff_mjpeg_decode_sof
int ff_mjpeg_decode_sof(MJpegDecodeContext *s)
Definition: mjpegdec.c:306
APP0
@ APP0
Definition: mjpeg.h:79
FF_DISABLE_DEPRECATION_WARNINGS
#define FF_DISABLE_DEPRECATION_WARNINGS
Definition: internal.h:72
AV_PIX_FMT_GBRP
@ AV_PIX_FMT_GBRP
planar GBR 4:4:4 24bpp
Definition: pixfmt.h:165
AVCodecContext::coded_width
int coded_width
Bitstream width / height, may be different from width/height e.g.
Definition: avcodec.h:615
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:200
AV_PIX_FMT_GRAY16LE
@ AV_PIX_FMT_GRAY16LE
Y , 16bpp, little-endian.
Definition: pixfmt.h:105
AV_PIX_FMT_YUV422P
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:77
mem.h
bytestream2_get_bufferu
static av_always_inline unsigned int bytestream2_get_bufferu(GetByteContext *g, uint8_t *dst, unsigned int size)
Definition: bytestream.h:277
SOI
@ SOI
Definition: mjpeg.h:70
mjpeg_decode_app
static int mjpeg_decode_app(MJpegDecodeContext *s)
Definition: mjpegdec.c:1874
av_stereo3d_create_side_data
AVStereo3D * av_stereo3d_create_side_data(AVFrame *frame)
Allocate a complete AVFrameSideData and add it to the frame.
Definition: stereo3d.c:54
avpriv_request_sample
#define avpriv_request_sample(...)
Definition: tableprint_vlc.h:37
AVFrameSideData
Structure to hold side data for an AVFrame.
Definition: frame.h:282
flush_put_bits
static void flush_put_bits(PutBitContext *s)
Pad the end of the output stream with zeros.
Definition: put_bits.h:153
SOF1
@ SOF1
Definition: mjpeg.h:40
w
uint8_t w
Definition: llvidencdsp.c:39
av_free
#define av_free(p)
Definition: tableprint_vlc.h:34
AVCodecContext::codec_tag
unsigned int codec_tag
fourcc (LSB first, so "ABCD" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A').
Definition: avcodec.h:464
ff_mjpeg_bits_dc_luminance
const FF_VISIBILITY_PUSH_HIDDEN uint8_t ff_mjpeg_bits_dc_luminance[]
Definition: jpegtabs.h:32
ff_mjpeg_build_vlc
int ff_mjpeg_build_vlc(VLC *vlc, const uint8_t *bits_table, const uint8_t *val_table, int is_ac, void *logctx)
Definition: mjpegdec_common.c:41
AVPacket
This structure stores compressed data.
Definition: packet.h:565
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:466
AV_OPT_TYPE_BOOL
@ AV_OPT_TYPE_BOOL
Underlying C type is int.
Definition: opt.h:327
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:35
av_fast_malloc
void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size)
Allocate a buffer, reusing the given one if large enough.
Definition: mem.c:557
AV_PIX_FMT_YUV411P
@ AV_PIX_FMT_YUV411P
planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples)
Definition: pixfmt.h:80
HWACCEL_VAAPI
#define HWACCEL_VAAPI(codec)
Definition: hwconfig.h:70
FFMAX3
#define FFMAX3(a, b, c)
Definition: macros.h:48
imgutils.h
bytestream2_init
static av_always_inline void bytestream2_init(GetByteContext *g, const uint8_t *buf, int buf_size)
Definition: bytestream.h:137
AVERROR_BUG
#define AVERROR_BUG
Internal bug, also see AVERROR_BUG2.
Definition: error.h:52
AVCodecContext::properties
attribute_deprecated unsigned properties
Properties of the stream that gets decoded.
Definition: avcodec.h:1645
MAX_COMPONENTS
#define MAX_COMPONENTS
Definition: mjpegdec.h:47
rgb
static const SheerTable rgb[2]
Definition: sheervideodata.h:32
block
The exact code depends on how similar the blocks are and how related they are to the block
Definition: filter_design.txt:207
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:61
MKTAG
#define MKTAG(a, b, c, d)
Definition: macros.h:55
h
h
Definition: vp9dsp_template.c:2070
SOF7
@ SOF7
Definition: mjpeg.h:46
AVStereo3D
Stereo 3D type: this structure describes how two videos are packed within a single video surface,...
Definition: stereo3d.h:203
av_image_check_size
int av_image_check_size(unsigned int w, unsigned int h, int log_offset, void *log_ctx)
Check if the given dimension of an image is valid, meaning that all bytes of the image can be address...
Definition: imgutils.c:318
width
#define width
Definition: dsp.h:89
AV_PROFILE_MJPEG_HUFFMAN_PROGRESSIVE_DCT
#define AV_PROFILE_MJPEG_HUFFMAN_PROGRESSIVE_DCT
Definition: defs.h:175
AV_RB24
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_WL32 unsigned int_TMPL AV_WL24 unsigned int_TMPL AV_WL16 uint64_t_TMPL AV_WB64 unsigned int_TMPL AV_WB32 unsigned int_TMPL AV_RB24
Definition: bytestream.h:97
PREDICT
#define PREDICT(ret, topleft, top, left, predictor)
Definition: mjpeg.h:118
put_bits.h
return_frame
static int return_frame(AVFilterContext *ctx, int is_second)
Definition: yadif_common.c:28
AV_FRAME_FLAG_LOSSLESS
#define AV_FRAME_FLAG_LOSSLESS
A decoder can use this flag to mark frames which were originally encoded losslessly.
Definition: frame.h:663
SOF6
@ SOF6
Definition: mjpeg.h:45
skip
static void BS_FUNC() skip(BSCTX *bc, unsigned int n)
Skip n bits in the buffer.
Definition: bitstream_template.h:383
src
#define src
Definition: vp8dsp.c:248
JPG
@ JPG
Definition: mjpeg.h:47
av_fourcc2str
#define av_fourcc2str(fourcc)
Definition: avutil.h:347