/* FFmpeg — mjpegdec.c (source listing extracted from generated documentation) */
1 /*
2  * MJPEG decoder
3  * Copyright (c) 2000, 2001 Fabrice Bellard
4  * Copyright (c) 2003 Alex Beregszaszi
5  * Copyright (c) 2003-2004 Michael Niedermayer
6  *
7  * Support for external huffman table, various fixes (AVID workaround),
8  * aspecting, new decode_frame mechanism and apple mjpeg-b support
9  * by Alex Beregszaszi
10  *
11  * This file is part of FFmpeg.
12  *
13  * FFmpeg is free software; you can redistribute it and/or
14  * modify it under the terms of the GNU Lesser General Public
15  * License as published by the Free Software Foundation; either
16  * version 2.1 of the License, or (at your option) any later version.
17  *
18  * FFmpeg is distributed in the hope that it will be useful,
19  * but WITHOUT ANY WARRANTY; without even the implied warranty of
20  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
21  * Lesser General Public License for more details.
22  *
23  * You should have received a copy of the GNU Lesser General Public
24  * License along with FFmpeg; if not, write to the Free Software
25  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
26  */
27 
28 /**
29  * @file
30  * MJPEG decoder.
31  */
32 
33 #include "config_components.h"
34 
35 #include "libavutil/attributes.h"
36 #include "libavutil/imgutils.h"
37 #include "libavutil/avassert.h"
38 #include "libavutil/mem.h"
39 #include "libavutil/opt.h"
40 #include "avcodec.h"
41 #include "blockdsp.h"
42 #include "codec_internal.h"
43 #include "copy_block.h"
44 #include "decode.h"
45 #include "exif.h"
46 #include "hwaccel_internal.h"
47 #include "hwconfig.h"
48 #include "idctdsp.h"
49 #include "internal.h"
50 #include "jpegtables.h"
51 #include "mjpeg.h"
52 #include "mjpegdec.h"
53 #include "jpeglsdec.h"
54 #include "profiles.h"
55 #include "put_bits.h"
56 
57 
59  const uint8_t **pbuf_ptr, size_t *pbuf_size);
60 
62 {
63  static const struct {
64  int class;
65  int index;
66  const uint8_t *bits;
67  const uint8_t *values;
68  int length;
69  } ht[] = {
71  ff_mjpeg_val_dc, 12 },
73  ff_mjpeg_val_dc, 12 },
82  };
83  int i, ret;
84 
85  for (i = 0; i < FF_ARRAY_ELEMS(ht); i++) {
86  ff_vlc_free(&s->vlcs[ht[i].class][ht[i].index]);
87  ret = ff_mjpeg_build_vlc(&s->vlcs[ht[i].class][ht[i].index],
88  ht[i].bits, ht[i].values,
89  ht[i].class == 1, s->avctx);
90  if (ret < 0)
91  return ret;
92 
93  if (ht[i].class < 2) {
94  memcpy(s->raw_huffman_lengths[ht[i].class][ht[i].index],
95  ht[i].bits + 1, 16);
96  memcpy(s->raw_huffman_values[ht[i].class][ht[i].index],
97  ht[i].values, ht[i].length);
98  }
99  }
100 
101  return 0;
102 }
103 
104 static void parse_avid(MJpegDecodeContext *s, uint8_t *buf, int len)
105 {
106  if (len > 12 && buf[12] == 1) /* 1 - NTSC */
107  s->interlace_polarity = 1;
108  if (len > 12 && buf[12] == 2) /* 2 - PAL */
109  s->interlace_polarity = 0;
110  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
111  av_log(s->avctx, AV_LOG_INFO, "AVID: len:%d %d\n", len, len > 12 ? buf[12] : -1);
112 }
113 
/**
 * (Re)initialize the IDCT context and the permuted scan table.
 *
 * Called from decode_init and again whenever bits_per_raw_sample changes
 * (see the SOF parsing code), since the IDCT selected by ff_idctdsp_init
 * may differ per sample depth.
 */
static void init_idct(AVCodecContext *avctx)
{
    MJpegDecodeContext *s = avctx->priv_data;

    ff_idctdsp_init(&s->idsp, avctx);
    /* Must run after ff_idctdsp_init: it consumes the permutation the
     * chosen IDCT implementation just installed in s->idsp. */
    ff_permute_scantable(s->permutated_scantable, ff_zigzag_direct,
                         s->idsp.idct_permutation);
}
122 
124 {
125  MJpegDecodeContext *s = avctx->priv_data;
126  int ret;
127 
128  if (!s->picture_ptr) {
129  s->picture = av_frame_alloc();
130  if (!s->picture)
131  return AVERROR(ENOMEM);
132  s->picture_ptr = s->picture;
133  }
134 
135  s->avctx = avctx;
136  ff_blockdsp_init(&s->bdsp);
137  init_idct(avctx);
138  s->buffer_size = 0;
139  s->buffer = NULL;
140  s->first_picture = 1;
141  s->got_picture = 0;
142  s->orig_height = avctx->coded_height;
144  avctx->colorspace = AVCOL_SPC_BT470BG;
145  s->hwaccel_pix_fmt = s->hwaccel_sw_pix_fmt = AV_PIX_FMT_NONE;
146 
147  if ((ret = init_default_huffman_tables(s)) < 0)
148  return ret;
149 
150 #if FF_API_MJPEG_EXTERN_HUFF
151  if (s->extern_huff && avctx->extradata) {
152  av_log(avctx, AV_LOG_INFO, "using external huffman table\n");
153  bytestream2_init(&s->gB, avctx->extradata, avctx->extradata_size);
154  if (ff_mjpeg_decode_dht(s)) {
155  av_log(avctx, AV_LOG_ERROR,
156  "error using external huffman table, switching back to internal\n");
157  if ((ret = init_default_huffman_tables(s)) < 0)
158  return ret;
159  }
160  }
161 #endif
162  if (avctx->field_order == AV_FIELD_BB) { /* quicktime icefloe 019 */
163  s->interlace_polarity = 1; /* bottom field first */
164  av_log(avctx, AV_LOG_DEBUG, "bottom field first\n");
165  } else if (avctx->field_order == AV_FIELD_UNKNOWN) {
166  if (avctx->codec_tag == AV_RL32("MJPG"))
167  s->interlace_polarity = 1;
168  }
169 
170  if (avctx->codec_id == AV_CODEC_ID_SMVJPEG) {
171  if (avctx->extradata_size >= 4)
172  s->smv_frames_per_jpeg = AV_RL32(avctx->extradata);
173 
174  if (s->smv_frames_per_jpeg <= 0) {
175  av_log(avctx, AV_LOG_ERROR, "Invalid number of frames per jpeg.\n");
176  return AVERROR_INVALIDDATA;
177  }
178 
179  s->smv_frame = av_frame_alloc();
180  if (!s->smv_frame)
181  return AVERROR(ENOMEM);
182  } else if (avctx->extradata_size > 8
183  && AV_RL32(avctx->extradata) == 0x2C
184  && AV_RL32(avctx->extradata + 4) == 0x18) {
185  parse_avid(s, avctx->extradata, avctx->extradata_size);
186  }
187 
188  if (avctx->codec->id == AV_CODEC_ID_AMV)
189  s->flipped = 1;
190 
191  return 0;
192 }
193 
194 
195 static int mjpeg_parse_len(MJpegDecodeContext *s, int *plen, const char *name)
196 {
197  int len = bytestream2_get_be16u(&s->gB);
198  if (len < 2 || bytestream2_get_bytes_left(&s->gB) < (len - 2)) {
199  av_log(s->avctx, AV_LOG_ERROR, "%s: invalid len %d\n", name, len);
200  return AVERROR_INVALIDDATA;
201  }
202  *plen = len - 2;
203  return 0;
204 }
205 
206 /* quantize tables */
208 {
209  int len, index, i;
210 
211  int ret = mjpeg_parse_len(s, &len, "dqt");
212  if (ret < 0)
213  return ret;
214 
215  while (len >= 65) {
216  uint8_t b = bytestream2_get_byteu(&s->gB);
217  int pr = b >> 4;
218  if (pr > 1) {
219  av_log(s->avctx, AV_LOG_ERROR, "dqt: invalid precision\n");
220  return AVERROR_INVALIDDATA;
221  }
222  if (len < (1 + 64 * (1 + pr)))
223  return AVERROR_INVALIDDATA;
224  index = b & 0x0F;
225  if (index >= 4)
226  return AVERROR_INVALIDDATA;
227  av_log(s->avctx, AV_LOG_DEBUG, "index=%d\n", index);
228  /* read quant table */
229  for (i = 0; i < 64; i++) {
230  s->quant_matrixes[index][i] = pr ? bytestream2_get_be16u(&s->gB) : bytestream2_get_byteu(&s->gB);
231  if (s->quant_matrixes[index][i] == 0) {
232  int log_level = s->avctx->err_recognition & AV_EF_EXPLODE ? AV_LOG_ERROR : AV_LOG_WARNING;
233  av_log(s->avctx, log_level, "dqt: 0 quant value\n");
234  if (s->avctx->err_recognition & AV_EF_EXPLODE)
235  return AVERROR_INVALIDDATA;
236  }
237  }
238 
239  // XXX FIXME fine-tune, and perhaps add dc too
240  s->qscale[index] = FFMAX(s->quant_matrixes[index][1],
241  s->quant_matrixes[index][8]) >> 1;
242  av_log(s->avctx, AV_LOG_DEBUG, "qscale[%d]: %d\n",
243  index, s->qscale[index]);
244  len -= 1 + 64 * (1 + pr);
245  }
246  return 0;
247 }
248 
249 /* decode huffman tables and build VLC decoders */
251 {
252  int len, index, i, class, n, v;
253  uint8_t bits_table[17];
254  uint8_t val_table[256];
255  int ret = 0;
256 
257  ret = mjpeg_parse_len(s, &len, "dht");
258  if (ret < 0)
259  return ret;
260 
261  while (len > 0) {
262  if (len < 17)
263  return AVERROR_INVALIDDATA;
264  uint8_t b = bytestream2_get_byteu(&s->gB);
265  class = b >> 4;
266  if (class >= 2)
267  return AVERROR_INVALIDDATA;
268  index = b & 0x0F;
269  if (index >= 4)
270  return AVERROR_INVALIDDATA;
271  n = 0;
272  for (i = 1; i <= 16; i++) {
273  bits_table[i] = bytestream2_get_byteu(&s->gB);
274  n += bits_table[i];
275  }
276  len -= 17;
277  if (len < n || n > 256)
278  return AVERROR_INVALIDDATA;
279 
280  for (i = 0; i < n; i++) {
281  v = bytestream2_get_byteu(&s->gB);
282  val_table[i] = v;
283  }
284  len -= n;
285 
286  /* build VLC and flush previous vlc if present */
287  ff_vlc_free(&s->vlcs[class][index]);
288  av_log(s->avctx, AV_LOG_DEBUG, "class=%d index=%d nb_codes=%d\n",
289  class, index, n);
290  if ((ret = ff_mjpeg_build_vlc(&s->vlcs[class][index], bits_table,
291  val_table, class > 0, s->avctx)) < 0)
292  return ret;
293 
294  if (class > 0) {
295  ff_vlc_free(&s->vlcs[2][index]);
296  if ((ret = ff_mjpeg_build_vlc(&s->vlcs[2][index], bits_table,
297  val_table, 0, s->avctx)) < 0)
298  return ret;
299  }
300 
301  for (i = 0; i < 16; i++)
302  s->raw_huffman_lengths[class][index][i] = bits_table[i + 1];
303  for (i = 0; i < 256; i++)
304  s->raw_huffman_values[class][index][i] = val_table[i];
305  }
306  return 0;
307 }
308 
310 {
311  int len, nb_components, i, width, height, bits, ret, size_change;
312  unsigned pix_fmt_id;
313  int h_count[MAX_COMPONENTS] = { 0 };
314  int v_count[MAX_COMPONENTS] = { 0 };
315 
316  s->cur_scan = 0;
317  memset(s->upscale_h, 0, sizeof(s->upscale_h));
318  memset(s->upscale_v, 0, sizeof(s->upscale_v));
319 
320  ret = mjpeg_parse_len(s, &len, "sof");
321  if (ret < 0)
322  return ret;
323  if (len < 6)
324  return AVERROR_INVALIDDATA;
325  bits = bytestream2_get_byteu(&s->gB);
326 
327  if (bits > 16 || bits < 1) {
328  av_log(s->avctx, AV_LOG_ERROR, "bits %d is invalid\n", bits);
329  return AVERROR_INVALIDDATA;
330  }
331 
332  if (s->avctx->bits_per_raw_sample != bits) {
333  av_log(s->avctx, s->avctx->bits_per_raw_sample > 0 ? AV_LOG_INFO : AV_LOG_DEBUG, "Changing bps from %d to %d\n", s->avctx->bits_per_raw_sample, bits);
334  s->avctx->bits_per_raw_sample = bits;
335  init_idct(s->avctx);
336  }
337  if (s->pegasus_rct)
338  bits = 9;
339  if (bits == 9 && !s->pegasus_rct)
340  s->rct = 1; // FIXME ugly
341 
342  if (s->lossless && s->avctx->lowres) {
343  av_log(s->avctx, AV_LOG_ERROR, "lowres is not possible with lossless jpeg\n");
344  return AVERROR(ENOSYS);
345  }
346 
347  height = bytestream2_get_be16u(&s->gB);
348  width = bytestream2_get_be16u(&s->gB);
349 
350  // HACK for odd_height.mov
351  if (s->interlaced && s->width == width && s->height == height + 1)
352  height = s->height;
353 
354  av_log(s->avctx, AV_LOG_DEBUG, "sof0: picture: %dx%d\n", width, height);
355  if (av_image_check_size(width, height, 0, s->avctx) < 0)
356  return AVERROR_INVALIDDATA;
357 
358  if (!s->progressive && !s->ls) {
359  // A valid frame requires at least 1 bit for DC + 1 bit for AC for each 8x8 block.
360  if (s->buf_size && (width + 7) / 8 * ((height + 7) / 8) > s->buf_size * 4LL)
361  return AVERROR_INVALIDDATA;
362  }
363 
364  nb_components = bytestream2_get_byteu(&s->gB);
365  if (nb_components <= 0 ||
366  nb_components > MAX_COMPONENTS)
367  return AVERROR_INVALIDDATA;
368  if (s->interlaced && (s->bottom_field == !s->interlace_polarity)) {
369  if (nb_components != s->nb_components) {
370  av_log(s->avctx, AV_LOG_ERROR,
371  "nb_components changing in interlaced picture\n");
372  return AVERROR_INVALIDDATA;
373  }
374  }
375  if (s->ls && !(bits <= 8 || nb_components == 1)) {
377  "JPEG-LS that is not <= 8 "
378  "bits/component or 16-bit gray");
379  return AVERROR_PATCHWELCOME;
380  }
381  len -= 6;
382  if (len != 3 * nb_components) {
383  av_log(s->avctx, AV_LOG_ERROR, "decode_sof0: error, len(%d) mismatch %d components\n", len, nb_components);
384  return AVERROR_INVALIDDATA;
385  }
386 
387  s->nb_components = nb_components;
388  s->h_max = 1;
389  s->v_max = 1;
390  for (i = 0; i < nb_components; i++) {
391  /* component id */
392  s->component_id[i] = bytestream2_get_byteu(&s->gB);
393  uint8_t b = bytestream2_get_byteu(&s->gB);
394  h_count[i] = b >> 4;
395  v_count[i] = b & 0x0F;
396  /* compute hmax and vmax (only used in interleaved case) */
397  if (h_count[i] > s->h_max)
398  s->h_max = h_count[i];
399  if (v_count[i] > s->v_max)
400  s->v_max = v_count[i];
401  s->quant_index[i] = bytestream2_get_byteu(&s->gB);
402  if (s->quant_index[i] >= 4) {
403  av_log(s->avctx, AV_LOG_ERROR, "quant_index is invalid\n");
404  return AVERROR_INVALIDDATA;
405  }
406  if (!h_count[i] || !v_count[i]) {
407  av_log(s->avctx, AV_LOG_ERROR,
408  "Invalid sampling factor in component %d %d:%d\n",
409  i, h_count[i], v_count[i]);
410  return AVERROR_INVALIDDATA;
411  }
412 
413  av_log(s->avctx, AV_LOG_DEBUG, "component %d %d:%d id: %d quant:%d\n",
414  i, h_count[i], v_count[i],
415  s->component_id[i], s->quant_index[i]);
416  }
417  if ( nb_components == 4
418  && s->component_id[0] == 'C'
419  && s->component_id[1] == 'M'
420  && s->component_id[2] == 'Y'
421  && s->component_id[3] == 'K')
422  s->adobe_transform = 0;
423 
424  if (s->ls && (s->h_max > 1 || s->v_max > 1)) {
425  avpriv_report_missing_feature(s->avctx, "Subsampling in JPEG-LS");
426  return AVERROR_PATCHWELCOME;
427  }
428 
429  if (s->bayer) {
430  if (nb_components == 2) {
431  /* Bayer images embedded in DNGs can contain 2 interleaved components and the
432  width stored in their SOF3 markers is the width of each one. We only output
433  a single component, therefore we need to adjust the output image width. We
434  handle the deinterleaving (but not the debayering) in this file. */
435  width *= 2;
436  }
437  /* They can also contain 1 component, which is double the width and half the height
438  of the final image (rows are interleaved). We don't handle the decoding in this
439  file, but leave that to the TIFF/DNG decoder. */
440  }
441 
442  /* if different size, realloc/alloc picture */
443  if (width != s->width || height != s->height || bits != s->bits ||
444  memcmp(s->h_count, h_count, sizeof(h_count)) ||
445  memcmp(s->v_count, v_count, sizeof(v_count))) {
446  size_change = 1;
447 
448  s->width = width;
449  s->height = height;
450  s->bits = bits;
451  memcpy(s->h_count, h_count, sizeof(h_count));
452  memcpy(s->v_count, v_count, sizeof(v_count));
453  s->interlaced = 0;
454  s->got_picture = 0;
455 
456  /* test interlaced mode */
457  if (s->first_picture &&
458  (s->multiscope != 2 || s->avctx->pkt_timebase.den >= 25 * s->avctx->pkt_timebase.num) &&
459  s->orig_height != 0 &&
460  s->height < ((s->orig_height * 3) / 4)) {
461  s->interlaced = 1;
462  s->bottom_field = s->interlace_polarity;
463  s->picture_ptr->flags |= AV_FRAME_FLAG_INTERLACED;
464  s->picture_ptr->flags |= AV_FRAME_FLAG_TOP_FIELD_FIRST * !s->interlace_polarity;
465  height *= 2;
466  }
467 
468  ret = ff_set_dimensions(s->avctx, width, height);
469  if (ret < 0)
470  return ret;
471 
472  if (s->avctx->codec_id != AV_CODEC_ID_SMVJPEG &&
473  (s->avctx->codec_tag == MKTAG('A', 'V', 'R', 'n') ||
474  s->avctx->codec_tag == MKTAG('A', 'V', 'D', 'J')) &&
475  s->orig_height < height)
476  s->avctx->height = AV_CEIL_RSHIFT(s->orig_height, s->avctx->lowres);
477 
478  s->first_picture = 0;
479  } else {
480  size_change = 0;
481  }
482 
483  if (s->avctx->codec_id == AV_CODEC_ID_SMVJPEG) {
484  s->avctx->height = s->avctx->coded_height / s->smv_frames_per_jpeg;
485  if (s->avctx->height <= 0)
486  return AVERROR_INVALIDDATA;
487  }
488  if (s->bayer && s->progressive) {
489  avpriv_request_sample(s->avctx, "progressively coded bayer picture");
490  return AVERROR_INVALIDDATA;
491  }
492 
493  if (s->got_picture && s->interlaced && (s->bottom_field == !s->interlace_polarity)) {
494  if (s->progressive) {
495  avpriv_request_sample(s->avctx, "progressively coded interlaced picture");
496  return AVERROR_INVALIDDATA;
497  }
498  } else {
499  if (s->v_max == 1 && s->h_max == 1 && s->lossless == 1 && (nb_components == 3 || nb_components == 4))
500  s->rgb = 1;
501  else if (!s->lossless)
502  s->rgb = 0;
503  /* XXX: not complete test ! */
504  pix_fmt_id = ((unsigned)s->h_count[0] << 28) | (s->v_count[0] << 24) |
505  (s->h_count[1] << 20) | (s->v_count[1] << 16) |
506  (s->h_count[2] << 12) | (s->v_count[2] << 8) |
507  (s->h_count[3] << 4) | s->v_count[3];
508  av_log(s->avctx, AV_LOG_DEBUG, "pix fmt id %x\n", pix_fmt_id);
509  /* NOTE we do not allocate pictures large enough for the possible
510  * padding of h/v_count being 4 */
511  if (!(pix_fmt_id & 0xD0D0D0D0))
512  pix_fmt_id -= (pix_fmt_id & 0xF0F0F0F0) >> 1;
513  if (!(pix_fmt_id & 0x0D0D0D0D))
514  pix_fmt_id -= (pix_fmt_id & 0x0F0F0F0F) >> 1;
515 
516  for (i = 0; i < 8; i++) {
517  int j = 6 + (i & 1) - (i & 6);
518  int is = (pix_fmt_id >> (4 * i)) & 0xF;
519  int js = (pix_fmt_id >> (4 * j)) & 0xF;
520 
521  if (is == 1 && js != 2 && (i < 2 || i > 5))
522  js = (pix_fmt_id >> ( 8 + 4 * (i & 1))) & 0xF;
523  if (is == 1 && js != 2 && (i < 2 || i > 5))
524  js = (pix_fmt_id >> (16 + 4 * (i & 1))) & 0xF;
525 
526  if (is == 1 && js == 2) {
527  if (i & 1) s->upscale_h[j / 2] = 1;
528  else s->upscale_v[j / 2] = 1;
529  }
530  }
531 
532  if (s->bayer) {
533  if (pix_fmt_id != 0x11110000 && pix_fmt_id != 0x11000000)
534  goto unk_pixfmt;
535  }
536 
537  switch (pix_fmt_id) {
538  case 0x11110000: /* for bayer-encoded huffman lossless JPEGs embedded in DNGs */
539  if (!s->bayer)
540  goto unk_pixfmt;
541  s->avctx->pix_fmt = AV_PIX_FMT_GRAY16LE;
542  break;
543  case 0x11111100:
544  if (s->rgb)
545  s->avctx->pix_fmt = s->bits <= 9 ? AV_PIX_FMT_BGR24 : AV_PIX_FMT_BGR48;
546  else {
547  if ( s->adobe_transform == 0
548  || s->component_id[0] == 'R' && s->component_id[1] == 'G' && s->component_id[2] == 'B') {
549  s->avctx->pix_fmt = s->bits <= 8 ? AV_PIX_FMT_GBRP : AV_PIX_FMT_GBRP16;
550  } else {
551  if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV444P : AV_PIX_FMT_YUVJ444P;
552  else s->avctx->pix_fmt = AV_PIX_FMT_YUV444P16;
553  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
554  }
555  }
556  av_assert0(s->nb_components == 3);
557  break;
558  case 0x11111111:
559  if (s->rgb)
560  s->avctx->pix_fmt = s->bits <= 9 ? AV_PIX_FMT_ABGR : AV_PIX_FMT_RGBA64;
561  else {
562  if (s->adobe_transform == 0 && s->bits <= 8) {
563  s->avctx->pix_fmt = AV_PIX_FMT_GBRAP;
564  } else {
565  s->avctx->pix_fmt = s->bits <= 8 ? AV_PIX_FMT_YUVA444P : AV_PIX_FMT_YUVA444P16;
566  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
567  }
568  }
569  av_assert0(s->nb_components == 4);
570  break;
571  case 0x11412100:
572  if (s->bits > 8)
573  goto unk_pixfmt;
574  if (s->component_id[0] == 'R' && s->component_id[1] == 'G' && s->component_id[2] == 'B') {
575  s->avctx->pix_fmt = AV_PIX_FMT_GBRP;
576  s->upscale_h[0] = 4;
577  s->upscale_h[1] = 0;
578  s->upscale_h[2] = 1;
579  } else {
580  goto unk_pixfmt;
581  }
582  break;
583  case 0x22111122:
584  case 0x22111111:
585  if (s->adobe_transform == 0 && s->bits <= 8) {
586  s->avctx->pix_fmt = AV_PIX_FMT_GBRAP;
587  s->upscale_v[1] = s->upscale_v[2] = 1;
588  s->upscale_h[1] = s->upscale_h[2] = 1;
589  } else if (s->adobe_transform == 2 && s->bits <= 8) {
590  s->avctx->pix_fmt = AV_PIX_FMT_YUVA444P;
591  s->upscale_v[1] = s->upscale_v[2] = 1;
592  s->upscale_h[1] = s->upscale_h[2] = 1;
593  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
594  } else {
595  if (s->bits <= 8) s->avctx->pix_fmt = AV_PIX_FMT_YUVA420P;
596  else s->avctx->pix_fmt = AV_PIX_FMT_YUVA420P16;
597  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
598  }
599  av_assert0(s->nb_components == 4);
600  break;
601  case 0x12121100:
602  case 0x22122100:
603  case 0x21211100:
604  case 0x21112100:
605  case 0x22211200:
606  case 0x22221100:
607  case 0x22112200:
608  case 0x11222200:
609  if (s->bits > 8)
610  goto unk_pixfmt;
611  if (s->adobe_transform == 0 || s->component_id[0] == 'R' &&
612  s->component_id[1] == 'G' && s->component_id[2] == 'B') {
613  s->avctx->pix_fmt = AV_PIX_FMT_GBRP;
614  } else {
615  s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV444P : AV_PIX_FMT_YUVJ444P;
616  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
617  }
618  break;
619  case 0x11000000:
620  case 0x13000000:
621  case 0x14000000:
622  case 0x31000000:
623  case 0x33000000:
624  case 0x34000000:
625  case 0x41000000:
626  case 0x43000000:
627  case 0x44000000:
628  if (s->bits <= 8)
629  s->avctx->pix_fmt = s->force_pal8 ? AV_PIX_FMT_PAL8 : AV_PIX_FMT_GRAY8;
630  else
631  s->avctx->pix_fmt = AV_PIX_FMT_GRAY16;
632  break;
633  case 0x12111100:
634  case 0x14121200:
635  case 0x14111100:
636  case 0x22211100:
637  case 0x22112100:
638  if (s->component_id[0] == 'R' && s->component_id[1] == 'G' && s->component_id[2] == 'B') {
639  if (s->bits <= 8) s->avctx->pix_fmt = AV_PIX_FMT_GBRP;
640  else
641  goto unk_pixfmt;
642  s->upscale_v[1] = s->upscale_v[2] = 1;
643  } else {
644  if (pix_fmt_id == 0x14111100)
645  s->upscale_v[1] = s->upscale_v[2] = 1;
646  if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV440P : AV_PIX_FMT_YUVJ440P;
647  else
648  goto unk_pixfmt;
649  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
650  }
651  break;
652  case 0x21111100:
653  if (s->component_id[0] == 'R' && s->component_id[1] == 'G' && s->component_id[2] == 'B') {
654  if (s->bits <= 8) s->avctx->pix_fmt = AV_PIX_FMT_GBRP;
655  else
656  goto unk_pixfmt;
657  s->upscale_h[1] = s->upscale_h[2] = 1;
658  } else {
659  if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV422P : AV_PIX_FMT_YUVJ422P;
660  else s->avctx->pix_fmt = AV_PIX_FMT_YUV422P16;
661  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
662  }
663  break;
664  case 0x11311100:
665  if (s->bits > 8)
666  goto unk_pixfmt;
667  if (s->component_id[0] == 'R' && s->component_id[1] == 'G' && s->component_id[2] == 'B')
668  s->avctx->pix_fmt = AV_PIX_FMT_GBRP;
669  else
670  goto unk_pixfmt;
671  s->upscale_h[0] = s->upscale_h[2] = 2;
672  break;
673  case 0x31111100:
674  if (s->bits > 8)
675  goto unk_pixfmt;
676  s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV444P : AV_PIX_FMT_YUVJ444P;
677  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
678  s->upscale_h[1] = s->upscale_h[2] = 2;
679  break;
680  case 0x22121100:
681  case 0x22111200:
682  case 0x41211100:
683  if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV422P : AV_PIX_FMT_YUVJ422P;
684  else
685  goto unk_pixfmt;
686  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
687  break;
688  case 0x22111100:
689  case 0x23111100:
690  case 0x42111100:
691  case 0x24111100:
692  if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV420P : AV_PIX_FMT_YUVJ420P;
693  else s->avctx->pix_fmt = AV_PIX_FMT_YUV420P16;
694  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
695  if (pix_fmt_id == 0x42111100) {
696  if (s->bits > 8)
697  goto unk_pixfmt;
698  s->upscale_h[1] = s->upscale_h[2] = 1;
699  } else if (pix_fmt_id == 0x24111100) {
700  if (s->bits > 8)
701  goto unk_pixfmt;
702  s->upscale_v[1] = s->upscale_v[2] = 1;
703  } else if (pix_fmt_id == 0x23111100) {
704  if (s->bits > 8)
705  goto unk_pixfmt;
706  s->upscale_v[1] = s->upscale_v[2] = 2;
707  }
708  break;
709  case 0x41111100:
710  if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV411P : AV_PIX_FMT_YUVJ411P;
711  else
712  goto unk_pixfmt;
713  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
714  break;
715  default:
716  unk_pixfmt:
717  avpriv_report_missing_feature(s->avctx, "Pixel format 0x%x bits:%d", pix_fmt_id, s->bits);
718  memset(s->upscale_h, 0, sizeof(s->upscale_h));
719  memset(s->upscale_v, 0, sizeof(s->upscale_v));
720  return AVERROR_PATCHWELCOME;
721  }
722  if ((AV_RB32(s->upscale_h) || AV_RB32(s->upscale_v)) && s->avctx->lowres) {
723  avpriv_report_missing_feature(s->avctx, "Lowres for weird subsampling");
724  return AVERROR_PATCHWELCOME;
725  }
726  if (s->ls) {
727  memset(s->upscale_h, 0, sizeof(s->upscale_h));
728  memset(s->upscale_v, 0, sizeof(s->upscale_v));
729  if (s->nb_components == 3) {
730  s->avctx->pix_fmt = AV_PIX_FMT_RGB24;
731  } else if (s->nb_components != 1) {
732  av_log(s->avctx, AV_LOG_ERROR, "Unsupported number of components %d\n", s->nb_components);
733  return AVERROR_PATCHWELCOME;
734  } else if ((s->palette_index || s->force_pal8) && s->bits <= 8)
735  s->avctx->pix_fmt = AV_PIX_FMT_PAL8;
736  else if (s->bits <= 8)
737  s->avctx->pix_fmt = AV_PIX_FMT_GRAY8;
738  else
739  s->avctx->pix_fmt = AV_PIX_FMT_GRAY16;
740  }
741 
742  s->pix_desc = av_pix_fmt_desc_get(s->avctx->pix_fmt);
743  if (!s->pix_desc) {
744  av_log(s->avctx, AV_LOG_ERROR, "Could not get a pixel format descriptor.\n");
745  return AVERROR_BUG;
746  }
747 
748  if (s->avctx->pix_fmt == s->hwaccel_sw_pix_fmt && !size_change) {
749  s->avctx->pix_fmt = s->hwaccel_pix_fmt;
750  } else {
751  enum AVPixelFormat pix_fmts[] = {
752 #if CONFIG_MJPEG_NVDEC_HWACCEL
754 #endif
755 #if CONFIG_MJPEG_VAAPI_HWACCEL
757 #endif
758  s->avctx->pix_fmt,
760  };
761  s->hwaccel_pix_fmt = ff_get_format(s->avctx, pix_fmts);
762  if (s->hwaccel_pix_fmt < 0)
763  return AVERROR(EINVAL);
764 
765  s->hwaccel_sw_pix_fmt = s->avctx->pix_fmt;
766  s->avctx->pix_fmt = s->hwaccel_pix_fmt;
767  }
768 
769  if (s->avctx->skip_frame == AVDISCARD_ALL) {
770  s->picture_ptr->pict_type = AV_PICTURE_TYPE_I;
771  s->picture_ptr->flags |= AV_FRAME_FLAG_KEY;
772  s->got_picture = 1;
773  return 0;
774  }
775 
776  av_frame_unref(s->picture_ptr);
777  ret = ff_get_buffer(s->avctx, s->picture_ptr, AV_GET_BUFFER_FLAG_REF);
778  if (ret < 0)
779  return ret;
780  s->picture_ptr->pict_type = AV_PICTURE_TYPE_I;
781  s->picture_ptr->flags |= AV_FRAME_FLAG_KEY;
782  s->got_picture = 1;
783 
784  // Lets clear the palette to avoid leaving uninitialized values in it
785  if (s->avctx->pix_fmt == AV_PIX_FMT_PAL8)
786  memset(s->picture_ptr->data[1], 0, 1024);
787 
788  for (i = 0; i < 4; i++)
789  s->linesize[i] = s->picture_ptr->linesize[i] << s->interlaced;
790 
791  ff_dlog(s->avctx, "%d %d %d %d %d %d\n",
792  s->width, s->height, s->linesize[0], s->linesize[1],
793  s->interlaced, s->avctx->height);
794 
795  }
796 
797  if ((s->rgb && !s->lossless && !s->ls) ||
798  (!s->rgb && s->ls && s->nb_components > 1) ||
799  (s->avctx->pix_fmt == AV_PIX_FMT_PAL8 && !s->ls)
800  ) {
801  av_log(s->avctx, AV_LOG_ERROR, "Unsupported coding and pixel format combination\n");
802  return AVERROR_PATCHWELCOME;
803  }
804 
805  /* totally blank picture as progressive JPEG will only add details to it */
806  if (s->progressive) {
807  int bw = (width + s->h_max * 8 - 1) / (s->h_max * 8);
808  int bh = (height + s->v_max * 8 - 1) / (s->v_max * 8);
809  for (i = 0; i < s->nb_components; i++) {
810  int size = bw * bh * s->h_count[i] * s->v_count[i];
811  av_freep(&s->blocks[i]);
812  av_freep(&s->last_nnz[i]);
813  s->blocks[i] = av_calloc(size, sizeof(**s->blocks));
814  s->last_nnz[i] = av_calloc(size, sizeof(**s->last_nnz));
815  if (!s->blocks[i] || !s->last_nnz[i])
816  return AVERROR(ENOMEM);
817  s->block_stride[i] = bw * s->h_count[i];
818  }
819  memset(s->coefs_finished, 0, sizeof(s->coefs_finished));
820  }
821 
822  if (s->avctx->hwaccel) {
823  const FFHWAccel *hwaccel = ffhwaccel(s->avctx->hwaccel);
824  s->hwaccel_picture_private =
825  av_mallocz(hwaccel->frame_priv_data_size);
826  if (!s->hwaccel_picture_private)
827  return AVERROR(ENOMEM);
828 
829  ret = hwaccel->start_frame(s->avctx, NULL, s->raw_image_buffer,
830  s->raw_image_buffer_size);
831  if (ret < 0)
832  return ret;
833  }
834 
835  return 0;
836 }
837 
838 static inline int mjpeg_decode_dc(MJpegDecodeContext *s, int dc_index, int *val)
839 {
840  int code;
841  code = get_vlc2(&s->gb, s->vlcs[0][dc_index].table, 9, 2);
842  if (code < 0 || code > 16) {
843  av_log(s->avctx, AV_LOG_ERROR,
844  "mjpeg_decode_dc: bad vlc: %d\n", dc_index);
845  return AVERROR_INVALIDDATA;
846  }
847 
848  *val = code ? get_xbits(&s->gb, code) : 0;
849  return 0;
850 }
851 
/**
 * Decode and dequantize one 8x8 block (baseline sequential JPEG).
 *
 * The DC coefficient is decoded as a difference against the component
 * predictor (s->last_dc) and stored at block[0]; AC coefficients are
 * decoded as (run, size) pairs, dequantized, and written into the block
 * at zigzag positions remapped through s->permutated_scantable.
 *
 * @param s             decoder context (bitstream in s->gb)
 * @param block         output coefficient block
 * @param component     component index, selects the DC predictor
 * @param dc_index      DC Huffman table index (class 0)
 * @param ac_index      AC Huffman table index (class 1)
 * @param quant_matrix  dequantization table, natural order
 * @return 0 on success, AVERROR_INVALIDDATA on bitstream errors
 */
static int decode_block(MJpegDecodeContext *s, int16_t *block, int component,
                        int dc_index, int ac_index, uint16_t *quant_matrix)
{
    int code, i, j, level, val;

    /* DC coef */
    int ret = mjpeg_decode_dc(s, dc_index, &val);
    if (ret < 0)
        return ret;

    /* Differential DC: add the previous DC of this component, then clip
     * to int16 range (the cast to unsigned avoids signed overflow UB). */
    val = val * (unsigned)quant_matrix[0] + s->last_dc[component];
    s->last_dc[component] = val;
    block[0] = av_clip_int16(val);
    /* AC coefs */
    i = 0;
    {
        OPEN_READER(re, &s->gb);
        do {
            UPDATE_CACHE(re, &s->gb);
            GET_VLC(code, re, &s->gb, s->vlcs[1][ac_index].table, 9, 2);

            /* High nibble = run of zero coefficients, low nibble = size
             * (number of magnitude bits); size 0 with run 0 is EOB, but
             * that case simply advances i by 0 and the loop's i < 63
             * check — NOTE(review): EOB here relies on a (0,0) code
             * mapping i past 63 via the VLC table construction; confirm
             * against ff_mjpeg_build_vlc. */
            i += ((unsigned)code) >> 4;
            code &= 0xf;
            if (code) {
                // GET_VLC updates the cache if parsing reaches the second stage.
                // So we have at least MIN_CACHE_BITS - 9 > 15 bits left here
                // and don't need to refill the cache.
                {
                    /* Branchless sign extension of the 'code'-bit magnitude
                     * taken from the top of the bit cache. */
                    int cache = GET_CACHE(re, &s->gb);
                    int sign = (~cache) >> 31;
                    level = (NEG_USR32(sign ^ cache,code) ^ sign) - sign;
                }

                LAST_SKIP_BITS(re, &s->gb, code);

                if (i > 63) {
                    av_log(s->avctx, AV_LOG_ERROR, "error count: %d\n", i);
                    return AVERROR_INVALIDDATA;
                }
                j = s->permutated_scantable[i];
                block[j] = level * quant_matrix[i];
            }
        } while (i < 63);
        CLOSE_READER(re, &s->gb);
    }

    return 0;
}
901 
903  int component, int dc_index,
904  uint16_t *quant_matrix, int Al)
905 {
906  unsigned val;
907  s->bdsp.clear_block(block);
908  int ret = mjpeg_decode_dc(s, dc_index, &val);
909  if (ret < 0)
910  return ret;
911 
912  val = (val * (quant_matrix[0] << Al)) + s->last_dc[component];
913  s->last_dc[component] = val;
914  block[0] = val;
915  return 0;
916 }
917 
918 /* decode block and dequantize - progressive JPEG version */
920  uint8_t *last_nnz, int ac_index,
921  uint16_t *quant_matrix,
922  int Ss, int Se, int Al, int *EOBRUN)
923 {
924  int code, i, j, val, run;
925  unsigned level;
926 
927  if (*EOBRUN) {
928  (*EOBRUN)--;
929  return 0;
930  }
931 
932  {
933  OPEN_READER(re, &s->gb);
934  for (i = Ss; ; i++) {
935  UPDATE_CACHE(re, &s->gb);
936  GET_VLC(code, re, &s->gb, s->vlcs[2][ac_index].table, 9, 2);
937 
938  run = ((unsigned) code) >> 4;
939  code &= 0xF;
940  if (code) {
941  i += run;
942 
943  {
944  int cache = GET_CACHE(re, &s->gb);
945  int sign = (~cache) >> 31;
946  level = (NEG_USR32(sign ^ cache,code) ^ sign) - sign;
947  }
948 
949  LAST_SKIP_BITS(re, &s->gb, code);
950 
951  if (i >= Se) {
952  if (i == Se) {
953  j = s->permutated_scantable[Se];
954  block[j] = level * (quant_matrix[Se] << Al);
955  break;
956  }
957  av_log(s->avctx, AV_LOG_ERROR, "error count: %d\n", i);
958  return AVERROR_INVALIDDATA;
959  }
960  j = s->permutated_scantable[i];
961  block[j] = level * (quant_matrix[i] << Al);
962  } else {
963  if (run == 0xF) { // ZRL - skip 15 coefficients
964  i += 15;
965  if (i >= Se) {
966  av_log(s->avctx, AV_LOG_ERROR, "ZRL overflow: %d\n", i);
967  return AVERROR_INVALIDDATA;
968  }
969  } else {
970  val = (1 << run);
971  if (run) {
972  // Given that GET_VLC reloads internally, we always
973  // have at least 16 bits in the cache here.
974  val += NEG_USR32(GET_CACHE(re, &s->gb), run);
975  LAST_SKIP_BITS(re, &s->gb, run);
976  }
977  *EOBRUN = val - 1;
978  break;
979  }
980  }
981  }
982  CLOSE_READER(re, &s->gb);
983  }
984 
985  if (i > *last_nnz)
986  *last_nnz = i;
987 
988  return 0;
989 }
990 
/* Refinement helper for the progressive AC refinement pass: reads one
 * correction bit and, if set, nudges the already-nonzero coefficient
 * block[j] by one quantizer step (shifted by the successive-approximation
 * low bit Al), in the direction of its current sign.
 * NOTE: expands against caller-scope variables (re, s, block, quant_matrix,
 * i, sign, Al) — only usable inside the refinement decoder. */
#define REFINE_BIT(j) { \
    UPDATE_CACHE(re, &s->gb); \
    sign = block[j] >> 15; \
    block[j] += SHOW_UBITS(re, &s->gb, 1) * \
                ((quant_matrix[i] ^ sign) - sign) << Al; \
    LAST_SKIP_BITS(re, &s->gb, 1); \
}

/* Skips 'run' zero-valued coefficients starting at scan position i,
 * emitting refinement bits (via REFINE_BIT) for any nonzero coefficients
 * passed along the way. Past 'last' (the last previously-nonzero index)
 * the remaining run can be skipped in one step; overrunning Se is a
 * bitstream error. Expands against caller-scope i, j, run, last, Se,
 * block, s. */
#define ZERO_RUN \
for (; ; i++) { \
    if (i > last) { \
        i += run; \
        if (i > Se) { \
            av_log(s->avctx, AV_LOG_ERROR, "error count: %d\n", i); \
            return -1; \
        } \
        break; \
    } \
    j = s->permutated_scantable[i]; \
    if (block[j]) \
        REFINE_BIT(j) \
    else if (run-- == 0) \
        break; \
}
1015 
/* decode block and dequantize - progressive JPEG refinement pass */
/* NOTE(review): the opening line of this function's signature is missing
 * from this listing; the parameter list below continues a declaration
 * presumably of the form
 *   static int decode_block_refinement(MJpegDecodeContext *s, int16_t *block, ...
 * -- confirm against the full source. */
                                  uint8_t *last_nnz,
                                  int ac_index, uint16_t *quant_matrix,
                                  int Ss, int Se, int Al, int *EOBRUN)
{
    int code, i = Ss, j, sign, val, run;
    /* Highest coefficient index decoded in earlier passes for this block;
     * beyond it no nonzero coefficients can need refinement. */
    int last = FFMIN(Se, *last_nnz);

    OPEN_READER(re, &s->gb);
    if (*EOBRUN) {
        /* Inside an end-of-band run: no new nonzero coefficients in this
         * block, only refinement bits for existing ones (loop below). */
        (*EOBRUN)--;
    } else {
        for (; ; i++) {
            UPDATE_CACHE(re, &s->gb);
            GET_VLC(code, re, &s->gb, s->vlcs[2][ac_index].table, 9, 2);

            if (code & 0xF) {
                /* New nonzero coefficient: in a refinement pass its size is
                 * always 1; the next bit is the sign (1 -> +, 0 -> -). */
                run = ((unsigned) code) >> 4;
                val = SHOW_UBITS(re, &s->gb, 1);
                LAST_SKIP_BITS(re, &s->gb, 1);
                ZERO_RUN;
                j = s->permutated_scantable[i];
                val--;  /* turn 0/1 into an all-ones/zero mask */
                block[j] = ((quant_matrix[i] << Al) ^ val) - val;
                if (i == Se) {
                    if (i > *last_nnz)
                        *last_nnz = i;
                    CLOSE_READER(re, &s->gb);
                    return 0;
                }
            } else {
                run = ((unsigned) code) >> 4;
                if (run == 0xF) {
                    /* ZRL: skip 16 zero coefficients, refining any nonzero
                     * ones that are passed over. */
                    ZERO_RUN;
                } else {
                    /* End-of-band run: length (1 << run) plus `run` extra
                     * bits, covering this block and EOBRUN following ones. */
                    val = run;
                    run = (1 << run);
                    if (val) {
                        // Given that GET_VLC reloads internally, we always
                        // have at least 16 bits in the cache here.
                        run += SHOW_UBITS(re, &s->gb, val);
                        LAST_SKIP_BITS(re, &s->gb, val);
                    }
                    *EOBRUN = run - 1;
                    break;
                }
            }
        }

        if (i > *last_nnz)
            *last_nnz = i;
    }

    /* Refine the remaining already-nonzero coefficients up to `last`. */
    for (; i <= last; i++) {
        j = s->permutated_scantable[i];
        if (block[j])
            REFINE_BIT(j)
    }
    CLOSE_READER(re, &s->gb);

    return 0;
}
1079 #undef REFINE_BIT
1080 #undef ZERO_RUN
1081 
/* Handles 1 to 4 components */
/* NOTE(review): the signature line of this function is missing from this
 * listing; by its locals it is the lossless-JPEG RGB/Bayer scan decoder
 * (ljpeg_decode_rgb_scan) taking the decoder context -- confirm against
 * the full source. */
{
    int nb_components = s->nb_components_sos;
    int predictor = s->Ss;            /* lossless JPEG reuses Ss as the predictor id */
    int point_transform = s->Al;      /* and Al as the point transform */
    int i, mb_x, mb_y;
    unsigned width;
    uint16_t (*buffer)[4];            /* one row of reconstructed samples, 4 comps max */
    int left[4], top[4], topleft[4];  /* prediction neighbors per component */
    const int linesize = s->linesize[0];
    const int mask = ((1 << s->bits) - 1) << point_transform;
    int resync_mb_y = 0;
    int resync_mb_x = 0;
    int vpred[6];                     /* vertical predictors for the bayer first-line path */
    int ret;

    /* Sanity-check the component layout for this scan type. */
    if (!s->bayer && s->nb_components < 3)
        return AVERROR_INVALIDDATA;
    if (s->bayer && s->nb_components > 2)
        return AVERROR_INVALIDDATA;
    if (s->nb_components <= 0 || s->nb_components > 4)
        return AVERROR_INVALIDDATA;
    if (s->v_max != 1 || s->h_max != 1 || !s->lossless)
        return AVERROR_INVALIDDATA;
    if (s->bayer) {
        if (s->rct || s->pegasus_rct)
            return AVERROR_INVALIDDATA;
    }


    for (i = 0; i < 6; i++)
        vpred[i] = 1 << (s->bits - 1);

    if (s->bayer)
        width = s->mb_width / nb_components; /* Interleaved, width stored is the total so need to divide */
    else
        width = s->mb_width;

    av_fast_malloc(&s->ljpeg_buffer, &s->ljpeg_buffer_size, width * 4 * sizeof(s->ljpeg_buffer[0][0]));
    if (!s->ljpeg_buffer)
        return AVERROR(ENOMEM);

    buffer = s->ljpeg_buffer;

    /* Neutral mid-range value seeds prediction for the first row. */
    for (i = 0; i < 4; i++)
        buffer[0][i] = 1 << (s->bits - 1);

    s->restart_count = -1;

    for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
        uint8_t *ptr = s->picture_ptr->data[0] + (linesize * mb_y);

        if (s->interlaced && s->bottom_field)
            ptr += linesize >> 1;

        /* buffer[] still holds the previous row; seed the neighbors from it. */
        for (i = 0; i < 4; i++)
            top[i] = left[i] = topleft[i] = buffer[0][i];

        for (mb_x = 0; mb_x < width; mb_x++) {
            int modified_predictor = predictor;
            int restart;

            ret = ff_mjpeg_handle_restart(s, &restart);
            if (ret < 0)
                return ret;
            if (restart) {
                /* After a restart marker DC prediction resets. */
                resync_mb_x = mb_x;
                resync_mb_y = mb_y;
                for (i = 0; i < 4; i++)
                    top[i] = left[i] = topleft[i] = 1 << (s->bits - 1);
            }

            if (get_bits_left(&s->gb) < 1) {
                av_log(s->avctx, AV_LOG_ERROR, "bitstream end in rgb_scan\n");
                return AVERROR_INVALIDDATA;
            }

            /* First row after a resync (or first column) can only predict
             * from the left: force predictor 1. */
            if (mb_y == resync_mb_y || mb_y == resync_mb_y + 1 && mb_x < resync_mb_x || !mb_x)
                modified_predictor = 1;

            for (i = 0; i < nb_components; i++) {
                int pred, dc;

                topleft[i] = top[i];
                top[i] = buffer[mb_x][i];

                ret = mjpeg_decode_dc(s, s->dc_index[i], &dc);
                if (ret < 0)
                    return ret;

                if (!s->bayer || mb_x) {
                    pred = left[i];
                } else { /* This path runs only for the first line in bayer images */
                    vpred[i] += dc;
                    pred = vpred[i] - dc;
                }

                PREDICT(pred, topleft[i], top[i], pred, modified_predictor);

                left[i] = buffer[mb_x][i] =
                    mask & (pred + (unsigned)(dc * (1 << point_transform)));
            }
        }
        /* Row finished: convert the decoded row to the output layout. */
        if (s->rct && s->nb_components == 4) {
            /* Reversible color transform, 4 components (with alpha). */
            for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
                ptr[4 * mb_x + 2] = buffer[mb_x][0] - ((buffer[mb_x][1] + buffer[mb_x][2] - 0x200) >> 2);
                ptr[4 * mb_x + 1] = buffer[mb_x][1] + ptr[4 * mb_x + 2];
                ptr[4 * mb_x + 3] = buffer[mb_x][2] + ptr[4 * mb_x + 2];
                ptr[4 * mb_x + 0] = buffer[mb_x][3];
            }
        } else if (s->nb_components == 4) {
            for (i = 0; i < nb_components; i++) {
                int c = s->comp_index[i];
                if (s->bits <= 8) {
                    for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
                        ptr[4 * mb_x + 3 - c] = buffer[mb_x][i];
                    }
                } else if (s->bits == 9) {
                    return AVERROR_PATCHWELCOME;
                } else {
                    for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
                        ((uint16_t*)ptr)[4 * mb_x + c] = buffer[mb_x][i];
                    }
                }
            }
        } else if (s->rct) {
            /* Reversible color transform, 3 components. */
            for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
                ptr[3 * mb_x + 1] = buffer[mb_x][0] - ((buffer[mb_x][1] + buffer[mb_x][2] - 0x200) >> 2);
                ptr[3 * mb_x + 0] = buffer[mb_x][1] + ptr[3 * mb_x + 1];
                ptr[3 * mb_x + 2] = buffer[mb_x][2] + ptr[3 * mb_x + 1];
            }
        } else if (s->pegasus_rct) {
            /* Pegasus variant of the RCT (no 0x200 bias). */
            for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
                ptr[3 * mb_x + 1] = buffer[mb_x][0] - ((buffer[mb_x][1] + buffer[mb_x][2]) >> 2);
                ptr[3 * mb_x + 0] = buffer[mb_x][1] + ptr[3 * mb_x + 1];
                ptr[3 * mb_x + 2] = buffer[mb_x][2] + ptr[3 * mb_x + 1];
            }
        } else if (s->bayer) {
            if (s->bits <= 8)
                return AVERROR_PATCHWELCOME;
            if (nb_components == 1) {
                /* Leave decoding to the TIFF/DNG decoder (see comment in ff_mjpeg_decode_sof) */
                for (mb_x = 0; mb_x < width; mb_x++)
                    ((uint16_t*)ptr)[mb_x] = buffer[mb_x][0];
            } else if (nb_components == 2) {
                for (mb_x = 0; mb_x < width; mb_x++) {
                    ((uint16_t*)ptr)[2 * mb_x + 0] = buffer[mb_x][0];
                    ((uint16_t*)ptr)[2 * mb_x + 1] = buffer[mb_x][1];
                }
            }
        } else {
            for (i = 0; i < nb_components; i++) {
                int c = s->comp_index[i];
                if (s->bits <= 8) {
                    for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
                        ptr[3 * mb_x + 2 - c] = buffer[mb_x][i];
                    }
                } else if (s->bits == 9) {
                    return AVERROR_PATCHWELCOME;
                } else {
                    for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
                        ((uint16_t*)ptr)[3 * mb_x + 2 - c] = buffer[mb_x][i];
                    }
                }
            }
        }
    }
    return 0;
}
1252 
/* NOTE(review): the signature line of this function is missing from this
 * listing; by its locals it is the lossless-JPEG YUV scan decoder
 * (ljpeg_decode_yuv_scan) taking the decoder context -- confirm against
 * the full source. */
{
    int predictor = s->Ss;         /* lossless JPEG reuses Ss as the predictor id */
    int point_transform = s->Al;   /* and Al as the point transform */
    int nb_components = s->nb_components_sos;
    int i, mb_x, mb_y, mask;
    int bits = (s->bits + 7) & ~7; /* storage bit depth, rounded up to 8/16 */
    int resync_mb_y = 0;
    int resync_mb_x = 0;
    int ret;

    /* Samples are stored left-aligned in 8/16-bit containers. */
    point_transform += bits - s->bits;
    mask = ((1 << s->bits) - 1) << point_transform;

    av_assert0(nb_components >= 1 && nb_components <= 4);

    s->restart_count = -1;

    for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
        for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
            int restart;
            ret = ff_mjpeg_handle_restart(s, &restart);
            if (ret < 0)
                return ret;
            if (restart) {
                resync_mb_x = mb_x;
                resync_mb_y = mb_y;
            }

            if (get_bits_left(&s->gb) < 1) {
                av_log(s->avctx, AV_LOG_ERROR, "bitstream end in yuv_scan\n");
                return AVERROR_INVALIDDATA;
            }

            /* Border / resync MBs need special-cased prediction because
             * some neighbors are unavailable; the common interior case is
             * handled by the simpler loop in the else branch. */
            if (!mb_x || mb_y == resync_mb_y || mb_y == resync_mb_y + 1 && mb_x < resync_mb_x || s->interlaced) {
                int toprow  = mb_y == resync_mb_y || mb_y == resync_mb_y + 1 && mb_x < resync_mb_x;
                int leftcol = !mb_x || mb_y == resync_mb_y && mb_x == resync_mb_x;
                for (i = 0; i < nb_components; i++) {
                    uint8_t *ptr;
                    uint16_t *ptr16;
                    int n, h, v, x, y, c, j, linesize;
                    n = s->nb_blocks[i];
                    c = s->comp_index[i];
                    h = s->h_scount[i];
                    v = s->v_scount[i];
                    x = 0;
                    y = 0;
                    linesize = s->linesize[c];

                    if (bits > 8) linesize /= 2; /* linesize in 16-bit elements */

                    for (j = 0; j < n; j++) {
                        int pred, dc;

                        ret = mjpeg_decode_dc(s, s->dc_index[i], &dc);
                        if (ret < 0)
                            return ret;

                        if (   h * mb_x + x >= s->width
                            || v * mb_y + y >= s->height) {
                            // Nothing to do
                        } else if (bits <= 8) {
                            ptr = s->picture_ptr->data[c] + (linesize * (v * mb_y + y)) + (h * mb_x + x); // FIXME optimize this crap
                            if (y == 0 && toprow) {
                                if (x == 0 && leftcol) {
                                    /* No neighbors at all: mid-range seed. */
                                    pred = 1 << (bits - 1);
                                } else {
                                    pred = ptr[-1];
                                }
                            } else {
                                if (x == 0 && leftcol) {
                                    pred = ptr[-linesize];
                                } else {
                                    PREDICT(pred, ptr[-linesize - 1], ptr[-linesize], ptr[-1], predictor);
                                }
                            }

                            if (s->interlaced && s->bottom_field)
                                ptr += linesize >> 1;
                            pred &= mask;
                            *ptr = pred + ((unsigned)dc << point_transform);
                        } else {
                            ptr16 = (uint16_t*)(s->picture_ptr->data[c] + 2 * (linesize * (v * mb_y + y)) + 2 * (h * mb_x + x)); // FIXME optimize this crap
                            if (y == 0 && toprow) {
                                if (x == 0 && leftcol) {
                                    pred = 1 << (bits - 1);
                                } else {
                                    pred = ptr16[-1];
                                }
                            } else {
                                if (x == 0 && leftcol) {
                                    pred = ptr16[-linesize];
                                } else {
                                    PREDICT(pred, ptr16[-linesize - 1], ptr16[-linesize], ptr16[-1], predictor);
                                }
                            }

                            if (s->interlaced && s->bottom_field)
                                ptr16 += linesize >> 1;
                            pred &= mask;
                            *ptr16 = pred + ((unsigned)dc << point_transform);
                        }
                        if (++x == h) {
                            x = 0;
                            y++;
                        }
                    }
                }
            } else {
                /* Interior MB: all three neighbors exist, use PREDICT directly. */
                for (i = 0; i < nb_components; i++) {
                    uint8_t *ptr;
                    uint16_t *ptr16;
                    int n, h, v, x, y, c, j, linesize, dc;
                    n = s->nb_blocks[i];
                    c = s->comp_index[i];
                    h = s->h_scount[i];
                    v = s->v_scount[i];
                    x = 0;
                    y = 0;
                    linesize = s->linesize[c];

                    if (bits > 8) linesize /= 2;

                    for (j = 0; j < n; j++) {
                        int pred;

                        ret = mjpeg_decode_dc(s, s->dc_index[i], &dc);
                        if (ret < 0)
                            return ret;

                        if (   h * mb_x + x >= s->width
                            || v * mb_y + y >= s->height) {
                            // Nothing to do
                        } else if (bits <= 8) {
                            ptr = s->picture_ptr->data[c] +
                                  (linesize * (v * mb_y + y)) +
                                  (h * mb_x + x); // FIXME optimize this crap
                            PREDICT(pred, ptr[-linesize - 1], ptr[-linesize], ptr[-1], predictor);

                            pred &= mask;
                            *ptr = pred + ((unsigned)dc << point_transform);
                        } else {
                            ptr16 = (uint16_t*)(s->picture_ptr->data[c] + 2 * (linesize * (v * mb_y + y)) + 2 * (h * mb_x + x)); // FIXME optimize this crap
                            PREDICT(pred, ptr16[-linesize - 1], ptr16[-linesize], ptr16[-1], predictor);

                            pred &= mask;
                            *ptr16 = pred + ((unsigned)dc << point_transform);
                        }

                        if (++x == h) {
                            x = 0;
                            y++;
                        }
                    }
                }
            }
        }
    }
    return 0;
}
1413 
/* NOTE(review): the first line of this function's signature (its name and
 * leading parameters, presumably mjpeg_copy_block(MJpegDecodeContext *s,)
 * is missing from this listing -- confirm against the full source. */
                            uint8_t *dst, const uint8_t *src,
                            int linesize, int lowres)
{
    /* Copy one lowres-scaled block: 8x8 at lowres 0, halving each level
     * down to a single pixel at lowres 3. */
    switch (lowres) {
    case 0: s->copy_block(dst, src, linesize, 8);
        break;
    case 1: copy_block4(dst, src, linesize, linesize, 4);
        break;
    case 2: copy_block2(dst, src, linesize, linesize, 2);
        break;
    case 3: *dst = *src;
        break;
    }
}
1429 
1430 static void shift_output(MJpegDecodeContext *s, uint8_t *ptr, int linesize)
1431 {
1432  int block_x, block_y;
1433  int size = 8 >> s->avctx->lowres;
1434  if (s->bits > 8) {
1435  for (block_y = 0; block_y < size; block_y++)
1436  for (block_x = 0; block_x < size; block_x++)
1437  *(uint16_t*)(ptr + 2 * block_x + block_y * linesize) <<= 16 - s->bits;
1438  } else {
1439  for (block_y = 0; block_y < size; block_y++)
1440  for (block_x = 0; block_x < size; block_x++)
1441  *(ptr + block_x + block_y * linesize) <<= 8 - s->bits;
1442  }
1443 }
1444 
/* NOTE(review): the signature line of this function is missing from this
 * listing; by its contents it is the baseline/progressive DCT scan decoder
 * (mjpeg_decode_scan) taking the decoder context -- confirm against the
 * full source. */
{
    int nb_components = s->nb_components_sos;
    int Ah = s->Ah;
    int Al = s->Al;
    const uint8_t *mb_bitmask = NULL;
    const AVFrame *reference = NULL;
    int i, mb_x, mb_y, chroma_h_shift, chroma_v_shift, chroma_width, chroma_height;
    uint8_t *data[MAX_COMPONENTS];
    const uint8_t *reference_data[MAX_COMPONENTS];
    int linesize[MAX_COMPONENTS];
    GetBitContext mb_bitmask_gb = {0}; // initialize to silence gcc warning
    int bytes_per_pixel = 1 + (s->bits > 8);
    int ret;

    /* MXPEG sends a bitmask of changed MBs; unchanged ones are copied
     * from the reference frame instead of being decoded. */
    if (s->avctx->codec_id == AV_CODEC_ID_MXPEG) {
        mb_bitmask = s->mb_bitmask;
        reference = s->reference;
    }

    if (mb_bitmask) {
        if (s->mb_bitmask_size != (s->mb_width * s->mb_height + 7) >> 3) {
            av_log(s->avctx, AV_LOG_ERROR, "mb_bitmask_size mismatches\n");
            return AVERROR_INVALIDDATA;
        }
        init_get_bits(&mb_bitmask_gb, mb_bitmask, s->mb_width * s->mb_height);
    }

    av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt, &chroma_h_shift,
                                     &chroma_v_shift);
    chroma_width  = AV_CEIL_RSHIFT(s->width,  chroma_h_shift);
    chroma_height = AV_CEIL_RSHIFT(s->height, chroma_v_shift);

    for (i = 0; i < nb_components; i++) {
        int c = s->comp_index[i];
        data[c] = s->picture_ptr->data[c];
        reference_data[c] = reference ? reference->data[c] : NULL;
        linesize[c] = s->linesize[c];
        s->coefs_finished[c] |= 1;
    }

next_field:
    s->restart_count = -1;

    for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
        for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
            const int copy_mb = mb_bitmask && !get_bits1(&mb_bitmask_gb);
            int restart;

            if (s->avctx->codec_id == AV_CODEC_ID_THP) {
                if (s->restart_count < 0) {
                    /* NOTE(review): a statement assigning `ret` appears to
                     * be missing from this listing here (as rendered, `ret`
                     * would be read uninitialized) -- confirm against the
                     * full source. */
                    if (ret < 0)
                        return ret;
                }
                restart = ff_mjpeg_should_restart(s);
                if (restart)
                    align_get_bits(&s->gb);
            } else {
                ret = ff_mjpeg_handle_restart(s, &restart);
                if (ret < 0)
                    return ret;
            }
            if (restart) {
                /* Reset DC prediction after a restart marker. */
                for (i = 0; i < nb_components; i++)
                    s->last_dc[i] = (4 << s->bits);
            }

            if (get_bits_left(&s->gb) < 0) {
                av_log(s->avctx, AV_LOG_ERROR, "overread %d\n",
                       -get_bits_left(&s->gb));
                return AVERROR_INVALIDDATA;
            }
            for (i = 0; i < nb_components; i++) {
                uint8_t *ptr;
                int n, h, v, x, y, c, j;
                int block_offset;
                n = s->nb_blocks[i];
                c = s->comp_index[i];
                h = s->h_scount[i];
                v = s->v_scount[i];
                x = 0;
                y = 0;
                for (j = 0; j < n; j++) {
                    block_offset = (((linesize[c] * (v * mb_y + y) * 8) +
                                     (h * mb_x + x) * 8 * bytes_per_pixel) >> s->avctx->lowres);

                    if (s->interlaced && s->bottom_field)
                        block_offset += linesize[c] >> 1;
                    /* Blocks that fall entirely outside the picture are
                     * still parsed (ptr == NULL) but not written. */
                    if (   8 * (h * mb_x + x) < ((c == 1) || (c == 2) ? chroma_width  : s->width)
                        && 8 * (v * mb_y + y) < ((c == 1) || (c == 2) ? chroma_height : s->height)) {
                        ptr = data[c] + block_offset;
                    } else
                        ptr = NULL;
                    if (!s->progressive) {
                        if (copy_mb) {
                            if (ptr)
                                mjpeg_copy_block(s, ptr, reference_data[c] + block_offset,
                                                 linesize[c], s->avctx->lowres);

                        } else {
                            s->bdsp.clear_block(s->block);
                            if (decode_block(s, s->block, i,
                                             s->dc_index[i], s->ac_index[i],
                                             s->quant_matrixes[s->quant_sindex[i]]) < 0) {
                                av_log(s->avctx, AV_LOG_ERROR,
                                       "error y=%d x=%d\n", mb_y, mb_x);
                                return AVERROR_INVALIDDATA;
                            }
                            if (ptr && linesize[c]) {
                                s->idsp.idct_put(ptr, linesize[c], s->block);
                                if (s->bits & 7)
                                    shift_output(s, ptr, linesize[c]);
                            }
                        }
                    } else {
                        /* Progressive: only the DC coefficient is coded in
                         * this scan; store into the persistent buffers. */
                        int block_idx = s->block_stride[c] * (v * mb_y + y) +
                                        (h * mb_x + x);
                        int16_t *block = s->blocks[c][block_idx];
                        if (Ah)
                            block[0] += get_bits1(&s->gb) *
                                        s->quant_matrixes[s->quant_sindex[i]][0] << Al;
                        else if (decode_dc_progressive(s, block, i, s->dc_index[i],
                                                       s->quant_matrixes[s->quant_sindex[i]],
                                                       Al) < 0) {
                            av_log(s->avctx, AV_LOG_ERROR,
                                   "error y=%d x=%d\n", mb_y, mb_x);
                            return AVERROR_INVALIDDATA;
                        }
                    }
                    ff_dlog(s->avctx, "mb: %d %d processed\n", mb_y, mb_x);
                    ff_dlog(s->avctx, "%d %d %d %d %d %d %d %d \n",
                            mb_x, mb_y, x, y, c, s->bottom_field,
                            (v * mb_y + y) * 8, (h * mb_x + x) * 8);
                    if (++x == h) {
                        x = 0;
                        y++;
                    }
                }
            }
        }
    }

    /* AVRn interlaced streams mark the second field with an RST1 marker. */
    if (s->interlaced &&
        bytestream2_get_bytes_left(&s->gB) > 2 &&
        bytestream2_tell(&s->gB) > 2 &&
        s->gB.buffer[-2] == 0xFF &&
        s->gB.buffer[-1] == 0xD1) {
        av_log(s->avctx, AV_LOG_DEBUG, "AVRn interlaced picture marker found\n");
        s->bottom_field ^= 1;

        goto next_field;
    }

    return 0;
}
1601 
1603 {
1604  int Ss = s->Ss;
1605  int Se = s->Se;
1606  int Ah = s->Ah;
1607  int Al = s->Al;
1608  int mb_x, mb_y;
1609  int EOBRUN = 0;
1610  int c = s->comp_index[0];
1611  uint16_t *quant_matrix = s->quant_matrixes[s->quant_sindex[0]];
1612 
1613  av_assert0(Ss >= 0 && Ah >= 0 && Al >= 0);
1614  if (Se < Ss || Se > 63) {
1615  av_log(s->avctx, AV_LOG_ERROR, "SS/SE %d/%d is invalid\n", Ss, Se);
1616  return AVERROR_INVALIDDATA;
1617  }
1618 
1619  // s->coefs_finished is a bitmask for coefficients coded
1620  // Ss and Se are parameters telling start and end coefficients
1621  s->coefs_finished[c] |= (2ULL << Se) - (1ULL << Ss);
1622 
1623  s->restart_count = -1;
1624 
1625  for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
1626  int block_idx = mb_y * s->block_stride[c];
1627  int16_t (*block)[64] = &s->blocks[c][block_idx];
1628  uint8_t *last_nnz = &s->last_nnz[c][block_idx];
1629  for (mb_x = 0; mb_x < s->mb_width; mb_x++, block++, last_nnz++) {
1630  int ret;
1631  int restart;
1632  ret = ff_mjpeg_handle_restart(s, &restart);
1633  if (ret < 0)
1634  return ret;
1635  if (restart)
1636  EOBRUN = 0;
1637 
1638  if (Ah)
1639  ret = decode_block_refinement(s, *block, last_nnz, s->ac_index[0],
1640  quant_matrix, Ss, Se, Al, &EOBRUN);
1641  else
1642  ret = decode_block_progressive(s, *block, last_nnz, s->ac_index[0],
1643  quant_matrix, Ss, Se, Al, &EOBRUN);
1644 
1645  if (ret >= 0 && get_bits_left(&s->gb) < 0)
1647  if (ret < 0) {
1648  av_log(s->avctx, AV_LOG_ERROR,
1649  "error y=%d x=%d\n", mb_y, mb_x);
1650  return AVERROR_INVALIDDATA;
1651  }
1652  }
1653  }
1654  return 0;
1655 }
1656 
/* NOTE(review): the signature line of this function is missing from this
 * listing; by its contents it runs the final IDCT over the accumulated
 * progressive coefficient buffers (mjpeg_idct_scan_progressive_ac),
 * returning void -- confirm against the full source. */
{
    int mb_x, mb_y;
    int c;
    const int bytes_per_pixel = 1 + (s->bits > 8);
    const int block_size = s->lossless ? 1 : 8;

    for (c = 0; c < s->nb_components; c++) {
        uint8_t *data = s->picture_ptr->data[c];
        int linesize  = s->linesize[c];
        int h = s->h_max / s->h_count[c];
        int v = s->v_max / s->v_count[c];
        int mb_width  = (s->width  + h * block_size - 1) / (h * block_size);
        int mb_height = (s->height + v * block_size - 1) / (v * block_size);

        /* All coefficient bits set means every scan for this component
         * arrived; anything less is an incomplete (but decodable) image. */
        if (~s->coefs_finished[c])
            av_log(s->avctx, AV_LOG_WARNING, "component %d is incomplete\n", c);

        if (s->interlaced && s->bottom_field)
            data += linesize >> 1;

        for (mb_y = 0; mb_y < mb_height; mb_y++) {
            uint8_t *ptr = data + (mb_y * linesize * 8 >> s->avctx->lowres);
            int block_idx = mb_y * s->block_stride[c];
            int16_t (*block)[64] = &s->blocks[c][block_idx];
            for (mb_x = 0; mb_x < mb_width; mb_x++, block++) {
                s->idsp.idct_put(ptr, linesize, *block);
                if (s->bits & 7)
                    shift_output(s, ptr, linesize);
                ptr += bytes_per_pixel * 8 >> s->avctx->lowres;
            }
        }
    }
}
1691 
/* NOTE(review): the signature line of this function is missing from this
 * listing; by its contents it is the SOS (Start Of Scan) marker handler
 * (ff_mjpeg_decode_sos) -- confirm its exact parameters against the full
 * source. */
{
    int len, i, h, v;
    int index, id, ret;
    const int block_size = s->lossless ? 1 : 8;

    if (!s->got_picture) {
        av_log(s->avctx, AV_LOG_WARNING,
               "Can not process SOS before SOF, skipping\n");
        return AVERROR_INVALIDDATA;
    }

    ret = mjpeg_parse_len(s, &len, "sos");
    if (ret < 0)
        return ret;
    if (len < 1)
        return AVERROR_INVALIDDATA;
    s->nb_components_sos = bytestream2_get_byteu(&s->gB);
    if (s->nb_components_sos == 0 || s->nb_components_sos > MAX_COMPONENTS) {
        /* NOTE(review): the opening line of this diagnostic call (likely
         * avpriv_report_missing_feature(s->avctx, ...) given the
         * AVERROR_PATCHWELCOME return) is missing from this listing --
         * confirm against the full source. */
            "decode_sos: nb_components (%d)",
            s->nb_components_sos);
        return AVERROR_PATCHWELCOME;
    }
    /* Each component contributes 2 bytes, plus Ss/Se/AhAl and the count. */
    if (len != 4 + 2 * s->nb_components_sos) {
        av_log(s->avctx, AV_LOG_ERROR, "decode_sos: len(%d) mismatch %d components\n", len, s->nb_components_sos);
        return AVERROR_INVALIDDATA;
    }
    for (i = 0; i < s->nb_components_sos; i++) {
        id = bytestream2_get_byteu(&s->gB);
        av_log(s->avctx, AV_LOG_DEBUG, "component: %d\n", id);
        /* find component index */
        for (index = 0; index < s->nb_components; index++)
            if (id == s->component_id[index])
                break;
        if (index == s->nb_components) {
            av_log(s->avctx, AV_LOG_ERROR,
                   "decode_sos: index(%d) out of components\n", index);
            return AVERROR_INVALIDDATA;
        }
        /* Metasoft MJPEG codec has Cb and Cr swapped */
        if (s->avctx->codec_tag == MKTAG('M', 'T', 'S', 'J')
            && s->nb_components_sos == 3 && s->nb_components == 3 && i)
            index = 3 - i;

        s->quant_sindex[i] = s->quant_index[index];
        s->nb_blocks[i] = s->h_count[index] * s->v_count[index];
        s->h_scount[i]  = s->h_count[index];
        s->v_scount[i]  = s->v_count[index];

        s->comp_index[i] = index;

        /* High nibble selects the DC table, low nibble the AC table. */
        uint8_t b = bytestream2_get_byteu(&s->gB);
        s->dc_index[i] = b >> 4;
        s->ac_index[i] = b & 0x0F;

        if (s->dc_index[i] <  0 || s->ac_index[i] < 0 ||
            s->dc_index[i] >= 4 || s->ac_index[i] >= 4)
            goto out_of_range;
        if (!s->vlcs[0][s->dc_index[i]].table || !(s->progressive ? s->vlcs[2][s->ac_index[0]].table : s->vlcs[1][s->ac_index[i]].table))
            goto out_of_range;
    }

    s->Ss = bytestream2_get_byteu(&s->gB); /* JPEG Ss / lossless JPEG predictor / JPEG-LS NEAR */
    s->Se = bytestream2_get_byteu(&s->gB); /* JPEG Se / JPEG-LS ILV */
    uint8_t b = bytestream2_get_byteu(&s->gB);
    s->Ah = b >> 4;   /* Ah */
    s->Al = b & 0x0F; /* Al */

    if (s->nb_components_sos > 1) {
        /* interleaved stream */
        s->mb_width  = (s->width  + s->h_max * block_size - 1) / (s->h_max * block_size);
        s->mb_height = (s->height + s->v_max * block_size - 1) / (s->v_max * block_size);
    } else if (!s->ls) { /* skip this for JPEG-LS */
        h = s->h_max / s->h_scount[0];
        v = s->v_max / s->v_scount[0];
        s->mb_width  = (s->width  + h * block_size - 1) / (h * block_size);
        s->mb_height = (s->height + v * block_size - 1) / (v * block_size);
        s->nb_blocks[0] = 1;
        s->h_scount[0]  = 1;
        s->v_scount[0]  = 1;
    }

    if (s->avctx->debug & FF_DEBUG_PICT_INFO)
        av_log(s->avctx, AV_LOG_DEBUG, "%s %s p:%d >>:%d ilv:%d bits:%d skip:%d %s comp:%d\n",
               s->lossless ? "lossless" : "sequential DCT", s->rgb ? "RGB" : "",
               s->Ss, s->Al, s->Se, s->bits, s->mjpb_skiptosod,
               s->pegasus_rct ? "PRCT" : (s->rct ? "RCT" : ""), s->nb_components_sos);


    /* mjpeg-b can have padding bytes between sos and image data, skip them */
    if (s->mjpb_skiptosod)
        bytestream2_skip(&s->gB, s->mjpb_skiptosod);

    if (s->avctx->hwaccel) {
        /* Hardware decoding gets the raw (still escaped) scan bytes. */
        const uint8_t *buf_ptr;
        size_t buf_size;

        mjpeg_find_raw_scan_data(s, &buf_ptr, &buf_size);

        ret = FF_HW_CALL(s->avctx, decode_slice, buf_ptr, buf_size);
        if (ret < 0)
            return ret;

    } else {
        /* Software decoding: dispatch on coding mode. */
        if (s->lossless) {
            av_assert0(s->picture_ptr == s->picture);
            if (CONFIG_JPEGLS_DECODER && s->ls) {
                if ((ret = ff_jpegls_decode_picture(s)) < 0)
                    return ret;
            } else {
                if (s->rgb || s->bayer) {
                    if ((ret = ljpeg_decode_rgb_scan(s)) < 0)
                        return ret;
                } else {
                    if ((ret = ljpeg_decode_yuv_scan(s)) < 0)
                        return ret;
                }
            }
        } else {
            if (s->progressive && s->Ss) {
                av_assert0(s->picture_ptr == s->picture);
                if ((ret = mjpeg_decode_scan_progressive_ac(s)) < 0)
                    return ret;
            } else {
                if ((ret = mjpeg_decode_scan(s)) < 0)
                    return ret;
            }
        }
    }

    if (s->avctx->codec_id == AV_CODEC_ID_MEDIA100 ||
        s->avctx->codec_id == AV_CODEC_ID_MJPEGB ||
        s->avctx->codec_id == AV_CODEC_ID_THP) {
        /* Add the amount of bits read from the unescaped image data buffer
         * into the GetByteContext. */
        bytestream2_skipu(&s->gB, (get_bits_count(&s->gb) + 7) / 8);
    }

    return 0;
 out_of_range:
    av_log(s->avctx, AV_LOG_ERROR, "decode_sos: ac/dc index out of range\n");
    return AVERROR_INVALIDDATA;
}
1836 
/* NOTE(review): the signature line of this function is missing from this
 * listing; by its contents it is the DRI (Define Restart Interval) marker
 * handler (mjpeg_decode_dri) -- confirm against the full source. */
{
    /* A DRI segment's length field is always 4: 2 length bytes plus the
     * 2-byte restart interval. */
    if (bytestream2_get_be16u(&s->gB) != 4)
        return AVERROR_INVALIDDATA;
    s->restart_interval = bytestream2_get_be16u(&s->gB);
    av_log(s->avctx, AV_LOG_DEBUG, "restart interval: %d\n",
           s->restart_interval);

    return 0;
}
1847 
1849 {
1850  int len, id, i;
1851 
1852  int ret = mjpeg_parse_len(s, &len, "app");
1853  if (ret < 0)
1854  return AVERROR_INVALIDDATA;
1855 
1856  if (len < 4) {
1857  if (s->avctx->err_recognition & AV_EF_EXPLODE)
1858  return AVERROR_INVALIDDATA;
1859  av_log(s->avctx, AV_LOG_VERBOSE, "skipping APPx stub (len=%" PRId32 ")\n", len);
1860  goto out;
1861  }
1862 
1863  id = bytestream2_get_be32u(&s->gB);
1864  len -= 4;
1865 
1866  if (s->avctx->debug & FF_DEBUG_STARTCODE)
1867  av_log(s->avctx, AV_LOG_DEBUG, "APPx (%s / %8X) len=%d\n",
1868  av_fourcc2str(av_bswap32(id)), id, len);
1869 
1870  /* This fourcc is used by non-avid files too, it holds some
1871  information, but it's always present in AVID-created files. */
1872  if (id == AV_RB32("AVI1")) {
1873  /* structure:
1874  4bytes AVI1
1875  1bytes polarity
1876  1bytes always zero
1877  4bytes field_size
1878  4bytes field_size_less_padding
1879  */
1880  if (len < 1)
1881  goto out;
1882  i = bytestream2_get_byteu(&s->gB); len--;
1883  av_log(s->avctx, AV_LOG_DEBUG, "polarity %d\n", i);
1884  goto out;
1885  }
1886 
1887  if (id == AV_RB32("JFIF")) {
1888  int t_w, t_h, v1, v2;
1889  if (len < 8)
1890  goto out;
1891  bytestream2_skipu(&s->gB, 1); /* the trailing zero-byte */
1892  v1 = bytestream2_get_byteu(&s->gB);
1893  v2 = bytestream2_get_byteu(&s->gB);
1894  bytestream2_skipu(&s->gB, 1);
1895 
1896  s->avctx->sample_aspect_ratio.num = bytestream2_get_be16u(&s->gB);
1897  s->avctx->sample_aspect_ratio.den = bytestream2_get_be16u(&s->gB);
1898  if ( s->avctx->sample_aspect_ratio.num <= 0
1899  || s->avctx->sample_aspect_ratio.den <= 0) {
1900  s->avctx->sample_aspect_ratio.num = 0;
1901  s->avctx->sample_aspect_ratio.den = 1;
1902  }
1903 
1904  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1905  av_log(s->avctx, AV_LOG_INFO,
1906  "mjpeg: JFIF header found (version: %x.%x) SAR=%d/%d\n",
1907  v1, v2,
1908  s->avctx->sample_aspect_ratio.num,
1909  s->avctx->sample_aspect_ratio.den);
1910 
1911  len -= 8;
1912  if (len >= 2) {
1913  t_w = bytestream2_get_byteu(&s->gB);
1914  t_h = bytestream2_get_byteu(&s->gB);
1915  if (t_w && t_h) {
1916  /* skip thumbnail */
1917  if (len - 10 - (t_w * t_h * 3) > 0)
1918  len -= t_w * t_h * 3;
1919  }
1920  len -= 2;
1921  }
1922  goto out;
1923  }
1924 
1925  if ( id == AV_RB32("Adob")
1926  && len >= 8
1927  && bytestream2_peek_byteu(&s->gB) == 'e'
1928  && bytestream2_peek_be32u(&s->gB) != AV_RB32("e_CM")) {
1929  bytestream2_skipu(&s->gB, 1); /* 'e' */
1930  bytestream2_skipu(&s->gB, 2); /* version */
1931  bytestream2_skipu(&s->gB, 2); /* flags0 */
1932  bytestream2_skipu(&s->gB, 2); /* flags1 */
1933  s->adobe_transform = bytestream2_get_byteu(&s->gB);
1934  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1935  av_log(s->avctx, AV_LOG_INFO, "mjpeg: Adobe header found, transform=%d\n", s->adobe_transform);
1936  len -= 8;
1937  goto out;
1938  }
1939 
1940  if (id == AV_RB32("LJIF")) {
1941  int rgb = s->rgb;
1942  int pegasus_rct = s->pegasus_rct;
1943  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1944  av_log(s->avctx, AV_LOG_INFO,
1945  "Pegasus lossless jpeg header found\n");
1946  if (len < 9)
1947  goto out;
1948  bytestream2_skipu(&s->gB, 2); /* version ? */
1949  bytestream2_skipu(&s->gB, 2); /* unknown always 0? */
1950  bytestream2_skipu(&s->gB, 2); /* unknown always 0? */
1951  bytestream2_skipu(&s->gB, 2); /* unknown always 0? */
1952  switch (i = bytestream2_get_byteu(&s->gB)) {
1953  case 1:
1954  rgb = 1;
1955  pegasus_rct = 0;
1956  break;
1957  case 2:
1958  rgb = 1;
1959  pegasus_rct = 1;
1960  break;
1961  default:
1962  av_log(s->avctx, AV_LOG_ERROR, "unknown colorspace %d\n", i);
1963  }
1964 
1965  len -= 9;
1966  if (s->bayer)
1967  goto out;
1968  if (s->got_picture)
1969  if (rgb != s->rgb || pegasus_rct != s->pegasus_rct) {
1970  av_log(s->avctx, AV_LOG_WARNING, "Mismatching LJIF tag\n");
1971  goto out;
1972  }
1973 
1974  s->rgb = rgb;
1975  s->pegasus_rct = pegasus_rct;
1976 
1977  goto out;
1978  }
1979  if (id == AV_RL32("colr") && len > 0) {
1980  s->colr = bytestream2_get_byteu(&s->gB);
1981  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1982  av_log(s->avctx, AV_LOG_INFO, "COLR %d\n", s->colr);
1983  len--;
1984  goto out;
1985  }
1986  if (id == AV_RL32("xfrm") && len > 0) {
1987  s->xfrm = bytestream2_get_byteu(&s->gB);
1988  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1989  av_log(s->avctx, AV_LOG_INFO, "XFRM %d\n", s->xfrm);
1990  len--;
1991  goto out;
1992  }
1993 
1994  /* JPS extension by VRex */
1995  if (start_code == APP3 && id == AV_RB32("_JPS") && len >= 10) {
1996  int flags, layout, type;
1997  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1998  av_log(s->avctx, AV_LOG_INFO, "_JPSJPS_\n");
1999 
2000  bytestream2_skipu(&s->gB, 4); len -= 4; /* JPS_ */
2001  bytestream2_skipu(&s->gB, 2); len -= 2; /* block length */
2002  bytestream2_skipu(&s->gB, 1); /* reserved */
2003  flags = bytestream2_get_byteu(&s->gB);
2004  layout = bytestream2_get_byteu(&s->gB);
2005  type = bytestream2_get_byteu(&s->gB);
2006  len -= 4;
2007 
2008  av_freep(&s->stereo3d);
2009  s->stereo3d = av_stereo3d_alloc();
2010  if (!s->stereo3d) {
2011  goto out;
2012  }
2013  if (type == 0) {
2014  s->stereo3d->type = AV_STEREO3D_2D;
2015  } else if (type == 1) {
2016  switch (layout) {
2017  case 0x01:
2018  s->stereo3d->type = AV_STEREO3D_LINES;
2019  break;
2020  case 0x02:
2021  s->stereo3d->type = AV_STEREO3D_SIDEBYSIDE;
2022  break;
2023  case 0x03:
2024  s->stereo3d->type = AV_STEREO3D_TOPBOTTOM;
2025  break;
2026  }
2027  if (!(flags & 0x04)) {
2028  s->stereo3d->flags = AV_STEREO3D_FLAG_INVERT;
2029  }
2030  }
2031  goto out;
2032  }
2033 
2034  /* EXIF metadata */
2035  if (start_code == APP1 && id == AV_RB32("Exif") && len >= 2) {
2036  int ret;
2037 
2038  bytestream2_skipu(&s->gB, 2); // skip padding
2039  len -= 2;
2040 
2041  if (s->exif_metadata.entries) {
2042  av_log(s->avctx, AV_LOG_WARNING, "multiple EXIF\n");
2043  goto out;
2044  }
2045 
2046  ret = av_exif_parse_buffer(s->avctx, s->gB.buffer, len, &s->exif_metadata, AV_EXIF_TIFF_HEADER);
2047  if (ret < 0) {
2048  av_log(s->avctx, AV_LOG_WARNING, "unable to parse EXIF buffer\n");
2049  goto out;
2050  }
2051 
2052  bytestream2_skipu(&s->gB, ret);
2053  len -= ret;
2054 
2055  goto out;
2056  }
2057 
2058  /* Apple MJPEG-A */
2059  if ((start_code == APP1) && (len > (0x28 - 8))) {
2060  id = bytestream2_get_be32u(&s->gB);
2061  len -= 4;
2062  /* Apple MJPEG-A */
2063  if (id == AV_RB32("mjpg")) {
2064  /* structure:
2065  4bytes field size
2066  4bytes pad field size
2067  4bytes next off
2068  4bytes quant off
2069  4bytes huff off
2070  4bytes image off
2071  4bytes scan off
2072  4bytes data off
2073  */
2074  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
2075  av_log(s->avctx, AV_LOG_INFO, "mjpeg: Apple MJPEG-A header found\n");
2076  }
2077  }
2078 
2079  if (start_code == APP2 && id == AV_RB32("ICC_") && len >= 10) {
2080  int id2;
2081  unsigned seqno;
2082  unsigned nummarkers;
2083 
2084  id = bytestream2_get_be32u(&s->gB);
2085  id2 = bytestream2_get_be24u(&s->gB);
2086  len -= 7;
2087  if (id != AV_RB32("PROF") || id2 != AV_RB24("ILE")) {
2088  av_log(s->avctx, AV_LOG_WARNING, "Invalid ICC_PROFILE header in APP2\n");
2089  goto out;
2090  }
2091 
2092  bytestream2_skipu(&s->gB, 1);
2093  seqno = bytestream2_get_byteu(&s->gB);
2094  len -= 2;
2095  if (seqno == 0) {
2096  av_log(s->avctx, AV_LOG_WARNING, "Invalid sequence number in APP2\n");
2097  goto out;
2098  }
2099 
2100  nummarkers = bytestream2_get_byteu(&s->gB);
2101  len -= 1;
2102  if (nummarkers == 0) {
2103  av_log(s->avctx, AV_LOG_WARNING, "Invalid number of markers coded in APP2\n");
2104  goto out;
2105  } else if (s->iccnum != 0 && nummarkers != s->iccnum) {
2106  av_log(s->avctx, AV_LOG_WARNING, "Mismatch in coded number of ICC markers between markers\n");
2107  goto out;
2108  } else if (seqno > nummarkers) {
2109  av_log(s->avctx, AV_LOG_WARNING, "Mismatching sequence number and coded number of ICC markers\n");
2110  goto out;
2111  }
2112 
2113  /* Allocate if this is the first APP2 we've seen. */
2114  if (s->iccnum == 0) {
2115  if (!FF_ALLOCZ_TYPED_ARRAY(s->iccentries, nummarkers)) {
2116  av_log(s->avctx, AV_LOG_ERROR, "Could not allocate ICC data arrays\n");
2117  return AVERROR(ENOMEM);
2118  }
2119  s->iccnum = nummarkers;
2120  }
2121 
2122  if (s->iccentries[seqno - 1].data) {
2123  av_log(s->avctx, AV_LOG_WARNING, "Duplicate ICC sequence number\n");
2124  goto out;
2125  }
2126 
2127  s->iccentries[seqno - 1].length = len;
2128  s->iccentries[seqno - 1].data = av_malloc(len);
2129  if (!s->iccentries[seqno - 1].data) {
2130  av_log(s->avctx, AV_LOG_ERROR, "Could not allocate ICC data buffer\n");
2131  return AVERROR(ENOMEM);
2132  }
2133 
2134  bytestream2_get_bufferu(&s->gB, s->iccentries[seqno - 1].data, len);
2135  len = 0;
2136  s->iccread++;
2137 
2138  if (s->iccread > s->iccnum)
2139  av_log(s->avctx, AV_LOG_WARNING, "Read more ICC markers than are supposed to be coded\n");
2140  }
2141 
2142 out:
2143  /* slow but needed for extreme adobe jpegs */
2144  if (len < 0)
2145  av_log(s->avctx, AV_LOG_ERROR,
2146  "mjpeg: error, decode_app parser read over the end\n");
2147  if (len > 0)
2148  bytestream2_skipu(&s->gB, len);
2149 
2150  return 0;
2151 }
2152 
/* Parse a JPEG COM (comment) marker segment.
 * NOTE(review): the signature line was lost in extraction; from the body
 * this takes the decoder context and returns 0 or a negative AVERROR. */
2154 {
2155  int len;
2156  int ret = mjpeg_parse_len(s, &len, "com");
2157  if (ret < 0)
2158  return ret;
2159  if (!len)
2160  return 0;
2161 
/* Read the comment payload into a NUL-terminated scratch buffer. */
2162  int i;
2163  char *cbuf = av_malloc(len + 1);
2164  if (!cbuf)
2165  return AVERROR(ENOMEM);
2166 
2167  for (i = 0; i < len; i++)
2168  cbuf[i] = bytestream2_get_byteu(&s->gB);
/* Strip a single trailing newline if present (len >= 1 here). */
2169  if (cbuf[i - 1] == '\n')
2170  cbuf[i - 1] = 0;
2171  else
2172  cbuf[i] = 0;
2173 
2174  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
2175  av_log(s->avctx, AV_LOG_INFO, "comment: '%s'\n", cbuf);
2176 
/* Recognize well-known vendor comments and set workaround flags. */
2177  /* buggy avid, it puts EOI only at every 10th frame */
2178  if (!strncmp(cbuf, "AVID", 4)) {
2179  parse_avid(s, cbuf, len);
2180  } else if (!strcmp(cbuf, "CS=ITU601"))
2181  s->cs_itu601 = 1;
2182  else if ((!strncmp(cbuf, "Intel(R) JPEG Library, version 1", 32) && s->avctx->codec_tag) ||
2183  (!strncmp(cbuf, "Metasoft MJPEG Codec", 20)))
2184  s->flipped = 1;
2185  else if (!strcmp(cbuf, "MULTISCOPE II")) {
2186  s->avctx->sample_aspect_ratio = (AVRational) { 1, 2 };
2187  s->multiscope = 2;
2188  }
2189 
2190  av_free(cbuf);
2191 
2192  return 0;
2193 }
2194 
2195 /* return the 8 bit start code value and update the search
2196  state. Return -1 if no start code found */
2197 int ff_mjpeg_find_marker(const uint8_t **pbuf_ptr, const uint8_t *buf_end)
2198 {
2199  const uint8_t *buf_ptr;
2200  int val;
2201 
2202  buf_ptr = *pbuf_ptr;
2203  while ((buf_ptr = memchr(buf_ptr, 0xff, buf_end - buf_ptr))) {
2204  buf_ptr++;
2205  while (buf_ptr < buf_end) {
2206  val = *buf_ptr++;
2207  if (val != 0xff) {
2208  if ((val >= SOF0) && (val <= COM))
2209  goto found;
2210  break;
2211  }
2212  }
2213  }
2214  buf_ptr = buf_end;
2215  val = -1;
2216 found:
2217  ff_dlog(NULL, "find_marker skipped %td bytes\n",
2218  (buf_ptr - *pbuf_ptr) - (val < 0 ? 0 : 2));
2219  *pbuf_ptr = buf_ptr;
2220  return val;
2221 }
2222 
/* Determine the extent of the entropy-coded scan data for a hardware
 * decoder, without unescaping it. On return *pbuf_ptr/*pbuf_size describe
 * the raw scan bytes (restart markers included) and s->gB is advanced
 * past them.
 * NOTE(review): the opening signature line was lost in extraction; the
 * forward declaration near the top of the file shows the parameter list. */
2224  const uint8_t **pbuf_ptr, size_t *pbuf_size)
2225 {
2226  const uint8_t *buf_ptr = s->gB.buffer;
2227  const uint8_t *buf_end = buf_ptr + bytestream2_get_bytes_left(&s->gB);
2228 
2229  /* Find size of image data buffer (including restart markers).
2230  * No unescaping is performed. */
2231  const uint8_t *ptr = buf_ptr;
2232  while ((ptr = memchr(ptr, 0xff, buf_end - ptr))) {
2233  ptr++;
2234  if (ptr < buf_end) {
2235  uint8_t x = *ptr++;
2236  /* Discard multiple optional 0xFF fill bytes. */
2237  while (x == 0xff && ptr < buf_end)
2238  x = *ptr++;
2239  if (x && (x < RST0 || x > RST7)) {
2240  /* Non-restart marker */
2241  ptr -= 2;
2242  goto found_hw;
2243  }
2244  }
2245  }
/* No terminating marker: the scan data runs to the end of the buffer. */
2246  ptr = buf_end;
2247 found_hw:
2248  *pbuf_ptr = buf_ptr;
2249  *pbuf_size = ptr - buf_ptr;
2250  bytestream2_skipu(&s->gB, *pbuf_size);
2251 }
2252 
/* Prepare the bit reader s->gb for decoding a scan: unescape the SOS
 * entropy-coded data into s->buffer (byte-stuffing removal for normal
 * JPEG, bit-level treatment for JPEG-LS), or use the input buffer
 * directly for codecs whose payload is already unescaped.
 * NOTE(review): the opening signature line was lost in extraction. */
2254 {
2255  const uint8_t *buf_ptr = s->gB.buffer;
2256  const uint8_t *buf_end = buf_ptr + bytestream2_get_bytes_left(&s->gB);
2257  const uint8_t *unescaped_buf_ptr;
2258  size_t unescaped_buf_size;
2259 
2260  if (s->avctx->codec_id == AV_CODEC_ID_MEDIA100 ||
2261  s->avctx->codec_id == AV_CODEC_ID_MJPEGB ||
2262  s->avctx->codec_id == AV_CODEC_ID_THP) {
2263  /* The image data buffer is already unescaped. The only way to
2264  * find the size of the buffer is by fully decoding it. */
2265  unescaped_buf_ptr = buf_ptr;
2266  unescaped_buf_size = buf_end - buf_ptr;
2267  goto the_end;
2268  }
2269 
2270  av_fast_padded_malloc(&s->buffer, &s->buffer_size, buf_end - buf_ptr);
2271  if (!s->buffer)
2272  return AVERROR(ENOMEM);
2273 
2274  /* unescape buffer of SOS, use special treatment for JPEG-LS */
2275  if (!s->ls) {
2276  const uint8_t *src = buf_ptr;
2277  const uint8_t *ptr = src;
2278  uint8_t *dst = s->buffer;
2279  PutByteContext pb;
2280 
2281  bytestream2_init_writer(&pb, dst, buf_end - src);
2282 
2283  while ((ptr = memchr(ptr, 0xff, buf_end - ptr))) {
2284  ptr++;
2285  if (ptr < buf_end) {
2286  /* Copy verbatim data. */
2287  ptrdiff_t length = (ptr - 1) - src;
2288  if (length > 0)
2289  bytestream2_put_bufferu(&pb, src, length);
2290 
2291  uint8_t x = *ptr++;
2292  /* Discard multiple optional 0xFF fill bytes. */
2293  while (x == 0xff && ptr < buf_end)
2294  x = *ptr++;
2295 
2296  src = ptr;
2297  if (x == 0) {
2298  /* Stuffed zero byte */
2299  bytestream2_put_byteu(&pb, 0xff);
2300  } else if (x >= RST0 && x <= RST7) {
2301  /* Restart marker */
2302  goto found;
2303  } else {
2304  /* Non-restart marker */
2305  ptr -= 2;
2306  goto found;
2307  }
2308  }
2309  }
2310  /* Copy remaining verbatim data. */
2311  ptr = buf_end;
2312  ptrdiff_t length = ptr - src;
2313  if (length > 0)
2314  bytestream2_put_bufferu(&pb, src, length);
2315 
2316 found:
2317  unescaped_buf_ptr = s->buffer;
2318  unescaped_buf_size = bytestream2_tell_p(&pb);
/* NOTE(review): the memset size argument (doxygen line 2320, presumably
 * the input-buffer padding size) was lost in extraction — verify against
 * upstream before relying on this listing. */
2319  memset(s->buffer + unescaped_buf_size, 0,
2321 
2322  bytestream2_skipu(&s->gB, ptr - buf_ptr);
2323 
2324  av_log(s->avctx, AV_LOG_DEBUG, "escaping removed %td bytes\n",
2325  (buf_end - buf_ptr) - (unescaped_buf_size));
2326  } else {
2327  const uint8_t *src = buf_ptr;
2328  const uint8_t *ptr = src;
2329  uint8_t *dst = s->buffer;
2330  PutBitContext pb;
2331 
2332  init_put_bits(&pb, dst, buf_end - src);
2333 
2334  while ((ptr = memchr(ptr, 0xff, buf_end - ptr))) {
2335  ptr++;
2336  if (ptr < buf_end) {
2337  /* Copy verbatim data. */
2338  ptrdiff_t length = (ptr - 1) - src;
2339  if (length > 0)
2340  ff_copy_bits(&pb, src, length * 8);
2341 
2342  uint8_t x = *ptr++;
2343  /* Discard multiple optional 0xFF fill bytes. */
2344  while (x == 0xff && ptr < buf_end)
2345  x = *ptr++;
2346 
2347  src = ptr;
2348  if (!(x & 0x80)) {
2349  /* Stuffed zero bit */
2350  put_bits(&pb, 15, 0x7f80 | x);
2351  } else if (x >= RST0 && x <= RST7) {
2352  /* Restart marker */
2353  goto found_ls;
2354  } else {
2355  /* Non-restart marker */
2356  ptr -= 2;
2357  goto found_ls;
2358  }
2359  }
2360  }
2361  /* Copy remaining verbatim data. */
2362  ptr = buf_end;
2363  ptrdiff_t length = ptr - src;
2364  if (length > 0)
2365  ff_copy_bits(&pb, src, length * 8);
2366 
2367 found_ls:
2368  flush_put_bits(&pb);
2369 
2370  unescaped_buf_ptr = dst;
2371  unescaped_buf_size = put_bytes_output(&pb);
/* NOTE(review): the memset size argument (doxygen line 2373) was lost in
 * extraction here as well — verify against upstream. */
2372  memset(s->buffer + unescaped_buf_size, 0,
2374 
2375  bytestream2_skipu(&s->gB, ptr - buf_ptr);
2376  }
2377 
2378 the_end:
2379  return init_get_bits8(&s->gb, unescaped_buf_ptr, unescaped_buf_size);
2380 }
2381 
/* Free all collected ICC profile chunks and reset the ICC bookkeeping
 * counters so a fresh set of APP2 ICC markers can be collected.
 * NOTE(review): the signature line was lost in extraction. */
2383 {
2384  int i;
2385 
2386  if (s->iccentries) {
2387  for (i = 0; i < s->iccnum; i++)
2388  av_freep(&s->iccentries[i].data);
2389  av_freep(&s->iccentries);
2390  }
2391 
2392  s->iccread = 0;
2393  s->iccnum = 0;
2394 }
2395 
2397  int *got_frame, const AVPacket *avpkt,
2398  const uint8_t *buf, const int buf_size)
2399 {
2400  MJpegDecodeContext *s = avctx->priv_data;
2401  const uint8_t *buf_end, *buf_ptr;
2402  int hshift, vshift;
2403  int start_code;
2404  int index;
2405  int ret = 0;
2406  int is16bit;
2407 
2408  s->force_pal8 = 0;
2409 
2410  s->buf_size = buf_size;
2411 
2412  av_exif_free(&s->exif_metadata);
2413  av_freep(&s->stereo3d);
2414  s->adobe_transform = -1;
2415 
2416  if (s->iccnum != 0)
2418 
2419 redo_for_pal8:
2420  buf_ptr = buf;
2421  buf_end = buf + buf_size;
2422  while (buf_ptr < buf_end) {
2423  /* find start next marker */
2424  start_code = ff_mjpeg_find_marker(&buf_ptr, buf_end);
2425  /* EOF */
2426  if (start_code < 0)
2427  break;
2428 
2429  ptrdiff_t bytes_left = buf_end - buf_ptr;
2430  if (bytes_left > INT_MAX / 8) {
2431  av_log(avctx, AV_LOG_ERROR,
2432  "MJPEG packet 0x%x too big (%td/%d), corrupt data?\n",
2433  start_code, bytes_left, buf_size);
2434  return AVERROR_INVALIDDATA;
2435  }
2436  av_log(avctx, AV_LOG_DEBUG, "marker=%x avail_size_in_buf=%td\n",
2437  start_code, buf_end - buf_ptr);
2438 
2439  bytestream2_init(&s->gB, buf_ptr, bytes_left);
2440 
2441  if (avctx->debug & FF_DEBUG_STARTCODE)
2442  av_log(avctx, AV_LOG_DEBUG, "startcode: %X\n", start_code);
2443 
2444  /* process markers */
2445  if (start_code >= RST0 && start_code <= RST7) {
2446  av_log(avctx, AV_LOG_DEBUG,
2447  "restart marker: %d\n", start_code & 0x0f);
2448  /* APP fields */
2449  } else if (start_code >= APP0 && start_code <= APP15) {
2450  if ((ret = mjpeg_decode_app(s, start_code)) < 0)
2451  av_log(avctx, AV_LOG_ERROR, "unable to decode APP fields: %s\n",
2452  av_err2str(ret));
2453  /* Comment */
2454  } else if (start_code == COM) {
2455  ret = mjpeg_decode_com(s);
2456  if (ret < 0)
2457  return ret;
2458  } else if (start_code == DQT) {
2460  if (ret < 0)
2461  return ret;
2462  }
2463 
2464  ret = -1;
2465 
2466  if (!CONFIG_JPEGLS_DECODER &&
2467  (start_code == SOF55 || start_code == LSE)) {
2468  av_log(avctx, AV_LOG_ERROR, "JPEG-LS support not enabled.\n");
2469  return AVERROR(ENOSYS);
2470  }
2471 
2472  if (avctx->skip_frame == AVDISCARD_ALL) {
2473  switch (start_code) {
2474  case SOF0:
2475  case SOF1:
2476  case SOF2:
2477  case SOF3:
2478  case SOF55:
2479  break;
2480  default:
2481  goto skip;
2482  }
2483  }
2484 
2485  switch (start_code) {
2486  case SOI:
2487  s->restart_interval = 0;
2488  s->raw_image_buffer = buf_ptr;
2489  s->raw_image_buffer_size = buf_end - buf_ptr;
2490  /* nothing to do on SOI */
2491  break;
2492  case DHT:
2493  if ((ret = ff_mjpeg_decode_dht(s)) < 0) {
2494  av_log(avctx, AV_LOG_ERROR, "huffman table decode error\n");
2495  goto fail;
2496  }
2497  break;
2498  case SOF0:
2499  case SOF1:
2500  if (start_code == SOF0)
2502  else
2504  s->lossless = 0;
2505  s->ls = 0;
2506  s->progressive = 0;
2507  if ((ret = ff_mjpeg_decode_sof(s)) < 0)
2508  goto fail;
2509  break;
2510  case SOF2:
2512  s->lossless = 0;
2513  s->ls = 0;
2514  s->progressive = 1;
2515  if ((ret = ff_mjpeg_decode_sof(s)) < 0)
2516  goto fail;
2517  break;
2518  case SOF3:
2520 #if FF_API_CODEC_PROPS
2524 #endif
2525  s->lossless = 1;
2526  s->ls = 0;
2527  s->progressive = 0;
2528  if ((ret = ff_mjpeg_decode_sof(s)) < 0)
2529  goto fail;
2530  break;
2531  case SOF55:
2533 #if FF_API_CODEC_PROPS
2537 #endif
2538  s->lossless = 1;
2539  s->ls = 1;
2540  s->progressive = 0;
2541  if ((ret = ff_mjpeg_decode_sof(s)) < 0)
2542  goto fail;
2543  break;
2544  case LSE:
2545  if (!CONFIG_JPEGLS_DECODER ||
2546  (ret = ff_jpegls_decode_lse(s)) < 0)
2547  goto fail;
2548  if (ret == 1)
2549  goto redo_for_pal8;
2550  break;
2551  case EOI:
2552 eoi_parser:
2553  if (!avctx->hwaccel &&
2554  s->progressive && s->cur_scan && s->got_picture)
2556  s->cur_scan = 0;
2557  if (!s->got_picture) {
2558  av_log(avctx, AV_LOG_WARNING,
2559  "Found EOI before any SOF, ignoring\n");
2560  break;
2561  }
2562  if (s->interlaced) {
2563  s->bottom_field ^= 1;
2564  /* if not bottom field, do not output image yet */
2565  if (s->bottom_field == !s->interlace_polarity)
2566  break;
2567  }
2568  if (avctx->hwaccel) {
2569  ret = FF_HW_SIMPLE_CALL(avctx, end_frame);
2570  if (ret < 0)
2571  return ret;
2572 
2573  av_freep(&s->hwaccel_picture_private);
2574  }
2575  if ((ret = av_frame_ref(frame, s->picture_ptr)) < 0)
2576  return ret;
2577  if (s->lossless)
2578  frame->flags |= AV_FRAME_FLAG_LOSSLESS;
2579  *got_frame = 1;
2580  s->got_picture = 0;
2581 
2582  if (!s->lossless && avctx->debug & FF_DEBUG_QP) {
2583  int qp = FFMAX3(s->qscale[0],
2584  s->qscale[1],
2585  s->qscale[2]);
2586 
2587  av_log(avctx, AV_LOG_DEBUG, "QP: %d\n", qp);
2588  }
2589 
2590  goto the_end;
2591  case SOS:
2592  s->cur_scan++;
2593 
2594  if ((ret = ff_mjpeg_decode_sos(s)) < 0 &&
2595  (avctx->err_recognition & AV_EF_EXPLODE))
2596  goto fail;
2597  break;
2598  case DRI:
2599  if ((ret = mjpeg_decode_dri(s)) < 0)
2600  return ret;
2601  break;
2602  case SOF5:
2603  case SOF6:
2604  case SOF7:
2605  case SOF9:
2606  case SOF10:
2607  case SOF11:
2608  case SOF13:
2609  case SOF14:
2610  case SOF15:
2611  case JPG:
2612  av_log(avctx, AV_LOG_ERROR,
2613  "mjpeg: unsupported coding type (%x)\n", start_code);
2614  break;
2615  }
2616 
2617  if (avctx->skip_frame == AVDISCARD_ALL) {
2618  switch (start_code) {
2619  case SOF0:
2620  case SOF1:
2621  case SOF2:
2622  case SOF3:
2623  case SOF55:
2624  s->got_picture = 0;
2625  goto the_end_no_picture;
2626  }
2627  }
2628 
2629 skip:
2630  /* eof process start code */
2631  buf_ptr += bytestream2_tell(&s->gB);
2632  av_log(avctx, AV_LOG_DEBUG,
2633  "marker parser used %d bytes\n",
2634  bytestream2_tell(&s->gB));
2635  }
2636  if (s->got_picture && s->cur_scan) {
2637  av_log(avctx, AV_LOG_WARNING, "EOI missing, emulating\n");
2638  goto eoi_parser;
2639  }
2640  av_log(avctx, AV_LOG_FATAL, "No JPEG data found in image\n");
2641  return AVERROR_INVALIDDATA;
2642 fail:
2643  s->got_picture = 0;
2644  return ret;
2645 the_end:
2646 
2647  is16bit = av_pix_fmt_desc_get(avctx->pix_fmt)->comp[0].step > 1;
2648 
2649  if (AV_RB32(s->upscale_h)) {
2650  int p;
2652  avctx->pix_fmt == AV_PIX_FMT_YUV444P ||
2653  avctx->pix_fmt == AV_PIX_FMT_YUVJ440P ||
2654  avctx->pix_fmt == AV_PIX_FMT_YUV440P ||
2655  avctx->pix_fmt == AV_PIX_FMT_YUVA444P ||
2656  avctx->pix_fmt == AV_PIX_FMT_YUVJ422P ||
2657  avctx->pix_fmt == AV_PIX_FMT_YUV422P ||
2658  avctx->pix_fmt == AV_PIX_FMT_YUVJ420P ||
2659  avctx->pix_fmt == AV_PIX_FMT_YUV420P ||
2660  avctx->pix_fmt == AV_PIX_FMT_YUV420P16||
2661  avctx->pix_fmt == AV_PIX_FMT_YUVA420P ||
2662  avctx->pix_fmt == AV_PIX_FMT_YUVA420P16||
2663  avctx->pix_fmt == AV_PIX_FMT_GBRP ||
2664  avctx->pix_fmt == AV_PIX_FMT_GBRAP
2665  );
2666  ret = av_pix_fmt_get_chroma_sub_sample(avctx->pix_fmt, &hshift, &vshift);
2667  if (ret)
2668  return ret;
2669 
2670  av_assert0(s->nb_components == av_pix_fmt_count_planes(s->picture_ptr->format));
2671  for (p = 0; p < s->nb_components; p++) {
2672  uint8_t *line = s->picture_ptr->data[p];
2673  int w = s->width;
2674  int h = s->height;
2675  if (!s->upscale_h[p])
2676  continue;
2677  if (p == 1 || p == 2) {
2678  w = AV_CEIL_RSHIFT(w, hshift);
2679  h = AV_CEIL_RSHIFT(h, vshift);
2680  }
2681  if (s->upscale_v[p] == 1)
2682  h = (h + 1) >> 1;
2683  av_assert0(w > 0);
2684  for (int i = 0; i < h; i++) {
2685  if (s->upscale_h[p] == 1) {
2686  if (is16bit) ((uint16_t*)line)[w - 1] = ((uint16_t*)line)[(w - 1) / 2];
2687  else line[w - 1] = line[(w - 1) / 2];
2688  for (index = w - 2; index > 0; index--) {
2689  if (is16bit)
2690  ((uint16_t*)line)[index] = (((uint16_t*)line)[index / 2] + ((uint16_t*)line)[(index + 1) / 2]) >> 1;
2691  else
2692  line[index] = (line[index / 2] + line[(index + 1) / 2]) >> 1;
2693  }
2694  } else if (s->upscale_h[p] == 2) {
2695  if (is16bit) {
2696  ((uint16_t*)line)[w - 1] = ((uint16_t*)line)[(w - 1) / 3];
2697  if (w > 1)
2698  ((uint16_t*)line)[w - 2] = ((uint16_t*)line)[w - 1];
2699  } else {
2700  line[w - 1] = line[(w - 1) / 3];
2701  if (w > 1)
2702  line[w - 2] = line[w - 1];
2703  }
2704  for (index = w - 3; index > 0; index--) {
2705  line[index] = (line[index / 3] + line[(index + 1) / 3] + line[(index + 2) / 3] + 1) / 3;
2706  }
2707  } else if (s->upscale_h[p] == 4) {
2708  if (is16bit) {
2709  uint16_t *line16 = (uint16_t *) line;
2710  line16[w - 1] = line16[(w - 1) >> 2];
2711  if (w > 1)
2712  line16[w - 2] = (line16[(w - 1) >> 2] * 3 + line16[(w - 2) >> 2]) >> 2;
2713  if (w > 2)
2714  line16[w - 3] = (line16[(w - 1) >> 2] + line16[(w - 2) >> 2]) >> 1;
2715  } else {
2716  line[w - 1] = line[(w - 1) >> 2];
2717  if (w > 1)
2718  line[w - 2] = (line[(w - 1) >> 2] * 3 + line[(w - 2) >> 2]) >> 2;
2719  if (w > 2)
2720  line[w - 3] = (line[(w - 1) >> 2] + line[(w - 2) >> 2]) >> 1;
2721  }
2722  for (index = w - 4; index > 0; index--)
2723  line[index] = (line[(index + 3) >> 2] + line[(index + 2) >> 2]
2724  + line[(index + 1) >> 2] + line[index >> 2]) >> 2;
2725  }
2726  line += s->linesize[p];
2727  }
2728  }
2729  }
2730  if (AV_RB32(s->upscale_v)) {
2731  int p;
2733  avctx->pix_fmt == AV_PIX_FMT_YUV444P ||
2734  avctx->pix_fmt == AV_PIX_FMT_YUVJ422P ||
2735  avctx->pix_fmt == AV_PIX_FMT_YUV422P ||
2736  avctx->pix_fmt == AV_PIX_FMT_YUVJ420P ||
2737  avctx->pix_fmt == AV_PIX_FMT_YUV420P ||
2738  avctx->pix_fmt == AV_PIX_FMT_YUV440P ||
2739  avctx->pix_fmt == AV_PIX_FMT_YUVJ440P ||
2740  avctx->pix_fmt == AV_PIX_FMT_YUVA444P ||
2741  avctx->pix_fmt == AV_PIX_FMT_YUVA420P ||
2742  avctx->pix_fmt == AV_PIX_FMT_YUVA420P16||
2743  avctx->pix_fmt == AV_PIX_FMT_GBRP ||
2744  avctx->pix_fmt == AV_PIX_FMT_GBRAP
2745  );
2746  ret = av_pix_fmt_get_chroma_sub_sample(avctx->pix_fmt, &hshift, &vshift);
2747  if (ret)
2748  return ret;
2749 
2750  av_assert0(s->nb_components == av_pix_fmt_count_planes(s->picture_ptr->format));
2751  for (p = 0; p < s->nb_components; p++) {
2752  uint8_t *dst;
2753  int w = s->width;
2754  int h = s->height;
2755  if (!s->upscale_v[p])
2756  continue;
2757  if (p == 1 || p == 2) {
2758  w = AV_CEIL_RSHIFT(w, hshift);
2759  h = AV_CEIL_RSHIFT(h, vshift);
2760  }
2761  dst = &((uint8_t *)s->picture_ptr->data[p])[(h - 1) * s->linesize[p]];
2762  for (int i = h - 1; i; i--) {
2763  uint8_t *src1 = &((uint8_t *)s->picture_ptr->data[p])[i * s->upscale_v[p] / (s->upscale_v[p] + 1) * s->linesize[p]];
2764  uint8_t *src2 = &((uint8_t *)s->picture_ptr->data[p])[(i + 1) * s->upscale_v[p] / (s->upscale_v[p] + 1) * s->linesize[p]];
2765  if (s->upscale_v[p] != 2 && (src1 == src2 || i == h - 1)) {
2766  memcpy(dst, src1, w);
2767  } else {
2768  for (index = 0; index < w; index++)
2769  dst[index] = (src1[index] + src2[index]) >> 1;
2770  }
2771  dst -= s->linesize[p];
2772  }
2773  }
2774  }
2775  if (s->flipped && !s->rgb) {
2776  ret = av_pix_fmt_get_chroma_sub_sample(avctx->pix_fmt, &hshift, &vshift);
2777  if (ret)
2778  return ret;
2779 
2780  av_assert0(s->nb_components == av_pix_fmt_count_planes(frame->format));
2781  for (index = 0; index < s->nb_components; index++) {
2782  int h = frame->height;
2783  if (index && index < 3)
2784  h = AV_CEIL_RSHIFT(h, vshift);
2785  if (frame->data[index]) {
2786  frame->data[index] += (h - 1) * frame->linesize[index];
2787  frame->linesize[index] *= -1;
2788  }
2789  }
2790  }
2791 
2792  if (avctx->pix_fmt == AV_PIX_FMT_GBRP) {
2793  av_assert0(s->nb_components == 3);
2794  FFSWAP(uint8_t *, frame->data[0], frame->data[2]);
2795  FFSWAP(uint8_t *, frame->data[0], frame->data[1]);
2796  FFSWAP(int, frame->linesize[0], frame->linesize[2]);
2797  FFSWAP(int, frame->linesize[0], frame->linesize[1]);
2798  }
2799 
2800  if (s->adobe_transform == 0 && avctx->pix_fmt == AV_PIX_FMT_GBRAP) {
2801  int w = s->picture_ptr->width;
2802  int h = s->picture_ptr->height;
2803  av_assert0(s->nb_components == 4);
2804  for (int i = 0; i < h; i++) {
2805  int j;
2806  uint8_t *dst[4];
2807  for (index = 0; index < 4; index++) {
2808  dst[index] = s->picture_ptr->data[index]
2809  + s->picture_ptr->linesize[index]*i;
2810  }
2811  for (j = 0; j < w; j++) {
2812  int k = dst[3][j];
2813  int r = dst[0][j] * k;
2814  int g = dst[1][j] * k;
2815  int b = dst[2][j] * k;
2816  dst[0][j] = g * 257 >> 16;
2817  dst[1][j] = b * 257 >> 16;
2818  dst[2][j] = r * 257 >> 16;
2819  }
2820  memset(dst[3], 255, w);
2821  }
2822  }
2823  if (s->adobe_transform == 2 && avctx->pix_fmt == AV_PIX_FMT_YUVA444P) {
2824  int w = s->picture_ptr->width;
2825  int h = s->picture_ptr->height;
2826  av_assert0(s->nb_components == 4);
2827  for (int i = 0; i < h; i++) {
2828  int j;
2829  uint8_t *dst[4];
2830  for (index = 0; index < 4; index++) {
2831  dst[index] = s->picture_ptr->data[index]
2832  + s->picture_ptr->linesize[index]*i;
2833  }
2834  for (j = 0; j < w; j++) {
2835  int k = dst[3][j];
2836  int r = (255 - dst[0][j]) * k;
2837  int g = (128 - dst[1][j]) * k;
2838  int b = (128 - dst[2][j]) * k;
2839  dst[0][j] = r * 257 >> 16;
2840  dst[1][j] = (g * 257 >> 16) + 128;
2841  dst[2][j] = (b * 257 >> 16) + 128;
2842  }
2843  memset(dst[3], 255, w);
2844  }
2845  }
2846 
2847  if (s->stereo3d) {
2849  if (stereo) {
2850  stereo->type = s->stereo3d->type;
2851  stereo->flags = s->stereo3d->flags;
2852  }
2853  av_freep(&s->stereo3d);
2854  }
2855 
2856  if (s->iccnum != 0 && s->iccnum == s->iccread) {
2857  AVFrameSideData *sd;
2858  size_t offset = 0;
2859  int total_size = 0;
2860 
2861  /* Sum size of all parts. */
2862  for (int i = 0; i < s->iccnum; i++)
2863  total_size += s->iccentries[i].length;
2864 
2865  ret = ff_frame_new_side_data(avctx, frame, AV_FRAME_DATA_ICC_PROFILE, total_size, &sd);
2866  if (ret < 0) {
2867  av_log(avctx, AV_LOG_ERROR, "Could not allocate frame side data\n");
2868  return ret;
2869  }
2870 
2871  if (sd) {
2872  /* Reassemble the parts, which are now in-order. */
2873  for (int i = 0; i < s->iccnum; i++) {
2874  memcpy(sd->data + offset, s->iccentries[i].data, s->iccentries[i].length);
2875  offset += s->iccentries[i].length;
2876  }
2877  }
2878  }
2879 
2880  if (s->exif_metadata.entries) {
2881  ret = ff_decode_exif_attach_ifd(avctx, frame, &s->exif_metadata);
2882  av_exif_free(&s->exif_metadata);
2883  if (ret < 0)
2884  av_log(avctx, AV_LOG_WARNING, "couldn't attach EXIF metadata\n");
2885  }
2886 
2887  if (avctx->codec_id != AV_CODEC_ID_SMVJPEG &&
2888  (avctx->codec_tag == MKTAG('A', 'V', 'R', 'n') ||
2889  avctx->codec_tag == MKTAG('A', 'V', 'D', 'J')) &&
2890  avctx->coded_height > s->orig_height) {
2891  frame->height = AV_CEIL_RSHIFT(avctx->coded_height, avctx->lowres);
2892  frame->crop_top = frame->height - avctx->height;
2893  }
2894 
2895 the_end_no_picture:
2896  av_log(avctx, AV_LOG_DEBUG, "decode frame unused %td bytes\n",
2897  buf_end - buf_ptr);
2898  return buf_ptr - buf;
2899 }
2900 
2901 int ff_mjpeg_decode_frame(AVCodecContext *avctx, AVFrame *frame, int *got_frame,
2902  AVPacket *avpkt)
2903 {
2904  return ff_mjpeg_decode_frame_from_buf(avctx, frame, got_frame,
2905  avpkt, avpkt->data, avpkt->size);
2906 }
2907 
2908 
2909 /* mxpeg may call the following function (with a blank MJpegDecodeContext)
2910  * even without having called ff_mjpeg_decode_init(). */
/* Decoder close callback: release every buffer, frame, VLC table and
 * per-component array owned by the context.
 * NOTE(review): the signature line was lost in extraction; per the comment
 * above, mxpeg may call this on a blank context, so all frees must be
 * safe on NULL members. */
2912 {
2913  MJpegDecodeContext *s = avctx->priv_data;
2914  int i, j;
2915 
/* Informational only: a single field decoded but never output. */
2916  if (s->interlaced && s->bottom_field == !s->interlace_polarity && s->got_picture && !avctx->frame_num) {
2917  av_log(avctx, AV_LOG_INFO, "Single field\n");
2918  }
2919 
2920  av_frame_free(&s->picture);
2921  s->picture_ptr = NULL;
2922 
2923  av_frame_free(&s->smv_frame);
2924 
2925  av_freep(&s->buffer);
2926  av_freep(&s->stereo3d);
2927  av_freep(&s->ljpeg_buffer);
2928  s->ljpeg_buffer_size = 0;
2929 
2930  for (i = 0; i < 3; i++) {
2931  for (j = 0; j < 4; j++)
2932  ff_vlc_free(&s->vlcs[i][j]);
2933  }
2934  for (i = 0; i < MAX_COMPONENTS; i++) {
2935  av_freep(&s->blocks[i]);
2936  av_freep(&s->last_nnz[i]);
2937  }
2938  av_exif_free(&s->exif_metadata);
2939 
/* NOTE(review): doxygen line 2940 was lost in extraction here —
 * presumably the ICC-profile cleanup call; verify against upstream. */
2941 
2942  av_freep(&s->hwaccel_picture_private);
2943  av_freep(&s->jls_state);
2944 
2945  return 0;
2946 }
2947 
/* Decoder flush callback: drop any partially decoded picture state and
 * reset the SMV stacked-frame cursor.
 * NOTE(review): the signature line was lost in extraction. */
2949 {
2950  MJpegDecodeContext *s = avctx->priv_data;
2951  s->got_picture = 0;
2952 
2953  s->smv_next_frame = 0;
2954  av_frame_unref(s->smv_frame);
2955 }
2956 
2957 #if CONFIG_MJPEG_DECODER
/* Deprecated option table kept only while the extern_huff API survives. */
2958 #if FF_API_MJPEG_EXTERN_HUFF
2959 #define OFFSET(x) offsetof(MJpegDecodeContext, x)
2960 #define VD AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_DECODING_PARAM
2961 static const AVOption options[] = {
2962  { "extern_huff", "Use external huffman table.",
2963  OFFSET(extern_huff), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VD | AV_OPT_FLAG_DEPRECATED },
2964  { NULL },
2965 };
2966 #endif
2967 
/* AVClass describing the decoder for logging and option handling. */
2968 static const AVClass mjpegdec_class = {
2969  .class_name = "MJPEG decoder",
2970  .item_name = av_default_item_name,
2971 #if FF_API_MJPEG_EXTERN_HUFF
2972  .option = options,
2973 #endif
2974  .version = LIBAVUTIL_VERSION_INT,
2975 };
2976 
/* Codec registration table for the MJPEG decoder.
 * NOTE(review): doxygen lines 2983-2985 and 2992-2993 were lost in
 * extraction — presumably the init/close/decode callback entries and
 * the remainder of caps_internal; verify against upstream. */
2977 const FFCodec ff_mjpeg_decoder = {
2978  .p.name = "mjpeg",
2979  CODEC_LONG_NAME("MJPEG (Motion JPEG)"),
2980  .p.type = AVMEDIA_TYPE_VIDEO,
2981  .p.id = AV_CODEC_ID_MJPEG,
2982  .priv_data_size = sizeof(MJpegDecodeContext),
2986  .flush = decode_flush,
2987  .p.capabilities = AV_CODEC_CAP_DR1,
2988  .p.max_lowres = 3,
2989  .p.priv_class = &mjpegdec_class,
2990  .p.profiles = NULL_IF_CONFIG_SMALL(ff_mjpeg_profiles),
2991  .caps_internal = FF_CODEC_CAP_INIT_CLEANUP |
2994  .hw_configs = (const AVCodecHWConfigInternal *const []) {
2995 #if CONFIG_MJPEG_NVDEC_HWACCEL
2996  HWACCEL_NVDEC(mjpeg),
2997 #endif
2998 #if CONFIG_MJPEG_VAAPI_HWACCEL
2999  HWACCEL_VAAPI(mjpeg),
3000 #endif
3001  NULL
3002  },
3003 };
3005 #if CONFIG_THP_DECODER
/* Codec registration table for the Nintendo Gamecube THP decoder, which
 * shares the MJPEG decode context.
 * NOTE(review): doxygen lines 3012-3014 were lost in extraction —
 * presumably the init/close/decode callback entries; verify upstream. */
3006 const FFCodec ff_thp_decoder = {
3007  .p.name = "thp",
3008  CODEC_LONG_NAME("Nintendo Gamecube THP video"),
3009  .p.type = AVMEDIA_TYPE_VIDEO,
3010  .p.id = AV_CODEC_ID_THP,
3011  .priv_data_size = sizeof(MJpegDecodeContext),
3015  .flush = decode_flush,
3016  .p.capabilities = AV_CODEC_CAP_DR1,
3017  .p.max_lowres = 3,
3018  .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
3019 };
3020 #endif
3021 
3022 #if CONFIG_SMVJPEG_DECODER
3023 // SMV JPEG just stacks several output frames into one JPEG picture
3024 // we handle that by setting up the cropping parameters appropriately
3025 static void smv_process_frame(AVCodecContext *avctx, AVFrame *frame)
3026 {
3027  MJpegDecodeContext *s = avctx->priv_data;
3028 
3029  av_assert0((s->smv_next_frame + 1) * avctx->height <= avctx->coded_height);
3030 
3031  frame->width = avctx->coded_width;
3032  frame->height = avctx->coded_height;
3033  frame->crop_top = FFMIN(s->smv_next_frame * avctx->height, frame->height);
3034  frame->crop_bottom = frame->height - (s->smv_next_frame + 1) * avctx->height;
3035 
3036  if (s->smv_frame->pts != AV_NOPTS_VALUE)
3037  s->smv_frame->pts += s->smv_frame->duration;
3038  s->smv_next_frame = (s->smv_next_frame + 1) % s->smv_frames_per_jpeg;
3039 
3040  if (s->smv_next_frame == 0)
3041  av_frame_unref(s->smv_frame);
3042 }
3043 
/* receive_frame callback for SMV JPEG: decode a whole stacked JPEG into
 * s->smv_frame once, then hand out one cropped sub-frame per call until
 * the stack is exhausted.
 * Returns 0, AVERROR(EAGAIN) when more input is needed, or a negative
 * AVERROR on failure. */
3044 static int smvjpeg_receive_frame(AVCodecContext *avctx, AVFrame *frame)
3045 {
3046  MJpegDecodeContext *s = avctx->priv_data;
3047  AVPacket *const pkt = avctx->internal->in_pkt;
3048  int got_frame = 0;
3049  int ret;
3050 
/* Sub-frames of the previously decoded picture are still pending. */
3051  if (s->smv_next_frame > 0)
3052  goto return_frame;
3053 
3054  ret = ff_decode_get_packet(avctx, pkt);
3055  if (ret < 0)
3056  return ret;
3057 
3058  av_frame_unref(s->smv_frame);
3059 
3060  ret = ff_mjpeg_decode_frame(avctx, s->smv_frame, &got_frame, pkt);
3061  s->smv_frame->pkt_dts = pkt->dts;
/* NOTE(review): doxygen line 3062 was lost in extraction here —
 * presumably the packet unref after decoding; verify against upstream. */
3063  if (ret < 0)
3064  return ret;
3065 
3066  if (!got_frame)
3067  return AVERROR(EAGAIN);
3068 
3069  // packet duration covers all the frames in the packet
3070  s->smv_frame->duration /= s->smv_frames_per_jpeg;
3071 
3072 return_frame:
3073  av_assert0(s->smv_frame->buf[0]);
3074  ret = av_frame_ref(frame, s->smv_frame);
3075  if (ret < 0)
3076  return ret;
3077 
3078  smv_process_frame(avctx, frame);
3079  return 0;
3080 }
3081 
/* Codec registration table for the SMV JPEG decoder.
 * NOTE(review): doxygen lines 3088-3089 and 3094 were lost in extraction
 * — presumably the init/close entries and the remainder of
 * caps_internal; verify against upstream. */
3082 const FFCodec ff_smvjpeg_decoder = {
3083  .p.name = "smvjpeg",
3084  CODEC_LONG_NAME("SMV JPEG"),
3085  .p.type = AVMEDIA_TYPE_VIDEO,
3086  .p.id = AV_CODEC_ID_SMVJPEG,
3087  .priv_data_size = sizeof(MJpegDecodeContext),
3090  FF_CODEC_RECEIVE_FRAME_CB(smvjpeg_receive_frame),
3091  .flush = decode_flush,
3092  .p.capabilities = AV_CODEC_CAP_DR1,
3093  .caps_internal = FF_CODEC_CAP_EXPORTS_CROPPING |
3095 };
3096 #endif
FF_ALLOCZ_TYPED_ARRAY
#define FF_ALLOCZ_TYPED_ARRAY(p, nelem)
Definition: internal.h:78
flags
const SwsFlags flags[]
Definition: swscale.c:71
hwconfig.h
av_packet_unref
void av_packet_unref(AVPacket *pkt)
Wipe the packet.
Definition: packet.c:432
AVCodecContext::hwaccel
const struct AVHWAccel * hwaccel
Hardware accelerator in use.
Definition: avcodec.h:1417
FF_ENABLE_DEPRECATION_WARNINGS
#define FF_ENABLE_DEPRECATION_WARNINGS
Definition: internal.h:73
ff_decode_get_packet
int ff_decode_get_packet(AVCodecContext *avctx, AVPacket *pkt)
Called by decoders to get the next packet for decoding.
Definition: decode.c:251
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:216
AV_PIX_FMT_CUDA
@ AV_PIX_FMT_CUDA
HW acceleration through CUDA.
Definition: pixfmt.h:260
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:71
name
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf default minimum maximum flags name is the option name
Definition: writing_filters.txt:88
jpegtables.h
mjpeg.h
level
uint8_t level
Definition: svq3.c:208
AV_EF_EXPLODE
#define AV_EF_EXPLODE
abort decoding on minor error detection
Definition: defs.h:51
FF_CODEC_CAP_INIT_CLEANUP
#define FF_CODEC_CAP_INIT_CLEANUP
The codec allows calling the close function for deallocation even if the init function returned a fai...
Definition: codec_internal.h:42
blockdsp.h
get_bits_left
static int get_bits_left(GetBitContext *gb)
Definition: get_bits.h:688
r
const char * r
Definition: vf_curves.c:127
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
decode_slice
static int decode_slice(AVCodecContext *c, void *arg)
Definition: ffv1dec.c:377
opt.h
bytestream2_get_bytes_left
static av_always_inline int bytestream2_get_bytes_left(const GetByteContext *g)
Definition: bytestream.h:158
av_exif_parse_buffer
int av_exif_parse_buffer(void *logctx, const uint8_t *buf, size_t size, AVExifMetadata *ifd, enum AVExifHeaderMode header_mode)
Decodes the EXIF data provided in the buffer and writes it into the struct *ifd.
Definition: exif.c:881
AVCodecContext::colorspace
enum AVColorSpace colorspace
YUV colorspace type.
Definition: avcodec.h:667
ff_get_format
int ff_get_format(AVCodecContext *avctx, const enum AVPixelFormat *fmt)
Select the (possibly hardware accelerated) pixel format.
Definition: decode.c:1210
out
static FILE * out
Definition: movenc.c:55
put_bytes_output
static int put_bytes_output(const PutBitContext *s)
Definition: put_bits.h:99
SOS
@ SOS
Definition: mjpeg.h:72
mjpeg_copy_block
static av_always_inline void mjpeg_copy_block(MJpegDecodeContext *s, uint8_t *dst, const uint8_t *src, int linesize, int lowres)
Definition: mjpegdec.c:1414
is
The official guide to swscale for confused that is
Definition: swscale.txt:28
APP1
@ APP1
Definition: mjpeg.h:80
bytestream2_tell
static av_always_inline int bytestream2_tell(const GetByteContext *g)
Definition: bytestream.h:192
av_pix_fmt_desc_get
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:3456
ZERO_RUN
#define ZERO_RUN
Definition: mjpegdec.c:999
SOF0
@ SOF0
Definition: mjpeg.h:39
src1
const pixel * src1
Definition: h264pred_template.c:420
AVCodecContext::err_recognition
int err_recognition
Error recognition; may misdetect some more or less valid parts as errors.
Definition: avcodec.h:1410
GET_VLC
#define GET_VLC(code, name, gb, table, bits, max_depth)
If the vlc code is invalid and max_depth=1, then no bits will be removed.
Definition: get_bits.h:573
bytestream2_skipu
static av_always_inline void bytestream2_skipu(GetByteContext *g, unsigned int size)
Definition: bytestream.h:174
ff_smvjpeg_decoder
const FFCodec ff_smvjpeg_decoder
init_put_bits
static void init_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size)
Initialize the PutBitContext s.
Definition: put_bits.h:62
get_bits_count
static int get_bits_count(const GetBitContext *s)
Definition: get_bits.h:254
init_idct
static void init_idct(AVCodecContext *avctx)
Definition: mjpegdec.c:114
mask
int mask
Definition: mediacodecdec_common.c:154
RST7
@ RST7
Definition: mjpeg.h:68
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:64
mjpegdec.h
start_code
static const uint8_t start_code[]
Definition: videotoolboxenc.c:230
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:427
put_bits
static void put_bits(Jpeg2000EncoderContext *s, int val, int n)
put n times val bit
Definition: j2kenc.c:154
AV_PIX_FMT_YUVA420P16
#define AV_PIX_FMT_YUVA420P16
Definition: pixfmt.h:595
AVCOL_RANGE_JPEG
@ AVCOL_RANGE_JPEG
Full range content.
Definition: pixfmt.h:777
ff_mjpeg_decoder
const FFCodec ff_mjpeg_decoder
internal.h
AVPacket::data
uint8_t * data
Definition: packet.h:588
SOF11
@ SOF11
Definition: mjpeg.h:50
AVCodecContext::field_order
enum AVFieldOrder field_order
Field order.
Definition: avcodec.h:690
AVOption
AVOption.
Definition: opt.h:429
b
#define b
Definition: input.c:42
ljpeg_decode_rgb_scan
static int ljpeg_decode_rgb_scan(MJpegDecodeContext *s)
Definition: mjpegdec.c:1083
jpeglsdec.h
data
const char data[16]
Definition: mxf.c:149
AVComponentDescriptor::step
int step
Number of elements between 2 horizontally consecutive pixels.
Definition: pixdesc.h:40
ff_mjpeg_val_dc
const uint8_t ff_mjpeg_val_dc[]
Definition: jpegtabs.h:34
FFCodec
Definition: codec_internal.h:127
AV_LOG_VERBOSE
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:226
FF_HW_SIMPLE_CALL
#define FF_HW_SIMPLE_CALL(avctx, function)
Definition: hwaccel_internal.h:176
AV_PIX_FMT_BGR24
@ AV_PIX_FMT_BGR24
packed RGB 8:8:8, 24bpp, BGRBGR...
Definition: pixfmt.h:76
AV_PIX_FMT_YUV440P
@ AV_PIX_FMT_YUV440P
planar YUV 4:4:0 (1 Cr & Cb sample per 1x2 Y samples)
Definition: pixfmt.h:106
AV_CODEC_ID_MEDIA100
@ AV_CODEC_ID_MEDIA100
Definition: codec_id.h:322
UPDATE_CACHE
#define UPDATE_CACHE(name, gb)
Definition: get_bits.h:213
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
ff_mjpeg_bits_ac_chrominance
const uint8_t ff_mjpeg_bits_ac_chrominance[]
Definition: jpegtabs.h:66
AV_CODEC_ID_THP
@ AV_CODEC_ID_THP
Definition: codec_id.h:152
AV_CODEC_ID_MXPEG
@ AV_CODEC_ID_MXPEG
Definition: codec_id.h:198
ff_set_dimensions
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
Definition: utils.c:91
init_get_bits
static int init_get_bits(GetBitContext *s, const uint8_t *buffer, int bit_size)
Initialize GetBitContext.
Definition: get_bits.h:517
ff_idctdsp_init
av_cold void ff_idctdsp_init(IDCTDSPContext *c, AVCodecContext *avctx)
Definition: idctdsp.c:228
FF_DEBUG_PICT_INFO
#define FF_DEBUG_PICT_INFO
Definition: avcodec.h:1387
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:448
AV_FRAME_FLAG_TOP_FIELD_FIRST
#define AV_FRAME_FLAG_TOP_FIELD_FIRST
A flag to mark frames where the top field is displayed first if the content is interlaced.
Definition: frame.h:655
APP15
@ APP15
Definition: mjpeg.h:94
GET_CACHE
#define GET_CACHE(name, gb)
Definition: get_bits.h:251
ff_permute_scantable
av_cold void ff_permute_scantable(uint8_t dst[64], const uint8_t src[64], const uint8_t permutation[64])
Definition: idctdsp.c:30
close
static av_cold void close(AVCodecParserContext *s)
Definition: apv_parser.c:197
AV_STEREO3D_SIDEBYSIDE
@ AV_STEREO3D_SIDEBYSIDE
Views are next to each other.
Definition: stereo3d.h:64
bytestream2_skip
static av_always_inline void bytestream2_skip(GetByteContext *g, unsigned int size)
Definition: bytestream.h:168
av_pix_fmt_count_planes
int av_pix_fmt_count_planes(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:3496
AVCOL_SPC_BT470BG
@ AVCOL_SPC_BT470BG
also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM / IEC 61966-2-4 xvYCC601
Definition: pixfmt.h:706
rgb
Definition: rpzaenc.c:60
ff_mjpeg_decode_dht
int ff_mjpeg_decode_dht(MJpegDecodeContext *s)
Definition: mjpegdec.c:250
ff_copy_bits
void ff_copy_bits(PutBitContext *pb, const uint8_t *src, int length)
Copy the content of src to the bitstream.
Definition: bitstream.c:49
shift_output
static void shift_output(MJpegDecodeContext *s, uint8_t *ptr, int linesize)
Definition: mjpegdec.c:1430
FFCodec::p
AVCodec p
The public AVCodec.
Definition: codec_internal.h:131
FFHWAccel
Definition: hwaccel_internal.h:34
AV_PIX_FMT_GBRAP
@ AV_PIX_FMT_GBRAP
planar GBRA 4:4:4:4 32bpp
Definition: pixfmt.h:212
AVCodecContext::codec
const struct AVCodec * codec
Definition: avcodec.h:448
ff_mjpeg_decode_init
av_cold int ff_mjpeg_decode_init(AVCodecContext *avctx)
Definition: mjpegdec.c:123
AVCodecContext::skip_frame
enum AVDiscard skip_frame
Skip decoding for selected frames.
Definition: avcodec.h:1674
fail
#define fail()
Definition: checkasm.h:221
AV_STEREO3D_2D
@ AV_STEREO3D_2D
Video is not stereoscopic (and metadata has to be there).
Definition: stereo3d.h:52
AV_PIX_FMT_YUVA444P16
#define AV_PIX_FMT_YUVA444P16
Definition: pixfmt.h:597
SOF3
@ SOF3
Definition: mjpeg.h:42
GetBitContext
Definition: get_bits.h:109
ff_mjpeg_decode_frame_from_buf
int ff_mjpeg_decode_frame_from_buf(AVCodecContext *avctx, AVFrame *frame, int *got_frame, const AVPacket *avpkt, const uint8_t *buf, const int buf_size)
Definition: mjpegdec.c:2396
mjpeg_decode_com
static int mjpeg_decode_com(MJpegDecodeContext *s)
Definition: mjpegdec.c:2153
init_default_huffman_tables
static int init_default_huffman_tables(MJpegDecodeContext *s)
Definition: mjpegdec.c:61
ff_mjpeg_find_marker
int ff_mjpeg_find_marker(const uint8_t **pbuf_ptr, const uint8_t *buf_end)
Definition: mjpegdec.c:2197
av_exif_free
void av_exif_free(AVExifMetadata *ifd)
Frees all resources associated with the given EXIF metadata struct.
Definition: exif.c:658
val
static double val(void *priv, double ch)
Definition: aeval.c:77
av_pix_fmt_get_chroma_sub_sample
int av_pix_fmt_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift)
Utility function to access log2_chroma_w log2_chroma_h from the pixel format AVPixFmtDescriptor.
Definition: pixdesc.c:3484
mjpeg_decode_scan_progressive_ac
static int mjpeg_decode_scan_progressive_ac(MJpegDecodeContext *s)
Definition: mjpegdec.c:1602
type
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf type
Definition: writing_filters.txt:86
AVCodecContext::coded_height
int coded_height
Definition: avcodec.h:615
AV_PIX_FMT_GRAY16
#define AV_PIX_FMT_GRAY16
Definition: pixfmt.h:522
ff_mjpeg_handle_restart
static int ff_mjpeg_handle_restart(MJpegDecodeContext *s, int *restart)
Definition: mjpegdec.h:216
av_frame_alloc
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:52
AV_PIX_FMT_YUVJ411P
@ AV_PIX_FMT_YUVJ411P
planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples) full scale (JPEG), deprecated in favor ...
Definition: pixfmt.h:283
ff_mjpeg_decode_sos
int ff_mjpeg_decode_sos(MJpegDecodeContext *s)
Definition: mjpegdec.c:1692
ff_mjpeg_profiles
const AVProfile ff_mjpeg_profiles[]
Definition: profiles.c:191
avassert.h
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:210
FF_ARRAY_ELEMS
#define FF_ARRAY_ELEMS(a)
Definition: sinewin_tablegen.c:29
av_cold
#define av_cold
Definition: attributes.h:111
decode_dc_progressive
static int decode_dc_progressive(MJpegDecodeContext *s, int16_t *block, int component, int dc_index, uint16_t *quant_matrix, int Al)
Definition: mjpegdec.c:902
AV_PIX_FMT_YUV422P16
#define AV_PIX_FMT_YUV422P16
Definition: pixfmt.h:551
init_get_bits8
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
Definition: get_bits.h:544
FF_CODEC_PROPERTY_LOSSLESS
#define FF_CODEC_PROPERTY_LOSSLESS
Definition: avcodec.h:1650
AV_PROFILE_MJPEG_HUFFMAN_BASELINE_DCT
#define AV_PROFILE_MJPEG_HUFFMAN_BASELINE_DCT
Definition: defs.h:173
COM
@ COM
Definition: mjpeg.h:111
AV_FRAME_FLAG_KEY
#define AV_FRAME_FLAG_KEY
A flag to mark frames that are keyframes.
Definition: frame.h:642
AV_FIELD_UNKNOWN
@ AV_FIELD_UNKNOWN
Definition: defs.h:212
bytestream2_init_writer
static av_always_inline void bytestream2_init_writer(PutByteContext *p, uint8_t *buf, int buf_size)
Definition: bytestream.h:147
AV_PIX_FMT_YUVJ422P
@ AV_PIX_FMT_YUVJ422P
planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV422P and setting col...
Definition: pixfmt.h:86
CLOSE_READER
#define CLOSE_READER(name, gb)
Definition: get_bits.h:189
SOF5
@ SOF5
Definition: mjpeg.h:44
AVCodecContext::extradata_size
int extradata_size
Definition: avcodec.h:523
FF_CODEC_DECODE_CB
#define FF_CODEC_DECODE_CB(func)
Definition: codec_internal.h:347
AV_STEREO3D_LINES
@ AV_STEREO3D_LINES
Views are packed per line, as if interlaced.
Definition: stereo3d.h:126
ff_blockdsp_init
av_cold void ff_blockdsp_init(BlockDSPContext *c)
Definition: blockdsp.c:58
s
#define s(width, name)
Definition: cbs_vp9.c:198
AV_PIX_FMT_YUVA420P
@ AV_PIX_FMT_YUVA420P
planar YUV 4:2:0, 20bpp, (1 Cr & Cb sample per 2x2 Y & A samples)
Definition: pixfmt.h:108
ff_mjpeg_should_restart
static int ff_mjpeg_should_restart(MJpegDecodeContext *s)
Definition: mjpegdec.h:198
parse_avid
static void parse_avid(MJpegDecodeContext *s, uint8_t *buf, int len)
Definition: mjpegdec.c:104
AV_PIX_FMT_YUV444P16
#define AV_PIX_FMT_YUV444P16
Definition: pixfmt.h:552
AV_CEIL_RSHIFT
#define AV_CEIL_RSHIFT(a, b)
Definition: common.h:60
g
const char * g
Definition: vf_curves.c:128
APP3
@ APP3
Definition: mjpeg.h:82
AV_GET_BUFFER_FLAG_REF
#define AV_GET_BUFFER_FLAG_REF
The decoder will keep a reference to the frame and may reuse it later.
Definition: avcodec.h:411
bytestream2_tell_p
static av_always_inline int bytestream2_tell_p(const PutByteContext *p)
Definition: bytestream.h:197
bits
uint8_t bits
Definition: vp3data.h:128
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:42
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:231
AV_PIX_FMT_YUV420P16
#define AV_PIX_FMT_YUV420P16
Definition: pixfmt.h:550
RST0
@ RST0
Definition: mjpeg.h:61
decode.h
reset_icc_profile
static void reset_icc_profile(MJpegDecodeContext *s)
Definition: mjpegdec.c:2382
ff_mjpeg_decode_end
av_cold int ff_mjpeg_decode_end(AVCodecContext *avctx)
Definition: mjpegdec.c:2911
mjpeg_find_raw_scan_data
static void mjpeg_find_raw_scan_data(MJpegDecodeContext *s, const uint8_t **pbuf_ptr, size_t *pbuf_size)
Definition: mjpegdec.c:2223
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:73
av_mallocz
#define av_mallocz(s)
Definition: tableprint_vlc.h:31
SOF55
@ SOF55
JPEG-LS.
Definition: mjpeg.h:103
PutBitContext
Definition: put_bits.h:50
CODEC_LONG_NAME
#define CODEC_LONG_NAME(str)
Definition: codec_internal.h:332
AV_PIX_FMT_YUVJ444P
@ AV_PIX_FMT_YUVJ444P
planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV444P and setting col...
Definition: pixfmt.h:87
AVCodecContext::codec_id
enum AVCodecID codec_id
Definition: avcodec.h:449
AVStereo3D::flags
int flags
Additional information about the frame packing.
Definition: stereo3d.h:212
mjpeg_parse_len
static int mjpeg_parse_len(MJpegDecodeContext *s, int *plen, const char *name)
Definition: mjpegdec.c:195
if
if(ret)
Definition: filter_design.txt:179
AVDISCARD_ALL
@ AVDISCARD_ALL
discard all
Definition: defs.h:232
AV_PIX_FMT_GBRP16
#define AV_PIX_FMT_GBRP16
Definition: pixfmt.h:561
AV_PIX_FMT_RGBA64
#define AV_PIX_FMT_RGBA64
Definition: pixfmt.h:529
LIBAVUTIL_VERSION_INT
#define LIBAVUTIL_VERSION_INT
Definition: version.h:85
ff_decode_exif_attach_ifd
int ff_decode_exif_attach_ifd(AVCodecContext *avctx, AVFrame *frame, const AVExifMetadata *ifd)
Definition: decode.c:2452
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:76
av_clip_int16
#define av_clip_int16
Definition: common.h:115
AV_PIX_FMT_BGR48
#define AV_PIX_FMT_BGR48
Definition: pixfmt.h:530
NULL
#define NULL
Definition: coverity.c:32
mjpeg_idct_scan_progressive_ac
static void mjpeg_idct_scan_progressive_ac(MJpegDecodeContext *s)
Definition: mjpegdec.c:1657
copy_block2
static void copy_block2(uint8_t *dst, const uint8_t *src, ptrdiff_t dstStride, ptrdiff_t srcStride, int h)
Definition: copy_block.h:27
AVERROR_PATCHWELCOME
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:64
run
uint8_t run
Definition: svq3.c:207
AV_EXIF_TIFF_HEADER
@ AV_EXIF_TIFF_HEADER
The TIFF header starts with 0x49492a00, or 0x4d4d002a.
Definition: exif.h:63
hwaccel_internal.h
AV_PROFILE_MJPEG_HUFFMAN_EXTENDED_SEQUENTIAL_DCT
#define AV_PROFILE_MJPEG_HUFFMAN_EXTENDED_SEQUENTIAL_DCT
Definition: defs.h:174
AVRational
Rational number (pair of numerator and denominator).
Definition: rational.h:58
ff_mjpeg_decode_dqt
int ff_mjpeg_decode_dqt(MJpegDecodeContext *s)
Definition: mjpegdec.c:207
SOF13
@ SOF13
Definition: mjpeg.h:52
AVCodecContext::internal
struct AVCodecInternal * internal
Private context used for internal data.
Definition: avcodec.h:474
AV_PIX_FMT_YUVJ420P
@ AV_PIX_FMT_YUVJ420P
planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV420P and setting col...
Definition: pixfmt.h:85
mjpeg_decode_dc
static int mjpeg_decode_dc(MJpegDecodeContext *s, int dc_index, int *val)
Definition: mjpegdec.c:838
av_default_item_name
const char * av_default_item_name(void *ptr)
Return the context name.
Definition: log.c:242
get_bits1
static unsigned int get_bits1(GetBitContext *s)
Definition: get_bits.h:391
AV_PICTURE_TYPE_I
@ AV_PICTURE_TYPE_I
Intra.
Definition: avutil.h:278
profiles.h
AV_FRAME_DATA_ICC_PROFILE
@ AV_FRAME_DATA_ICC_PROFILE
The data contains an ICC profile as an opaque octet buffer following the format described by ISO 1507...
Definition: frame.h:144
decode_block_progressive
static int decode_block_progressive(MJpegDecodeContext *s, int16_t *block, uint8_t *last_nnz, int ac_index, uint16_t *quant_matrix, int Ss, int Se, int Al, int *EOBRUN)
Definition: mjpegdec.c:919
options
Definition: swscale.c:44
LAST_SKIP_BITS
#define LAST_SKIP_BITS(name, gb, num)
Definition: get_bits.h:235
MJpegDecodeContext
Definition: mjpegdec.h:56
lowres
static int lowres
Definition: ffplay.c:332
ff_mjpeg_val_ac_chrominance
const uint8_t ff_mjpeg_val_ac_chrominance[]
Definition: jpegtabs.h:69
AV_PIX_FMT_GRAY8
@ AV_PIX_FMT_GRAY8
Y , 8bpp.
Definition: pixfmt.h:81
get_vlc2
static av_always_inline int get_vlc2(GetBitContext *s, const VLCElem *table, int bits, int max_depth)
Parse a vlc code.
Definition: get_bits.h:645
AV_PIX_FMT_ABGR
@ AV_PIX_FMT_ABGR
packed ABGR 8:8:8:8, 32bpp, ABGRABGR...
Definition: pixfmt.h:101
DRI
@ DRI
Definition: mjpeg.h:75
index
int index
Definition: gxfenc.c:90
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
mjpeg_decode_app
static int mjpeg_decode_app(MJpegDecodeContext *s, int start_code)
Definition: mjpegdec.c:1848
AV_CODEC_ID_MJPEGB
@ AV_CODEC_ID_MJPEGB
Definition: codec_id.h:60
ff_dlog
#define ff_dlog(a,...)
Definition: tableprint_vlc.h:28
PutByteContext
Definition: bytestream.h:37
AVCodecContext::lowres
int lowres
low resolution decoding, 1-> 1/2 size, 2->1/4 size
Definition: avcodec.h:1709
options
const OptionDef options[]
copy_mb
static void copy_mb(CinepakEncContext *s, uint8_t *a_data[4], int a_linesize[4], uint8_t *b_data[4], int b_linesize[4])
Definition: cinepakenc.c:506
ff_get_buffer
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Get a buffer for a frame.
Definition: decode.c:1747
init
int(* init)(AVBSFContext *ctx)
Definition: dts2pts.c:550
AV_PIX_FMT_RGB24
@ AV_PIX_FMT_RGB24
packed RGB 8:8:8, 24bpp, RGBRGB...
Definition: pixfmt.h:75
AV_CODEC_CAP_DR1
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() or get_encode_buffer() for allocating buffers and supports custom allocators.
Definition: codec.h:52
ff_mjpeg_val_ac_luminance
const uint8_t ff_mjpeg_val_ac_luminance[]
Definition: jpegtabs.h:42
AVPacket::size
int size
Definition: packet.h:589
dc
Tag MUST be and< 10hcoeff half pel interpolation filter coefficients, hcoeff[0] are the 2 middle coefficients[1] are the next outer ones and so on, resulting in a filter like:...eff[2], hcoeff[1], hcoeff[0], hcoeff[0], hcoeff[1], hcoeff[2] ... the sign of the coefficients is not explicitly stored but alternates after each coeff and coeff[0] is positive, so ...,+,-,+,-,+,+,-,+,-,+,... hcoeff[0] is not explicitly stored but found by subtracting the sum of all stored coefficients with signs from 32 hcoeff[0]=32 - hcoeff[1] - hcoeff[2] - ... a good choice for hcoeff and htaps is htaps=6 hcoeff={40,-10, 2} an alternative which requires more computations at both encoder and decoder side and may or may not be better is htaps=8 hcoeff={42,-14, 6,-2}ref_frames minimum of the number of available reference frames and max_ref_frames for example the first frame after a key frame always has ref_frames=1spatial_decomposition_type wavelet type 0 is a 9/7 symmetric compact integer wavelet 1 is a 5/3 symmetric compact integer wavelet others are reserved stored as delta from last, last is reset to 0 if always_reset||keyframeqlog quality(logarithmic quantizer scale) stored as delta from last, last is reset to 0 if always_reset||keyframemv_scale stored as delta from last, last is reset to 0 if always_reset||keyframe FIXME check that everything works fine if this changes between framesqbias dequantization bias stored as delta from last, last is reset to 0 if always_reset||keyframeblock_max_depth maximum depth of the block tree stored as delta from last, last is reset to 0 if always_reset||keyframequant_table quantization tableHighlevel bitstream structure:==============================--------------------------------------------|Header|--------------------------------------------|------------------------------------|||Block0||||split?||||yes no||||......... intra?||||:Block01 :yes no||||:Block02 :....... 
..........||||:Block03 ::y DC ::ref index:||||:Block04 ::cb DC ::motion x :||||......... :cr DC ::motion y :||||....... ..........|||------------------------------------||------------------------------------|||Block1|||...|--------------------------------------------|------------ ------------ ------------|||Y subbands||Cb subbands||Cr subbands||||--- ---||--- ---||--- ---|||||LL0||HL0||||LL0||HL0||||LL0||HL0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||LH0||HH0||||LH0||HH0||||LH0||HH0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HL1||LH1||||HL1||LH1||||HL1||LH1|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HH1||HL2||||HH1||HL2||||HH1||HL2|||||...||...||...|||------------ ------------ ------------|--------------------------------------------Decoding process:=================------------|||Subbands|------------||||------------|Intra DC||||LL0 subband prediction ------------|\ Dequantization ------------------- \||Reference frames|\ IDWT|------- -------|Motion \|||Frame 0||Frame 1||Compensation . OBMC v -------|------- -------|--------------. \------> Frame n output Frame Frame<----------------------------------/|...|------------------- Range Coder:============Binary Range Coder:------------------- The implemented range coder is an adapted version based upon "Range encoding: an algorithm for removing redundancy from a digitised message." by G. N. N. Martin. The symbols encoded by the Snow range coder are bits(0|1). The associated probabilities are not fix but change depending on the symbol mix seen so far. 
bit seen|new state ---------+----------------------------------------------- 0|256 - state_transition_table[256 - old_state];1|state_transition_table[old_state];state_transition_table={ 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 190, 191, 192, 194, 194, 195, 196, 197, 198, 199, 200, 201, 202, 202, 204, 205, 206, 207, 208, 209, 209, 210, 211, 212, 213, 215, 215, 216, 217, 218, 219, 220, 220, 222, 223, 224, 225, 226, 227, 227, 229, 229, 230, 231, 232, 234, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 248, 0, 0, 0, 0, 0, 0, 0};FIXME Range Coding of integers:------------------------- FIXME Neighboring Blocks:===================left and top are set to the respective blocks unless they are outside of the image in which case they are set to the Null block top-left is set to the top left block unless it is outside of the image in which case it is set to the left block if this block has no larger parent block or it is at the left side of its parent block and the top right block is not outside of the image then the top right block is used for top-right else the top-left block is used Null block y, cb, cr are 128 level, ref, mx and my are 0 Motion Vector 
Prediction:=========================1. the motion vectors of all the neighboring blocks are scaled to compensate for the difference of reference frames scaled_mv=(mv *(256 *(current_reference+1)/(mv.reference+1))+128)> the median of the scaled top and top right vectors is used as motion vector prediction the used motion vector is the sum of the predictor and(mvx_diff, mvy_diff) *mv_scale Intra DC Prediction block[y][x] dc[1]
Definition: snow.txt:400
pix_fmts
static enum AVPixelFormat pix_fmts[4][4]
Definition: lcevc_parser.c:75
NULL_IF_CONFIG_SMALL
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
Definition: internal.h:94
height
#define height
Definition: dsp.h:89
av_frame_ref
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:278
codec_internal.h
SOF14
@ SOF14
Definition: mjpeg.h:53
ff_jpegls_decode_lse
int ff_jpegls_decode_lse(MJpegDecodeContext *s)
Decode LSE block with initialization parameters.
Definition: jpeglsdec.c:51
dst
uint8_t ptrdiff_t const uint8_t ptrdiff_t int intptr_t intptr_t int int16_t * dst
Definition: dsp.h:87
ff_mjpeg_decode_frame
int ff_mjpeg_decode_frame(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *avpkt)
Definition: mjpegdec.c:2901
av_bswap32
#define av_bswap32
Definition: bswap.h:47
i
#define i(width, name, range_min, range_max)
Definition: cbs_h264.c:63
av_err2str
#define av_err2str(errnum)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: error.h:122
for
for(k=2;k<=8;++k)
Definition: h264pred_template.c:424
AV_PROFILE_MJPEG_JPEG_LS
#define AV_PROFILE_MJPEG_JPEG_LS
Definition: defs.h:177
ff_mjpeg_bits_ac_luminance
const uint8_t ff_mjpeg_bits_ac_luminance[]
Definition: jpegtabs.h:40
FF_CODEC_CAP_EXPORTS_CROPPING
#define FF_CODEC_CAP_EXPORTS_CROPPING
The decoder sets the cropping fields in the output frames manually.
Definition: codec_internal.h:60
size
int size
Definition: twinvq_data.h:10344
AV_CODEC_ID_SMVJPEG
@ AV_CODEC_ID_SMVJPEG
Definition: codec_id.h:268
AV_NOPTS_VALUE
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avutil.h:247
ff_frame_new_side_data
int ff_frame_new_side_data(const AVCodecContext *avctx, AVFrame *frame, enum AVFrameSideDataType type, size_t size, AVFrameSideData **psd)
Wrapper around av_frame_new_side_data, which rejects side data overridden by the demuxer.
Definition: decode.c:2147
AV_RB32
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_WL32 unsigned int_TMPL AV_WL24 unsigned int_TMPL AV_WL16 uint64_t_TMPL AV_WB64 unsigned int_TMPL AV_RB32
Definition: bytestream.h:96
FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM
#define FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM
The decoder extracts and fills its parameters even if the frame is skipped due to the skip_frame sett...
Definition: codec_internal.h:54
avpriv_report_missing_feature
void avpriv_report_missing_feature(void *avc, const char *msg,...) av_printf_format(2
Log a generic warning message about a missing feature.
AVFrameSideData::data
uint8_t * data
Definition: frame.h:284
SOF15
@ SOF15
Definition: mjpeg.h:54
AVCodecHWConfigInternal
Definition: hwconfig.h:25
OPEN_READER
#define OPEN_READER(name, gb)
Definition: get_bits.h:177
AVPacket::dts
int64_t dts
Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed.
Definition: packet.h:587
AV_PIX_FMT_YUVA444P
@ AV_PIX_FMT_YUVA444P
planar YUV 4:4:4 32bpp, (1 Cr & Cb sample per 1x1 Y & A samples)
Definition: pixfmt.h:174
offset
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf offset
Definition: writing_filters.txt:86
line
Definition: graph2dot.c:48
attributes.h
get_xbits
static int get_xbits(GetBitContext *s, int n)
Read MPEG-1 dc-style VLC (sign bit + mantissa with no MSB).
Definition: get_bits.h:294
HWACCEL_NVDEC
#define HWACCEL_NVDEC(codec)
Definition: hwconfig.h:68
predictor
static void predictor(uint8_t *src, ptrdiff_t size)
Definition: exrenc.c:170
AV_STEREO3D_FLAG_INVERT
#define AV_STEREO3D_FLAG_INVERT
Inverted views, Right/Bottom represents the left view.
Definition: stereo3d.h:194
AV_PIX_FMT_VAAPI
@ AV_PIX_FMT_VAAPI
Hardware acceleration through VA-API, data[3] contains a VASurfaceID.
Definition: pixfmt.h:126
DQT
@ DQT
Definition: mjpeg.h:73
AV_LOG_INFO
#define AV_LOG_INFO
Standard information.
Definition: log.h:221
ff_thp_decoder
const FFCodec ff_thp_decoder
AVCodec::id
enum AVCodecID id
Definition: codec.h:186
layout
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel layout
Definition: filter_design.txt:18
SOF10
@ SOF10
Definition: mjpeg.h:49
AV_CODEC_ID_MJPEG
@ AV_CODEC_ID_MJPEG
Definition: codec_id.h:59
NEG_USR32
#define NEG_USR32(a, s)
Definition: mathops.h:180
copy_block4
static void copy_block4(uint8_t *dst, const uint8_t *src, ptrdiff_t dstStride, ptrdiff_t srcStride, int h)
Definition: copy_block.h:37
AV_OPT_FLAG_DEPRECATED
#define AV_OPT_FLAG_DEPRECATED
Set if option is deprecated, users should refer to AVOption.help text for more information.
Definition: opt.h:386
interlaced
uint8_t interlaced
Definition: mxfenc.c:2334
decode_block
static int decode_block(MJpegDecodeContext *s, int16_t *block, int component, int dc_index, int ac_index, uint16_t *quant_matrix)
Definition: mjpegdec.c:853
code
and forward the test the status of outputs and forward it to the corresponding return FFERROR_NOT_READY If the filters stores internally one or a few frame for some it can consider them to be part of the FIFO and delay acknowledging a status change accordingly Example code
Definition: filter_design.txt:178
EOI
@ EOI
Definition: mjpeg.h:71
copy_block.h
AVCodecContext::extradata
uint8_t * extradata
Out-of-band global headers that may be used by some codecs.
Definition: avcodec.h:522
AV_PROFILE_MJPEG_HUFFMAN_LOSSLESS
#define AV_PROFILE_MJPEG_HUFFMAN_LOSSLESS
Definition: defs.h:176
VD
#define VD
Definition: amfdec.c:607
src2
const pixel * src2
Definition: h264pred_template.c:421
ff_jpegls_decode_picture
int ff_jpegls_decode_picture(MJpegDecodeContext *s)
Definition: jpeglsdec.c:355
AV_FIELD_BB
@ AV_FIELD_BB
Bottom coded first, bottom displayed first.
Definition: defs.h:215
mjpeg_decode_scan
static int mjpeg_decode_scan(MJpegDecodeContext *s)
Definition: mjpegdec.c:1445
AV_STEREO3D_TOPBOTTOM
@ AV_STEREO3D_TOPBOTTOM
Views are on top of each other.
Definition: stereo3d.h:76
mjpeg_decode_dri
static int mjpeg_decode_dri(MJpegDecodeContext *s)
Definition: mjpegdec.c:1837
AVCodecInternal::in_pkt
AVPacket * in_pkt
This packet is used to hold the packet given to decoders implementing the .decode API; it is unused b...
Definition: internal.h:83
av_fast_padded_malloc
void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size)
Same behaviour av_fast_malloc but the buffer has additional AV_INPUT_BUFFER_PADDING_SIZE at the end w...
Definition: utils.c:53
ff_mjpeg_unescape_sos
int ff_mjpeg_unescape_sos(MJpegDecodeContext *s)
Definition: mjpegdec.c:2253
SOF9
@ SOF9
Definition: mjpeg.h:48
av_always_inline
#define av_always_inline
Definition: attributes.h:68
decode_flush
static av_cold void decode_flush(AVCodecContext *avctx)
Definition: mjpegdec.c:2948
FF_DEBUG_STARTCODE
#define FF_DEBUG_STARTCODE
Definition: avcodec.h:1394
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
AV_PIX_FMT_YUVJ440P
@ AV_PIX_FMT_YUVJ440P
planar YUV 4:4:0 full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV440P and setting color_range
Definition: pixfmt.h:107
av_frame_unref
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:496
AVCodec::name
const char * name
Name of the codec implementation.
Definition: codec.h:179
AVCodecContext::chroma_sample_location
enum AVChromaLocation chroma_sample_location
This defines the location of chroma samples.
Definition: avcodec.h:684
len
int len
Definition: vorbis_enc_data.h:426
exif.h
DHT
@ DHT
Definition: mjpeg.h:56
AVCodecContext::height
int height
Definition: avcodec.h:600
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:639
AV_FRAME_FLAG_INTERLACED
#define AV_FRAME_FLAG_INTERLACED
A flag to mark frames whose content is interlaced.
Definition: frame.h:650
AVCOL_RANGE_MPEG
@ AVCOL_RANGE_MPEG
Narrow or limited range content.
Definition: pixfmt.h:760
av_calloc
void * av_calloc(size_t nmemb, size_t size)
Definition: mem.c:264
FF_CODEC_CAP_ICC_PROFILES
#define FF_CODEC_CAP_ICC_PROFILES
Codec supports embedded ICC profiles (AV_FRAME_DATA_ICC_PROFILE).
Definition: codec_internal.h:81
idctdsp.h
avcodec.h
ff_zigzag_direct
const uint8_t ff_zigzag_direct[64]
Definition: mathtables.c:137
AV_PIX_FMT_PAL8
@ AV_PIX_FMT_PAL8
8 bits with AV_PIX_FMT_RGB32 palette
Definition: pixfmt.h:84
AVCodecContext::frame_num
int64_t frame_num
Frame counter, set by libavcodec.
Definition: avcodec.h:1890
REFINE_BIT
#define REFINE_BIT(j)
Definition: mjpegdec.c:991
ff_vlc_free
void ff_vlc_free(VLC *vlc)
Definition: vlc.c:580
decode_block_refinement
static int decode_block_refinement(MJpegDecodeContext *s, int16_t *block, uint8_t *last_nnz, int ac_index, uint16_t *quant_matrix, int Ss, int Se, int Al, int *EOBRUN)
Definition: mjpegdec.c:1017
ret
ret
Definition: filter_design.txt:187
AV_LOG_FATAL
#define AV_LOG_FATAL
Something went wrong and recovery is not possible.
Definition: log.h:204
pred
static const float pred[4]
Definition: siprdata.h:259
av_stereo3d_alloc
AVStereo3D * av_stereo3d_alloc(void)
Allocate an AVStereo3D structure and set its fields to default values.
Definition: stereo3d.c:34
FFSWAP
#define FFSWAP(type, a, b)
Definition: macros.h:52
AVClass::class_name
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
Definition: log.h:81
frame
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
Definition: filter_design.txt:265
av_malloc
void * av_malloc(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:98
AVStereo3D::type
enum AVStereo3DType type
How views are packed within the video.
Definition: stereo3d.h:207
SOF2
@ SOF2
Definition: mjpeg.h:41
align_get_bits
static const uint8_t * align_get_bits(GetBitContext *s)
Definition: get_bits.h:560
hwaccel
static const char * hwaccel
Definition: ffplay.c:356
LSE
@ LSE
JPEG-LS extension parameters.
Definition: mjpeg.h:104
FF_DEBUG_QP
#define FF_DEBUG_QP
Definition: avcodec.h:1391
AV_INPUT_BUFFER_PADDING_SIZE
#define AV_INPUT_BUFFER_PADDING_SIZE
Definition: defs.h:40
id
enum AVCodecID id
Definition: dts2pts.c:549
left
Tag MUST be and< 10hcoeff half pel interpolation filter coefficients, hcoeff[0] are the 2 middle coefficients[1] are the next outer ones and so on, resulting in a filter like:...eff[2], hcoeff[1], hcoeff[0], hcoeff[0], hcoeff[1], hcoeff[2] ... the sign of the coefficients is not explicitly stored but alternates after each coeff and coeff[0] is positive, so ...,+,-,+,-,+,+,-,+,-,+,... hcoeff[0] is not explicitly stored but found by subtracting the sum of all stored coefficients with signs from 32 hcoeff[0]=32 - hcoeff[1] - hcoeff[2] - ... a good choice for hcoeff and htaps is htaps=6 hcoeff={40,-10, 2} an alternative which requires more computations at both encoder and decoder side and may or may not be better is htaps=8 hcoeff={42,-14, 6,-2}ref_frames minimum of the number of available reference frames and max_ref_frames for example the first frame after a key frame always has ref_frames=1spatial_decomposition_type wavelet type 0 is a 9/7 symmetric compact integer wavelet 1 is a 5/3 symmetric compact integer wavelet others are reserved stored as delta from last, last is reset to 0 if always_reset||keyframeqlog quality(logarithmic quantizer scale) stored as delta from last, last is reset to 0 if always_reset||keyframemv_scale stored as delta from last, last is reset to 0 if always_reset||keyframe FIXME check that everything works fine if this changes between framesqbias dequantization bias stored as delta from last, last is reset to 0 if always_reset||keyframeblock_max_depth maximum depth of the block tree stored as delta from last, last is reset to 0 if always_reset||keyframequant_table quantization tableHighlevel bitstream structure:==============================--------------------------------------------|Header|--------------------------------------------|------------------------------------|||Block0||||split?||||yes no||||......... intra?||||:Block01 :yes no||||:Block02 :....... 
..........||||:Block03 ::y DC ::ref index:||||:Block04 ::cb DC ::motion x :||||......... :cr DC ::motion y :||||....... ..........|||------------------------------------||------------------------------------|||Block1|||...|--------------------------------------------|------------ ------------ ------------|||Y subbands||Cb subbands||Cr subbands||||--- ---||--- ---||--- ---|||||LL0||HL0||||LL0||HL0||||LL0||HL0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||LH0||HH0||||LH0||HH0||||LH0||HH0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HL1||LH1||||HL1||LH1||||HL1||LH1|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HH1||HL2||||HH1||HL2||||HH1||HL2|||||...||...||...|||------------ ------------ ------------|--------------------------------------------Decoding process:=================------------|||Subbands|------------||||------------|Intra DC||||LL0 subband prediction ------------|\ Dequantization ------------------- \||Reference frames|\ IDWT|------- -------|Motion \|||Frame 0||Frame 1||Compensation . OBMC v -------|------- -------|--------------. \------> Frame n output Frame Frame<----------------------------------/|...|------------------- Range Coder:============Binary Range Coder:------------------- The implemented range coder is an adapted version based upon "Range encoding: an algorithm for removing redundancy from a digitised message." by G. N. N. Martin. The symbols encoded by the Snow range coder are bits(0|1). The associated probabilities are not fix but change depending on the symbol mix seen so far. 
bit seen|new state ---------+----------------------------------------------- 0|256 - state_transition_table[256 - old_state];1|state_transition_table[old_state];state_transition_table={ 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 190, 191, 192, 194, 194, 195, 196, 197, 198, 199, 200, 201, 202, 202, 204, 205, 206, 207, 208, 209, 209, 210, 211, 212, 213, 215, 215, 216, 217, 218, 219, 220, 220, 222, 223, 224, 225, 226, 227, 227, 229, 229, 230, 231, 232, 234, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 248, 0, 0, 0, 0, 0, 0, 0};FIXME Range Coding of integers:------------------------- FIXME Neighboring Blocks:===================left and top are set to the respective blocks unless they are outside of the image in which case they are set to the Null block top-left is set to the top left block unless it is outside of the image in which case it is set to the left block if this block has no larger parent block or it is at the left side of its parent block and the top right block is not outside of the image then the top right block is used for top-right else the top-left block is used Null block y, cb, cr are 128 level, ref, mx and my are 0 Motion Vector 
Prediction:=========================1. the motion vectors of all the neighboring blocks are scaled to compensate for the difference of reference frames scaled_mv=(mv *(256 *(current_reference+1)/(mv.reference+1))+128)> the median of the scaled left
Definition: snow.txt:386
AV_RL32
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_RL32
Definition: bytestream.h:92
AV_CODEC_ID_AMV
@ AV_CODEC_ID_AMV
Definition: codec_id.h:159
OFFSET
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf default minimum maximum flags name is the option keep it simple and lowercase description are in without and describe what they for example set the foo of the bar offset is the offset of the field in your see the OFFSET() macro
AVCodecContext
main external API structure.
Definition: avcodec.h:439
FF_CODEC_RECEIVE_FRAME_CB
#define FF_CODEC_RECEIVE_FRAME_CB(func)
Definition: codec_internal.h:355
SHOW_UBITS
#define SHOW_UBITS(name, gb, num)
Definition: get_bits.h:247
buffer
the frame and frame reference mechanism is intended to as much as expensive copies of that data while still allowing the filters to produce correct results The data is stored in buffers represented by AVFrame structures Several references can point to the same frame buffer
Definition: filter_design.txt:49
AVCHROMA_LOC_CENTER
@ AVCHROMA_LOC_CENTER
MPEG-1 4:2:0, JPEG 4:2:0, H.263 4:2:0.
Definition: pixfmt.h:799
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:72
APP2
@ APP2
Definition: mjpeg.h:81
FF_HW_CALL
#define FF_HW_CALL(avctx, function,...)
Definition: hwaccel_internal.h:173
AVCodecContext::profile
int profile
profile
Definition: avcodec.h:1630
ffhwaccel
static const FFHWAccel * ffhwaccel(const AVHWAccel *codec)
Definition: hwaccel_internal.h:168
values
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return values
Definition: filter_design.txt:264
AVPixFmtDescriptor::comp
AVComponentDescriptor comp[4]
Parameters that describe how pixels are packed.
Definition: pixdesc.h:105
Windows::Graphics::DirectX::Direct3D11::p
IDirect3DDxgiInterfaceAccess _COM_Outptr_ void ** p
Definition: vsrc_gfxcapture_winrt.hpp:53
AV_PIX_FMT_YUV444P
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:78
ff_mjpeg_bits_dc_chrominance
const uint8_t ff_mjpeg_bits_dc_chrominance[]
Definition: jpegtabs.h:37
AVCodecContext::debug
int debug
debug
Definition: avcodec.h:1386
ff_mjpeg_decode_sof
int ff_mjpeg_decode_sof(MJpegDecodeContext *s)
Definition: mjpegdec.c:309
APP0
@ APP0
Definition: mjpeg.h:79
FF_DISABLE_DEPRECATION_WARNINGS
#define FF_DISABLE_DEPRECATION_WARNINGS
Definition: internal.h:72
AV_PIX_FMT_GBRP
@ AV_PIX_FMT_GBRP
planar GBR 4:4:4 24bpp
Definition: pixfmt.h:165
AVCodecContext::coded_width
int coded_width
Bitstream width / height, may be different from width/height e.g.
Definition: avcodec.h:615
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:200
AV_PIX_FMT_GRAY16LE
@ AV_PIX_FMT_GRAY16LE
Y , 16bpp, little-endian.
Definition: pixfmt.h:105
AV_PIX_FMT_YUV422P
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:77
mem.h
bytestream2_get_bufferu
static av_always_inline unsigned int bytestream2_get_bufferu(GetByteContext *g, uint8_t *dst, unsigned int size)
Definition: bytestream.h:277
SOI
@ SOI
Definition: mjpeg.h:70
av_stereo3d_create_side_data
AVStereo3D * av_stereo3d_create_side_data(AVFrame *frame)
Allocate a complete AVFrameSideData and add it to the frame.
Definition: stereo3d.c:53
avpriv_request_sample
#define avpriv_request_sample(...)
Definition: tableprint_vlc.h:37
AVFrameSideData
Structure to hold side data for an AVFrame.
Definition: frame.h:282
flush_put_bits
static void flush_put_bits(PutBitContext *s)
Pad the end of the output stream with zeros.
Definition: put_bits.h:153
SOF1
@ SOF1
Definition: mjpeg.h:40
w
uint8_t w
Definition: llvidencdsp.c:39
av_free
#define av_free(p)
Definition: tableprint_vlc.h:34
AVCodecContext::codec_tag
unsigned int codec_tag
fourcc (LSB first, so "ABCD" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A').
Definition: avcodec.h:464
bytestream2_put_bufferu
static av_always_inline unsigned int bytestream2_put_bufferu(PutByteContext *p, const uint8_t *src, unsigned int size)
Definition: bytestream.h:301
ff_mjpeg_bits_dc_luminance
const FF_VISIBILITY_PUSH_HIDDEN uint8_t ff_mjpeg_bits_dc_luminance[]
Definition: jpegtabs.h:32
ff_mjpeg_build_vlc
int ff_mjpeg_build_vlc(VLC *vlc, const uint8_t *bits_table, const uint8_t *val_table, int is_ac, void *logctx)
Definition: mjpegdec_common.c:41
AVPacket
This structure stores compressed data.
Definition: packet.h:565
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:466
AV_OPT_TYPE_BOOL
@ AV_OPT_TYPE_BOOL
Underlying C type is int.
Definition: opt.h:327
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:35
av_fast_malloc
void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size)
Allocate a buffer, reusing the given one if large enough.
Definition: mem.c:557
AV_PIX_FMT_YUV411P
@ AV_PIX_FMT_YUV411P
planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples)
Definition: pixfmt.h:80
HWACCEL_VAAPI
#define HWACCEL_VAAPI(codec)
Definition: hwconfig.h:70
FFMAX3
#define FFMAX3(a, b, c)
Definition: macros.h:48
imgutils.h
bytestream2_init
static av_always_inline void bytestream2_init(GetByteContext *g, const uint8_t *buf, int buf_size)
Definition: bytestream.h:137
AVERROR_BUG
#define AVERROR_BUG
Internal bug, also see AVERROR_BUG2.
Definition: error.h:52
AVCodecContext::properties
attribute_deprecated unsigned properties
Properties of the stream that gets decoded.
Definition: avcodec.h:1649
MAX_COMPONENTS
#define MAX_COMPONENTS
Definition: mjpegdec.h:47
rgb
static const SheerTable rgb[2]
Definition: sheervideodata.h:32
block
The exact code depends on how similar the blocks are and how related they are to the block
Definition: filter_design.txt:207
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
ljpeg_decode_yuv_scan
static int ljpeg_decode_yuv_scan(MJpegDecodeContext *s)
Definition: mjpegdec.c:1253
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:61
MKTAG
#define MKTAG(a, b, c, d)
Definition: macros.h:55
h
h
Definition: vp9dsp_template.c:2070
SOF7
@ SOF7
Definition: mjpeg.h:46
AVStereo3D
Stereo 3D type: this structure describes how two videos are packed within a single video surface,...
Definition: stereo3d.h:203
pkt
static AVPacket * pkt
Definition: demux_decode.c:55
av_image_check_size
int av_image_check_size(unsigned int w, unsigned int h, int log_offset, void *log_ctx)
Check if the given dimension of an image is valid, meaning that all bytes of the image can be address...
Definition: imgutils.c:318
width
#define width
Definition: dsp.h:89
AV_PROFILE_MJPEG_HUFFMAN_PROGRESSIVE_DCT
#define AV_PROFILE_MJPEG_HUFFMAN_PROGRESSIVE_DCT
Definition: defs.h:175
AV_RB24
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_WL32 unsigned int_TMPL AV_WL24 unsigned int_TMPL AV_WL16 uint64_t_TMPL AV_WB64 unsigned int_TMPL AV_WB32 unsigned int_TMPL AV_RB24
Definition: bytestream.h:97
PREDICT
#define PREDICT(ret, topleft, top, left, predictor)
Definition: mjpeg.h:118
put_bits.h
return_frame
static int return_frame(AVFilterContext *ctx, int is_second)
Definition: yadif_common.c:28
AV_FRAME_FLAG_LOSSLESS
#define AV_FRAME_FLAG_LOSSLESS
A decoder can use this flag to mark frames which were originally encoded losslessly.
Definition: frame.h:663
SOF6
@ SOF6
Definition: mjpeg.h:45
skip
static void BS_FUNC() skip(BSCTX *bc, unsigned int n)
Skip n bits in the buffer.
Definition: bitstream_template.h:383
src
#define src
Definition: vp8dsp.c:248
JPG
@ JPG
Definition: mjpeg.h:47
av_fourcc2str
#define av_fourcc2str(fourcc)
Definition: avutil.h:347