#include "config_components.h"

/* Adam7 interlacing mask tables (one row of values from each array) */
0x01, 0x01, 0x11, 0x11, 0x55, 0x55, 0xff,   /* png_pass_mask */
0xff, 0xff, 0x0f, 0xff, 0x33, 0xff, 0x55,   /* png_pass_dsp_ymask */
0xff, 0x0f, 0xff, 0x33, 0xff, 0x55, 0xff    /* png_pass_dsp_mask */
/* png_put_interlaced_row(): scatter one Adam7 pass row into the output bitmap */
static void png_put_interlaced_row(uint8_t *dst, int width,
                                   int bits_per_pixel, int pass,
                                   int color_type, const uint8_t *src)
    int x, mask, dsp_mask, j, src_x, b, bpp;

    switch (bits_per_pixel) {
    case 1:
        for (x = 0; x < width; x++) {
            if ((dsp_mask << j) & 0x80) {
                b = (src[src_x >> 3] >> (7 - (src_x & 7))) & 1;
                dst[x >> 3] &= 0xFF7F >> j;
                dst[x >> 3] |= b << (7 - j);
            if ((mask << j) & 0x80)
    case 2:
        for (x = 0; x < width; x++) {
            int j2 = 2 * (x & 3);
            if ((dsp_mask << j) & 0x80) {
                b = (src[src_x >> 2] >> (6 - 2 * (src_x & 3))) & 3;
                dst[x >> 2] &= 0xFF3F >> j2;
                dst[x >> 2] |= b << (6 - j2);
            if ((mask << j) & 0x80)
    case 4:
        for (x = 0; x < width; x++) {
            if ((dsp_mask << j) & 0x80) {
                b = (src[src_x >> 1] >> (4 - 4 * (src_x & 1))) & 15;
                dst[x >> 1] &= 0xFF0F >> j2;
                dst[x >> 1] |= b << (4 - j2);
            if ((mask << j) & 0x80)
    default:
        bpp = bits_per_pixel >> 3;
        for (x = 0; x < width; x++) {
            if ((dsp_mask << j) & 0x80) {
            if ((mask << j) & 0x80)
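The pass loops above implement Adam7 deinterlacing: each of the seven passes covers a fixed subset of every 8x8 pixel block, and the per-pass mask bytes from the tables at the top of the file say which columns of an 8-pixel group belong to the pass. As a stand-alone illustration (not decoder code, and assuming j is simply x & 7, as the masks suggest):

#include <stdio.h>

/* Same test as "(mask << j) & 0x80" above: bit 7 of the shifted mask tells
 * whether column (x & 7) of each 8-pixel group is written in this pass. */
static int column_in_pass(unsigned char mask, int x)
{
    return (mask << (x & 7)) & 0x80;
}

int main(void)
{
    const unsigned char mask = 0x55;            /* example: every other column */
    for (int x = 0; x < 8; x++)
        printf("column %d: %s\n", x, column_in_pass(mask, x) ? "written" : "skipped");
    return 0;
}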
/* ff_add_png_paeth_prediction(): Paeth reconstruction loop */
    for (i = 0; i < w; i++) {
        int a, b, c, p, pa, pb, pc;

        if (pa <= pb && pa <= pc)
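The loop above is the Paeth reconstruction used by ff_add_png_paeth_prediction(). The predictor itself is fixed by the PNG specification; a compact stand-alone version of the selection logic (the decoder adds the chosen predictor to each filtered byte) looks like this:

#include <stdlib.h>

/* PNG Paeth predictor: estimate p = a + b - c from the left (a), above (b)
 * and upper-left (c) neighbours, then return whichever neighbour is closest
 * to that estimate, with ties broken in the order a, b, c. */
static int paeth_predict(int a, int b, int c)
{
    int p  = a + b - c;
    int pa = abs(p - a);
    int pb = abs(p - b);
    int pc = abs(p - c);

    if (pa <= pb && pa <= pc)
        return a;
    else if (pb <= pc)
        return b;
    else
        return c;
}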
#define UNROLL1(bpp, op)                                          \
    for (; i <= size - bpp; i += bpp) {                           \
        dst[i + 0] = r = op(r, src[i + 0], last[i + 0]);          \
        dst[i + 1] = g = op(g, src[i + 1], last[i + 1]);          \
        dst[i + 2] = b = op(b, src[i + 2], last[i + 2]);          \
        dst[i + 3] = a = op(a, src[i + 3], last[i + 3]);          \
#define UNROLL_FILTER(op)                                         \
    } else if (bpp == 2) {                                        \
    } else if (bpp == 3) {                                        \
    } else if (bpp == 4) {                                        \
    for (; i < size; i++) {                                       \
        dst[i] = op(dst[i - bpp], src[i], last[i]);               \
/* ff_png_filter_row(): undo one row's PNG filter */
void ff_png_filter_row(PNGDSPContext *dsp, uint8_t *dst, int filter_type,
                       uint8_t *src, uint8_t *last, int size, int bpp)
    int i, p, r, g, b, a;

    switch (filter_type) {
        for (i = 0; i < bpp; i++)
        for (; i < size; i += bpp) {
            unsigned s = *(int *)(src + i);
            p = ((s & 0x7f7f7f7f) + (p & 0x7f7f7f7f)) ^ ((s ^ p) & 0x80808080);
            *(int *)(dst + i) = p;
#define OP_SUB(x, s, l) ((x) + (s))

    for (i = 0; i < bpp; i++) {

#define OP_AVG(x, s, l) (((((x) + (l)) >> 1) + (s)) & 0xff)

    for (i = 0; i < bpp; i++) {

    if (bpp > 2 && size > 4) {
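OP_SUB and OP_AVG above are the per-byte reconstruction steps for the PNG Sub and Average filters, and UNROLL_FILTER merely unrolls them by the pixel size. A minimal scalar sketch of the same reconstruction, without the unrolling (filter type values per the PNG spec; last is assumed to be the previously reconstructed row, or NULL for the first row):

#include <stdint.h>

/* Scalar reconstruction of the PNG Sub, Up and Average filters; all
 * per-byte arithmetic is modulo 256, exactly as in OP_SUB / OP_AVG. */
static void filter_row_scalar(uint8_t *dst, const uint8_t *src,
                              const uint8_t *last, int size, int bpp, int type)
{
    for (int i = 0; i < size; i++) {
        int left = i >= bpp ? dst[i - bpp] : 0;   /* reconstructed left pixel */
        int up   = last ? last[i] : 0;            /* reconstructed row above  */
        int pred = 0;

        if (type == 1)                            /* Sub     */
            pred = left;
        else if (type == 2)                       /* Up      */
            pred = up;
        else if (type == 3)                       /* Average */
            pred = (left + up) >> 1;

        dst[i] = (src[i] + pred) & 0xff;
    }
}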
#define YUV2RGB(NAME, TYPE)                                       \
static void deloco_ ## NAME(TYPE *dst, int size, int alpha)       \
    for (i = 0; i < size - 2; i += 3 + alpha) {                   \
        int g = dst[i + 1];                                       \
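YUV2RGB above instantiates deloco_rgb8() and deloco_rgb16(), which undo the MNG-style LOCO colour transform used when filter_type is PNG_FILTER_TYPE_LOCO: red and blue are coded as differences from green. A sketch of what the generated 8-bit function amounts to (wrap-around byte arithmetic assumed, as in the macro):

#include <stdint.h>

/* Inverse LOCO transform: add green back onto the red and blue samples of
 * every pixel; alpha is 1 when an alpha byte follows each RGB triplet. */
static void deloco_rgb8_sketch(uint8_t *dst, int size, int alpha)
{
    for (int i = 0; i < size - 2; i += 3 + alpha) {
        int g = dst[i + 1];
        dst[i + 0] += g;   /* red  */
        dst[i + 2] += g;   /* blue */
    }
}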
/* percent_missing(): how much of the frame is still undecoded */
    if (s->interlace_type) {

    return 100 - 100 * s->y / s->cur_h;
/* png_handle_row(): filter one decompressed row and write it to the frame */
    uint8_t *ptr, *last_row;

    if (!s->interlace_type) {
        ptr = dst + dst_stride * (s->y + s->y_offset) + s->x_offset * s->bpp;
            last_row = s->last_row;
            last_row = ptr - dst_stride;
                          last_row, s->row_size, s->bpp);
        if (s->bit_depth == 16) {
            deloco_rgb16((uint16_t *)(ptr - dst_stride), s->row_size / 2,
            deloco_rgb8(ptr - dst_stride, s->row_size,
        if (s->y == s->cur_h) {
            if (s->bit_depth == 16) {
                deloco_rgb16((uint16_t *)ptr, s->row_size / 2,
                deloco_rgb8(ptr, s->row_size,
    /* Adam7 interlaced path */
        ptr = dst + dst_stride * (s->y + s->y_offset) + s->x_offset * s->bpp;
                          s->last_row, s->pass_row_size, s->bpp);
        FFSWAP(uint8_t *, s->last_row, s->tmp_row);
        FFSWAP(unsigned int, s->last_row_size, s->tmp_row_size);
                               s->color_type, s->last_row);
        if (s->y == s->cur_h) {
            memset(s->last_row, 0, s->row_size);
            s->crow_size = s->pass_row_size + 1;
            if (s->pass_row_size != 0)
/* png_decode_idat(): inflate the IDAT payload and hand rows to png_handle_row() */
static int png_decode_idat(PNGDecContext *s, GetByteContext *gb,
                           uint8_t *dst, ptrdiff_t dst_stride)
    z_stream *const zstream = &s->zstream.zstream;

    zstream->next_in = gb->buffer;
    while (zstream->avail_in > 0) {
        if (ret != Z_OK && ret != Z_STREAM_END) {
        if (zstream->avail_out == 0) {
            zstream->avail_out = s->crow_size;
            zstream->next_out  = s->crow_buf;
    if (ret == Z_STREAM_END && zstream->avail_in > 0) {
               "%d undecompressed bytes left in buffer\n", zstream->avail_in);
/* decode_zbuf(): inflate a zlib-compressed chunk payload into an AVBPrint */
static int decode_zbuf(AVBPrint *bp, const uint8_t *data,
                       const uint8_t *data_end, void *logctx)
    z_stream *const zstream = &z.zstream;

    zstream->next_in  = data;
    zstream->avail_in = data_end - data;
    while (zstream->avail_in > 0) {
        zstream->next_out  = buf;
        zstream->avail_out = buf_size - 1;
        if (ret != Z_OK && ret != Z_STREAM_END) {
        bp->len += zstream->next_out - buf;
        if (ret == Z_STREAM_END)
    bp->str[bp->len] = 0;
/* iso88591_to_utf8(): re-encode Latin-1 text chunks as UTF-8 */
    for (i = 0; i < size_in; i++)
        extra += !!(in[i] & 0x80);
    if (size_in == SIZE_MAX || extra > SIZE_MAX - size_in - 1)

    for (i = 0; i < size_in; i++) {
        *(q++) = 0xC0 | (in[i] >> 6);
        *(q++) = 0x80 | (in[i] & 0x3F);
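The fragment above is the core of iso88591_to_utf8(): PNG tEXt keywords and text are Latin-1, so every byte with the high bit set expands to a two-byte UTF-8 sequence. A self-contained version of the same conversion:

#include <stdlib.h>

/* Stand-alone version of the conversion above: ISO 8859-1 maps 1:1 onto
 * U+0000..U+00FF, so bytes >= 0x80 become the two-byte UTF-8 sequence
 * 0xC0 | (c >> 6) followed by 0x80 | (c & 0x3F). */
static char *latin1_to_utf8(const unsigned char *in, size_t len)
{
    size_t extra = 0;
    for (size_t i = 0; i < len; i++)
        extra += in[i] >> 7;                 /* one extra byte per high byte */

    unsigned char *out = malloc(len + extra + 1);
    if (!out)
        return NULL;

    unsigned char *q = out;
    for (size_t i = 0; i < len; i++) {
        if (in[i] < 0x80) {
            *q++ = in[i];
        } else {
            *q++ = 0xC0 | (in[i] >> 6);
            *q++ = 0x80 | (in[i] & 0x3F);
        }
    }
    *q = 0;
    return (char *)out;
}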
/* decode_text_chunk(): parse tEXt / zTXt keyword-value pairs */
    const char *keyword     = data;
    const char *keyword_end = memchr(keyword, 0, data_end - data);
    char *kw_utf8 = NULL, *txt_utf8 = NULL;

    data = keyword_end + 1;
    if (data == data_end)

    text_len = data_end - data;
/* decode_ihdr_chunk(): image header */
    s->width  = s->cur_w = bytestream2_get_be32(gb);
    s->height = s->cur_h = bytestream2_get_be32(gb);
        s->cur_w = s->cur_h = s->width = s->height = 0;
    s->bit_depth = bytestream2_get_byte(gb);
    if (s->bit_depth != 1 && s->bit_depth != 2 && s->bit_depth != 4 &&
        s->bit_depth != 8 && s->bit_depth != 16) {
    s->color_type       = bytestream2_get_byte(gb);
    s->compression_type = bytestream2_get_byte(gb);
    if (s->compression_type) {
    s->filter_type    = bytestream2_get_byte(gb);
    s->interlace_type = bytestream2_get_byte(gb);
           "compression_type=%d filter_type=%d interlace_type=%d\n",
           s->width, s->height, s->bit_depth, s->color_type,
           s->compression_type, s->filter_type, s->interlace_type);
        s->cur_w = s->cur_h = s->width = s->height = 0;
/* populate_avctx_color_fields(): map cICP / iCCP / sRGB / cHRM / gAMA to AVFrame fields */
    if (s->cicp_range == 0) {
    } else if (s->cicp_range != 1) {
    } else if (s->iccp_data) {
                                       s->iccp_data_len, &sd);
            memcpy(sd->data, s->iccp_data, s->iccp_data_len);
    } else if (s->have_srgb) {
    } else if (s->have_chrm) {

    if (s->iccp_data || s->have_srgb || s->have_cicp) {
    } else if (s->gamma) {
        if (s->gamma > 45355 && s->gamma < 45555)
        else if (s->gamma > 35614 && s->gamma < 35814)
        else if (s->gamma > 38362 && s->gamma < 38562)
        else if (s->gamma > 99900 && s->gamma < 100100)

    if (!s->have_cicp || s->cicp_range == 1)

    if (!s->has_trns && s->significant_bits > 0)

    clli->MaxCLL  = s->clli_max / 10000;
    clli->MaxFALL = s->clli_avg / 10000;

    for (int i = 0; i < 3; i++) {
/* decode_idat_chunk(): pick the output pixel format and decompress image data */
    size_t byte_depth = s->bit_depth > 8 ? 2 : 1;

    s->bits_per_pixel = s->bit_depth * s->channels;
    s->bpp      = (s->bits_per_pixel + 7) >> 3;
    s->row_size = (s->cur_w * s->bits_per_pixel + 7) >> 3;

    if ((s->bit_depth == 2 || s->bit_depth == 4 || s->bit_depth == 8) &&
    } else if ((s->bit_depth == 2 || s->bit_depth == 4 || s->bit_depth == 8) &&
    } else if ((s->bit_depth == 2 || s->bit_depth == 4 || s->bit_depth == 8) &&
    } else if (s->bit_depth == 16 &&
    } else if (s->bit_depth == 16 &&
    } else if (s->bit_depth == 16 &&
    } else if ((s->bits_per_pixel == 1 || s->bits_per_pixel == 2 ||
                s->bits_per_pixel == 4 || s->bits_per_pixel == 8) &&
    } else if (s->bit_depth == 8 &&
    } else if (s->bit_depth == 16 &&
                                "Bit depth %d color type %d",
                                s->bit_depth, s->color_type);
                                "and color type %d with TRNS",
                                s->bit_depth, s->color_type);
        s->bpp += byte_depth;

    if (!s->interlace_type) {
        s->crow_size = s->row_size + 1;
        s->crow_size = s->pass_row_size + 1;
    ff_dlog(avctx, "row_size=%d crow_size =%d\n",
            s->row_size, s->crow_size);
        memcpy(p->data[1], s->palette, 256 * sizeof(uint32_t));
    if (s->interlace_type ||
    s->crow_buf = s->buffer + 15;
    s->zstream.zstream.avail_out = s->crow_size;
    s->zstream.zstream.next_out  = s->crow_buf;
        s->bpp -= byte_depth;
        s->bpp += byte_depth;
/* decode_plte_chunk(): read the palette */
    if ((length % 3) != 0 || length > 256 * 3)

    for (i = 0; i < n; i++) {
        r = bytestream2_get_byte(gb);
        g = bytestream2_get_byte(gb);
        b = bytestream2_get_byte(gb);
        s->palette[i] = (0xFFU << 24) | (r << 16) | (g << 8) | b;
        s->palette[i] = (0xFFU << 24);
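Each PLTE triplet above is packed into the decoder's 32-bit palette as an opaque 0xAARRGGBB word; entries beyond the chunk are initialised to opaque black, and alpha is only rewritten later if a tRNS chunk arrives. The packing, as a one-liner:

#include <stdint.h>

/* Pack one PLTE entry the same way as above: opaque alpha, then R, G, B. */
static uint32_t pack_plte_entry(uint8_t r, uint8_t g, uint8_t b)
{
    return (0xFFu << 24) | ((uint32_t)r << 16) | ((uint32_t)g << 8) | b;
}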
/* decode_trns_chunk(): transparency, either palette alpha or a transparent color */
        if (length > 256 || !(s->hdr_state & PNG_PLTE))

        for (i = 0; i < length; i++) {
            unsigned v = bytestream2_get_byte(gb);
            s->palette[i] = (s->palette[i] & 0x00ffffff) | (v << 24);

        for (i = 0; i < length / 2; i++) {
            if (s->bit_depth > 8)
                AV_WB16(&s->transparent_color_be[2 * i], v);
                s->transparent_color_be[i] = v;
/* decode_iccp_chunk(): embedded ICC profile */
    while ((s->iccp_name[cnt++] = bytestream2_get_byte(gb)) && cnt < 81);
    if (bytestream2_get_byte(gb) != 0) {
    s->iccp_data_len = bp.len;
    s->iccp_name[0]  = 0;

/* decode_sbit_chunk(): significant bits per channel */
    int b = bytestream2_get_byteu(gb);
    s->significant_bits = bits;
/* handle_small_bpp(): expand 1/2/4-bit rows to one byte per pixel */
        uint8_t *pd = p->data[0];
        for (j = 0; j < s->height; j++) {
            for (k = 7; k >= 1; k--)
                if ((s->width & 7) >= k)
                    pd[8*i + k - 1] = (pd[i] >> (8 - k)) & 1;
            for (i--; i >= 0; i--) {
                pd[8*i + 7] =  pd[i]       & 1;
                pd[8*i + 6] = (pd[i] >> 1) & 1;
                pd[8*i + 5] = (pd[i] >> 2) & 1;
                pd[8*i + 4] = (pd[i] >> 3) & 1;
                pd[8*i + 3] = (pd[i] >> 4) & 1;
                pd[8*i + 2] = (pd[i] >> 5) & 1;
                pd[8*i + 1] = (pd[i] >> 6) & 1;
                pd[8*i + 0] =  pd[i] >> 7;
    } else if (s->bits_per_pixel == 2) {
        uint8_t *pd = p->data[0];
        for (j = 0; j < s->height; j++) {
                if ((s->width & 3) >= 3) pd[4*i + 2] = (pd[i] >> 2) & 3;
                if ((s->width & 3) >= 2) pd[4*i + 1] = (pd[i] >> 4) & 3;
                if ((s->width & 3) >= 1) pd[4*i + 0] =  pd[i] >> 6;
                for (i--; i >= 0; i--) {
                    pd[4*i + 3] =  pd[i]       & 3;
                    pd[4*i + 2] = (pd[i] >> 2) & 3;
                    pd[4*i + 1] = (pd[i] >> 4) & 3;
                    pd[4*i + 0] =  pd[i] >> 6;
                if ((s->width & 3) >= 3) pd[4*i + 2] = ((pd[i] >> 2) & 3) * 0x55;
                if ((s->width & 3) >= 2) pd[4*i + 1] = ((pd[i] >> 4) & 3) * 0x55;
                if ((s->width & 3) >= 1) pd[4*i + 0] = ( pd[i] >> 6     ) * 0x55;
                for (i--; i >= 0; i--) {
                    pd[4*i + 3] = ( pd[i]       & 3) * 0x55;
                    pd[4*i + 2] = ((pd[i] >> 2) & 3) * 0x55;
                    pd[4*i + 1] = ((pd[i] >> 4) & 3) * 0x55;
                    pd[4*i + 0] = ( pd[i] >> 6     ) * 0x55;
    } else if (s->bits_per_pixel == 4) {
        uint8_t *pd = p->data[0];
        for (j = 0; j < s->height; j++) {
                if (s->width & 1) pd[2*i + 0] = pd[i] >> 4;
                for (i--; i >= 0; i--) {
                    pd[2*i + 1] = pd[i] & 15;
                    pd[2*i + 0] = pd[i] >> 4;
                if (s->width & 1) pd[2*i + 0] = (pd[i] >> 4) * 0x11;
                for (i--; i >= 0; i--) {
                    pd[2*i + 1] = (pd[i] & 15) * 0x11;
                    pd[2*i + 0] = (pd[i] >> 4) * 0x11;
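The grayscale branches above scale 2-bit samples by 0x55 and 4-bit samples by 0x11; multiplying by those constants replicates the bit pattern across the byte, so 0..3 and 0..15 map exactly onto 0..255 (3 * 0x55 = 0xFF, 15 * 0x11 = 0xFF). A quick check:

#include <stdio.h>

/* Bit replication by multiplication: the exact scaling used above. */
int main(void)
{
    for (int v = 0; v < 4; v++)
        printf("2-bit %2d -> %3d\n", v, v * 0x55);   /* 0, 85, 170, 255 */
    for (int v = 0; v < 16; v++)
        printf("4-bit %2d -> %3d\n", v, v * 0x11);   /* 0, 17, ..., 255 */
    return 0;
}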
/* decode_fctl_chunk(): APNG frame control */
    uint32_t sequence_number;
    int cur_w, cur_h, x_offset, y_offset, dispose_op, blend_op;

    sequence_number = bytestream2_get_be32(gb);
    cur_w           = bytestream2_get_be32(gb);
    cur_h           = bytestream2_get_be32(gb);
    x_offset        = bytestream2_get_be32(gb);
    y_offset        = bytestream2_get_be32(gb);
    dispose_op      = bytestream2_get_byte(gb);
    blend_op        = bytestream2_get_byte(gb);

    if (sequence_number == 0 &&
        (cur_w != s->width ||
         cur_h != s->height ||
        cur_w <= 0 || cur_h <= 0 ||
        x_offset < 0 || y_offset < 0 ||
        cur_w > s->width - x_offset || cur_h > s->height - y_offset)

    if ((sequence_number == 0 || !s->last_picture.f) &&

    s->x_offset   = x_offset;
    s->y_offset   = y_offset;
    s->dispose_op = dispose_op;
    s->blend_op   = blend_op;
/* handle_p_frame_png(): add the previous frame back onto the delta */
    uint8_t *pd      = p->data[0];
    uint8_t *pd_last = s->last_picture.f->data[0];

    ls = FFMIN(ls, s->width * s->bpp);
    for (j = 0; j < s->height; j++) {
        for (i = 0; i < ls; i++)
            pd[i] += pd_last[i];
        pd_last += s->last_picture.f->linesize[0];
#define FAST_DIV255(x) ((((x) + 128) * 257) >> 16)
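FAST_DIV255 is the usual fixed-point replacement for a division by 255: for every value in the 0..255*255 range produced by multiplying two 8-bit quantities, (x + 128) * 257 >> 16 equals x / 255 rounded to nearest. A quick exhaustive check of that claim:

#include <assert.h>

#define FAST_DIV255(x) ((((x) + 128) * 257) >> 16)

int main(void)
{
    /* (x + 127) / 255 is x / 255 rounded to nearest (no exact ties exist). */
    for (int x = 0; x <= 255 * 255; x++)
        assert(FAST_DIV255(x) == (x + 127) / 255);
    return 0;
}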
/* handle_p_frame_apng(): composite the new region onto the previous frame */
    ptrdiff_t dst_stride = p->linesize[0];
    const uint8_t *src   = s->last_picture.f->data[0];
    ptrdiff_t src_stride = s->last_picture.f->linesize[0];

    for (y = 0; y < s->y_offset; y++)
        memcpy(dst + y * dst_stride, src + y * src_stride, p->width * bpp);
    for (y = s->y_offset; y < s->y_offset + s->cur_h; y++) {
        memcpy(dst + y * dst_stride, src + y * src_stride, s->x_offset * bpp);
        memcpy(dst + y * dst_stride + (s->x_offset + s->cur_w) * bpp,
               src + y * src_stride + (s->x_offset + s->cur_w) * bpp,
               (p->width - s->cur_w - s->x_offset) * bpp);
    for (y = s->y_offset + s->cur_h; y < p->height; y++)
        memcpy(dst + y * dst_stride, src + y * src_stride, p->width * bpp);
    /* APNG blend 'over': alpha-blend the new region onto the previous frame */
    for (y = s->y_offset; y < s->y_offset + s->cur_h; ++y) {
        uint8_t *foreground       = dst + dst_stride * y + bpp * s->x_offset;
        const uint8_t *background = src + src_stride * y + bpp * s->x_offset;
        for (x = s->x_offset; x < s->x_offset + s->cur_w;
             ++x, foreground += bpp, background += bpp) {
            uint8_t foreground_alpha, background_alpha, output_alpha;

                foreground_alpha = foreground[3];
                background_alpha = background[3];

                foreground_alpha = foreground[1];
                background_alpha = background[1];

            if (foreground_alpha == 255)
            if (foreground_alpha == 0) {
                memcpy(foreground, background, bpp);

            output_alpha = foreground_alpha +
                           FAST_DIV255((255 - foreground_alpha) * background_alpha);

            for (b = 0; b < bpp - 1; ++b) {
                if (output_alpha == 0) {
                } else if (background_alpha == 255) {
                    output[b] = FAST_DIV255(foreground_alpha * foreground[b] +
                                            (255 - foreground_alpha) * background[b]);
                    output[b] = (255 * foreground_alpha * foreground[b] +
                                 (255 - foreground_alpha) * background_alpha * background[b]) /
                                (255 * output_alpha);

            memcpy(foreground, output, bpp);
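The blend loop above is APNG's "over" blend operation, i.e. ordinary source-over compositing with straight (non-premultiplied) alpha: alpha_out = alpha_fg + (1 - alpha_fg) * alpha_bg and C_out = (alpha_fg * C_fg + (1 - alpha_fg) * alpha_bg * C_bg) / alpha_out, with FAST_DIV255 standing in for the division by 255. One channel of that, written out plainly:

#include <stdint.h>

#define FAST_DIV255(x) ((((x) + 128) * 257) >> 16)

/* Source-over blend of a single 8-bit channel with straight alpha. */
static uint8_t blend_over(uint8_t fg, uint8_t fg_alpha,
                          uint8_t bg, uint8_t bg_alpha)
{
    unsigned out_alpha = fg_alpha + FAST_DIV255((255 - fg_alpha) * bg_alpha);
    if (out_alpha == 0)
        return 0;
    return (255u * fg_alpha * fg + (255u - fg_alpha) * bg_alpha * bg) /
           (255u * out_alpha);
}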
/* apng_reset_background(): clear the frame region for APNG_DISPOSE_OP_BACKGROUND */
    const ptrdiff_t dst_stride = s->picture.f->linesize[0];
    uint8_t *dst = s->picture.f->data[0] +
                   s->y_offset * dst_stride + bpp * s->x_offset;

    for (size_t y = 0; y < s->cur_h; y++) {
        memset(dst, 0, bpp * s->cur_w);
/* decode_frame_common(): main chunk loop */
    uint32_t tag, length;
    int decode_next_dat = 0;

    length = bytestream2_get_be32(&s->gb);

    uint32_t crc_sig = AV_RB32(s->gb.buffer + length + 4);
    uint32_t crc_cal = ~av_crc(crc_tab, UINT32_MAX, s->gb.buffer, length + 4);
    if (crc_sig ^ crc_cal) {

    tag = bytestream2_get_le32(&s->gb);
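The check above recomputes the chunk CRC over the 4-byte chunk type plus the payload (hence length + 4) and compares it with the big-endian CRC-32 stored right after the data; PNG's CRC-32 matches lavu's AV_CRC_32_IEEE_LE table with an all-ones initial value and a final inversion. A sketch of the same verification in isolation:

#include <stdint.h>
#include "libavutil/crc.h"
#include "libavutil/intreadwrite.h"

/* buf points at the 4-byte chunk type, followed by `length` data bytes and
 * the stored big-endian CRC; returns nonzero if the CRC matches. */
static int check_chunk_crc(const uint8_t *buf, uint32_t length)
{
    const AVCRC *tab  = av_crc_get_table(AV_CRC_32_IEEE_LE);
    uint32_t stored   = AV_RB32(buf + 4 + length);
    uint32_t computed = ~av_crc(tab, UINT32_MAX, buf, 4 + length);
    return stored == computed;
}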
    case MKTAG('I', 'H', 'D', 'R'):
    case MKTAG('p', 'H', 'Y', 's'):
    case MKTAG('t', 'E', 'X', 't'):
    case MKTAG('I', 'D', 'A', 'T'):
    case MKTAG('t', 'R', 'N', 'S'):
    case MKTAG('s', 'R', 'G', 'B'):
    case MKTAG('c', 'I', 'C', 'P'):
    case MKTAG('c', 'H', 'R', 'M'):
    case MKTAG('g', 'A', 'M', 'A'):
    case MKTAG('I', 'H', 'D', 'R'):
    case MKTAG('p', 'H', 'Y', 's'):
    case MKTAG('f', 'c', 'T', 'L'):
        decode_next_dat = 1;
    case MKTAG('f', 'd', 'A', 'T'):
        bytestream2_get_be32(&gb_chunk);
    case MKTAG('I', 'D', 'A', 'T'):
    case MKTAG('P', 'L', 'T', 'E'):
    case MKTAG('t', 'R', 'N', 'S'):
    case MKTAG('t', 'E', 'X', 't'):
    case MKTAG('z', 'T', 'X', 't'):
    case MKTAG('s', 'T', 'E', 'R'): {
        int mode = bytestream2_get_byte(&gb_chunk);
        s->stereo_mode = mode;
               "Unknown value in sTER chunk (%d)\n", mode);
    case MKTAG('c', 'I', 'C', 'P'):
        s->cicp_primaries = bytestream2_get_byte(&gb_chunk);
        s->cicp_trc       = bytestream2_get_byte(&gb_chunk);
        if (bytestream2_get_byte(&gb_chunk) != 0)
        s->cicp_range = bytestream2_get_byte(&gb_chunk);
        if (s->cicp_range != 0 && s->cicp_range != 1)
    case MKTAG('s', 'R', 'G', 'B'):
    case MKTAG('i', 'C', 'C', 'P'): {
    case MKTAG('c', 'H', 'R', 'M'): {
        s->white_point[0] = bytestream2_get_be32(&gb_chunk);
        s->white_point[1] = bytestream2_get_be32(&gb_chunk);
        for (i = 0; i < 3; i++) {
            s->display_primaries[i][0] = bytestream2_get_be32(&gb_chunk);
            s->display_primaries[i][1] = bytestream2_get_be32(&gb_chunk);
    case MKTAG('s', 'B', 'I', 'T'):
    case MKTAG('g', 'A', 'M', 'A'): {
        s->gamma = bytestream2_get_be32(&gb_chunk);
    case MKTAG('c', 'L', 'L', 'i'):
    case MKTAG('c', 'L', 'L', 'I'):
        s->clli_max = bytestream2_get_be32u(&gb_chunk);
        s->clli_avg = bytestream2_get_be32u(&gb_chunk);
    case MKTAG('m', 'D', 'C', 'v'):
    case MKTAG('m', 'D', 'C', 'V'):
        for (int i = 0; i < 3; i++) {
            s->mdcv_primaries[i][0] = bytestream2_get_be16u(&gb_chunk);
            s->mdcv_primaries[i][1] = bytestream2_get_be16u(&gb_chunk);
        s->mdcv_white_point[0] = bytestream2_get_be16u(&gb_chunk);
        s->mdcv_white_point[1] = bytestream2_get_be16u(&gb_chunk);
        s->mdcv_max_lum = bytestream2_get_be32u(&gb_chunk);
        s->mdcv_min_lum = bytestream2_get_be32u(&gb_chunk);
    case MKTAG('I', 'E', 'N', 'D'):
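Chunk types are read with bytestream2_get_le32() and matched against MKTAG values, which pack four characters into a little-endian 32-bit word, so the comparison works the same on any host byte order. For reference, this is what the macro expands to in libavutil:

#include <stdint.h>

/* FFmpeg's MKTAG packs four characters little-endian into one 32-bit tag. */
#define MKTAG(a, b, c, d) ((a) | ((b) << 8) | ((c) << 16) | ((unsigned)(d) << 24))

static const uint32_t ihdr_tag = MKTAG('I', 'H', 'D', 'R');   /* 0x52444849 */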
    /* expand palette indices to RGBA */
    if (s->bits_per_pixel <= 4)

    for (int y = 0; y < s->height; y++) {
        for (int x = s->width - 1; x >= 0; x--) {
            const uint8_t idx = row[x];

            row[4*x + 2] =  s->palette[idx]        & 0xFF;
            row[4*x + 1] = (s->palette[idx] >> 8 ) & 0xFF;
            row[4*x + 0] = (s->palette[idx] >> 16) & 0xFF;
            row[4*x + 3] =  s->palette[idx] >> 24;
    /* synthesize an alpha channel from the tRNS transparent color */
    size_t byte_depth = s->bit_depth > 8 ? 2 : 1;
    size_t raw_bpp    = s->bpp - byte_depth;

    for (y = 0; y < s->height; ++y) {
        if (s->bpp == 2 && byte_depth == 1) {
            uint8_t *pixel = &row[2 * s->width - 1];
            uint8_t *rowp  = &row[1 * s->width - 1];
            int tcolor = s->transparent_color_be[0];
            for (x = s->width; x > 0; --x) {
                *pixel-- = *rowp == tcolor ? 0 : 0xff;
        } else if (s->bpp == 4 && byte_depth == 1) {
            uint8_t *pixel = &row[4 * s->width - 1];
            uint8_t *rowp  = &row[3 * s->width - 1];
            int tcolor = AV_RL24(s->transparent_color_be);
            for (x = s->width; x > 0; --x) {
            for (x = s->width; x > 0; --x) {
                uint8_t *pixel = &row[s->bpp * (x - 1)];
                memmove(pixel, &row[raw_bpp * (x - 1)], raw_bpp);
                if (!memcmp(pixel, s->transparent_color_be, raw_bpp)) {
                    memset(&pixel[raw_bpp], 0, byte_depth);
                    memset(&pixel[raw_bpp], 0xff, byte_depth);
    if (s->last_picture.f) {
           && s->last_picture.f->width  == p->width
           && s->last_picture.f->height == p->height
           && s->last_picture.f->format == p->format
    else if (CONFIG_APNG_DECODER &&

    s->iccp_data_len = 0;
    s->iccp_name[0]  = 0;
    s->stereo_mode   = -1;

    if (s->stereo_mode >= 0) {
#if CONFIG_PNG_DECODER
    const uint8_t *buf = avpkt->data;
    int buf_size       = avpkt->size;

    sig = bytestream2_get_be64(&s->gb);
    s->y = s->has_trns = 0;
    ret = inflateReset(&s->zstream.zstream);

#if CONFIG_APNG_DECODER
    if ((ret = inflateReset(&s->zstream.zstream)) != Z_OK)
    if ((ret = inflateReset(&s->zstream.zstream)) != Z_OK)

    s->last_row_size = 0;
    s->tmp_row_size  = 0;

#if CONFIG_APNG_DECODER
#if CONFIG_PNG_DECODER
static void error(const char *err)
void ff_progress_frame_report(ProgressFrame *f, int n)
Notify later decoding threads when part of their reference frame is ready.
#define AV_LOG_WARNING
Something somehow does not look correct.
#define AV_BPRINT_SIZE_UNLIMITED
#define AV_EF_EXPLODE
abort decoding on minor error detection
static void clear_frame_metadata(PNGDecContext *s)
void ff_add_png_paeth_prediction(uint8_t *dst, uint8_t *src, uint8_t *top, int w, int bpp)
#define FF_CODEC_CAP_INIT_CLEANUP
The codec allows calling the close function for deallocation even if the init function returned a fai...
enum AVColorRange cicp_range
@ AV_PIX_FMT_YA8
8 bits gray, 8 bits alpha
enum AVColorSpace colorspace
YUV colorspace type.
AVColorTransferCharacteristic
Color Transfer Characteristic.
void av_bprint_init(AVBPrint *buf, unsigned size_init, unsigned size_max)
Struct that contains both white point location and primaries location, providing the complete descrip...
unsigned int last_row_size
#define APNG_FCTL_CHUNK_SIZE
static int decode_phys_chunk(AVCodecContext *avctx, PNGDecContext *s, GetByteContext *gb)
@ AVCOL_TRC_LINEAR
"Linear transfer characteristics"
int ff_png_get_nb_channels(int color_type)
void(* add_paeth_prediction)(uint8_t *dst, uint8_t *src, uint8_t *top, int w, int bpp)
int err_recognition
Error recognition; may misdetect some more or less valid parts as errors.
static int decode_text_chunk(PNGDecContext *s, GetByteContext *gb, int compressed)
uint16_t mdcv_white_point[2]
unsigned MaxCLL
Max content light level (cd/m^2).
This structure describes decoded (raw) audio or video data.
@ AVCOL_TRC_NB
Not part of ABI.
@ AV_PIX_FMT_RGBA64BE
packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is st...
enum AVColorTransferCharacteristic color_trc
Color Transfer Characteristic.
@ AVCOL_RANGE_JPEG
Full range content.
int ff_progress_frame_get_buffer(AVCodecContext *avctx, ProgressFrame *f, int flags)
This function sets up the ProgressFrame, i.e.
@ AVCOL_SPC_RGB
order of coefficients is actually GBR, also IEC 61966-2-1 (sRGB), YZX and ST 428-1
int flags
Frame flags, a combination of AV_FRAME_FLAGS.
AVColorPrimaries
Chromaticity coordinates of the source primaries.
unsigned int tmp_row_size
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
@ APNG_DISPOSE_OP_BACKGROUND
#define AV_PKT_FLAG_KEY
The packet contains a keyframe.
#define FF_DEBUG_PICT_INFO
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Content light level needed by to transmit HDR over HDMI (CTA-861.3).
@ AV_PIX_FMT_GRAY16BE
Y , 16bpp, big-endian.
@ AV_STEREO3D_SIDEBYSIDE
Views are next to each other.
static av_always_inline void bytestream2_skip(GetByteContext *g, unsigned int size)
#define PNG_FILTER_TYPE_LOCO
AVCodec p
The public AVCodec.
@ AVCOL_TRC_IEC61966_2_1
IEC 61966-2-1 (sRGB or sYCC)
int ff_png_pass_row_size(int pass, int bits_per_pixel, int width)
enum AVDiscard skip_frame
Skip decoding for selected frames.
static void inflate(uint8_t *dst, const uint8_t *p1, int width, int threshold, const uint8_t *coordinates[], int coord, int maxc)
static av_cold int png_dec_end(AVCodecContext *avctx)
@ AVCOL_TRC_GAMMA28
also ITU-R BT470BG
enum PNGImageState pic_state
static int decode_idat_chunk(AVCodecContext *avctx, PNGDecContext *s, GetByteContext *gb, AVFrame *p)
static const uint8_t png_pass_dsp_ymask[NB_PASSES]
#define PNG_COLOR_TYPE_RGB_ALPHA
@ AVCOL_TRC_GAMMA22
also ITU-R BT470M / ITU-R BT1700 625 PAL & SECAM
#define AV_DICT_DONT_STRDUP_VAL
Take ownership of a value that's been allocated with av_malloc() or another memory allocation functio...
#define FF_CODEC_CAP_USES_PROGRESSFRAMES
The decoder might make use of the ProgressFrame API.
enum AVColorPrimaries color_primaries
Chromaticity coordinates of the source primaries.
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
#define AV_FRAME_FLAG_KEY
A flag to mark frames that are keyframes.
#define YUV2RGB(NAME, TYPE)
#define FF_CODEC_DECODE_CB(func)
@ AVCOL_PRI_NB
Not part of ABI.
const FFCodec ff_apng_decoder
#define AV_GET_BUFFER_FLAG_REF
The decoder will keep a reference to the frame and may reuse it later.
int ff_thread_get_buffer(AVCodecContext *avctx, AVFrame *f, int flags)
Wrapper around get_buffer() for frame-multithreaded codecs.
#define PNG_COLOR_TYPE_RGB
const FFCodec ff_png_decoder
#define AV_EF_IGNORE_ERR
ignore errors and continue
#define av_assert0(cond)
assert() equivalent, that is always enabled.
enum PNGHeaderState hdr_state
int bits_per_raw_sample
Bits per sample/pixel of internal libavcodec pixel/sample format.
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
void ff_progress_frame_unref(ProgressFrame *f)
Give up a reference to the underlying frame contained in a ProgressFrame and reset the ProgressFrame,...
static int percent_missing(PNGDecContext *s)
enum AVColorPrimaries av_csp_primaries_id_from_desc(const AVColorPrimariesDesc *prm)
Detects which enum AVColorPrimaries constant corresponds to the given complete gamut description.
#define CODEC_LONG_NAME(str)
@ AV_PIX_FMT_RGBA
packed RGBA 8:8:8:8, 32bpp, RGBARGBA...
int ff_decode_mastering_display_new(const AVCodecContext *avctx, AVFrame *frame, AVMasteringDisplayMetadata **mdm)
Wrapper around av_mastering_display_metadata_create_side_data(), which rejects side data overridden b...
int flags
Additional information about the frame packing.
#define AV_CODEC_CAP_FRAME_THREADS
Codec supports frame-level multithreading.
@ AVDISCARD_ALL
discard all
@ AV_PIX_FMT_GRAY8A
alias for AV_PIX_FMT_YA8
enum AVColorPrimaries cicp_primaries
static int png_decode_idat(PNGDecContext *s, GetByteContext *gb, uint8_t *dst, ptrdiff_t dst_stride)
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
static av_cold int png_dec_init(AVCodecContext *avctx)
enum AVColorRange color_range
MPEG vs JPEG YUV range.
Rational number (pair of numerator and denominator).
@ AV_PIX_FMT_MONOBLACK
Y , 1bpp, 0 is black, 1 is white, in each byte pixels are ordered from the msb to the lsb.
@ AVCOL_PRI_BT709
also ITU-R BT1361 / IEC 61966-2-4 / SMPTE RP 177 Annex B
@ AV_PICTURE_TYPE_I
Intra.
@ AV_FRAME_DATA_ICC_PROFILE
The data contains an ICC profile as an opaque octet buffer following the format described by ISO 1507...
@ APNG_DISPOSE_OP_PREVIOUS
#define PNG_COLOR_TYPE_GRAY
#define UPDATE_THREAD_CONTEXT(func)
static int decode_sbit_chunk(AVCodecContext *avctx, PNGDecContext *s, GetByteContext *gb)
@ AV_PIX_FMT_GRAY8
Y , 8bpp.
#define AV_EF_CRCCHECK
Verify checksums embedded in the bitstream (could be of either encoded or decoded data,...
static void apng_reset_background(PNGDecContext *s, const AVFrame *p)
@ AVCOL_RANGE_UNSPECIFIED
const uint8_t ff_png_pass_ymask[NB_PASSES]
static int output_frame(PNGDecContext *s, AVFrame *f)
static const uint8_t png_pass_mask[NB_PASSES]
static av_always_inline int bytestream2_get_bytes_left(GetByteContext *g)
static av_always_inline int bytestream2_tell(GetByteContext *g)
static void handle_small_bpp(PNGDecContext *s, AVFrame *p)
#define PNG_FILTER_VALUE_NONE
enum AVPictureType pict_type
Picture type of the frame.
int(* init)(AVBSFContext *ctx)
@ AV_PIX_FMT_RGB24
packed RGB 8:8:8, 24bpp, RGBRGB...
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() or get_encode_buffer() for allocating buffers and supports custom allocators.
static void handle_p_frame_png(PNGDecContext *s, AVFrame *p)
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
int av_bprint_finalize(AVBPrint *buf, char **ret_str)
Finalize a print buffer.
uint8_t ptrdiff_t const uint8_t ptrdiff_t int intptr_t intptr_t int int16_t * dst
int av_frame_copy(AVFrame *dst, const AVFrame *src)
Copy the frame data from src to dst.
static void png_put_interlaced_row(uint8_t *dst, int width, int bits_per_pixel, int pass, int color_type, const uint8_t *src)
@ AV_PIX_FMT_YA16BE
16 bits gray, 16 bits alpha (big-endian)
#define PNG_FILTER_VALUE_AVG
static AVRational av_make_q(int num, int den)
Create an AVRational.
int ff_frame_new_side_data(const AVCodecContext *avctx, AVFrame *frame, enum AVFrameSideDataType type, size_t size, AVFrameSideData **psd)
Wrapper around av_frame_new_side_data, which rejects side data overridden by the demuxer.
#define AV_RB32(p)
Read a 32-bit big-endian value.
#define FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM
The decoder extracts and fills its parameters even if the frame is skipped due to the skip_frame sett...
void avpriv_report_missing_feature(void *avc, const char *msg,...) av_printf_format(2
Log a generic warning message about a missing feature.
#define PNG_FILTER_VALUE_PAETH
#define AV_RL24(p)
Read a 24-bit little-endian value.
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames,...
#define PNG_FILTER_VALUE_UP
#define FF_COMPLIANCE_NORMAL
const AVCRC * av_crc_get_table(AVCRCId crc_id)
Get an initialized standard CRC table.
#define AVERROR_EXTERNAL
Generic error in an external library.
int flags
A combination of AV_PKT_FLAG values.
void av_dict_free(AVDictionary **pm)
Free all the memory allocated for an AVDictionary struct and all keys and values.
static int decode_trns_chunk(AVCodecContext *avctx, PNGDecContext *s, GetByteContext *gb)
#define AV_STEREO3D_FLAG_INVERT
Inverted views, Right/Bottom represents the left view.
static int populate_avctx_color_fields(AVCodecContext *avctx, AVFrame *frame)
#define FF_THREAD_FRAME
Decode more than one frame at once.
av_cold void ff_pngdsp_init(PNGDSPContext *dsp)
int av_image_get_linesize(enum AVPixelFormat pix_fmt, int width, int plane)
Compute the size of an image line with format pix_fmt and width width for the plane plane.
AVDictionary * frame_metadata
void(* add_bytes_l2)(uint8_t *dst, uint8_t *src1, uint8_t *src2, int w)
#define PNG_FILTER_VALUE_SUB
@ AV_PIX_FMT_RGB48BE
packed RGB 16:16:16, 48bpp, 16R, 16G, 16B, the 2-byte value for each R/G/B component is stored as big...
#define i(width, name, range_min, range_max)
uint8_t * extradata
Out-of-band global headers that may be used by some codecs.
uint32_t display_primaries[3][2]
uint16_t mdcv_primaries[3][2]
void av_bprint_get_buffer(AVBPrint *buf, unsigned size, unsigned char **mem, unsigned *actual_size)
Allocate bytes in the buffer for external use.
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
static void png_handle_row(PNGDecContext *s, uint8_t *dst, ptrdiff_t dst_stride)
void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size)
Same behaviour av_fast_malloc but the buffer has additional AV_INPUT_BUFFER_PADDING_SIZE at the end w...
#define FF_DEBUG_STARTCODE
static int decode_fctl_chunk(AVCodecContext *avctx, PNGDecContext *s, GetByteContext *gb)
static int decode_plte_chunk(AVCodecContext *avctx, PNGDecContext *s, GetByteContext *gb)
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
const char * name
Name of the codec implementation.
void ff_inflate_end(FFZStream *zstream)
Wrapper around inflateEnd().
const uint8_t * buffer_end
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
#define AV_FRAME_FLAG_INTERLACED
A flag to mark frames whose content is interlaced.
@ AVCOL_RANGE_MPEG
Narrow or limited range content.
#define FF_CODEC_CAP_ICC_PROFILES
Codec supports embedded ICC profiles (AV_FRAME_DATA_ICC_PROFILE).
@ AV_PIX_FMT_PAL8
8 bits with AV_PIX_FMT_RGB32 palette
#define FFSWAP(type, a, b)
int strict_std_compliance
strictly follow the standard (MPEG-4, ...).
enum AVStereo3DType type
How views are packed within the video.
int ff_decode_content_light_new(const AVCodecContext *avctx, AVFrame *frame, AVContentLightMetadata **clm)
Wrapper around av_content_light_metadata_create_side_data(), which rejects side data overridden by th...
void av_bprintf(AVBPrint *buf, const char *fmt,...)
Append a formatted string to a print buffer.
#define AV_RL32(p)
Read a 32-bit little-endian value.
void ff_progress_frame_replace(ProgressFrame *dst, const ProgressFrame *src)
Do nothing if dst and src already refer to the same AVFrame; otherwise unreference dst and if src is ...
static char * iso88591_to_utf8(const char *in, size_t size_in)
void ff_png_filter_row(PNGDSPContext *dsp, uint8_t *dst, int filter_type, uint8_t *src, uint8_t *last, int size, int bpp)
main external API structure.
int active_thread_type
Which multithreading methods are in use by the codec.
static int decode_frame_common(AVCodecContext *avctx, PNGDecContext *s, AVFrame *p, const AVPacket *avpkt)
uint32_t av_crc(const AVCRC *ctx, uint32_t crc, const uint8_t *buffer, size_t length)
Calculate the CRC of a block.
#define UNROLL_FILTER(op)
ProgressFrame last_picture
uint8_t transparent_color_be[6]
void av_fast_padded_mallocz(void *ptr, unsigned int *size, size_t min_size)
Same behaviour av_fast_padded_malloc except that buffer will always be 0-initialized after call.
static const uint8_t png_pass_dsp_mask[NB_PASSES]
int discard_damaged_percentage
The percentage of damaged samples to discard a frame.
#define PNG_COLOR_MASK_PALETTE
static int decode_iccp_chunk(PNGDecContext *s, GetByteContext *gb)
static int decode_ihdr_chunk(AVCodecContext *avctx, PNGDecContext *s, GetByteContext *gb)
AVStereo3D * av_stereo3d_create_side_data(AVFrame *frame)
Allocate a complete AVFrameSideData and add it to the frame.
#define avpriv_request_sample(...)
Structure to hold side data for an AVFrame.
unsigned int codec_tag
fourcc (LSB first, so "ABCD" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A').
The ProgressFrame structure.
This structure stores compressed data.
unsigned MaxFALL
Max average light level per frame (cd/m^2).
int av_dict_set(AVDictionary **pm, const char *key, const char *value, int flags)
Set the given entry in *pm, overwriting an existing entry.
int ff_inflate_init(FFZStream *zstream, void *logctx)
Wrapper around inflateInit().
static av_always_inline void bytestream2_init(GetByteContext *g, const uint8_t *buf, int buf_size)
int linesize[AV_NUM_DATA_POINTERS]
For video, a positive or negative value, which is typically indicating the size in bytes of each pict...
#define PNG_COLOR_TYPE_GRAY_ALPHA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
@ AVCOL_TRC_SMPTE428
SMPTE ST 428-1.
enum AVColorTransferCharacteristic cicp_trc
#define MKTAG(a, b, c, d)
Stereo 3D type: this structure describes how two videos are packed within a single video surface,...
static int handle_p_frame_apng(AVCodecContext *avctx, PNGDecContext *s, AVFrame *p)
int av_image_check_size(unsigned int w, unsigned int h, int log_offset, void *log_ctx)
Check if the given dimension of an image is valid, meaning that all bytes of the image can be address...
AVColorRange
Visual content value range.
static int decode_zbuf(AVBPrint *bp, const uint8_t *data, const uint8_t *data_end, void *logctx)
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown) That is the width of a pixel divided by the height of the pixel.
#define PNG_COLOR_TYPE_PALETTE
#define AV_DICT_DONT_STRDUP_KEY
Take ownership of a key that's been allocated with av_malloc() or another memory allocation function.
const char * av_get_pix_fmt_name(enum AVPixelFormat pix_fmt)
Return the short name for a pixel format, NULL in case pix_fmt is unknown.
#define av_fourcc2str(fourcc)