52 #define MAX_DWT_LEVELS 5
57 #define MAX_REFERENCE_FRAMES 8
59 #define MAX_FRAMES (MAX_REFERENCE_FRAMES + MAX_DELAY + 1)
61 #define MAX_BLOCKSIZE 32
66 #define DIRAC_REF_MASK_REF1 1
67 #define DIRAC_REF_MASK_REF2 2
68 #define DIRAC_REF_MASK_GLOBAL 4
74 #define DELAYED_PIC_REF 4
76 #define CALC_PADDING(size, depth) \
77 (((size + (1 << depth) - 1) >> depth) << depth)
79 #define DIVRNDUP(a, b) (((a) + (b) - 1) / (b))
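A minimal standalone sketch of what these two helpers compute (nothing here is from diracdec.c beyond the macros themselves): CALC_PADDING rounds a size up to the next multiple of 2^depth, and DIVRNDUP is plain ceiling division.

#include <stdio.h>

#define CALC_PADDING(size, depth) \
    (((size + (1 << depth) - 1) >> depth) << depth)
#define DIVRNDUP(a, b) (((a) + (b) - 1) / (b))

int main(void)
{
    /* 100 rounded up to a multiple of 2^3 = 8 is 104; ceil(100/8) is 13 */
    printf("%d %d\n", CALC_PADDING(100, 3), DIVRNDUP(100, 8));
    return 0;
}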
243 { { 5, 3, 3, 0}, { 0, 4, 4, 1}, { 0, 5, 5, 2}, { 0, 6, 6, 3} },
244 { { 4, 2, 2, 0}, { 0, 4, 4, 2}, { 0, 5, 5, 3}, { 0, 7, 7, 5} },
245 { { 5, 3, 3, 0}, { 0, 4, 4, 1}, { 0, 5, 5, 2}, { 0, 6, 6, 3} },
246 { { 8, 4, 4, 0}, { 0, 4, 4, 0}, { 0, 4, 4, 0}, { 0, 4, 4, 0} },
247 { { 8, 4, 4, 0}, { 0, 4, 4, 0}, { 0, 4, 4, 0}, { 0, 4, 4, 0} },
248 { { 0, 4, 4, 8}, { 0, 8, 8, 12}, { 0, 13, 13, 17}, { 0, 17, 17, 21} },
249 { { 3, 1, 1, 0}, { 0, 4, 4, 2}, { 0, 6, 6, 5}, { 0, 9, 9, 7} },
253 4, 5, 6, 7, 8, 10, 11, 13,
254 16, 19, 23, 27, 32, 38, 45, 54,
255 64, 76, 91, 108, 128, 152, 181, 215,
256 256, 304, 362, 431, 512, 609, 724, 861,
257 1024, 1218, 1448, 1722, 2048, 2435, 2896, 3444,
258 4096, 4871, 5793, 6889, 8192, 9742, 11585, 13777,
259 16384, 19484, 23170, 27554, 32768, 38968, 46341, 55109,
264 1, 2, 3, 4, 4, 5, 6, 7,
265 8, 10, 12, 14, 16, 19, 23, 27,
266 32, 38, 46, 54, 64, 76, 91, 108,
267 128, 152, 181, 216, 256, 305, 362, 431,
268 512, 609, 724, 861, 1024, 1218, 1448, 1722,
269 2048, 2436, 2897, 3445, 4096, 4871, 5793, 6889,
270 8192, 9742, 11585, 13777, 16384, 19484, 23171, 27555,
275 1, 2, 2, 3, 3, 4, 4, 5,
276 6, 7, 9, 10, 12, 14, 17, 20,
277 24, 29, 34, 41, 48, 57, 68, 81,
278 96, 114, 136, 162, 192, 228, 272, 323,
279 384, 457, 543, 646, 768, 913, 1086, 1292,
280 1536, 1827, 2172, 2583, 3072, 3653, 4344, 5166,
281 6144, 7307, 8689, 10333, 12288, 14613, 17378, 20666,
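The first table above (qscale_tab, per the index at the end of this listing) looks like a quarter-power-of-two progression: each entry is roughly 4 * 2^(quant/4), so every fourth value doubles, and the built-in factor of four is removed later by the ">> 2" in the dequantization statements at lines 490 and 503. A hedged, hypothetical generator that reproduces the first entries:

#include <math.h>
#include <stdio.h>

int main(void)
{
    /* prints 4 5 6 7 8 10 11 13 16 19 23 27 32, matching the start of qscale_tab */
    for (int q = 0; q <= 12; q++)
        printf("%ld ", lrint(4.0 * pow(2.0, q / 4.0)));
    printf("\n");
    return 0;
}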
288 return (int)((x+1U)*21845 + 10922) >> 16;
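Since 21845 is roughly 2^16/3, the multiply-and-shift above gives x/3 rounded to nearest without a division (this is the body of divide3(), used when averaging three DC predictions). A quick standalone check, assuming a typical two's-complement target:

#include <assert.h>

static int divide3(int x)
{
    return (int)((x + 1U) * 21845 + 10922) >> 16;
}

int main(void)
{
    assert(divide3(3)  ==  1);
    assert(divide3(4)  ==  1);  /* 1.33 rounds down */
    assert(divide3(5)  ==  2);  /* 1.67 rounds up   */
    assert(divide3(-3) == -1);  /* negative DC sums work too */
    return 0;
}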
294 int i, remove_idx = -1;
296 for (i = 0; framelist[i]; i++)
297 if (framelist[i]->avframe->display_picture_number == picnum) {
298 remove_pic = framelist[i];
303 for (i = remove_idx; framelist[i]; i++)
304 framelist[i] = framelist[i+1];
312 for (i = 0; i < maxframes; i++)
314 framelist[i] = frame;
324 int i, w, h, top_padding;
327 for (i = 0; i < 3; i++) {
397 for (j = 0; j < 3; j++)
398 for (k = 1; k < 4; k++)
405 for (i = 0; i < 3; i++) {
463 #define SIGN_CTX(x) (CTX_SIGN_ZERO + ((x) > 0) - ((x) < 0))
477 sign_pred = buf[-b->stride];
481 pred_ctx += !(buf[-1] | buf[-b->stride] | buf[-1-b->stride]);
485 pred_ctx += !buf[-b->stride];
490 coeff = (coeff * qfactor + qoffset + 2) >> 2;
492 coeff = (coeff ^ -sign) + sign;
503 coeff = (coeff * qfactor + qoffset + 2) >> 2;
505 coeff = (coeff ^ -sign) + sign;
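The second statement in each pair is the usual branchless conditional negate: when sign is 1, (coeff ^ -1) + 1 equals -coeff; when sign is 0 it leaves coeff untouched. A tiny check (apply_sign is a hypothetical helper name, not from the file):

#include <assert.h>

static int apply_sign(int coeff, int sign)
{
    return (coeff ^ -sign) + sign;
}

int main(void)
{
    assert(apply_sign(7, 0) ==  7);
    assert(apply_sign(7, 1) == -7);
    return 0;
}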
516 int left, int right, int top, int bottom,
517 int blockcnt_one, int is_arith)
519 int x, y, zero_block;
520 int qoffset, qfactor;
540 if (quant > INT_MAX - b->quant || b->quant + quant < 0) {
557 for (y = top; y < bottom; y++) {
558 for (x = left; x < right; x++) {
578 for (x = 1; x < b->width; x++)
582 for (y = 1; y < b->height; y++) {
583 buf[0] += buf[-b->stride];
585 for (x = 1; x < b->width; x++) {
599 int cb_x, cb_y, left, right, top, bottom;
604 int blockcnt_one = (cb_width + cb_height) == 2;
615 for (cb_y = 0; cb_y < cb_height; cb_y++) {
616 bottom = (b->height * (cb_y+1LL)) / cb_height;
618 for (cb_x = 0; cb_x < cb_width; cb_x++) {
619 right = (b->width * (cb_x+1LL)) / cb_width;
620 codeblock(s, b, &gb, &c, left, right, top, bottom, blockcnt_one, is_arith);
654 int level, num_bands = 0;
658 for (orientation = !!level; orientation < 4; orientation++) {
660 bands[num_bands++] = b;
686 int slice_x, int slice_y, int bits_end,
705 for (y = top; y < bottom; y++) {
706 for (x = left; x < right; x++) {
749 for (orientation = !!level; orientation < 4; orientation++) {
758 chroma_bits = 8*slice->bytes - 7 - length_bits - luma_bits;
762 for (orientation = !!level; orientation < 4; orientation++) {
793 for (slice_y = 0; bufsize > 0 && slice_y < s->lowdelay.num_y; slice_y++)
794 for (slice_x = 0; bufsize > 0 && slice_x < s->lowdelay.num_x; slice_x++) {
798 slices[slice_num].bytes = bytes;
799 slices[slice_num].slice_x = slice_x;
800 slices[slice_num].slice_y = slice_y;
805 if (bufsize/8 >= bytes)
822 int i, w, h, level, orientation;
824 for (i = 0; i < 3; i++) {
836 for (orientation = !!level; orientation < 4; orientation++) {
875 static const uint8_t default_blen[] = { 4, 12, 16, 24 };
909 "invalid x/y block length (%d/%d) for x/y chroma shift (%d/%d)\n",
940 for (ref = 0; ref < s->num_refs; ref++) {
1002 #define CHECKEDREAD(dst, cond, errmsg) \
1003 tmp = svq3_get_ue_golomb(gb); \
1005 av_log(s->avctx, AV_LOG_ERROR, errmsg); \
1006 return AVERROR_INVALIDDATA; \
1070 for (i = 0; i < 4; i++) {
1083 static const uint8_t avgsplit[7] = { 0, 0, 1, 1, 1, 2, 2 };
1092 return avgsplit[sbsplit[-1] + sbsplit[-stride] + sbsplit[-stride-1]];
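The avgsplit[] lookup above appears to be just the rounded mean of the three neighbouring superblock split levels (each 0..2), i.e. avgsplit[s] == (s + 1) / 3. A standalone check of that reading:

#include <assert.h>

int main(void)
{
    static const unsigned char avgsplit[7] = { 0, 0, 1, 1, 1, 2, 2 };
    for (int s = 0; s < 7; s++)
        assert(avgsplit[s] == (s + 1) / 3);
    return 0;
}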
1102 return block[-1].ref & refmask;
1107 pred = (block[-1].ref & refmask) + (block[-stride].ref & refmask) + (block[-stride-1].ref & refmask);
1108 return (pred >> 1) & refmask;
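Lines 1107-1108 read as a per-bit majority vote: with refmask one of the DIRAC_REF_MASK_* bits, the predicted flag is set only if at least two of the three neighbours have it set. A small sketch with a hypothetical helper:

#include <assert.h>

static int majority_ref(int a, int b, int c, int refmask)
{
    int pred = (a & refmask) + (b & refmask) + (c & refmask);
    return (pred >> 1) & refmask;
}

int main(void)
{
    assert(majority_ref(1, 0, 1, 1) == 1);  /* two of three set -> set   */
    assert(majority_ref(1, 0, 0, 1) == 0);  /* one of three set -> clear */
    assert(majority_ref(2, 2, 0, 2) == 2);  /* same trick for either ref bit */
    return 0;
}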
1115 memset(block->u.dc, 0, sizeof(block->u.dc));
1117 if (x && !(block[-1].ref & 3)) {
1118 for (i = 0; i < 3; i++)
1119 block->u.dc[i] += block[-1].u.dc[i];
1123 if (y && !(block[-stride].ref & 3)) {
1124 for (i = 0; i < 3; i++)
1125 block->u.dc[i] += block[-stride].u.dc[i];
1129 if (x && y && !(block[-1-stride].ref & 3)) {
1130 for (i = 0; i < 3; i++)
1131 block->u.dc[i] += block[-1-stride].u.dc[i];
1136 for (i = 0; i < 3; i++)
1137 block->u.dc[i] = (block->u.dc[i]+1)>>1;
1138 } else if (n == 3) {
1139 for (i = 0; i < 3; i++)
1147 int refmask = ref+1;
1151 if (x && (block[-1].ref & mask) == refmask)
1152 pred[n++] = block[-1].u.mv[ref];
1154 if (y && (block[-stride].ref & mask) == refmask)
1157 if (x && y && (block[-stride-1].ref & mask) == refmask)
1158 pred[n++] = block[-stride-1].u.mv[ref];
1162 block->u.mv[ref][0] = 0;
1163 block->u.mv[ref][1] = 0;
1166 block->u.mv[ref][0] = pred[0][0];
1167 block->u.mv[ref][1] = pred[0][1];
1170 block->u.mv[ref][0] = (pred[0][0] + pred[1][0] + 1) >> 1;
1171 block->u.mv[ref][1] = (pred[0][1] + pred[1][1] + 1) >> 1;
1174 block->u.mv[ref][0] = mid_pred(pred[0][0], pred[1][0], pred[2][0]);
1175 block->u.mv[ref][1] = mid_pred(pred[0][1], pred[1][1], pred[2][1]);
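mid_pred() (from libavcodec/mathops.h) returns the median of its three arguments, so with three spatial candidates this is the familiar median motion-vector predictor. An equivalent standalone median, for illustration only:

#include <assert.h>

static int median3(int a, int b, int c)
{
    if (a > b) { int t = a; a = b; b = t; }  /* ensure a <= b        */
    if (b > c) b = c;                        /* b = min(max(a,b), c) */
    return a > b ? a : b;                    /* median is max(a, b)  */
}

int main(void)
{
    assert(median3(3, 1, 2) == 2);
    assert(median3(1, 2, 3) == 2);
    assert(median3(2, 3, 1) == 2);
    return 0;
}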
1188 int64_t m = (1<<ep) - (c[0]*(int64_t)x + c[1]*(int64_t)y);
1189 int64_t mx = m * (int64_t)((A[0][0] * (int64_t)x + A[0][1]*(int64_t)y) + (1LL<<ez) * b[0]);
1190 int64_t my = m * (int64_t)((A[1][0] * (int64_t)x + A[1][1]*(int64_t)y) + (1LL<<ez) * b[1]);
1192 block->u.mv[ref][0] = (mx + (1<<(ez+ep))) >> (ez+ep);
1193 block->u.mv[ref][1] = (my + (1<<(ez+ep))) >> (ez+ep);
1211 for (i = 0; i < 3; i++)
1222 if (block->ref & (i+1)) {
1226 pred_mv(block, stride, x, y, i);
1241 for (x = 1; x < size; x++)
1244 for (y = 1; y < size; y++) {
1246 for (x = 0; x < size; x++)
1273 for (y = 0; y < s->sbheight; y++) {
1274 for (x = 0; x < s->sbwidth; x++) {
1285 for (i = 0; i < s->num_refs; i++) {
1289 for (i = 0; i < 3; i++)
1293 for (x = 0; x < s->sbwidth; x++) {
1297 for (q = 0; q < blkcnt; q++)
1298 for (p = 0; p < blkcnt; p++) {
1299 int bx = 4 * x + p*step;
1300 int by = 4 * y + q*step;
1312 #define ROLLOFF(i) offset == 1 ? ((i) ? 5 : 3) : \
1313 (1 + (6*(i) + offset - 1) / (2*offset - 1))
1317 else if (i > blen-1 - 2*offset)
1323 int left, int right, int wy)
1326 for (x = 0; left && x < p->xblen >> 1; x++)
1327 obmc_weight[x] = wy*8;
1328 for (; x < p->xblen >> right; x++)
1330 for (; x < p->xblen; x++)
1331 obmc_weight[x] = wy*8;
1337 int left, int right, int top, int bottom)
1340 for (y = 0; top && y < p->yblen >> 1; y++) {
1344 for (; y < p->yblen >> bottom; y++) {
1349 for (; y < p->yblen; y++) {
1361 if (top || bottom || by == 1) {
1396 int x, int y, int ref, int plane)
1400 int motion_x = block->u.mv[ref][0];
1401 int motion_y = block->u.mv[ref][1];
1402 int mx, my, i, epel, nplanes = 0;
1425 src[0] = ref_hpel[(my>>1)+(mx>>2)] + y*p->stride + x;
1429 for (i = 0; i < 4; i++)
1430 src[i] = ref_hpel[i] + y*p->stride + x;
1454 src[!mx] = src[2 + !!mx];
1456 } else if (!(my&3)) {
1457 src[0] = src[(my>>1) ];
1458 src[1] = src[(my>>1)+1];
1479 for (i = 0; i < nplanes; i++) {
1487 return (nplanes>>1) + epel;
1491 uint8_t *obmc_weight, int xblen, int yblen)
1496 for (y = 0; y < yblen; y++) {
1497 for (x = 0; x < xblen; x += 2) {
1498 dst[x ] += dc * obmc_weight[x ];
1499 dst[x+1] += dc * obmc_weight[x+1];
1507 uint16_t *mctmp, uint8_t *obmc_weight,
1508 int plane, int dstx, int dsty)
1514 switch (block->ref&3) {
1520 idx = mc_subpel(s, block, src, dstx, dsty, (block->ref&3)-1, plane);
1527 idx = mc_subpel(s, block, src, dstx, dsty, 0, plane);
1529 idx = mc_subpel(s, block, src, dstx, dsty, 1, plane);
1550 for (x = 1; x < s->blwidth-1; x++) {
1592 for (i = 1; i < 4; i++) {
1604 ref->hpel[plane][3], ref->hpel[plane][0],
1622 int y, i, comp, dsty;
1627 for (comp = 0; comp < 3; comp++) {
1637 for (comp = 0; comp < 3; comp++) {
1642 for (i = 0; i < 4; i++)
1656 for (y = 0; y < p->height; y += 16) {
1666 for (i = 0; i < s->num_refs; i++) {
1675 for (y = 0; y < s->blheight; y++) {
1678 uint16_t *mctmp = s->mctmp + y*rowheight;
1691 mc_row(s, blocks, mctmp, comp, dsty);
1710 int chroma_x_shift, chroma_y_shift;
1719 for (i = 0; f->data[i]; i++) {
1736 unsigned retire, picnum;
1738 int64_t refdist, refnum;
1753 for (i = 0; i < s->num_refs; i++) {
1755 refdist = INT64_MAX;
1790 if (retire != picnum) {
1853 #define DATA_UNIT_HEADER_SIZE 13
1861 int ret, i, parse_code;
1867 parse_code = buf[4];
1887 } else if (parse_code == pc_eos) {
1895 if (sscanf(buf+14, "Schroedinger %d.%d.%d", ver, ver+1, ver+2) == 3)
1896 if (ver[0] == 1 && ver[1] == 0 && ver[2] <= 7)
1899 } else if (parse_code & 0x8) {
1917 tmp = parse_code & 0x03;
1923 s->is_arith = (parse_code & 0x48) == 0x08;
1924 s->low_delay = (parse_code & 0x88) == 0x88;
1925 pic->reference = (parse_code & 0x0C) == 0x0C;
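These three bit tests (plus the "parse_code & 0x03" reference count at line 1917) decode the picture type straight from the Dirac parse code. For example, parse code 0x0C comes out as an intra, arithmetically coded reference picture; a quick standalone illustration using the same masks:

#include <stdio.h>

int main(void)
{
    unsigned parse_code = 0x0C;
    printf("num_refs=%u arith=%d low_delay=%d reference=%d\n",
           parse_code & 0x03,
           (parse_code & 0x48) == 0x08,
           (parse_code & 0x88) == 0x88,
           (parse_code & 0x0C) == 0x0C);
    /* prints: num_refs=0 arith=1 low_delay=0 reference=1 */
    return 0;
}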
1957 int buf_size = pkt->size;
1960 unsigned data_unit_size;
1981 if (buf[buf_idx ] == 'B' && buf[buf_idx+1] == 'B' &&
1982 buf[buf_idx+2] == 'C' && buf[buf_idx+3] == 'D')
1989 data_unit_size = AV_RB32(buf+buf_idx+5);
1990 if (data_unit_size > buf_size - buf_idx || !data_unit_size) {
1991 if(data_unit_size > buf_size - buf_idx)
1993 "Data unit with size %d is larger than input buffer, discarding\n",
2005 buf_idx += data_unit_size;
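Together with DATA_UNIT_HEADER_SIZE (13, line 1853), the code above implies the parse-info header layout: a 4-byte 'BBCD' sync word, the parse code at offset 4, and two big-endian 32-bit offsets starting at byte 5, the first of which (the AV_RB32 read at line 1989) is taken as the size of the data unit. A hedged helper sketch; the field name next_parse_offset comes from the Dirac spec and is not used in this file:

#include <stdint.h>

/* Returns the data-unit size (next_parse_offset) if buf points at a
 * parse-info header, or 0 if the 'BBCD' sync word is missing. */
static uint32_t parse_info_size(const uint8_t *buf)
{
    if (buf[0] != 'B' || buf[1] != 'B' || buf[2] != 'C' || buf[3] != 'D')
        return 0;
    /* buf[4] is the parse code; bytes 5-8 are big-endian, as AV_RB32 reads them */
    return ((uint32_t)buf[5] << 24) | ((uint32_t)buf[6] << 16) |
           ((uint32_t)buf[7] <<  8) |  (uint32_t)buf[8];
}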
2029 if (delayed_frame) {
#define CHECKEDREAD(dst, cond, errmsg)
void(* add_obmc)(uint16_t *dst, const uint8_t *src, int stride, const uint8_t *obmc_weight, int yblen)
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
static av_cold int dirac_decode_end(AVCodecContext *avctx)
static void codeblock(DiracContext *s, SubBand *b, GetBitContext *gb, DiracArith *c, int left, int right, int top, int bottom, int blockcnt_one, int is_arith)
Decode the coeffs in the rectangle defined by left, right, top, bottom [DIRAC_STD] 13...
This structure describes decoded (raw) audio or video data.
dirac_weight_func weight_func
static void flush(AVCodecContext *avctx)
struct DiracContext::@44 globalmc[2]
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
DiracFrame * ref_frames[MAX_REFERENCE_FRAMES+1]
static int divide3(int x)
static int dirac_decode_frame_internal(DiracContext *s)
Dirac Specification -> 13.0 Transform data syntax.
static void skip_bits_long(GetBitContext *s, int n)
static av_cold int init(AVCodecContext *avctx)
static void propagate_block_data(DiracBlock *block, int stride, int size)
Copies the current block to the other blocks covered by the current superblock split mode...
dirac_weight_func weight_dirac_pixels_tab[3]
#define DELAYED_PIC_REF
Value of Picture.reference when Picture is not a reference picture, but is held for delayed output...
void ff_dirac_init_arith_decoder(DiracArith *c, GetBitContext *gb, int length)
#define DATA_UNIT_HEADER_SIZE
Dirac Specification -> 9.6 Parse Info Header Syntax.
#define DECLARE_ALIGNED(n, t, v)
static unsigned svq3_get_ue_golomb(GetBitContext *gb)
static void global_mv(DiracContext *s, DiracBlock *block, int x, int y, int ref)
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
static const int qoffset_inter_tab[MAX_QUANT+1]
static int dirac_get_arith_uint(DiracArith *c, int follow_ctx, int data_ctx)
static int alloc_buffers(DiracContext *s, int stride)
dirac_source_params source
static void coeff_unpack_arith(DiracArith *c, int qfactor, int qoffset, SubBand *b, IDWTELEM *buf, int x, int y)
static void lowdelay_subband(DiracContext *s, GetBitContext *gb, int quant, int slice_x, int slice_y, int bits_end, SubBand *b1, SubBand *b2)
static void dirac_decode_flush(AVCodecContext *avctx)
const uint8_t * coeff_data
static int get_delayed_pic(DiracContext *s, AVFrame *picture, int *got_frame)
static int dirac_unpack_idwt_params(DiracContext *s)
Dirac Specification -> 11.3 Wavelet transform data.
#define DIRAC_REF_MASK_REF2
unsigned weight_log2denom
void(* put_signed_rect_clamped)(uint8_t *dst, int dst_stride, const int16_t *src, int src_stride, int width, int height)
#define AV_CODEC_CAP_DELAY
Encoder or decoder requires flushing with NULL input at the end in order to give the complete and cor...
#define av_assert0(cond)
assert() equivalent, that is always enabled.
DiracFrame * delay_frames[MAX_DELAY+1]
void(* emulated_edge_mc)(uint8_t *dst, const uint8_t *src, ptrdiff_t dst_linesize, ptrdiff_t src_linesize, int block_w, int block_h, int src_x, int src_y, int w, int h)
Copy a rectangular area of samples to a temporary buffer and replicate the border samples...
av_cold void ff_mpegvideoencdsp_init(MpegvideoEncDSPContext *c, AVCodecContext *avctx)
void(* add_rect_clamped)(uint8_t *dst, const uint16_t *src, int stride, const int16_t *idwt, int idwt_stride, int width, int height)
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Interface to Dirac Decoder/Encoder.
static int coeff_unpack_golomb(GetBitContext *gb, int qfactor, int qoffset)
static av_cold int dirac_decode_init(AVCodecContext *avctx)
uint8_t quant[MAX_DWT_LEVELS][4]
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
static int dirac_get_se_golomb(GetBitContext *gb)
static void free_sequence_buffers(DiracContext *s)
static int get_bits_count(const GetBitContext *s)
bitstream reader API header.
struct DiracContext::@42 codeblock[MAX_DWT_LEVELS+1]
static const uint8_t epel_weights[4][4][4]
static int decode_lowdelay_slice(AVCodecContext *avctx, void *arg)
Dirac Specification -> 13.5.2 Slices.
static const int qoffset_intra_tab[MAX_QUANT+1]
void(* avg_dirac_pixels_tab[3][4])(uint8_t *dst, const uint8_t *src[5], int stride, int h)
static void pred_block_dc(DiracBlock *block, int stride, int x, int y)
static int get_bits_left(GetBitContext *gb)
av_cold void ff_diracdsp_init(DiracDSPContext *c)
int width
width and height of the video frame
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
static int dirac_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *pkt)
void(* dirac_hpel_filter)(uint8_t *dsth, uint8_t *dstv, uint8_t *dstc, const uint8_t *src, int stride, int width, int height)
static const uint16_t mask[17]
#define DIRAC_REF_MASK_GLOBAL
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
static int pred_sbsplit(uint8_t *sbsplit, int stride, int x, int y)
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification. ...
static int add_frame(DiracFrame *framelist[], int maxframes, DiracFrame *frame)
static void pred_mv(DiracBlock *block, int stride, int x, int y, int ref)
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
static int interpolate_refplane(DiracContext *s, DiracFrame *ref, int plane, int width, int height)
static int mc_subpel(DiracContext *s, DiracBlock *block, const uint8_t *src[5], int x, int y, int ref, int plane)
For block x,y, determine which of the hpel planes to do bilinear interpolation from and set src[] to ...
const char * name
Name of the codec implementation.
DiracFrame * current_picture
#define MAX_DWT_LEVELS
The spec limits the number of wavelet decompositions to 4 for both level 1 (VC-2) and 128 (long-gop d...
unsigned old_delta_quant
schroedinger older than 1.0.8 doesn't store quant delta if only one codebook exists in a band ...
static const uint8_t offset[127][2]
Libavcodec external API header.
int avpriv_dirac_parse_sequence_header(AVCodecContext *avctx, GetBitContext *gb, dirac_source_params *source)
static int dirac_decode_data_unit(AVCodecContext *avctx, const uint8_t *buf, int size)
static void init_planes(DiracContext *s)
static void decode_block_params(DiracContext *s, DiracArith arith[8], DiracBlock *block, int stride, int x, int y)
av_cold void ff_videodsp_init(VideoDSPContext *ctx, int bpc)
enum AVPictureType pict_type
Picture type of the frame.
int display_picture_number
picture number in display order
#define CALC_PADDING(size, depth)
void(* avg_pixels_tab[4])(uint8_t *dst, const uint8_t *src[5], int stride, int h)
static void block_mc(DiracContext *s, DiracBlock *block, uint16_t *mctmp, uint8_t *obmc_weight, int plane, int dstx, int dsty)
static DiracFrame * remove_frame(DiracFrame *framelist[], int picnum)
void ff_spatial_idwt_slice2(DWTContext *d, int y)
int width
picture width / height.
static int dirac_unpack_prediction_parameters(DiracContext *s)
Unpack the motion compensation parameters Dirac Specification -> 11.2 Picture prediction data...
MpegvideoEncDSPContext mpvencdsp
static void mc_row(DiracContext *s, DiracBlock *block, uint16_t *mctmp, int plane, int dsty)
#define FFABS(a)
Absolute value, Note, INT_MIN / INT64_MIN result in undefined behavior as they are not representable ...
static void select_dsp_funcs(DiracContext *s, int width, int height, int xblen, int yblen)
void avcodec_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift)
Utility function to access log2_chroma_w log2_chroma_h from the pixel format AVPixFmtDescriptor.
uint8_t * edge_emu_buffer_base
static void intra_dc_prediction(SubBand *b)
Dirac Specification -> 13.3 intra_dc_prediction(band)
static void decode_component(DiracContext *s, int comp)
Dirac Specification -> [DIRAC_STD] 13.4.1 core_transform_data()
static void init_obmc_weights(DiracContext *s, Plane *p, int by)
static const float pred[4]
void(* add_dirac_obmc[3])(uint16_t *dst, const uint8_t *src, int stride, const uint8_t *obmc_weight, int yblen)
SubBand band[MAX_DWT_LEVELS][4]
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
static const int8_t mv[256][2]
static int get_buffer_with_edge(AVCodecContext *avctx, AVFrame *f, int flags)
static av_always_inline void decode_subband_internal(DiracContext *s, SubBand *b, int is_arith)
Dirac Specification -> 13.4.2 Non-skipped subbands.
uint8_t * edge_emu_buffer[4]
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
static const uint8_t default_qmat[][4][4]
void(* dirac_weight_func)(uint8_t *block, int stride, int log2_denom, int weight, int h)
main external API structure.
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Get a buffer for a frame.
DiracFrame all_frames[MAX_FRAMES]
Arithmetic decoder for Dirac.
dirac_biweight_func biweight_dirac_pixels_tab[3]
static unsigned int get_bits1(GetBitContext *s)
static int decode_lowdelay(DiracContext *s)
Dirac Specification -> 13.5.1 low_delay_transform_data()
static int dirac_get_arith_bit(DiracArith *c, int ctx)
rational number numerator/denominator
static int init_get_bits(GetBitContext *s, const uint8_t *buffer, int bit_size)
Initialize GetBitContext.
dirac_biweight_func biweight_func
int ff_spatial_idwt_init2(DWTContext *d, IDWTELEM *buffer, int width, int height, int stride, enum dwt_type type, int decomposition_count, IDWTELEM *temp)
static unsigned int get_bits_long(GetBitContext *s, int n)
Read 0-32 bits.
static int decode_subband_arith(AVCodecContext *avctx, void *b)
static int weight(int i, int blen, int offset)
static const int qscale_tab[MAX_QUANT+1]
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
static int pred_block_mode(DiracBlock *block, int stride, int x, int y, int refmask)
static int decode(AVCodecContext *avctx, void *data, int *got_sub, AVPacket *avpkt)
#define MAX_REFERENCE_FRAMES
The spec limits this to 3 for frame coding, but in practice can be as high as 6.
static int dirac_decode_picture_header(DiracContext *s)
Dirac Specification -> 11.1.1 Picture Header.
common internal api header.
static int decode_subband_golomb(AVCodecContext *avctx, void *arg)
static int dirac_get_arith_int(DiracArith *c, int follow_ctx, int data_ctx)
static void init_obmc_weight_row(Plane *p, uint8_t *obmc_weight, int stride, int left, int right, int wy)
Core video DSP helper functions.
void(* put_dirac_pixels_tab[3][4])(uint8_t *dst, const uint8_t *src[5], int stride, int h)
dirac_pixels_tab[width][subpel] width is 2 for 32, 1 for 16, 0 for 8 subpel is 0 for fpel and hpel (o...
dirac_parse_code
Dirac Specification -> Parse code values.
static int alloc_sequence_buffers(DiracContext *s)
void(* put_pixels_tab[4])(uint8_t *dst, const uint8_t *src[5], int stride, int h)
static void init_obmc_weight(Plane *p, uint8_t *obmc_weight, int stride, int left, int right, int top, int bottom)
int(* execute)(struct AVCodecContext *c, int(*func)(struct AVCodecContext *c2, void *arg), void *arg2, int *ret, int count, int size)
The codec may call this to execute several independent things.
void(* draw_edges)(uint8_t *buf, int wrap, int width, int height, int w, int h, int sides)
void(* dirac_biweight_func)(uint8_t *dst, const uint8_t *src, int stride, int log2_denom, int weightd, int weights, int h)
int key_frame
1 -> keyframe, 0-> not
static const double coeff[2][5]
static const uint8_t * align_get_bits(GetBitContext *s)
struct DiracContext::@43 lowdelay
static void * av_mallocz_array(size_t nmemb, size_t size)
static void add_dc(uint16_t *dst, int dc, int stride, uint8_t *obmc_weight, int xblen, int yblen)
#define av_malloc_array(a, b)
uint8_t * hpel_base[3][4]
#define FFSWAP(type, a, b)
This structure stores compressed data.
#define AV_GET_BUFFER_FLAG_REF
The decoder will keep a reference to the frame and may reuse it later.
#define DIRAC_REF_MASK_REF1
DiracBlock->ref flags, if set then the block does MC from the given ref.
static int dirac_unpack_block_motion_data(DiracContext *s)
Dirac Specification ->
uint8_t obmc_weight[3][MAX_BLOCKSIZE *MAX_BLOCKSIZE]