30 #ifndef AVCODEC_DSPUTIL_H
31 #define AVCODEC_DSPUTIL_H
/**
 * Declare the C prototypes of the H.264 inverse-transform / dequantization
 * routines for one bit depth; "depth" is pasted into each function name
 * (e.g. H264_IDCT(8) declares ff_h264_idct_add_8_c and friends).
 *
 * The *_add* variants add the residual in "block" to the picture at
 * "dst"/"dest" using the given line stride; blockoffset gives per-block
 * byte offsets into the destination.
 * NOTE(review): nnzc[6*8] presumably carries per-block non-zero
 * coefficient counts so empty blocks can be skipped — confirm against
 * the decoder callers; not visible in this chunk.
 */
55 #define H264_IDCT(depth) \
56 void ff_h264_idct8_add_ ## depth ## _c(uint8_t *dst, DCTELEM *block, int stride);\
57 void ff_h264_idct_add_ ## depth ## _c(uint8_t *dst, DCTELEM *block, int stride);\
58 void ff_h264_idct8_dc_add_ ## depth ## _c(uint8_t *dst, DCTELEM *block, int stride);\
59 void ff_h264_idct_dc_add_ ## depth ## _c(uint8_t *dst, DCTELEM *block, int stride);\
60 void ff_h264_idct_add16_ ## depth ## _c(uint8_t *dst, const int *blockoffset, DCTELEM *block, int stride, const uint8_t nnzc[6*8]);\
61 void ff_h264_idct_add16intra_ ## depth ## _c(uint8_t *dst, const int *blockoffset, DCTELEM *block, int stride, const uint8_t nnzc[6*8]);\
62 void ff_h264_idct8_add4_ ## depth ## _c(uint8_t *dst, const int *blockoffset, DCTELEM *block, int stride, const uint8_t nnzc[6*8]);\
63 void ff_h264_idct_add8_422_ ## depth ## _c(uint8_t **dest, const int *blockoffset, DCTELEM *block, int stride, const uint8_t nnzc[6*8]);\
64 void ff_h264_idct_add8_ ## depth ## _c(uint8_t **dest, const int *blockoffset, DCTELEM *block, int stride, const uint8_t nnzc[6*8]);\
65 void ff_h264_luma_dc_dequant_idct_ ## depth ## _c(DCTELEM *output, DCTELEM *input, int qmul);\
66 void ff_h264_chroma422_dc_dequant_idct_ ## depth ## _c(DCTELEM *block, int qmul);\
67 void ff_h264_chroma_dc_dequant_idct_ ## depth ## _c(DCTELEM *block, int qmul);
/* Size of the guard band on either side of a pixel-clipping lookup table.
 * NOTE(review): the table itself is defined elsewhere — presumably indices
 * in [-1024, 255+1024] must be safe; confirm against the table definition. */
83 #define MAX_NEG_CROP 1024
/**
 * Declare the put (copy) and avg (average with destination) pixel-block
 * copy prototypes for 8x8 and 16x16 blocks at one bit depth; "depth" is
 * pasted into each function name.
 */
89 #define PUTAVG_PIXELS(depth)\
90 void ff_put_pixels8x8_ ## depth ## _c(uint8_t *dst, uint8_t *src, int stride);\
91 void ff_avg_pixels8x8_ ## depth ## _c(uint8_t *dst, uint8_t *src, int stride);\
92 void ff_put_pixels16x16_ ## depth ## _c(uint8_t *dst, uint8_t *src, int stride);\
93 void ff_avg_pixels16x16_ ## depth ## _c(uint8_t *dst, uint8_t *src, int stride);
/* Depth-generic convenience aliases: the unsuffixed names resolve to the
 * 8-bit implementations declared via PUTAVG_PIXELS(8). */
99 #define ff_put_pixels8x8_c ff_put_pixels8x8_8_c
100 #define ff_avg_pixels8x8_c ff_avg_pixels8x8_8_c
101 #define ff_put_pixels16x16_c ff_put_pixels16x16_8_c
102 #define ff_avg_pixels16x16_c ff_avg_pixels16x16_8_c
116 int dxx,
int dxy,
int dyx,
int dyy,
int shift,
int r,
int width,
int height);
/**
 * Declare the three standard variants of a legacy quarter-pel
 * motion-compensation routine: put (store), put_no_rnd (store without
 * rounding) and avg (average with destination).
 * NOTE(review): "name" presumably encodes block size and subpel phase
 * (e.g. qpel16_mc11_old_c) — confirm at the expansion sites.
 */
149 #define DEF_OLD_QPEL(name)\
150 void ff_put_ ## name (uint8_t *dst, uint8_t *src, int stride);\
151 void ff_put_no_rnd_ ## name (uint8_t *dst, uint8_t *src, int stride);\
152 void ff_avg_ ## name (uint8_t *dst, uint8_t *src, int stride);
167 #define CALL_2X_PIXELS(a, b, n)\
168 static void a(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
169 b(block , pixels , line_size, h);\
170 b(block+n, pixels+n, line_size, h);\
189 int idct_permutation_type);
217 int dxx,
int dxy,
int dyx,
int dyy,
int shift,
int r,
int width,
int height);
382 const float *src1,
int len);
/* Identifiers for the coefficient permutation a given IDCT implementation
 * expects (stored as idct_permutation_type).  Each IDCT may consume the
 * input in a different scan order; the value tells the rest of the code
 * which reordering to apply to quantized coefficients.
 * NOTE(review): value 0 is not defined in this chunk — confirm whether it
 * is reserved/unused elsewhere in the file. */
418 #define FF_NO_IDCT_PERM 1
419 #define FF_LIBMPEG2_IDCT_PERM 2
420 #define FF_SIMPLE_IDCT_PERM 3
421 #define FF_TRANSPOSE_IDCT_PERM 4
422 #define FF_PARTTRANS_IDCT_PERM 5
423 #define FF_SSE2_IDCT_PERM 6
/* Fixed-point precision shifts.
 * NOTE(review): presumably BASIS_SHIFT scales DCT basis function values and
 * RECON_SHIFT scales reconstructed samples in the trellis/RD code — the
 * consumers are not visible in this chunk; confirm before relying on this. */
427 #define BASIS_SHIFT 16
428 #define RECON_SHIFT 6
/* EDGE_WIDTH: width in pixels of the border added around pictures for
 * out-of-frame motion compensation.
 * EDGE_BOTTOM: flag bit (value 2) selecting the bottom edge; a matching
 * EDGE_TOP flag presumably exists in the lines elided from this chunk —
 * confirm in the full header. */
431 #define EDGE_WIDTH 16
433 #define EDGE_BOTTOM 2
462 const int16_t *window,
unsigned int len);
/**
 * Broadcast the low byte of c into every byte lane of a 32-bit / 64-bit
 * value, e.g. BYTE_VEC32(0x01) == 0x01010101 and
 * BYTE_VEC64(0x01) == 0x0001000100010001.
 * c must fit in one lane (0..255 for VEC32); larger values bleed into the
 * neighbouring lanes.
 */
507 #define BYTE_VEC32(c) ((c)*0x01010101UL)
/* Use ULL: 0x0001000100010001 does not fit a 32-bit unsigned long, so with
 * the old UL suffix the constant's type silently depended on the C
 * integer-constant promotion rules (it became unsigned long long on LLP64
 * targets such as Win64 anyway).  The explicit 64-bit suffix keeps the
 * value and result type the same on every platform.  Value is unchanged. */
508 #define BYTE_VEC64(c) ((c)*0x0001000100010001ULL)
512 return (a | b) - (((a ^
b) & ~
BYTE_VEC32(0x01)) >> 1);
517 return (a & b) + (((a ^
b) & ~
BYTE_VEC32(0x01)) >> 1);
522 return (a | b) - (((a ^
b) & ~
BYTE_VEC64(0x01)) >> 1);
527 return (a & b) + (((a ^
b) & ~
BYTE_VEC64(0x01)) >> 1);
536 return (3*lambda)>>(FF_LAMBDA_SHIFT+1);
538 return (4*lambda)>>(FF_LAMBDA_SHIFT);
540 return (2*lambda)>>(FF_LAMBDA_SHIFT);
543 return (2*lambda)>>FF_LAMBDA_SHIFT;
548 return lambda2>>FF_LAMBDA_SHIFT;
564 #if (ARCH_ARM && HAVE_NEON) || ARCH_PPC || HAVE_MMX
565 # define STRIDE_ALIGN 16
567 # define STRIDE_ALIGN 8
/* Portable fallback for an aligned stack array: over-allocate a raw byte
 * buffer by "a" bytes, then round the pointer up to an "a"-byte boundary.
 * a = alignment, t = element type, v = variable name; s and o carry the
 * array dimension specifiers (v is declared as a pointer to the array so
 * it indexes like the real thing).
 * NOTE(review): relies on FFALIGN and uintptr_t being in scope from
 * headers included elsewhere in this file — confirm. */
574 #define LOCAL_ALIGNED_A(a, t, v, s, o, ...) \
575 uint8_t la_##v[sizeof(t s o) + (a)]; \
576 t (*v) o = (void *)FFALIGN((uintptr_t)la_##v, a)
578 #define LOCAL_ALIGNED_D(a, t, v, s, o, ...) \
579 DECLARE_ALIGNED(a, t, la_##v) s o; \
/* Generic aligned stack array for arbitrary alignment "a": always uses the
 * over-allocate-and-align fallback (LOCAL_ALIGNED_A).  The trailing ",,"
 * pads the variadic list so the s/o parameters always receive arguments;
 * E presumably forces an extra macro-expansion pass — confirm its
 * definition (not visible in this chunk). */
582 #define LOCAL_ALIGNED(a, t, v, ...) E(LOCAL_ALIGNED_A(a, t, v, __VA_ARGS__,,))
584 #if HAVE_LOCAL_ALIGNED_8
585 # define LOCAL_ALIGNED_8(t, v, ...) E(LOCAL_ALIGNED_D(8, t, v, __VA_ARGS__,,))
587 # define LOCAL_ALIGNED_8(t, v, ...) LOCAL_ALIGNED(8, t, v, __VA_ARGS__)
590 #if HAVE_LOCAL_ALIGNED_16
591 # define LOCAL_ALIGNED_16(t, v, ...) E(LOCAL_ALIGNED_D(16, t, v, __VA_ARGS__,,))
593 # define LOCAL_ALIGNED_16(t, v, ...) LOCAL_ALIGNED(16, t, v, __VA_ARGS__)
596 #define WRAPPER8_16_SQ(name8, name16)\
597 static int name16(void *s, uint8_t *dst, uint8_t *src, int stride, int h){\
599 score +=name8(s, dst , src , stride, 8);\
600 score +=name8(s, dst+8 , src+8 , stride, 8);\
604 score +=name8(s, dst , src , stride, 8);\
605 score +=name8(s, dst+8 , src+8 , stride, 8);\