/*
 * MMX optimized DSP utils
 * Copyright (c) 2000, 2001 Fabrice Bellard
 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
 *
 * MMX optimization by Nick Kurshev <nickols_k@mail.ru>
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/cpu.h"
#include "libavutil/x86/asm.h"
#include "libavcodec/dsputil.h"
#include "libavcodec/h264dsp.h"
#include "libavcodec/mpegvideo.h"
#include "libavcodec/simple_idct.h"
#include "dsputil_mmx.h"
#include "idct_xvid.h"

//#undef NDEBUG
//#include <assert.h>

/* pixel operations */
DECLARE_ALIGNED(8,  const uint64_t, ff_bone) = 0x0101010101010101ULL;
DECLARE_ALIGNED(8,  const uint64_t, ff_wtwo) = 0x0002000200020002ULL;

DECLARE_ALIGNED(16, const uint64_t, ff_pdw_80000000)[2] =
    { 0x8000000080000000ULL, 0x8000000080000000ULL };

DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_1)    = { 0x0001000100010001ULL, 0x0001000100010001ULL };
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_2)    = { 0x0002000200020002ULL, 0x0002000200020002ULL };
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_3)    = { 0x0003000300030003ULL, 0x0003000300030003ULL };
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_4)    = { 0x0004000400040004ULL, 0x0004000400040004ULL };
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_5)    = { 0x0005000500050005ULL, 0x0005000500050005ULL };
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_8)    = { 0x0008000800080008ULL, 0x0008000800080008ULL };
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_9)    = { 0x0009000900090009ULL, 0x0009000900090009ULL };
DECLARE_ALIGNED(8,  const uint64_t, ff_pw_15)   =   0x000F000F000F000FULL;
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_16)   = { 0x0010001000100010ULL, 0x0010001000100010ULL };
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_17)   = { 0x0011001100110011ULL, 0x0011001100110011ULL };
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_18)   = { 0x0012001200120012ULL, 0x0012001200120012ULL };
DECLARE_ALIGNED(8,  const uint64_t, ff_pw_20)   =   0x0014001400140014ULL;
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_27)   = { 0x001B001B001B001BULL, 0x001B001B001B001BULL };
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_28)   = { 0x001C001C001C001CULL, 0x001C001C001C001CULL };
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_32)   = { 0x0020002000200020ULL, 0x0020002000200020ULL };
DECLARE_ALIGNED(8,  const uint64_t, ff_pw_42)   =   0x002A002A002A002AULL;
DECLARE_ALIGNED(8,  const uint64_t, ff_pw_53)   =   0x0035003500350035ULL;
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_63)   = { 0x003F003F003F003FULL, 0x003F003F003F003FULL };
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_64)   = { 0x0040004000400040ULL, 0x0040004000400040ULL };
DECLARE_ALIGNED(8,  const uint64_t, ff_pw_96)   =   0x0060006000600060ULL;
DECLARE_ALIGNED(8,  const uint64_t, ff_pw_128)  =   0x0080008000800080ULL;
DECLARE_ALIGNED(8,  const uint64_t, ff_pw_255)  =   0x00ff00ff00ff00ffULL;
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_512)  = { 0x0200020002000200ULL, 0x0200020002000200ULL };
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_1019) = { 0x03FB03FB03FB03FBULL, 0x03FB03FB03FB03FBULL };

DECLARE_ALIGNED(16, const xmm_reg,  ff_pb_0)    = { 0x0000000000000000ULL, 0x0000000000000000ULL };
DECLARE_ALIGNED(16, const xmm_reg,  ff_pb_1)    = { 0x0101010101010101ULL, 0x0101010101010101ULL };
DECLARE_ALIGNED(16, const xmm_reg,  ff_pb_3)    = { 0x0303030303030303ULL, 0x0303030303030303ULL };
DECLARE_ALIGNED(16, const xmm_reg,  ff_pb_4)    = { 0x0404040404040404ULL, 0x0404040404040404ULL };
DECLARE_ALIGNED(8,  const uint64_t, ff_pb_7)    =   0x0707070707070707ULL;
DECLARE_ALIGNED(8,  const uint64_t, ff_pb_1F)   =   0x1F1F1F1F1F1F1F1FULL;
DECLARE_ALIGNED(8,  const uint64_t, ff_pb_3F)   =   0x3F3F3F3F3F3F3F3FULL;
DECLARE_ALIGNED(16, const xmm_reg,  ff_pb_80)   = { 0x8080808080808080ULL, 0x8080808080808080ULL };
DECLARE_ALIGNED(8,  const uint64_t, ff_pb_81)   =   0x8181818181818181ULL;
DECLARE_ALIGNED(16, const xmm_reg,  ff_pb_A1)   = { 0xA1A1A1A1A1A1A1A1ULL, 0xA1A1A1A1A1A1A1A1ULL };
DECLARE_ALIGNED(16, const xmm_reg,  ff_pb_F8)   = { 0xF8F8F8F8F8F8F8F8ULL, 0xF8F8F8F8F8F8F8F8ULL };
DECLARE_ALIGNED(8,  const uint64_t, ff_pb_FC)   =   0xFCFCFCFCFCFCFCFCULL;
DECLARE_ALIGNED(16, const xmm_reg,  ff_pb_FE)   = { 0xFEFEFEFEFEFEFEFEULL, 0xFEFEFEFEFEFEFEFEULL };

DECLARE_ALIGNED(16, const double, ff_pd_1)[2] = { 1.0, 1.0 };
DECLARE_ALIGNED(16, const double, ff_pd_2)[2] = { 2.0, 2.0 };

#if HAVE_INLINE_ASM

#define JUMPALIGN()     __asm__ volatile (".p2align 3" ::)
#define MOVQ_ZERO(regd) __asm__ volatile ("pxor %%"#regd", %%"#regd ::)

#define MOVQ_BFE(regd)                                  \
    __asm__ volatile (                                  \
        "pcmpeqd %%"#regd", %%"#regd"   \n\t"           \
        "paddb   %%"#regd", %%"#regd"   \n\t" ::)

#ifndef PIC
#define MOVQ_BONE(regd) __asm__ volatile ("movq %0, %%"#regd" \n\t" :: "m"(ff_bone))
#define MOVQ_WTWO(regd) __asm__ volatile ("movq %0, %%"#regd" \n\t" :: "m"(ff_wtwo))
#else
// For shared libraries it is better to generate these constants in
// registers than to load them from memory (which needs GOT indirection).
// pcmpeqd -> -1
#define MOVQ_BONE(regd)                                 \
    __asm__ volatile (                                  \
        "pcmpeqd  %%"#regd", %%"#regd"  \n\t"           \
        "psrlw $15, %%"#regd"           \n\t"           \
        "packuswb %%"#regd", %%"#regd"  \n\t" ::)

#define MOVQ_WTWO(regd)                                 \
    __asm__ volatile (                                  \
        "pcmpeqd %%"#regd", %%"#regd"   \n\t"           \
        "psrlw $15, %%"#regd"           \n\t"           \
        "psllw $1, %%"#regd"            \n\t" ::)

#endif

// using regr as temporary and for the output result
// first argument is unmodified and second is trashed
// regfe is supposed to contain 0xfefefefefefefefe
#define PAVGB_MMX_NO_RND(rega, regb, regr, regfe)       \
    "movq "#rega", "#regr"              \n\t"           \
    "pand "#regb", "#regr"              \n\t"           \
    "pxor "#rega", "#regb"              \n\t"           \
    "pand "#regfe", "#regb"             \n\t"           \
    "psrlq $1, "#regb"                  \n\t"           \
    "paddb "#regb", "#regr"             \n\t"

#define PAVGB_MMX(rega, regb, regr, regfe)              \
    "movq "#rega", "#regr"              \n\t"           \
    "por "#regb", "#regr"               \n\t"           \
    "pxor "#rega", "#regb"              \n\t"           \
    "pand "#regfe", "#regb"             \n\t"           \
    "psrlq $1, "#regb"                  \n\t"           \
    "psubb "#regb", "#regr"             \n\t"

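/* Scalar reference for the two averaging tricks above; a documentation-only
 * sketch (the names below are illustrative, not part of any API). With
 * regfe holding 0xfefefefefefefefe, per byte:
 *   no rounding: (a + b)     >> 1 == (a & b) + (((a ^ b) & 0xFE) >> 1)
 *   rounding:    (a + b + 1) >> 1 == (a | b) - (((a ^ b) & 0xFE) >> 1)
 */
static inline int pavgb_ref(int a, int b)        /* rounded, cf. PAVGB_MMX */
{
    return (a | b) - (((a ^ b) & 0xFE) >> 1);
}

static inline int pavgb_ref_no_rnd(int a, int b) /* cf. PAVGB_MMX_NO_RND */
{
    return (a & b) + (((a ^ b) & 0xFE) >> 1);
}
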
// mm6 is supposed to contain 0xfefefefefefefefe
#define PAVGBP_MMX_NO_RND(rega, regb, regr, regc, regd, regp)   \
    "movq "#rega", "#regr"              \n\t"                   \
    "movq "#regc", "#regp"              \n\t"                   \
    "pand "#regb", "#regr"              \n\t"                   \
    "pand "#regd", "#regp"              \n\t"                   \
    "pxor "#rega", "#regb"              \n\t"                   \
    "pxor "#regc", "#regd"              \n\t"                   \
    "pand %%mm6, "#regb"                \n\t"                   \
    "pand %%mm6, "#regd"                \n\t"                   \
    "psrlq $1, "#regb"                  \n\t"                   \
    "psrlq $1, "#regd"                  \n\t"                   \
    "paddb "#regb", "#regr"             \n\t"                   \
    "paddb "#regd", "#regp"             \n\t"

#define PAVGBP_MMX(rega, regb, regr, regc, regd, regp)          \
    "movq "#rega", "#regr"              \n\t"                   \
    "movq "#regc", "#regp"              \n\t"                   \
    "por "#regb", "#regr"               \n\t"                   \
    "por "#regd", "#regp"               \n\t"                   \
    "pxor "#rega", "#regb"              \n\t"                   \
    "pxor "#regc", "#regd"              \n\t"                   \
    "pand %%mm6, "#regb"                \n\t"                   \
    "pand %%mm6, "#regd"                \n\t"                   \
    "psrlq $1, "#regd"                  \n\t"                   \
    "psrlq $1, "#regb"                  \n\t"                   \
    "psubb "#regb", "#regr"             \n\t"                   \
    "psubb "#regd", "#regp"             \n\t"

/***********************************/
/* MMX no rounding */
#define DEF(x, y) x ## _no_rnd_ ## y ## _mmx
#define SET_RND  MOVQ_WONE
#define PAVGBP(a, b, c, d, e, f) PAVGBP_MMX_NO_RND(a, b, c, d, e, f)
#define PAVGB(a, b, c, e)        PAVGB_MMX_NO_RND(a, b, c, e)
#define OP_AVG(a, b, c, e)       PAVGB_MMX(a, b, c, e)

#include "dsputil_rnd_template.c"

#undef DEF
#undef SET_RND
#undef PAVGBP
#undef PAVGB
/***********************************/
/* MMX rounding */

#define DEF(x, y) x ## _ ## y ## _mmx
#define SET_RND  MOVQ_WTWO
#define PAVGBP(a, b, c, d, e, f) PAVGBP_MMX(a, b, c, d, e, f)
#define PAVGB(a, b, c, e)        PAVGB_MMX(a, b, c, e)

#include "dsputil_rnd_template.c"

#undef DEF
#undef SET_RND
#undef PAVGBP
#undef PAVGB
#undef OP_AVG

/***********************************/
/* 3Dnow specific */

#define DEF(x) x ## _3dnow
#define PAVGB "pavgusb"
#define OP_AVG PAVGB
#define SKIP_FOR_3DNOW

#include "dsputil_avg_template.c"

#undef DEF
#undef PAVGB
#undef OP_AVG
#undef SKIP_FOR_3DNOW

/***********************************/
/* MMXEXT specific */

#define DEF(x) x ## _mmxext

/* pavgb was introduced only with the MMXEXT instruction set */
#define PAVGB "pavgb"
#define OP_AVG PAVGB

#include "dsputil_avg_template.c"

#undef DEF
#undef PAVGB
#undef OP_AVG

#define put_no_rnd_pixels16_mmx    put_pixels16_mmx
#define put_no_rnd_pixels8_mmx     put_pixels8_mmx
#define put_pixels16_mmxext        put_pixels16_mmx
#define put_pixels8_mmxext         put_pixels8_mmx
#define put_pixels4_mmxext         put_pixels4_mmx
#define put_no_rnd_pixels16_mmxext put_no_rnd_pixels16_mmx
#define put_no_rnd_pixels8_mmxext  put_no_rnd_pixels8_mmx

/***********************************/
/* standard MMX */

void ff_put_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels,
                               int line_size)
{
    const DCTELEM *p;
    uint8_t *pix;

    /* read the pixels */
    p   = block;
    pix = pixels;
    /* unrolled loop */
    __asm__ volatile (
        "movq (%3), %%mm0               \n\t"
        "movq 8(%3), %%mm1              \n\t"
        "movq 16(%3), %%mm2             \n\t"
        "movq 24(%3), %%mm3             \n\t"
        "movq 32(%3), %%mm4             \n\t"
        "movq 40(%3), %%mm5             \n\t"
        "movq 48(%3), %%mm6             \n\t"
        "movq 56(%3), %%mm7             \n\t"
        "packuswb %%mm1, %%mm0          \n\t"
        "packuswb %%mm3, %%mm2          \n\t"
        "packuswb %%mm5, %%mm4          \n\t"
        "packuswb %%mm7, %%mm6          \n\t"
        "movq %%mm0, (%0)               \n\t"
        "movq %%mm2, (%0, %1)           \n\t"
        "movq %%mm4, (%0, %1, 2)        \n\t"
        "movq %%mm6, (%0, %2)           \n\t"
        :: "r"(pix), "r"((x86_reg)line_size), "r"((x86_reg)line_size * 3),
           "r"(p)
        : "memory");
    pix += line_size * 4;
    p   += 32;

    // If this were an exact copy of the code above, the compiler would
    // generate some very strange code, hence the "r" constraints.
    __asm__ volatile (
        "movq (%3), %%mm0               \n\t"
        "movq 8(%3), %%mm1              \n\t"
        "movq 16(%3), %%mm2             \n\t"
        "movq 24(%3), %%mm3             \n\t"
        "movq 32(%3), %%mm4             \n\t"
        "movq 40(%3), %%mm5             \n\t"
        "movq 48(%3), %%mm6             \n\t"
        "movq 56(%3), %%mm7             \n\t"
        "packuswb %%mm1, %%mm0          \n\t"
        "packuswb %%mm3, %%mm2          \n\t"
        "packuswb %%mm5, %%mm4          \n\t"
        "packuswb %%mm7, %%mm6          \n\t"
        "movq %%mm0, (%0)               \n\t"
        "movq %%mm2, (%0, %1)           \n\t"
        "movq %%mm4, (%0, %1, 2)        \n\t"
        "movq %%mm6, (%0, %2)           \n\t"
        :: "r"(pix), "r"((x86_reg)line_size), "r"((x86_reg)line_size * 3), "r"(p)
        : "memory");
}

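/* Scalar sketch of what the function above computes (documentation only):
 * each 16-bit coefficient is saturated to [0, 255] by packuswb and stored:
 *
 *     for (i = 0; i < 8; i++)
 *         for (j = 0; j < 8; j++)
 *             pixels[i * line_size + j] = av_clip_uint8(block[i * 8 + j]);
 */
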
#define put_signed_pixels_clamped_mmx_half(off)             \
    "movq "#off"(%2), %%mm1             \n\t"               \
    "movq 16 + "#off"(%2), %%mm2        \n\t"               \
    "movq 32 + "#off"(%2), %%mm3        \n\t"               \
    "movq 48 + "#off"(%2), %%mm4        \n\t"               \
    "packsswb 8 + "#off"(%2), %%mm1     \n\t"               \
    "packsswb 24 + "#off"(%2), %%mm2    \n\t"               \
    "packsswb 40 + "#off"(%2), %%mm3    \n\t"               \
    "packsswb 56 + "#off"(%2), %%mm4    \n\t"               \
    "paddb %%mm0, %%mm1                 \n\t"               \
    "paddb %%mm0, %%mm2                 \n\t"               \
    "paddb %%mm0, %%mm3                 \n\t"               \
    "paddb %%mm0, %%mm4                 \n\t"               \
    "movq %%mm1, (%0)                   \n\t"               \
    "movq %%mm2, (%0, %3)               \n\t"               \
    "movq %%mm3, (%0, %3, 2)            \n\t"               \
    "movq %%mm4, (%0, %1)               \n\t"

void ff_put_signed_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels,
                                      int line_size)
{
    x86_reg line_skip = line_size;
    x86_reg line_skip3;

    __asm__ volatile (
        "movq "MANGLE(ff_pb_80)", %%mm0 \n\t"
        "lea (%3, %3, 2), %1            \n\t"
        put_signed_pixels_clamped_mmx_half(0)
        "lea (%0, %3, 4), %0            \n\t"
        put_signed_pixels_clamped_mmx_half(64)
        : "+&r"(pixels), "=&r"(line_skip3)
        : "r"(block), "r"(line_skip)
        : "memory");
}

void ff_add_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels,
                               int line_size)
{
    const DCTELEM *p;
    uint8_t *pix;
    int i;

    /* read the pixels */
    p   = block;
    pix = pixels;
    MOVQ_ZERO(mm7);
    i = 4;
    do {
        __asm__ volatile (
            "movq (%2), %%mm0           \n\t"
            "movq 8(%2), %%mm1          \n\t"
            "movq 16(%2), %%mm2         \n\t"
            "movq 24(%2), %%mm3         \n\t"
            "movq %0, %%mm4             \n\t"
            "movq %1, %%mm6             \n\t"
            "movq %%mm4, %%mm5          \n\t"
            "punpcklbw %%mm7, %%mm4     \n\t"
            "punpckhbw %%mm7, %%mm5     \n\t"
            "paddsw %%mm4, %%mm0        \n\t"
            "paddsw %%mm5, %%mm1        \n\t"
            "movq %%mm6, %%mm5          \n\t"
            "punpcklbw %%mm7, %%mm6     \n\t"
            "punpckhbw %%mm7, %%mm5     \n\t"
            "paddsw %%mm6, %%mm2        \n\t"
            "paddsw %%mm5, %%mm3        \n\t"
            "packuswb %%mm1, %%mm0      \n\t"
            "packuswb %%mm3, %%mm2      \n\t"
            "movq %%mm0, %0             \n\t"
            "movq %%mm2, %1             \n\t"
            : "+m"(*pix), "+m"(*(pix + line_size))
            : "r"(p)
            : "memory");
        pix += line_size * 2;
        p   += 16;
    } while (--i);
}

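/* Scalar sketch of the function above (documentation only): each destination
 * pixel is widened to 16 bits (punpck with mm7 == 0), the coefficient is
 * added, and the sum is packed back with unsigned saturation:
 *
 *     pixels[i * line_size + j] =
 *         av_clip_uint8(pixels[i * line_size + j] + block[i * 8 + j]);
 */
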
static void put_pixels8_mmx(uint8_t *block, const uint8_t *pixels,
                            int line_size, int h)
{
    __asm__ volatile (
        "lea (%3, %3), %%"REG_a"        \n\t"
        ".p2align 3                     \n\t"
        "1:                             \n\t"
        "movq (%1), %%mm0               \n\t"
        "movq (%1, %3), %%mm1           \n\t"
        "movq %%mm0, (%2)               \n\t"
        "movq %%mm1, (%2, %3)           \n\t"
        "add %%"REG_a", %1              \n\t"
        "add %%"REG_a", %2              \n\t"
        "movq (%1), %%mm0               \n\t"
        "movq (%1, %3), %%mm1           \n\t"
        "movq %%mm0, (%2)               \n\t"
        "movq %%mm1, (%2, %3)           \n\t"
        "add %%"REG_a", %1              \n\t"
        "add %%"REG_a", %2              \n\t"
        "subl $4, %0                    \n\t"
        "jnz 1b                         \n\t"
        : "+g"(h), "+r"(pixels), "+r"(block)
        : "r"((x86_reg)line_size)
        : "%"REG_a, "memory"
        );
}

static void put_pixels16_mmx(uint8_t *block, const uint8_t *pixels,
                             int line_size, int h)
{
    __asm__ volatile (
        "lea (%3, %3), %%"REG_a"        \n\t"
        ".p2align 3                     \n\t"
        "1:                             \n\t"
        "movq (%1), %%mm0               \n\t"
        "movq 8(%1), %%mm4              \n\t"
        "movq (%1, %3), %%mm1           \n\t"
        "movq 8(%1, %3), %%mm5          \n\t"
        "movq %%mm0, (%2)               \n\t"
        "movq %%mm4, 8(%2)              \n\t"
        "movq %%mm1, (%2, %3)           \n\t"
        "movq %%mm5, 8(%2, %3)          \n\t"
        "add %%"REG_a", %1              \n\t"
        "add %%"REG_a", %2              \n\t"
        "movq (%1), %%mm0               \n\t"
        "movq 8(%1), %%mm4              \n\t"
        "movq (%1, %3), %%mm1           \n\t"
        "movq 8(%1, %3), %%mm5          \n\t"
        "movq %%mm0, (%2)               \n\t"
        "movq %%mm4, 8(%2)              \n\t"
        "movq %%mm1, (%2, %3)           \n\t"
        "movq %%mm5, 8(%2, %3)          \n\t"
        "add %%"REG_a", %1              \n\t"
        "add %%"REG_a", %2              \n\t"
        "subl $4, %0                    \n\t"
        "jnz 1b                         \n\t"
        : "+g"(h), "+r"(pixels), "+r"(block)
        : "r"((x86_reg)line_size)
        : "%"REG_a, "memory"
        );
}

#define CLEAR_BLOCKS(name, n)                           \
static void name(DCTELEM *blocks)                       \
{                                                       \
    __asm__ volatile (                                  \
        "pxor %%mm7, %%mm7              \n\t"           \
        "mov %1, %%"REG_a"              \n\t"           \
        "1:                             \n\t"           \
        "movq %%mm7, (%0, %%"REG_a")    \n\t"           \
        "movq %%mm7, 8(%0, %%"REG_a")   \n\t"           \
        "movq %%mm7, 16(%0, %%"REG_a")  \n\t"           \
        "movq %%mm7, 24(%0, %%"REG_a")  \n\t"           \
        "add $32, %%"REG_a"             \n\t"           \
        "js 1b                          \n\t"           \
        :: "r"(((uint8_t *)blocks) + 128 * n),          \
           "i"(-128 * n)                                \
        : "%"REG_a                                      \
        );                                              \
}
CLEAR_BLOCKS(clear_blocks_mmx, 6)
CLEAR_BLOCKS(clear_block_mmx, 1)

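/* The loops above are equivalent to memset(blocks, 0, n * 128): each DCT
 * block is 64 DCTELEMs (128 bytes), cleared 32 bytes per iteration. */
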
static void clear_block_sse(DCTELEM *block)
{
    __asm__ volatile (
        "xorps %%xmm0, %%xmm0           \n"
        "movaps %%xmm0, (%0)            \n"
        "movaps %%xmm0, 16(%0)          \n"
        "movaps %%xmm0, 32(%0)          \n"
        "movaps %%xmm0, 48(%0)          \n"
        "movaps %%xmm0, 64(%0)          \n"
        "movaps %%xmm0, 80(%0)          \n"
        "movaps %%xmm0, 96(%0)          \n"
        "movaps %%xmm0, 112(%0)         \n"
        :: "r"(block)
        : "memory"
        );
}

static void clear_blocks_sse(DCTELEM *blocks)
{
    __asm__ volatile (
        "xorps %%xmm0, %%xmm0               \n"
        "mov %1, %%"REG_a"                  \n"
        "1:                                 \n"
        "movaps %%xmm0, (%0, %%"REG_a")     \n"
        "movaps %%xmm0, 16(%0, %%"REG_a")   \n"
        "movaps %%xmm0, 32(%0, %%"REG_a")   \n"
        "movaps %%xmm0, 48(%0, %%"REG_a")   \n"
        "movaps %%xmm0, 64(%0, %%"REG_a")   \n"
        "movaps %%xmm0, 80(%0, %%"REG_a")   \n"
        "movaps %%xmm0, 96(%0, %%"REG_a")   \n"
        "movaps %%xmm0, 112(%0, %%"REG_a")  \n"
        "add $128, %%"REG_a"                \n"
        "js 1b                              \n"
        :: "r"(((uint8_t *)blocks) + 128 * 6),
           "i"(-128 * 6)
        : "%"REG_a
        );
}

static void add_bytes_mmx(uint8_t *dst, uint8_t *src, int w)
{
    x86_reg i = 0;
    __asm__ volatile (
        "jmp 2f                         \n\t"
        "1:                             \n\t"
        "movq (%1, %0), %%mm0           \n\t"
        "movq (%2, %0), %%mm1           \n\t"
        "paddb %%mm0, %%mm1             \n\t"
        "movq %%mm1, (%2, %0)           \n\t"
        "movq 8(%1, %0), %%mm0          \n\t"
        "movq 8(%2, %0), %%mm1          \n\t"
        "paddb %%mm0, %%mm1             \n\t"
        "movq %%mm1, 8(%2, %0)          \n\t"
        "add $16, %0                    \n\t"
        "2:                             \n\t"
        "cmp %3, %0                     \n\t"
        "js 1b                          \n\t"
        : "+r"(i)
        : "r"(src), "r"(dst), "r"((x86_reg)w - 15)
        );
    for ( ; i < w; i++)
        dst[i + 0] += src[i + 0];
}

#if HAVE_7REGS
static void add_hfyu_median_prediction_cmov(uint8_t *dst, const uint8_t *top,
                                            const uint8_t *diff, int w,
                                            int *left, int *left_top)
{
    x86_reg w2 = -w;
    x86_reg x;
    int l  = *left     & 0xff;
    int tl = *left_top & 0xff;
    int t;
    __asm__ volatile (
        "mov %7, %3                     \n"
        "1:                             \n"
        "movzbl (%3, %4), %2            \n"
        "mov %2, %k3                    \n"
        "sub %b1, %b3                   \n"
        "add %b0, %b3                   \n"
        "mov %2, %1                     \n"
        "cmp %0, %2                     \n"
        "cmovg %0, %2                   \n"
        "cmovg %1, %0                   \n"
        "cmp %k3, %0                    \n"
        "cmovg %k3, %0                  \n"
        "mov %7, %3                     \n"
        "cmp %2, %0                     \n"
        "cmovl %2, %0                   \n"
        "add (%6, %4), %b0              \n"
        "mov %b0, (%5, %4)              \n"
        "inc %4                         \n"
        "jl 1b                          \n"
        : "+&q"(l), "+&q"(tl), "=&r"(t), "=&q"(x), "+&r"(w2)
        : "r"(dst + w), "r"(diff + w), "rm"(top + w)
        );
    *left     = l;
    *left_top = tl;
}
#endif

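/* Scalar sketch of the median predictor implemented with cmov above
 * (cf. the C version in dsputil.c); l and tl carry the running left and
 * top-left bytes:
 *
 *     for (x = 0; x < w; x++) {
 *         int t  = top[x];
 *         l      = mid_pred(l, t, l + t - tl) + diff[x];
 *         dst[x] = l;
 *         tl     = t;
 *     }
 */
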
static inline void transpose4x4(uint8_t *dst, uint8_t *src,
                                x86_reg dst_stride, x86_reg src_stride)
{
    __asm__ volatile ( // FIXME could save 1 instruction if done as 8x4 ...
        "movd (%1), %%mm0               \n\t"
        "add %3, %1                     \n\t"
        "movd (%1), %%mm1               \n\t"
        "movd (%1, %3, 1), %%mm2        \n\t"
        "movd (%1, %3, 2), %%mm3        \n\t"
        "punpcklbw %%mm1, %%mm0         \n\t"
        "punpcklbw %%mm3, %%mm2         \n\t"
        "movq %%mm0, %%mm1              \n\t"
        "punpcklwd %%mm2, %%mm0         \n\t"
        "punpckhwd %%mm2, %%mm1         \n\t"
        "movd %%mm0, (%0)               \n\t"
        "add %2, %0                     \n\t"
        "punpckhdq %%mm0, %%mm0         \n\t"
        "movd %%mm0, (%0)               \n\t"
        "movd %%mm1, (%0, %2, 1)        \n\t"
        "punpckhdq %%mm1, %%mm1         \n\t"
        "movd %%mm1, (%0, %2, 2)        \n\t"
        : "+&r" (dst),
          "+&r" (src)
        : "r" (dst_stride),
          "r" (src_stride)
        : "memory"
        );
}

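/* Scalar sketch of the 4x4 byte transpose above:
 *
 *     for (y = 0; y < 4; y++)
 *         for (x = 0; x < 4; x++)
 *             dst[y * dst_stride + x] = src[x * src_stride + y];
 */
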
#define H263_LOOP_FILTER                        \
    "pxor %%mm7, %%mm7                  \n\t"   \
    "movq %0, %%mm0                     \n\t"   \
    "movq %0, %%mm1                     \n\t"   \
    "movq %3, %%mm2                     \n\t"   \
    "movq %3, %%mm3                     \n\t"   \
    "punpcklbw %%mm7, %%mm0             \n\t"   \
    "punpckhbw %%mm7, %%mm1             \n\t"   \
    "punpcklbw %%mm7, %%mm2             \n\t"   \
    "punpckhbw %%mm7, %%mm3             \n\t"   \
    "psubw %%mm2, %%mm0                 \n\t"   \
    "psubw %%mm3, %%mm1                 \n\t"   \
    "movq %1, %%mm2                     \n\t"   \
    "movq %1, %%mm3                     \n\t"   \
    "movq %2, %%mm4                     \n\t"   \
    "movq %2, %%mm5                     \n\t"   \
    "punpcklbw %%mm7, %%mm2             \n\t"   \
    "punpckhbw %%mm7, %%mm3             \n\t"   \
    "punpcklbw %%mm7, %%mm4             \n\t"   \
    "punpckhbw %%mm7, %%mm5             \n\t"   \
    "psubw %%mm2, %%mm4                 \n\t"   \
    "psubw %%mm3, %%mm5                 \n\t"   \
    "psllw $2, %%mm4                    \n\t"   \
    "psllw $2, %%mm5                    \n\t"   \
    "paddw %%mm0, %%mm4                 \n\t"   \
    "paddw %%mm1, %%mm5                 \n\t"   \
    "pxor %%mm6, %%mm6                  \n\t"   \
    "pcmpgtw %%mm4, %%mm6               \n\t"   \
    "pcmpgtw %%mm5, %%mm7               \n\t"   \
    "pxor %%mm6, %%mm4                  \n\t"   \
    "pxor %%mm7, %%mm5                  \n\t"   \
    "psubw %%mm6, %%mm4                 \n\t"   \
    "psubw %%mm7, %%mm5                 \n\t"   \
    "psrlw $3, %%mm4                    \n\t"   \
    "psrlw $3, %%mm5                    \n\t"   \
    "packuswb %%mm5, %%mm4              \n\t"   \
    "packsswb %%mm7, %%mm6              \n\t"   \
    "pxor %%mm7, %%mm7                  \n\t"   \
    "movd %4, %%mm2                     \n\t"   \
    "punpcklbw %%mm2, %%mm2             \n\t"   \
    "punpcklbw %%mm2, %%mm2             \n\t"   \
    "punpcklbw %%mm2, %%mm2             \n\t"   \
    "psubusb %%mm4, %%mm2               \n\t"   \
    "movq %%mm2, %%mm3                  \n\t"   \
    "psubusb %%mm4, %%mm3               \n\t"   \
    "psubb %%mm3, %%mm2                 \n\t"   \
    "movq %1, %%mm3                     \n\t"   \
    "movq %2, %%mm4                     \n\t"   \
    "pxor %%mm6, %%mm3                  \n\t"   \
    "pxor %%mm6, %%mm4                  \n\t"   \
    "paddusb %%mm2, %%mm3               \n\t"   \
    "psubusb %%mm2, %%mm4               \n\t"   \
    "pxor %%mm6, %%mm3                  \n\t"   \
    "pxor %%mm6, %%mm4                  \n\t"   \
    "paddusb %%mm2, %%mm2               \n\t"   \
    "packsswb %%mm1, %%mm0              \n\t"   \
    "pcmpgtb %%mm0, %%mm7               \n\t"   \
    "pxor %%mm7, %%mm0                  \n\t"   \
    "psubb %%mm7, %%mm0                 \n\t"   \
    "movq %%mm0, %%mm1                  \n\t"   \
    "psubusb %%mm2, %%mm0               \n\t"   \
    "psubb %%mm0, %%mm1                 \n\t"   \
    "pand %5, %%mm1                     \n\t"   \
    "psrlw $2, %%mm1                    \n\t"   \
    "pxor %%mm7, %%mm1                  \n\t"   \
    "psubb %%mm7, %%mm1                 \n\t"   \
    "movq %0, %%mm5                     \n\t"   \
    "movq %3, %%mm6                     \n\t"   \
    "psubb %%mm1, %%mm5                 \n\t"   \
    "paddb %%mm1, %%mm6                 \n\t"

static void h263_v_loop_filter_mmx(uint8_t *src, int stride, int qscale)
{
    if (CONFIG_H263_DECODER || CONFIG_H263_ENCODER) {
        const int strength = ff_h263_loop_filter_strength[qscale];

        __asm__ volatile (
            H263_LOOP_FILTER

            "movq %%mm3, %1             \n\t"
            "movq %%mm4, %2             \n\t"
            "movq %%mm5, %0             \n\t"
            "movq %%mm6, %3             \n\t"
            : "+m"(*(uint64_t*)(src - 2 * stride)),
              "+m"(*(uint64_t*)(src - 1 * stride)),
              "+m"(*(uint64_t*)(src + 0 * stride)),
              "+m"(*(uint64_t*)(src + 1 * stride))
            : "g"(2 * strength), "m"(ff_pb_FC)
            );
    }
}

static void h263_h_loop_filter_mmx(uint8_t *src, int stride, int qscale)
{
    if (CONFIG_H263_DECODER || CONFIG_H263_ENCODER) {
        const int strength = ff_h263_loop_filter_strength[qscale];
        DECLARE_ALIGNED(8, uint64_t, temp)[4];
        uint8_t *btemp = (uint8_t*)temp;

        src -= 2;

        transpose4x4(btemp, src, 8, stride);
        transpose4x4(btemp + 4, src + 4 * stride, 8, stride);
        __asm__ volatile (
            H263_LOOP_FILTER // 5 3 4 6

            : "+m"(temp[0]),
              "+m"(temp[1]),
              "+m"(temp[2]),
              "+m"(temp[3])
            : "g"(2 * strength), "m"(ff_pb_FC)
            );

        __asm__ volatile (
            "movq %%mm5, %%mm1          \n\t"
            "movq %%mm4, %%mm0          \n\t"
            "punpcklbw %%mm3, %%mm5     \n\t"
            "punpcklbw %%mm6, %%mm4     \n\t"
            "punpckhbw %%mm3, %%mm1     \n\t"
            "punpckhbw %%mm6, %%mm0     \n\t"
            "movq %%mm5, %%mm3          \n\t"
            "movq %%mm1, %%mm6          \n\t"
            "punpcklwd %%mm4, %%mm5     \n\t"
            "punpcklwd %%mm0, %%mm1     \n\t"
            "punpckhwd %%mm4, %%mm3     \n\t"
            "punpckhwd %%mm0, %%mm6     \n\t"
            "movd %%mm5, (%0)           \n\t"
            "punpckhdq %%mm5, %%mm5     \n\t"
            "movd %%mm5, (%0, %2)       \n\t"
            "movd %%mm3, (%0, %2, 2)    \n\t"
            "punpckhdq %%mm3, %%mm3     \n\t"
            "movd %%mm3, (%0, %3)       \n\t"
            "movd %%mm1, (%1)           \n\t"
            "punpckhdq %%mm1, %%mm1     \n\t"
            "movd %%mm1, (%1, %2)       \n\t"
            "movd %%mm6, (%1, %2, 2)    \n\t"
            "punpckhdq %%mm6, %%mm6     \n\t"
            "movd %%mm6, (%1, %3)       \n\t"
            :: "r"(src),
               "r"(src + 4 * stride),
               "r"((x86_reg)stride),
               "r"((x86_reg)(3 * stride))
            );
    }
}

/* Draw the edges of width 'w' of an image of size width x height.
 * This MMX version can only handle w == 8 or w == 16. */
static void draw_edges_mmx(uint8_t *buf, int wrap, int width, int height,
                           int w, int h, int sides)
{
    uint8_t *ptr, *last_line;
    int i;

    last_line = buf + (height - 1) * wrap;
    /* left and right */
    ptr = buf;
    if (w == 8) {
        __asm__ volatile (
            "1:                         \n\t"
            "movd (%0), %%mm0           \n\t"
            "punpcklbw %%mm0, %%mm0     \n\t"
            "punpcklwd %%mm0, %%mm0     \n\t"
            "punpckldq %%mm0, %%mm0     \n\t"
            "movq %%mm0, -8(%0)         \n\t"
            "movq -8(%0, %2), %%mm1     \n\t"
            "punpckhbw %%mm1, %%mm1     \n\t"
            "punpckhwd %%mm1, %%mm1     \n\t"
            "punpckhdq %%mm1, %%mm1     \n\t"
            "movq %%mm1, (%0, %2)       \n\t"
            "add %1, %0                 \n\t"
            "cmp %3, %0                 \n\t"
            "jb 1b                      \n\t"
            : "+r"(ptr)
            : "r"((x86_reg)wrap), "r"((x86_reg)width), "r"(ptr + wrap * height)
            );
    } else {
        __asm__ volatile (
            "1:                         \n\t"
            "movd (%0), %%mm0           \n\t"
            "punpcklbw %%mm0, %%mm0     \n\t"
            "punpcklwd %%mm0, %%mm0     \n\t"
            "punpckldq %%mm0, %%mm0     \n\t"
            "movq %%mm0, -8(%0)         \n\t"
            "movq %%mm0, -16(%0)        \n\t"
            "movq -8(%0, %2), %%mm1     \n\t"
            "punpckhbw %%mm1, %%mm1     \n\t"
            "punpckhwd %%mm1, %%mm1     \n\t"
            "punpckhdq %%mm1, %%mm1     \n\t"
            "movq %%mm1, (%0, %2)       \n\t"
            "movq %%mm1, 8(%0, %2)      \n\t"
            "add %1, %0                 \n\t"
            "cmp %3, %0                 \n\t"
            "jb 1b                      \n\t"
            : "+r"(ptr)
            : "r"((x86_reg)wrap), "r"((x86_reg)width), "r"(ptr + wrap * height)
            );
    }

    /* top and bottom (and hopefully also the corners) */
    if (sides & EDGE_TOP) {
        for (i = 0; i < h; i += 4) {
            ptr = buf - (i + 1) * wrap - w;
            __asm__ volatile (
                "1:                         \n\t"
                "movq (%1, %0), %%mm0       \n\t"
                "movq %%mm0, (%0)           \n\t"
                "movq %%mm0, (%0, %2)       \n\t"
                "movq %%mm0, (%0, %2, 2)    \n\t"
                "movq %%mm0, (%0, %3)       \n\t"
                "add $8, %0                 \n\t"
                "cmp %4, %0                 \n\t"
                "jb 1b                      \n\t"
                : "+r"(ptr)
                : "r"((x86_reg)buf - (x86_reg)ptr - w), "r"((x86_reg) -wrap),
                  "r"((x86_reg) -wrap * 3), "r"(ptr + width + 2 * w)
                );
        }
    }

    if (sides & EDGE_BOTTOM) {
        for (i = 0; i < h; i += 4) {
            ptr = last_line + (i + 1) * wrap - w;
            __asm__ volatile (
                "1:                         \n\t"
                "movq (%1, %0), %%mm0       \n\t"
                "movq %%mm0, (%0)           \n\t"
                "movq %%mm0, (%0, %2)       \n\t"
                "movq %%mm0, (%0, %2, 2)    \n\t"
                "movq %%mm0, (%0, %3)       \n\t"
                "add $8, %0                 \n\t"
                "cmp %4, %0                 \n\t"
                "jb 1b                      \n\t"
                : "+r"(ptr)
                : "r"((x86_reg)last_line - (x86_reg)ptr - w),
                  "r"((x86_reg)wrap), "r"((x86_reg)wrap * 3),
                  "r"(ptr + width + 2 * w)
                );
        }
    }
}

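/* Scalar sketch of the replication done above for the left/right margins
 * (the EDGE_TOP/EDGE_BOTTOM loops copy whole edge rows in the same spirit):
 *
 *     for (y = 0; y < height; y++) {
 *         memset(buf + y * wrap - w,     buf[y * wrap],             w);
 *         memset(buf + y * wrap + width, buf[y * wrap + width - 1], w);
 *     }
 */
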
#define QPEL_V_LOW(m3, m4, m5, m6, pw_20, pw_3, rnd,                      \
                   in0, in1, in2, in7, out, OP)                           \
    "paddw "#m4", "#m3"                 \n\t" /* x1 */                    \
    "movq "MANGLE(ff_pw_20)", %%mm4     \n\t" /* 20 */                    \
    "pmullw "#m3", %%mm4                \n\t" /* 20x1 */                  \
    "movq "#in7", "#m3"                 \n\t" /* d */                     \
    "movq "#in0", %%mm5                 \n\t" /* D */                     \
    "paddw "#m3", %%mm5                 \n\t" /* x4 */                    \
    "psubw %%mm5, %%mm4                 \n\t" /* 20x1 - x4 */             \
    "movq "#in1", %%mm5                 \n\t" /* C */                     \
    "movq "#in2", %%mm6                 \n\t" /* B */                     \
    "paddw "#m6", %%mm5                 \n\t" /* x3 */                    \
    "paddw "#m5", %%mm6                 \n\t" /* x2 */                    \
    "paddw %%mm6, %%mm6                 \n\t" /* 2x2 */                   \
    "psubw %%mm6, %%mm5                 \n\t" /* -2x2 + x3 */             \
    "pmullw "MANGLE(ff_pw_3)", %%mm5    \n\t" /* -6x2 + 3x3 */            \
    "paddw "#rnd", %%mm4                \n\t" /* x2 */                    \
    "paddw %%mm4, %%mm5                 \n\t" /* 20x1 - 6x2 + 3x3 - x4 */ \
    "psraw $5, %%mm5                    \n\t"                             \
    "packuswb %%mm5, %%mm5              \n\t"                             \
    OP(%%mm5, out, %%mm7, d)

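/* Scalar sketch of one QPEL_V_LOW output pixel: x1..x4 are the symmetric
 * source-pair sums named in the comments above, and rnd is the bias passed
 * in via ROUNDER (ff_pw_16, or ff_pw_15 for the no-rounding variants):
 *
 *     out = av_clip_uint8((20 * x1 - 6 * x2 + 3 * x3 - x4 + rnd) >> 5);
 */
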
#define QPEL_BASE(OPNAME, ROUNDER, RND, OP_MMXEXT)                        \
static void OPNAME ## mpeg4_qpel16_h_lowpass_mmxext(uint8_t *dst,         \
                                                    uint8_t *src,         \
                                                    int dstStride,        \
                                                    int srcStride,        \
                                                    int h)                \
{                                                                         \
    uint64_t temp;                                                        \
                                                                          \
    __asm__ volatile (                                                    \
        "pxor %%mm7, %%mm7              \n\t"                             \
        "1:                             \n\t"                             \
        "movq (%0), %%mm0               \n\t" /* ABCDEFGH */              \
        "movq %%mm0, %%mm1              \n\t" /* ABCDEFGH */              \
        "movq %%mm0, %%mm2              \n\t" /* ABCDEFGH */              \
        "punpcklbw %%mm7, %%mm0         \n\t" /* 0A0B0C0D */              \
        "punpckhbw %%mm7, %%mm1         \n\t" /* 0E0F0G0H */              \
        "pshufw $0x90, %%mm0, %%mm5     \n\t" /* 0A0A0B0C */              \
        "pshufw $0x41, %%mm0, %%mm6     \n\t" /* 0B0A0A0B */              \
        "movq %%mm2, %%mm3              \n\t" /* ABCDEFGH */              \
        "movq %%mm2, %%mm4              \n\t" /* ABCDEFGH */              \
        "psllq $8, %%mm2                \n\t" /* 0ABCDEFG */              \
        "psllq $16, %%mm3               \n\t" /* 00ABCDEF */              \
        "psllq $24, %%mm4               \n\t" /* 000ABCDE */              \
        "punpckhbw %%mm7, %%mm2         \n\t" /* 0D0E0F0G */              \
        "punpckhbw %%mm7, %%mm3         \n\t" /* 0C0D0E0F */              \
        "punpckhbw %%mm7, %%mm4         \n\t" /* 0B0C0D0E */              \
        "paddw %%mm3, %%mm5             \n\t" /* b */                     \
        "paddw %%mm2, %%mm6             \n\t" /* c */                     \
        "paddw %%mm5, %%mm5             \n\t" /* 2b */                    \
        "psubw %%mm5, %%mm6             \n\t" /* c - 2b */                \
        "pshufw $0x06, %%mm0, %%mm5     \n\t" /* 0C0B0A0A */              \
        "pmullw "MANGLE(ff_pw_3)", %%mm6 \n\t" /* 3c - 6b */              \
        "paddw %%mm4, %%mm0             \n\t" /* a */                     \
        "paddw %%mm1, %%mm5             \n\t" /* d */                     \
        "pmullw "MANGLE(ff_pw_20)", %%mm0 \n\t" /* 20a */                 \
        "psubw %%mm5, %%mm0             \n\t" /* 20a - d */               \
        "paddw %6, %%mm6                \n\t"                             \
        "paddw %%mm6, %%mm0             \n\t" /* 20a - 6b + 3c - d */     \
        "psraw $5, %%mm0                \n\t"                             \
        "movq %%mm0, %5                 \n\t"                             \
        /* mm1 = EFGH, mm2 = DEFG, mm3 = CDEF, mm4 = BCDE, mm7 = 0 */     \
                                                                          \
        "movq 5(%0), %%mm0              \n\t" /* FGHIJKLM */              \
        "movq %%mm0, %%mm5              \n\t" /* FGHIJKLM */              \
        "movq %%mm0, %%mm6              \n\t" /* FGHIJKLM */              \
        "psrlq $8, %%mm0                \n\t" /* GHIJKLM0 */              \
        "psrlq $16, %%mm5               \n\t" /* HIJKLM00 */              \
        "punpcklbw %%mm7, %%mm0         \n\t" /* 0G0H0I0J */              \
        "punpcklbw %%mm7, %%mm5         \n\t" /* 0H0I0J0K */              \
        "paddw %%mm0, %%mm2             \n\t" /* b */                     \
        "paddw %%mm5, %%mm3             \n\t" /* c */                     \
        "paddw %%mm2, %%mm2             \n\t" /* 2b */                    \
        "psubw %%mm2, %%mm3             \n\t" /* c - 2b */                \
        "movq %%mm6, %%mm2              \n\t" /* FGHIJKLM */              \
        "psrlq $24, %%mm6               \n\t" /* IJKLM000 */              \
        "punpcklbw %%mm7, %%mm2         \n\t" /* 0F0G0H0I */              \
        "punpcklbw %%mm7, %%mm6         \n\t" /* 0I0J0K0L */              \
        "pmullw "MANGLE(ff_pw_3)", %%mm3 \n\t" /* 3c - 6b */              \
        "paddw %%mm2, %%mm1             \n\t" /* a */                     \
        "paddw %%mm6, %%mm4             \n\t" /* d */                     \
        "pmullw "MANGLE(ff_pw_20)", %%mm1 \n\t" /* 20a */                 \
        "psubw %%mm4, %%mm3             \n\t" /* - 6b +3c - d */          \
        "paddw %6, %%mm1                \n\t"                             \
        "paddw %%mm1, %%mm3             \n\t" /* 20a - 6b +3c - d */      \
        "psraw $5, %%mm3                \n\t"                             \
        "movq %5, %%mm1                 \n\t"                             \
        "packuswb %%mm3, %%mm1          \n\t"                             \
        OP_MMXEXT(%%mm1, (%1), %%mm4, q)                                  \
        /* mm0 = GHIJ, mm2 = FGHI, mm5 = HIJK, mm6 = IJKL, mm7 = 0 */     \
                                                                          \
        "movq 9(%0), %%mm1              \n\t" /* JKLMNOPQ */              \
        "movq %%mm1, %%mm4              \n\t" /* JKLMNOPQ */              \
        "movq %%mm1, %%mm3              \n\t" /* JKLMNOPQ */              \
        "psrlq $8, %%mm1                \n\t" /* KLMNOPQ0 */              \
        "psrlq $16, %%mm4               \n\t" /* LMNOPQ00 */              \
        "punpcklbw %%mm7, %%mm1         \n\t" /* 0K0L0M0N */              \
        "punpcklbw %%mm7, %%mm4         \n\t" /* 0L0M0N0O */              \
        "paddw %%mm1, %%mm5             \n\t" /* b */                     \
        "paddw %%mm4, %%mm0             \n\t" /* c */                     \
        "paddw %%mm5, %%mm5             \n\t" /* 2b */                    \
        "psubw %%mm5, %%mm0             \n\t" /* c - 2b */                \
        "movq %%mm3, %%mm5              \n\t" /* JKLMNOPQ */              \
        "psrlq $24, %%mm3               \n\t" /* MNOPQ000 */              \
        "pmullw "MANGLE(ff_pw_3)", %%mm0 \n\t" /* 3c - 6b */              \
        "punpcklbw %%mm7, %%mm3         \n\t" /* 0M0N0O0P */              \
        "paddw %%mm3, %%mm2             \n\t" /* d */                     \
        "psubw %%mm2, %%mm0             \n\t" /* -6b + 3c - d */          \
        "movq %%mm5, %%mm2              \n\t" /* JKLMNOPQ */              \
        "punpcklbw %%mm7, %%mm2         \n\t" /* 0J0K0L0M */              \
        "punpckhbw %%mm7, %%mm5         \n\t" /* 0N0O0P0Q */              \
        "paddw %%mm2, %%mm6             \n\t" /* a */                     \
        "pmullw "MANGLE(ff_pw_20)", %%mm6 \n\t" /* 20a */                 \
        "paddw %6, %%mm0                \n\t"                             \
        "paddw %%mm6, %%mm0             \n\t" /* 20a - 6b + 3c - d */     \
        "psraw $5, %%mm0                \n\t"                             \
        /* mm1 = KLMN, mm2 = JKLM, mm3 = MNOP, */                         \
        /* mm4 = LMNO, mm5 = NOPQ mm7 = 0 */                              \
                                                                          \
        "paddw %%mm5, %%mm3             \n\t" /* a */                     \
        "pshufw $0xF9, %%mm5, %%mm6     \n\t" /* 0O0P0Q0Q */              \
        "paddw %%mm4, %%mm6             \n\t" /* b */                     \
        "pshufw $0xBE, %%mm5, %%mm4     \n\t" /* 0P0Q0Q0P */              \
        "pshufw $0x6F, %%mm5, %%mm5     \n\t" /* 0Q0Q0P0O */              \
        "paddw %%mm1, %%mm4             \n\t" /* c */                     \
        "paddw %%mm2, %%mm5             \n\t" /* d */                     \
        "paddw %%mm6, %%mm6             \n\t" /* 2b */                    \
        "psubw %%mm6, %%mm4             \n\t" /* c - 2b */                \
        "pmullw "MANGLE(ff_pw_20)", %%mm3 \n\t" /* 20a */                 \
        "pmullw "MANGLE(ff_pw_3)", %%mm4 \n\t" /* 3c - 6b */              \
        "psubw %%mm5, %%mm3             \n\t" /* -6b + 3c - d */          \
        "paddw %6, %%mm4                \n\t"                             \
        "paddw %%mm3, %%mm4             \n\t" /* 20a - 6b + 3c - d */     \
        "psraw $5, %%mm4                \n\t"                             \
        "packuswb %%mm4, %%mm0          \n\t"                             \
        OP_MMXEXT(%%mm0, 8(%1), %%mm4, q)                                 \
                                                                          \
        "add %3, %0                     \n\t"                             \
        "add %4, %1                     \n\t"                             \
        "decl %2                        \n\t"                             \
        "jnz 1b                         \n\t"                             \
        : "+a"(src), "+c"(dst), "+D"(h)                                   \
        : "d"((x86_reg)srcStride), "S"((x86_reg)dstStride),               \
          /* "m"(ff_pw_20), "m"(ff_pw_3), */ "m"(temp), "m"(ROUNDER)      \
        : "memory"                                                        \
        );                                                                \
}                                                                         \
                                                                          \
static void OPNAME ## mpeg4_qpel8_h_lowpass_mmxext(uint8_t *dst,          \
                                                   uint8_t *src,          \
                                                   int dstStride,         \
                                                   int srcStride,         \
                                                   int h)                 \
{                                                                         \
    __asm__ volatile (                                                    \
        "pxor %%mm7, %%mm7              \n\t"                             \
        "1:                             \n\t"                             \
        "movq (%0), %%mm0               \n\t" /* ABCDEFGH */              \
        "movq %%mm0, %%mm1              \n\t" /* ABCDEFGH */              \
        "movq %%mm0, %%mm2              \n\t" /* ABCDEFGH */              \
        "punpcklbw %%mm7, %%mm0         \n\t" /* 0A0B0C0D */              \
        "punpckhbw %%mm7, %%mm1         \n\t" /* 0E0F0G0H */              \
        "pshufw $0x90, %%mm0, %%mm5     \n\t" /* 0A0A0B0C */              \
        "pshufw $0x41, %%mm0, %%mm6     \n\t" /* 0B0A0A0B */              \
        "movq %%mm2, %%mm3              \n\t" /* ABCDEFGH */              \
        "movq %%mm2, %%mm4              \n\t" /* ABCDEFGH */              \
        "psllq $8, %%mm2                \n\t" /* 0ABCDEFG */              \
        "psllq $16, %%mm3               \n\t" /* 00ABCDEF */              \
        "psllq $24, %%mm4               \n\t" /* 000ABCDE */              \
        "punpckhbw %%mm7, %%mm2         \n\t" /* 0D0E0F0G */              \
        "punpckhbw %%mm7, %%mm3         \n\t" /* 0C0D0E0F */              \
        "punpckhbw %%mm7, %%mm4         \n\t" /* 0B0C0D0E */              \
        "paddw %%mm3, %%mm5             \n\t" /* b */                     \
        "paddw %%mm2, %%mm6             \n\t" /* c */                     \
        "paddw %%mm5, %%mm5             \n\t" /* 2b */                    \
        "psubw %%mm5, %%mm6             \n\t" /* c - 2b */                \
        "pshufw $0x06, %%mm0, %%mm5     \n\t" /* 0C0B0A0A */              \
        "pmullw "MANGLE(ff_pw_3)", %%mm6 \n\t" /* 3c - 6b */              \
        "paddw %%mm4, %%mm0             \n\t" /* a */                     \
        "paddw %%mm1, %%mm5             \n\t" /* d */                     \
        "pmullw "MANGLE(ff_pw_20)", %%mm0 \n\t" /* 20a */                 \
        "psubw %%mm5, %%mm0             \n\t" /* 20a - d */               \
        "paddw %5, %%mm6                \n\t"                             \
        "paddw %%mm6, %%mm0             \n\t" /* 20a - 6b + 3c - d */     \
        "psraw $5, %%mm0                \n\t"                             \
        /* mm1 = EFGH, mm2 = DEFG, mm3 = CDEF, mm4 = BCDE, mm7 = 0 */     \
                                                                          \
        "movd 5(%0), %%mm5              \n\t" /* FGHI */                  \
        "punpcklbw %%mm7, %%mm5         \n\t" /* 0F0G0H0I */              \
        "pshufw $0xF9, %%mm5, %%mm6     \n\t" /* 0G0H0I0I */              \
        "paddw %%mm5, %%mm1             \n\t" /* a */                     \
        "paddw %%mm6, %%mm2             \n\t" /* b */                     \
        "pshufw $0xBE, %%mm5, %%mm6     \n\t" /* 0H0I0I0H */              \
        "pshufw $0x6F, %%mm5, %%mm5     \n\t" /* 0I0I0H0G */              \
        "paddw %%mm6, %%mm3             \n\t" /* c */                     \
        "paddw %%mm5, %%mm4             \n\t" /* d */                     \
        "paddw %%mm2, %%mm2             \n\t" /* 2b */                    \
        "psubw %%mm2, %%mm3             \n\t" /* c - 2b */                \
        "pmullw "MANGLE(ff_pw_20)", %%mm1 \n\t" /* 20a */                 \
        "pmullw "MANGLE(ff_pw_3)", %%mm3 \n\t" /* 3c - 6b */              \
        "psubw %%mm4, %%mm3             \n\t" /* -6b + 3c - d */          \
        "paddw %5, %%mm1                \n\t"                             \
        "paddw %%mm1, %%mm3             \n\t" /* 20a - 6b + 3c - d */     \
        "psraw $5, %%mm3                \n\t"                             \
        "packuswb %%mm3, %%mm0          \n\t"                             \
        OP_MMXEXT(%%mm0, (%1), %%mm4, q)                                  \
                                                                          \
        "add %3, %0                     \n\t"                             \
        "add %4, %1                     \n\t"                             \
        "decl %2                        \n\t"                             \
        "jnz 1b                         \n\t"                             \
        : "+a"(src), "+c"(dst), "+d"(h)                                   \
        : "S"((x86_reg)srcStride), "D"((x86_reg)dstStride),               \
          /* "m"(ff_pw_20), "m"(ff_pw_3), */ "m"(ROUNDER)                 \
        : "memory"                                                        \
        );                                                                \
}

#define QPEL_OP(OPNAME, ROUNDER, RND, OP, MMX)                            \
static void OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(uint8_t *dst,        \
                                                     uint8_t *src,        \
                                                     int dstStride,       \
                                                     int srcStride)       \
{                                                                         \
    uint64_t temp[17 * 4];                                                \
    uint64_t *temp_ptr = temp;                                            \
    int count = 17;                                                       \
                                                                          \
    /* FIXME unroll */                                                    \
    __asm__ volatile (                                                    \
        "pxor %%mm7, %%mm7              \n\t"                             \
        "1:                             \n\t"                             \
        "movq (%0), %%mm0               \n\t"                             \
        "movq (%0), %%mm1               \n\t"                             \
        "movq 8(%0), %%mm2              \n\t"                             \
        "movq 8(%0), %%mm3              \n\t"                             \
        "punpcklbw %%mm7, %%mm0         \n\t"                             \
        "punpckhbw %%mm7, %%mm1         \n\t"                             \
        "punpcklbw %%mm7, %%mm2         \n\t"                             \
        "punpckhbw %%mm7, %%mm3         \n\t"                             \
        "movq %%mm0, (%1)               \n\t"                             \
        "movq %%mm1, 17 * 8(%1)         \n\t"                             \
        "movq %%mm2, 2 * 17 * 8(%1)     \n\t"                             \
        "movq %%mm3, 3 * 17 * 8(%1)     \n\t"                             \
        "add $8, %1                     \n\t"                             \
        "add %3, %0                     \n\t"                             \
        "decl %2                        \n\t"                             \
        "jnz 1b                         \n\t"                             \
        : "+r"(src), "+r"(temp_ptr), "+r"(count)                          \
        : "r"((x86_reg)srcStride)                                         \
        : "memory"                                                        \
        );                                                                \
                                                                          \
    temp_ptr = temp;                                                      \
    count    = 4;                                                         \
                                                                          \
    /* FIXME reorder for speed */                                         \
    __asm__ volatile (                                                    \
        /* "pxor %%mm7, %%mm7          \n\t" */                           \
        "1:                             \n\t"                             \
        "movq (%0), %%mm0               \n\t"                             \
        "movq 8(%0), %%mm1              \n\t"                             \
        "movq 16(%0), %%mm2             \n\t"                             \
        "movq 24(%0), %%mm3             \n\t"                             \
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 16(%0), 8(%0), (%0), 32(%0), (%1), OP) \
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 8(%0), (%0), (%0), 40(%0), (%1, %3), OP) \
        "add %4, %1                     \n\t"                             \
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, (%0), (%0), 8(%0), 48(%0), (%1), OP) \
                                                                          \
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, (%0), 8(%0), 16(%0), 56(%0), (%1, %3), OP) \
        "add %4, %1                     \n\t"                             \
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 8(%0), 16(%0), 24(%0), 64(%0), (%1), OP) \
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 16(%0), 24(%0), 32(%0), 72(%0), (%1, %3), OP) \
        "add %4, %1                     \n\t"                             \
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 24(%0), 32(%0), 40(%0), 80(%0), (%1), OP) \
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 32(%0), 40(%0), 48(%0), 88(%0), (%1, %3), OP) \
        "add %4, %1                     \n\t"                             \
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 40(%0), 48(%0), 56(%0), 96(%0), (%1), OP) \
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 48(%0), 56(%0), 64(%0), 104(%0), (%1, %3), OP) \
        "add %4, %1                     \n\t"                             \
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 56(%0), 64(%0), 72(%0), 112(%0), (%1), OP) \
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 64(%0), 72(%0), 80(%0), 120(%0), (%1, %3), OP) \
        "add %4, %1                     \n\t"                             \
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 72(%0), 80(%0), 88(%0), 128(%0), (%1), OP) \
                                                                          \
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 80(%0), 88(%0), 96(%0), 128(%0), (%1, %3), OP) \
        "add %4, %1                     \n\t"                             \
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 88(%0), 96(%0), 104(%0), 120(%0), (%1), OP) \
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 96(%0), 104(%0), 112(%0), 112(%0), (%1, %3), OP) \
                                                                          \
        "add $136, %0                   \n\t"                             \
        "add %6, %1                     \n\t"                             \
        "decl %2                        \n\t"                             \
        "jnz 1b                         \n\t"                             \
                                                                          \
        : "+r"(temp_ptr), "+r"(dst), "+g"(count)                          \
        : "r"((x86_reg)dstStride), "r"(2 * (x86_reg)dstStride),           \
          /* "m"(ff_pw_20), "m"(ff_pw_3), */ "m"(ROUNDER),                \
          "g"(4 - 14 * (x86_reg)dstStride)                                \
        : "memory"                                                        \
        );                                                                \
}                                                                         \
                                                                          \
static void OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(uint8_t *dst,         \
                                                    uint8_t *src,         \
                                                    int dstStride,        \
                                                    int srcStride)        \
{                                                                         \
    uint64_t temp[9 * 2];                                                 \
    uint64_t *temp_ptr = temp;                                            \
    int count = 9;                                                        \
                                                                          \
    /* FIXME unroll */                                                    \
    __asm__ volatile (                                                    \
        "pxor %%mm7, %%mm7              \n\t"                             \
        "1:                             \n\t"                             \
        "movq (%0), %%mm0               \n\t"                             \
        "movq (%0), %%mm1               \n\t"                             \
        "punpcklbw %%mm7, %%mm0         \n\t"                             \
        "punpckhbw %%mm7, %%mm1         \n\t"                             \
        "movq %%mm0, (%1)               \n\t"                             \
        "movq %%mm1, 9*8(%1)            \n\t"                             \
        "add $8, %1                     \n\t"                             \
        "add %3, %0                     \n\t"                             \
        "decl %2                        \n\t"                             \
        "jnz 1b                         \n\t"                             \
        : "+r"(src), "+r"(temp_ptr), "+r"(count)                          \
        : "r"((x86_reg)srcStride)                                         \
        : "memory"                                                        \
        );                                                                \
                                                                          \
    temp_ptr = temp;                                                      \
    count    = 2;                                                         \
                                                                          \
    /* FIXME reorder for speed */                                         \
    __asm__ volatile (                                                    \
        /* "pxor %%mm7, %%mm7          \n\t" */                           \
        "1:                             \n\t"                             \
        "movq (%0), %%mm0               \n\t"                             \
        "movq 8(%0), %%mm1              \n\t"                             \
        "movq 16(%0), %%mm2             \n\t"                             \
        "movq 24(%0), %%mm3             \n\t"                             \
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 16(%0), 8(%0), (%0), 32(%0), (%1), OP) \
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 8(%0), (%0), (%0), 40(%0), (%1, %3), OP) \
        "add %4, %1                     \n\t"                             \
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, (%0), (%0), 8(%0), 48(%0), (%1), OP) \
                                                                          \
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, (%0), 8(%0), 16(%0), 56(%0), (%1, %3), OP) \
        "add %4, %1                     \n\t"                             \
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 8(%0), 16(%0), 24(%0), 64(%0), (%1), OP) \
                                                                          \
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 16(%0), 24(%0), 32(%0), 64(%0), (%1, %3), OP) \
        "add %4, %1                     \n\t"                             \
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 24(%0), 32(%0), 40(%0), 56(%0), (%1), OP) \
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 32(%0), 40(%0), 48(%0), 48(%0), (%1, %3), OP) \
                                                                          \
        "add $72, %0                    \n\t"                             \
        "add %6, %1                     \n\t"                             \
        "decl %2                        \n\t"                             \
        "jnz 1b                         \n\t"                             \
                                                                          \
        : "+r"(temp_ptr), "+r"(dst), "+g"(count)                          \
        : "r"((x86_reg)dstStride), "r"(2 * (x86_reg)dstStride),           \
          /* "m"(ff_pw_20), "m"(ff_pw_3), */ "m"(ROUNDER),                \
          "g"(4 - 6 * (x86_reg)dstStride)                                 \
        : "memory"                                                        \
        );                                                                \
}                                                                         \
                                                                          \
static void OPNAME ## qpel8_mc00_ ## MMX (uint8_t *dst, uint8_t *src,     \
                                          int stride)                     \
{                                                                         \
    OPNAME ## pixels8_ ## MMX(dst, src, stride, 8);                       \
}                                                                         \
                                                                          \
static void OPNAME ## qpel8_mc10_ ## MMX(uint8_t *dst, uint8_t *src,      \
                                         int stride)                      \
{                                                                         \
    uint64_t temp[8];                                                     \
    uint8_t * const half = (uint8_t*)temp;                                \
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(half, src, 8,             \
                                                stride, 8);               \
    OPNAME ## pixels8_l2_ ## MMX(dst, src, half, stride, stride, 8);      \
}                                                                         \
                                                                          \
static void OPNAME ## qpel8_mc20_ ## MMX(uint8_t *dst, uint8_t *src,      \
                                         int stride)                      \
{                                                                         \
    OPNAME ## mpeg4_qpel8_h_lowpass_ ## MMX(dst, src, stride,             \
                                            stride, 8);                   \
}                                                                         \
                                                                          \
static void OPNAME ## qpel8_mc30_ ## MMX(uint8_t *dst, uint8_t *src,      \
                                         int stride)                      \
{                                                                         \
    uint64_t temp[8];                                                     \
    uint8_t * const half = (uint8_t*)temp;                                \
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(half, src, 8,             \
                                                stride, 8);               \
    OPNAME ## pixels8_l2_ ## MMX(dst, src + 1, half, stride,              \
                                 stride, 8);                              \
}                                                                         \
                                                                          \
static void OPNAME ## qpel8_mc01_ ## MMX(uint8_t *dst, uint8_t *src,      \
                                         int stride)                      \
{                                                                         \
    uint64_t temp[8];                                                     \
    uint8_t * const half = (uint8_t*)temp;                                \
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(half, src, 8, stride);    \
    OPNAME ## pixels8_l2_ ## MMX(dst, src, half, stride, stride, 8);      \
}                                                                         \
                                                                          \
static void OPNAME ## qpel8_mc02_ ## MMX(uint8_t *dst, uint8_t *src,      \
                                         int stride)                      \
{                                                                         \
    OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, src, stride, stride);    \
}                                                                         \
                                                                          \
static void OPNAME ## qpel8_mc03_ ## MMX(uint8_t *dst, uint8_t *src,      \
                                         int stride)                      \
{                                                                         \
    uint64_t temp[8];                                                     \
    uint8_t * const half = (uint8_t*)temp;                                \
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(half, src, 8, stride);    \
    OPNAME ## pixels8_l2_ ## MMX(dst, src + stride, half, stride,         \
                                 stride, 8);                              \
}                                                                         \
                                                                          \
static void OPNAME ## qpel8_mc11_ ## MMX(uint8_t *dst, uint8_t *src,      \
                                         int stride)                      \
{                                                                         \
    uint64_t half[8 + 9];                                                 \
    uint8_t * const halfH  = ((uint8_t*)half) + 64;                       \
    uint8_t * const halfHV = ((uint8_t*)half);                            \
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8,            \
                                                stride, 9);               \
    put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8, stride, 9);    \
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);     \
    OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV, stride, 8, 8);       \
}                                                                         \
                                                                          \
static void OPNAME ## qpel8_mc31_ ## MMX(uint8_t *dst, uint8_t *src,      \
                                         int stride)                      \
{                                                                         \
    uint64_t half[8 + 9];                                                 \
    uint8_t * const halfH  = ((uint8_t*)half) + 64;                       \
    uint8_t * const halfHV = ((uint8_t*)half);                            \
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8,            \
                                                stride, 9);               \
    put ## RND ## pixels8_l2_ ## MMX(halfH, src + 1, halfH, 8,            \
                                     stride, 9);                          \
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);     \
    OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV, stride, 8, 8);       \
}                                                                         \
                                                                          \
static void OPNAME ## qpel8_mc13_ ## MMX(uint8_t *dst, uint8_t *src,      \
                                         int stride)                      \
{                                                                         \
    uint64_t half[8 + 9];                                                 \
    uint8_t * const halfH  = ((uint8_t*)half) + 64;                       \
    uint8_t * const halfHV = ((uint8_t*)half);                            \
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8,            \
                                                stride, 9);               \
    put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8, stride, 9);    \
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);     \
    OPNAME ## pixels8_l2_ ## MMX(dst, halfH + 8, halfHV, stride, 8, 8);   \
}                                                                         \
                                                                          \
static void OPNAME ## qpel8_mc33_ ## MMX(uint8_t *dst, uint8_t *src,      \
                                         int stride)                      \
{                                                                         \
    uint64_t half[8 + 9];                                                 \
    uint8_t * const halfH  = ((uint8_t*)half) + 64;                       \
    uint8_t * const halfHV = ((uint8_t*)half);                            \
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8,            \
                                                stride, 9);               \
    put ## RND ## pixels8_l2_ ## MMX(halfH, src + 1, halfH, 8,            \
                                     stride, 9);                          \
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);     \
    OPNAME ## pixels8_l2_ ## MMX(dst, halfH + 8, halfHV, stride, 8, 8);   \
}                                                                         \
                                                                          \
static void OPNAME ## qpel8_mc21_ ## MMX(uint8_t *dst, uint8_t *src,      \
                                         int stride)                      \
{                                                                         \
    uint64_t half[8 + 9];                                                 \
    uint8_t * const halfH  = ((uint8_t*)half) + 64;                       \
    uint8_t * const halfHV = ((uint8_t*)half);                            \
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8,            \
                                                stride, 9);               \
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);     \
    OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV, stride, 8, 8);       \
}                                                                         \
                                                                          \
static void OPNAME ## qpel8_mc23_ ## MMX(uint8_t *dst, uint8_t *src,      \
                                         int stride)                      \
{                                                                         \
    uint64_t half[8 + 9];                                                 \
    uint8_t * const halfH  = ((uint8_t*)half) + 64;                       \
    uint8_t * const halfHV = ((uint8_t*)half);                            \
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8,            \
                                                stride, 9);               \
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);     \
    OPNAME ## pixels8_l2_ ## MMX(dst, halfH + 8, halfHV, stride, 8, 8);   \
}                                                                         \
                                                                          \
static void OPNAME ## qpel8_mc12_ ## MMX(uint8_t *dst, uint8_t *src,      \
                                         int stride)                      \
{                                                                         \
    uint64_t half[8 + 9];                                                 \
    uint8_t * const halfH = ((uint8_t*)half);                             \
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8,            \
                                                stride, 9);               \
    put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8, stride, 9);    \
    OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, stride, 8);       \
}                                                                         \
                                                                          \
static void OPNAME ## qpel8_mc32_ ## MMX(uint8_t *dst, uint8_t *src,      \
                                         int stride)                      \
{                                                                         \
    uint64_t half[8 + 9];                                                 \
    uint8_t * const halfH = ((uint8_t*)half);                             \
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8,            \
                                                stride, 9);               \
    put ## RND ## pixels8_l2_ ## MMX(halfH, src + 1, halfH, 8,            \
                                     stride, 9);                          \
    OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, stride, 8);       \
}                                                                         \
                                                                          \
static void OPNAME ## qpel8_mc22_ ## MMX(uint8_t *dst, uint8_t *src,      \
                                         int stride)                      \
{                                                                         \
    uint64_t half[9];                                                     \
    uint8_t * const halfH = ((uint8_t*)half);                             \
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8,            \
                                                stride, 9);               \
    OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, stride, 8);       \
}                                                                         \
                                                                          \
static void OPNAME ## qpel16_mc00_ ## MMX (uint8_t *dst, uint8_t *src,    \
                                           int stride)                    \
{                                                                         \
    OPNAME ## pixels16_ ## MMX(dst, src, stride, 16);                     \
}                                                                         \
                                                                          \
static void OPNAME ## qpel16_mc10_ ## MMX(uint8_t *dst, uint8_t *src,     \
                                          int stride)                     \
{                                                                         \
    uint64_t temp[32];                                                    \
    uint8_t * const half = (uint8_t*)temp;                                \
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(half, src, 16,           \
                                                 stride, 16);             \
    OPNAME ## pixels16_l2_ ## MMX(dst, src, half, stride, stride, 16);    \
}                                                                         \
                                                                          \
static void OPNAME ## qpel16_mc20_ ## MMX(uint8_t *dst, uint8_t *src,     \
                                          int stride)                     \
{                                                                         \
    OPNAME ## mpeg4_qpel16_h_lowpass_ ## MMX(dst, src,                    \
                                             stride, stride, 16);         \
}                                                                         \
                                                                          \
static void OPNAME ## qpel16_mc30_ ## MMX(uint8_t *dst, uint8_t *src,     \
                                          int stride)                     \
{                                                                         \
    uint64_t temp[32];                                                    \
    uint8_t * const half = (uint8_t*)temp;                                \
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(half, src, 16,           \
                                                 stride, 16);             \
    OPNAME ## pixels16_l2_ ## MMX(dst, src + 1, half,                     \
                                  stride, stride, 16);                    \
}                                                                         \
                                                                          \
static void OPNAME ## qpel16_mc01_ ## MMX(uint8_t *dst, uint8_t *src,     \
                                          int stride)                     \
{                                                                         \
    uint64_t temp[32];                                                    \
    uint8_t * const half = (uint8_t*)temp;                                \
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(half, src, 16,           \
                                                 stride);                 \
    OPNAME ## pixels16_l2_ ## MMX(dst, src, half, stride, stride, 16);    \
}                                                                         \
                                                                          \
static void OPNAME ## qpel16_mc02_ ## MMX(uint8_t *dst, uint8_t *src,     \
                                          int stride)                     \
{                                                                         \
    OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, src, stride, stride);   \
}                                                                         \
                                                                          \
static void OPNAME ## qpel16_mc03_ ## MMX(uint8_t *dst, uint8_t *src,     \
                                          int stride)                     \
{                                                                         \
    uint64_t temp[32];                                                    \
    uint8_t * const half = (uint8_t*)temp;                                \
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(half, src, 16,           \
                                                 stride);                 \
    OPNAME ## pixels16_l2_ ## MMX(dst, src + stride, half,                \
                                  stride, stride, 16);                    \
}                                                                         \
                                                                          \
static void OPNAME ## qpel16_mc11_ ## MMX(uint8_t *dst, uint8_t *src,     \
                                          int stride)                     \
{                                                                         \
    uint64_t half[16 * 2 + 17 * 2];                                       \
    uint8_t * const halfH  = ((uint8_t*)half) + 256;                      \
    uint8_t * const halfHV = ((uint8_t*)half);                            \
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16,          \
                                                 stride, 17);             \
    put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16,              \
                                      stride, 17);                        \
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH,           \
                                                 16, 16);                 \
    OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV, stride, 16, 16);    \
}                                                                         \
                                                                          \
static void OPNAME ## qpel16_mc31_ ## MMX(uint8_t *dst, uint8_t *src,     \
                                          int stride)                     \
{                                                                         \
    uint64_t half[16 * 2 + 17 * 2];                                       \
    uint8_t * const halfH  = ((uint8_t*)half) + 256;                      \
    uint8_t * const halfHV = ((uint8_t*)half);                            \
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16,          \
                                                 stride, 17);             \
    put ## RND ## pixels16_l2_ ## MMX(halfH, src + 1, halfH, 16,          \
                                      stride, 17);                        \
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH,           \
                                                 16, 16);                 \
    OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV, stride, 16, 16);    \
}                                                                         \
                                                                          \
static void OPNAME ## qpel16_mc13_ ## MMX(uint8_t *dst, uint8_t *src,     \
                                          int stride)                     \
{                                                                         \
    uint64_t half[16 * 2 + 17 * 2];                                       \
    uint8_t * const halfH  = ((uint8_t*)half) + 256;                      \
    uint8_t * const halfHV = ((uint8_t*)half);                            \
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16,          \
                                                 stride, 17);             \
    put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16,              \
                                      stride, 17);                        \
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH,           \
                                                 16, 16);                 \
    OPNAME ## pixels16_l2_ ## MMX(dst, halfH + 16, halfHV, stride,        \
                                  16, 16);                                \
}                                                                         \
                                                                          \
static void OPNAME ## qpel16_mc33_ ## MMX(uint8_t *dst, uint8_t *src,     \
                                          int stride)                     \
{                                                                         \
    uint64_t half[16 * 2 + 17 * 2];                                       \
    uint8_t * const halfH  = ((uint8_t*)half) + 256;                      \
    uint8_t * const halfHV = ((uint8_t*)half);                            \
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16,          \
                                                 stride, 17);             \
    put ## RND ## pixels16_l2_ ## MMX(halfH, src + 1, halfH, 16,          \
                                      stride, 17);                        \
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH,           \
                                                 16, 16);                 \
    OPNAME ## pixels16_l2_ ## MMX(dst, halfH + 16, halfHV, stride,        \
                                  16, 16);                                \
}                                                                         \
                                                                          \
static void OPNAME ## qpel16_mc21_ ## MMX(uint8_t *dst, uint8_t *src,     \
                                          int stride)                     \
{                                                                         \
    uint64_t half[16 * 2 + 17 * 2];                                       \
    uint8_t * const halfH  = ((uint8_t*)half) + 256;                      \
    uint8_t * const halfHV = ((uint8_t*)half);                            \
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16,          \
                                                 stride, 17);             \
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH,           \
                                                 16, 16);                 \
    OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV, stride, 16, 16);    \
}                                                                         \
                                                                          \
static void OPNAME ## qpel16_mc23_ ## MMX(uint8_t *dst, uint8_t *src,     \
                                          int stride)                     \
{                                                                         \
    uint64_t half[16 * 2 + 17 * 2];                                       \
    uint8_t * const halfH  = ((uint8_t*)half) + 256;                      \
    uint8_t * const halfHV = ((uint8_t*)half);                            \
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16,          \
                                                 stride, 17);             \
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH,           \
                                                 16, 16);                 \
    OPNAME ## pixels16_l2_ ## MMX(dst, halfH + 16, halfHV, stride,        \
                                  16, 16);                                \
}                                                                         \
                                                                          \
static void OPNAME ## qpel16_mc12_ ## MMX(uint8_t *dst, uint8_t *src,     \
                                          int stride)                     \
{                                                                         \
    uint64_t half[17 * 2];                                                \
    uint8_t * const halfH = ((uint8_t*)half);                             \
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16,          \
                                                 stride, 17);             \
    put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16,              \
                                      stride, 17);                        \
    OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, stride, 16);     \
}                                                                         \
                                                                          \
static void OPNAME ## qpel16_mc32_ ## MMX(uint8_t *dst, uint8_t *src,     \
                                          int stride)                     \
{                                                                         \
    uint64_t half[17 * 2];                                                \
    uint8_t * const halfH = ((uint8_t*)half);                             \
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16,          \
                                                 stride, 17);             \
    put ## RND ## pixels16_l2_ ## MMX(halfH, src + 1, halfH, 16,          \
                                      stride, 17);                        \
    OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, stride, 16);     \
}                                                                         \
                                                                          \
static void OPNAME ## qpel16_mc22_ ## MMX(uint8_t *dst, uint8_t *src,     \
                                          int stride)                     \
{                                                                         \
    uint64_t half[17 * 2];                                                \
    uint8_t * const halfH = ((uint8_t*)half);                             \
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16,          \
                                                 stride, 17);             \
    OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, stride, 16);     \
}

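/* Naming convention for the functions generated above: qpelN_mcXY
 * interpolates an NxN block at quarter-pel offset (X/4, Y/4), so e.g.
 * qpel8_mc22 produces the half-pel (1/2, 1/2) position of an 8x8 block. */
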
#define PUT_OP(a, b, temp, size)                \
    "mov"#size" "#a", "#b"          \n\t"

#define AVG_MMXEXT_OP(a, b, temp, size)         \
    "mov"#size" "#b", "#temp"       \n\t"       \
    "pavgb "#temp", "#a"            \n\t"       \
    "mov"#size" "#a", "#b"          \n\t"

QPEL_BASE(put_,        ff_pw_16, _,        PUT_OP)
QPEL_BASE(avg_,        ff_pw_16, _,        AVG_MMXEXT_OP)
QPEL_BASE(put_no_rnd_, ff_pw_15, _no_rnd_, PUT_OP)
QPEL_OP(put_,          ff_pw_16, _,        PUT_OP,        mmxext)
QPEL_OP(avg_,          ff_pw_16, _,        AVG_MMXEXT_OP, mmxext)
QPEL_OP(put_no_rnd_,   ff_pw_15, _no_rnd_, PUT_OP,        mmxext)

/***********************************/
/* bilinear qpel: not compliant with any spec, only for -lavdopts fast */

1567 #define QPEL_2TAP_XY(OPNAME, SIZE, MMX, XY, HPEL) \
1568 static void OPNAME ## 2tap_qpel ## SIZE ## _mc ## XY ## _ ## MMX(uint8_t *dst, \
1569  uint8_t *src, \
1570  int stride) \
1571 { \
1572  OPNAME ## pixels ## SIZE ## HPEL(dst, src, stride, SIZE); \
1573 }
1574 
1575 #define QPEL_2TAP_L3(OPNAME, SIZE, MMX, XY, S0, S1, S2) \
1576 static void OPNAME ## 2tap_qpel ## SIZE ## _mc ## XY ## _ ## MMX(uint8_t *dst, \
1577  uint8_t *src, \
1578  int stride) \
1579 { \
1580  OPNAME ## 2tap_qpel ## SIZE ## _l3_ ## MMX(dst, src + S0, stride, SIZE, \
1581  S1, S2); \
1582 }
1583 
1584 #define QPEL_2TAP(OPNAME, SIZE, MMX) \
1585 QPEL_2TAP_XY(OPNAME, SIZE, MMX, 20, _x2_ ## MMX) \
1586 QPEL_2TAP_XY(OPNAME, SIZE, MMX, 02, _y2_ ## MMX) \
1587 QPEL_2TAP_XY(OPNAME, SIZE, MMX, 22, _xy2_mmx) \
1588 static const qpel_mc_func OPNAME ## 2tap_qpel ## SIZE ## _mc00_ ## MMX = \
1589  OPNAME ## qpel ## SIZE ## _mc00_ ## MMX; \
1590 static const qpel_mc_func OPNAME ## 2tap_qpel ## SIZE ## _mc21_ ## MMX = \
1591  OPNAME ## 2tap_qpel ## SIZE ## _mc20_ ## MMX; \
1592 static const qpel_mc_func OPNAME ## 2tap_qpel ## SIZE ## _mc12_ ## MMX = \
1593  OPNAME ## 2tap_qpel ## SIZE ## _mc02_ ## MMX; \
1594 static void OPNAME ## 2tap_qpel ## SIZE ## _mc32_ ## MMX(uint8_t *dst, \
1595  uint8_t *src, \
1596  int stride) \
1597 { \
1598  OPNAME ## pixels ## SIZE ## _y2_ ## MMX(dst, src + 1, stride, SIZE); \
1599 } \
1600 static void OPNAME ## 2tap_qpel ## SIZE ## _mc23_ ## MMX(uint8_t *dst, \
1601  uint8_t *src, \
1602  int stride) \
1603 { \
1604  OPNAME ## pixels ## SIZE ## _x2_ ## MMX(dst, src + stride, \
1605  stride, SIZE); \
1606 } \
1607 QPEL_2TAP_L3(OPNAME, SIZE, MMX, 10, 0, 1, 0) \
1608 QPEL_2TAP_L3(OPNAME, SIZE, MMX, 30, 1, -1, 0) \
1609 QPEL_2TAP_L3(OPNAME, SIZE, MMX, 01, 0, stride, 0) \
1610 QPEL_2TAP_L3(OPNAME, SIZE, MMX, 03, stride, -stride, 0) \
1611 QPEL_2TAP_L3(OPNAME, SIZE, MMX, 11, 0, stride, 1) \
1612 QPEL_2TAP_L3(OPNAME, SIZE, MMX, 31, 1, stride, -1) \
1613 QPEL_2TAP_L3(OPNAME, SIZE, MMX, 13, stride, -stride, 1) \
1614 QPEL_2TAP_L3(OPNAME, SIZE, MMX, 33, stride + 1, -stride, -1)
1615 
1616 QPEL_2TAP(put_, 16, mmxext)
1617 QPEL_2TAP(avg_, 16, mmxext)
1618 QPEL_2TAP(put_, 8, mmxext)
1619 QPEL_2TAP(avg_, 8, mmxext)
1620 
1621 void ff_put_rv40_qpel8_mc33_mmx(uint8_t *dst, uint8_t *src, int stride)
1622 {
1623  put_pixels8_xy2_mmx(dst, src, stride, 8);
1624 }
1625 void ff_put_rv40_qpel16_mc33_mmx(uint8_t *dst, uint8_t *src, int stride)
1626 {
1627  put_pixels16_xy2_mmx(dst, src, stride, 16);
1628 }
1629 void ff_avg_rv40_qpel8_mc33_mmx(uint8_t *dst, uint8_t *src, int stride)
1630 {
1631  avg_pixels8_xy2_mmx(dst, src, stride, 8);
1632 }
1633 void ff_avg_rv40_qpel16_mc33_mmx(uint8_t *dst, uint8_t *src, int stride)
1634 {
1635  avg_pixels16_xy2_mmx(dst, src, stride, 16);
1636 }
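
/*
 * Editorial note: RV40's (3/4, 3/4) position is served by the plain
 * half-pel xy2 average. Scalar equivalent of put_pixels8_xy2_mmx,
 * for reference:
 */
static void put_pixels8_xy2_ref(uint8_t *dst, const uint8_t *src,
                                int stride, int h)
{
    int x, y;
    for (y = 0; y < h; y++) {
        for (x = 0; x < 8; x++)
            dst[x] = (src[x] + src[x + 1] +
                      src[x + stride] + src[x + stride + 1] + 2) >> 2;
        dst += stride;
        src += stride;
    }
}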
1637 
1638 static void gmc_mmx(uint8_t *dst, uint8_t *src,
1639  int stride, int h, int ox, int oy,
1640  int dxx, int dxy, int dyx, int dyy,
1641  int shift, int r, int width, int height)
1642 {
1643  const int w = 8;
1644  const int ix = ox >> (16 + shift);
1645  const int iy = oy >> (16 + shift);
1646  const int oxs = ox >> 4;
1647  const int oys = oy >> 4;
1648  const int dxxs = dxx >> 4;
1649  const int dxys = dxy >> 4;
1650  const int dyxs = dyx >> 4;
1651  const int dyys = dyy >> 4;
1652  const uint16_t r4[4] = { r, r, r, r };
1653  const uint16_t dxy4[4] = { dxys, dxys, dxys, dxys };
1654  const uint16_t dyy4[4] = { dyys, dyys, dyys, dyys };
1655  const uint64_t shift2 = 2 * shift;
1656  int x, y;
1657 
1658  const int dxw = (dxx - (1 << (16 + shift))) * (w - 1);
1659  const int dyh = (dyy - (1 << (16 + shift))) * (h - 1);
1660  const int dxh = dxy * (h - 1);
1661  const int dyw = dyx * (w - 1);
1662  if ( // non-constant fullpel offset (3% of blocks)
1663  ((ox ^ (ox + dxw)) | (ox ^ (ox + dxh)) | (ox ^ (ox + dxw + dxh)) |
1664  (oy ^ (oy + dyw)) | (oy ^ (oy + dyh)) | (oy ^ (oy + dyw + dyh))) >> (16 + shift)
1665  // uses more than 16 bits of subpel mv (only at huge resolution)
1666  || (dxx | dxy | dyx | dyy) & 15 ||
1667  (unsigned)ix >= width - w ||
1668  (unsigned)iy >= height - h) {
1669  // FIXME could still use mmx for some of the rows
1670  ff_gmc_c(dst, src, stride, h, ox, oy, dxx, dxy, dyx, dyy,
1671  shift, r, width, height);
1672  return;
1673  }
1674 
1675  src += ix + iy * stride;
1676 
1677  __asm__ volatile (
1678  "movd %0, %%mm6 \n\t"
1679  "pxor %%mm7, %%mm7 \n\t"
1680  "punpcklwd %%mm6, %%mm6 \n\t"
1681  "punpcklwd %%mm6, %%mm6 \n\t"
1682  :: "r"(1<<shift)
1683  );
1684 
1685  for (x = 0; x < w; x += 4) {
1686  uint16_t dx4[4] = { oxs - dxys + dxxs * (x + 0),
1687  oxs - dxys + dxxs * (x + 1),
1688  oxs - dxys + dxxs * (x + 2),
1689  oxs - dxys + dxxs * (x + 3) };
1690  uint16_t dy4[4] = { oys - dyys + dyxs * (x + 0),
1691  oys - dyys + dyxs * (x + 1),
1692  oys - dyys + dyxs * (x + 2),
1693  oys - dyys + dyxs * (x + 3) };
1694 
1695  for (y = 0; y < h; y++) {
1696  __asm__ volatile (
1697  "movq %0, %%mm4 \n\t"
1698  "movq %1, %%mm5 \n\t"
1699  "paddw %2, %%mm4 \n\t"
1700  "paddw %3, %%mm5 \n\t"
1701  "movq %%mm4, %0 \n\t"
1702  "movq %%mm5, %1 \n\t"
1703  "psrlw $12, %%mm4 \n\t"
1704  "psrlw $12, %%mm5 \n\t"
1705  : "+m"(*dx4), "+m"(*dy4)
1706  : "m"(*dxy4), "m"(*dyy4)
1707  );
1708 
1709  __asm__ volatile (
1710  "movq %%mm6, %%mm2 \n\t"
1711  "movq %%mm6, %%mm1 \n\t"
1712  "psubw %%mm4, %%mm2 \n\t"
1713  "psubw %%mm5, %%mm1 \n\t"
1714  "movq %%mm2, %%mm0 \n\t"
1715  "movq %%mm4, %%mm3 \n\t"
1716  "pmullw %%mm1, %%mm0 \n\t" // (s - dx) * (s - dy)
1717  "pmullw %%mm5, %%mm3 \n\t" // dx * dy
1718  "pmullw %%mm5, %%mm2 \n\t" // (s - dx) * dy
1719  "pmullw %%mm4, %%mm1 \n\t" // dx * (s - dy)
1720 
1721  "movd %4, %%mm5 \n\t"
1722  "movd %3, %%mm4 \n\t"
1723  "punpcklbw %%mm7, %%mm5 \n\t"
1724  "punpcklbw %%mm7, %%mm4 \n\t"
1725  "pmullw %%mm5, %%mm3 \n\t" // src[1, 1] * dx * dy
1726  "pmullw %%mm4, %%mm2 \n\t" // src[0, 1] * (s - dx) * dy
1727 
1728  "movd %2, %%mm5 \n\t"
1729  "movd %1, %%mm4 \n\t"
1730  "punpcklbw %%mm7, %%mm5 \n\t"
1731  "punpcklbw %%mm7, %%mm4 \n\t"
1732  "pmullw %%mm5, %%mm1 \n\t" // src[1, 0] * dx * (s - dy)
1733  "pmullw %%mm4, %%mm0 \n\t" // src[0, 0] * (s - dx) * (s - dy)
1734  "paddw %5, %%mm1 \n\t"
1735  "paddw %%mm3, %%mm2 \n\t"
1736  "paddw %%mm1, %%mm0 \n\t"
1737  "paddw %%mm2, %%mm0 \n\t"
1738 
1739  "psrlw %6, %%mm0 \n\t"
1740  "packuswb %%mm0, %%mm0 \n\t"
1741  "movd %%mm0, %0 \n\t"
1742 
1743  : "=m"(dst[x + y * stride])
1744  : "m"(src[0]), "m"(src[1]),
1745  "m"(src[stride]), "m"(src[stride + 1]),
1746  "m"(*r4), "m"(shift2)
1747  );
1748  src += stride;
1749  }
1750  src += 4 - h * stride;
1751  }
1752 }
1753 #endif /* HAVE_INLINE_ASM */
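
/*
 * Editorial note: what the gmc_mmx() inner loop computes per pixel, derived
 * from the comments in the asm above (s = 1 << shift; dx, dy are the
 * per-pixel subpel fractions; r is the rounding constant):
 */
static inline int gmc_bilinear_ref(const uint8_t *src, int stride,
                                   int dx, int dy, int s, int r, int shift)
{
    return ((s - dx) * (s - dy) * src[0]          +
            dx       * (s - dy) * src[1]          +
            (s - dx) * dy       * src[stride]     +
            dx       * dy       * src[stride + 1] + r) >> (2 * shift);
}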
1754 
1755 #include "h264_qpel.c"
1756 
1757 void ff_put_h264_chroma_mc8_rnd_mmx (uint8_t *dst, uint8_t *src,
1758  int stride, int h, int x, int y);
1759 void ff_avg_h264_chroma_mc8_rnd_mmxext(uint8_t *dst, uint8_t *src,
1760  int stride, int h, int x, int y);
1761 void ff_avg_h264_chroma_mc8_rnd_3dnow(uint8_t *dst, uint8_t *src,
1762  int stride, int h, int x, int y);
1763 
1764 void ff_put_h264_chroma_mc4_mmx (uint8_t *dst, uint8_t *src,
1765  int stride, int h, int x, int y);
1766 void ff_avg_h264_chroma_mc4_mmxext (uint8_t *dst, uint8_t *src,
1767  int stride, int h, int x, int y);
1768 void ff_avg_h264_chroma_mc4_3dnow (uint8_t *dst, uint8_t *src,
1769  int stride, int h, int x, int y);
1770 
1771 void ff_put_h264_chroma_mc2_mmxext (uint8_t *dst, uint8_t *src,
1772  int stride, int h, int x, int y);
1773 void ff_avg_h264_chroma_mc2_mmxext (uint8_t *dst, uint8_t *src,
1774  int stride, int h, int x, int y);
1775 
1776 void ff_put_h264_chroma_mc8_rnd_ssse3(uint8_t *dst, uint8_t *src,
1777  int stride, int h, int x, int y);
1778 void ff_avg_h264_chroma_mc8_rnd_ssse3(uint8_t *dst, uint8_t *src,
1779  int stride, int h, int x, int y);
1780 
1781 void ff_put_h264_chroma_mc4_ssse3 (uint8_t *dst, uint8_t *src,
1782  int stride, int h, int x, int y);
1783 void ff_avg_h264_chroma_mc4_ssse3 (uint8_t *dst, uint8_t *src,
1784  int stride, int h, int x, int y);
1785 
1786 #define CHROMA_MC(OP, NUM, DEPTH, OPT) \
1787 void ff_ ## OP ## _h264_chroma_mc ## NUM ## _ ## DEPTH ## _ ## OPT \
1788  (uint8_t *dst, uint8_t *src, \
1789  int stride, int h, int x, int y);
1790 
1791 CHROMA_MC(put, 2, 10, mmxext)
1792 CHROMA_MC(avg, 2, 10, mmxext)
1793 CHROMA_MC(put, 4, 10, mmxext)
1794 CHROMA_MC(avg, 4, 10, mmxext)
1795 CHROMA_MC(put, 8, 10, sse2)
1796 CHROMA_MC(avg, 8, 10, sse2)
1797 CHROMA_MC(put, 8, 10, avx)
1798 CHROMA_MC(avg, 8, 10, avx)
1799 
1800 #if HAVE_INLINE_ASM
1801 
1802 /* CAVS-specific */
1803 void ff_put_cavs_qpel8_mc00_mmxext(uint8_t *dst, uint8_t *src, int stride)
1804 {
1805  put_pixels8_mmx(dst, src, stride, 8);
1806 }
1807 
1808 void ff_avg_cavs_qpel8_mc00_mmxext(uint8_t *dst, uint8_t *src, int stride)
1809 {
1810  avg_pixels8_mmx(dst, src, stride, 8);
1811 }
1812 
1813 void ff_put_cavs_qpel16_mc00_mmxext(uint8_t *dst, uint8_t *src, int stride)
1814 {
1815  put_pixels16_mmx(dst, src, stride, 16);
1816 }
1817 
1818 void ff_avg_cavs_qpel16_mc00_mmxext(uint8_t *dst, uint8_t *src, int stride)
1819 {
1820  avg_pixels16_mmx(dst, src, stride, 16);
1821 }
1822 
1823 /* VC-1-specific */
1824 void ff_put_vc1_mspel_mc00_mmx(uint8_t *dst, const uint8_t *src,
1825  int stride, int rnd)
1826 {
1827  put_pixels8_mmx(dst, src, stride, 8);
1828 }
1829 
1830 void ff_avg_vc1_mspel_mc00_mmxext(uint8_t *dst, const uint8_t *src,
1831  int stride, int rnd)
1832 {
1833  avg_pixels8_mmxext(dst, src, stride, 8);
1834 }
1835 
1836 static void vorbis_inverse_coupling_3dnow(float *mag, float *ang, int blocksize)
1837 {
1838  int i;
1839  __asm__ volatile ("pxor %%mm7, %%mm7":);
1840  for (i = 0; i < blocksize; i += 2) {
1841  __asm__ volatile (
1842  "movq %0, %%mm0 \n\t"
1843  "movq %1, %%mm1 \n\t"
1844  "movq %%mm0, %%mm2 \n\t"
1845  "movq %%mm1, %%mm3 \n\t"
1846  "pfcmpge %%mm7, %%mm2 \n\t" // m <= 0.0
1847  "pfcmpge %%mm7, %%mm3 \n\t" // a <= 0.0
1848  "pslld $31, %%mm2 \n\t" // keep only the sign bit
1849  "pxor %%mm2, %%mm1 \n\t"
1850  "movq %%mm3, %%mm4 \n\t"
1851  "pand %%mm1, %%mm3 \n\t"
1852  "pandn %%mm1, %%mm4 \n\t"
1853  "pfadd %%mm0, %%mm3 \n\t" // a = m + ((a < 0) & (a ^ sign(m)))
1854  "pfsub %%mm4, %%mm0 \n\t" // m = m + ((a > 0) & (a ^ sign(m)))
1855  "movq %%mm3, %1 \n\t"
1856  "movq %%mm0, %0 \n\t"
1857  : "+m"(mag[i]), "+m"(ang[i])
1858  :: "memory"
1859  );
1860  }
1861  __asm__ volatile ("femms");
1862 }
1863 
1864 static void vorbis_inverse_coupling_sse(float *mag, float *ang, int blocksize)
1865 {
1866  int i;
1867 
1868  __asm__ volatile (
1869  "movaps %0, %%xmm5 \n\t"
1870  :: "m"(ff_pdw_80000000[0])
1871  );
1872  for (i = 0; i < blocksize; i += 4) {
1873  __asm__ volatile (
1874  "movaps %0, %%xmm0 \n\t"
1875  "movaps %1, %%xmm1 \n\t"
1876  "xorps %%xmm2, %%xmm2 \n\t"
1877  "xorps %%xmm3, %%xmm3 \n\t"
1878  "cmpleps %%xmm0, %%xmm2 \n\t" // m <= 0.0
1879  "cmpleps %%xmm1, %%xmm3 \n\t" // a <= 0.0
1880  "andps %%xmm5, %%xmm2 \n\t" // keep only the sign bit
1881  "xorps %%xmm2, %%xmm1 \n\t"
1882  "movaps %%xmm3, %%xmm4 \n\t"
1883  "andps %%xmm1, %%xmm3 \n\t"
1884  "andnps %%xmm1, %%xmm4 \n\t"
1885  "addps %%xmm0, %%xmm3 \n\t" // a = m + ((a < 0) & (a ^ sign(m)))
1886  "subps %%xmm4, %%xmm0 \n\t" // m = m + ((a > 0) & (a ^ sign(m)))
1887  "movaps %%xmm3, %1 \n\t"
1888  "movaps %%xmm0, %0 \n\t"
1889  : "+m"(mag[i]), "+m"(ang[i])
1890  :: "memory"
1891  );
1892  }
1893 }
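
/*
 * Editorial note: both SIMD versions above implement the Vorbis
 * magnitude/angle inverse coupling. A scalar sketch of the same transform,
 * in the branch form that the sign-bit tricks above replace:
 */
static void vorbis_inverse_coupling_ref(float *mag, float *ang, int blocksize)
{
    int i;
    for (i = 0; i < blocksize; i++) {
        float m = mag[i], a = ang[i];
        if (m > 0) {
            if (a > 0) { ang[i] = m - a; }
            else       { ang[i] = m; mag[i] = m + a; }
        } else {
            if (a > 0) { ang[i] = m + a; }
            else       { ang[i] = m; mag[i] = m - a; }
        }
    }
}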
1894 
1895 static void vector_clipf_sse(float *dst, const float *src,
1896  float min, float max, int len)
1897 {
1898  x86_reg i = (len - 16) * 4;
1899  __asm__ volatile (
1900  "movss %3, %%xmm4 \n\t"
1901  "movss %4, %%xmm5 \n\t"
1902  "shufps $0, %%xmm4, %%xmm4 \n\t"
1903  "shufps $0, %%xmm5, %%xmm5 \n\t"
1904  "1: \n\t"
1905  "movaps (%2, %0), %%xmm0 \n\t" // 3/1 on intel
1906  "movaps 16(%2, %0), %%xmm1 \n\t"
1907  "movaps 32(%2, %0), %%xmm2 \n\t"
1908  "movaps 48(%2, %0), %%xmm3 \n\t"
1909  "maxps %%xmm4, %%xmm0 \n\t"
1910  "maxps %%xmm4, %%xmm1 \n\t"
1911  "maxps %%xmm4, %%xmm2 \n\t"
1912  "maxps %%xmm4, %%xmm3 \n\t"
1913  "minps %%xmm5, %%xmm0 \n\t"
1914  "minps %%xmm5, %%xmm1 \n\t"
1915  "minps %%xmm5, %%xmm2 \n\t"
1916  "minps %%xmm5, %%xmm3 \n\t"
1917  "movaps %%xmm0, (%1, %0) \n\t"
1918  "movaps %%xmm1, 16(%1, %0) \n\t"
1919  "movaps %%xmm2, 32(%1, %0) \n\t"
1920  "movaps %%xmm3, 48(%1, %0) \n\t"
1921  "sub $64, %0 \n\t"
1922  "jge 1b \n\t"
1923  : "+&r"(i)
1924  : "r"(dst), "r"(src), "m"(min), "m"(max)
1925  : "memory"
1926  );
1927 }
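
/*
 * Editorial note: scalar equivalent of vector_clipf_sse() above. The SSE
 * loop uses movaps and steps 64 bytes per iteration, so it relies on
 * 16-byte-aligned buffers and len being a multiple of 16 floats:
 */
static void vector_clipf_ref(float *dst, const float *src,
                             float min, float max, int len)
{
    int i;
    for (i = 0; i < len; i++) {
        float v = src[i];
        dst[i] = v < min ? min : v > max ? max : v;
    }
}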
1928 
1929 #endif /* HAVE_INLINE_ASM */
1930 
1931 int32_t ff_scalarproduct_int16_mmxext(const int16_t *v1, const int16_t *v2,
1932  int order);
1933 int32_t ff_scalarproduct_int16_sse2(const int16_t *v1, const int16_t *v2,
1934  int order);
1935 int32_t ff_scalarproduct_and_madd_int16_mmxext(int16_t *v1, const int16_t *v2,
1936  const int16_t *v3,
1937  int order, int mul);
1938 int32_t ff_scalarproduct_and_madd_int16_sse2(int16_t *v1, const int16_t *v2,
1939  const int16_t *v3,
1940  int order, int mul);
1941 int32_t ff_scalarproduct_and_madd_int16_ssse3(int16_t *v1, const int16_t *v2,
1942  const int16_t *v3,
1943  int order, int mul);
1944 
1945 void ff_apply_window_int16_round_mmxext(int16_t *output, const int16_t *input,
1946  const int16_t *window, unsigned int len);
1947 void ff_apply_window_int16_round_sse2(int16_t *output, const int16_t *input,
1948  const int16_t *window, unsigned int len);
1949 void ff_apply_window_int16_mmxext(int16_t *output, const int16_t *input,
1950  const int16_t *window, unsigned int len);
1951 void ff_apply_window_int16_sse2(int16_t *output, const int16_t *input,
1952  const int16_t *window, unsigned int len);
1953 void ff_apply_window_int16_ssse3(int16_t *output, const int16_t *input,
1954  const int16_t *window, unsigned int len);
1955 void ff_apply_window_int16_ssse3_atom(int16_t *output, const int16_t *input,
1956  const int16_t *window, unsigned int len);
1957 
1958 void ff_bswap32_buf_ssse3(uint32_t *dst, const uint32_t *src, int w);
1959 void ff_bswap32_buf_sse2(uint32_t *dst, const uint32_t *src, int w);
1960 
1961 void ff_add_hfyu_median_prediction_mmxext(uint8_t *dst, const uint8_t *top,
1962  const uint8_t *diff, int w,
1963  int *left, int *left_top);
1964 int ff_add_hfyu_left_prediction_ssse3(uint8_t *dst, const uint8_t *src,
1965  int w, int left);
1966 int ff_add_hfyu_left_prediction_sse4(uint8_t *dst, const uint8_t *src,
1967  int w, int left);
1968 
1969 float ff_scalarproduct_float_sse(const float *v1, const float *v2, int order);
1970 
1971 void ff_vector_fmul_reverse_sse(float *dst, const float *src0,
1972  const float *src1, int len);
1973 void ff_vector_fmul_reverse_avx(float *dst, const float *src0,
1974  const float *src1, int len);
1975 
1976 void ff_vector_fmul_add_sse(float *dst, const float *src0, const float *src1,
1977  const float *src2, int len);
1978 void ff_vector_fmul_add_avx(float *dst, const float *src0, const float *src1,
1979  const float *src2, int len);
1980 
1981 void ff_vector_clip_int32_mmx (int32_t *dst, const int32_t *src,
1982  int32_t min, int32_t max, unsigned int len);
1983 void ff_vector_clip_int32_sse2 (int32_t *dst, const int32_t *src,
1984  int32_t min, int32_t max, unsigned int len);
1985 void ff_vector_clip_int32_int_sse2(int32_t *dst, const int32_t *src,
1986  int32_t min, int32_t max, unsigned int len);
1987 void ff_vector_clip_int32_sse4 (int32_t *dst, const int32_t *src,
1988  int32_t min, int32_t max, unsigned int len);
1989 
1990 extern void ff_butterflies_float_interleave_sse(float *dst, const float *src0,
1991  const float *src1, int len);
1992 extern void ff_butterflies_float_interleave_avx(float *dst, const float *src0,
1993  const float *src1, int len);
1994 
1995 #define SET_QPEL_FUNCS(PFX, IDX, SIZE, CPU, PREFIX) \
1996  do { \
1997  c->PFX ## _pixels_tab[IDX][ 0] = PREFIX ## PFX ## SIZE ## _mc00_ ## CPU; \
1998  c->PFX ## _pixels_tab[IDX][ 1] = PREFIX ## PFX ## SIZE ## _mc10_ ## CPU; \
1999  c->PFX ## _pixels_tab[IDX][ 2] = PREFIX ## PFX ## SIZE ## _mc20_ ## CPU; \
2000  c->PFX ## _pixels_tab[IDX][ 3] = PREFIX ## PFX ## SIZE ## _mc30_ ## CPU; \
2001  c->PFX ## _pixels_tab[IDX][ 4] = PREFIX ## PFX ## SIZE ## _mc01_ ## CPU; \
2002  c->PFX ## _pixels_tab[IDX][ 5] = PREFIX ## PFX ## SIZE ## _mc11_ ## CPU; \
2003  c->PFX ## _pixels_tab[IDX][ 6] = PREFIX ## PFX ## SIZE ## _mc21_ ## CPU; \
2004  c->PFX ## _pixels_tab[IDX][ 7] = PREFIX ## PFX ## SIZE ## _mc31_ ## CPU; \
2005  c->PFX ## _pixels_tab[IDX][ 8] = PREFIX ## PFX ## SIZE ## _mc02_ ## CPU; \
2006  c->PFX ## _pixels_tab[IDX][ 9] = PREFIX ## PFX ## SIZE ## _mc12_ ## CPU; \
2007  c->PFX ## _pixels_tab[IDX][10] = PREFIX ## PFX ## SIZE ## _mc22_ ## CPU; \
2008  c->PFX ## _pixels_tab[IDX][11] = PREFIX ## PFX ## SIZE ## _mc32_ ## CPU; \
2009  c->PFX ## _pixels_tab[IDX][12] = PREFIX ## PFX ## SIZE ## _mc03_ ## CPU; \
2010  c->PFX ## _pixels_tab[IDX][13] = PREFIX ## PFX ## SIZE ## _mc13_ ## CPU; \
2011  c->PFX ## _pixels_tab[IDX][14] = PREFIX ## PFX ## SIZE ## _mc23_ ## CPU; \
2012  c->PFX ## _pixels_tab[IDX][15] = PREFIX ## PFX ## SIZE ## _mc33_ ## CPU; \
2013  } while (0)
2014 
2015 #define SET_HPEL_FUNCS(PFX, IDX, SIZE, CPU) \
2016  do { \
2017  c->PFX ## _pixels_tab[IDX][0] = PFX ## _pixels ## SIZE ## _ ## CPU; \
2018  c->PFX ## _pixels_tab[IDX][1] = PFX ## _pixels ## SIZE ## _x2_ ## CPU; \
2019  c->PFX ## _pixels_tab[IDX][2] = PFX ## _pixels ## SIZE ## _y2_ ## CPU; \
2020  c->PFX ## _pixels_tab[IDX][3] = PFX ## _pixels ## SIZE ## _xy2_ ## CPU; \
2021  } while (0)
2022 
2023 #define H264_QPEL_FUNCS(x, y, CPU) \
2024  do { \
2025  c->put_h264_qpel_pixels_tab[0][x + y * 4] = put_h264_qpel16_mc ## x ## y ## _ ## CPU; \
2026  c->put_h264_qpel_pixels_tab[1][x + y * 4] = put_h264_qpel8_mc ## x ## y ## _ ## CPU; \
2027  c->avg_h264_qpel_pixels_tab[0][x + y * 4] = avg_h264_qpel16_mc ## x ## y ## _ ## CPU; \
2028  c->avg_h264_qpel_pixels_tab[1][x + y * 4] = avg_h264_qpel8_mc ## x ## y ## _ ## CPU; \
2029  } while (0)
2030 
2031 #define H264_QPEL_FUNCS_10(x, y, CPU) \
2032  do { \
2033  c->put_h264_qpel_pixels_tab[0][x + y * 4] = ff_put_h264_qpel16_mc ## x ## y ## _10_ ## CPU; \
2034  c->put_h264_qpel_pixels_tab[1][x + y * 4] = ff_put_h264_qpel8_mc ## x ## y ## _10_ ## CPU; \
2035  c->avg_h264_qpel_pixels_tab[0][x + y * 4] = ff_avg_h264_qpel16_mc ## x ## y ## _10_ ## CPU; \
2036  c->avg_h264_qpel_pixels_tab[1][x + y * 4] = ff_avg_h264_qpel8_mc ## x ## y ## _10_ ## CPU; \
2037  } while (0)
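
/*
 * Editorial note: the qpel table macros share one layout: entry x + 4 * y
 * holds the function for quarter-pel offset (x, y), mc00 .. mc33, and index
 * [0]/[1]/[2] selects block size 16/8/4. A dispatch sketch (hypothetical
 * helper, illustration only):
 */
static inline void mc_dispatch_example(DSPContext *c, uint8_t *dst,
                                       uint8_t *src, int stride,
                                       int qx, int qy)
{
    c->put_h264_qpel_pixels_tab[0][qx + 4 * qy](dst, src, stride);
}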
2038 
2039 static void dsputil_init_mmx(DSPContext *c, AVCodecContext *avctx, int mm_flags)
2040 {
2041  const int high_bit_depth = avctx->bits_per_raw_sample > 8;
2042 
2043 #if HAVE_INLINE_ASM
2044  c->put_pixels_clamped = ff_put_pixels_clamped_mmx;
2045  c->put_signed_pixels_clamped = ff_put_signed_pixels_clamped_mmx;
2046  c->add_pixels_clamped = ff_add_pixels_clamped_mmx;
2047 
2048  if (!high_bit_depth) {
2049  c->clear_block = clear_block_mmx;
2050  c->clear_blocks = clear_blocks_mmx;
2051  c->draw_edges = draw_edges_mmx;
2052 
2053  SET_HPEL_FUNCS(put, 0, 16, mmx);
2054  SET_HPEL_FUNCS(put_no_rnd, 0, 16, mmx);
2055  SET_HPEL_FUNCS(avg, 0, 16, mmx);
2056  SET_HPEL_FUNCS(avg_no_rnd, 0, 16, mmx);
2057  SET_HPEL_FUNCS(put, 1, 8, mmx);
2058  SET_HPEL_FUNCS(put_no_rnd, 1, 8, mmx);
2059  SET_HPEL_FUNCS(avg, 1, 8, mmx);
2060  SET_HPEL_FUNCS(avg_no_rnd, 1, 8, mmx);
2061 
2062  switch (avctx->idct_algo) {
2063  case FF_IDCT_AUTO:
2064  case FF_IDCT_SIMPLEMMX:
2065  c->idct_put = ff_simple_idct_put_mmx;
2066  c->idct_add = ff_simple_idct_add_mmx;
2067  c->idct = ff_simple_idct_mmx;
2068  c->idct_permutation_type = FF_SIMPLE_IDCT_PERM;
2069  break;
2070  case FF_IDCT_XVIDMMX:
2071  c->idct_put = ff_idct_xvid_mmx_put;
2072  c->idct_add = ff_idct_xvid_mmx_add;
2073  c->idct = ff_idct_xvid_mmx;
2074  break;
2075  }
2076  }
2077 
2078  c->gmc = gmc_mmx;
2079 
2080  c->add_bytes = add_bytes_mmx;
2081 
2082  if (CONFIG_H263_DECODER || CONFIG_H263_ENCODER) {
2083  c->h263_v_loop_filter = h263_v_loop_filter_mmx;
2084  c->h263_h_loop_filter = h263_h_loop_filter_mmx;
2085  }
2086 #endif /* HAVE_INLINE_ASM */
2087 
2088 #if HAVE_YASM
2089  if (!high_bit_depth && CONFIG_H264CHROMA) {
2090  c->put_h264_chroma_pixels_tab[0] = ff_put_h264_chroma_mc8_rnd_mmx;
2091  c->put_h264_chroma_pixels_tab[1] = ff_put_h264_chroma_mc4_mmx;
2092  }
2093 
2094  c->vector_clip_int32 = ff_vector_clip_int32_mmx;
2095 #endif
2096 
2097 }
2098 
2099 static void dsputil_init_mmxext(DSPContext *c, AVCodecContext *avctx,
2100  int mm_flags)
2101 {
2102  const int bit_depth = avctx->bits_per_raw_sample;
2103  const int high_bit_depth = bit_depth > 8;
2104 
2105 #if HAVE_INLINE_ASM
2106  SET_QPEL_FUNCS(avg_qpel, 0, 16, mmxext, );
2107  SET_QPEL_FUNCS(avg_qpel, 1, 8, mmxext, );
2108  SET_QPEL_FUNCS(avg_2tap_qpel, 0, 16, mmxext, );
2109  SET_QPEL_FUNCS(avg_2tap_qpel, 1, 8, mmxext, );
2110 
2111  SET_QPEL_FUNCS(put_qpel, 0, 16, mmxext, );
2112  SET_QPEL_FUNCS(put_qpel, 1, 8, mmxext, );
2113  SET_QPEL_FUNCS(put_2tap_qpel, 0, 16, mmxext, );
2114  SET_QPEL_FUNCS(put_2tap_qpel, 1, 8, mmxext, );
2115  SET_QPEL_FUNCS(put_no_rnd_qpel, 0, 16, mmxext, );
2116  SET_QPEL_FUNCS(put_no_rnd_qpel, 1, 8, mmxext, );
2117 
2118  if (!high_bit_depth) {
2119  c->put_pixels_tab[0][1] = put_pixels16_x2_mmxext;
2120  c->put_pixels_tab[0][2] = put_pixels16_y2_mmxext;
2121 
2122  c->avg_pixels_tab[0][0] = avg_pixels16_mmxext;
2123  c->avg_pixels_tab[0][1] = avg_pixels16_x2_mmxext;
2124  c->avg_pixels_tab[0][2] = avg_pixels16_y2_mmxext;
2125 
2126  c->put_pixels_tab[1][1] = put_pixels8_x2_mmxext;
2127  c->put_pixels_tab[1][2] = put_pixels8_y2_mmxext;
2128 
2129  c->avg_pixels_tab[1][0] = avg_pixels8_mmxext;
2130  c->avg_pixels_tab[1][1] = avg_pixels8_x2_mmxext;
2131  c->avg_pixels_tab[1][2] = avg_pixels8_y2_mmxext;
2132  }
2133 
2134  if (!(avctx->flags & CODEC_FLAG_BITEXACT)) {
2135  if (!high_bit_depth) {
2136  c->put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x2_mmxext;
2137  c->put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y2_mmxext;
2138  c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_mmxext;
2139  c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_mmxext;
2140 
2141  c->avg_pixels_tab[0][3] = avg_pixels16_xy2_mmxext;
2142  c->avg_pixels_tab[1][3] = avg_pixels8_xy2_mmxext;
2143  }
2144  }
2145 
2146  if (!high_bit_depth && avctx->idct_algo == FF_IDCT_XVIDMMX) {
2147  c->idct_put = ff_idct_xvid_mmxext_put;
2148  c->idct_add = ff_idct_xvid_mmxext_add;
2149  c->idct = ff_idct_xvid_mmxext;
2150  }
2151 
2152  if (CONFIG_VP3_DECODER && (avctx->codec_id == AV_CODEC_ID_VP3 ||
2153  avctx->codec_id == AV_CODEC_ID_THEORA)) {
2154  c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_exact_mmxext;
2155  c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_exact_mmxext;
2156  }
2157 #endif /* HAVE_INLINE_ASM */
2158 
2159 #if HAVE_MMXEXT_EXTERNAL
2160  if (CONFIG_H264QPEL) {
2161  if (!high_bit_depth) {
2162  SET_QPEL_FUNCS(put_h264_qpel, 0, 16, mmxext, );
2163  SET_QPEL_FUNCS(put_h264_qpel, 1, 8, mmxext, );
2164  SET_QPEL_FUNCS(put_h264_qpel, 2, 4, mmxext, );
2165  SET_QPEL_FUNCS(avg_h264_qpel, 0, 16, mmxext, );
2166  SET_QPEL_FUNCS(avg_h264_qpel, 1, 8, mmxext, );
2167  SET_QPEL_FUNCS(avg_h264_qpel, 2, 4, mmxext, );
2168  } else if (bit_depth == 10) {
2169 #if !ARCH_X86_64
2170  SET_QPEL_FUNCS(avg_h264_qpel, 0, 16, 10_mmxext, ff_);
2171  SET_QPEL_FUNCS(put_h264_qpel, 0, 16, 10_mmxext, ff_);
2172  SET_QPEL_FUNCS(put_h264_qpel, 1, 8, 10_mmxext, ff_);
2173  SET_QPEL_FUNCS(avg_h264_qpel, 1, 8, 10_mmxext, ff_);
2174 #endif
2175  SET_QPEL_FUNCS(put_h264_qpel, 2, 4, 10_mmxext, ff_);
2176  SET_QPEL_FUNCS(avg_h264_qpel, 2, 4, 10_mmxext, ff_);
2177  }
2178  }
2179 
2180  if (!high_bit_depth && CONFIG_H264CHROMA) {
2181  c->avg_h264_chroma_pixels_tab[0] = ff_avg_h264_chroma_mc8_rnd_mmxext;
2182  c->avg_h264_chroma_pixels_tab[1] = ff_avg_h264_chroma_mc4_mmxext;
2183  c->avg_h264_chroma_pixels_tab[2] = ff_avg_h264_chroma_mc2_mmxext;
2184  c->put_h264_chroma_pixels_tab[2] = ff_put_h264_chroma_mc2_mmxext;
2185  }
2186  if (bit_depth == 10 && CONFIG_H264CHROMA) {
2187  c->put_h264_chroma_pixels_tab[2] = ff_put_h264_chroma_mc2_10_mmxext;
2188  c->avg_h264_chroma_pixels_tab[2] = ff_avg_h264_chroma_mc2_10_mmxext;
2189  c->put_h264_chroma_pixels_tab[1] = ff_put_h264_chroma_mc4_10_mmxext;
2190  c->avg_h264_chroma_pixels_tab[1] = ff_avg_h264_chroma_mc4_10_mmxext;
2191  }
2192 
2193  /* slower than cmov version on AMD */
2194  if (!(mm_flags & AV_CPU_FLAG_3DNOW))
2195  c->add_hfyu_median_prediction = ff_add_hfyu_median_prediction_mmxext;
2196 
2197  c->scalarproduct_int16 = ff_scalarproduct_int16_mmxext;
2198  c->scalarproduct_and_madd_int16 = ff_scalarproduct_and_madd_int16_mmxext;
2199 
2200  if (avctx->flags & CODEC_FLAG_BITEXACT) {
2201  c->apply_window_int16 = ff_apply_window_int16_mmxext;
2202  } else {
2203  c->apply_window_int16 = ff_apply_window_int16_round_mmxext;
2204  }
2205 #endif /* HAVE_MMXEXT_EXTERNAL */
2206 }
2207 
2208 static void dsputil_init_3dnow(DSPContext *c, AVCodecContext *avctx,
2209  int mm_flags)
2210 {
2211  const int high_bit_depth = avctx->bits_per_raw_sample > 8;
2212 
2213 #if HAVE_INLINE_ASM
2214  if (!high_bit_depth) {
2215  c->put_pixels_tab[0][1] = put_pixels16_x2_3dnow;
2216  c->put_pixels_tab[0][2] = put_pixels16_y2_3dnow;
2217 
2218  c->avg_pixels_tab[0][0] = avg_pixels16_3dnow;
2219  c->avg_pixels_tab[0][1] = avg_pixels16_x2_3dnow;
2220  c->avg_pixels_tab[0][2] = avg_pixels16_y2_3dnow;
2221 
2222  c->put_pixels_tab[1][1] = put_pixels8_x2_3dnow;
2223  c->put_pixels_tab[1][2] = put_pixels8_y2_3dnow;
2224 
2225  c->avg_pixels_tab[1][0] = avg_pixels8_3dnow;
2226  c->avg_pixels_tab[1][1] = avg_pixels8_x2_3dnow;
2227  c->avg_pixels_tab[1][2] = avg_pixels8_y2_3dnow;
2228 
2229  if (!(avctx->flags & CODEC_FLAG_BITEXACT)){
2230  c->put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x2_3dnow;
2231  c->put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y2_3dnow;
2232  c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_3dnow;
2233  c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_3dnow;
2234 
2235  c->avg_pixels_tab[0][3] = avg_pixels16_xy2_3dnow;
2236  c->avg_pixels_tab[1][3] = avg_pixels8_xy2_3dnow;
2237  }
2238  }
2239 
2240  if (CONFIG_VP3_DECODER && (avctx->codec_id == AV_CODEC_ID_VP3 ||
2241  avctx->codec_id == AV_CODEC_ID_THEORA)) {
2242  c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_exact_3dnow;
2243  c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_exact_3dnow;
2244  }
2245 
2246  c->vorbis_inverse_coupling = vorbis_inverse_coupling_3dnow;
2247 #endif /* HAVE_INLINE_ASM */
2248 
2249 #if HAVE_YASM
2250  if (!high_bit_depth && CONFIG_H264CHROMA) {
2251  c->avg_h264_chroma_pixels_tab[0] = ff_avg_h264_chroma_mc8_rnd_3dnow;
2252  c->avg_h264_chroma_pixels_tab[1] = ff_avg_h264_chroma_mc4_3dnow;
2253  }
2254 #endif /* HAVE_YASM */
2255 }
2256 
2257 static void dsputil_init_sse(DSPContext *c, AVCodecContext *avctx, int mm_flags)
2258 {
2259  const int high_bit_depth = avctx->bits_per_raw_sample > 8;
2260 
2261 #if HAVE_INLINE_ASM
2262  if (!high_bit_depth) {
2263  if (!(CONFIG_MPEG_XVMC_DECODER && avctx->xvmc_acceleration > 1)) {
2264  /* XvMCCreateBlocks() may not allocate 16-byte aligned blocks */
2265  c->clear_block = clear_block_sse;
2266  c->clear_blocks = clear_blocks_sse;
2267  }
2268  }
2269 
2270  c->vorbis_inverse_coupling = vorbis_inverse_coupling_sse;
2271 
2272  c->vector_clipf = vector_clipf_sse;
2273 #endif /* HAVE_INLINE_ASM */
2274 
2275 #if HAVE_YASM
2276  c->vector_fmul_reverse = ff_vector_fmul_reverse_sse;
2277  c->vector_fmul_add = ff_vector_fmul_add_sse;
2278 
2279  c->scalarproduct_float = ff_scalarproduct_float_sse;
2280  c->butterflies_float_interleave = ff_butterflies_float_interleave_sse;
2281 #endif /* HAVE_YASM */
2282 }
2283 
2284 static void dsputil_init_sse2(DSPContext *c, AVCodecContext *avctx,
2285  int mm_flags)
2286 {
2287  const int bit_depth = avctx->bits_per_raw_sample;
2288  const int high_bit_depth = bit_depth > 8;
2289 
2290 #if HAVE_SSE2_INLINE
2291  if (!high_bit_depth && avctx->idct_algo == FF_IDCT_XVIDMMX) {
2292  c->idct_put = ff_idct_xvid_sse2_put;
2293  c->idct_add = ff_idct_xvid_sse2_add;
2294  c->idct = ff_idct_xvid_sse2;
2295  c->idct_permutation_type = FF_SSE2_IDCT_PERM;
2296  }
2297 #endif /* HAVE_SSE2_INLINE */
2298 
2299 #if HAVE_SSE2_EXTERNAL
2300  if (!(mm_flags & AV_CPU_FLAG_SSE2SLOW)) {
2301  // these functions are slower than mmx on AMD, but faster on Intel
2302  if (!high_bit_depth) {
2303  c->put_pixels_tab[0][0] = ff_put_pixels16_sse2;
2304  c->put_no_rnd_pixels_tab[0][0] = ff_put_pixels16_sse2;
2305  c->avg_pixels_tab[0][0] = ff_avg_pixels16_sse2;
2306  if (CONFIG_H264QPEL)
2307  H264_QPEL_FUNCS(0, 0, sse2);
2308  }
2309  }
2310 
2311  if (!high_bit_depth && CONFIG_H264QPEL) {
2312  H264_QPEL_FUNCS(0, 1, sse2);
2313  H264_QPEL_FUNCS(0, 2, sse2);
2314  H264_QPEL_FUNCS(0, 3, sse2);
2315  H264_QPEL_FUNCS(1, 1, sse2);
2316  H264_QPEL_FUNCS(1, 2, sse2);
2317  H264_QPEL_FUNCS(1, 3, sse2);
2318  H264_QPEL_FUNCS(2, 1, sse2);
2319  H264_QPEL_FUNCS(2, 2, sse2);
2320  H264_QPEL_FUNCS(2, 3, sse2);
2321  H264_QPEL_FUNCS(3, 1, sse2);
2322  H264_QPEL_FUNCS(3, 2, sse2);
2323  H264_QPEL_FUNCS(3, 3, sse2);
2324  }
2325 
2326  if (bit_depth == 10) {
2327  if (CONFIG_H264QPEL) {
2328  SET_QPEL_FUNCS(put_h264_qpel, 0, 16, 10_sse2, ff_);
2329  SET_QPEL_FUNCS(put_h264_qpel, 1, 8, 10_sse2, ff_);
2330  SET_QPEL_FUNCS(avg_h264_qpel, 0, 16, 10_sse2, ff_);
2331  SET_QPEL_FUNCS(avg_h264_qpel, 1, 8, 10_sse2, ff_);
2332  H264_QPEL_FUNCS_10(1, 0, sse2_cache64);
2333  H264_QPEL_FUNCS_10(2, 0, sse2_cache64);
2334  H264_QPEL_FUNCS_10(3, 0, sse2_cache64);
2335  }
2336  if (CONFIG_H264CHROMA) {
2337  c->put_h264_chroma_pixels_tab[0] = ff_put_h264_chroma_mc8_10_sse2;
2338  c->avg_h264_chroma_pixels_tab[0] = ff_avg_h264_chroma_mc8_10_sse2;
2339  }
2340  }
2341 
2342  c->scalarproduct_int16 = ff_scalarproduct_int16_sse2;
2343  c->scalarproduct_and_madd_int16 = ff_scalarproduct_and_madd_int16_sse2;
2344  if (mm_flags & AV_CPU_FLAG_ATOM) {
2345  c->vector_clip_int32 = ff_vector_clip_int32_int_sse2;
2346  } else {
2347  c->vector_clip_int32 = ff_vector_clip_int32_sse2;
2348  }
2349  if (avctx->flags & CODEC_FLAG_BITEXACT) {
2350  c->apply_window_int16 = ff_apply_window_int16_sse2;
2351  } else if (!(mm_flags & AV_CPU_FLAG_SSE2SLOW)) {
2352  c->apply_window_int16 = ff_apply_window_int16_round_sse2;
2353  }
2354  c->bswap_buf = ff_bswap32_buf_sse2;
2355 #endif /* HAVE_SSE2_EXTERNAL */
2356 }
2357 
2358 static void dsputil_init_ssse3(DSPContext *c, AVCodecContext *avctx,
2359  int mm_flags)
2360 {
2361 #if HAVE_SSSE3_EXTERNAL
2362  const int high_bit_depth = avctx->bits_per_raw_sample > 8;
2363  const int bit_depth = avctx->bits_per_raw_sample;
2364 
2365  if (!high_bit_depth && CONFIG_H264QPEL) {
2366  H264_QPEL_FUNCS(1, 0, ssse3);
2367  H264_QPEL_FUNCS(1, 1, ssse3);
2368  H264_QPEL_FUNCS(1, 2, ssse3);
2369  H264_QPEL_FUNCS(1, 3, ssse3);
2370  H264_QPEL_FUNCS(2, 0, ssse3);
2371  H264_QPEL_FUNCS(2, 1, ssse3);
2372  H264_QPEL_FUNCS(2, 2, ssse3);
2373  H264_QPEL_FUNCS(2, 3, ssse3);
2374  H264_QPEL_FUNCS(3, 0, ssse3);
2375  H264_QPEL_FUNCS(3, 1, ssse3);
2376  H264_QPEL_FUNCS(3, 2, ssse3);
2377  H264_QPEL_FUNCS(3, 3, ssse3);
2378  }
2379  if (bit_depth == 10 && CONFIG_H264QPEL) {
2380  H264_QPEL_FUNCS_10(1, 0, ssse3_cache64);
2381  H264_QPEL_FUNCS_10(2, 0, ssse3_cache64);
2382  H264_QPEL_FUNCS_10(3, 0, ssse3_cache64);
2383  }
2384  if (!high_bit_depth && CONFIG_H264CHROMA) {
2385  c->put_h264_chroma_pixels_tab[0] = ff_put_h264_chroma_mc8_rnd_ssse3;
2386  c->avg_h264_chroma_pixels_tab[0] = ff_avg_h264_chroma_mc8_rnd_ssse3;
2387  c->put_h264_chroma_pixels_tab[1] = ff_put_h264_chroma_mc4_ssse3;
2388  c->avg_h264_chroma_pixels_tab[1] = ff_avg_h264_chroma_mc4_ssse3;
2389  }
2390  c->add_hfyu_left_prediction = ff_add_hfyu_left_prediction_ssse3;
2391  if (mm_flags & AV_CPU_FLAG_SSE4) // not really sse4, just slow on Conroe
2392  c->add_hfyu_left_prediction = ff_add_hfyu_left_prediction_sse4;
2393 
2394  if (mm_flags & AV_CPU_FLAG_ATOM)
2395  c->apply_window_int16 = ff_apply_window_int16_ssse3_atom;
2396  else
2397  c->apply_window_int16 = ff_apply_window_int16_ssse3;
2398  if (!(mm_flags & (AV_CPU_FLAG_SSE42|AV_CPU_FLAG_3DNOW))) // cachesplit
2399  c->scalarproduct_and_madd_int16 = ff_scalarproduct_and_madd_int16_ssse3;
2400  c->bswap_buf = ff_bswap32_buf_ssse3;
2401 #endif /* HAVE_SSSE3_EXTERNAL */
2402 }
2403 
2404 static void dsputil_init_sse4(DSPContext *c, AVCodecContext *avctx,
2405  int mm_flags)
2406 {
2407 #if HAVE_SSE4_EXTERNAL
2408  c->vector_clip_int32 = ff_vector_clip_int32_sse4;
2409 #endif /* HAVE_SSE4_EXTERNAL */
2410 }
2411 
2412 static void dsputil_init_avx(DSPContext *c, AVCodecContext *avctx, int mm_flags)
2413 {
2414 #if HAVE_AVX_EXTERNAL
2415  const int bit_depth = avctx->bits_per_raw_sample;
2416 
2417  if (bit_depth == 10) {
2418  // AVX implies !cache64.
2419  // TODO: Port cache(32|64) detection from x264.
2420  if (CONFIG_H264QPEL) {
2421  H264_QPEL_FUNCS_10(1, 0, sse2);
2422  H264_QPEL_FUNCS_10(2, 0, sse2);
2423  H264_QPEL_FUNCS_10(3, 0, sse2);
2424  }
2425 
2426  if (CONFIG_H264CHROMA) {
2427  c->put_h264_chroma_pixels_tab[0] = ff_put_h264_chroma_mc8_10_avx;
2428  c->avg_h264_chroma_pixels_tab[0] = ff_avg_h264_chroma_mc8_10_avx;
2429  }
2430  }
2431  c->butterflies_float_interleave = ff_butterflies_float_interleave_avx;
2432  c->vector_fmul_reverse = ff_vector_fmul_reverse_avx;
2433  c->vector_fmul_add = ff_vector_fmul_add_avx;
2434 #endif /* HAVE_AVX_EXTERNAL */
2435 }
2436 
2437 void ff_dsputil_init_mmx(DSPContext *c, AVCodecContext *avctx)
2438 {
2439  int mm_flags = av_get_cpu_flags();
2440 
2441 #if HAVE_7REGS && HAVE_INLINE_ASM
2442  if (mm_flags & AV_CPU_FLAG_CMOV)
2443  c->add_hfyu_median_prediction = add_hfyu_median_prediction_cmov;
2444 #endif
2445 
2446  if (mm_flags & AV_CPU_FLAG_MMX)
2447  dsputil_init_mmx(c, avctx, mm_flags);
2448 
2449  if (mm_flags & AV_CPU_FLAG_MMXEXT)
2450  dsputil_init_mmxext(c, avctx, mm_flags);
2451 
2452  if (mm_flags & AV_CPU_FLAG_3DNOW)
2453  dsputil_init_3dnow(c, avctx, mm_flags);
2454 
2455  if (mm_flags & AV_CPU_FLAG_SSE)
2456  dsputil_init_sse(c, avctx, mm_flags);
2457 
2458  if (mm_flags & AV_CPU_FLAG_SSE2)
2459  dsputil_init_sse2(c, avctx, mm_flags);
2460 
2461  if (mm_flags & AV_CPU_FLAG_SSSE3)
2462  dsputil_init_ssse3(c, avctx, mm_flags);
2463 
2464  if (mm_flags & AV_CPU_FLAG_SSE4)
2465  dsputil_init_sse4(c, avctx, mm_flags);
2466 
2467  if (mm_flags & AV_CPU_FLAG_AVX)
2468  dsputil_init_avx(c, avctx, mm_flags);
2469 
2470  if (CONFIG_ENCODERS)
2471  ff_dsputilenc_init_mmx(c, avctx);
2472 }
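
/*
 * Editorial note: the flag checks above are cumulative. Each more capable
 * ISA's init runs after the previous one and overwrites individual function
 * pointers, so the context ends up with the best available implementation
 * per function. A sketch of the call site (the generic DSP init hands off
 * to this file on x86 after installing the scalar C defaults):
 */
static void dsputil_init_example(DSPContext *c, AVCodecContext *avctx)
{
    /* scalar C versions are installed first, then the x86 init above
       replaces whichever pointers it has faster versions for */
    if (ARCH_X86)
        ff_dsputil_init_mmx(c, avctx);
}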