idct_sse2_xvid.c
/*
 * XVID MPEG-4 VIDEO CODEC
 * - SSE2 inverse discrete cosine transform -
 *
 * Copyright(C) 2003 Pascal Massimino <skal@planet-d.net>
 *
 * Conversion to gcc syntax with modifications
 * by Alexander Strange <astrange@ithinksw.com>
 *
 * Originally from dct/x86_asm/fdct_sse2_skal.asm in Xvid.
 *
 * This file is part of Libav.
 *
 * Vertical pass is an implementation of the scheme:
 * Loeffler C., Ligtenberg A., and Moschytz C.S.:
 * Practical Fast 1D DCT Algorithm with Eleven Multiplications,
 * Proc. ICASSP 1989, 988-991.
 *
 * Horizontal pass is a double 4x4 vector/matrix multiplication,
 * (see also Intel's Application Note 922:
 * http://developer.intel.com/vtune/cbts/strmsimd/922down.htm
 * Copyright (C) 1999 Intel Corporation)
 *
 * More details at http://skal.planet-d.net/coding/dct.html
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with Libav; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavcodec/dsputil.h"
#include "libavutil/internal.h"
#include "libavutil/mem.h"
#include "libavutil/x86/asm.h"
#include "idct_xvid.h"
#include "dsputil_mmx.h"

#if HAVE_INLINE_ASM

#define X8(x) x,x,x,x,x,x,x,x

#define ROW_SHIFT 11
#define COL_SHIFT 6
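/* Note: the asm below hard-codes these shifts ("psrad $11" in the row pass,
 * "psraw $6" in the column pass); the defines document them rather than
 * parameterize them. */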
59 
60 DECLARE_ASM_CONST(16, int16_t, tan1)[] = {X8(13036)}; // tan( pi/16)
61 DECLARE_ASM_CONST(16, int16_t, tan2)[] = {X8(27146)}; // tan(2pi/16) = sqrt(2)-1
62 DECLARE_ASM_CONST(16, int16_t, tan3)[] = {X8(43790)}; // tan(3pi/16)-1
63 DECLARE_ASM_CONST(16, int16_t, sqrt2)[]= {X8(23170)}; // 0.5/sqrt(2)
64 DECLARE_ASM_CONST(8, uint8_t, m127)[] = {X8(127)};
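
/* How the fixed-point constants above are derived (illustrative sketch only,
 * not part of the build): each is round(x * 2^16) stored in an int16_t, so
 * that pmulhw, which computes (a * b) >> 16 on signed words, multiplies by x.
 * 43790 does not fit in an int16_t; it wraps to 43790 - 65536, which is why
 * the stored value reads as tan(3pi/16) - 1 and the asm compensates with an
 * extra paddsw after the multiply. */
#if 0
#include <math.h>
static int fix16(double x) /* hypothetical helper, for illustration */
{
    return (int) lrint(x * 65536.0);
}
/* fix16(tan(    M_PI / 16)) == 13036
 * fix16(tan(2 * M_PI / 16)) == 27146  (= 65536 * (sqrt(2) - 1))
 * fix16(tan(3 * M_PI / 16)) == 43790  (wraps to 43790 - 65536 as int16_t)
 * fix16(0.5 / sqrt(2))      == 23170 */
#endif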

DECLARE_ASM_CONST(16, int16_t, iTab1)[] = {
    0x4000, 0x539f, 0xc000, 0xac61, 0x4000, 0xdd5d, 0x4000, 0xdd5d,
    0x4000, 0x22a3, 0x4000, 0x22a3, 0xc000, 0x539f, 0x4000, 0xac61,
    0x3249, 0x11a8, 0x4b42, 0xee58, 0x11a8, 0x4b42, 0x11a8, 0xcdb7,
    0x58c5, 0x4b42, 0xa73b, 0xcdb7, 0x3249, 0xa73b, 0x4b42, 0xa73b
};

DECLARE_ASM_CONST(16, int16_t, iTab2)[] = {
    0x58c5, 0x73fc, 0xa73b, 0x8c04, 0x58c5, 0xcff5, 0x58c5, 0xcff5,
    0x58c5, 0x300b, 0x58c5, 0x300b, 0xa73b, 0x73fc, 0x58c5, 0x8c04,
    0x45bf, 0x187e, 0x6862, 0xe782, 0x187e, 0x6862, 0x187e, 0xba41,
    0x7b21, 0x6862, 0x84df, 0xba41, 0x45bf, 0x84df, 0x6862, 0x84df
};

DECLARE_ASM_CONST(16, int16_t, iTab3)[] = {
    0x539f, 0x6d41, 0xac61, 0x92bf, 0x539f, 0xd2bf, 0x539f, 0xd2bf,
    0x539f, 0x2d41, 0x539f, 0x2d41, 0xac61, 0x6d41, 0x539f, 0x92bf,
    0x41b3, 0x1712, 0x6254, 0xe8ee, 0x1712, 0x6254, 0x1712, 0xbe4d,
    0x73fc, 0x6254, 0x8c04, 0xbe4d, 0x41b3, 0x8c04, 0x6254, 0x8c04
};

DECLARE_ASM_CONST(16, int16_t, iTab4)[] = {
    0x4b42, 0x6254, 0xb4be, 0x9dac, 0x4b42, 0xd746, 0x4b42, 0xd746,
    0x4b42, 0x28ba, 0x4b42, 0x28ba, 0xb4be, 0x6254, 0x4b42, 0x9dac,
    0x3b21, 0x14c3, 0x587e, 0xeb3d, 0x14c3, 0x587e, 0x14c3, 0xc4df,
    0x6862, 0x587e, 0x979e, 0xc4df, 0x3b21, 0x979e, 0x587e, 0x979e
};

DECLARE_ASM_CONST(16, int32_t, walkenIdctRounders)[] = {
    65536, 65536, 65536, 65536,
     3597,  3597,  3597,  3597,
     2260,  2260,  2260,  2260,
     1203,  1203,  1203,  1203,
      120,   120,   120,   120,
      512,   512,   512,   512
};
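
/* Conceptual scalar model of one row of the horizontal pass implemented by
 * iMTX_MULT below (illustrative only; the real code splits each row into
 * even/odd 4-element halves, multiplies them against the permuted iTab
 * tables with pmaddwd, and recombines the halves with a butterfly).
 * 'mtx' stands for a hypothetical unpermuted 8x8 coefficient matrix. */
#if 0
static void idct_row_ref(int16_t row[8], const int16_t mtx[8][8], int rounder)
{
    int32_t out[8];
    for (int i = 0; i < 8; i++) {
        int32_t acc = rounder;
        for (int j = 0; j < 8; j++)
            acc += (int32_t) mtx[i][j] * row[j];
        out[i] = acc >> ROW_SHIFT;  /* asm uses psrad $11 */
    }
    for (int i = 0; i < 8; i++)
        row[i] = (int16_t) out[i];  /* asm packs with packssdw (saturating) */
}
#endif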

// Temporary storage before the column pass
#define ROW1 "%%xmm6"
#define ROW3 "%%xmm4"
#define ROW5 "%%xmm5"
#define ROW7 "%%xmm7"

#define CLEAR_ODD(r) "pxor "r","r" \n\t"
#define PUT_ODD(dst) "pshufhw $0x1B, %%xmm2, "dst" \n\t"

#if ARCH_X86_64

# define ROW0 "%%xmm8"
# define REG0 ROW0
# define ROW2 "%%xmm9"
# define REG2 ROW2
# define ROW4 "%%xmm10"
# define REG4 ROW4
# define ROW6 "%%xmm11"
# define REG6 ROW6
# define CLEAR_EVEN(r) CLEAR_ODD(r)
# define PUT_EVEN(dst) PUT_ODD(dst)
# define XMMS "%%xmm12"
# define MOV_32_ONLY "#"
# define SREG2 REG2
# define TAN3 "%%xmm13"
# define TAN1 "%%xmm14"

#else

# define ROW0 "(%0)"
# define REG0 "%%xmm4"
# define ROW2 "2*16(%0)"
# define REG2 "%%xmm4"
# define ROW4 "4*16(%0)"
# define REG4 "%%xmm6"
# define ROW6 "6*16(%0)"
# define REG6 "%%xmm6"
# define CLEAR_EVEN(r)
# define PUT_EVEN(dst) \
    "pshufhw $0x1B, %%xmm2, %%xmm2 \n\t" \
    "movdqa %%xmm2, "dst" \n\t"
# define XMMS "%%xmm2"
# define MOV_32_ONLY "movdqa "
# define SREG2 "%%xmm7"
# define TAN3 "%%xmm0"
# define TAN1 "%%xmm2"

#endif
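
/* x86-64 has 16 XMM registers, so all eight rows can stay in registers
 * between the two passes.  32-bit x86 only has xmm0-xmm7: the even rows are
 * kept in the block buffer (%0) instead, and MOV_32_ONLY emits the extra
 * movdqa loads/stores only there (the "#" turns those lines into asm
 * comments on 64-bit). */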

#define ROUND(x) "paddd "MANGLE(x)

#define JZ(reg, to) \
    "testl "reg","reg" \n\t" \
    "jz "to" \n\t"

#define JNZ(reg, to) \
    "testl "reg","reg" \n\t" \
    "jnz "to" \n\t"

#define TEST_ONE_ROW(src, reg, clear) \
    clear \
    "movq "src", %%mm1 \n\t" \
    "por 8+"src", %%mm1 \n\t" \
    "paddusb %%mm0, %%mm1 \n\t" \
    "pmovmskb %%mm1, "reg" \n\t"

#define TEST_TWO_ROWS(row1, row2, reg1, reg2, clear1, clear2) \
    clear1 \
    clear2 \
    "movq "row1", %%mm1 \n\t" \
    "por 8+"row1", %%mm1 \n\t" \
    "movq "row2", %%mm2 \n\t" \
    "por 8+"row2", %%mm2 \n\t" \
    "paddusb %%mm0, %%mm1 \n\t" \
    "paddusb %%mm0, %%mm2 \n\t" \
    "pmovmskb %%mm1, "reg1" \n\t" \
    "pmovmskb %%mm2, "reg2" \n\t"

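/* Scalar model of the zero-row tests above (illustrative only): %mm0 holds
 * X8(127), so paddusb saturates every nonzero byte to a value >= 128, and
 * pmovmskb then returns a nonzero mask iff the row has any nonzero
 * coefficient. */
#if 0
static int row_is_nonzero_ref(const uint8_t row[16])
{
    int mask = 0;
    for (int i = 0; i < 8; i++) {
        unsigned b = row[i] | row[i + 8];           /* por of the two halves */
        unsigned s = b + 127 > 255 ? 255 : b + 127; /* paddusb with m127     */
        mask |= (s >> 7) << i;                      /* pmovmskb sign bits    */
    }
    return mask; /* 0 iff all 16 bytes (8 int16_t coefficients) are zero */
}
#endif

/* iMTX_MULT: one row of the horizontal pass.  The row is multiplied against
 * the permuted table with four pmaddwd dot products, the rounder is added,
 * the two partial sums are recombined with a butterfly, and the results are
 * shifted down by 11 (ROW_SHIFT).  The trailing "1:" is the common target
 * that JZ/JNZ use to skip the transform of an all-zero row. */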
#define iMTX_MULT(src, table, rounder, put) \
    "movdqa "src", %%xmm3 \n\t" \
    "movdqa %%xmm3, %%xmm0 \n\t" \
    "pshufd $0x11, %%xmm3, %%xmm1 \n\t" /* 4602 */ \
    "punpcklqdq %%xmm0, %%xmm0 \n\t" /* 0246 */ \
    "pmaddwd "table", %%xmm0 \n\t" \
    "pmaddwd 16+"table", %%xmm1 \n\t" \
    "pshufd $0xBB, %%xmm3, %%xmm2 \n\t" /* 5713 */ \
    "punpckhqdq %%xmm3, %%xmm3 \n\t" /* 1357 */ \
    "pmaddwd 32+"table", %%xmm2 \n\t" \
    "pmaddwd 48+"table", %%xmm3 \n\t" \
    "paddd %%xmm1, %%xmm0 \n\t" \
    "paddd %%xmm3, %%xmm2 \n\t" \
    rounder", %%xmm0 \n\t" \
    "movdqa %%xmm2, %%xmm3 \n\t" \
    "paddd %%xmm0, %%xmm2 \n\t" \
    "psubd %%xmm3, %%xmm0 \n\t" \
    "psrad $11, %%xmm2 \n\t" \
    "psrad $11, %%xmm0 \n\t" \
    "packssdw %%xmm0, %%xmm2 \n\t" \
    put \
    "1: \n\t"

#define iLLM_HEAD \
    "movdqa "MANGLE(tan3)", "TAN3" \n\t" \
    "movdqa "MANGLE(tan1)", "TAN1" \n\t"

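/* iLLM_PASS: full column (vertical) pass, following the Loeffler-Ligtenberg-
 * Moschytz scheme cited in the header.  Reads the eight transformed rows,
 * shifts the results down by 6 (COL_SHIFT) and stores them back into the
 * block. */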
#define iLLM_PASS(dct) \
    "movdqa "TAN3", %%xmm1 \n\t" \
    "movdqa "TAN1", %%xmm3 \n\t" \
    "pmulhw %%xmm4, "TAN3" \n\t" \
    "pmulhw %%xmm5, %%xmm1 \n\t" \
    "paddsw %%xmm4, "TAN3" \n\t" \
    "paddsw %%xmm5, %%xmm1 \n\t" \
    "psubsw %%xmm5, "TAN3" \n\t" \
    "paddsw %%xmm4, %%xmm1 \n\t" \
    "pmulhw %%xmm7, %%xmm3 \n\t" \
    "pmulhw %%xmm6, "TAN1" \n\t" \
    "paddsw %%xmm6, %%xmm3 \n\t" \
    "psubsw %%xmm7, "TAN1" \n\t" \
    "movdqa %%xmm3, %%xmm7 \n\t" \
    "movdqa "TAN1", %%xmm6 \n\t" \
    "psubsw %%xmm1, %%xmm3 \n\t" \
    "psubsw "TAN3", "TAN1" \n\t" \
    "paddsw %%xmm7, %%xmm1 \n\t" \
    "paddsw %%xmm6, "TAN3" \n\t" \
    "movdqa %%xmm3, %%xmm6 \n\t" \
    "psubsw "TAN3", %%xmm3 \n\t" \
    "paddsw %%xmm6, "TAN3" \n\t" \
    "movdqa "MANGLE(sqrt2)", %%xmm4 \n\t" \
    "pmulhw %%xmm4, %%xmm3 \n\t" \
    "pmulhw %%xmm4, "TAN3" \n\t" \
    "paddsw "TAN3", "TAN3" \n\t" \
    "paddsw %%xmm3, %%xmm3 \n\t" \
    "movdqa "MANGLE(tan2)", %%xmm7 \n\t" \
    MOV_32_ONLY ROW2", "REG2" \n\t" \
    MOV_32_ONLY ROW6", "REG6" \n\t" \
    "movdqa %%xmm7, %%xmm5 \n\t" \
    "pmulhw "REG6", %%xmm7 \n\t" \
    "pmulhw "REG2", %%xmm5 \n\t" \
    "paddsw "REG2", %%xmm7 \n\t" \
    "psubsw "REG6", %%xmm5 \n\t" \
    MOV_32_ONLY ROW0", "REG0" \n\t" \
    MOV_32_ONLY ROW4", "REG4" \n\t" \
    MOV_32_ONLY" "TAN1", (%0) \n\t" \
    "movdqa "REG0", "XMMS" \n\t" \
    "psubsw "REG4", "REG0" \n\t" \
    "paddsw "XMMS", "REG4" \n\t" \
    "movdqa "REG4", "XMMS" \n\t" \
    "psubsw %%xmm7, "REG4" \n\t" \
    "paddsw "XMMS", %%xmm7 \n\t" \
    "movdqa "REG0", "XMMS" \n\t" \
    "psubsw %%xmm5, "REG0" \n\t" \
    "paddsw "XMMS", %%xmm5 \n\t" \
    "movdqa %%xmm5, "XMMS" \n\t" \
    "psubsw "TAN3", %%xmm5 \n\t" \
    "paddsw "XMMS", "TAN3" \n\t" \
    "movdqa "REG0", "XMMS" \n\t" \
    "psubsw %%xmm3, "REG0" \n\t" \
    "paddsw "XMMS", %%xmm3 \n\t" \
    MOV_32_ONLY" (%0), "TAN1" \n\t" \
    "psraw $6, %%xmm5 \n\t" \
    "psraw $6, "REG0" \n\t" \
    "psraw $6, "TAN3" \n\t" \
    "psraw $6, %%xmm3 \n\t" \
    "movdqa "TAN3", 1*16("dct") \n\t" \
    "movdqa %%xmm3, 2*16("dct") \n\t" \
    "movdqa "REG0", 5*16("dct") \n\t" \
    "movdqa %%xmm5, 6*16("dct") \n\t" \
    "movdqa %%xmm7, %%xmm0 \n\t" \
    "movdqa "REG4", %%xmm4 \n\t" \
    "psubsw %%xmm1, %%xmm7 \n\t" \
    "psubsw "TAN1", "REG4" \n\t" \
    "paddsw %%xmm0, %%xmm1 \n\t" \
    "paddsw %%xmm4, "TAN1" \n\t" \
    "psraw $6, %%xmm1 \n\t" \
    "psraw $6, %%xmm7 \n\t" \
    "psraw $6, "TAN1" \n\t" \
    "psraw $6, "REG4" \n\t" \
    "movdqa %%xmm1, ("dct") \n\t" \
    "movdqa "TAN1", 3*16("dct") \n\t" \
    "movdqa "REG4", 4*16("dct") \n\t" \
    "movdqa %%xmm7, 7*16("dct") \n\t"

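/* iLLM_PASS_SPARSE: column pass specialized for sparse blocks.  Taken when
 * rows 4-7 are all zero, so their loads, multiplies and butterflies can be
 * omitted. */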
#define iLLM_PASS_SPARSE(dct) \
    "pmulhw %%xmm4, "TAN3" \n\t" \
    "paddsw %%xmm4, "TAN3" \n\t" \
    "movdqa %%xmm6, %%xmm3 \n\t" \
    "pmulhw %%xmm6, "TAN1" \n\t" \
    "movdqa %%xmm4, %%xmm1 \n\t" \
    "psubsw %%xmm1, %%xmm3 \n\t" \
    "paddsw %%xmm6, %%xmm1 \n\t" \
    "movdqa "TAN1", %%xmm6 \n\t" \
    "psubsw "TAN3", "TAN1" \n\t" \
    "paddsw %%xmm6, "TAN3" \n\t" \
    "movdqa %%xmm3, %%xmm6 \n\t" \
    "psubsw "TAN3", %%xmm3 \n\t" \
    "paddsw %%xmm6, "TAN3" \n\t" \
    "movdqa "MANGLE(sqrt2)", %%xmm4 \n\t" \
    "pmulhw %%xmm4, %%xmm3 \n\t" \
    "pmulhw %%xmm4, "TAN3" \n\t" \
    "paddsw "TAN3", "TAN3" \n\t" \
    "paddsw %%xmm3, %%xmm3 \n\t" \
    "movdqa "MANGLE(tan2)", %%xmm5 \n\t" \
    MOV_32_ONLY ROW2", "SREG2" \n\t" \
    "pmulhw "SREG2", %%xmm5 \n\t" \
    MOV_32_ONLY ROW0", "REG0" \n\t" \
    "movdqa "REG0", %%xmm6 \n\t" \
    "psubsw "SREG2", %%xmm6 \n\t" \
    "paddsw "REG0", "SREG2" \n\t" \
    MOV_32_ONLY" "TAN1", (%0) \n\t" \
    "movdqa "REG0", "XMMS" \n\t" \
    "psubsw %%xmm5, "REG0" \n\t" \
    "paddsw "XMMS", %%xmm5 \n\t" \
    "movdqa %%xmm5, "XMMS" \n\t" \
    "psubsw "TAN3", %%xmm5 \n\t" \
    "paddsw "XMMS", "TAN3" \n\t" \
    "movdqa "REG0", "XMMS" \n\t" \
    "psubsw %%xmm3, "REG0" \n\t" \
    "paddsw "XMMS", %%xmm3 \n\t" \
    MOV_32_ONLY" (%0), "TAN1" \n\t" \
    "psraw $6, %%xmm5 \n\t" \
    "psraw $6, "REG0" \n\t" \
    "psraw $6, "TAN3" \n\t" \
    "psraw $6, %%xmm3 \n\t" \
    "movdqa "TAN3", 1*16("dct") \n\t" \
    "movdqa %%xmm3, 2*16("dct") \n\t" \
    "movdqa "REG0", 5*16("dct") \n\t" \
    "movdqa %%xmm5, 6*16("dct") \n\t" \
    "movdqa "SREG2", %%xmm0 \n\t" \
    "movdqa %%xmm6, %%xmm4 \n\t" \
    "psubsw %%xmm1, "SREG2" \n\t" \
    "psubsw "TAN1", %%xmm6 \n\t" \
    "paddsw %%xmm0, %%xmm1 \n\t" \
    "paddsw %%xmm4, "TAN1" \n\t" \
    "psraw $6, %%xmm1 \n\t" \
    "psraw $6, "SREG2" \n\t" \
    "psraw $6, "TAN1" \n\t" \
    "psraw $6, %%xmm6 \n\t" \
    "movdqa %%xmm1, ("dct") \n\t" \
    "movdqa "TAN1", 3*16("dct") \n\t" \
    "movdqa %%xmm6, 4*16("dct") \n\t" \
    "movdqa "SREG2", 7*16("dct") \n\t"

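/* Full 2-D inverse DCT, in place on the 8x8 coefficient block: a row pass
 * with per-row zero tests (the high-frequency rows of a typical block are
 * often all zero), then whichever column-pass variant applies. */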
inline void ff_idct_xvid_sse2(short *block)
{
    __asm__ volatile(
    "movq "MANGLE(m127)", %%mm0 \n\t"
    iMTX_MULT("(%0)", MANGLE(iTab1), ROUND(walkenIdctRounders), PUT_EVEN(ROW0))
    iMTX_MULT("1*16(%0)", MANGLE(iTab2), ROUND(walkenIdctRounders+1*16), PUT_ODD(ROW1))
    iMTX_MULT("2*16(%0)", MANGLE(iTab3), ROUND(walkenIdctRounders+2*16), PUT_EVEN(ROW2))

    TEST_TWO_ROWS("3*16(%0)", "4*16(%0)", "%%eax", "%%ecx", CLEAR_ODD(ROW3), CLEAR_EVEN(ROW4))
    JZ("%%eax", "1f")
    iMTX_MULT("3*16(%0)", MANGLE(iTab4), ROUND(walkenIdctRounders+3*16), PUT_ODD(ROW3))

    TEST_TWO_ROWS("5*16(%0)", "6*16(%0)", "%%eax", "%%edx", CLEAR_ODD(ROW5), CLEAR_EVEN(ROW6))
    TEST_ONE_ROW("7*16(%0)", "%%esi", CLEAR_ODD(ROW7))
    iLLM_HEAD
    ".p2align 4 \n\t"
    JNZ("%%ecx", "2f")
    JNZ("%%eax", "3f")
    JNZ("%%edx", "4f")
    JNZ("%%esi", "5f")
    iLLM_PASS_SPARSE("%0")
    "jmp 6f \n\t"
    "2: \n\t"
    iMTX_MULT("4*16(%0)", MANGLE(iTab1), "#", PUT_EVEN(ROW4))
    "3: \n\t"
    iMTX_MULT("5*16(%0)", MANGLE(iTab4), ROUND(walkenIdctRounders+4*16), PUT_ODD(ROW5))
    JZ("%%edx", "1f")
    "4: \n\t"
    iMTX_MULT("6*16(%0)", MANGLE(iTab3), ROUND(walkenIdctRounders+5*16), PUT_EVEN(ROW6))
    JZ("%%esi", "1f")
    "5: \n\t"
    iMTX_MULT("7*16(%0)", MANGLE(iTab2), ROUND(walkenIdctRounders+5*16), PUT_ODD(ROW7))
#if !ARCH_X86_64
    iLLM_HEAD
#endif
    iLLM_PASS("%0")
    "6: \n\t"
    : "+r" (block)
    :
    : XMM_CLOBBERS("%xmm0", "%xmm1", "%xmm2", "%xmm3",
                   "%xmm4", "%xmm5", "%xmm6", "%xmm7",)
#if ARCH_X86_64
      XMM_CLOBBERS("%xmm8", "%xmm9", "%xmm10", "%xmm11",
                   "%xmm12", "%xmm13", "%xmm14",)
#endif
      "%eax", "%ecx", "%edx", "%esi", "memory"
    );
}

void ff_idct_xvid_sse2_put(uint8_t *dest, int line_size, short *block)
{
    ff_idct_xvid_sse2(block);
    ff_put_pixels_clamped_mmx(block, dest, line_size);
}

void ff_idct_xvid_sse2_add(uint8_t *dest, int line_size, short *block)
{
    ff_idct_xvid_sse2(block);
    ff_add_pixels_clamped_mmx(block, dest, line_size);
}

408 
409 #endif /* HAVE_INLINE_ASM */