utils.c
1 /*
2  * Copyright (C) 2001-2003 Michael Niedermayer <michaelni@gmx.at>
3  *
4  * This file is part of Libav.
5  *
6  * Libav is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * Libav is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with Libav; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 #include "config.h"
22 
23 #define _SVID_SOURCE // needed for MAP_ANONYMOUS
24 #include <assert.h>
25 #include <inttypes.h>
26 #include <math.h>
27 #include <stdio.h>
28 #include <string.h>
29 #if HAVE_SYS_MMAN_H
30 #include <sys/mman.h>
31 #if defined(MAP_ANON) && !defined(MAP_ANONYMOUS)
32 #define MAP_ANONYMOUS MAP_ANON
33 #endif
34 #endif
35 #if HAVE_VIRTUALALLOC
36 #define WIN32_LEAN_AND_MEAN
37 #include <windows.h>
38 #endif
39 
40 #include "libavutil/attributes.h"
41 #include "libavutil/avutil.h"
42 #include "libavutil/bswap.h"
43 #include "libavutil/cpu.h"
44 #include "libavutil/intreadwrite.h"
45 #include "libavutil/mathematics.h"
46 #include "libavutil/opt.h"
47 #include "libavutil/pixdesc.h"
48 #include "libavutil/x86/asm.h"
49 #include "libavutil/x86/cpu.h"
50 #include "rgb2rgb.h"
51 #include "swscale.h"
52 #include "swscale_internal.h"
53 
54 unsigned swscale_version(void)
55 {
56  return LIBSWSCALE_VERSION_INT;
57 }
58 
59 const char *swscale_configuration(void)
60 {
61  return LIBAV_CONFIGURATION;
62 }
63 
64 const char *swscale_license(void)
65 {
66 #define LICENSE_PREFIX "libswscale license: "
67  return LICENSE_PREFIX LIBAV_LICENSE + sizeof(LICENSE_PREFIX) - 1;
68 }
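/* Minimal usage sketch for the three query functions above (assumes the
 * caller also includes libswscale/version.h for LIBSWSCALE_VERSION_INT):
 *
 *     if (swscale_version() != LIBSWSCALE_VERSION_INT)
 *         av_log(NULL, AV_LOG_WARNING, "libswscale header/runtime mismatch\n");
 *     av_log(NULL, AV_LOG_VERBOSE, "swscale config: %s\n", swscale_configuration());
 */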
69 
70 #define RET 0xC3 // near return opcode for x86
71 
72 typedef struct FormatEntry {
73  int is_supported_in, is_supported_out;
74 } FormatEntry;
75 
76 static const FormatEntry format_entries[AV_PIX_FMT_NB] = {
77  [AV_PIX_FMT_YUV420P] = { 1, 1 },
78  [AV_PIX_FMT_YUYV422] = { 1, 1 },
79  [AV_PIX_FMT_RGB24] = { 1, 1 },
80  [AV_PIX_FMT_BGR24] = { 1, 1 },
81  [AV_PIX_FMT_YUV422P] = { 1, 1 },
82  [AV_PIX_FMT_YUV444P] = { 1, 1 },
83  [AV_PIX_FMT_YUV410P] = { 1, 1 },
84  [AV_PIX_FMT_YUV411P] = { 1, 1 },
85  [AV_PIX_FMT_GRAY8] = { 1, 1 },
86  [AV_PIX_FMT_MONOWHITE] = { 1, 1 },
87  [AV_PIX_FMT_MONOBLACK] = { 1, 1 },
88  [AV_PIX_FMT_PAL8] = { 1, 0 },
89  [AV_PIX_FMT_YUVJ420P] = { 1, 1 },
90  [AV_PIX_FMT_YUVJ422P] = { 1, 1 },
91  [AV_PIX_FMT_YUVJ444P] = { 1, 1 },
92  [AV_PIX_FMT_UYVY422] = { 1, 1 },
93  [AV_PIX_FMT_UYYVYY411] = { 0, 0 },
94  [AV_PIX_FMT_BGR8] = { 1, 1 },
95  [AV_PIX_FMT_BGR4] = { 0, 1 },
96  [AV_PIX_FMT_BGR4_BYTE] = { 1, 1 },
97  [AV_PIX_FMT_RGB8] = { 1, 1 },
98  [AV_PIX_FMT_RGB4] = { 0, 1 },
99  [AV_PIX_FMT_RGB4_BYTE] = { 1, 1 },
100  [AV_PIX_FMT_NV12] = { 1, 1 },
101  [AV_PIX_FMT_NV21] = { 1, 1 },
102  [AV_PIX_FMT_ARGB] = { 1, 1 },
103  [AV_PIX_FMT_RGBA] = { 1, 1 },
104  [AV_PIX_FMT_ABGR] = { 1, 1 },
105  [AV_PIX_FMT_BGRA] = { 1, 1 },
106  [AV_PIX_FMT_GRAY16BE] = { 1, 1 },
107  [AV_PIX_FMT_GRAY16LE] = { 1, 1 },
108  [AV_PIX_FMT_YUV440P] = { 1, 1 },
109  [AV_PIX_FMT_YUVJ440P] = { 1, 1 },
110  [AV_PIX_FMT_YUVA420P] = { 1, 1 },
111  [AV_PIX_FMT_YUVA422P] = { 1, 1 },
112  [AV_PIX_FMT_YUVA444P] = { 1, 1 },
113  [AV_PIX_FMT_YUVA420P9BE] = { 1, 1 },
114  [AV_PIX_FMT_YUVA420P9LE] = { 1, 1 },
115  [AV_PIX_FMT_YUVA422P9BE] = { 1, 1 },
116  [AV_PIX_FMT_YUVA422P9LE] = { 1, 1 },
117  [AV_PIX_FMT_YUVA444P9BE] = { 1, 1 },
118  [AV_PIX_FMT_YUVA444P9LE] = { 1, 1 },
119  [AV_PIX_FMT_YUVA420P10BE]= { 1, 1 },
120  [AV_PIX_FMT_YUVA420P10LE]= { 1, 1 },
121  [AV_PIX_FMT_YUVA422P10BE]= { 1, 1 },
122  [AV_PIX_FMT_YUVA422P10LE]= { 1, 1 },
123  [AV_PIX_FMT_YUVA444P10BE]= { 1, 1 },
124  [AV_PIX_FMT_YUVA444P10LE]= { 1, 1 },
125  [AV_PIX_FMT_YUVA420P16BE]= { 1, 1 },
126  [AV_PIX_FMT_YUVA420P16LE]= { 1, 1 },
127  [AV_PIX_FMT_YUVA422P16BE]= { 1, 1 },
128  [AV_PIX_FMT_YUVA422P16LE]= { 1, 1 },
129  [AV_PIX_FMT_YUVA444P16BE]= { 1, 1 },
130  [AV_PIX_FMT_YUVA444P16LE]= { 1, 1 },
131  [AV_PIX_FMT_RGB48BE] = { 1, 1 },
132  [AV_PIX_FMT_RGB48LE] = { 1, 1 },
133  [AV_PIX_FMT_RGB565BE] = { 1, 1 },
134  [AV_PIX_FMT_RGB565LE] = { 1, 1 },
135  [AV_PIX_FMT_RGB555BE] = { 1, 1 },
136  [AV_PIX_FMT_RGB555LE] = { 1, 1 },
137  [AV_PIX_FMT_BGR565BE] = { 1, 1 },
138  [AV_PIX_FMT_BGR565LE] = { 1, 1 },
139  [AV_PIX_FMT_BGR555BE] = { 1, 1 },
140  [AV_PIX_FMT_BGR555LE] = { 1, 1 },
141  [AV_PIX_FMT_YUV420P16LE] = { 1, 1 },
142  [AV_PIX_FMT_YUV420P16BE] = { 1, 1 },
143  [AV_PIX_FMT_YUV422P16LE] = { 1, 1 },
144  [AV_PIX_FMT_YUV422P16BE] = { 1, 1 },
145  [AV_PIX_FMT_YUV444P16LE] = { 1, 1 },
146  [AV_PIX_FMT_YUV444P16BE] = { 1, 1 },
147  [AV_PIX_FMT_RGB444LE] = { 1, 1 },
148  [AV_PIX_FMT_RGB444BE] = { 1, 1 },
149  [AV_PIX_FMT_BGR444LE] = { 1, 1 },
150  [AV_PIX_FMT_BGR444BE] = { 1, 1 },
151  [AV_PIX_FMT_Y400A] = { 1, 0 },
152  [AV_PIX_FMT_BGR48BE] = { 1, 1 },
153  [AV_PIX_FMT_BGR48LE] = { 1, 1 },
154  [AV_PIX_FMT_YUV420P9BE] = { 1, 1 },
155  [AV_PIX_FMT_YUV420P9LE] = { 1, 1 },
156  [AV_PIX_FMT_YUV420P10BE] = { 1, 1 },
157  [AV_PIX_FMT_YUV420P10LE] = { 1, 1 },
158  [AV_PIX_FMT_YUV422P9BE] = { 1, 1 },
159  [AV_PIX_FMT_YUV422P9LE] = { 1, 1 },
160  [AV_PIX_FMT_YUV422P10BE] = { 1, 1 },
161  [AV_PIX_FMT_YUV422P10LE] = { 1, 1 },
162  [AV_PIX_FMT_YUV444P9BE] = { 1, 1 },
163  [AV_PIX_FMT_YUV444P9LE] = { 1, 1 },
164  [AV_PIX_FMT_YUV444P10BE] = { 1, 1 },
165  [AV_PIX_FMT_YUV444P10LE] = { 1, 1 },
166  [AV_PIX_FMT_GBRP] = { 1, 0 },
167  [AV_PIX_FMT_GBRP9LE] = { 1, 0 },
168  [AV_PIX_FMT_GBRP9BE] = { 1, 0 },
169  [AV_PIX_FMT_GBRP10LE] = { 1, 0 },
170  [AV_PIX_FMT_GBRP10BE] = { 1, 0 },
171  [AV_PIX_FMT_GBRP16LE] = { 1, 0 },
172  [AV_PIX_FMT_GBRP16BE] = { 1, 0 },
173 };
174 
175 int sws_isSupportedInput(enum AVPixelFormat pix_fmt)
176 {
177  return (unsigned)pix_fmt < AV_PIX_FMT_NB ?
178  format_entries[pix_fmt].is_supported_in : 0;
179 }
180 
181 int sws_isSupportedOutput(enum AVPixelFormat pix_fmt)
182 {
183  return (unsigned)pix_fmt < AV_PIX_FMT_NB ?
184  format_entries[pix_fmt].is_supported_out : 0;
185 }
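/* Illustrative use of the two query functions above before creating a context
 * (error handling is the caller's choice):
 *
 *     if (!sws_isSupportedInput(srcFormat) || !sws_isSupportedOutput(dstFormat))
 *         return AVERROR(EINVAL);
 *
 * sws_init_context() below performs the same validation itself and logs the
 * offending format name. */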
186 
187 extern const int32_t ff_yuv2rgb_coeffs[8][4];
188 
189 const char *sws_format_name(enum AVPixelFormat format)
190 {
191  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(format);
192  if (desc)
193  return desc->name;
194  else
195  return "Unknown format";
196 }
197 
198 static double getSplineCoeff(double a, double b, double c, double d,
199  double dist)
200 {
201  if (dist <= 1.0)
202  return ((d * dist + c) * dist + b) * dist + a;
203  else
204  return getSplineCoeff(0.0,
205  b + 2.0 * c + 3.0 * d,
206  c + 3.0 * d,
207  -b - 3.0 * c - 6.0 * d,
208  dist - 1.0);
209 }
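/* getSplineCoeff() evaluates the cubic a + b*dist + c*dist^2 + d*dist^3 on
 * [0,1] and, for dist > 1, recurses with the polynomial re-based at dist = 1,
 * i.e. the next spline segment. With (a, b, c, d) = (1.0, 0.0, p, -p - 1.0)
 * as used for SWS_SPLINE below, dist = 0 gives 1.0 and dist = 1 gives
 * 1 + p + (-p - 1) = 0, so the kernel falls from 1 to 0 over the first
 * segment. */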
210 
211 static int initFilter(int16_t **outFilter, int32_t **filterPos,
212  int *outFilterSize, int xInc, int srcW, int dstW,
213  int filterAlign, int one, int flags, int cpu_flags,
214  SwsVector *srcFilter, SwsVector *dstFilter,
215  double param[2], int is_horizontal)
216 {
217  int i;
218  int filterSize;
219  int filter2Size;
220  int minFilterSize;
221  int64_t *filter = NULL;
222  int64_t *filter2 = NULL;
223  const int64_t fone = 1LL << 54;
224  int ret = -1;
225 
226  emms_c(); // FIXME should not be required but IS (even for non-MMX versions)
227 
228  // NOTE: the +3 is for the MMX(+1) / SSE(+3) scaler which reads over the end
229  FF_ALLOC_OR_GOTO(NULL, *filterPos, (dstW + 3) * sizeof(**filterPos), fail);
230 
231  if (FFABS(xInc - 0x10000) < 10) { // unscaled
232  int i;
233  filterSize = 1;
234  FF_ALLOCZ_OR_GOTO(NULL, filter,
235  dstW * sizeof(*filter) * filterSize, fail);
236 
237  for (i = 0; i < dstW; i++) {
238  filter[i * filterSize] = fone;
239  (*filterPos)[i] = i;
240  }
241  } else if (flags & SWS_POINT) { // lame looking point sampling mode
242  int i;
243  int xDstInSrc;
244  filterSize = 1;
245  FF_ALLOC_OR_GOTO(NULL, filter,
246  dstW * sizeof(*filter) * filterSize, fail);
247 
248  xDstInSrc = xInc / 2 - 0x8000;
249  for (i = 0; i < dstW; i++) {
250  int xx = (xDstInSrc - ((filterSize - 1) << 15) + (1 << 15)) >> 16;
251 
252  (*filterPos)[i] = xx;
253  filter[i] = fone;
254  xDstInSrc += xInc;
255  }
256  } else if ((xInc <= (1 << 16) && (flags & SWS_AREA)) ||
257  (flags & SWS_FAST_BILINEAR)) { // bilinear upscale
258  int i;
259  int xDstInSrc;
260  filterSize = 2;
261  FF_ALLOC_OR_GOTO(NULL, filter,
262  dstW * sizeof(*filter) * filterSize, fail);
263 
264  xDstInSrc = xInc / 2 - 0x8000;
265  for (i = 0; i < dstW; i++) {
266  int xx = (xDstInSrc - ((filterSize - 1) << 15) + (1 << 15)) >> 16;
267  int j;
268 
269  (*filterPos)[i] = xx;
270  // bilinear upscale / linear interpolate / area averaging
271  for (j = 0; j < filterSize; j++) {
272  int64_t coeff = fone - FFABS((xx << 16) - xDstInSrc) *
273  (fone >> 16);
274  if (coeff < 0)
275  coeff = 0;
276  filter[i * filterSize + j] = coeff;
277  xx++;
278  }
279  xDstInSrc += xInc;
280  }
281  } else {
282  int64_t xDstInSrc;
283  int sizeFactor;
284 
285  if (flags & SWS_BICUBIC)
286  sizeFactor = 4;
287  else if (flags & SWS_X)
288  sizeFactor = 8;
289  else if (flags & SWS_AREA)
290  sizeFactor = 1; // downscale only, for upscale it is bilinear
291  else if (flags & SWS_GAUSS)
292  sizeFactor = 8; // infinite ;)
293  else if (flags & SWS_LANCZOS)
294  sizeFactor = param[0] != SWS_PARAM_DEFAULT ? ceil(2 * param[0]) : 6;
295  else if (flags & SWS_SINC)
296  sizeFactor = 20; // infinite ;)
297  else if (flags & SWS_SPLINE)
298  sizeFactor = 20; // infinite ;)
299  else if (flags & SWS_BILINEAR)
300  sizeFactor = 2;
301  else {
302  sizeFactor = 0; // GCC warning killer
303  assert(0);
304  }
305 
306  if (xInc <= 1 << 16)
307  filterSize = 1 + sizeFactor; // upscale
308  else
309  filterSize = 1 + (sizeFactor * srcW + dstW - 1) / dstW;
310 
311  filterSize = FFMIN(filterSize, srcW - 2);
312  filterSize = FFMAX(filterSize, 1);
313 
314  FF_ALLOC_OR_GOTO(NULL, filter,
315  dstW * sizeof(*filter) * filterSize, fail);
316 
317  xDstInSrc = xInc - 0x10000;
318  for (i = 0; i < dstW; i++) {
319  int xx = (xDstInSrc - ((int64_t)(filterSize - 2) << 16)) / (1 << 17);
320  int j;
321  (*filterPos)[i] = xx;
322  for (j = 0; j < filterSize; j++) {
323  int64_t d = (FFABS(((int64_t)xx << 17) - xDstInSrc)) << 13;
324  double floatd;
325  int64_t coeff;
326 
327  if (xInc > 1 << 16)
328  d = d * dstW / srcW;
329  floatd = d * (1.0 / (1 << 30));
330 
331  if (flags & SWS_BICUBIC) {
332  int64_t B = (param[0] != SWS_PARAM_DEFAULT ? param[0] : 0) * (1 << 24);
333  int64_t C = (param[1] != SWS_PARAM_DEFAULT ? param[1] : 0.6) * (1 << 24);
334 
335  if (d >= 1LL << 31) {
336  coeff = 0.0;
337  } else {
338  int64_t dd = (d * d) >> 30;
339  int64_t ddd = (dd * d) >> 30;
340 
341  if (d < 1LL << 30)
342  coeff = (12 * (1 << 24) - 9 * B - 6 * C) * ddd +
343  (-18 * (1 << 24) + 12 * B + 6 * C) * dd +
344  (6 * (1 << 24) - 2 * B) * (1 << 30);
345  else
346  coeff = (-B - 6 * C) * ddd +
347  (6 * B + 30 * C) * dd +
348  (-12 * B - 48 * C) * d +
349  (8 * B + 24 * C) * (1 << 30);
350  }
351  coeff *= fone >> (30 + 24);
352  }
353 #if 0
354  else if (flags & SWS_X) {
355  double p = param ? param * 0.01 : 0.3;
356  coeff = d ? sin(d * M_PI) / (d * M_PI) : 1.0;
357  coeff *= pow(2.0, -p * d * d);
358  }
359 #endif
360  else if (flags & SWS_X) {
361  double A = param[0] != SWS_PARAM_DEFAULT ? param[0] : 1.0;
362  double c;
363 
364  if (floatd < 1.0)
365  c = cos(floatd * M_PI);
366  else
367  c = -1.0;
368  if (c < 0.0)
369  c = -pow(-c, A);
370  else
371  c = pow(c, A);
372  coeff = (c * 0.5 + 0.5) * fone;
373  } else if (flags & SWS_AREA) {
374  int64_t d2 = d - (1 << 29);
375  if (d2 * xInc < -(1LL << (29 + 16)))
376  coeff = 1.0 * (1LL << (30 + 16));
377  else if (d2 * xInc < (1LL << (29 + 16)))
378  coeff = -d2 * xInc + (1LL << (29 + 16));
379  else
380  coeff = 0.0;
381  coeff *= fone >> (30 + 16);
382  } else if (flags & SWS_GAUSS) {
383  double p = param[0] != SWS_PARAM_DEFAULT ? param[0] : 3.0;
384  coeff = (pow(2.0, -p * floatd * floatd)) * fone;
385  } else if (flags & SWS_SINC) {
386  coeff = (d ? sin(floatd * M_PI) / (floatd * M_PI) : 1.0) * fone;
387  } else if (flags & SWS_LANCZOS) {
388  double p = param[0] != SWS_PARAM_DEFAULT ? param[0] : 3.0;
389  coeff = (d ? sin(floatd * M_PI) * sin(floatd * M_PI / p) /
390  (floatd * floatd * M_PI * M_PI / p) : 1.0) * fone;
391  if (floatd > p)
392  coeff = 0;
393  } else if (flags & SWS_BILINEAR) {
394  coeff = (1 << 30) - d;
395  if (coeff < 0)
396  coeff = 0;
397  coeff *= fone >> 30;
398  } else if (flags & SWS_SPLINE) {
399  double p = -2.196152422706632;
400  coeff = getSplineCoeff(1.0, 0.0, p, -p - 1.0, floatd) * fone;
401  } else {
402  coeff = 0.0; // GCC warning killer
403  assert(0);
404  }
405 
406  filter[i * filterSize + j] = coeff;
407  xx++;
408  }
409  xDstInSrc += 2 * xInc;
410  }
411  }
412 
413  /* apply src & dst Filter to filter -> filter2
414  * av_free(filter);
415  */
416  assert(filterSize > 0);
417  filter2Size = filterSize;
418  if (srcFilter)
419  filter2Size += srcFilter->length - 1;
420  if (dstFilter)
421  filter2Size += dstFilter->length - 1;
422  assert(filter2Size > 0);
423  FF_ALLOCZ_OR_GOTO(NULL, filter2, filter2Size * dstW * sizeof(*filter2), fail);
424 
425  for (i = 0; i < dstW; i++) {
426  int j, k;
427 
428  if (srcFilter) {
429  for (k = 0; k < srcFilter->length; k++) {
430  for (j = 0; j < filterSize; j++)
431  filter2[i * filter2Size + k + j] +=
432  srcFilter->coeff[k] * filter[i * filterSize + j];
433  }
434  } else {
435  for (j = 0; j < filterSize; j++)
436  filter2[i * filter2Size + j] = filter[i * filterSize + j];
437  }
438  // FIXME dstFilter
439 
440  (*filterPos)[i] += (filterSize - 1) / 2 - (filter2Size - 1) / 2;
441  }
442  av_freep(&filter);
443 
444  /* try to reduce the filter-size (step1 find size and shift left) */
445  // Assume it is near normalized (*0.5 or *2.0 is OK but * 0.001 is not).
446  minFilterSize = 0;
447  for (i = dstW - 1; i >= 0; i--) {
448  int min = filter2Size;
449  int j;
450  int64_t cutOff = 0.0;
451 
452  /* get rid of near zero elements on the left by shifting left */
453  for (j = 0; j < filter2Size; j++) {
454  int k;
455  cutOff += FFABS(filter2[i * filter2Size]);
456 
457  if (cutOff > SWS_MAX_REDUCE_CUTOFF * fone)
458  break;
459 
460  /* preserve monotonicity because the core can't handle the
461  * filter otherwise */
462  if (i < dstW - 1 && (*filterPos)[i] >= (*filterPos)[i + 1])
463  break;
464 
465  // move filter coefficients left
466  for (k = 1; k < filter2Size; k++)
467  filter2[i * filter2Size + k - 1] = filter2[i * filter2Size + k];
468  filter2[i * filter2Size + k - 1] = 0;
469  (*filterPos)[i]++;
470  }
471 
472  cutOff = 0;
473  /* count near zeros on the right */
474  for (j = filter2Size - 1; j > 0; j--) {
475  cutOff += FFABS(filter2[i * filter2Size + j]);
476 
477  if (cutOff > SWS_MAX_REDUCE_CUTOFF * fone)
478  break;
479  min--;
480  }
481 
482  if (min > minFilterSize)
483  minFilterSize = min;
484  }
485 
486  if (HAVE_ALTIVEC && cpu_flags & AV_CPU_FLAG_ALTIVEC) {
487  // we can handle the special case 4, so we don't want to go the full 8
488  if (minFilterSize < 5)
489  filterAlign = 4;
490 
491  /* We really don't want to waste our time doing useless computation, so
492  * fall back on the scalar C code for very small filters.
493  * Vectorizing is worth it only if you have a decent-sized vector. */
494  if (minFilterSize < 3)
495  filterAlign = 1;
496  }
497 
498  if (INLINE_MMX(cpu_flags)) {
499  // special case for unscaled vertical filtering
500  if (minFilterSize == 1 && filterAlign == 2)
501  filterAlign = 1;
502  }
503 
504  assert(minFilterSize > 0);
505  filterSize = (minFilterSize + (filterAlign - 1)) & (~(filterAlign - 1));
506  assert(filterSize > 0);
507  filter = av_malloc(filterSize * dstW * sizeof(*filter));
508  if (filterSize >= MAX_FILTER_SIZE * 16 /
509  ((flags & SWS_ACCURATE_RND) ? APCK_SIZE : 16) || !filter)
510  goto fail;
511  *outFilterSize = filterSize;
512 
513  if (flags & SWS_PRINT_INFO)
515  "SwScaler: reducing / aligning filtersize %d -> %d\n",
516  filter2Size, filterSize);
517  /* try to reduce the filter-size (step2 reduce it) */
518  for (i = 0; i < dstW; i++) {
519  int j;
520 
521  for (j = 0; j < filterSize; j++) {
522  if (j >= filter2Size)
523  filter[i * filterSize + j] = 0;
524  else
525  filter[i * filterSize + j] = filter2[i * filter2Size + j];
526  if ((flags & SWS_BITEXACT) && j >= minFilterSize)
527  filter[i * filterSize + j] = 0;
528  }
529  }
530 
531  // FIXME try to align filterPos if possible
532 
533  // fix borders
534  if (is_horizontal) {
535  for (i = 0; i < dstW; i++) {
536  int j;
537  if ((*filterPos)[i] < 0) {
538  // move filter coefficients left to compensate for filterPos
539  for (j = 1; j < filterSize; j++) {
540  int left = FFMAX(j + (*filterPos)[i], 0);
541  filter[i * filterSize + left] += filter[i * filterSize + j];
542  filter[i * filterSize + j] = 0;
543  }
544  (*filterPos)[i] = 0;
545  }
546 
547  if ((*filterPos)[i] + filterSize > srcW) {
548  int shift = (*filterPos)[i] + filterSize - srcW;
549  // move filter coefficients right to compensate for filterPos
550  for (j = filterSize - 2; j >= 0; j--) {
551  int right = FFMIN(j + shift, filterSize - 1);
552  filter[i * filterSize + right] += filter[i * filterSize + j];
553  filter[i * filterSize + j] = 0;
554  }
555  (*filterPos)[i] = srcW - filterSize;
556  }
557  }
558  }
559 
560  // Note the +1 is for the MMX scaler which reads over the end
561  /* align at 16 for AltiVec (needed by hScale_altivec_real) */
562  FF_ALLOCZ_OR_GOTO(NULL, *outFilter,
563  *outFilterSize * (dstW + 3) * sizeof(int16_t), fail);
564 
565  /* normalize & store in outFilter */
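 /* The 'error' accumulator below implements simple error diffusion: the
  * rounding error of each integer coefficient is carried into the next one,
  * so every row of *outFilter sums to the fixed-point 'one' (1 << 14 for the
  * horizontal filters, 1 << 12 for the vertical ones, see sws_init_context())
  * as closely as integer coefficients allow. */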
566  for (i = 0; i < dstW; i++) {
567  int j;
568  int64_t error = 0;
569  int64_t sum = 0;
570 
571  for (j = 0; j < filterSize; j++) {
572  sum += filter[i * filterSize + j];
573  }
574  sum = (sum + one / 2) / one;
575  for (j = 0; j < *outFilterSize; j++) {
576  int64_t v = filter[i * filterSize + j] + error;
577  int intV = ROUNDED_DIV(v, sum);
578  (*outFilter)[i * (*outFilterSize) + j] = intV;
579  error = v - intV * sum;
580  }
581  }
582 
583  (*filterPos)[dstW + 0] =
584  (*filterPos)[dstW + 1] =
585  (*filterPos)[dstW + 2] = (*filterPos)[dstW - 1]; /* the MMX/SSE scaler will
586  * read over the end */
587  for (i = 0; i < *outFilterSize; i++) {
588  int k = (dstW - 1) * (*outFilterSize) + i;
589  (*outFilter)[k + 1 * (*outFilterSize)] =
590  (*outFilter)[k + 2 * (*outFilterSize)] =
591  (*outFilter)[k + 3 * (*outFilterSize)] = (*outFilter)[k];
592  }
593 
594  ret = 0;
595 
596 fail:
597  av_free(filter);
598  av_free(filter2);
599  return ret;
600 }
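/* For reference, a scalar sketch of how the tables built by initFilter() are
 * consumed by a horizontal scaling pass; the real consumers are the hScale*
 * functions in swscale.c and their assembly variants, so this block is
 * illustrative only (8-bit input, 14-bit coefficients assumed):
 *
 *     for (i = 0; i < dstW; i++) {
 *         int j, val = 0;
 *         for (j = 0; j < filterSize; j++)
 *             val += src[filterPos[i] + j] * filter[i * filterSize + j];
 *         dst[i] = FFMIN(val >> 7, (1 << 15) - 1);
 *     }
 */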
601 
602 #if HAVE_MMXEXT_INLINE
603 static int init_hscaler_mmxext(int dstW, int xInc, uint8_t *filterCode,
604  int16_t *filter, int32_t *filterPos,
605  int numSplits)
606 {
607  uint8_t *fragmentA;
608  x86_reg imm8OfPShufW1A;
609  x86_reg imm8OfPShufW2A;
610  x86_reg fragmentLengthA;
611  uint8_t *fragmentB;
612  x86_reg imm8OfPShufW1B;
613  x86_reg imm8OfPShufW2B;
614  x86_reg fragmentLengthB;
615  int fragmentPos;
616 
617  int xpos, i;
618 
619  // create an optimized horizontal scaling routine
620  /* This scaler is made of runtime-generated MMXEXT code using specially tuned
621  * pshufw instructions. For every four output pixels, if four input pixels
622  * are enough for the fast bilinear scaling, then a chunk of fragmentB is
623  * used. If five input pixels are needed, then a chunk of fragmentA is used.
624  */
625 
626  // code fragment
627 
628  __asm__ volatile (
629  "jmp 9f \n\t"
630  // Begin
631  "0: \n\t"
632  "movq (%%"REG_d", %%"REG_a"), %%mm3 \n\t"
633  "movd (%%"REG_c", %%"REG_S"), %%mm0 \n\t"
634  "movd 1(%%"REG_c", %%"REG_S"), %%mm1 \n\t"
635  "punpcklbw %%mm7, %%mm1 \n\t"
636  "punpcklbw %%mm7, %%mm0 \n\t"
637  "pshufw $0xFF, %%mm1, %%mm1 \n\t"
638  "1: \n\t"
639  "pshufw $0xFF, %%mm0, %%mm0 \n\t"
640  "2: \n\t"
641  "psubw %%mm1, %%mm0 \n\t"
642  "movl 8(%%"REG_b", %%"REG_a"), %%esi \n\t"
643  "pmullw %%mm3, %%mm0 \n\t"
644  "psllw $7, %%mm1 \n\t"
645  "paddw %%mm1, %%mm0 \n\t"
646 
647  "movq %%mm0, (%%"REG_D", %%"REG_a") \n\t"
648 
649  "add $8, %%"REG_a" \n\t"
650  // End
651  "9: \n\t"
652  // "int $3 \n\t"
653  "lea " LOCAL_MANGLE(0b) ", %0 \n\t"
654  "lea " LOCAL_MANGLE(1b) ", %1 \n\t"
655  "lea " LOCAL_MANGLE(2b) ", %2 \n\t"
656  "dec %1 \n\t"
657  "dec %2 \n\t"
658  "sub %0, %1 \n\t"
659  "sub %0, %2 \n\t"
660  "lea " LOCAL_MANGLE(9b) ", %3 \n\t"
661  "sub %0, %3 \n\t"
662 
663 
664  : "=r" (fragmentA), "=r" (imm8OfPShufW1A), "=r" (imm8OfPShufW2A),
665  "=r" (fragmentLengthA)
666  );
667 
668  __asm__ volatile (
669  "jmp 9f \n\t"
670  // Begin
671  "0: \n\t"
672  "movq (%%"REG_d", %%"REG_a"), %%mm3 \n\t"
673  "movd (%%"REG_c", %%"REG_S"), %%mm0 \n\t"
674  "punpcklbw %%mm7, %%mm0 \n\t"
675  "pshufw $0xFF, %%mm0, %%mm1 \n\t"
676  "1: \n\t"
677  "pshufw $0xFF, %%mm0, %%mm0 \n\t"
678  "2: \n\t"
679  "psubw %%mm1, %%mm0 \n\t"
680  "movl 8(%%"REG_b", %%"REG_a"), %%esi \n\t"
681  "pmullw %%mm3, %%mm0 \n\t"
682  "psllw $7, %%mm1 \n\t"
683  "paddw %%mm1, %%mm0 \n\t"
684 
685  "movq %%mm0, (%%"REG_D", %%"REG_a") \n\t"
686 
687  "add $8, %%"REG_a" \n\t"
688  // End
689  "9: \n\t"
690  // "int $3 \n\t"
691  "lea " LOCAL_MANGLE(0b) ", %0 \n\t"
692  "lea " LOCAL_MANGLE(1b) ", %1 \n\t"
693  "lea " LOCAL_MANGLE(2b) ", %2 \n\t"
694  "dec %1 \n\t"
695  "dec %2 \n\t"
696  "sub %0, %1 \n\t"
697  "sub %0, %2 \n\t"
698  "lea " LOCAL_MANGLE(9b) ", %3 \n\t"
699  "sub %0, %3 \n\t"
700 
701 
702  : "=r" (fragmentB), "=r" (imm8OfPShufW1B), "=r" (imm8OfPShufW2B),
703  "=r" (fragmentLengthB)
704  );
705 
706  xpos = 0; // lumXInc/2 - 0x8000; // difference between pixel centers
707  fragmentPos = 0;
708 
709  for (i = 0; i < dstW / numSplits; i++) {
710  int xx = xpos >> 16;
711 
712  if ((i & 3) == 0) {
713  int a = 0;
714  int b = ((xpos + xInc) >> 16) - xx;
715  int c = ((xpos + xInc * 2) >> 16) - xx;
716  int d = ((xpos + xInc * 3) >> 16) - xx;
717  int inc = (d + 1 < 4);
718  uint8_t *fragment = (d + 1 < 4) ? fragmentB : fragmentA;
719  x86_reg imm8OfPShufW1 = (d + 1 < 4) ? imm8OfPShufW1B : imm8OfPShufW1A;
720  x86_reg imm8OfPShufW2 = (d + 1 < 4) ? imm8OfPShufW2B : imm8OfPShufW2A;
721  x86_reg fragmentLength = (d + 1 < 4) ? fragmentLengthB : fragmentLengthA;
722  int maxShift = 3 - (d + inc);
723  int shift = 0;
724 
725  if (filterCode) {
726  filter[i] = ((xpos & 0xFFFF) ^ 0xFFFF) >> 9;
727  filter[i + 1] = (((xpos + xInc) & 0xFFFF) ^ 0xFFFF) >> 9;
728  filter[i + 2] = (((xpos + xInc * 2) & 0xFFFF) ^ 0xFFFF) >> 9;
729  filter[i + 3] = (((xpos + xInc * 3) & 0xFFFF) ^ 0xFFFF) >> 9;
730  filterPos[i / 2] = xx;
731 
732  memcpy(filterCode + fragmentPos, fragment, fragmentLength);
733 
734  filterCode[fragmentPos + imm8OfPShufW1] = (a + inc) |
735  ((b + inc) << 2) |
736  ((c + inc) << 4) |
737  ((d + inc) << 6);
738  filterCode[fragmentPos + imm8OfPShufW2] = a | (b << 2) |
739  (c << 4) |
740  (d << 6);
741 
742  if (i + 4 - inc >= dstW)
743  shift = maxShift; // avoid overread
744  else if ((filterPos[i / 2] & 3) <= maxShift)
745  shift = filterPos[i / 2] & 3; // align
746 
747  if (shift && i >= shift) {
748  filterCode[fragmentPos + imm8OfPShufW1] += 0x55 * shift;
749  filterCode[fragmentPos + imm8OfPShufW2] += 0x55 * shift;
750  filterPos[i / 2] -= shift;
751  }
752  }
753 
754  fragmentPos += fragmentLength;
755 
756  if (filterCode)
757  filterCode[fragmentPos] = RET;
758  }
759  xpos += xInc;
760  }
761  if (filterCode)
762  filterPos[((i / 2) + 1) & (~1)] = xpos >> 16; // needed to jump to the next part
763 
764  return fragmentPos + 1;
765 }
766 #endif /* HAVE_MMXEXT_INLINE */
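/* init_hscaler_mmxext() is called twice from sws_init_context(): first with
 * filterCode == NULL so that only the size of the code to be generated is
 * returned, then again, once a writable buffer of that size has been
 * allocated (and later mprotect()ed to executable on the mmap path), to
 * actually emit the fragments. */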
767 
768 static void getSubSampleFactors(int *h, int *v, enum AVPixelFormat format)
769 {
770  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(format);
771  *h = desc->log2_chroma_w;
772  *v = desc->log2_chroma_h;
773 }
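/* Example: for AV_PIX_FMT_YUV420P this yields *h = 1, *v = 1 (chroma is
 * subsampled by 2 in both directions), for AV_PIX_FMT_YUV422P it yields
 * *h = 1, *v = 0, and for packed RGB formats both factors are 0. */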
774 
775 int sws_setColorspaceDetails(struct SwsContext *c, const int inv_table[4],
776  int srcRange, const int table[4], int dstRange,
777  int brightness, int contrast, int saturation)
778 {
779  const AVPixFmtDescriptor *desc_dst = av_pix_fmt_desc_get(c->dstFormat);
780  const AVPixFmtDescriptor *desc_src = av_pix_fmt_desc_get(c->srcFormat);
781  memcpy(c->srcColorspaceTable, inv_table, sizeof(int) * 4);
782  memcpy(c->dstColorspaceTable, table, sizeof(int) * 4);
783 
784  c->brightness = brightness;
785  c->contrast = contrast;
786  c->saturation = saturation;
787  c->srcRange = srcRange;
788  c->dstRange = dstRange;
789  if (isYUV(c->dstFormat) || isGray(c->dstFormat))
790  return -1;
791 
792  c->dstFormatBpp = av_get_bits_per_pixel(desc_dst);
793  c->srcFormatBpp = av_get_bits_per_pixel(desc_src);
794 
795  ff_yuv2rgb_c_init_tables(c, inv_table, srcRange, brightness,
796  contrast, saturation);
797  // FIXME factorize
798 
799  if (HAVE_ALTIVEC && av_get_cpu_flags() & AV_CPU_FLAG_ALTIVEC)
800  ff_yuv2rgb_init_tables_altivec(c, inv_table, brightness,
801  contrast, saturation);
802  return 0;
803 }
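/* Usage sketch (illustrative only): for a context that converts YUV to RGB,
 * the tables can be switched to ITU-R BT.709 coefficients like this, where
 * srcRange/dstRange are the caller's 0 (limited) or 1 (full range) flags,
 * brightness is a 16.16 fixed-point offset and contrast/saturation are 16.16
 * fixed-point gains (0, 1 << 16, 1 << 16 means "no adjustment"):
 *
 *     const int *coeffs = sws_getCoefficients(SWS_CS_ITU709);
 *     sws_setColorspaceDetails(c, coeffs, srcRange, coeffs, dstRange,
 *                              0, 1 << 16, 1 << 16);
 */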
804 
805 int sws_getColorspaceDetails(struct SwsContext *c, int **inv_table,
806  int *srcRange, int **table, int *dstRange,
807  int *brightness, int *contrast, int *saturation)
808 {
809  if (isYUV(c->dstFormat) || isGray(c->dstFormat))
810  return -1;
811 
812  *inv_table = c->srcColorspaceTable;
813  *table = c->dstColorspaceTable;
814  *srcRange = c->srcRange;
815  *dstRange = c->dstRange;
816  *brightness = c->brightness;
817  *contrast = c->contrast;
818  *saturation = c->saturation;
819 
820  return 0;
821 }
822 
823 static int handle_jpeg(enum AVPixelFormat *format)
824 {
825  switch (*format) {
826  case AV_PIX_FMT_YUVJ420P:
827  *format = AV_PIX_FMT_YUV420P;
828  return 1;
829  case AV_PIX_FMT_YUVJ422P:
830  *format = AV_PIX_FMT_YUV422P;
831  return 1;
832  case AV_PIX_FMT_YUVJ444P:
833  *format = AV_PIX_FMT_YUV444P;
834  return 1;
835  case AV_PIX_FMT_YUVJ440P:
836  *format = AV_PIX_FMT_YUV440P;
837  return 1;
838  default:
839  return 0;
840  }
841 }
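/* handle_jpeg() maps the full-range "J" pixel formats onto their limited-range
 * counterparts and reports the range through its return value, which the
 * context constructors store in srcRange/dstRange, e.g.
 *
 *     enum AVPixelFormat fmt = AV_PIX_FMT_YUVJ420P;
 *     int full_range = handle_jpeg(&fmt);
 *     // fmt is now AV_PIX_FMT_YUV420P, full_range == 1
 */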
842 
843 SwsContext *sws_alloc_context(void)
844 {
845  SwsContext *c = av_mallocz(sizeof(SwsContext));
846 
847  if (c) {
848  c->av_class = &sws_context_class;
849  av_opt_set_defaults(c);
850  }
851 
852  return c;
853 }
854 
855 av_cold int sws_init_context(SwsContext *c, SwsFilter *srcFilter,
856  SwsFilter *dstFilter)
857 {
858  int i;
859  int usesVFilter, usesHFilter;
860  int unscaled;
861  SwsFilter dummyFilter = { NULL, NULL, NULL, NULL };
862  int srcW = c->srcW;
863  int srcH = c->srcH;
864  int dstW = c->dstW;
865  int dstH = c->dstH;
866  int dst_stride = FFALIGN(dstW * sizeof(int16_t) + 16, 16);
867  int dst_stride_px = dst_stride >> 1;
868  int flags, cpu_flags;
869  enum AVPixelFormat srcFormat = c->srcFormat;
870  enum AVPixelFormat dstFormat = c->dstFormat;
871  const AVPixFmtDescriptor *desc_src = av_pix_fmt_desc_get(srcFormat);
872  const AVPixFmtDescriptor *desc_dst = av_pix_fmt_desc_get(dstFormat);
873 
874  cpu_flags = av_get_cpu_flags();
875  flags = c->flags;
876  emms_c();
877  if (!rgb15to16)
878  sws_rgb2rgb_init();
879 
880  unscaled = (srcW == dstW && srcH == dstH);
881 
882  if (!sws_isSupportedInput(srcFormat)) {
883  av_log(c, AV_LOG_ERROR, "%s is not supported as input pixel format\n",
884  sws_format_name(srcFormat));
885  return AVERROR(EINVAL);
886  }
887  if (!sws_isSupportedOutput(dstFormat)) {
888  av_log(c, AV_LOG_ERROR, "%s is not supported as output pixel format\n",
889  sws_format_name(dstFormat));
890  return AVERROR(EINVAL);
891  }
892 
893  i = flags & (SWS_POINT |
894  SWS_AREA |
895  SWS_BILINEAR |
896  SWS_FAST_BILINEAR |
897  SWS_BICUBIC |
898  SWS_X |
899  SWS_GAUSS |
900  SWS_LANCZOS |
901  SWS_SINC |
902  SWS_SPLINE |
903  SWS_BICUBLIN);
904  if (!i || (i & (i - 1))) {
905  av_log(c, AV_LOG_ERROR,
906  "Exactly one scaler algorithm must be chosen\n");
907  return AVERROR(EINVAL);
908  }
909  /* sanity check */
910  if (srcW < 4 || srcH < 1 || dstW < 8 || dstH < 1) {
911  /* FIXME check if these are enough and try to lower them after
912  * fixing the relevant parts of the code */
913  av_log(c, AV_LOG_ERROR, "%dx%d -> %dx%d is invalid scaling dimension\n",
914  srcW, srcH, dstW, dstH);
915  return AVERROR(EINVAL);
916  }
917 
918  if (!dstFilter)
919  dstFilter = &dummyFilter;
920  if (!srcFilter)
921  srcFilter = &dummyFilter;
922 
923  c->lumXInc = (((int64_t)srcW << 16) + (dstW >> 1)) / dstW;
924  c->lumYInc = (((int64_t)srcH << 16) + (dstH >> 1)) / dstH;
925  c->dstFormatBpp = av_get_bits_per_pixel(desc_dst);
926  c->srcFormatBpp = av_get_bits_per_pixel(desc_src);
927  c->vRounder = 4 * 0x0001000100010001ULL;
928 
929  usesVFilter = (srcFilter->lumV && srcFilter->lumV->length > 1) ||
930  (srcFilter->chrV && srcFilter->chrV->length > 1) ||
931  (dstFilter->lumV && dstFilter->lumV->length > 1) ||
932  (dstFilter->chrV && dstFilter->chrV->length > 1);
933  usesHFilter = (srcFilter->lumH && srcFilter->lumH->length > 1) ||
934  (srcFilter->chrH && srcFilter->chrH->length > 1) ||
935  (dstFilter->lumH && dstFilter->lumH->length > 1) ||
936  (dstFilter->chrH && dstFilter->chrH->length > 1);
937 
937 
938  getSubSampleFactors(&c->chrSrcHSubSample, &c->chrSrcVSubSample, srcFormat);
939  getSubSampleFactors(&c->chrDstHSubSample, &c->chrDstVSubSample, dstFormat);
940 
941  /* reuse chroma for 2 pixels RGB/BGR unless user wants full
942  * chroma interpolation */
943  if (flags & SWS_FULL_CHR_H_INT &&
944  isAnyRGB(dstFormat) &&
945  dstFormat != AV_PIX_FMT_RGBA &&
946  dstFormat != AV_PIX_FMT_ARGB &&
947  dstFormat != AV_PIX_FMT_BGRA &&
948  dstFormat != AV_PIX_FMT_ABGR &&
949  dstFormat != AV_PIX_FMT_RGB24 &&
950  dstFormat != AV_PIX_FMT_BGR24) {
951  av_log(c, AV_LOG_ERROR,
952  "full chroma interpolation for destination format '%s' not yet implemented\n",
953  sws_format_name(dstFormat));
954  flags &= ~SWS_FULL_CHR_H_INT;
955  c->flags = flags;
956  }
957  if (isAnyRGB(dstFormat) && !(flags & SWS_FULL_CHR_H_INT))
958  c->chrDstHSubSample = 1;
959 
960  // drop some chroma lines if the user wants it
961  c->vChrDrop = (flags & SWS_SRC_V_CHR_DROP_MASK) >>
962  SWS_SRC_V_CHR_DROP_SHIFT;
963  c->chrSrcVSubSample += c->vChrDrop;
964 
965  /* drop every other pixel for chroma calculation unless user
966  * wants full chroma */
967  if (isAnyRGB(srcFormat) && !(flags & SWS_FULL_CHR_H_INP) &&
968  srcFormat != AV_PIX_FMT_RGB8 && srcFormat != AV_PIX_FMT_BGR8 &&
969  srcFormat != AV_PIX_FMT_RGB4 && srcFormat != AV_PIX_FMT_BGR4 &&
970  srcFormat != AV_PIX_FMT_RGB4_BYTE && srcFormat != AV_PIX_FMT_BGR4_BYTE &&
971  ((dstW >> c->chrDstHSubSample) <= (srcW >> 1) ||
972  (flags & SWS_FAST_BILINEAR)))
973  c->chrSrcHSubSample = 1;
974 
975  // Note the -((-x)>>y) is so that we always round toward +inf.
976  c->chrSrcW = -((-srcW) >> c->chrSrcHSubSample);
977  c->chrSrcH = -((-srcH) >> c->chrSrcVSubSample);
978  c->chrDstW = -((-dstW) >> c->chrDstHSubSample);
979  c->chrDstH = -((-dstH) >> c->chrDstVSubSample);
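 /* Example: srcW = 101 with chrSrcHSubSample = 1 gives chrSrcW = 51, whereas
  * a plain right shift would give 50 and drop the chroma of the last column. */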
980 
981  /* unscaled special cases */
982  if (unscaled && !usesHFilter && !usesVFilter &&
983  (c->srcRange == c->dstRange || isAnyRGB(dstFormat))) {
984  ff_get_unscaled_swscale(c);
985 
986  if (c->swScale) {
987  if (flags & SWS_PRINT_INFO)
988  av_log(c, AV_LOG_INFO,
989  "using unscaled %s -> %s special converter\n",
990  sws_format_name(srcFormat), sws_format_name(dstFormat));
991  return 0;
992  }
993  }
994 
995  c->srcBpc = 1 + desc_src->comp[0].depth_minus1;
996  if (c->srcBpc < 8)
997  c->srcBpc = 8;
998  c->dstBpc = 1 + desc_dst->comp[0].depth_minus1;
999  if (c->dstBpc < 8)
1000  c->dstBpc = 8;
1001  if (c->dstBpc == 16)
1002  dst_stride <<= 1;
1003  FF_ALLOC_OR_GOTO(c, c->formatConvBuffer,
1004  (FFALIGN(srcW, 16) * 2 * FFALIGN(c->srcBpc, 8) >> 3) + 16,
1005  fail);
1006  if (INLINE_MMXEXT(cpu_flags) && c->srcBpc == 8 && c->dstBpc <= 10) {
1007  c->canMMXEXTBeUsed = (dstW >= srcW && (dstW & 31) == 0 &&
1008  (srcW & 15) == 0) ? 1 : 0;
1009  if (!c->canMMXEXTBeUsed && dstW >= srcW && (srcW & 15) == 0
1010  && (flags & SWS_FAST_BILINEAR)) {
1011  if (flags & SWS_PRINT_INFO)
1012  av_log(c, AV_LOG_INFO,
1013  "output width is not a multiple of 32 -> no MMXEXT scaler\n");
1014  }
1015  if (usesHFilter)
1016  c->canMMXEXTBeUsed = 0;
1017  } else
1018  c->canMMXEXTBeUsed = 0;
1019 
1020  c->chrXInc = (((int64_t)c->chrSrcW << 16) + (c->chrDstW >> 1)) / c->chrDstW;
1021  c->chrYInc = (((int64_t)c->chrSrcH << 16) + (c->chrDstH >> 1)) / c->chrDstH;
1022 
1023  /* Match pixel 0 of the src to pixel 0 of dst and match pixel n-2 of src
1024  * to pixel n-2 of dst, but only for the FAST_BILINEAR mode otherwise do
1025  * correct scaling.
1026  * n-2 is the last chrominance sample available.
1027  * This is not perfect, but no one should notice the difference, the more
1028  * correct variant would be like the vertical one, but that would require
1029  * some special code for the first and last pixel */
1030  if (flags & SWS_FAST_BILINEAR) {
1031  if (c->canMMXEXTBeUsed) {
1032  c->lumXInc += 20;
1033  c->chrXInc += 20;
1034  }
1035  // we don't use the x86 asm scaler if MMX is available
1036  else if (INLINE_MMX(cpu_flags)) {
1037  c->lumXInc = ((int64_t)(srcW - 2) << 16) / (dstW - 2) - 20;
1038  c->chrXInc = ((int64_t)(c->chrSrcW - 2) << 16) / (c->chrDstW - 2) - 20;
1039  }
1040  }
1041 
1042 #define USE_MMAP (HAVE_MMAP && HAVE_MPROTECT && defined MAP_ANONYMOUS)
1043 
1044  /* precalculate horizontal scaler filter coefficients */
1045  {
1046 #if HAVE_MMXEXT_INLINE
1047 // can't downscale !!!
1048  if (c->canMMXEXTBeUsed && (flags & SWS_FAST_BILINEAR)) {
1049  c->lumMmxextFilterCodeSize = init_hscaler_mmxext(dstW, c->lumXInc, NULL,
1050  NULL, NULL, 8);
1051  c->chrMmxextFilterCodeSize = init_hscaler_mmxext(c->chrDstW, c->chrXInc,
1052  NULL, NULL, NULL, 4);
1053 
1054 #if USE_MMAP
1055  c->lumMmxextFilterCode = mmap(NULL, c->lumMmxextFilterCodeSize,
1056  PROT_READ | PROT_WRITE,
1057  MAP_PRIVATE | MAP_ANONYMOUS,
1058  -1, 0);
1059  c->chrMmxextFilterCode = mmap(NULL, c->chrMmxextFilterCodeSize,
1060  PROT_READ | PROT_WRITE,
1061  MAP_PRIVATE | MAP_ANONYMOUS,
1062  -1, 0);
1063 #elif HAVE_VIRTUALALLOC
1064  c->lumMmxextFilterCode = VirtualAlloc(NULL,
1065  c->lumMmxextFilterCodeSize,
1066  MEM_COMMIT,
1067  PAGE_EXECUTE_READWRITE);
1068  c->chrMmxextFilterCode = VirtualAlloc(NULL,
1069  c->chrMmxextFilterCodeSize,
1070  MEM_COMMIT,
1071  PAGE_EXECUTE_READWRITE);
1072 #else
1073  c->lumMmxextFilterCode = av_malloc(c->lumMmxextFilterCodeSize);
1074  c->chrMmxextFilterCode = av_malloc(c->chrMmxextFilterCodeSize);
1075 #endif
1076 
1077  if (!c->lumMmxextFilterCode || !c->chrMmxextFilterCode)
1078  return AVERROR(ENOMEM);
1079  FF_ALLOCZ_OR_GOTO(c, c->hLumFilter, (dstW / 8 + 8) * sizeof(int16_t), fail);
1080  FF_ALLOCZ_OR_GOTO(c, c->hChrFilter, (c->chrDstW / 4 + 8) * sizeof(int16_t), fail);
1081  FF_ALLOCZ_OR_GOTO(c, c->hLumFilterPos, (dstW / 2 / 8 + 8) * sizeof(int32_t), fail);
1082  FF_ALLOCZ_OR_GOTO(c, c->hChrFilterPos, (c->chrDstW / 2 / 4 + 8) * sizeof(int32_t), fail);
1083 
1084  init_hscaler_mmxext(dstW, c->lumXInc, c->lumMmxextFilterCode,
1085  c->hLumFilter, c->hLumFilterPos, 8);
1086  init_hscaler_mmxext(c->chrDstW, c->chrXInc, c->chrMmxextFilterCode,
1087  c->hChrFilter, c->hChrFilterPos, 4);
1088 
1089 #if USE_MMAP
1090  mprotect(c->lumMmxextFilterCode, c->lumMmxextFilterCodeSize, PROT_EXEC | PROT_READ);
1091  mprotect(c->chrMmxextFilterCode, c->chrMmxextFilterCodeSize, PROT_EXEC | PROT_READ);
1092 #endif
1093  } else
1094 #endif /* HAVE_MMXEXT_INLINE */
1095  {
1096  const int filterAlign =
1097  (HAVE_MMX && cpu_flags & AV_CPU_FLAG_MMX) ? 4 :
1098  (HAVE_ALTIVEC && cpu_flags & AV_CPU_FLAG_ALTIVEC) ? 8 :
1099  1;
1100 
1101  if (initFilter(&c->hLumFilter, &c->hLumFilterPos,
1102  &c->hLumFilterSize, c->lumXInc,
1103  srcW, dstW, filterAlign, 1 << 14,
1104  (flags & SWS_BICUBLIN) ? (flags | SWS_BICUBIC) : flags,
1105  cpu_flags, srcFilter->lumH, dstFilter->lumH,
1106  c->param, 1) < 0)
1107  goto fail;
1108  if (initFilter(&c->hChrFilter, &c->hChrFilterPos,
1109  &c->hChrFilterSize, c->chrXInc,
1110  c->chrSrcW, c->chrDstW, filterAlign, 1 << 14,
1111  (flags & SWS_BICUBLIN) ? (flags | SWS_BILINEAR) : flags,
1112  cpu_flags, srcFilter->chrH, dstFilter->chrH,
1113  c->param, 1) < 0)
1114  goto fail;
1115  }
1116  } // initialize horizontal stuff
1117 
1118  /* precalculate vertical scaler filter coefficients */
1119  {
1120  const int filterAlign =
1121  (HAVE_MMX && cpu_flags & AV_CPU_FLAG_MMX) ? 2 :
1122  (HAVE_ALTIVEC && cpu_flags & AV_CPU_FLAG_ALTIVEC) ? 8 :
1123  1;
1124 
1125  if (initFilter(&c->vLumFilter, &c->vLumFilterPos, &c->vLumFilterSize,
1126  c->lumYInc, srcH, dstH, filterAlign, (1 << 12),
1127  (flags & SWS_BICUBLIN) ? (flags | SWS_BICUBIC) : flags,
1128  cpu_flags, srcFilter->lumV, dstFilter->lumV,
1129  c->param, 0) < 0)
1130  goto fail;
1131  if (initFilter(&c->vChrFilter, &c->vChrFilterPos, &c->vChrFilterSize,
1132  c->chrYInc, c->chrSrcH, c->chrDstH,
1133  filterAlign, (1 << 12),
1134  (flags & SWS_BICUBLIN) ? (flags | SWS_BILINEAR) : flags,
1135  cpu_flags, srcFilter->chrV, dstFilter->chrV,
1136  c->param, 0) < 0)
1137  goto fail;
1138 
1139 #if HAVE_ALTIVEC
1140  FF_ALLOC_OR_GOTO(c, c->vYCoeffsBank, sizeof(vector signed short) * c->vLumFilterSize * c->dstH, fail);
1141  FF_ALLOC_OR_GOTO(c, c->vCCoeffsBank, sizeof(vector signed short) * c->vChrFilterSize * c->chrDstH, fail);
1142 
1143  for (i = 0; i < c->vLumFilterSize * c->dstH; i++) {
1144  int j;
1145  short *p = (short *)&c->vYCoeffsBank[i];
1146  for (j = 0; j < 8; j++)
1147  p[j] = c->vLumFilter[i];
1148  }
1149 
1150  for (i = 0; i < c->vChrFilterSize * c->chrDstH; i++) {
1151  int j;
1152  short *p = (short *)&c->vCCoeffsBank[i];
1153  for (j = 0; j < 8; j++)
1154  p[j] = c->vChrFilter[i];
1155  }
1156 #endif
1157  }
1158 
1159  // calculate buffer sizes so that they won't run out while handling these damn slices
1160  c->vLumBufSize = c->vLumFilterSize;
1161  c->vChrBufSize = c->vChrFilterSize;
1162  for (i = 0; i < dstH; i++) {
1163  int chrI = (int64_t)i * c->chrDstH / dstH;
1164  int nextSlice = FFMAX(c->vLumFilterPos[i] + c->vLumFilterSize - 1,
1165  ((c->vChrFilterPos[chrI] + c->vChrFilterSize - 1)
1166  << c->chrSrcVSubSample));
1167 
1168  nextSlice >>= c->chrSrcVSubSample;
1169  nextSlice <<= c->chrSrcVSubSample;
1170  if (c->vLumFilterPos[i] + c->vLumBufSize < nextSlice)
1171  c->vLumBufSize = nextSlice - c->vLumFilterPos[i];
1172  if (c->vChrFilterPos[chrI] + c->vChrBufSize <
1173  (nextSlice >> c->chrSrcVSubSample))
1174  c->vChrBufSize = (nextSlice >> c->chrSrcVSubSample) -
1175  c->vChrFilterPos[chrI];
1176  }
1177 
1178  /* Allocate pixbufs (we use dynamic allocation because otherwise we would
1179  * need to allocate several megabytes to handle all possible cases) */
1180  FF_ALLOC_OR_GOTO(c, c->lumPixBuf, c->vLumBufSize * 3 * sizeof(int16_t *), fail);
1181  FF_ALLOC_OR_GOTO(c, c->chrUPixBuf, c->vChrBufSize * 3 * sizeof(int16_t *), fail);
1182  FF_ALLOC_OR_GOTO(c, c->chrVPixBuf, c->vChrBufSize * 3 * sizeof(int16_t *), fail);
1183  if (CONFIG_SWSCALE_ALPHA && isALPHA(c->srcFormat) && isALPHA(c->dstFormat))
1184  FF_ALLOCZ_OR_GOTO(c, c->alpPixBuf, c->vLumBufSize * 3 * sizeof(int16_t *), fail);
1185  /* Note we need at least one pixel more at the end because of the MMX code
1186  * (just in case someone wants to replace the 4000/8000). */
1187  /* align at 16 bytes for AltiVec */
1188  for (i = 0; i < c->vLumBufSize; i++) {
1189  FF_ALLOCZ_OR_GOTO(c, c->lumPixBuf[i + c->vLumBufSize],
1190  dst_stride + 16, fail);
1191  c->lumPixBuf[i] = c->lumPixBuf[i + c->vLumBufSize];
1192  }
1193  // 64 / (c->dstBpc & ~7) is the same as 16 / sizeof(scaling_intermediate)
1194  c->uv_off_px = dst_stride_px + 64 / (c->dstBpc & ~7);
1195  c->uv_off_byte = dst_stride + 16;
1196  for (i = 0; i < c->vChrBufSize; i++) {
1197  FF_ALLOC_OR_GOTO(c, c->chrUPixBuf[i + c->vChrBufSize],
1198  dst_stride * 2 + 32, fail);
1199  c->chrUPixBuf[i] = c->chrUPixBuf[i + c->vChrBufSize];
1200  c->chrVPixBuf[i] = c->chrVPixBuf[i + c->vChrBufSize]
1201  = c->chrUPixBuf[i] + (dst_stride >> 1) + 8;
1202  }
1203  if (CONFIG_SWSCALE_ALPHA && c->alpPixBuf)
1204  for (i = 0; i < c->vLumBufSize; i++) {
1205  FF_ALLOCZ_OR_GOTO(c, c->alpPixBuf[i + c->vLumBufSize],
1206  dst_stride + 16, fail);
1207  c->alpPixBuf[i] = c->alpPixBuf[i + c->vLumBufSize];
1208  }
1209 
1210  // try to avoid drawing green stuff between the right end and the stride end
1211  for (i = 0; i < c->vChrBufSize; i++)
1212  memset(c->chrUPixBuf[i], 64, dst_stride * 2 + 1);
1213 
1214  assert(c->chrDstH <= dstH);
1215 
1216  if (flags & SWS_PRINT_INFO) {
1217  if (flags & SWS_FAST_BILINEAR)
1218  av_log(c, AV_LOG_INFO, "FAST_BILINEAR scaler, ");
1219  else if (flags & SWS_BILINEAR)
1220  av_log(c, AV_LOG_INFO, "BILINEAR scaler, ");
1221  else if (flags & SWS_BICUBIC)
1222  av_log(c, AV_LOG_INFO, "BICUBIC scaler, ");
1223  else if (flags & SWS_X)
1224  av_log(c, AV_LOG_INFO, "Experimental scaler, ");
1225  else if (flags & SWS_POINT)
1226  av_log(c, AV_LOG_INFO, "Nearest Neighbor / POINT scaler, ");
1227  else if (flags & SWS_AREA)
1228  av_log(c, AV_LOG_INFO, "Area Averaging scaler, ");
1229  else if (flags & SWS_BICUBLIN)
1230  av_log(c, AV_LOG_INFO, "luma BICUBIC / chroma BILINEAR scaler, ");
1231  else if (flags & SWS_GAUSS)
1232  av_log(c, AV_LOG_INFO, "Gaussian scaler, ");
1233  else if (flags & SWS_SINC)
1234  av_log(c, AV_LOG_INFO, "Sinc scaler, ");
1235  else if (flags & SWS_LANCZOS)
1236  av_log(c, AV_LOG_INFO, "Lanczos scaler, ");
1237  else if (flags & SWS_SPLINE)
1238  av_log(c, AV_LOG_INFO, "Bicubic spline scaler, ");
1239  else
1240  av_log(c, AV_LOG_INFO, "ehh flags invalid?! ");
1241 
1242  av_log(c, AV_LOG_INFO, "from %s to %s%s ",
1243  sws_format_name(srcFormat),
1244 #ifdef DITHER1XBPP
1245  dstFormat == AV_PIX_FMT_BGR555 || dstFormat == AV_PIX_FMT_BGR565 ||
1246  dstFormat == AV_PIX_FMT_RGB444BE || dstFormat == AV_PIX_FMT_RGB444LE ||
1247  dstFormat == AV_PIX_FMT_BGR444BE || dstFormat == AV_PIX_FMT_BGR444LE ?
1248  "dithered " : "",
1249 #else
1250  "",
1251 #endif
1252  sws_format_name(dstFormat));
1253 
1254  if (INLINE_MMXEXT(cpu_flags))
1255  av_log(c, AV_LOG_INFO, "using MMXEXT\n");
1256  else if (INLINE_AMD3DNOW(cpu_flags))
1257  av_log(c, AV_LOG_INFO, "using 3DNOW\n");
1258  else if (INLINE_MMX(cpu_flags))
1259  av_log(c, AV_LOG_INFO, "using MMX\n");
1260  else if (HAVE_ALTIVEC && cpu_flags & AV_CPU_FLAG_ALTIVEC)
1261  av_log(c, AV_LOG_INFO, "using AltiVec\n");
1262  else
1263  av_log(c, AV_LOG_INFO, "using C\n");
1264 
1265  av_log(c, AV_LOG_VERBOSE, "%dx%d -> %dx%d\n", srcW, srcH, dstW, dstH);
1266  av_log(c, AV_LOG_DEBUG,
1267  "lum srcW=%d srcH=%d dstW=%d dstH=%d xInc=%d yInc=%d\n",
1268  c->srcW, c->srcH, c->dstW, c->dstH, c->lumXInc, c->lumYInc);
1269  av_log(c, AV_LOG_DEBUG,
1270  "chr srcW=%d srcH=%d dstW=%d dstH=%d xInc=%d yInc=%d\n",
1271  c->chrSrcW, c->chrSrcH, c->chrDstW, c->chrDstH,
1272  c->chrXInc, c->chrYInc);
1273  }
1274 
1275  c->swScale = ff_getSwsFunc(c);
1276  return 0;
1277 fail: // FIXME replace things by appropriate error codes
1278  return -1;
1279 }
1280 
1281 #if FF_API_SWS_GETCONTEXT
1282 SwsContext *sws_getContext(int srcW, int srcH, enum AVPixelFormat srcFormat,
1283  int dstW, int dstH, enum AVPixelFormat dstFormat,
1284  int flags, SwsFilter *srcFilter,
1285  SwsFilter *dstFilter, const double *param)
1286 {
1287  SwsContext *c;
1288 
1289  if (!(c = sws_alloc_context()))
1290  return NULL;
1291 
1292  c->flags = flags;
1293  c->srcW = srcW;
1294  c->srcH = srcH;
1295  c->dstW = dstW;
1296  c->dstH = dstH;
1297  c->srcRange = handle_jpeg(&srcFormat);
1298  c->dstRange = handle_jpeg(&dstFormat);
1299  c->srcFormat = srcFormat;
1300  c->dstFormat = dstFormat;
1301 
1302  if (param) {
1303  c->param[0] = param[0];
1304  c->param[1] = param[1];
1305  }
1306  sws_setColorspaceDetails(c, ff_yuv2rgb_coeffs[SWS_CS_DEFAULT], c->srcRange,
1307  ff_yuv2rgb_coeffs[SWS_CS_DEFAULT] /* FIXME*/,
1308  c->dstRange, 0, 1 << 16, 1 << 16);
1309 
1310  if (sws_init_context(c, srcFilter, dstFilter) < 0) {
1311  sws_freeContext(c);
1312  return NULL;
1313  }
1314 
1315  return c;
1316 }
1317 #endif
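/* Typical use of sws_getContext() (kept under FF_API_SWS_GETCONTEXT above),
 * as a sketch with error handling omitted; in_w/in_h/out_w/out_h and the
 * data/linesize arrays are caller-provided:
 *
 *     struct SwsContext *sws =
 *         sws_getContext(in_w, in_h, AV_PIX_FMT_YUV420P,
 *                        out_w, out_h, AV_PIX_FMT_RGB24,
 *                        SWS_BICUBIC, NULL, NULL, NULL);
 *     sws_scale(sws, (const uint8_t * const *)src_data, src_linesize,
 *               0, in_h, dst_data, dst_linesize);
 *     sws_freeContext(sws);
 */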
1318 
1319 SwsFilter *sws_getDefaultFilter(float lumaGBlur, float chromaGBlur,
1320  float lumaSharpen, float chromaSharpen,
1321  float chromaHShift, float chromaVShift,
1322  int verbose)
1323 {
1324  SwsFilter *filter = av_malloc(sizeof(SwsFilter));
1325  if (!filter)
1326  return NULL;
1327 
1328  if (lumaGBlur != 0.0) {
1329  filter->lumH = sws_getGaussianVec(lumaGBlur, 3.0);
1330  filter->lumV = sws_getGaussianVec(lumaGBlur, 3.0);
1331  } else {
1332  filter->lumH = sws_getIdentityVec();
1333  filter->lumV = sws_getIdentityVec();
1334  }
1335 
1336  if (chromaGBlur != 0.0) {
1337  filter->chrH = sws_getGaussianVec(chromaGBlur, 3.0);
1338  filter->chrV = sws_getGaussianVec(chromaGBlur, 3.0);
1339  } else {
1340  filter->chrH = sws_getIdentityVec();
1341  filter->chrV = sws_getIdentityVec();
1342  }
1343 
1344  if (chromaSharpen != 0.0) {
1345  SwsVector *id = sws_getIdentityVec();
1346  sws_scaleVec(filter->chrH, -chromaSharpen);
1347  sws_scaleVec(filter->chrV, -chromaSharpen);
1348  sws_addVec(filter->chrH, id);
1349  sws_addVec(filter->chrV, id);
1350  sws_freeVec(id);
1351  }
1352 
1353  if (lumaSharpen != 0.0) {
1354  SwsVector *id = sws_getIdentityVec();
1355  sws_scaleVec(filter->lumH, -lumaSharpen);
1356  sws_scaleVec(filter->lumV, -lumaSharpen);
1357  sws_addVec(filter->lumH, id);
1358  sws_addVec(filter->lumV, id);
1359  sws_freeVec(id);
1360  }
1361 
1362  if (chromaHShift != 0.0)
1363  sws_shiftVec(filter->chrH, (int)(chromaHShift + 0.5));
1364 
1365  if (chromaVShift != 0.0)
1366  sws_shiftVec(filter->chrV, (int)(chromaVShift + 0.5));
1367 
1368  sws_normalizeVec(filter->chrH, 1.0);
1369  sws_normalizeVec(filter->chrV, 1.0);
1370  sws_normalizeVec(filter->lumH, 1.0);
1371  sws_normalizeVec(filter->lumV, 1.0);
1372 
1373  if (verbose)
1374  sws_printVec2(filter->chrH, NULL, AV_LOG_DEBUG);
1375  if (verbose)
1376  sws_printVec2(filter->lumH, NULL, AV_LOG_DEBUG);
1377 
1378  return filter;
1379 }
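/* Note on the construction above: sharpening is obtained by negating the
 * (Gaussian or identity) vector, scaling it by the sharpen amount and adding
 * back an identity impulse, i.e. an unsharp-mask style kernel; the final
 * sws_normalizeVec() calls keep each vector's DC gain at 1.0 so overall
 * brightness is preserved. */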
1380 
1381 SwsVector *sws_allocVec(int length)
1382 {
1383  SwsVector *vec = av_malloc(sizeof(SwsVector));
1384  if (!vec)
1385  return NULL;
1386  vec->length = length;
1387  vec->coeff = av_malloc(sizeof(double) * length);
1388  if (!vec->coeff)
1389  av_freep(&vec);
1390  return vec;
1391 }
1392 
1393 SwsVector *sws_getGaussianVec(double variance, double quality)
1394 {
1395  const int length = (int)(variance * quality + 0.5) | 1;
1396  int i;
1397  double middle = (length - 1) * 0.5;
1398  SwsVector *vec = sws_allocVec(length);
1399 
1400  if (!vec)
1401  return NULL;
1402 
1403  for (i = 0; i < length; i++) {
1404  double dist = i - middle;
1405  vec->coeff[i] = exp(-dist * dist / (2 * variance * variance)) /
1406  sqrt(2 * variance * M_PI);
1407  }
1408 
1409  sws_normalizeVec(vec, 1.0);
1410 
1411  return vec;
1412 }
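/* The vector length is variance * quality rounded and forced odd so the
 * kernel has a well-defined center tap; e.g. variance = 2.0 with quality =
 * 3.0 gives a 7-tap Gaussian. The taps follow exp(-dist^2 / (2 * variance^2))
 * and are then normalized to sum to 1.0. */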
1413 
1414 SwsVector *sws_getConstVec(double c, int length)
1415 {
1416  int i;
1417  SwsVector *vec = sws_allocVec(length);
1418 
1419  if (!vec)
1420  return NULL;
1421 
1422  for (i = 0; i < length; i++)
1423  vec->coeff[i] = c;
1424 
1425  return vec;
1426 }
1427 
1428 SwsVector *sws_getIdentityVec(void)
1429 {
1430  return sws_getConstVec(1.0, 1);
1431 }
1432 
1433 static double sws_dcVec(SwsVector *a)
1434 {
1435  int i;
1436  double sum = 0;
1437 
1438  for (i = 0; i < a->length; i++)
1439  sum += a->coeff[i];
1440 
1441  return sum;
1442 }
1443 
1444 void sws_scaleVec(SwsVector *a, double scalar)
1445 {
1446  int i;
1447 
1448  for (i = 0; i < a->length; i++)
1449  a->coeff[i] *= scalar;
1450 }
1451 
1452 void sws_normalizeVec(SwsVector *a, double height)
1453 {
1454  sws_scaleVec(a, height / sws_dcVec(a));
1455 }
1456 
1457 static SwsVector *sws_getConvVec(SwsVector *a, SwsVector *b)
1458 {
1459  int length = a->length + b->length - 1;
1460  int i, j;
1461  SwsVector *vec = sws_getConstVec(0.0, length);
1462 
1463  if (!vec)
1464  return NULL;
1465 
1466  for (i = 0; i < a->length; i++) {
1467  for (j = 0; j < b->length; j++) {
1468  vec->coeff[i + j] += a->coeff[i] * b->coeff[j];
1469  }
1470  }
1471 
1472  return vec;
1473 }
1474 
1475 static SwsVector *sws_sumVec(SwsVector *a, SwsVector *b)
1476 {
1477  int length = FFMAX(a->length, b->length);
1478  int i;
1479  SwsVector *vec = sws_getConstVec(0.0, length);
1480 
1481  if (!vec)
1482  return NULL;
1483 
1484  for (i = 0; i < a->length; i++)
1485  vec->coeff[i + (length - 1) / 2 - (a->length - 1) / 2] += a->coeff[i];
1486  for (i = 0; i < b->length; i++)
1487  vec->coeff[i + (length - 1) / 2 - (b->length - 1) / 2] += b->coeff[i];
1488 
1489  return vec;
1490 }
1491 
1492 static SwsVector *sws_diffVec(SwsVector *a, SwsVector *b)
1493 {
1494  int length = FFMAX(a->length, b->length);
1495  int i;
1496  SwsVector *vec = sws_getConstVec(0.0, length);
1497 
1498  if (!vec)
1499  return NULL;
1500 
1501  for (i = 0; i < a->length; i++)
1502  vec->coeff[i + (length - 1) / 2 - (a->length - 1) / 2] += a->coeff[i];
1503  for (i = 0; i < b->length; i++)
1504  vec->coeff[i + (length - 1) / 2 - (b->length - 1) / 2] -= b->coeff[i];
1505 
1506  return vec;
1507 }
1508 
1509 /* shift left / or right if "shift" is negative */
1510 static SwsVector *sws_getShiftedVec(SwsVector *a, int shift)
1511 {
1512  int length = a->length + FFABS(shift) * 2;
1513  int i;
1514  SwsVector *vec = sws_getConstVec(0.0, length);
1515 
1516  if (!vec)
1517  return NULL;
1518 
1519  for (i = 0; i < a->length; i++) {
1520  vec->coeff[i + (length - 1) / 2 -
1521  (a->length - 1) / 2 - shift] = a->coeff[i];
1522  }
1523 
1524  return vec;
1525 }
1526 
1527 void sws_shiftVec(SwsVector *a, int shift)
1528 {
1529  SwsVector *shifted = sws_getShiftedVec(a, shift);
1530  av_free(a->coeff);
1531  a->coeff = shifted->coeff;
1532  a->length = shifted->length;
1533  av_free(shifted);
1534 }
1535 
1536 void sws_addVec(SwsVector *a, SwsVector *b)
1537 {
1538  SwsVector *sum = sws_sumVec(a, b);
1539  av_free(a->coeff);
1540  a->coeff = sum->coeff;
1541  a->length = sum->length;
1542  av_free(sum);
1543 }
1544 
1545 void sws_subVec(SwsVector *a, SwsVector *b)
1546 {
1547  SwsVector *diff = sws_diffVec(a, b);
1548  av_free(a->coeff);
1549  a->coeff = diff->coeff;
1550  a->length = diff->length;
1551  av_free(diff);
1552 }
1553 
1554 void sws_convVec(SwsVector *a, SwsVector *b)
1555 {
1556  SwsVector *conv = sws_getConvVec(a, b);
1557  av_free(a->coeff);
1558  a->coeff = conv->coeff;
1559  a->length = conv->length;
1560  av_free(conv);
1561 }
1562 
1563 SwsVector *sws_cloneVec(SwsVector *a)
1564 {
1565  int i;
1566  SwsVector *vec = sws_allocVec(a->length);
1567 
1568  if (!vec)
1569  return NULL;
1570 
1571  for (i = 0; i < a->length; i++)
1572  vec->coeff[i] = a->coeff[i];
1573 
1574  return vec;
1575 }
1576 
1577 void sws_printVec2(SwsVector *a, AVClass *log_ctx, int log_level)
1578 {
1579  int i;
1580  double max = 0;
1581  double min = 0;
1582  double range;
1583 
1584  for (i = 0; i < a->length; i++)
1585  if (a->coeff[i] > max)
1586  max = a->coeff[i];
1587 
1588  for (i = 0; i < a->length; i++)
1589  if (a->coeff[i] < min)
1590  min = a->coeff[i];
1591 
1592  range = max - min;
1593 
1594  for (i = 0; i < a->length; i++) {
1595  int x = (int)((a->coeff[i] - min) * 60.0 / range + 0.5);
1596  av_log(log_ctx, log_level, "%1.3f ", a->coeff[i]);
1597  for (; x > 0; x--)
1598  av_log(log_ctx, log_level, " ");
1599  av_log(log_ctx, log_level, "|\n");
1600  }
1601 }
1602 
1603 void sws_freeVec(SwsVector *a)
1604 {
1605  if (!a)
1606  return;
1607  av_freep(&a->coeff);
1608  a->length = 0;
1609  av_free(a);
1610 }
1611 
1612 void sws_freeFilter(SwsFilter *filter)
1613 {
1614  if (!filter)
1615  return;
1616 
1617  if (filter->lumH)
1618  sws_freeVec(filter->lumH);
1619  if (filter->lumV)
1620  sws_freeVec(filter->lumV);
1621  if (filter->chrH)
1622  sws_freeVec(filter->chrH);
1623  if (filter->chrV)
1624  sws_freeVec(filter->chrV);
1625  av_free(filter);
1626 }
1627 
1628 void sws_freeContext(SwsContext *c)
1629 {
1630  int i;
1631  if (!c)
1632  return;
1633 
1634  if (c->lumPixBuf) {
1635  for (i = 0; i < c->vLumBufSize; i++)
1636  av_freep(&c->lumPixBuf[i]);
1637  av_freep(&c->lumPixBuf);
1638  }
1639 
1640  if (c->chrUPixBuf) {
1641  for (i = 0; i < c->vChrBufSize; i++)
1642  av_freep(&c->chrUPixBuf[i]);
1643  av_freep(&c->chrUPixBuf);
1644  av_freep(&c->chrVPixBuf);
1645  }
1646 
1647  if (CONFIG_SWSCALE_ALPHA && c->alpPixBuf) {
1648  for (i = 0; i < c->vLumBufSize; i++)
1649  av_freep(&c->alpPixBuf[i]);
1650  av_freep(&c->alpPixBuf);
1651  }
1652 
1653  av_freep(&c->vLumFilter);
1654  av_freep(&c->vChrFilter);
1655  av_freep(&c->hLumFilter);
1656  av_freep(&c->hChrFilter);
1657 #if HAVE_ALTIVEC
1658  av_freep(&c->vYCoeffsBank);
1659  av_freep(&c->vCCoeffsBank);
1660 #endif
1661 
1662  av_freep(&c->vLumFilterPos);
1663  av_freep(&c->vChrFilterPos);
1664  av_freep(&c->hLumFilterPos);
1665  av_freep(&c->hChrFilterPos);
1666 
1667 #if HAVE_MMX_INLINE
1668 #if USE_MMAP
1669  if (c->lumMmxextFilterCode)
1670  munmap(c->lumMmxextFilterCode, c->lumMmxextFilterCodeSize);
1671  if (c->chrMmxextFilterCode)
1672  munmap(c->chrMmxextFilterCode, c->chrMmxextFilterCodeSize);
1673 #elif HAVE_VIRTUALALLOC
1674  if (c->lumMmxextFilterCode)
1675  VirtualFree(c->lumMmxextFilterCode, 0, MEM_RELEASE);
1676  if (c->chrMmxextFilterCode)
1677  VirtualFree(c->chrMmxextFilterCode, 0, MEM_RELEASE);
1678 #else
1679  av_free(c->lumMmxextFilterCode);
1680  av_free(c->chrMmxextFilterCode);
1681 #endif
1682  c->lumMmxextFilterCode = NULL;
1683  c->chrMmxextFilterCode = NULL;
1684 #endif /* HAVE_MMX_INLINE */
1685 
1686  av_freep(&c->yuvTable);
1687  av_freep(&c->formatConvBuffer);
1688 
1689  av_free(c);
1690 }
1691 
1692 struct SwsContext *sws_getCachedContext(struct SwsContext *context, int srcW,
1693  int srcH, enum AVPixelFormat srcFormat,
1694  int dstW, int dstH,
1695  enum AVPixelFormat dstFormat, int flags,
1696  SwsFilter *srcFilter,
1697  SwsFilter *dstFilter,
1698  const double *param)
1699 {
1700  static const double default_param[2] = { SWS_PARAM_DEFAULT,
1701  SWS_PARAM_DEFAULT };
1702 
1703  if (!param)
1704  param = default_param;
1705 
1706  if (context &&
1707  (context->srcW != srcW ||
1708  context->srcH != srcH ||
1709  context->srcFormat != srcFormat ||
1710  context->dstW != dstW ||
1711  context->dstH != dstH ||
1712  context->dstFormat != dstFormat ||
1713  context->flags != flags ||
1714  context->param[0] != param[0] ||
1715  context->param[1] != param[1])) {
1716  sws_freeContext(context);
1717  context = NULL;
1718  }
1719 
1720  if (!context) {
1721  if (!(context = sws_alloc_context()))
1722  return NULL;
1723  context->srcW = srcW;
1724  context->srcH = srcH;
1725  context->srcRange = handle_jpeg(&srcFormat);
1726  context->srcFormat = srcFormat;
1727  context->dstW = dstW;
1728  context->dstH = dstH;
1729  context->dstRange = handle_jpeg(&dstFormat);
1730  context->dstFormat = dstFormat;
1731  context->flags = flags;
1732  context->param[0] = param[0];
1733  context->param[1] = param[1];
1734  sws_setColorspaceDetails(context, ff_yuv2rgb_coeffs[SWS_CS_DEFAULT],
1735  context->srcRange,
1736  ff_yuv2rgb_coeffs[SWS_CS_DEFAULT] /* FIXME*/,
1737  context->dstRange, 0, 1 << 16, 1 << 16);
1738  if (sws_init_context(context, srcFilter, dstFilter) < 0) {
1739  sws_freeContext(context);
1740  return NULL;
1741  }
1742  }
1743  return context;
1744 }
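/* sws_getCachedContext() is intended for callers whose conversion parameters
 * may change from frame to frame, e.g.
 *
 *     sws = sws_getCachedContext(sws, w, h, fmt, out_w, out_h, out_fmt,
 *                                SWS_BILINEAR, NULL, NULL, NULL);
 *
 * The existing context is reused when nothing changed and freed/recreated
 * otherwise; the caller still owns the result and must eventually release it
 * with sws_freeContext(). */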