svq3.c
1 /*
2  * Copyright (c) 2003 The Libav Project
3  *
4  * This file is part of Libav.
5  *
6  * Libav is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * Libav is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with Libav; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /*
22  * How to use this decoder:
23  * SVQ3 data is transported within Apple Quicktime files. Quicktime files
24  * have stsd atoms to describe media trak properties. A stsd atom for a
25  * video trak contains 1 or more ImageDescription atoms. These atoms begin
26  * with the 4-byte length of the atom followed by the codec fourcc. Some
27  * decoders need information in this atom to operate correctly. Such
28  * is the case with SVQ3. In order to get the best use out of this decoder,
29  * the calling app must make the SVQ3 ImageDescription atom available
30  * via the AVCodecContext's extradata[_size] field:
31  *
32  * AVCodecContext.extradata = pointer to ImageDescription, first characters
33  * are expected to be 'S', 'V', 'Q', and '3', NOT the 4-byte atom length
34  * AVCodecContext.extradata_size = size of ImageDescription atom memory
35  * buffer (which will be the same as the ImageDescription atom size field
36  * from the QT file, minus 4 bytes since the length is missing)
37  *
38  * You will know you have these parameters passed correctly when the decoder
39  * correctly decodes this file:
40  * http://samples.libav.org/V-codecs/SVQ3/Vertical400kbit.sorenson3.mov
41  */
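/*
 * Editor's sketch (not part of the original file): one way a calling
 * application could hand the SVQ3 ImageDescription atom to the decoder as
 * described above.  The `imagedesc` / `imagedesc_size` parameters are
 * hypothetical; they stand for the stsd ImageDescription payload with the
 * 4-byte length field already stripped, so the buffer starts with 'SVQ3'.
 */
#if 0
static int svq3_setup_extradata(AVCodecContext *avctx,
                                const uint8_t *imagedesc, int imagedesc_size)
{
    /* copy the atom (minus its length field) into padded extradata */
    avctx->extradata = av_mallocz(imagedesc_size + FF_INPUT_BUFFER_PADDING_SIZE);
    if (!avctx->extradata)
        return AVERROR(ENOMEM);
    memcpy(avctx->extradata, imagedesc, imagedesc_size);
    avctx->extradata_size = imagedesc_size;
    return 0;
}
#endif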
42 #include "internal.h"
43 #include "dsputil.h"
44 #include "avcodec.h"
45 #include "mpegvideo.h"
46 #include "h264.h"
47 
48 #include "h264data.h" // FIXME FIXME FIXME
49 
50 #include "h264_mvpred.h"
51 #include "golomb.h"
52 #include "rectangle.h"
53 #include "vdpau_internal.h"
54 
55 #if CONFIG_ZLIB
56 #include <zlib.h>
57 #endif
58 
59 #include "svq1.h"
60 
66 typedef struct {
67  H264Context h;
68  int halfpel_flag;
69  int thirdpel_flag;
70  int unknown_flag;
71  int next_slice_index;
72  uint32_t watermark_key;
73 } SVQ3Context;
74 
75 #define FULLPEL_MODE 1
76 #define HALFPEL_MODE 2
77 #define THIRDPEL_MODE 3
78 #define PREDICT_MODE 4
79 
80 /* dual scan (from some older h264 draft)
81  *  o-->o-->o   o
82  *          |  /|
83  *  o   o   o / o
84  *  | / |   |/  |
85  *  o   o   o   o
86  *    /
87  *  o-->o-->o-->o
88  */
89 static const uint8_t svq3_scan[16] = {
90  0 + 0 * 4, 1 + 0 * 4, 2 + 0 * 4, 2 + 1 * 4,
91  2 + 2 * 4, 3 + 0 * 4, 3 + 1 * 4, 3 + 2 * 4,
92  0 + 1 * 4, 0 + 2 * 4, 1 + 1 * 4, 1 + 2 * 4,
93  0 + 3 * 4, 1 + 3 * 4, 2 + 3 * 4, 3 + 3 * 4,
94 };
95 
96 static const uint8_t svq3_pred_0[25][2] = {
97  { 0, 0 },
98  { 1, 0 }, { 0, 1 },
99  { 0, 2 }, { 1, 1 }, { 2, 0 },
100  { 3, 0 }, { 2, 1 }, { 1, 2 }, { 0, 3 },
101  { 0, 4 }, { 1, 3 }, { 2, 2 }, { 3, 1 }, { 4, 0 },
102  { 4, 1 }, { 3, 2 }, { 2, 3 }, { 1, 4 },
103  { 2, 4 }, { 3, 3 }, { 4, 2 },
104  { 4, 3 }, { 3, 4 },
105  { 4, 4 }
106 };
107 
108 static const int8_t svq3_pred_1[6][6][5] = {
109  { { 2, -1, -1, -1, -1 }, { 2, 1, -1, -1, -1 }, { 1, 2, -1, -1, -1 },
110  { 2, 1, -1, -1, -1 }, { 1, 2, -1, -1, -1 }, { 1, 2, -1, -1, -1 } },
111  { { 0, 2, -1, -1, -1 }, { 0, 2, 1, 4, 3 }, { 0, 1, 2, 4, 3 },
112  { 0, 2, 1, 4, 3 }, { 2, 0, 1, 3, 4 }, { 0, 4, 2, 1, 3 } },
113  { { 2, 0, -1, -1, -1 }, { 2, 1, 0, 4, 3 }, { 1, 2, 4, 0, 3 },
114  { 2, 1, 0, 4, 3 }, { 2, 1, 4, 3, 0 }, { 1, 2, 4, 0, 3 } },
115  { { 2, 0, -1, -1, -1 }, { 2, 0, 1, 4, 3 }, { 1, 2, 0, 4, 3 },
116  { 2, 1, 0, 4, 3 }, { 2, 1, 3, 4, 0 }, { 2, 4, 1, 0, 3 } },
117  { { 0, 2, -1, -1, -1 }, { 0, 2, 1, 3, 4 }, { 1, 2, 3, 0, 4 },
118  { 2, 0, 1, 3, 4 }, { 2, 1, 3, 0, 4 }, { 2, 0, 4, 3, 1 } },
119  { { 0, 2, -1, -1, -1 }, { 0, 2, 4, 1, 3 }, { 1, 4, 2, 0, 3 },
120  { 4, 2, 0, 1, 3 }, { 2, 0, 1, 4, 3 }, { 4, 2, 1, 0, 3 } },
121 };
122 
123 static const struct {
124  uint8_t run;
125  uint8_t level;
126 } svq3_dct_tables[2][16] = {
127  { { 0, 0 }, { 0, 1 }, { 1, 1 }, { 2, 1 }, { 0, 2 }, { 3, 1 }, { 4, 1 }, { 5, 1 },
128  { 0, 3 }, { 1, 2 }, { 2, 2 }, { 6, 1 }, { 7, 1 }, { 8, 1 }, { 9, 1 }, { 0, 4 } },
129  { { 0, 0 }, { 0, 1 }, { 1, 1 }, { 0, 2 }, { 2, 1 }, { 0, 3 }, { 0, 4 }, { 0, 5 },
130  { 3, 1 }, { 4, 1 }, { 1, 2 }, { 1, 3 }, { 0, 6 }, { 0, 7 }, { 0, 8 }, { 0, 9 } }
131 };
132 
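/*
 * Editor's note: svq3_dequant_coeff[qp] is a Q20 fixed-point scale; the
 * IDCT code below computes (coef * qmul + 0x80000) >> 20, i.e. it rounds
 * with 2^19 before dropping the 20 fractional bits.  The table roughly
 * doubles every six qp steps (7761 ~= 2 * 3881), mirroring H.264's
 * quantizer scaling.
 */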
133 static const uint32_t svq3_dequant_coeff[32] = {
134  3881, 4351, 4890, 5481, 6154, 6914, 7761, 8718,
135  9781, 10987, 12339, 13828, 15523, 17435, 19561, 21873,
136  24552, 27656, 30847, 34870, 38807, 43747, 49103, 54683,
137  61694, 68745, 77615, 89113, 100253, 109366, 126635, 141533
138 };
139 
140 void ff_svq3_luma_dc_dequant_idct_c(DCTELEM *output, DCTELEM *input, int qp)
141 {
142  const int qmul = svq3_dequant_coeff[qp];
143 #define stride 16
144  int i;
145  int temp[16];
146  static const uint8_t x_offset[4] = { 0, 1 * stride, 4 * stride, 5 * stride };
147 
148  for (i = 0; i < 4; i++) {
149  const int z0 = 13 * (input[4 * i + 0] + input[4 * i + 2]);
150  const int z1 = 13 * (input[4 * i + 0] - input[4 * i + 2]);
151  const int z2 = 7 * input[4 * i + 1] - 17 * input[4 * i + 3];
152  const int z3 = 17 * input[4 * i + 1] + 7 * input[4 * i + 3];
153 
154  temp[4 * i + 0] = z0 + z3;
155  temp[4 * i + 1] = z1 + z2;
156  temp[4 * i + 2] = z1 - z2;
157  temp[4 * i + 3] = z0 - z3;
158  }
159 
160  for (i = 0; i < 4; i++) {
161  const int offset = x_offset[i];
162  const int z0 = 13 * (temp[4 * 0 + i] + temp[4 * 2 + i]);
163  const int z1 = 13 * (temp[4 * 0 + i] - temp[4 * 2 + i]);
164  const int z2 = 7 * temp[4 * 1 + i] - 17 * temp[4 * 3 + i];
165  const int z3 = 17 * temp[4 * 1 + i] + 7 * temp[4 * 3 + i];
166 
167  output[stride * 0 + offset] = (z0 + z3) * qmul + 0x80000 >> 20;
168  output[stride * 2 + offset] = (z1 + z2) * qmul + 0x80000 >> 20;
169  output[stride * 8 + offset] = (z1 - z2) * qmul + 0x80000 >> 20;
170  output[stride * 10 + offset] = (z0 - z3) * qmul + 0x80000 >> 20;
171  }
172 }
173 #undef stride
174 
175 void ff_svq3_add_idct_c(uint8_t *dst, DCTELEM *block,
176  int stride, int qp, int dc)
177 {
178  const int qmul = svq3_dequant_coeff[qp];
179  int i;
180 
181  if (dc) {
182  dc = 13 * 13 * (dc == 1 ? 1538 * block[0]
183  : qmul * (block[0] >> 3) / 2);
184  block[0] = 0;
185  }
186 
187  for (i = 0; i < 4; i++) {
188  const int z0 = 13 * (block[0 + 4 * i] + block[2 + 4 * i]);
189  const int z1 = 13 * (block[0 + 4 * i] - block[2 + 4 * i]);
190  const int z2 = 7 * block[1 + 4 * i] - 17 * block[3 + 4 * i];
191  const int z3 = 17 * block[1 + 4 * i] + 7 * block[3 + 4 * i];
192 
193  block[0 + 4 * i] = z0 + z3;
194  block[1 + 4 * i] = z1 + z2;
195  block[2 + 4 * i] = z1 - z2;
196  block[3 + 4 * i] = z0 - z3;
197  }
198 
199  for (i = 0; i < 4; i++) {
200  const int z0 = 13 * (block[i + 4 * 0] + block[i + 4 * 2]);
201  const int z1 = 13 * (block[i + 4 * 0] - block[i + 4 * 2]);
202  const int z2 = 7 * block[i + 4 * 1] - 17 * block[i + 4 * 3];
203  const int z3 = 17 * block[i + 4 * 1] + 7 * block[i + 4 * 3];
204  const int rr = (dc + 0x80000);
205 
206  dst[i + stride * 0] = av_clip_uint8(dst[i + stride * 0] + ((z0 + z3) * qmul + rr >> 20));
207  dst[i + stride * 1] = av_clip_uint8(dst[i + stride * 1] + ((z1 + z2) * qmul + rr >> 20));
208  dst[i + stride * 2] = av_clip_uint8(dst[i + stride * 2] + ((z1 - z2) * qmul + rr >> 20));
209  dst[i + stride * 3] = av_clip_uint8(dst[i + stride * 3] + ((z0 - z3) * qmul + rr >> 20));
210  }
211 }
212 
213 static inline int svq3_decode_block(GetBitContext *gb, DCTELEM *block,
214  int index, const int type)
215 {
216  static const uint8_t *const scan_patterns[4] =
217  { luma_dc_zigzag_scan, zigzag_scan, svq3_scan, chroma_dc_scan };
218 
219  int run, level, limit;
220  unsigned vlc;
221  const int intra = 3 * type >> 2;
222  const uint8_t *const scan = scan_patterns[type];
223 
224  for (limit = (16 >> intra); index < 16; index = limit, limit += 8) {
225  for (; (vlc = svq3_get_ue_golomb(gb)) != 0; index++) {
226  int sign = (vlc & 1) ? 0 : -1;
227  vlc = vlc + 1 >> 1;
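 /* Editor's note: the Golomb code interleaves sign and magnitude; odd
  * values decode to positive levels, even values to negative ones, and
  * "(level ^ sign) - sign" below applies the sign to the level. */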
228 
229  if (type == 3) {
230  if (vlc < 3) {
231  run = 0;
232  level = vlc;
233  } else if (vlc < 4) {
234  run = 1;
235  level = 1;
236  } else {
237  run = vlc & 0x3;
238  level = (vlc + 9 >> 2) - run;
239  }
240  } else {
241  if (vlc < 16) {
242  run = svq3_dct_tables[intra][vlc].run;
243  level = svq3_dct_tables[intra][vlc].level;
244  } else if (intra) {
245  run = vlc & 0x7;
246  level = (vlc >> 3) +
247  ((run == 0) ? 8 : ((run < 2) ? 2 : ((run < 5) ? 0 : -1)));
248  } else {
249  run = vlc & 0xF;
250  level = (vlc >> 4) +
251  ((run == 0) ? 4 : ((run < 3) ? 2 : ((run < 10) ? 1 : 0)));
252  }
253  }
254 
255  if ((index += run) >= limit)
256  return -1;
257 
258  block[scan[index]] = (level ^ sign) - sign;
259  }
260 
261  if (type != 2) {
262  break;
263  }
264  }
265 
266  return 0;
267 }
268 
269 static inline void svq3_mc_dir_part(MpegEncContext *s,
270  int x, int y, int width, int height,
271  int mx, int my, int dxy,
272  int thirdpel, int dir, int avg)
273 {
274  const Picture *pic = (dir == 0) ? &s->last_picture : &s->next_picture;
275  uint8_t *src, *dest;
276  int i, emu = 0;
277  int blocksize = 2 - (width >> 3); // 16->0, 8->1, 4->2
278 
279  mx += x;
280  my += y;
281 
282  if (mx < 0 || mx >= s->h_edge_pos - width - 1 ||
283  my < 0 || my >= s->v_edge_pos - height - 1) {
284  if ((s->flags & CODEC_FLAG_EMU_EDGE))
285  emu = 1;
286 
287  mx = av_clip(mx, -16, s->h_edge_pos - width + 15);
288  my = av_clip(my, -16, s->v_edge_pos - height + 15);
289  }
290 
291  /* form component predictions */
292  dest = s->current_picture.f.data[0] + x + y * s->linesize;
293  src = pic->f.data[0] + mx + my * s->linesize;
294 
295  if (emu) {
296  s->dsp.emulated_edge_mc(s->edge_emu_buffer, src, s->linesize,
297  width + 1, height + 1,
298  mx, my, s->h_edge_pos, s->v_edge_pos);
299  src = s->edge_emu_buffer;
300  }
301  if (thirdpel)
302  (avg ? s->dsp.avg_tpel_pixels_tab
303  : s->dsp.put_tpel_pixels_tab)[dxy](dest, src, s->linesize,
304  width, height);
305  else
306  (avg ? s->dsp.avg_pixels_tab
307  : s->dsp.put_pixels_tab)[blocksize][dxy](dest, src, s->linesize,
308  height);
309 
310  if (!(s->flags & CODEC_FLAG_GRAY)) {
311  mx = mx + (mx < (int) x) >> 1;
312  my = my + (my < (int) y) >> 1;
313  width = width >> 1;
314  height = height >> 1;
315  blocksize++;
316 
317  for (i = 1; i < 3; i++) {
318  dest = s->current_picture.f.data[i] + (x >> 1) + (y >> 1) * s->uvlinesize;
319  src = pic->f.data[i] + mx + my * s->uvlinesize;
320 
321  if (emu) {
322  s->dsp.emulated_edge_mc(s->edge_emu_buffer, src, s->uvlinesize,
323  width + 1, height + 1,
324  mx, my, (s->h_edge_pos >> 1),
325  s->v_edge_pos >> 1);
326  src = s->edge_emu_buffer;
327  }
328  if (thirdpel)
329  (avg ? s->dsp.avg_tpel_pixels_tab
330  : s->dsp.put_tpel_pixels_tab)[dxy](dest, src,
331  s->uvlinesize,
332  width, height);
333  else
334  (avg ? s->dsp.avg_pixels_tab
335  : s->dsp.put_pixels_tab)[blocksize][dxy](dest, src,
336  s->uvlinesize,
337  height);
338  }
339  }
340 }
341 
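/*
 * Editor's note: svq3_mc_dir() keeps motion vectors in 1/6-pel units
 * (hence the factor 6 in h_edge_pos/v_edge_pos and the final *6 / *3 / *2
 * rescaling); each mode converts the prediction to full-, half- or
 * third-pel resolution before calling svq3_mc_dir_part().
 */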
342 static inline int svq3_mc_dir(H264Context *h, int size, int mode,
343  int dir, int avg)
344 {
345  int i, j, k, mx, my, dx, dy, x, y;
346  MpegEncContext *const s = (MpegEncContext *)h;
347  const int part_width = ((size & 5) == 4) ? 4 : 16 >> (size & 1);
348  const int part_height = 16 >> ((unsigned)(size + 1) / 3);
349  const int extra_width = (mode == PREDICT_MODE) ? -16 * 6 : 0;
350  const int h_edge_pos = 6 * (s->h_edge_pos - part_width) - extra_width;
351  const int v_edge_pos = 6 * (s->v_edge_pos - part_height) - extra_width;
352 
353  for (i = 0; i < 16; i += part_height)
354  for (j = 0; j < 16; j += part_width) {
355  const int b_xy = (4 * s->mb_x + (j >> 2)) +
356  (4 * s->mb_y + (i >> 2)) * h->b_stride;
357  int dxy;
358  x = 16 * s->mb_x + j;
359  y = 16 * s->mb_y + i;
360  k = (j >> 2 & 1) + (i >> 1 & 2) +
361  (j >> 1 & 4) + (i & 8);
362 
363  if (mode != PREDICT_MODE) {
364  pred_motion(h, k, part_width >> 2, dir, 1, &mx, &my);
365  } else {
366  mx = s->next_picture.f.motion_val[0][b_xy][0] << 1;
367  my = s->next_picture.f.motion_val[0][b_xy][1] << 1;
368 
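 /* Editor's note: for PREDICT_MODE the co-located MV of the next
  * reference picture is scaled by the ratio of temporal distances
  * (frame_num offsets); the "+ 1 >> 1" rounds the result to nearest. */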
369  if (dir == 0) {
370  mx = mx * h->frame_num_offset /
371  h->prev_frame_num_offset + 1 >> 1;
372  my = my * h->frame_num_offset /
373  h->prev_frame_num_offset + 1 >> 1;
374  } else {
375  mx = mx * (h->frame_num_offset - h->prev_frame_num_offset) /
376  h->prev_frame_num_offset + 1 >> 1;
377  my = my * (h->frame_num_offset - h->prev_frame_num_offset) /
378  h->prev_frame_num_offset + 1 >> 1;
379  }
380  }
381 
382  /* clip motion vector prediction to frame border */
383  mx = av_clip(mx, extra_width - 6 * x, h_edge_pos - 6 * x);
384  my = av_clip(my, extra_width - 6 * y, v_edge_pos - 6 * y);
385 
386  /* get (optional) motion vector differential */
387  if (mode == PREDICT_MODE) {
388  dx = dy = 0;
389  } else {
390  dy = svq3_get_se_golomb(&s->gb);
391  dx = svq3_get_se_golomb(&s->gb);
392 
393  if (dx == INVALID_VLC || dy == INVALID_VLC) {
394  av_log(h->s.avctx, AV_LOG_ERROR, "invalid MV vlc\n");
395  return -1;
396  }
397  }
398 
399  /* compute motion vector */
400  if (mode == THIRDPEL_MODE) {
401  int fx, fy;
402  mx = (mx + 1 >> 1) + dx;
403  my = (my + 1 >> 1) + dy;
404  fx = (unsigned)(mx + 0x3000) / 3 - 0x1000;
405  fy = (unsigned)(my + 0x3000) / 3 - 0x1000;
406  dxy = (mx - 3 * fx) + 4 * (my - 3 * fy);
407 
408  svq3_mc_dir_part(s, x, y, part_width, part_height,
409  fx, fy, dxy, 1, dir, avg);
410  mx += mx;
411  my += my;
412  } else if (mode == HALFPEL_MODE || mode == PREDICT_MODE) {
413  mx = (unsigned)(mx + 1 + 0x3000) / 3 + dx - 0x1000;
414  my = (unsigned)(my + 1 + 0x3000) / 3 + dy - 0x1000;
415  dxy = (mx & 1) + 2 * (my & 1);
416 
417  svq3_mc_dir_part(s, x, y, part_width, part_height,
418  mx >> 1, my >> 1, dxy, 0, dir, avg);
419  mx *= 3;
420  my *= 3;
421  } else {
422  mx = (unsigned)(mx + 3 + 0x6000) / 6 + dx - 0x1000;
423  my = (unsigned)(my + 3 + 0x6000) / 6 + dy - 0x1000;
424 
425  svq3_mc_dir_part(s, x, y, part_width, part_height,
426  mx, my, 0, 0, dir, avg);
427  mx *= 6;
428  my *= 6;
429  }
430 
431  /* update mv_cache */
432  if (mode != PREDICT_MODE) {
433  int32_t mv = pack16to32(mx, my);
434 
435  if (part_height == 8 && i < 8) {
436  AV_WN32A(h->mv_cache[dir][scan8[k] + 1 * 8], mv);
437 
438  if (part_width == 8 && j < 8)
439  AV_WN32A(h->mv_cache[dir][scan8[k] + 1 + 1 * 8], mv);
440  }
441  if (part_width == 8 && j < 8)
442  AV_WN32A(h->mv_cache[dir][scan8[k] + 1], mv);
443  if (part_width == 4 || part_height == 4)
444  AV_WN32A(h->mv_cache[dir][scan8[k]], mv);
445  }
446 
447  /* write back motion vectors */
448  fill_rectangle(s->current_picture.f.motion_val[dir][b_xy],
449  part_width >> 2, part_height >> 2, h->b_stride,
450  pack16to32(mx, my), 4);
451  }
452 
453  return 0;
454 }
455 
456 static int svq3_decode_mb(SVQ3Context *svq3, unsigned int mb_type)
457 {
458  H264Context *h = &svq3->h;
459  int i, j, k, m, dir, mode;
460  int cbp = 0;
461  uint32_t vlc;
462  int8_t *top, *left;
463  MpegEncContext *const s = (MpegEncContext *)h;
464  const int mb_xy = h->mb_xy;
465  const int b_xy = 4 * s->mb_x + 4 * s->mb_y * h->b_stride;
466 
467  h->top_samples_available = (s->mb_y == 0) ? 0x33FF : 0xFFFF;
468  h->left_samples_available = (s->mb_x == 0) ? 0x5F5F : 0xFFFF;
469  h->topright_samples_available = 0xFFFF;
470 
471  if (mb_type == 0) { /* SKIP */
472  if (s->pict_type == AV_PICTURE_TYPE_P ||
473  s->next_picture.f.mb_type[mb_xy] == -1) {
474  svq3_mc_dir_part(s, 16 * s->mb_x, 16 * s->mb_y, 16, 16,
475  0, 0, 0, 0, 0, 0);
476 
477  if (s->pict_type == AV_PICTURE_TYPE_B)
478  svq3_mc_dir_part(s, 16 * s->mb_x, 16 * s->mb_y, 16, 16,
479  0, 0, 0, 0, 1, 1);
480 
481  mb_type = MB_TYPE_SKIP;
482  } else {
483  mb_type = FFMIN(s->next_picture.f.mb_type[mb_xy], 6);
484  if (svq3_mc_dir(h, mb_type, PREDICT_MODE, 0, 0) < 0)
485  return -1;
486  if (svq3_mc_dir(h, mb_type, PREDICT_MODE, 1, 1) < 0)
487  return -1;
488 
489  mb_type = MB_TYPE_16x16;
490  }
491  } else if (mb_type < 8) { /* INTER */
492  if (svq3->thirdpel_flag && svq3->halfpel_flag == !get_bits1(&s->gb))
493  mode = THIRDPEL_MODE;
494  else if (svq3->halfpel_flag &&
495  svq3->thirdpel_flag == !get_bits1(&s->gb))
496  mode = HALFPEL_MODE;
497  else
498  mode = FULLPEL_MODE;
499 
500  /* fill caches */
501  /* note ref_cache should contain here:
502  * ????????
503  * ???11111
504  * N??11111
505  * N??11111
506  * N??11111
507  */
508 
509  for (m = 0; m < 2; m++) {
510  if (s->mb_x > 0 && h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - 1] + 6] != -1) {
511  for (i = 0; i < 4; i++)
512  AV_COPY32(h->mv_cache[m][scan8[0] - 1 + i * 8],
513  s->current_picture.f.motion_val[m][b_xy - 1 + i * h->b_stride]);
514  } else {
515  for (i = 0; i < 4; i++)
516  AV_ZERO32(h->mv_cache[m][scan8[0] - 1 + i * 8]);
517  }
518  if (s->mb_y > 0) {
519  memcpy(h->mv_cache[m][scan8[0] - 1 * 8],
520  s->current_picture.f.motion_val[m][b_xy - h->b_stride],
521  4 * 2 * sizeof(int16_t));
522  memset(&h->ref_cache[m][scan8[0] - 1 * 8],
523  (h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - s->mb_stride]] == -1) ? PART_NOT_AVAILABLE : 1, 4);
524 
525  if (s->mb_x < s->mb_width - 1) {
526  AV_COPY32(h->mv_cache[m][scan8[0] + 4 - 1 * 8],
527  s->current_picture.f.motion_val[m][b_xy - h->b_stride + 4]);
528  h->ref_cache[m][scan8[0] + 4 - 1 * 8] =
529  (h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - s->mb_stride + 1] + 6] == -1 ||
530  h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - s->mb_stride]] == -1) ? PART_NOT_AVAILABLE : 1;
531  } else
532  h->ref_cache[m][scan8[0] + 4 - 1 * 8] = PART_NOT_AVAILABLE;
533  if (s->mb_x > 0) {
534  AV_COPY32(h->mv_cache[m][scan8[0] - 1 - 1 * 8],
535  s->current_picture.f.motion_val[m][b_xy - h->b_stride - 1]);
536  h->ref_cache[m][scan8[0] - 1 - 1 * 8] =
537  (h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - s->mb_stride - 1] + 3] == -1) ? PART_NOT_AVAILABLE : 1;
538  } else
539  h->ref_cache[m][scan8[0] - 1 - 1 * 8] = PART_NOT_AVAILABLE;
540  } else
541  memset(&h->ref_cache[m][scan8[0] - 1 * 8 - 1],
542  PART_NOT_AVAILABLE, 8);
543 
544  if (s->pict_type != AV_PICTURE_TYPE_B)
545  break;
546  }
547 
548  /* decode motion vector(s) and form prediction(s) */
549  if (s->pict_type == AV_PICTURE_TYPE_P) {
550  if (svq3_mc_dir(h, mb_type - 1, mode, 0, 0) < 0)
551  return -1;
552  } else { /* AV_PICTURE_TYPE_B */
553  if (mb_type != 2) {
554  if (svq3_mc_dir(h, 0, mode, 0, 0) < 0)
555  return -1;
556  } else {
557  for (i = 0; i < 4; i++)
558  memset(s->current_picture.f.motion_val[0][b_xy + i * h->b_stride],
559  0, 4 * 2 * sizeof(int16_t));
560  }
561  if (mb_type != 1) {
562  if (svq3_mc_dir(h, 0, mode, 1, mb_type == 3) < 0)
563  return -1;
564  } else {
565  for (i = 0; i < 4; i++)
566  memset(s->current_picture.f.motion_val[1][b_xy + i * h->b_stride],
567  0, 4 * 2 * sizeof(int16_t));
568  }
569  }
570 
571  mb_type = MB_TYPE_16x16;
572  } else if (mb_type == 8 || mb_type == 33) { /* INTRA4x4 */
573  memset(h->intra4x4_pred_mode_cache, -1, 8 * 5 * sizeof(int8_t));
574 
575  if (mb_type == 8) {
576  if (s->mb_x > 0) {
577  for (i = 0; i < 4; i++)
578  h->intra4x4_pred_mode_cache[scan8[0] - 1 + i * 8] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - 1] + 6 - i];
579  if (h->intra4x4_pred_mode_cache[scan8[0] - 1] == -1)
580  h->left_samples_available = 0x5F5F;
581  }
582  if (s->mb_y > 0) {
583  h->intra4x4_pred_mode_cache[4 + 8 * 0] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - s->mb_stride] + 0];
584  h->intra4x4_pred_mode_cache[5 + 8 * 0] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - s->mb_stride] + 1];
585  h->intra4x4_pred_mode_cache[6 + 8 * 0] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - s->mb_stride] + 2];
586  h->intra4x4_pred_mode_cache[7 + 8 * 0] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - s->mb_stride] + 3];
587 
588  if (h->intra4x4_pred_mode_cache[4 + 8 * 0] == -1)
589  h->top_samples_available = 0x33FF;
590  }
591 
592  /* decode prediction codes for luma blocks */
593  for (i = 0; i < 16; i += 2) {
594  vlc = svq3_get_ue_golomb(&s->gb);
595 
596  if (vlc >= 25) {
597  av_log(h->s.avctx, AV_LOG_ERROR, "luma prediction:%d\n", vlc);
598  return -1;
599  }
600 
601  left = &h->intra4x4_pred_mode_cache[scan8[i] - 1];
602  top = &h->intra4x4_pred_mode_cache[scan8[i] - 8];
603 
604  left[1] = svq3_pred_1[top[0] + 1][left[0] + 1][svq3_pred_0[vlc][0]];
605  left[2] = svq3_pred_1[top[1] + 1][left[1] + 1][svq3_pred_0[vlc][1]];
606 
607  if (left[1] == -1 || left[2] == -1) {
608  av_log(h->s.avctx, AV_LOG_ERROR, "weird prediction\n");
609  return -1;
610  }
611  }
612  } else { /* mb_type == 33, DC_128_PRED block type */
613  for (i = 0; i < 4; i++)
614  memset(&h->intra4x4_pred_mode_cache[scan8[0] + 8 * i], DC_PRED, 4);
615  }
616 
617  write_back_intra_pred_mode(h);
618 
619  if (mb_type == 8) {
620  ff_h264_check_intra4x4_pred_mode(h);
621 
622  h->top_samples_available = (s->mb_y == 0) ? 0x33FF : 0xFFFF;
623  h->left_samples_available = (s->mb_x == 0) ? 0x5F5F : 0xFFFF;
624  } else {
625  for (i = 0; i < 4; i++)
626  memset(&h->intra4x4_pred_mode_cache[scan8[0] + 8 * i], DC_128_PRED, 4);
627 
628  h->top_samples_available = 0x33FF;
629  h->left_samples_available = 0x5F5F;
630  }
631 
632  mb_type = MB_TYPE_INTRA4x4;
633  } else { /* INTRA16x16 */
634  dir = i_mb_type_info[mb_type - 8].pred_mode;
635  dir = (dir >> 1) ^ 3 * (dir & 1) ^ 1;
636 
637  if ((h->intra16x16_pred_mode = ff_h264_check_intra_pred_mode(h, dir, 0)) < 0) {
638  av_log(h->s.avctx, AV_LOG_ERROR, "ff_h264_check_intra_pred_mode < 0\n");
639  return h->intra16x16_pred_mode;
640  }
641 
642  cbp = i_mb_type_info[mb_type - 8].cbp;
643  mb_type = MB_TYPE_INTRA16x16;
644  }
645 
646  if (!IS_INTER(mb_type) && s->pict_type != AV_PICTURE_TYPE_I) {
647  for (i = 0; i < 4; i++)
648  memset(s->current_picture.f.motion_val[0][b_xy + i * h->b_stride],
649  0, 4 * 2 * sizeof(int16_t));
650  if (s->pict_type == AV_PICTURE_TYPE_B) {
651  for (i = 0; i < 4; i++)
652  memset(s->current_picture.f.motion_val[1][b_xy + i * h->b_stride],
653  0, 4 * 2 * sizeof(int16_t));
654  }
655  }
656  if (!IS_INTRA4x4(mb_type)) {
657  memset(h->intra4x4_pred_mode + h->mb2br_xy[mb_xy], DC_PRED, 8);
658  }
659  if (!IS_SKIP(mb_type) || s->pict_type == AV_PICTURE_TYPE_B) {
660  memset(h->non_zero_count_cache + 8, 0, 14 * 8 * sizeof(uint8_t));
661  s->dsp.clear_blocks(h->mb + 0);
662  s->dsp.clear_blocks(h->mb + 384);
663  }
664 
665  if (!IS_INTRA16x16(mb_type) &&
666  (!IS_SKIP(mb_type) || s->pict_type == AV_PICTURE_TYPE_B)) {
667  if ((vlc = svq3_get_ue_golomb(&s->gb)) >= 48) {
668  av_log(h->s.avctx, AV_LOG_ERROR, "cbp_vlc=%d\n", vlc);
669  return -1;
670  }
671 
672  cbp = IS_INTRA(mb_type) ? golomb_to_intra4x4_cbp[vlc]
673  : golomb_to_inter_cbp[vlc];
674  }
675  if (IS_INTRA16x16(mb_type) ||
676  (s->pict_type != AV_PICTURE_TYPE_I && s->adaptive_quant && cbp)) {
677  s->qscale += svq3_get_se_golomb(&s->gb);
678 
679  if (s->qscale > 31u) {
680  av_log(h->s.avctx, AV_LOG_ERROR, "qscale:%d\n", s->qscale);
681  return -1;
682  }
683  }
684  if (IS_INTRA16x16(mb_type)) {
685  AV_ZERO128(h->mb_luma_dc[0] + 0);
686  AV_ZERO128(h->mb_luma_dc[0] + 8);
687  if (svq3_decode_block(&s->gb, h->mb_luma_dc[0], 0, 1)) {
688  av_log(h->s.avctx, AV_LOG_ERROR,
689  "error while decoding intra luma dc\n");
690  return -1;
691  }
692  }
693 
694  if (cbp) {
695  const int index = IS_INTRA16x16(mb_type) ? 1 : 0;
696  const int type = ((s->qscale < 24 && IS_INTRA4x4(mb_type)) ? 2 : 1);
697 
698  for (i = 0; i < 4; i++)
699  if ((cbp & (1 << i))) {
700  for (j = 0; j < 4; j++) {
701  k = index ? (1 * (j & 1) + 2 * (i & 1) +
702  2 * (j & 2) + 4 * (i & 2))
703  : (4 * i + j);
704  h->non_zero_count_cache[scan8[k]] = 1;
705 
706  if (svq3_decode_block(&s->gb, &h->mb[16 * k], index, type)) {
707  av_log(h->s.avctx, AV_LOG_ERROR,
708  "error while decoding block\n");
709  return -1;
710  }
711  }
712  }
713 
714  if ((cbp & 0x30)) {
715  for (i = 1; i < 3; ++i)
716  if (svq3_decode_block(&s->gb, &h->mb[16 * 16 * i], 0, 3)) {
717  av_log(h->s.avctx, AV_LOG_ERROR,
718  "error while decoding chroma dc block\n");
719  return -1;
720  }
721 
722  if ((cbp & 0x20)) {
723  for (i = 1; i < 3; i++) {
724  for (j = 0; j < 4; j++) {
725  k = 16 * i + j;
726  h->non_zero_count_cache[scan8[k]] = 1;
727 
728  if (svq3_decode_block(&s->gb, &h->mb[16 * k], 1, 1)) {
729  av_log(h->s.avctx, AV_LOG_ERROR,
730  "error while decoding chroma ac block\n");
731  return -1;
732  }
733  }
734  }
735  }
736  }
737  }
738 
739  h->cbp = cbp;
740  s->current_picture.f.mb_type[mb_xy] = mb_type;
741 
742  if (IS_INTRA(mb_type))
743  h->chroma_pred_mode = ff_h264_check_intra_pred_mode(h, DC_PRED8x8, 1);
744 
745  return 0;
746 }
747 
748 static int svq3_decode_slice_header(AVCodecContext *avctx)
749 {
750  SVQ3Context *svq3 = avctx->priv_data;
751  H264Context *h = &svq3->h;
752  MpegEncContext *s = &h->s;
753  const int mb_xy = h->mb_xy;
754  int i, header;
755  unsigned slice_id;
756 
757  header = get_bits(&s->gb, 8);
758 
759  if (((header & 0x9F) != 1 && (header & 0x9F) != 2) || (header & 0x60) == 0) {
760  /* TODO: what? */
761  av_log(avctx, AV_LOG_ERROR, "unsupported slice header (%02X)\n", header);
762  return -1;
763  } else {
764  int length = header >> 5 & 3;
765 
766  svq3->next_slice_index = get_bits_count(&s->gb) +
767  8 * show_bits(&s->gb, 8 * length) +
768  8 * length;
769 
770  if (svq3->next_slice_index > s->gb.size_in_bits) {
771  av_log(avctx, AV_LOG_ERROR, "slice after bitstream end\n");
772  return -1;
773  }
774 
775  s->gb.size_in_bits = svq3->next_slice_index - 8 * (length - 1);
776  skip_bits(&s->gb, 8);
777 
778  if (svq3->watermark_key) {
779  uint32_t header = AV_RL32(&s->gb.buffer[(get_bits_count(&s->gb) >> 3) + 1]);
780  AV_WL32(&s->gb.buffer[(get_bits_count(&s->gb) >> 3) + 1],
781  header ^ svq3->watermark_key);
782  }
783  if (length > 0) {
784  memcpy((uint8_t *) &s->gb.buffer[get_bits_count(&s->gb) >> 3],
785  &s->gb.buffer[s->gb.size_in_bits >> 3], length - 1);
786  }
787  skip_bits_long(&s->gb, 0);
788  }
789 
790  if ((slice_id = svq3_get_ue_golomb(&s->gb)) >= 3) {
791  av_log(h->s.avctx, AV_LOG_ERROR, "illegal slice type %d \n", slice_id);
792  return -1;
793  }
794 
795  h->slice_type = golomb_to_pict_type[slice_id];
796 
797  if ((header & 0x9F) == 2) {
798  i = (s->mb_num < 64) ? 6 : (1 + av_log2(s->mb_num - 1));
799  s->mb_skip_run = get_bits(&s->gb, i) -
800  (s->mb_y * s->mb_width + s->mb_x);
801  } else {
802  skip_bits1(&s->gb);
803  s->mb_skip_run = 0;
804  }
805 
806  h->slice_num = get_bits(&s->gb, 8);
807  s->qscale = get_bits(&s->gb, 5);
808  s->adaptive_quant = get_bits1(&s->gb);
809 
810  /* unknown fields */
811  skip_bits1(&s->gb);
812 
813  if (svq3->unknown_flag)
814  skip_bits1(&s->gb);
815 
816  skip_bits1(&s->gb);
817  skip_bits(&s->gb, 2);
818 
819  while (get_bits1(&s->gb))
820  skip_bits(&s->gb, 8);
821 
822  /* reset intra predictors and invalidate motion vector references */
823  if (s->mb_x > 0) {
824  memset(h->intra4x4_pred_mode + h->mb2br_xy[mb_xy - 1] + 3,
825  -1, 4 * sizeof(int8_t));
826  memset(h->intra4x4_pred_mode + h->mb2br_xy[mb_xy - s->mb_x],
827  -1, 8 * sizeof(int8_t) * s->mb_x);
828  }
829  if (s->mb_y > 0) {
830  memset(h->intra4x4_pred_mode + h->mb2br_xy[mb_xy - s->mb_stride],
831  -1, 8 * sizeof(int8_t) * (s->mb_width - s->mb_x));
832 
833  if (s->mb_x > 0)
834  h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - s->mb_stride - 1] + 3] = -1;
835  }
836 
837  return 0;
838 }
839 
840 static av_cold int svq3_decode_init(AVCodecContext *avctx)
841 {
842  SVQ3Context *svq3 = avctx->priv_data;
843  H264Context *h = &svq3->h;
844  MpegEncContext *s = &h->s;
845  int m;
846  unsigned char *extradata;
847  unsigned char *extradata_end;
848  unsigned int size;
849  int marker_found = 0;
850 
851  if (ff_h264_decode_init(avctx) < 0)
852  return -1;
853 
854  s->flags = avctx->flags;
855  s->flags2 = avctx->flags2;
856  s->unrestricted_mv = 1;
857  h->is_complex = 1;
858  avctx->pix_fmt = avctx->codec->pix_fmts[0];
859 
860  if (!s->context_initialized) {
861  h->chroma_qp[0] = h->chroma_qp[1] = 4;
862 
863  svq3->halfpel_flag = 1;
864  svq3->thirdpel_flag = 1;
865  svq3->unknown_flag = 0;
866 
867  /* prowl for the "SEQH" marker in the extradata */
868  extradata = (unsigned char *)avctx->extradata;
869  extradata_end = avctx->extradata + avctx->extradata_size;
870  if (extradata) {
871  for (m = 0; m + 8 < avctx->extradata_size; m++) {
872  if (!memcmp(extradata, "SEQH", 4)) {
873  marker_found = 1;
874  break;
875  }
876  extradata++;
877  }
878  }
879 
880  /* if a match was found, parse the extra data */
881  if (marker_found) {
882  GetBitContext gb;
883  int frame_size_code;
884 
885  size = AV_RB32(&extradata[4]);
886  if (size > extradata_end - extradata - 8)
887  return AVERROR_INVALIDDATA;
888  init_get_bits(&gb, extradata + 8, size * 8);
889 
890  /* 'frame size code' and optional 'width, height' */
891  frame_size_code = get_bits(&gb, 3);
892  switch (frame_size_code) {
893  case 0:
894  avctx->width = 160;
895  avctx->height = 120;
896  break;
897  case 1:
898  avctx->width = 128;
899  avctx->height = 96;
900  break;
901  case 2:
902  avctx->width = 176;
903  avctx->height = 144;
904  break;
905  case 3:
906  avctx->width = 352;
907  avctx->height = 288;
908  break;
909  case 4:
910  avctx->width = 704;
911  avctx->height = 576;
912  break;
913  case 5:
914  avctx->width = 240;
915  avctx->height = 180;
916  break;
917  case 6:
918  avctx->width = 320;
919  avctx->height = 240;
920  break;
921  case 7:
922  avctx->width = get_bits(&gb, 12);
923  avctx->height = get_bits(&gb, 12);
924  break;
925  }
926 
927  svq3->halfpel_flag = get_bits1(&gb);
928  svq3->thirdpel_flag = get_bits1(&gb);
929 
930  /* unknown fields */
931  skip_bits1(&gb);
932  skip_bits1(&gb);
933  skip_bits1(&gb);
934  skip_bits1(&gb);
935 
936  s->low_delay = get_bits1(&gb);
937 
938  /* unknown field */
939  skip_bits1(&gb);
940 
941  while (get_bits1(&gb))
942  skip_bits(&gb, 8);
943 
944  svq3->unknown_flag = get_bits1(&gb);
945  avctx->has_b_frames = !s->low_delay;
946  if (svq3->unknown_flag) {
947 #if CONFIG_ZLIB
948  unsigned watermark_width = svq3_get_ue_golomb(&gb);
949  unsigned watermark_height = svq3_get_ue_golomb(&gb);
950  int u1 = svq3_get_ue_golomb(&gb);
951  int u2 = get_bits(&gb, 8);
952  int u3 = get_bits(&gb, 2);
953  int u4 = svq3_get_ue_golomb(&gb);
954  unsigned long buf_len = watermark_width *
955  watermark_height * 4;
956  int offset = get_bits_count(&gb) + 7 >> 3;
957  uint8_t *buf;
958 
959  if (watermark_height > 0 &&
960  (uint64_t)watermark_width * 4 > UINT_MAX / watermark_height)
961  return -1;
962 
963  buf = av_malloc(buf_len);
964  av_log(avctx, AV_LOG_DEBUG, "watermark size: %dx%d\n",
965  watermark_width, watermark_height);
966  av_log(avctx, AV_LOG_DEBUG,
967  "u1: %x u2: %x u3: %x compressed data size: %d offset: %d\n",
968  u1, u2, u3, u4, offset);
969  if (uncompress(buf, &buf_len, extradata + 8 + offset,
970  size - offset) != Z_OK) {
971  av_log(avctx, AV_LOG_ERROR,
972  "could not uncompress watermark logo\n");
973  av_free(buf);
974  return -1;
975  }
976  svq3->watermark_key = ff_svq1_packet_checksum(buf, buf_len, 0);
977  svq3->watermark_key = svq3->watermark_key << 16 |
978  svq3->watermark_key;
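 /* Editor's note: the 16-bit SVQ1 packet checksum of the decompressed
  * watermark is replicated into both halves of the 32-bit key that
  * svq3_decode_slice_header() XORs into the slice data. */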
979  av_log(avctx, AV_LOG_DEBUG,
980  "watermark key %#x\n", svq3->watermark_key);
981  av_free(buf);
982 #else
983  av_log(avctx, AV_LOG_ERROR,
984  "this svq3 file contains watermark which need zlib support compiled in\n");
985  return -1;
986 #endif
987  }
988  }
989 
990  s->width = avctx->width;
991  s->height = avctx->height;
992 
993  if (ff_MPV_common_init(s) < 0)
994  return -1;
995 
996  h->b_stride = 4 * s->mb_width;
997 
998  if (ff_h264_alloc_tables(h) < 0) {
999  av_log(avctx, AV_LOG_ERROR, "svq3 memory allocation failed\n");
1000  return AVERROR(ENOMEM);
1001  }
1002  }
1003 
1004  return 0;
1005 }
1006 
1007 static int svq3_decode_frame(AVCodecContext *avctx, void *data,
1008  int *got_frame, AVPacket *avpkt)
1009 {
1010  const uint8_t *buf = avpkt->data;
1011  SVQ3Context *svq3 = avctx->priv_data;
1012  H264Context *h = &svq3->h;
1013  MpegEncContext *s = &h->s;
1014  int buf_size = avpkt->size;
1015  int m;
1016 
1017  /* special case for last picture */
1018  if (buf_size == 0) {
1019  if (s->next_picture_ptr && !s->low_delay) {
1020  *(AVFrame *) data = s->next_picture.f;
1021  s->next_picture_ptr = NULL;
1022  *got_frame = 1;
1023  }
1024  return 0;
1025  }
1026 
1027  init_get_bits(&s->gb, buf, 8 * buf_size);
1028 
1029  s->mb_x = s->mb_y = h->mb_xy = 0;
1030 
1031  if (svq3_decode_slice_header(avctx))
1032  return -1;
1033 
1034  s->pict_type = h->slice_type;
1035  s->picture_number = h->slice_num;
1036 
1037  if (avctx->debug & FF_DEBUG_PICT_INFO)
1038  av_log(h->s.avctx, AV_LOG_DEBUG,
1039  "%c hpel:%d, tpel:%d aqp:%d qp:%d, slice_num:%02X\n",
1041  svq3->halfpel_flag, svq3->thirdpel_flag,
1042  s->adaptive_quant, s->qscale, h->slice_num);
1043 
1044  /* for skipping the frame */
1045  s->current_picture.f.pict_type = s->pict_type;
1046  s->current_picture.f.key_frame = (s->pict_type == AV_PICTURE_TYPE_I);
1047 
1048  /* Skip B-frames if we do not have reference frames. */
1049  if (s->last_picture_ptr == NULL && s->pict_type == AV_PICTURE_TYPE_B)
1050  return 0;
1051  if (avctx->skip_frame >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B ||
1052  avctx->skip_frame >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I ||
1053  avctx->skip_frame >= AVDISCARD_ALL)
1054  return 0;
1055 
1056  if (s->next_p_frame_damaged) {
1057  if (s->pict_type == AV_PICTURE_TYPE_B)
1058  return 0;
1059  else
1060  s->next_p_frame_damaged = 0;
1061  }
1062 
1063  if (ff_h264_frame_start(h) < 0)
1064  return -1;
1065 
1066  if (s->pict_type == AV_PICTURE_TYPE_B) {
1067  h->frame_num_offset = (h->slice_num - h->prev_frame_num);
1068 
1069  if (h->frame_num_offset < 0)
1070  h->frame_num_offset += 256;
1071  if (h->frame_num_offset == 0 ||
1072  h->frame_num_offset >= h->prev_frame_num_offset) {
1073  av_log(h->s.avctx, AV_LOG_ERROR, "error in B-frame picture id\n");
1074  return -1;
1075  }
1076  } else {
1077  h->prev_frame_num = h->frame_num;
1078  h->frame_num = h->slice_num;
1079  h->prev_frame_num_offset = (h->frame_num - h->prev_frame_num);
1080 
1081  if (h->prev_frame_num_offset < 0)
1082  h->prev_frame_num_offset += 256;
1083  }
1084 
1085  for (m = 0; m < 2; m++) {
1086  int i;
1087  for (i = 0; i < 4; i++) {
1088  int j;
1089  for (j = -1; j < 4; j++)
1090  h->ref_cache[m][scan8[0] + 8 * i + j] = 1;
1091  if (i < 3)
1092  h->ref_cache[m][scan8[0] + 8 * i + j] = PART_NOT_AVAILABLE;
1093  }
1094  }
1095 
1096  for (s->mb_y = 0; s->mb_y < s->mb_height; s->mb_y++) {
1097  for (s->mb_x = 0; s->mb_x < s->mb_width; s->mb_x++) {
1098  unsigned mb_type;
1099  h->mb_xy = s->mb_x + s->mb_y * s->mb_stride;
1100 
1101  if ((get_bits_count(&s->gb) + 7) >= s->gb.size_in_bits &&
1102  ((get_bits_count(&s->gb) & 7) == 0 ||
1103  show_bits(&s->gb, -get_bits_count(&s->gb) & 7) == 0)) {
1104  skip_bits(&s->gb, svq3->next_slice_index - get_bits_count(&s->gb));
1105  s->gb.size_in_bits = 8 * buf_size;
1106 
1107  if (svq3_decode_slice_header(avctx))
1108  return -1;
1109 
1110  /* TODO: support s->mb_skip_run */
1111  }
1112 
1113  mb_type = svq3_get_ue_golomb(&s->gb);
1114 
1115  if (s->pict_type == AV_PICTURE_TYPE_I)
1116  mb_type += 8;
1117  else if (s->pict_type == AV_PICTURE_TYPE_B && mb_type >= 4)
1118  mb_type += 4;
1119  if (mb_type > 33 || svq3_decode_mb(svq3, mb_type)) {
1120  av_log(h->s.avctx, AV_LOG_ERROR,
1121  "error while decoding MB %d %d\n", s->mb_x, s->mb_y);
1122  return -1;
1123  }
1124 
1125  if (mb_type != 0)
1126  ff_h264_hl_decode_mb(h);
1127 
1128  if (s->pict_type != AV_PICTURE_TYPE_B && !s->low_delay)
1129  s->current_picture.f.mb_type[s->mb_x + s->mb_y * s->mb_stride] =
1130  (s->pict_type == AV_PICTURE_TYPE_P && mb_type < 8) ? (mb_type - 1) : -1;
1131  }
1132 
1133  ff_draw_horiz_band(s, 16 * s->mb_y, 16);
1134  }
1135 
1136  ff_MPV_frame_end(s);
1137 
1138  if (s->pict_type == AV_PICTURE_TYPE_B || s->low_delay)
1139  *(AVFrame *)data = s->current_picture.f;
1140  else
1141  *(AVFrame *)data = s->last_picture.f;
1142 
1143  /* Do not output the last pic after seeking. */
1144  if (s->last_picture_ptr || s->low_delay)
1145  *got_frame = 1;
1146 
1147  return buf_size;
1148 }
1149 
1150 static int svq3_decode_end(AVCodecContext *avctx)
1151 {
1152  SVQ3Context *svq3 = avctx->priv_data;
1153  H264Context *h = &svq3->h;
1154  MpegEncContext *s = &h->s;
1155 
1156  ff_h264_free_context(h);
1157 
1158  ff_MPV_common_end(s);
1159 
1160  return 0;
1161 }
1162 
1163 AVCodec ff_svq3_decoder = {
1164  .name = "svq3",
1165  .type = AVMEDIA_TYPE_VIDEO,
1166  .id = AV_CODEC_ID_SVQ3,
1167  .priv_data_size = sizeof(SVQ3Context),
1168  .init = svq3_decode_init,
1169  .close = svq3_decode_end,
1170  .decode = svq3_decode_frame,
1171  .capabilities = CODEC_CAP_DRAW_HORIZ_BAND |
1172  CODEC_CAP_DR1 |
1173  CODEC_CAP_DELAY,
1174  .long_name = NULL_IF_CONFIG_SMALL("Sorenson Vector Quantizer 3 / Sorenson Video 3 / SVQ3"),
1175  .pix_fmts = (const enum AVPixelFormat[]) { AV_PIX_FMT_YUVJ420P,
1176  AV_PIX_FMT_NONE},
1177 };