bunch of small fixes, add msg.txt to about prefs
[goodguy/history.git] / cinelerra-5.0 / quicktime / qtffmpeg.c
1 #include "colormodels.h"
2 #include "funcprotos.h"
3 #include "quicktime.h"
4 #include "qtffmpeg.h"
5 #include "qtprivate.h"
6
7 #include <string.h>
8 // FFMPEG front end for quicktime.
9
// Nonzero once av_register_all() has run; written only under ffmpeg_lock.
int ffmpeg_initialized = 0;
// Serializes ffmpeg global registration and codec open/close across threads.
pthread_mutex_t ffmpeg_lock = PTHREAD_MUTEX_INITIALIZER;
12
13 quicktime_ffmpeg_t *quicktime_new_ffmpeg(int cpus,
14                  int fields, int ffmpeg_id, int w, int h,
15                  quicktime_stsd_table_t *stsd_table)
16 {
17         quicktime_ffmpeg_t *ptr = calloc(1, sizeof(quicktime_ffmpeg_t));
18         quicktime_esds_t *esds = &stsd_table->esds;
19         quicktime_avcc_t *avcc = &stsd_table->avcc;
20         int i;
21
22         ptr->fields = fields;
23         ptr->width = w;
24         ptr->height = h;
25         ptr->ffmpeg_id = ffmpeg_id;
26
27         if( ffmpeg_id == CODEC_ID_SVQ1 ) {
28                 ptr->width_i = quicktime_quantize32(ptr->width);
29                 ptr->height_i = quicktime_quantize32(ptr->height);
30         }
31         else {
32                 ptr->width_i = quicktime_quantize16(ptr->width);
33                 ptr->height_i = quicktime_quantize16(ptr->height);
34         }
35
36         pthread_mutex_lock(&ffmpeg_lock);
37         if( !ffmpeg_initialized ) {
38                 ffmpeg_initialized = 1;
39                 av_register_all();
40         }
41
42         for( i=0; i<fields; ++i ) {
43                 ptr->decoder[i] = avcodec_find_decoder(ptr->ffmpeg_id);
44                 if( !ptr->decoder[i] ) {
45                         printf("quicktime_new_ffmpeg: avcodec_find_decoder returned NULL.\n");
46                         quicktime_delete_ffmpeg(ptr);
47                         return 0;
48                 }
49
50                 AVCodecContext *context = avcodec_alloc_context3(ptr->decoder[i]);
51                 ptr->decoder_context[i] = context;
52                 static char fake_data[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
53 //              static unsigned char extra_data[] = {
54 //                      0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01, 0x20,
55 //                      0x00, 0xc8, 0x88, 0xba, 0x98, 0x60, 0xfa, 0x67,
56 //                      0x80, 0x91, 0x02, 0x83, 0x1f };
57                 context->width = ptr->width_i;
58                 context->height = ptr->height_i;
59 //              context->width = w;
60 //              context->height = h;
61                 context->extradata = (unsigned char *) fake_data;
62                 context->extradata_size = 0;
63                 if( esds->mpeg4_header && esds->mpeg4_header_size ) {
64                         context->extradata = (unsigned char *) esds->mpeg4_header;
65                         context->extradata_size = esds->mpeg4_header_size;
66                 }
67                 else if( avcc->data && avcc->data_size ) {
68                         context->extradata = (unsigned char *) avcc->data;
69                         context->extradata_size = avcc->data_size;
70                 }
71                 int result = -1;
72                 if( cpus > 1 ) {
73                         context->thread_count = cpus;
74                         result = avcodec_open2(context, ptr->decoder[i], 0);
75                 }
76                 if( result < 0 ) {
77                         context->thread_count = 1;
78                         result = avcodec_open2(context, ptr->decoder[i], 0);
79                 }
80                 if( result < 0 ) {
81                         printf("quicktime_new_ffmpeg: avcodec_open failed.\n");
82                         quicktime_delete_ffmpeg(ptr);
83                         ptr = NULL;
84                         break;
85                 }
86
87                 ptr->last_frame[i] = -1;
88         }
89
90         pthread_mutex_unlock(&ffmpeg_lock);
91         return ptr;
92 }
93
94 void quicktime_delete_ffmpeg(quicktime_ffmpeg_t *ptr)
95 {
96         int i;
97         if( !ptr ) return;
98         pthread_mutex_lock(&ffmpeg_lock);
99         for( i=0; i<ptr->fields; ++i ) {
100                 if( !ptr->decoder_context[i] ) continue;
101                 avcodec_close(ptr->decoder_context[i]);
102                 free(ptr->decoder_context[i]);
103         }
104         pthread_mutex_unlock(&ffmpeg_lock);
105
106         if(ptr->temp_frame) free(ptr->temp_frame);
107         if(ptr->work_buffer) free(ptr->work_buffer);
108         free(ptr);
109 }
110
// Reset a reused, embedded AVFrame to pristine defaults before handing it
// to the decoder -- a stand-in for the removed avcodec_get_frame_defaults().
// The order matters: zero the struct first, then av_frame_unref() restores
// the non-zero field defaults (with all buf pointers already NULL it frees
// nothing).  NOTE(review): memset-before-unref would leak any AVBufferRefs
// the frame still held; this relies on these frames never owning refs in
// the avcodec_decode_video2() path -- confirm against the ffmpeg version used.
static void frame_defaults(AVFrame *frame)
{
        memset(frame, 0, sizeof(AVFrame));
        av_frame_unref(frame);
}
117
// Read one compressed frame from the file and push it through the ffmpeg
// decoder for the given field.  Returns 0 when a picture was produced
// (left in ffmpeg->picture[current_field]), nonzero otherwise.
static int decode_wrapper(quicktime_t *file,
                 quicktime_video_map_t *vtrack, quicktime_ffmpeg_t *ffmpeg,
                 int frame_number, int current_field, int track, int drop_it)
{
        quicktime_trak_t *trak = vtrack->track;
        quicktime_stsd_table_t *stsd_table = &trak->mdia.minf.stbl.stsd.table[0];
        int got_picture = 0, result = 0;
// The mpeg4 global header is prepended only for the very first frame.
        int header_bytes = !frame_number ? stsd_table->esds.mpeg4_header_size : 0;
        int bytes;  char *data;
        quicktime_set_video_position(file, frame_number, track);
        bytes = quicktime_frame_size(file, frame_number, track) + header_bytes;

// Grow-only work buffer: reallocate when the current one is too small.
        if( ffmpeg->work_buffer && ffmpeg->buffer_size < bytes ) {
                free(ffmpeg->work_buffer);  ffmpeg->work_buffer = 0;
        }
        if( !ffmpeg->work_buffer ) {
                ffmpeg->buffer_size = bytes;
// +100 slack bytes; decoders may read slightly past the packet end.
                ffmpeg->work_buffer = calloc(1, ffmpeg->buffer_size + 100);
        }
        data = (char *)ffmpeg->work_buffer;
        if( header_bytes ) {
                memcpy(data, stsd_table->esds.mpeg4_header, header_bytes);
                data += header_bytes;  bytes -= header_bytes;
                header_bytes = 0;
        }

// Short read: mark failure but still fall through to the (skipped) loop.
        if( !quicktime_read_data(file, data, bytes) )
                result = -1;

        AVPacket avpkt;
        av_init_packet(&avpkt);
        avpkt.data = ffmpeg->work_buffer;
// NOTE(review): when a header was prepended, avpkt.data starts at the
// header but avpkt.size is only the frame payload, so the packet's last
// header_bytes of payload are cut off -- looks off; confirm intent.
        avpkt.size = bytes;
        avpkt.flags = AV_PKT_FLAG_KEY;

// Feed the packet until a picture appears or the packet is consumed.
        while( !result && !got_picture && avpkt.size > 0 ) {
                ffmpeg->decoder_context[current_field]->skip_frame =
                        drop_it ?  AVDISCARD_NONREF : AVDISCARD_DEFAULT;
                frame_defaults(&ffmpeg->picture[current_field]);
                ffmpeg->decoder_context[current_field]->workaround_bugs = FF_BUG_NO_PADDING;
                result = avcodec_decode_video2(ffmpeg->decoder_context[current_field],
                                 &ffmpeg->picture[current_field], &got_picture, &avpkt);
                if( result < 0 ) break;
// avcodec_decode_video2 returns bytes consumed; advance through the packet.
                avpkt.data += result;
                avpkt.size -= result;
        }

// Clear the x87/MMX state in case the codec used MMX without emms.
#ifdef ARCH_X86
        asm("emms");
#endif
        return !got_picture;
}
170
171 // Get amount chroma planes are downsampled from luma plane.
172 // Used for copying planes into cache.
173 static int get_chroma_factor(quicktime_ffmpeg_t *ffmpeg, int current_field)
174 {
175         switch(ffmpeg->decoder_context[current_field]->pix_fmt)
176         {
177                 case PIX_FMT_YUV420P:  return 4;
178                 case PIX_FMT_YUVJ420P: return 4;
179                 case PIX_FMT_YUYV422:  return 2;
180                 case PIX_FMT_YUV422P:  return 2;
181                 case PIX_FMT_YUV410P:  return 9;
182                 default:
183                         fprintf(stderr, "get_chroma_factor: unrecognized color model %d\n",
184                                 ffmpeg->decoder_context[current_field]->pix_fmt);
185                         break;
186         }
187         return 9;
188 }
189
// Decode the track's current frame into row_pointers, converting from the
// decoder's pixel format to file->color_model via cmodel_transfer().
// Handles seeking by walking forward from the nearest usable keyframe and
// caching intermediate frames.  Returns the last decode result (0 on success).
int quicktime_ffmpeg_decode(quicktime_ffmpeg_t * ffmpeg,
                quicktime_t *file, unsigned char **row_pointers, int track)
{
        quicktime_video_map_t *vtrack = &(file->vtracks[track]);
// Interlaced sources interleave fields across frame numbers.
        int current_field = vtrack->current_position % ffmpeg->fields;
        int input_cmodel = BC_TRANSPARENCY;
        int i, seeking_done = 0;
// Cache hit: quicktime_get_frame fills the picture's plane pointers directly.
        int result = quicktime_get_frame(vtrack->frame_cache, vtrack->current_position,
                         &ffmpeg->picture[current_field].data[0],
                         &ffmpeg->picture[current_field].data[1],
                         &ffmpeg->picture[current_field].data[2]);

        if( !result ) {
// First use of this field's decoder: prime it with one frame.
// NOTE(review): frame_number passed here is current_field, not the current
// position -- presumably to decode that field's first frame; confirm.
                if( ffmpeg->last_frame[current_field] == -1 && ffmpeg->ffmpeg_id != CODEC_ID_H264 ) {
                        int current_frame = vtrack->current_position;
                        result = decode_wrapper(file, vtrack, ffmpeg,
                                        current_field, current_field, track, 0);
                        quicktime_set_video_position(file, current_frame, track);
                        ffmpeg->last_frame[current_field] = current_field;
                }

// Non-sequential access on a keyframed stream: walk forward from a keyframe.
                if( quicktime_has_keyframes(file, track) &&
                    vtrack->current_position != ffmpeg->last_frame[current_field] + ffmpeg->fields &&
                    vtrack->current_position != ffmpeg->last_frame[current_field]) {
                        int frame1;
                        int first_frame;
                        int frame2 = vtrack->current_position;
                        int current_frame = frame2;

// Drop the cache unless the very next frame is already cached.
                        if( !quicktime_has_frame(vtrack->frame_cache, vtrack->current_position + 1) )
                                quicktime_reset_cache(vtrack->frame_cache);

// Find the nearest earlier keyframe that belongs to this field.
                        frame1 = current_frame;
                        do {
                                frame1 = quicktime_get_keyframe_before(file, frame1 - 1, track);
                        } while( frame1 > 0 && (frame1 % ffmpeg->fields) != current_field );

// Disabled experiment: step back one extra keyframe for mpeg4.
                        if( 0 /* frame1 > 0 && ffmpeg->ffmpeg_id == CODEC_ID_MPEG4 */ ) {
                                do {
                                        frame1 = quicktime_get_keyframe_before(file, frame1 - 1, track);
                                } while( frame1 > 0 && (frame1 & ffmpeg->fields) != current_field );
                        }

// If we are already past that keyframe, just continue from the last frame.
                        if( frame1 < ffmpeg->last_frame[current_field] &&
                            frame2 > ffmpeg->last_frame[current_field]) {
                                frame1 = ffmpeg->last_frame[current_field] + ffmpeg->fields;
                        }
/*
 * printf("quicktime_ffmpeg_decode 2 last_frame=%d frame1=%d frame2=%d\n",
 * ffmpeg->last_frame[current_field], frame1, frame2);
 */
// Decode forward to the target, caching every intermediate picture.
                        first_frame = frame1;
                        while( frame1 <= frame2 ) {
                                result = decode_wrapper(file, vtrack, ffmpeg,
                                        frame1, current_field, track, 0 /* (frame1 < frame2) */ );
                                if( ffmpeg->picture[current_field].data[0] && frame1 > first_frame ) {
                                        int y_size = ffmpeg->picture[current_field].linesize[0] * ffmpeg->height_i;
                                        int u_size = y_size / get_chroma_factor(ffmpeg, current_field);
                                        int v_size = y_size / get_chroma_factor(ffmpeg, current_field);
                                        quicktime_put_frame(vtrack->frame_cache, frame1,
                                                ffmpeg->picture[current_field].data[0],
                                                ffmpeg->picture[current_field].data[1],
                                                ffmpeg->picture[current_field].data[2],
                                                y_size, u_size, v_size);
                                }

                                frame1 += ffmpeg->fields;
                        }

                        vtrack->current_position = frame2;
                        seeking_done = 1;
                }

// Sequential playback: just decode the current frame.
                if (!seeking_done && vtrack->current_position != ffmpeg->last_frame[current_field]) {
                        result = decode_wrapper(file, vtrack, ffmpeg,
                                vtrack->current_position, current_field, track, 0);
                }

                ffmpeg->last_frame[current_field] = vtrack->current_position;
        }

// Map the decoder's pixel format to the internal colormodel id.
        switch (ffmpeg->decoder_context[current_field]->pix_fmt) {
        case PIX_FMT_YUV420P:  input_cmodel = BC_YUV420P; break;
        case PIX_FMT_YUYV422:  input_cmodel = BC_YUV422;  break;
        case PIX_FMT_YUVJ420P: input_cmodel = BC_YUV420P; break;
        case PIX_FMT_YUV422P:  input_cmodel = BC_YUV422P; break;
        case PIX_FMT_YUV410P:  input_cmodel = BC_YUV9P;   break;
        default:
                input_cmodel = 0;
// No picture decoded yet: nothing to complain about.
                if (!ffmpeg->picture[current_field].data[0])
                        break;
                fprintf(stderr, "quicktime_ffmpeg_decode: unrecognized color model %d\n",
                                ffmpeg->decoder_context[current_field].pix_fmt);
                break;
        }

// Color-convert the decoded picture into the caller's rows.
        if (ffmpeg->picture[current_field].data[0]) {
                AVCodecContext *ctx = ffmpeg->decoder_context[current_field];
                int width = ctx->width, height = ctx->height;
                AVFrame *picture = &ffmpeg->picture[current_field];
                unsigned char *data = picture->data[0];
// Build per-row pointers into the luma plane for packed-format transfers.
// NOTE(review): rows are strided by pixelsize*width, not linesize[0] --
// presumably only used for packed formats; verify against cmodel_transfer.
                unsigned char **input_rows = malloc(sizeof(unsigned char *) * height);
                int line_sz = cmodel_calculate_pixelsize(input_cmodel) * width;
                for( i=0; i<height; ++i ) input_rows[i] = data + i * line_sz;
                cmodel_transfer(row_pointers, input_rows,
                                row_pointers[0], row_pointers[1], row_pointers[2],
                                picture->data[0], picture->data[1], picture->data[2],
                                file->in_x, file->in_y, file->in_w, file->in_h,
                                0, 0, file->out_w, file->out_h,
                                input_cmodel, file->color_model,
                                0, /* BC_RGBA8888 to non-alpha background color */
                                ffmpeg->picture[current_field].linesize[0],
                                ffmpeg->width); /* For planar use the luma rowspan */
                free(input_rows);
        }

        return result;
}
308
309
/* assumes 16-bit, interleaved data */
/* always moves buffer */
// Compatibility shim over avcodec_decode_audio4():
//   samples        - output buffer for decoded PCM
//   frame_size_ptr - in: capacity of samples in bytes; out: bytes decoded
//                    (0 when no frame was produced)
// Returns the avcodec_decode_audio4 result (bytes of input consumed, or <0),
// or -1 on allocation failure / undersized output buffer.
// NOTE(review): for planar sample formats only extended_data[0] (one plane)
// is copied, yet *frame_size_ptr reports the full data_size for all
// channels -- matches the "interleaved only" assumption above; confirm no
// planar codec reaches this path.
int quicktime_decode_audio3(
                AVCodecContext *avctx, int16_t *samples,
                int *frame_size_ptr, AVPacket *avpkt)
{
        int ret, got_frame = 0;
        AVFrame *frame = av_frame_alloc();
        if (!frame) return -1;

        ret = avcodec_decode_audio4(avctx, frame, &got_frame, avpkt);

        if( ret >= 0 && got_frame ) {
                int plane_size;
// data_size covers all channels; plane_size is one plane's byte count
// (equal to data_size for interleaved formats).
                int data_size = av_samples_get_buffer_size(&plane_size, avctx->channels,
                                           frame->nb_samples, avctx->sample_fmt, 1);
                if( *frame_size_ptr < data_size ) {
                        printf("quicktime_decode_audio3: output buffer size is too small for "
                                "the current frame (%d < %d)\n", *frame_size_ptr, data_size);
                        av_frame_free(&frame);
                        return -1;
                }
                memcpy(samples, frame->extended_data[0], plane_size);
                *frame_size_ptr = data_size;
        } else {
                *frame_size_ptr = 0;
        }
        av_frame_free(&frame);
        return ret;
}
340