/*
* CINELERRA
* Copyright (C) 2008 Adam Williams <broadcast at earthling dot net>
- *
+ *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
- *
+ *
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
+ *
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
+ *
*/
#include "asset.h"
// M JPEG dependencies
-static double frame_rate_codes[] =
+static double frame_rate_codes[] =
{
0,
24000.0/1001.0,
vcommand_line.remove_all_objects();
}
-void FileMPEG::get_parameters(BC_WindowBase *parent_window,
- Asset *asset,
- BC_WindowBase* &format_window,
- int audio_options,
- int video_options)
+void FileMPEG::get_parameters(BC_WindowBase *parent_window,
+ Asset *asset, BC_WindowBase* &format_window,
+ int audio_options, int video_options, EDL *edl)
{
if(audio_options && asset->format == FILE_AMPEG)
{
cp += snprintf(cp,ep-cp, _("file path:%s\n"), path);
int64_t bytes = mpeg3_get_bytes(fd);
char string[BCTEXTLEN];
- sprintf(string,"%ld",bytes);
+ sprintf(string,"%jd",bytes);
Units::punctuate(string);
cp += snprintf(cp,ep-cp, _("size: %s"), string);
cp += snprintf(cp,ep-cp, _(" v%d %s %dx%d"), vtrk, cmodel, width, height);
double frame_rate = mpeg3_frame_rate(fd, vtrk);
int64_t frames = mpeg3_video_frames(fd, vtrk);
- cp += snprintf(cp,ep-cp, _(" (%5.2f), %ld frames"), frame_rate, frames);
+ cp += snprintf(cp,ep-cp, _(" (%5.2f), %jd frames"), frame_rate, frames);
if( frame_rate > 0 ) {
double secs = (double)frames / frame_rate;
cp += snprintf(cp,ep-cp, _(" (%0.3f secs)"),secs);
int sample_rate = mpeg3_sample_rate(fd, atrk);
cp += snprintf(cp,ep-cp, _(" ch%d (%d)"), channels, sample_rate);
int64_t samples = mpeg3_audio_samples(fd, atrk);
- cp += snprintf(cp,ep-cp, " %ld",samples);
+ cp += snprintf(cp,ep-cp, " %jd",samples);
int64_t nudge = mpeg3_get_audio_nudge(fd, atrk);
*cp++ = nudge >= 0 ? '+' : (nudge=-nudge, '-');
- cp += snprintf(cp,ep-cp, _("%ld samples"),nudge);
+ cp += snprintf(cp,ep-cp, _("%jd samples"),nudge);
if( sample_rate > 0 ) {
double secs = (double)(samples+nudge) / sample_rate;
cp += snprintf(cp,ep-cp, _(" (%0.3f secs)"),secs);
asset->video_data = mpeg3_has_video(fd);
if( !result && asset->video_data ) {
- asset->interlace_mode = BC_ILACE_MODE_UNDETECTED;
+//TODO: this is not as easy as just looking at headers.
+//most interlaced media is rendered as FRM, not TOP/BOT in coding ext hdrs.
+//currently, just using the assetedit menu to set the required result as needed.
+// if( asset->interlace_mode == ILACE_MODE_UNDETECTED )
+// asset->interlace_mode = mpeg3_detect_interlace(fd, 0);
if( !asset->layers ) {
asset->layers = mpeg3_total_vstreams(fd);
}
eprintf(_("Couldn't open %s: failed.\n"), asset->path);
}
}
-
+
if( !result && wr && asset->format == FILE_VMPEG ) {
// Heroine Virtual encoder
// this one is cinelerra-x.x.x/mpeg2enc
if(!result)
{
const char *exec_path = File::get_cinlib_path();
- sprintf(mjpeg_command, "%s/%s", exec_path, HVPEG_EXE);
+ snprintf(mjpeg_command, sizeof(mjpeg_command),
+ "%s/%s", exec_path, HVPEG_EXE);
append_vcommand_line(mjpeg_command);
if(asset->aspect_ratio > 0)
// this one is cinelerra-x.x.x/thirdparty/mjpegtools/mpeg2enc
{
const char *exec_path = File::get_cinlib_path();
- sprintf(mjpeg_command, "%s/%s -v 0 ", exec_path, MJPEG_EXE);
+ snprintf(mjpeg_command, sizeof(mjpeg_command),
+ "%s/%s -v 0 ", exec_path, MJPEG_EXE);
// Must disable interlacing if MPEG-1
switch (asset->vmpeg_preset)
case 2: asset->vmpeg_progressive = 1; break;
}
-// Be quiet
- strcat(mjpeg_command, " -v0");
-
char string[BCTEXTLEN];
// The current usage of mpeg2enc requires bitrate of 0 when quantization is fixed and
// quantization of 1 when bitrate is fixed. Perfectly intuitive.
if(asset->vmpeg_fix_bitrate)
{
- sprintf(string, " -b %d -q 1", asset->vmpeg_bitrate / 1000);
+ snprintf(string, sizeof(string),
+ " -b %d -q 1", asset->vmpeg_bitrate / 1000);
}
else
{
- sprintf(string, " -b 0 -q %d", asset->vmpeg_quantization);
+ snprintf(string, sizeof(string),
+ " -b 0 -q %d", asset->vmpeg_quantization);
}
- strcat(mjpeg_command, string);
-
-
-
-
-
+ strncat(mjpeg_command, string, sizeof(mjpeg_command));
// Aspect ratio
int aspect_ratio_code = -1;
// Square pixels
if(EQUIV((double)asset->width / asset->height, asset->aspect_ratio))
aspect_ratio_code = 1;
-
+
if(aspect_ratio_code < 0)
{
eprintf(_("Unsupported aspect ratio %f\n"), asset->aspect_ratio);
aspect_ratio_code = 2;
}
sprintf(string, " -a %d", aspect_ratio_code);
- strcat(mjpeg_command, string);
+ strncat(mjpeg_command, string, sizeof(mjpeg_command));
eprintf(_("Unsupported frame rate %f\n"), asset->frame_rate);
}
sprintf(string, " -F %d", frame_rate_code);
- strcat(mjpeg_command, string);
+ strncat(mjpeg_command, string, sizeof(mjpeg_command));
- strcat(mjpeg_command,
- asset->vmpeg_progressive ? " -I 0" : " -I 1");
-
+ strncat(mjpeg_command,
+ asset->vmpeg_progressive ? " -I 0" : " -I 1",
+ sizeof(mjpeg_command));
+
sprintf(string, " -M %d", file->cpus);
- strcat(mjpeg_command, string);
+ strncat(mjpeg_command, string, sizeof(mjpeg_command));
if(!asset->vmpeg_progressive)
{
- strcat(mjpeg_command, asset->vmpeg_field_order ? " -z b" : " -z t");
+ strncat(mjpeg_command,
+ asset->vmpeg_field_order ? " -z b" : " -z t",
+ sizeof(mjpeg_command));
}
- sprintf(string, " -f %d", asset->vmpeg_preset);
- strcat(mjpeg_command, string);
+ snprintf(string, sizeof(string), " -f %d", asset->vmpeg_preset);
+ strncat(mjpeg_command, string, sizeof(mjpeg_command));
- sprintf(string, " -g %d -G %d", asset->vmpeg_iframe_distance, asset->vmpeg_iframe_distance);
- strcat(mjpeg_command, string);
+ snprintf(string, sizeof(string),
+ " -g %d -G %d", asset->vmpeg_iframe_distance, asset->vmpeg_iframe_distance);
+ strncat(mjpeg_command, string, sizeof(mjpeg_command));
- if(asset->vmpeg_seq_codes) strcat(mjpeg_command, " -s");
+ if(asset->vmpeg_seq_codes)
+ strncat(mjpeg_command, " -s", sizeof(mjpeg_command));
- sprintf(string, " -R %d", CLAMP(asset->vmpeg_pframe_distance, 0, 2));
- strcat(mjpeg_command, string);
+ snprintf(string, sizeof(string),
+ " -R %d", CLAMP(asset->vmpeg_pframe_distance, 0, 2));
+ strncat(mjpeg_command, string, sizeof(mjpeg_command));
- sprintf(string, " -o '%s'", asset->path);
- strcat(mjpeg_command, string);
+ snprintf(string, sizeof(string), " -o '%s'", asset->path);
+ strncat(mjpeg_command, string, sizeof(mjpeg_command));
// lame_set_brate(lame_global, asset->ampeg_bitrate / 1000);
lame_set_brate(lame_global, asset->ampeg_bitrate);
lame_set_quality(lame_global, 0);
- lame_set_in_samplerate(lame_global,
+ lame_set_in_samplerate(lame_global,
asset->sample_rate);
lame_set_num_channels(lame_global,
asset->channels);
if( mpeg->get_video_info(track, pid, framerate, width, height) ) return 1;
if( pid < 0 || framerate <= 0 ) return 1;
double position = framenum / framerate;
-//printf("t%d/%03x f"_LD", %dx%d %dx%d\n",track,pid,framenum,mw,mh,width,height);
+//printf("t%d/%03x f%jd, %dx%d %dx%d\n",track,pid,framenum,mw,mh,width,height);
MWindow::commercials->get_frame(file, pid, position, tdat, mw, mh, width, height);
return 0;
}
// delete any existing toc files
char toc_file[BCTEXTLEN];
strcpy(toc_file, toc_path);
- remove(toc_file);
+ if( strcmp(toc_file, asset->path) )
+ remove(toc_file);
char *bp = strrchr(toc_file, '/');
if( !bp ) bp = toc_file;
char *sfx = strrchr(bp,'.');
sprintf(string, "%sETA: %jdm%jds",
progress_title, eta / 60, eta % 60);
progress.update_title(string, 1);
-// fprintf(stderr, "ETA: %dm%ds \r",
+// fprintf(stderr, "ETA: %dm%ds \r",
// bytes_processed * 100 / total_bytes,
// eta / 60, eta % 60);
// fflush(stdout);
vcommand_line.remove_all_objects();
if(twofp) {
- unsigned char opkt[1152*2];
+ unsigned char opkt[1152*2];
int ret = twolame_encode_flush(twopts, opkt, sizeof(opkt));
if( ret > 0 )
fwrite(opkt, 1, ret, twofp);
switch(driver)
{
case PLAYBACK_X11:
- return BC_RGB888;
+// return BC_RGB888;
+// the direct X11 color model requires scaling in the codec
+ return BC_BGR8888;
case PLAYBACK_X11_XV:
case PLAYBACK_ASYNCHRONOUS:
return zmpeg3_cmdl(asset->vmpeg_cmodel) > 0 ?
asset->vmpeg_cmodel : BC_RGB888;
case PLAYBACK_X11_GL:
return BC_YUV888;
- case PLAYBACK_LML:
- case PLAYBACK_BUZ:
- return BC_YUV422P;
case PLAYBACK_DV1394:
case PLAYBACK_FIREWIRE:
return BC_YUV422P;
- case VIDEO4LINUX:
case VIDEO4LINUX2:
return zmpeg3_cmdl(asset->vmpeg_cmodel) > 0 ?
asset->vmpeg_cmodel : BC_RGB888;
return BC_COMPRESSED;
case CAPTURE_YUYV_WEBCAM:
return BC_YUV422;
- case CAPTURE_BUZ:
- case CAPTURE_LML:
- return BC_YUV422;
case CAPTURE_FIREWIRE:
case CAPTURE_IEC61883:
return BC_YUV422P;
{
#if 0
if(!fd) return 1;
-
+
int channel, stream;
to_streamchannel(file->current_channel, stream, channel);
// verify colormodel supported in MPEG output
switch( output_cmodel ) {
case BC_YUV420P:
+ if( file->preferences->dvd_yuv420p_interlace &&
+ ( asset->interlace_mode == ILACE_MODE_TOP_FIRST ||
+ asset->interlace_mode == ILACE_MODE_BOTTOM_FIRST ) )
+ output_cmodel = BC_YUV420PI;
case BC_YUV422P:
break;
default:
return 1;
}
-
+
// Height depends on progressiveness
if(asset->vmpeg_progressive || asset->vmpeg_derivative == 1)
temp_h = (int)((asset->height + 15) / 16) * 16;
temp_h = (int)((asset->height + 31) / 32) * 32;
//printf("FileMPEG::write_frames 1\n");
-
+
// Only 1 layer is supported in MPEG output
for(int i = 0; i < 1; i++)
{
for(int j = 0; j < len && !result; j++)
{
VFrame *frame = frames[i][j];
-
-
-
+
+
+
if(asset->vmpeg_cmodel == BC_YUV422P)
{
if(frame->get_w() == temp_w &&
frame->get_h() == temp_h &&
frame->get_color_model() == output_cmodel)
{
- mpeg2enc_set_input_buffers(0,
+ mpeg2enc_set_input_buffers(0,
(char*)frame->get_y(),
(char*)frame->get_u(),
(char*)frame->get_v());
if(!temp_frame)
{
- temp_frame = new VFrame(0,
- -1,
- temp_w,
- temp_h,
- output_cmodel,
- -1);
+ temp_frame = new VFrame(temp_w, temp_h,
+ output_cmodel, 0);
}
- BC_CModels::transfer(temp_frame->get_rows(),
+ BC_CModels::transfer(temp_frame->get_rows(),
frame->get_rows(),
temp_frame->get_y(),
temp_frame->get_u(),
0,
temp_frame->get_w(),
temp_frame->get_h(),
- frame->get_color_model(),
+ frame->get_color_model(),
temp_frame->get_color_model(),
- 0,
+ 0,
frame->get_w(),
temp_frame->get_w());
- mpeg2enc_set_input_buffers(0,
+ mpeg2enc_set_input_buffers(0,
(char*)temp_frame->get_y(),
(char*)temp_frame->get_u(),
(char*)temp_frame->get_v());
//printf("FileMPEG::write_frames %d\n", __LINE__);sleep(1);
if(!temp_frame)
{
- temp_frame = new VFrame(0,
- -1,
- asset->width,
- asset->height,
- output_cmodel,
- -1);
+ temp_frame = new VFrame(asset->width, asset->height,
+ output_cmodel, 0);
}
-// printf("FileMPEG::write_frames %d temp_frame=%p %p %p %p frame=%p %p %p %p color_model=%p %p\n",
+// printf("FileMPEG::write_frames %d temp_frame=%p %p %p %p frame=%p %p %p %p color_model=%p %p\n",
// __LINE__,
// temp_frame,
// temp_frame->get_w(),
// frame->get_h(),
// temp_frame->get_color_model(),
// frame->get_color_model()); sleep(1);
- BC_CModels::transfer(temp_frame->get_rows(),
- frame->get_rows(),
- temp_frame->get_y(),
- temp_frame->get_u(),
- temp_frame->get_v(),
- frame->get_y(),
- frame->get_u(),
- frame->get_v(),
- 0,
- 0,
- frame->get_w(),
- frame->get_h(),
- 0,
- 0,
- temp_frame->get_w(),
- temp_frame->get_h(),
- frame->get_color_model(),
- temp_frame->get_color_model(),
- 0,
- frame->get_w(),
- temp_frame->get_w());
+ temp_frame->transfer_from(frame);
//printf("FileMPEG::write_frames %d\n", __LINE__);sleep(1);
mjpeg_y = temp_frame->get_y();
}
int FileMPEG::zmpeg3_cmdl(int colormodel)
-{
+{
switch( colormodel ) {
case BC_BGR888: return zmpeg3_t::cmdl_BGR888;
case BC_BGR8888: return zmpeg3_t::cmdl_BGRA8888;
case BC_YUVA8888: return zmpeg3_t::cmdl_YUVA8888;
}
return -1;
-}
+}
int FileMPEG::bc_colormodel(int cmdl)
{
int stream_cmdl = mpeg3_colormodel(fd,file->current_layer);
int stream_color_model = bc_colormodel(stream_cmdl);
int frame_color_model = frame->get_color_model();
- int frame_cmdl = zmpeg3_cmdl(frame_color_model);
+ int frame_cmdl = asset->interlace_mode == ILACE_MODE_NOTINTERLACED ?
+ zmpeg3_cmdl(frame_color_model) : -1;
mpeg3_show_subtitle(fd, file->current_layer, file->playback_subtitle);
-
switch( frame_color_model ) { // check for direct copy
case BC_YUV420P:
+ if( frame_cmdl < 0 ) break;
case BC_YUV422P:
if( stream_color_model == frame_color_model &&
width == frame->get_w() && height == frame->get_h() ) {
for( int i=0; i<uvh; ++i, rp+=uvw ) rows[n++] = rp;
}
- mpeg3_read_frame(fd,
+ mpeg3_read_frame(fd,
rows, /* start of each output row */
0, 0, width, height, /* input box */
frame->get_w(), /* Dimensions of output_rows */
- frame->get_h(),
+ frame->get_h(),
frame_cmdl,
file->current_layer);
return result;
char *y, *u, *v;
mpeg3_read_yuvframe_ptr(fd, &y, &u, &v, file->current_layer);
if( y && u && v ) {
+ if( stream_color_model == BC_YUV420P &&
+ file->preferences->dvd_yuv420p_interlace && (
+ asset->interlace_mode == ILACE_MODE_TOP_FIRST ||
+ asset->interlace_mode == ILACE_MODE_BOTTOM_FIRST ) )
+ stream_color_model = BC_YUV420PI;
BC_CModels::transfer(frame->get_rows(), 0,
- frame->get_y(),
- frame->get_u(),
- frame->get_v(),
- (unsigned char*)y,
- (unsigned char*)u,
- (unsigned char*)v,
- 0, 0, width, height,
- 0, 0, frame->get_w(), frame->get_h(),
- stream_color_model,
- frame_color_model,
- 0,
- width,
- frame->get_w());
+ frame->get_y(), frame->get_u(), frame->get_v(),
+ (unsigned char*)y, (unsigned char*)u, (unsigned char*)v,
+ 0,0, width,height, 0,0, frame->get_w(),frame->get_h(),
+ stream_color_model, frame_color_model, 0, width, frame->get_w());
}
return result;
//printf("FileMPEG::read_samples 1 current_sample=%jd len=%jd channel=%d\n", file->current_sample, len, channel);
- mpeg3_set_sample(fd,
+ mpeg3_set_sample(fd,
file->current_sample,
stream);
- mpeg3_read_audio_d(fd,
+ mpeg3_read_audio_d(fd,
buffer, /* Pointer to pre-allocated buffer of doubles */
channel, /* Channel to decode */
len, /* Number of samples to decode */